  1. /*
  2. * Driver for Cirrus Logic EP93xx SPI controller.
  3. *
  4. * Copyright (C) 2010-2011 Mika Westerberg
  5. *
  6. * Explicit FIFO handling code was inspired by amba-pl022 driver.
  7. *
  8. * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
  9. *
  10. * For more information about the SPI controller see documentation on Cirrus
  11. * Logic web site:
  12. * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License version 2 as
  16. * published by the Free Software Foundation.
  17. */
  18. #include <linux/io.h>
  19. #include <linux/clk.h>
  20. #include <linux/err.h>
  21. #include <linux/delay.h>
  22. #include <linux/device.h>
  23. #include <linux/dmaengine.h>
  24. #include <linux/bitops.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/module.h>
  27. #include <linux/platform_device.h>
  28. #include <linux/sched.h>
  29. #include <linux/scatterlist.h>
  30. #include <linux/spi/spi.h>
  31. #include <linux/platform_data/dma-ep93xx.h>
  32. #include <linux/platform_data/spi-ep93xx.h>
/* SSP register offsets and bit definitions */

/* Control register 0: frame size (DSS), mode (CPOL/CPHA) and clock rate */
#define SSPCR0			0x0000
#define SSPCR0_MODE_SHIFT	6
#define SSPCR0_SCR_SHIFT	8

/* Control register 1: interrupt enables and port enable */
#define SSPCR1			0x0004
#define SSPCR1_RIE		BIT(0)
#define SSPCR1_TIE		BIT(1)
#define SSPCR1_RORIE		BIT(2)
#define SSPCR1_LBM		BIT(3)
#define SSPCR1_SSE		BIT(4)
#define SSPCR1_MS		BIT(5)
#define SSPCR1_SOD		BIT(6)

/* Data register: reads/writes go through the RX/TX FIFOs */
#define SSPDR			0x0008

/* Status register */
#define SSPSR			0x000c
#define SSPSR_TFE		BIT(0)
#define SSPSR_TNF		BIT(1)
#define SSPSR_RNE		BIT(2)	/* RX FIFO not empty */
#define SSPSR_RFF		BIT(3)
#define SSPSR_BSY		BIT(4)

/* Clock prescale register */
#define SSPCPSR			0x0010

/* Interrupt identification register (write clears the overrun interrupt) */
#define SSPIIR			0x0014
#define SSPIIR_RIS		BIT(0)
#define SSPIIR_TIS		BIT(1)
#define SSPIIR_RORIS		BIT(2)
#define SSPICR			SSPIIR

/* timeout in milliseconds */
#define SPI_TIMEOUT		5

/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8
/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register (used by the DMA engine)
 * @wait: wait here until given transfer is completed
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte offset in the transfer to transmit
 * @rx: current byte offset in the transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 */
struct ep93xx_spi {
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	struct completion		wait;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};
/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @ops: private chip operations (board-supplied setup/cleanup/cs_control)
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	struct ep93xx_spi_chip_ops	*ops;
};

/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)
/* 8-bit register write (control/status registers) */
static void ep93xx_spi_write_u8(const struct ep93xx_spi *espi,
				u16 reg, u8 value)
{
	writeb(value, espi->regs_base + reg);
}

/* 8-bit register read */
static u8 ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
	return readb(spi->regs_base + reg);
}

/* 16-bit register write (SSPCR0 and 16-bit frames via SSPDR) */
static void ep93xx_spi_write_u16(const struct ep93xx_spi *espi,
				 u16 reg, u16 value)
{
	writew(value, espi->regs_base + reg);
}

/* 16-bit register read */
static u16 ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
	return readw(spi->regs_base + reg);
}
  129. static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
  130. {
  131. u8 regval;
  132. int err;
  133. err = clk_enable(espi->clk);
  134. if (err)
  135. return err;
  136. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  137. regval |= SSPCR1_SSE;
  138. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  139. return 0;
  140. }
  141. static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
  142. {
  143. u8 regval;
  144. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  145. regval &= ~SSPCR1_SSE;
  146. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  147. clk_disable(espi->clk);
  148. }
  149. static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
  150. {
  151. u8 regval;
  152. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  153. regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  154. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  155. }
  156. static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
  157. {
  158. u8 regval;
  159. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  160. regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  161. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  162. }
  163. /**
  164. * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
  165. * @espi: ep93xx SPI controller struct
  166. * @rate: desired SPI output clock rate
  167. * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
  168. * @div_scr: pointer to return the scr divider
  169. */
  170. static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
  171. u32 rate, u8 *div_cpsr, u8 *div_scr)
  172. {
  173. struct spi_master *master = platform_get_drvdata(espi->pdev);
  174. unsigned long spi_clk_rate = clk_get_rate(espi->clk);
  175. int cpsr, scr;
  176. /*
  177. * Make sure that max value is between values supported by the
  178. * controller. Note that minimum value is already checked in
  179. * ep93xx_spi_transfer_one_message().
  180. */
  181. rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
  182. /*
  183. * Calculate divisors so that we can get speed according the
  184. * following formula:
  185. * rate = spi_clock_rate / (cpsr * (1 + scr))
  186. *
  187. * cpsr must be even number and starts from 2, scr can be any number
  188. * between 0 and 255.
  189. */
  190. for (cpsr = 2; cpsr <= 254; cpsr += 2) {
  191. for (scr = 0; scr <= 255; scr++) {
  192. if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
  193. *div_scr = (u8)scr;
  194. *div_cpsr = (u8)cpsr;
  195. return 0;
  196. }
  197. }
  198. }
  199. return -EINVAL;
  200. }
  201. static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
  202. {
  203. struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
  204. int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
  205. if (chip->ops && chip->ops->cs_control)
  206. chip->ops->cs_control(spi, value);
  207. }
  208. /**
  209. * ep93xx_spi_setup() - setup an SPI device
  210. * @spi: SPI device to setup
  211. *
  212. * This function sets up SPI device mode, speed etc. Can be called multiple
  213. * times for a single device. Returns %0 in case of success, negative error in
  214. * case of failure. When this function returns success, the device is
  215. * deselected.
  216. */
  217. static int ep93xx_spi_setup(struct spi_device *spi)
  218. {
  219. struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
  220. struct ep93xx_spi_chip *chip;
  221. chip = spi_get_ctldata(spi);
  222. if (!chip) {
  223. dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
  224. spi->modalias);
  225. chip = kzalloc(sizeof(*chip), GFP_KERNEL);
  226. if (!chip)
  227. return -ENOMEM;
  228. chip->spi = spi;
  229. chip->ops = spi->controller_data;
  230. if (chip->ops && chip->ops->setup) {
  231. int ret = chip->ops->setup(spi);
  232. if (ret) {
  233. kfree(chip);
  234. return ret;
  235. }
  236. }
  237. spi_set_ctldata(spi, chip);
  238. }
  239. ep93xx_spi_cs_control(spi, false);
  240. return 0;
  241. }
  242. /**
  243. * ep93xx_spi_cleanup() - cleans up master controller specific state
  244. * @spi: SPI device to cleanup
  245. *
  246. * This function releases master controller specific state for given @spi
  247. * device.
  248. */
  249. static void ep93xx_spi_cleanup(struct spi_device *spi)
  250. {
  251. struct ep93xx_spi_chip *chip;
  252. chip = spi_get_ctldata(spi);
  253. if (chip) {
  254. if (chip->ops && chip->ops->cleanup)
  255. chip->ops->cleanup(spi);
  256. spi_set_ctldata(spi, NULL);
  257. kfree(chip);
  258. }
  259. }
  260. /**
  261. * ep93xx_spi_chip_setup() - configures hardware according to given @chip
  262. * @espi: ep93xx SPI controller struct
  263. * @chip: chip specific settings
  264. * @speed_hz: transfer speed
  265. * @bits_per_word: transfer bits_per_word
  266. */
  267. static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
  268. const struct ep93xx_spi_chip *chip,
  269. u32 speed_hz, u8 bits_per_word)
  270. {
  271. u8 dss = bits_per_word_to_dss(bits_per_word);
  272. u8 div_cpsr = 0;
  273. u8 div_scr = 0;
  274. u16 cr0;
  275. int err;
  276. err = ep93xx_spi_calc_divisors(espi, speed_hz, &div_cpsr, &div_scr);
  277. if (err)
  278. return err;
  279. cr0 = div_scr << SSPCR0_SCR_SHIFT;
  280. cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
  281. cr0 |= dss;
  282. dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
  283. chip->spi->mode, div_cpsr, div_scr, dss);
  284. dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);
  285. ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
  286. ep93xx_spi_write_u16(espi, SSPCR0, cr0);
  287. return 0;
  288. }
  289. static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
  290. {
  291. if (t->bits_per_word > 8) {
  292. u16 tx_val = 0;
  293. if (t->tx_buf)
  294. tx_val = ((u16 *)t->tx_buf)[espi->tx];
  295. ep93xx_spi_write_u16(espi, SSPDR, tx_val);
  296. espi->tx += sizeof(tx_val);
  297. } else {
  298. u8 tx_val = 0;
  299. if (t->tx_buf)
  300. tx_val = ((u8 *)t->tx_buf)[espi->tx];
  301. ep93xx_spi_write_u8(espi, SSPDR, tx_val);
  302. espi->tx += sizeof(tx_val);
  303. }
  304. }
  305. static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
  306. {
  307. if (t->bits_per_word > 8) {
  308. u16 rx_val;
  309. rx_val = ep93xx_spi_read_u16(espi, SSPDR);
  310. if (t->rx_buf)
  311. ((u16 *)t->rx_buf)[espi->rx] = rx_val;
  312. espi->rx += sizeof(rx_val);
  313. } else {
  314. u8 rx_val;
  315. rx_val = ep93xx_spi_read_u8(espi, SSPDR);
  316. if (t->rx_buf)
  317. ((u8 *)t->rx_buf)[espi->rx] = rx_val;
  318. espi->rx += sizeof(rx_val);
  319. }
  320. }
  321. /**
  322. * ep93xx_spi_read_write() - perform next RX/TX transfer
  323. * @espi: ep93xx SPI controller struct
  324. *
  325. * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
  326. * called several times, the whole transfer will be completed. Returns
  327. * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
  328. *
  329. * When this function is finished, RX FIFO should be empty and TX FIFO should be
  330. * full.
  331. */
  332. static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
  333. {
  334. struct spi_message *msg = espi->current_msg;
  335. struct spi_transfer *t = msg->state;
  336. /* read as long as RX FIFO has frames in it */
  337. while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
  338. ep93xx_do_read(espi, t);
  339. espi->fifo_level--;
  340. }
  341. /* write as long as TX FIFO has room */
  342. while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
  343. ep93xx_do_write(espi, t);
  344. espi->fifo_level++;
  345. }
  346. if (espi->rx == t->len)
  347. return 0;
  348. return -EINPROGRESS;
  349. }
  350. static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
  351. {
  352. /*
  353. * Now everything is set up for the current transfer. We prime the TX
  354. * FIFO, enable interrupts, and wait for the transfer to complete.
  355. */
  356. if (ep93xx_spi_read_write(espi)) {
  357. ep93xx_spi_enable_interrupts(espi);
  358. wait_for_completion(&espi->wait);
  359. }
  360. }
/**
 * ep93xx_spi_dma_prepare() - prepares a DMA transfer
 * @espi: ep93xx SPI controller struct
 * @dir: DMA transfer direction
 *
 * Function configures the DMA, maps the buffer and prepares the DMA
 * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
 * in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
	struct spi_transfer *t = espi->current_msg->state;
	struct dma_async_tx_descriptor *txd;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config conf;
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct dma_chan *chan;
	const void *buf, *pbuf;
	size_t len = t->len;
	int i, ret, nents;

	/* DMA bus width must match the frame size of this transfer */
	if (t->bits_per_word > 8)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;

	memset(&conf, 0, sizeof(conf));
	conf.direction = dir;

	/* both directions target the SSPDR data register */
	if (dir == DMA_DEV_TO_MEM) {
		chan = espi->dma_rx;
		buf = t->rx_buf;
		sgt = &espi->rx_sgt;

		conf.src_addr = espi->sspdr_phys;
		conf.src_addr_width = buswidth;
	} else {
		chan = espi->dma_tx;
		buf = t->tx_buf;
		sgt = &espi->tx_sgt;

		conf.dst_addr = espi->sspdr_phys;
		conf.dst_addr_width = buswidth;
	}

	ret = dmaengine_slave_config(chan, &conf);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * We need to split the transfer into PAGE_SIZE'd chunks. This is
	 * because we are using @espi->zeropage to provide a zero RX buffer
	 * for the TX transfers and we have only allocated one page for that.
	 *
	 * For performance reasons we allocate a new sg_table only when
	 * needed. Otherwise we will re-use the current one. Eventually the
	 * last sg_table is released in ep93xx_spi_release_dma().
	 */
	nents = DIV_ROUND_UP(len, PAGE_SIZE);
	if (nents != sgt->nents) {
		sg_free_table(sgt);

		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
		if (ret)
			return ERR_PTR(ret);
	}

	pbuf = buf;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (buf) {
			sg_set_page(sg, virt_to_page(pbuf), bytes,
				    offset_in_page(pbuf));
		} else {
			/* no client buffer: use the shared zero page */
			sg_set_page(sg, virt_to_page(espi->zeropage),
				    bytes, 0);
		}

		pbuf += bytes;
		len -= bytes;
	}

	/* all of the transfer must have been consumed by the sg list */
	if (WARN_ON(len)) {
		dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
		return ERR_PTR(-EINVAL);
	}

	nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);

	txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
	if (!txd) {
		dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
		return ERR_PTR(-ENOMEM);
	}
	return txd;
}
  448. /**
  449. * ep93xx_spi_dma_finish() - finishes with a DMA transfer
  450. * @espi: ep93xx SPI controller struct
  451. * @dir: DMA transfer direction
  452. *
  453. * Function finishes with the DMA transfer. After this, the DMA buffer is
  454. * unmapped.
  455. */
  456. static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
  457. enum dma_transfer_direction dir)
  458. {
  459. struct dma_chan *chan;
  460. struct sg_table *sgt;
  461. if (dir == DMA_DEV_TO_MEM) {
  462. chan = espi->dma_rx;
  463. sgt = &espi->rx_sgt;
  464. } else {
  465. chan = espi->dma_tx;
  466. sgt = &espi->tx_sgt;
  467. }
  468. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  469. }
  470. static void ep93xx_spi_dma_callback(void *callback_param)
  471. {
  472. complete(callback_param);
  473. }
  474. static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
  475. {
  476. struct spi_message *msg = espi->current_msg;
  477. struct dma_async_tx_descriptor *rxd, *txd;
  478. rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
  479. if (IS_ERR(rxd)) {
  480. dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
  481. msg->status = PTR_ERR(rxd);
  482. return;
  483. }
  484. txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
  485. if (IS_ERR(txd)) {
  486. ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
  487. dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
  488. msg->status = PTR_ERR(txd);
  489. return;
  490. }
  491. /* We are ready when RX is done */
  492. rxd->callback = ep93xx_spi_dma_callback;
  493. rxd->callback_param = &espi->wait;
  494. /* Now submit both descriptors and wait while they finish */
  495. dmaengine_submit(rxd);
  496. dmaengine_submit(txd);
  497. dma_async_issue_pending(espi->dma_rx);
  498. dma_async_issue_pending(espi->dma_tx);
  499. wait_for_completion(&espi->wait);
  500. ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
  501. ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
  502. }
/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
	int err;

	/* publish the current transfer to the PIO/DMA paths via msg->state */
	msg->state = t;

	/* reprogram divisors and frame size for this transfer's parameters */
	err = ep93xx_spi_chip_setup(espi, chip, t->speed_hz, t->bits_per_word);
	if (err) {
		dev_err(&espi->pdev->dev,
			"failed to setup chip for transfer\n");
		msg->status = err;
		return;
	}

	/* reset byte counters before starting the transfer */
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point of setting up DMA for the transfers which will
	 * fit into the FIFO and can be transferred with a single interrupt.
	 * So in these cases we will be using PIO and don't bother for DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler to handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}
}
/*
 * ep93xx_spi_process_message() - process one SPI message
 * @espi: ep93xx SPI controller struct
 * @msg: message to process
 *
 * This function processes a single SPI message. We go through all transfers in
 * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
 * asserted during the whole message (unless per transfer cs_change is set).
 *
 * @msg->status contains %0 in case of success or negative error code in case of
 * failure.
 */
static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
				       struct spi_message *msg)
{
	unsigned long timeout;
	struct spi_transfer *t;
	int err;

	/*
	 * Enable the SPI controller and its clock.
	 */
	err = ep93xx_spi_enable(espi);
	if (err) {
		dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
		msg->status = err;
		return;
	}

	/*
	 * Just to be sure: flush any data from RX FIFO. Bail out with a
	 * timeout so a stuck controller cannot hang the worker forever.
	 */
	timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
	while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
		if (time_after(jiffies, timeout)) {
			dev_warn(&espi->pdev->dev,
				 "timeout while flushing RX FIFO\n");
			msg->status = -ETIMEDOUT;
			return;
		}
		ep93xx_spi_read_u16(espi, SSPDR);
	}

	/*
	 * We explicitly handle FIFO level. This way we don't have to check TX
	 * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
	 */
	espi->fifo_level = 0;

	/*
	 * Assert the chipselect.
	 */
	ep93xx_spi_cs_control(msg->spi, true);

	/* process transfers in order; stop at the first failure */
	list_for_each_entry(t, &msg->transfers, transfer_list) {
		ep93xx_spi_process_transfer(espi, msg, t);
		if (msg->status)
			break;
	}

	/*
	 * Now the whole message is transferred (or failed for some reason). We
	 * deselect the device and disable the SPI controller.
	 */
	ep93xx_spi_cs_control(msg->spi, false);
	ep93xx_spi_disable(espi);
}
  627. static int ep93xx_spi_transfer_one_message(struct spi_master *master,
  628. struct spi_message *msg)
  629. {
  630. struct ep93xx_spi *espi = spi_master_get_devdata(master);
  631. msg->state = NULL;
  632. msg->status = 0;
  633. msg->actual_length = 0;
  634. espi->current_msg = msg;
  635. ep93xx_spi_process_message(espi, msg);
  636. espi->current_msg = NULL;
  637. spi_finalize_current_message(master);
  638. return 0;
  639. }
  640. static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
  641. {
  642. struct ep93xx_spi *espi = dev_id;
  643. u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);
  644. /*
  645. * If we got ROR (receive overrun) interrupt we know that something is
  646. * wrong. Just abort the message.
  647. */
  648. if (unlikely(irq_status & SSPIIR_RORIS)) {
  649. /* clear the overrun interrupt */
  650. ep93xx_spi_write_u8(espi, SSPICR, 0);
  651. dev_warn(&espi->pdev->dev,
  652. "receive overrun, aborting the message\n");
  653. espi->current_msg->status = -EIO;
  654. } else {
  655. /*
  656. * Interrupt is either RX (RIS) or TX (TIS). For both cases we
  657. * simply execute next data transfer.
  658. */
  659. if (ep93xx_spi_read_write(espi)) {
  660. /*
  661. * In normal case, there still is some processing left
  662. * for current transfer. Let's wait for the next
  663. * interrupt then.
  664. */
  665. return IRQ_HANDLED;
  666. }
  667. }
  668. /*
  669. * Current transfer is finished, either with error or with success. In
  670. * any case we disable interrupts and notify the worker to handle
  671. * any post-processing of the message.
  672. */
  673. ep93xx_spi_disable_interrupts(espi);
  674. complete(&espi->wait);
  675. return IRQ_HANDLED;
  676. }
  677. static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
  678. {
  679. if (ep93xx_dma_chan_is_m2p(chan))
  680. return false;
  681. chan->private = filter_param;
  682. return true;
  683. }
  684. static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
  685. {
  686. dma_cap_mask_t mask;
  687. int ret;
  688. espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
  689. if (!espi->zeropage)
  690. return -ENOMEM;
  691. dma_cap_zero(mask);
  692. dma_cap_set(DMA_SLAVE, mask);
  693. espi->dma_rx_data.port = EP93XX_DMA_SSP;
  694. espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
  695. espi->dma_rx_data.name = "ep93xx-spi-rx";
  696. espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  697. &espi->dma_rx_data);
  698. if (!espi->dma_rx) {
  699. ret = -ENODEV;
  700. goto fail_free_page;
  701. }
  702. espi->dma_tx_data.port = EP93XX_DMA_SSP;
  703. espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
  704. espi->dma_tx_data.name = "ep93xx-spi-tx";
  705. espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  706. &espi->dma_tx_data);
  707. if (!espi->dma_tx) {
  708. ret = -ENODEV;
  709. goto fail_release_rx;
  710. }
  711. return 0;
  712. fail_release_rx:
  713. dma_release_channel(espi->dma_rx);
  714. espi->dma_rx = NULL;
  715. fail_free_page:
  716. free_page((unsigned long)espi->zeropage);
  717. return ret;
  718. }
  719. static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
  720. {
  721. if (espi->dma_rx) {
  722. dma_release_channel(espi->dma_rx);
  723. sg_free_table(&espi->rx_sgt);
  724. }
  725. if (espi->dma_tx) {
  726. dma_release_channel(espi->dma_tx);
  727. sg_free_table(&espi->tx_sgt);
  728. }
  729. if (espi->zeropage)
  730. free_page((unsigned long)espi->zeropage);
  731. }
  732. static int ep93xx_spi_probe(struct platform_device *pdev)
  733. {
  734. struct spi_master *master;
  735. struct ep93xx_spi_info *info;
  736. struct ep93xx_spi *espi;
  737. struct resource *res;
  738. int irq;
  739. int error;
  740. info = dev_get_platdata(&pdev->dev);
  741. irq = platform_get_irq(pdev, 0);
  742. if (irq < 0) {
  743. dev_err(&pdev->dev, "failed to get irq resources\n");
  744. return -EBUSY;
  745. }
  746. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  747. if (!res) {
  748. dev_err(&pdev->dev, "unable to get iomem resource\n");
  749. return -ENODEV;
  750. }
  751. master = spi_alloc_master(&pdev->dev, sizeof(*espi));
  752. if (!master)
  753. return -ENOMEM;
  754. master->setup = ep93xx_spi_setup;
  755. master->transfer_one_message = ep93xx_spi_transfer_one_message;
  756. master->cleanup = ep93xx_spi_cleanup;
  757. master->bus_num = pdev->id;
  758. master->num_chipselect = info->num_chipselect;
  759. master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
  760. master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
  761. platform_set_drvdata(pdev, master);
  762. espi = spi_master_get_devdata(master);
  763. espi->clk = devm_clk_get(&pdev->dev, NULL);
  764. if (IS_ERR(espi->clk)) {
  765. dev_err(&pdev->dev, "unable to get spi clock\n");
  766. error = PTR_ERR(espi->clk);
  767. goto fail_release_master;
  768. }
  769. init_completion(&espi->wait);
  770. /*
  771. * Calculate maximum and minimum supported clock rates
  772. * for the controller.
  773. */
  774. master->max_speed_hz = clk_get_rate(espi->clk) / 2;
  775. master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
  776. espi->pdev = pdev;
  777. espi->sspdr_phys = res->start + SSPDR;
  778. espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
  779. if (IS_ERR(espi->regs_base)) {
  780. error = PTR_ERR(espi->regs_base);
  781. goto fail_release_master;
  782. }
  783. error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
  784. 0, "ep93xx-spi", espi);
  785. if (error) {
  786. dev_err(&pdev->dev, "failed to request irq\n");
  787. goto fail_release_master;
  788. }
  789. if (info->use_dma && ep93xx_spi_setup_dma(espi))
  790. dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
  791. /* make sure that the hardware is disabled */
  792. ep93xx_spi_write_u8(espi, SSPCR1, 0);
  793. error = devm_spi_register_master(&pdev->dev, master);
  794. if (error) {
  795. dev_err(&pdev->dev, "failed to register SPI master\n");
  796. goto fail_free_dma;
  797. }
  798. dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
  799. (unsigned long)res->start, irq);
  800. return 0;
  801. fail_free_dma:
  802. ep93xx_spi_release_dma(espi);
  803. fail_release_master:
  804. spi_master_put(master);
  805. return error;
  806. }
  807. static int ep93xx_spi_remove(struct platform_device *pdev)
  808. {
  809. struct spi_master *master = platform_get_drvdata(pdev);
  810. struct ep93xx_spi *espi = spi_master_get_devdata(master);
  811. ep93xx_spi_release_dma(espi);
  812. return 0;
  813. }
/* Platform driver glue; matched by the "ep93xx-spi" platform device name. */
static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
	},
	.probe		= ep93xx_spi_probe,
	.remove		= ep93xx_spi_remove,
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");