/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License rev 2 and
 * only rev 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

#define QUP_CONFIG 0x0000
#define QUP_STATE 0x0004
#define QUP_IO_M_MODES 0x0008
#define QUP_SW_RESET 0x000c
#define QUP_OPERATIONAL 0x0018
#define QUP_ERROR_FLAGS 0x001c
#define QUP_ERROR_FLAGS_EN 0x0020
#define QUP_OPERATIONAL_MASK 0x0028
#define QUP_HW_VERSION 0x0030
#define QUP_MX_OUTPUT_CNT 0x0100
#define QUP_OUTPUT_FIFO 0x0110
#define QUP_MX_WRITE_CNT 0x0150
#define QUP_MX_INPUT_CNT 0x0200
#define QUP_MX_READ_CNT 0x0208
#define QUP_INPUT_FIFO 0x0218
#define SPI_CONFIG 0x0300
#define SPI_IO_CONTROL 0x0304
#define SPI_ERROR_FLAGS 0x0308
#define SPI_ERROR_FLAGS_EN 0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE (1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
#define QUP_CONFIG_NO_INPUT BIT(7)
#define QUP_CONFIG_NO_OUTPUT BIT(6)
#define QUP_CONFIG_N 0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID BIT(2)
#define QUP_STATE_RESET 0
#define QUP_STATE_RUN 1
#define QUP_STATE_PAUSE 3
#define QUP_STATE_MASK 3
#define QUP_STATE_CLEAR 2

#define QUP_HW_VERSION_2_1_1 0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN BIT(15)
#define QUP_IO_M_UNPACK_EN BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)
#define QUP_IO_M_MODE_FIFO 0
#define QUP_IO_M_MODE_BLOCK 1
#define QUP_IO_M_MODE_DMOV 2
#define QUP_IO_M_MODE_BAM 3

/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
#define QUP_OP_IN_SERVICE_FLAG BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
#define QUP_OP_IN_FIFO_FULL BIT(7)
#define QUP_OP_OUT_FIFO_FULL BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE BIT(10)
#define SPI_CONFIG_INPUT_FIRST BIT(9)
#define SPI_CONFIG_LOOPBACK BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
#define SPI_IO_C_MX_CS_MODE BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK 0x000c
#define SPI_IO_C_TRISTATE_CS BIT(1)
#define SPI_IO_C_NO_TRI_STATE BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN BIT(0)

#define SPI_NUM_CHIPSELECTS 4

#define SPI_MAX_DMA_XFER (SZ_64K - 64)
/* high speed mode is when bus rate is greater than 26MHz */
#define SPI_HS_MIN_RATE 26000000
#define SPI_MAX_RATE 50000000

#define SPI_DELAY_THRESHOLD 1
#define SPI_DELAY_RETRY 10

struct spi_qup {
        void __iomem *base;
        struct device *dev;
        struct clk *cclk;       /* core clock */
        struct clk *iclk;       /* interface clock */
        int irq;
        spinlock_t lock;

        int in_fifo_sz;
        int out_fifo_sz;
        int in_blk_sz;
        int out_blk_sz;

        struct spi_transfer *xfer;
        struct completion done;
        int error;
        int w_size;             /* bytes per SPI word */
        int n_words;
        int tx_bytes;
        int rx_bytes;
        int qup_v1;

        int use_dma;
        struct dma_slave_config rx_conf;
        struct dma_slave_config tx_conf;
};

static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
        u32 opstate = readl_relaxed(controller->base + QUP_STATE);

        return opstate & QUP_STATE_VALID;
}

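/*
 * Move the QUP state machine to @state, waiting for the STATE register to
 * report a valid state both before and after the write.  Per the hardware
 * spec, going from PAUSE to RESET needs two writes of the CLEAR value.
 */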
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
        unsigned long loop;
        u32 cur_state;

        loop = 0;
        while (!spi_qup_is_valid_state(controller)) {
                usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

                if (++loop > SPI_DELAY_RETRY)
                        return -EIO;
        }

        if (loop)
                dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
                        loop, state);

        cur_state = readl_relaxed(controller->base + QUP_STATE);
        /*
         * Per spec: for PAUSE_STATE to RESET_STATE, two writes
         * of (b10) are required
         */
        if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
            (state == QUP_STATE_RESET)) {
                writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
                writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
        } else {
                cur_state &= ~QUP_STATE_MASK;
                cur_state |= state;
                writel_relaxed(cur_state, controller->base + QUP_STATE);
        }

        loop = 0;
        while (!spi_qup_is_valid_state(controller)) {
                usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

                if (++loop > SPI_DELAY_RETRY)
                        return -EIO;
        }

        return 0;
}

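/*
 * Drain the input FIFO into xfer->rx_buf.  Each 32-bit FIFO word is unpacked
 * most-significant byte first into w_size bytes; with no rx_buf the data is
 * discarded but rx_bytes is still advanced.
 */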
static void spi_qup_fifo_read(struct spi_qup *controller,
                              struct spi_transfer *xfer)
{
        u8 *rx_buf = xfer->rx_buf;
        u32 word, state;
        int idx, shift, w_size;

        w_size = controller->w_size;

        while (controller->rx_bytes < xfer->len) {

                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
                if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
                        break;

                word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

                if (!rx_buf) {
                        controller->rx_bytes += w_size;
                        continue;
                }

                for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
                        /*
                         * The data format depends on bytes per SPI word:
                         *  4 bytes: 0x12345678
                         *  2 bytes: 0x00001234
                         *  1 byte : 0x00000012
                         */
                        shift = BITS_PER_BYTE;
                        shift *= (w_size - idx - 1);
                        rx_buf[controller->rx_bytes] = word >> shift;
                }
        }
}

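/*
 * Fill the output FIFO from xfer->tx_buf, packing w_size bytes into each
 * 32-bit FIFO word until the transfer is fully queued or the FIFO reports
 * full.  A NULL tx_buf queues dummy (zero) words for RX-only transfers.
 */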
static void spi_qup_fifo_write(struct spi_qup *controller,
                               struct spi_transfer *xfer)
{
        const u8 *tx_buf = xfer->tx_buf;
        u32 word, state, data;
        int idx, w_size;

        w_size = controller->w_size;

        while (controller->tx_bytes < xfer->len) {

                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
                if (state & QUP_OP_OUT_FIFO_FULL)
                        break;

                word = 0;
                for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {

                        if (!tx_buf) {
                                controller->tx_bytes += w_size;
                                break;
                        }

                        data = tx_buf[controller->tx_bytes];
                        word |= data << (BITS_PER_BYTE * (3 - idx));
                }

                writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
        }
}

static void spi_qup_dma_done(void *data)
{
        struct spi_qup *qup = data;

        complete(&qup->done);
}

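/*
 * Map one direction of the transfer onto its dmaengine channel: build a
 * slave_sg descriptor from the scatterlist the SPI core already mapped,
 * attach the completion callback, and submit it.  Returns 0 or a negative
 * errno.
 */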
static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
                           enum dma_transfer_direction dir,
                           dma_async_tx_callback callback)
{
        struct spi_qup *qup = spi_master_get_devdata(master);
        unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
        struct dma_async_tx_descriptor *desc;
        struct scatterlist *sgl;
        struct dma_chan *chan;
        dma_cookie_t cookie;
        unsigned int nents;

        if (dir == DMA_MEM_TO_DEV) {
                chan = master->dma_tx;
                nents = xfer->tx_sg.nents;
                sgl = xfer->tx_sg.sgl;
        } else {
                chan = master->dma_rx;
                nents = xfer->rx_sg.nents;
                sgl = xfer->rx_sg.sgl;
        }

        desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
        if (!desc)
                return -EINVAL;

        desc->callback = callback;
        desc->callback_param = qup;

        cookie = dmaengine_submit(desc);

        return dma_submit_error(cookie);
}

static void spi_qup_dma_terminate(struct spi_master *master,
                                  struct spi_transfer *xfer)
{
        if (xfer->tx_buf)
                dmaengine_terminate_all(master->dma_tx);
        if (xfer->rx_buf)
                dmaengine_terminate_all(master->dma_rx);
}

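/*
 * Kick off a BAM/DMA transfer.  The completion callback is attached to only
 * one direction (RX if present, otherwise TX), so ->done fires once the last
 * expected descriptor completes.
 */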
static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
{
        dma_async_tx_callback rx_done = NULL, tx_done = NULL;
        int ret;

        if (xfer->rx_buf)
                rx_done = spi_qup_dma_done;
        else if (xfer->tx_buf)
                tx_done = spi_qup_dma_done;

        if (xfer->rx_buf) {
                ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
                if (ret)
                        return ret;

                dma_async_issue_pending(master->dma_rx);
        }

        if (xfer->tx_buf) {
                ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
                if (ret)
                        return ret;

                dma_async_issue_pending(master->dma_tx);
        }

        return 0;
}

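/*
 * Start a PIO transfer: step the QUP through RUN then PAUSE so the output
 * FIFO can be pre-filled while the serial engine is held; the final RUN
 * transition is left to the caller (spi_qup_transfer_one).
 */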
static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
{
        struct spi_qup *qup = spi_master_get_devdata(master);
        int ret;

        ret = spi_qup_set_state(qup, QUP_STATE_RUN);
        if (ret) {
                dev_warn(qup->dev, "cannot set RUN state\n");
                return ret;
        }

        ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
        if (ret) {
                dev_warn(qup->dev, "cannot set PAUSE state\n");
                return ret;
        }

        spi_qup_fifo_write(qup, xfer);

        return 0;
}

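/*
 * QUP interrupt handler.  Reads and clears the error and operational status,
 * services the FIFOs for PIO transfers, and completes ->done once all RX
 * bytes have arrived or an error was flagged.
 */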
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
        struct spi_qup *controller = dev_id;
        struct spi_transfer *xfer;
        u32 opflags, qup_err, spi_err;
        unsigned long flags;
        int error = 0;

        spin_lock_irqsave(&controller->lock, flags);
        xfer = controller->xfer;
        controller->xfer = NULL;
        spin_unlock_irqrestore(&controller->lock, flags);

        qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
        spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
        opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

        writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
        writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
        writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

        if (!xfer) {
                dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
                                    qup_err, spi_err, opflags);
                return IRQ_HANDLED;
        }

        if (qup_err) {
                if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
                        dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
                if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
                        dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
                if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
                        dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
                if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
                        dev_warn(controller->dev, "INPUT_OVER_RUN\n");

                error = -EIO;
        }

        if (spi_err) {
                if (spi_err & SPI_ERROR_CLK_OVER_RUN)
                        dev_warn(controller->dev, "CLK_OVER_RUN\n");
                if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
                        dev_warn(controller->dev, "CLK_UNDER_RUN\n");

                error = -EIO;
        }

        if (!controller->use_dma) {
                if (opflags & QUP_OP_IN_SERVICE_FLAG)
                        spi_qup_fifo_read(controller, xfer);

                if (opflags & QUP_OP_OUT_SERVICE_FLAG)
                        spi_qup_fifo_write(controller, xfer);
        }

        spin_lock_irqsave(&controller->lock, flags);
        controller->error = error;
        controller->xfer = xfer;
        spin_unlock_irqrestore(&controller->lock, flags);

        if (controller->rx_bytes == xfer->len || error)
                complete(&controller->done);

        return IRQ_HANDLED;
}

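/*
 * Derive the word size and word count for this transfer and pick an I/O
 * mode: FIFO when the whole transfer fits in the input FIFO, BLOCK
 * otherwise.  DMA-capable transfers are promoted to BAM mode later, in
 * spi_qup_io_config().
 */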
static u32
spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer)
{
        struct spi_qup *qup = spi_master_get_devdata(master);
        u32 mode;

        qup->w_size = 4;

        if (xfer->bits_per_word <= 8)
                qup->w_size = 1;
        else if (xfer->bits_per_word <= 16)
                qup->w_size = 2;

        qup->n_words = xfer->len / qup->w_size;

        if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
                mode = QUP_IO_M_MODE_FIFO;
        else
                mode = QUP_IO_M_MODE_BLOCK;

        return mode;
}

/* set clock freq ... bits per word */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
        struct spi_qup *controller = spi_master_get_devdata(spi->master);
        u32 config, iomode, mode, control;
        int ret, n_words;

        if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
                dev_err(controller->dev, "too big size for loopback %d > %d\n",
                        xfer->len, controller->in_fifo_sz);
                return -EIO;
        }

        ret = clk_set_rate(controller->cclk, xfer->speed_hz);
        if (ret) {
                dev_err(controller->dev, "fail to set frequency %d",
                        xfer->speed_hz);
                return -EIO;
        }

        if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
                dev_err(controller->dev, "cannot set RESET state\n");
                return -EIO;
        }

        mode = spi_qup_get_mode(spi->master, xfer);
        n_words = controller->n_words;

        if (mode == QUP_IO_M_MODE_FIFO) {
                writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
                writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
                /* must be zero for FIFO */
                writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
                writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
        } else if (!controller->use_dma) {
                writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
                writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
                /* must be zero for BLOCK and BAM */
                writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
                writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
        } else {
                mode = QUP_IO_M_MODE_BAM;
                writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
                writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

                if (!controller->qup_v1) {
                        void __iomem *input_cnt;

                        input_cnt = controller->base + QUP_MX_INPUT_CNT;
                        /*
                         * For DMA transfers, both QUP_MX_INPUT_CNT and
                         * QUP_MX_OUTPUT_CNT must be zero in all cases but one.
                         * That case is a non-balanced transfer when there is
                         * only an rx_buf.
                         */
                        if (xfer->tx_buf)
                                writel_relaxed(0, input_cnt);
                        else
                                writel_relaxed(n_words, input_cnt);

                        writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
                }
        }

        iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
        /* Set input and output transfer mode */
        iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

        if (!controller->use_dma)
                iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
        else
                iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

        iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
        iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

        writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

        control = readl_relaxed(controller->base + SPI_IO_CONTROL);

        if (spi->mode & SPI_CPOL)
                control |= SPI_IO_C_CLK_IDLE_HIGH;
        else
                control &= ~SPI_IO_C_CLK_IDLE_HIGH;

        writel_relaxed(control, controller->base + SPI_IO_CONTROL);

        config = readl_relaxed(controller->base + SPI_CONFIG);

        if (spi->mode & SPI_LOOP)
                config |= SPI_CONFIG_LOOPBACK;
        else
                config &= ~SPI_CONFIG_LOOPBACK;

        if (spi->mode & SPI_CPHA)
                config &= ~SPI_CONFIG_INPUT_FIRST;
        else
                config |= SPI_CONFIG_INPUT_FIRST;

        /*
         * HS_MODE improves signal stability for spi-clk high rates,
         * but is invalid in loop back mode.
         */
        if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
                config |= SPI_CONFIG_HS_MODE;
        else
                config &= ~SPI_CONFIG_HS_MODE;

        writel_relaxed(config, controller->base + SPI_CONFIG);

        config = readl_relaxed(controller->base + QUP_CONFIG);
        config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
        config |= xfer->bits_per_word - 1;
        config |= QUP_CONFIG_SPI_MODE;

        if (controller->use_dma) {
                if (!xfer->tx_buf)
                        config |= QUP_CONFIG_NO_OUTPUT;
                if (!xfer->rx_buf)
                        config |= QUP_CONFIG_NO_INPUT;
        }

        writel_relaxed(config, controller->base + QUP_CONFIG);

        /* only write to OPERATIONAL_MASK when register is present */
        if (!controller->qup_v1) {
                u32 mask = 0;

                /*
                 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
                 * status change in BAM mode
                 */
                if (mode == QUP_IO_M_MODE_BAM)
                        mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;

                writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
        }

        return 0;
}

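/*
 * Core transfer_one() hook.  Programs the controller for this transfer,
 * computes a timeout scaled to the transfer length and clock rate, starts
 * the DMA or PIO path, and waits for the interrupt/DMA completion before
 * dropping the QUP back to RESET.
 */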
static int spi_qup_transfer_one(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
{
        struct spi_qup *controller = spi_master_get_devdata(master);
        unsigned long timeout, flags;
        int ret = -EIO;

        ret = spi_qup_io_config(spi, xfer);
        if (ret)
                return ret;

        timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
        timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
        timeout = 100 * msecs_to_jiffies(timeout);

        reinit_completion(&controller->done);

        spin_lock_irqsave(&controller->lock, flags);
        controller->xfer = xfer;
        controller->error = 0;
        controller->rx_bytes = 0;
        controller->tx_bytes = 0;
        spin_unlock_irqrestore(&controller->lock, flags);

        if (controller->use_dma)
                ret = spi_qup_do_dma(master, xfer);
        else
                ret = spi_qup_do_pio(master, xfer);

        if (ret)
                goto exit;

        if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
                dev_warn(controller->dev, "cannot set EXECUTE state\n");
                goto exit;
        }

        if (!wait_for_completion_timeout(&controller->done, timeout))
                ret = -ETIMEDOUT;
exit:
        spi_qup_set_state(controller, QUP_STATE_RESET);
        spin_lock_irqsave(&controller->lock, flags);
        controller->xfer = NULL;
        if (!ret)
                ret = controller->error;
        spin_unlock_irqrestore(&controller->lock, flags);

        if (ret && controller->use_dma)
                spi_qup_dma_terminate(master, xfer);

        return ret;
}

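/*
 * ->can_dma() hook.  DMA is used only when each present buffer is
 * cache-line aligned, the length is a multiple of the block size, the
 * corresponding channel is available, and the transfer is too large for
 * plain FIFO mode.
 */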
static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
                            struct spi_transfer *xfer)
{
        struct spi_qup *qup = spi_master_get_devdata(master);
        size_t dma_align = dma_get_cache_alignment();
        u32 mode;

        qup->use_dma = 0;

        if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
            IS_ERR_OR_NULL(master->dma_rx) ||
            !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
                return false;

        if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
            IS_ERR_OR_NULL(master->dma_tx) ||
            !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
                return false;

        mode = spi_qup_get_mode(master, xfer);
        if (mode == QUP_IO_M_MODE_FIFO)
                return false;

        qup->use_dma = 1;

        return true;
}

static void spi_qup_release_dma(struct spi_master *master)
{
        if (!IS_ERR_OR_NULL(master->dma_rx))
                dma_release_channel(master->dma_rx);
        if (!IS_ERR_OR_NULL(master->dma_tx))
                dma_release_channel(master->dma_tx);
}

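/*
 * Request the "rx" and "tx" dmaengine channels and point them at the QUP
 * input/output FIFO registers, using the block size as the burst length.
 * Errors from the channel requests (including -EPROBE_DEFER) are returned
 * to the caller.
 */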
static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
{
        struct spi_qup *spi = spi_master_get_devdata(master);
        struct dma_slave_config *rx_conf = &spi->rx_conf,
                                *tx_conf = &spi->tx_conf;
        struct device *dev = spi->dev;
        int ret;

        /* allocate dma resources, if available */
        master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
        if (IS_ERR(master->dma_rx))
                return PTR_ERR(master->dma_rx);

        master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
        if (IS_ERR(master->dma_tx)) {
                ret = PTR_ERR(master->dma_tx);
                goto err_tx;
        }

        /* set DMA parameters */
        rx_conf->direction = DMA_DEV_TO_MEM;
        rx_conf->device_fc = 1;
        rx_conf->src_addr = base + QUP_INPUT_FIFO;
        rx_conf->src_maxburst = spi->in_blk_sz;

        tx_conf->direction = DMA_MEM_TO_DEV;
        tx_conf->device_fc = 1;
        tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
        tx_conf->dst_maxburst = spi->out_blk_sz;

        ret = dmaengine_slave_config(master->dma_rx, rx_conf);
        if (ret) {
                dev_err(dev, "failed to configure RX channel\n");
                goto err;
        }

        ret = dmaengine_slave_config(master->dma_tx, tx_conf);
        if (ret) {
                dev_err(dev, "failed to configure TX channel\n");
                goto err;
        }

        return 0;

err:
        dma_release_channel(master->dma_tx);
err_tx:
        dma_release_channel(master->dma_rx);
        return ret;
}

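/*
 * Probe: map the registers, enable the core and interface clocks, read the
 * FIFO/block geometry from QUP_IO_M_MODES, reset the serial engine, and
 * register the SPI master with runtime PM enabled.
 */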
static int spi_qup_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct clk *iclk, *cclk;
        struct spi_qup *controller;
        struct resource *res;
        struct device *dev;
        void __iomem *base;
        u32 max_freq, iomode, num_cs;
        int ret, irq, size;

        dev = &pdev->dev;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        cclk = devm_clk_get(dev, "core");
        if (IS_ERR(cclk))
                return PTR_ERR(cclk);

        iclk = devm_clk_get(dev, "iface");
        if (IS_ERR(iclk))
                return PTR_ERR(iclk);

        /* This is an optional parameter */
        if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
                max_freq = SPI_MAX_RATE;

        if (!max_freq || max_freq > SPI_MAX_RATE) {
                dev_err(dev, "invalid clock frequency %d\n", max_freq);
                return -ENXIO;
        }

        ret = clk_prepare_enable(cclk);
        if (ret) {
                dev_err(dev, "cannot enable core clock\n");
                return ret;
        }

        ret = clk_prepare_enable(iclk);
        if (ret) {
                clk_disable_unprepare(cclk);
                dev_err(dev, "cannot enable iface clock\n");
                return ret;
        }

        master = spi_alloc_master(dev, sizeof(struct spi_qup));
        if (!master) {
                clk_disable_unprepare(cclk);
                clk_disable_unprepare(iclk);
                dev_err(dev, "cannot allocate master\n");
                return -ENOMEM;
        }

        /* use num-cs unless not present or out of range */
        if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
            num_cs > SPI_NUM_CHIPSELECTS)
                master->num_chipselect = SPI_NUM_CHIPSELECTS;
        else
                master->num_chipselect = num_cs;

        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
        master->max_speed_hz = max_freq;
        master->transfer_one = spi_qup_transfer_one;
        master->dev.of_node = pdev->dev.of_node;
        master->auto_runtime_pm = true;
        master->dma_alignment = dma_get_cache_alignment();
        master->max_dma_len = SPI_MAX_DMA_XFER;

        platform_set_drvdata(pdev, master);

        controller = spi_master_get_devdata(master);

        controller->dev = dev;
        controller->base = base;
        controller->iclk = iclk;
        controller->cclk = cclk;
        controller->irq = irq;

        ret = spi_qup_init_dma(master, res->start);
        if (ret == -EPROBE_DEFER)
                goto error;
        else if (!ret)
                master->can_dma = spi_qup_can_dma;

        /* set v1 flag if device is version 1 */
        if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
                controller->qup_v1 = 1;

        spin_lock_init(&controller->lock);
        init_completion(&controller->done);

        iomode = readl_relaxed(base + QUP_IO_M_MODES);

        size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
        if (size)
                controller->out_blk_sz = size * 16;
        else
                controller->out_blk_sz = 4;

        size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
        if (size)
                controller->in_blk_sz = size * 16;
        else
                controller->in_blk_sz = 4;

        size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
        controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

        size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
        controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

        dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
                 controller->in_blk_sz, controller->in_fifo_sz,
                 controller->out_blk_sz, controller->out_fifo_sz);

        writel_relaxed(1, base + QUP_SW_RESET);

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret) {
                dev_err(dev, "cannot set RESET state\n");
                goto error_dma;
        }

        writel_relaxed(0, base + QUP_OPERATIONAL);
        writel_relaxed(0, base + QUP_IO_M_MODES);

        if (!controller->qup_v1)
                writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

        writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
                       base + SPI_ERROR_FLAGS_EN);

        /* if earlier version of the QUP, disable INPUT_OVERRUN */
        if (controller->qup_v1)
                writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
                        QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
                        base + QUP_ERROR_FLAGS_EN);

        writel_relaxed(0, base + SPI_CONFIG);
        writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

        ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
                               IRQF_TRIGGER_HIGH, pdev->name, controller);
        if (ret)
                goto error_dma;

        pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);

        ret = devm_spi_register_master(dev, master);
        if (ret)
                goto disable_pm;

        return 0;

disable_pm:
        pm_runtime_disable(&pdev->dev);
error_dma:
        spi_qup_release_dma(master);
error:
        clk_disable_unprepare(cclk);
        clk_disable_unprepare(iclk);
        spi_master_put(master);
        return ret;
}

#ifdef CONFIG_PM
static int spi_qup_pm_suspend_runtime(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        u32 config;

        /* Enable clocks auto gating */
        config = readl(controller->base + QUP_CONFIG);
        config |= QUP_CONFIG_CLOCK_AUTO_GATE;
        writel_relaxed(config, controller->base + QUP_CONFIG);
        return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        u32 config;

        /* Disable clocks auto gating */
        config = readl_relaxed(controller->base + QUP_CONFIG);
        config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
        writel_relaxed(config, controller->base + QUP_CONFIG);
        return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = spi_master_suspend(master);
        if (ret)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        clk_disable_unprepare(controller->cclk);
        clk_disable_unprepare(controller->iclk);
        return 0;
}

static int spi_qup_resume(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = clk_prepare_enable(controller->iclk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(controller->cclk);
        if (ret)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static int spi_qup_remove(struct platform_device *pdev)
{
        struct spi_master *master = dev_get_drvdata(&pdev->dev);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        spi_qup_release_dma(master);

        clk_disable_unprepare(controller->cclk);
        clk_disable_unprepare(controller->iclk);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
}

static const struct of_device_id spi_qup_dt_match[] = {
        { .compatible = "qcom,spi-qup-v1.1.1", },
        { .compatible = "qcom,spi-qup-v2.1.1", },
        { .compatible = "qcom,spi-qup-v2.2.1", },
        { }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
        SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
                           spi_qup_pm_resume_runtime,
                           NULL)
};

static struct platform_driver spi_qup_driver = {
        .driver = {
                .name = "spi_qup",
                .pm = &spi_qup_dev_pm_ops,
                .of_match_table = spi_qup_dt_match,
        },
        .probe = spi_qup_probe,
        .remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");