/*
 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * Refer to drivers/dma/imx-sdma.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/list.h>

#include <asm/irq.h>

#include "dmaengine.h"

/*
 * NOTE: The term "PIO" throughout the mxs-dma implementation means
 * PIO mode of mxs apbh-dma and apbx-dma.  In this mode, the DMA
 * engine can program the registers of peripheral devices on its own.
 */

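/*
 * Illustration only (a sketch of the client-side pattern, not code from
 * this driver): a peripheral driver passes the register values to be
 * programmed as a fake scatterlist with direction DMA_TRANS_NONE, and
 * the DMA_TRANS_NONE branch of mxs_dma_prep_slave_sg() below copies
 * them into ccw->pio_words.  The names "pio" and "ctrl0_value" here are
 * hypothetical:
 *
 *	u32 pio[1] = { ctrl0_value };
 *
 *	desc = dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio,
 *				       1, DMA_TRANS_NONE, 0);
 */
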
#define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
#define apbh_is_old(mxs_dma)	((mxs_dma)->dev_id == IMX23_DMA)

#define HW_APBHX_CTRL0				0x000
#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL		16
#define HW_APBHX_CTRL1				0x010
#define HW_APBHX_CTRL2				0x020
#define HW_APBHX_CHANNEL_CTRL			0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16

/*
 * The offset of the NXTCMDAR register differs with both DMA type and
 * version, while the stride between channels is always 0x70.
 */
#define HW_APBHX_CHn_NXTCMDAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
#define HW_APBHX_CHn_BAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70)
#define HW_APBX_CHn_DEBUG1(d, n)	(0x150 + (n) * 0x70)

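/*
 * Worked example: on an i.MX28 APBX controller, channel 2's NXTCMDAR
 * register resolves to 0x110 + 2 * 0x70 = 0x1f0, while on an old
 * (i.MX23) APBH controller the same channel's register sits at
 * 0x050 + 2 * 0x70 = 0x130.
 */
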
/*
 * ccw bits definitions
 *
 * COMMAND:		0..1	(2)
 * CHAIN:		2	(1)
 * IRQ:			3	(1)
 * NAND_LOCK:		4	(1) - not implemented
 * NAND_WAIT4READY:	5	(1) - not implemented
 * DEC_SEM:		6	(1)
 * WAIT4END:		7	(1)
 * HALT_ON_TERMINATE:	8	(1)
 * TERMINATE_FLUSH:	9	(1)
 * RESERVED:		10..11	(2)
 * PIO_NUM:		12..15	(4)
 */
#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_CHAIN		(1 << 2)
#define CCW_IRQ			(1 << 3)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define CCW_HALT_ON_TERM	(1 << 8)
#define CCW_TERM_FLUSH		(1 << 9)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)

#define MXS_DMA_CMD_NO_XFER	0
#define MXS_DMA_CMD_WRITE	1
#define MXS_DMA_CMD_READ	2
#define MXS_DMA_CMD_DMA_SENSE	3	/* not implemented */

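/*
 * Composition example (a sketch mirroring what mxs_dma_prep_slave_sg()
 * does below, not an additional API): the final descriptor of a
 * device-to-memory chain would set
 *
 *	ccw->bits = CCW_IRQ | CCW_DEC_SEM | CCW_HALT_ON_TERM |
 *		    CCW_TERM_FLUSH | BF_CCW(MXS_DMA_CMD_WRITE, COMMAND);
 *
 * i.e. bit 3, bit 6, bits 8..9, and COMMAND = 1 in bits 0..1.
 */
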
struct mxs_dma_ccw {
	u32		next;
	u16		bits;
	u16		xfer_bytes;
#define MAX_XFER_BYTES	0xff00
	u32		bufaddr;
#define MXS_PIO_WORDS	16
	u32		pio_words[MXS_PIO_WORDS];
};

#define CCW_BLOCK_SIZE	(4 * PAGE_SIZE)
#define NUM_CCW	(int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))

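/*
 * Size check (assuming 4 KiB pages; the struct has no implicit padding
 * since all members are naturally aligned and the total is a multiple
 * of the 4-byte alignment): sizeof(struct mxs_dma_ccw) =
 * 4 + 2 + 2 + 4 + 16 * 4 = 76 bytes, so CCW_BLOCK_SIZE = 16384 and
 * NUM_CCW = 16384 / 76 = 215 descriptors per channel.
 */
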
struct mxs_dma_chan {
	struct mxs_dma_engine		*mxs_dma;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	struct tasklet_struct		tasklet;
	unsigned int			chan_irq;
	struct mxs_dma_ccw		*ccw;
	dma_addr_t			ccw_phys;
	int				desc_count;
	enum dma_status			status;
	unsigned int			flags;
	bool				reset;
#define MXS_DMA_SG_LOOP			(1 << 0)
#define MXS_DMA_USE_SEMAPHORE		(1 << 1)
};

#define MXS_DMA_CHANNELS		16
#define MXS_DMA_CHANNELS_MASK		0xffff

enum mxs_dma_devtype {
	MXS_DMA_APBH,
	MXS_DMA_APBX,
};

enum mxs_dma_id {
	IMX23_DMA,
	IMX28_DMA,
};

struct mxs_dma_engine {
	enum mxs_dma_id			dev_id;
	enum mxs_dma_devtype		type;
	void __iomem			*base;
	struct clk			*clk;
	struct dma_device		dma_device;
	struct device_dma_parameters	dma_parms;
	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
	struct platform_device		*pdev;
	unsigned int			nr_channels;
};

struct mxs_dma_type {
	enum mxs_dma_id id;
	enum mxs_dma_devtype type;
};

static struct mxs_dma_type mxs_dma_types[] = {
	{
		.id = IMX23_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX23_DMA,
		.type = MXS_DMA_APBX,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBX,
	}
};

static const struct platform_device_id mxs_dma_ids[] = {
	{
		.name = "imx23-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
	}, {
		.name = "imx23-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
	}, {
		.name = "imx28-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
	}, {
		.name = "imx28-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
	}, {
		/* end of list */
	}
};

static const struct of_device_id mxs_dma_dt_ids[] = {
	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);

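/*
 * Device-tree usage sketch (node label and request name are
 * illustrative, not from this file): mxs_dma_xlate() below expects
 * exactly one cell, the channel number, so a client node on i.MX28
 * would reference the controller as
 *
 *	dmas = <&dma_apbx 8>;
 *	dma-names = "rx-tx";
 *
 * and then obtain the channel with
 * dma_request_slave_channel(dev, "rx-tx").
 */
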
static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}

static void mxs_dma_reset_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/*
	 * mxs dma channel resets can cause a channel stall. To recover from a
	 * channel stall, we have to reset the whole DMA engine. To avoid this,
	 * we use cyclic DMA with semaphores, which are incremented in
	 * mxs_dma_int_handler. To reset such a channel, we simply stop writing
	 * to the semaphore counter.
	 */
	if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
			mxs_chan->flags & MXS_DMA_SG_LOOP) {
		mxs_chan->reset = true;
	} else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	} else {
		unsigned long elapsed = 0;
		const unsigned long max_wait = 50000; /* 50 ms */
		void __iomem *reg_dbg1 = mxs_dma->base +
				HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);

		/*
		 * On i.MX28 APBX, the DMA channel can stop working if we reset
		 * the channel while it is in READ_FLUSH (0x08) state.
		 * We wait here until it leaves that state, then trigger the
		 * reset. The wait is bounded at 50 ms so this busy-loop
		 * cannot hang the kernel.
		 */
		while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) {
			udelay(100);
			elapsed += 100;
		}

		if (elapsed >= max_wait)
			dev_err(&mxs_chan->mxs_dma->pdev->dev,
				"Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n",
				chan_id);

		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
	}

	mxs_chan->status = DMA_COMPLETE;
}

static void mxs_dma_enable_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* set cmd_addr up */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

	/* write 1 to SEMA to kick off the channel */
	if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
			mxs_chan->flags & MXS_DMA_SG_LOOP) {
		/*
		 * A cyclic DMA consists of at least 2 segments, so initialize
		 * the semaphore with 2 so we have enough time to add 1 to the
		 * semaphore if we need to.
		 */
		writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
	} else {
		writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
	}
	mxs_chan->reset = false;
}

static void mxs_dma_disable_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

	mxs_chan->status = DMA_COMPLETE;
}

static int mxs_dma_pause_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

	mxs_chan->status = DMA_PAUSED;
	return 0;
}

static int mxs_dma_resume_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* unfreeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);

	mxs_chan->status = DMA_IN_PROGRESS;
	return 0;
}

static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	return dma_cookie_assign(tx);
}

static void mxs_dma_tasklet(unsigned long data)
{
	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;

	if (mxs_chan->desc.callback)
		mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}

static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
{
	int i;

	for (i = 0; i != mxs_dma->nr_channels; ++i)
		if (mxs_dma->mxs_chans[i].chan_irq == irq)
			return i;

	return -EINVAL;
}

static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
	struct mxs_dma_engine *mxs_dma = dev_id;
	struct mxs_dma_chan *mxs_chan;
	u32 completed;
	u32 err;
	int chan = mxs_dma_irq_to_chan(mxs_dma, irq);

	if (chan < 0)
		return IRQ_NONE;

	/* completion status */
	completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
	completed = (completed >> chan) & 0x1;

	/* Clear interrupt */
	writel((1 << chan),
			mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);

	/* error status */
	err = readl(mxs_dma->base + HW_APBHX_CTRL2);
	err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan);

	/*
	 * error status bit is in the upper 16 bits, error irq bit in the lower
	 * 16 bits. We transform it into a simpler error code:
	 * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR
	 */
	err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan);

	/* Clear error irq */
	writel((1 << chan),
			mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);

	/*
	 * When the completion and the termination-error bits are set at the
	 * same time, we do not treat it as an error. In other words, we only
	 * handle an error here if it is a bus error, or a termination error
	 * without a completion. Since 0x01 is the termination error code, we
	 * can subtract (err & completed) to drop that benign case.
	 */
	err -= err & completed;

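	/*
	 * Worked example (illustrative, with chan = 2): a termination irq
	 * alone sets bit 2 of HW_APBHX_CTRL2, so the transform above gives
	 * err = (0x4 >> 18) + (0x4 >> 2) = 1. If that termination coincided
	 * with a completion (completed = 1), the subtraction clears err back
	 * to 0 and we take the normal completion path below.
	 */
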
	mxs_chan = &mxs_dma->mxs_chans[chan];

	if (err) {
		dev_dbg(mxs_dma->dma_device.dev,
			"%s: error in channel %d\n", __func__,
			chan);
		mxs_chan->status = DMA_ERROR;
		mxs_dma_reset_chan(&mxs_chan->chan);
	} else if (mxs_chan->status != DMA_COMPLETE) {
		if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
			mxs_chan->status = DMA_IN_PROGRESS;
			if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
				writel(1, mxs_dma->base +
					HW_APBHX_CHn_SEMA(mxs_dma, chan));
		} else {
			mxs_chan->status = DMA_COMPLETE;
		}
	}

	if (mxs_chan->status == DMA_COMPLETE) {
		if (mxs_chan->reset)
			return IRQ_HANDLED;
		dma_cookie_complete(&mxs_chan->desc);
	}

	/* schedule tasklet on this channel */
	tasklet_schedule(&mxs_chan->tasklet);

	return IRQ_HANDLED;
}

static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int ret;

	mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
					    CCW_BLOCK_SIZE,
					    &mxs_chan->ccw_phys, GFP_KERNEL);
	if (!mxs_chan->ccw) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	if (mxs_chan->chan_irq != NO_IRQ) {
		ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
					0, "mxs-dma", mxs_dma);
		if (ret)
			goto err_irq;
	}

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		goto err_clk;

	mxs_dma_reset_chan(chan);

	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

	/* the descriptor is ready */
	async_tx_ack(&mxs_chan->desc);

	return 0;

err_clk:
	free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
	dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
	return ret;
}

static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	mxs_dma_disable_chan(chan);

	free_irq(mxs_chan->chan_irq, mxs_dma);

	dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);

	clk_disable_unprepare(mxs_dma->clk);
}

/*
 * How to use the flags for ->device_prep_slave_sg() :
 * [1] If there is only one DMA command in the DMA chain, the code should be:
 *	......
 *	->device_prep_slave_sg(DMA_CTRL_ACK);
 *	......
 * [2] If there are two DMA commands in the DMA chain, the code should be:
 *	......
 *	->device_prep_slave_sg(0);
 *	......
 *	->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	......
 * [3] If there are more than two DMA commands in the DMA chain, the code
 *     should be:
 *	......
 *	->device_prep_slave_sg(0);				// First
 *	......
 *	->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
 *	......
 *	->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);	// Last
 *	......
 */

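/*
 * A client-side sketch of rule [2] above ("cmd_sgl", "data_sgl" and
 * "data_nents" are illustrative names, not part of this driver): the
 * first descriptor carries the command, the last one carries the data
 * and raises the interrupt.
 *
 *	desc = dmaengine_prep_slave_sg(chan, cmd_sgl, 1,
 *				       DMA_MEM_TO_DEV, 0);
 *	desc = dmaengine_prep_slave_sg(chan, data_sgl, data_nents,
 *				       DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
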
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	struct mxs_dma_ccw *ccw;
	struct scatterlist *sg;
	u32 i, j;
	u32 *pio;
	bool append = flags & DMA_PREP_INTERRUPT;
	int idx = append ? mxs_chan->desc_count : 0;

	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
		return NULL;

	if (sg_len + (append ? idx : 0) > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				sg_len, NUM_CCW);
		goto err_out;
	}

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags = 0;

	/*
	 * If the sg is prepared with append flag set, the sg
	 * will be appended to the last prepared sg.
	 */
	if (append) {
		BUG_ON(idx < 1);
		ccw = &mxs_chan->ccw[idx - 1];
		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
		ccw->bits |= CCW_CHAIN;
		ccw->bits &= ~CCW_IRQ;
		ccw->bits &= ~CCW_DEC_SEM;
	} else {
		idx = 0;
	}

	if (direction == DMA_TRANS_NONE) {
		ccw = &mxs_chan->ccw[idx++];
		pio = (u32 *) sgl;

		for (j = 0; j < sg_len;)
			ccw->pio_words[j++] = *pio++;

		ccw->bits = 0;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_DEC_SEM;
		if (flags & DMA_CTRL_ACK)
			ccw->bits |= CCW_WAIT4END;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
	} else {
		for_each_sg(sgl, sg, sg_len, i) {
			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
						sg_dma_len(sg), MAX_XFER_BYTES);
				goto err_out;
			}

			ccw = &mxs_chan->ccw[idx++];

			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
			ccw->bufaddr = sg->dma_address;
			ccw->xfer_bytes = sg_dma_len(sg);

			ccw->bits = 0;
			ccw->bits |= CCW_CHAIN;
			ccw->bits |= CCW_HALT_ON_TERM;
			ccw->bits |= CCW_TERM_FLUSH;
			ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
					COMMAND);

			if (i + 1 == sg_len) {
				ccw->bits &= ~CCW_CHAIN;
				ccw->bits |= CCW_IRQ;
				ccw->bits |= CCW_DEC_SEM;
				if (flags & DMA_CTRL_ACK)
					ccw->bits |= CCW_WAIT4END;
			}
		}
	}
	mxs_chan->desc_count = idx;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}

static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	u32 num_periods = buf_len / period_len;
	u32 i = 0, buf = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS)
		return NULL;

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags |= MXS_DMA_SG_LOOP;
	mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE;

	if (num_periods > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				num_periods, NUM_CCW);
		goto err_out;
	}

	if (period_len > MAX_XFER_BYTES) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum period size exceeded: %zu > %d\n",
				period_len, MAX_XFER_BYTES);
		goto err_out;
	}

	while (buf < buf_len) {
		struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];

		if (i + 1 == num_periods)
			ccw->next = mxs_chan->ccw_phys;
		else
			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);

		ccw->bufaddr = dma_addr;
		ccw->xfer_bytes = period_len;

		ccw->bits = 0;
		ccw->bits |= CCW_CHAIN;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= CCW_DEC_SEM;
		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);

		dma_addr += period_len;
		buf += period_len;

		i++;
	}
	mxs_chan->desc_count = i;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}

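/*
 * Cyclic usage sketch (as an audio client might do it; "buf_phys",
 * "buf_len" and "period_len" are illustrative names, not from this
 * file). Each period raises one interrupt, and the semaphore scheme
 * above keeps the ring running until the client stops refilling it:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *					 period_len, DMA_MEM_TO_DEV, 0);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
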
static int mxs_dma_terminate_all(struct dma_chan *chan)
{
	mxs_dma_reset_chan(chan);
	mxs_dma_disable_chan(chan);

	return 0;
}

static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	u32 residue = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS &&
			mxs_chan->flags & MXS_DMA_SG_LOOP) {
		struct mxs_dma_ccw *last_ccw;
		u32 bar;

		/*
		 * The residue is the end address of the ring minus the
		 * channel's current bus address (BAR).
		 */
		last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
		residue = last_ccw->xfer_bytes + last_ccw->bufaddr;

		bar = readl(mxs_dma->base +
				HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
		residue -= bar;
	}

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			residue);

	return mxs_chan->status;
}

static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
	int ret;

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		return ret;

	ret = stmp_reset_block(mxs_dma->base);
	if (ret)
		goto err_out;

	/* enable apbh burst */
	if (dma_is_apbh(mxs_dma)) {
		writel(BM_APBH_CTRL0_APB_BURST_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_APBH_CTRL0_APB_BURST8_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	}

	/* enable irq for all the channels */
	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);

err_out:
	clk_disable_unprepare(mxs_dma->clk);
	return ret;
}

struct mxs_dma_filter_param {
	struct device_node *of_node;
	unsigned int chan_id;
};

static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct mxs_dma_filter_param *param = fn_param;
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_irq;

	if (mxs_dma->dma_device.dev->of_node != param->of_node)
		return false;

	if (chan->chan_id != param->chan_id)
		return false;

	chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
	if (chan_irq < 0)
		return false;

	mxs_chan->chan_irq = chan_irq;

	return true;
}

static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
			       struct of_dma *ofdma)
{
	struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
	dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
	struct mxs_dma_filter_param param;

	if (dma_spec->args_count != 1)
		return NULL;

	param.of_node = ofdma->of_node;
	param.chan_id = dma_spec->args[0];

	if (param.chan_id >= mxs_dma->nr_channels)
		return NULL;

	return dma_request_channel(mask, mxs_dma_filter_fn, &param);
}

static int __init mxs_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct platform_device_id *id_entry;
	const struct of_device_id *of_id;
	const struct mxs_dma_type *dma_type;
	struct mxs_dma_engine *mxs_dma;
	struct resource *iores;
	int ret, i;

	mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
	if (!mxs_dma)
		return -ENOMEM;

	ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels);
	if (ret) {
		dev_err(&pdev->dev, "failed to read dma-channels\n");
		return ret;
	}

	of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
	if (of_id)
		id_entry = of_id->data;
	else
		id_entry = platform_get_device_id(pdev);

	dma_type = (struct mxs_dma_type *)id_entry->driver_data;
	mxs_dma->type = dma_type->type;
	mxs_dma->dev_id = dma_type->id;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(mxs_dma->base))
		return PTR_ERR(mxs_dma->base);

	mxs_dma->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mxs_dma->clk))
		return PTR_ERR(mxs_dma->clk);

	dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);

	INIT_LIST_HEAD(&mxs_dma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MXS_DMA_CHANNELS; i++) {
		struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];

		mxs_chan->mxs_dma = mxs_dma;
		mxs_chan->chan.device = &mxs_dma->dma_device;
		dma_cookie_init(&mxs_chan->chan);

		tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
			     (unsigned long) mxs_chan);

		/* Add the channel to mxs_chan list */
		list_add_tail(&mxs_chan->chan.device_node,
			&mxs_dma->dma_device.channels);
	}

	ret = mxs_dma_init(mxs_dma);
	if (ret)
		return ret;

	mxs_dma->pdev = pdev;
	mxs_dma->dma_device.dev = &pdev->dev;

	/* mxs_dma supports at most MAX_XFER_BYTES (0xff00 = 65280) per sg entry */
	mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);

	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
	mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
	mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
	mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
	mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
	mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;

	ret = dma_async_device_register(&mxs_dma->dma_device);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
		return ret;
	}

	ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev,
			"failed to register controller\n");
		dma_async_device_unregister(&mxs_dma->dma_device);
	}

	dev_info(mxs_dma->dma_device.dev, "initialized\n");

	return 0;
}

static struct platform_driver mxs_dma_driver = {
	.driver		= {
		.name	= "mxs-dma",
		.of_match_table = mxs_dma_dt_ids,
	},
	.id_table	= mxs_dma_ids,
};

static int __init mxs_dma_module_init(void)
{
	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);