/* dma-jz4740.c — Ingenic JZ4740 DMA controller driver */
  1. /*
  2. * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
  3. * JZ4740 DMAC support
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License as published by the
  7. * Free Software Foundation; either version 2 of the License, or (at your
  8. * option) any later version.
  9. *
  10. */
  11. #include <linux/dmaengine.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/err.h>
  14. #include <linux/init.h>
  15. #include <linux/list.h>
  16. #include <linux/module.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/slab.h>
  19. #include <linux/spinlock.h>
  20. #include <linux/irq.h>
  21. #include <linux/clk.h>
  22. #include <asm/mach-jz4740/dma.h>
  23. #include "virt-dma.h"
  24. #define JZ_DMA_NR_CHANS 6
  25. #define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20)
  26. #define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20)
  27. #define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20)
  28. #define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20)
  29. #define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20)
  30. #define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20)
  31. #define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20)
  32. #define JZ_REG_DMA_CTRL 0x300
  33. #define JZ_REG_DMA_IRQ 0x304
  34. #define JZ_REG_DMA_DOORBELL 0x308
  35. #define JZ_REG_DMA_DOORBELL_SET 0x30C
  36. #define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31)
  37. #define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6)
  38. #define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4)
  39. #define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3)
  40. #define JZ_DMA_STATUS_CTRL_HALT BIT(2)
  41. #define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1)
  42. #define JZ_DMA_STATUS_CTRL_ENABLE BIT(0)
  43. #define JZ_DMA_CMD_SRC_INC BIT(23)
  44. #define JZ_DMA_CMD_DST_INC BIT(22)
  45. #define JZ_DMA_CMD_RDIL_MASK (0xf << 16)
  46. #define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14)
  47. #define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12)
  48. #define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8)
  49. #define JZ_DMA_CMD_BLOCK_MODE BIT(7)
  50. #define JZ_DMA_CMD_DESC_VALID BIT(4)
  51. #define JZ_DMA_CMD_DESC_VALID_MODE BIT(3)
  52. #define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2)
  53. #define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1)
  54. #define JZ_DMA_CMD_LINK_ENABLE BIT(0)
  55. #define JZ_DMA_CMD_FLAGS_OFFSET 22
  56. #define JZ_DMA_CMD_RDIL_OFFSET 16
  57. #define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14
  58. #define JZ_DMA_CMD_DST_WIDTH_OFFSET 12
  59. #define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8
  60. #define JZ_DMA_CMD_MODE_OFFSET 7
  61. #define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8)
  62. #define JZ_DMA_CTRL_HALT BIT(3)
  63. #define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2)
  64. #define JZ_DMA_CTRL_ENABLE BIT(0)
  65. enum jz4740_dma_width {
  66. JZ4740_DMA_WIDTH_32BIT = 0,
  67. JZ4740_DMA_WIDTH_8BIT = 1,
  68. JZ4740_DMA_WIDTH_16BIT = 2,
  69. };
  70. enum jz4740_dma_transfer_size {
  71. JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0,
  72. JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1,
  73. JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2,
  74. JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3,
  75. JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4,
  76. };
  77. enum jz4740_dma_flags {
  78. JZ4740_DMA_SRC_AUTOINC = 0x2,
  79. JZ4740_DMA_DST_AUTOINC = 0x1,
  80. };
  81. enum jz4740_dma_mode {
  82. JZ4740_DMA_MODE_SINGLE = 0,
  83. JZ4740_DMA_MODE_BLOCK = 1,
  84. };
  85. struct jz4740_dma_sg {
  86. dma_addr_t addr;
  87. unsigned int len;
  88. };
  89. struct jz4740_dma_desc {
  90. struct virt_dma_desc vdesc;
  91. enum dma_transfer_direction direction;
  92. bool cyclic;
  93. unsigned int num_sgs;
  94. struct jz4740_dma_sg sg[];
  95. };
  96. struct jz4740_dmaengine_chan {
  97. struct virt_dma_chan vchan;
  98. unsigned int id;
  99. dma_addr_t fifo_addr;
  100. unsigned int transfer_shift;
  101. struct jz4740_dma_desc *desc;
  102. unsigned int next_sg;
  103. };
  104. struct jz4740_dma_dev {
  105. struct dma_device ddev;
  106. void __iomem *base;
  107. struct clk *clk;
  108. struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS];
  109. };
  110. static struct jz4740_dma_dev *jz4740_dma_chan_get_dev(
  111. struct jz4740_dmaengine_chan *chan)
  112. {
  113. return container_of(chan->vchan.chan.device, struct jz4740_dma_dev,
  114. ddev);
  115. }
  116. static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c)
  117. {
  118. return container_of(c, struct jz4740_dmaengine_chan, vchan.chan);
  119. }
  120. static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc)
  121. {
  122. return container_of(vdesc, struct jz4740_dma_desc, vdesc);
  123. }
  124. static inline uint32_t jz4740_dma_read(struct jz4740_dma_dev *dmadev,
  125. unsigned int reg)
  126. {
  127. return readl(dmadev->base + reg);
  128. }
  129. static inline void jz4740_dma_write(struct jz4740_dma_dev *dmadev,
  130. unsigned reg, uint32_t val)
  131. {
  132. writel(val, dmadev->base + reg);
  133. }
  134. static inline void jz4740_dma_write_mask(struct jz4740_dma_dev *dmadev,
  135. unsigned int reg, uint32_t val, uint32_t mask)
  136. {
  137. uint32_t tmp;
  138. tmp = jz4740_dma_read(dmadev, reg);
  139. tmp &= ~mask;
  140. tmp |= val;
  141. jz4740_dma_write(dmadev, reg, tmp);
  142. }
  143. static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs)
  144. {
  145. return kzalloc(sizeof(struct jz4740_dma_desc) +
  146. sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC);
  147. }
  148. static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width)
  149. {
  150. switch (width) {
  151. case DMA_SLAVE_BUSWIDTH_1_BYTE:
  152. return JZ4740_DMA_WIDTH_8BIT;
  153. case DMA_SLAVE_BUSWIDTH_2_BYTES:
  154. return JZ4740_DMA_WIDTH_16BIT;
  155. case DMA_SLAVE_BUSWIDTH_4_BYTES:
  156. return JZ4740_DMA_WIDTH_32BIT;
  157. default:
  158. return JZ4740_DMA_WIDTH_32BIT;
  159. }
  160. }
  161. static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
  162. {
  163. if (maxburst <= 1)
  164. return JZ4740_DMA_TRANSFER_SIZE_1BYTE;
  165. else if (maxburst <= 3)
  166. return JZ4740_DMA_TRANSFER_SIZE_2BYTE;
  167. else if (maxburst <= 15)
  168. return JZ4740_DMA_TRANSFER_SIZE_4BYTE;
  169. else if (maxburst <= 31)
  170. return JZ4740_DMA_TRANSFER_SIZE_16BYTE;
  171. return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
  172. }
  173. static int jz4740_dma_slave_config(struct dma_chan *c,
  174. struct dma_slave_config *config)
  175. {
  176. struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
  177. struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
  178. enum jz4740_dma_width src_width;
  179. enum jz4740_dma_width dst_width;
  180. enum jz4740_dma_transfer_size transfer_size;
  181. enum jz4740_dma_flags flags;
  182. uint32_t cmd;
  183. switch (config->direction) {
  184. case DMA_MEM_TO_DEV:
  185. flags = JZ4740_DMA_SRC_AUTOINC;
  186. transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
  187. chan->fifo_addr = config->dst_addr;
  188. break;
  189. case DMA_DEV_TO_MEM:
  190. flags = JZ4740_DMA_DST_AUTOINC;
  191. transfer_size = jz4740_dma_maxburst(config->src_maxburst);
  192. chan->fifo_addr = config->src_addr;
  193. break;
  194. default:
  195. return -EINVAL;
  196. }
  197. src_width = jz4740_dma_width(config->src_addr_width);
  198. dst_width = jz4740_dma_width(config->dst_addr_width);
  199. switch (transfer_size) {
  200. case JZ4740_DMA_TRANSFER_SIZE_2BYTE:
  201. chan->transfer_shift = 1;
  202. break;
  203. case JZ4740_DMA_TRANSFER_SIZE_4BYTE:
  204. chan->transfer_shift = 2;
  205. break;
  206. case JZ4740_DMA_TRANSFER_SIZE_16BYTE:
  207. chan->transfer_shift = 4;
  208. break;
  209. case JZ4740_DMA_TRANSFER_SIZE_32BYTE:
  210. chan->transfer_shift = 5;
  211. break;
  212. default:
  213. chan->transfer_shift = 0;
  214. break;
  215. }
  216. cmd = flags << JZ_DMA_CMD_FLAGS_OFFSET;
  217. cmd |= src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET;
  218. cmd |= dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET;
  219. cmd |= transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET;
  220. cmd |= JZ4740_DMA_MODE_SINGLE << JZ_DMA_CMD_MODE_OFFSET;
  221. cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE;
  222. jz4740_dma_write(dmadev, JZ_REG_DMA_CMD(chan->id), cmd);
  223. jz4740_dma_write(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0);
  224. jz4740_dma_write(dmadev, JZ_REG_DMA_REQ_TYPE(chan->id),
  225. config->slave_id);
  226. return 0;
  227. }
  228. static int jz4740_dma_terminate_all(struct dma_chan *c)
  229. {
  230. struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
  231. struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
  232. unsigned long flags;
  233. LIST_HEAD(head);
  234. spin_lock_irqsave(&chan->vchan.lock, flags);
  235. jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
  236. JZ_DMA_STATUS_CTRL_ENABLE);
  237. chan->desc = NULL;
  238. vchan_get_all_descriptors(&chan->vchan, &head);
  239. spin_unlock_irqrestore(&chan->vchan.lock, flags);
  240. vchan_dma_desc_free_list(&chan->vchan, &head);
  241. return 0;
  242. }
  243. static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
  244. {
  245. struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
  246. dma_addr_t src_addr, dst_addr;
  247. struct virt_dma_desc *vdesc;
  248. struct jz4740_dma_sg *sg;
  249. jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id), 0,
  250. JZ_DMA_STATUS_CTRL_ENABLE);
  251. if (!chan->desc) {
  252. vdesc = vchan_next_desc(&chan->vchan);
  253. if (!vdesc)
  254. return 0;
  255. chan->desc = to_jz4740_dma_desc(vdesc);
  256. chan->next_sg = 0;
  257. }
  258. if (chan->next_sg == chan->desc->num_sgs)
  259. chan->next_sg = 0;
  260. sg = &chan->desc->sg[chan->next_sg];
  261. if (chan->desc->direction == DMA_MEM_TO_DEV) {
  262. src_addr = sg->addr;
  263. dst_addr = chan->fifo_addr;
  264. } else {
  265. src_addr = chan->fifo_addr;
  266. dst_addr = sg->addr;
  267. }
  268. jz4740_dma_write(dmadev, JZ_REG_DMA_SRC_ADDR(chan->id), src_addr);
  269. jz4740_dma_write(dmadev, JZ_REG_DMA_DST_ADDR(chan->id), dst_addr);
  270. jz4740_dma_write(dmadev, JZ_REG_DMA_TRANSFER_COUNT(chan->id),
  271. sg->len >> chan->transfer_shift);
  272. chan->next_sg++;
  273. jz4740_dma_write_mask(dmadev, JZ_REG_DMA_STATUS_CTRL(chan->id),
  274. JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE,
  275. JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC |
  276. JZ_DMA_STATUS_CTRL_ENABLE);
  277. jz4740_dma_write_mask(dmadev, JZ_REG_DMA_CTRL,
  278. JZ_DMA_CTRL_ENABLE,
  279. JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE);
  280. return 0;
  281. }
  282. static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
  283. {
  284. spin_lock(&chan->vchan.lock);
  285. if (chan->desc) {
  286. if (chan->desc->cyclic) {
  287. vchan_cyclic_callback(&chan->desc->vdesc);
  288. } else {
  289. if (chan->next_sg == chan->desc->num_sgs) {
  290. list_del(&chan->desc->vdesc.node);
  291. vchan_cookie_complete(&chan->desc->vdesc);
  292. chan->desc = NULL;
  293. }
  294. }
  295. }
  296. jz4740_dma_start_transfer(chan);
  297. spin_unlock(&chan->vchan.lock);
  298. }
  299. static irqreturn_t jz4740_dma_irq(int irq, void *devid)
  300. {
  301. struct jz4740_dma_dev *dmadev = devid;
  302. uint32_t irq_status;
  303. unsigned int i;
  304. irq_status = readl(dmadev->base + JZ_REG_DMA_IRQ);
  305. for (i = 0; i < 6; ++i) {
  306. if (irq_status & (1 << i)) {
  307. jz4740_dma_write_mask(dmadev,
  308. JZ_REG_DMA_STATUS_CTRL(i), 0,
  309. JZ_DMA_STATUS_CTRL_ENABLE |
  310. JZ_DMA_STATUS_CTRL_TRANSFER_DONE);
  311. jz4740_dma_chan_irq(&dmadev->chan[i]);
  312. }
  313. }
  314. return IRQ_HANDLED;
  315. }
  316. static void jz4740_dma_issue_pending(struct dma_chan *c)
  317. {
  318. struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
  319. unsigned long flags;
  320. spin_lock_irqsave(&chan->vchan.lock, flags);
  321. if (vchan_issue_pending(&chan->vchan) && !chan->desc)
  322. jz4740_dma_start_transfer(chan);
  323. spin_unlock_irqrestore(&chan->vchan.lock, flags);
  324. }
  325. static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
  326. struct dma_chan *c, struct scatterlist *sgl,
  327. unsigned int sg_len, enum dma_transfer_direction direction,
  328. unsigned long flags, void *context)
  329. {
  330. struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
  331. struct jz4740_dma_desc *desc;
  332. struct scatterlist *sg;
  333. unsigned int i;
  334. desc = jz4740_dma_alloc_desc(sg_len);
  335. if (!desc)
  336. return NULL;
  337. for_each_sg(sgl, sg, sg_len, i) {
  338. desc->sg[i].addr = sg_dma_address(sg);
  339. desc->sg[i].len = sg_dma_len(sg);
  340. }
  341. desc->num_sgs = sg_len;
  342. desc->direction = direction;
  343. desc->cyclic = false;
  344. return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
  345. }
  346. static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
  347. struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
  348. size_t period_len, enum dma_transfer_direction direction,
  349. unsigned long flags)
  350. {
  351. struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
  352. struct jz4740_dma_desc *desc;
  353. unsigned int num_periods, i;
  354. if (buf_len % period_len)
  355. return NULL;
  356. num_periods = buf_len / period_len;
  357. desc = jz4740_dma_alloc_desc(num_periods);
  358. if (!desc)
  359. return NULL;
  360. for (i = 0; i < num_periods; i++) {
  361. desc->sg[i].addr = buf_addr;
  362. desc->sg[i].len = period_len;
  363. buf_addr += period_len;
  364. }
  365. desc->num_sgs = num_periods;
  366. desc->direction = direction;
  367. desc->cyclic = true;
  368. return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
  369. }
  370. static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan,
  371. struct jz4740_dma_desc *desc, unsigned int next_sg)
  372. {
  373. struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
  374. unsigned int residue, count;
  375. unsigned int i;
  376. residue = 0;
  377. for (i = next_sg; i < desc->num_sgs; i++)
  378. residue += desc->sg[i].len;
  379. if (next_sg != 0) {
  380. count = jz4740_dma_read(dmadev,
  381. JZ_REG_DMA_TRANSFER_COUNT(chan->id));
  382. residue += count << chan->transfer_shift;
  383. }
  384. return residue;
  385. }
  386. static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
  387. dma_cookie_t cookie, struct dma_tx_state *state)
  388. {
  389. struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
  390. struct virt_dma_desc *vdesc;
  391. enum dma_status status;
  392. unsigned long flags;
  393. status = dma_cookie_status(c, cookie, state);
  394. if (status == DMA_COMPLETE || !state)
  395. return status;
  396. spin_lock_irqsave(&chan->vchan.lock, flags);
  397. vdesc = vchan_find_desc(&chan->vchan, cookie);
  398. if (cookie == chan->desc->vdesc.tx.cookie) {
  399. state->residue = jz4740_dma_desc_residue(chan, chan->desc,
  400. chan->next_sg);
  401. } else if (vdesc) {
  402. state->residue = jz4740_dma_desc_residue(chan,
  403. to_jz4740_dma_desc(vdesc), 0);
  404. } else {
  405. state->residue = 0;
  406. }
  407. spin_unlock_irqrestore(&chan->vchan.lock, flags);
  408. return status;
  409. }
  410. static void jz4740_dma_free_chan_resources(struct dma_chan *c)
  411. {
  412. vchan_free_chan_resources(to_virt_chan(c));
  413. }
  414. static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
  415. {
  416. kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
  417. }
  418. #define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
  419. BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
  420. static int jz4740_dma_probe(struct platform_device *pdev)
  421. {
  422. struct jz4740_dmaengine_chan *chan;
  423. struct jz4740_dma_dev *dmadev;
  424. struct dma_device *dd;
  425. unsigned int i;
  426. struct resource *res;
  427. int ret;
  428. int irq;
  429. dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
  430. if (!dmadev)
  431. return -EINVAL;
  432. dd = &dmadev->ddev;
  433. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  434. dmadev->base = devm_ioremap_resource(&pdev->dev, res);
  435. if (IS_ERR(dmadev->base))
  436. return PTR_ERR(dmadev->base);
  437. dmadev->clk = clk_get(&pdev->dev, "dma");
  438. if (IS_ERR(dmadev->clk))
  439. return PTR_ERR(dmadev->clk);
  440. clk_prepare_enable(dmadev->clk);
  441. dma_cap_set(DMA_SLAVE, dd->cap_mask);
  442. dma_cap_set(DMA_CYCLIC, dd->cap_mask);
  443. dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
  444. dd->device_tx_status = jz4740_dma_tx_status;
  445. dd->device_issue_pending = jz4740_dma_issue_pending;
  446. dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
  447. dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
  448. dd->device_config = jz4740_dma_slave_config;
  449. dd->device_terminate_all = jz4740_dma_terminate_all;
  450. dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
  451. dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
  452. dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
  453. dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
  454. dd->dev = &pdev->dev;
  455. INIT_LIST_HEAD(&dd->channels);
  456. for (i = 0; i < JZ_DMA_NR_CHANS; i++) {
  457. chan = &dmadev->chan[i];
  458. chan->id = i;
  459. chan->vchan.desc_free = jz4740_dma_desc_free;
  460. vchan_init(&chan->vchan, dd);
  461. }
  462. ret = dma_async_device_register(dd);
  463. if (ret)
  464. goto err_clk;
  465. irq = platform_get_irq(pdev, 0);
  466. ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
  467. if (ret)
  468. goto err_unregister;
  469. platform_set_drvdata(pdev, dmadev);
  470. return 0;
  471. err_unregister:
  472. dma_async_device_unregister(dd);
  473. err_clk:
  474. clk_disable_unprepare(dmadev->clk);
  475. return ret;
  476. }
  477. static int jz4740_dma_remove(struct platform_device *pdev)
  478. {
  479. struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
  480. int irq = platform_get_irq(pdev, 0);
  481. free_irq(irq, dmadev);
  482. dma_async_device_unregister(&dmadev->ddev);
  483. clk_disable_unprepare(dmadev->clk);
  484. return 0;
  485. }
  486. static struct platform_driver jz4740_dma_driver = {
  487. .probe = jz4740_dma_probe,
  488. .remove = jz4740_dma_remove,
  489. .driver = {
  490. .name = "jz4740-dma",
  491. },
  492. };
  493. module_platform_driver(jz4740_dma_driver);
  494. MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
  495. MODULE_DESCRIPTION("JZ4740 DMA driver");
  496. MODULE_LICENSE("GPL v2");