/*
 * IMG Multi-threaded DMA Controller (MDC)
 *
 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MDC_MAX_DMA_CHANNELS                    32

#define MDC_GENERAL_CONFIG                      0x000
#define MDC_GENERAL_CONFIG_LIST_IEN             BIT(31)
#define MDC_GENERAL_CONFIG_IEN                  BIT(29)
#define MDC_GENERAL_CONFIG_LEVEL_INT            BIT(28)
#define MDC_GENERAL_CONFIG_INC_W                BIT(12)
#define MDC_GENERAL_CONFIG_INC_R                BIT(8)
#define MDC_GENERAL_CONFIG_PHYSICAL_W           BIT(7)
#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT        4
#define MDC_GENERAL_CONFIG_WIDTH_W_MASK         0x7
#define MDC_GENERAL_CONFIG_PHYSICAL_R           BIT(3)
#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT        0
#define MDC_GENERAL_CONFIG_WIDTH_R_MASK         0x7

#define MDC_READ_PORT_CONFIG                    0x004
#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT      28
#define MDC_READ_PORT_CONFIG_STHREAD_MASK       0xf
#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT      24
#define MDC_READ_PORT_CONFIG_RTHREAD_MASK       0xf
#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT      16
#define MDC_READ_PORT_CONFIG_WTHREAD_MASK       0xf
#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT   4
#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK    0xff
#define MDC_READ_PORT_CONFIG_DREQ_ENABLE        BIT(1)

#define MDC_READ_ADDRESS                        0x008
#define MDC_WRITE_ADDRESS                       0x00c

#define MDC_TRANSFER_SIZE                       0x010
#define MDC_TRANSFER_SIZE_MASK                  0xffffff

#define MDC_LIST_NODE_ADDRESS                   0x014

#define MDC_CMDS_PROCESSED                      0x018
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT 16
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK  0x3f
#define MDC_CMDS_PROCESSED_INT_ACTIVE           BIT(8)
#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT      0
#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK       0x3f

#define MDC_CONTROL_AND_STATUS                  0x01c
#define MDC_CONTROL_AND_STATUS_CANCEL           BIT(20)
#define MDC_CONTROL_AND_STATUS_LIST_EN          BIT(4)
#define MDC_CONTROL_AND_STATUS_EN               BIT(0)

#define MDC_ACTIVE_TRANSFER_SIZE                0x030

#define MDC_GLOBAL_CONFIG_A                     0x900
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT 16
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK  0xff
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT  8
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK   0xff
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT 0
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK  0xff
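
/*
 * The first eight fields below mirror the per-channel registers at offsets
 * 0x000-0x01c (MDC_GENERAL_CONFIG through MDC_CONTROL_AND_STATUS), so the
 * controller can fetch a queued command directly from memory once
 * MDC_LIST_NODE_ADDRESS points at the head of the list.
 */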
struct mdc_hw_list_desc {
        u32 gen_conf;
        u32 readport_conf;
        u32 read_addr;
        u32 write_addr;
        u32 xfer_size;
        u32 node_addr;
        u32 cmds_done;
        u32 ctrl_status;
        /*
         * Not part of the list descriptor, but instead used by the CPU to
         * traverse the list.
         */
        struct mdc_hw_list_desc *next_desc;
};

struct mdc_tx_desc {
        struct mdc_chan *chan;
        struct virt_dma_desc vd;
        dma_addr_t list_phys;
        struct mdc_hw_list_desc *list;
        bool cyclic;
        bool cmd_loaded;
        unsigned int list_len;
        unsigned int list_period_len;
        size_t list_xfer_size;
        unsigned int list_cmds_done;
};

struct mdc_chan {
        struct mdc_dma *mdma;
        struct virt_dma_chan vc;
        struct dma_slave_config config;
        struct mdc_tx_desc *desc;
        int irq;
        unsigned int periph;
        unsigned int thread;
        unsigned int chan_nr;
};

struct mdc_dma_soc_data {
        void (*enable_chan)(struct mdc_chan *mchan);
        void (*disable_chan)(struct mdc_chan *mchan);
};

struct mdc_dma {
        struct dma_device dma_dev;
        void __iomem *regs;
        struct clk *clk;
        struct dma_pool *desc_pool;
        struct regmap *periph_regs;
        spinlock_t lock;
        unsigned int nr_threads;
        unsigned int nr_channels;
        unsigned int bus_width;
        unsigned int max_burst_mult;
        unsigned int max_xfer_size;
        const struct mdc_dma_soc_data *soc;
        struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
};

static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
{
        return readl(mdma->regs + reg);
}

static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
{
        writel(val, mdma->regs + reg);
}
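
/*
 * Each channel has its own 0x040-byte register window, so channel N's copy
 * of a register lives at N * 0x040 + reg from the controller base.
 */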
static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
{
        return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
}

static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
{
        mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
}

static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
{
        return container_of(to_virt_chan(c), struct mdc_chan, vc);
}

static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
{
        struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);

        return container_of(vdesc, struct mdc_tx_desc, vd);
}

static inline struct device *mdma2dev(struct mdc_dma *mdma)
{
        return mdma->dma_dev.dev;
}
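
/*
 * The WIDTH_R/WIDTH_W fields hold log2 of the access width in bytes, so for
 * example a 4-byte access is encoded as 2. 'bytes' is assumed to be a power
 * of two, which holds for the bus widths this driver accepts.
 */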
static inline unsigned int to_mdc_width(unsigned int bytes)
{
        return ffs(bytes) - 1;
}

static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
                                      unsigned int bytes)
{
        ldesc->gen_conf |= to_mdc_width(bytes) <<
                MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
}

static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
                                       unsigned int bytes)
{
        ldesc->gen_conf |= to_mdc_width(bytes) <<
                MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
}
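
/*
 * Fill in one hardware list node describing a single chunk of a transfer.
 * The burst size is capped at max_burst_mult bus-words (one less when either
 * address is misaligned), and for slave transfers it is further limited by
 * the peripheral's maxburst * address width.
 */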
static void mdc_list_desc_config(struct mdc_chan *mchan,
                                 struct mdc_hw_list_desc *ldesc,
                                 enum dma_transfer_direction dir,
                                 dma_addr_t src, dma_addr_t dst, size_t len)
{
        struct mdc_dma *mdma = mchan->mdma;
        unsigned int max_burst, burst_size;

        ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
                MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
                MDC_GENERAL_CONFIG_PHYSICAL_R;
        ldesc->readport_conf =
                (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
        ldesc->read_addr = src;
        ldesc->write_addr = dst;
        ldesc->xfer_size = len - 1;
        ldesc->node_addr = 0;
        ldesc->cmds_done = 0;
        ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
                MDC_CONTROL_AND_STATUS_EN;
        ldesc->next_desc = NULL;

        if (IS_ALIGNED(dst, mdma->bus_width) &&
            IS_ALIGNED(src, mdma->bus_width))
                max_burst = mdma->bus_width * mdma->max_burst_mult;
        else
                max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);

        if (dir == DMA_MEM_TO_DEV) {
                ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
                ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
                mdc_set_read_width(ldesc, mdma->bus_width);
                mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
                burst_size = min(max_burst, mchan->config.dst_maxburst *
                                 mchan->config.dst_addr_width);
        } else if (dir == DMA_DEV_TO_MEM) {
                ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
                ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
                mdc_set_read_width(ldesc, mchan->config.src_addr_width);
                mdc_set_write_width(ldesc, mdma->bus_width);
                burst_size = min(max_burst, mchan->config.src_maxburst *
                                 mchan->config.src_addr_width);
        } else {
                ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
                        MDC_GENERAL_CONFIG_INC_W;
                mdc_set_read_width(ldesc, mdma->bus_width);
                mdc_set_write_width(ldesc, mdma->bus_width);
                burst_size = max_burst;
        }
        ldesc->readport_conf |= (burst_size - 1) <<
                MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
}

static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
{
        struct mdc_dma *mdma = mdesc->chan->mdma;
        struct mdc_hw_list_desc *curr, *next;
        dma_addr_t curr_phys, next_phys;

        curr = mdesc->list;
        curr_phys = mdesc->list_phys;
        while (curr) {
                next = curr->next_desc;
                next_phys = curr->node_addr;
                dma_pool_free(mdma->desc_pool, curr, curr_phys);
                curr = next;
                curr_phys = next_phys;
        }
}

static void mdc_desc_free(struct virt_dma_desc *vd)
{
        struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);

        mdc_list_desc_free(mdesc);
        kfree(mdesc);
}
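
/*
 * A memcpy transaction is built as a chain of list nodes of at most
 * max_xfer_size bytes each, allowing a single dmaengine descriptor to
 * exceed the hardware's 24-bit per-command transfer size.
 */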
static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
        unsigned long flags)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;
        struct mdc_tx_desc *mdesc;
        struct mdc_hw_list_desc *curr, *prev = NULL;
        dma_addr_t curr_phys, prev_phys;

        if (!len)
                return NULL;

        mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
        if (!mdesc)
                return NULL;
        mdesc->chan = mchan;
        mdesc->list_xfer_size = len;

        while (len > 0) {
                size_t xfer_size;

                curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
                if (!curr)
                        goto free_desc;

                if (prev) {
                        prev->node_addr = curr_phys;
                        prev->next_desc = curr;
                } else {
                        mdesc->list_phys = curr_phys;
                        mdesc->list = curr;
                }

                xfer_size = min_t(size_t, mdma->max_xfer_size, len);

                mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
                                     xfer_size);

                prev = curr;
                prev_phys = curr_phys;

                mdesc->list_len++;
                src += xfer_size;
                dest += xfer_size;
                len -= xfer_size;
        }

        return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
        mdc_desc_free(&mdesc->vd);

        return NULL;
}
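
/*
 * Peripheral accesses of 1, 2, 4 or 8 bytes are supported, but never wider
 * than the controller's own bus.
 */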
static int mdc_check_slave_width(struct mdc_chan *mchan,
                                 enum dma_transfer_direction dir)
{
        enum dma_slave_buswidth width;

        if (dir == DMA_MEM_TO_DEV)
                width = mchan->config.dst_addr_width;
        else
                width = mchan->config.src_addr_width;

        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                break;
        default:
                return -EINVAL;
        }

        if (width > mchan->mdma->bus_width)
                return -EINVAL;

        return 0;
}
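
/*
 * Cyclic transfers reuse the list-node chain, with the final node's
 * node_addr pointed back at the head so the hardware loops indefinitely.
 * list_period_len records how many nodes make up one period so the IRQ
 * handler knows when to invoke the period callback.
 */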
static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir,
        unsigned long flags)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;
        struct mdc_tx_desc *mdesc;
        struct mdc_hw_list_desc *curr, *prev = NULL;
        dma_addr_t curr_phys, prev_phys;

        if (!buf_len && !period_len)
                return NULL;

        if (!is_slave_direction(dir))
                return NULL;

        if (mdc_check_slave_width(mchan, dir) < 0)
                return NULL;

        mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
        if (!mdesc)
                return NULL;
        mdesc->chan = mchan;
        mdesc->cyclic = true;
        mdesc->list_xfer_size = buf_len;
        mdesc->list_period_len = DIV_ROUND_UP(period_len,
                                              mdma->max_xfer_size);

        while (buf_len > 0) {
                size_t remainder = min(period_len, buf_len);

                while (remainder > 0) {
                        size_t xfer_size;

                        curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
                                              &curr_phys);
                        if (!curr)
                                goto free_desc;

                        if (!prev) {
                                mdesc->list_phys = curr_phys;
                                mdesc->list = curr;
                        } else {
                                prev->node_addr = curr_phys;
                                prev->next_desc = curr;
                        }

                        xfer_size = min_t(size_t, mdma->max_xfer_size,
                                          remainder);

                        if (dir == DMA_MEM_TO_DEV) {
                                mdc_list_desc_config(mchan, curr, dir,
                                                     buf_addr,
                                                     mchan->config.dst_addr,
                                                     xfer_size);
                        } else {
                                mdc_list_desc_config(mchan, curr, dir,
                                                     mchan->config.src_addr,
                                                     buf_addr,
                                                     xfer_size);
                        }

                        prev = curr;
                        prev_phys = curr_phys;

                        mdesc->list_len++;
                        buf_addr += xfer_size;
                        buf_len -= xfer_size;
                        remainder -= xfer_size;
                }
        }
        prev->node_addr = mdesc->list_phys;

        return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
        mdc_desc_free(&mdesc->vd);

        return NULL;
}
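
/*
 * As in the cyclic case, each scatterlist entry is split into nodes of at
 * most max_xfer_size bytes; the running total is kept in list_xfer_size
 * for residue reporting.
 */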
static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction dir,
        unsigned long flags, void *context)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;
        struct mdc_tx_desc *mdesc;
        struct scatterlist *sg;
        struct mdc_hw_list_desc *curr, *prev = NULL;
        dma_addr_t curr_phys, prev_phys;
        unsigned int i;

        if (!sgl)
                return NULL;

        if (!is_slave_direction(dir))
                return NULL;

        if (mdc_check_slave_width(mchan, dir) < 0)
                return NULL;

        mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
        if (!mdesc)
                return NULL;
        mdesc->chan = mchan;

        for_each_sg(sgl, sg, sg_len, i) {
                dma_addr_t buf = sg_dma_address(sg);
                size_t buf_len = sg_dma_len(sg);

                while (buf_len > 0) {
                        size_t xfer_size;

                        curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
                                              &curr_phys);
                        if (!curr)
                                goto free_desc;

                        if (!prev) {
                                mdesc->list_phys = curr_phys;
                                mdesc->list = curr;
                        } else {
                                prev->node_addr = curr_phys;
                                prev->next_desc = curr;
                        }

                        xfer_size = min_t(size_t, mdma->max_xfer_size,
                                          buf_len);

                        if (dir == DMA_MEM_TO_DEV) {
                                mdc_list_desc_config(mchan, curr, dir, buf,
                                                     mchan->config.dst_addr,
                                                     xfer_size);
                        } else {
                                mdc_list_desc_config(mchan, curr, dir,
                                                     mchan->config.src_addr,
                                                     buf, xfer_size);
                        }

                        prev = curr;
                        prev_phys = curr_phys;

                        mdesc->list_len++;
                        mdesc->list_xfer_size += xfer_size;
                        buf += xfer_size;
                        buf_len -= xfer_size;
                }
        }

        return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
        mdc_desc_free(&mdesc->vd);

        return NULL;
}
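
/*
 * Hand the next queued descriptor to the hardware: route the peripheral to
 * this channel (SoC-specific), program the thread IDs and interrupt enables,
 * point MDC_LIST_NODE_ADDRESS at the head of the list, and set LIST_EN to
 * start list processing. Called with mchan->vc.lock held.
 */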
static void mdc_issue_desc(struct mdc_chan *mchan)
{
        struct mdc_dma *mdma = mchan->mdma;
        struct virt_dma_desc *vd;
        struct mdc_tx_desc *mdesc;
        u32 val;

        vd = vchan_next_desc(&mchan->vc);
        if (!vd)
                return;

        list_del(&vd->node);

        mdesc = to_mdc_desc(&vd->tx);
        mchan->desc = mdesc;

        dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
                mchan->chan_nr);

        mdma->soc->enable_chan(mchan);

        val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
        val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
                MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
                MDC_GENERAL_CONFIG_PHYSICAL_R;
        mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
        val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
        mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
        mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
        val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
        val |= MDC_CONTROL_AND_STATUS_LIST_EN;
        mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
}

static void mdc_issue_pending(struct dma_chan *chan)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&mchan->vc.lock, flags);
        if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
                mdc_issue_desc(mchan);
        spin_unlock_irqrestore(&mchan->vc.lock, flags);
}
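
/*
 * Residue for the in-flight descriptor is derived from the list: subtract
 * the full size of every completed command, then the consumed part of the
 * current one. MDC_ACTIVE_TRANSFER_SIZE appears to use the same "size - 1"
 * encoding as TRANSFER_SIZE, so an all-ones reading is taken to mean the
 * current command has completed (see the max_xfer_size comment in
 * mdc_dma_probe()).
 */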
static enum dma_status mdc_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_tx_desc *mdesc;
        struct virt_dma_desc *vd;
        unsigned long flags;
        size_t bytes = 0;
        int ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        if (!txstate)
                return ret;

        spin_lock_irqsave(&mchan->vc.lock, flags);
        vd = vchan_find_desc(&mchan->vc, cookie);
        if (vd) {
                mdesc = to_mdc_desc(&vd->tx);
                bytes = mdesc->list_xfer_size;
        } else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
                struct mdc_hw_list_desc *ldesc;
                u32 val1, val2, done, processed, residue;
                int i, cmds;

                mdesc = mchan->desc;

                /*
                 * Determine the number of commands that haven't been
                 * processed (handled by the IRQ handler) yet.
                 */
                do {
                        val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
                                ~MDC_CMDS_PROCESSED_INT_ACTIVE;
                        residue = mdc_chan_readl(mchan,
                                                 MDC_ACTIVE_TRANSFER_SIZE);
                        val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
                                ~MDC_CMDS_PROCESSED_INT_ACTIVE;
                } while (val1 != val2);

                done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
                processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
                cmds = (done - processed) %
                        (MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);

                /*
                 * If the command loaded event hasn't been processed yet, then
                 * the difference above includes an extra command.
                 */
                if (!mdesc->cmd_loaded)
                        cmds--;
                else
                        cmds += mdesc->list_cmds_done;

                bytes = mdesc->list_xfer_size;
                ldesc = mdesc->list;
                for (i = 0; i < cmds; i++) {
                        bytes -= ldesc->xfer_size + 1;
                        ldesc = ldesc->next_desc;
                }
                if (ldesc) {
                        if (residue != MDC_TRANSFER_SIZE_MASK)
                                bytes -= ldesc->xfer_size - residue;
                        else
                                bytes -= ldesc->xfer_size + 1;
                }
        }
        spin_unlock_irqrestore(&mchan->vc.lock, flags);

        dma_set_residue(txstate, bytes);

        return ret;
}

static int mdc_terminate_all(struct dma_chan *chan)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_tx_desc *mdesc;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&mchan->vc.lock, flags);

        mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
                        MDC_CONTROL_AND_STATUS);

        mdesc = mchan->desc;
        mchan->desc = NULL;
        vchan_get_all_descriptors(&mchan->vc, &head);

        spin_unlock_irqrestore(&mchan->vc.lock, flags);

        if (mdesc)
                mdc_desc_free(&mdesc->vd);
        vchan_dma_desc_free_list(&mchan->vc, &head);

        return 0;
}

static int mdc_slave_config(struct dma_chan *chan,
                            struct dma_slave_config *config)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&mchan->vc.lock, flags);
        mchan->config = *config;
        spin_unlock_irqrestore(&mchan->vc.lock, flags);

        return 0;
}

static void mdc_free_chan_resources(struct dma_chan *chan)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;

        mdc_terminate_all(chan);

        mdma->soc->disable_chan(mchan);
}
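
/*
 * The hardware maintains a free-running 6-bit CMDS_DONE counter. The handler
 * acknowledges the interrupt by copying CMDS_DONE into the CMDS_PROCESSED
 * field (clearing INT_ACTIVE at the same time), then walks the commands
 * completed since the last interrupt. Note that the first interrupt of a
 * transfer only signals that the command list was loaded.
 */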
static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
        struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
        struct mdc_tx_desc *mdesc;
        u32 val, processed, done1, done2;
        unsigned int i;

        spin_lock(&mchan->vc.lock);

        val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
        processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
                MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
        /*
         * CMDS_DONE may have incremented between reading CMDS_PROCESSED
         * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
         * didn't miss a command completion.
         */
        do {
                val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
                done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
                val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
                          MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
                         MDC_CMDS_PROCESSED_INT_ACTIVE);
                val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
                mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
                val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
                done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
        } while (done1 != done2);

        dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);

        mdesc = mchan->desc;
        if (!mdesc) {
                dev_warn(mdma2dev(mchan->mdma),
                         "IRQ with no active descriptor on channel %d\n",
                         mchan->chan_nr);
                goto out;
        }

        for (i = processed; i != done1;
             i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) {
                /*
                 * The first interrupt in a transfer indicates that the
                 * command list has been loaded, not that a command has
                 * been completed.
                 */
                if (!mdesc->cmd_loaded) {
                        mdesc->cmd_loaded = true;
                        continue;
                }

                mdesc->list_cmds_done++;
                if (mdesc->cyclic) {
                        mdesc->list_cmds_done %= mdesc->list_len;
                        if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
                                vchan_cyclic_callback(&mdesc->vd);
                } else if (mdesc->list_cmds_done == mdesc->list_len) {
                        mchan->desc = NULL;
                        vchan_cookie_complete(&mdesc->vd);
                        mdc_issue_desc(mchan);
                        break;
                }
        }
out:
        spin_unlock(&mchan->vc.lock);

        return IRQ_HANDLED;
}
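
/*
 * Consumers pass three cells: the peripheral's DMA request line, a bitmask
 * of the channels the request may be routed to, and the hardware thread to
 * use. For example (values here are purely illustrative):
 *
 *      dmas = <&mdc 9 0xffffffff 0>;
 */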
static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
                                     struct of_dma *ofdma)
{
        struct mdc_dma *mdma = ofdma->of_dma_data;
        struct dma_chan *chan;

        if (dma_spec->args_count != 3)
                return NULL;

        list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
                struct mdc_chan *mchan = to_mdc_chan(chan);

                if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
                        continue;
                if (dma_get_slave_channel(chan)) {
                        mchan->periph = dma_spec->args[0];
                        mchan->thread = dma_spec->args[2];
                        return chan;
                }
        }

        return NULL;
}
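
/*
 * On Pistachio, peripheral-to-channel routing is controlled through the
 * top-level control registers: each 32-bit CR_PERIPH_DMA_ROUTE word packs
 * four 6-bit route fields at 8-bit intervals, one per channel.
 */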
#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch)       (0x120 + 0x4 * ((ch) / 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch) (8 * ((ch) % 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK      0x3f

static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
{
        struct mdc_dma *mdma = mchan->mdma;

        regmap_update_bits(mdma->periph_regs,
                           PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
                           mchan->periph <<
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
}

static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
{
        struct mdc_dma *mdma = mchan->mdma;

        regmap_update_bits(mdma->periph_regs,
                           PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
                           0);
}

static const struct mdc_dma_soc_data pistachio_mdc_data = {
        .enable_chan = pistachio_mdc_enable_chan,
        .disable_chan = pistachio_mdc_disable_chan,
};

static const struct of_device_id mdc_dma_of_match[] = {
        { .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
        { },
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);

static int mdc_dma_probe(struct platform_device *pdev)
{
        struct mdc_dma *mdma;
        struct resource *res;
        const struct of_device_id *match;
        unsigned int i;
        u32 val;
        int ret;

        mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
        if (!mdma)
                return -ENOMEM;
        platform_set_drvdata(pdev, mdma);

        match = of_match_device(mdc_dma_of_match, &pdev->dev);
        mdma->soc = match->data;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mdma->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mdma->regs))
                return PTR_ERR(mdma->regs);

        mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                                            "img,cr-periph");
        if (IS_ERR(mdma->periph_regs))
                return PTR_ERR(mdma->periph_regs);

        mdma->clk = devm_clk_get(&pdev->dev, "sys");
        if (IS_ERR(mdma->clk))
                return PTR_ERR(mdma->clk);

        ret = clk_prepare_enable(mdma->clk);
        if (ret)
                return ret;

        dma_cap_zero(mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);
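
        /*
         * MDC_GLOBAL_CONFIG_A describes the hardware build: the number of
         * DMA contexts (channels), the thread ID width (2^n threads), and
         * the system data width (2^n bits, converted to bytes here).
         */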
        val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
        mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
                MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
        mdma->nr_threads =
                1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
                      MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
        mdma->bus_width =
                (1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
                       MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
        /*
         * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
         * are supported, this makes it possible for the value reported in
         * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
         * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
         * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining. To eliminate this
         * ambiguity, restrict transfer sizes to one bus-width less than the
         * actual maximum.
         */
        mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;

        of_property_read_u32(pdev->dev.of_node, "dma-channels",
                             &mdma->nr_channels);
        ret = of_property_read_u32(pdev->dev.of_node,
                                   "img,max-burst-multiplier",
                                   &mdma->max_burst_mult);
        if (ret)
                goto disable_clk;

        mdma->dma_dev.dev = &pdev->dev;
        mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
        mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
        mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
        mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
        mdma->dma_dev.device_tx_status = mdc_tx_status;
        mdma->dma_dev.device_issue_pending = mdc_issue_pending;
        mdma->dma_dev.device_terminate_all = mdc_terminate_all;
        mdma->dma_dev.device_config = mdc_slave_config;

        mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        for (i = 1; i <= mdma->bus_width; i <<= 1) {
                mdma->dma_dev.src_addr_widths |= BIT(i);
                mdma->dma_dev.dst_addr_widths |= BIT(i);
        }

        INIT_LIST_HEAD(&mdma->dma_dev.channels);
        for (i = 0; i < mdma->nr_channels; i++) {
                struct mdc_chan *mchan = &mdma->channels[i];

                mchan->mdma = mdma;
                mchan->chan_nr = i;
                mchan->irq = platform_get_irq(pdev, i);
                if (mchan->irq < 0) {
                        ret = mchan->irq;
                        goto disable_clk;
                }
                ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
                                       IRQ_TYPE_LEVEL_HIGH,
                                       dev_name(&pdev->dev), mchan);
                if (ret < 0)
                        goto disable_clk;

                mchan->vc.desc_free = mdc_desc_free;
                vchan_init(&mchan->vc, &mdma->dma_dev);
        }

        mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
                                           sizeof(struct mdc_hw_list_desc),
                                           4, 0);
        if (!mdma->desc_pool) {
                ret = -ENOMEM;
                goto disable_clk;
        }

        ret = dma_async_device_register(&mdma->dma_dev);
        if (ret)
                goto disable_clk;

        ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
        if (ret)
                goto unregister;

        dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
                 mdma->nr_channels, mdma->nr_threads);

        return 0;

unregister:
        dma_async_device_unregister(&mdma->dma_dev);
disable_clk:
        clk_disable_unprepare(mdma->clk);

        return ret;
}

static int mdc_dma_remove(struct platform_device *pdev)
{
        struct mdc_dma *mdma = platform_get_drvdata(pdev);
        struct mdc_chan *mchan, *next;

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&mdma->dma_dev);

        list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
                                 vc.chan.device_node) {
                list_del(&mchan->vc.chan.device_node);

                synchronize_irq(mchan->irq);
                devm_free_irq(&pdev->dev, mchan->irq, mchan);

                tasklet_kill(&mchan->vc.task);
        }

        clk_disable_unprepare(mdma->clk);

        return 0;
}

static struct platform_driver mdc_dma_driver = {
        .driver = {
                .name = "img-mdc-dma",
                .of_match_table = of_match_ptr(mdc_dma_of_match),
        },
        .probe = mdc_dma_probe,
        .remove = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);

MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");