altera_sgdma.c

/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"

static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip __iomem *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);

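/* Maps the tx and rx descriptor memory for streaming DMA and zeroes it.
 * Returns 0 on success, or -EINVAL if either mapping fails.
 */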
int sgdma_initialize(struct altera_tse_private *priv)
{
	priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
		      SGDMA_CTRLREG_INTEN;

	priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
		      SGDMA_CTRLREG_INTEN |
		      SGDMA_CTRLREG_ILASTD;

	INIT_LIST_HEAD(&priv->txlisthd);
	INIT_LIST_HEAD(&priv->rxlisthd);

	priv->rxdescphys = (dma_addr_t) 0;
	priv->txdescphys = (dma_addr_t) 0;

	priv->rxdescphys = dma_map_single(priv->device,
					  (void __force *)priv->rx_dma_desc,
					  priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping rx descriptor memory\n");
		return -EINVAL;
	}

	priv->txdescphys = dma_map_single(priv->device,
					  (void __force *)priv->tx_dma_desc,
					  priv->txdescmem, DMA_TO_DEVICE);

	if (dma_mapping_error(priv->device, priv->txdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping tx descriptor memory\n");
		return -EINVAL;
	}

	/* Initialize descriptor memory to all 0's, sync memory to cache */
	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   priv->txdescmem, DMA_TO_DEVICE);

	dma_sync_single_for_device(priv->device, priv->rxdescphys,
				   priv->rxdescmem, DMA_TO_DEVICE);

	return 0;
}

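/* Unmaps the descriptor memory mapped by sgdma_initialize(). Safe to call
 * on a partially initialized instance, since a mapping that was never made
 * is left at zero and skipped.
 */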
void sgdma_uninitialize(struct altera_tse_private *priv)
{
	if (priv->rxdescphys)
		dma_unmap_single(priv->device, priv->rxdescphys,
				 priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (priv->txdescphys)
		dma_unmap_single(priv->device, priv->txdescphys,
				 priv->txdescmem, DMA_TO_DEVICE);
}

/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
	/* Initialize descriptor memory to 0 */
	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

	csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));

	csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
	csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}

/* For SGDMA, interrupts remain enabled after initially enabling,
 * so there is no need to provide implementations for the abstract
 * enable and disable functions.
 */
void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_enable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

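/* Clear a pending interrupt by setting the clear-interrupt bit
 * in the corresponding channel's control register.
 */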
void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}

void sgdma_clear_txirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}

/* transmits buffer through SGDMA. Returns number of buffers
 * transmitted, 0 if not possible.
 *
 * tx_lock is held by the caller
 */
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->tx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];

	/* wait 'til the tx sgdma is ready for the next transmit request */
	if (sgdma_txbusy(priv))
		return 0;

	sgdma_setup_descrip(cdesc,			/* current descriptor */
			    ndesc,			/* next descriptor */
			    sgdma_txphysaddr(priv, ndesc),
			    buffer->dma_addr,		/* address of packet to xmit */
			    0,				/* write addr 0 for tx dma */
			    buffer->len,		/* length of packet */
			    SGDMA_CONTROL_EOP,		/* Generate EOP */
			    0,				/* read fixed */
			    SGDMA_CONTROL_WR_FIXED);	/* write fixed */

	sgdma_async_write(priv, cdesc);

	/* enqueue the request to the pending transmit queue */
	queue_tx(priv, buffer);

	return 1;
}

/* tx_lock held to protect access to queued tx list
 */
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
	u32 ready = 0;

	if (!sgdma_txbusy(priv) &&
	    ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
	     & SGDMA_CONTROL_HW_OWNED) == 0) &&
	    (dequeue_tx(priv))) {
		ready = 1;
	}

	return ready;
}

void sgdma_start_rxdma(struct altera_tse_private *priv)
{
	sgdma_async_read(priv);
}

void sgdma_add_rx_desc(struct altera_tse_private *priv,
		       struct tse_buffer *rxbuffer)
{
	queue_rx(priv, rxbuffer);
}

/* status is returned on upper 16 bits,
 * length is returned in lower 16 bits
 */
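/* (a caller would thus take the packet length from the lower 16 bits,
 * rxstatus & 0xffff, and the status bits from rxstatus >> 16)
 */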
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *base =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
	struct sgdma_descrip __iomem *desc = NULL;
	struct tse_buffer *rxbuffer = NULL;
	unsigned int rxstatus = 0;

	u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));

	desc = &base[0];
	if (sts & SGDMA_STSREG_EOP) {
		unsigned int pktlength = 0;
		unsigned int pktstatus = 0;

		dma_sync_single_for_cpu(priv->device,
					priv->rxdescphys,
					SGDMA_DESC_LEN,
					DMA_FROM_DEVICE);

		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
		pktstatus = csrrd8(desc, sgdma_descroffs(status));
		rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
		rxstatus = rxstatus << 16;
		rxstatus |= (pktlength & 0xffff);

		if (rxstatus) {
			csrwr8(0, desc, sgdma_descroffs(status));

			rxbuffer = dequeue_rx(priv);
			if (rxbuffer == NULL)
				netdev_info(priv->dev,
					    "sgdma rx and rx queue empty!\n");

			/* Clear control */
			csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
			/* clear status */
			csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));

			/* kick the rx sgdma after reaping this descriptor */
			sgdma_async_read(priv);

		} else {
			/* If the SGDMA indicated an end of packet on recv,
			 * then it's expected that the rxstatus from the
			 * descriptor is non-zero - meaning a valid packet
			 * with a nonzero length, or an error has been
			 * indicated. If not, then all we can do is signal
			 * an error and return no packet received. Most likely
			 * there is a system design error, or an error in the
			 * underlying kernel (cache or cache management problem).
			 */
			netdev_err(priv->dev,
				   "SGDMA RX Error Info: %x, %x, %x\n",
				   sts, csrrd8(desc, sgdma_descroffs(status)),
				   rxstatus);
		}
	} else if (sts == 0) {
		sgdma_async_read(priv);
	}

	return rxstatus;
}

/* Private functions */

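/* Fills in a single descriptor. The next descriptor is cleared of
 * hardware ownership first; the current descriptor's control byte,
 * which includes SGDMA_CONTROL_HW_OWNED, is written after its address,
 * length and link fields, so ownership passes to hardware last.
 */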
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed)
{
	/* Clear the next descriptor as not owned by hardware */
	u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));

	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
	csrwr8(ctrl, ndesc, sgdma_descroffs(control));

	ctrl = SGDMA_CONTROL_HW_OWNED;
	ctrl |= generate_eop;
	ctrl |= rfixed;
	ctrl |= wfixed;

	/* Channel is implicitly zero, initialized to 0 by default */
	csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
	csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));

	csrwr32(0, desc, sgdma_descroffs(pad1));
	csrwr32(0, desc, sgdma_descroffs(pad2));
	csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));

	csrwr8(ctrl, desc, sgdma_descroffs(control));
	csrwr8(0, desc, sgdma_descroffs(status));
	csrwr8(0, desc, sgdma_descroffs(wburst));
	csrwr8(0, desc, sgdma_descroffs(rburst));
	csrwr16(length, desc, sgdma_descroffs(bytes));
	csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}

/* If hardware is busy, don't restart the async read.
 * If the status register is 0 - meaning the initial state - restart the
 * async read, probably for the first time when populating a receive buffer.
 * If the read status indicates not busy and a status, restart the async
 * DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];

	struct tse_buffer *rxbuffer = NULL;

	if (!sgdma_rxbusy(priv)) {
		rxbuffer = queue_rx_peekhead(priv);
		if (rxbuffer == NULL) {
			netdev_err(priv->dev, "no rx buffers available\n");
			return 0;
		}

		sgdma_setup_descrip(cdesc,		/* current descriptor */
				    ndesc,		/* next descriptor */
				    sgdma_rxphysaddr(priv, ndesc),
				    0,			/* read addr 0 for rx dma */
				    rxbuffer->dma_addr, /* write addr for rx dma */
				    0,			/* read 'til EOP */
				    0,			/* EOP: NA for rx dma */
				    0,			/* read fixed: NA for rx dma */
				    0);			/* SOP: NA for rx DMA */

		dma_sync_single_for_device(priv->device,
					   priv->rxdescphys,
					   SGDMA_DESC_LEN,
					   DMA_TO_DEVICE);

		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
			priv->rx_dma_csr,
			sgdma_csroffs(next_descrip));

		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
			priv->rx_dma_csr,
			sgdma_csroffs(control));

		return 1;
	}

	return 0;
}

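/* Starts a tx sgdma transfer on the given descriptor: clears control and
 * status, syncs the descriptor memory to the device, points the controller
 * at the descriptor and sets the start bit. Returns 1 if started, 0 if the
 * tx dma was still busy.
 */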
static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip __iomem *desc)
{
	if (sgdma_txbusy(priv))
		return 0;

	/* clear control and status */
	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   SGDMA_DESC_LEN, DMA_TO_DEVICE);

	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
		priv->tx_dma_csr,
		sgdma_csroffs(next_descrip));

	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
		priv->tx_dma_csr,
		sgdma_csroffs(control));

	return 1;
}

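/* Translate a descriptor pointer within the CPU-mapped descriptor memory
 * into the equivalent bus address seen by the SGDMA controller, by adding
 * the descriptor's byte offset to the descriptor memory's bus base address.
 */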
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc)
{
	dma_addr_t paddr = priv->txdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc)
{
	dma_addr_t paddr = priv->rxdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}

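/* Remove the head entry from the list and return it through 'entry',
 * or set 'entry' to NULL if the list is empty.
 */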
#define list_remove_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member); \
			list_del_init(&entry->member);			\
		}							\
	} while (0)

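/* Return the head entry through 'entry' without removing it from the
 * list, or set 'entry' to NULL if the list is empty.
 */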
#define list_peek_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member); \
		}							\
	} while (0)

/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->txlisthd);
}

/* adds a tse_buffer to the tail of an rx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->rxlisthd);
}

/* dequeues a tse_buffer from the transmit buffer list, otherwise
 * returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* dequeues a tse_buffer from the receive buffer list, otherwise
 * returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* returns the head of the receive buffer list without removing it,
 * otherwise returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* check and return rx sgdma status without polling
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
		       & SGDMA_STSREG_BUSY;
}

/* waits for the tx sgdma to finish its current operation, returns 0
 * when it transitions to nonbusy, returns 1 if the operation times out
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
	int delay = 0;

	/* if DMA is busy, wait for the current transaction to finish */
	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
		& SGDMA_STSREG_BUSY) && (delay++ < 100))
		udelay(1);

	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
	    & SGDMA_STSREG_BUSY) {
		netdev_err(priv->dev, "timeout waiting for tx dma\n");
		return 1;
	}
	return 0;
}