sxgbe_dma.c

/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

#include "sxgbe_common.h"
#include "sxgbe_dma.h"
#include "sxgbe_reg.h"
#include "sxgbe_desc.h"

/* DMA core initialization */
static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	/* if fix_burst = 0, set UNDEF = 1 in the DMA_Sys_Mode register.
	 * if fix_burst = 1, set UNDEF = 0 in the DMA_Sys_Mode register.
	 * burst_map is a bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
	 * Set burst_map irrespective of the fix_burst value.
	 */
	if (!fix_burst)
		reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;

	/* write burst len map */
	reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);

	writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	return 0;
}
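
/* Per-channel DMA initialization: program the channel burst mode and PBL,
 * the TX/RX descriptor base and tail pointer addresses, the ring sizes,
 * and enable the channel interrupts.
 */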
static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
				   int fix_burst, int pbl, dma_addr_t dma_tx,
				   dma_addr_t dma_rx, int t_rsize, int r_rsize)
{
	u32 reg_val;
	dma_addr_t dma_addr;

	reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
	/* set the pbl */
	if (fix_burst) {
		reg_val |= SXGBE_DMA_PBL_X8MODE;
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
		/* program the TX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		/* program the RX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
	}

	/* program desc registers */
	writel(upper_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));

	writel(upper_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

	/* program tail pointers */
	/* assumption: upper 32 bits are constant and
	 * same as TX/RX desc list
	 */
	dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));

	dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

	/* program the ring sizes */
	writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
	writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));

	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
}
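
/* Kick the TX DMA for the given channel by setting the start bit in its
 * TX control register.
 */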
static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
{
	u32 tx_config;

	tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
	tx_config |= SXGBE_TX_START_DMA;
	writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
}

static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}

static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Disable TX/RX interrupts */
	writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}
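
/* Enable TX DMA on all transmit channels */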
static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg |= SXGBE_TX_ENABLE;
		writel(tx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}
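
/* Enable TX DMA on a single channel */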
static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg |= SXGBE_TX_ENABLE;
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}
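
/* Disable TX DMA on a single channel */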
static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}
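
/* Disable TX DMA on all transmit channels */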
static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
		writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}
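
/* Enable RX DMA on all receive channels */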
static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg |= SXGBE_RX_ENABLE;
		writel(rx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}
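
/* Disable RX DMA on all receive channels */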
static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
		writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}
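
/* Decode the TX interrupt sources for one channel, update the extra
 * stats counters, clear the handled status bits and return a mask of
 * handle_tx / tx_bump_tc / tx_hard_error events for the caller.
 */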
static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* TX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_TI) {
			ret_val |= handle_tx;
			x->tx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_TI;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
			x->tx_underflow_irq++;
			ret_val |= tx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_TBU;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* TX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
			ret_val |= tx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_TPS;
			x->tx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= tx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
				x->tx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
			} else {
				x->tx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
				x->tx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
			} else {
				x->tx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
				x->tx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
			}
		}

		/* context descriptor error */
		if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
			x->tx_ctxt_desc_err++;
			clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}
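
/* Decode the RX interrupt sources for one channel, update the extra
 * stats counters, clear the handled status bits and return a mask of
 * handle_rx / rx_bump_tc / rx_hard_error events for the caller.
 */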
static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* RX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_RI) {
			ret_val |= handle_rx;
			x->rx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_RI;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* RX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
			ret_val |= rx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_RBU;
			x->rx_underflow_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
			ret_val |= rx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_RPS;
			x->rx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= rx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
				x->rx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB0;
			} else {
				x->rx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
				x->rx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB1;
			} else {
				x->rx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
				x->rx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB2;
			}
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}

/* Program the HW RX Watchdog */
static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
{
	u32 que_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
		writel(riwt,
		       ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
	}
}
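
/* Enable TCP Segmentation Offload (TSE bit) for the given TX channel */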
static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
{
	u32 ctrl;

	ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
	ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE;
	writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
}
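
/* DMA operations exported to the rest of the driver */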
static const struct sxgbe_dma_ops sxgbe_dma_ops = {
	.init = sxgbe_dma_init,
	.cha_init = sxgbe_dma_channel_init,
	.enable_dma_transmission = sxgbe_enable_dma_transmission,
	.enable_dma_irq = sxgbe_enable_dma_irq,
	.disable_dma_irq = sxgbe_disable_dma_irq,
	.start_tx = sxgbe_dma_start_tx,
	.start_tx_queue = sxgbe_dma_start_tx_queue,
	.stop_tx = sxgbe_dma_stop_tx,
	.stop_tx_queue = sxgbe_dma_stop_tx_queue,
	.start_rx = sxgbe_dma_start_rx,
	.stop_rx = sxgbe_dma_stop_rx,
	.tx_dma_int_status = sxgbe_tx_dma_int_status,
	.rx_dma_int_status = sxgbe_rx_dma_int_status,
	.rx_watchdog = sxgbe_dma_rx_watchdog,
	.enable_tso = sxgbe_enable_tso,
};
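
/* Return the table of DMA operations implemented by this file */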
const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
{
	return &sxgbe_dma_ops;
}