nb8800.c

/*
 * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
 *
 * Mostly rewritten, based on driver from Sigma Designs.  Original
 * copyright notice below.
 *
 *
 * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
 *
 * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/dma-mapping.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/barrier.h>

#include "nb8800.h"

static void nb8800_tx_done(struct net_device *dev);
static int nb8800_dma_stop(struct net_device *dev);

static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
{
	return readb_relaxed(priv->base + reg);
}

static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
{
	return readl_relaxed(priv->base + reg);
}

static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
{
	writeb_relaxed(val, priv->base + reg);
}

static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
{
	writew_relaxed(val, priv->base + reg);
}

static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
{
	writel_relaxed(val, priv->base + reg);
}

static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
				u32 mask, u32 val)
{
	u32 old = nb8800_readb(priv, reg);
	u32 new = (old & ~mask) | (val & mask);

	if (new != old)
		nb8800_writeb(priv, reg, new);
}

static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
				u32 mask, u32 val)
{
	u32 old = nb8800_readl(priv, reg);
	u32 new = (old & ~mask) | (val & mask);

	if (new != old)
		nb8800_writel(priv, reg, new);
}

static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
			       bool set)
{
	nb8800_maskb(priv, reg, bits, set ? bits : 0);
}

static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
{
	nb8800_maskb(priv, reg, bits, bits);
}

static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
{
	nb8800_maskb(priv, reg, bits, 0);
}

static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
			       bool set)
{
	nb8800_maskl(priv, reg, bits, set ? bits : 0);
}

static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
{
	nb8800_maskl(priv, reg, bits, bits);
}

static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
{
	nb8800_maskl(priv, reg, bits, 0);
}
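
/* MDIO bus access.  A command is latched by writing it without
 * MDIO_CMD_GO and then re-writing it with the GO bit set; the
 * hardware clears GO again once the transaction has completed.
 */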
static int nb8800_mdio_wait(struct mii_bus *bus)
{
	struct nb8800_priv *priv = bus->priv;
	u32 val;

	return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
					 val, !(val & MDIO_CMD_GO), 1, 1000);
}

static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
{
	struct nb8800_priv *priv = bus->priv;
	int err;

	err = nb8800_mdio_wait(bus);
	if (err)
		return err;

	nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
	udelay(10);
	nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);

	return nb8800_mdio_wait(bus);
}

static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nb8800_priv *priv = bus->priv;
	u32 val;
	int err;

	err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
	if (err)
		return err;

	val = nb8800_readl(priv, NB8800_MDIO_STS);
	if (val & MDIO_STS_ERR)
		return 0xffff;

	return val & 0xffff;
}

static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
		MDIO_CMD_DATA(val) | MDIO_CMD_WR;

	return nb8800_mdio_cmd(bus, cmd);
}

static void nb8800_mac_tx(struct net_device *dev, bool enable)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
		cpu_relax();

	nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
}

static void nb8800_mac_rx(struct net_device *dev, bool enable)
{
	nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
}

static void nb8800_mac_af(struct net_device *dev, bool enable)
{
	nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
}

static void nb8800_start_rx(struct net_device *dev)
{
	nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
}
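
/* Allocate a page-fragment receive buffer for descriptor i and map it
 * for DMA.  In NAPI context napi_alloc_frag() is used, otherwise
 * netdev_alloc_frag().
 */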
static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
	struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
	int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
	dma_addr_t dma_addr;
	struct page *page;
	unsigned long offset;
	void *data;

	data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
	if (!data)
		return -ENOMEM;

	page = virt_to_head_page(data);
	offset = data - page_address(page);

	dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
				DMA_FROM_DEVICE);

	if (dma_mapping_error(&dev->dev, dma_addr)) {
		skb_free_frag(data);
		return -ENOMEM;
	}

	rxb->page = page;
	rxb->offset = offset;
	rxd->desc.s_addr = dma_addr;

	return 0;
}

static void nb8800_receive(struct net_device *dev, unsigned int i,
			   unsigned int len)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
	struct page *page = priv->rx_bufs[i].page;
	int offset = priv->rx_bufs[i].offset;
	void *data = page_address(page) + offset;
	dma_addr_t dma = rxd->desc.s_addr;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = len <= RX_COPYBREAK ? len : RX_COPYHDR;

	skb = napi_alloc_skb(&priv->napi, size);
	if (!skb) {
		netdev_err(dev, "rx skb allocation failed\n");
		dev->stats.rx_dropped++;
		return;
	}

	if (len <= RX_COPYBREAK) {
		dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
		memcpy(skb_put(skb, len), data, len);
		dma_sync_single_for_device(&dev->dev, dma, len,
					   DMA_FROM_DEVICE);
	} else {
		err = nb8800_alloc_rx(dev, i, true);
		if (err) {
			netdev_err(dev, "rx buffer allocation failed\n");
			dev->stats.rx_dropped++;
			/* don't leak the skb allocated above */
			dev_kfree_skb(skb);
			return;
		}

		dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
		memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset + RX_COPYHDR, len - RX_COPYHDR,
				RX_BUF_SIZE);
	}

	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(&priv->napi, skb);
}

static void nb8800_rx_error(struct net_device *dev, u32 report)
{
	if (report & RX_LENGTH_ERR)
		dev->stats.rx_length_errors++;

	if (report & RX_FCS_ERR)
		dev->stats.rx_crc_errors++;

	if (report & RX_FIFO_OVERRUN)
		dev->stats.rx_fifo_errors++;

	if (report & RX_ALIGNMENT_ERROR)
		dev->stats.rx_frame_errors++;

	dev->stats.rx_errors++;
}
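
/* NAPI poll: reclaim completed tx descriptors, then process received
 * frames up to the budget.  The end-of-chain (EOC) marker is moved to
 * the last descriptor handled before rx DMA is restarted.
 */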
static int nb8800_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd;
	unsigned int last = priv->rx_eoc;
	unsigned int next;
	int work = 0;

	nb8800_tx_done(dev);

again:
	while (work < budget) {
		struct nb8800_rx_buf *rxb;
		unsigned int len;

		next = (last + 1) % RX_DESC_COUNT;

		rxb = &priv->rx_bufs[next];
		rxd = &priv->rx_descs[next];

		if (!rxd->report)
			break;

		len = RX_BYTES_TRANSFERRED(rxd->report);

		if (IS_RX_ERROR(rxd->report))
			nb8800_rx_error(dev, rxd->report);
		else
			nb8800_receive(dev, next, len);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

		if (rxd->report & RX_MULTICAST_PKT)
			dev->stats.multicast++;

		rxd->report = 0;
		last = next;
		work++;
	}

	if (work) {
		priv->rx_descs[last].desc.config |= DESC_EOC;
		wmb();	/* ensure new EOC is written before clearing old */
		priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
		priv->rx_eoc = last;
		nb8800_start_rx(dev);
	}

	if (work < budget) {
		nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);

		/* If a packet arrived after we last checked but
		 * before writing RX_ITR, the interrupt will be
		 * delayed, so we retrieve it now.
		 */
		if (priv->rx_descs[next].report)
			goto again;

		napi_complete_done(napi, work);
	}

	return work;
}
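
/* Kick off tx DMA on the next ready descriptor chain, unless the DMA
 * engine is already running.  The caller must hold tx_lock.
 */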
static void __nb8800_tx_dma_start(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_buf *txb;
	u32 txc_cr;

	txb = &priv->tx_bufs[priv->tx_queue];
	if (!txb->ready)
		return;

	txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
	if (txc_cr & TCR_EN)
		return;

	nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
	wmb();	/* ensure desc addr is written before starting DMA */
	nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);

	priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
}

static void nb8800_tx_dma_start(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	spin_lock_irq(&priv->tx_lock);
	__nb8800_tx_dma_start(dev);
	spin_unlock_irq(&priv->tx_lock);
}

static void nb8800_tx_dma_start_irq(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	spin_lock(&priv->tx_lock);
	__nb8800_tx_dma_start(dev);
	spin_unlock(&priv->tx_lock);
}
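
/* Frames are transmitted from an 8-byte-aligned address: up to seven
 * unaligned leading bytes are copied into a bounce buffer embedded in
 * the tx descriptor and sent through an extra descriptor ahead of the
 * DMA-mapped remainder of the frame.
 */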
static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_desc *txd;
	struct nb8800_tx_buf *txb;
	struct nb8800_dma_desc *desc;
	dma_addr_t dma_addr;
	unsigned int dma_len;
	unsigned int align;
	unsigned int next;

	if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	align = (8 - (uintptr_t)skb->data) & 7;

	dma_len = skb->len - align;
	dma_addr = dma_map_single(&dev->dev, skb->data + align,
				  dma_len, DMA_TO_DEVICE);

	if (dma_mapping_error(&dev->dev, dma_addr)) {
		netdev_err(dev, "tx dma mapping error\n");
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
		netif_stop_queue(dev);
		skb->xmit_more = 0;
	}

	next = priv->tx_next;
	txb = &priv->tx_bufs[next];
	txd = &priv->tx_descs[next];
	desc = &txd->desc[0];

	next = (next + 1) % TX_DESC_COUNT;

	if (align) {
		memcpy(txd->buf, skb->data, align);

		desc->s_addr =
			txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
		desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
		desc->config = DESC_BTS(2) | DESC_DS | align;

		desc++;
	}

	desc->s_addr = dma_addr;
	desc->n_addr = priv->tx_bufs[next].dma_desc;
	desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;

	if (!skb->xmit_more)
		desc->config |= DESC_EOC;

	txb->skb = skb;
	txb->dma_addr = dma_addr;
	txb->dma_len = dma_len;

	if (!priv->tx_chain) {
		txb->chain_len = 1;
		priv->tx_chain = txb;
	} else {
		priv->tx_chain->chain_len++;
	}

	netdev_sent_queue(dev, skb->len);

	priv->tx_next = next;

	if (!skb->xmit_more) {
		smp_wmb();
		priv->tx_chain->ready = true;
		priv->tx_chain = NULL;
		nb8800_tx_dma_start(dev);
	}

	return NETDEV_TX_OK;
}

static void nb8800_tx_error(struct net_device *dev, u32 report)
{
	if (report & TX_LATE_COLLISION)
		dev->stats.collisions++;

	if (report & TX_PACKET_DROPPED)
		dev->stats.tx_dropped++;

	if (report & TX_FIFO_UNDERRUN)
		dev->stats.tx_fifo_errors++;

	dev->stats.tx_errors++;
}
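
/* Reclaim completed tx descriptors: unmap the buffers, free or consume
 * the skbs, update statistics, and wake the queue if any descriptors
 * were released.
 */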
static void nb8800_tx_done(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int limit = priv->tx_next;
	unsigned int done = priv->tx_done;
	unsigned int packets = 0;
	unsigned int len = 0;

	while (done != limit) {
		struct nb8800_tx_desc *txd = &priv->tx_descs[done];
		struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
		struct sk_buff *skb;

		if (!txd->report)
			break;

		skb = txb->skb;
		len += skb->len;

		dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
				 DMA_TO_DEVICE);

		if (IS_TX_ERROR(txd->report)) {
			nb8800_tx_error(dev, txd->report);
			kfree_skb(skb);
		} else {
			consume_skb(skb);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
		dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);

		txb->skb = NULL;
		txb->ready = false;
		txd->report = 0;

		done = (done + 1) % TX_DESC_COUNT;
		packets++;
	}

	if (packets) {
		smp_mb__before_atomic();
		atomic_add(packets, &priv->tx_free);
		netdev_completed_queue(dev, packets, len);
		netif_wake_queue(dev);
		priv->tx_done = done;
	}
}

static irqreturn_t nb8800_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct nb8800_priv *priv = netdev_priv(dev);
	irqreturn_t ret = IRQ_NONE;
	u32 val;

	/* tx interrupt */
	val = nb8800_readl(priv, NB8800_TXC_SR);
	if (val) {
		nb8800_writel(priv, NB8800_TXC_SR, val);

		if (val & TSR_DI)
			nb8800_tx_dma_start_irq(dev);

		if (val & TSR_TI)
			napi_schedule_irqoff(&priv->napi);

		if (unlikely(val & TSR_DE))
			netdev_err(dev, "TX DMA error\n");

		/* should never happen with automatic status retrieval */
		if (unlikely(val & TSR_TO))
			netdev_err(dev, "TX Status FIFO overflow\n");

		ret = IRQ_HANDLED;
	}

	/* rx interrupt */
	val = nb8800_readl(priv, NB8800_RXC_SR);
	if (val) {
		nb8800_writel(priv, NB8800_RXC_SR, val);

		if (likely(val & (RSR_RI | RSR_DI))) {
			nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
			napi_schedule_irqoff(&priv->napi);
		}

		if (unlikely(val & RSR_DE))
			netdev_err(dev, "RX DMA error\n");

		/* should never happen with automatic status retrieval */
		if (unlikely(val & RSR_RO))
			netdev_err(dev, "RX Status FIFO overflow\n");

		ret = IRQ_HANDLED;
	}

	return ret;
}

static void nb8800_mac_config(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	bool gigabit = priv->speed == SPEED_1000;
	u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
	u32 mac_mode = 0;
	u32 slot_time;
	u32 phy_clk;
	u32 ict;

	if (!priv->duplex)
		mac_mode |= HALF_DUPLEX;

	if (gigabit) {
		if (phy_interface_is_rgmii(dev->phydev))
			mac_mode |= RGMII_MODE;

		mac_mode |= GMAC_MODE;
		phy_clk = 125000000;

		/* Should be 512 but register is only 8 bits */
		slot_time = 255;
	} else {
		phy_clk = 25000000;
		slot_time = 128;
	}

	ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));

	nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
	nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
	nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
}
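
/* Apply the current pause settings.  Tx flow control (RCR_FL) is only
 * changed with rx DMA stopped, so a running interface is briefly
 * quiesced around the update.
 */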
static void nb8800_pause_config(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	u32 rxcr;

	if (priv->pause_aneg) {
		if (!phydev || !phydev->link)
			return;

		priv->pause_rx = phydev->pause;
		priv->pause_tx = phydev->pause ^ phydev->asym_pause;
	}

	nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);

	rxcr = nb8800_readl(priv, NB8800_RXC_CR);
	if (!!(rxcr & RCR_FL) == priv->pause_tx)
		return;

	if (netif_running(dev)) {
		napi_disable(&priv->napi);
		netif_tx_lock_bh(dev);
		nb8800_dma_stop(dev);
		nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
		nb8800_start_rx(dev);
		netif_tx_unlock_bh(dev);
		napi_enable(&priv->napi);
	} else {
		nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
	}
}

static void nb8800_link_reconfigure(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	int change = 0;

	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			priv->speed = phydev->speed;
			change = 1;
		}

		if (phydev->duplex != priv->duplex) {
			priv->duplex = phydev->duplex;
			change = 1;
		}

		if (change)
			nb8800_mac_config(dev);

		nb8800_pause_config(dev);
	}

	if (phydev->link != priv->link) {
		priv->link = phydev->link;
		change = 1;
	}

	if (change)
		phy_print_status(priv->phydev);
}

static void nb8800_update_mac_addr(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);

	for (i = 0; i < ETH_ALEN; i++)
		nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
}

static int nb8800_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sock = addr;

	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, sock->sa_data);
	nb8800_update_mac_addr(dev);

	return 0;
}
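
/* Write val to the MC_INIT register and wait for the hardware to
 * clear it again, completing the multicast filter update.
 */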
static void nb8800_mc_init(struct net_device *dev, int val)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	nb8800_writeb(priv, NB8800_MC_INIT, val);
	readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
				  1, 1000);
}

static void nb8800_set_rx_mode(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		nb8800_mac_af(dev, false);
		return;
	}

	nb8800_mac_af(dev, true);
	nb8800_mc_init(dev, 0);

	netdev_for_each_mc_addr(ha, dev) {
		for (i = 0; i < ETH_ALEN; i++)
			nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);

		nb8800_mc_init(dev, 0xff);
	}
}

#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))

static void nb8800_dma_free(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int i;

	if (priv->rx_bufs) {
		for (i = 0; i < RX_DESC_COUNT; i++)
			if (priv->rx_bufs[i].page)
				put_page(priv->rx_bufs[i].page);

		kfree(priv->rx_bufs);
		priv->rx_bufs = NULL;
	}

	if (priv->tx_bufs) {
		for (i = 0; i < TX_DESC_COUNT; i++)
			kfree_skb(priv->tx_bufs[i].skb);

		kfree(priv->tx_bufs);
		priv->tx_bufs = NULL;
	}

	if (priv->rx_descs) {
		dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
				  priv->rx_desc_dma);
		priv->rx_descs = NULL;
	}

	if (priv->tx_descs) {
		dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
				  priv->tx_desc_dma);
		priv->tx_descs = NULL;
	}
}
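
/* Re-initialise the descriptor rings: link the rx descriptors into a
 * ring with the EOC mark on the last one, point the tx descriptors at
 * their report words, and reset all ring indices.
 */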
static void nb8800_dma_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd;
	struct nb8800_tx_desc *txd;
	unsigned int i;

	for (i = 0; i < RX_DESC_COUNT; i++) {
		dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);

		rxd = &priv->rx_descs[i];
		rxd->desc.n_addr = rx_dma + sizeof(*rxd);
		rxd->desc.r_addr =
			rx_dma + offsetof(struct nb8800_rx_desc, report);
		rxd->desc.config = priv->rx_dma_config;
		rxd->report = 0;
	}

	rxd->desc.n_addr = priv->rx_desc_dma;
	rxd->desc.config |= DESC_EOC;

	priv->rx_eoc = RX_DESC_COUNT - 1;

	for (i = 0; i < TX_DESC_COUNT; i++) {
		struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
		dma_addr_t r_dma = txb->dma_desc +
			offsetof(struct nb8800_tx_desc, report);

		txd = &priv->tx_descs[i];
		txd->desc[0].r_addr = r_dma;
		txd->desc[1].r_addr = r_dma;
		txd->report = 0;
	}

	priv->tx_next = 0;
	priv->tx_queue = 0;
	priv->tx_done = 0;
	atomic_set(&priv->tx_free, TX_DESC_COUNT);

	nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);

	wmb();	/* ensure all setup is written before starting */
}

static int nb8800_dma_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int n_rx = RX_DESC_COUNT;
	unsigned int n_tx = TX_DESC_COUNT;
	unsigned int i;
	int err;

	priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
					    &priv->rx_desc_dma, GFP_KERNEL);
	if (!priv->rx_descs)
		goto err_out;

	priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
	if (!priv->rx_bufs)
		goto err_out;

	for (i = 0; i < n_rx; i++) {
		err = nb8800_alloc_rx(dev, i, false);
		if (err)
			goto err_out;
	}

	priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
					    &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_descs)
		goto err_out;

	priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
	if (!priv->tx_bufs)
		goto err_out;

	for (i = 0; i < n_tx; i++)
		priv->tx_bufs[i].dma_desc =
			priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);

	nb8800_dma_reset(dev);

	return 0;

err_out:
	nb8800_dma_free(dev);

	return -ENOMEM;
}

static int nb8800_dma_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
	struct nb8800_tx_desc *txd = &priv->tx_descs[0];
	int retry = 5;
	u32 txcr;
	u32 rxcr;
	int err;
	unsigned int i;

	/* wait for tx to finish */
	err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
					!(txcr & TCR_EN) &&
					priv->tx_done == priv->tx_next,
					1000, 1000000);
	if (err)
		return err;

	/* The rx DMA only stops if it reaches the end of chain.
	 * To make this happen, we set the EOC flag on all rx
	 * descriptors, put the device in loopback mode, and send
	 * a few dummy frames.  The interrupt handler will ignore
	 * these since NAPI is disabled and no real frames are in
	 * the tx queue.
	 */

	for (i = 0; i < RX_DESC_COUNT; i++)
		priv->rx_descs[i].desc.config |= DESC_EOC;

	txd->desc[0].s_addr =
		txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
	txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
	memset(txd->buf, 0, sizeof(txd->buf));

	nb8800_mac_af(dev, false);
	nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);

	do {
		nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
		wmb();
		nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);

		err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
						rxcr, !(rxcr & RCR_EN),
						1000, 100000);
	} while (err && --retry);

	nb8800_mac_af(dev, true);
	nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
	nb8800_dma_reset(dev);

	return retry ? 0 : -ETIMEDOUT;
}

static void nb8800_pause_adv(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 adv = 0;

	if (!priv->phydev)
		return;

	if (priv->pause_rx)
		adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (priv->pause_tx)
		adv ^= ADVERTISED_Asym_Pause;

	priv->phydev->supported |= adv;
	priv->phydev->advertising |= adv;
}

static int nb8800_open(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int err;

	/* clear any pending interrupts */
	nb8800_writel(priv, NB8800_RXC_SR, 0xf);
	nb8800_writel(priv, NB8800_TXC_SR, 0xf);

	err = nb8800_dma_init(dev);
	if (err)
		return err;

	err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
	if (err)
		goto err_free_dma;

	nb8800_mac_rx(dev, true);
	nb8800_mac_tx(dev, true);

	priv->phydev = of_phy_connect(dev, priv->phy_node,
				      nb8800_link_reconfigure, 0,
				      priv->phy_mode);
	if (!priv->phydev) {
		/* don't return 0 from a failed open */
		err = -ENODEV;
		goto err_free_irq;
	}

	nb8800_pause_adv(dev);

	netdev_reset_queue(dev);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	nb8800_start_rx(dev);
	phy_start(priv->phydev);

	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
err_free_dma:
	nb8800_dma_free(dev);

	return err;
}

static int nb8800_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	phy_stop(priv->phydev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	nb8800_dma_stop(dev);
	nb8800_mac_rx(dev, false);
	nb8800_mac_tx(dev, false);

	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	free_irq(dev->irq, dev);

	nb8800_dma_free(dev);

	return 0;
}

static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

static const struct net_device_ops nb8800_netdev_ops = {
	.ndo_open		= nb8800_open,
	.ndo_stop		= nb8800_stop,
	.ndo_start_xmit		= nb8800_xmit,
	.ndo_set_mac_address	= nb8800_set_mac_address,
	.ndo_set_rx_mode	= nb8800_set_rx_mode,
	.ndo_do_ioctl		= nb8800_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int nb8800_nway_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	if (!priv->phydev)
		return -ENODEV;

	return genphy_restart_aneg(priv->phydev);
}

static void nb8800_get_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pp)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	pp->autoneg = priv->pause_aneg;
	pp->rx_pause = priv->pause_rx;
	pp->tx_pause = priv->pause_tx;
}

static int nb8800_set_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pp)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	priv->pause_aneg = pp->autoneg;
	priv->pause_rx = pp->rx_pause;
	priv->pause_tx = pp->tx_pause;

	nb8800_pause_adv(dev);

	if (!priv->pause_aneg)
		nb8800_pause_config(dev);
	else if (priv->phydev)
		phy_start_aneg(priv->phydev);

	return 0;
}

static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
	"rx_bytes_ok",
	"rx_frames_ok",
	"rx_undersize_frames",
	"rx_fragment_frames",
	"rx_64_byte_frames",
	"rx_127_byte_frames",
	"rx_255_byte_frames",
	"rx_511_byte_frames",
	"rx_1023_byte_frames",
	"rx_max_size_frames",
	"rx_oversize_frames",
	"rx_bad_fcs_frames",
	"rx_broadcast_frames",
	"rx_multicast_frames",
	"rx_control_frames",
	"rx_pause_frames",
	"rx_unsup_control_frames",
	"rx_align_error_frames",
	"rx_overrun_frames",
	"rx_jabber_frames",
	"rx_bytes",
	"rx_frames",

	"tx_bytes_ok",
	"tx_frames_ok",
	"tx_64_byte_frames",
	"tx_127_byte_frames",
	"tx_255_byte_frames",
	"tx_511_byte_frames",
	"tx_1023_byte_frames",
	"tx_max_size_frames",
	"tx_oversize_frames",
	"tx_broadcast_frames",
	"tx_multicast_frames",
	"tx_control_frames",
	"tx_pause_frames",
	"tx_underrun_frames",
	"tx_single_collision_frames",
	"tx_multi_collision_frames",
	"tx_deferred_collision_frames",
	"tx_late_collision_frames",
	"tx_excessive_collision_frames",
	"tx_bytes",
	"tx_frames",
	"tx_collisions",
};

#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)

static int nb8800_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return NB8800_NUM_STATS;

	return -EOPNOTSUPP;
}

static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
{
	if (sset == ETH_SS_STATS)
		memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
}
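
/* Hardware counters are read indirectly: write the counter index to
 * STAT_INDEX, then read the value from STAT_DATA.  Bit 7 of the index
 * selects the tx counter bank.
 */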
static u32 nb8800_read_stat(struct net_device *dev, int index)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	nb8800_writeb(priv, NB8800_STAT_INDEX, index);

	return nb8800_readl(priv, NB8800_STAT_DATA);
}

static void nb8800_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *estats, u64 *st)
{
	unsigned int i;
	u32 rx, tx;

	for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
		rx = nb8800_read_stat(dev, i);
		tx = nb8800_read_stat(dev, i | 0x80);
		st[i] = rx;
		st[i + NB8800_NUM_STATS / 2] = tx;
	}
}

static const struct ethtool_ops nb8800_ethtool_ops = {
	.get_settings		= nb8800_get_settings,
	.set_settings		= nb8800_set_settings,
	.nway_reset		= nb8800_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= nb8800_get_pauseparam,
	.set_pauseparam		= nb8800_set_pauseparam,
	.get_sset_count		= nb8800_get_sset_count,
	.get_strings		= nb8800_get_strings,
	.get_ethtool_stats	= nb8800_get_ethtool_stats,
};

static int nb8800_hw_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 val;

	val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
	nb8800_writeb(priv, NB8800_TX_CTL1, val);

	/* Collision retry count */
	nb8800_writeb(priv, NB8800_TX_CTL2, 5);

	val = RX_PAD_STRIP | RX_AF_EN;
	nb8800_writeb(priv, NB8800_RX_CTL, val);

	/* Chosen by fair dice roll */
	nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);

	/* TX cycles per deferral period */
	nb8800_writeb(priv, NB8800_TX_SDP, 12);

	/* The following three threshold values have been
	 * experimentally determined for good results.
	 */

	/* RX/TX FIFO threshold for partial empty (64-bit entries) */
	nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);

	/* RX/TX FIFO threshold for partial full (64-bit entries) */
	nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);

	/* Buffer size for transmit (64-bit entries) */
	nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);

	/* Configure tx DMA */
	val = nb8800_readl(priv, NB8800_TXC_CR);
	val &= TCR_LE;		/* keep endian setting */
	val |= TCR_DM;		/* DMA descriptor mode */
	val |= TCR_RS;		/* automatically store tx status */
	val |= TCR_DIE;		/* interrupt on DMA chain completion */
	val |= TCR_TFI(7);	/* interrupt after 7 frames transmitted */
	val |= TCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_TXC_CR, val);

	/* TX complete interrupt after 10 ms or 7 frames (see above) */
	val = clk_get_rate(priv->clk) / 100;
	nb8800_writel(priv, NB8800_TX_ITR, val);

	/* Configure rx DMA */
	val = nb8800_readl(priv, NB8800_RXC_CR);
	val &= RCR_LE;		/* keep endian setting */
	val |= RCR_DM;		/* DMA descriptor mode */
	val |= RCR_RS;		/* automatically store rx status */
	val |= RCR_DIE;		/* interrupt at end of DMA chain */
	val |= RCR_RFI(7);	/* interrupt after 7 frames received */
	val |= RCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_RXC_CR, val);

	/* The rx interrupt can fire before the DMA has completed
	 * unless a small delay is added.  50 us is hopefully enough.
	 */
	priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;

	/* In NAPI poll mode we want to disable interrupts, but the
	 * hardware does not permit this.  Delay 10 ms instead.
	 */
	priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;

	nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);

	priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;

	/* Flow control settings */

	/* Pause time of 0.1 ms */
	val = 100000 / 512;
	nb8800_writeb(priv, NB8800_PQ1, val >> 8);
	nb8800_writeb(priv, NB8800_PQ2, val & 0xff);

	/* Auto-negotiate by default */
	priv->pause_aneg = true;
	priv->pause_rx = true;
	priv->pause_tx = true;

	nb8800_mc_init(dev, 0);

	return 0;
}

static int nb8800_tangox_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 pad_mode = PAD_MODE_MII;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		pad_mode = PAD_MODE_MII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		pad_mode = PAD_MODE_RGMII;
		break;

	default:
		dev_err(dev->dev.parent, "unsupported phy mode %s\n",
			phy_modes(priv->phy_mode));
		return -EINVAL;
	}

	nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);

	return 0;
}

static int nb8800_tangox_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int clk_div;

	nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
	usleep_range(1000, 10000);
	nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);

	wmb();		/* ensure reset is cleared before proceeding */

	clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
	nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);

	return 0;
}

static const struct nb8800_ops nb8800_tangox_ops = {
	.init	= nb8800_tangox_init,
	.reset	= nb8800_tangox_reset,
};

static int nb8800_tango4_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int err;

	err = nb8800_tangox_init(dev);
	if (err)
		return err;

	/* On tango4 interrupt on DMA completion per frame works and gives
	 * better performance despite generating more rx interrupts.
	 */

	/* Disable unnecessary interrupt on rx completion */
	nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));

	/* Request interrupt on descriptor DMA completion */
	priv->rx_dma_config |= DESC_ID;

	return 0;
}

static const struct nb8800_ops nb8800_tango4_ops = {
	.init	= nb8800_tango4_init,
	.reset	= nb8800_tangox_reset,
};

static const struct of_device_id nb8800_dt_ids[] = {
	{
		.compatible = "aurora,nb8800",
	},
	{
		.compatible = "sigma,smp8642-ethernet",
		.data = &nb8800_tangox_ops,
	},
	{
		.compatible = "sigma,smp8734-ethernet",
		.data = &nb8800_tango4_ops,
	},
	{ }
};

static int nb8800_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct nb8800_ops *ops = NULL;
	struct nb8800_priv *priv;
	struct resource *res;
	struct net_device *dev;
	struct mii_bus *bus;
	const unsigned char *mac;
	void __iomem *base;
	int irq;
	int ret;

	match = of_match_device(nb8800_dt_ids, &pdev->dev);
	if (match)
		ops = match->data;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "No IRQ\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);
	priv->base = base;

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (priv->phy_mode < 0)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(priv->clk);
		goto err_free_dev;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		goto err_free_dev;

	spin_lock_init(&priv->tx_lock);

	if (ops && ops->reset) {
		ret = ops->reset(dev);
		if (ret)
			goto err_disable_clk;
	}

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	bus->name = "nb8800-mii";
	bus->read = nb8800_mdio_read;
	bus->write = nb8800_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
		 (unsigned long)res->start);
	bus->priv = priv;

	ret = of_mdiobus_register(bus, pdev->dev.of_node);
	if (ret) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_disable_clk;
	}

	priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		dev_err(&pdev->dev, "no PHY specified\n");
		ret = -ENODEV;
		goto err_free_bus;
	}

	priv->mii_bus = bus;

	ret = nb8800_hw_init(dev);
	if (ret)
		goto err_free_bus;

	if (ops && ops->init) {
		ret = ops->init(dev);
		if (ret)
			goto err_free_bus;
	}

	dev->netdev_ops = &nb8800_netdev_ops;
	dev->ethtool_ops = &nb8800_ethtool_ops;
	dev->flags |= IFF_MULTICAST;
	dev->irq = irq;

	mac = of_get_mac_address(pdev->dev.of_node);
	if (mac)
		ether_addr_copy(dev->dev_addr, mac);

	if (!is_valid_ether_addr(dev->dev_addr))
		eth_hw_addr_random(dev);

	nb8800_update_mac_addr(dev);

	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		netdev_err(dev, "failed to register netdev\n");
		goto err_free_dma;
	}

	netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);

	netdev_info(dev, "MAC address %pM\n", dev->dev_addr);

	return 0;

err_free_dma:
	nb8800_dma_free(dev);
err_free_bus:
	mdiobus_unregister(bus);
err_disable_clk:
	clk_disable_unprepare(priv->clk);
err_free_dev:
	free_netdev(dev);

	return ret;
}

static int nb8800_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nb8800_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	mdiobus_unregister(priv->mii_bus);
	clk_disable_unprepare(priv->clk);

	nb8800_dma_free(ndev);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver nb8800_driver = {
	.driver = {
		.name		= "nb8800",
		.of_match_table	= nb8800_dt_ids,
	},
	.probe	= nb8800_probe,
	.remove	= nb8800_remove,
};

module_platform_driver(nb8800_driver);

MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
MODULE_LICENSE("GPL");