/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME	"sunqe"
#define DRV_VERSION	"4.1"
#define DRV_RELDATE	"August 27, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES	200
#define QE_RESET_RETRIES	200

static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
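
	/* Hand every RX descriptor to the chip up front: RXD_OWN marks a
	 * slot as hardware owned and the low bits advertise the buffer
	 * size.  The chip clears RXD_OWN once it has filled a buffer,
	 * which is exactly what qe_rx() polls for.
	 */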
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}

static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);
	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);
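
	/* Note the nonzero CREG_TIMASK: TX-complete interrupts stay masked
	 * in normal operation because qe_start_xmit() reclaims finished
	 * descriptors lazily.  They are only unmasked when the ring fills
	 * up (see qe_start_xmit() and qec_interrupt()).
	 */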

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither inter-frame spacing nor throttling seems
	 * to be necessary.
	 */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
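
	/* MREGS_ETHADDR behaves as a single register port: after the
	 * PARESET above the MACE appears to latch one byte of the station
	 * address per write, so six back-to-back writes to the same offset
	 * load the whole address in order.
	 */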
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (--tries) {
			u8 tmp;

			mdelay(5);
			barrier();

			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries == 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;
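
	/* The *OFLOW bits each mean one of the chip's 8-bit event counters
	 * wrapped since we last looked, hence the "+= 256" updates below.
	 * Conditions known to wedge the MACE set mace_hwbug_workaround so
	 * the channel gets fully re-initialized at the bottom.
	 */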
	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		dev->stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		dev->stats.tx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		dev->stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		dev->stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Unlike on the happy meal,
 * every received packet is copied out of a fixed ring of DMA buffers
 * into a freshly allocated skb.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new;
	u32 flags;

	this = &rxbase[elem];
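
	/* Walk the ring until we hit a descriptor the hardware still owns.
	 * Each slot's buffer is recycled in place: once the packet has
	 * been copied out, end_rxd re-arms the slot with RXD_OWN so the
	 * chip can reuse it.
	 */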
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			skb = netdev_alloc_skb(dev, len + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, this_qbuf,
							len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
	struct sunqec *qecp = dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
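
	/* GLOB_STAT packs one 4-bit status nibble per QE channel, channel
	 * 0 in the low bits.  Shift the latched value right by four after
	 * each iteration and service whichever channels have a nonzero
	 * nibble.
	 */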
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
	next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
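
	/* No per-packet DMA mapping here: the whole frame is copied into a
	 * fixed per-slot buffer inside the coherent DMA area, which is why
	 * the skb can be freed as soon as the descriptor is handed over.
	 */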
	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 new_mconfig = qep->mconfig;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		memset(hash_table, 0, sizeof(hash_table));
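
		/* Standard 64-bit logical address filter: the top six bits
		 * of the little-endian CRC-32 of each multicast address
		 * pick one of 64 bits, stored here as four 16-bit words
		 * that get written out a byte at a time below.
		 */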
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}

		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	const struct linux_prom_registers *regs;
	struct sunqe *qep = netdev_priv(dev);
	struct platform_device *op;

	/* Use the DRV_* macros so the reported strings cannot drift out
	 * of sync with the module version above.
	 */
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	op = qep->op;
	regs = of_get_property(op->dev.of_node, "reg", NULL);
	if (regs)
		snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
			 regs->which_io);
}

static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	void __iomem *mregs = qep->mregs;
	u8 phyconfig;

	spin_lock_irq(&qep->lock);
	phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return phyconfig & MREGS_PHYCONFIG_LSTAT;
}

static const struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo		= qe_get_drvinfo,
	.get_link		= qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packet size is only used in 100baseT BigMAC configurations;
	 * program a sane 2K default anyway, just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((resource_size(&op->resource[1]) >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
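
	/* Worked example: with 256K of QEC local RAM, MSIZE would be 64K
	 * per channel, split 32K/32K between each channel's receive and
	 * transmit FIFOs (RSIZE/TSIZE).  256K is just an illustrative
	 * figure; the real size comes from resource[1].
	 */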
}

static u8 qec_get_burst(struct device_node *dp)
{
	u8 bsizes, bsizes_more;

	/* Find and set the burst sizes for the QEC, since it
	 * does the actual dma for all 4 channels.
	 */
	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	return bsizes;
}

static struct sunqec *get_qec(struct platform_device *child)
{
	struct platform_device *op = to_platform_device(child->dev.parent);
	struct sunqec *qecp;

	qecp = platform_get_drvdata(op);
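
	/* All four QE channels hang off a single QEC parent.  Set the
	 * parent up only on first use, then chain it on root_qec_dev so
	 * qec_exit() can tear it down when the module unloads.
	 */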
	if (!qecp) {
		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
		if (qecp) {
			u32 ctrl;

			qecp->op = op;
			qecp->gregs = of_ioremap(&op->resource[0], 0,
						 GLOB_REG_SIZE,
						 "QEC Global Registers");
			if (!qecp->gregs)
				goto fail;

			/* Make sure the QEC is in MACE mode. */
			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
			ctrl &= 0xf0000000;
			if (ctrl != GLOB_CTRL_MMODE) {
				printk(KERN_ERR "qec: Not in MACE mode!\n");
				goto fail;
			}

			if (qec_global_reset(qecp->gregs))
				goto fail;

			qecp->qec_bursts = qec_get_burst(op->dev.of_node);

			qec_init_once(qecp, op);

			if (request_irq(op->archdata.irqs[0], qec_interrupt,
					IRQF_SHARED, "qec", (void *) qecp)) {
				printk(KERN_ERR "qec: Can't register irq.\n");
				goto fail;
			}

			platform_set_drvdata(op, qecp);

			qecp->next_module = root_qec_dev;
			root_qec_dev = qecp;
		}
	}

	return qecp;

fail:
	if (qecp->gregs)
		of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
	kfree(qecp);
	return NULL;
}

static const struct net_device_ops qec_ops = {
	.ndo_open		= qe_open,
	.ndo_stop		= qe_close,
	.ndo_start_xmit		= qe_start_xmit,
	.ndo_set_rx_mode	= qe_set_multicast,
	.ndo_tx_timeout		= qe_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int qec_ether_init(struct platform_device *op)
{
	static unsigned version_printed;
	struct net_device *dev;
	struct sunqec *qecp;
	struct sunqe *qe;
	int i, res;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(struct sunqe));
	if (!dev)
		return -ENOMEM;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);

	qe = netdev_priv(dev);

	res = -ENODEV;

	i = of_getintprop_default(op->dev.of_node, "channel#", -1);
	if (i == -1)
		goto fail;
	qe->channel = i;
	spin_lock_init(&qe->lock);

	qecp = get_qec(op);
	if (!qecp)
		goto fail;

	qecp->qes[qe->channel] = qe;
	qe->dev = dev;
	qe->parent = qecp;
	qe->op = op;

	res = -ENOMEM;
	qe->qcregs = of_ioremap(&op->resource[0], 0,
				CREG_REG_SIZE, "QEC Channel Registers");
	if (!qe->qcregs) {
		printk(KERN_ERR "qe: Cannot map channel registers.\n");
		goto fail;
	}

	qe->mregs = of_ioremap(&op->resource[1], 0,
			       MREGS_REG_SIZE, "QE MACE Registers");
	if (!qe->mregs) {
		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
		goto fail;
	}

	qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
					  &qe->qblock_dvma, GFP_ATOMIC);
	qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
					 &qe->buffers_dvma, GFP_ATOMIC);
	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
	    qe->buffers == NULL || qe->buffers_dvma == 0)
		goto fail;

	/* Stop this QE. */
	qe_stop(qe);

	SET_NETDEV_DEV(dev, &op->dev);

	dev->watchdog_timeo = 5*HZ;
	dev->irq = op->archdata.irqs[0];
	dev->dma = 0;
	dev->ethtool_ops = &qe_ethtool_ops;
	dev->netdev_ops = &qec_ops;

	res = register_netdev(dev);
	if (res)
		goto fail;

	platform_set_drvdata(op, qe);

	printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
	       dev->dev_addr);
	return 0;

fail:
	if (qe->qcregs)
		of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
	if (qe->mregs)
		of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
	if (qe->qe_block)
		dma_free_coherent(&op->dev, PAGE_SIZE,
				  qe->qe_block, qe->qblock_dvma);
	if (qe->buffers)
		dma_free_coherent(&op->dev,
				  sizeof(struct sunqe_buffers),
				  qe->buffers,
				  qe->buffers_dvma);

	free_netdev(dev);

	return res;
}

static int qec_sbus_probe(struct platform_device *op)
{
	return qec_ether_init(op);
}

static int qec_sbus_remove(struct platform_device *op)
{
	struct sunqe *qp = platform_get_drvdata(op);
	struct net_device *net_dev = qp->dev;

	unregister_netdev(net_dev);

	of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
	of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
	dma_free_coherent(&op->dev, PAGE_SIZE,
			  qp->qe_block, qp->qblock_dvma);
	dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
			  qp->buffers, qp->buffers_dvma);

	free_netdev(net_dev);

	return 0;
}

static const struct of_device_id qec_sbus_match[] = {
	{
		.name = "qe",
	},
	{},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct platform_driver qec_sbus_driver = {
	.driver = {
		.name = "qec",
		.of_match_table = qec_sbus_match,
	},
	.probe		= qec_sbus_probe,
	.remove		= qec_sbus_remove,
};

static int __init qec_init(void)
{
	return platform_driver_register(&qec_sbus_driver);
}

static void __exit qec_exit(void)
{
	platform_driver_unregister(&qec_sbus_driver);
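
	/* The shared QEC parent state is allocated in get_qec(), not by a
	 * per-channel probe, so nothing frees it in qec_sbus_remove();
	 * walk the root_qec_dev list built up there and release each one.
	 */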
	while (root_qec_dev) {
		struct sunqec *next = root_qec_dev->next_module;
		struct platform_device *op = root_qec_dev->op;

		free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
		of_iounmap(&op->resource[0], root_qec_dev->gregs,
			   GLOB_REG_SIZE);
		kfree(root_qec_dev);

		root_qec_dev = next;
	}
}

module_init(qec_init);
module_exit(qec_exit);