encx24j600.c

/**
 * Microchip ENCX24J600 ethernet driver
 *
 * Copyright (C) 2015 Gridpoint
 * Author: Jon Ringle <jringle@gridpoint.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <linux/spi/spi.h>

#include "encx24j600_hw.h"

#define DRV_NAME "encx24j600"
#define DRV_VERSION "1.0"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* SRAM memory layout:
 *
 * 0x0000-0x05ff TX buffers  1.5KB  (1*1536) reside in the GP area in SRAM
 * 0x0600-0x5fff RX buffers 22.5KB (15*1536) reside in the RX area in SRAM
 */
#define ENC_TX_BUF_START 0x0000U
#define ENC_RX_BUF_START 0x0600U
#define ENC_RX_BUF_END   0x5fffU
#define ENC_SRAM_SIZE    0x6000U

enum {
        RXFILTER_NORMAL,
        RXFILTER_MULTI,
        RXFILTER_PROMISC
};

struct encx24j600_priv {
        struct net_device *ndev;
        struct mutex lock; /* device access lock */
        struct encx24j600_context ctx;
        struct sk_buff *tx_skb;
        struct task_struct *kworker_task;
        struct kthread_worker kworker;
        struct kthread_work tx_work;
        struct kthread_work setrx_work;
        u16 next_packet;
        bool hw_enabled;
        bool full_duplex;
        bool autoneg;
        u16 speed;
        int rxfilter;
        u32 msg_enable;
};

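/* Concurrency notes (as implemented below):
 * - priv->lock serializes all multi-register hardware sequences (TX setup,
 *   RX filter updates, reset, ethtool register dumps).
 * - tx_work and setrx_work run on the driver's kthread worker so that the
 *   blocking SPI traffic is deferred out of ndo_start_xmit() and
 *   ndo_set_rx_mode(), which are not allowed to sleep.
 * - ctx.mutex, taken in the raw read/write helpers, serializes the burst
 *   SRAM transfers on the shared SPI context.
 */
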
static void dump_packet(const char *msg, int len, const char *data)
{
        pr_debug(DRV_NAME ": %s - packet len:%d\n", msg, len);
        print_hex_dump_bytes("pk data: ", DUMP_PREFIX_OFFSET, data, len);
}

static void encx24j600_dump_rsv(struct encx24j600_priv *priv, const char *msg,
                                struct rsv *rsv)
{
        struct net_device *dev = priv->ndev;

        netdev_info(dev, "RX packet Len:%d\n", rsv->len);
        netdev_dbg(dev, "%s - NextPk: 0x%04x\n", msg,
                   rsv->next_packet);
        netdev_dbg(dev, "RxOK: %d, DribbleNibble: %d\n",
                   RSV_GETBIT(rsv->rxstat, RSV_RXOK),
                   RSV_GETBIT(rsv->rxstat, RSV_DRIBBLENIBBLE));
        netdev_dbg(dev, "CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
                   RSV_GETBIT(rsv->rxstat, RSV_CRCERROR),
                   RSV_GETBIT(rsv->rxstat, RSV_LENCHECKERR),
                   RSV_GETBIT(rsv->rxstat, RSV_LENOUTOFRANGE));
        netdev_dbg(dev, "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
                   RSV_GETBIT(rsv->rxstat, RSV_RXMULTICAST),
                   RSV_GETBIT(rsv->rxstat, RSV_RXBROADCAST),
                   RSV_GETBIT(rsv->rxstat, RSV_RXLONGEVDROPEV),
                   RSV_GETBIT(rsv->rxstat, RSV_CARRIEREV));
        netdev_dbg(dev, "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
                   RSV_GETBIT(rsv->rxstat, RSV_RXCONTROLFRAME),
                   RSV_GETBIT(rsv->rxstat, RSV_RXPAUSEFRAME),
                   RSV_GETBIT(rsv->rxstat, RSV_RXUNKNOWNOPCODE),
                   RSV_GETBIT(rsv->rxstat, RSV_RXTYPEVLAN));
}

static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
{
        struct net_device *dev = priv->ndev;
        unsigned int val = 0;
        int ret = regmap_read(priv->ctx.regmap, reg, &val);

        if (unlikely(ret))
                netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
                          __func__, ret, reg);
        return val;
}

static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
{
        struct net_device *dev = priv->ndev;
        int ret = regmap_write(priv->ctx.regmap, reg, val);

        if (unlikely(ret))
                netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
                          __func__, ret, reg, val);
}

static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
                                  u16 mask, u16 val)
{
        struct net_device *dev = priv->ndev;
        int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);

        if (unlikely(ret))
                netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
                          __func__, ret, reg, val, mask);
}

static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
{
        struct net_device *dev = priv->ndev;
        unsigned int val = 0;
        int ret = regmap_read(priv->ctx.phymap, reg, &val);

        if (unlikely(ret))
                netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
                          __func__, ret, reg);
        return val;
}

static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
{
        struct net_device *dev = priv->ndev;
        int ret = regmap_write(priv->ctx.phymap, reg, val);

        if (unlikely(ret))
                netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
                          __func__, ret, reg, val);
}

static void encx24j600_clr_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
{
        encx24j600_update_reg(priv, reg, mask, 0);
}

static void encx24j600_set_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
{
        encx24j600_update_reg(priv, reg, mask, mask);
}

static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
{
        struct net_device *dev = priv->ndev;
        int ret = regmap_write(priv->ctx.regmap, cmd, 0);

        if (unlikely(ret))
                netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
                          __func__, ret, cmd);
}

static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
                               size_t count)
{
        int ret;

        mutex_lock(&priv->ctx.mutex);
        ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
        mutex_unlock(&priv->ctx.mutex);

        return ret;
}

static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
                                const u8 *data, size_t count)
{
        int ret;

        mutex_lock(&priv->ctx.mutex);
        ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
        mutex_unlock(&priv->ctx.mutex);

        return ret;
}

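/* Register access is layered: single SFR and PHY accesses go through the
 * regmap/phymap handles set up by devm_regmap_init_encx24j600(), while the
 * raw helpers above bypass the register map and call
 * regmap_encx24j600_spi_read()/regmap_encx24j600_spi_write() directly to
 * stream whole packets through the SRAM window registers (RRXDATA on
 * receive, WGPDATA on transmit), serialized by ctx.mutex.
 */
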
static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
{
        u16 phcon1 = encx24j600_read_phy(priv, PHCON1);

        if (priv->autoneg == AUTONEG_ENABLE) {
                phcon1 |= ANEN | RENEG;
        } else {
                phcon1 &= ~ANEN;
                if (priv->speed == SPEED_100)
                        phcon1 |= SPD100;
                else
                        phcon1 &= ~SPD100;

                if (priv->full_duplex)
                        phcon1 |= PFULDPX;
                else
                        phcon1 &= ~PFULDPX;
        }
        encx24j600_write_phy(priv, PHCON1, phcon1);
}

/* Waits for autonegotiation to complete. */
static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv)
{
        struct net_device *dev = priv->ndev;
        unsigned long timeout = jiffies + msecs_to_jiffies(2000);
        u16 phstat1;
        u16 estat;
        int ret = 0;

        phstat1 = encx24j600_read_phy(priv, PHSTAT1);
        while ((phstat1 & ANDONE) == 0) {
                if (time_after(jiffies, timeout)) {
                        u16 phstat3;

                        netif_notice(priv, drv, dev, "timeout waiting for autoneg done\n");

                        priv->autoneg = AUTONEG_DISABLE;
                        phstat3 = encx24j600_read_phy(priv, PHSTAT3);
                        priv->speed = (phstat3 & PHY3SPD100)
                                      ? SPEED_100 : SPEED_10;
                        priv->full_duplex = (phstat3 & PHY3DPX) ? 1 : 0;
                        encx24j600_update_phcon1(priv);
                        netif_notice(priv, drv, dev, "Using parallel detection: %s/%s\n",
                                     priv->speed == SPEED_100 ? "100" : "10",
                                     priv->full_duplex ? "Full" : "Half");

                        return -ETIMEDOUT;
                }
                cpu_relax();
                phstat1 = encx24j600_read_phy(priv, PHSTAT1);
        }

        estat = encx24j600_read_reg(priv, ESTAT);
        if (estat & PHYDPX) {
                encx24j600_set_bits(priv, MACON2, FULDPX);
                encx24j600_write_reg(priv, MABBIPG, 0x15);
        } else {
                encx24j600_clr_bits(priv, MACON2, FULDPX);
                encx24j600_write_reg(priv, MABBIPG, 0x12);
                /* Maximum retransmission attempts */
                encx24j600_write_reg(priv, MACLCON, 0x370f);
        }

        return ret;
}

/* Access the PHY to determine link status */
static void encx24j600_check_link_status(struct encx24j600_priv *priv)
{
        struct net_device *dev = priv->ndev;
        u16 estat;

        estat = encx24j600_read_reg(priv, ESTAT);

        if (estat & PHYLNK) {
                if (priv->autoneg == AUTONEG_ENABLE)
                        encx24j600_wait_for_autoneg(priv);

                netif_carrier_on(dev);
                netif_info(priv, ifup, dev, "link up\n");
        } else {
                netif_info(priv, ifdown, dev, "link down\n");

                /* Re-enable autoneg since we won't know what we might be
                 * connected to when the link is brought back up again.
                 */
                priv->autoneg = AUTONEG_ENABLE;
                priv->full_duplex = true;
                priv->speed = SPEED_100;
                netif_carrier_off(dev);
        }
}

static void encx24j600_int_link_handler(struct encx24j600_priv *priv)
{
        struct net_device *dev = priv->ndev;

        netif_dbg(priv, intr, dev, "%s\n", __func__);
        encx24j600_check_link_status(priv);
        encx24j600_clr_bits(priv, EIR, LINKIF);
}

static void encx24j600_tx_complete(struct encx24j600_priv *priv, bool err)
{
        struct net_device *dev = priv->ndev;

        if (!priv->tx_skb) {
                BUG();
                return;
        }

        mutex_lock(&priv->lock);

        if (err)
                dev->stats.tx_errors++;
        else
                dev->stats.tx_packets++;

        dev->stats.tx_bytes += priv->tx_skb->len;

        encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);

        netif_dbg(priv, tx_done, dev, "TX Done%s\n", err ? ": Err" : "");

        dev_kfree_skb(priv->tx_skb);
        priv->tx_skb = NULL;

        netif_wake_queue(dev);

        mutex_unlock(&priv->lock);
}

static int encx24j600_receive_packet(struct encx24j600_priv *priv,
                                     struct rsv *rsv)
{
        struct net_device *dev = priv->ndev;
        struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);

        if (!skb) {
                pr_err_ratelimited("RX: OOM: packet dropped\n");
                dev->stats.rx_dropped++;
                return -ENOMEM;
        }
        skb_reserve(skb, NET_IP_ALIGN);
        encx24j600_raw_read(priv, RRXDATA, skb_put(skb, rsv->len), rsv->len);

        if (netif_msg_pktdata(priv))
                dump_packet("RX", skb->len, skb->data);

        skb->dev = dev;
        skb->protocol = eth_type_trans(skb, dev);
        skb->ip_summed = CHECKSUM_COMPLETE;

        /* Maintain stats */
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += rsv->len;
        priv->next_packet = rsv->next_packet;

        netif_rx(skb);

        return 0;
}

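/* Each received frame in the chip's RX SRAM area is preceded by a receive
 * status vector (struct rsv) holding the next-packet pointer, the frame
 * length and status flags.  encx24j600_rx_packets() below walks the pending
 * packets: it points ERXRDPT at the current packet, reads the rsv and frame
 * data through RRXDATA, then frees the space by moving ERXTAIL to just
 * before the next packet (wrapping to the end of SRAM when the tail would
 * hit the RX buffer start) and issuing SETPKTDEC to decrement the hardware
 * packet counter.
 */
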
static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
{
        struct net_device *dev = priv->ndev;

        while (packet_count--) {
                struct rsv rsv;
                u16 newrxtail;

                encx24j600_write_reg(priv, ERXRDPT, priv->next_packet);
                encx24j600_raw_read(priv, RRXDATA, (u8 *)&rsv, sizeof(rsv));

                if (netif_msg_rx_status(priv))
                        encx24j600_dump_rsv(priv, __func__, &rsv);

                if (!RSV_GETBIT(rsv.rxstat, RSV_RXOK) ||
                    (rsv.len > MAX_FRAMELEN)) {
                        netif_err(priv, rx_err, dev, "RX Error %04x\n",
                                  rsv.rxstat);
                        dev->stats.rx_errors++;

                        if (RSV_GETBIT(rsv.rxstat, RSV_CRCERROR))
                                dev->stats.rx_crc_errors++;
                        if (RSV_GETBIT(rsv.rxstat, RSV_LENCHECKERR))
                                dev->stats.rx_frame_errors++;
                        if (rsv.len > MAX_FRAMELEN)
                                dev->stats.rx_over_errors++;
                } else {
                        encx24j600_receive_packet(priv, &rsv);
                }

                newrxtail = priv->next_packet - 2;
                if (newrxtail == ENC_RX_BUF_START)
                        newrxtail = ENC_SRAM_SIZE - 2;

                encx24j600_cmd(priv, SETPKTDEC);
                encx24j600_write_reg(priv, ERXTAIL, newrxtail);
        }
}

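/* Interrupt handling runs in the threaded IRQ handler registered in
 * encx24j600_open() (NULL primary handler, IRQF_ONESHOT), so it may sleep
 * on SPI transfers.  The chip-level interrupt enable is cleared with CLREIE
 * on entry, the link/TX/RX events flagged in EIR are serviced, pending
 * packets are drained while the packet counter in ESTAT is non-zero, and
 * interrupts are re-enabled with SETEIE before returning.
 */
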
static irqreturn_t encx24j600_isr(int irq, void *dev_id)
{
        struct encx24j600_priv *priv = dev_id;
        struct net_device *dev = priv->ndev;
        int eir;

        /* Clear interrupts */
        encx24j600_cmd(priv, CLREIE);

        eir = encx24j600_read_reg(priv, EIR);

        if (eir & LINKIF)
                encx24j600_int_link_handler(priv);

        if (eir & TXIF)
                encx24j600_tx_complete(priv, false);

        if (eir & TXABTIF)
                encx24j600_tx_complete(priv, true);

        if (eir & RXABTIF) {
                if (eir & PCFULIF) {
                        /* Packet counter is full */
                        netif_err(priv, rx_err, dev, "Packet counter full\n");
                }
                dev->stats.rx_dropped++;
                encx24j600_clr_bits(priv, EIR, RXABTIF);
        }

        if (eir & PKTIF) {
                u8 packet_count;

                mutex_lock(&priv->lock);

                packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
                while (packet_count) {
                        encx24j600_rx_packets(priv, packet_count);
                        packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
                }

                mutex_unlock(&priv->lock);
        }

        /* Enable interrupts */
        encx24j600_cmd(priv, SETEIE);

        return IRQ_HANDLED;
}

static int encx24j600_soft_reset(struct encx24j600_priv *priv)
{
        int ret = 0;
        int timeout;
        u16 eudast;

        /* Write and verify a test value to EUDAST */
        regcache_cache_bypass(priv->ctx.regmap, true);
        timeout = 10;
        do {
                encx24j600_write_reg(priv, EUDAST, EUDAST_TEST_VAL);
                eudast = encx24j600_read_reg(priv, EUDAST);
                usleep_range(25, 100);
        } while ((eudast != EUDAST_TEST_VAL) && --timeout);
        regcache_cache_bypass(priv->ctx.regmap, false);

        if (timeout == 0) {
                ret = -ETIMEDOUT;
                goto err_out;
        }

        /* Wait for CLKRDY to become set */
        timeout = 10;
        while (!(encx24j600_read_reg(priv, ESTAT) & CLKRDY) && --timeout)
                usleep_range(25, 100);

        if (timeout == 0) {
                ret = -ETIMEDOUT;
                goto err_out;
        }

        /* Issue a System Reset command */
        encx24j600_cmd(priv, SETETHRST);
        usleep_range(25, 100);

        /* Confirm that EUDAST has 0000h after system reset */
        if (encx24j600_read_reg(priv, EUDAST) != 0) {
                ret = -EINVAL;
                goto err_out;
        }

        /* Wait for PHY register and status bits to become available */
        usleep_range(256, 1000);

err_out:
        return ret;
}

static int encx24j600_hw_reset(struct encx24j600_priv *priv)
{
        int ret;

        mutex_lock(&priv->lock);
        ret = encx24j600_soft_reset(priv);
        mutex_unlock(&priv->lock);

        return ret;
}

static void encx24j600_reset_hw_tx(struct encx24j600_priv *priv)
{
        encx24j600_set_bits(priv, ECON2, TXRST);
        encx24j600_clr_bits(priv, ECON2, TXRST);
}

static void encx24j600_hw_init_tx(struct encx24j600_priv *priv)
{
        /* Reset TX */
        encx24j600_reset_hw_tx(priv);

        /* Clear the TXIF flag if it was previously set */
        encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);

        /* Write the Tx Buffer pointer */
        encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
}

static void encx24j600_hw_init_rx(struct encx24j600_priv *priv)
{
        encx24j600_cmd(priv, DISABLERX);

        /* Set up RX packet start address in the SRAM */
        encx24j600_write_reg(priv, ERXST, ENC_RX_BUF_START);

        /* Preload the RX Data pointer to the beginning of the RX area */
        encx24j600_write_reg(priv, ERXRDPT, ENC_RX_BUF_START);

        priv->next_packet = ENC_RX_BUF_START;

        /* Set up RX end address in the SRAM */
        encx24j600_write_reg(priv, ERXTAIL, ENC_SRAM_SIZE - 2);

        /* Reset the user data pointers */
        encx24j600_write_reg(priv, EUDAST, ENC_SRAM_SIZE);
        encx24j600_write_reg(priv, EUDAND, ENC_SRAM_SIZE + 1);

        /* Set Max Frame length */
        encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
}

static void encx24j600_dump_config(struct encx24j600_priv *priv,
                                   const char *msg)
{
        pr_info(DRV_NAME ": %s\n", msg);

        /* CHIP configuration */
        pr_info(DRV_NAME " ECON1: %04X\n", encx24j600_read_reg(priv, ECON1));
        pr_info(DRV_NAME " ECON2: %04X\n", encx24j600_read_reg(priv, ECON2));
        pr_info(DRV_NAME " ERXFCON: %04X\n", encx24j600_read_reg(priv,
                                                                 ERXFCON));
        pr_info(DRV_NAME " ESTAT: %04X\n", encx24j600_read_reg(priv, ESTAT));
        pr_info(DRV_NAME " EIR: %04X\n", encx24j600_read_reg(priv, EIR));
        pr_info(DRV_NAME " EIDLED: %04X\n", encx24j600_read_reg(priv, EIDLED));

        /* MAC layer configuration */
        pr_info(DRV_NAME " MACON1: %04X\n", encx24j600_read_reg(priv, MACON1));
        pr_info(DRV_NAME " MACON2: %04X\n", encx24j600_read_reg(priv, MACON2));
        pr_info(DRV_NAME " MAIPG: %04X\n", encx24j600_read_reg(priv, MAIPG));
        pr_info(DRV_NAME " MACLCON: %04X\n", encx24j600_read_reg(priv,
                                                                 MACLCON));
        pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv,
                                                                 MABBIPG));

        /* PHY configuration */
        pr_info(DRV_NAME " PHCON1: %04X\n", encx24j600_read_phy(priv, PHCON1));
        pr_info(DRV_NAME " PHCON2: %04X\n", encx24j600_read_phy(priv, PHCON2));
        pr_info(DRV_NAME " PHANA: %04X\n", encx24j600_read_phy(priv, PHANA));
        pr_info(DRV_NAME " PHANLPA: %04X\n", encx24j600_read_phy(priv,
                                                                 PHANLPA));
        pr_info(DRV_NAME " PHANE: %04X\n", encx24j600_read_phy(priv, PHANE));
        pr_info(DRV_NAME " PHSTAT1: %04X\n", encx24j600_read_phy(priv,
                                                                 PHSTAT1));
        pr_info(DRV_NAME " PHSTAT2: %04X\n", encx24j600_read_phy(priv,
                                                                 PHSTAT2));
        pr_info(DRV_NAME " PHSTAT3: %04X\n", encx24j600_read_phy(priv,
                                                                 PHSTAT3));
}

static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
{
        switch (priv->rxfilter) {
        case RXFILTER_PROMISC:
                encx24j600_set_bits(priv, MACON1, PASSALL);
                encx24j600_write_reg(priv, ERXFCON, UCEN | MCEN | NOTMEEN);
                break;
        case RXFILTER_MULTI:
                encx24j600_clr_bits(priv, MACON1, PASSALL);
                encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN | MCEN);
                break;
        case RXFILTER_NORMAL:
        default:
                encx24j600_clr_bits(priv, MACON1, PASSALL);
                encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN);
                break;
        }
}

static int encx24j600_hw_init(struct encx24j600_priv *priv)
{
        struct net_device *dev = priv->ndev;
        int ret = 0;
        u16 eidled;
        u16 macon2;

        priv->hw_enabled = false;

        eidled = encx24j600_read_reg(priv, EIDLED);
        if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) {
                ret = -EINVAL;
                goto err_out;
        }

        netif_info(priv, drv, dev, "Silicon rev ID: 0x%02x\n",
                   (eidled & REVID_MASK) >> REVID_SHIFT);

        /* PHY Leds: link status,
         * LEDA: Link State + collision events
         * LEDB: Link State + transmit/receive events
         */
        encx24j600_update_reg(priv, EIDLED, 0xff00, 0xcb00);

        /* Loopback disabled */
        encx24j600_write_reg(priv, MACON1, 0x9);

        /* interpacket gap value */
        encx24j600_write_reg(priv, MAIPG, 0x0c12);

        /* Write the auto negotiation pattern */
        encx24j600_write_phy(priv, PHANA, PHANA_DEFAULT);

        encx24j600_update_phcon1(priv);
        encx24j600_check_link_status(priv);

        macon2 = MACON2_RSV1 | TXCRCEN | PADCFG0 | PADCFG2 | MACON2_DEFER;
        if ((priv->autoneg == AUTONEG_DISABLE) && priv->full_duplex)
                macon2 |= FULDPX;

        encx24j600_set_bits(priv, MACON2, macon2);

        priv->rxfilter = RXFILTER_NORMAL;
        encx24j600_set_rxfilter_mode(priv);

        /* Program the Maximum frame length */
        encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);

        /* Init Tx pointers */
        encx24j600_hw_init_tx(priv);

        /* Init Rx pointers */
        encx24j600_hw_init_rx(priv);

        if (netif_msg_hw(priv))
                encx24j600_dump_config(priv, "Hw is initialized");

err_out:
        return ret;
}

static void encx24j600_hw_enable(struct encx24j600_priv *priv)
{
        /* Clear the interrupt flags in case any were previously set */
        encx24j600_clr_bits(priv, EIR, (PCFULIF | RXABTIF | TXABTIF | TXIF |
                                        PKTIF | LINKIF));

        /* Enable the interrupts */
        encx24j600_write_reg(priv, EIE, (PCFULIE | RXABTIE | TXABTIE | TXIE |
                                         PKTIE | LINKIE | INTIE));

        /* Enable RX */
        encx24j600_cmd(priv, ENABLERX);

        priv->hw_enabled = true;
}

static void encx24j600_hw_disable(struct encx24j600_priv *priv)
{
        /* Disable all interrupts */
        encx24j600_write_reg(priv, EIE, 0);

        /* Disable RX */
        encx24j600_cmd(priv, DISABLERX);

        priv->hw_enabled = false;
}

static int encx24j600_setlink(struct net_device *dev, u8 autoneg, u16 speed,
                              u8 duplex)
{
        struct encx24j600_priv *priv = netdev_priv(dev);
        int ret = 0;

        if (!priv->hw_enabled) {
                /* link is in low power mode now; duplex setting
                 * will take effect on next encx24j600_hw_init()
                 */
                if (speed == SPEED_10 || speed == SPEED_100) {
                        priv->autoneg = (autoneg == AUTONEG_ENABLE);
                        priv->full_duplex = (duplex == DUPLEX_FULL);
                        priv->speed = speed;
                } else {
                        netif_warn(priv, link, dev, "unsupported link speed setting\n");
                        /* speeds other than SPEED_10 and SPEED_100
                         * are not supported by the chip
                         */
                        ret = -EOPNOTSUPP;
                }
        } else {
                netif_warn(priv, link, dev, "Warning: hw must be disabled to set link mode\n");
                ret = -EBUSY;
        }
        return ret;
}

static void encx24j600_hw_get_macaddr(struct encx24j600_priv *priv,
                                      unsigned char *ethaddr)
{
        unsigned short val;

        val = encx24j600_read_reg(priv, MAADR1);

        ethaddr[0] = val & 0x00ff;
        ethaddr[1] = (val & 0xff00) >> 8;

        val = encx24j600_read_reg(priv, MAADR2);

        ethaddr[2] = val & 0x00ffU;
        ethaddr[3] = (val & 0xff00U) >> 8;

        val = encx24j600_read_reg(priv, MAADR3);

        ethaddr[4] = val & 0x00ffU;
        ethaddr[5] = (val & 0xff00U) >> 8;
}

/* Program the hardware MAC address from dev->dev_addr. */
static int encx24j600_set_hw_macaddr(struct net_device *dev)
{
        struct encx24j600_priv *priv = netdev_priv(dev);

        if (priv->hw_enabled) {
                netif_info(priv, drv, dev, "Hardware must be disabled to set MAC address\n");
                return -EBUSY;
        }

        mutex_lock(&priv->lock);

        netif_info(priv, drv, dev, "%s: Setting MAC address to %pM\n",
                   dev->name, dev->dev_addr);

        encx24j600_write_reg(priv, MAADR3, (dev->dev_addr[4] |
                                            dev->dev_addr[5] << 8));
        encx24j600_write_reg(priv, MAADR2, (dev->dev_addr[2] |
                                            dev->dev_addr[3] << 8));
        encx24j600_write_reg(priv, MAADR1, (dev->dev_addr[0] |
                                            dev->dev_addr[1] << 8));

        mutex_unlock(&priv->lock);

        return 0;
}

/* Store the new hardware address in dev->dev_addr, and update the MAC. */
static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
{
        struct sockaddr *address = addr;

        if (netif_running(dev))
                return -EBUSY;
        if (!is_valid_ether_addr(address->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
        return encx24j600_set_hw_macaddr(dev);
}

static int encx24j600_open(struct net_device *dev)
{
        struct encx24j600_priv *priv = netdev_priv(dev);
        int ret = request_threaded_irq(priv->ctx.spi->irq, NULL, encx24j600_isr,
                                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                                       DRV_NAME, priv);

        if (unlikely(ret < 0)) {
                netdev_err(dev, "request irq %d failed (ret = %d)\n",
                           priv->ctx.spi->irq, ret);
                return ret;
        }

        encx24j600_hw_disable(priv);
        encx24j600_hw_init(priv);
        encx24j600_hw_enable(priv);
        netif_start_queue(dev);

        return 0;
}

static int encx24j600_stop(struct net_device *dev)
{
        struct encx24j600_priv *priv = netdev_priv(dev);

        netif_stop_queue(dev);
        free_irq(priv->ctx.spi->irq, priv);

        return 0;
}

static void encx24j600_setrx_proc(struct kthread_work *ws)
{
        struct encx24j600_priv *priv =
                        container_of(ws, struct encx24j600_priv, setrx_work);

        mutex_lock(&priv->lock);
        encx24j600_set_rxfilter_mode(priv);
        mutex_unlock(&priv->lock);
}

static void encx24j600_set_multicast_list(struct net_device *dev)
{
        struct encx24j600_priv *priv = netdev_priv(dev);
        int oldfilter = priv->rxfilter;

        if (dev->flags & IFF_PROMISC) {
                netif_dbg(priv, link, dev, "promiscuous mode\n");
                priv->rxfilter = RXFILTER_PROMISC;
        } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
                netif_dbg(priv, link, dev, "%smulticast mode\n",
                          (dev->flags & IFF_ALLMULTI) ? "all-" : "");
                priv->rxfilter = RXFILTER_MULTI;
        } else {
                netif_dbg(priv, link, dev, "normal mode\n");
                priv->rxfilter = RXFILTER_NORMAL;
        }

        if (oldfilter != priv->rxfilter)
                queue_kthread_work(&priv->kworker, &priv->setrx_work);
}

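/* Transmit path: encx24j600_tx() (ndo_start_xmit) stops the queue, stashes
 * the skb in priv->tx_skb and queues tx_work.  encx24j600_tx_proc() then
 * runs encx24j600_hw_tx() under priv->lock, which copies the frame into the
 * general purpose SRAM area via WGPDATA, programs ETXST/ETXLEN and starts
 * transmission with SETTXRTS.  Completion (TXIF or TXABTIF) is reported by
 * the interrupt handler, which frees the skb and wakes the queue in
 * encx24j600_tx_complete().
 */
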
static void encx24j600_hw_tx(struct encx24j600_priv *priv)
{
        struct net_device *dev = priv->ndev;

        netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
                   priv->tx_skb->len);

        if (netif_msg_pktdata(priv))
                dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data);

        if (encx24j600_read_reg(priv, EIR) & TXABTIF)
                /* Last transmission aborted due to error. Reset TX interface */
                encx24j600_reset_hw_tx(priv);

        /* Clear the TXIF flag if it was previously set */
        encx24j600_clr_bits(priv, EIR, TXIF);

        /* Set the data pointer to the TX buffer address in the SRAM */
        encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);

        /* Copy the packet into the SRAM */
        encx24j600_raw_write(priv, WGPDATA, (u8 *)priv->tx_skb->data,
                             priv->tx_skb->len);

        /* Program the Tx buffer start pointer */
        encx24j600_write_reg(priv, ETXST, ENC_TX_BUF_START);

        /* Program the packet length */
        encx24j600_write_reg(priv, ETXLEN, priv->tx_skb->len);

        /* Start the transmission */
        encx24j600_cmd(priv, SETTXRTS);
}

static void encx24j600_tx_proc(struct kthread_work *ws)
{
        struct encx24j600_priv *priv =
                        container_of(ws, struct encx24j600_priv, tx_work);

        mutex_lock(&priv->lock);
        encx24j600_hw_tx(priv);
        mutex_unlock(&priv->lock);
}

static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct encx24j600_priv *priv = netdev_priv(dev);

        netif_stop_queue(dev);

        /* save the timestamp */
        dev->trans_start = jiffies;

        /* Remember the skb for deferred processing */
        priv->tx_skb = skb;

        queue_kthread_work(&priv->kworker, &priv->tx_work);

        return NETDEV_TX_OK;
}

/* Deal with a transmit timeout */
static void encx24j600_tx_timeout(struct net_device *dev)
{
        struct encx24j600_priv *priv = netdev_priv(dev);

        netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
                  jiffies, jiffies - dev->trans_start);

        dev->stats.tx_errors++;
        netif_wake_queue(dev);
}

static int encx24j600_get_regs_len(struct net_device *dev)
{
        return SFR_REG_COUNT;
}

static void encx24j600_get_regs(struct net_device *dev,
                                struct ethtool_regs *regs, void *p)
{
        struct encx24j600_priv *priv = netdev_priv(dev);
        u16 *buff = p;
        u8 reg;

        regs->version = 1;
        mutex_lock(&priv->lock);
        for (reg = 0; reg < SFR_REG_COUNT; reg += 2) {
                unsigned int val = 0;
                /* ignore errors for unreadable registers */
                regmap_read(priv->ctx.regmap, reg, &val);
                buff[reg / 2] = val & 0xffff;
        }
        mutex_unlock(&priv->lock);
}

static void encx24j600_get_drvinfo(struct net_device *dev,
                                   struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, dev_name(dev->dev.parent),
                sizeof(info->bus_info));
}

static int encx24j600_get_settings(struct net_device *dev,
                                   struct ethtool_cmd *cmd)
{
        struct encx24j600_priv *priv = netdev_priv(dev);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                         SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
                         SUPPORTED_Autoneg | SUPPORTED_TP;

        ethtool_cmd_speed_set(cmd, priv->speed);
        cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
        cmd->port = PORT_TP;
        cmd->autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

        return 0;
}

static int encx24j600_set_settings(struct net_device *dev,
                                   struct ethtool_cmd *cmd)
{
        return encx24j600_setlink(dev, cmd->autoneg,
                                  ethtool_cmd_speed(cmd), cmd->duplex);
}

static u32 encx24j600_get_msglevel(struct net_device *dev)
{
        struct encx24j600_priv *priv = netdev_priv(dev);

        return priv->msg_enable;
}

static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
{
        struct encx24j600_priv *priv = netdev_priv(dev);

        priv->msg_enable = val;
}

static const struct ethtool_ops encx24j600_ethtool_ops = {
        .get_settings = encx24j600_get_settings,
        .set_settings = encx24j600_set_settings,
        .get_drvinfo = encx24j600_get_drvinfo,
        .get_msglevel = encx24j600_get_msglevel,
        .set_msglevel = encx24j600_set_msglevel,
        .get_regs_len = encx24j600_get_regs_len,
        .get_regs = encx24j600_get_regs,
};

static const struct net_device_ops encx24j600_netdev_ops = {
        .ndo_open = encx24j600_open,
        .ndo_stop = encx24j600_stop,
        .ndo_start_xmit = encx24j600_tx,
        .ndo_set_rx_mode = encx24j600_set_multicast_list,
        .ndo_set_mac_address = encx24j600_set_mac_address,
        .ndo_tx_timeout = encx24j600_tx_timeout,
        .ndo_validate_addr = eth_validate_addr,
};

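/* Probe sequence: allocate the net_device, attach the regmaps for SFR and
 * PHY access, soft-reset the chip to verify it is present, bring the
 * hardware to a known state with encx24j600_hw_init(), start the kthread
 * worker used for deferred TX and RX-filter work, read the factory MAC
 * address from the chip and finally register the netdev.
 */
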
static int encx24j600_spi_probe(struct spi_device *spi)
{
        int ret;
        struct net_device *ndev;
        struct encx24j600_priv *priv;

        ndev = alloc_etherdev(sizeof(struct encx24j600_priv));
        if (!ndev) {
                ret = -ENOMEM;
                goto error_out;
        }

        priv = netdev_priv(ndev);
        spi_set_drvdata(spi, priv);
        dev_set_drvdata(&spi->dev, priv);
        SET_NETDEV_DEV(ndev, &spi->dev);

        priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
        priv->ndev = ndev;

        /* Default PHY configuration */
        priv->full_duplex = true;
        priv->autoneg = AUTONEG_ENABLE;
        priv->speed = SPEED_100;

        priv->ctx.spi = spi;
        devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
        ndev->irq = spi->irq;
        ndev->netdev_ops = &encx24j600_netdev_ops;

        mutex_init(&priv->lock);

        /* Reset device and check if it is connected */
        if (encx24j600_hw_reset(priv)) {
                netif_err(priv, probe, ndev,
                          DRV_NAME ": Chip is not detected\n");
                ret = -EIO;
                goto out_free;
        }

        /* Initialize the device HW to a consistent state */
        if (encx24j600_hw_init(priv)) {
                netif_err(priv, probe, ndev,
                          DRV_NAME ": HW initialization error\n");
                ret = -EIO;
                goto out_free;
        }

        init_kthread_worker(&priv->kworker);
        init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
        init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);

        priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
                                         "encx24j600");

        if (IS_ERR(priv->kworker_task)) {
                ret = PTR_ERR(priv->kworker_task);
                goto out_free;
        }

        /* Get the MAC address from the chip */
        encx24j600_hw_get_macaddr(priv, ndev->dev_addr);

        ndev->ethtool_ops = &encx24j600_ethtool_ops;

        ret = register_netdev(ndev);
        if (unlikely(ret)) {
                netif_err(priv, probe, ndev, "Error %d initializing encx24j600 card\n",
                          ret);
                goto out_free;
        }

        netif_info(priv, drv, priv->ndev, "MAC address %pM\n", ndev->dev_addr);

        return ret;

out_free:
        free_netdev(ndev);

error_out:
        return ret;
}

static int encx24j600_spi_remove(struct spi_device *spi)
{
        struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);

        unregister_netdev(priv->ndev);
        free_netdev(priv->ndev);

        return 0;
}

static const struct spi_device_id encx24j600_spi_id_table[] = {
        { .name = "encx24j600" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, encx24j600_spi_id_table);

static struct spi_driver encx24j600_spi_net_driver = {
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
                .bus = &spi_bus_type,
        },
        .probe = encx24j600_spi_probe,
        .remove = encx24j600_spi_remove,
        .id_table = encx24j600_spi_id_table,
};

static int __init encx24j600_init(void)
{
        return spi_register_driver(&encx24j600_spi_net_driver);
}
module_init(encx24j600_init);

static void encx24j600_exit(void)
{
        spi_unregister_driver(&encx24j600_spi_net_driver);
}
module_exit(encx24j600_exit);

MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:" DRV_NAME);