ixp4xx_eth.c

/*
 * Intel IXP4xx Ethernet driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * Ethernet port config (0x00 is not present on IXP42X):
 *
 * logical port	    0x00	0x10	    0x20
 * NPE		    0 (NPE-A)	1 (NPE-B)   2 (NPE-C)
 * physical PortId  2		0	    1
 * TX queue	    23		24	    25
 * RX-free queue    26		27	    28
 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
 *
 * Queue entries:
 * bits 0 -> 1	- NPE ID (RX and TX-done)
 * bits 0 -> 2	- priority (TX, per 802.1D)
 * bits 3 -> 4	- port ID (user-set?)
 * bits 5 -> 31	- physical descriptor address
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <mach/ixp46x_ts.h>
#include <mach/npe.h>
#include <mach/qmgr.h>
#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_MDIO		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_eth"

#define MAX_NPES		3

#define RX_DESCS		64 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */
#define TXDONE_QUEUE_LEN	64 /* dwords */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE		0x1000
#define MAX_MRU			1536 /* 0x600 */
#define RX_BUFF_SIZE		ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)

#define NAPI_WEIGHT		16
#define MDIO_INTERVAL		(3 * HZ)
#define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
#define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id)		((port_id) >> 4)
#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
#define TXDONE_QUEUE		31

#define PTP_SLAVE_MODE		1
#define PTP_MASTER_MODE		2
#define PORT2CHANNEL(p)		NPE_ID(p->id)
/* TX Control Registers */
#define TX_CNTRL0_TX_EN		0x01
#define TX_CNTRL0_HALFDUPLEX	0x02
#define TX_CNTRL0_RETRY		0x04
#define TX_CNTRL0_PAD_EN	0x08
#define TX_CNTRL0_APPEND_FCS	0x10
#define TX_CNTRL0_2DEFER	0x20
#define TX_CNTRL0_RMII		0x40 /* reduced MII */
#define TX_CNTRL1_RETRIES	0x0F /* 4 bits */

/* RX Control Registers */
#define RX_CNTRL0_RX_EN		0x01
#define RX_CNTRL0_PADSTRIP_EN	0x02
#define RX_CNTRL0_SEND_FCS	0x04
#define RX_CNTRL0_PAUSE_EN	0x08
#define RX_CNTRL0_LOOP_EN	0x10
#define RX_CNTRL0_ADDR_FLTR_EN	0x20
#define RX_CNTRL0_RX_RUNT_EN	0x40
#define RX_CNTRL0_BCAST_DIS	0x80
#define RX_CNTRL1_DEFER_EN	0x01

/* Core Control Register */
#define CORE_RESET		0x01
#define CORE_RX_FIFO_FLUSH	0x02
#define CORE_TX_FIFO_FLUSH	0x04
#define CORE_SEND_JAM		0x08
#define CORE_MDC_EN		0x10 /* MDIO using NPE-B ETH-0 only */

#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
				 TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL	CORE_MDC_EN
/* NPE message codes */
#define NPE_GETSTATUS			0x00
#define NPE_EDB_SETPORTADDRESS		0x01
#define NPE_EDB_GETMACADDRESSDATABASE	0x02
#define NPE_EDB_SETMACADDRESSSDATABASE	0x03
#define NPE_GETSTATS			0x04
#define NPE_RESETSTATS			0x05
#define NPE_SETMAXFRAMELENGTHS		0x06
#define NPE_VLAN_SETRXTAGMODE		0x07
#define NPE_VLAN_SETDEFAULTRXVID	0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY	0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE	0x0A
#define NPE_VLAN_SETRXQOSENTRY		0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE	0x0D
#define NPE_FW_SETFIREWALLMODE		0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE		0x11
#define NPE_SETLOOPBACK_MODE		0x12
#define NPE_PC_SETBSSIDTABLE		0x13
#define NPE_ADDRESS_FILTER_CONFIG	0x14
#define NPE_APPENDFCSCONFIG		0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE	0x16
#define NPE_MAC_RECOVERY_START		0x17
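
/*
 * On big-endian CPUs the NPE's byte order matches the CPU's, so sk_buffs
 * can be handed to the hardware directly and are tracked as the RX/TX
 * buffers.  On little-endian CPUs packet data has to be copied and
 * word-swapped anyway (see memcpy_swab32() below), so plain kmalloc()ed
 * bounce buffers are tracked instead.
 */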
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif
struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};
struct port {
	struct resource *mem_res;
	struct eth_regs __iomem *regs;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct phy_device *phydev;
	struct eth_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	int id;			/* logical port ID */
	int speed, duplex;
	u8 firmware[4];
	int hwts_tx_en;
	int hwts_rx_en;
};
/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};
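
/*
 * The descriptor is shared with the big-endian NPE, so the fields are
 * declared in NPE byte order: on little-endian builds the u8/u16 members
 * within each 32-bit word are listed in reverse, so that both variants
 * produce the same in-memory layout.
 */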
/* Ethernet packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};
#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
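
/*
 * On little-endian CPUs the packet data exchanged with the NPE is in the
 * wrong byte order, so both the RX and TX paths copy it through this
 * 32-bit-word swab instead of using the buffers in place.
 */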
#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif
static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;
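
/*
 * The IXP46x time-sync unit latches the source UUID and sequence ID of
 * the PTP frame it has timestamped; ixp_rx_timestamp() below reads them
 * back from the channel registers and uses ixp_ptp_match() to check
 * whether a received skb is a PTP v1 UDP/IPv4 frame carrying exactly
 * those values, i.e. the frame the RX snapshot belongs to.
 */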
static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == ntohs(*hi) &&
		uid_lo == ntohl(lo) &&
		seqid  == ntohs(*id));
}
static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	val = __raw_readl(&regs->channel[ch].ch_event);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(&regs->channel[ch].src_uuid_hi);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	__raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(&regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}
static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(netdev);
	int ch;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	ch = PORT2CHANNEL(port);
	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(0, &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		     &regs->channel[ch].ch_event);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct port *port = netdev_priv(netdev);

	cfg.flags = 0;
	cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;

	switch (port->hwts_rx_en) {
	case 0:
		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case PTP_SLAVE_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case PTP_MASTER_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
		return -1;
	}

	if (write) {
		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
	}
	__raw_writel(((phy_id << 5) | location) & 0xFF,
		     &mdio_regs->mdio_command[2]);
	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     &mdio_regs->mdio_command[3]);

	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		/* timeout is possible on both reads and writes */
		printk(KERN_ERR "%s #%i: MII command failed\n", bus->name,
		       phy_id);
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
	       phy_id, write ? "write" : "read", cycles);
#endif

	if (write)
		return 0;

	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}

	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}
static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}
static int ixp4xx_mdio_register(void)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	if (cpu_is_ixp43x()) {
		/* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
		if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH)) {
			err = -ENODEV;
			goto err_free;
		}
		mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
	} else {
		/* All MII PHY accesses use NPE-B Ethernet registers */
		if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0)) {
			err = -ENODEV;
			goto err_free;
		}
		mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
	}

	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
	spin_lock_init(&mdio_lock);
	mdio_bus->name = "IXP4xx MII Bus";
	mdio_bus->read = &ixp4xx_mdio_read;
	mdio_bus->write = &ixp4xx_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");

	if ((err = mdiobus_register(mdio_bus)))
		goto err_free;
	return 0;

err_free:
	/* don't leak the mii_bus on error paths */
	mdiobus_free(mdio_bus);
	return err;
}
static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}
static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = port->phydev;

	if (!phydev->link) {
		if (port->speed) {
			port->speed = 0;
			printk(KERN_INFO "%s: link down\n", dev->name);
		}
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);

	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
	       dev->name, port->speed, port->duplex ? "full" : "half");
}
static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}
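
/*
 * Hardware queue entries are physical descriptor addresses with metadata
 * (NPE ID, port ID -- see the comment at the top of this file) packed
 * into the low five bits.  queue_get_desc() masks the metadata off and
 * turns the address back into an index into the port's descriptor table.
 */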
static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}
static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}
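
/*
 * On little-endian CPUs eth_xmit() maps a word-aligned bounce buffer and
 * stores its physical address plus the original data's alignment offset
 * in desc->data, so unmapping has to strip the offset from the address
 * and re-derive the padded mapping length.
 */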
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}
static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	napi_schedule(&port->napi);
}
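
/*
 * NAPI poll.  On big-endian CPUs the filled RX buffer is itself an
 * sk_buff and is passed up directly, with a freshly allocated and mapped
 * skb taking its place on the RX-free queue.  On little-endian CPUs an
 * skb of the exact packet size is allocated and the data is copy-swabbed
 * into it, so the DMA buffer can be recycled immediately.
 */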
static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
			       dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_below_low_watermark(rxq) &&
			    napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
				printk(KERN_DEBUG "%s: eth_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on the RX-free queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		ixp_rx_timestamp(port, skb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}
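
/*
 * TX-done interrupt: entries on queue 31 carry the NPE ID in their low
 * bits, selecting the owning port.  The transmitted buffer is freed, the
 * descriptor is returned to the port's TX-ready queue, and the netif
 * queue is woken if TX-ready had been empty.
 */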
static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}
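
/*
 * On big-endian CPUs the skb data is DMA-mapped in place and the skb is
 * freed later by eth_txdone_irq().  On little-endian CPUs the frame is
 * copy-swabbed into an aligned bounce buffer, the skb is consumed here,
 * and the bounce buffer is what TX-done eventually frees.
 */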
static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
		dev_kfree_skb(skb);
#ifndef __ARMEB__
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;
	/* NPE firmware pads short frames with zeros internally */

	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could have missed the TX-done IRQ that refills the
		   TX-ready queue; re-check and wake if it's no longer empty */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif

	ixp_tx_timestamp(port, skb);
	skb_tx_timestamp(skb);

#ifndef __ARMEB__
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}
static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN], *addr;
	int i;
	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
		for (i = 0; i < ETH_ALEN; i++) {
			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
		}
		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	/* Build a single address/mask pair covering all subscribed multicast
	   addresses: mask off the bits in which they differ. */
	eth_zero_addr(diffs);
	addr = NULL;
	netdev_for_each_mc_addr(ha, dev) {
		if (!addr)
			addr = ha->addr; /* first MAC address */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ ha->addr[i];
	}

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}
static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct port *port = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cpu_is_ixp46x()) {
		if (cmd == SIOCSHWTSTAMP)
			return hwtstamp_set(dev, req);
		if (cmd == SIOCGHWTSTAMP)
			return hwtstamp_get(dev, req);
	}

	return phy_mii_ioctl(port->phydev, req, cmd);
}
/* ethtool support */

static void ixp4xx_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct port *port = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
		 port->firmware[0], port->firmware[1],
		 port->firmware[2], port->firmware[3]);
	strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}

static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port *port = netdev_priv(dev);
	return phy_ethtool_gset(port->phydev, cmd);
}

static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port *port = netdev_priv(dev);
	return phy_ethtool_sset(port->phydev, cmd);
}

static int ixp4xx_nway_reset(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	return phy_start_aneg(port->phydev);
}

int ixp46x_phc_index = -1;
EXPORT_SYMBOL_GPL(ixp46x_phc_index);

static int ixp4xx_get_ts_info(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	if (!cpu_is_ixp46x()) {
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = ixp46x_phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
	return 0;
}

static const struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo	= ixp4xx_get_drvinfo,
	.get_settings	= ixp4xx_get_settings,
	.set_settings	= ixp4xx_set_settings,
	.nway_reset	= ixp4xx_nway_reset,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ixp4xx_get_ts_info,
};
static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
					 "%s:TX-done", DRV_NAME);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}
static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}
static int init_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
					   POOL_ALLOC_SIZE, 32, 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}
static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE,
						 DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}
static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			printk(KERN_ERR "%s: %s not responding\n", dev->name,
			       npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	/* map all eight 802.1p priority classes to this port's RX queue */
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(port->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}
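
/*
 * Closing is the tricky part: the NPE holds on to RX buffers until
 * frames arrive for them, so the port is switched into loopback mode and
 * minimal dummy frames are injected until every RX buffer has been
 * pushed back through the RX queue.  Outstanding TX descriptors are then
 * drained from the TX and TX-ready queues the same way.
 */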
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

	i = 0;
	do {			/* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	phy_stop(port->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}
static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_close,
	.ndo_start_xmit		= eth_xmit,
	.ndo_set_rx_mode	= eth_set_mcast_list,
	.ndo_do_ioctl		= eth_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
static int eth_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
	u32 regs_phys;
	char phy_id[MII_BUS_ID_SIZE + 3];
	int err;

	if (!(dev = alloc_etherdev(sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	port = netdev_priv(dev);
	port->netdev = dev;
	port->id = pdev->id;

	switch (port->id) {
	case IXP4XX_ETH_NPEA:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
		regs_phys = IXP4XX_EthA_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEB:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
		regs_phys = IXP4XX_EthB_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEC:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
		regs_phys = IXP4XX_EthC_BASE_PHYS;
		break;
	default:
		err = -ENODEV;
		goto err_free;
	}

	dev->netdev_ops = &ixp4xx_netdev_ops;
	dev->ethtool_ops = &ixp4xx_ethtool_ops;
	dev->tx_queue_len = 100;

	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id)))) {
		err = -EIO;
		goto err_free;
	}

	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
	if (!port->mem_res) {
		err = -EBUSY;
		goto err_npe_rel;
	}

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);

	platform_set_drvdata(pdev, dev);

	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
		 mdio_bus->id, plat->phy);
	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
				   PHY_INTERFACE_MODE_MII);
	if (IS_ERR(port->phydev)) {
		err = PTR_ERR(port->phydev);
		goto err_free_mem;
	}

	port->phydev->irq = PHY_POLL;

	if ((err = register_netdev(dev)))
		goto err_phy_dis;

	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
	       npe_name(port->npe));

	return 0;

err_phy_dis:
	phy_disconnect(port->phydev);
err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	release_resource(port->mem_res);
err_npe_rel:
	npe_release(port->npe);
err_free:
	free_netdev(dev);
	return err;
}
static int eth_remove_one(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct port *port = netdev_priv(dev);

	unregister_netdev(dev);
	phy_disconnect(port->phydev);
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(port->npe);
	release_resource(port->mem_res);
	free_netdev(dev);
	return 0;
}
static struct platform_driver ixp4xx_eth_driver = {
	.driver.name	= DRV_NAME,
	.probe		= eth_init_one,
	.remove		= eth_remove_one,
};

static int __init eth_init_module(void)
{
	int err;

	if ((err = ixp4xx_mdio_register()))
		return err;
	return platform_driver_register(&ixp4xx_eth_driver);
}

static void __exit eth_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_eth_driver);
	ixp4xx_mdio_remove();
}

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");
module_init(eth_init_module);
module_exit(eth_cleanup_module);