thunder_bgx.c

/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-BGX"
#define DRV_VERSION	"1.0"

struct lmac {
	struct bgx *bgx;
	int dmac;
	u8 mac[ETH_ALEN];
	bool link_up;
	int lmacid; /* ID within BGX */
	int lmacid_bd; /* ID on board */
	struct net_device netdev;
	struct phy_device *phydev;
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	bool is_sgmii;
	struct delayed_work dwork;
	struct workqueue_struct *check_link;
};

struct bgx {
	u8 bgx_id;
	u8 qlm_mode;
	struct lmac lmac[MAX_LMAC_PER_BGX];
	int lmac_count;
	int lmac_type;
	int lane_to_sds;
	int use_training;
	void __iomem *reg_base;
	struct pci_dev *pdev;
};

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ 0, } /* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);
/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations, which in this case are redundant and only
 * add overhead.
 */
/* Register read/write APIs */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}

static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}
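
/* Poll a register until the masked bits become set (zero == false) or clear
 * (zero == true); gives up after roughly 100-200ms. Returns 0 on success,
 * 1 on timeout.
 */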
static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}
/* Return bitmap of BGX blocks present in HW on the given node */
unsigned bgx_get_map(int node)
{
	int i;
	unsigned map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
			map |= (1 << i);
	}

	return map;
}
EXPORT_SYMBOL(bgx_get_map);
/* Return number of LMAC configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx)
		return bgx->lmac_count;

	return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);

/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);
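
/* Return pointer to the MAC address currently recorded for an LMAC */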
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (!bgx)
		return;

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);

void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	u64 cfg;

	if (!bgx)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	if (enable)
		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
	else
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
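
/* Reprogram the GMI port config and PCS misc control to match the speed and
 * duplex last reported by the PHY; the LMAC is held disabled while updating.
 */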
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable lmac */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}
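
/* PHY link-change callback: latch the link, speed and duplex reported by
 * phylib and push the new state down the SGMII or XAUI hardware path.
 */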
static void bgx_lmac_handler(struct net_device *netdev)
{
	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
	struct phy_device *phydev;
	int link_changed = 0;

	if (!lmac)
		return;

	phydev = lmac->phydev;

	if (!phydev->link && lmac->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {
		link_changed = 1;
	}

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (!link_changed)
		return;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);
}

u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return 0;

	if (idx > 8)
		lmac = 0;
	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);

u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);
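
/* Clear every DMAC CAM filter entry programmed for this LMAC */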
static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	u64 offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
			(lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

/* Configure BGX LMAC in internal loopback mode */
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);
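
/* Bring up an LMAC in SGMII mode: program thresholds and jabber size, reset
 * the PCS and wait for autonegotiation to complete.
 */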
static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
		return -1;
	}

	return 0;
}
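
/* Bring up an LMAC in one of the XAUI/RXAUI/XFI/XLAUI/10G-KR/40G-KR modes:
 * reset the SPU, optionally enable link training, disable FEC and
 * autonegotiation, and program the SMU transmit path.
 */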
static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	u64 cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	else
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}
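
/* Verify XAUI-family link bring-up: kick off training if needed, wait for
 * SPU block lock / lane alignment and idle SMU FIFOs, then clear the receive
 * disable. Returns 0 when the link is healthy, -1 otherwise.
 */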
static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
				return -1;
			}
		}
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
		return -1;
	}

	/* Clear receive packet disable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);

	/* Check for MAC RX faults */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
	cfg &= SMU_RX_CTL_STATUS;
	if (!cfg)
		return 0;

	/* Rx local/remote fault seen.
	 * Do lmac reinit to see if condition recovers
	 */
	bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);

	return -1;
}
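
/* Delayed work for non-SGMII LMACs: poll SPU/SMU status every two seconds
 * and update the cached link state (40G for XLAUI, otherwise 10G).
 */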
static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 spu_link, smu_link;

	lmac = container_of(work, struct lmac, dwork.work);

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
	    !(smu_link & SMU_RX_CTL_STATUS)) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
	}

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up) {
			if (bgx_xaui_check_link(lmac)) {
				/* Errors, clear link_up state */
				lmac->link_up = 0;
				lmac->last_speed = SPEED_UNKNOWN;
				lmac->last_duplex = DUPLEX_UNKNOWN;
			}
		}
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}
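
/* Initialize and enable one LMAC: run the SGMII or XAUI bring-up, program
 * FCS/pad append and minimum packet size, then either attach the PHY
 * (SGMII/XAUI/RXAUI) or start the link-polling workqueue (XFI/XLAUI/KR).
 */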
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid))
			return -1;
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
			return -1;
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (!lmac->phydev)
			return -ENODEV;

		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       PHY_INTERFACE_MODE_SGMII))
			return -ENODEV;

		phy_start_aneg(lmac->phydev);
	} else {
		lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
						   WQ_MEM_RECLAIM, 1);
		if (!lmac->check_link)
			return -ENOMEM;
		INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
		queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
	}

	return 0;
}
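
/* Quiesce and disable one LMAC: stop link polling, drain the Rx/Tx FIFOs,
 * power down the serdes path and detach any attached PHY.
 */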
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];

	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&lmac->dwork);
		destroy_workqueue(lmac->check_link);
	}

	/* Disable packet reception */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_RX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Give chance for Rx/Tx FIFO to get drained */
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);

	/* Disable packet transmission */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_TX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Disable serdes lanes */
	if (!lmac->is_sgmii)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	else
		bgx_reg_modify(bgx, lmacid,
			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}
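
/* Derive LMAC count, LMAC type and lane-to-serdes mapping from the QLM mode,
 * then honor any LMAC count already programmed by low level firmware.
 */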
static void bgx_set_num_ports(struct bgx *bgx)
{
	u64 lmac_count;

	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/* Check if low level firmware has programmed LMAC count
	 * based on board type, if yes consider that otherwise
	 * the default static values
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}
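
/* One-time BGX block setup: program per-LMAC type and lane mapping,
 * backpressure masks, and clear all DMAC filters and steering rules.
 */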
static void bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
				      (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}
	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}
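
/* Detect the QLM mode from the LMAC0 type and training-enable bit programmed
 * by low level firmware, and log the result.
 */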
static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct device *dev = &bgx->pdev->dev;
	int lmac_type;
	int train_en;

	/* Read LMAC0 type to figure out QLM mode
	 * This is configured by low level firmware
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
				SPU_PMD_CRTL_TRAIN_EN;

	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
	}
}

#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
{
	u8 mac[ETH_ALEN];
	int ret;

	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
					    "mac-address", mac, ETH_ALEN);
	if (ret)
		goto out;

	if (!is_valid_ether_addr(mac)) {
		ret = -EINVAL;
		goto out;
	}

	memcpy(dst, mac, ETH_ALEN);
out:
	return ret;
}

/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
{
	struct bgx *bgx = context;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		goto out;

	acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);

	SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);

	bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
out:
	bgx->lmac_count++;
	return AE_OK;
}

static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;
	char bgx_sel[5];

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
		return AE_OK;
	}

	if (strncmp(string.pointer, bgx_sel, 4))
		return AE_OK;

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;
}

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}

#else

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_ACPI */

#if IS_ENABLED(CONFIG_OF_MDIO)

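/* Look up the "bgxN" node in the device tree and record each child LMAC's
 * PHY device and MAC address.
 */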
static int bgx_init_of_phy(struct bgx *bgx)
{
	struct device_node *np;
	struct device_node *np_child;
	u8 lmac = 0;
	char bgx_sel[5];
	const char *mac;

	/* Get BGX node from DT */
	snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
	np = of_find_node_by_name(NULL, bgx_sel);
	if (!np)
		return -ENODEV;

	for_each_child_of_node(np, np_child) {
		struct device_node *phy_np = of_parse_phandle(np_child,
							      "phy-handle", 0);
		if (!phy_np)
			continue;
		bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);

		mac = of_get_mac_address(np_child);
		if (mac)
			ether_addr_copy(bgx->lmac[lmac].mac, mac);

		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;
		lmac++;
		if (lmac == MAX_LMAC_PER_BGX) {
			of_node_put(np_child);
			break;
		}
	}
	return 0;
}

#else

static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_OF_MDIO */

static int bgx_init_phy(struct bgx *bgx)
{
	if (!acpi_disabled)
		return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
}
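
/* PCI probe: map the BGX CSR BAR, work out this block's ID and QLM mode,
 * discover PHYs/MAC addresses via ACPI or DT, then initialize the hardware
 * and enable every configured LMAC.
 */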
static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;

	/* Load octeon mdio driver */
	octeon_mdiobus_force_mod_depencency();

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}
	bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
	bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
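
/* PCI remove: disable all LMACs and release PCI resources */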
static void bgx_remove(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	u8 lmac;

	/* Disable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
		bgx_lmac_disable(bgx, lmac);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}

static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);