/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/mii.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
/* String, offset, and register size in bytes if different from 4 bytes
 *
 * These strings are exported verbatim to userspace via ethtool -S, so
 * they are ABI: the historical misspellings ("TxUnicastPKts",
 * "...Ocets") must be kept as-is to avoid breaking existing tooling.
 */
static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
	{ "TxOctets", 0x000, 8 },	/* 64-bit latched counter */
	{ "TxDropPkts", 0x020 },
	{ "TxQPKTQ0", 0x030 },
	{ "TxBroadcastPkts", 0x040 },
	{ "TxMulticastPkts", 0x050 },
	{ "TxUnicastPKts", 0x060 },
	{ "TxCollisions", 0x070 },
	{ "TxSingleCollision", 0x080 },
	{ "TxMultipleCollision", 0x090 },
	{ "TxDeferredCollision", 0x0a0 },
	{ "TxLateCollision", 0x0b0 },
	{ "TxExcessiveCollision", 0x0c0 },
	{ "TxFrameInDisc", 0x0d0 },
	{ "TxPausePkts", 0x0e0 },
	{ "TxQPKTQ1", 0x0f0 },
	{ "TxQPKTQ2", 0x100 },
	{ "TxQPKTQ3", 0x110 },
	{ "TxQPKTQ4", 0x120 },
	{ "TxQPKTQ5", 0x130 },
	{ "RxOctets", 0x140, 8 },	/* 64-bit latched counter */
	{ "RxUndersizePkts", 0x160 },
	{ "RxPausePkts", 0x170 },
	{ "RxPkts64Octets", 0x180 },
	{ "RxPkts65to127Octets", 0x190 },
	{ "RxPkts128to255Octets", 0x1a0 },
	{ "RxPkts256to511Octets", 0x1b0 },
	{ "RxPkts512to1023Octets", 0x1c0 },
	{ "RxPkts1024toMaxPktsOctets", 0x1d0 },
	{ "RxOversizePkts", 0x1e0 },
	{ "RxJabbers", 0x1f0 },
	{ "RxAlignmentErrors", 0x200 },
	{ "RxFCSErrors", 0x210 },
	{ "RxGoodOctets", 0x220, 8 },	/* 64-bit latched counter */
	{ "RxDropPkts", 0x240 },
	{ "RxUnicastPkts", 0x250 },
	{ "RxMulticastPkts", 0x260 },
	{ "RxBroadcastPkts", 0x270 },
	{ "RxSAChanges", 0x280 },
	{ "RxFragments", 0x290 },
	{ "RxJumboPkt", 0x2a0 },
	{ "RxSymblErr", 0x2b0 },
	{ "InRangeErrCount", 0x2c0 },
	{ "OutRangeErrCount", 0x2d0 },
	{ "EEELpiEvent", 0x2e0 },
	{ "EEELpiDuration", 0x2f0 },
	{ "RxDiscard", 0x300, 8 },	/* 64-bit latched counter */
	{ "TxQPKTQ6", 0x320 },
	{ "TxQPKTQ7", 0x330 },
	{ "TxPkts64Octets", 0x340 },
	{ "TxPkts65to127Octets", 0x350 },
	{ "TxPkts128to255Octets", 0x360 },
	{ "TxPkts256to511Ocets", 0x370 },
	{ "TxPkts512to1023Ocets", 0x380 },
	{ "TxPkts1024toMaxPktOcets", 0x390 },
};

/* Number of MIB counters exposed per port */
#define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib)
  90. static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
  91. int port, uint8_t *data)
  92. {
  93. unsigned int i;
  94. for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
  95. memcpy(data + i * ETH_GSTRING_LEN,
  96. bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
  97. }
  98. static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
  99. int port, uint64_t *data)
  100. {
  101. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  102. const struct bcm_sf2_hw_stats *s;
  103. unsigned int i;
  104. u64 val = 0;
  105. u32 offset;
  106. mutex_lock(&priv->stats_mutex);
  107. /* Now fetch the per-port counters */
  108. for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
  109. s = &bcm_sf2_mib[i];
  110. /* Do a latched 64-bit read if needed */
  111. offset = s->reg + CORE_P_MIB_OFFSET(port);
  112. if (s->sizeof_stat == 8)
  113. val = core_readq(priv, offset);
  114. else
  115. val = core_readl(priv, offset);
  116. data[i] = (u64)val;
  117. }
  118. mutex_unlock(&priv->stats_mutex);
  119. }
/* Report how many MIB counters (and strings) each port exposes to ethtool. */
static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
{
	return BCM_SF2_STATS_SIZE;
}
/* DSA probe hook: identify the switch by a fixed name string.
 * No hardware access is performed here.
 */
static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr)
{
	return "Broadcom Starfighter 2";
}
  128. static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
  129. {
  130. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  131. unsigned int i;
  132. u32 reg;
  133. /* Enable the IMP Port to be in the same VLAN as the other ports
  134. * on a per-port basis such that we only have Port i and IMP in
  135. * the same VLAN.
  136. */
  137. for (i = 0; i < priv->hw_params.num_ports; i++) {
  138. if (!((1 << i) & ds->phys_port_mask))
  139. continue;
  140. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  141. reg |= (1 << cpu_port);
  142. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  143. }
  144. }
/* Configure the IMP (management/CPU) port: power its memories, enable
 * forwarding to/from it, turn on Broadcom tagging, and force link up.
 * The register write order follows the hardware bring-up sequence.
 */
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg, val;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Resolve which bit controls the Broadcom tag; only ports 8, 7
	 * and 5 can be IMP ports, anything else gets no tag enable bit.
	 */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable Broadcom tags for IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception Broadcom tag for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);

	/* Force link status for IMP port */
	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
}
  200. static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
  201. {
  202. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  203. u32 reg;
  204. reg = core_readl(priv, CORE_EEE_EN_CTRL);
  205. if (enable)
  206. reg |= 1 << port;
  207. else
  208. reg &= ~(1 << port);
  209. core_writel(priv, reg, CORE_EEE_EN_CTRL);
  210. }
/* Power the integrated GPHY up or down.
 *
 * Enable: drop the power-down/IDDQ bits while holding PHY_RESET, wait
 * for the supplies to settle, then release reset (the 21us delay is
 * presumably the hardware-required settle time — TODO confirm against
 * the datasheet). Disable: power the PHY down, wait, then gate its
 * clock and hand LED control back to the PHY block.
 */
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		/* Assert reset while clearing the power-down bits */
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		/* Gate the 25MHz clock only after power-down took effect */
		reg |= CK25_DIS;
	}
	/* Final write: releases reset (enable) or gates the clock (disable) */
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}
  237. static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
  238. int port)
  239. {
  240. unsigned int off;
  241. switch (port) {
  242. case 7:
  243. off = P7_IRQ_OFF;
  244. break;
  245. case 0:
  246. /* Port 0 interrupts are located on the first bank */
  247. intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
  248. return;
  249. default:
  250. off = P_IRQ_OFF(port);
  251. break;
  252. }
  253. intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
  254. }
  255. static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
  256. int port)
  257. {
  258. unsigned int off;
  259. switch (port) {
  260. case 7:
  261. off = P7_IRQ_OFF;
  262. break;
  263. case 0:
  264. /* Port 0 interrupts are located on the first bank */
  265. intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
  266. intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
  267. return;
  268. default:
  269. off = P_IRQ_OFF(port);
  270. break;
  271. }
  272. intrl2_1_mask_set(priv, P_IRQ_MASK(off));
  273. intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
  274. }
/* Bring a user-facing port up: power its memories, clear RX/TX disable,
 * re-enable the integrated GPHY where applicable, and restore VLAN
 * membership and any previously-enabled EEE state. Always returns 0.
 */
static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* if phy_stop() has been called before, phy
			 * will be in halted state, and phy_start()
			 * will call resume.
			 *
			 * the resume path does not configure back
			 * autoneg settings, and since we hard reset
			 * the phy manually here, we need to reset the
			 * state machine also.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable MoCA port interrupts to get notified */
	if (port == priv->moca_port)
		bcm_sf2_port_intr_enable(priv, port);

	/* Set this port, and only this one to be in the default VLAN,
	 * if member of a bridge, restore its membership prior to
	 * bringing down this port.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}
/* Quiesce a port: mask its interrupts, power down its GPHY when it is
 * the sole user, set RX/TX disable and power down the port memories.
 * Ports armed for Wake-on-LAN are left fully functional.
 */
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 off, reg;

	/* Do not touch WoL-enabled ports */
	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == priv->moca_port)
		bcm_sf2_port_intr_disable(priv, port);

	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	/* The IMP port is controlled through a different register */
	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
  345. /* Returns 0 if EEE was not enabled, or 1 otherwise
  346. */
  347. static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
  348. struct phy_device *phy)
  349. {
  350. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  351. struct ethtool_eee *p = &priv->port_sts[port].eee;
  352. int ret;
  353. p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);
  354. ret = phy_init_eee(phy, 0);
  355. if (ret)
  356. return 0;
  357. bcm_sf2_eee_enable_set(ds, port, true);
  358. return 1;
  359. }
  360. static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
  361. struct ethtool_eee *e)
  362. {
  363. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  364. struct ethtool_eee *p = &priv->port_sts[port].eee;
  365. u32 reg;
  366. reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
  367. e->eee_enabled = p->eee_enabled;
  368. e->eee_active = !!(reg & (1 << port));
  369. return 0;
  370. }
  371. static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
  372. struct phy_device *phydev,
  373. struct ethtool_eee *e)
  374. {
  375. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  376. struct ethtool_eee *p = &priv->port_sts[port].eee;
  377. p->eee_enabled = e->eee_enabled;
  378. if (!p->eee_enabled) {
  379. bcm_sf2_eee_enable_set(ds, port, false);
  380. } else {
  381. p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
  382. if (!p->eee_enabled)
  383. return -EOPNOTSUPP;
  384. }
  385. return 0;
  386. }
  387. /* Fast-ageing of ARL entries for a given port, equivalent to an ARL
  388. * flush for that port.
  389. */
  390. static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
  391. {
  392. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  393. unsigned int timeout = 1000;
  394. u32 reg;
  395. core_writel(priv, port, CORE_FAST_AGE_PORT);
  396. reg = core_readl(priv, CORE_FAST_AGE_CTRL);
  397. reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
  398. core_writel(priv, reg, CORE_FAST_AGE_CTRL);
  399. do {
  400. reg = core_readl(priv, CORE_FAST_AGE_CTRL);
  401. if (!(reg & FAST_AGE_STR_DONE))
  402. break;
  403. cpu_relax();
  404. } while (timeout--);
  405. if (!timeout)
  406. return -ETIMEDOUT;
  407. core_writel(priv, 0, CORE_FAST_AGE_CTRL);
  408. return 0;
  409. }
  410. static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
  411. u32 br_port_mask)
  412. {
  413. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  414. unsigned int i;
  415. u32 reg, p_ctl;
  416. p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
  417. for (i = 0; i < priv->hw_params.num_ports; i++) {
  418. if (!((1 << i) & br_port_mask))
  419. continue;
  420. /* Add this local port to the remote port VLAN control
  421. * membership and update the remote port bitmask
  422. */
  423. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  424. reg |= 1 << port;
  425. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  426. priv->port_sts[i].vlan_ctl_mask = reg;
  427. p_ctl |= 1 << i;
  428. }
  429. /* Configure the local port VLAN control membership to include
  430. * remote ports and update the local port bitmask
  431. */
  432. core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
  433. priv->port_sts[port].vlan_ctl_mask = p_ctl;
  434. return 0;
  435. }
  436. static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port,
  437. u32 br_port_mask)
  438. {
  439. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  440. unsigned int i;
  441. u32 reg, p_ctl;
  442. p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
  443. for (i = 0; i < priv->hw_params.num_ports; i++) {
  444. /* Don't touch the remaining ports */
  445. if (!((1 << i) & br_port_mask))
  446. continue;
  447. reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
  448. reg &= ~(1 << port);
  449. core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
  450. priv->port_sts[port].vlan_ctl_mask = reg;
  451. /* Prevent self removal to preserve isolation */
  452. if (port != i)
  453. p_ctl &= ~(1 << i);
  454. }
  455. core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
  456. priv->port_sts[port].vlan_ctl_mask = p_ctl;
  457. return 0;
  458. }
/* Translate a bridge STP state into the hardware G_MISTP state for a
 * port, fast-ageing learned ARL entries when the port transitions out
 * of a learning/forwarding state.
 */
static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u8 hw_state, cur_hw_state;
	int ret = 0;
	u32 reg;

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = G_MISTP_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = G_MISTP_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = G_MISTP_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = G_MISTP_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = G_MISTP_BLOCK_STATE;
		break;
	default:
		pr_err("%s: invalid STP state: %d\n", __func__, state);
		return -EINVAL;
	}

	/* Fast-age ARL entries if we are moving a port from Learning or
	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
	 * state (hw_state)
	 */
	if (cur_hw_state != hw_state) {
		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
		    hw_state <= G_MISTP_LISTEN_STATE) {
			ret = bcm_sf2_sw_fast_age_port(ds, port);
			if (ret) {
				pr_err("%s: fast-ageing failed\n", __func__);
				return ret;
			}
		}
	}

	/* NOTE(review): hw_state is OR-ed in unshifted while the mask is
	 * shifted — this only lines up if G_MISTP_STATE_SHIFT is 0 or
	 * the state constants are pre-shifted; confirm in bcm_sf2_regs.h.
	 */
	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
	reg |= hw_state;
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));

	return 0;
}
  508. /* Address Resolution Logic routines */
  509. static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
  510. {
  511. unsigned int timeout = 10;
  512. u32 reg;
  513. do {
  514. reg = core_readl(priv, CORE_ARLA_RWCTL);
  515. if (!(reg & ARL_STRTDN))
  516. return 0;
  517. usleep_range(1000, 2000);
  518. } while (timeout--);
  519. return -ETIMEDOUT;
  520. }
  521. static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
  522. {
  523. u32 cmd;
  524. if (op > ARL_RW)
  525. return -EINVAL;
  526. cmd = core_readl(priv, CORE_ARLA_RWCTL);
  527. cmd &= ~IVL_SVL_SELECT;
  528. cmd |= ARL_STRTDN;
  529. if (op)
  530. cmd |= ARL_RW;
  531. else
  532. cmd &= ~ARL_RW;
  533. core_writel(priv, cmd, CORE_ARLA_RWCTL);
  534. return bcm_sf2_arl_op_wait(priv);
  535. }
/* Scan the 4 ARL bins for the MAC/VID previously loaded into the ARL
 * registers. With is_valid true, success fills *ent and *idx with the
 * matching entry and its bin; with is_valid false, success merely
 * confirms the (just deleted) MAC was observed. Returns -ENOENT when
 * nothing matched, or a wait error.
 */
static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
			    u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
			    bool is_valid)
{
	unsigned int i;
	int ret;

	ret = bcm_sf2_arl_op_wait(priv);
	if (ret)
		return ret;

	/* Read the 4 bins */
	for (i = 0; i < 4; i++) {
		u64 mac_vid;
		u32 fwd_entry;

		mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
		fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
		bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);

		if (ent->is_valid && is_valid) {
			*idx = i;
			return 0;
		}

		/* This is the MAC we just deleted */
		/* NOTE(review): a bitwise AND only proves some bits
		 * overlap, not that mac_vid holds this exact MAC —
		 * looks like a masked equality compare was intended;
		 * confirm before relying on this path.
		 */
		if (!is_valid && (mac_vid & mac))
			return 0;
	}

	return -ENOENT;
}
/* Perform an ARL read (op != 0) or write (op == 0) for addr/vid.
 *
 * A write first reads back the bin set to locate an existing slot for
 * the MAC (falling back to bin 0 and a fresh forward entry), programs
 * a static entry with the requested validity, then re-reads to verify
 * the hardware accepted it.
 */
static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
			  const unsigned char *addr, u16 vid, bool is_valid)
{
	struct bcm_sf2_arl_entry ent;
	u32 fwd_entry;
	u64 mac, mac_vid = 0;
	u8 idx = 0;
	int ret;

	/* Convert the array into a 64-bit MAC */
	mac = bcm_sf2_mac_to_u64(addr);

	/* Perform a read for the given MAC and VID */
	core_writeq(priv, mac, CORE_ARLA_MAC);
	core_writel(priv, vid, CORE_ARLA_VID);

	/* Issue a read operation for this MAC */
	ret = bcm_sf2_arl_rw_op(priv, 1);
	if (ret)
		return ret;

	ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
	/* If this is a read, just finish now */
	if (op)
		return ret;

	/* We could not find a matching MAC, so reset to a new entry */
	if (ret) {
		fwd_entry = 0;
		idx = 0;
	}

	/* Build the static entry to program into the selected bin */
	memset(&ent, 0, sizeof(ent));
	ent.port = port;
	ent.is_valid = is_valid;
	ent.vid = vid;
	ent.is_static = true;
	memcpy(ent.mac, addr, ETH_ALEN);
	bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);

	core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
	core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));

	ret = bcm_sf2_arl_rw_op(priv, 0);
	if (ret)
		return ret;

	/* Re-read the entry to check */
	return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
}
/* switchdev prepare phase for an FDB add: nothing to pre-allocate or
 * validate for this hardware yet, so always succeed.
 */
static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
				  const struct switchdev_obj_port_fdb *fdb,
				  struct switchdev_trans *trans)
{
	/* We do not need to do anything specific here yet */
	return 0;
}
/* switchdev commit phase: install a static ARL entry for addr/vid on
 * this port (op 0 = ARL write, is_valid true = entry installed).
 */
static int bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_fdb *fdb,
			      struct switchdev_trans *trans)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true);
}
/* Remove an FDB entry: same ARL write as the add path, but with
 * is_valid false so the entry is invalidated.
 */
static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
			      const struct switchdev_obj_port_fdb *fdb)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
}
  623. static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
  624. {
  625. unsigned timeout = 1000;
  626. u32 reg;
  627. do {
  628. reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
  629. if (!(reg & ARLA_SRCH_STDN))
  630. return 0;
  631. if (reg & ARLA_SRCH_VLID)
  632. return 0;
  633. usleep_range(1000, 2000);
  634. } while (timeout--);
  635. return -ETIMEDOUT;
  636. }
  637. static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
  638. struct bcm_sf2_arl_entry *ent)
  639. {
  640. u64 mac_vid;
  641. u32 fwd_entry;
  642. mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
  643. fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
  644. bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
  645. }
  646. static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
  647. const struct bcm_sf2_arl_entry *ent,
  648. struct switchdev_obj_port_fdb *fdb,
  649. int (*cb)(struct switchdev_obj *obj))
  650. {
  651. if (!ent->is_valid)
  652. return 0;
  653. if (port != ent->port)
  654. return 0;
  655. ether_addr_copy(fdb->addr, ent->mac);
  656. fdb->vid = ent->vid;
  657. fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
  658. return cb(&fdb->obj);
  659. }
/* Walk the ARL via the hardware search engine and report every entry
 * belonging to this port through the switchdev callback. The engine
 * yields results two at a time; an all-invalid pair ends the walk.
 */
static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
			       struct switchdev_obj_port_fdb *fdb,
			       int (*cb)(struct switchdev_obj *obj))
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct net_device *dev = ds->ports[port];
	struct bcm_sf2_arl_entry results[2];
	unsigned int count = 0;
	int ret;

	/* Start search operation */
	core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);

	do {
		ret = bcm_sf2_arl_search_wait(priv);
		if (ret)
			return ret;

		/* Read both entries, then return their values back */
		bcm_sf2_arl_search_rd(priv, 0, &results[0]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
		if (ret)
			return ret;

		bcm_sf2_arl_search_rd(priv, 1, &results[1]);
		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
		if (ret)
			return ret;

		if (!results[0].is_valid && !results[1].is_valid)
			break;
	} while (count++ < CORE_ARLA_NUM_ENTRIES);

	return 0;
}
/* Interrupt handler for INTRL2 bank 0: latch the unmasked status bits
 * into irq0_stat and acknowledge them.
 */
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}
/* Interrupt handler for INTRL2 bank 1: acknowledge the unmasked status
 * bits and track link up/down transitions for port 7.
 */
static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}
  709. static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
  710. {
  711. unsigned int timeout = 1000;
  712. u32 reg;
  713. reg = core_readl(priv, CORE_WATCHDOG_CTRL);
  714. reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
  715. core_writel(priv, reg, CORE_WATCHDOG_CTRL);
  716. do {
  717. reg = core_readl(priv, CORE_WATCHDOG_CTRL);
  718. if (!(reg & SOFTWARE_RESET))
  719. break;
  720. usleep_range(1000, 2000);
  721. } while (timeout-- > 0);
  722. if (timeout == 0)
  723. return -ETIMEDOUT;
  724. return 0;
  725. }
/* Mask and acknowledge every interrupt source on both INTRL2 banks. */
static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
/* Parse the switch's device-tree port children to discover internal
 * PHYs (phy-mode = "internal") and the MoCA port. Results land in
 * priv->int_phy_mask and priv->moca_port (-1 when there is none).
 */
static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
				   struct device_node *dn)
{
	struct device_node *port;
	const char *phy_mode_str;
	int mode;
	unsigned int port_num;
	int ret;

	priv->moca_port = -1;

	for_each_available_child_of_node(dn, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;

		/* Internal PHYs get assigned a specific 'phy-mode' property
		 * value: "internal" to help flag them before MDIO probing
		 * has completed, since they might be turned off at that
		 * time
		 */
		mode = of_get_phy_mode(port);
		if (mode < 0) {
			/* Not a standard phy-mode value; check for the
			 * "internal" marker by hand.
			 */
			ret = of_property_read_string(port, "phy-mode",
						      &phy_mode_str);
			if (ret < 0)
				continue;

			if (!strcasecmp(phy_mode_str, "internal"))
				priv->int_phy_mask |= 1 << port_num;
		}

		if (mode == PHY_INTERFACE_MODE_MOCA)
			priv->moca_port = port_num;
	}
}
/* One-time switch bring-up called by the DSA core: map register resources,
 * software-reset the switch, install the two interrupt handlers, reset the
 * MIB counters, discover port count/GPHY count, and configure every port.
 * Returns 0 on success or a negative errno; on failure all mapped register
 * windows are unmapped and (past irq0) the IRQ is freed.
 */
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct device_node *dn;
	void __iomem **base;
	unsigned int port;
	unsigned int i;
	u32 reg, rev;
	int ret;

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);

	/* All the interesting properties are at the parent device_node
	 * level
	 */
	dn = ds->pd->of_node->parent;
	bcm_sf2_identify_ports(priv, ds->pd->of_node);

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

	/* Map each named register window in order; the pointers in priv
	 * starting at priv->core are laid out consecutively, so we walk
	 * them with a moving void __iomem ** cursor.
	 */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		*base = of_iomap(dn, i);
		if (*base == NULL) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			ret = -ENOMEM;
			goto out_unmap;
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		goto out_unmap;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
			  "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_unmap;
	}

	ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
			  "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_free_irq0;
	}

	/* Reset the MIB counters: pulse RST_MIB_CNT high then low */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if ((1 << port) & ds->phys_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	/* Include the pseudo-PHY address and the broadcast PHY address to
	 * divert reads towards our workaround. This is only required for
	 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
	 * that we can use the regular SWITCH_MDIO master controller instead.
	 *
	 * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
	 * to have a 1:1 mapping between Port address and PHY address in order
	 * to utilize the slave_mii_bus instance to read from Port PHYs. This is
	 * not what we want here, so we initialize phys_mii_mask 0 to always
	 * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
	 */
	if (of_machine_is_compatible("brcm,bcm7445d0"))
		ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
	else
		ds->phys_mii_mask = 0;

	/* Cache silicon revisions for later reporting and PHY flags */
	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_free_irq0:
	free_irq(priv->irq0, priv);
out_unmap:
	/* Unmap every window that was successfully mapped so far */
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		if (*base)
			iounmap(*base);
		base++;
	}
	return ret;
}
/* DSA .set_addr callback: intentionally a no-op for this switch —
 * nothing needs to be programmed here, so just report success.
 */
static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;
}
  878. static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
  879. {
  880. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  881. /* The BCM7xxx PHY driver expects to find the integrated PHY revision
  882. * in bits 15:8 and the patch level in bits 7:0 which is exactly what
  883. * the REG_PHY_REVISION register layout is.
  884. */
  885. return priv->hw_params.gphy_rev;
  886. }
/* Perform an indirect pseudo-PHY access through the switch-internal MDIO
 * master.
 *
 * @op: 1 for a read, 0 for a write of @val
 * @addr: PHY address, @regnum: PHY register number
 *
 * Returns the 16-bit read value for reads, 0 for writes. The MDIO master
 * select bit is asserted around the access and restored afterwards.
 *
 * NOTE(review): 0x70 and 0x80 below appear to be hardware page/offset
 * constants for the internal MDIO master registers ("Page << 8 | offset",
 * then <<= 2 to convert to a byte offset) — confirm against the SF2
 * register map before touching them.
 */
static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
			       int regnum, u16 val)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	int ret = 0;
	u32 reg;

	/* Route MDIO transactions through the switch-internal master */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;
	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	/* Release the internal MDIO master again */
	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}
  912. static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
  913. {
  914. /* Intercept reads from the MDIO broadcast address or Broadcom
  915. * pseudo-PHY address
  916. */
  917. switch (addr) {
  918. case 0:
  919. case BRCM_PSEUDO_PHY_ADDR:
  920. return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
  921. default:
  922. return 0xffff;
  923. }
  924. }
  925. static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
  926. u16 val)
  927. {
  928. /* Intercept writes to the MDIO broadcast address or Broadcom
  929. * pseudo-PHY address
  930. */
  931. switch (addr) {
  932. case 0:
  933. case BRCM_PSEUDO_PHY_ADDR:
  934. bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
  935. break;
  936. }
  937. return 0;
  938. }
/* DSA .adjust_link callback: program the per-port RGMII/MII block for the
 * PHY interface mode, then force the speed/duplex/link state detected by
 * the PHY into the port override register. Internal and MoCA PHYs skip
 * the RGMII block configuration entirely.
 */
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
		/* fallthrough - no-delay RGMII shares the EXT_GPHY setup,
		 * distinguished only by id_mode_dis and str set above
		 */
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	/* Mirror the PHY's negotiated pause capabilities into the port */
	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	/* EEE is only meaningful for a real PHY, not a fixed link */
	if (!phydev->is_pseudo_fixed_link)
		p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
}
/* DSA .fixed_link_update callback: report link/duplex/pause status for a
 * fixed-link port and mirror the link state into the port override
 * register so frames actually flow.
 */
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 duplex, pause;
	u32 reg;

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We do use what the interrupt handler
	 * did determine before.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == priv->moca_port) {
		status->link = priv->port_sts[port].link;
		/* For MoCA interfaces, also force a link down notification
		 * since some version of the user-space daemon (mocad) use
		 * cmd->autoneg to force the link, which messes up the PHY
		 * state machine and make it go in PHY_FORCING state instead.
		 */
		if (!status->link)
			netif_carrier_off(ds->ports[port]);
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	/* Propagate the resolved link state into the port override register */
	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	/* CORE_PAUSESTS holds RX pause in the low bits and TX pause shifted
	 * by PAUSESTS_TX_PAUSE_SHIFT; both set means asymmetric-capable
	 */
	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}
  1057. static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
  1058. {
  1059. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1060. unsigned int port;
  1061. bcm_sf2_intr_disable(priv);
  1062. /* Disable all ports physically present including the IMP
  1063. * port, the other ones have already been disabled during
  1064. * bcm_sf2_sw_setup
  1065. */
  1066. for (port = 0; port < DSA_MAX_PORTS; port++) {
  1067. if ((1 << port) & ds->phys_port_mask ||
  1068. dsa_is_cpu_port(ds, port))
  1069. bcm_sf2_port_disable(ds, port, NULL);
  1070. }
  1071. return 0;
  1072. }
  1073. static int bcm_sf2_sw_resume(struct dsa_switch *ds)
  1074. {
  1075. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1076. unsigned int port;
  1077. int ret;
  1078. ret = bcm_sf2_sw_rst(priv);
  1079. if (ret) {
  1080. pr_err("%s: failed to software reset switch\n", __func__);
  1081. return ret;
  1082. }
  1083. if (priv->hw_params.num_gphy == 1)
  1084. bcm_sf2_gphy_enable_set(ds, true);
  1085. for (port = 0; port < DSA_MAX_PORTS; port++) {
  1086. if ((1 << port) & ds->phys_port_mask)
  1087. bcm_sf2_port_setup(ds, port, NULL);
  1088. else if (dsa_is_cpu_port(ds, port))
  1089. bcm_sf2_imp_setup(ds, port);
  1090. }
  1091. return 0;
  1092. }
  1093. static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
  1094. struct ethtool_wolinfo *wol)
  1095. {
  1096. struct net_device *p = ds->dst[ds->index].master_netdev;
  1097. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1098. struct ethtool_wolinfo pwol;
  1099. /* Get the parent device WoL settings */
  1100. p->ethtool_ops->get_wol(p, &pwol);
  1101. /* Advertise the parent device supported settings */
  1102. wol->supported = pwol.supported;
  1103. memset(&wol->sopass, 0, sizeof(wol->sopass));
  1104. if (pwol.wolopts & WAKE_MAGICSECURE)
  1105. memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
  1106. if (priv->wol_ports_mask & (1 << port))
  1107. wol->wolopts = pwol.wolopts;
  1108. else
  1109. wol->wolopts = 0;
  1110. }
  1111. static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
  1112. struct ethtool_wolinfo *wol)
  1113. {
  1114. struct net_device *p = ds->dst[ds->index].master_netdev;
  1115. struct bcm_sf2_priv *priv = ds_to_priv(ds);
  1116. s8 cpu_port = ds->dst[ds->index].cpu_port;
  1117. struct ethtool_wolinfo pwol;
  1118. p->ethtool_ops->get_wol(p, &pwol);
  1119. if (wol->wolopts & ~pwol.supported)
  1120. return -EINVAL;
  1121. if (wol->wolopts)
  1122. priv->wol_ports_mask |= (1 << port);
  1123. else
  1124. priv->wol_ports_mask &= ~(1 << port);
  1125. /* If we have at least one port enabled, make sure the CPU port
  1126. * is also enabled. If the CPU port is the last one enabled, we disable
  1127. * it since this configuration does not make sense.
  1128. */
  1129. if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
  1130. priv->wol_ports_mask |= (1 << cpu_port);
  1131. else
  1132. priv->wol_ports_mask &= ~(1 << cpu_port);
  1133. return p->ethtool_ops->set_wol(p, wol);
  1134. }
/* DSA driver operations for the Starfighter 2, registered at module init */
static struct dsa_switch_driver bcm_sf2_switch_driver = {
	.tag_protocol = DSA_TAG_PROTO_BRCM,
	.priv_size = sizeof(struct bcm_sf2_priv),
	/* Probe/setup */
	.probe = bcm_sf2_sw_probe,
	.setup = bcm_sf2_sw_setup,
	.set_addr = bcm_sf2_sw_set_addr,
	/* PHY access */
	.get_phy_flags = bcm_sf2_sw_get_phy_flags,
	.phy_read = bcm_sf2_sw_phy_read,
	.phy_write = bcm_sf2_sw_phy_write,
	/* Statistics */
	.get_strings = bcm_sf2_sw_get_strings,
	.get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count = bcm_sf2_sw_get_sset_count,
	/* Link management */
	.adjust_link = bcm_sf2_sw_adjust_link,
	.fixed_link_update = bcm_sf2_sw_fixed_link_update,
	/* Power management */
	.suspend = bcm_sf2_sw_suspend,
	.resume = bcm_sf2_sw_resume,
	.get_wol = bcm_sf2_sw_get_wol,
	.set_wol = bcm_sf2_sw_set_wol,
	/* Per-port control and EEE */
	.port_enable = bcm_sf2_port_setup,
	.port_disable = bcm_sf2_port_disable,
	.get_eee = bcm_sf2_sw_get_eee,
	.set_eee = bcm_sf2_sw_set_eee,
	/* Bridging, STP and FDB */
	.port_join_bridge = bcm_sf2_sw_br_join,
	.port_leave_bridge = bcm_sf2_sw_br_leave,
	.port_stp_update = bcm_sf2_sw_br_set_stp_state,
	.port_fdb_prepare = bcm_sf2_sw_fdb_prepare,
	.port_fdb_add = bcm_sf2_sw_fdb_add,
	.port_fdb_del = bcm_sf2_sw_fdb_del,
	.port_fdb_dump = bcm_sf2_sw_fdb_dump,
};
/* Module entry point: register the driver with the DSA core */
static int __init bcm_sf2_init(void)
{
	register_switch_driver(&bcm_sf2_switch_driver);

	return 0;
}
module_init(bcm_sf2_init);
/* Module exit point: unregister the driver from the DSA core */
static void __exit bcm_sf2_exit(void)
{
	unregister_switch_driver(&bcm_sf2_switch_driver);
}
module_exit(bcm_sf2_exit);
  1176. MODULE_AUTHOR("Broadcom Corporation");
  1177. MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
  1178. MODULE_LICENSE("GPL");
  1179. MODULE_ALIAS("platform:brcm-sf2");