/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * Copyright (c) 2015 CMC Electronics, Inc.
 *	Added support for VLAN Table Unit operations
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "mv88e6xxx.h"

static void assert_smi_lock(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
		dev_err(ds->master_dev, "SMI lock not held!\n");
		dump_stack();
	}
}

/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
 * will be directly accessible on some {device address,register address}
 * pair.  If the ADDR[4:0] pins are not strapped to zero, the switch
 * will only respond to SMI transactions to that specific address, and
 * an indirect addressing mechanism needs to be used to access its
 * registers.
 */
static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
{
	int ret;
	int i;

	for (i = 0; i < 16; i++) {
		ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
		if (ret < 0)
			return ret;

		if ((ret & SMI_CMD_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
				int reg)
{
	int ret;

	if (sw_addr == 0)
		return mdiobus_read_nested(bus, addr, reg);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the read command. */
	ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
				   SMI_CMD_OP_22_READ | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the read command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Read the data. */
	ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
	if (ret < 0)
		return ret;

	return ret & 0xffff;
}

static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
	int ret;

	assert_smi_lock(ds);

	if (bus == NULL)
		return -EINVAL;

	ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
	if (ret < 0)
		return ret;

	dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, ret);

	return ret;
}

int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_read(ds, addr, reg);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
				 int reg, u16 val)
{
	int ret;

	if (sw_addr == 0)
		return mdiobus_write_nested(bus, addr, reg, val);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the data to write. */
	ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
	if (ret < 0)
		return ret;

	/* Transmit the write command. */
	ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
				   SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the write command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	return 0;
}

static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
				u16 val)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);

	assert_smi_lock(ds);

	if (bus == NULL)
		return -EINVAL;

	dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, val);

	return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
}

int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}
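
/* Illustrative usage (example only, not part of the upstream file): the
 * top-level helpers above take the SMI lock themselves, so a caller can
 * read a port register and write a global register directly, e.g.:
 *
 *	int ret = mv88e6xxx_reg_read(ds, REG_PORT(0), PORT_STATUS);
 *
 *	if (ret < 0)
 *		return ret;
 *	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_MAC_01, 0x0000);
 *
 * The underscore-prefixed variants assume smi_mutex is already held.
 */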

int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);

	return 0;
}

int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
{
	int i;
	int ret;

	for (i = 0; i < 6; i++) {
		int j;

		/* Write the MAC address byte. */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
			  GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);

		/* Wait for the write to complete. */
		for (j = 0; j < 16; j++) {
			ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
			if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
				break;
		}
		if (j == 16)
			return -ETIMEDOUT;
	}

	return 0;
}

static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_read(ds, addr, regnum);
	return 0xffff;
}

static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
				u16 val)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_write(ds, addr, regnum, val);
	return 0;
}

#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
		  ret & ~GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) !=
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) ==
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
{
	struct mv88e6xxx_priv_state *ps;

	ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
	if (mutex_trylock(&ps->ppu_mutex)) {
		struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;

		if (mv88e6xxx_ppu_enable(ds) == 0)
			ps->ppu_disabled = 0;
		mutex_unlock(&ps->ppu_mutex);
	}
}

static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
{
	struct mv88e6xxx_priv_state *ps = (void *)_ps;

	schedule_work(&ps->ppu_work);
}

static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->ppu_mutex);

	/* If the PHY polling unit is enabled, disable it so that
	 * we can access the PHY registers.  If it was already
	 * disabled, cancel the timer that is going to re-enable
	 * it.
	 */
	if (!ps->ppu_disabled) {
		ret = mv88e6xxx_ppu_disable(ds);
		if (ret < 0) {
			mutex_unlock(&ps->ppu_mutex);
			return ret;
		}
		ps->ppu_disabled = 1;
	} else {
		del_timer(&ps->ppu_timer);
		ret = 0;
	}

	return ret;
}

static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	/* Schedule a timer to re-enable the PHY polling unit. */
	mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
	mutex_unlock(&ps->ppu_mutex);
}

void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	mutex_init(&ps->ppu_mutex);
	INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
	init_timer(&ps->ppu_timer);
	ps->ppu_timer.data = (unsigned long)ps;
	ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}

int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_read(ds, addr, regnum);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}

int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
			    int regnum, u16 val)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}
#endif
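
/* Illustrative usage (example only, not part of the upstream file): chips
 * that need this PPU workaround call mv88e6xxx_ppu_state_init() once during
 * setup and then route PHY accesses through the _ppu accessors, e.g.:
 *
 *	mv88e6xxx_ppu_state_init(ds);
 *	...
 *	ret = mv88e6xxx_phy_read_ppu(ds, 0, 0x01);	(PHY 0, register 1)
 *
 * The PPU is disabled around the access and re-enabled by a deferred timer.
 */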

static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6031:
	case PORT_SWITCH_ID_6061:
	case PORT_SWITCH_ID_6035:
	case PORT_SWITCH_ID_6065:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6092:
	case PORT_SWITCH_ID_6095:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6046:
	case PORT_SWITCH_ID_6085:
	case PORT_SWITCH_ID_6096:
	case PORT_SWITCH_ID_6097:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6123:
	case PORT_SWITCH_ID_6161:
	case PORT_SWITCH_ID_6165:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6121:
	case PORT_SWITCH_ID_6122:
	case PORT_SWITCH_ID_6152:
	case PORT_SWITCH_ID_6155:
	case PORT_SWITCH_ID_6182:
	case PORT_SWITCH_ID_6185:
	case PORT_SWITCH_ID_6108:
	case PORT_SWITCH_ID_6131:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6320:
	case PORT_SWITCH_ID_6321:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6171:
	case PORT_SWITCH_ID_6175:
	case PORT_SWITCH_ID_6350:
	case PORT_SWITCH_ID_6351:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6172:
	case PORT_SWITCH_ID_6176:
	case PORT_SWITCH_ID_6240:
	case PORT_SWITCH_ID_6352:
		return true;
	}
	return false;
}

/* We expect the switch to perform auto negotiation if there is a real
 * phy. However, in the case of a fixed link phy, we force the port
 * settings from the fixed link settings.
 */
void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
			   struct phy_device *phydev)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u32 reg;
	int ret;

	if (!phy_is_pseudo_fixed_link(phydev))
		return;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
	if (ret < 0)
		goto out;

	reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
		      PORT_PCS_CTRL_FORCE_LINK |
		      PORT_PCS_CTRL_DUPLEX_FULL |
		      PORT_PCS_CTRL_FORCE_DUPLEX |
		      PORT_PCS_CTRL_UNFORCED);

	reg |= PORT_PCS_CTRL_FORCE_LINK;
	if (phydev->link)
		reg |= PORT_PCS_CTRL_LINK_UP;

	if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
		goto out;

	switch (phydev->speed) {
	case SPEED_1000:
		reg |= PORT_PCS_CTRL_1000;
		break;
	case SPEED_100:
		reg |= PORT_PCS_CTRL_100;
		break;
	case SPEED_10:
		reg |= PORT_PCS_CTRL_10;
		break;
	default:
		pr_info("Unknown speed");
		goto out;
	}

	reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= PORT_PCS_CTRL_DUPLEX_FULL;

	if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
	    (port >= ps->num_ports - 2)) {
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
			reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
			reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
			reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
				PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
	}
	_mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);

out:
	mutex_unlock(&ps->smi_mutex);
}

static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
{
	int ret;
	int i;

	for (i = 0; i < 10; i++) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
		if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
	int ret;

	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
		port = (port + 1) << 5;

	/* Snapshot the hardware statistics counters for this port. */
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_CAPTURE_PORT |
				   GLOBAL_STATS_OP_HIST_RX_TX | port);
	if (ret < 0)
		return ret;

	/* Wait for the snapshotting to complete. */
	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return ret;

	return 0;
}

static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
{
	u32 _val;
	int ret;

	*val = 0;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_READ_CAPTURED |
				   GLOBAL_STATS_OP_HIST_RX_TX | stat);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
	if (ret < 0)
		return;

	_val = ret << 16;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
	if (ret < 0)
		return;

	*val = _val | ret;
}

static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
	{ "in_good_octets", 8, 0x00, },
	{ "in_bad_octets", 4, 0x02, },
	{ "in_unicast", 4, 0x04, },
	{ "in_broadcasts", 4, 0x06, },
	{ "in_multicasts", 4, 0x07, },
	{ "in_pause", 4, 0x16, },
	{ "in_undersize", 4, 0x18, },
	{ "in_fragments", 4, 0x19, },
	{ "in_oversize", 4, 0x1a, },
	{ "in_jabber", 4, 0x1b, },
	{ "in_rx_error", 4, 0x1c, },
	{ "in_fcs_error", 4, 0x1d, },
	{ "out_octets", 8, 0x0e, },
	{ "out_unicast", 4, 0x10, },
	{ "out_broadcasts", 4, 0x13, },
	{ "out_multicasts", 4, 0x12, },
	{ "out_pause", 4, 0x15, },
	{ "excessive", 4, 0x11, },
	{ "collisions", 4, 0x1e, },
	{ "deferred", 4, 0x05, },
	{ "single", 4, 0x14, },
	{ "multiple", 4, 0x17, },
	{ "out_fcs_error", 4, 0x03, },
	{ "late", 4, 0x1f, },
	{ "hist_64bytes", 4, 0x08, },
	{ "hist_65_127bytes", 4, 0x09, },
	{ "hist_128_255bytes", 4, 0x0a, },
	{ "hist_256_511bytes", 4, 0x0b, },
	{ "hist_512_1023bytes", 4, 0x0c, },
	{ "hist_1024_max_bytes", 4, 0x0d, },

	/* Not all devices have the following counters */
	{ "sw_in_discards", 4, 0x110, },
	{ "sw_in_filtered", 2, 0x112, },
	{ "sw_out_filtered", 2, 0x113, },
};

static bool have_sw_in_discards(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
	case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
	case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
	case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
	case PORT_SWITCH_ID_6352:
		return true;
	default:
		return false;
	}
}

static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
				   int nr_stats,
				   struct mv88e6xxx_hw_stat *stats,
				   int port, uint8_t *data)
{
	int i;

	for (i = 0; i < nr_stats; i++) {
		memcpy(data + i * ETH_GSTRING_LEN,
		       stats[i].string, ETH_GSTRING_LEN);
	}
}

static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
					    int stat,
					    struct mv88e6xxx_hw_stat *stats,
					    int port)
{
	struct mv88e6xxx_hw_stat *s = stats + stat;
	u32 low;
	u32 high = 0;
	int ret;
	u64 value;

	if (s->reg >= 0x100) {
		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
					  s->reg - 0x100);
		if (ret < 0)
			return UINT64_MAX;

		low = ret;
		if (s->sizeof_stat == 4) {
			ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
						  s->reg - 0x100 + 1);
			if (ret < 0)
				return UINT64_MAX;
			high = ret;
		}
	} else {
		_mv88e6xxx_stats_read(ds, s->reg, &low);
		if (s->sizeof_stat == 8)
			_mv88e6xxx_stats_read(ds, s->reg + 1, &high);
	}
	value = (((u64)high) << 32) | low;
	return value;
}

static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
					 int nr_stats,
					 struct mv88e6xxx_hw_stat *stats,
					 int port, uint64_t *data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_stats_snapshot(ds, port);
	if (ret < 0) {
		mutex_unlock(&ps->smi_mutex);
		return;
	}

	/* Read each of the counters. */
	for (i = 0; i < nr_stats; i++)
		data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);

	mutex_unlock(&ps->smi_mutex);
}

/* All the statistics in the table */
void
mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
				       mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
				       mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
	if (have_sw_in_discards(ds))
		return ARRAY_SIZE(mv88e6xxx_hw_stats);
	return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
}

void
mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
			    int port, uint64_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
			mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
			mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
	return 32 * sizeof(u16);
}

void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
			struct ethtool_regs *regs, void *_p)
{
	u16 *p = _p;
	int i;

	regs->version = 0;

	memset(p, 0xff, 32 * sizeof(u16));

	for (i = 0; i < 32; i++) {
		int ret;

		ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
		if (ret >= 0)
			p[i] = ret;
	}
}

static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
			   u16 mask)
{
	unsigned long timeout = jiffies + HZ / 10;

	while (time_before(jiffies, timeout)) {
		int ret;

		ret = _mv88e6xxx_reg_read(ds, reg, offset);
		if (ret < 0)
			return ret;
		if (!(ret & mask))
			return 0;

		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}

static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_wait(ds, reg, offset, mask);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
			       GLOBAL2_SMI_OP_BUSY);
}

int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_LOAD);
}

int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_BUSY);
}

static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
			       GLOBAL_ATU_OP_BUSY);
}

static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
					int regnum)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_READ | (addr << 5) |
				   regnum);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_phy_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
}

static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
					 int regnum, u16 val)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
				   regnum);
	return _mv88e6xxx_phy_wait(ds);
}

int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (reg < 0)
		goto out;

	e->eee_enabled = !!(reg & 0x0200);
	e->tx_lpi_enabled = !!(reg & 0x0100);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
	if (reg < 0)
		goto out;

	e->eee_active = !!(reg & PORT_STATUS_EEE);
	reg = 0;

out:
	mutex_unlock(&ps->smi_mutex);
	return reg;
}

int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
		      struct phy_device *phydev, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (ret < 0)
		goto out;

	reg = ret & ~0x0300;
	if (e->eee_enabled)
		reg |= 0x0200;
	if (e->tx_lpi_enabled)
		reg |= 0x0100;

	ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
out:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_wait(ds);
}

static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
				     struct mv88e6xxx_atu_entry *entry)
{
	u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;

	if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (entry->trunk) {
			data |= GLOBAL_ATU_DATA_TRUNK;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		data |= (entry->portv_trunkid << shift) & mask;
	}

	return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
}

static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
				     struct mv88e6xxx_atu_entry *entry,
				     bool static_too)
{
	int op;
	int err;

	err = _mv88e6xxx_atu_wait(ds);
	if (err)
		return err;

	err = _mv88e6xxx_atu_data_write(ds, entry);
	if (err)
		return err;

	if (entry->fid) {
		err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
					   entry->fid);
		if (err)
			return err;

		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
	} else {
		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
	}

	return _mv88e6xxx_atu_cmd(ds, op);
}

static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
{
	struct mv88e6xxx_atu_entry entry = {
		.fid = fid,
		.state = 0, /* EntryState bits must be 0 */
	};

	return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
}

static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
			       int to_port, bool static_too)
{
	struct mv88e6xxx_atu_entry entry = {
		.trunk = false,
		.fid = fid,
	};

	/* EntryState bits must be 0xF */
	entry.state = GLOBAL_ATU_DATA_STATE_MASK;

	/* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
	entry.portv_trunkid = (to_port & 0x0f) << 4;
	entry.portv_trunkid |= from_port & 0x0f;

	return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
}

static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
				 bool static_too)
{
	/* Destination port 0xF means remove the entries */
	return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
}
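
/* Illustrative usage (example only, not part of the upstream file): with the
 * helpers above, flushing the non-static entries of address database (FID) 0,
 * or moving every entry off a given port, comes down to:
 *
 *	err = _mv88e6xxx_atu_flush(ds, 0, false);
 *	err = _mv88e6xxx_atu_remove(ds, 0, port, false);
 *
 * Both assume smi_mutex is already held, as the leading-underscore naming
 * convention in this file indicates.
 */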

static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg, ret = 0;
	u8 oldstate;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
	if (reg < 0) {
		ret = reg;
		goto abort;
	}

	oldstate = reg & PORT_CONTROL_STATE_MASK;
	if (oldstate != state) {
		/* Flush forwarding database if we're moving a port
		 * from Learning or Forwarding state to Disabled or
		 * Blocking or Listening state.
		 */
		if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
		    state <= PORT_CONTROL_STATE_BLOCKING) {
			ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
			if (ret)
				goto abort;
		}

		reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
					   reg);
	}

abort:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

static int _mv88e6xxx_port_vlan_map_set(struct dsa_switch *ds, int port,
					u16 output_ports)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	const u16 mask = (1 << ps->num_ports) - 1;
	int reg;

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
	if (reg < 0)
		return reg;

	reg &= ~mask;
	reg |= output_ports & mask;

	return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
}

int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = PORT_CONTROL_STATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = PORT_CONTROL_STATE_BLOCKING;
		break;
	case BR_STATE_LEARNING:
		stp_state = PORT_CONTROL_STATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = PORT_CONTROL_STATE_FORWARDING;
		break;
	}

	netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);

	/* mv88e6xxx_port_stp_update may be called with softirqs disabled,
	 * so we can not update the port state directly but need to schedule it.
	 */
	ps->port_state[port] = stp_state;
	set_bit(port, &ps->port_state_update_mask);
	schedule_work(&ps->bridge_work);

	return 0;
}

static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
{
	int ret;

	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
	if (ret < 0)
		return ret;

	*pvid = ret & PORT_DEFAULT_VLAN_MASK;

	return 0;
}

int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
{
	int ret;

	ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
	if (ret < 0)
		return ret;

	*pvid = ret & PORT_DEFAULT_VLAN_MASK;

	return 0;
}

static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
{
	return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
				    pvid & PORT_DEFAULT_VLAN_MASK);
}

static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
			       GLOBAL_VTU_OP_BUSY);
}

static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_wait(ds);
}

static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
{
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
}

static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
					struct mv88e6xxx_vtu_stu_entry *entry,
					unsigned int nibble_offset)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 regs[3];
	int i;
	int ret;

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
					  GLOBAL_VTU_DATA_0_3 + i);
		if (ret < 0)
			return ret;

		regs[i] = ret;
	}

	for (i = 0; i < ps->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u16 reg = regs[i / 4];

		entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
	}

	return 0;
}

static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
					 struct mv88e6xxx_vtu_stu_entry *entry,
					 unsigned int nibble_offset)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 regs[3] = { 0 };
	int i;
	int ret;

	for (i = 0; i < ps->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u8 data = entry->data[i];

		regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
	}

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
					   GLOBAL_VTU_DATA_0_3 + i, regs[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
{
	return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
				    vid & GLOBAL_VTU_VID_MASK);
}

static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.vid = ret & GLOBAL_VTU_VID_MASK;
	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
		if (ret < 0)
			return ret;

		if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
		    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
						  GLOBAL_VTU_FID);
			if (ret < 0)
				return ret;

			next.fid = ret & GLOBAL_VTU_FID_MASK;

			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
						  GLOBAL_VTU_SID);
			if (ret < 0)
				return ret;

			next.sid = ret & GLOBAL_VTU_SID_MASK;
		}
	}

	*entry = next;
	return 0;
}

static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port member tags */
	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
	if (ret < 0)
		return ret;

	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
		reg = entry->sid & GLOBAL_VTU_SID_MASK;
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
		if (ret < 0)
			return ret;

		reg = entry->fid & GLOBAL_VTU_FID_MASK;
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
		if (ret < 0)
			return ret;
	}

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	reg |= entry->vid & GLOBAL_VTU_VID_MASK;
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
}

static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
				   sid & GLOBAL_VTU_SID_MASK);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
	if (ret < 0)
		return ret;

	next.sid = ret & GLOBAL_VTU_SID_MASK;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
		if (ret < 0)
			return ret;
	}

	*entry = next;
	return 0;
}

static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port states */
	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
	if (ret < 0)
		return ret;

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	reg = entry->sid & GLOBAL_VTU_SID_MASK;
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}

static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
				struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan = {
		.valid = true,
		.vid = vid,
		.fid = vid, /* We use one FID per VLAN */
	};
	int i;

	/* exclude all ports except the CPU and DSA ports */
	for (i = 0; i < ps->num_ports; ++i)
		vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
			? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
			: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
		struct mv88e6xxx_vtu_stu_entry vstp;
		int err;

		/* Adding a VTU entry requires a valid STU entry. As VSTP is not
		 * implemented, only one STU entry is needed to cover all VTU
		 * entries. Thus, validate the SID 0.
		 */
		vlan.sid = 0;
		err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
		if (err)
			return err;

		if (vstp.sid != vlan.sid || !vstp.valid) {
			memset(&vstp, 0, sizeof(vstp));
			vstp.valid = true;
			vstp.sid = vlan.sid;

			err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
			if (err)
				return err;
		}

		/* Clear all MAC addresses from the new database */
		err = _mv88e6xxx_atu_flush(ds, vlan.fid, true);
		if (err)
			return err;
	}

	*entry = vlan;
	return 0;
}

int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan,
				struct switchdev_trans *trans)
{
	/* We reserve a few VLANs to isolate unbridged ports */
	if (vlan->vid_end >= 4000)
		return -EOPNOTSUPP;

	/* We don't need any dynamic resource from the kernel (yet),
	 * so skip the prepare phase.
	 */
	return 0;
}

static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				    bool untagged)
{
	struct mv88e6xxx_vtu_stu_entry vlan;
	int err;

	err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
	if (err)
		return err;

	err = _mv88e6xxx_vtu_getnext(ds, &vlan);
	if (err)
		return err;

	if (vlan.vid != vid || !vlan.valid) {
		err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
		if (err)
			return err;
	}

	vlan.data[port] = untagged ?
		GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
		GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;

	return _mv88e6xxx_vtu_loadpurge(ds, &vlan);
}

int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan,
			    struct switchdev_trans *trans)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;
	int err = 0;

	mutex_lock(&ps->smi_mutex);

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		err = _mv88e6xxx_port_vlan_add(ds, port, vid, untagged);
		if (err)
			goto unlock;
	}

	/* no PVID with ranges, otherwise it's a bug */
	if (pvid)
		err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}
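
/* Illustrative mapping (assumption, not taken from this file): a request
 * such as "bridge vlan add dev <port> vid 100 pvid untagged" typically
 * reaches mv88e6xxx_port_vlan_add() as:
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid_begin = 100,
 *		.vid_end = 100,
 *	};
 *
 * so the port becomes an untagged member of VLAN 100 and its default VID
 * (PVID) is set to 100.
 */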

static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int i, err;

	err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
	if (err)
		return err;

	err = _mv88e6xxx_vtu_getnext(ds, &vlan);
	if (err)
		return err;

	if (vlan.vid != vid || !vlan.valid ||
	    vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
		return -ENOENT;

	vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	/* keep the VLAN unless all ports are excluded */
	vlan.valid = false;
	for (i = 0; i < ps->num_ports; ++i) {
		if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
			continue;

		if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
			vlan.valid = true;
			break;
		}
	}

	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
	if (err)
		return err;

	return _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
}

int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 pvid, vid;
	int err = 0;

	mutex_lock(&ps->smi_mutex);

	err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
	if (err)
		goto unlock;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		err = _mv88e6xxx_port_vlan_del(ds, port, vid);
		if (err)
			goto unlock;

		if (vid == pvid) {
			err = _mv88e6xxx_port_pvid_set(ds, port, 0);
			if (err)
				goto unlock;
		}
	}

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
			   unsigned long *ports, unsigned long *untagged)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry next;
	int port;
	int err;

	if (*vid == 4095)
		return -ENOENT;

	mutex_lock(&ps->smi_mutex);
	err = _mv88e6xxx_vtu_vid_write(ds, *vid);
	if (err)
		goto unlock;

	err = _mv88e6xxx_vtu_getnext(ds, &next);
unlock:
	mutex_unlock(&ps->smi_mutex);

	if (err)
		return err;

	if (!next.valid)
		return -ENOENT;

	*vid = next.vid;

	for (port = 0; port < ps->num_ports; ++port) {
		clear_bit(port, ports);
		clear_bit(port, untagged);

		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
			continue;

		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
		    next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
			set_bit(port, ports);

		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
			set_bit(port, untagged);
	}

	return 0;
}

static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
				    const unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_write(
			ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
			(addr[i * 2] << 8) | addr[i * 2 + 1]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
					  GLOBAL_ATU_MAC_01 + i);
		if (ret < 0)
			return ret;

		addr[i * 2] = ret >> 8;
		addr[i * 2 + 1] = ret & 0xff;
	}

	return 0;
}

static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
			       struct mv88e6xxx_atu_entry *entry)
{
	int ret;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_data_write(ds, entry);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
}
  1368. static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
  1369. const unsigned char *addr, u16 vid,
  1370. u8 state)
  1371. {
  1372. struct mv88e6xxx_atu_entry entry = { 0 };
  1373. entry.fid = vid; /* We use one FID per VLAN */
  1374. entry.state = state;
  1375. ether_addr_copy(entry.mac, addr);
  1376. if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
  1377. entry.trunk = false;
  1378. entry.portv_trunkid = BIT(port);
  1379. }
  1380. return _mv88e6xxx_atu_load(ds, &entry);
  1381. }
  1382. int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
  1383. const struct switchdev_obj_port_fdb *fdb,
  1384. struct switchdev_trans *trans)
  1385. {
  1386. /* We don't use per-port FDB */
  1387. if (fdb->vid == 0)
  1388. return -EOPNOTSUPP;
  1389. /* We don't need any dynamic resource from the kernel (yet),
  1390. * so skip the prepare phase.
  1391. */
  1392. return 0;
  1393. }
int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_fdb *fdb,
			   struct switchdev_trans *trans)
{
	int state = is_multicast_ether_addr(fdb->addr) ?
		GLOBAL_ATU_DATA_STATE_MC_STATIC :
		GLOBAL_ATU_DATA_STATE_UC_STATIC;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_fdb *fdb)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
				       GLOBAL_ATU_DATA_STATE_UNUSED);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
				  struct mv88e6xxx_atu_entry *entry)
{
	struct mv88e6xxx_atu_entry next = { 0 };
	int ret;

	next.fid = fid;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
	if (ret < 0)
		return ret;

	next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
	if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (ret & GLOBAL_ATU_DATA_TRUNK) {
			next.trunk = true;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			next.trunk = false;
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		next.portv_trunkid = (ret & mask) >> shift;
	}

	*entry = next;
	return 0;
}

int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
			    struct switchdev_obj_port_fdb *fdb,
			    int (*cb)(struct switchdev_obj *obj))
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan = {
		.vid = GLOBAL_VTU_VID_MASK, /* all ones */
	};
	int err;

	mutex_lock(&ps->smi_mutex);

	err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
	if (err)
		goto unlock;

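	/* Walk the VTU, and for each valid VLAN walk its address database.
	 * The GetNext operations return the entry following the programmed
	 * VID/MAC, so starting from all-ones wraps around to the first entry.
	 */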
	do {
		struct mv88e6xxx_atu_entry addr = {
			.mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		};

		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
		if (err)
			goto unlock;

		if (!vlan.valid)
			break;

		err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
		if (err)
			goto unlock;

		do {
			err = _mv88e6xxx_atu_getnext(ds, vlan.fid, &addr);
			if (err)
				goto unlock;

			if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
				break;

			if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
				bool is_static = addr.state ==
					(is_multicast_ether_addr(addr.mac) ?
					 GLOBAL_ATU_DATA_STATE_MC_STATIC :
					 GLOBAL_ATU_DATA_STATE_UC_STATIC);

				fdb->vid = vlan.vid;
				ether_addr_copy(fdb->addr, addr.mac);
				fdb->ndm_state = is_static ? NUD_NOARP :
					NUD_REACHABLE;

				err = cb(&fdb->obj);
				if (err)
					goto unlock;
			}
		} while (!is_broadcast_ether_addr(addr.mac));
	} while (vlan.vid < GLOBAL_VTU_VID_MASK);

unlock:
	mutex_unlock(&ps->smi_mutex);
	return err;
}

int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
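	/* Unbridged ports are isolated in a per-port reserved VLAN, derived
	 * from the switch index and port number so it is unique across the
	 * switch tree.
	 */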
	const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
	int err;

	/* The port joined a bridge, so leave its reserved VLAN */
	mutex_lock(&ps->smi_mutex);
	err = _mv88e6xxx_port_vlan_del(ds, port, pvid);
	if (!err)
		err = _mv88e6xxx_port_pvid_set(ds, port, 0);
	mutex_unlock(&ps->smi_mutex);
	return err;
}

int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
	int err;

	/* The port left the bridge, so join its reserved VLAN */
	mutex_lock(&ps->smi_mutex);
	err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true);
	if (!err)
		err = _mv88e6xxx_port_pvid_set(ds, port, pvid);
	mutex_unlock(&ps->smi_mutex);
	return err;
}

static void mv88e6xxx_bridge_work(struct work_struct *work)
{
	struct mv88e6xxx_priv_state *ps;
	struct dsa_switch *ds;
	int port;

	ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
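	/* The driver's private state is allocated immediately after the
	 * dsa_switch structure (see ds_to_priv()), so stepping back one
	 * dsa_switch from it recovers the ds pointer.
	 */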
	ds = ((struct dsa_switch *)ps) - 1;

	while (ps->port_state_update_mask) {
		port = __ffs(ps->port_state_update_mask);
		clear_bit(port, &ps->port_state_update_mask);
		mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
	}
}

static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	u16 reg;

	mutex_lock(&ps->smi_mutex);

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
	    mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
		/* MAC Forcing register: don't force link, speed,
		 * duplex or flow control state to any particular
		 * values on physical ports, but force the CPU port
		 * and all DSA ports to their maximum bandwidth and
		 * full duplex.
		 */
		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
		if (ret < 0)
			goto abort;
		reg = ret;
		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
			reg &= ~PORT_PCS_CTRL_UNFORCED;
			reg |= PORT_PCS_CTRL_FORCE_LINK |
				PORT_PCS_CTRL_LINK_UP |
				PORT_PCS_CTRL_DUPLEX_FULL |
				PORT_PCS_CTRL_FORCE_DUPLEX;
			if (mv88e6xxx_6065_family(ds))
				reg |= PORT_PCS_CTRL_100;
			else
				reg |= PORT_PCS_CTRL_1000;
		} else {
			reg |= PORT_PCS_CTRL_UNFORCED;
		}

		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_PCS_CTRL, reg);
		if (ret)
			goto abort;
	}

	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
	 * tunneling, determine priority by looking at 802.1p and IP
	 * priority fields (IP prio has precedence), and set STP state
	 * to Forwarding.
	 *
	 * If this is the CPU link, use DSA or EDSA tagging depending
	 * on which tagging mode was configured.
	 *
	 * If this is a link to another switch, use DSA tagging mode.
	 *
	 * If this is the upstream port for this switch, enable
	 * forwarding of unknown unicasts and multicasts.
	 */
	reg = 0;
	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
		reg = PORT_CONTROL_IGMP_MLD_SNOOP |
		PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
		PORT_CONTROL_STATE_FORWARDING;
	if (dsa_is_cpu_port(ds, port)) {
		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
			reg |= PORT_CONTROL_DSA_TAG;
		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
		    mv88e6xxx_6320_family(ds)) {
			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
			else
				reg |= PORT_CONTROL_FRAME_MODE_DSA;
			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
				PORT_CONTROL_FORWARD_UNKNOWN_MC;
		}

		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
		    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
		    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
				reg |= PORT_CONTROL_EGRESS_ADD_TAG;
		}
	}
	if (dsa_is_dsa_port(ds, port)) {
		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
			reg |= PORT_CONTROL_DSA_TAG;
		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
		    mv88e6xxx_6320_family(ds)) {
			reg |= PORT_CONTROL_FRAME_MODE_DSA;
		}

		if (port == dsa_upstream_port(ds))
			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
				PORT_CONTROL_FORWARD_UNKNOWN_MC;
	}
	if (reg) {
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_CONTROL, reg);
		if (ret)
			goto abort;
	}

	/* Port Control 2: don't force a good FCS, set the maximum frame size
	 * to 10240 bytes, enable secure 802.1q tags, don't discard tagged or
	 * untagged frames on this port, do a destination address lookup on all
	 * received packets as usual, disable ARP mirroring and don't send a
	 * copy of all transmitted/received frames on this port to the CPU.
	 */
	reg = 0;
	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
		reg = PORT_CONTROL_2_MAP_DA;

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
		reg |= PORT_CONTROL_2_JUMBO_10240;

	if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
		/* Set the upstream port this port should use */
		reg |= dsa_upstream_port(ds);
		/* enable forwarding of unknown multicast addresses to
		 * the upstream port
		 */
		if (port == dsa_upstream_port(ds))
			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
	}

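	/* Secure 802.1Q mode: frames whose VID is not programmed in the VTU
	 * are discarded, and VLAN membership is enforced on ingress.
	 */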
	reg |= PORT_CONTROL_2_8021Q_SECURE;

	if (reg) {
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_CONTROL_2, reg);
		if (ret)
			goto abort;
	}

	/* Port Association Vector: when learning source addresses
	 * of packets, add the address to the address database using
	 * a port bitmap that has only the bit for this port set and
	 * the other bits clear.
	 */
	reg = 1 << port;
	/* Disable learning for DSA and CPU ports */
	if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
		reg = PORT_ASSOC_VECTOR_LOCKED_PORT;

	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
	if (ret)
		goto abort;

	/* Egress rate control 2: disable egress rate control. */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
				   0x0000);
	if (ret)
		goto abort;

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Do not limit the period of time that this port can
		 * be paused for by the remote end or the period of
		 * time that this port can pause the remote end.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_PAUSE_CTRL, 0x0000);
		if (ret)
			goto abort;

		/* Port ATU control: disable limiting the number of
		 * address database entries that this port is allowed
		 * to use.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_ATU_CONTROL, 0x0000);
		if (ret)
			goto abort;

		/* Priority Override: disable DA, SA and VTU priority
		 * override.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_PRI_OVERRIDE, 0x0000);
		if (ret)
			goto abort;

		/* Port Ethertype: use the Ethertype DSA Ethertype
		 * value.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_ETH_TYPE, ETH_P_EDSA);
		if (ret)
			goto abort;

		/* Tag Remap: use an identity 802.1p prio -> switch
		 * prio mapping.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_TAG_REGMAP_0123, 0x3210);
		if (ret)
			goto abort;

		/* Tag Remap 2: use an identity 802.1p prio -> switch
		 * prio mapping.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_TAG_REGMAP_4567, 0x7654);
		if (ret)
			goto abort;
	}

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Rate Control: disable ingress rate limiting. */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_RATE_CONTROL, 0x0001);
		if (ret)
			goto abort;
	}

	/* Port Control 1: disable trunking, disable sending
	 * learning messages to this port.
	 */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
	if (ret)
		goto abort;

	/* Port based VLAN map: do not give each port its own address
	 * database, and allow every port to egress frames on all other ports.
	 */
	reg = BIT(ps->num_ports) - 1; /* all ports */
	ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port);
	if (ret)
		goto abort;

	/* Default VLAN ID and priority: don't set a default VLAN
	 * ID, and set the default packet priority to zero.
	 */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
				   0x0000);
abort:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int mv88e6xxx_setup_ports(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	for (i = 0; i < ps->num_ports; i++) {
		ret = mv88e6xxx_setup_port(ds, i);
		if (ret < 0)
			return ret;

		if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
			continue;

		/* setup the unbridged state */
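		/* (i.e. put the port in its reserved VLAN and PVID, as if
		 * it had just left a bridge)
		 */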
		ret = mv88e6xxx_port_bridge_leave(ds, i, 0);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int mv88e6xxx_setup_common(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	mutex_init(&ps->smi_mutex);

	ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;

	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);

	return 0;
}

int mv88e6xxx_setup_global(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	/* Set the default address aging time to 5 minutes, and
	 * enable address learn messages to be sent to all message
	 * ports.
	 */
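	/* (0x0140 sets the age time field, counted in 15-second units,
	 * to 20, i.e. 300 seconds.)
	 */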
	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
		  0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);

	/* Configure the IP ToS mapping registers. */
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);

	/* Configure the IEEE 802.1p priority mapping register. */
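	/* (0xfa41 packs one 2-bit output queue per 802.1p priority:
	 * priorities 1-2 map to queue 0, 0 and 3 to queue 1, 4-5 to
	 * queue 2 and 6-7 to queue 3.)
	 */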
	REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);

	/* Send all frames with destination addresses matching
	 * 01:80:c2:00:00:0x to the CPU port.
	 */
	REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);

	/* Ignore removed tag data on doubly tagged packets, disable
	 * flow control messages, force flow control priority to the
	 * highest, and send all special multicast frames to the CPU
	 * port at the highest priority.
	 */
	REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
		  0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
		  GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);

	/* Program the DSA routing table. */
	for (i = 0; i < 32; i++) {
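		/* Default to 0x1f (no valid route) for any target switch
		 * the platform routing table has no entry for.
		 */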
		int nexthop = 0x1f;

		if (ds->pd->rtable &&
		    i != ds->index && i < ds->dst->pd->nr_chips)
			nexthop = ds->pd->rtable[i] & 0x1f;

		REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
			  GLOBAL2_DEVICE_MAPPING_UPDATE |
			  (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
			  nexthop);
	}

	/* Clear all trunk masks. */
	for (i = 0; i < 8; i++)
		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
			  0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
			  ((1 << ps->num_ports) - 1));

	/* Clear all trunk mappings. */
	for (i = 0; i < 16; i++)
		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
			  GLOBAL2_TRUNK_MAPPING_UPDATE |
			  (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Send all frames with destination addresses matching
		 * 01:80:c2:00:00:2x to the CPU port.
		 */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);

		/* Initialise cross-chip port VLAN table to reset
		 * defaults.
		 */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);

		/* Clear the priority override table. */
		for (i = 0; i < 16; i++)
			REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
				  0x8000 | (i << 8));
	}

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Disable ingress rate limiting by resetting all
		 * ingress rate limit registers to their initial
		 * state.
		 */
		for (i = 0; i < ps->num_ports; i++)
			REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
				  0x9000 | (i << 8));
	}

	/* Clear the statistics counters for all ports */
	REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);

	/* Wait for the flush to complete. */
	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		goto unlock;

	/* Clear all ATU entries */
	ret = _mv88e6xxx_atu_flush(ds, 0, true);
	if (ret < 0)
		goto unlock;

	/* Clear all the VTU and STU entries */
	ret = _mv88e6xxx_vtu_stu_flush(ds);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
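	/* Bits of the global status register that are polled below to
	 * detect that the reset has completed; the larger mask also
	 * covers the PPU state when the PPU is kept active.
	 */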
	u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
	unsigned long timeout;
	int ret;
	int i;

	/* Set all ports to the disabled state. */
	for (i = 0; i < ps->num_ports; i++) {
		ret = REG_READ(REG_PORT(i), PORT_CONTROL);
		REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
	}

	/* Wait for transmit queues to drain. */
	usleep_range(2000, 4000);

	/* Reset the switch. Keep the PPU active if requested. The PPU
	 * needs to be active to support indirect phy register access
	 * through global registers 0x18 and 0x19.
	 */
	if (ppu_active)
		REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
	else
		REG_WRITE(REG_GLOBAL, 0x04, 0xc400);

	/* Wait up to one second for reset to complete. */
	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, 0x00);
		if ((ret & is_reset) == is_reset)
			break;
		usleep_range(1000, 2000);
	}
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;

	return 0;
}

int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
	if (ret < 0)
		goto error;
	ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
error:
	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
			     int reg, int val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
	if (ret < 0)
		goto error;

	ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
error:
	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	if (port >= 0 && port < ps->num_ports)
		return port;
	return -EINVAL;
}

int
mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_read(ds, addr, regnum);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int
mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int
mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int
mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
			     u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

#ifdef CONFIG_NET_DSA_HWMON
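/* The HWMON hooks below read the temperature sensor embedded in the
 * internal PHYs: page 6, registers 26 and 27 hold the current reading,
 * the limit and the alarm bit.
 */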
static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int val;

	*temp = 0;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
	if (ret < 0)
		goto error;

	/* Enable temperature sensor */
	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
	if (ret < 0)
		goto error;

	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
	if (ret < 0)
		goto error;

	/* Wait for temperature to stabilize */
	usleep_range(10000, 12000);

	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
	if (val < 0) {
		ret = val;
		goto error;
	}

	/* Disable temperature sensor */
	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, val & ~(1 << 5));
	if (ret < 0)
		goto error;

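	/* The sensor reports a 5-bit value in 5 degree steps, offset so
	 * that a raw reading of 5 corresponds to 0 C.
	 */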
	*temp = ((val & 0x1f) - 5) * 5;

error:
	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
	if (ret < 0)
		return ret;

	*temp = (ret & 0xff) - 25;

	return 0;
}

int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
{
	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
		return mv88e63xx_get_temp(ds, temp);

	return mv88e61xx_get_temp(ds, temp);
}

int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
		return -EOPNOTSUPP;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;

	*temp = (((ret >> 8) & 0x1f) * 5) - 25;

	return 0;
}

int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
		return -EOPNOTSUPP;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;

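	/* The limit lives in bits 12:8 in the same 5 degree steps decoded
	 * by mv88e6xxx_get_temp_limit(); convert and clamp the value to
	 * the 5-bit field before writing it back.
	 */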
	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);

	return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
					(ret & 0xe0ff) | (temp << 8));
}

int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
		return -EOPNOTSUPP;

	*alarm = false;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;

	*alarm = !!(ret & 0x40);

	return 0;
}
#endif /* CONFIG_NET_DSA_HWMON */

char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
			    const struct mv88e6xxx_switch_id *table,
			    unsigned int num)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
	int i, ret;

	if (!bus)
		return NULL;

	ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
	if (ret < 0)
		return NULL;

	/* Look up the exact switch ID */
	for (i = 0; i < num; ++i)
		if (table[i].id == ret)
			return table[i].name;

	/* Look up only the product number */
	for (i = 0; i < num; ++i) {
		if (table[i].id == (ret & PORT_SWITCH_ID_PROD_NUM_MASK)) {
			dev_warn(host_dev, "unknown revision %d, using base switch 0x%x\n",
				 ret & PORT_SWITCH_ID_REV_MASK,
				 ret & PORT_SWITCH_ID_PROD_NUM_MASK);
			return table[i].name;
		}
	}

	return NULL;
}

static int __init mv88e6xxx_init(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	register_switch_driver(&mv88e6131_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
	register_switch_driver(&mv88e6123_61_65_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	register_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	register_switch_driver(&mv88e6171_switch_driver);
#endif
	return 0;
}
module_init(mv88e6xxx_init);

static void __exit mv88e6xxx_cleanup(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	unregister_switch_driver(&mv88e6171_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	unregister_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
	unregister_switch_driver(&mv88e6123_61_65_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	unregister_switch_driver(&mv88e6131_switch_driver);
#endif
}
module_exit(mv88e6xxx_cleanup);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
MODULE_LICENSE("GPL");