spectrum.c 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950
  1. /*
  2. * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
  3. * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
  4. * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
  5. * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
  6. * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
  7. *
  8. * Redistribution and use in source and binary forms, with or without
  9. * modification, are permitted provided that the following conditions are met:
  10. *
  11. * 1. Redistributions of source code must retain the above copyright
  12. * notice, this list of conditions and the following disclaimer.
  13. * 2. Redistributions in binary form must reproduce the above copyright
  14. * notice, this list of conditions and the following disclaimer in the
  15. * documentation and/or other materials provided with the distribution.
  16. * 3. Neither the names of the copyright holders nor the names of its
  17. * contributors may be used to endorse or promote products derived from
  18. * this software without specific prior written permission.
  19. *
  20. * Alternatively, this software may be distributed under the terms of the
  21. * GNU General Public License ("GPL") version 2 as published by the Free
  22. * Software Foundation.
  23. *
  24. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34. * POSSIBILITY OF SUCH DAMAGE.
  35. */
  36. #include <linux/kernel.h>
  37. #include <linux/module.h>
  38. #include <linux/types.h>
  39. #include <linux/netdevice.h>
  40. #include <linux/etherdevice.h>
  41. #include <linux/ethtool.h>
  42. #include <linux/slab.h>
  43. #include <linux/device.h>
  44. #include <linux/skbuff.h>
  45. #include <linux/if_vlan.h>
  46. #include <linux/if_bridge.h>
  47. #include <linux/workqueue.h>
  48. #include <linux/jiffies.h>
  49. #include <linux/bitops.h>
  50. #include <net/switchdev.h>
  51. #include <generated/utsrelease.h>
  52. #include "spectrum.h"
  53. #include "core.h"
  54. #include "reg.h"
  55. #include "port.h"
  56. #include "trap.h"
  57. #include "txheader.h"
/* Driver identification strings, reported via ethtool drvinfo. */
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
/* Tx header field definitions. Each MLXSW_ITEM32() invocation generates
 * typed accessor helpers (mlxsw_tx_hdr_<field>_set()/_get()) over the Tx
 * header that is prepended to every packet handed to the device.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
/* Prepend and fill the mlxsw Tx header on an skb about to be transmitted.
 *
 * The caller must guarantee at least MLXSW_TXHDR_LEN bytes of headroom
 * (see mlxsw_sp_port_xmit()). The header is zeroed first, so any field
 * not explicitly set below stays 0. All packets are sent as control
 * packets directed to tx_info->local_port.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
  130. static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
  131. {
  132. char spad_pl[MLXSW_REG_SPAD_LEN];
  133. int err;
  134. err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
  135. if (err)
  136. return err;
  137. mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
  138. return 0;
  139. }
  140. static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
  141. bool is_up)
  142. {
  143. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  144. char paos_pl[MLXSW_REG_PAOS_LEN];
  145. mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
  146. is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
  147. MLXSW_PORT_ADMIN_STATUS_DOWN);
  148. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
  149. }
  150. static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
  151. bool *p_is_up)
  152. {
  153. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  154. char paos_pl[MLXSW_REG_PAOS_LEN];
  155. u8 oper_status;
  156. int err;
  157. mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
  158. err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
  159. if (err)
  160. return err;
  161. oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
  162. *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
  163. return 0;
  164. }
  165. static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
  166. {
  167. char sfmr_pl[MLXSW_REG_SFMR_LEN];
  168. int err;
  169. mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
  170. MLXSW_SP_VFID_BASE + vfid, 0);
  171. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
  172. if (err)
  173. return err;
  174. set_bit(vfid, mlxsw_sp->active_vfids);
  175. return 0;
  176. }
  177. static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
  178. {
  179. char sfmr_pl[MLXSW_REG_SFMR_LEN];
  180. clear_bit(vfid, mlxsw_sp->active_vfids);
  181. mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
  182. MLXSW_SP_VFID_BASE + vfid, 0);
  183. mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
  184. }
  185. static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
  186. unsigned char *addr)
  187. {
  188. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  189. char ppad_pl[MLXSW_REG_PPAD_LEN];
  190. mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
  191. mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
  192. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
  193. }
/* Derive the port's MAC address from the switch base MAC and program it
 * into both the netdev and the hardware.
 *
 * The address is the base MAC with the local port number added to the
 * last byte.
 * NOTE(review): the addition can wrap the last byte for large port
 * numbers if the base MAC is near the top of that byte's range —
 * presumably the base MAC allocation leaves headroom; confirm.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
  202. static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
  203. u16 vid, enum mlxsw_reg_spms_state state)
  204. {
  205. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  206. char *spms_pl;
  207. int err;
  208. spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
  209. if (!spms_pl)
  210. return -ENOMEM;
  211. mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
  212. mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
  213. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
  214. kfree(spms_pl);
  215. return err;
  216. }
  217. static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
  218. {
  219. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  220. char pmtu_pl[MLXSW_REG_PMTU_LEN];
  221. int max_mtu;
  222. int err;
  223. mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
  224. mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
  225. err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
  226. if (err)
  227. return err;
  228. max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
  229. if (mtu > max_mtu)
  230. return -EINVAL;
  231. mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
  232. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
  233. }
  234. static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
  235. {
  236. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  237. char pspa_pl[MLXSW_REG_PSPA_LEN];
  238. mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
  239. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
  240. }
  241. static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
  242. bool enable)
  243. {
  244. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  245. char svpe_pl[MLXSW_REG_SVPE_LEN];
  246. mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
  247. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
  248. }
  249. int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
  250. enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
  251. u16 vid)
  252. {
  253. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  254. char svfa_pl[MLXSW_REG_SVFA_LEN];
  255. mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
  256. fid, vid);
  257. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
  258. }
  259. static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
  260. u16 vid, bool learn_enable)
  261. {
  262. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  263. char *spvmlr_pl;
  264. int err;
  265. spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
  266. if (!spvmlr_pl)
  267. return -ENOMEM;
  268. mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
  269. learn_enable);
  270. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
  271. kfree(spvmlr_pl);
  272. return err;
  273. }
  274. static int
  275. mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
  276. {
  277. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  278. char sspr_pl[MLXSW_REG_SSPR_LEN];
  279. mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
  280. return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
  281. }
  282. static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
  283. bool *p_usable)
  284. {
  285. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  286. char pmlp_pl[MLXSW_REG_PMLP_LEN];
  287. int err;
  288. mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
  289. err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
  290. if (err)
  291. return err;
  292. *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
  293. return 0;
  294. }
  295. static int mlxsw_sp_port_open(struct net_device *dev)
  296. {
  297. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  298. int err;
  299. err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
  300. if (err)
  301. return err;
  302. netif_start_queue(dev);
  303. return 0;
  304. }
  305. static int mlxsw_sp_port_stop(struct net_device *dev)
  306. {
  307. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  308. netif_stop_queue(dev);
  309. return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
  310. }
/* ndo_start_xmit: transmit one skb on a Spectrum port.
 *
 * The skb is prefixed with a mlxsw Tx header and handed to the core for
 * transmission. On any drop the skb is freed here and NETDEV_TX_OK is
 * returned so the stack does not retry; NETDEV_TX_BUSY is returned only
 * when the core reports the transmit path busy before the skb is touched.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
		return NETDEV_TX_BUSY;

	/* Make room for the Tx header, reallocating if headroom is short.
	 * On allocation failure the original skb is still owned here and
	 * must be freed; on success the original is consumed.
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	/* Pad runt frames; eth_skb_pad() frees the skb on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* Sample the length (Tx header included) before the core takes
	 * ownership of the skb.
	 */
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
  357. static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
  358. {
  359. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  360. struct sockaddr *addr = p;
  361. int err;
  362. if (!is_valid_ether_addr(addr->sa_data))
  363. return -EADDRNOTAVAIL;
  364. err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
  365. if (err)
  366. return err;
  367. memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  368. return 0;
  369. }
  370. static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
  371. {
  372. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  373. int err;
  374. err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
  375. if (err)
  376. return err;
  377. dev->mtu = mtu;
  378. return 0;
  379. }
/* ndo_get_stats64: aggregate the per-CPU software counters into *stats.
 *
 * Each CPU's 64-bit counters are read under its u64_stats seqcount so a
 * torn read on 32-bit hosts is retried. Returns the stats argument, as
 * this ndo's contract requires.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
  409. int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
  410. u16 vid_end, bool is_member, bool untagged)
  411. {
  412. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  413. char *spvm_pl;
  414. int err;
  415. spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
  416. if (!spvm_pl)
  417. return -ENOMEM;
  418. mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
  419. vid_end, is_member, untagged);
  420. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
  421. kfree(spvm_pl);
  422. return err;
  423. }
/* Transition the port to Virtual mode.
 *
 * Installs an explicit {Port, VID} to FID mapping for every active VLAN
 * and then enables Virtual Port mode. On failure the mappings installed
 * so far are rolled back: last_visited_vid bounds the cleanup loop to
 * the bits visited before the error (VLAN_N_VID when all mappings
 * succeeded but the mode switch itself failed).
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	/* Undo only the mappings installed before the failure. Rollback
	 * errors are ignored; we are already on an error path.
	 */
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
/* Transition the port back to VLAN mode.
 *
 * Disables Virtual Port mode first, then removes the explicit
 * {Port, VID} to FID mapping of every active VLAN so the device falls
 * back to global VID to FID mapping. Stops at the first error; no
 * rollback is attempted here.
 */
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
/* ndo_vlan_rx_add_vid: create a VLAN upper on the port.
 *
 * Steps: (1) lazily create the switch-wide vFID and its flood-table
 * entry on first use, (2) on the port's first vFID, transition the port
 * to Virtual mode, (3) map {Port, VID} to the vFID, (4) disable
 * learning, (5) add VLAN membership, (6) set STP forwarding. Failures
 * unwind through the goto chain below in reverse order of the steps
 * that succeeded. Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *sftr_pl;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	/* First user of this vFID switch-wide: create it and configure
	 * its flood-table entry (flood to CPU port only).
	 */
	if (!test_bit(vid, mlxsw_sp->active_vfids)) {
		err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (err) {
			netdev_err(dev, "Failed to create vFID=%d\n",
				   MLXSW_SP_VFID_BASE + vid);
			return err;
		}

		sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
		if (!sftr_pl) {
			err = -ENOMEM;
			goto err_flood_table_alloc;
		}
		mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
				    MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
				    MLXSW_PORT_CPU_PORT, true);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
		kfree(sftr_pl);
		if (err) {
			netdev_err(dev, "Failed to configure flood table\n");
			goto err_flood_table_config;
		}
	}

	/* In case we fail in the following steps, we intentionally do not
	 * destroy the associated vFID.
	 */

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (!mlxsw_sp_port->nr_vfids) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			return err;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true, MLXSW_SP_VFID_BASE + vid, vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, MLXSW_SP_VFID_BASE + vid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	mlxsw_sp_port->nr_vfids++;
	set_bit(vid, mlxsw_sp_port->active_vfids);

	return 0;

/* Unwind for failures during vFID/flood-table setup only. */
err_flood_table_config:
err_flood_table_alloc:
	mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
	return err;

/* Unwind chain for failures after the (optional) Virtual mode switch;
 * each label undoes the step above it, falling through to the next.
 * NOTE(review): the final vlan_mode_trans() runs even when this was not
 * the port's first vFID (nr_vfids > 0), which would take the port out
 * of Virtual mode while other vFIDs still exist — verify intent.
 */
err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     MLXSW_SP_VFID_BASE + vid, vid);
err_port_vid_to_fid_set:
	mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
	return err;
}
/* ndo_vlan_rx_kill_vid: remove a VLAN upper from the port.
 *
 * Reverses mlxsw_sp_port_add_vid() step by step: STP discarding, drop
 * VLAN membership, re-enable learning, unmap {Port, VID} from the vFID,
 * and, when this was the port's last vFID, transition the port back to
 * VLAN mode. Errors abort mid-sequence without rolling back the steps
 * already done. The vFID itself is intentionally not destroyed here.
 */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false, MLXSW_SP_VFID_BASE + vid,
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, MLXSW_SP_VFID_BASE + vid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (mlxsw_sp_port->nr_vfids == 1) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port->nr_vfids--;
	clear_bit(vid, mlxsw_sp_port->active_vfids);

	return 0;
}
/* Netdev operations for a Spectrum port. FDB and bridge operations are
 * delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
};
  632. static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
  633. struct ethtool_drvinfo *drvinfo)
  634. {
  635. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  636. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  637. strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
  638. strlcpy(drvinfo->version, mlxsw_sp_driver_version,
  639. sizeof(drvinfo->version));
  640. snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
  641. "%d.%d.%d",
  642. mlxsw_sp->bus_info->fw_rev.major,
  643. mlxsw_sp->bus_info->fw_rev.minor,
  644. mlxsw_sp->bus_info->fw_rev.subminor);
  645. strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
  646. sizeof(drvinfo->bus_info));
  647. }
/* Maps an ethtool statistic name to the accessor that extracts the
 * corresponding counter from a raw PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];	/* name shown by "ethtool -S" */
	u64 (*getter)(char *payload);	/* reads the counter from PPCNT */
};
/* IEEE 802.3 counter group of the PPCNT register, in the order they are
 * reported to ethtool. Index i of this table corresponds to data[i] in
 * mlxsw_sp_port_get_stats().
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		/* Note: name says "xmitted" but the getter reads the
		 * "transmitted" PPCNT field; same counter, historic naming.
		 */
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
  731. static void mlxsw_sp_port_get_strings(struct net_device *dev,
  732. u32 stringset, u8 *data)
  733. {
  734. u8 *p = data;
  735. int i;
  736. switch (stringset) {
  737. case ETH_SS_STATS:
  738. for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
  739. memcpy(p, mlxsw_sp_port_hw_stats[i].str,
  740. ETH_GSTRING_LEN);
  741. p += ETH_GSTRING_LEN;
  742. }
  743. break;
  744. }
  745. }
  746. static void mlxsw_sp_port_get_stats(struct net_device *dev,
  747. struct ethtool_stats *stats, u64 *data)
  748. {
  749. struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
  750. struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  751. char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
  752. int i;
  753. int err;
  754. mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
  755. err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
  756. for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
  757. data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
  758. }
  759. static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
  760. {
  761. switch (sset) {
  762. case ETH_SS_STATS:
  763. return MLXSW_SP_PORT_HW_STATS_LEN;
  764. default:
  765. return -EOPNOTSUPP;
  766. }
  767. }
/* Describes one hardware link mode: the PTYS protocol bit(s) it maps to
 * and its ethtool representation. 'supported'/'advertised' may be 0 when
 * ethtool has no matching SUPPORTED_*/ADVERTISED_* bit for the mode.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;	/* PTYS eth_proto bit(s) for this mode */
	u32 supported;	/* ethtool SUPPORTED_* bit, or 0 */
	u32 advertised;	/* ethtool ADVERTISED_* bit, or 0 */
	u32 speed;	/* link speed in Mb/s */
};
/* PTYS protocol bits <-> ethtool link mode table. Entries without
 * 'supported'/'advertised' have no legacy ethtool bit to map to.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		/* No SUPPORTED_/ADVERTISED_ 25G bits in legacy ethtool. */
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		/* No SUPPORTED_/ADVERTISED_ 50G bits in legacy ethtool. */
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		/* No SUPPORTED_/ADVERTISED_ 100G bits in legacy ethtool. */
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
  871. static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
  872. {
  873. if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
  874. MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
  875. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
  876. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
  877. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
  878. MLXSW_REG_PTYS_ETH_SPEED_SGMII))
  879. return SUPPORTED_FIBRE;
  880. if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
  881. MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
  882. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
  883. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
  884. MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
  885. return SUPPORTED_Backplane;
  886. return 0;
  887. }
  888. static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
  889. {
  890. u32 modes = 0;
  891. int i;
  892. for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
  893. if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
  894. modes |= mlxsw_sp_port_link_mode[i].supported;
  895. }
  896. return modes;
  897. }
  898. static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
  899. {
  900. u32 modes = 0;
  901. int i;
  902. for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
  903. if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
  904. modes |= mlxsw_sp_port_link_mode[i].advertised;
  905. }
  906. return modes;
  907. }
  908. static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
  909. struct ethtool_cmd *cmd)
  910. {
  911. u32 speed = SPEED_UNKNOWN;
  912. u8 duplex = DUPLEX_UNKNOWN;
  913. int i;
  914. if (!carrier_ok)
  915. goto out;
  916. for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
  917. if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
  918. speed = mlxsw_sp_port_link_mode[i].speed;
  919. duplex = DUPLEX_FULL;
  920. break;
  921. }
  922. }
  923. out:
  924. ethtool_cmd_speed_set(cmd, speed);
  925. cmd->duplex = duplex;
  926. }
  927. static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
  928. {
  929. if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
  930. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
  931. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
  932. MLXSW_REG_PTYS_ETH_SPEED_SGMII))
  933. return PORT_FIBRE;
  934. if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
  935. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
  936. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
  937. return PORT_DA;
  938. if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
  939. MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
  940. MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
  941. MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
  942. return PORT_NONE;
  943. return PORT_OTHER;
  944. }
/* ethtool get_settings: query the PTYS register and translate its
 * capability/admin/operational masks into the legacy ethtool_cmd fields.
 * Returns 0 on success or a negative errno from the register query.
 */
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* With the link down the operational mask is empty; fall back to
	 * the capability mask so a plausible connector type is reported.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
  975. static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
  976. {
  977. u32 ptys_proto = 0;
  978. int i;
  979. for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
  980. if (advertising & mlxsw_sp_port_link_mode[i].advertised)
  981. ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
  982. }
  983. return ptys_proto;
  984. }
  985. static u32 mlxsw_sp_to_ptys_speed(u32 speed)
  986. {
  987. u32 ptys_proto = 0;
  988. int i;
  989. for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
  990. if (speed == mlxsw_sp_port_link_mode[i].speed)
  991. ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
  992. }
  993. return ptys_proto;
  994. }
/* ethtool set_settings: program a new admin protocol mask via PTYS.
 * With autoneg enabled the requested advertised modes are used, otherwise
 * all modes matching the forced speed. If the port is operationally up,
 * it is toggled down/up so the new protocol takes effect.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);
	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	/* Query current capability/admin masks first. */
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	/* Never request modes the hardware cannot do. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;	/* nothing to change */

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;	/* link is down; new proto applies on next up */

	/* Bounce the port so the new protocol mask takes effect. */
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}
	return 0;
}
/* ethtool callbacks for a Spectrum front-panel port. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sp_port_get_strings,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
  1059. static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
  1060. {
  1061. struct mlxsw_sp_port *mlxsw_sp_port;
  1062. struct net_device *dev;
  1063. bool usable;
  1064. int err;
  1065. dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
  1066. if (!dev)
  1067. return -ENOMEM;
  1068. mlxsw_sp_port = netdev_priv(dev);
  1069. mlxsw_sp_port->dev = dev;
  1070. mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
  1071. mlxsw_sp_port->local_port = local_port;
  1072. mlxsw_sp_port->learning = 1;
  1073. mlxsw_sp_port->learning_sync = 1;
  1074. mlxsw_sp_port->uc_flood = 1;
  1075. mlxsw_sp_port->pvid = 1;
  1076. mlxsw_sp_port->pcpu_stats =
  1077. netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
  1078. if (!mlxsw_sp_port->pcpu_stats) {
  1079. err = -ENOMEM;
  1080. goto err_alloc_stats;
  1081. }
  1082. dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
  1083. dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
  1084. err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
  1085. if (err) {
  1086. dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
  1087. mlxsw_sp_port->local_port);
  1088. goto err_dev_addr_init;
  1089. }
  1090. netif_carrier_off(dev);
  1091. dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
  1092. NETIF_F_HW_VLAN_CTAG_FILTER;
  1093. /* Each packet needs to have a Tx header (metadata) on top all other
  1094. * headers.
  1095. */
  1096. dev->hard_header_len += MLXSW_TXHDR_LEN;
  1097. err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
  1098. if (err) {
  1099. dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
  1100. mlxsw_sp_port->local_port);
  1101. goto err_port_module_check;
  1102. }
  1103. if (!usable) {
  1104. dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
  1105. mlxsw_sp_port->local_port);
  1106. goto port_not_usable;
  1107. }
  1108. err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
  1109. if (err) {
  1110. dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
  1111. mlxsw_sp_port->local_port);
  1112. goto err_port_system_port_mapping_set;
  1113. }
  1114. err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
  1115. if (err) {
  1116. dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
  1117. mlxsw_sp_port->local_port);
  1118. goto err_port_swid_set;
  1119. }
  1120. err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
  1121. if (err) {
  1122. dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
  1123. mlxsw_sp_port->local_port);
  1124. goto err_port_mtu_set;
  1125. }
  1126. err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
  1127. if (err)
  1128. goto err_port_admin_status_set;
  1129. err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
  1130. if (err) {
  1131. dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
  1132. mlxsw_sp_port->local_port);
  1133. goto err_port_buffers_init;
  1134. }
  1135. mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
  1136. err = register_netdev(dev);
  1137. if (err) {
  1138. dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
  1139. mlxsw_sp_port->local_port);
  1140. goto err_register_netdev;
  1141. }
  1142. err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
  1143. if (err)
  1144. goto err_port_vlan_init;
  1145. mlxsw_sp->ports[local_port] = mlxsw_sp_port;
  1146. return 0;
  1147. err_port_vlan_init:
  1148. unregister_netdev(dev);
  1149. err_register_netdev:
  1150. err_port_buffers_init:
  1151. err_port_admin_status_set:
  1152. err_port_mtu_set:
  1153. err_port_swid_set:
  1154. err_port_system_port_mapping_set:
  1155. port_not_usable:
  1156. err_port_module_check:
  1157. err_dev_addr_init:
  1158. free_percpu(mlxsw_sp_port->pcpu_stats);
  1159. err_alloc_stats:
  1160. free_netdev(dev);
  1161. return err;
  1162. }
/* Destroy every vFID still marked active in the device-wide bitmap.
 * Called during teardown and on init error paths.
 */
static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 vfid;

	for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
}
/* Tear down a single port created by mlxsw_sp_port_create(). Safe to call
 * for ports that were skipped at creation (ports[local_port] == NULL).
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	/* Remove the implicit PVID=1 VLAN interface before unregistering. */
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	free_netdev(mlxsw_sp_port->dev);
}
  1180. static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
  1181. {
  1182. int i;
  1183. for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
  1184. mlxsw_sp_port_remove(mlxsw_sp, i);
  1185. kfree(mlxsw_sp->ports);
  1186. }
  1187. static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
  1188. {
  1189. size_t alloc_size;
  1190. int i;
  1191. int err;
  1192. alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
  1193. mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
  1194. if (!mlxsw_sp->ports)
  1195. return -ENOMEM;
  1196. for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
  1197. err = mlxsw_sp_port_create(mlxsw_sp, i);
  1198. if (err)
  1199. goto err_port_create;
  1200. }
  1201. return 0;
  1202. err_port_create:
  1203. for (i--; i >= 1; i--)
  1204. mlxsw_sp_port_remove(mlxsw_sp, i);
  1205. kfree(mlxsw_sp->ports);
  1206. return err;
  1207. }
  1208. static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
  1209. char *pude_pl, void *priv)
  1210. {
  1211. struct mlxsw_sp *mlxsw_sp = priv;
  1212. struct mlxsw_sp_port *mlxsw_sp_port;
  1213. enum mlxsw_reg_pude_oper_status status;
  1214. u8 local_port;
  1215. local_port = mlxsw_reg_pude_local_port_get(pude_pl);
  1216. mlxsw_sp_port = mlxsw_sp->ports[local_port];
  1217. if (!mlxsw_sp_port) {
  1218. dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
  1219. local_port);
  1220. return;
  1221. }
  1222. status = mlxsw_reg_pude_oper_status_get(pude_pl);
  1223. if (status == MLXSW_PORT_OPER_STATUS_UP) {
  1224. netdev_info(mlxsw_sp_port->dev, "link up\n");
  1225. netif_carrier_on(mlxsw_sp_port->dev);
  1226. } else {
  1227. netdev_info(mlxsw_sp_port->dev, "link down\n");
  1228. netif_carrier_off(mlxsw_sp_port->dev);
  1229. }
  1230. }
/* Event listener registered for the PUDE (port up/down event) trap. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
  1235. static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
  1236. enum mlxsw_event_trap_id trap_id)
  1237. {
  1238. struct mlxsw_event_listener *el;
  1239. char hpkt_pl[MLXSW_REG_HPKT_LEN];
  1240. int err;
  1241. switch (trap_id) {
  1242. case MLXSW_TRAP_ID_PUDE:
  1243. el = &mlxsw_sp_pude_event;
  1244. break;
  1245. }
  1246. err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
  1247. if (err)
  1248. return err;
  1249. mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
  1250. err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
  1251. if (err)
  1252. goto err_event_trap_set;
  1253. return 0;
  1254. err_event_trap_set:
  1255. mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
  1256. return err;
  1257. }
  1258. static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
  1259. enum mlxsw_event_trap_id trap_id)
  1260. {
  1261. struct mlxsw_event_listener *el;
  1262. switch (trap_id) {
  1263. case MLXSW_TRAP_ID_PUDE:
  1264. el = &mlxsw_sp_pude_event;
  1265. break;
  1266. }
  1267. mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
  1268. }
  1269. static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
  1270. void *priv)
  1271. {
  1272. struct mlxsw_sp *mlxsw_sp = priv;
  1273. struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
  1274. struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
  1275. if (unlikely(!mlxsw_sp_port)) {
  1276. dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
  1277. local_port);
  1278. return;
  1279. }
  1280. skb->dev = mlxsw_sp_port->dev;
  1281. pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
  1282. u64_stats_update_begin(&pcpu_stats->syncp);
  1283. pcpu_stats->rx_packets++;
  1284. pcpu_stats->rx_bytes += skb->len;
  1285. u64_stats_update_end(&pcpu_stats->syncp);
  1286. skb->protocol = eth_type_trans(skb, skb->dev);
  1287. netif_receive_skb(skb);
  1288. }
/* Packet traps delivered to the CPU; all share the same RX handler and
 * match on any local port.
 */
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
/* Configure the RX and CTRL trap groups and register every RX listener,
 * setting each trap's action to TRAP_TO_CPU. On failure the traps and
 * listeners registered so far are rolled back (trap action restored to
 * FORWARD) in reverse order. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Listener i registered but its trap action failed: drop it first. */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	/* Undo fully-initialized entries [0, i) in reverse order. */
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
  1404. static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
  1405. {
  1406. char hpkt_pl[MLXSW_REG_HPKT_LEN];
  1407. int i;
  1408. for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
  1409. mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
  1410. mlxsw_sp_rx_listener[i].trap_id);
  1411. mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
  1412. mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
  1413. &mlxsw_sp_rx_listener[i],
  1414. mlxsw_sp);
  1415. }
  1416. }
  1417. static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
  1418. enum mlxsw_reg_sfgc_type type,
  1419. enum mlxsw_reg_sfgc_bridge_type bridge_type)
  1420. {
  1421. enum mlxsw_flood_table_type table_type;
  1422. enum mlxsw_sp_flood_table flood_table;
  1423. char sfgc_pl[MLXSW_REG_SFGC_LEN];
  1424. if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
  1425. table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
  1426. flood_table = 0;
  1427. } else {
  1428. table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
  1429. if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
  1430. flood_table = MLXSW_SP_FLOOD_TABLE_UC;
  1431. else
  1432. flood_table = MLXSW_SP_FLOOD_TABLE_BM;
  1433. }
  1434. mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
  1435. flood_table);
  1436. return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
  1437. }
  1438. static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
  1439. {
  1440. int type, err;
  1441. /* For non-offloaded netdevs, flood all traffic types to CPU
  1442. * port.
  1443. */
  1444. for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
  1445. if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
  1446. continue;
  1447. err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
  1448. MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
  1449. if (err)
  1450. return err;
  1451. }
  1452. /* For bridged ports, use one flooding table for unknown unicast
  1453. * traffic and a second table for unregistered multicast and
  1454. * broadcast.
  1455. */
  1456. for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
  1457. if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
  1458. continue;
  1459. err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
  1460. MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
  1461. if (err)
  1462. return err;
  1463. }
  1464. return 0;
  1465. }
/* Driver init callback: bring up the whole ASIC — base MAC, ports, PUDE
 * event, packet traps, flood tables, shared buffers and switchdev.
 * Unwinds in reverse order on failure. Returns 0 or a negative errno.
 */
static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
err_ports_create:
	/* vFIDs may have been created by port VLAN init; destroy them. */
	mlxsw_sp_vfids_fini(mlxsw_sp);
	return err;
}
/* Driver fini callback: tear down in the reverse order of mlxsw_sp_init(). */
static void mlxsw_sp_fini(void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_vfids_fini(mlxsw_sp);
}
/* Device configuration profile handed to the core at init time. Each
 * used_* flag tells the firmware the matching max_* value is valid.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,	/* VEPA not used */
	.used_max_lag			= 1,
	.max_lag			= 64,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= 16,
	.used_max_mid			= 1,
	.max_mid			= 7000,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	/* One flood table per FID-offset flood type (UC and BM/MC). */
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 1,
	.fid_flood_table_size		= VLAN_N_VID,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,	/* no InfiniBand */
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
/* Registration record handed to the mlxsw core: binds the Spectrum device
 * kind to this driver's init/fini callbacks, per-instance private data
 * size, TX header construction and the configuration profile above.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind			= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner			= THIS_MODULE,
	.priv_size		= sizeof(struct mlxsw_sp),
	.init			= mlxsw_sp_init,
	.fini			= mlxsw_sp_fini,
	.txhdr_construct	= mlxsw_sp_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sp_config_profile,
};
  1575. static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
  1576. {
  1577. return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
  1578. }
  1579. static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
  1580. {
  1581. struct net_device *dev = mlxsw_sp_port->dev;
  1582. int err;
  1583. /* When port is not bridged untagged packets are tagged with
  1584. * PVID=VID=1, thereby creating an implicit VLAN interface in
  1585. * the device. Remove it and let bridge code take care of its
  1586. * own VLANs.
  1587. */
  1588. err = mlxsw_sp_port_kill_vid(dev, 0, 1);
  1589. if (err)
  1590. netdev_err(dev, "Failed to remove VID 1\n");
  1591. return err;
  1592. }
  1593. static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
  1594. {
  1595. struct net_device *dev = mlxsw_sp_port->dev;
  1596. int err;
  1597. /* Add implicit VLAN interface in the device, so that untagged
  1598. * packets will be classified to the default vFID.
  1599. */
  1600. err = mlxsw_sp_port_add_vid(dev, 0, 1);
  1601. if (err)
  1602. netdev_err(dev, "Failed to add VID 1\n");
  1603. return err;
  1604. }
  1605. static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
  1606. struct net_device *br_dev)
  1607. {
  1608. return !mlxsw_sp->master_bridge.dev ||
  1609. mlxsw_sp->master_bridge.dev == br_dev;
  1610. }
  1611. static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
  1612. struct net_device *br_dev)
  1613. {
  1614. mlxsw_sp->master_bridge.dev = br_dev;
  1615. mlxsw_sp->master_bridge.ref_count++;
  1616. }
  1617. static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
  1618. struct net_device *br_dev)
  1619. {
  1620. if (--mlxsw_sp->master_bridge.ref_count == 0)
  1621. mlxsw_sp->master_bridge.dev = NULL;
  1622. }
/* Netdevice notifier callback: tracks enslavement of our ports to Linux
 * bridges.
 *
 * PRECHANGEUPPER is used to veto (NOTIFY_BAD) linking a port to a second
 * bridge, since the hardware supports only one master bridge per switch.
 * CHANGEUPPER performs the actual join/leave bookkeeping.
 *
 * Returns NOTIFY_DONE for events we ignore or handled, NOTIFY_BAD to veto.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	/* Only react to events on netdevs created by this driver. */
	if (!mlxsw_sp_port_dev_check(dev))
		return NOTIFY_DONE;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	/* ptr is a changeupper info for both events handled below. */
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (info->master && info->linking &&
		    netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->master &&
		    netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				/* NOTE(review): on join failure the error is
				 * only logged and the refcount/bridged state
				 * is still updated — the enslavement cannot
				 * be vetoed at this point, so this looks like
				 * deliberate best-effort; confirm.
				 */
				if (err)
					netdev_err(dev, "Failed to join bridge\n");
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
				mlxsw_sp_port->bridged = 1;
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
				if (err)
					netdev_err(dev, "Failed to leave bridge\n");
				mlxsw_sp_port->bridged = 0;
				/* dec after clearing flag: reverse of the
				 * linking branch above.
				 */
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
			}
		}
		break;
	}

	return NOTIFY_DONE;
}
/* Notifier block registered at module init to receive netdevice events. */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
  1671. static int __init mlxsw_sp_module_init(void)
  1672. {
  1673. int err;
  1674. register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
  1675. err = mlxsw_core_driver_register(&mlxsw_sp_driver);
  1676. if (err)
  1677. goto err_core_driver_register;
  1678. return 0;
  1679. err_core_driver_register:
  1680. unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
  1681. return err;
  1682. }
/* Module exit: unregister in reverse order of mlxsw_sp_module_init() —
 * driver first, then the netdevice notifier.
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Device alias so the mlxsw core can auto-load this module for
 * Spectrum-kind devices.
 */
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);