/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

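/* Set the STP state of all active VLANs on the port via the SPMS register.
 * The bridge's states are collapsed onto what appear to be the three states
 * the hardware distinguishes: LISTENING and LEARNING map to LEARNING,
 * BLOCKING maps to DISCARDING, and DISABLED is programmed as FORWARDING,
 * presumably because a disabled port is already blocked at the port level.
 */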
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

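/* switchdev attribute and object setters run in two phases: a prepare phase,
 * in which the driver should only validate and reserve resources, and a
 * commit phase, in which the change is applied to the device. Handlers below
 * that have nothing to validate simply return early when
 * switchdev_trans_ph_prepare() is true and do the work on commit.
 */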
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

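/* Update the port's membership in the flood tables of FIDs
 * [fid_begin, fid_end]. The UC table controls flooding of unknown unicast;
 * unless only_uc is set, the BM table (broadcast and unregistered multicast,
 * going by the name) is updated with the same range as well.
 */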
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 fid_begin, u16 fid_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 range = fid_end - fid_begin + 1;
	char *sftr_pl;
	int err;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
			    mlxsw_sp_port->local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
			    mlxsw_sp_port->local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

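/* The bridge passes the ageing time down in clock_t units, converted here to
 * seconds for the SFDAT register; e.g. a typical bridge default of 300
 * seconds arrives as 300 * USER_HZ, which clock_t_to_jiffies() and
 * jiffies_to_msecs() below turn back into 300. The range check runs in the
 * prepare phase so an out-of-range value is rejected before commit.
 */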
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

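/* The ASIC forwards within a Filtering ID (FID) rather than a VID, so each
 * bridged VLAN is backed by a FID of the same number. SFMR creates and
 * destroys the FID itself; the VID-to-FID mappings are programmed
 * separately through SVFA.
 */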
static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

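/* A port with vFIDs on it (nr_vfids != 0) apparently operates in virtual
 * mode and uses per-{port, VID} to FID mappings; otherwise the global
 * VID-to-FID mapping set up at FID creation suffices, which is why unmapping
 * is a no-op for a port in non-virtual mode.
 */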
static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (mlxsw_sp_port->nr_vfids)
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!mlxsw_sp_port->nr_vfids)
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

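/* Adding VLANs to a bridged port is done in stages: create any missing FIDs
 * along with their VID-to-FID mappings, enable flooding for the range, add
 * the port to the VLANs in SPVM-sized chunks, optionally update the PVID,
 * and finally re-apply the port's STP state so it covers the newly active
 * VIDs.
 */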
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	enum mlxsw_reg_svfa_mt mt;
	u16 vid, vid_e;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				return err;
			}
		}

		/* Set FID mapping according to port's mode */
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d", vid);
			return err;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true,
					     flag_untagged);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n",
				   vid, vid_e);
			return err;
		}
	}

	vid = vid_begin;
	if (flag_pvid && mlxsw_sp_port->pvid != vid) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n",
				   vid);
			return err;
		}
		mlxsw_sp_port->pvid = vid;
	}

	/* Change the activity bits only if the HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		set_bit(vid, mlxsw_sp_port->active_vlans);

	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					   mlxsw_sp_port->stp_state);
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 untagged_flag, pvid_flag);
}

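/* Add or remove a single unicast FDB record through the SFD register.
 * Entries mirrored from hardware learning notifications are written as
 * dynamic, so they remain subject to ageing, while user-configured entries
 * are written as static.
 */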
static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port,
				const char *mac, u16 vid, bool adding,
				bool dynamic)
{
	enum mlxsw_reg_sfd_rec_policy policy;
	enum mlxsw_reg_sfd_op op;
	char *sfd_pl;
	int err;

	if (!vid)
		vid = mlxsw_sp_port->pvid;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			   MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
	op = adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
		      MLXSW_REG_SFD_OP_WRITE_REMOVE;
	mlxsw_reg_sfd_pack(sfd_pl, op, 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy,
			      mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      mlxsw_sp_port->local_port);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd),
			      sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
				    true, false);
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, vid_e;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);
		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false,
					     false);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n",
				   vid, vid_e);
			return err;
		}
	}

	if ((mlxsw_sp_port->pvid >= vid_begin) &&
	    (mlxsw_sp_port->pvid <= vid_end)) {
		/* Default VLAN is always 1 */
		mlxsw_sp_port->pvid = 1;
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port,
					     mlxsw_sp_port->pvid);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n",
				   vid);
			return err;
		}
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d", vid);
			return err;
		}
	}

out:
	/* Change the activity bits only if the HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
				    false, false);
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

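/* Dump the hardware FDB using repeated SFD query-dump transactions; a
 * response carrying the maximum number of records means more may follow.
 * The loop below keeps issuing queries even after a callback error, since
 * the firmware dump session must be run to completion.
 */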
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb)
{
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 vid;
	u8 local_port;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
				      MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					fdb->vid = vid;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

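/* Handle a single learned / aged-out MAC record: reflect it into the device
 * FDB (adding only if the port has learning enabled) and, when learning_sync
 * is set as well, notify the bridge through the switchdev notifier chain so
 * its software FDB stays in sync.
 */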
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		return;
	}

	err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid,
				   adding && mlxsw_sp_port->learning, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) {
		struct switchdev_notifier_fdb_info info;
		unsigned long notifier_type;

		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev,
					 &info.info);
	}
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

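/* Delayed work that polls the device for FDB notification records via the
 * SFN register. Records are processed under RTNL, which the switchdev
 * notifiers invoked from the processing path expect to be held, and the
 * work then re-arms itself at the configured learning interval.
 */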
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);
	rtnl_unlock();

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

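/* Bring the port's VLAN state to a known default at init time: PVID 1 with
 * all VLANs removed (the 'init' flag skips the flood and FID teardown that
 * only applies to ports that were actually bridged), then VID 1 added so
 * that untagged packets are classified to the default vFID.
 */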
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}