/*
 * br_vlan.c - bridge VLAN (802.1Q/802.1ad) filtering support
 */
  1. #include <linux/kernel.h>
  2. #include <linux/netdevice.h>
  3. #include <linux/rtnetlink.h>
  4. #include <linux/slab.h>
  5. #include <net/switchdev.h>
  6. #include "br_private.h"
  7. static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  8. const void *ptr)
  9. {
  10. const struct net_bridge_vlan *vle = ptr;
  11. u16 vid = *(u16 *)arg->key;
  12. return vle->vid != vid;
  13. }
/* Parameters for the per-group VLAN rhashtable, keyed by the 16-bit vid. */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.locks_mul = 1,
	.max_size = VLAN_N_VID,	/* bounded by the 4096 possible vids */
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
/* Look up a vlan entry by @vid in @tbl; returns NULL if not present. */
static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
/* Make @vid the group's pvid (port vlan id), unless it already is.
 * The write barrier orders prior vlan-entry stores before the pvid
 * becomes visible to lockless readers.
 */
static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return;

	smp_wmb();
	vg->pvid = vid;
}
/* Clear the group's pvid, but only if it currently is @vid.
 * Barrier mirrors __vlan_add_pvid() for lockless readers.
 */
static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}
  42. static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  43. {
  44. struct net_bridge_vlan_group *vg;
  45. if (br_vlan_is_master(v))
  46. vg = br_vlan_group(v->br);
  47. else
  48. vg = nbp_vlan_group(v->port);
  49. if (flags & BRIDGE_VLAN_INFO_PVID)
  50. __vlan_add_pvid(vg, v->vid);
  51. else
  52. __vlan_delete_pvid(vg, v->vid);
  53. if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  54. v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  55. else
  56. v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  57. }
  58. static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  59. u16 vid, u16 flags)
  60. {
  61. struct switchdev_obj_port_vlan v = {
  62. .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  63. .flags = flags,
  64. .vid_begin = vid,
  65. .vid_end = vid,
  66. };
  67. int err;
  68. /* Try switchdev op first. In case it is not supported, fallback to
  69. * 8021q add.
  70. */
  71. err = switchdev_port_obj_add(dev, &v.obj);
  72. if (err == -EOPNOTSUPP)
  73. return vlan_vid_add(dev, br->vlan_proto, vid);
  74. return err;
  75. }
  76. static void __vlan_add_list(struct net_bridge_vlan *v)
  77. {
  78. struct net_bridge_vlan_group *vg;
  79. struct list_head *headp, *hpos;
  80. struct net_bridge_vlan *vent;
  81. if (br_vlan_is_master(v))
  82. vg = br_vlan_group(v->br);
  83. else
  84. vg = nbp_vlan_group(v->port);
  85. headp = &vg->vlan_list;
  86. list_for_each_prev(hpos, headp) {
  87. vent = list_entry(hpos, struct net_bridge_vlan, vlist);
  88. if (v->vid < vent->vid)
  89. continue;
  90. else
  91. break;
  92. }
  93. list_add_rcu(&v->vlist, hpos);
  94. }
/* Unlink @v from its group's sorted vlan_list (RCU-safe removal). */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
  99. static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
  100. u16 vid)
  101. {
  102. struct switchdev_obj_port_vlan v = {
  103. .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  104. .vid_begin = vid,
  105. .vid_end = vid,
  106. };
  107. int err;
  108. /* Try switchdev op first. In case it is not supported, fallback to
  109. * 8021q del.
  110. */
  111. err = switchdev_port_obj_del(dev, &v.obj);
  112. if (err == -EOPNOTSUPP) {
  113. vlan_vid_del(dev, br->vlan_proto, vid);
  114. return 0;
  115. }
  116. return err;
  117. }
/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning; the caller
 * drops it with br_vlan_put_master().  Returns NULL on allocation failure.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		/* the entry we just added must be findable */
		if (WARN_ON(!masterv))
			return NULL;
	}
	atomic_inc(&masterv->refcnt);

	return masterv;
}
/* Drop a reference on a master vlan; on the final put the entry is
 * removed from the bridge's hash/list and freed after an RCU grace
 * period.  No-op if @masterv is not actually a master entry.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (atomic_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		kfree_rcu(masterv, rcu);
	}
}
  151. /* This is the shared VLAN add function which works for both ports and bridge
  152. * devices. There are four possible calls to this function in terms of the
  153. * vlan entry type:
  154. * 1. vlan is being added on a port (no master flags, global entry exists)
  155. * 2. vlan is being added on a bridge (both master and brentry flags)
  156. * 3. vlan is being added on a port, but a global entry didn't exist which
  157. * is being created right now (master flag set, brentry flag unset), the
  158. * global entry is used for global per-vlan features, but not for filtering
  159. * 4. same as 3 but with both master and brentry flags set so the entry
  160. * will be used for filtering in both the port and the bridge
  161. */
  162. static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
  163. {
  164. struct net_bridge_vlan *masterv = NULL;
  165. struct net_bridge_port *p = NULL;
  166. struct net_bridge_vlan_group *vg;
  167. struct net_device *dev;
  168. struct net_bridge *br;
  169. int err;
  170. if (br_vlan_is_master(v)) {
  171. br = v->br;
  172. dev = br->dev;
  173. vg = br_vlan_group(br);
  174. } else {
  175. p = v->port;
  176. br = p->br;
  177. dev = p->dev;
  178. vg = nbp_vlan_group(p);
  179. }
  180. if (p) {
  181. /* Add VLAN to the device filter if it is supported.
  182. * This ensures tagged traffic enters the bridge when
  183. * promiscuous mode is disabled by br_manage_promisc().
  184. */
  185. err = __vlan_vid_add(dev, br, v->vid, flags);
  186. if (err)
  187. goto out;
  188. /* need to work on the master vlan too */
  189. if (flags & BRIDGE_VLAN_INFO_MASTER) {
  190. err = br_vlan_add(br, v->vid, flags |
  191. BRIDGE_VLAN_INFO_BRENTRY);
  192. if (err)
  193. goto out_filt;
  194. }
  195. masterv = br_vlan_get_master(br, v->vid);
  196. if (!masterv)
  197. goto out_filt;
  198. v->brvlan = masterv;
  199. }
  200. /* Add the dev mac and count the vlan only if it's usable */
  201. if (br_vlan_should_use(v)) {
  202. err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
  203. if (err) {
  204. br_err(br, "failed insert local address into bridge forwarding table\n");
  205. goto out_filt;
  206. }
  207. vg->num_vlans++;
  208. }
  209. err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
  210. br_vlan_rht_params);
  211. if (err)
  212. goto out_fdb_insert;
  213. __vlan_add_list(v);
  214. __vlan_add_flags(v, flags);
  215. out:
  216. return err;
  217. out_fdb_insert:
  218. if (br_vlan_should_use(v)) {
  219. br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
  220. vg->num_vlans--;
  221. }
  222. out_filt:
  223. if (p) {
  224. __vlan_vid_del(dev, br, v->vid);
  225. if (masterv) {
  226. br_vlan_put_master(masterv);
  227. v->brvlan = NULL;
  228. }
  229. }
  230. goto out;
  231. }
/* Shared VLAN delete for port and bridge entries.  Clears the pvid if it
 * pointed at this vlan, removes the device filter entry for port vlans,
 * drops the filtering accounting and releases the global (master)
 * context reference.  Port entries are freed here (after RCU); master
 * entries are freed by the final br_vlan_put_master().
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	}

	/* the entry stops participating in filtering */
	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	/* only non-master (port) entries are removed and freed here */
	if (masterv != v) {
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
/* Free a vlan group once all entries are gone.  Callers must have
 * unpublished the group pointer and waited for an RCU grace period.
 */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	kfree(vg);
}
/* Delete every vlan in @vg; the pvid is cleared first so no reader sees
 * a pvid pointing at a removed entry.
 */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
  278. struct sk_buff *br_handle_vlan(struct net_bridge *br,
  279. struct net_bridge_vlan_group *vg,
  280. struct sk_buff *skb)
  281. {
  282. struct net_bridge_vlan *v;
  283. u16 vid;
  284. /* If this packet was not filtered at input, let it pass */
  285. if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
  286. goto out;
  287. /* At this point, we know that the frame was filtered and contains
  288. * a valid vlan id. If the vlan id has untagged flag set,
  289. * send untagged; otherwise, send tagged.
  290. */
  291. br_vlan_get_tag(skb, &vid);
  292. v = br_vlan_find(vg, vid);
  293. /* Vlan entry must be configured at this point. The
  294. * only exception is the bridge is set in promisc mode and the
  295. * packet is destined for the bridge device. In this case
  296. * pass the packet as is.
  297. */
  298. if (!v || !br_vlan_should_use(v)) {
  299. if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
  300. goto out;
  301. } else {
  302. kfree_skb(skb);
  303. return NULL;
  304. }
  305. }
  306. if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
  307. skb->vlan_tci = 0;
  308. out:
  309. return skb;
  310. }
/* Called under RCU.
 * Validate the vid of an ingress frame against @vg and normalise the
 * skb's tag: untagged and priority-tagged frames are assigned the pvid.
 * Returns true if the frame may enter; on false the skb was consumed.
 */
static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
			      struct sk_buff *skb, u16 *vid)
{
	const struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			/* treat the re-tagged frame as untagged from here */
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port.  That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port.  Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, We know that skb->vlan_tci had
			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		return true;
	}

	/* Frame had a valid vlan tag.  See if vlan is allowed */
	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_should_use(v))
		return true;
drop:
	kfree_skb(skb);
	return false;
}
  380. bool br_allowed_ingress(const struct net_bridge *br,
  381. struct net_bridge_vlan_group *vg, struct sk_buff *skb,
  382. u16 *vid)
  383. {
  384. /* If VLAN filtering is disabled on the bridge, all packets are
  385. * permitted.
  386. */
  387. if (!br->vlan_enabled) {
  388. BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
  389. return true;
  390. }
  391. return __allowed_ingress(vg, br->vlan_proto, skb, vid);
  392. }
  393. /* Called under RCU. */
  394. bool br_allowed_egress(struct net_bridge_vlan_group *vg,
  395. const struct sk_buff *skb)
  396. {
  397. const struct net_bridge_vlan *v;
  398. u16 vid;
  399. /* If this packet was not filtered at input, let it pass */
  400. if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
  401. return true;
  402. br_vlan_get_tag(skb, &vid);
  403. v = br_vlan_find(vg, vid);
  404. if (v && br_vlan_should_use(v))
  405. return true;
  406. return false;
  407. }
  408. /* Called under RCU */
  409. bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
  410. {
  411. struct net_bridge_vlan_group *vg;
  412. struct net_bridge *br = p->br;
  413. /* If filtering was disabled at input, let it pass. */
  414. if (!br->vlan_enabled)
  415. return true;
  416. vg = nbp_vlan_group_rcu(p);
  417. if (!vg || !vg->num_vlans)
  418. return false;
  419. if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
  420. *vid = 0;
  421. if (!*vid) {
  422. *vid = br_get_pvid(vg);
  423. if (!*vid)
  424. return false;
  425. return true;
  426. }
  427. if (br_vlan_find(vg, *vid))
  428. return true;
  429. return false;
  430. }
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Add a bridge-global vlan entry, or update flags on an existing one.
 * An existing master entry kept only for ports can be upgraded to a
 * real bridge entry (brentry) here.
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan) {
		if (!br_vlan_is_brentry(vlan)) {
			/* Trying to change flags of non-existent bridge vlan */
			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
				return -EINVAL;
			/* It was only kept for port vlans, now make it real */
			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
					    vlan->vid);
			if (ret) {
				br_err(br, "failed insert local address into bridge forwarding table\n");
				return ret;
			}
			/* the entry now also counts for bridge filtering */
			atomic_inc(&vlan->refcnt);
			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			vg->num_vlans++;
		}
		__vlan_add_flags(vlan, flags);
		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	/* bridge entries are always master; pvid is applied via flags in
	 * __vlan_add(), never stored on the entry itself
	 */
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		atomic_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret)
		kfree(vlan);

	return ret;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Delete a vlan from the bridge itself; only real bridge entries
 * (brentry) qualify.  Bridge-local fdb entries are flushed first.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	return __vlan_del(v);
}
/* Remove all bridge vlans and tear down the bridge's vlan group:
 * unpublish the pointer, wait out RCU readers, then free the group.
 */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
  501. struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
  502. {
  503. if (!vg)
  504. return NULL;
  505. return br_vlan_lookup(&vg->vlan_hash, vid);
  506. }
  507. /* Must be protected by RTNL. */
  508. static void recalculate_group_addr(struct net_bridge *br)
  509. {
  510. if (br->group_addr_set)
  511. return;
  512. spin_lock_bh(&br->lock);
  513. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
  514. /* Bridge Group Address */
  515. br->group_addr[5] = 0x00;
  516. } else { /* vlan_enabled && ETH_P_8021AD */
  517. /* Provider Bridge Group Address */
  518. br->group_addr[5] = 0x08;
  519. }
  520. spin_unlock_bh(&br->lock);
  521. }
  522. /* Must be protected by RTNL. */
  523. void br_recalculate_fwd_mask(struct net_bridge *br)
  524. {
  525. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
  526. br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
  527. else /* vlan_enabled && ETH_P_8021AD */
  528. br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
  529. ~(1u << br->group_addr[5]);
  530. }
/* Switch vlan filtering on/off for @br and refresh everything that
 * depends on it: port promiscuity, the bridge group address, and the
 * group forwarding mask.  Caller holds RTNL.  Always returns 0.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	if (br->vlan_enabled == val)
		return 0;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}
/* Entry point for toggling vlan filtering.  Restarts the syscall rather
 * than sleeping when RTNL is contended.
 */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	if (!rtnl_trylock())
		return restart_syscall();

	__br_vlan_filter_toggle(br, val);
	rtnl_unlock();

	return 0;
}
/* Change the bridge vlan protocol (802.1Q <-> 802.1ad).  Caller holds
 * RTNL.  All per-port vlans are first installed in the device filters
 * under the new proto; only then is the proto flipped and the old-proto
 * entries removed.  On failure every entry added so far is rolled back.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* unwind the partially processed port ... */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	/* ... and all fully processed ports before it */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
  588. int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
  589. {
  590. int err;
  591. if (val != ETH_P_8021Q && val != ETH_P_8021AD)
  592. return -EPROTONOSUPPORT;
  593. if (!rtnl_trylock())
  594. return restart_syscall();
  595. err = __br_vlan_set_proto(br, htons(val));
  596. rtnl_unlock();
  597. return err;
  598. }
  599. static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  600. {
  601. struct net_bridge_vlan *v;
  602. if (vid != vg->pvid)
  603. return false;
  604. v = br_vlan_lookup(&vg->vlan_hash, vid);
  605. if (v && br_vlan_should_use(v) &&
  606. (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
  607. return true;
  608. return false;
  609. }
  610. static void br_vlan_disable_default_pvid(struct net_bridge *br)
  611. {
  612. struct net_bridge_port *p;
  613. u16 pvid = br->default_pvid;
  614. /* Disable default_pvid on all ports where it is still
  615. * configured.
  616. */
  617. if (vlan_default_pvid(br_vlan_group(br), pvid))
  618. br_vlan_delete(br, pvid);
  619. list_for_each_entry(p, &br->port_list, list) {
  620. if (vlan_default_pvid(nbp_vlan_group(p), pvid))
  621. nbp_vlan_delete(p, pvid);
  622. }
  623. br->default_pvid = 0;
  624. }
/* Switch default_pvid from its current value to @pvid on the bridge and
 * on every port whose configuration still matches the auto-generated
 * default (user-configured vlans are left alone).  A bitmap tracks what
 * was changed (bit 0 = bridge itself, bit N = port_no N) so a failure
 * mid-way can restore the previous state.  @pvid == 0 disables the
 * feature entirely.  Caller holds RTNL.
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	u16 old_pvid;
	int err = 0;
	unsigned long *changed;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);	/* bit 0 records the bridge change */
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	/* restore the old pvid on every port (and the bridge) we touched */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY);
		br_vlan_delete(br, pvid);
	}
	goto out;
}
/* Entry point for changing default_pvid.  Only permitted while vlan
 * filtering is disabled; restarts the syscall when RTNL is contended.
 */
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	/* vid must fit in the 12-bit VID field */
	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();
	if (pvid == br->default_pvid)
		goto unlock;

	/* Only allow default pvid change when filtering is disabled */
	if (br->vlan_enabled) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto unlock;
	}
	err = __br_vlan_set_default_pvid(br, pvid);
unlock:
	rtnl_unlock();
	return err;
}
/* Allocate and publish the bridge's vlan group and install the initial
 * default pvid entry (vlan 1: pvid + untagged + brentry).
 * Returns 0 or a negative errno.
 * NOTE(review): on the err_vlan_add path br->vlgrp still points at the
 * freed group; presumably callers destroy the bridge on error — confirm.
 */
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	/* publish the group before adding the first vlan */
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}
  747. int nbp_vlan_init(struct net_bridge_port *p)
  748. {
  749. struct net_bridge_vlan_group *vg;
  750. int ret = -ENOMEM;
  751. vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
  752. if (!vg)
  753. goto out;
  754. ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
  755. if (ret)
  756. goto err_rhtbl;
  757. INIT_LIST_HEAD(&vg->vlan_list);
  758. rcu_assign_pointer(p->vlgrp, vg);
  759. if (p->br->default_pvid) {
  760. ret = nbp_vlan_add(p, p->br->default_pvid,
  761. BRIDGE_VLAN_INFO_PVID |
  762. BRIDGE_VLAN_INFO_UNTAGGED);
  763. if (ret)
  764. goto err_vlan_add;
  765. }
  766. out:
  767. return ret;
  768. err_vlan_add:
  769. RCU_INIT_POINTER(p->vlgrp, NULL);
  770. synchronize_rcu();
  771. rhashtable_destroy(&vg->vlan_hash);
  772. err_rhtbl:
  773. kfree(vg);
  774. goto out;
  775. }
  776. /* Must be protected by RTNL.
  777. * Must be called with vid in range from 1 to 4094 inclusive.
  778. */
  779. int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
  780. {
  781. struct net_bridge_vlan *vlan;
  782. int ret;
  783. ASSERT_RTNL();
  784. vlan = br_vlan_find(nbp_vlan_group(port), vid);
  785. if (vlan) {
  786. __vlan_add_flags(vlan, flags);
  787. return 0;
  788. }
  789. vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
  790. if (!vlan)
  791. return -ENOMEM;
  792. vlan->vid = vid;
  793. vlan->port = port;
  794. ret = __vlan_add(vlan, flags);
  795. if (ret)
  796. kfree(vlan);
  797. return ret;
  798. }
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * Delete a vlan from @port, flushing the port's fdb entries for that
 * vlan (local address and learned addresses) first.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}
/* Remove all of @port's vlans and tear down its vlan group: unpublish
 * the pointer, wait out RCU readers, then free the group.
 */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}