/* net/sched/cls_flower.c (web-scrape artifacts removed) */
  1. /*
  2. * net/sched/cls_flower.c Flower classifier
  3. *
  4. * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/init.h>
  13. #include <linux/module.h>
  14. #include <linux/rhashtable.h>
  15. #include <linux/workqueue.h>
  16. #include <linux/if_ether.h>
  17. #include <linux/in6.h>
  18. #include <linux/ip.h>
  19. #include <net/sch_generic.h>
  20. #include <net/pkt_cls.h>
  21. #include <net/ip.h>
  22. #include <net/flow_dissector.h>
/* Flow key used both as the per-filter match value and as the shared mask.
 * The layout is significant: fl_set_masked_key()/fl_mask_eq() walk this
 * structure as an array of longs (hence the __aligned() below), and
 * fl_init_dissector() maps members to flow-dissector keys by their offset.
 */
struct fl_flow_key {
	int indev_ifindex;			/* ingress device ifindex, 0 = not matched on */
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;	/* n_proto / ip_proto */
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_addrs ipaddrs;
	union {
		/* Which member is valid is indicated by control.addr_type. */
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;	/* L4 source/destination ports */
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
/* Byte span [start, end) of fl_flow_key that carries non-zero mask bits;
 * both bounds are rounded to sizeof(long) by fl_mask_update_range().
 */
struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

/* The single mask shared by all filters of one classifier instance. */
struct fl_flow_mask {
	struct fl_flow_key key;			/* mask bits themselves */
	struct fl_flow_mask_range range;	/* relevant span within key */
	struct rcu_head rcu;
};
/* Per-classifier-instance state, published via tp->root under RCU. */
struct cls_fl_head {
	struct rhashtable ht;		/* masked key -> cls_fl_filter lookup */
	struct fl_flow_mask mask;	/* the one mask all filters share */
	struct flow_dissector dissector;
	u32 hgen;			/* last generated filter handle */
	bool mask_assigned;		/* ht/mask/dissector initialized? */
	struct list_head filters;
	struct rhashtable_params ht_params;
	union {
		/* work and rcu are used sequentially during teardown
		 * (fl_destroy_rcu() then fl_destroy_sleepable()), so
		 * they can share storage.
		 */
		struct work_struct work;
		struct rcu_head rcu;
	};
};
/* One flower filter instance. */
struct cls_fl_filter {
	struct rhash_head ht_node;	/* linkage in cls_fl_head.ht */
	struct fl_flow_key mkey;	/* key & mask, precomputed for lookup */
	struct tcf_exts exts;		/* attached actions */
	struct tcf_result res;		/* classification result (classid) */
	struct fl_flow_key key;		/* unmasked key, kept for dumping */
	struct list_head list;		/* linkage in cls_fl_head.filters */
	u32 handle;
	struct rcu_head rcu;
};
  67. static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
  68. {
  69. return mask->range.end - mask->range.start;
  70. }
  71. static void fl_mask_update_range(struct fl_flow_mask *mask)
  72. {
  73. const u8 *bytes = (const u8 *) &mask->key;
  74. size_t size = sizeof(mask->key);
  75. size_t i, first = 0, last = size - 1;
  76. for (i = 0; i < sizeof(mask->key); i++) {
  77. if (bytes[i]) {
  78. if (!first && i)
  79. first = i;
  80. last = i;
  81. }
  82. }
  83. mask->range.start = rounddown(first, sizeof(long));
  84. mask->range.end = roundup(last + 1, sizeof(long));
  85. }
  86. static void *fl_key_get_start(struct fl_flow_key *key,
  87. const struct fl_flow_mask *mask)
  88. {
  89. return (u8 *) key + mask->range.start;
  90. }
  91. static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
  92. struct fl_flow_mask *mask)
  93. {
  94. const long *lkey = fl_key_get_start(key, mask);
  95. const long *lmask = fl_key_get_start(&mask->key, mask);
  96. long *lmkey = fl_key_get_start(mkey, mask);
  97. int i;
  98. for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
  99. *lmkey++ = *lkey++ & *lmask++;
  100. }
  101. static void fl_clear_masked_range(struct fl_flow_key *key,
  102. struct fl_flow_mask *mask)
  103. {
  104. memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
  105. }
/* Classification fast path: dissect the skb into a flow key, apply the
 * shared mask and look the masked key up in the hash table.  Runs in the
 * softirq path under the RCU-bh read side.
 * Returns the action verdict, or -1 when no filter matches.
 */
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct cls_fl_filter *f;
	struct fl_flow_key skb_key;
	struct fl_flow_key skb_mkey;

	/* Only bytes inside the mask range are ever compared, so only
	 * those need clearing before dissection fills them in.
	 */
	fl_clear_masked_range(&skb_key, &head->mask);
	skb_key.indev_ifindex = skb->skb_iif;
	/* skb_flow_dissect() does not set n_proto in case an unknown protocol,
	 * so do it rather here.
	 */
	skb_key.basic.n_proto = skb->protocol;
	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);

	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);

	f = rhashtable_lookup_fast(&head->ht,
				   fl_key_get_start(&skb_mkey, &head->mask),
				   head->ht_params);
	if (f) {
		*res = f->res;
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;
}
  130. static int fl_init(struct tcf_proto *tp)
  131. {
  132. struct cls_fl_head *head;
  133. head = kzalloc(sizeof(*head), GFP_KERNEL);
  134. if (!head)
  135. return -ENOBUFS;
  136. INIT_LIST_HEAD_RCU(&head->filters);
  137. rcu_assign_pointer(tp->root, head);
  138. return 0;
  139. }
  140. static void fl_destroy_filter(struct rcu_head *head)
  141. {
  142. struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
  143. tcf_exts_destroy(&f->exts);
  144. kfree(f);
  145. }
/* Process-context half of head teardown.  rhashtable_destroy() may sleep,
 * so it cannot run from the RCU callback; fl_destroy_rcu() hands off to
 * this work item instead.
 */
static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
						work);

	/* The hash table exists only once a mask has been assigned. */
	if (head->mask_assigned)
		rhashtable_destroy(&head->ht);
	kfree(head);
	/* Drops the reference taken in fl_destroy(); must be last so the
	 * module cannot be unloaded while this work is still running.
	 */
	module_put(THIS_MODULE);
}
/* RCU callback for head teardown: the grace period has elapsed, but the
 * remaining cleanup may sleep, so defer it to a workqueue.
 */
static void fl_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);

	INIT_WORK(&head->work, fl_destroy_sleepable);
	schedule_work(&head->work);
}
/* Tear down the classifier instance.  Returns false (and does nothing)
 * when filters remain and @force is not set.
 */
static bool fl_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		/* Free only after a grace period: fl_classify() may still
		 * be traversing the list under RCU.
		 */
		call_rcu(&f->rcu, fl_destroy_filter);
	}
	/* Pin the module until the deferred teardown in fl_destroy_rcu() /
	 * fl_destroy_sleepable() has completed.
	 */
	__module_get(THIS_MODULE);
	call_rcu(&head->rcu, fl_destroy_rcu);
	return true;
}
  175. static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
  176. {
  177. struct cls_fl_head *head = rtnl_dereference(tp->root);
  178. struct cls_fl_filter *f;
  179. list_for_each_entry(f, &head->filters, list)
  180. if (f->handle == handle)
  181. return (unsigned long) f;
  182. return 0;
  183. }
/* Netlink attribute policy for TCA_FLOWER_*.  Attributes absent here are
 * rejected with their length unchecked; fixed-length binary attributes
 * (MAC/IPv6 addresses) use .len, scalars use .type.
 */
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
};
  208. static void fl_set_key_val(struct nlattr **tb,
  209. void *val, int val_type,
  210. void *mask, int mask_type, int len)
  211. {
  212. if (!tb[val_type])
  213. return;
  214. memcpy(val, nla_data(tb[val_type]), len);
  215. if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
  216. memset(mask, 0xff, len);
  217. else
  218. memcpy(mask, nla_data(tb[mask_type]), len);
  219. }
/* Parse the TCA_FLOWER_KEY_* attributes in @tb into @key and @mask.
 * Attributes without a dedicated *_MASK counterpart get an implicit
 * all-ones mask (exact match); see fl_set_key_val().
 */
static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask)
{
#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FLOWER_INDEV]) {
		/* tcf_change_indev() returns the ifindex on success. */
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
		if (err < 0)
			return err;
		key->indev_ifindex = err;
		mask->indev_ifindex = 0xffffffff;
	}
#endif

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));
	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
		       sizeof(key->basic.n_proto));

	/* The L3 protocol selector only makes sense for IP traffic. */
	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
	}

	/* IPv4 and IPv6 address keys are mutually exclusive; IPv4 wins if
	 * both are present.
	 */
	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	/* Port keys are taken only for the matching transport protocol. */
	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_UNSPEC,
			       sizeof(key->tp.dst));
	}

	return 0;
}
  281. static bool fl_mask_eq(struct fl_flow_mask *mask1,
  282. struct fl_flow_mask *mask2)
  283. {
  284. const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
  285. const long *lmask2 = fl_key_get_start(&mask2->key, mask2);
  286. return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
  287. !memcmp(lmask1, lmask2, fl_mask_range(mask1));
  288. }
/* Template hashtable parameters; fl_init_hashtable() fills in the
 * mask-dependent key_len and shifts key_offset to the range start.
 */
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};
  294. static int fl_init_hashtable(struct cls_fl_head *head,
  295. struct fl_flow_mask *mask)
  296. {
  297. head->ht_params = fl_ht_params;
  298. head->ht_params.key_len = fl_mask_range(mask);
  299. head->ht_params.key_offset += mask->range.start;
  300. return rhashtable_init(&head->ht, &head->ht_params);
  301. }
  302. #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
  303. #define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
  304. #define FL_KEY_MEMBER_END_OFFSET(member) \
  305. (FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))
  306. #define FL_KEY_IN_RANGE(mask, member) \
  307. (FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end && \
  308. FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)
  309. #define FL_KEY_SET(keys, cnt, id, member) \
  310. do { \
  311. keys[cnt].key_id = id; \
  312. keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
  313. cnt++; \
  314. } while(0);
  315. #define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member) \
  316. do { \
  317. if (FL_KEY_IN_RANGE(mask, member)) \
  318. FL_KEY_SET(keys, cnt, id, member); \
  319. } while(0);
  320. static void fl_init_dissector(struct cls_fl_head *head,
  321. struct fl_flow_mask *mask)
  322. {
  323. struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
  324. size_t cnt = 0;
  325. FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
  326. FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
  327. FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
  328. FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
  329. FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
  330. FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
  331. FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
  332. FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
  333. FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
  334. FLOW_DISSECTOR_KEY_PORTS, tp);
  335. skb_flow_dissector_init(&head->dissector, keys, cnt);
  336. }
  337. static int fl_check_assign_mask(struct cls_fl_head *head,
  338. struct fl_flow_mask *mask)
  339. {
  340. int err;
  341. if (head->mask_assigned) {
  342. if (!fl_mask_eq(&head->mask, mask))
  343. return -EINVAL;
  344. else
  345. return 0;
  346. }
  347. /* Mask is not assigned yet. So assign it and init hashtable
  348. * according to that.
  349. */
  350. err = fl_init_hashtable(head, mask);
  351. if (err)
  352. return err;
  353. memcpy(&head->mask, mask, sizeof(head->mask));
  354. head->mask_assigned = true;
  355. fl_init_dissector(head, mask);
  356. return 0;
  357. }
/* Parse netlink attributes into @f's key/mask and actions.  Actions are
 * validated into a temporary tcf_exts and only committed to @f on full
 * success; on failure the temporary is released.
 * NOTE(review): a classid bound via tcf_bind_filter() is not unbound on
 * the fl_set_key() error path — confirm callers tolerate this.
 */
static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	err = fl_set_key(net, tb, &f->key, &mask->key);
	if (err)
		goto errout;

	/* Derive the byte range from the freshly parsed mask, then
	 * precompute the masked key used for hashtable lookups.
	 */
	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	/* Commit the validated actions into the filter. */
	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(&e);
	return err;
}
  384. static u32 fl_grab_new_handle(struct tcf_proto *tp,
  385. struct cls_fl_head *head)
  386. {
  387. unsigned int i = 0x80000000;
  388. u32 handle;
  389. do {
  390. if (++head->hgen == 0x7FFFFFFF)
  391. head->hgen = 1;
  392. } while (--i > 0 && fl_get(tp, head->hgen));
  393. if (unlikely(i == 0)) {
  394. pr_err("Insufficient number of handles\n");
  395. handle = 0;
  396. } else {
  397. handle = head->hgen;
  398. }
  399. return handle;
  400. }
  401. static int fl_change(struct net *net, struct sk_buff *in_skb,
  402. struct tcf_proto *tp, unsigned long base,
  403. u32 handle, struct nlattr **tca,
  404. unsigned long *arg, bool ovr)
  405. {
  406. struct cls_fl_head *head = rtnl_dereference(tp->root);
  407. struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
  408. struct cls_fl_filter *fnew;
  409. struct nlattr *tb[TCA_FLOWER_MAX + 1];
  410. struct fl_flow_mask mask = {};
  411. int err;
  412. if (!tca[TCA_OPTIONS])
  413. return -EINVAL;
  414. err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
  415. if (err < 0)
  416. return err;
  417. if (fold && handle && fold->handle != handle)
  418. return -EINVAL;
  419. fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
  420. if (!fnew)
  421. return -ENOBUFS;
  422. tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
  423. if (!handle) {
  424. handle = fl_grab_new_handle(tp, head);
  425. if (!handle) {
  426. err = -EINVAL;
  427. goto errout;
  428. }
  429. }
  430. fnew->handle = handle;
  431. err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
  432. if (err)
  433. goto errout;
  434. err = fl_check_assign_mask(head, &mask);
  435. if (err)
  436. goto errout;
  437. err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
  438. head->ht_params);
  439. if (err)
  440. goto errout;
  441. if (fold)
  442. rhashtable_remove_fast(&head->ht, &fold->ht_node,
  443. head->ht_params);
  444. *arg = (unsigned long) fnew;
  445. if (fold) {
  446. list_replace_rcu(&fold->list, &fnew->list);
  447. tcf_unbind_filter(tp, &fold->res);
  448. call_rcu(&fold->rcu, fl_destroy_filter);
  449. } else {
  450. list_add_tail_rcu(&fnew->list, &head->filters);
  451. }
  452. return 0;
  453. errout:
  454. kfree(fnew);
  455. return err;
  456. }
/* Remove the filter @arg from the classifier.  Always succeeds. */
static int fl_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;

	/* Unlink from both lookup structures first ... */
	rhashtable_remove_fast(&head->ht, &f->ht_node,
			       head->ht_params);
	list_del_rcu(&f->list);
	tcf_unbind_filter(tp, &f->res);
	/* ... then free only after a grace period: fl_classify() may still
	 * hold a reference obtained under RCU.
	 */
	call_rcu(&f->rcu, fl_destroy_filter);
	return 0;
}
  468. static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
  469. {
  470. struct cls_fl_head *head = rtnl_dereference(tp->root);
  471. struct cls_fl_filter *f;
  472. list_for_each_entry_rcu(f, &head->filters, list) {
  473. if (arg->count < arg->skip)
  474. goto skip;
  475. if (arg->fn(tp, (unsigned long) f, arg) < 0) {
  476. arg->stop = 1;
  477. break;
  478. }
  479. skip:
  480. arg->count++;
  481. }
  482. }
  483. static int fl_dump_key_val(struct sk_buff *skb,
  484. void *val, int val_type,
  485. void *mask, int mask_type, int len)
  486. {
  487. int err;
  488. if (!memchr_inv(mask, 0, len))
  489. return 0;
  490. err = nla_put(skb, val_type, len, val);
  491. if (err)
  492. return err;
  493. if (mask_type != TCA_FLOWER_UNSPEC) {
  494. err = nla_put(skb, mask_type, len, mask);
  495. if (err)
  496. return err;
  497. }
  498. return 0;
  499. }
/* Dump one filter (@fh) as a nested TCA_OPTIONS netlink attribute.
 * Mirrors fl_set_key(): each key is emitted only when its mask is
 * non-zero (see fl_dump_key_val()).  Returns skb->len on success,
 * -1 on netlink buffer exhaustion.
 */
static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_fl_head *head = rtnl_dereference(tp->root);
	struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure;

	key = &f->key;
	/* All filters share the head's mask. */
	mask = &head->mask.key;

	if (mask->indev_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->indev_ifindex);
		/* Device may have disappeared; then silently omit it. */
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.ip_proto)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_UNSPEC,
			     sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_UNSPEC,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
/* Classifier operations registered with the TC core. */
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.dump		= fl_dump,
	.owner		= THIS_MODULE,
};
/* Module entry/exit: register/unregister the classifier with the TC core. */
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");