/* ip_set_list_set.c */
/* Copyright (C) 2008-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* Kernel module implementing an IP set type: the list:set type */
  8. #include <linux/module.h>
  9. #include <linux/ip.h>
  10. #include <linux/rculist.h>
  11. #include <linux/skbuff.h>
  12. #include <linux/errno.h>
  13. #include <linux/netfilter/ipset/ip_set.h>
  14. #include <linux/netfilter/ipset/ip_set_list.h>
  15. #define IPSET_TYPE_REV_MIN 0
  16. /* 1 Counters support added */
  17. /* 2 Comments support added */
  18. #define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
  19. MODULE_LICENSE("GPL");
  20. MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
  21. IP_SET_MODULE_DESC("list:set", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
  22. MODULE_ALIAS("ip_set_list:set");
/* Member elements: one node per member set, kept in insertion order.
 * Extension data (timeout, counter, comment, skbinfo) is stored in the
 * set->dsize-sized area behind the struct, hence the u64 alignment.
 */
struct set_elem {
	struct rcu_head rcu;	/* for kfree_rcu() deferred freeing */
	struct list_head list;	/* link in list_set.members */
	ip_set_id_t id;		/* index of the member set */
} __aligned(__alignof__(u64));
/* Parsed add/del/test request, carried from list_set_uadt() to the
 * uadd/udel/utest handlers.
 */
struct set_adt_elem {
	ip_set_id_t id;		/* set to add/delete/test */
	ip_set_id_t refid;	/* reference set for before/after placement */
	int before;		/* > 0: before refid, < 0: after refid, 0: none */
};
/* Type structure: per-set private data */
struct list_set {
	u32 size;		/* size of set list array */
	struct timer_list gc;	/* garbage collection */
	struct net *net;	/* namespace */
	struct list_head members; /* the set members */
};
/* Kernel-side test: match @skb against the member sets in list order.
 * Called under rcu_read_lock() (see list_set_kadt).  Returns the first
 * positive result and, on match, updates the matching element's own
 * counter/skbinfo extensions; returns 0 when nothing matched.
 */
static int
list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
	       const struct xt_action_param *par,
	       struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{
	struct list_set *map = set->data;
	struct set_elem *e;
	/* Save the caller's flags: the masked copy below is only for the
	 * nested ip_set_test() calls into the member sets.
	 */
	u32 cmdflags = opt->cmdflags;
	int ret;

	/* Don't lookup sub-counters at all */
	opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
	if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
		opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
	list_for_each_entry_rcu(e, &map->members, list) {
		/* Expired elements are invisible to the packet path */
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		ret = ip_set_test(e->id, skb, par, opt);
		if (ret > 0) {
			/* Update this element's extensions with the
			 * original (unmasked) command flags.
			 */
			if (SET_WITH_COUNTER(set))
				ip_set_update_counter(ext_counter(e, set),
						      ext, &opt->ext,
						      cmdflags);
			if (SET_WITH_SKBINFO(set))
				ip_set_get_skbinfo(ext_skbinfo(e, set),
						   ext, &opt->ext,
						   cmdflags);
			return ret;
		}
	}
	return 0;
}
  73. static int
  74. list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
  75. const struct xt_action_param *par,
  76. struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
  77. {
  78. struct list_set *map = set->data;
  79. struct set_elem *e;
  80. int ret;
  81. list_for_each_entry(e, &map->members, list) {
  82. if (SET_WITH_TIMEOUT(set) &&
  83. ip_set_timeout_expired(ext_timeout(e, set)))
  84. continue;
  85. ret = ip_set_add(e->id, skb, par, opt);
  86. if (ret == 0)
  87. return ret;
  88. }
  89. return 0;
  90. }
  91. static int
  92. list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
  93. const struct xt_action_param *par,
  94. struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
  95. {
  96. struct list_set *map = set->data;
  97. struct set_elem *e;
  98. int ret;
  99. list_for_each_entry(e, &map->members, list) {
  100. if (SET_WITH_TIMEOUT(set) &&
  101. ip_set_timeout_expired(ext_timeout(e, set)))
  102. continue;
  103. ret = ip_set_del(e->id, skb, par, opt);
  104. if (ret == 0)
  105. return ret;
  106. }
  107. return 0;
  108. }
  109. static int
  110. list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
  111. const struct xt_action_param *par,
  112. enum ipset_adt adt, struct ip_set_adt_opt *opt)
  113. {
  114. struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
  115. int ret = -EINVAL;
  116. rcu_read_lock();
  117. switch (adt) {
  118. case IPSET_TEST:
  119. ret = list_set_ktest(set, skb, par, opt, &ext);
  120. break;
  121. case IPSET_ADD:
  122. ret = list_set_kadd(set, skb, par, opt, &ext);
  123. break;
  124. case IPSET_DEL:
  125. ret = list_set_kdel(set, skb, par, opt, &ext);
  126. break;
  127. default:
  128. break;
  129. }
  130. rcu_read_unlock();
  131. return ret;
  132. }
/* Userspace interfaces: we are protected by the nfnl mutex */

/* Release an element that is already unlinked (or replaced) in the
 * list: drop the reference on the member set, tear down extensions and
 * free the element after an RCU grace period.
 */
static void
__list_set_del(struct ip_set *set, struct set_elem *e)
{
	struct list_set *map = set->data;

	ip_set_put_byindex(map->net, e->id);
	/* We may call it, because we don't have a to be destroyed
	 * extension which is used by the kernel.
	 */
	ip_set_ext_destroy(set, e);
	/* Readers may still traverse e: defer the actual kfree */
	kfree_rcu(e, rcu);
}
/* Unlink @e from the member list (RCU-safe) and release it. */
static inline void
list_set_del(struct ip_set *set, struct set_elem *e)
{
	list_del_rcu(&e->list);
	__list_set_del(set, e);
}
/* Replace @old with @e in place (RCU-safe) and release @old. */
static inline void
list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
{
	list_replace_rcu(&old->list, &e->list);
	__list_set_del(set, old);
}
  157. static void
  158. set_cleanup_entries(struct ip_set *set)
  159. {
  160. struct list_set *map = set->data;
  161. struct set_elem *e, *n;
  162. list_for_each_entry_safe(e, n, &map->members, list)
  163. if (ip_set_timeout_expired(ext_timeout(e, set)))
  164. list_set_del(set, e);
  165. }
/* Userspace test: is d->id a member and, if before/after was given,
 * is it placed directly before/after d->refid?  Returns 1 on match,
 * 0 otherwise.
 */
static int
list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	       struct ip_set_ext *mext, u32 flags)
{
	struct list_set *map = set->data;
	struct set_adt_elem *d = value;
	struct set_elem *e, *next, *prev = NULL;
	int ret;

	list_for_each_entry(e, &map->members, list) {
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		else if (e->id != d->id) {
			/* Track the last live element seen before the match:
			 * it is the predecessor used for the "after" check.
			 */
			prev = e;
			continue;
		}
		if (d->before == 0) {
			/* Plain membership test */
			ret = 1;
		} else if (d->before > 0) {
			/* "before": the successor must be the reference set.
			 * next is computed first but only dereferenced when
			 * e is not the last element (&& short-circuits).
			 */
			next = list_next_entry(e, list);
			ret = !list_is_last(&e->list, &map->members) &&
			      next->id == d->refid;
		} else {
			/* "after": the predecessor must be the reference set */
			ret = prev && prev->id == d->refid;
		}
		return ret;
	}
	return 0;
}
/* Initialize the extension areas of a new (or re-added) element from
 * the parsed extension values.
 */
static void
list_set_init_extensions(struct ip_set *set, const struct ip_set_ext *ext,
			 struct set_elem *e)
{
	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(e, set), ext);
	if (SET_WITH_COMMENT(set))
		ip_set_init_comment(ext_comment(e, set), ext);
	if (SET_WITH_SKBINFO(set))
		ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
	/* Update timeout last: lockless readers skip expired elements, so
	 * the other extensions must be in place before the element can be
	 * considered live.
	 */
	if (SET_WITH_TIMEOUT(set))
		ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
}
  209. static int
  210. list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
  211. struct ip_set_ext *mext, u32 flags)
  212. {
  213. struct list_set *map = set->data;
  214. struct set_adt_elem *d = value;
  215. struct set_elem *e, *n, *prev, *next;
  216. bool flag_exist = flags & IPSET_FLAG_EXIST;
  217. if (SET_WITH_TIMEOUT(set))
  218. set_cleanup_entries(set);
  219. /* Find where to add the new entry */
  220. n = prev = next = NULL;
  221. list_for_each_entry(e, &map->members, list) {
  222. if (SET_WITH_TIMEOUT(set) &&
  223. ip_set_timeout_expired(ext_timeout(e, set)))
  224. continue;
  225. else if (d->id == e->id)
  226. n = e;
  227. else if (d->before == 0 || e->id != d->refid)
  228. continue;
  229. else if (d->before > 0)
  230. next = e;
  231. else
  232. prev = e;
  233. }
  234. /* Re-add already existing element */
  235. if (n) {
  236. if ((d->before > 0 && !next) ||
  237. (d->before < 0 && !prev))
  238. return -IPSET_ERR_REF_EXIST;
  239. if (!flag_exist)
  240. return -IPSET_ERR_EXIST;
  241. /* Update extensions */
  242. ip_set_ext_destroy(set, n);
  243. list_set_init_extensions(set, ext, n);
  244. /* Set is already added to the list */
  245. ip_set_put_byindex(map->net, d->id);
  246. return 0;
  247. }
  248. /* Add new entry */
  249. if (d->before == 0) {
  250. /* Append */
  251. n = list_empty(&map->members) ? NULL :
  252. list_last_entry(&map->members, struct set_elem, list);
  253. } else if (d->before > 0) {
  254. /* Insert after next element */
  255. if (!list_is_last(&next->list, &map->members))
  256. n = list_next_entry(next, list);
  257. } else {
  258. /* Insert before prev element */
  259. if (prev->list.prev != &map->members)
  260. n = list_prev_entry(prev, list);
  261. }
  262. /* Can we replace a timed out entry? */
  263. if (n &&
  264. !(SET_WITH_TIMEOUT(set) &&
  265. ip_set_timeout_expired(ext_timeout(n, set))))
  266. n = NULL;
  267. e = kzalloc(set->dsize, GFP_ATOMIC);
  268. if (!e)
  269. return -ENOMEM;
  270. e->id = d->id;
  271. INIT_LIST_HEAD(&e->list);
  272. list_set_init_extensions(set, ext, e);
  273. if (n)
  274. list_set_replace(set, e, n);
  275. else if (next)
  276. list_add_tail_rcu(&e->list, &next->list);
  277. else if (prev)
  278. list_add_rcu(&e->list, &prev->list);
  279. else
  280. list_add_tail_rcu(&e->list, &map->members);
  281. return 0;
  282. }
/* Userspace delete: remove d->id from the list, but only if the
 * optional before/after constraint against d->refid holds.
 */
static int
list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	      struct ip_set_ext *mext, u32 flags)
{
	struct list_set *map = set->data;
	struct set_adt_elem *d = value;
	struct set_elem *e, *next, *prev = NULL;

	list_for_each_entry(e, &map->members, list) {
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		else if (e->id != d->id) {
			/* Last live element before the match: the
			 * predecessor for the "after" constraint.
			 */
			prev = e;
			continue;
		}
		if (d->before > 0) {
			/* "before": successor must be the reference set */
			next = list_next_entry(e, list);
			if (list_is_last(&e->list, &map->members) ||
			    next->id != d->refid)
				return -IPSET_ERR_REF_EXIST;
		} else if (d->before < 0) {
			/* "after": predecessor must be the reference set */
			if (!prev || prev->id != d->refid)
				return -IPSET_ERR_REF_EXIST;
		}
		list_set_del(set, e);
		return 0;
	}
	/* Not found: report a reference error if placement was requested */
	return d->before != 0 ? -IPSET_ERR_REF_EXIST : -IPSET_ERR_EXIST;
}
/* Userspace add/del/test entry point: parse the netlink attributes,
 * resolve the named sets (taking references) and dispatch to the
 * uadd/udel/utest handler.  References taken by ip_set_get_byname()
 * are dropped on the finish path, except that a successful ADD keeps
 * the reference on e.id (it is owned by the stored element).
 */
static int
list_set_uadt(struct ip_set *set, struct nlattr *tb[],
	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	struct list_set *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct set_adt_elem e = { .refid = IPSET_INVALID_ID };
	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
	struct ip_set *s;
	int ret = 0;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
	if (unlikely(!tb[IPSET_ATTR_NAME] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

	ret = ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;
	/* Resolve the member set by name; grabs a reference on success */
	e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
	if (e.id == IPSET_INVALID_ID)
		return -IPSET_ERR_NAME;
	/* "Loop detection": name-based set types cannot be nested */
	if (s->type->features & IPSET_TYPE_NAME) {
		ret = -IPSET_ERR_LOOP;
		goto finish;
	}

	if (tb[IPSET_ATTR_CADT_FLAGS]) {
		u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);

		/* Nonzero (positive) when the BEFORE flag was given */
		e.before = f & IPSET_FLAG_BEFORE;
	}

	if (e.before && !tb[IPSET_ATTR_NAMEREF]) {
		ret = -IPSET_ERR_BEFORE;
		goto finish;
	}

	if (tb[IPSET_ATTR_NAMEREF]) {
		/* Also takes a reference, dropped on the finish path */
		e.refid = ip_set_get_byname(map->net,
					    nla_data(tb[IPSET_ATTR_NAMEREF]),
					    &s);
		if (e.refid == IPSET_INVALID_ID) {
			ret = -IPSET_ERR_NAMEREF;
			goto finish;
		}
		/* NAMEREF without BEFORE means "after" placement */
		if (!e.before)
			e.before = -1;
	}
	if (adt != IPSET_TEST && SET_WITH_TIMEOUT(set))
		set_cleanup_entries(set);

	ret = adtfn(set, &e, &ext, &ext, flags);

finish:
	if (e.refid != IPSET_INVALID_ID)
		ip_set_put_byindex(map->net, e.refid);
	/* Keep the e.id reference only for a successful ADD */
	if (adt != IPSET_ADD || ret)
		ip_set_put_byindex(map->net, e.id);
	return ip_set_eexist(ret, flags) ? 0 : ret;
}
  367. static void
  368. list_set_flush(struct ip_set *set)
  369. {
  370. struct list_set *map = set->data;
  371. struct set_elem *e, *n;
  372. list_for_each_entry_safe(e, n, &map->members, list)
  373. list_set_del(set, e);
  374. }
/* Tear down the set: stop the gc timer, release every element and the
 * private data.  Called when the set is destroyed, so no concurrent
 * readers are expected (plain list_del/kfree instead of the RCU
 * variants).
 */
static void
list_set_destroy(struct ip_set *set)
{
	struct list_set *map = set->data;
	struct set_elem *e, *n;

	if (SET_WITH_TIMEOUT(set))
		del_timer_sync(&map->gc);
	list_for_each_entry_safe(e, n, &map->members, list) {
		list_del(&e->list);
		ip_set_put_byindex(map->net, e->id);
		ip_set_ext_destroy(set, e);
		kfree(e);
	}
	kfree(map);
	set->data = NULL;
}
  391. static int
  392. list_set_head(struct ip_set *set, struct sk_buff *skb)
  393. {
  394. const struct list_set *map = set->data;
  395. struct nlattr *nested;
  396. struct set_elem *e;
  397. u32 n = 0;
  398. list_for_each_entry(e, &map->members, list)
  399. n++;
  400. nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
  401. if (!nested)
  402. goto nla_put_failure;
  403. if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
  404. nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
  405. nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
  406. htonl(sizeof(*map) + n * set->dsize)))
  407. goto nla_put_failure;
  408. if (unlikely(ip_set_put_flags(skb, set)))
  409. goto nla_put_failure;
  410. ipset_nest_end(skb, nested);
  411. return 0;
  412. nla_put_failure:
  413. return -EMSGSIZE;
  414. }
/* Dump the member list to userspace.  cb->args[IPSET_CB_ARG0] holds the
 * resume position when the listing spans multiple netlink messages:
 * 0 means start from (or listing is) complete.
 */
static int
list_set_list(const struct ip_set *set,
	      struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct list_set *map = set->data;
	struct nlattr *atd, *nested;
	u32 i = 0, first = cb->args[IPSET_CB_ARG0];
	struct set_elem *e;
	int ret = 0;

	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
	if (!atd)
		return -EMSGSIZE;
	/* Skip the elements already dumped by previous messages */
	list_for_each_entry(e, &map->members, list) {
		if (i == first)
			break;
		i++;
	}

	rcu_read_lock();
	list_for_each_entry_from(e, &map->members, list) {
		i++;
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
		if (!nested) {
			/* No room even for the first element of this
			 * round: give up instead of looping forever.
			 */
			if (i == first) {
				nla_nest_cancel(skb, atd);
				ret = -EMSGSIZE;
				goto out;
			}
			goto nla_put_failure;
		}
		if (nla_put_string(skb, IPSET_ATTR_NAME,
				   ip_set_name_byindex(map->net, e->id)))
			goto nla_put_failure;
		if (ip_set_put_extensions(skb, set, e, true))
			goto nla_put_failure;
		ipset_nest_end(skb, nested);
	}

	ipset_nest_end(skb, atd);
	/* Set listing finished */
	cb->args[IPSET_CB_ARG0] = 0;
	goto out;

nla_put_failure:
	nla_nest_cancel(skb, nested);
	if (unlikely(i == first)) {
		cb->args[IPSET_CB_ARG0] = 0;
		ret = -EMSGSIZE;
	}
	/* NOTE(review): this unconditionally overwrites the reset just
	 * above; since i is incremented before any jump here, i == first
	 * appears unreachable at this label, so behavior should be
	 * unaffected — but confirm against the ipset core's dump resume
	 * contract (later upstream guards these two lines with an else).
	 */
	cb->args[IPSET_CB_ARG0] = i - 1;
	ipset_nest_end(skb, atd);
out:
	rcu_read_unlock();
	return ret;
}
  470. static bool
  471. list_set_same_set(const struct ip_set *a, const struct ip_set *b)
  472. {
  473. const struct list_set *x = a->data;
  474. const struct list_set *y = b->data;
  475. return x->size == y->size &&
  476. a->timeout == b->timeout &&
  477. a->extensions == b->extensions;
  478. }
/* Operations of the list:set type, wired into the generic ipset core */
static const struct ip_set_type_variant set_variant = {
	.kadt = list_set_kadt,		/* packet-path add/del/test */
	.uadt = list_set_uadt,		/* netlink add/del/test */
	.adt = {
		[IPSET_ADD] = list_set_uadd,
		[IPSET_DEL] = list_set_udel,
		[IPSET_TEST] = list_set_utest,
	},
	.destroy = list_set_destroy,
	.flush = list_set_flush,
	.head = list_set_head,		/* dump header data */
	.list = list_set_list,		/* dump members */
	.same_set = list_set_same_set,	/* equivalence check (presumably for
					 * swap) — see ipset core */
};
/* Periodic garbage collector: purge expired members under the set lock
 * and re-arm the timer.  Runs in timer context, hence the _bh lock.
 * (Pre-4.15 timer API: the set pointer arrives as an unsigned long.)
 */
static void
list_set_gc(unsigned long ul_set)
{
	struct ip_set *set = (struct ip_set *)ul_set;
	struct list_set *map = set->data;

	spin_lock_bh(&set->lock);
	set_cleanup_entries(set);
	spin_unlock_bh(&set->lock);

	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
	add_timer(&map->gc);
}
/* Arm the garbage-collection timer for a set created with a timeout.
 * The interval is derived from the set's default timeout value.
 */
static void
list_set_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
{
	struct list_set *map = set->data;

	init_timer(&map->gc);
	map->gc.data = (unsigned long)set;
	map->gc.function = gc;
	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
	add_timer(&map->gc);
}
  514. /* Create list:set type of sets */
  515. static bool
  516. init_list_set(struct net *net, struct ip_set *set, u32 size)
  517. {
  518. struct list_set *map;
  519. map = kzalloc(sizeof(*map), GFP_KERNEL);
  520. if (!map)
  521. return false;
  522. map->size = size;
  523. map->net = net;
  524. INIT_LIST_HEAD(&map->members);
  525. set->data = map;
  526. return true;
  527. }
/* Create a new list:set set from the netlink attributes: validate the
 * attributes, size the element (base struct + enabled extensions),
 * allocate the private data and arm gc when a timeout was given.
 */
static int
list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
		u32 flags)
{
	u32 size = IP_SET_LIST_DEFAULT_SIZE;

	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_SIZE])
		size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
	/* Enforce the minimum list size */
	if (size < IP_SET_LIST_MIN_SIZE)
		size = IP_SET_LIST_MIN_SIZE;

	set->variant = &set_variant;
	/* Element size: struct set_elem plus the extension areas */
	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
				     __alignof__(struct set_elem));
	if (!init_list_set(net, set, size))
		return -ENOMEM;
	if (tb[IPSET_ATTR_TIMEOUT]) {
		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
		list_set_gc_init(set, list_set_gc);
	}
	return 0;
}
/* The list:set type definition registered with the ipset core */
static struct ip_set_type list_set_type __read_mostly = {
	.name = "list:set",
	.protocol = IPSET_PROTOCOL,
	.features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
	.dimension = IPSET_DIM_ONE,
	.family = NFPROTO_UNSPEC,
	.revision_min = IPSET_TYPE_REV_MIN,
	.revision_max = IPSET_TYPE_REV_MAX,
	.create = list_set_create,
	/* Netlink attribute policy for create */
	.create_policy = {
		[IPSET_ATTR_SIZE] = { .type = NLA_U32 },
		[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
		[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
	},
	/* Netlink attribute policy for add/del/test */
	.adt_policy = {
		[IPSET_ATTR_NAME] = { .type = NLA_STRING,
				      .len = IPSET_MAXNAMELEN },
		[IPSET_ATTR_NAMEREF] = { .type = NLA_STRING,
					 .len = IPSET_MAXNAMELEN },
		[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
		[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
		[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
		[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
		[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
		[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
					 .len = IPSET_MAX_COMMENT_SIZE },
		[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
		[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
		[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
	},
	.me = THIS_MODULE,
};
/* Module init: register the list:set type with the ipset core. */
static int __init
list_set_init(void)
{
	return ip_set_type_register(&list_set_type);
}
/* Module exit: wait for pending kfree_rcu() callbacks scheduled by
 * element removal to finish before the module text goes away, then
 * unregister the type.
 */
static void __exit
list_set_fini(void)
{
	rcu_barrier();
	ip_set_type_unregister(&list_set_type);
}

module_init(list_set_init);
module_exit(list_set_fini);