/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define XT_PCPU_BLOCK_SIZE 4096
struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};
/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);

int xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);
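/*
 * Usage sketch for the registration API above: a minimal, hypothetical
 * match extension module (the names foo_mt, foo_mt_reg and
 * struct xt_foo_info are illustrative only, not part of this file):
 *
 *	static struct xt_match foo_mt_reg[] __read_mostly = {
 *		{
 *			.name      = "foo",
 *			.revision  = 0,
 *			.family    = NFPROTO_UNSPEC,
 *			.match     = foo_mt,
 *			.matchsize = sizeof(struct xt_foo_info),
 *			.me        = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_matches(foo_mt_reg, ARRAY_SIZE(foo_mt_reg));
 *	}
 */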
/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
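/*
 * Caller-side sketch: xt_request_find_match() returns with a module
 * reference held on success, which the caller must drop when done
 * (hypothetical lookup of a match named "limit", revision 0):
 *
 *	struct xt_match *m = xt_request_find_match(NFPROTO_IPV4, "limit", 0);
 *
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...
 *	module_put(m->me);
 */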
/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	mutex_lock(&xt[af].mutex);
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
	                                   ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}
/**
 * xt_check_proc_name - check that name is suitable for /proc file creation
 *
 * @name: file name candidate
 * @size: length of buffer
 *
 * some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose, it checks that name is NUL terminated and isn't a 'special'
 * name, like "..".
 *
 * returns negative number on error or 0 if name is usable.
 */
int xt_check_proc_name(const char *name, unsigned int size)
{
	if (name[0] == '\0')
		return -EINVAL;

	if (strnlen(name, size) == size)
		return -ENAMETOOLONG;

	if (strcmp(name, ".") == 0 ||
	    strcmp(name, "..") == 0 ||
	    strchr(name, '/'))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(xt_check_proc_name);
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
/**
 * xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rules target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	if (!xp->compat_tab) {
		if (!xp->number)
			return -EINVAL;
		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
		if (!xp->compat_tab)
			return -ENOMEM;
		xp->cur = 0;
	}

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};

int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* compat_xt_entry_match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add a compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_COMPAT */
/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed the following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata]  target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size, sum of sizes must not exceed target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
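/*
 * The layout documented above implies a simple traversal pattern.
 * A sketch of how a translated blob of total length len could be
 * walked and validated, one rule at a time (illustrative only;
 * ip_tables and friends use the xt_entry_foreach() helper for this):
 *
 *	unsigned int off = 0;
 *
 *	while (off < len) {
 *		struct ipt_entry *e = (void *)blob + off;
 *
 *		if (xt_check_entry_offsets(e, e->elems, e->target_offset,
 *					   e->next_offset) < 0)
 *			return -EINVAL;
 *		off += e->next_offset;
 *	}
 */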
/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or kmalloc'd or vmalloc'd array
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	unsigned int *off;

	off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);

	if (off)
		return off;

	if (size < (SIZE_MAX / sizeof(unsigned int)))
		off = vmalloc(size * sizeof(unsigned int));

	return off;
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);

/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offsets
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);
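/*
 * Intended use of the two helpers above, as a sketch (hypothetical
 * verdict validation during table translation): record every rule head
 * offset in ascending order while walking the blob, then binary-search
 * user-supplied jump targets against that array.
 *
 *	unsigned int *offsets = xt_alloc_entry_offsets(number);
 *	unsigned int n = 0;
 *
 *	if (!offsets)
 *		return -ENOMEM;
 *	...
 *	offsets[n++] = off;	// once per rule, offsets strictly ascending
 *	...
 *	if (!xt_find_jump_offset(offsets, verdict, n))
 *		return -EINVAL;	// jump lands inside a rule, reject
 */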
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if the setsockopt call is done by a 32bit task on a 64bit kernel
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		user += sizeof(*info);
	}
	info->name[sizeof(info->name) - 1] = '\0';

	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
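/*
 * Caller-side sketch, mirroring the do_add_counters() pattern in
 * ip_tables: test the result with IS_ERR() and vfree() it when done.
 *
 *	struct xt_counters_info info;
 *	void *counters;
 *
 *	counters = xt_copy_counters_from_user(arg, len, &info, compat);
 *	if (IS_ERR(counters))
 *		return PTR_ERR(counters);
 *	...
 *	vfree(counters);
 */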
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	if (sz < sizeof(*info))
		return NULL;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!info) {
		info = vmalloc(sz);
		if (!info)
			return NULL;
	}
	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
/* Find table by name, grabs mutex & ref.  Returns NULL on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);

static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vzalloc(size);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no stack needed */
	if (i->stacksize == 0)
		return 0;

	/* Jumpstack needs to be able to record two full callchains, one
	 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu, on reentry
	 * the upper half of the stack is used.
	 *
	 * see the jumpstack setup in ipt_do_table() for more details.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
							 cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
							 GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}
struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
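/*
 * Replacement sketch, simplified from the __do_replace() pattern in
 * ip_tables (get_counters() here stands in for the caller's counter
 * harvesting, it is not defined in this file): the returned old
 * private area still holds valid counters and must be freed by the
 * caller once those are read out.
 *
 *	struct xt_table_info *old;
 *	int ret;
 *
 *	old = xt_replace_table(t, num_counters, newinfo, &ret);
 *	if (!old)
 *		return ret;
 *	get_counters(old, counters);
 *	xt_free_table_info(old);
 */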
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};

static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traversal state for ip{,6}_{tables,matches}, to help cross
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
			       bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;

	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;

	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */
/**
 * xt_hook_link - set up hooks for a new table
 * @table: table with metadata needed to set up hooks
 * @fn: Hook function
 *
 * This function will take care of creating and registering the necessary
 * Netfilter hooks for XT tables.
 */
struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;
	int ret;

	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	ret = nf_register_hooks(ops, num_hooks);
	if (ret < 0) {
		kfree(ops);
		return ERR_PTR(ret);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_link);
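/*
 * Pairing sketch (hypothetical table module; foo_table, foo_table_hook
 * and foo_ops are illustrative names): keep the returned ops array
 * around so xt_hook_unlink() can undo the registration.
 *
 *	static struct nf_hook_ops *foo_ops __read_mostly;
 *
 *	foo_ops = xt_hook_link(&foo_table, foo_table_hook);
 *	if (IS_ERR(foo_ops))
 *		return PTR_ERR(foo_ops);
 *	...
 *	xt_hook_unlink(&foo_table, foo_ops);
 */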
/**
 * xt_hook_unlink - remove hooks for a table
 * @table: table whose valid_hooks determine how many entries to unregister
 * @ops: nf_hook_ops array as returned by xt_hook_link
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
	nf_unregister_hooks(ops, hweight32(table->valid_hooks));
	kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);

void xt_percpu_counter_free(struct xt_counters *counters)
{
	unsigned long pcnt = counters->pcnt;

	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
		free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
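/*
 * Allocation-state sketch, mirroring the find_check_entry() pattern in
 * ip_tables: one state struct is shared across all rules of a single
 * translation so consecutive counters pack into the same percpu block
 * (e stands for a hypothetical ipt_entry being set up).
 *
 *	struct xt_percpu_counter_alloc_state state = { };
 *
 *	// for each rule during translation:
 *	if (!xt_percpu_counter_alloc(&state, &e->counters))
 *		return -ENOMEM;
 *	...
 *	// on rule destruction; only block-aligned pcnt values are freed:
 *	xt_percpu_counter_free(&e->counters);
 */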
static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);