ip6_tables.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145
  1. /*
  2. * Packet matching code.
  3. *
  4. * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
  5. * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
  6. * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13. #include <linux/kernel.h>
  14. #include <linux/capability.h>
  15. #include <linux/in.h>
  16. #include <linux/skbuff.h>
  17. #include <linux/kmod.h>
  18. #include <linux/vmalloc.h>
  19. #include <linux/netdevice.h>
  20. #include <linux/module.h>
  21. #include <linux/poison.h>
  22. #include <linux/icmpv6.h>
  23. #include <net/ipv6.h>
  24. #include <net/compat.h>
  25. #include <asm/uaccess.h>
  26. #include <linux/mutex.h>
  27. #include <linux/proc_fs.h>
  28. #include <linux/err.h>
  29. #include <linux/cpumask.h>
  30. #include <linux/netfilter_ipv6/ip6_tables.h>
  31. #include <linux/netfilter/x_tables.h>
  32. #include <net/netfilter/nf_log.h>
  33. #include "../../netfilter/xt_repldata.h"
  34. MODULE_LICENSE("GPL");
  35. MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
  36. MODULE_DESCRIPTION("IPv6 packet filter");
  37. /*#define DEBUG_IP_FIREWALL*/
  38. /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
  39. /*#define DEBUG_IP_FIREWALL_USER*/
  40. #ifdef DEBUG_IP_FIREWALL
  41. #define dprintf(format, args...) pr_info(format , ## args)
  42. #else
  43. #define dprintf(format, args...)
  44. #endif
  45. #ifdef DEBUG_IP_FIREWALL_USER
  46. #define duprintf(format, args...) pr_info(format , ## args)
  47. #else
  48. #define duprintf(format, args...)
  49. #endif
  50. #ifdef CONFIG_NETFILTER_DEBUG
  51. #define IP_NF_ASSERT(x) WARN_ON(!(x))
  52. #else
  53. #define IP_NF_ASSERT(x)
  54. #endif
  55. #if 0
  56. /* All the better to debug you with... */
  57. #define static
  58. #define inline
  59. #endif
/* Allocate and populate the initial (empty, policy-only) blob for an
 * ip6 table described by @info.  All the real work is done by the
 * xt_alloc_initial_table() macro from xt_repldata.h, which token-pastes
 * the ip6t/IP6T prefixes onto the generic replacement-data helpers.
 */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
  65. /*
  66. We keep a set of rules for each CPU, so we can avoid write-locking
  67. them in the softirq when updating the counters and therefore
  68. only need to read-lock in the softirq; doing a write_lock_bh() in user
  69. context stops packets coming through and allows user context to read
  70. the counters or update the rules.
  71. Hence the start of any table is given by get_table() below. */
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
/*
 * Match @skb against the ip6t_ip6 part of a rule: source/destination
 * address (masked), in/out interface names, and optionally the upper
 * layer protocol.  On a protocol match, *protoff is set to the upper
 * layer header offset and *fragoff to the fragment offset; *hotdrop is
 * set when the packet is malformed enough to warrant an immediate drop.
 */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw comparison result with the rule's inversion flag, so a
 * "mismatch" with the flag set counts as a match and vice versa. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Interface comparison uses the mask to support "eth+"-style
	 * prefix wildcards (mask bytes are 0 past the prefix). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags & IP6T_INV_VIA_IN ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : "");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if (ip6info->flags & IP6T_F_PROTO) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Header chain walk failed.  Only hot-drop when
			 * this is not a later fragment (_frag_off == 0);
			 * non-first fragments legitimately lack the
			 * upper layer header. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if (ip6info->invflags & IP6T_INV_PROTO)
				return false;

			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
  141. /* should be ip6 safe */
  142. static bool
  143. ip6_checkentry(const struct ip6t_ip6 *ipv6)
  144. {
  145. if (ipv6->flags & ~IP6T_F_MASK) {
  146. duprintf("Unknown flag bits set: %08X\n",
  147. ipv6->flags & ~IP6T_F_MASK);
  148. return false;
  149. }
  150. if (ipv6->invflags & ~IP6T_INV_MASK) {
  151. duprintf("Unknown invflag bits set: %08X\n",
  152. ipv6->invflags & ~IP6T_INV_MASK);
  153. return false;
  154. }
  155. return true;
  156. }
/*
 * Target handler for the built-in ERROR target.  Reaching it at
 * packet-processing time means the ruleset is corrupt, so complain
 * (rate-limited, with the chain name stored in targinfo) and drop.
 */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
  163. static inline struct ip6t_entry *
  164. get_entry(const void *base, unsigned int offset)
  165. {
  166. return (struct ip6t_entry *)(base + offset);
  167. }
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
/*
 * A rule is "unconditional" when it has no matches (target_offset is
 * exactly sizeof(struct ip6t_entry)) and its ip6t_ip6 part is all
 * zeroes (compared against a zero-initialized static template).
 */
static inline bool unconditional(const struct ip6t_entry *e)
{
	static const struct ip6t_ip6 uncond;

	return e->target_offset == sizeof(struct ip6t_entry) &&
	       memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
}
/* const-correct wrapper around ip6t_get_target(): the cast discards
 * const only to satisfy the non-const helper; the entry itself is
 * never modified. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
  181. #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* This cries for unification! */
/* Hook-number -> human-readable chain name, for TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* What kind of rule position a TRACE line is reporting. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed log parameters used for every TRACE line emitted below. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = LOGLEVEL_WARNING,
			.logflags = NF_LOG_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
/*
 * Walk callback for trace_packet(): inspect rule @s while scanning a
 * chain looking for the matched rule @e.  An ERROR target marks the
 * head of a user chain (resets *chainname and the rule counter); when
 * @s == @e we decide whether the matched rule is a chain tail
 * (policy/return) and stop the walk by returning 1.  Otherwise the
 * rule counter is advanced and 0 is returned to continue.
 */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			/* A base chain's tail is its policy; a user
			 * chain's tail is an implicit return. */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
/*
 * Emit a "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet that matched rule @e.  The chain name and rule number are
 * recovered by re-walking the table blob from the hook's entry point
 * until @e is found (tracing is off the fast path, so the extra walk
 * is acceptable).
 */
static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	root = get_entry(private->entries, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
  260. #endif
  261. static inline struct ip6t_entry *
  262. ip6t_next_entry(const struct ip6t_entry *entry)
  263. {
  264. return (void *)entry + entry->next_offset;
  265. }
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Core rule-traversal engine: run @skb through @table at the hook
 * given in @state.  Walks the rule blob starting at the hook's entry
 * point, evaluating the ip6 header match and any extension matches,
 * updating per-cpu counters, and dispatching to targets.  Standard
 * targets implement jump/goto/return via a per-cpu jump stack.
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      const struct nf_hook_state *state,
	      struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	stackidx = 0;
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.net = state->net;
	acpar.in = state->in;
	acpar.out = state->out;
	acpar.family = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	/* Counter updates are protected by the recseq + BH disable
	 * rather than a lock; see xt_write_recseq_begin(). */
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	cpu = smp_processor_id();
	table_base = private->entries;
	jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* All extension matches must succeed; any failure falls
		 * through to the next rule. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
#endif
		/* Standard target? (target == NULL means built-in
		 * standard verdict stored in the entry) */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict, encoded as
					 * -(NF_verdict) - 1. */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				/* RETURN: pop the jump stack, or fall
				 * back to the hook's underflow rule. */
				if (stackidx == 0)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--stackidx]);
				continue;
			}
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				/* A real jump (not GOTO, not a plain
				 * fallthrough): push the return address. */
				if (unlikely(stackidx >= private->stacksize)) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[stackidx++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/*
 * Depth-first walk of every chain reachable from each valid hook.
 * Instead of recursing, the walk temporarily stores a back pointer in
 * each entry's counters.pcnt field (restored to 0 on the way back),
 * and uses comefrom bit NF_INET_NUMHOOKS as an "on the current walk
 * path" marker so that revisiting a marked entry detects a loop.
 */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0,
		   unsigned int *offsets)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Entry already on the current walk path ->
			 * the ruleset contains a loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Reject verdicts below the valid
				 * negative-encoding range. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					/* Leaving this entry: clear the
					 * "on path" marker. */
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					/* pcnt held the back pointer saved
					 * when we jumped here; restore it
					 * to zero as documented above. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
							newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
					/* The jump target must be the
					 * exact start of a known rule. */
					if (!xt_find_jump_offset(offsets, newpos,
								 newinfo->number))
						return 0;
					e = (struct ip6t_entry *)
						(entry0 + newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
  504. static void cleanup_match(struct xt_entry_match *m, struct net *net)
  505. {
  506. struct xt_mtdtor_param par;
  507. par.net = net;
  508. par.match = m->u.kernel.match;
  509. par.matchinfo = m->data;
  510. par.family = NFPROTO_IPV6;
  511. if (par.match->destroy != NULL)
  512. par.match->destroy(&par);
  513. module_put(par.match->me);
  514. }
  515. static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
  516. {
  517. const struct ip6t_ip6 *ipv6 = par->entryinfo;
  518. int ret;
  519. par->match = m->u.kernel.match;
  520. par->matchinfo = m->data;
  521. ret = xt_check_match(par, m->u.match_size - sizeof(*m),
  522. ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
  523. if (ret < 0) {
  524. duprintf("ip_tables: check failed for `%s'.\n",
  525. par.match->name);
  526. return ret;
  527. }
  528. return 0;
  529. }
/*
 * Resolve the match extension named in @m (by name + revision, taking
 * a module reference and possibly triggering a module load), store it
 * in m->u.kernel.match, and validate it with check_match().  On
 * validation failure the module reference is dropped again.
 */
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	/* check_match() failed: release the reference taken above. */
	module_put(m->u.kernel.match->me);
	return ret;
}
  550. static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
  551. {
  552. struct xt_entry_target *t = ip6t_get_target(e);
  553. struct xt_tgchk_param par = {
  554. .net = net,
  555. .table = name,
  556. .entryinfo = e,
  557. .target = t->u.kernel.target,
  558. .targinfo = t->data,
  559. .hook_mask = e->comefrom,
  560. .family = NFPROTO_IPV6,
  561. };
  562. int ret;
  563. t = ip6t_get_target(e);
  564. ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
  565. e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
  566. if (ret < 0) {
  567. duprintf("ip_tables: check failed for `%s'.\n",
  568. t->u.kernel.target->name);
  569. return ret;
  570. }
  571. return 0;
  572. }
/*
 * Fully initialize one rule: allocate its per-cpu counter, resolve and
 * check every match extension, then resolve and check the target.  On
 * any failure, everything initialized so far is torn down again (j
 * counts the matches that passed so the unwind loop stops there).
 */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size,
		 struct xt_percpu_counter_alloc_state *alloc_state)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
		return -ENOMEM;

	j = 0;
	memset(&mtpar, 0, sizeof(mtpar));
	mtpar.net = net;
	mtpar.table = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	/* Target check failed: drop the target module reference too. */
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Unwind only the j matches that passed find_check_match(). */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(&e->counters);

	return ret;
}
/*
 * An underflow (base-chain policy) rule must be an unconditional
 * STANDARD target whose verdict is an absolute ACCEPT or DROP, so a
 * packet can never fall off the end of a base chain.
 */
static bool check_underflow(const struct ip6t_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;
	t = ip6t_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	/* Absolute verdicts are stored as -(NF_verdict) - 1; decode. */
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
/*
 * First-pass validation of one user-supplied rule: alignment and
 * bounds of the entry within the blob [@base, @limit), minimum size,
 * flag sanity, and match/target offset consistency.  Also records
 * which hook entry points and underflows this entry corresponds to,
 * and resets the counters/comefrom fields for later passes.
 */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	/* Entry must be aligned and, together with its next_offset,
	 * must lie entirely inside the user-supplied blob. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* Must at least have room for the entry plus one target. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	if (!ip6_checkentry(&e->ipv6))
		return -EINVAL;

	/* Generic x_tables validation of target_offset/next_offset
	 * against the embedded match/target structures. */
	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
				     e->next_offset);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_debug("Underflows must be unconditional and "
					 "use the STANDARD target with "
					 "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
/*
 * Full teardown of one rule (inverse of find_check_entry): destroy all
 * matches, run the target's destructor, drop the target's module
 * reference, and free the per-cpu counter.
 */
static void cleanup_entry(struct ip6t_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ip6t_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV6;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(&e->counters);
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/* On success every entry has passed size/offset validation, all
 * matches and targets are resolved with module references held,
 * and newinfo's hook_entry/underflow arrays are filled in.
 */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
	struct ip6t_entry *iter;
	unsigned int *offsets;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	/* offsets[] records each entry's byte offset so that
	 * mark_source_chains() can validate jump destinations. */
	offsets = xt_alloc_entry_offsets(newinfo->number);
	if (!offsets)
		return -ENOMEM;
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			goto out_free;
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		++i;
		/* Each ERROR target marks a user-defined chain head;
		 * their count bounds the jump stack depth. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	ret = -EINVAL;
	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		goto out_free;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			goto out_free;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			goto out_free;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
		ret = -ELOOP;
		goto out_free;
	}
	kvfree(offsets);

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size,
				       &alloc_state);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Unwind only the i entries that passed find_check_entry(). */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
 out_free:
	kvfree(offsets);
	return ret;
}
/* Sum the per-cpu counters of every rule in @t into @counters.
 * The per-cpu xt_recseq seqcount gives a consistent 64-bit snapshot
 * of each (bcnt, pcnt) pair while packet processing may still be
 * updating the counters concurrently.
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			/* Retry until the pair was read without a writer
			 * having entered the seqcount section. */
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
  817. static struct xt_counters *alloc_counters(const struct xt_table *table)
  818. {
  819. unsigned int countersize;
  820. struct xt_counters *counters;
  821. const struct xt_table_info *private = table->private;
  822. /* We need atomic snapshot of counters: rest doesn't change
  823. (other than comefrom, which userspace doesn't care
  824. about). */
  825. countersize = sizeof(struct xt_counters) * private->number;
  826. counters = vzalloc(countersize);
  827. if (counters == NULL)
  828. return ERR_PTR(-ENOMEM);
  829. get_counters(private, counters);
  830. return counters;
  831. }
/* Copy the table's rule blob to userspace, then patch each copied
 * entry in place: per-rule counters are replaced by a fresh snapshot,
 * and kernel match/target names overwrite the u.kernel pointers so
 * userspace sees u.user.name strings again.
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;
	/* First, raw copy of the whole blob ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the copied-out counters with the snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Restore each match's user-visible name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* And the target's user-visible name. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
  893. #ifdef CONFIG_COMPAT
  894. static void compat_standard_from_user(void *dst, const void *src)
  895. {
  896. int v = *(compat_int_t *)src;
  897. if (v > 0)
  898. v += xt_compat_calc_jump(AF_INET6, v);
  899. memcpy(dst, &v, sizeof(v));
  900. }
  901. static int compat_standard_to_user(void __user *dst, const void *src)
  902. {
  903. compat_int_t cv = *(int *)src;
  904. if (cv > 0)
  905. cv -= xt_compat_calc_jump(AF_INET6, cv);
  906. return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
  907. }
/* For one native entry, compute how many bytes its compat form is
 * smaller (@off), register that delta with the xt compat offset map,
 * shrink newinfo->size, and adjust hook/underflow offsets that lie
 * beyond this entry.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* Fixed header shrinkage plus each match's and the target's
	 * per-extension compat delta. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Hook/underflow points located after this entry move
		 * down by this entry's delta. */
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
/* Build compat-layout metadata (size, hook offsets) for @info by
 * running compat_calc_entry() over every rule.  Caller must hold the
 * AF_INET6 compat lock; offsets are accumulated in the global compat
 * offset map and flushed by the caller.
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
  956. #endif
  957. static int get_info(struct net *net, void __user *user,
  958. const int *len, int compat)
  959. {
  960. char name[XT_TABLE_MAXNAMELEN];
  961. struct xt_table *t;
  962. int ret;
  963. if (*len != sizeof(struct ip6t_getinfo)) {
  964. duprintf("length %u != %zu\n", *len,
  965. sizeof(struct ip6t_getinfo));
  966. return -EINVAL;
  967. }
  968. if (copy_from_user(name, user, sizeof(name)) != 0)
  969. return -EFAULT;
  970. name[XT_TABLE_MAXNAMELEN-1] = '\0';
  971. #ifdef CONFIG_COMPAT
  972. if (compat)
  973. xt_compat_lock(AF_INET6);
  974. #endif
  975. t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
  976. "ip6table_%s", name);
  977. if (!IS_ERR_OR_NULL(t)) {
  978. struct ip6t_getinfo info;
  979. const struct xt_table_info *private = t->private;
  980. #ifdef CONFIG_COMPAT
  981. struct xt_table_info tmp;
  982. if (compat) {
  983. ret = compat_table_info(private, &tmp);
  984. xt_compat_flush_offsets(AF_INET6);
  985. private = &tmp;
  986. }
  987. #endif
  988. memset(&info, 0, sizeof(info));
  989. info.valid_hooks = t->valid_hooks;
  990. memcpy(info.hook_entry, private->hook_entry,
  991. sizeof(info.hook_entry));
  992. memcpy(info.underflow, private->underflow,
  993. sizeof(info.underflow));
  994. info.num_entries = private->number;
  995. info.size = private->size;
  996. strcpy(info.name, name);
  997. if (copy_to_user(user, &info, *len) != 0)
  998. ret = -EFAULT;
  999. else
  1000. ret = 0;
  1001. xt_table_unlock(t);
  1002. module_put(t->me);
  1003. } else
  1004. ret = t ? PTR_ERR(t) : -ENOENT;
  1005. #ifdef CONFIG_COMPAT
  1006. if (compat)
  1007. xt_compat_unlock(AF_INET6);
  1008. #endif
  1009. return ret;
  1010. }
/* Handle IP6T_SO_GET_ENTRIES for native userspace: copy the whole
 * rule blob (with counters and user-visible names restored) out via
 * copy_entries_to_user().  The user must declare the exact blob size
 * it expects; a mismatch returns -EAGAIN so it can retry with
 * refreshed info.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}
	/* Table name may not be NUL-terminated as supplied. */
	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
/* Common tail of (compat_)do_replace(): swap in @newinfo under the
 * xt table lock, snapshot the old counters to @counters_ptr, and
 * free the old blob.  @newinfo is consumed on success; on failure
 * the caller still owns and must free it.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
/* Handle IP6T_SO_SET_REPLACE for native userspace: copy in the
 * replace header and rule blob, validate/translate it, then swap it
 * into the named table via __do_replace().
 */
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	/* User-supplied name may not be NUL-terminated. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	/* translate_table() succeeded, so drop the references it took. */
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* Handle IP6T_SO_SET_ADD_COUNTERS: add user-supplied byte/packet
 * deltas to the live table's per-cpu counters.  @compat selects the
 * 32-bit layout of struct xt_counters_info.
 */
static int
do_add_counters(struct net *net, const void __user *user, unsigned int len,
		int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct ip6t_entry *iter;
	unsigned int addend;

	/* Copies and validates the user counter blob; ERR_PTR on failure. */
	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);
	t = xt_find_table_lock(net, AF_INET6, tmp.name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Enter this cpu's write section so readers retry their snapshot. */
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);
	return ret;
}
  1195. #ifdef CONFIG_COMPAT
/* 32-bit userland layout of struct ip6t_replace, as received from a
 * compat setsockopt(IP6T_SO_SET_REPLACE).  Fields mirror the native
 * structure; only the counters pointer and entry layout differ.
 */
struct compat_ip6t_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;		/* size of entries[] blob */
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];	/* flexible trailing blob */
};
/* Emit one native entry at *dstptr in compat layout.  Matches and
 * the target are converted by the xt compat helpers, which advance
 * *dstptr and shrink *size; target_offset/next_offset are then
 * rewritten to account for the bytes saved so far.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Raw header copy, then overwrite counters with snapshot @i. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* origsize - *size == bytes saved before this field's position. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
  1242. static int
  1243. compat_find_calc_match(struct xt_entry_match *m,
  1244. const struct ip6t_ip6 *ipv6,
  1245. int *size)
  1246. {
  1247. struct xt_match *match;
  1248. match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
  1249. m->u.user.revision);
  1250. if (IS_ERR(match)) {
  1251. duprintf("compat_check_calc_match: `%s' not found\n",
  1252. m->u.user.name);
  1253. return PTR_ERR(match);
  1254. }
  1255. m->u.kernel.match = match;
  1256. *size += xt_compat_match_offset(match);
  1257. return 0;
  1258. }
  1259. static void compat_release_entry(struct compat_ip6t_entry *e)
  1260. {
  1261. struct xt_entry_target *t;
  1262. struct xt_entry_match *ematch;
  1263. /* Cleanup all matches */
  1264. xt_ematch_foreach(ematch, e)
  1265. module_put(ematch->u.kernel.match->me);
  1266. t = compat_ip6t_get_target(e);
  1267. module_put(t->u.kernel.target->me);
  1268. }
/* Validate one compat entry's bounds and offsets, resolve its
 * matches and target (taking module references), and record the
 * compat->native size delta in the offset map.  On error, every
 * reference taken so far is dropped before returning.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Alignment plus bounds: header and declared next_offset must
	 * both lie within the user-supplied blob. */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	if (!ip6_checkentry(&e->ipv6))
		return -EINVAL;

	ret = xt_compat_check_entry_offsets(e, e->elems,
					    e->target_offset, e->next_offset);
	if (ret)
		return ret;

	/* Accumulate the native-vs-compat size delta for this entry. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	/* j counts matches successfully resolved, for partial unwind. */
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, &e->ipv6, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
/* Expand one already-validated compat entry into the native blob at
 * *dstptr, converting matches and target via the xt compat helpers
 * and growing *size by the accumulated delta.  Hook entry/underflow
 * offsets located past this entry are adjusted to the native layout.
 */
static void
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int h;
	struct xt_entry_match *ematch;

	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	/* origsize - *size is negative growth; offsets move up by it. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
/* Translate a compat replace request into native layout in two
 * passes: (1) under the compat lock, validate every compat entry and
 * take match/target references; (2) expand the entries into a new
 * native blob and run the full translate_table() checks on it.  On
 * success *pinfo/*pentry0 point at the new table and the old info is
 * freed; on error all references taken in pass 1 are released.
 */
static int
translate_compat_table(struct net *net,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       const struct compat_ip6t_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_replace repl;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	duprintf("translate_compat_table: size %u\n", info->size);
	/* j counts entries checked so far, for partial unwind. */
	j = 0;
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != compatr->num_entries) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, compatr->num_entries);
		goto out_unlock;
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone. */
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);

	/* Build a native replace header so translate_table() can
	 * re-validate the expanded blob from scratch. */
	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
 out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	/* Release only the j entries that passed the size check. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}
/* Handle IP6T_SO_SET_REPLACE from 32-bit userspace: copy in the
 * compat replace header and blob, translate to native layout, then
 * install it via __do_replace().
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	/* User-supplied name may not be NUL-terminated. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* May replace newinfo/loc_cpu_entry with the native blob. */
	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
  1490. static int
  1491. compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
  1492. unsigned int len)
  1493. {
  1494. int ret;
  1495. if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
  1496. return -EPERM;
  1497. switch (cmd) {
  1498. case IP6T_SO_SET_REPLACE:
  1499. ret = compat_do_replace(sock_net(sk), user, len);
  1500. break;
  1501. case IP6T_SO_SET_ADD_COUNTERS:
  1502. ret = do_add_counters(sock_net(sk), user, len, 1);
  1503. break;
  1504. default:
  1505. duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
  1506. ret = -EINVAL;
  1507. }
  1508. return ret;
  1509. }
/* 32-bit userland layout of struct ip6t_get_entries, used by the
 * compat IP6T_SO_GET_ENTRIES getsockopt.
 */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;			/* expected blob size */
	struct compat_ip6t_entry entrytable[0];	/* flexible trailing blob */
};
  1515. static int
  1516. compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
  1517. void __user *userptr)
  1518. {
  1519. struct xt_counters *counters;
  1520. const struct xt_table_info *private = table->private;
  1521. void __user *pos;
  1522. unsigned int size;
  1523. int ret = 0;
  1524. unsigned int i = 0;
  1525. struct ip6t_entry *iter;
  1526. counters = alloc_counters(table);
  1527. if (IS_ERR(counters))
  1528. return PTR_ERR(counters);
  1529. pos = userptr;
  1530. size = total_size;
  1531. xt_entry_foreach(iter, private->entries, total_size) {
  1532. ret = compat_copy_entry_to_user(iter, &pos,
  1533. &size, counters, i++);
  1534. if (ret != 0)
  1535. break;
  1536. }
  1537. vfree(counters);
  1538. return ret;
  1539. }
/* Handle IP6T_SO_GET_ENTRIES from 32-bit userspace.  The whole
 * operation runs under the compat lock because compat_table_info()
 * populates the global offset map that the entry conversion uses.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	/* Table name may not be NUL-terminated as supplied. */
	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
  1582. static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
  1583. static int
  1584. compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
  1585. {
  1586. int ret;
  1587. if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
  1588. return -EPERM;
  1589. switch (cmd) {
  1590. case IP6T_SO_GET_INFO:
  1591. ret = get_info(sock_net(sk), user, len, 1);
  1592. break;
  1593. case IP6T_SO_GET_ENTRIES:
  1594. ret = compat_get_entries(sock_net(sk), user, len);
  1595. break;
  1596. default:
  1597. ret = do_ip6t_get_ctl(sk, cmd, user, len);
  1598. }
  1599. return ret;
  1600. }
  1601. #endif
  1602. static int
  1603. do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
  1604. {
  1605. int ret;
  1606. if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
  1607. return -EPERM;
  1608. switch (cmd) {
  1609. case IP6T_SO_SET_REPLACE:
  1610. ret = do_replace(sock_net(sk), user, len);
  1611. break;
  1612. case IP6T_SO_SET_ADD_COUNTERS:
  1613. ret = do_add_counters(sock_net(sk), user, len, 0);
  1614. break;
  1615. default:
  1616. duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
  1617. ret = -EINVAL;
  1618. }
  1619. return ret;
  1620. }
/* Getsockopt dispatcher for native ip6tables requests: table info,
 * entry dump, and match/target revision queries.
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Extension name may not be NUL-terminated as supplied. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;
		/* xt_find_revision() fills ret; request the extension
		 * module on a miss and retry once. */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
  1663. struct xt_table *ip6t_register_table(struct net *net,
  1664. const struct xt_table *table,
  1665. const struct ip6t_replace *repl)
  1666. {
  1667. int ret;
  1668. struct xt_table_info *newinfo;
  1669. struct xt_table_info bootstrap = {0};
  1670. void *loc_cpu_entry;
  1671. struct xt_table *new_table;
  1672. newinfo = xt_alloc_table_info(repl->size);
  1673. if (!newinfo) {
  1674. ret = -ENOMEM;
  1675. goto out;
  1676. }
  1677. loc_cpu_entry = newinfo->entries;
  1678. memcpy(loc_cpu_entry, repl->entries, repl->size);
  1679. ret = translate_table(net, newinfo, loc_cpu_entry, repl);
  1680. if (ret != 0)
  1681. goto out_free;
  1682. new_table = xt_register_table(net, table, &bootstrap, newinfo);
  1683. if (IS_ERR(new_table)) {
  1684. ret = PTR_ERR(new_table);
  1685. goto out_free;
  1686. }
  1687. return new_table;
  1688. out_free:
  1689. xt_free_table_info(newinfo);
  1690. out:
  1691. return ERR_PTR(ret);
  1692. }
  1693. void ip6t_unregister_table(struct net *net, struct xt_table *table)
  1694. {
  1695. struct xt_table_info *private;
  1696. void *loc_cpu_entry;
  1697. struct module *table_owner = table->me;
  1698. struct ip6t_entry *iter;
  1699. private = xt_unregister_table(table);
  1700. /* Decrease module usage counts and free resources */
  1701. loc_cpu_entry = private->entries;
  1702. xt_entry_foreach(iter, loc_cpu_entry, private->size)
  1703. cleanup_entry(iter, net);
  1704. if (private->number > private->initial_entries)
  1705. module_put(table_owner);
  1706. xt_free_table_info(private);
  1707. }
  1708. /* Returns 1 if the type and code is matched by the range, 0 otherwise */
  1709. static inline bool
  1710. icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
  1711. u_int8_t type, u_int8_t code,
  1712. bool invert)
  1713. {
  1714. return (type == test_type && code >= min_code && code <= max_code)
  1715. ^ invert;
  1716. }
  1717. static bool
  1718. icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
  1719. {
  1720. const struct icmp6hdr *ic;
  1721. struct icmp6hdr _icmph;
  1722. const struct ip6t_icmp *icmpinfo = par->matchinfo;
  1723. /* Must not be a fragment. */
  1724. if (par->fragoff != 0)
  1725. return false;
  1726. ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
  1727. if (ic == NULL) {
  1728. /* We've been asked to examine this packet, and we
  1729. * can't. Hence, no choice but to drop.
  1730. */
  1731. duprintf("Dropping evil ICMP tinygram.\n");
  1732. par->hotdrop = true;
  1733. return false;
  1734. }
  1735. return icmp6_type_code_match(icmpinfo->type,
  1736. icmpinfo->code[0],
  1737. icmpinfo->code[1],
  1738. ic->icmp6_type, ic->icmp6_code,
  1739. !!(icmpinfo->invflags&IP6T_ICMP_INV));
  1740. }
  1741. /* Called when user tries to insert an entry of this type. */
  1742. static int icmp6_checkentry(const struct xt_mtchk_param *par)
  1743. {
  1744. const struct ip6t_icmp *icmpinfo = par->matchinfo;
  1745. /* Must specify no unknown invflags */
  1746. return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
  1747. }
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* Standard verdict target (no .target hook registered;
		 * verdicts are interpreted by the table traversal itself).
		 */
		.name = XT_STANDARD_TARGET,
		.targetsize = sizeof(int),
		.family = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		/* 32-bit userland stores the verdict as compat_int_t */
		.compatsize = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user = compat_standard_to_user,
#endif
	},
	{
		/* ERROR target: its handler (ip6t_error) logs and drops */
		.name = XT_ERROR_TARGET,
		.target = ip6t_error,
		.targetsize = XT_FUNCTION_MAXNAMELEN,
		.family = NFPROTO_IPV6,
	},
};
/* Socket-option glue: routes IP6T_SO_* {get,set}sockopt calls on PF_INET6
 * sockets to the handlers above (compat variants for 32-bit userland).
 * Note the *_optmax bounds are exclusive, hence the "+1".
 */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf = PF_INET6,
	.set_optmin = IP6T_BASE_CTL,
	.set_optmax = IP6T_SO_SET_MAX+1,
	.set = do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set = compat_do_ip6t_set_ctl,
#endif
	.get_optmin = IP6T_BASE_CTL,
	.get_optmax = IP6T_SO_GET_MAX+1,
	.get = do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get = compat_do_ip6t_get_ctl,
#endif
	.owner = THIS_MODULE,
};
/* Built-in "icmp6" match: restricted to IPPROTO_ICMPV6 packets by .proto. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name = "icmp6",
		.match = icmp6_match,
		.matchsize = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto = IPPROTO_ICMPV6,
		.family = NFPROTO_IPV6,
		.me = THIS_MODULE,
	},
};
/* Per-netns setup: initialize the xt core state for IPv6 in @net. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
/* Per-netns teardown: release the xt core state for IPv6 in @net. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
/* Hooks run for every network namespace that is created/destroyed. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
  1806. static int __init ip6_tables_init(void)
  1807. {
  1808. int ret;
  1809. ret = register_pernet_subsys(&ip6_tables_net_ops);
  1810. if (ret < 0)
  1811. goto err1;
  1812. /* No one else will be downing sem now, so we won't sleep */
  1813. ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
  1814. if (ret < 0)
  1815. goto err2;
  1816. ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
  1817. if (ret < 0)
  1818. goto err4;
  1819. /* Register setsockopt */
  1820. ret = nf_register_sockopt(&ip6t_sockopts);
  1821. if (ret < 0)
  1822. goto err5;
  1823. pr_info("(C) 2000-2006 Netfilter Core Team\n");
  1824. return 0;
  1825. err5:
  1826. xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
  1827. err4:
  1828. xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
  1829. err2:
  1830. unregister_pernet_subsys(&ip6_tables_net_ops);
  1831. err1:
  1832. return ret;
  1833. }
/* Module exit: tear everything down in reverse registration order. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
/* Exported for the table modules (iptable6_filter/mangle/raw/...). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
module_init(ip6_tables_init);
module_exit(ip6_tables_fini);