/* nf_nat_l3proto_ipv4.c — IPv4 layer-3 backend for the netfilter NAT core. */
  1. /*
  2. * (C) 1999-2001 Paul `Rusty' Russell
  3. * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
  4. * (C) 2011 Patrick McHardy <kaber@trash.net>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #include <linux/types.h>
  11. #include <linux/module.h>
  12. #include <linux/skbuff.h>
  13. #include <linux/ip.h>
  14. #include <linux/icmp.h>
  15. #include <linux/netfilter.h>
  16. #include <linux/netfilter_ipv4.h>
  17. #include <net/secure_seq.h>
  18. #include <net/checksum.h>
  19. #include <net/route.h>
  20. #include <net/ip.h>
  21. #include <net/netfilter/nf_conntrack_core.h>
  22. #include <net/netfilter/nf_conntrack.h>
  23. #include <net/netfilter/nf_nat_core.h>
  24. #include <net/netfilter/nf_nat_l3proto.h>
  25. #include <net/netfilter/nf_nat_l4proto.h>
/* Forward declaration: nf_nat_ipv4_manip_pkt() below passes this ops table
 * to the l4proto manip callback before the table itself is defined.
 */
static const struct nf_nat_l3proto nf_nat_l3proto_ipv4;
  27. #ifdef CONFIG_XFRM
  28. static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
  29. const struct nf_conn *ct,
  30. enum ip_conntrack_dir dir,
  31. unsigned long statusbit,
  32. struct flowi *fl)
  33. {
  34. const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
  35. struct flowi4 *fl4 = &fl->u.ip4;
  36. if (ct->status & statusbit) {
  37. fl4->daddr = t->dst.u3.ip;
  38. if (t->dst.protonum == IPPROTO_TCP ||
  39. t->dst.protonum == IPPROTO_UDP ||
  40. t->dst.protonum == IPPROTO_UDPLITE ||
  41. t->dst.protonum == IPPROTO_DCCP ||
  42. t->dst.protonum == IPPROTO_SCTP)
  43. fl4->fl4_dport = t->dst.u.all;
  44. }
  45. statusbit ^= IPS_NAT_MASK;
  46. if (ct->status & statusbit) {
  47. fl4->saddr = t->src.u3.ip;
  48. if (t->dst.protonum == IPPROTO_TCP ||
  49. t->dst.protonum == IPPROTO_UDP ||
  50. t->dst.protonum == IPPROTO_UDPLITE ||
  51. t->dst.protonum == IPPROTO_DCCP ||
  52. t->dst.protonum == IPPROTO_SCTP)
  53. fl4->fl4_sport = t->src.u.all;
  54. }
  55. }
  56. #endif /* CONFIG_XFRM */
  57. static bool nf_nat_ipv4_in_range(const struct nf_conntrack_tuple *t,
  58. const struct nf_nat_range *range)
  59. {
  60. return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
  61. ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
  62. }
/* Derive a per-tuple-stable starting point for source-port selection;
 * delegates to the kernel's secure sequence-number infrastructure.
 */
static u32 nf_nat_ipv4_secure_port(const struct nf_conntrack_tuple *t,
				   __be16 dport)
{
	return secure_ipv4_port_ephemeral(t->src.u3.ip, t->dst.u3.ip, dport);
}
/* Mangle the IPv4 header at @iphdroff (and the transport header via
 * @l4proto) so the packet matches @target for the given @maniptype.
 * The IP header checksum is fixed up incrementally with csum_replace4().
 * Returns false if the header could not be made writable or the
 * transport-layer mangling failed.
 */
static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
				  unsigned int iphdroff,
				  const struct nf_nat_l4proto *l4proto,
				  const struct nf_conntrack_tuple *target,
				  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	unsigned int hdroff;

	/* Make sure the full IP header is linear and writable. */
	if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
		return false;

	iph = (void *)skb->data + iphdroff;
	hdroff = iphdroff + iph->ihl * 4;

	if (!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff, hdroff,
				target, maniptype))
		return false;
	/* manip_pkt may have reallocated skb->data; reload the pointer. */
	iph = (void *)skb->data + iphdroff;

	if (maniptype == NF_NAT_MANIP_SRC) {
		csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return true;
}
  93. static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
  94. unsigned int iphdroff, __sum16 *check,
  95. const struct nf_conntrack_tuple *t,
  96. enum nf_nat_manip_type maniptype)
  97. {
  98. struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
  99. __be32 oldip, newip;
  100. if (maniptype == NF_NAT_MANIP_SRC) {
  101. oldip = iph->saddr;
  102. newip = t->src.u3.ip;
  103. } else {
  104. oldip = iph->daddr;
  105. newip = t->dst.u3.ip;
  106. }
  107. inet_proto_csum_replace4(check, skb, oldip, newip, true);
  108. }
/* Fix up a transport checksum after NAT changed the payload length from
 * @oldlen to @datalen.  Three cases:
 *  - skb already CHECKSUM_PARTIAL: only the pseudo-header length needs
 *    an incremental adjustment;
 *  - packet is not destined locally and the device can offload (or is
 *    unknown): convert to CHECKSUM_PARTIAL and seed the pseudo-header
 *    checksum for hardware/stack completion;
 *  - otherwise: full software recomputation over @data.
 */
static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
				    u8 proto, void *data, __sum16 *check,
				    int datalen, int oldlen)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt = skb_rtable(skb);

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			/* csum_start/offset locate the checksum field for
			 * later completion over the transport header.
			 */
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  ip_hdrlen(skb);
			skb->csum_offset = (void *)check - data;
			*check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    datalen, proto, 0);
		} else {
			*check = 0;
			*check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						   datalen, proto,
						   csum_partial(data, datalen,
								0));
			/* UDP checksum 0 means "none"; use the equivalent
			 * all-ones encoding instead.
			 */
			if (proto == IPPROTO_UDP && !*check)
				*check = CSUM_MANGLED_0;
		}
	} else
		inet_proto_csum_replace2(check, skb,
					 htons(oldlen), htons(datalen), true);
}
  138. #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
  139. static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
  140. struct nf_nat_range *range)
  141. {
  142. if (tb[CTA_NAT_V4_MINIP]) {
  143. range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
  144. range->flags |= NF_NAT_RANGE_MAP_IPS;
  145. }
  146. if (tb[CTA_NAT_V4_MAXIP])
  147. range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
  148. else
  149. range->max_addr.ip = range->min_addr.ip;
  150. return 0;
  151. }
  152. #endif
/* IPv4 layer-3 operations table registered with the NAT core. */
static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
	.l3proto = NFPROTO_IPV4,
	.in_range = nf_nat_ipv4_in_range,
	.secure_port = nf_nat_ipv4_secure_port,
	.manip_pkt = nf_nat_ipv4_manip_pkt,
	.csum_update = nf_nat_ipv4_csum_update,
	.csum_recalc = nf_nat_ipv4_csum_recalc,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
	.nlattr_to_range = nf_nat_ipv4_nlattr_to_range,
#endif
#ifdef CONFIG_XFRM
	.decode_session = nf_nat_ipv4_decode_session,
#endif
};
/* ICMP errors embed the headers of the (already NATted) packet that
 * triggered them.  Reverse the NAT on the embedded header, then apply
 * the opposite mangling to the outer header so the error looks like a
 * reply to the pre-NAT packet.  Returns 1 on success or no-op, 0 if the
 * packet should be dropped.
 */
int nf_nat_icmp_reply_translation(struct sk_buff *skb,
				  struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
	unsigned int hdrlen = ip_hdrlen(skb);
	const struct nf_nat_l4proto *l4proto;
	struct nf_conntrack_tuple target;
	unsigned long statusbit;

	NF_CT_ASSERT(ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY);

	if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
		return 0;
	/* Drop ICMP packets with a bad checksum instead of mangling them. */
	if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
		return 0;

	inside = (void *)skb->data + hdrlen;
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* Only accept redirects for fully-set-up connections, and
		 * never for NATted ones (a redirect would let the peers
		 * bypass our translation).
		 */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;
		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	if (manip == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply direction */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* No NAT applies in this direction: nothing to translate. */
	if (!(ct->status & statusbit))
		return 1;

	/* First undo NAT on the header embedded in the ICMP payload,
	 * using the opposite direction's tuple and manip type.
	 */
	l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, inside->ip.protocol);
	if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
				   l4proto, &ct->tuplehash[!dir].tuple, !manip))
		return 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Reloading "inside" here since manip_pkt may reallocate */
		inside = (void *)skb->data + hdrlen;
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(skb, hdrlen,
					       skb->len - hdrlen, 0));
	}

	/* Change outer to look like the reply to an incoming packet */
	nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
	/* Protocol 0 fetches the "unknown" l4proto for the outer ICMP. */
	l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, 0);
	if (!nf_nat_ipv4_manip_pkt(skb, 0, l4proto, &target, manip))
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
/* Common body for every IPv4 NAT hook.  Looks up the conntrack entry,
 * sets up a NAT binding on the connection's first packet (via @do_chain,
 * i.e. the ruleset traversal, falling back to a null binding), and then
 * mangles the packet per the binding.  Returns a netfilter verdict.
 */
unsigned int
nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state,
	       unsigned int (*do_chain)(void *priv,
					struct sk_buff *skb,
					const struct nf_hook_state *state,
					struct nf_conn *ct))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track? It's not due to stress, or conntrack would
	 * have dropped it. Hence it's the user's responsibilty to
	 * packet filter it out, or implement conntrack/NAT for that
	 * protocol. 8) --RR
	 */
	if (!ct)
		return NF_ACCEPT;

	/* Don't try to NAT if this packet is not conntracked */
	if (nf_ct_is_untracked(ct))
		return NF_ACCEPT;

	nat = nf_ct_nat_ext_add(ct);
	if (nat == NULL)
		return NF_ACCEPT;

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		/* ICMP errors need their embedded header translated too. */
		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   state->hook))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
		/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			unsigned int ret;

			/* Traverse the NAT rules to pick a binding. */
			ret = do_chain(priv, skb, state, ct);
			if (ret != NF_ACCEPT)
				return ret;

			if (nf_nat_initialized(ct, HOOK2MANIP(state->hook)))
				break;

			/* No rule matched: install an identity binding so
			 * later packets take the fast path.
			 */
			ret = nf_nat_alloc_null_binding(ct, state->hook);
			if (ret != NF_ACCEPT)
				return ret;
		} else {
			pr_debug("Already setup manip %s for ct %p\n",
				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct);
			if (nf_nat_oif_changed(state->hook, ctinfo, nat,
					       state->out))
				goto oif_changed;
		}
		break;

	default:
		/* ESTABLISHED */
		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
			     ctinfo == IP_CT_ESTABLISHED_REPLY);
		if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
			goto oif_changed;
	}

	return nf_nat_packet(ct, ctinfo, state->hook, skb);

oif_changed:
	/* Output interface changed mid-connection: binding is stale. */
	nf_ct_kill_acct(ct, ctinfo, skb);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn);
  297. unsigned int
  298. nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
  299. const struct nf_hook_state *state,
  300. unsigned int (*do_chain)(void *priv,
  301. struct sk_buff *skb,
  302. const struct nf_hook_state *state,
  303. struct nf_conn *ct))
  304. {
  305. unsigned int ret;
  306. __be32 daddr = ip_hdr(skb)->daddr;
  307. ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
  308. if (ret != NF_DROP && ret != NF_STOLEN &&
  309. daddr != ip_hdr(skb)->daddr)
  310. skb_dst_drop(skb);
  311. return ret;
  312. }
  313. EXPORT_SYMBOL_GPL(nf_nat_ipv4_in);
/* POSTROUTING (SNAT) hook wrapper.  After NAT, if the packet was not
 * already XFRM-transformed and the source address or port was rewritten,
 * redo the XFRM policy lookup so IPsec policies still match the mangled
 * packet.
 */
unsigned int
nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
		const struct nf_hook_state *state,
		unsigned int (*do_chain)(void *priv,
					 struct sk_buff *skb,
					 const struct nf_hook_state *state,
					 struct nf_conn *ct))
{
#ifdef CONFIG_XFRM
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	int err;
#endif
	unsigned int ret;

	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr) ||
	    ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;

	ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
#ifdef CONFIG_XFRM
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		/* Did SNAT change the source address, or (for non-ICMP)
		 * the source port?  Compare against the reply tuple.
		 */
		if ((ct->tuplehash[dir].tuple.src.u3.ip !=
		     ct->tuplehash[!dir].tuple.dst.u3.ip) ||
		    (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
		     ct->tuplehash[dir].tuple.src.u.all !=
		     ct->tuplehash[!dir].tuple.dst.u.all)) {
			err = nf_xfrm_me_harder(state->net, skb, AF_INET);
			if (err < 0)
				ret = NF_DROP_ERR(err);
		}
	}
#endif
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_ipv4_out);
/* LOCAL_OUT (DNAT) hook wrapper.  If NAT rewrote the destination of a
 * locally-generated packet, re-route it; if only the destination port
 * changed (and XFRM has not already transformed the skb), redo the XFRM
 * policy lookup instead.
 */
unsigned int
nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
		     const struct nf_hook_state *state,
		     unsigned int (*do_chain)(void *priv,
					      struct sk_buff *skb,
					      const struct nf_hook_state *state,
					      struct nf_conn *ct))
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	unsigned int ret;
	int err;

	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr) ||
	    ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;

	ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		/* Destination address rewritten: route must be redone. */
		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
		    ct->tuplehash[!dir].tuple.src.u3.ip) {
			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
			if (err < 0)
				ret = NF_DROP_ERR(err);
		}
#ifdef CONFIG_XFRM
		/* Only the destination port changed: policy re-lookup. */
		else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
			 ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
			 ct->tuplehash[dir].tuple.dst.u.all !=
			 ct->tuplehash[!dir].tuple.src.u.all) {
			err = nf_xfrm_me_harder(state->net, skb, AF_INET);
			if (err < 0)
				ret = NF_DROP_ERR(err);
		}
#endif
	}
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_ipv4_local_fn);
  392. static int __init nf_nat_l3proto_ipv4_init(void)
  393. {
  394. int err;
  395. err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
  396. if (err < 0)
  397. goto err1;
  398. err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv4);
  399. if (err < 0)
  400. goto err2;
  401. return err;
  402. err2:
  403. nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
  404. err1:
  405. return err;
  406. }
/* Module teardown: unregister in reverse order of registration. */
static void __exit nf_nat_l3proto_ipv4_exit(void)
{
	nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv4);
	nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
}
MODULE_LICENSE("GPL");
/* Autoload this module when the NAT core requests the AF_INET backend. */
MODULE_ALIAS("nf-nat-" __stringify(AF_INET));
module_init(nf_nat_l3proto_ipv4_init);
module_exit(nf_nat_l3proto_ipv4_exit);