/* nf_conntrack_l3proto_ipv4_compat.c */
/* ip_conntrack proc compat - based on ip_conntrack_standalone.c
 *
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <linux/rculist_nulls.h>
#include <linux/export.h>
/* Per-open iterator state for /proc/net/ip_conntrack.
 * Must start with seq_net_private so seq_file_net() works on seq->private.
 */
struct ct_iter_state {
	struct seq_net_private p;	/* must be first */
	unsigned int bucket;		/* current conntrack hash bucket */
};
  29. static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
  30. {
  31. struct net *net = seq_file_net(seq);
  32. struct ct_iter_state *st = seq->private;
  33. struct hlist_nulls_node *n;
  34. for (st->bucket = 0;
  35. st->bucket < net->ct.htable_size;
  36. st->bucket++) {
  37. n = rcu_dereference(
  38. hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
  39. if (!is_a_nulls(n))
  40. return n;
  41. }
  42. return NULL;
  43. }
/* Advance to the entry after @head, crossing bucket boundaries as needed.
 * Returns NULL once the table is exhausted.  Caller must hold
 * rcu_read_lock().
 */
static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
					    struct hlist_nulls_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_iter_state *st = seq->private;

	head = rcu_dereference(hlist_nulls_next_rcu(head));
	while (is_a_nulls(head)) {
		/* The nulls marker at the end of each chain encodes the
		 * bucket number.  If it matches the bucket we think we are
		 * in, move on to the next bucket; if it does not, we raced
		 * with an entry being moved to another chain and simply
		 * restart the current bucket.
		 */
		if (likely(get_nulls_value(head) == st->bucket)) {
			if (++st->bucket >= net->ct.htable_size)
				return NULL;
		}
		head = rcu_dereference(
			hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
	}
	return head;
}
  60. static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
  61. {
  62. struct hlist_nulls_node *head = ct_get_first(seq);
  63. if (head)
  64. while (pos && (head = ct_get_next(seq, head)))
  65. pos--;
  66. return pos ? NULL : head;
  67. }
/* seq_file .start: take the RCU read lock for the whole traversal and
 * position the iterator at *pos.
 */
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_get_idx(seq, *pos);
}
/* seq_file .next: step to the following conntrack entry. */
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_get_next(s, v);
}
/* seq_file .stop: drop the RCU read lock taken in ct_seq_start(). */
static void ct_seq_stop(struct seq_file *s, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
#ifdef CONFIG_NF_CONNTRACK_SECMARK
/* Print "secctx=<context> " for the conntrack's secmark.  If the LSM
 * cannot map the secid to a context string, print nothing.
 */
static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
	int ret;
	u32 len;
	char *secctx;

	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
	if (ret)
		return;

	seq_printf(s, "secctx=%s ", secctx);

	/* The LSM allocated the context string; release it. */
	security_release_secctx(secctx, len);
}
#else
/* SECMARK disabled: no-op stub so callers need no ifdefs. */
static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
}
#endif
/* seq_file .show: emit one connection in the legacy ip_conntrack format -
 * protocol name/number, seconds until timeout, original tuple (+ acct),
 * optional [UNREPLIED], reply tuple (+ acct), optional [ASSURED],
 * optional mark/secctx, and the refcount.  Returns -ENOSPC on seq buffer
 * overflow so seq_file retries with a larger buffer, else 0.
 */
static int ct_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_tuple_hash *hash = v;
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
	const struct nf_conntrack_l3proto *l3proto;
	const struct nf_conntrack_l4proto *l4proto;
	int ret = 0;

	NF_CT_ASSERT(ct);
	/* The entry may be dying; skip it unless we can still get a ref. */
	if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
		return 0;

	/* we only want to print DIR_ORIGINAL */
	if (NF_CT_DIRECTION(hash))
		goto release;
	/* this compat file is IPv4-only */
	if (nf_ct_l3num(ct) != AF_INET)
		goto release;

	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
	NF_CT_ASSERT(l3proto);
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	NF_CT_ASSERT(l4proto);

	/* From here on any overflow bails out with ret == -ENOSPC. */
	ret = -ENOSPC;
	seq_printf(s, "%-8s %u %ld ",
		   l4proto->name, nf_ct_protonum(ct),
		   timer_pending(&ct->timeout)
		   ? (long)(ct->timeout.expires - jiffies)/HZ : 0);

	if (l4proto->print_conntrack)
		l4proto->print_conntrack(s, ct);

	if (seq_has_overflowed(s))
		goto release;

	print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
		    l3proto, l4proto);

	if (seq_has_overflowed(s))
		goto release;

	if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
		goto release;

	if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
		seq_printf(s, "[UNREPLIED] ");

	print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
		    l3proto, l4proto);

	if (seq_has_overflowed(s))
		goto release;

	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
		goto release;

	if (test_bit(IPS_ASSURED_BIT, &ct->status))
		seq_printf(s, "[ASSURED] ");

#ifdef CONFIG_NF_CONNTRACK_MARK
	seq_printf(s, "mark=%u ", ct->mark);
#endif

	ct_show_secctx(s, ct);

	seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));

	if (seq_has_overflowed(s))
		goto release;

	ret = 0;
release:
	/* Drop the reference taken above in all paths. */
	nf_ct_put(ct);
	return ret;
}
/* seq_file callbacks for /proc/net/ip_conntrack. */
static const struct seq_operations ct_seq_ops = {
	.start = ct_seq_start,
	.next  = ct_seq_next,
	.stop  = ct_seq_stop,
	.show  = ct_seq_show
};
/* open() for /proc/net/ip_conntrack: per-netns seq_file with iterator
 * state allocated alongside the private data.
 */
static int ct_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_seq_ops,
			    sizeof(struct ct_iter_state));
}
/* file_operations for /proc/net/ip_conntrack. */
static const struct file_operations ct_file_ops = {
	.owner   = THIS_MODULE,
	.open    = ct_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
/* expects */

/* Per-open iterator state for /proc/net/ip_conntrack_expect.
 * Must start with seq_net_private so seq_file_net() works.
 */
struct ct_expect_iter_state {
	struct seq_net_private p;	/* must be first */
	unsigned int bucket;		/* current expectation hash bucket */
};
  180. static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
  181. {
  182. struct net *net = seq_file_net(seq);
  183. struct ct_expect_iter_state *st = seq->private;
  184. struct hlist_node *n;
  185. for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
  186. n = rcu_dereference(
  187. hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
  188. if (n)
  189. return n;
  190. }
  191. return NULL;
  192. }
  193. static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
  194. struct hlist_node *head)
  195. {
  196. struct net *net = seq_file_net(seq);
  197. struct ct_expect_iter_state *st = seq->private;
  198. head = rcu_dereference(hlist_next_rcu(head));
  199. while (head == NULL) {
  200. if (++st->bucket >= nf_ct_expect_hsize)
  201. return NULL;
  202. head = rcu_dereference(
  203. hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
  204. }
  205. return head;
  206. }
  207. static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
  208. {
  209. struct hlist_node *head = ct_expect_get_first(seq);
  210. if (head)
  211. while (pos && (head = ct_expect_get_next(seq, head)))
  212. pos--;
  213. return pos ? NULL : head;
  214. }
/* seq_file .start: take the RCU read lock and position at *pos. */
static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}
/* seq_file .next: step to the following expectation. */
static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}
/* seq_file .stop: drop the RCU read lock taken in exp_seq_start(). */
static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
/* seq_file .show: emit one IPv4 expectation - remaining timeout (or "-"
 * when no timer is set up), protocol number, and the expected tuple.
 */
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *exp;
	const struct hlist_node *n = v;

	exp = hlist_entry(n, struct nf_conntrack_expect, hnode);

	/* this compat file is IPv4-only */
	if (exp->tuple.src.l3num != AF_INET)
		return 0;

	if (exp->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&exp->timeout)
			   ? (long)(exp->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");

	seq_printf(s, "proto=%u ", exp->tuple.dst.protonum);

	print_tuple(s, &exp->tuple,
		    __nf_ct_l3proto_find(exp->tuple.src.l3num),
		    __nf_ct_l4proto_find(exp->tuple.src.l3num,
					 exp->tuple.dst.protonum));
	seq_putc(s, '\n');
	return 0;
}
/* seq_file callbacks for /proc/net/ip_conntrack_expect. */
static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next  = exp_seq_next,
	.stop  = exp_seq_stop,
	.show  = exp_seq_show
};
/* open() for /proc/net/ip_conntrack_expect. */
static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}
/* file_operations for /proc/net/ip_conntrack_expect. */
static const struct file_operations ip_exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
  269. static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
  270. {
  271. struct net *net = seq_file_net(seq);
  272. int cpu;
  273. if (*pos == 0)
  274. return SEQ_START_TOKEN;
  275. for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
  276. if (!cpu_possible(cpu))
  277. continue;
  278. *pos = cpu+1;
  279. return per_cpu_ptr(net->ct.stat, cpu);
  280. }
  281. return NULL;
  282. }
  283. static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  284. {
  285. struct net *net = seq_file_net(seq);
  286. int cpu;
  287. for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
  288. if (!cpu_possible(cpu))
  289. continue;
  290. *pos = cpu+1;
  291. return per_cpu_ptr(net->ct.stat, cpu);
  292. }
  293. return NULL;
  294. }
/* seq_file .stop: no locking was taken in .start, so nothing to undo. */
static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
/* seq_file .show for /proc/net/stat/ip_conntrack: a header line for the
 * SEQ_START_TOKEN, then one hex-formatted row of counters per CPU.  The
 * first column repeats the global conntrack entry count on every row.
 */
static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_net(seq);
	unsigned int nr_conntracks = atomic_read(&net->ct.count);
	const struct ip_conntrack_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
		return 0;
	}

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
			"%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   nr_conntracks,
		   st->searched,
		   st->found,
		   st->new,
		   st->invalid,
		   st->ignore,
		   st->delete,
		   st->delete_list,
		   st->insert,
		   st->insert_failed,
		   st->drop,
		   st->early_drop,
		   st->error,		/* icmp_error column */
		   st->expect_new,
		   st->expect_create,
		   st->expect_delete,
		   st->search_restart
		);
	return 0;
}
/* seq_file callbacks for /proc/net/stat/ip_conntrack. */
static const struct seq_operations ct_cpu_seq_ops = {
	.start = ct_cpu_seq_start,
	.next  = ct_cpu_seq_next,
	.stop  = ct_cpu_seq_stop,
	.show  = ct_cpu_seq_show,
};
/* open() for /proc/net/stat/ip_conntrack; only the netns pointer is
 * needed as private state, no bucket iterator.
 */
static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_cpu_seq_ops,
			    sizeof(struct seq_net_private));
}
/* file_operations for /proc/net/stat/ip_conntrack. */
static const struct file_operations ct_cpu_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = ct_cpu_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
/* Per-netns init: create the three compat proc files.  On failure the
 * goto chain unwinds only the entries already created.  Returns 0 or
 * -ENOMEM.
 */
static int __net_init ip_conntrack_net_init(struct net *net)
{
	struct proc_dir_entry *proc, *proc_exp, *proc_stat;

	proc = proc_create("ip_conntrack", 0440, net->proc_net, &ct_file_ops);
	if (!proc)
		goto err1;

	proc_exp = proc_create("ip_conntrack_expect", 0440, net->proc_net,
			       &ip_exp_file_ops);
	if (!proc_exp)
		goto err2;

	/* same name as the first file, but under /proc/net/stat */
	proc_stat = proc_create("ip_conntrack", S_IRUGO,
				net->proc_net_stat, &ct_cpu_seq_fops);
	if (!proc_stat)
		goto err3;

	return 0;

err3:
	remove_proc_entry("ip_conntrack_expect", net->proc_net);
err2:
	remove_proc_entry("ip_conntrack", net->proc_net);
err1:
	return -ENOMEM;
}
/* Per-netns exit: remove the proc files in reverse creation order. */
static void __net_exit ip_conntrack_net_exit(struct net *net)
{
	remove_proc_entry("ip_conntrack", net->proc_net_stat);
	remove_proc_entry("ip_conntrack_expect", net->proc_net);
	remove_proc_entry("ip_conntrack", net->proc_net);
}
/* Pernet hooks so each network namespace gets its own compat files. */
static struct pernet_operations ip_conntrack_net_ops = {
	.init = ip_conntrack_net_init,
	.exit = ip_conntrack_net_exit,
};
/* Module entry point: register the pernet operations; returns 0 or a
 * negative errno from register_pernet_subsys().
 */
int __init nf_conntrack_ipv4_compat_init(void)
{
	return register_pernet_subsys(&ip_conntrack_net_ops);
}
/* Module exit point: unregister the pernet operations, which runs
 * ip_conntrack_net_exit() for every live namespace.
 */
void __exit nf_conntrack_ipv4_compat_fini(void)
{
	unregister_pernet_subsys(&ip_conntrack_net_ops);
}