nf_queue.c

/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* Only one queue handler can be registered per network namespace; a
 * duplicate registration is a bug and trips the WARN_ON() below. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(net->nf.queue_handler));
        rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
        RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
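
/*
 * Example (illustrative sketch): nfnetlink_queue is the only in-kernel
 * backend. Its registration looks roughly like the snippet below; the
 * callback names follow net/netfilter/nfnetlink_queue.c, but take them
 * as an assumption about what a backend provides, not a spec.
 *
 *      static const struct nf_queue_handler nfqh = {
 *              .outfn          = nfqnl_enqueue_packet,
 *              .nf_hook_drop   = nfqnl_nf_hook_drop,
 *      };
 *
 * At netns init:  nf_register_queue_handler(net, &nfqh);
 * At netns exit:  nf_unregister_queue_handler(net);  (queue flushed first)
 */
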
void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_put(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_put(physdev);
        }
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
        if (state->sk)
                sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
        }
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
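
/*
 * Lifecycle note (sketch): the two helpers above must stay balanced.
 * nf_queue() below takes the references just before the packet is
 * handed to the backend; they are dropped either on enqueue failure
 * or in nf_reinject() once the verdict comes back:
 *
 *      nf_queue_entry_get_refs(entry);
 *      status = qh->outfn(entry, queuenum);
 *      if (status < 0)
 *              nf_queue_entry_release_refs(entry);
 */

/* Called when a hook is being unregistered: ask the queueing backend to
 * drop every queued entry that still references that hook.
 */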
void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
{
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net, ops);
        rcu_read_unlock();
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
int nf_queue(struct sk_buff *skb,
             struct nf_hook_ops *elem,
             struct nf_hook_state *state,
             unsigned int queuenum)
{
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
        const struct nf_afinfo *afinfo;
        const struct nf_queue_handler *qh;
        struct net *net = state->net;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        qh = rcu_dereference(net->nf.queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err;
        }

        afinfo = nf_get_afinfo(state->pf);
        if (!afinfo)
                goto err;

        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
        if (!entry) {
                status = -ENOMEM;
                goto err;
        }

        *entry = (struct nf_queue_entry) {
                .skb    = skb,
                .elem   = elem,
                .state  = *state,
                .size   = sizeof(*entry) + afinfo->route_key_size,
        };

        nf_queue_entry_get_refs(entry);
        skb_dst_force(skb);
        afinfo->saveroute(skb, entry);
        status = qh->outfn(entry, queuenum);

        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
        }

        return 0;

err:
        kfree(entry);
        return status;
}
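
/*
 * Example (hypothetical hook, illustration only): a hook asks for this
 * path by returning an NF_QUEUE verdict with the target queue number in
 * the upper bits, optionally OR'ed with NF_VERDICT_FLAG_QUEUE_BYPASS so
 * that a missing listener means "continue traversal" instead of "drop"
 * (the -ESRCH handling in nf_reinject() below):
 *
 *      static unsigned int my_hook(void *priv, struct sk_buff *skb,
 *                                  const struct nf_hook_state *state)
 *      {
 *              return NF_QUEUE_NR(0) | NF_VERDICT_FLAG_QUEUE_BYPASS;
 *      }
 */
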
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        struct sk_buff *skb = entry->skb;
        struct nf_hook_ops *elem = entry->elem;
        const struct nf_afinfo *afinfo;
        int err;

        nf_queue_entry_release_refs(entry);

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT)
                verdict = elem->hook(elem->priv, skb, &entry->state);

        if (verdict == NF_ACCEPT) {
                afinfo = nf_get_afinfo(entry->state.pf);
                if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
                        verdict = NF_DROP;
        }

        entry->state.thresh = INT_MIN;

        if (verdict == NF_ACCEPT) {
        next_hook:
                verdict = nf_iterate(entry->state.hook_list,
                                     skb, &entry->state, &elem);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, elem, &entry->state,
                               verdict >> NF_VERDICT_QBITS);
                if (err < 0) {
                        if (err == -ESRCH &&
                            (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                                goto next_hook;
                        kfree_skb(skb);
                }
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }

        kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
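
/*
 * Example (sketch): once userspace delivers its verdict, the queue
 * backend hands the stored entry back through nf_reinject(), e.g.:
 *
 *      nf_reinject(entry, NF_ACCEPT);  resumes hook traversal
 *      nf_reinject(entry, NF_DROP);    frees the skb
 *
 * In every case the entry itself is freed here, so the backend must
 * not touch it afterwards.
 */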