udp_offload.c

/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */
#include <linux/skbuff.h>
#include <linux/slab.h>	/* for kzalloc()/kfree() */
#include <net/udp.h>
#include <net/protocol.h>

static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

#define udp_deref_protected(X) \
	rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))

/* One node in the RCU-protected, singly linked list of registered
 * per-port UDP offloads.  Writers serialize on udp_offload_lock;
 * readers walk the list under rcu_read_lock().
 */
struct udp_offload_priv {
	struct udp_offload	*offload;
	struct rcu_head		rcu;
	struct udp_offload_priv __rcu *next;
};
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	__be16 protocol = skb->protocol;
	netdev_features_t enc_features;
	int udp_offset, outer_hlen;
	unsigned int oldlen;
	bool need_csum = !!(skb_shinfo(skb)->gso_type &
			    SKB_GSO_UDP_TUNNEL_CSUM);
	bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	bool offload_csum = false, dont_encap = (need_csum || remcsum);

	oldlen = (u16)~skb->len;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	skb->encapsulation = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;
	skb->encap_hdr_csum = need_csum;
	skb->remcsum_offload = remcsum;

	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  (skb->dev->features &
			   (is_ipv6 ? NETIF_F_V6_CSUM : NETIF_F_V4_CSUM)));

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & features;
	segs = gso_inner_segment(skb, enc_features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct udphdr *uh;
		int len;
		__be32 delta;

		if (dont_encap) {
			skb->encapsulation = 0;
			skb->ip_summed = CHECKSUM_NONE;
		} else {
			/* Only set up inner headers if we might be offloading
			 * the inner checksum.
			 */
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);
		uh->len = htons(len);

		if (!need_csum)
			continue;

		delta = htonl(oldlen + len);

		uh->check = ~csum_fold((__force __wsum)
				       ((__force u32)uh->check +
					(__force u32)delta));
		if (offload_csum) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		} else if (remcsum) {
			/* Need to calculate the checksum from scratch;
			 * inner checksums are never offloaded when doing
			 * remote checksum offload (remcsum).
			 */
			skb->csum = skb_checksum(skb, udp_offset,
						 skb->len - udp_offset,
						 0);
			uh->check = csum_fold(skb->csum);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			uh->check = gso_make_checksum(skb, ~uh->check);

			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		}
	} while ((skb = skb->next));

out:
	return segs;
}
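/* Note on the length fixup in the segment loop above: on entry skb->data
 * points at the outer UDP header, so oldlen = (u16)~skb->len is the
 * ones-complement of the super-packet's UDP length.  delta is then
 * (~old_len + new_len), and ~csum_fold(uh->check + delta) folds it into
 * the pseudo-header checksum stashed in uh->check, swapping the old
 * length for the segment's length (an RFC 1624-style incremental update)
 * without recomputing anything over the payload.
 */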
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
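/* Sketch (illustrative, not part of this file): the inner_protocol_type
 * dispatched on above is chosen by the encapsulating driver before
 * transmit, via the helpers in <linux/skbuff.h>, roughly:
 *
 *	skb_set_inner_protocol(skb, htons(ETH_P_TEB));	// ENCAP_TYPE_ETHER
 *	skb_set_inner_ipproto(skb, IPPROTO_IPIP);	// ENCAP_TYPE_IPPROTO
 *
 * The first is the VXLAN-style case (inner Ethernet frame), the second
 * the foo-over-udp style case (inner IP protocol).
 */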
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
				      SKB_GSO_UDP_TUNNEL |
				      SKB_GSO_UDP_TUNNEL_CSUM |
				      SKB_GSO_TUNNEL_REMCSUM |
				      SKB_GSO_IPIP |
				      SKB_GSO_GRE | SKB_GSO_GRE_CSUM) ||
			     !(type & (SKB_GSO_UDP))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	/* The checksum is now fully computed in software; nothing is
	 * left for the hardware to do.
	 */
	skb->ip_summed = CHECKSUM_NONE;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}
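/* For illustration (hypothetical numbers): an untrusted 4200-byte UDP
 * super-packet with gso_size = 1400 takes the NETIF_F_GSO_ROBUST branch
 * above, which merely resets gso_segs = DIV_ROUND_UP(4200, 1400) = 3 and
 * returns NULL, since the GSO packet can be consumed as-is once gso_segs
 * is sane.
 */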
int udp_add_offload(struct udp_offload *uo)
{
	struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

	if (!new_offload)
		return -ENOMEM;

	new_offload->offload = uo;

	spin_lock(&udp_offload_lock);
	new_offload->next = udp_offload_base;
	rcu_assign_pointer(udp_offload_base, new_offload);
	spin_unlock(&udp_offload_lock);

	return 0;
}
EXPORT_SYMBOL(udp_add_offload);
static void udp_offload_free_routine(struct rcu_head *head)
{
	struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);

	kfree(ou_priv);
}

void udp_del_offload(struct udp_offload *uo)
{
	struct udp_offload_priv __rcu **head = &udp_offload_base;
	struct udp_offload_priv *uo_priv;

	spin_lock(&udp_offload_lock);

	uo_priv = udp_deref_protected(*head);
	for (; uo_priv != NULL;
	     uo_priv = udp_deref_protected(*head)) {
		if (uo_priv->offload == uo) {
			rcu_assign_pointer(*head,
					   udp_deref_protected(uo_priv->next));
			goto unlock;
		}
		head = &uo_priv->next;
	}
	pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
	spin_unlock(&udp_offload_lock);
	/* uo_priv is NULL here if the offload was not found above */
	if (uo_priv)
		call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);
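/* Usage sketch (illustrative, not part of this file): a UDP tunnel
 * driver registers GRO callbacks for its well-known destination port
 * roughly as below; my_offload, my_gro_receive, my_gro_complete and the
 * port number are hypothetical stand-ins:
 *
 *	static struct udp_offload my_offload = {
 *		.port	 = htons(4789),
 *		.ipproto = IPPROTO_UDP,
 *		.callbacks = {
 *			.gro_receive  = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	err = udp_add_offload(&my_offload);	// when the tunnel socket
 *						// is created
 *	...
 *	udp_del_offload(&my_offload);		// on teardown; the node is
 *						// freed later via call_rcu()
 */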
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh)
{
	struct udp_offload_priv *uo_priv;
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;

	if (NAPI_GRO_CB(skb)->encap_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;

	/* mark that this skb passed once through the tunnel gro layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	rcu_read_lock();
	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_receive)
			goto unflush;
	}
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports, and require the checksums to be either
		 * both zero or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	/* uo_priv is valid here: we can only arrive via the goto above */
	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
	pp = call_gro_receive_udp(uo_priv->offload->callbacks.gro_receive,
				  head, skb, uo_priv->offload);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}
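/* Note on the flow match above: source and dest are the first two
 * __be16 fields of struct udphdr, so the single aligned 32-bit load at
 * &uh->source compares both ports of the two candidate flows at once.
 */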
static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);
skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	return udp_gro_receive(head, skb, uh);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}
int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct udp_offload_priv *uo_priv;
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	uh->len = newlen;

	rcu_read_lock();

	uo_priv = rcu_dereference(udp_offload_base);
	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
		if (uo_priv->offload->port == uh->dest &&
		    uo_priv->offload->callbacks.gro_complete)
			break;
	}

	/* uo_priv is NULL here if no registered offload matched the port */
	if (uo_priv) {
		NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
		err = uo_priv->offload->callbacks.gro_complete(skb,
				nhoff + sizeof(struct udphdr),
				uo_priv->offload);
	}

	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	skb->encapsulation = 1;
	skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr));

	return err;
}
static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	if (uh->check) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);
	} else {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}

	return udp_gro_complete(skb, nhoff);
}

static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}
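/* In mainline trees this init is wired up from ipv4_offload_init() in
 * net/ipv4/af_inet.c, registering the callbacks above for IPPROTO_UDP so
 * that inet_gso_segment() and inet_gro_receive() can dispatch UDP
 * packets here.
 */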