gro_cells.h

#ifndef _NET_GRO_CELLS_H
#define _NET_GRO_CELLS_H

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>

/* One cell per cpu: a private skb queue plus a dedicated NAPI context. */
struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};

struct gro_cells {
	struct gro_cell __percpu	*cells;
};

/* Called from the device rx path: queue the skb on this cpu's cell and
 * schedule its NAPI instance, or fall back to netif_rx() when GRO cannot
 * be used for this skb/device.
 */
static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct gro_cell *cell;
	struct net_device *dev = skb->dev;

	rcu_read_lock();
	if (unlikely(!(dev->flags & IFF_UP)))
		goto drop;

	if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
		netif_rx(skb);
		goto unlock;
	}

	cell = this_cpu_ptr(gcells->cells);

	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
drop:
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		goto unlock;
	}

	__skb_queue_tail(&cell->napi_skbs, skb);
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

unlock:
	rcu_read_unlock();
}

/* called under BH context */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);
		netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
		napi_enable(&cell->napi);
	}
	return 0;
}

static inline void gro_cells_destroy(struct gro_cells *gcells)
{
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		napi_disable(&cell->napi);
		netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	free_percpu(gcells->cells);
	gcells->cells = NULL;
}

#endif
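
A minimal sketch of how a driver might wire these helpers up. The "foo" device, its private struct, and the ndo/rx function names below are hypothetical illustrations, not part of this header; the only real entry points assumed are gro_cells_init(), gro_cells_receive() and gro_cells_destroy() as declared above.

#include <net/gro_cells.h>

struct foo_priv {
	struct gro_cells gro_cells;
	/* ... other per-device state ... */
};

static int foo_ndo_init(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Allocates one gro_cell per possible cpu and enables its NAPI. */
	return gro_cells_init(&priv->gro_cells, dev);
}

static void foo_ndo_uninit(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Disables NAPI, purges any queued skbs, frees the per-cpu data. */
	gro_cells_destroy(&priv->gro_cells);
}

/* In the decapsulation/rx path, instead of calling netif_rx(skb) directly: */
static void foo_rx(struct sk_buff *skb)
{
	struct foo_priv *priv = netdev_priv(skb->dev);

	gro_cells_receive(&priv->gro_cells, skb);
}

The point of routing rx through gro_cells_receive() is that packets are batched on the local cpu's queue and fed to napi_gro_receive() from softirq context, so a tunnel or virtual device gets GRO aggregation it would not get from plain netif_rx().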