net.h
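
/*
 * Tracepoints for the core network device transmit and receive paths.
 * With TRACE_SYSTEM set to "net", each TRACE_EVENT/DEFINE_EVENT below is
 * expanded (via <trace/define_trace.h>) into a trace_<name>() hook for the
 * networking core and into an event exposed under events/net/ in tracefs.
 */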
#undef TRACE_SYSTEM
#define TRACE_SYSTEM net

#if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NET_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tracepoint.h>
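
/*
 * net_dev_start_xmit - fired from the core transmit path just before an skb
 * is handed to the driver's ndo_start_xmit(). Captures the skb metadata
 * (VLAN tags, checksum state, header offsets, GSO parameters) while the
 * buffer is still owned by the stack.
 */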
TRACE_EVENT(net_dev_start_xmit,

	TP_PROTO(const struct sk_buff *skb, const struct net_device *dev),

	TP_ARGS(skb, dev),

	TP_STRUCT__entry(
		__string( name, dev->name )
		__field( u16, queue_mapping )
		__field( const void *, skbaddr )
		__field( bool, vlan_tagged )
		__field( u16, vlan_proto )
		__field( u16, vlan_tci )
		__field( u16, protocol )
		__field( u8, ip_summed )
		__field( unsigned int, len )
		__field( unsigned int, data_len )
		__field( int, network_offset )
		__field( bool, transport_offset_valid)
		__field( int, transport_offset)
		__field( u8, tx_flags )
		__field( u16, gso_size )
		__field( u16, gso_segs )
		__field( u16, gso_type )
	),

	TP_fast_assign(
		__assign_str(name, dev->name);
		__entry->queue_mapping = skb->queue_mapping;
		__entry->skbaddr = skb;
		__entry->vlan_tagged = skb_vlan_tag_present(skb);
		__entry->vlan_proto = ntohs(skb->vlan_proto);
		__entry->vlan_tci = skb_vlan_tag_get(skb);
		__entry->protocol = ntohs(skb->protocol);
		__entry->ip_summed = skb->ip_summed;
		__entry->len = skb->len;
		__entry->data_len = skb->data_len;
		__entry->network_offset = skb_network_offset(skb);
		__entry->transport_offset_valid =
			skb_transport_header_was_set(skb);
		__entry->transport_offset = skb_transport_offset(skb);
		__entry->tx_flags = skb_shinfo(skb)->tx_flags;
		__entry->gso_size = skb_shinfo(skb)->gso_size;
		__entry->gso_segs = skb_shinfo(skb)->gso_segs;
		__entry->gso_type = skb_shinfo(skb)->gso_type;
	),

	TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x",
		  __get_str(name), __entry->queue_mapping, __entry->skbaddr,
		  __entry->vlan_tagged, __entry->vlan_proto, __entry->vlan_tci,
		  __entry->protocol, __entry->ip_summed, __entry->len,
		  __entry->data_len,
		  __entry->network_offset, __entry->transport_offset_valid,
		  __entry->transport_offset, __entry->tx_flags,
		  __entry->gso_size, __entry->gso_segs, __entry->gso_type)
);
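
/*
 * net_dev_xmit - fired after the driver's transmit routine returns; records
 * the return code together with the device name and skb pointer. The length
 * is passed in as skb_len because it is sampled before the call, when the
 * skb may already have been consumed by the driver.
 */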
TRACE_EVENT(net_dev_xmit,

	TP_PROTO(struct sk_buff *skb,
		 int rc,
		 struct net_device *dev,
		 unsigned int skb_len),

	TP_ARGS(skb, rc, dev, skb_len),

	TP_STRUCT__entry(
		__field( void *, skbaddr )
		__field( unsigned int, len )
		__field( int, rc )
		__string( name, dev->name )
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->len = skb_len;
		__entry->rc = rc;
		__assign_str(name, dev->name);
	),

	TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
		  __get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
);
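
/*
 * net_dev_template - shared layout for events that only need the device
 * name, the skb pointer and its length; used by the queueing and basic
 * receive events defined right below.
 */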
DECLARE_EVENT_CLASS(net_dev_template,

	TP_PROTO(struct sk_buff *skb),

	TP_ARGS(skb),

	TP_STRUCT__entry(
		__field( void *, skbaddr )
		__field( unsigned int, len )
		__string( name, skb->dev->name )
	),

	TP_fast_assign(
		__entry->skbaddr = skb;
		__entry->len = skb->len;
		__assign_str(name, skb->dev->name);
	),

	TP_printk("dev=%s skbaddr=%p len=%u",
		  __get_str(name), __entry->skbaddr, __entry->len)
)

DEFINE_EVENT(net_dev_template, net_dev_queue,

	TP_PROTO(struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_template, netif_receive_skb,

	TP_PROTO(struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_template, netif_rx,

	TP_PROTO(struct sk_buff *skb),

	TP_ARGS(skb)
);
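
/*
 * net_dev_rx_verbose_template - detailed receive-side layout shared by the
 * *_entry events below; mirrors net_dev_start_xmit but adds RX-specific
 * fields such as napi_id, the receive hash and truesize.
 */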
DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb),

	TP_STRUCT__entry(
		__string( name, skb->dev->name )
		__field( unsigned int, napi_id )
		__field( u16, queue_mapping )
		__field( const void *, skbaddr )
		__field( bool, vlan_tagged )
		__field( u16, vlan_proto )
		__field( u16, vlan_tci )
		__field( u16, protocol )
		__field( u8, ip_summed )
		__field( u32, hash )
		__field( bool, l4_hash )
		__field( unsigned int, len )
		__field( unsigned int, data_len )
		__field( unsigned int, truesize )
		__field( bool, mac_header_valid)
		__field( int, mac_header )
		__field( unsigned char, nr_frags )
		__field( u16, gso_size )
		__field( u16, gso_type )
	),

	TP_fast_assign(
		__assign_str(name, skb->dev->name);
#ifdef CONFIG_NET_RX_BUSY_POLL
		__entry->napi_id = skb->napi_id;
#else
		__entry->napi_id = 0;
#endif
		__entry->queue_mapping = skb->queue_mapping;
		__entry->skbaddr = skb;
		__entry->vlan_tagged = skb_vlan_tag_present(skb);
		__entry->vlan_proto = ntohs(skb->vlan_proto);
		__entry->vlan_tci = skb_vlan_tag_get(skb);
		__entry->protocol = ntohs(skb->protocol);
		__entry->ip_summed = skb->ip_summed;
		__entry->hash = skb->hash;
		__entry->l4_hash = skb->l4_hash;
		__entry->len = skb->len;
		__entry->data_len = skb->data_len;
		__entry->truesize = skb->truesize;
		__entry->mac_header_valid = skb_mac_header_was_set(skb);
		__entry->mac_header = skb_mac_header(skb) - skb->data;
		__entry->nr_frags = skb_shinfo(skb)->nr_frags;
		__entry->gso_size = skb_shinfo(skb)->gso_size;
		__entry->gso_type = skb_shinfo(skb)->gso_type;
	),

	TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
		  __get_str(name), __entry->napi_id, __entry->queue_mapping,
		  __entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
		  __entry->vlan_tci, __entry->protocol, __entry->ip_summed,
		  __entry->hash, __entry->l4_hash, __entry->len,
		  __entry->data_len, __entry->truesize,
		  __entry->mac_header_valid, __entry->mac_header,
		  __entry->nr_frags, __entry->gso_size, __entry->gso_type)
);

DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_receive_entry,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb)
);

DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,

	TP_PROTO(const struct sk_buff *skb),

	TP_ARGS(skb)
);
#endif /* _TRACE_NET_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
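
/*
 * Usage sketch (illustrative; the call sites below are a rough sketch of the
 * transmit path, not copied verbatim from net/core/dev.c): the macros above
 * generate trace_<event>() hooks that the networking core calls at the
 * matching points in the datapath, roughly:
 *
 *	trace_net_dev_queue(skb);		// skb queued for transmit
 *	trace_net_dev_start_xmit(skb, dev);	// about to call the driver
 *	rc = netdev_start_xmit(skb, dev, txq, more);
 *	trace_net_dev_xmit(skb, rc, dev, len);	// driver return code recorded
 *
 *	trace_netif_receive_skb_entry(skb);	// packet entering the RX path
 *
 * The events can also be enabled from userspace via tracefs, e.g.:
 *	echo 1 > /sys/kernel/debug/tracing/events/net/net_dev_xmit/enable
 */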