event.h
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H

#include <limits.h>
#include <stdio.h>

#include "../perf.h"
#include "map.h"
#include "build-id.h"
#include "perf_regs.h"

struct mmap_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;
	u64 len;
	u64 pgoff;
	char filename[PATH_MAX];
};

struct mmap2_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;
	u64 len;
	u64 pgoff;
	u32 maj;
	u32 min;
	u64 ino;
	u64 ino_generation;
	u32 prot;
	u32 flags;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	u32 pid, tid;
	char comm[16];
};

struct fork_event {
	struct perf_event_header header;
	u32 pid, ppid;
	u32 tid, ptid;
	u64 time;
};

struct lost_event {
	struct perf_event_header header;
	u64 id;
	u64 lost;
};

struct lost_samples_event {
	struct perf_event_header header;
	u64 lost;
};

/*
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
 */
struct read_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 value;
	u64 time_enabled;
	u64 time_running;
	u64 id;
};

struct throttle_event {
	struct perf_event_header header;
	u64 time;
	u64 id;
	u64 stream_id;
};

#define PERF_SAMPLE_MASK \
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
	 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
	 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
	 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \
	 PERF_SAMPLE_IDENTIFIER)
/* perf sample has a 16-bit size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
struct sample_event {
	struct perf_event_header header;
	u64 array[];
};

struct regs_dump {
	u64 abi;
	u64 mask;
	u64 *regs;

	/* Cached values/mask filled by first register access. */
	u64 cache_regs[PERF_REGS_MAX];
	u64 cache_mask;
};

struct stack_dump {
	u16 offset;
	u64 size;
	char *data;
};

struct sample_read_value {
	u64 value;
	u64 id;
};

struct sample_read {
	u64 time_enabled;
	u64 time_running;
	union {
		struct {
			u64 nr;
			struct sample_read_value *values;
		} group;
		struct sample_read_value one;
	};
};
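
/*
 * Usage sketch (illustrative only, not part of the original header): which
 * union arm of struct sample_read is valid depends on the event's
 * read_format; with PERF_FORMAT_GROUP set the counts arrive in the 'group'
 * array, otherwise as the single 'one' value. The helper name below is
 * hypothetical.
 *
 *	static u64 sample_read__first_value(const struct sample_read *sr,
 *			u64 read_format)
 *	{
 *		// With PERF_FORMAT_GROUP, counts come as an array of
 *		// {value, id} pairs; otherwise only 'one' is populated.
 *		if (read_format & PERF_FORMAT_GROUP)
 *			return sr->group.nr ? sr->group.values[0].value : 0;
 *		return sr->one.value;
 *	}
 */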
struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 in_tx:1;
	u64 abort:1;
	u64 cycles:16;
	u64 reserved:44;
};

struct branch_entry {
	u64 from;
	u64 to;
	struct branch_flags flags;
};

struct branch_stack {
	u64 nr;
	struct branch_entry entries[0];
};
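
/*
 * Usage sketch (illustrative only, not part of the original header): both
 * ip_callchain and branch_stack carry a count followed by a flexible array,
 * so a consumer simply indexes up to ->nr. The helper name is hypothetical;
 * it assumes <stdio.h> and <inttypes.h> for the PRIx64 format macro.
 *
 *	static void branch_stack__print(const struct branch_stack *bs)
 *	{
 *		u64 i;
 *
 *		// Walk the variable-length entries[] array.
 *		for (i = 0; i < bs->nr; i++)
 *			printf("%#" PRIx64 " -> %#" PRIx64 " (%s)\n",
 *			       bs->entries[i].from, bs->entries[i].to,
 *			       bs->entries[i].flags.mispred ?
 *			       "mispredicted" : "predicted");
 *	}
 */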
enum {
	PERF_IP_FLAG_BRANCH		= 1ULL << 0,
	PERF_IP_FLAG_CALL		= 1ULL << 1,
	PERF_IP_FLAG_RETURN		= 1ULL << 2,
	PERF_IP_FLAG_CONDITIONAL	= 1ULL << 3,
	PERF_IP_FLAG_SYSCALLRET		= 1ULL << 4,
	PERF_IP_FLAG_ASYNC		= 1ULL << 5,
	PERF_IP_FLAG_INTERRUPT		= 1ULL << 6,
	PERF_IP_FLAG_TX_ABORT		= 1ULL << 7,
	PERF_IP_FLAG_TRACE_BEGIN	= 1ULL << 8,
	PERF_IP_FLAG_TRACE_END		= 1ULL << 9,
	PERF_IP_FLAG_IN_TX		= 1ULL << 10,
};

#define PERF_IP_FLAG_CHARS "bcrosyiABEx"

#define PERF_BRANCH_MASK \
	(PERF_IP_FLAG_BRANCH | \
	 PERF_IP_FLAG_CALL | \
	 PERF_IP_FLAG_RETURN | \
	 PERF_IP_FLAG_CONDITIONAL | \
	 PERF_IP_FLAG_SYSCALLRET | \
	 PERF_IP_FLAG_ASYNC | \
	 PERF_IP_FLAG_INTERRUPT | \
	 PERF_IP_FLAG_TX_ABORT | \
	 PERF_IP_FLAG_TRACE_BEGIN | \
	 PERF_IP_FLAG_TRACE_END)

struct perf_sample {
	u64 ip;
	u32 pid, tid;
	u64 time;
	u64 addr;
	u64 id;
	u64 stream_id;
	u64 period;
	u64 weight;
	u64 transaction;
	u32 cpu;
	u32 raw_size;
	u64 data_src;
	u32 flags;
	u16 insn_len;
	void *raw_data;
	struct ip_callchain *callchain;
	struct branch_stack *branch_stack;
	struct regs_dump user_regs;
	struct regs_dump intr_regs;
	struct stack_dump user_stack;
	struct sample_read read;
};

#define PERF_MEM_DATA_SRC_NONE \
	(PERF_MEM_S(OP, NA) | \
	 PERF_MEM_S(LVL, NA) | \
	 PERF_MEM_S(SNOOP, NA) | \
	 PERF_MEM_S(LOCK, NA) | \
	 PERF_MEM_S(TLB, NA))

struct build_id_event {
	struct perf_event_header header;
	pid_t pid;
	u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
	char filename[];
};
enum perf_user_event_type { /* above any possible kernel type */
	PERF_RECORD_USER_TYPE_START		= 64,
	PERF_RECORD_HEADER_ATTR			= 64,
	PERF_RECORD_HEADER_EVENT_TYPE		= 65, /* deprecated */
	PERF_RECORD_HEADER_TRACING_DATA		= 66,
	PERF_RECORD_HEADER_BUILD_ID		= 67,
	PERF_RECORD_FINISHED_ROUND		= 68,
	PERF_RECORD_ID_INDEX			= 69,
	PERF_RECORD_AUXTRACE_INFO		= 70,
	PERF_RECORD_AUXTRACE			= 71,
	PERF_RECORD_AUXTRACE_ERROR		= 72,
	PERF_RECORD_HEADER_MAX
};
enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE	= 1,
	PERF_AUXTRACE_ERROR_MAX
};
/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_EVENT_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The kernel discards mixed-up samples and sends the number in a
 * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
 * in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells
 * exactly how many samples the kernel in fact dropped, i.e. it is the sum of
 * all struct lost_samples_event.lost fields reported.
 *
 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency is not enough to get
 * the total number of low-level events; it is necessary to sum all struct
 * sample_event.period fields and stash the result in total_period.
 */
struct events_stats {
	u64 total_period;
	u64 total_non_filtered_period;
	u64 total_lost;
	u64 total_lost_samples;
	u64 total_aux_lost;
	u64 total_invalid_chains;
	u32 nr_events[PERF_RECORD_HEADER_MAX];
	u32 nr_non_filtered_samples;
	u32 nr_lost_warned;
	u32 nr_unknown_events;
	u32 nr_invalid_chains;
	u32 nr_unknown_id;
	u32 nr_unprocessable_samples;
	u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX];
	u32 nr_proc_map_timeout;
};
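
/*
 * Accumulation sketch (illustrative only, not part of the original header):
 * per the comment above, a consumer bumps nr_events[] once per record and
 * sums the lost/period payloads into the totals. The function name is
 * hypothetical; union perf_event is defined further below in this header.
 *
 *	static void stats__account(struct events_stats *stats,
 *			const union perf_event *event,
 *			const struct perf_sample *sample)
 *	{
 *		// One "chunk" counted per record type.
 *		stats->nr_events[event->header.type]++;
 *
 *		if (event->header.type == PERF_RECORD_LOST)
 *			stats->total_lost += event->lost.lost;
 *		else if (event->header.type == PERF_RECORD_LOST_SAMPLES)
 *			stats->total_lost_samples += event->lost_samples.lost;
 *		else if (event->header.type == PERF_RECORD_SAMPLE)
 *			stats->total_period += sample->period;
 *	}
 */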
struct attr_event {
	struct perf_event_header header;
	struct perf_event_attr attr;
	u64 id[];
};

#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
	u64 event_id;
	char name[MAX_EVENT_NAME];
};

struct event_type_event {
	struct perf_event_header header;
	struct perf_trace_event_type event_type;
};

struct tracing_data_event {
	struct perf_event_header header;
	u32 size;
};

struct id_index_entry {
	u64 id;
	u64 idx;
	u64 cpu;
	u64 tid;
};

struct id_index_event {
	struct perf_event_header header;
	u64 nr;
	struct id_index_entry entries[0];
};

struct auxtrace_info_event {
	struct perf_event_header header;
	u32 type;
	u32 reserved__; /* For alignment */
	u64 priv[];
};

struct auxtrace_event {
	struct perf_event_header header;
	u64 size;
	u64 offset;
	u64 reference;
	u32 idx;
	u32 tid;
	u32 cpu;
	u32 reserved__; /* For alignment */
};

#define MAX_AUXTRACE_ERROR_MSG 64

struct auxtrace_error_event {
	struct perf_event_header header;
	u32 type;
	u32 code;
	u32 cpu;
	u32 pid;
	u32 tid;
	u32 reserved__; /* For alignment */
	u64 ip;
	char msg[MAX_AUXTRACE_ERROR_MSG];
};

struct aux_event {
	struct perf_event_header header;
	u64 aux_offset;
	u64 aux_size;
	u64 flags;
};

struct itrace_start_event {
	struct perf_event_header header;
	u32 pid, tid;
};

struct context_switch_event {
	struct perf_event_header header;
	u32 next_prev_pid;
	u32 next_prev_tid;
};

union perf_event {
	struct perf_event_header	header;
	struct mmap_event		mmap;
	struct mmap2_event		mmap2;
	struct comm_event		comm;
	struct fork_event		fork;
	struct lost_event		lost;
	struct lost_samples_event	lost_samples;
	struct read_event		read;
	struct throttle_event		throttle;
	struct sample_event		sample;
	struct attr_event		attr;
	struct event_type_event		event_type;
	struct tracing_data_event	tracing_data;
	struct build_id_event		build_id;
	struct id_index_event		id_index;
	struct auxtrace_info_event	auxtrace_info;
	struct auxtrace_event		auxtrace;
	struct auxtrace_error_event	auxtrace_error;
	struct aux_event		aux;
	struct itrace_start_event	itrace_start;
	struct context_switch_event	context_switch;
};
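
/*
 * Dispatch sketch (illustrative only, not part of the original header): every
 * record starts with the common perf_event_header, so consumers switch on
 * header.type and then read the matching union member. The function name is
 * hypothetical.
 *
 *	static const char *event__comm_or_file(const union perf_event *event)
 *	{
 *		switch (event->header.type) {
 *		case PERF_RECORD_COMM:
 *			return event->comm.comm;
 *		case PERF_RECORD_MMAP:
 *			return event->mmap.filename;
 *		case PERF_RECORD_MMAP2:
 *			return event->mmap2.filename;
 *		default:
 *			return NULL;
 *		}
 *	}
 */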
void perf_event__print_totals(void);

struct perf_tool;
struct thread_map;

typedef int (*perf_event__handler_t)(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);
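
/*
 * Callback sketch (illustrative only, not part of the original header): the
 * synthesize helpers below invoke a perf_event__handler_t once per generated
 * record, and a non-zero return value is typically treated as an error by the
 * caller. The function name and the use of stderr are hypothetical.
 *
 *	static int print_event_name(struct perf_tool *tool __maybe_unused,
 *			union perf_event *event,
 *			struct perf_sample *sample __maybe_unused,
 *			struct machine *machine __maybe_unused)
 *	{
 *		// perf_event__name() is declared later in this header.
 *		fprintf(stderr, "%s\n", perf_event__name(event->header.type));
 *		return 0;
 *	}
 */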
int perf_event__synthesize_thread_map(struct perf_tool *tool,
		struct thread_map *threads,
		perf_event__handler_t process,
		struct machine *machine, bool mmap_data,
		unsigned int proc_map_timeout);
int perf_event__synthesize_threads(struct perf_tool *tool,
		perf_event__handler_t process,
		struct machine *machine, bool mmap_data,
		unsigned int proc_map_timeout);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
		perf_event__handler_t process,
		struct machine *machine);
int perf_event__synthesize_modules(struct perf_tool *tool,
		perf_event__handler_t process,
		struct machine *machine);

int perf_event__process_comm(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);
int perf_event__process_lost(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);
int perf_event__process_lost_samples(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);
int perf_event__process_aux(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);
int perf_event__process_itrace_start(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);
int perf_event__process_switch(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);
int perf_event__process_mmap2(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);
int perf_event__process_fork(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);
int perf_event__process_exit(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);
int perf_event__process(struct perf_tool *tool,
		union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine);

struct addr_location;

int perf_event__preprocess_sample(const union perf_event *event,
		struct machine *machine,
		struct addr_location *al,
		struct perf_sample *sample);

void addr_location__put(struct addr_location *al);

struct thread;

bool is_bts_event(struct perf_event_attr *attr);
bool sample_addr_correlates_sym(struct perf_event_attr *attr);
void perf_event__preprocess_sample_addr(union perf_event *event,
		struct perf_sample *sample,
		struct thread *thread,
		struct addr_location *al);

const char *perf_event__name(unsigned int id);

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
		u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
		u64 read_format,
		const struct perf_sample *sample,
		bool swapped);

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
		union perf_event *event, pid_t pid,
		perf_event__handler_t process,
		struct machine *machine);

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
		union perf_event *event,
		pid_t pid, pid_t tgid,
		perf_event__handler_t process,
		struct machine *machine,
		bool mmap_data,
		unsigned int proc_map_timeout);

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);

u64 kallsyms__get_function_start(const char *kallsyms_filename,
		const char *symbol_name);

#endif /* __PERF_RECORD_H */
  443. #endif /* __PERF_RECORD_H */