bpf.c 4.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209
  1. #include <stdio.h>
  2. #include <sys/epoll.h>
  3. #include <util/bpf-loader.h>
  4. #include <util/evlist.h>
  5. #include "tests.h"
  6. #include "llvm.h"
  7. #include "debug.h"
  8. #define NR_ITERS 111
  9. #ifdef HAVE_LIBBPF_SUPPORT
  10. static int epoll_pwait_loop(void)
  11. {
  12. int i;
  13. /* Should fail NR_ITERS times */
  14. for (i = 0; i < NR_ITERS; i++)
  15. epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
  16. return 0;
  17. }
  18. static struct {
  19. enum test_llvm__testcase prog_id;
  20. const char *desc;
  21. const char *name;
  22. const char *msg_compile_fail;
  23. const char *msg_load_fail;
  24. int (*target_func)(void);
  25. int expect_result;
  26. } bpf_testcase_table[] = {
  27. {
  28. LLVM_TESTCASE_BASE,
  29. "Test basic BPF filtering",
  30. "[basic_bpf_test]",
  31. "fix 'perf test LLVM' first",
  32. "load bpf object failed",
  33. &epoll_pwait_loop,
  34. (NR_ITERS + 1) / 2,
  35. },
  36. };
  37. static int do_test(struct bpf_object *obj, int (*func)(void),
  38. int expect)
  39. {
  40. struct record_opts opts = {
  41. .target = {
  42. .uid = UINT_MAX,
  43. .uses_mmap = true,
  44. },
  45. .freq = 0,
  46. .mmap_pages = 256,
  47. .default_interval = 1,
  48. };
  49. char pid[16];
  50. char sbuf[STRERR_BUFSIZE];
  51. struct perf_evlist *evlist;
  52. int i, ret = TEST_FAIL, err = 0, count = 0;
  53. struct parse_events_evlist parse_evlist;
  54. struct parse_events_error parse_error;
  55. bzero(&parse_error, sizeof(parse_error));
  56. bzero(&parse_evlist, sizeof(parse_evlist));
  57. parse_evlist.error = &parse_error;
  58. INIT_LIST_HEAD(&parse_evlist.list);
  59. err = parse_events_load_bpf_obj(&parse_evlist, &parse_evlist.list, obj);
  60. if (err || list_empty(&parse_evlist.list)) {
  61. pr_debug("Failed to add events selected by BPF\n");
  62. if (!err)
  63. return TEST_FAIL;
  64. }
  65. snprintf(pid, sizeof(pid), "%d", getpid());
  66. pid[sizeof(pid) - 1] = '\0';
  67. opts.target.tid = opts.target.pid = pid;
  68. /* Instead of perf_evlist__new_default, don't add default events */
  69. evlist = perf_evlist__new();
  70. if (!evlist) {
  71. pr_debug("No ehough memory to create evlist\n");
  72. return TEST_FAIL;
  73. }
  74. err = perf_evlist__create_maps(evlist, &opts.target);
  75. if (err < 0) {
  76. pr_debug("Not enough memory to create thread/cpu maps\n");
  77. goto out_delete_evlist;
  78. }
  79. perf_evlist__splice_list_tail(evlist, &parse_evlist.list);
  80. evlist->nr_groups = parse_evlist.nr_groups;
  81. perf_evlist__config(evlist, &opts);
  82. err = perf_evlist__open(evlist);
  83. if (err < 0) {
  84. pr_debug("perf_evlist__open: %s\n",
  85. strerror_r(errno, sbuf, sizeof(sbuf)));
  86. goto out_delete_evlist;
  87. }
  88. err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
  89. if (err < 0) {
  90. pr_debug("perf_evlist__mmap: %s\n",
  91. strerror_r(errno, sbuf, sizeof(sbuf)));
  92. goto out_delete_evlist;
  93. }
  94. perf_evlist__enable(evlist);
  95. (*func)();
  96. perf_evlist__disable(evlist);
  97. for (i = 0; i < evlist->nr_mmaps; i++) {
  98. union perf_event *event;
  99. while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
  100. const u32 type = event->header.type;
  101. if (type == PERF_RECORD_SAMPLE)
  102. count ++;
  103. }
  104. }
  105. if (count != expect)
  106. pr_debug("BPF filter result incorrect\n");
  107. ret = TEST_OK;
  108. out_delete_evlist:
  109. perf_evlist__delete(evlist);
  110. return ret;
  111. }
  112. static struct bpf_object *
  113. prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
  114. {
  115. struct bpf_object *obj;
  116. obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
  117. if (IS_ERR(obj)) {
  118. pr_debug("Compile BPF program failed.\n");
  119. return NULL;
  120. }
  121. return obj;
  122. }
  123. static int __test__bpf(int idx)
  124. {
  125. int ret;
  126. void *obj_buf;
  127. size_t obj_buf_sz;
  128. struct bpf_object *obj;
  129. ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
  130. bpf_testcase_table[idx].prog_id,
  131. true);
  132. if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
  133. pr_debug("Unable to get BPF object, %s\n",
  134. bpf_testcase_table[idx].msg_compile_fail);
  135. if (idx == 0)
  136. return TEST_SKIP;
  137. else
  138. return TEST_FAIL;
  139. }
  140. obj = prepare_bpf(obj_buf, obj_buf_sz,
  141. bpf_testcase_table[idx].name);
  142. if (!obj) {
  143. ret = TEST_FAIL;
  144. goto out;
  145. }
  146. ret = do_test(obj,
  147. bpf_testcase_table[idx].target_func,
  148. bpf_testcase_table[idx].expect_result);
  149. out:
  150. bpf__clear();
  151. return ret;
  152. }
  153. int test__bpf(void)
  154. {
  155. unsigned int i;
  156. int err;
  157. if (geteuid() != 0) {
  158. pr_debug("Only root can run BPF test\n");
  159. return TEST_SKIP;
  160. }
  161. for (i = 0; i < ARRAY_SIZE(bpf_testcase_table); i++) {
  162. err = __test__bpf(i);
  163. if (err != TEST_OK)
  164. return err;
  165. }
  166. return TEST_OK;
  167. }
  168. #else
/*
 * Stub used when perf is built without libbpf (HAVE_LIBBPF_SUPPORT
 * unset): report the BPF test as skipped instead of failed.
 */
int test__bpf(void)
{
	pr_debug("Skip BPF test because BPF support is not compiled\n");
	return TEST_SKIP;
}
  174. #endif