/*
 * record.c - helpers for configuring perf record: kernel feature
 * probing (sample id, comm_exec, context switch events) and
 * sample-frequency option validation.
 */
  1. #include "evlist.h"
  2. #include "evsel.h"
  3. #include "cpumap.h"
  4. #include "parse-events.h"
  5. #include <api/fs/fs.h>
  6. #include "util.h"
  7. #include "cloexec.h"
  8. typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
  9. static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
  10. {
  11. struct perf_evlist *evlist;
  12. struct perf_evsel *evsel;
  13. unsigned long flags = perf_event_open_cloexec_flag();
  14. int err = -EAGAIN, fd;
  15. static pid_t pid = -1;
  16. evlist = perf_evlist__new();
  17. if (!evlist)
  18. return -ENOMEM;
  19. if (parse_events(evlist, str, NULL))
  20. goto out_delete;
  21. evsel = perf_evlist__first(evlist);
  22. while (1) {
  23. fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
  24. if (fd < 0) {
  25. if (pid == -1 && errno == EACCES) {
  26. pid = 0;
  27. continue;
  28. }
  29. goto out_delete;
  30. }
  31. break;
  32. }
  33. close(fd);
  34. fn(evsel);
  35. fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
  36. if (fd < 0) {
  37. if (errno == EINVAL)
  38. err = -EINVAL;
  39. goto out_delete;
  40. }
  41. close(fd);
  42. err = 0;
  43. out_delete:
  44. perf_evlist__delete(evlist);
  45. return err;
  46. }
  47. static bool perf_probe_api(setup_probe_fn_t fn)
  48. {
  49. const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
  50. struct cpu_map *cpus;
  51. int cpu, ret, i = 0;
  52. cpus = cpu_map__new(NULL);
  53. if (!cpus)
  54. return false;
  55. cpu = cpus->map[0];
  56. cpu_map__put(cpus);
  57. do {
  58. ret = perf_do_probe_api(fn, cpu, try[i++]);
  59. if (!ret)
  60. return true;
  61. } while (ret == -EAGAIN && try[i]);
  62. return false;
  63. }
  64. static void perf_probe_sample_identifier(struct perf_evsel *evsel)
  65. {
  66. evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
  67. }
  68. static void perf_probe_comm_exec(struct perf_evsel *evsel)
  69. {
  70. evsel->attr.comm_exec = 1;
  71. }
  72. static void perf_probe_context_switch(struct perf_evsel *evsel)
  73. {
  74. evsel->attr.context_switch = 1;
  75. }
  76. bool perf_can_sample_identifier(void)
  77. {
  78. return perf_probe_api(perf_probe_sample_identifier);
  79. }
  80. static bool perf_can_comm_exec(void)
  81. {
  82. return perf_probe_api(perf_probe_comm_exec);
  83. }
  84. bool perf_can_record_switch_events(void)
  85. {
  86. return perf_probe_api(perf_probe_context_switch);
  87. }
  88. bool perf_can_record_cpu_wide(void)
  89. {
  90. struct perf_event_attr attr = {
  91. .type = PERF_TYPE_SOFTWARE,
  92. .config = PERF_COUNT_SW_CPU_CLOCK,
  93. .exclude_kernel = 1,
  94. };
  95. struct cpu_map *cpus;
  96. int cpu, fd;
  97. cpus = cpu_map__new(NULL);
  98. if (!cpus)
  99. return false;
  100. cpu = cpus->map[0];
  101. cpu_map__put(cpus);
  102. fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
  103. if (fd < 0)
  104. return false;
  105. close(fd);
  106. return true;
  107. }
  108. void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
  109. {
  110. struct perf_evsel *evsel;
  111. bool use_sample_identifier = false;
  112. bool use_comm_exec;
  113. /*
  114. * Set the evsel leader links before we configure attributes,
  115. * since some might depend on this info.
  116. */
  117. if (opts->group)
  118. perf_evlist__set_leader(evlist);
  119. if (evlist->cpus->map[0] < 0)
  120. opts->no_inherit = true;
  121. use_comm_exec = perf_can_comm_exec();
  122. evlist__for_each(evlist, evsel) {
  123. perf_evsel__config(evsel, opts);
  124. if (evsel->tracking && use_comm_exec)
  125. evsel->attr.comm_exec = 1;
  126. }
  127. if (opts->full_auxtrace) {
  128. /*
  129. * Need to be able to synthesize and parse selected events with
  130. * arbitrary sample types, which requires always being able to
  131. * match the id.
  132. */
  133. use_sample_identifier = perf_can_sample_identifier();
  134. evlist__for_each(evlist, evsel)
  135. perf_evsel__set_sample_id(evsel, use_sample_identifier);
  136. } else if (evlist->nr_entries > 1) {
  137. struct perf_evsel *first = perf_evlist__first(evlist);
  138. evlist__for_each(evlist, evsel) {
  139. if (evsel->attr.sample_type == first->attr.sample_type)
  140. continue;
  141. use_sample_identifier = perf_can_sample_identifier();
  142. break;
  143. }
  144. evlist__for_each(evlist, evsel)
  145. perf_evsel__set_sample_id(evsel, use_sample_identifier);
  146. }
  147. perf_evlist__set_id_pos(evlist);
  148. }
  149. static int get_max_rate(unsigned int *rate)
  150. {
  151. return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
  152. }
  153. static int record_opts__config_freq(struct record_opts *opts)
  154. {
  155. bool user_freq = opts->user_freq != UINT_MAX;
  156. unsigned int max_rate;
  157. if (opts->user_interval != ULLONG_MAX)
  158. opts->default_interval = opts->user_interval;
  159. if (user_freq)
  160. opts->freq = opts->user_freq;
  161. /*
  162. * User specified count overrides default frequency.
  163. */
  164. if (opts->default_interval)
  165. opts->freq = 0;
  166. else if (opts->freq) {
  167. opts->default_interval = opts->freq;
  168. } else {
  169. pr_err("frequency and count are zero, aborting\n");
  170. return -1;
  171. }
  172. if (get_max_rate(&max_rate))
  173. return 0;
  174. /*
  175. * User specified frequency is over current maximum.
  176. */
  177. if (user_freq && (max_rate < opts->freq)) {
  178. pr_err("Maximum frequency rate (%u) reached.\n"
  179. "Please use -F freq option with lower value or consider\n"
  180. "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
  181. max_rate);
  182. return -1;
  183. }
  184. /*
  185. * Default frequency is over current maximum.
  186. */
  187. if (max_rate < opts->freq) {
  188. pr_warning("Lowering default frequency rate to %u.\n"
  189. "Please consider tweaking "
  190. "/proc/sys/kernel/perf_event_max_sample_rate.\n",
  191. max_rate);
  192. opts->freq = max_rate;
  193. }
  194. return 0;
  195. }
  196. int record_opts__config(struct record_opts *opts)
  197. {
  198. return record_opts__config_freq(opts);
  199. }
  200. bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
  201. {
  202. struct perf_evlist *temp_evlist;
  203. struct perf_evsel *evsel;
  204. int err, fd, cpu;
  205. bool ret = false;
  206. pid_t pid = -1;
  207. temp_evlist = perf_evlist__new();
  208. if (!temp_evlist)
  209. return false;
  210. err = parse_events(temp_evlist, str, NULL);
  211. if (err)
  212. goto out_delete;
  213. evsel = perf_evlist__last(temp_evlist);
  214. if (!evlist || cpu_map__empty(evlist->cpus)) {
  215. struct cpu_map *cpus = cpu_map__new(NULL);
  216. cpu = cpus ? cpus->map[0] : 0;
  217. cpu_map__put(cpus);
  218. } else {
  219. cpu = evlist->cpus->map[0];
  220. }
  221. while (1) {
  222. fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
  223. perf_event_open_cloexec_flag());
  224. if (fd < 0) {
  225. if (pid == -1 && errno == EACCES) {
  226. pid = 0;
  227. continue;
  228. }
  229. goto out_delete;
  230. }
  231. break;
  232. }
  233. close(fd);
  234. ret = true;
  235. out_delete:
  236. perf_evlist__delete(temp_evlist);
  237. return ret;
  238. }