/* hist.c — perf hist column (hpp) formatting and sorting. */
  1. #include <math.h>
  2. #include <linux/compiler.h>
  3. #include "../util/hist.h"
  4. #include "../util/util.h"
  5. #include "../util/sort.h"
  6. #include "../util/evsel.h"
  7. /* hist period print (hpp) functions */
  8. #define hpp__call_print_fn(hpp, fn, fmt, ...) \
  9. ({ \
  10. int __ret = fn(hpp, fmt, ##__VA_ARGS__); \
  11. advance_hpp(hpp, __ret); \
  12. __ret; \
  13. })
  14. static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
  15. hpp_field_fn get_field, const char *fmt, int len,
  16. hpp_snprint_fn print_fn, bool fmt_percent)
  17. {
  18. int ret;
  19. struct hists *hists = he->hists;
  20. struct perf_evsel *evsel = hists_to_evsel(hists);
  21. char *buf = hpp->buf;
  22. size_t size = hpp->size;
  23. if (fmt_percent) {
  24. double percent = 0.0;
  25. u64 total = hists__total_period(hists);
  26. if (total)
  27. percent = 100.0 * get_field(he) / total;
  28. ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
  29. } else
  30. ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));
  31. if (perf_evsel__is_group_event(evsel)) {
  32. int prev_idx, idx_delta;
  33. struct hist_entry *pair;
  34. int nr_members = evsel->nr_members;
  35. prev_idx = perf_evsel__group_idx(evsel);
  36. list_for_each_entry(pair, &he->pairs.head, pairs.node) {
  37. u64 period = get_field(pair);
  38. u64 total = hists__total_period(pair->hists);
  39. if (!total)
  40. continue;
  41. evsel = hists_to_evsel(pair->hists);
  42. idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
  43. while (idx_delta--) {
  44. /*
  45. * zero-fill group members in the middle which
  46. * have no sample
  47. */
  48. if (fmt_percent) {
  49. ret += hpp__call_print_fn(hpp, print_fn,
  50. fmt, len, 0.0);
  51. } else {
  52. ret += hpp__call_print_fn(hpp, print_fn,
  53. fmt, len, 0ULL);
  54. }
  55. }
  56. if (fmt_percent) {
  57. ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
  58. 100.0 * period / total);
  59. } else {
  60. ret += hpp__call_print_fn(hpp, print_fn, fmt,
  61. len, period);
  62. }
  63. prev_idx = perf_evsel__group_idx(evsel);
  64. }
  65. idx_delta = nr_members - prev_idx - 1;
  66. while (idx_delta--) {
  67. /*
  68. * zero-fill group members at last which have no sample
  69. */
  70. if (fmt_percent) {
  71. ret += hpp__call_print_fn(hpp, print_fn,
  72. fmt, len, 0.0);
  73. } else {
  74. ret += hpp__call_print_fn(hpp, print_fn,
  75. fmt, len, 0ULL);
  76. }
  77. }
  78. }
  79. /*
  80. * Restore original buf and size as it's where caller expects
  81. * the result will be saved.
  82. */
  83. hpp->buf = buf;
  84. hpp->size = size;
  85. return ret;
  86. }
  87. int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
  88. struct hist_entry *he, hpp_field_fn get_field,
  89. const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
  90. {
  91. int len = fmt->user_len ?: fmt->len;
  92. if (symbol_conf.field_sep) {
  93. return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
  94. print_fn, fmt_percent);
  95. }
  96. if (fmt_percent)
  97. len -= 2; /* 2 for a space and a % sign */
  98. else
  99. len -= 1;
  100. return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
  101. }
  102. int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
  103. struct hist_entry *he, hpp_field_fn get_field,
  104. const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
  105. {
  106. if (!symbol_conf.cumulate_callchain) {
  107. int len = fmt->user_len ?: fmt->len;
  108. return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
  109. }
  110. return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
  111. }
  112. static int field_cmp(u64 field_a, u64 field_b)
  113. {
  114. if (field_a > field_b)
  115. return 1;
  116. if (field_a < field_b)
  117. return -1;
  118. return 0;
  119. }
  120. static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
  121. hpp_field_fn get_field)
  122. {
  123. s64 ret;
  124. int i, nr_members;
  125. struct perf_evsel *evsel;
  126. struct hist_entry *pair;
  127. u64 *fields_a, *fields_b;
  128. ret = field_cmp(get_field(a), get_field(b));
  129. if (ret || !symbol_conf.event_group)
  130. return ret;
  131. evsel = hists_to_evsel(a->hists);
  132. if (!perf_evsel__is_group_event(evsel))
  133. return ret;
  134. nr_members = evsel->nr_members;
  135. fields_a = calloc(nr_members, sizeof(*fields_a));
  136. fields_b = calloc(nr_members, sizeof(*fields_b));
  137. if (!fields_a || !fields_b)
  138. goto out;
  139. list_for_each_entry(pair, &a->pairs.head, pairs.node) {
  140. evsel = hists_to_evsel(pair->hists);
  141. fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
  142. }
  143. list_for_each_entry(pair, &b->pairs.head, pairs.node) {
  144. evsel = hists_to_evsel(pair->hists);
  145. fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
  146. }
  147. for (i = 1; i < nr_members; i++) {
  148. ret = field_cmp(fields_a[i], fields_b[i]);
  149. if (ret)
  150. break;
  151. }
  152. out:
  153. free(fields_a);
  154. free(fields_b);
  155. return ret;
  156. }
  157. static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
  158. hpp_field_fn get_field)
  159. {
  160. s64 ret = 0;
  161. if (symbol_conf.cumulate_callchain) {
  162. /*
  163. * Put caller above callee when they have equal period.
  164. */
  165. ret = field_cmp(get_field(a), get_field(b));
  166. if (ret)
  167. return ret;
  168. if (a->thread != b->thread || !symbol_conf.use_callchain)
  169. return 0;
  170. ret = b->callchain->max_depth - a->callchain->max_depth;
  171. }
  172. return ret;
  173. }
  174. static int hpp__width_fn(struct perf_hpp_fmt *fmt,
  175. struct perf_hpp *hpp __maybe_unused,
  176. struct perf_evsel *evsel)
  177. {
  178. int len = fmt->user_len ?: fmt->len;
  179. if (symbol_conf.event_group)
  180. len = max(len, evsel->nr_members * fmt->len);
  181. if (len < (int)strlen(fmt->name))
  182. len = strlen(fmt->name);
  183. return len;
  184. }
  185. static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
  186. struct perf_evsel *evsel)
  187. {
  188. int len = hpp__width_fn(fmt, hpp, evsel);
  189. return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
  190. }
  191. static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
  192. {
  193. va_list args;
  194. ssize_t ssize = hpp->size;
  195. double percent;
  196. int ret, len;
  197. va_start(args, fmt);
  198. len = va_arg(args, int);
  199. percent = va_arg(args, double);
  200. ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
  201. va_end(args);
  202. return (ret >= ssize) ? (ssize - 1) : ret;
  203. }
  204. static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
  205. {
  206. va_list args;
  207. ssize_t ssize = hpp->size;
  208. int ret;
  209. va_start(args, fmt);
  210. ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
  211. va_end(args);
  212. return (ret >= ssize) ? (ssize - 1) : ret;
  213. }
  214. #define __HPP_COLOR_PERCENT_FN(_type, _field) \
  215. static u64 he_get_##_field(struct hist_entry *he) \
  216. { \
  217. return he->stat._field; \
  218. } \
  219. \
  220. static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \
  221. struct perf_hpp *hpp, struct hist_entry *he) \
  222. { \
  223. return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
  224. hpp_color_scnprintf, true); \
  225. }
  226. #define __HPP_ENTRY_PERCENT_FN(_type, _field) \
  227. static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
  228. struct perf_hpp *hpp, struct hist_entry *he) \
  229. { \
  230. return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \
  231. hpp_entry_scnprintf, true); \
  232. }
  233. #define __HPP_SORT_FN(_type, _field) \
  234. static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
  235. struct hist_entry *a, struct hist_entry *b) \
  236. { \
  237. return __hpp__sort(a, b, he_get_##_field); \
  238. }
  239. #define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \
  240. static u64 he_get_acc_##_field(struct hist_entry *he) \
  241. { \
  242. return he->stat_acc->_field; \
  243. } \
  244. \
  245. static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \
  246. struct perf_hpp *hpp, struct hist_entry *he) \
  247. { \
  248. return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
  249. hpp_color_scnprintf, true); \
  250. }
  251. #define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \
  252. static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
  253. struct perf_hpp *hpp, struct hist_entry *he) \
  254. { \
  255. return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \
  256. hpp_entry_scnprintf, true); \
  257. }
  258. #define __HPP_SORT_ACC_FN(_type, _field) \
  259. static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
  260. struct hist_entry *a, struct hist_entry *b) \
  261. { \
  262. return __hpp__sort_acc(a, b, he_get_acc_##_field); \
  263. }
  264. #define __HPP_ENTRY_RAW_FN(_type, _field) \
  265. static u64 he_get_raw_##_field(struct hist_entry *he) \
  266. { \
  267. return he->stat._field; \
  268. } \
  269. \
  270. static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
  271. struct perf_hpp *hpp, struct hist_entry *he) \
  272. { \
  273. return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \
  274. hpp_entry_scnprintf, false); \
  275. }
  276. #define __HPP_SORT_RAW_FN(_type, _field) \
  277. static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
  278. struct hist_entry *a, struct hist_entry *b) \
  279. { \
  280. return __hpp__sort(a, b, he_get_raw_##_field); \
  281. }
  282. #define HPP_PERCENT_FNS(_type, _field) \
  283. __HPP_COLOR_PERCENT_FN(_type, _field) \
  284. __HPP_ENTRY_PERCENT_FN(_type, _field) \
  285. __HPP_SORT_FN(_type, _field)
  286. #define HPP_PERCENT_ACC_FNS(_type, _field) \
  287. __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \
  288. __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \
  289. __HPP_SORT_ACC_FN(_type, _field)
  290. #define HPP_RAW_FNS(_type, _field) \
  291. __HPP_ENTRY_RAW_FN(_type, _field) \
  292. __HPP_SORT_RAW_FN(_type, _field)
  293. HPP_PERCENT_FNS(overhead, period)
  294. HPP_PERCENT_FNS(overhead_sys, period_sys)
  295. HPP_PERCENT_FNS(overhead_us, period_us)
  296. HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
  297. HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
  298. HPP_PERCENT_ACC_FNS(overhead_acc, period)
  299. HPP_RAW_FNS(samples, nr_events)
  300. HPP_RAW_FNS(period, period)
  301. static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
  302. struct hist_entry *a __maybe_unused,
  303. struct hist_entry *b __maybe_unused)
  304. {
  305. return 0;
  306. }
  307. #define HPP__COLOR_PRINT_FNS(_name, _fn) \
  308. { \
  309. .name = _name, \
  310. .header = hpp__header_fn, \
  311. .width = hpp__width_fn, \
  312. .color = hpp__color_ ## _fn, \
  313. .entry = hpp__entry_ ## _fn, \
  314. .cmp = hpp__nop_cmp, \
  315. .collapse = hpp__nop_cmp, \
  316. .sort = hpp__sort_ ## _fn, \
  317. }
  318. #define HPP__COLOR_ACC_PRINT_FNS(_name, _fn) \
  319. { \
  320. .name = _name, \
  321. .header = hpp__header_fn, \
  322. .width = hpp__width_fn, \
  323. .color = hpp__color_ ## _fn, \
  324. .entry = hpp__entry_ ## _fn, \
  325. .cmp = hpp__nop_cmp, \
  326. .collapse = hpp__nop_cmp, \
  327. .sort = hpp__sort_ ## _fn, \
  328. }
  329. #define HPP__PRINT_FNS(_name, _fn) \
  330. { \
  331. .name = _name, \
  332. .header = hpp__header_fn, \
  333. .width = hpp__width_fn, \
  334. .entry = hpp__entry_ ## _fn, \
  335. .cmp = hpp__nop_cmp, \
  336. .collapse = hpp__nop_cmp, \
  337. .sort = hpp__sort_ ## _fn, \
  338. }
  339. struct perf_hpp_fmt perf_hpp__format[] = {
  340. HPP__COLOR_PRINT_FNS("Overhead", overhead),
  341. HPP__COLOR_PRINT_FNS("sys", overhead_sys),
  342. HPP__COLOR_PRINT_FNS("usr", overhead_us),
  343. HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys),
  344. HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us),
  345. HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc),
  346. HPP__PRINT_FNS("Samples", samples),
  347. HPP__PRINT_FNS("Period", period)
  348. };
  349. LIST_HEAD(perf_hpp__list);
  350. LIST_HEAD(perf_hpp__sort_list);
  351. #undef HPP__COLOR_PRINT_FNS
  352. #undef HPP__COLOR_ACC_PRINT_FNS
  353. #undef HPP__PRINT_FNS
  354. #undef HPP_PERCENT_FNS
  355. #undef HPP_PERCENT_ACC_FNS
  356. #undef HPP_RAW_FNS
  357. #undef __HPP_HEADER_FN
  358. #undef __HPP_WIDTH_FN
  359. #undef __HPP_COLOR_PERCENT_FN
  360. #undef __HPP_ENTRY_PERCENT_FN
  361. #undef __HPP_COLOR_ACC_PERCENT_FN
  362. #undef __HPP_ENTRY_ACC_PERCENT_FN
  363. #undef __HPP_ENTRY_RAW_FN
  364. #undef __HPP_SORT_FN
  365. #undef __HPP_SORT_ACC_FN
  366. #undef __HPP_SORT_RAW_FN
  367. void perf_hpp__init(void)
  368. {
  369. struct list_head *list;
  370. int i;
  371. for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
  372. struct perf_hpp_fmt *fmt = &perf_hpp__format[i];
  373. INIT_LIST_HEAD(&fmt->list);
  374. /* sort_list may be linked by setup_sorting() */
  375. if (fmt->sort_list.next == NULL)
  376. INIT_LIST_HEAD(&fmt->sort_list);
  377. }
  378. /*
  379. * If user specified field order, no need to setup default fields.
  380. */
  381. if (is_strict_order(field_order))
  382. return;
  383. if (symbol_conf.cumulate_callchain) {
  384. hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
  385. perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
  386. }
  387. hpp_dimension__add_output(PERF_HPP__OVERHEAD);
  388. if (symbol_conf.show_cpu_utilization) {
  389. hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
  390. hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);
  391. if (perf_guest) {
  392. hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
  393. hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
  394. }
  395. }
  396. if (symbol_conf.show_nr_samples)
  397. hpp_dimension__add_output(PERF_HPP__SAMPLES);
  398. if (symbol_conf.show_total_period)
  399. hpp_dimension__add_output(PERF_HPP__PERIOD);
  400. /* prepend overhead field for backward compatiblity. */
  401. list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list;
  402. if (list_empty(list))
  403. list_add(list, &perf_hpp__sort_list);
  404. if (symbol_conf.cumulate_callchain) {
  405. list = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC].sort_list;
  406. if (list_empty(list))
  407. list_add(list, &perf_hpp__sort_list);
  408. }
  409. }
  410. void perf_hpp__column_register(struct perf_hpp_fmt *format)
  411. {
  412. list_add_tail(&format->list, &perf_hpp__list);
  413. }
  414. void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
  415. {
  416. list_del(&format->list);
  417. }
  418. void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
  419. {
  420. list_add_tail(&format->sort_list, &perf_hpp__sort_list);
  421. }
  422. void perf_hpp__column_enable(unsigned col)
  423. {
  424. BUG_ON(col >= PERF_HPP__MAX_INDEX);
  425. perf_hpp__column_register(&perf_hpp__format[col]);
  426. }
  427. void perf_hpp__column_disable(unsigned col)
  428. {
  429. BUG_ON(col >= PERF_HPP__MAX_INDEX);
  430. perf_hpp__column_unregister(&perf_hpp__format[col]);
  431. }
  432. void perf_hpp__cancel_cumulate(void)
  433. {
  434. if (is_strict_order(field_order))
  435. return;
  436. perf_hpp__column_disable(PERF_HPP__OVERHEAD_ACC);
  437. perf_hpp__format[PERF_HPP__OVERHEAD].name = "Overhead";
  438. }
  439. void perf_hpp__setup_output_field(void)
  440. {
  441. struct perf_hpp_fmt *fmt;
  442. /* append sort keys to output field */
  443. perf_hpp__for_each_sort_list(fmt) {
  444. if (!list_empty(&fmt->list))
  445. continue;
  446. /*
  447. * sort entry fields are dynamically created,
  448. * so they can share a same sort key even though
  449. * the list is empty.
  450. */
  451. if (perf_hpp__is_sort_entry(fmt)) {
  452. struct perf_hpp_fmt *pos;
  453. perf_hpp__for_each_format(pos) {
  454. if (perf_hpp__same_sort_entry(pos, fmt))
  455. goto next;
  456. }
  457. }
  458. perf_hpp__column_register(fmt);
  459. next:
  460. continue;
  461. }
  462. }
  463. void perf_hpp__append_sort_keys(void)
  464. {
  465. struct perf_hpp_fmt *fmt;
  466. /* append output fields to sort keys */
  467. perf_hpp__for_each_format(fmt) {
  468. if (!list_empty(&fmt->sort_list))
  469. continue;
  470. /*
  471. * sort entry fields are dynamically created,
  472. * so they can share a same sort key even though
  473. * the list is empty.
  474. */
  475. if (perf_hpp__is_sort_entry(fmt)) {
  476. struct perf_hpp_fmt *pos;
  477. perf_hpp__for_each_sort_list(pos) {
  478. if (perf_hpp__same_sort_entry(pos, fmt))
  479. goto next;
  480. }
  481. }
  482. perf_hpp__register_sort_field(fmt);
  483. next:
  484. continue;
  485. }
  486. }
  487. void perf_hpp__reset_output_field(void)
  488. {
  489. struct perf_hpp_fmt *fmt, *tmp;
  490. /* reset output fields */
  491. perf_hpp__for_each_format_safe(fmt, tmp) {
  492. list_del_init(&fmt->list);
  493. list_del_init(&fmt->sort_list);
  494. }
  495. /* reset sort keys */
  496. perf_hpp__for_each_sort_list_safe(fmt, tmp) {
  497. list_del_init(&fmt->list);
  498. list_del_init(&fmt->sort_list);
  499. }
  500. }
  501. /*
  502. * See hists__fprintf to match the column widths
  503. */
  504. unsigned int hists__sort_list_width(struct hists *hists)
  505. {
  506. struct perf_hpp_fmt *fmt;
  507. int ret = 0;
  508. bool first = true;
  509. struct perf_hpp dummy_hpp;
  510. perf_hpp__for_each_format(fmt) {
  511. if (perf_hpp__should_skip(fmt))
  512. continue;
  513. if (first)
  514. first = false;
  515. else
  516. ret += 2;
  517. ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
  518. }
  519. if (verbose && sort__has_sym) /* Addr + origin */
  520. ret += 3 + BITS_PER_LONG / 4;
  521. return ret;
  522. }
  523. void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
  524. {
  525. int idx;
  526. if (perf_hpp__is_sort_entry(fmt))
  527. return perf_hpp__reset_sort_width(fmt, hists);
  528. for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
  529. if (fmt == &perf_hpp__format[idx])
  530. break;
  531. }
  532. if (idx == PERF_HPP__MAX_INDEX)
  533. return;
  534. switch (idx) {
  535. case PERF_HPP__OVERHEAD:
  536. case PERF_HPP__OVERHEAD_SYS:
  537. case PERF_HPP__OVERHEAD_US:
  538. case PERF_HPP__OVERHEAD_ACC:
  539. fmt->len = 8;
  540. break;
  541. case PERF_HPP__OVERHEAD_GUEST_SYS:
  542. case PERF_HPP__OVERHEAD_GUEST_US:
  543. fmt->len = 9;
  544. break;
  545. case PERF_HPP__SAMPLES:
  546. case PERF_HPP__PERIOD:
  547. fmt->len = 12;
  548. break;
  549. default:
  550. break;
  551. }
  552. }
  553. void perf_hpp__set_user_width(const char *width_list_str)
  554. {
  555. struct perf_hpp_fmt *fmt;
  556. const char *ptr = width_list_str;
  557. perf_hpp__for_each_format(fmt) {
  558. char *p;
  559. int len = strtol(ptr, &p, 10);
  560. fmt->user_len = len;
  561. if (*p == ',')
  562. ptr = p + 1;
  563. else
  564. break;
  565. }
  566. }