/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/rbtree.h>
#include <linux/list.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "util.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "thread-stack.h"
#define CALL_PATH_BLOCK_SHIFT 8
#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT)
#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1)

struct call_path_block {
	struct call_path cp[CALL_PATH_BLOCK_SIZE];
	struct list_head node;
};

/**
 * struct call_path_root - root of all call paths.
 * @call_path: root call path
 * @blocks: list of blocks to store call paths
 * @next: next free space
 * @sz: number of spaces
 */
struct call_path_root {
	struct call_path call_path;
	struct list_head blocks;
	size_t next;
	size_t sz;
};
/**
 * struct call_return_processor - provides a call-back to consume call-return
 *                                information.
 * @cpr: call path root
 * @process: call-back that accepts call/return information
 * @data: anonymous data for call-back
 */
struct call_return_processor {
	struct call_path_root *cpr;
	int (*process)(struct call_return *cr, void *data);
	void *data;
};
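
/*
 * Usage sketch (illustrative only; process_call_return() and 'db' are
 * hypothetical names used for the example): a consumer registers a call-back
 * and then feeds branch samples through thread_stack__process().
 *
 *	static int process_call_return(struct call_return *cr, void *db)
 *	{
 *		// consume cr->cp, cr->call_time, cr->return_time,
 *		// cr->branch_count, cr->flags, ...
 *		return 0;
 *	}
 *
 *	crp = call_return_processor__new(process_call_return, db);
 *	// for each branch sample:
 *	//	err = thread_stack__process(thread, comm, sample, from_al,
 *	//				    to_al, ref, crp);
 *	// when done:
 *	//	call_return_processor__free(crp);
 */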

#define STACK_GROWTH 2048

/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @cp: call path
 * @no_call: a 'call' was not seen
 */
struct thread_stack_entry {
	u64 ret_addr;
	u64 timestamp;
	u64 ref;
	u64 branch_count;
	struct call_path *cp;
	bool no_call;
};

/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @comm: current comm
 */
struct thread_stack {
	struct thread_stack_entry *stack;
	size_t cnt;
	size_t sz;
	u64 trace_nr;
	u64 branch_count;
	u64 kernel_start;
	u64 last_time;
	struct call_return_processor *crp;
	struct comm *comm;
};

static int thread_stack__grow(struct thread_stack *ts)
{
	struct thread_stack_entry *new_stack;
	size_t sz, new_sz;

	new_sz = ts->sz + STACK_GROWTH;
	sz = new_sz * sizeof(struct thread_stack_entry);

	new_stack = realloc(ts->stack, sz);
	if (!new_stack)
		return -ENOMEM;

	ts->stack = new_stack;
	ts->sz = new_sz;

	return 0;
}

static struct thread_stack *thread_stack__new(struct thread *thread,
					      struct call_return_processor *crp)
{
	struct thread_stack *ts;

	ts = zalloc(sizeof(struct thread_stack));
	if (!ts)
		return NULL;

	if (thread_stack__grow(ts)) {
		free(ts);
		return NULL;
	}

	if (thread->mg && thread->mg->machine)
		ts->kernel_start = machine__kernel_start(thread->mg->machine);
	else
		ts->kernel_start = 1ULL << 63;
	ts->crp = crp;

	return ts;
}

static int thread_stack__push(struct thread_stack *ts, u64 ret_addr)
{
	int err = 0;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err) {
			pr_warning("Out of memory: discarding thread stack\n");
			ts->cnt = 0;
		}
	}
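
	/*
	 * Note: if growing failed, the stack was discarded (ts->cnt reset to
	 * zero above) but the previously allocated buffer is still valid, so
	 * the new return address can still be recorded here.
	 */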
	ts->stack[ts->cnt++].ret_addr = ret_addr;

	return err;
}

static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
	size_t i;

	/*
	 * In some cases there may be functions which are not seen to return.
	 * For example when setjmp / longjmp has been used.  Or the perf
	 * context switch in the kernel which doesn't stop and start tracing
	 * in exactly the same code path.  When that happens the return
	 * address will be further down the stack.  If the return address is
	 * not found at all, we assume the opposite (i.e. this is a return
	 * for a call that wasn't seen for some reason) and leave the stack
	 * alone.
	 */
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].ret_addr == ret_addr) {
			ts->cnt = i;
			return;
		}
	}
}

static bool thread_stack__in_kernel(struct thread_stack *ts)
{
	if (!ts->cnt)
		return false;

	return ts->stack[ts->cnt - 1].cp->in_kernel;
}

static int thread_stack__call_return(struct thread *thread,
				     struct thread_stack *ts, size_t idx,
				     u64 timestamp, u64 ref, bool no_return)
{
	struct call_return_processor *crp = ts->crp;
	struct thread_stack_entry *tse;
	struct call_return cr = {
		.thread = thread,
		.comm = ts->comm,
		.db_id = 0,
	};

	tse = &ts->stack[idx];
	cr.cp = tse->cp;
	cr.call_time = tse->timestamp;
	cr.return_time = timestamp;
	cr.branch_count = ts->branch_count - tse->branch_count;
	cr.call_ref = tse->ref;
	cr.return_ref = ref;
	if (tse->no_call)
		cr.flags |= CALL_RETURN_NO_CALL;
	if (no_return)
		cr.flags |= CALL_RETURN_NO_RETURN;

	return crp->process(&cr, crp->data);
}

static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
	struct call_return_processor *crp = ts->crp;
	int err;

	if (!crp) {
		ts->cnt = 0;
		return 0;
	}

	while (ts->cnt) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						ts->last_time, 0, true);
		if (err) {
			pr_err("Error flushing thread stack!\n");
			ts->cnt = 0;
			return err;
		}
	}

	return 0;
}

int thread_stack__flush(struct thread *thread)
{
	if (thread->ts)
		return __thread_stack__flush(thread, thread->ts);

	return 0;
}
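
/*
 * thread_stack__event() keeps the stack up to date from branch events when no
 * call/return processor is in use: only return addresses are recorded, for
 * thread_stack__sample() to turn into a synthesized call chain.  Once a
 * call/return processor has been set up (see thread_stack__process()), this
 * function only tracks the trace number and flushes on discontinuity.
 */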
int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
			u64 to_ip, u16 insn_len, u64 trace_nr)
{
	if (!thread)
		return -EINVAL;

	if (!thread->ts) {
		thread->ts = thread_stack__new(thread, NULL);
		if (!thread->ts) {
			pr_warning("Out of memory: no thread stack\n");
			return -ENOMEM;
		}
		thread->ts->trace_nr = trace_nr;
	}

	/*
	 * When the trace is discontinuous, the trace_nr changes.  In that
	 * case the stack might be completely invalid.  Better to report
	 * nothing than to report something misleading, so flush the stack.
	 */
	if (trace_nr != thread->ts->trace_nr) {
		if (thread->ts->trace_nr)
			__thread_stack__flush(thread, thread->ts);
		thread->ts->trace_nr = trace_nr;
	}

	/* Stop here if thread_stack__process() is in use */
	if (thread->ts->crp)
		return 0;

	if (flags & PERF_IP_FLAG_CALL) {
		u64 ret_addr;

		if (!to_ip)
			return 0;
		ret_addr = from_ip + insn_len;
		if (ret_addr == to_ip)
			return 0; /* Zero-length calls are excluded */
		return thread_stack__push(thread->ts, ret_addr);
	} else if (flags & PERF_IP_FLAG_RETURN) {
		if (!from_ip)
			return 0;
		thread_stack__pop(thread->ts, to_ip);
	}

	return 0;
}

void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
{
	if (!thread || !thread->ts)
		return;

	if (trace_nr != thread->ts->trace_nr) {
		if (thread->ts->trace_nr)
			__thread_stack__flush(thread, thread->ts);
		thread->ts->trace_nr = trace_nr;
	}
}

void thread_stack__free(struct thread *thread)
{
	if (thread->ts) {
		__thread_stack__flush(thread, thread->ts);
		zfree(&thread->ts->stack);
		zfree(&thread->ts);
	}
}

void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
			  size_t sz, u64 ip)
{
	size_t i;

	if (!thread || !thread->ts)
		chain->nr = 1;
	else
		chain->nr = min(sz, thread->ts->cnt + 1);

	chain->ips[0] = ip;

	for (i = 1; i < chain->nr; i++)
		chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
}

static void call_path__init(struct call_path *cp, struct call_path *parent,
			    struct symbol *sym, u64 ip, bool in_kernel)
{
	cp->parent = parent;
	cp->sym = sym;
	cp->ip = sym ? 0 : ip;
	cp->db_id = 0;
	cp->in_kernel = in_kernel;
	RB_CLEAR_NODE(&cp->rb_node);
	cp->children = RB_ROOT;
}

static struct call_path_root *call_path_root__new(void)
{
	struct call_path_root *cpr;

	cpr = zalloc(sizeof(struct call_path_root));
	if (!cpr)
		return NULL;
	call_path__init(&cpr->call_path, NULL, NULL, 0, false);
	INIT_LIST_HEAD(&cpr->blocks);
	return cpr;
}

static void call_path_root__free(struct call_path_root *cpr)
{
	struct call_path_block *pos, *n;

	list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
		list_del(&pos->node);
		free(pos);
	}
	free(cpr);
}
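
/*
 * Call paths are allocated CALL_PATH_BLOCK_SIZE at a time rather than
 * individually.  'next' counts allocations across all blocks, so
 * 'next & CALL_PATH_BLOCK_MASK' is the index into the most recently added
 * block.
 */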
static struct call_path *call_path__new(struct call_path_root *cpr,
					struct call_path *parent,
					struct symbol *sym, u64 ip,
					bool in_kernel)
{
	struct call_path_block *cpb;
	struct call_path *cp;
	size_t n;

	if (cpr->next < cpr->sz) {
		cpb = list_last_entry(&cpr->blocks, struct call_path_block,
				      node);
	} else {
		cpb = zalloc(sizeof(struct call_path_block));
		if (!cpb)
			return NULL;
		list_add_tail(&cpb->node, &cpr->blocks);
		cpr->sz += CALL_PATH_BLOCK_SIZE;
	}

	n = cpr->next++ & CALL_PATH_BLOCK_MASK;
	cp = &cpb->cp[n];

	call_path__init(cp, parent, sym, ip, in_kernel);

	return cp;
}
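
/*
 * Each call path keeps its children in an rb-tree keyed by (sym, ip), so a
 * child is found or created in time logarithmic in the number of children.
 * The ordering compares the symbol pointer values, which is not a meaningful
 * ordering of symbols but is sufficient for use as a lookup key.
 */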
static struct call_path *call_path__findnew(struct call_path_root *cpr,
					    struct call_path *parent,
					    struct symbol *sym, u64 ip, u64 ks)
{
	struct rb_node **p;
	struct rb_node *node_parent = NULL;
	struct call_path *cp;
	bool in_kernel = ip >= ks;

	if (sym)
		ip = 0;

	if (!parent)
		return call_path__new(cpr, parent, sym, ip, in_kernel);

	p = &parent->children.rb_node;
	while (*p != NULL) {
		node_parent = *p;
		cp = rb_entry(node_parent, struct call_path, rb_node);

		if (cp->sym == sym && cp->ip == ip)
			return cp;

		if (sym < cp->sym || (sym == cp->sym && ip < cp->ip))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	cp = call_path__new(cpr, parent, sym, ip, in_kernel);
	if (!cp)
		return NULL;

	rb_link_node(&cp->rb_node, node_parent, p);
	rb_insert_color(&cp->rb_node, &parent->children);

	return cp;
}

struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, void *data),
			   void *data)
{
	struct call_return_processor *crp;

	crp = zalloc(sizeof(struct call_return_processor));
	if (!crp)
		return NULL;
	crp->cpr = call_path_root__new();
	if (!crp->cpr)
		goto out_free;
	crp->process = process;
	crp->data = data;
	return crp;

out_free:
	free(crp);
	return NULL;
}

void call_return_processor__free(struct call_return_processor *crp)
{
	if (crp) {
		call_path_root__free(crp->cpr);
		free(crp);
	}
}

static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
				 u64 timestamp, u64 ref, struct call_path *cp,
				 bool no_call)
{
	struct thread_stack_entry *tse;
	int err;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err)
			return err;
	}

	tse = &ts->stack[ts->cnt++];
	tse->ret_addr = ret_addr;
	tse->timestamp = timestamp;
	tse->ref = ref;
	tse->branch_count = ts->branch_count;
	tse->cp = cp;
	tse->no_call = no_call;

	return 0;
}
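
/*
 * thread_stack__pop_cp() returns a negative error code on failure, the
 * call-back's result (normally zero) when a matching call is found and
 * consumed, and a positive value when no matching call is on the stack;
 * thread_stack__process() treats the positive case as a 'return' without a
 * 'call'.
 */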
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
				u64 ret_addr, u64 timestamp, u64 ref,
				struct symbol *sym)
{
	int err;

	if (!ts->cnt)
		return 1;

	if (ts->cnt == 1) {
		struct thread_stack_entry *tse = &ts->stack[0];

		if (tse->cp->sym == sym)
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
	}

	if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
		return thread_stack__call_return(thread, ts, --ts->cnt,
						 timestamp, ref, false);
	} else {
		size_t i = ts->cnt - 1;

		while (i--) {
			if (ts->stack[i].ret_addr != ret_addr)
				continue;
			i += 1;
			while (ts->cnt > i) {
				err = thread_stack__call_return(thread, ts,
								--ts->cnt,
								timestamp, ref,
								true);
				if (err)
					return err;
			}
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
		}
	}

	return 1;
}

static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts,
				struct perf_sample *sample,
				struct addr_location *from_al,
				struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	struct symbol *sym;
	u64 ip;

	if (sample->ip) {
		ip = sample->ip;
		sym = from_al->sym;
	} else if (sample->addr) {
		ip = sample->addr;
		sym = to_al->sym;
	} else {
		return 0;
	}

	cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp,
				     true);
}
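
/*
 * Handle a 'return' for which no matching 'call' was seen.  If the return
 * crosses from the kernel back to userspace, any kernel entries still on the
 * stack are popped first, since they can no longer be returned to.
 */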
static int thread_stack__no_call_return(struct thread *thread,
					struct thread_stack *ts,
					struct perf_sample *sample,
					struct addr_location *from_al,
					struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp, *parent;
	u64 ks = ts->kernel_start;
	int err;

	if (sample->ip >= ks && sample->addr < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}

		/* If the stack is empty, push the userspace address */
		if (!ts->cnt) {
			cp = call_path__findnew(cpr, &cpr->call_path,
						to_al->sym, sample->addr,
						ts->kernel_start);
			if (!cp)
				return -ENOMEM;
			return thread_stack__push_cp(ts, 0, sample->time, ref,
						     cp, true);
		}
	} else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
		/* Return to userspace, so pop all kernel addresses */
		while (thread_stack__in_kernel(ts)) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							sample->time, ref,
							true);
			if (err)
				return err;
		}
	}

	if (ts->cnt)
		parent = ts->stack[ts->cnt - 1].cp;
	else
		parent = &cpr->call_path;

	/* This 'return' had no 'call', so push and pop top of stack */
	cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
				    true);
	if (err)
		return err;

	return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
				    to_al->sym);
}

static int thread_stack__trace_begin(struct thread *thread,
				     struct thread_stack *ts, u64 timestamp,
				     u64 ref)
{
	struct thread_stack_entry *tse;
	int err;

	if (!ts->cnt)
		return 0;

	/* Pop trace end */
	tse = &ts->stack[ts->cnt - 1];
	if (tse->cp->sym == NULL && tse->cp->ip == 0) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						timestamp, ref, false);
		if (err)
			return err;
	}

	return 0;
}

static int thread_stack__trace_end(struct thread_stack *ts,
				   struct perf_sample *sample, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	u64 ret_addr;

	/* No point having 'trace end' on the bottom of the stack */
	if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
		return 0;

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
				ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	ret_addr = sample->ip + sample->insn_len;

	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
				     false);
}

int thread_stack__process(struct thread *thread, struct comm *comm,
			  struct perf_sample *sample,
			  struct addr_location *from_al,
			  struct addr_location *to_al, u64 ref,
			  struct call_return_processor *crp)
{
	struct thread_stack *ts = thread->ts;
	int err = 0;

	if (ts) {
		if (!ts->crp) {
			/* Supersede thread_stack__event() */
			thread_stack__free(thread);
			thread->ts = thread_stack__new(thread, crp);
			if (!thread->ts)
				return -ENOMEM;
			ts = thread->ts;
			ts->comm = comm;
		}
	} else {
		thread->ts = thread_stack__new(thread, crp);
		if (!thread->ts)
			return -ENOMEM;
		ts = thread->ts;
		ts->comm = comm;
	}

	/* Flush stack on exec */
	if (ts->comm != comm && thread->pid_ == thread->tid) {
		err = __thread_stack__flush(thread, ts);
		if (err)
			return err;
		ts->comm = comm;
	}

	/* If the stack is empty, put the current symbol on the stack */
	if (!ts->cnt) {
		err = thread_stack__bottom(thread, ts, sample, from_al, to_al,
					   ref);
		if (err)
			return err;
	}

	ts->branch_count += 1;
	ts->last_time = sample->time;

	if (sample->flags & PERF_IP_FLAG_CALL) {
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;
		u64 ret_addr;

		if (!sample->ip || !sample->addr)
			return 0;

		ret_addr = sample->ip + sample->insn_len;
		if (ret_addr == sample->addr)
			return 0; /* Zero-length calls are excluded */

		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		if (!cp)
			return -ENOMEM;
		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
					    cp, false);
	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
		if (!sample->ip || !sample->addr)
			return 0;

		err = thread_stack__pop_cp(thread, ts, sample->addr,
					   sample->time, ref, from_al->sym);
		if (err) {
			if (err < 0)
				return err;
			err = thread_stack__no_call_return(thread, ts, sample,
							   from_al, to_al, ref);
		}
	} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
		err = thread_stack__trace_begin(thread, ts, sample->time, ref);
	} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
		err = thread_stack__trace_end(ts, sample, ref);
	}

	return err;
}