/* ordered-events.c - time-ordered queueing and delivery of perf events */
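
/*
 * Typical lifecycle, as a rough sketch (deliver_fn is a placeholder for
 * the caller's ordered_events__deliver_t callback; the perf session code
 * is the usual caller):
 *
 *	ordered_events__init(&oe, deliver_fn);
 *	while (reading events)
 *		ordered_events__queue(&oe, event, &sample, file_offset);
 *		... ordered_events__flush(&oe, OE_FLUSH__ROUND) at round
 *		    boundaries ...
 *	ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *	ordered_events__free(&oe);
 */
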
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
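
/*
 * Insert @new into the time-sorted event list. Rather than scanning from
 * the list head, start at the previously queued event and walk forward
 * or backward from there, which is cheap when successive events arrive
 * with nearby timestamps.
 */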
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}
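
/*
 * Duplicate the event payload, but only while the total duplicated size
 * stays below max_alloc_size; cur_alloc_size tracks the budget. A NULL
 * return tells the caller to flush and retry.
 */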
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}
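
/* Undo dup_event(): give the budget back and free the copy. */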
static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (event && oe->copy_on_queue) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}
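
/*
 * Get a struct ordered_event slot, trying in order:
 *   1. the cache of already-delivered, recycled entries,
 *   2. the current preallocated buffer of MAX_SAMPLE_BUFFER entries,
 *   3. a fresh malloc'ed buffer, linked into to_free for later cleanup,
 * giving up (NULL) once max_alloc_size would be exceeded.
 */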
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))

static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;

		list_add(&oe->buffer->list, &oe->to_free);

		/* First entry is abused to maintain the to_free list. */
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
		/*
		 * The abused first entry never gets its event pointer
		 * set; clear it so ordered_events__free() doesn't hand
		 * garbage to free_dup_event().
		 */
		oe->buffer->event = NULL;
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		/*
		 * 'new' is still NULL here; bail out instead of
		 * dereferencing it below, and give the duplicated event
		 * back first.
		 */
		free_dup_event(oe, new_event);
		return NULL;
	}

	new->event = new_event;
	return new;
}
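
/* Allocate an ordered_event for @event and queue it at @timestamp. */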
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
			  union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}
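
/*
 * Return a delivered event to the cache for reuse and release any
 * duplicated payload.
 */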
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
	event->event = NULL;
}
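
/*
 * Queue one event for ordered delivery. A zero or all-ones timestamp is
 * rejected with -ETIME; an event older than the last flush is still
 * queued but counted as unordered. On allocation failure, flush half of
 * the queue to free up memory and retry once.
 */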
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  struct perf_sample *sample, u64 file_offset)
{
	u64 timestamp = sample->time;
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp, "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}
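
/*
 * Deliver and delete all queued events with a timestamp up to
 * next_flush, updating last_flush and the 'last' hint as we go. The
 * progress bar is shown only for the unbounded (final) flush.
 */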
static int __ordered_events__flush(struct ordered_events *oe)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}
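
/*
 * Pick the flush limit (next_flush) according to @how, then flush:
 *   FINAL - everything (ULLONG_MAX),
 *   HALF  - the midpoint between the first and the last queued
 *	     timestamp (used to free memory when allocation fails),
 *   ROUND - keep the limit recorded by the previous round; on success,
 *	     remember max_timestamp as the next round's limit,
 *   NONE  - reuse the current next_flush unchanged.
 */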
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
	};
	int err;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = __ordered_events__flush(oe);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}
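
/*
 * Initialize the lists, register the deliver callback and set an
 * effectively unlimited allocation budget.
 */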
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver = deliver;
}
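
/*
 * Free every malloc'ed buffer, threaded through to_free via its abused
 * first entry, along with any still-duplicated payloads.
 */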
void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free_dup_event(oe, event->event);
		free(event);
	}
}