#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

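/*
 * RING_BUFFER_WRITABLE is passed in the rb_alloc() flags when the
 * buffer is mapped writable by user space: the consumer then updates
 * data_tail itself, so the kernel must not overwrite unread data
 * (cf. the overwrite field below).
 */
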
struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
	struct irq_work			irq_work;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

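/*
 * mmap() layout of the structure above: user_page (the
 * perf_event_mmap_page control page) is page 0 of the mapping,
 * followed by nr_pages data pages; data_pages[] is a flexible array
 * member sized at allocation time.
 */
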
extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

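/*
 * rb_free_rcu() has the signature of an RCU callback so that the last
 * reference holder can defer the free past a grace period; a sketch of
 * the typical caller:
 *
 *	call_rcu(&rb->rcu_head, rb_free_rcu);
 */
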
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

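/*
 * ring_buffer_get()/ring_buffer_put() take and drop rb->refcount; the
 * final put is expected to defer the actual free to RCU via
 * rb_free_rcu() above.
 */
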
static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}
#else
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

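/*
 * perf_data_size() below is the size of the data area in bytes.  Under
 * CONFIG_PERF_USE_VMALLOC the data area is one virtually contiguous
 * allocation and the extra pages are folded into page_order(): eight
 * pages would be recorded as nr_pages == 1 with page_order == 3, which
 * still yields 8 * PAGE_SIZE here.
 */
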
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

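/*
 * The AUX area is a second buffer, mapped behind the data pages at
 * aux_pgoff, intended for hardware that writes directly to memory
 * (Intel PT being the canonical user).
 */
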
static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
									\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

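/*
 * The memcpy_func callback returns the number of bytes it failed to
 * copy, and the generated function returns how much of len was left
 * unwritten (0 on complete success).  The page index wraps with
 * "& (rb->nr_pages - 1)", which relies on nr_pages being a power of
 * two, as perf_mmap() enforces.  A minimal usage sketch (handle
 * prepared by perf_output_begin(), 'rec' a hypothetical caller-side
 * record):
 *
 *	__output_copy(handle, &rec, sizeof(rec));
 */
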
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

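/*
 * The skip variant below advances the handle without storing anything:
 * memcpy_skip() reports full success for every byte, so only the
 * position bookkeeping in DEFINE_OUTPUT_COPY() takes effect.
 */
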
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

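/*
 * __output_copy_user() reads from user memory with page faults
 * disabled, which makes it usable from atomic output paths; on a
 * partial fault, __copy_from_user_inatomic() returns the number of
 * bytes not copied and the generated loop terminates early.
 */
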
DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

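/*
 * A minimal usage sketch, assuming a caller-owned per-CPU array
 * int recursion[4] (one slot per context: task, softirq, hardirq,
 * NMI):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;			(already active in this context)
 *	...
 *	put_recursion_context(recursion, rctx);
 */
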
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */