mmutrace.h
#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
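
/*
 * With TRACE_SYSTEM set to kvmmmu, the events below appear under
 * events/kvmmmu/ in tracefs (e.g. /sys/kernel/tracing/events/kvmmmu/).
 */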

#define KVM_MMU_PAGE_FIELDS                     \
        __field(unsigned long, mmu_valid_gen)   \
        __field(__u64, gfn)                     \
        __field(__u32, role)                    \
        __field(__u32, root_count)              \
        __field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)                         \
        __entry->mmu_valid_gen = sp->mmu_valid_gen;     \
        __entry->gfn = sp->gfn;                         \
        __entry->role = sp->role.word;                  \
        __entry->root_count = sp->root_count;           \
        __entry->unsync = sp->unsync;
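
/*
 * Decode the cached role word and print the page's properties into the
 * trace sequence buffer; the statement expression evaluates to a pointer
 * to the formatted text, so it can be passed straight to a "%s" in
 * TP_printk().
 */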
#define KVM_MMU_PAGE_PRINTK() ({                                        \
        const char *saved_ptr = trace_seq_buffer_ptr(p);                \
        static const char *access_str[] = {                             \
                "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"  \
        };                                                              \
        union kvm_mmu_page_role role;                                   \
                                                                        \
        role.word = __entry->role;                                      \
                                                                        \
        trace_seq_printf(p, "sp gen %lx gfn %llx %u%s q%u%s %s%s"       \
                         " %snxe root %u %s%c", __entry->mmu_valid_gen, \
                         __entry->gfn, role.level,                      \
                         role.cr4_pae ? " pae" : "",                    \
                         role.quadrant,                                 \
                         role.direct ? " direct" : "",                  \
                         access_str[role.access],                       \
                         role.invalid ? " invalid" : "",                \
                         role.nxe ? "" : "!",                           \
                         __entry->root_count,                           \
                         __entry->unsync ? "unsync" : "sync", 0);       \
        saved_ptr;                                                      \
})

#define kvm_mmu_trace_pferr_flags       \
        { PFERR_PRESENT_MASK, "P" },    \
        { PFERR_WRITE_MASK, "W" },      \
        { PFERR_USER_MASK, "U" },       \
        { PFERR_RSVD_MASK, "RSVD" },    \
        { PFERR_FETCH_MASK, "F" }

/*
 * A pagetable walk has started
 */
TRACE_EVENT(
        kvm_mmu_pagetable_walk,
        TP_PROTO(u64 addr, u32 pferr),
        TP_ARGS(addr, pferr),

        TP_STRUCT__entry(
                __field(__u64, addr)
                __field(__u32, pferr)
        ),

        TP_fast_assign(
                __entry->addr = addr;
                __entry->pferr = pferr;
        ),

        TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
                  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

/* We just walked a paging element */
TRACE_EVENT(
        kvm_mmu_paging_element,
        TP_PROTO(u64 pte, int level),
        TP_ARGS(pte, level),

        TP_STRUCT__entry(
                __field(__u64, pte)
                __field(__u32, level)
        ),

        TP_fast_assign(
                __entry->pte = pte;
                __entry->level = level;
        ),

        TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);
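
/*
 * Common class for accessed/dirty bit updates during a guest page table
 * walk; only the guest physical address of the updated entry is logged.
 */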
DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size),

        TP_STRUCT__entry(
                __field(__u64, gpa)
        ),

        TP_fast_assign(
                __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
                                + index * size;
        ),

        TP_printk("gpa %llx", __entry->gpa)
);

/* We set a pte accessed bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size)
);

/* We set a pte dirty bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size)
);
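
/* The guest page table walk failed with the given page fault error code */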
TRACE_EVENT(
        kvm_mmu_walker_error,
        TP_PROTO(u32 pferr),
        TP_ARGS(pferr),

        TP_STRUCT__entry(
                __field(__u32, pferr)
        ),

        TP_fast_assign(
                __entry->pferr = pferr;
        ),

        TP_printk("pferr %x %s", __entry->pferr,
                  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);
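
/*
 * A shadow page was looked up; "created" distinguishes a freshly
 * allocated page from one found in the hash table.
 */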
TRACE_EVENT(
        kvm_mmu_get_page,
        TP_PROTO(struct kvm_mmu_page *sp, bool created),
        TP_ARGS(sp, created),

        TP_STRUCT__entry(
                KVM_MMU_PAGE_FIELDS
                __field(bool, created)
        ),

        TP_fast_assign(
                KVM_MMU_PAGE_ASSIGN(sp)
                __entry->created = created;
        ),

        TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
                  __entry->created ? "new" : "existing")
);

DECLARE_EVENT_CLASS(kvm_mmu_page_class,

        TP_PROTO(struct kvm_mmu_page *sp),
        TP_ARGS(sp),

        TP_STRUCT__entry(
                KVM_MMU_PAGE_FIELDS
        ),

        TP_fast_assign(
                KVM_MMU_PAGE_ASSIGN(sp)
        ),

        TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);
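
/*
 * An spte was set up as an MMIO spte, caching the gfn, the access bits
 * and the current MMIO generation in the spte itself.
 */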
TRACE_EVENT(
        mark_mmio_spte,
        TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
        TP_ARGS(sptep, gfn, access, gen),

        TP_STRUCT__entry(
                __field(void *, sptep)
                __field(gfn_t, gfn)
                __field(unsigned, access)
                __field(unsigned int, gen)
        ),

        TP_fast_assign(
                __entry->sptep = sptep;
                __entry->gfn = gfn;
                __entry->access = access;
                __entry->gen = gen;
        ),

        TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
                  __entry->gfn, __entry->access, __entry->gen)
);
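
/* An MMIO page fault was handled using the information cached in the spte */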
TRACE_EVENT(
        handle_mmio_page_fault,
        TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
        TP_ARGS(addr, gfn, access),

        TP_STRUCT__entry(
                __field(u64, addr)
                __field(gfn_t, gfn)
                __field(unsigned, access)
        ),

        TP_fast_assign(
                __entry->addr = addr;
                __entry->gfn = gfn;
                __entry->access = access;
        ),

        TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
                  __entry->access)
);

#define __spte_satisfied(__spte)                                \
        (__entry->retry && is_writable_pte(__entry->__spte))
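
/*
 * The lockless fast page fault path.  "spurious" and "fixed" come from
 * __spte_satisfied(): the fault was spurious if the old spte already
 * satisfied the access, and fixed if the new spte does.
 */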
TRACE_EVENT(
        fast_page_fault,
        TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
                 u64 *sptep, u64 old_spte, bool retry),
        TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

        TP_STRUCT__entry(
                __field(int, vcpu_id)
                __field(gva_t, gva)
                __field(u32, error_code)
                __field(u64 *, sptep)
                __field(u64, old_spte)
                __field(u64, new_spte)
                __field(bool, retry)
        ),

        TP_fast_assign(
                __entry->vcpu_id = vcpu->vcpu_id;
                __entry->gva = gva;
                __entry->error_code = error_code;
                __entry->sptep = sptep;
                __entry->old_spte = old_spte;
                __entry->new_spte = *sptep;
                __entry->retry = retry;
        ),

        TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
                  " new %llx spurious %d fixed %d", __entry->vcpu_id,
                  __entry->gva, __print_flags(__entry->error_code, "|",
                  kvm_mmu_trace_pferr_flags), __entry->sptep,
                  __entry->old_spte, __entry->new_spte,
                  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
        )
);
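
/*
 * All shadow pages are invalidated at once by bumping mmu_valid_gen;
 * pages left with a stale generation are obsolete and get zapped.
 */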
TRACE_EVENT(
        kvm_mmu_invalidate_zap_all_pages,
        TP_PROTO(struct kvm *kvm),
        TP_ARGS(kvm),

        TP_STRUCT__entry(
                __field(unsigned long, mmu_valid_gen)
                __field(unsigned int, mmu_used_pages)
        ),

        TP_fast_assign(
                __entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
                __entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
        ),

        TP_printk("kvm-mmu-valid-gen %lx used_pages %x",
                  __entry->mmu_valid_gen, __entry->mmu_used_pages
        )
);
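
/*
 * An MMIO spte's cached generation is compared against the current
 * MMIO generation; a mismatch means the cached information is stale.
 */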
TRACE_EVENT(
        check_mmio_spte,
        TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
        TP_ARGS(spte, kvm_gen, spte_gen),

        TP_STRUCT__entry(
                __field(unsigned int, kvm_gen)
                __field(unsigned int, spte_gen)
                __field(u64, spte)
        ),

        TP_fast_assign(
                __entry->kvm_gen = kvm_gen;
                __entry->spte_gen = spte_gen;
                __entry->spte = spte;
        ),

        TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
                  __entry->kvm_gen, __entry->spte_gen,
                  __entry->kvm_gen == __entry->spte_gen
        )
);
#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <trace/define_trace.h>