bcache.h

#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>
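
/*
 * The TRACE_HEADER_MULTI_READ guard above lets this header be included
 * more than once: exactly one .c file in the driver expands these
 * tracepoints into real definitions by defining CREATE_TRACE_POINTS before
 * the include, while every other file gets only the trace_*() hooks. A
 * minimal sketch of such a defining translation unit (the exact file is an
 * assumption; in-tree it is one of the bcache .c files):
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/bcache.h>
 */
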
DECLARE_EVENT_CLASS(bcache_request,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned int, orig_major)
		__field(unsigned int, orig_minor)
		__field(sector_t, sector)
		__field(dev_t, orig_sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio->bi_bdev->bd_dev;
		__entry->orig_major = d->disk->major;
		__entry->orig_minor = d->disk->first_minor;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->orig_sector = bio->bi_iter.bi_sector - 16;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
		  (unsigned long long)__entry->orig_sector)
);
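
/*
 * A DECLARE_EVENT_CLASS() only defines the shared record layout and format
 * string; it emits no tracepoint by itself. Each DEFINE_EVENT() that names
 * this class (see bcache_request_start/_end below) stamps out a callable
 * trace_<event>() with this layout. The "- 16" above subtracts what appears
 * to be the default 16-sector data offset of the backing device (the space
 * reserved for the bcache superblock), so orig_sector is reported relative
 * to the start of the cached data.
 */
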
DECLARE_EVENT_CLASS(bkey,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(u32, size)
		__field(u32, inode)
		__field(u64, offset)
		__field(bool, dirty)
	),

	TP_fast_assign(
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
	),

	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
		  __entry->offset, __entry->size, __entry->dirty)
);
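
/*
 * Rendered, a bkey-class event reads inode:offset, then length and dirty
 * bit. An illustrative line with made-up values:
 *
 *	bcache_gc_copy: 1:4096 len 32 dirty 1
 *
 * i.e. a 32-sector extent of inode 1 at sector offset 4096, with the
 * dirty (not yet written back) bit set.
 */
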
DECLARE_EVENT_CLASS(btree_node,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
	),

	TP_printk("bucket %zu", __entry->bucket)
);

/* request.c */

DEFINE_EVENT(bcache_request, bcache_request_start,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);

DEFINE_EVENT(bcache_request, bcache_request_end,
	TP_PROTO(struct bcache_device *d, struct bio *bio),
	TP_ARGS(d, bio)
);
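
/*
 * The request hooks are invoked from drivers/md/bcache/request.c. Sketch
 * of a call site (the surrounding function is an assumption for
 * illustration, not copied from request.c):
 *
 *	static void example_make_request(struct bcache_device *d,
 *					 struct bio *bio)
 *	{
 *		trace_bcache_request_start(d, bio);
 *		// ... submit or redirect the bio ...
 *		trace_bcache_request_end(d, bio);
 *	}
 */
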
DECLARE_EVENT_CLASS(bcache_bio,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
	),

	TP_fast_assign(
		__entry->dev = bio->bi_bdev->bd_dev;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),

	TP_printk("%d,%d %s %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector, __entry->nr_sector)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

TRACE_EVENT(bcache_read,
	TP_PROTO(struct bio *bio, bool hit, bool bypass),
	TP_ARGS(bio, hit, bypass),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, cache_hit)
		__field(bool, bypass)
	),

	TP_fast_assign(
		__entry->dev = bio->bi_bdev->bd_dev;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),

	TP_printk("%d,%d %s %llu + %u hit %u bypass %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
);
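
/*
 * An illustrative rendering with made-up values:
 *
 *	bcache_read: 8,16 R 5120 + 8 hit 1 bypass 0
 *
 * i.e. an 8-sector read at sector 5120 of device 8:16 that was satisfied
 * from the cache and did not bypass it.
 */
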
TRACE_EVENT(bcache_write,
	TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
		 bool writeback, bool bypass),
	TP_ARGS(c, inode, bio, writeback, bypass),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
		__field(u64, inode)
		__field(sector_t, sector)
		__field(unsigned int, nr_sector)
		__array(char, rwbs, 6)
		__field(bool, writeback)
		__field(bool, bypass)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
		__entry->inode = inode;
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio->bi_iter.bi_size >> 9;
		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),

	TP_printk("%pU inode %llu %s %llu + %u writeback %u bypass %u",
		  __entry->uuid, __entry->inode,
		  __entry->rwbs, (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->writeback, __entry->bypass)
);

DEFINE_EVENT(bcache_bio, bcache_read_retry,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

DEFINE_EVENT(bkey, bcache_cache_insert,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

/* Journal */

DECLARE_EVENT_CLASS(cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

DEFINE_EVENT(bkey, bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bcache_bio, bcache_journal_write,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/* Btree */

DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_read,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_write,
	TP_PROTO(struct btree *b),
	TP_ARGS(b),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, block)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->block = b->written;
		__entry->keys = b->keys.set[b->keys.nsets].data->keys;
	),

	TP_printk("bucket %zu written block %u keys %u",
		  __entry->bucket, __entry->block, __entry->keys)
);

DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

DEFINE_EVENT(cache_set, bcache_btree_node_alloc_fail,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(btree_node, bcache_btree_node_free,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_btree_gc_coalesce,
	TP_PROTO(unsigned nodes),
	TP_ARGS(nodes),

	TP_STRUCT__entry(
		__field(unsigned, nodes)
	),

	TP_fast_assign(
		__entry->nodes = nodes;
	),

	TP_printk("coalesced %u nodes", __entry->nodes)
);

DEFINE_EVENT(cache_set, bcache_gc_start,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(cache_set, bcache_gc_end,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

DEFINE_EVENT(bkey, bcache_gc_copy,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_gc_copy_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

TRACE_EVENT(bcache_btree_insert_key,
	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
	TP_ARGS(b, k, op, status),

	TP_STRUCT__entry(
		__field(u64, btree_node)
		__field(u32, btree_level)
		__field(u32, inode)
		__field(u64, offset)
		__field(u32, size)
		__field(u8, dirty)
		__field(u8, op)
		__field(u8, status)
	),

	TP_fast_assign(
		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->btree_level = b->level;
		__entry->inode = KEY_INODE(k);
		__entry->offset = KEY_OFFSET(k);
		__entry->size = KEY_SIZE(k);
		__entry->dirty = KEY_DIRTY(k);
		__entry->op = op;
		__entry->status = status;
	),

	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
		  __entry->status, __entry->op,
		  __entry->btree_node, __entry->btree_level,
		  __entry->inode, __entry->offset,
		  __entry->size, __entry->dirty)
);
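
/*
 * The terse format above decodes as "status for op at node(level): key".
 * An illustrative line with made-up values:
 *
 *	bcache_btree_insert_key: 1 for 0 at 523(0): 2:8192 len 16 dirty 0
 *
 * i.e. insert status 1 for op 0, into the btree node living in bucket 523
 * at level 0, for a clean 16-sector key at inode 2, offset 8192.
 */
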
DECLARE_EVENT_CLASS(btree_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys),

	TP_STRUCT__entry(
		__field(size_t, bucket)
		__field(unsigned, keys)
	),

	TP_fast_assign(
		__entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
		__entry->keys = keys;
	),

	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_split,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_split, bcache_btree_node_compact,
	TP_PROTO(struct btree *b, unsigned keys),
	TP_ARGS(b, keys)
);

DEFINE_EVENT(btree_node, bcache_btree_set_root,
	TP_PROTO(struct btree *b),
	TP_ARGS(b)
);

TRACE_EVENT(bcache_keyscan,
	TP_PROTO(unsigned nr_found,
		 unsigned start_inode, uint64_t start_offset,
		 unsigned end_inode, uint64_t end_offset),
	TP_ARGS(nr_found,
		start_inode, start_offset,
		end_inode, end_offset),

	TP_STRUCT__entry(
		__field(__u32, nr_found)
		__field(__u32, start_inode)
		__field(__u64, start_offset)
		__field(__u32, end_inode)
		__field(__u64, end_offset)
	),

	TP_fast_assign(
		__entry->nr_found = nr_found;
		__entry->start_inode = start_inode;
		__entry->start_offset = start_offset;
		__entry->end_inode = end_inode;
		__entry->end_offset = end_offset;
	),

	TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found,
		  __entry->start_inode, __entry->start_offset,
		  __entry->end_inode, __entry->end_offset)
);

/* Allocator */

TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(unsigned, sectors)
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->offset = bucket << ca->set->bucket_bits;
		__entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors at %d,%d sector=%llu",
		  __entry->sectors, MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);

TRACE_EVENT(bcache_alloc,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->offset = bucket << ca->set->bucket_bits;
	),

	TP_printk("allocated %d,%d sector=%llu", MAJOR(__entry->dev),
		  MINOR(__entry->dev), __entry->offset)
);
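
/*
 * bucket_bits is log2 of the bucket size in sectors, so the shift turns a
 * bucket index into its starting sector on the cache device. A worked
 * example with illustrative numbers: for 512 KiB buckets, bucket_size is
 * 1024 sectors, bucket_bits is 10, and bucket 3 starts at sector
 * 3 << 10 = 3072.
 */
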
TRACE_EVENT(bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned, free)
		__field(unsigned, free_inc)
		__field(unsigned, blocked)
	),

	TP_fast_assign(
		__entry->dev = ca->bdev->bd_dev;
		__entry->free = fifo_used(&ca->free[reserve]);
		__entry->free_inc = fifo_used(&ca->free_inc);
		__entry->blocked = atomic_read(&ca->set->prio_blocked);
	),

	TP_printk("alloc fail %d,%d free %u free_inc %u blocked %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->free,
		  __entry->free_inc, __entry->blocked)
);

/* Background writeback */

DEFINE_EVENT(bkey, bcache_writeback,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

DEFINE_EVENT(bkey, bcache_writeback_collision,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k)
);

#endif /* _TRACE_BCACHE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
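
/*
 * Once compiled in, these events appear under tracefs and can be enabled
 * at runtime, e.g. (assuming tracefs is mounted at the usual location):
 *
 *	echo 1 > /sys/kernel/tracing/events/bcache/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */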