writeback.h

#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/tracepoint.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
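
/*
 * show_inode_state() renders an inode->i_state bitmask as a
 * "|"-separated list of flag names via __print_flags().
 */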
#define show_inode_state(state) \
	__print_flags(state, "|", \
		{I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \
		{I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \
		{I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \
		{I_NEW, "I_NEW"}, \
		{I_WILL_FREE, "I_WILL_FREE"}, \
		{I_FREEING, "I_FREEING"}, \
		{I_CLEAR, "I_CLEAR"}, \
		{I_SYNC, "I_SYNC"}, \
		{I_DIRTY_TIME, "I_DIRTY_TIME"}, \
		{I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED, "I_REFERENCED"} \
	)

/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b)		TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

#define WB_WORK_REASON \
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages")	\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

WB_WORK_REASON

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }
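
/*
 * After this second pass, WB_WORK_REASON expands to a { value, string }
 * initializer list:
 *
 *	{ WB_REASON_BACKGROUND, "background" }, ...
 *	{ WB_REASON_FORKER_THREAD, "forker_thread" }
 *
 * which is the lookup table consumed by
 * __print_symbolic(__entry->reason, WB_WORK_REASON) in the events below.
 * The first pass emitted one TRACE_DEFINE_ENUM() per reason so the enum
 * names appearing in the exported event format files can be resolved to
 * their numeric values by user-space parsers.
 */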

struct wb_writeback_work;

TRACE_EVENT(writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		__entry->ino,
		__entry->index
	)
);

DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		strncpy(__entry->name,
			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->flags = flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK

static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
{
	return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1;
}

static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
{
	struct cgroup *cgrp = wb->memcg_css->cgroup;
	char *path;

	path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
	WARN_ON_ONCE(path != buf);
}

static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
{
	if (wbc->wb)
		return __trace_wb_cgroup_size(wbc->wb);
	else
		return 2;
}

static inline void __trace_wbc_assign_cgroup(char *buf,
					     struct writeback_control *wbc)
{
	if (wbc->wb)
		__trace_wb_assign_cgroup(buf, wbc->wb);
	else
		strcpy(buf, "/");
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
{
	return 2;
}

static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
{
	strcpy(buf, "/");
}

static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
{
	return 2;
}

static inline void __trace_wbc_assign_cgroup(char *buf,
					     struct writeback_control *wbc)
{
	strcpy(buf, "/");
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
#endif	/* CREATE_TRACE_POINTS */
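
/*
 * The helpers above back the __dynamic_array(char, cgroup, ...) fields in
 * the events below: __trace_wb_cgroup_size()/__trace_wbc_cgroup_size()
 * reserve room for the memcg cgroup path (or the 2 bytes of "/" plus NUL
 * when cgroup writeback is disabled or the wbc has no wb attached), and
 * the corresponding assign helpers write the path into the reserved
 * buffer obtained via __get_str(cgroup).
 */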

DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(int, sync_mode)
		__dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino = inode->i_ino;
		__entry->sync_mode = wbc->sync_mode;
		__trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s",
		__entry->name,
		__entry->ino,
		__entry->sync_mode,
		__get_str(cgroup)
	)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
	TP_ARGS(wb, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
		__dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
	),
	TP_fast_assign(
		strncpy(__entry->name,
			wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background = work->for_background;
		__entry->reason = work->reason;
		__trace_wb_assign_cgroup(__get_str(cgroup), wb);
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s",
		__entry->name,
		MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		__entry->nr_pages,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->range_cyclic,
		__entry->for_background,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		__get_str(cgroup)
	)
);

#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
	TP_ARGS(wb, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long, pages)
	),
	TP_fast_assign(
		__entry->pages = pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct bdi_writeback *wb),
	TP_ARGS(wb),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
	),
	TP_fast_assign(
		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
		__trace_wb_assign_cgroup(__get_str(cgroup), wb);
	),
	TP_printk("bdi %s: cgroup=%s",
		__entry->name,
		__get_str(cgroup)
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct bdi_writeback *wb), \
	TP_ARGS(wb))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);

TRACE_EVENT(writeback_bdi_register,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
	),
	TP_printk("bdi %s",
		__entry->name
	)
);

DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
		__dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
	),

	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
		__entry->nr_to_write = wbc->nr_to_write;
		__entry->pages_skipped = wbc->pages_skipped;
		__entry->sync_mode = wbc->sync_mode;
		__entry->for_kupdate = wbc->for_kupdate;
		__entry->for_background = wbc->for_background;
		__entry->for_reclaim = wbc->for_reclaim;
		__entry->range_cyclic = wbc->range_cyclic;
		__entry->range_start = (long)wbc->range_start;
		__entry->range_end = (long)wbc->range_end;
		__trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		  "bgrd=%d reclm=%d cyclic=%d "
		  "start=0x%lx end=0x%lx cgroup=%s",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end,
		__get_str(cgroup)
	)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, older)
		__field(long, age)
		__field(int, moved)
		__field(int, reason)
		__dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
	),
	TP_fast_assign(
		unsigned long *older_than_this = work->older_than_this;

		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
		__entry->older = older_than_this ? *older_than_this : 0;
		__entry->age = older_than_this ?
			(jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved = moved;
		__entry->reason = work->reason;
		__trace_wb_assign_cgroup(__get_str(cgroup), wb);
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s",
		__entry->name,
		__entry->older,	/* older_than_this in jiffies */
		__entry->age,	/* older_than_this in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON),
		__get_str(cgroup)
	)
);

TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long, nr_dirty)
		__field(unsigned long, nr_writeback)
		__field(unsigned long, nr_unstable)
		__field(unsigned long, background_thresh)
		__field(unsigned long, dirty_thresh)
		__field(unsigned long, dirty_limit)
		__field(unsigned long, nr_dirtied)
		__field(unsigned long, nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty = global_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback = global_page_state(NR_WRITEBACK);
		__entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied = global_page_state(NR_DIRTIED);
		__entry->nr_written = global_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh = dirty_thresh;
		__entry->dirty_limit = global_wb_domain.dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		__entry->nr_dirty,
		__entry->nr_writeback,
		__entry->nr_unstable,
		__entry->background_thresh,
		__entry->dirty_thresh,
		__entry->dirty_limit,
		__entry->nr_dirtied,
		__entry->nr_written
	)
);
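
/*
 * KBps() converts a page count to kilobytes: shifting left by
 * (PAGE_SHIFT - 10) multiplies by the page size in KB, e.g. with 4 KB
 * pages (PAGE_SHIFT == 12) this is x << 2, i.e. x * 4.
 */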
#define KBps(x)			((x) << (PAGE_SHIFT - 10))

TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(wb, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, write_bw)
		__field(unsigned long, avg_write_bw)
		__field(unsigned long, dirty_rate)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned long, balanced_dirty_ratelimit)
		__dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
	),

	TP_fast_assign(
		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
		__entry->write_bw = KBps(wb->write_bandwidth);
		__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
		__entry->dirty_rate = KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
			KBps(wb->balanced_dirty_ratelimit);
		__trace_wb_assign_cgroup(__get_str(cgroup), wb);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu cgroup=%s",
		__entry->bdi,
		__entry->write_bw,		/* write bandwidth */
		__entry->avg_write_bw,		/* avg write bandwidth */
		__entry->dirty_rate,		/* bdi dirty rate */
		__entry->dirty_ratelimit,	/* base ratelimit */
		__entry->task_ratelimit,	/* ratelimit with position control */
		__entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
		__get_str(cgroup)
	)
);
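
/*
 * balance_dirty_pages below snapshots the dirty-throttling control loop:
 * freerun is the midpoint of the background and hard dirty thresholds,
 * setpoint the midpoint of freerun and the global dirty limit, and the
 * period/pause/paused/think fields convert jiffies to milliseconds
 * (jiffies * 1000 / HZ).
 */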
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, limit)
		__field(unsigned long, setpoint)
		__field(unsigned long, dirty)
		__field(unsigned long, bdi_setpoint)
		__field(unsigned long, bdi_dirty)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned int, dirtied)
		__field(unsigned int, dirtied_pause)
		__field(unsigned long, paused)
		__field(long, pause)
		__field(unsigned long, period)
		__field(long, think)
		__dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;

		strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
		__entry->limit = global_wb_domain.dirty_limit;
		__entry->setpoint = (global_wb_domain.dirty_limit +
				     freerun) / 2;
		__entry->dirty = dirty;
		__entry->bdi_setpoint = __entry->setpoint *
					bdi_thresh / (thresh + 1);
		__entry->bdi_dirty = bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->dirtied = dirtied;
		__entry->dirtied_pause = current->nr_dirtied_pause;
		__entry->think = current->dirty_paused_when == 0 ? 0 :
			(long)(jiffies - current->dirty_paused_when) * 1000 / HZ;
		__entry->period = period * 1000 / HZ;
		__entry->pause = pause * 1000 / HZ;
		__entry->paused = (jiffies - start_time) * 1000 / HZ;
		__trace_wb_assign_cgroup(__get_str(cgroup), wb);
	),

	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s",
		__entry->bdi,
		__entry->limit,
		__entry->setpoint,
		__entry->dirty,
		__entry->bdi_setpoint,
		__entry->bdi_dirty,
		__entry->dirty_ratelimit,
		__entry->task_ratelimit,
		__entry->dirtied,
		__entry->dirtied_pause,
		__entry->paused,	/* ms */
		__entry->pause,		/* ms */
		__entry->period,	/* ms */
		__entry->think,		/* ms */
		__get_str(cgroup)
	)
);

TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__dynamic_array(char, cgroup,
				__trace_wb_cgroup_size(inode_to_wb(inode)))
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode));
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s",
		__entry->name,
		__entry->ino,
		show_inode_state(__entry->state),
		__entry->dirtied_when,
		(jiffies - __entry->dirtied_when) / HZ,
		__get_str(cgroup)
	)
);

DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(unsigned int, usec_timeout)
		__field(unsigned int, usec_delayed)
	),

	TP_fast_assign(
		__entry->usec_timeout = usec_timeout;
		__entry->usec_delayed = usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
		__entry->usec_timeout,
		__entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
		__dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write = nr_to_write;
		__entry->wrote = nr_to_write - wbc->nr_to_write;
		__trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu cgroup=%s",
		__entry->name,
		__entry->ino,
		show_inode_state(__entry->state),
		__entry->dirtied_when,
		(jiffies - __entry->dirtied_when) / HZ,
		__entry->writeback_index,
		__entry->nr_to_write,
		__entry->wrote,
		__get_str(cgroup)
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DECLARE_EVENT_CLASS(writeback_lazytime_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(__u16, mode)
		__field(unsigned long, dirtied_when)
	),

	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->mode = inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__entry->ino, __entry->dirtied_when,
		show_inode_state(__entry->state), __entry->mode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>