blk-mq-sysfs.c
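/*
 * Exposes blk-mq software queue (ctx) and hardware queue (hctx) state
 * as read-only files under /sys/block/<disk>/mq/.
 */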

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/blk-mq.h>

#include "blk-mq.h"
#include "blk-mq-tag.h"
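
/*
 * Empty release: the kobjects managed here are embedded in the ctx, hctx,
 * and request_queue structures, which are freed elsewhere.
 */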
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}
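
/*
 * Attribute wrappers pairing a sysfs attribute with show/store handlers
 * that take the software queue (ctx) or hardware queue (hctx) directly.
 */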
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
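
/*
 * The four dispatchers below recover the entry and the ctx/hctx from the
 * attribute and kobject, then invoke the handler under q->sysfs_lock.
 * A queue that is dying reports -ENOENT instead.
 */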
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
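
/*
 * Per-ctx counters; the two-column files print the sync count (index 1)
 * followed by the async count (index 0).
 */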
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
		       ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
		       ctx->rq_completed[0]);
}
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
	struct request *rq;
	int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

	list_for_each_entry(rq, list, queuelist) {
		const int rq_len = 2 * sizeof(rq) + 2;

		/* if the output will be truncated */
		if (PAGE_SIZE - 1 < len + rq_len) {
			/* backspacing if it can't hold '\t...\n' */
			if (PAGE_SIZE - 1 < len + 5)
				len -= rq_len;
			len += snprintf(page + len, PAGE_SIZE - 1 - len,
					"\t...\n");
			break;
		}
		len += snprintf(page + len, PAGE_SIZE - 1 - len,
				"\t%p\n", rq);
	}

	return len;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
	ssize_t ret;

	spin_lock(&ctx->lock);
	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
	spin_unlock(&ctx->lock);

	return ret;
}
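
/* Per-hctx counters: poll hit rate, requests queued, and times run. */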
static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success);
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}
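
/*
 * Histogram of dispatch batch sizes: one row per power-of-two bucket,
 * labeled 0, 1, 2, 4, ... up to 1 << (BLK_MQ_MAX_DISPATCH_ORDER - 2).
 */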
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
					       char *page)
{
	char *start_page = page;
	int i;

	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
		unsigned long d = 1U << (i - 1);

		page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
	}

	return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
	spin_unlock(&hctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}
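
/* Comma-separated list of the CPUs this hardware queue serves. */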
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, first = 1;
	ssize_t ret = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	ret += sprintf(ret + page, "\n");
	return ret;
}
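
/*
 * Read-only (S_IRUGO) attribute definitions, gathered into the default
 * attribute arrays installed by the ktypes below.
 */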
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
	.attr = {.name = "active", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_active_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
	.attr = {.name = "io_poll", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_poll_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	&blk_mq_hw_sysfs_active.attr,
	&blk_mq_hw_sysfs_poll.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show = blk_mq_sysfs_show,
	.store = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
	.store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.default_attrs = default_ctx_attrs,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_attrs = default_hw_ctx_attrs,
	.release = blk_mq_sysfs_release,
};
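
/* Delete the hctx directory along with its per-cpu ctx subdirectories. */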
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}
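
/*
 * Add the hctx directory (named by queue number) under mq/ plus one
 * "cpu%u" child per software queue it serves.
 */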
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}
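
/*
 * Tear down the whole hierarchy: delete and drop every ctx and hctx
 * kobject, emit KOBJ_REMOVE, then release the mq kobject and the device
 * reference taken by blk_mq_register_disk().
 */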
void blk_mq_unregister_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i, j;

	blk_mq_disable_hotplug();

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_unregister_hctx(hctx);

		hctx_for_each_ctx(hctx, ctx, j)
			kobject_put(&ctx->kobj);

		kobject_put(&hctx->kobj);
	}

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&q->mq_kobj);

	kobject_put(&disk_to_dev(disk)->kobj);

	q->mq_sysfs_init_done = false;
	blk_mq_enable_hotplug();
}
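
/* Initialize (but do not add) the mq, hctx, and ctx kobjects. */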
static void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	queue_for_each_hw_ctx(q, hctx, i)
		kobject_init(&hctx->kobj, &blk_mq_hw_ktype);

	queue_for_each_ctx(q, ctx, i)
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}
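
/*
 * Create the mq/ directory under the disk's device kobject, pinning the
 * device via kobject_get() (released in blk_mq_unregister_disk()), then
 * register every hardware context beneath it.
 */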
int blk_mq_register_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	blk_mq_disable_hotplug();

	blk_mq_sysfs_init(q);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	if (ret)
		blk_mq_unregister_disk(disk);
	else
		q->mq_sysfs_init_done = true;
out:
	blk_mq_enable_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);
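
/*
 * blk_mq_sysfs_unregister() and blk_mq_sysfs_register() hide and re-expose
 * the per-hctx directories while hardware contexts are remapped, without
 * touching the top-level mq kobject. Both are no-ops before
 * blk_mq_register_disk() has completed.
 */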
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!q->mq_sysfs_init_done)
		return;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	if (!q->mq_sysfs_init_done)
		return ret;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	return ret;
}