/*
 * Copyright (C) 2014 Facebook. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#define DM_MSG_PREFIX "log-writes"
/*
 * This target will sequentially log all writes to the target device onto the
 * log device.  This is helpful for replaying writes to check for fs consistency
 * at all times.  This target provides a mechanism to mark specific events to
 * check data at a later time.  So for example you would:
 *
 * write data
 * fsync
 * dmsetup message /dev/whatever mark mymark
 * unmount /mnt/test
 *
 * Then replay the log up to mymark and check the contents of the replay to
 * verify it matches what was written.
 *
 * We log writes only after they have been flushed; this makes the log describe
 * close to the order in which the data hits the actual disk, not its cache.  So
 * for example the following sequence (W means write, C means complete)
 *
 * Wa,Wb,Wc,Cc,Ca,FLUSH,FUAd,Cb,CFLUSH,CFUAd
 *
 * would result in the log looking like this:
 *
 * c,a,flush,fuad,b,<other writes>,<next flush>
 *
 * This is meant to help expose problems where file systems do not properly wait
 * on data being written before invoking a FLUSH.  FUA bypasses the cache, so
 * once it completes it is added to the log as it should be on disk.
 *
 * We treat DISCARDs as if they don't bypass cache so that they are logged in
 * order of completion along with the normal writes.  If we didn't do it this
 * way we would process all the discards first and then write all the data, when
 * in fact we want to do the data and the discard in the order that they
 * completed.
 */
#define LOG_FLUSH_FLAG (1 << 0)
#define LOG_FUA_FLAG (1 << 1)
#define LOG_DISCARD_FLAG (1 << 2)
#define LOG_MARK_FLAG (1 << 3)

#define WRITE_LOG_VERSION 1ULL
#define WRITE_LOG_MAGIC 0x6a736677736872ULL
/*
 * The disk format for this is braindead simple.
 *
 * At byte 0 we have our super, followed by the following sequence for
 * nr_entries:
 *
 * [   1 sector    ][  entry->nr_sectors  ]
 * [log_write_entry][    data written     ]
 *
 * The log_write_entry takes up a full sector so we can have arbitrary length
 * marks and it leaves us room for extra content in the future.
 */
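/*
 * As a worked example, assuming the 512-byte sectorsize set in the
 * constructor below, a log holding a single 8-sector write is laid out as:
 *
 *   sector 0:    struct log_write_super (padded to a full sector)
 *   sector 1:    struct log_write_entry (padded to a full sector)
 *   sectors 2-9: the 4096 bytes of data written
 *
 * The next entry would then start at sector 10.
 */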
/*
 * Basic info about the log for userspace.
 */
struct log_write_super {
        __le64 magic;
        __le64 version;
        __le64 nr_entries;
        __le32 sectorsize;
};

/*
 * sector - the sector we wrote.
 * nr_sectors - the number of sectors we wrote.
 * flags - flags for this log entry.
 * data_len - the size of the data in this log entry, this is for private log
 * entry stuff, the MARK data provided by userspace for example.
 */
struct log_write_entry {
        __le64 sector;
        __le64 nr_sectors;
        __le64 flags;
        __le64 data_len;
};
struct log_writes_c {
        struct dm_dev *dev;
        struct dm_dev *logdev;
        u64 logged_entries;
        u32 sectorsize;
        atomic_t io_blocks;
        atomic_t pending_blocks;
        sector_t next_sector;
        sector_t end_sector;
        bool logging_enabled;
        bool device_supports_discard;
        spinlock_t blocks_lock;
        struct list_head unflushed_blocks;
        struct list_head logging_blocks;
        wait_queue_head_t wait;
        struct task_struct *log_kthread;
};

struct pending_block {
        int vec_cnt;
        u64 flags;
        sector_t sector;
        sector_t nr_sectors;
        char *data;
        u32 datalen;
        struct list_head list;
        struct bio_vec vecs[0];
};

struct per_bio_data {
        struct pending_block *block;
};
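/*
 * Two counters gate teardown: pending_blocks counts blocks waiting to be
 * logged, io_blocks counts log bios still in flight.  The destructor sleeps
 * on lc->wait until both drop to zero.
 */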
static void put_pending_block(struct log_writes_c *lc)
{
        if (atomic_dec_and_test(&lc->pending_blocks)) {
                smp_mb__after_atomic();
                if (waitqueue_active(&lc->wait))
                        wake_up(&lc->wait);
        }
}

static void put_io_block(struct log_writes_c *lc)
{
        if (atomic_dec_and_test(&lc->io_blocks)) {
                smp_mb__after_atomic();
                if (waitqueue_active(&lc->wait))
                        wake_up(&lc->wait);
        }
}
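/*
 * Completion for bios written to the log device: free the pages backing the
 * bio and, on error, disable logging so no further entries are written.
 */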
static void log_end_io(struct bio *bio)
{
        struct log_writes_c *lc = bio->bi_private;
        struct bio_vec *bvec;
        int i;

        if (bio->bi_error) {
                unsigned long flags;

                DMERR("Error writing log block, error=%d", bio->bi_error);
                spin_lock_irqsave(&lc->blocks_lock, flags);
                lc->logging_enabled = false;
                spin_unlock_irqrestore(&lc->blocks_lock, flags);
        }

        bio_for_each_segment_all(bvec, bio, i)
                __free_page(bvec->bv_page);

        put_io_block(lc);
        bio_put(bio);
}
/*
 * Meant to be called if there is an error, it will free all the pages
 * associated with the block.
 */
static void free_pending_block(struct log_writes_c *lc,
                               struct pending_block *block)
{
        int i;

        for (i = 0; i < block->vec_cnt; i++) {
                if (block->vecs[i].bv_page)
                        __free_page(block->vecs[i].bv_page);
        }
        kfree(block->data);
        kfree(block);
        put_pending_block(lc);
}
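/*
 * Write a single sector to the log device at @sector: the entry (or super)
 * first, then any inline data, zero-padded out to a full sector.  The caller
 * is expected to hold an io_blocks reference; it is dropped on failure.
 */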
static int write_metadata(struct log_writes_c *lc, void *entry,
                          size_t entrylen, void *data, size_t datalen,
                          sector_t sector)
{
        struct bio *bio;
        struct page *page;
        void *ptr;
        size_t ret;

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio) {
                DMERR("Couldn't alloc log bio");
                goto error;
        }
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = lc->logdev->bdev;
        bio->bi_end_io = log_end_io;
        bio->bi_private = lc;

        page = alloc_page(GFP_KERNEL);
        if (!page) {
                DMERR("Couldn't alloc log page");
                bio_put(bio);
                goto error;
        }

        ptr = kmap_atomic(page);
        memcpy(ptr, entry, entrylen);
        if (datalen)
                memcpy(ptr + entrylen, data, datalen);
        memset(ptr + entrylen + datalen, 0,
               lc->sectorsize - entrylen - datalen);
        kunmap_atomic(ptr);

        ret = bio_add_page(bio, page, lc->sectorsize, 0);
        if (ret != lc->sectorsize) {
                DMERR("Couldn't add page to the log block");
                goto error_bio;
        }
        submit_bio(WRITE, bio);
        return 0;
error_bio:
        bio_put(bio);
        __free_page(page);
error:
        put_io_block(lc);
        return -1;
}
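/*
 * Log one pending block: one metadata sector via write_metadata(), followed
 * by the copied data pages in as many bios as it takes.  Consumes the block
 * (and its pending_blocks reference) whether it succeeds or fails.
 */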
static int log_one_block(struct log_writes_c *lc,
                         struct pending_block *block, sector_t sector)
{
        struct bio *bio;
        struct log_write_entry entry;
        size_t ret;
        int i;

        entry.sector = cpu_to_le64(block->sector);
        entry.nr_sectors = cpu_to_le64(block->nr_sectors);
        entry.flags = cpu_to_le64(block->flags);
        entry.data_len = cpu_to_le64(block->datalen);
        if (write_metadata(lc, &entry, sizeof(entry), block->data,
                           block->datalen, sector)) {
                free_pending_block(lc, block);
                return -1;
        }

        if (!block->vec_cnt)
                goto out;
        sector++;

        atomic_inc(&lc->io_blocks);
        bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
        if (!bio) {
                DMERR("Couldn't alloc log bio");
                goto error;
        }
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = sector;
        bio->bi_bdev = lc->logdev->bdev;
        bio->bi_end_io = log_end_io;
        bio->bi_private = lc;

        for (i = 0; i < block->vec_cnt; i++) {
                /*
                 * The page offset is always 0 because we allocate a new page
                 * for every bvec in the original bio for simplicity's sake.
                 */
                ret = bio_add_page(bio, block->vecs[i].bv_page,
                                   block->vecs[i].bv_len, 0);
                if (ret != block->vecs[i].bv_len) {
                        atomic_inc(&lc->io_blocks);
                        submit_bio(WRITE, bio);
                        bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
                        if (!bio) {
                                DMERR("Couldn't alloc log bio");
                                goto error;
                        }
                        bio->bi_iter.bi_size = 0;
                        bio->bi_iter.bi_sector = sector;
                        bio->bi_bdev = lc->logdev->bdev;
                        bio->bi_end_io = log_end_io;
                        bio->bi_private = lc;

                        ret = bio_add_page(bio, block->vecs[i].bv_page,
                                           block->vecs[i].bv_len, 0);
                        if (ret != block->vecs[i].bv_len) {
                                DMERR("Couldn't add page on new bio?");
                                bio_put(bio);
                                goto error;
                        }
                }
                sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
        }
        submit_bio(WRITE, bio);
out:
        kfree(block->data);
        kfree(block);
        put_pending_block(lc);
        return 0;
error:
        free_pending_block(lc, block);
        put_io_block(lc);
        return -1;
}
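/*
 * Rewrite the super at sector 0 so userspace sees an up-to-date nr_entries.
 */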
static int log_super(struct log_writes_c *lc)
{
        struct log_write_super super;

        super.magic = cpu_to_le64(WRITE_LOG_MAGIC);
        super.version = cpu_to_le64(WRITE_LOG_VERSION);
        super.nr_entries = cpu_to_le64(lc->logged_entries);
        super.sectorsize = cpu_to_le32(lc->sectorsize);

        if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
                DMERR("Couldn't write super");
                return -1;
        }
        return 0;
}

static inline sector_t logdev_last_sector(struct log_writes_c *lc)
{
        return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
}
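/*
 * The logging thread: pull blocks off logging_blocks one at a time, reserve
 * log space for them under blocks_lock, then do the actual writes without
 * the lock held.  Any failure flips logging_enabled off for good.
 */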
static int log_writes_kthread(void *arg)
{
        struct log_writes_c *lc = (struct log_writes_c *)arg;
        sector_t sector = 0;

        while (!kthread_should_stop()) {
                bool super = false;
                bool logging_enabled;
                struct pending_block *block = NULL;
                int ret;

                spin_lock_irq(&lc->blocks_lock);
                if (!list_empty(&lc->logging_blocks)) {
                        block = list_first_entry(&lc->logging_blocks,
                                                 struct pending_block, list);
                        list_del_init(&block->list);
                        if (!lc->logging_enabled)
                                goto next;

                        sector = lc->next_sector;
                        if (block->flags & LOG_DISCARD_FLAG)
                                lc->next_sector++;
                        else
                                lc->next_sector += block->nr_sectors + 1;

                        /*
                         * Apparently the size of the device may not be known
                         * right away, so handle this properly.
                         */
                        if (!lc->end_sector)
                                lc->end_sector = logdev_last_sector(lc);
                        if (lc->end_sector &&
                            lc->next_sector >= lc->end_sector) {
                                DMERR("Ran out of space on the logdev");
                                lc->logging_enabled = false;
                                goto next;
                        }
                        lc->logged_entries++;
                        atomic_inc(&lc->io_blocks);

                        super = (block->flags & (LOG_FUA_FLAG | LOG_MARK_FLAG));
                        if (super)
                                atomic_inc(&lc->io_blocks);
                }
next:
                logging_enabled = lc->logging_enabled;
                spin_unlock_irq(&lc->blocks_lock);
                if (block) {
                        if (logging_enabled) {
                                ret = log_one_block(lc, block, sector);
                                if (!ret && super)
                                        ret = log_super(lc);
                                if (ret) {
                                        spin_lock_irq(&lc->blocks_lock);
                                        lc->logging_enabled = false;
                                        spin_unlock_irq(&lc->blocks_lock);
                                }
                        } else
                                free_pending_block(lc, block);
                        continue;
                }

                if (!try_to_freeze()) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop() &&
                            !atomic_read(&lc->pending_blocks))
                                schedule();
                        __set_current_state(TASK_RUNNING);
                }
        }
        return 0;
}
/*
 * Construct a log-writes mapping:
 * log-writes <dev_path> <log_dev_path>
 */
static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct log_writes_c *lc;
        struct dm_arg_set as;
        const char *devname, *logdevname;
        int ret;

        as.argc = argc;
        as.argv = argv;

        if (argc < 2) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL);
        if (!lc) {
                ti->error = "Cannot allocate context";
                return -ENOMEM;
        }
        spin_lock_init(&lc->blocks_lock);
        INIT_LIST_HEAD(&lc->unflushed_blocks);
        INIT_LIST_HEAD(&lc->logging_blocks);
        init_waitqueue_head(&lc->wait);
        lc->sectorsize = 1 << SECTOR_SHIFT;
        atomic_set(&lc->io_blocks, 0);
        atomic_set(&lc->pending_blocks, 0);

        devname = dm_shift_arg(&as);
        ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
        if (ret) {
                ti->error = "Device lookup failed";
                goto bad;
        }

        logdevname = dm_shift_arg(&as);
        ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table),
                            &lc->logdev);
        if (ret) {
                ti->error = "Log device lookup failed";
                dm_put_device(ti, lc->dev);
                goto bad;
        }

        lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
        if (IS_ERR(lc->log_kthread)) {
                ret = PTR_ERR(lc->log_kthread);
                ti->error = "Couldn't alloc kthread";
                dm_put_device(ti, lc->dev);
                dm_put_device(ti, lc->logdev);
                goto bad;
        }

        /* We put the super at sector 0, start logging at sector 1 */
        lc->next_sector = 1;
        lc->logging_enabled = true;
        lc->end_sector = logdev_last_sector(lc);
        lc->device_supports_discard = true;

        ti->num_flush_bios = 1;
        ti->flush_supported = true;
        ti->num_discard_bios = 1;
        ti->discards_supported = true;
        ti->per_bio_data_size = sizeof(struct per_bio_data);
        ti->private = lc;
        return 0;

bad:
        kfree(lc);
        return ret;
}
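/*
 * Queue a MARK entry carrying a userspace-supplied string, truncated to what
 * fits in one sector after the log_write_entry header.
 */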
static int log_mark(struct log_writes_c *lc, char *data)
{
        struct pending_block *block;
        size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);

        block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
        if (!block) {
                DMERR("Error allocating pending block");
                return -ENOMEM;
        }

        block->data = kstrndup(data, maxsize, GFP_KERNEL);
        if (!block->data) {
                DMERR("Error copying mark data");
                kfree(block);
                return -ENOMEM;
        }
        atomic_inc(&lc->pending_blocks);
        block->datalen = strlen(block->data);
        block->flags |= LOG_MARK_FLAG;
        spin_lock_irq(&lc->blocks_lock);
        list_add_tail(&block->list, &lc->logging_blocks);
        spin_unlock_irq(&lc->blocks_lock);
        wake_up_process(lc->log_kthread);
        return 0;
}
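/*
 * Teardown: push anything still on unflushed_blocks out to the log, drop an
 * end mark, then wait for the logging thread and in-flight log bios to drain.
 */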
static void log_writes_dtr(struct dm_target *ti)
{
        struct log_writes_c *lc = ti->private;

        spin_lock_irq(&lc->blocks_lock);
        list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks);
        spin_unlock_irq(&lc->blocks_lock);

        /*
         * This is just nice to have since it'll update the super to include
         * the unflushed blocks; if it fails we don't really care.
         */
        log_mark(lc, "dm-log-writes-end");
        wake_up_process(lc->log_kthread);
        wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
                   !atomic_read(&lc->pending_blocks));
        kthread_stop(lc->log_kthread);

        WARN_ON(!list_empty(&lc->logging_blocks));
        WARN_ON(!list_empty(&lc->unflushed_blocks));
        dm_put_device(ti, lc->dev);
        dm_put_device(ti, lc->logdev);
        kfree(lc);
}
static void normal_map_bio(struct dm_target *ti, struct bio *bio)
{
        struct log_writes_c *lc = ti->private;

        bio->bi_bdev = lc->dev->bdev;
}
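/*
 * The map path: reads pass straight through; writes get a pending_block with
 * a private copy of their data pages, since the originals (O_DIRECT, for
 * instance) can't be held onto until logging time.
 */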
static int log_writes_map(struct dm_target *ti, struct bio *bio)
{
        struct log_writes_c *lc = ti->private;
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
        struct pending_block *block;
        struct bvec_iter iter;
        struct bio_vec bv;
        size_t alloc_size;
        int i = 0;
        bool flush_bio = (bio->bi_rw & REQ_FLUSH);
        bool fua_bio = (bio->bi_rw & REQ_FUA);
        bool discard_bio = (bio->bi_rw & REQ_DISCARD);

        pb->block = NULL;

        /* Don't bother doing anything if logging has been disabled */
        if (!lc->logging_enabled)
                goto map_bio;

        /*
         * Map reads as normal.
         */
        if (bio_data_dir(bio) == READ)
                goto map_bio;

        /* No sectors and not a flush? Don't care */
        if (!bio_sectors(bio) && !flush_bio)
                goto map_bio;

        /*
         * Discards will have bi_size set but there's no actual data, so just
         * allocate the size of the pending block.
         */
        if (discard_bio)
                alloc_size = sizeof(struct pending_block);
        else
                alloc_size = sizeof(struct pending_block) + sizeof(struct bio_vec) * bio_segments(bio);

        block = kzalloc(alloc_size, GFP_NOIO);
        if (!block) {
                DMERR("Error allocating pending block");
                spin_lock_irq(&lc->blocks_lock);
                lc->logging_enabled = false;
                spin_unlock_irq(&lc->blocks_lock);
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&block->list);
        pb->block = block;
        atomic_inc(&lc->pending_blocks);

        if (flush_bio)
                block->flags |= LOG_FLUSH_FLAG;
        if (fua_bio)
                block->flags |= LOG_FUA_FLAG;
        if (discard_bio)
                block->flags |= LOG_DISCARD_FLAG;

        block->sector = bio->bi_iter.bi_sector;
        block->nr_sectors = bio_sectors(bio);

        /* We don't need the data, just submit */
        if (discard_bio) {
                WARN_ON(flush_bio || fua_bio);
                if (lc->device_supports_discard)
                        goto map_bio;
                bio_endio(bio);
                return DM_MAPIO_SUBMITTED;
        }

        /* Flush bio, splice the unflushed blocks onto this list and submit */
        if (flush_bio && !bio_sectors(bio)) {
                spin_lock_irq(&lc->blocks_lock);
                list_splice_init(&lc->unflushed_blocks, &block->list);
                spin_unlock_irq(&lc->blocks_lock);
                goto map_bio;
        }

        /*
         * We will write this bio somewhere else way later, so we need to copy
         * the actual contents into new pages so we know the data will always
         * be there.
         *
         * We do this because this could be a bio from O_DIRECT, in which case
         * we can't just hold onto the page until some later point; we have to
         * manually copy the contents.
         */
        bio_for_each_segment(bv, bio, iter) {
                struct page *page;
                void *src, *dst;

                page = alloc_page(GFP_NOIO);
                if (!page) {
                        DMERR("Error allocating page");
                        free_pending_block(lc, block);
                        spin_lock_irq(&lc->blocks_lock);
                        lc->logging_enabled = false;
                        spin_unlock_irq(&lc->blocks_lock);
                        return -ENOMEM;
                }

                src = kmap_atomic(bv.bv_page);
                dst = kmap_atomic(page);
                memcpy(dst, src + bv.bv_offset, bv.bv_len);
                kunmap_atomic(dst);
                kunmap_atomic(src);
                block->vecs[i].bv_page = page;
                block->vecs[i].bv_len = bv.bv_len;
                block->vec_cnt++;
                i++;
        }

        /* Had a flush with data in it, weird */
        if (flush_bio) {
                spin_lock_irq(&lc->blocks_lock);
                list_splice_init(&lc->unflushed_blocks, &block->list);
                spin_unlock_irq(&lc->blocks_lock);
        }
map_bio:
        normal_map_bio(ti, bio);
        return DM_MAPIO_REMAPPED;
}
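/*
 * Write completion on the real device: flush and FUA blocks go straight to
 * logging_blocks (a flush drags everything on unflushed_blocks along with
 * it); ordinary writes sit on unflushed_blocks until a flush completes.
 */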
static int normal_end_io(struct dm_target *ti, struct bio *bio, int error)
{
        struct log_writes_c *lc = ti->private;
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

        if (bio_data_dir(bio) == WRITE && pb->block) {
                struct pending_block *block = pb->block;
                unsigned long flags;

                spin_lock_irqsave(&lc->blocks_lock, flags);
                if (block->flags & LOG_FLUSH_FLAG) {
                        list_splice_tail_init(&block->list, &lc->logging_blocks);
                        list_add_tail(&block->list, &lc->logging_blocks);
                        wake_up_process(lc->log_kthread);
                } else if (block->flags & LOG_FUA_FLAG) {
                        list_add_tail(&block->list, &lc->logging_blocks);
                        wake_up_process(lc->log_kthread);
                } else
                        list_add_tail(&block->list, &lc->unflushed_blocks);
                spin_unlock_irqrestore(&lc->blocks_lock, flags);
        }

        return error;
}
/*
 * INFO format: <logged entries> <highest allocated sector>
 */
static void log_writes_status(struct dm_target *ti, status_type_t type,
                              unsigned status_flags, char *result,
                              unsigned maxlen)
{
        unsigned sz = 0;
        struct log_writes_c *lc = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                DMEMIT("%llu %llu", lc->logged_entries,
                       (unsigned long long)lc->next_sector - 1);
                if (!lc->logging_enabled)
                        DMEMIT(" logging_disabled");
                break;

        case STATUSTYPE_TABLE:
                DMEMIT("%s %s", lc->dev->name, lc->logdev->name);
                break;
        }
}
static int log_writes_prepare_ioctl(struct dm_target *ti,
                                    struct block_device **bdev, fmode_t *mode)
{
        struct log_writes_c *lc = ti->private;
        struct dm_dev *dev = lc->dev;

        *bdev = dev->bdev;
        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
        if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
                return 1;
        return 0;
}

static int log_writes_iterate_devices(struct dm_target *ti,
                                      iterate_devices_callout_fn fn,
                                      void *data)
{
        struct log_writes_c *lc = ti->private;

        return fn(ti, lc->dev, 0, ti->len, data);
}
/*
 * Messages supported:
 *   mark <mark data> - specify the marked data.
 */
static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv)
{
        int r = -EINVAL;
        struct log_writes_c *lc = ti->private;

        if (argc != 2) {
                DMWARN("Invalid log-writes message arguments, expected 2 arguments, got %d", argc);
                return r;
        }

        if (!strcasecmp(argv[0], "mark"))
                r = log_mark(lc, argv[1]);
        else
                DMWARN("Unrecognised log writes target message received: %s", argv[0]);
        return r;
}
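/*
 * If the underlying device can't discard, still advertise discard support:
 * the map path logs the discard and completes it itself rather than passing
 * it down (see log_writes_map).
 */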
static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct log_writes_c *lc = ti->private;
        struct request_queue *q = bdev_get_queue(lc->dev->bdev);

        if (!q || !blk_queue_discard(q)) {
                lc->device_supports_discard = false;
                limits->discard_granularity = 1 << SECTOR_SHIFT;
                limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
        }
}

static struct target_type log_writes_target = {
        .name            = "log-writes",
        .version         = {1, 0, 0},
        .module          = THIS_MODULE,
        .ctr             = log_writes_ctr,
        .dtr             = log_writes_dtr,
        .map             = log_writes_map,
        .end_io          = normal_end_io,
        .status          = log_writes_status,
        .prepare_ioctl   = log_writes_prepare_ioctl,
        .message         = log_writes_message,
        .iterate_devices = log_writes_iterate_devices,
        .io_hints        = log_writes_io_hints,
};
static int __init dm_log_writes_init(void)
{
        int r = dm_register_target(&log_writes_target);

        if (r < 0)
                DMERR("register failed %d", r);

        return r;
}

static void __exit dm_log_writes_exit(void)
{
        dm_unregister_target(&log_writes_target);
}

module_init(dm_log_writes_init);
module_exit(dm_log_writes_exit);

MODULE_DESCRIPTION(DM_NAME " log writes target");
MODULE_AUTHOR("Josef Bacik <jbacik@fb.com>");
MODULE_LICENSE("GPL");