/*
 * faulty.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2004 Neil Brown
 *
 * faulty-device-simulator personality for md
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * The "faulty" personality causes some requests to fail.
 *
 * Possible failure modes are:
 *	reads fail "randomly" but succeed on retry
 *	writes fail "randomly" but succeed on retry
 *	reads for some address fail and then persist until a write
 *	reads for some address fail and then persist irrespective of write
 *	writes for some address fail and persist
 *	all writes fail
 *
 * Different modes can be active at the same time, but only one can be
 * set at array creation.  Others can be added later.
 * A mode can be one-shot or recurrent, with the recurrence being
 * once in every N requests.
 * The bottom 5 bits of the "layout" indicate the mode.  The
 * remainder indicate a period, or 0 for one-shot.
 *
 * There is an implementation limit on the number of concurrently
 * persisting-faulty blocks.  When a new fault is requested that would
 * exceed the limit, it is ignored.
 * All current faults can be cleared using a layout of "0".
 *
 * Requests are always sent to the device.  If they are to fail,
 * we clone the bio and insert a new bi_end_io into the chain.
 */
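
/*
 * Worked example of the layout encoding described above (illustrative
 * values only): with ModeShift = 5, a layout of
 * (10 << ModeShift) | ReadTransient = (10 << 5) | 1 = 321 makes reads
 * fail transiently once in every 10 requests, while a layout whose
 * upper bits are 0 makes the selected mode one-shot.
 */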

#define WriteTransient	0
#define ReadTransient	1
#define WritePersistent	2
#define ReadPersistent	3
#define WriteAll	4 /* doesn't go to device */
#define ReadFixable	5
#define Modes		6

#define ClearErrors	31
#define ClearFaults	30

#define AllPersist	100 /* internal use only */
#define NoPersist	101

#define ModeMask	0x1f
#define ModeShift	5

#define MaxFault	50

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include <linux/seq_file.h>
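
/*
 * Completion handler for the clone of a bio that was chosen to fail:
 * copy the residual size and sector back to the original bio, drop the
 * clone, and complete the original with an I/O error.
 */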
static void faulty_fail(struct bio *bio)
{
	struct bio *b = bio->bi_private;

	b->bi_iter.bi_size = bio->bi_iter.bi_size;
	b->bi_iter.bi_sector = bio->bi_iter.bi_sector;

	bio_put(bio);

	bio_io_error(b);
}
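
/*
 * Per-array state: the recurrence period and countdown counter for each
 * failure mode, the table of persistently faulty sectors, and the single
 * underlying device.
 */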
struct faulty_conf {
	int period[Modes];
	atomic_t counters[Modes];
	sector_t faults[MaxFault];
	int modes[MaxFault];
	int nfaults;
	struct md_rdev *rdev;
};
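
/*
 * Decide whether this request should trigger the given failure mode.
 * Each request decrements the mode's counter; when it reaches zero the
 * fault fires, and recurrent modes reload the counter from the period.
 */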
static int check_mode(struct faulty_conf *conf, int mode)
{
	if (conf->period[mode] == 0 &&
	    atomic_read(&conf->counters[mode]) <= 0)
		return 0; /* no failure, no decrement */

	if (atomic_dec_and_test(&conf->counters[mode])) {
		if (conf->period[mode])
			atomic_set(&conf->counters[mode], conf->period[mode]);
		return 1;
	}
	return 0;
}
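
/*
 * Check whether any recorded persistent fault overlaps [start, end) for
 * this I/O direction.  A WRITE over a ReadFixable sector clears that
 * fault instead of failing.
 */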
static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir)
{
	/* If we find a ReadFixable sector, we fix it ... */
	int i;

	for (i=0; i<conf->nfaults; i++)
		if (conf->faults[i] >= start &&
		    conf->faults[i] < end) {
			/* found it ... */
			switch (conf->modes[i] * 2 + dir) {
			case WritePersistent*2+WRITE: return 1;
			case ReadPersistent*2+READ: return 1;
			case ReadFixable*2+READ: return 1;
			case ReadFixable*2+WRITE:
				conf->modes[i] = NoPersist;
				return 0;
			case AllPersist*2+READ:
			case AllPersist*2+WRITE: return 1;
			default:
				return 0;
			}
		}
	return 0;
}
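
/*
 * Record a persistently faulty sector.  If the sector is already listed,
 * merge the new mode with the old one (read and write faults on the same
 * sector become AllPersist); otherwise reuse a cleared (NoPersist) slot
 * or append a new entry, silently dropping the fault once MaxFault
 * entries are in use.
 */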
static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
{
	int i;
	int n = conf->nfaults;

	for (i=0; i<conf->nfaults; i++)
		if (conf->faults[i] == start) {
			switch (mode) {
			case NoPersist: conf->modes[i] = mode; return;
			case WritePersistent:
				if (conf->modes[i] == ReadPersistent ||
				    conf->modes[i] == ReadFixable)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = WritePersistent;
				return;
			case ReadPersistent:
				if (conf->modes[i] == WritePersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadPersistent;
				return;
			case ReadFixable:
				if (conf->modes[i] == WritePersistent ||
				    conf->modes[i] == ReadPersistent)
					conf->modes[i] = AllPersist;
				else
					conf->modes[i] = ReadFixable;
				return;
			}
		} else if (conf->modes[i] == NoPersist)
			n = i;

	if (n >= MaxFault)
		return;
	conf->faults[n] = start;
	conf->modes[n] = mode;
	if (conf->nfaults == n)
		conf->nfaults = n+1;
}
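
/*
 * Submit every request to the underlying device.  Requests selected to
 * fail are sent through a clone whose completion handler (faulty_fail)
 * errors the original bio; WriteAll requests are failed immediately
 * without touching the device.
 */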
static void make_request(struct mddev *mddev, struct bio *bio)
{
	struct faulty_conf *conf = mddev->private;
	int failit = 0;

	if (bio_data_dir(bio) == WRITE) {
		/* write request */
		if (atomic_read(&conf->counters[WriteAll])) {
			/* special case - don't decrement, don't generic_make_request,
			 * just fail immediately
			 */
			bio_io_error(bio);
			return;
		}

		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), WRITE))
			failit = 1;
		if (check_mode(conf, WritePersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   WritePersistent);
			failit = 1;
		}
		if (check_mode(conf, WriteTransient))
			failit = 1;
	} else {
		/* read request */
		if (check_sector(conf, bio->bi_iter.bi_sector,
				 bio_end_sector(bio), READ))
			failit = 1;
		if (check_mode(conf, ReadTransient))
			failit = 1;
		if (check_mode(conf, ReadPersistent)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadPersistent);
			failit = 1;
		}
		if (check_mode(conf, ReadFixable)) {
			add_sector(conf, bio->bi_iter.bi_sector,
				   ReadFixable);
			failit = 1;
		}
	}
	if (failit) {
		struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev);

		b->bi_bdev = conf->rdev->bdev;
		b->bi_private = bio;
		b->bi_end_io = faulty_fail;
		bio = b;
	} else
		bio->bi_bdev = conf->rdev->bdev;

	generic_make_request(bio);
}
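
/*
 * Report the active failure modes, their remaining counts and periods,
 * and the number of recorded faults.
 */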
static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct faulty_conf *conf = mddev->private;
	int n;

	if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
		seq_printf(seq, " WriteTransient=%d(%d)",
			   n, conf->period[WriteTransient]);

	if ((n=atomic_read(&conf->counters[ReadTransient])) != 0)
		seq_printf(seq, " ReadTransient=%d(%d)",
			   n, conf->period[ReadTransient]);

	if ((n=atomic_read(&conf->counters[WritePersistent])) != 0)
		seq_printf(seq, " WritePersistent=%d(%d)",
			   n, conf->period[WritePersistent]);

	if ((n=atomic_read(&conf->counters[ReadPersistent])) != 0)
		seq_printf(seq, " ReadPersistent=%d(%d)",
			   n, conf->period[ReadPersistent]);

	if ((n=atomic_read(&conf->counters[ReadFixable])) != 0)
		seq_printf(seq, " ReadFixable=%d(%d)",
			   n, conf->period[ReadFixable]);

	if ((n=atomic_read(&conf->counters[WriteAll])) != 0)
		seq_printf(seq, " WriteAll");

	seq_printf(seq, " nfaults=%d", conf->nfaults);
}
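
/*
 * A "reshape" of this personality just installs a new layout: the low
 * ModeMask bits select a failure mode (or ClearErrors/ClearFaults), and
 * the remaining bits give the recurrence period, with 0 meaning one-shot.
 */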
static int reshape(struct mddev *mddev)
{
	int mode = mddev->new_layout & ModeMask;
	int count = mddev->new_layout >> ModeShift;
	struct faulty_conf *conf = mddev->private;

	if (mddev->new_layout < 0)
		return 0;

	/* new layout */
	if (mode == ClearFaults)
		conf->nfaults = 0;
	else if (mode == ClearErrors) {
		int i;
		for (i=0 ; i < Modes ; i++) {
			conf->period[i] = 0;
			atomic_set(&conf->counters[i], 0);
		}
	} else if (mode < Modes) {
		conf->period[mode] = count;
		if (!count) count++;
		atomic_set(&conf->counters[mode], count);
	} else
		return -EINVAL;
	mddev->new_layout = -1;
	mddev->layout = -1; /* makes sure further changes come through */
	return 0;
}
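
/*
 * The array is the same size as its single component device unless an
 * explicit size is requested.
 */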
static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	WARN_ONCE(raid_disks,
		  "%s does not support generic reshape\n", __func__);

	if (sectors == 0)
		return mddev->dev_sectors;

	return sectors;
}
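
/*
 * Assemble the array: allocate and zero the per-array state, remember
 * the single component device, set the array size, and apply the
 * initial layout via reshape().
 */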
static int run(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int i;
	struct faulty_conf *conf;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	conf = kmalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return -ENOMEM;

	for (i=0; i<Modes; i++) {
		atomic_set(&conf->counters[i], 0);
		conf->period[i] = 0;
	}
	conf->nfaults = 0;

	rdev_for_each(rdev, mddev) {
		conf->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
	}

	md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
	mddev->private = conf;

	reshape(mddev);

	return 0;
}

static void faulty_free(struct mddev *mddev, void *priv)
{
	struct faulty_conf *conf = priv;

	kfree(conf);
}

static struct md_personality faulty_personality =
{
	.name		= "faulty",
	.level		= LEVEL_FAULTY,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.free		= faulty_free,
	.status		= status,
	.check_reshape	= reshape,
	.size		= faulty_size,
};

static int __init raid_init(void)
{
	return register_md_personality(&faulty_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&faulty_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fault injection personality for MD");
MODULE_ALIAS("md-personality-10"); /* faulty */
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5");