/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024

struct dm_bio_prison {
	spinlock_t lock;
	mempool_t *cell_pool;
	struct rb_root cells;
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/
/*
 * The cell mempool is sized by MIN_CELLS, the number of cells expected to
 * be in use _concurrently_.  Don't confuse it with the number of distinct
 * keys.
 */
struct dm_bio_prison *dm_bio_prison_create(void)
{
	struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);

	if (!prison)
		return NULL;

	spin_lock_init(&prison->lock);

	prison->cell_pool = mempool_create_slab_pool(MIN_CELLS, _cell_cache);
	if (!prison->cell_pool) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
	mempool_destroy(prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
	return mempool_alloc(prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);

void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell)
{
	mempool_free(cell, prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
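/*
 * Example (an illustrative sketch, not part of this file): typical setup
 * and teardown by a caller.  Cells are usually preallocated with
 * dm_bio_prison_alloc_cell() before the prison lock is taken, so no
 * allocation happens inside the locked region.  The name example_setup is
 * hypothetical.
 *
 *	static int example_setup(struct dm_bio_prison **prison_out,
 *				 struct dm_bio_prison_cell **cell_out)
 *	{
 *		struct dm_bio_prison *prison = dm_bio_prison_create();
 *
 *		if (!prison)
 *			return -ENOMEM;
 *
 *		*cell_out = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *		*prison_out = prison;
 *		return 0;
 *	}
 *
 * Teardown is the reverse: dm_bio_prison_free_cell() any unused cells,
 * then dm_bio_prison_destroy().
 */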
static void __setup_new_cell(struct dm_cell_key *key,
			     struct bio *holder,
			     struct dm_bio_prison_cell *cell)
{
	memcpy(&cell->key, key, sizeof(cell->key));
	cell->holder = holder;
	bio_list_init(&cell->bios);
}
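/*
 * Keys compare equal when they are in the same key space (virtual flag),
 * on the same device, and their [block_begin, block_end) ranges overlap.
 * For example, a key covering blocks [0, 4) and another covering [2, 6)
 * collide, so bios for overlapping ranges wait in the same cell.
 */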
static int cmp_keys(struct dm_cell_key *lhs,
		    struct dm_cell_key *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	return 0;
}
static int __bio_detain(struct dm_bio_prison *prison,
			struct dm_cell_key *key,
			struct bio *inmate,
			struct dm_bio_prison_cell *cell_prealloc,
			struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell *cell =
			container_of(*new, struct dm_bio_prison_cell, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);
		else if (r > 0)
			new = &((*new)->rb_right);
		else {
			if (inmate)
				bio_list_add(&cell->bios, inmate);
			*cell_result = cell;
			return 1;
		}
	}

	__setup_new_cell(key, inmate, cell_prealloc);
	*cell_result = cell_prealloc;

	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return 0;
}

static int bio_detain(struct dm_bio_prison *prison,
		      struct dm_cell_key *key,
		      struct bio *inmate,
		      struct dm_bio_prison_cell *cell_prealloc,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}

int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);

int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);
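/*
 * Example (an illustrative sketch, not part of this file): the usual
 * detain pattern.  A cell is preallocated, and when dm_bio_detain()
 * reports that a cell already held the key (return value > 0) the
 * preallocation is handed back to the pool.  example_detain is a
 * hypothetical helper.
 *
 *	static int example_detain(struct dm_bio_prison *prison,
 *				  struct dm_cell_key *key, struct bio *bio,
 *				  struct dm_bio_prison_cell **cell)
 *	{
 *		int r;
 *		struct dm_bio_prison_cell *prealloc;
 *
 *		prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *		r = dm_bio_detain(prison, key, bio, prealloc, cell);
 *		if (r)
 *			// bio was added to an existing cell's list
 *			dm_bio_prison_free_cell(prison, prealloc);
 *
 *		return r;
 *	}
 */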
/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison *prison,
			   struct dm_bio_prison_cell *cell,
			   struct bio_list *inmates)
{
	rb_erase(&cell->node, &prison->cells);

	if (inmates) {
		if (cell->holder)
			bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
	}
}

void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release(prison, cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release);
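/*
 * Example (an illustrative sketch, not part of this file): releasing a
 * cell and reissuing the bios it held.  The bio_list must be initialised
 * before the call, as noted above.  example_release is a hypothetical
 * caller; resubmission via generic_make_request() is just one option.
 *
 *	static void example_release(struct dm_bio_prison *prison,
 *				    struct dm_bio_prison_cell *cell)
 *	{
 *		struct bio_list bios;
 *		struct bio *bio;
 *
 *		bio_list_init(&bios);
 *		dm_cell_release(prison, cell, &bios);
 *		dm_bio_prison_free_cell(prison, cell);
 *
 *		while ((bio = bio_list_pop(&bios)))
 *			generic_make_request(bio);
 *	}
 */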
/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct dm_bio_prison *prison,
				     struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
{
	rb_erase(&cell->node, &prison->cells);
	bio_list_merge(inmates, &cell->bios);
}

void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__cell_release_no_holder(prison, cell, inmates);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, int error)
{
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);
	dm_cell_release(prison, cell, &bios);

	while ((bio = bio_list_pop(&bios))) {
		bio->bi_error = error;
		bio_endio(bio);
	}
}
EXPORT_SYMBOL_GPL(dm_cell_error);
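/*
 * Note: visit_fn is called with the prison spinlock held and interrupts
 * disabled, so it must not sleep or call back into the prison.
 */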
void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context,
			   struct dm_bio_prison_cell *cell)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	visit_fn(context, cell);
	rb_erase(&cell->node, &prison->cells);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);

static int __promote_or_release(struct dm_bio_prison *prison,
				struct dm_bio_prison_cell *cell)
{
	if (bio_list_empty(&cell->bios)) {
		rb_erase(&cell->node, &prison->cells);
		return 1;
	}

	cell->holder = bio_list_pop(&cell->bios);
	return 0;
}

int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __promote_or_release(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);
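/*
 * Example (an illustrative sketch, not part of this file): handling the
 * completion of the current holder.  If the cell was empty it has already
 * been removed from the prison and can be freed; otherwise the first
 * waiting bio has been promoted to holder and should be processed next.
 * example_holder_done is a hypothetical callback.
 *
 *	static void example_holder_done(struct dm_bio_prison *prison,
 *					struct dm_bio_prison_cell *cell)
 *	{
 *		if (dm_cell_promote_or_release(prison, cell)) {
 *			dm_bio_prison_free_cell(prison, cell);
 *			return;
 *		}
 *
 *		generic_make_request(cell->holder);
 *	}
 */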
/*----------------------------------------------------------------*/
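/*
 * The deferred set is a small ring of reference-counted entries.  Callers
 * take a reference on the current entry before starting an in-flight
 * operation and drop it on completion; work queued with
 * dm_deferred_set_add_work() is handed back to the caller only after all
 * operations that were in flight when it was queued have completed.
 */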
#define DEFERRED_SET_SIZE 64

struct dm_deferred_entry {
	struct dm_deferred_set *ds;
	unsigned count;
	struct list_head work_items;
};

struct dm_deferred_set {
	spinlock_t lock;
	unsigned current_entry;
	unsigned sweeper;
	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
	int i;
	struct dm_deferred_set *ds;

	ds = kmalloc(sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	spin_lock_init(&ds->lock);
	ds->current_entry = 0;
	ds->sweeper = 0;
	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
		ds->entries[i].ds = ds;
		ds->entries[i].count = 0;
		INIT_LIST_HEAD(&ds->entries[i].work_items);
	}

	return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
	kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
	unsigned long flags;
	struct dm_deferred_entry *entry;

	spin_lock_irqsave(&ds->lock, flags);
	entry = ds->entries + ds->current_entry;
	entry->count++;
	spin_unlock_irqrestore(&ds->lock, flags);

	return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}

static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
/*
 * Returns 1 if the work was deferred, or 0 if there are no pending
 * entries, in which case the caller may carry out the job immediately.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned long flags;
	unsigned next_entry;

	spin_lock_irqsave(&ds->lock, flags);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irqrestore(&ds->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
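/*
 * Example (an illustrative sketch, not part of this file): the usual
 * pairing of inc/dec around an in-flight operation.  On a decrement, any
 * queued work whose prerequisites have drained is spliced onto the
 * caller's list for processing.  example_start_io and example_end_io are
 * hypothetical helpers.
 *
 *	static struct dm_deferred_entry *example_start_io(struct dm_deferred_set *ds,
 *							  struct bio *bio)
 *	{
 *		struct dm_deferred_entry *entry = dm_deferred_entry_inc(ds);
 *
 *		generic_make_request(bio);
 *		return entry;
 *	}
 *
 *	static void example_end_io(struct dm_deferred_entry *entry,
 *				   struct list_head *completed_work)
 *	{
 *		dm_deferred_entry_dec(entry, completed_work);
 *		// completed_work now holds any deferred work that is ready;
 *		// the caller should process it after returning from here.
 *	}
 */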
/*----------------------------------------------------------------*/

static int __init dm_bio_prison_init(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

static void __exit dm_bio_prison_exit(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}

/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");