dm-cache-policy-cleaner.c

/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * writeback cache policy supporting flushing out dirty cache blocks.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
/*----------------------------------------------------------------*/

#define DM_MSG_PREFIX "cache cleaner"

/* Cache entry struct. */
struct wb_cache_entry {
        struct list_head list;
        struct hlist_node hlist;

        dm_oblock_t oblock;
        dm_cblock_t cblock;
        bool dirty:1;
        bool pending:1;
};

struct hash {
        struct hlist_head *table;
        dm_block_t hash_bits;
        unsigned nr_buckets;
};

struct policy {
        struct dm_cache_policy policy;
        spinlock_t lock;

        struct list_head free;
        struct list_head clean;
        struct list_head clean_pending;
        struct list_head dirty;

        /*
         * We know exactly how many cblocks will be needed,
         * so we can allocate them up front.
         */
        dm_cblock_t cache_size, nr_cblocks_allocated;
        struct wb_cache_entry *cblocks;
        struct hash chash;
};
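
/*
 * Every cache block is represented by exactly one wb_cache_entry, which
 * at any time lives on exactly one of the lists above:
 *
 *   free          - allocated up front, not yet mapped to an oblock
 *   clean/dirty   - mapped entries, split by their dirty state
 *   clean_pending - dirty entries already handed out as writeback work;
 *                   they move to the clean list once the write back
 *                   completes and clear_dirty() is called
 */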
/*----------------------------------------------------------------------------*/

/*
 * Low-level functions.
 */
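/*
 * E.g. next_power(64, 16) == 64, next_power(10, 16) == 16 and
 * next_power(100, 16) == 128: the result is always a power of two no
 * smaller than min.
 */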
static unsigned next_power(unsigned n, unsigned min)
{
        return roundup_pow_of_two(max(n, min));
}
static struct policy *to_policy(struct dm_cache_policy *p)
{
        return container_of(p, struct policy, policy);
}
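
/*
 * Unlink and return the first entry of a list.  Callers must ensure the
 * list is not empty.
 */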
static struct list_head *list_pop(struct list_head *q)
{
        struct list_head *r = q->next;

        list_del(r);

        return r;
}

/*----------------------------------------------------------------------------*/

/* Allocate/free various resources. */
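/*
 * Size the hash at roughly one bucket per 16 cache blocks (elts >> 4),
 * with a minimum of 16 buckets, rounded up to a power of two.  E.g. a
 * cache of 1024 blocks gets next_power(1024 >> 4, 16) == 64 buckets
 * and hash_bits == 6.
 */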
static int alloc_hash(struct hash *hash, unsigned elts)
{
        hash->nr_buckets = next_power(elts >> 4, 16);
        hash->hash_bits = __ffs(hash->nr_buckets);
        hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);

        return hash->table ? 0 : -ENOMEM;
}

static void free_hash(struct hash *hash)
{
        vfree(hash->table);
}

static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size)
{
        int r = -ENOMEM;

        p->cblocks = vzalloc(sizeof(*p->cblocks) * from_cblock(cache_size));
        if (p->cblocks) {
                unsigned u = from_cblock(cache_size);

                while (u--)
                        list_add(&p->cblocks[u].list, &p->free);

                p->nr_cblocks_allocated = 0;

                /* Cache entries hash. */
                r = alloc_hash(&p->chash, from_cblock(cache_size));
                if (r)
                        vfree(p->cblocks);
        }

        return r;
}

static void free_cache_blocks_and_hash(struct policy *p)
{
        free_hash(&p->chash);
        vfree(p->cblocks);
}
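
/*
 * Take an entry off the preallocated free list.  This cannot fail while
 * fewer than cache_size entries are in use; the BUG_ON() guards against
 * callers exceeding that.
 */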
static struct wb_cache_entry *alloc_cache_entry(struct policy *p)
{
        struct wb_cache_entry *e;

        BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));

        e = list_entry(list_pop(&p->free), struct wb_cache_entry, list);
        p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);

        return e;
}

/*----------------------------------------------------------------------------*/

/* Hash functions (lookup, insert, remove). */
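/*
 * Lookup walks the bucket's chain and moves a hit to the head of its
 * bucket, so frequently accessed entries stay cheap to find.
 */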
static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
{
        struct hash *hash = &p->chash;
        unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
        struct wb_cache_entry *cur;
        struct hlist_head *bucket = &hash->table[h];

        hlist_for_each_entry(cur, bucket, hlist) {
                if (cur->oblock == oblock) {
                        /* Move to front of bucket for faster access. */
                        hlist_del(&cur->hlist);
                        hlist_add_head(&cur->hlist, bucket);
                        return cur;
                }
        }

        return NULL;
}

static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e)
{
        unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);

        hlist_add_head(&e->hlist, &p->chash.table[h]);
}

static void remove_cache_hash_entry(struct wb_cache_entry *e)
{
        hlist_del(&e->hlist);
}
/* Public interface (see dm-cache-policy.h). */
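
/*
 * Map simply reports whether the origin block is cached.  Being a
 * cleaner policy it never promotes or demotes blocks, so the result is
 * only ever POLICY_HIT or POLICY_MISS.
 */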
static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
                  bool can_block, bool can_migrate, bool discarded_oblock,
                  struct bio *bio, struct policy_locker *locker,
                  struct policy_result *result)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        result->op = POLICY_MISS;

        if (can_block)
                spin_lock_irqsave(&p->lock, flags);
        else if (!spin_trylock_irqsave(&p->lock, flags))
                return -EWOULDBLOCK;

        e = lookup_cache_entry(p, oblock);
        if (e) {
                result->op = POLICY_HIT;
                result->cblock = e->cblock;
        }

        spin_unlock_irqrestore(&p->lock, flags);

        return 0;
}

static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
{
        int r;
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        if (!spin_trylock_irqsave(&p->lock, flags))
                return -EWOULDBLOCK;

        e = lookup_cache_entry(p, oblock);
        if (e) {
                *cblock = e->cblock;
                r = 0;
        } else
                r = -ENOENT;

        spin_unlock_irqrestore(&p->lock, flags);

        return r;
}
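
/*
 * Move an entry between the dirty and clean lists.  The caller must
 * hold p->lock, and the mapping must exist (hence the BUG_ON()).
 */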
static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bool set)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;

        e = lookup_cache_entry(p, oblock);
        BUG_ON(!e);

        if (set) {
                if (!e->dirty) {
                        e->dirty = true;
                        list_move(&e->list, &p->dirty);
                }

        } else {
                if (e->dirty) {
                        e->pending = false;
                        e->dirty = false;
                        list_move(&e->list, &p->clean);
                }
        }
}

static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        __set_clear_dirty(pe, oblock, true);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        __set_clear_dirty(pe, oblock, false);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
{
        insert_cache_hash_entry(p, e);
        if (e->dirty)
                list_add(&e->list, &p->dirty);
        else
                list_add(&e->list, &p->clean);
}
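
/*
 * Called when the cache target reloads previously committed mappings
 * from its metadata device; blocks default to clean.  The hint is
 * ignored: this policy keeps no per-block state worth persisting.
 */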
static int wb_load_mapping(struct dm_cache_policy *pe,
                           dm_oblock_t oblock, dm_cblock_t cblock,
                           uint32_t hint, bool hint_valid)
{
        int r;
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e = alloc_cache_entry(p);

        if (e) {
                e->cblock = cblock;
                e->oblock = oblock;
                e->dirty = false; /* blocks default to clean */
                add_cache_entry(p, e);
                r = 0;
        } else
                r = -ENOMEM;

        return r;
}

static void wb_destroy(struct dm_cache_policy *pe)
{
        struct policy *p = to_policy(pe);

        free_cache_blocks_and_hash(p);
        kfree(p);
}

static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
{
        struct wb_cache_entry *r = lookup_cache_entry(p, oblock);

        BUG_ON(!r);

        remove_cache_hash_entry(r);
        list_del(&r->list);

        return r;
}

static void wb_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        e = __wb_force_remove_mapping(p, oblock);
        list_add_tail(&e->list, &p->free);
        BUG_ON(!from_cblock(p->nr_cblocks_allocated));
        p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void wb_force_mapping(struct dm_cache_policy *pe,
                             dm_oblock_t current_oblock, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        e = __wb_force_remove_mapping(p, current_oblock);
        e->oblock = oblock;
        add_cache_entry(p, e);
        spin_unlock_irqrestore(&p->lock, flags);
}
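
/*
 * Pop the next dirty entry and park it on the clean_pending list; it
 * stays there until the core target finishes writing it back and calls
 * wb_clear_dirty(), which moves it to the clean list.
 */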
static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
{
        struct list_head *l;
        struct wb_cache_entry *r;

        if (list_empty(&p->dirty))
                return NULL;

        l = list_pop(&p->dirty);
        r = container_of(l, struct wb_cache_entry, list);
        list_add(l, &p->clean_pending);

        return r;
}
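
/*
 * Hand out writeback work.  All dirty blocks are equally urgent to a
 * cleaner, so critical_only is ignored.
 */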
static int wb_writeback_work(struct dm_cache_policy *pe,
                             dm_oblock_t *oblock,
                             dm_cblock_t *cblock,
                             bool critical_only)
{
        int r = -ENOENT;
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);

        e = get_next_dirty_entry(p);
        if (e) {
                *oblock = e->oblock;
                *cblock = e->cblock;
                r = 0;
        }

        spin_unlock_irqrestore(&p->lock, flags);

        return r;
}

static dm_cblock_t wb_residency(struct dm_cache_policy *pe)
{
        return to_policy(pe)->nr_cblocks_allocated;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct policy *p)
{
        p->policy.destroy = wb_destroy;
        p->policy.map = wb_map;
        p->policy.lookup = wb_lookup;
        p->policy.set_dirty = wb_set_dirty;
        p->policy.clear_dirty = wb_clear_dirty;
        p->policy.load_mapping = wb_load_mapping;
        p->policy.walk_mappings = NULL;
        p->policy.remove_mapping = wb_remove_mapping;
        p->policy.writeback_work = wb_writeback_work;
        p->policy.force_mapping = wb_force_mapping;
        p->policy.residency = wb_residency;
        p->policy.tick = NULL;
}

static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
                                         sector_t origin_size,
                                         sector_t cache_block_size)
{
        int r;
        struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;

        init_policy_functions(p);
        INIT_LIST_HEAD(&p->free);
        INIT_LIST_HEAD(&p->clean);
        INIT_LIST_HEAD(&p->clean_pending);
        INIT_LIST_HEAD(&p->dirty);

        p->cache_size = cache_size;
        spin_lock_init(&p->lock);

        /* Allocate cache entry structs and add them to the free list. */
        r = alloc_cache_blocks_with_hash(p, cache_size);
        if (!r)
                return &p->policy;

        kfree(p);

        return NULL;
}

/*----------------------------------------------------------------------------*/
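
/*
 * The policy registers under the name "cleaner".  A cache target
 * selects it by name in its table line, e.g. (illustrative device
 * names and sizes):
 *
 *   dmsetup create cached --table "0 409600 cache /dev/meta /dev/fast \
 *       /dev/slow 512 0 cleaner 0"
 *
 * i.e. <metadata dev> <cache dev> <origin dev> <block size>, no feature
 * args, policy "cleaner" with no policy args.
 */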
static struct dm_cache_policy_type wb_policy_type = {
        .name = "cleaner",
        .version = {1, 0, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = wb_create
};

static int __init wb_init(void)
{
        int r = dm_cache_policy_register(&wb_policy_type);

        if (r < 0)
                DMERR("register failed %d", r);
        else
                DMINFO("version %u.%u.%u loaded",
                       wb_policy_type.version[0],
                       wb_policy_type.version[1],
                       wb_policy_type.version[2]);

        return r;
}

static void __exit wb_exit(void)
{
        dm_cache_policy_unregister(&wb_policy_type);
}

module_init(wb_init);
module_exit(wb_exit);

MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("cleaner cache policy");