dm-btree-spine.c
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree-internal.h"
#include "dm-transaction-manager.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "btree spine"

/*----------------------------------------------------------------*/

#define BTREE_CSUM_XOR 121107
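
/*
 * Validator for on-disk btree nodes: prepare_for_write() stamps the
 * block number and checksum into the node header, and check() verifies
 * them (plus basic sanity of the entry counts and flags) on read.
 */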
static int node_check(struct dm_block_validator *v,
		      struct dm_block *b,
		      size_t block_size);

static void node_prepare_for_write(struct dm_block_validator *v,
				   struct dm_block *b,
				   size_t block_size)
{
	struct btree_node *n = dm_block_data(b);
	struct node_header *h = &n->header;

	h->blocknr = cpu_to_le64(dm_block_location(b));
	h->csum = cpu_to_le32(dm_bm_checksum(&h->flags,
					     block_size - sizeof(__le32),
					     BTREE_CSUM_XOR));

	/* Sanity check against the block_size actually in use, not a hardcoded 4096. */
	BUG_ON(node_check(v, b, block_size));
}

static int node_check(struct dm_block_validator *v,
		      struct dm_block *b,
		      size_t block_size)
{
	struct btree_node *n = dm_block_data(b);
	struct node_header *h = &n->header;
	size_t value_size;
	__le32 csum_disk;
	uint32_t flags;

	if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
		DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(h->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&h->flags,
					       block_size - sizeof(__le32),
					       BTREE_CSUM_XOR));
	if (csum_disk != h->csum) {
		DMERR_LIMIT("node_check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
		return -EILSEQ;
	}

	value_size = le32_to_cpu(h->value_size);

	if (sizeof(struct node_header) +
	    (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) {
		DMERR_LIMIT("node_check failed: max_entries too large");
		return -EILSEQ;
	}

	if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) {
		DMERR_LIMIT("node_check failed: too many entries");
		return -EILSEQ;
	}

	/*
	 * The node must be either INTERNAL or LEAF.
	 */
	flags = le32_to_cpu(h->flags);
	if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
		DMERR_LIMIT("node_check failed: node is neither INTERNAL or LEAF");
		return -EILSEQ;
	}

	return 0;
}

struct dm_block_validator btree_node_validator = {
	.name = "btree_node",
	.prepare_for_write = node_prepare_for_write,
	.check = node_check
};

/*----------------------------------------------------------------*/

int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
		 struct dm_block **result)
{
	return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
}
static int bn_shadow(struct dm_btree_info *info, dm_block_t orig,
		     struct dm_btree_value_type *vt,
		     struct dm_block **result)
{
	int r, inc;

	r = dm_tm_shadow_block(info->tm, orig, &btree_node_validator,
			       result, &inc);
	if (!r && inc)
		inc_children(info->tm, dm_block_data(*result), vt);

	return r;
}

int new_block(struct dm_btree_info *info, struct dm_block **result)
{
	return dm_tm_new_block(info->tm, &btree_node_validator, result);
}

void unlock_block(struct dm_btree_info *info, struct dm_block *b)
{
	dm_tm_unlock(info->tm, b);
}

/*----------------------------------------------------------------*/
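
/*
 * A read-only spine holds read locks on the last couple of nodes
 * visited while walking down the tree, dropping the oldest lock as the
 * walk descends so that at most two nodes (child and parent) are held
 * at once.
 */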
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info)
{
	s->info = info;
	s->count = 0;
	s->nodes[0] = NULL;
	s->nodes[1] = NULL;
}

int exit_ro_spine(struct ro_spine *s)
{
	int r = 0, i;

	for (i = 0; i < s->count; i++) {
		unlock_block(s->info, s->nodes[i]);
	}

	return r;
}

int ro_step(struct ro_spine *s, dm_block_t new_child)
{
	int r;

	if (s->count == 2) {
		unlock_block(s->info, s->nodes[0]);
		s->nodes[0] = s->nodes[1];
		s->count--;
	}

	r = bn_read_lock(s->info, new_child, s->nodes + s->count);
	if (!r)
		s->count++;

	return r;
}

void ro_pop(struct ro_spine *s)
{
	BUG_ON(!s->count);
	--s->count;
	unlock_block(s->info, s->nodes[s->count]);
}

struct btree_node *ro_node(struct ro_spine *s)
{
	struct dm_block *block;

	BUG_ON(!s->count);
	block = s->nodes[s->count - 1];

	return dm_block_data(block);
}
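
#if 0
/*
 * Illustrative sketch only (not part of the original file): one way a
 * caller such as dm-btree.c's lookup path could use a read-only spine
 * to walk from the root to the leaf covering 'key'.  sketch_lookup()
 * is a hypothetical name; the real code uses a binary search
 * (lower_bound()) rather than the linear scan shown here and lets the
 * caller verify the key it landed on.
 */
static int sketch_lookup(struct dm_btree_info *info, dm_block_t root,
			 uint64_t key, void *value_dest, size_t value_size)
{
	int r;
	uint32_t i, flags, nr_entries;
	struct btree_node *n;
	struct ro_spine spine;
	dm_block_t block = root;

	init_ro_spine(&spine, info);
	for (;;) {
		/* Read lock the next node; the spine drops the grandparent. */
		r = ro_step(&spine, block);
		if (r)
			break;

		n = ro_node(&spine);
		flags = le32_to_cpu(n->header.flags);
		nr_entries = le32_to_cpu(n->header.nr_entries);

		/* Find the last entry whose key is <= the search key. */
		for (i = 0; i < nr_entries; i++)
			if (le64_to_cpu(n->keys[i]) > key)
				break;
		if (!i) {
			r = -ENODATA;
			break;
		}
		i--;

		if (flags & LEAF_NODE) {
			/* Copy the value out before the read locks go away. */
			memcpy(value_dest, value_ptr(n, i), value_size);
			r = 0;
			break;
		}

		/* Internal node: the value is the child's block number. */
		block = value64(n, i);
	}
	exit_ro_spine(&spine);

	return r;
}
#endif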

/*----------------------------------------------------------------*/
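
/*
 * A shadow spine is the copy-on-write counterpart of the read-only
 * spine: each step shadows the next node (copying it if it is shared)
 * and keeps write locks on the current node and its parent so the
 * parent's pointer can be rewired to the shadow's new location.
 */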
void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info)
{
	s->info = info;
	s->count = 0;
}

int exit_shadow_spine(struct shadow_spine *s)
{
	int r = 0, i;

	for (i = 0; i < s->count; i++) {
		unlock_block(s->info, s->nodes[i]);
	}

	return r;
}

int shadow_step(struct shadow_spine *s, dm_block_t b,
		struct dm_btree_value_type *vt)
{
	int r;

	if (s->count == 2) {
		unlock_block(s->info, s->nodes[0]);
		s->nodes[0] = s->nodes[1];
		s->count--;
	}

	r = bn_shadow(s->info, b, vt, s->nodes + s->count);
	if (!r) {
		if (!s->count)
			s->root = dm_block_location(s->nodes[0]);

		s->count++;
	}

	return r;
}

struct dm_block *shadow_current(struct shadow_spine *s)
{
	BUG_ON(!s->count);

	return s->nodes[s->count - 1];
}

struct dm_block *shadow_parent(struct shadow_spine *s)
{
	BUG_ON(s->count != 2);

	return s->count == 2 ? s->nodes[0] : NULL;
}

int shadow_has_parent(struct shadow_spine *s)
{
	return s->count >= 2;
}
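
/*
 * The location the first node was shadowed to, i.e. the new root of
 * the (sub)tree being modified.
 */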
int shadow_root(struct shadow_spine *s)
{
	return s->root;
}
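
/*
 * Value type for btree levels whose values are themselves 64-bit block
 * numbers (e.g. the internal levels of a multi-level btree): inc/dec
 * adjust the reference count of the pointed-to block through the
 * transaction manager.
 */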
static void le64_inc(void *context, const void *value_le)
{
	struct dm_transaction_manager *tm = context;
	__le64 v_le;

	memcpy(&v_le, value_le, sizeof(v_le));
	dm_tm_inc(tm, le64_to_cpu(v_le));
}

static void le64_dec(void *context, const void *value_le)
{
	struct dm_transaction_manager *tm = context;
	__le64 v_le;

	memcpy(&v_le, value_le, sizeof(v_le));
	dm_tm_dec(tm, le64_to_cpu(v_le));
}

static int le64_equal(void *context, const void *value1_le, const void *value2_le)
{
	__le64 v1_le, v2_le;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	return v1_le == v2_le;
}

void init_le64_type(struct dm_transaction_manager *tm,
		    struct dm_btree_value_type *vt)
{
	vt->context = tm;
	vt->size = sizeof(__le64);
	vt->inc = le64_inc;
	vt->dec = le64_dec;
	vt->equal = le64_equal;
}