dm-array.c

/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-array.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "array"

/*----------------------------------------------------------------*/

/*
 * The array is implemented as a fully populated btree, which points to
 * blocks that contain the packed values. This is more space efficient
 * than just using a btree since we don't store 1 key per value.
 */
struct array_block {
	__le32 csum;
	__le32 max_entries;
	__le32 nr_entries;
	__le32 value_size;
	__le64 blocknr; /* Block this node is supposed to live in. */
} __packed;

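/*
 * Illustrative (not normative) on-disk layout of an array block,
 * assuming 8 byte values:
 *
 *	[ 24 byte header above | value 0 | value 1 | ... | value n-1 ]
 *
 * Logical array index i lives in array block (i / max_entries), at
 * entry (i % max_entries) within that block.
 */
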
/*----------------------------------------------------------------*/

/*
 * Validator methods. As usual we calculate a checksum, and also write the
 * block location into the header (paranoia about ssds remapping areas by
 * mistake).
 */
#define CSUM_XOR 595846735

static void array_block_prepare_for_write(struct dm_block_validator *v,
					   struct dm_block *b,
					   size_t size_of_block)
{
	struct array_block *bh_le = dm_block_data(b);

	bh_le->blocknr = cpu_to_le64(dm_block_location(b));
	bh_le->csum = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
						 size_of_block - sizeof(__le32),
						 CSUM_XOR));
}

static int array_block_check(struct dm_block_validator *v,
			     struct dm_block *b,
			     size_t size_of_block)
{
	struct array_block *bh_le = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
		DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
			    (unsigned long long) le64_to_cpu(bh_le->blocknr),
			    (unsigned long long) dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
					       size_of_block - sizeof(__le32),
					       CSUM_XOR));
	if (csum_disk != bh_le->csum) {
		DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
			    (unsigned) le32_to_cpu(csum_disk),
			    (unsigned) le32_to_cpu(bh_le->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator array_validator = {
	.name = "array",
	.prepare_for_write = array_block_prepare_for_write,
	.check = array_block_check
};

/*----------------------------------------------------------------*/

/*
 * Functions for manipulating the array blocks.
 */

/*
 * Returns a pointer to a value within an array block.
 *
 * index - The index into _this_ specific block.
 */
static void *element_at(struct dm_array_info *info, struct array_block *ab,
			unsigned index)
{
	unsigned char *entry = (unsigned char *) (ab + 1);

	entry += index * info->value_type.size;

	return entry;
}

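/*
 * For example (assuming value_type.size == 8), element_at(info, ab, 3)
 * returns the address 24 + 3 * 8 == 48 bytes past the start of the
 * block: values are packed immediately after the array_block header.
 */
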
/*
 * Utility function that calls one of the value_type methods on every value
 * in an array block.
 */
static void on_entries(struct dm_array_info *info, struct array_block *ab,
		       void (*fn)(void *, const void *))
{
	unsigned i, nr_entries = le32_to_cpu(ab->nr_entries);

	for (i = 0; i < nr_entries; i++)
		fn(info->value_type.context, element_at(info, ab, i));
}

/*
 * Increment every value in an array block.
 */
static void inc_ablock_entries(struct dm_array_info *info, struct array_block *ab)
{
	struct dm_btree_value_type *vt = &info->value_type;

	if (vt->inc)
		on_entries(info, ab, vt->inc);
}

/*
 * Decrement every value in an array block.
 */
static void dec_ablock_entries(struct dm_array_info *info, struct array_block *ab)
{
	struct dm_btree_value_type *vt = &info->value_type;

	if (vt->dec)
		on_entries(info, ab, vt->dec);
}

/*
 * Each array block can hold this many values.
 */
static uint32_t calc_max_entries(size_t value_size, size_t size_of_block)
{
	return (size_of_block - sizeof(struct array_block)) / value_size;
}

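/*
 * A worked example (illustrative numbers only): with a 4096 byte
 * metadata block and 8 byte values,
 *
 *	calc_max_entries(8, 4096) == (4096 - 24) / 8 == 509
 *
 * so each array block packs up to 509 values after its 24 byte header.
 */
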
/*
 * Allocate a new array block. The caller will need to unlock block.
 */
static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
			uint32_t max_entries,
			struct dm_block **block, struct array_block **ab)
{
	int r;

	r = dm_tm_new_block(info->btree_info.tm, &array_validator, block);
	if (r)
		return r;

	(*ab) = dm_block_data(*block);
	(*ab)->max_entries = cpu_to_le32(max_entries);
	(*ab)->nr_entries = cpu_to_le32(0);
	(*ab)->value_size = cpu_to_le32(info->value_type.size);

	return 0;
}

/*
 * Pad an array block out with a particular value. Every instance will
 * cause an increment of the value_type. new_nr must always be at least
 * the current number of entries.
 */
static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
			const void *value, unsigned new_nr)
{
	unsigned i;
	uint32_t nr_entries;
	struct dm_btree_value_type *vt = &info->value_type;

	BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
	BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));

	nr_entries = le32_to_cpu(ab->nr_entries);
	for (i = nr_entries; i < new_nr; i++) {
		if (vt->inc)
			vt->inc(vt->context, value);
		memcpy(element_at(info, ab, i), value, vt->size);
	}
	ab->nr_entries = cpu_to_le32(new_nr);
}

/*
 * Remove some entries from the back of an array block. Every value
 * removed will be decremented. new_nr must be <= the current number of
 * entries.
 */
static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
			unsigned new_nr)
{
	unsigned i;
	uint32_t nr_entries;
	struct dm_btree_value_type *vt = &info->value_type;

	BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
	BUG_ON(new_nr > le32_to_cpu(ab->nr_entries));

	nr_entries = le32_to_cpu(ab->nr_entries);
	for (i = nr_entries; i > new_nr; i--)
		if (vt->dec)
			vt->dec(vt->context, element_at(info, ab, i - 1));
	ab->nr_entries = cpu_to_le32(new_nr);
}

/*
 * Read locks a block, and coerces it to an array block. The caller must
 * unlock 'block' when finished.
 */
static int get_ablock(struct dm_array_info *info, dm_block_t b,
		      struct dm_block **block, struct array_block **ab)
{
	int r;

	r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
	if (r)
		return r;

	*ab = dm_block_data(*block);

	return 0;
}

/*
 * Unlocks an array block.
 */
static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
{
	dm_tm_unlock(info->btree_info.tm, block);
}

/*----------------------------------------------------------------*/

/*
 * Btree manipulation.
 */

/*
 * Looks up an array block in the btree, and then read locks it.
 *
 * index is the index of the array_block (ie. the array index /
 * max_entries).
 */
static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
			 unsigned index, struct dm_block **block,
			 struct array_block **ab)
{
	int r;
	uint64_t key = index;
	__le64 block_le;

	r = dm_btree_lookup(&info->btree_info, root, &key, &block_le);
	if (r)
		return r;

	return get_ablock(info, le64_to_cpu(block_le), block, ab);
}

/*
 * Insert an array block into the btree. The block is _not_ unlocked.
 */
static int insert_ablock(struct dm_array_info *info, uint64_t index,
			 struct dm_block *block, dm_block_t *root)
{
	__le64 block_le = cpu_to_le64(dm_block_location(block));

	__dm_bless_for_disk(block_le);
	return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
}

/*
 * Looks up an array block in the btree. Then shadows it, and updates the
 * btree to point to this new shadow. 'root' is an input/output parameter
 * for both the current root block, and the new one.
 */
static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
			 unsigned index, struct dm_block **block,
			 struct array_block **ab)
{
	int r, inc;
	uint64_t key = index;
	dm_block_t b;
	__le64 block_le;

	/*
	 * lookup
	 */
	r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
	if (r)
		return r;
	b = le64_to_cpu(block_le);

	/*
	 * shadow
	 */
	r = dm_tm_shadow_block(info->btree_info.tm, b,
			       &array_validator, block, &inc);
	if (r)
		return r;

	*ab = dm_block_data(*block);
	if (inc)
		inc_ablock_entries(info, *ab);

	/*
	 * Reinsert.
	 *
	 * The shadow op will often be a noop. Only insert if it really
	 * copied data.
	 */
	if (dm_block_location(*block) != b) {
		/*
		 * dm_tm_shadow_block will have already decremented the old
		 * block, but it is still referenced by the btree. We
		 * increment to stop the insert decrementing it below zero
		 * when overwriting the old value.
		 */
		dm_tm_inc(info->btree_info.tm, b);
		r = insert_ablock(info, index, *block, root);
	}

	return r;
}

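/*
 * A hypothetical walk-through of the branch above: suppose block b has
 * reference count 2 (shared with a previous transaction). The shadow
 * copies b to a new block and drops b to 1. The btree insert then
 * overwrites the leaf that still points to b, decrementing it again;
 * without the dm_tm_inc() this would take b to 0 and free a block the
 * old transaction still needs.
 */
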
/*
 * Allocate a new array block, and fill it with some values.
 */
static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
			     uint32_t max_entries,
			     unsigned block_index, uint32_t nr,
			     const void *value, dm_block_t *root)
{
	int r;
	struct dm_block *block;
	struct array_block *ab;

	r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
	if (r)
		return r;

	fill_ablock(info, ab, value, nr);
	r = insert_ablock(info, block_index, block, root);
	unlock_ablock(info, block);

	return r;
}

static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
			       unsigned begin_block, unsigned end_block,
			       unsigned max_entries, const void *value,
			       dm_block_t *root)
{
	int r = 0;

	for (; !r && begin_block != end_block; begin_block++)
		r = insert_new_ablock(info, size_of_block, max_entries,
				      begin_block, max_entries, value, root);

	return r;
}

/*
 * There are a bunch of functions involved with resizing an array. This
 * structure holds information that is commonly needed by them. Purely
 * here to reduce parameter count.
 */
struct resize {
	/*
	 * Describes the array.
	 */
	struct dm_array_info *info;

	/*
	 * The current root of the array. This gets updated.
	 */
	dm_block_t root;

	/*
	 * Metadata block size. Used to calculate the nr entries in an
	 * array block.
	 */
	size_t size_of_block;

	/*
	 * Maximum nr entries in an array block.
	 */
	unsigned max_entries;

	/*
	 * nr of completely full blocks in the array.
	 *
	 * 'old' refers to before the resize, 'new' after.
	 */
	unsigned old_nr_full_blocks, new_nr_full_blocks;

	/*
	 * Number of entries in the final block. 0 iff there are only full
	 * blocks in the array.
	 */
	unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;

	/*
	 * The default value used when growing the array.
	 */
	const void *value;
};

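/*
 * A worked example (illustrative, assuming max_entries == 509): resizing
 * from 1000 to 1500 entries gives
 *
 *	old_nr_full_blocks = 1000 / 509 = 1
 *	old_nr_entries_in_last_block = 1000 % 509 = 491
 *	new_nr_full_blocks = 1500 / 509 = 2
 *	new_nr_entries_in_last_block = 1500 % 509 = 482
 */
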
/*
 * Removes a consecutive set of array blocks from the btree. The values
 * in the blocks are decremented as a side effect of the btree remove.
 *
 * begin_index - the index of the first array block to remove.
 * end_index - the one-past-the-end value. ie. this block is not removed.
 */
static int drop_blocks(struct resize *resize, unsigned begin_index,
		       unsigned end_index)
{
	int r;

	while (begin_index != end_index) {
		uint64_t key = begin_index++;

		r = dm_btree_remove(&resize->info->btree_info, resize->root,
				    &key, &resize->root);
		if (r)
			return r;
	}

	return 0;
}

/*
 * Calculates how many blocks are needed for the array.
 */
static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
				       unsigned nr_entries_in_last_block)
{
	return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
}

/*
 * Shrink an array.
 */
static int shrink(struct resize *resize)
{
	int r;
	unsigned begin, end;
	struct dm_block *block;
	struct array_block *ab;

	/*
	 * Lose some blocks from the back?
	 */
	if (resize->new_nr_full_blocks < resize->old_nr_full_blocks) {
		begin = total_nr_blocks_needed(resize->new_nr_full_blocks,
					       resize->new_nr_entries_in_last_block);
		end = total_nr_blocks_needed(resize->old_nr_full_blocks,
					     resize->old_nr_entries_in_last_block);

		r = drop_blocks(resize, begin, end);
		if (r)
			return r;
	}

	/*
	 * Trim the new tail block
	 */
	if (resize->new_nr_entries_in_last_block) {
		r = shadow_ablock(resize->info, &resize->root,
				  resize->new_nr_full_blocks, &block, &ab);
		if (r)
			return r;

		trim_ablock(resize->info, ab, resize->new_nr_entries_in_last_block);
		unlock_ablock(resize->info, block);
	}

	return 0;
}

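/*
 * Continuing the illustrative numbers above (max_entries == 509):
 * shrinking from 1500 to 1000 entries drops whole block index 2
 * (begin == 2, end == 3), then shadows block index 1 and trims it from
 * 509 entries down to 491.
 */
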
/*
 * Grow an array.
 */
static int grow_extend_tail_block(struct resize *resize, uint32_t new_nr_entries)
{
	int r;
	struct dm_block *block;
	struct array_block *ab;

	r = shadow_ablock(resize->info, &resize->root,
			  resize->old_nr_full_blocks, &block, &ab);
	if (r)
		return r;

	fill_ablock(resize->info, ab, resize->value, new_nr_entries);
	unlock_ablock(resize->info, block);

	return r;
}

static int grow_add_tail_block(struct resize *resize)
{
	return insert_new_ablock(resize->info, resize->size_of_block,
				 resize->max_entries,
				 resize->new_nr_full_blocks,
				 resize->new_nr_entries_in_last_block,
				 resize->value, &resize->root);
}

static int grow_needs_more_blocks(struct resize *resize)
{
	int r;
	unsigned old_nr_blocks = resize->old_nr_full_blocks;

	if (resize->old_nr_entries_in_last_block > 0) {
		old_nr_blocks++;

		r = grow_extend_tail_block(resize, resize->max_entries);
		if (r)
			return r;
	}

	r = insert_full_ablocks(resize->info, resize->size_of_block,
				old_nr_blocks,
				resize->new_nr_full_blocks,
				resize->max_entries, resize->value,
				&resize->root);
	if (r)
		return r;

	if (resize->new_nr_entries_in_last_block)
		r = grow_add_tail_block(resize);

	return r;
}

static int grow(struct resize *resize)
{
	if (resize->new_nr_full_blocks > resize->old_nr_full_blocks)
		return grow_needs_more_blocks(resize);

	else if (resize->old_nr_entries_in_last_block)
		return grow_extend_tail_block(resize, resize->new_nr_entries_in_last_block);

	else
		return grow_add_tail_block(resize);
}

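/*
 * With the same illustrative numbers (max_entries == 509): growing from
 * 1000 to 1500 entries takes the grow_needs_more_blocks() path. The
 * partial tail block (index 1) is first filled out from 491 to 509
 * entries, no extra full blocks are needed (old_nr_blocks ==
 * new_nr_full_blocks == 2), and a new tail block (index 2) is added
 * holding 482 default values.
 */
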
/*----------------------------------------------------------------*/

/*
 * These are the value_type functions for the btree elements, which point
 * to array blocks.
 */
static void block_inc(void *context, const void *value)
{
	__le64 block_le;
	struct dm_array_info *info = context;

	memcpy(&block_le, value, sizeof(block_le));
	dm_tm_inc(info->btree_info.tm, le64_to_cpu(block_le));
}

static void block_dec(void *context, const void *value)
{
	int r;
	uint64_t b;
	__le64 block_le;
	uint32_t ref_count;
	struct dm_block *block;
	struct array_block *ab;
	struct dm_array_info *info = context;

	memcpy(&block_le, value, sizeof(block_le));
	b = le64_to_cpu(block_le);

	r = dm_tm_ref(info->btree_info.tm, b, &ref_count);
	if (r) {
		DMERR_LIMIT("couldn't get reference count for block %llu",
			    (unsigned long long) b);
		return;
	}

	if (ref_count == 1) {
		/*
		 * We're about to drop the last reference to this ablock.
		 * So we need to decrement the ref count of the contents.
		 */
		r = get_ablock(info, b, &block, &ab);
		if (r) {
			DMERR_LIMIT("couldn't get array block %llu",
				    (unsigned long long) b);
			return;
		}

		dec_ablock_entries(info, ab);
		unlock_ablock(info, block);
	}

	dm_tm_dec(info->btree_info.tm, b);
}

static int block_equal(void *context, const void *value1, const void *value2)
{
	return !memcmp(value1, value2, sizeof(__le64));
}

/*----------------------------------------------------------------*/

void dm_array_info_init(struct dm_array_info *info,
			struct dm_transaction_manager *tm,
			struct dm_btree_value_type *vt)
{
	struct dm_btree_value_type *bvt = &info->btree_info.value_type;

	memcpy(&info->value_type, vt, sizeof(info->value_type));
	info->btree_info.tm = tm;
	info->btree_info.levels = 1;

	bvt->context = info;
	bvt->size = sizeof(__le64);
	bvt->inc = block_inc;
	bvt->dec = block_dec;
	bvt->equal = block_equal;
}
EXPORT_SYMBOL_GPL(dm_array_info_init);

int dm_array_empty(struct dm_array_info *info, dm_block_t *root)
{
	return dm_btree_empty(&info->btree_info, root);
}
EXPORT_SYMBOL_GPL(dm_array_empty);

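/*
 * Illustrative client usage (a sketch only; 'tm' and 'my_vt' are
 * hypothetical, and error handling is elided):
 *
 *	struct dm_array_info info;
 *	dm_block_t root;
 *	int r;
 *
 *	dm_array_info_init(&info, tm, &my_vt);
 *	r = dm_array_empty(&info, &root);
 */
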
static int array_resize(struct dm_array_info *info, dm_block_t root,
			uint32_t old_size, uint32_t new_size,
			const void *value, dm_block_t *new_root)
{
	int r;
	struct resize resize;

	if (old_size == new_size) {
		*new_root = root;
		return 0;
	}

	resize.info = info;
	resize.root = root;
	resize.size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
	resize.max_entries = calc_max_entries(info->value_type.size,
					      resize.size_of_block);

	resize.old_nr_full_blocks = old_size / resize.max_entries;
	resize.old_nr_entries_in_last_block = old_size % resize.max_entries;
	resize.new_nr_full_blocks = new_size / resize.max_entries;
	resize.new_nr_entries_in_last_block = new_size % resize.max_entries;
	resize.value = value;

	r = ((new_size > old_size) ? grow : shrink)(&resize);
	if (r)
		return r;

	*new_root = resize.root;
	return 0;
}

int dm_array_resize(struct dm_array_info *info, dm_block_t root,
		    uint32_t old_size, uint32_t new_size,
		    const void *value, dm_block_t *new_root)
	__dm_written_to_disk(value)
{
	int r = array_resize(info, root, old_size, new_size, value, new_root);

	__dm_unbless_for_disk(value);
	return r;
}
EXPORT_SYMBOL_GPL(dm_array_resize);

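/*
 * Illustrative caller sketch (hypothetical __le64 default value,
 * continuing the sketch above): the default must be blessed before the
 * call, since dm_array_resize() unblesses it on the way out:
 *
 *	__le64 value = cpu_to_le64(0);
 *
 *	__dm_bless_for_disk(value);
 *	r = dm_array_resize(&info, root, old_size, new_size, &value, &root);
 */
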
int dm_array_del(struct dm_array_info *info, dm_block_t root)
{
	return dm_btree_del(&info->btree_info, root);
}
EXPORT_SYMBOL_GPL(dm_array_del);

int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
		       uint32_t index, void *value_le)
{
	int r;
	struct dm_block *block;
	struct array_block *ab;
	size_t size_of_block;
	unsigned entry, max_entries;

	size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
	max_entries = calc_max_entries(info->value_type.size, size_of_block);

	r = lookup_ablock(info, root, index / max_entries, &block, &ab);
	if (r)
		return r;

	entry = index % max_entries;
	if (entry >= le32_to_cpu(ab->nr_entries))
		r = -ENODATA;
	else
		memcpy(value_le, element_at(info, ab, entry),
		       info->value_type.size);

	unlock_ablock(info, block);
	return r;
}
EXPORT_SYMBOL_GPL(dm_array_get_value);

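/*
 * e.g. (still assuming max_entries == 509) a lookup of array index 1000
 * reads entry 1000 % 509 == 491 of the array block found under btree
 * key 1000 / 509 == 1.
 */
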
static int array_set_value(struct dm_array_info *info, dm_block_t root,
			   uint32_t index, const void *value, dm_block_t *new_root)
{
	int r;
	struct dm_block *block;
	struct array_block *ab;
	size_t size_of_block;
	unsigned max_entries;
	unsigned entry;
	void *old_value;
	struct dm_btree_value_type *vt = &info->value_type;

	size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
	max_entries = calc_max_entries(info->value_type.size, size_of_block);

	r = shadow_ablock(info, &root, index / max_entries, &block, &ab);
	if (r)
		return r;
	*new_root = root;

	entry = index % max_entries;
	if (entry >= le32_to_cpu(ab->nr_entries)) {
		r = -ENODATA;
		goto out;
	}

	old_value = element_at(info, ab, entry);
	if (vt->dec &&
	    (!vt->equal || !vt->equal(vt->context, old_value, value))) {
		vt->dec(vt->context, old_value);
		if (vt->inc)
			vt->inc(vt->context, value);
	}

	memcpy(old_value, value, info->value_type.size);

out:
	unlock_ablock(info, block);
	return r;
}

int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
		       uint32_t index, const void *value, dm_block_t *new_root)
	__dm_written_to_disk(value)
{
	int r;

	r = array_set_value(info, root, index, value, new_root);
	__dm_unbless_for_disk(value);
	return r;
}
EXPORT_SYMBOL_GPL(dm_array_set_value);

struct walk_info {
	struct dm_array_info *info;
	int (*fn)(void *context, uint64_t key, void *leaf);
	void *context;
};

static int walk_ablock(void *context, uint64_t *keys, void *leaf)
{
	struct walk_info *wi = context;

	int r;
	unsigned i;
	__le64 block_le;
	unsigned nr_entries, max_entries;
	struct dm_block *block;
	struct array_block *ab;

	memcpy(&block_le, leaf, sizeof(block_le));
	r = get_ablock(wi->info, le64_to_cpu(block_le), &block, &ab);
	if (r)
		return r;

	max_entries = le32_to_cpu(ab->max_entries);
	nr_entries = le32_to_cpu(ab->nr_entries);
	for (i = 0; i < nr_entries; i++) {
		r = wi->fn(wi->context, keys[0] * max_entries + i,
			   element_at(wi->info, ab, i));

		if (r)
			break;
	}

	unlock_ablock(wi->info, block);
	return r;
}

int dm_array_walk(struct dm_array_info *info, dm_block_t root,
		  int (*fn)(void *, uint64_t key, void *leaf),
		  void *context)
{
	struct walk_info wi;

	wi.info = info;
	wi.fn = fn;
	wi.context = context;

	return dm_btree_walk(&info->btree_info, root, walk_ablock, &wi);
}
EXPORT_SYMBOL_GPL(dm_array_walk);

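/*
 * Illustrative callback sketch (hypothetical; assumes __le64 values):
 *
 *	static int dump_entry(void *context, uint64_t key, void *leaf)
 *	{
 *		__le64 value_le;
 *
 *		memcpy(&value_le, leaf, sizeof(value_le));
 *		DMINFO("[%llu] = %llu", (unsigned long long) key,
 *		       (unsigned long long) le64_to_cpu(value_le));
 *		return 0;
 *	}
 *
 *	r = dm_array_walk(&info, root, dump_entry, NULL);
 */
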
/*----------------------------------------------------------------*/