bitmap.c

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <asm/div64.h>
#include "omfs.h"

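/*
 * Returns the number of free blocks on the filesystem: the sum of the
 * zero bits across all in-memory bitmap buffers.
 */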
unsigned long omfs_count_free(struct super_block *sb)
{
        unsigned int i;
        unsigned long sum = 0;
        struct omfs_sb_info *sbi = OMFS_SB(sb);
        int nbits = sb->s_blocksize * 8;

        for (i = 0; i < sbi->s_imap_size; i++)
                sum += nbits - bitmap_weight(sbi->s_imap[i], nbits);

        return sum;
}

/*
 * Counts the run of zero bits starting at bit up to max.
 * It handles the case where a run might spill over a buffer.
 * Called with bitmap lock.
 */
static int count_run(unsigned long **addr, int nbits,
                int addrlen, int bit, int max)
{
        int count = 0;
        int x;

        for (; addrlen > 0; addrlen--, addr++) {
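                /*
                 * find_next_bit() returns nbits when no set bit remains in
                 * this buffer, in which case the run of zeros continues at
                 * bit 0 of the next buffer.
                 */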
                x = find_next_bit(*addr, nbits, bit);
                count += x - bit;
                if (x < nbits || count > max)
                        return min(count, max);
                bit = 0;
        }
        return min(count, max);
}

/*
 * Sets or clears the run of count bits starting with bit.
 * Called with bitmap lock.
 */
static int set_run(struct super_block *sb, int map,
                int nbits, int bit, int count, int set)
{
        int i;
        int err;
        struct buffer_head *bh;
        struct omfs_sb_info *sbi = OMFS_SB(sb);

        err = -ENOMEM;
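        /*
         * The on-disk bitmap occupies contiguous blocks starting at the
         * bitmap inode's block, so buffer "map" sits at a fixed offset.
         */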
        bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
        if (!bh)
                goto out;

        for (i = 0; i < count; i++, bit++) {
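                /*
                 * If the run crosses into the next bitmap block, write out
                 * the current buffer and read the following one.
                 */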
                if (bit >= nbits) {
                        bit = 0;
                        map++;

                        mark_buffer_dirty(bh);
                        brelse(bh);
                        bh = sb_bread(sb,
                                clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
                        if (!bh)
                                goto out;
                }
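                /* keep the in-memory bitmap and the on-disk buffer in sync */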
                if (set) {
                        set_bit(bit, sbi->s_imap[map]);
                        set_bit(bit, (unsigned long *)bh->b_data);
                } else {
                        clear_bit(bit, sbi->s_imap[map]);
                        clear_bit(bit, (unsigned long *)bh->b_data);
                }
        }
        mark_buffer_dirty(bh);
        brelse(bh);
        err = 0;
out:
        return err;
}

/*
 * Tries to allocate exactly one block.  Returns true if successful.
 */
int omfs_allocate_block(struct super_block *sb, u64 block)
{
        struct buffer_head *bh;
        struct omfs_sb_info *sbi = OMFS_SB(sb);
        int bits_per_entry = 8 * sb->s_blocksize;
        unsigned int map, bit;
        int ret = 0;
        u64 tmp;

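        /*
         * Split the block number into a bitmap buffer index (map) and a bit
         * offset within that buffer; do_div() leaves the quotient in tmp
         * and returns the remainder.
         */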
        tmp = block;
        bit = do_div(tmp, bits_per_entry);
        map = tmp;

        mutex_lock(&sbi->s_bitmap_lock);
        if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map]))
                goto out;

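        /* write the allocation through to the on-disk bitmap, if one exists */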
        if (sbi->s_bitmap_ino > 0) {
                bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
                if (!bh)
                        goto out;

                set_bit(bit, (unsigned long *)bh->b_data);
                mark_buffer_dirty(bh);
                brelse(bh);
        }
        ret = 1;
out:
        mutex_unlock(&sbi->s_bitmap_lock);
        return ret;
}

/*
 * Tries to allocate a set of blocks.  The request size depends on the
 * type: for inodes, we must allocate sbi->s_mirrors blocks, and for file
 * blocks, we try to allocate sbi->s_clustersize, but can always get away
 * with just one block.
 */
int omfs_allocate_range(struct super_block *sb,
                        int min_request,
                        int max_request,
                        u64 *return_block,
                        int *return_size)
{
        struct omfs_sb_info *sbi = OMFS_SB(sb);
        int bits_per_entry = 8 * sb->s_blocksize;
        int ret = 0;
        int i, run, bit;

        mutex_lock(&sbi->s_bitmap_lock);
        for (i = 0; i < sbi->s_imap_size; i++) {
                bit = 0;
                while (bit < bits_per_entry) {
                        bit = find_next_zero_bit(sbi->s_imap[i], bits_per_entry,
                                bit);

                        if (bit == bits_per_entry)
                                break;

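                        /*
                         * Count the free run starting at this bit; it may
                         * extend into later bitmap buffers.
                         */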
                        run = count_run(&sbi->s_imap[i], bits_per_entry,
                                sbi->s_imap_size-i, bit, max_request);

                        if (run >= min_request)
                                goto found;
                        bit += run;
                }
        }
        ret = -ENOSPC;
        goto out;

found:
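        /*
         * Convert the (buffer index, bit offset) pair back into an absolute
         * block number and mark the whole run as allocated.
         */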
        *return_block = (u64) i * bits_per_entry + bit;
        *return_size = run;
        ret = set_run(sb, i, bits_per_entry, bit, run, 1);

out:
        mutex_unlock(&sbi->s_bitmap_lock);
        return ret;
}

/*
 * Clears count bits starting at a given block.
 */
int omfs_clear_range(struct super_block *sb, u64 block, int count)
{
        struct omfs_sb_info *sbi = OMFS_SB(sb);
        int bits_per_entry = 8 * sb->s_blocksize;
        u64 tmp;
        unsigned int map, bit;
        int ret;

        tmp = block;
        bit = do_div(tmp, bits_per_entry);
        map = tmp;
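        /* a range lying entirely past the end of the bitmap has nothing to clear */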
        if (map >= sbi->s_imap_size)
                return 0;

        mutex_lock(&sbi->s_bitmap_lock);
        ret = set_run(sb, map, bits_per_entry, bit, count, 0);
        mutex_unlock(&sbi->s_bitmap_lock);
        return ret;
}