/*
 * linux/fs/hfs/extent.c
 *
 * Copyright (C) 1995-1997 Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains the functions related to the extents B-tree.
 */

#include <linux/pagemap.h>

#include "hfs_fs.h"
#include "btree.h"

/*================ File-local functions ================*/

/*
 * build_key
 */
static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
{
        key->key_len = 7;
        key->ext.FkType = type;
        key->ext.FNum = cpu_to_be32(cnid);
        key->ext.FABN = cpu_to_be16(block);
}

/*
 * hfs_ext_keycmp()
 *
 * Description:
 *   This is the comparison function used for the extents B-tree. In
 *   comparing extent B-tree entries, the file id is the most
 *   significant field (compared as unsigned ints); the fork type is
 *   the second most significant field (compared as unsigned chars);
 *   and the allocation block number field is the least significant
 *   (compared as unsigned shorts).
 * Input Variable(s):
 *   struct hfs_ext_key *key1: pointer to the first key to compare
 *   struct hfs_ext_key *key2: pointer to the second key to compare
 * Output Variable(s):
 *   NONE
 * Returns:
 *   int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
 * Preconditions:
 *   key1 and key2 point to "valid" (struct hfs_ext_key)s.
 * Postconditions:
 *   This function has no side-effects.
 */
int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
{
        __be32 fnum1, fnum2;
        __be16 block1, block2;

        fnum1 = key1->ext.FNum;
        fnum2 = key2->ext.FNum;
        if (fnum1 != fnum2)
                return be32_to_cpu(fnum1) < be32_to_cpu(fnum2) ? -1 : 1;

        if (key1->ext.FkType != key2->ext.FkType)
                return key1->ext.FkType < key2->ext.FkType ? -1 : 1;

        block1 = key1->ext.FABN;
        block2 = key2->ext.FABN;
        if (block1 == block2)
                return 0;
        return be16_to_cpu(block1) < be16_to_cpu(block2) ? -1 : 1;
}

/*
 * hfs_ext_find_block
 *
 * Find a block within an extent record
 */
static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
{
        int i;
        u16 count;

        for (i = 0; i < 3; ext++, i++) {
                count = be16_to_cpu(ext->count);
                if (off < count)
                        return be16_to_cpu(ext->block) + off;
                off -= count;
        }
        /* panic? */
        return 0;
}
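
/*
 * hfs_ext_block_count
 *
 * Return the total number of allocation blocks covered by the three
 * extents of an extent record.
 */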
static int hfs_ext_block_count(struct hfs_extent *ext)
{
        int i;
        u16 count = 0;

        for (i = 0; i < 3; ext++, i++)
                count += be16_to_cpu(ext->count);
        return count;
}
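
/*
 * hfs_ext_lastblock
 *
 * Return the allocation block immediately following the last non-empty
 * extent of an extent record (used as the search goal when extending
 * a file).
 */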
static u16 hfs_ext_lastblock(struct hfs_extent *ext)
{
        int i;

        ext += 2;
        for (i = 0; i < 2; ext--, i++)
                if (ext->count)
                        break;
        return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
}
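
/*
 * __hfs_ext_write_extent
 *
 * Write the inode's cached extent record back to the extents B-tree:
 * insert a new record if HFS_FLG_EXT_NEW is set, otherwise overwrite
 * the existing record found by hfs_brec_find().
 */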
static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
        int res;

        hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start,
                          HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
        res = hfs_brec_find(fd);
        if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
                if (res != -ENOENT)
                        return res;
                hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
                HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
        } else {
                if (res)
                        return res;
                hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength);
                HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
        }
        return 0;
}
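
/*
 * hfs_ext_write_extent
 *
 * Flush the inode's cached extent record to the extents B-tree if it
 * is marked dirty.
 */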
int hfs_ext_write_extent(struct inode *inode)
{
        struct hfs_find_data fd;
        int res = 0;

        if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
                res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
                if (res)
                        return res;
                res = __hfs_ext_write_extent(inode, &fd);
                hfs_find_exit(&fd);
        }
        return res;
}
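
/*
 * __hfs_ext_read_extent
 *
 * Look up the extent record of the given file and fork that covers the
 * given allocation block and read it into *extent.
 */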
static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
                                        u32 cnid, u32 block, u8 type)
{
        int res;

        hfs_ext_build_key(fd->search_key, cnid, block, type);
        fd->key->ext.FNum = 0;
        res = hfs_brec_find(fd);
        if (res && res != -ENOENT)
                return res;
        if (fd->key->ext.FNum != fd->search_key->ext.FNum ||
            fd->key->ext.FkType != fd->search_key->ext.FkType)
                return -ENOENT;
        if (fd->entrylength != sizeof(hfs_extent_rec))
                return -EIO;
        hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec));
        return 0;
}
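
/*
 * __hfs_ext_cache_extent
 *
 * Make the inode's extent cache cover the given allocation block,
 * writing back the previously cached record first if it is dirty.
 */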
static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
        int res;

        if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
                res = __hfs_ext_write_extent(inode, fd);
                if (res)
                        return res;
        }

        res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
                                    block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
        if (!res) {
                HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN);
                HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents);
        } else {
                HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
                HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
        }
        return res;
}
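
/*
 * hfs_ext_read_extent
 *
 * Ensure that the inode's cached extent record covers the given
 * allocation block, reading it from the extents B-tree if necessary.
 */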
static int hfs_ext_read_extent(struct inode *inode, u16 block)
{
        struct hfs_find_data fd;
        int res;

        if (block >= HFS_I(inode)->cached_start &&
            block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
                return 0;

        res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
        if (!res) {
                res = __hfs_ext_cache_extent(&fd, inode, block);
                hfs_find_exit(&fd);
        }
        return res;
}
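
/*
 * hfs_dump_extent
 *
 * Print an extent record as "block:count" pairs for debugging.
 */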
static void hfs_dump_extent(struct hfs_extent *extent)
{
        int i;

        hfs_dbg(EXTENT, " ");
        for (i = 0; i < 3; i++)
                hfs_dbg_cont(EXTENT, " %u:%u",
                             be16_to_cpu(extent[i].block),
                             be16_to_cpu(extent[i].count));
        hfs_dbg_cont(EXTENT, "\n");
}
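
/*
 * hfs_add_extent
 *
 * Append a run of block_count newly allocated blocks starting at
 * alloc_block to an extent record at the given block offset, either by
 * growing the last used extent or by starting the next free one.
 * Returns -ENOSPC when all three extents are already in use.
 */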
static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
                          u16 alloc_block, u16 block_count)
{
        u16 count, start;
        int i;

        hfs_dump_extent(extent);
        for (i = 0; i < 3; extent++, i++) {
                count = be16_to_cpu(extent->count);
                if (offset == count) {
                        start = be16_to_cpu(extent->block);
                        if (alloc_block != start + count) {
                                if (++i >= 3)
                                        return -ENOSPC;
                                extent++;
                                extent->block = cpu_to_be16(alloc_block);
                        } else
                                block_count += count;
                        extent->count = cpu_to_be16(block_count);
                        return 0;
                } else if (offset < count)
                        break;
                offset -= count;
        }
        /* panic? */
        return -EIO;
}
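
/*
 * hfs_free_extents
 *
 * Free block_nr allocation blocks from the end of the used part of an
 * extent record (the used part ends 'offset' blocks into the record)
 * and clear the corresponding bits in the volume bitmap.
 */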
static int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent,
                            u16 offset, u16 block_nr)
{
        u16 count, start;
        int i;

        hfs_dump_extent(extent);
        for (i = 0; i < 3; extent++, i++) {
                count = be16_to_cpu(extent->count);
                if (offset == count)
                        goto found;
                else if (offset < count)
                        break;
                offset -= count;
        }
        /* panic? */
        return -EIO;
found:
        for (;;) {
                start = be16_to_cpu(extent->block);
                if (count <= block_nr) {
                        hfs_clear_vbm_bits(sb, start, count);
                        extent->block = 0;
                        extent->count = 0;
                        block_nr -= count;
                } else {
                        count -= block_nr;
                        hfs_clear_vbm_bits(sb, start + count, block_nr);
                        extent->count = cpu_to_be16(count);
                        block_nr = 0;
                }
                if (!block_nr || !i)
                        return 0;
                i--;
                extent--;
                count = be16_to_cpu(extent->count);
        }
}
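
/*
 * hfs_free_fork
 *
 * Free every allocation block of the data or resource fork described
 * by a catalog file record, including the overflow extents stored in
 * the extents B-tree.
 */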
int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
{
        struct hfs_find_data fd;
        u32 total_blocks, blocks, start;
        u32 cnid = be32_to_cpu(file->FlNum);
        struct hfs_extent *extent;
        int res, i;

        if (type == HFS_FK_DATA) {
                total_blocks = be32_to_cpu(file->PyLen);
                extent = file->ExtRec;
        } else {
                total_blocks = be32_to_cpu(file->RPyLen);
                extent = file->RExtRec;
        }
        total_blocks /= HFS_SB(sb)->alloc_blksz;
        if (!total_blocks)
                return 0;

        blocks = 0;
        for (i = 0; i < 3; i++)
                blocks += be16_to_cpu(extent[i].count);

        res = hfs_free_extents(sb, extent, blocks, blocks);
        if (res)
                return res;
        if (total_blocks == blocks)
                return 0;

        res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
        if (res)
                return res;
        do {
                res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
                if (res)
                        break;
                start = be16_to_cpu(fd.key->ext.FABN);
                hfs_free_extents(sb, extent, total_blocks - start, total_blocks);
                hfs_brec_remove(&fd);
                total_blocks = start;
        } while (total_blocks > blocks);
        hfs_find_exit(&fd);

        return res;
}

/*
 * hfs_get_block
 */
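/*
 * get_block callback for the buffer/page cache: convert the file block
 * number to an allocation block (block / fs_div), find the matching
 * disk block via the in-inode extents or the extents B-tree, and
 * extend the file when 'create' is set and the block lies just past
 * the currently allocated blocks.
 */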
int hfs_get_block(struct inode *inode, sector_t block,
                  struct buffer_head *bh_result, int create)
{
        struct super_block *sb;
        u16 dblock, ablock;
        int res;

        sb = inode->i_sb;
        /* Convert inode block to disk allocation block */
        ablock = (u32)block / HFS_SB(sb)->fs_div;

        if (block >= HFS_I(inode)->fs_blocks) {
                if (block > HFS_I(inode)->fs_blocks || !create)
                        return -EIO;
                if (ablock >= HFS_I(inode)->alloc_blocks) {
                        res = hfs_extend_file(inode);
                        if (res)
                                return res;
                }
        } else
                create = 0;

        if (ablock < HFS_I(inode)->first_blocks) {
                dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
                goto done;
        }

        mutex_lock(&HFS_I(inode)->extents_lock);
        res = hfs_ext_read_extent(inode, ablock);
        if (!res)
                dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
                                            ablock - HFS_I(inode)->cached_start);
        else {
                mutex_unlock(&HFS_I(inode)->extents_lock);
                return -EIO;
        }
        mutex_unlock(&HFS_I(inode)->extents_lock);

done:
        map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
               dblock * HFS_SB(sb)->fs_div +
               (u32)block % HFS_SB(sb)->fs_div);

        if (create) {
                set_buffer_new(bh_result);
                HFS_I(inode)->phys_size += sb->s_blocksize;
                HFS_I(inode)->fs_blocks++;
                inode_add_bytes(inode, sb->s_blocksize);
                mark_inode_dirty(inode);
        }
        return 0;
}
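
/*
 * hfs_extend_file
 *
 * Allocate up to clump_blocks more allocation blocks for the file,
 * preferably contiguous with its current last block, and record them
 * in the in-inode extents or in the cached overflow extent record,
 * starting a new B-tree record when the current one is full.
 */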
int hfs_extend_file(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        u32 start, len, goal;
        int res;

        mutex_lock(&HFS_I(inode)->extents_lock);
        if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
                goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
        else {
                res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks);
                if (res)
                        goto out;
                goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents);
        }

        len = HFS_I(inode)->clump_blocks;
        start = hfs_vbm_search_free(sb, goal, &len);
        if (!len) {
                res = -ENOSPC;
                goto out;
        }

        hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
        if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
                if (!HFS_I(inode)->first_blocks) {
                        hfs_dbg(EXTENT, "first extents\n");
                        /* no extents yet */
                        HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
                        HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
                        res = 0;
                } else {
                        /* try to append to extents in inode */
                        res = hfs_add_extent(HFS_I(inode)->first_extents,
                                             HFS_I(inode)->alloc_blocks,
                                             start, len);
                        if (res == -ENOSPC)
                                goto insert_extent;
                }
                if (!res) {
                        hfs_dump_extent(HFS_I(inode)->first_extents);
                        HFS_I(inode)->first_blocks += len;
                }
        } else {
                res = hfs_add_extent(HFS_I(inode)->cached_extents,
                                     HFS_I(inode)->alloc_blocks -
                                     HFS_I(inode)->cached_start,
                                     start, len);
                if (!res) {
                        hfs_dump_extent(HFS_I(inode)->cached_extents);
                        HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
                        HFS_I(inode)->cached_blocks += len;
                } else if (res == -ENOSPC)
                        goto insert_extent;
        }
out:
        mutex_unlock(&HFS_I(inode)->extents_lock);
        if (!res) {
                HFS_I(inode)->alloc_blocks += len;
                mark_inode_dirty(inode);
                if (inode->i_ino < HFS_FIRSTUSER_CNID)
                        set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
                set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
                hfs_mark_mdb_dirty(sb);
        }
        return res;

insert_extent:
        hfs_dbg(EXTENT, "insert new extent\n");
        res = hfs_ext_write_extent(inode);
        if (res)
                goto out;

        memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
        HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
        HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len);
        hfs_dump_extent(HFS_I(inode)->cached_extents);
        HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW;
        HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks;
        HFS_I(inode)->cached_blocks = len;

        res = 0;
        goto out;
}
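
/*
 * hfs_file_truncate
 *
 * Bring the file's on-disk allocation in line with inode->i_size:
 * extend it through the page cache write path when the size grew, or
 * free the allocation blocks (and any overflow extent records) beyond
 * the new size when it shrank.
 */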
void hfs_file_truncate(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        struct hfs_find_data fd;
        u16 blk_cnt, alloc_cnt, start;
        u32 size;
        int res;

        hfs_dbg(INODE, "truncate: %lu, %Lu -> %Lu\n",
                inode->i_ino, (long long)HFS_I(inode)->phys_size,
                inode->i_size);
        if (inode->i_size > HFS_I(inode)->phys_size) {
                struct address_space *mapping = inode->i_mapping;
                void *fsdata;
                struct page *page;

                /* XXX: Can use generic_cont_expand? */
                size = inode->i_size - 1;
                res = pagecache_write_begin(NULL, mapping, size+1, 0,
                                            AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
                if (!res) {
                        res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
                                                  page, fsdata);
                }
                if (res)
                        inode->i_size = HFS_I(inode)->phys_size;
                return;
        } else if (inode->i_size == HFS_I(inode)->phys_size)
                return;

        size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
        blk_cnt = size / HFS_SB(sb)->alloc_blksz;
        alloc_cnt = HFS_I(inode)->alloc_blocks;
        if (blk_cnt == alloc_cnt)
                goto out;

        mutex_lock(&HFS_I(inode)->extents_lock);
        res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
        if (res) {
                mutex_unlock(&HFS_I(inode)->extents_lock);
                /* XXX: We lack error handling of hfs_file_truncate() */
                return;
        }
        while (1) {
                if (alloc_cnt == HFS_I(inode)->first_blocks) {
                        hfs_free_extents(sb, HFS_I(inode)->first_extents,
                                         alloc_cnt, alloc_cnt - blk_cnt);
                        hfs_dump_extent(HFS_I(inode)->first_extents);
                        HFS_I(inode)->first_blocks = blk_cnt;
                        break;
                }
                res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt);
                if (res)
                        break;
                start = HFS_I(inode)->cached_start;
                hfs_free_extents(sb, HFS_I(inode)->cached_extents,
                                 alloc_cnt - start, alloc_cnt - blk_cnt);
                hfs_dump_extent(HFS_I(inode)->cached_extents);
                if (blk_cnt > start) {
                        HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
                        break;
                }
                alloc_cnt = start;
                HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
                HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
                hfs_brec_remove(&fd);
        }
        hfs_find_exit(&fd);
        mutex_unlock(&HFS_I(inode)->extents_lock);

        HFS_I(inode)->alloc_blocks = blk_cnt;
out:
        HFS_I(inode)->phys_size = inode->i_size;
        HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
        inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
        mark_inode_dirty(inode);
}