/*
 *  linux/fs/hpfs/anode.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  handling HPFS anode tree that contains file allocation info
 */
  8. #include "hpfs_fn.h"
/* Find a sector in allocation tree */

/*
 * Translate file-relative sector @sec into a disk sector number by walking
 * the B+ tree rooted at @btree (which lives in the buffer @bh; an fnode's
 * or anode's tree).  On success the disk sector is returned and, if @inode
 * is non-NULL, the matching extent is cached in hpfs_inode_info.  Returns
 * -1 on error.  @bh is consumed (brelse'd) on every path.
 */
secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
		   struct bplus_header *btree, unsigned sec,
		   struct buffer_head *bh)
{
	anode_secno a = -1;
	struct anode *anode;
	int i;
	int c1, c2 = 0;	/* cycle-detector state for hpfs_stop_cycles() */
	go_down:
	if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
	if (bp_internal(btree)) {
		/* Internal node: descend into the first branch whose key
		   (file_secno) exceeds the sector we are looking for. */
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
				a = le32_to_cpu(btree->u.internal[i].down);
				brelse(bh);
				if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
				btree = &anode->btree;
				goto go_down;
			}
		hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
		brelse(bh);
		return -1;
	}
	/* Leaf node: find the extent [file_secno, file_secno+length) that
	   contains @sec and map it to the corresponding disk sector. */
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
		    le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
			a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
			if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
				brelse(bh);
				return -1;
			}
			if (inode) {
				/* Cache the whole extent so subsequent lookups
				   in it can skip the tree walk. */
				struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
				hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
				hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
				hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
			}
			brelse(bh);
			return a;
		}
	hpfs_error(s, "sector %08x not found in external anode %08x", sec, a);
	brelse(bh);
	return -1;
}
/* Add a sector to tree */

/*
 * Append one sector to the end of the file whose allocation tree is rooted
 * at @node (an fnode when @fnod is nonzero, an anode otherwise).  @fsecno
 * is the file-relative number the new sector must get; it must equal the
 * current end of the file.  Returns the newly allocated disk sector number,
 * or -1 on error.
 */
secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
{
	struct bplus_header *btree;
	struct anode *anode = NULL, *ranode = NULL;
	struct fnode *fnode;
	anode_secno a, na = -1, ra, up = -1;
	secno se;
	struct buffer_head *bh, *bh1, *bh2;
	int n;
	unsigned fs;
	int c1, c2 = 0;	/* cycle-detector state */
	/* Map the root of the tree. */
	if (fnod) {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
		btree = &anode->btree;
	}
	a = node;
	go_down:
	/* An empty tree (n == -1) is legal only in an fnode root. */
	if ((n = btree->n_used_nodes - 1) < -!!fnod) {
		hpfs_error(s, "anode %08x has no entries", a);
		brelse(bh);
		return -1;
	}
	if (bp_internal(btree)) {
		/* Descend through the rightmost branch, marking it as the
		   "last" branch (key -1) on the way down. */
		a = le32_to_cpu(btree->u.internal[n].down);
		btree->u.internal[n].file_secno = cpu_to_le32(-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
		if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
		btree = &anode->btree;
		goto go_down;
	}
	if (n >= 0) {
		/* Non-empty leaf: the new sector must directly follow the
		   last allocated one. */
		if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
			hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
				le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
				fnod?'f':'a', node);
			brelse(bh);
			return -1;
		}
		/* Fast path: if the disk sector right after the last extent
		   is free, grab it and just bump the extent length. */
		if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
			le32_add_cpu(&btree->u.external[n].length, 1);
			mark_buffer_dirty(bh);
			brelse(bh);
			return se;
		}
	} else {
		/* Empty file: only sector 0 may be appended. */
		if (fsecno) {
			hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
			brelse(bh);
			return -1;
		}
		/* Allocation-goal hint for the very first sector. */
		se = !fnod ? node : (node + 16384) & ~16383;
	}
	/* Allocate a new disk sector near @se; the forward-allocation hint
	   scales with file size, clamped to [ALLOC_FWD_MIN, ALLOC_FWD_MAX]. */
	if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
		brelse(bh);
		return -1;
	}
	/* File-relative number the new extent will start at. */
	fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
	if (!btree->n_free_nodes) {
		/* Leaf is full: allocate a new anode @na to hold the new
		   extent; it will be spliced into the tree below. */
		up = a != node ? le32_to_cpu(anode->up) : -1;
		if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
			brelse(bh);
			hpfs_free_sectors(s, se, 1);
			return -1;
		}
		if (a == node && fnod) {
			/* The full leaf is the fnode root itself: move its
			   extents into the new anode and convert the root
			   into an internal node pointing at it. */
			anode->up = cpu_to_le32(node);
			anode->btree.flags |= BP_fnode_parent;
			anode->btree.n_used_nodes = btree->n_used_nodes;
			anode->btree.first_free = btree->first_free;
			anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
			memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
			btree->flags |= BP_internal;
			btree->n_free_nodes = 11;
			btree->n_used_nodes = 1;
			btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
			btree->u.internal[0].file_secno = cpu_to_le32(-1);
			btree->u.internal[0].down = cpu_to_le32(na);
			mark_buffer_dirty(bh);
		} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
			/* @ranode/@ra: spare anode used only if we later have
			   to grow the tree by a level at the root. */
			brelse(bh);
			brelse(bh1);
			hpfs_free_sectors(s, se, 1);
			hpfs_free_sectors(s, na, 1);
			return -1;
		}
		brelse(bh);
		bh = bh1;
		btree = &anode->btree;	/* continue in the new leaf */
	}
	/* Append the new one-sector extent to the (possibly new) leaf. */
	btree->n_free_nodes--; n = btree->n_used_nodes++;
	le16_add_cpu(&btree->first_free, 12);
	btree->u.external[n].disk_secno = cpu_to_le32(se);
	btree->u.external[n].file_secno = cpu_to_le32(fs);
	btree->u.external[n].length = cpu_to_le32(1);
	mark_buffer_dirty(bh);
	brelse(bh);
	/* Done unless a new leaf @na was chained in below an existing parent. */
	if ((a == node && fnod) || na == -1) return se;
	c2 = 0;
	/* Climb toward the root looking for an ancestor with a free slot
	   in which to insert the pointer to the new leaf/subtree @na. */
	while (up != (anode_secno)-1) {
		struct anode *new_anode;
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
		if (up != node || !fnod) {
			if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
			btree = &anode->btree;
		} else {
			if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
			btree = &fnode->btree;
		}
		if (btree->n_free_nodes) {
			/* Room here: insert the down pointer, fix the new
			   child's up pointer, and drop the spare @ra. */
			btree->n_free_nodes--; n = btree->n_used_nodes++;
			le16_add_cpu(&btree->first_free, 8);
			btree->u.internal[n].file_secno = cpu_to_le32(-1);
			btree->u.internal[n].down = cpu_to_le32(na);
			btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
			mark_buffer_dirty(bh);
			brelse(bh);
			brelse(bh2);
			hpfs_free_sectors(s, ra, 1);
			if ((anode = hpfs_map_anode(s, na, &bh))) {
				anode->up = cpu_to_le32(up);
				if (up == node && fnod)
					anode->btree.flags |= BP_fnode_parent;
				else
					anode->btree.flags &= ~BP_fnode_parent;
				mark_buffer_dirty(bh);
				brelse(bh);
			}
			return se;
		}
		/* This ancestor is full too: allocate a new internal anode
		   above the current @na chain and keep climbing. */
		up = up != node ? le32_to_cpu(anode->up) : -1;
		btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		a = na;
		if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
			anode = new_anode;
			/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
			anode->btree.flags |= BP_internal;
			anode->btree.n_used_nodes = 1;
			anode->btree.n_free_nodes = 59;
			anode->btree.first_free = cpu_to_le16(16);
			anode->btree.u.internal[0].down = cpu_to_le32(a);
			anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
			mark_buffer_dirty(bh);
			brelse(bh);
			if ((anode = hpfs_map_anode(s, a, &bh))) {
				anode->up = cpu_to_le32(na);
				mark_buffer_dirty(bh);
				brelse(bh);
			}
		} else na = a;	/* allocation failed: reuse the child as-is */
	}
	/* Every ancestor was full: grow the tree by one level.  The spare
	   @ranode takes over the old root's contents; the root becomes an
	   internal node with two children, @ra (old data) and @na (new). */
	if ((anode = hpfs_map_anode(s, na, &bh))) {
		anode->up = cpu_to_le32(node);
		if (fnod)
			anode->btree.flags |= BP_fnode_parent;
		mark_buffer_dirty(bh);
		brelse(bh);
	}
	if (!fnod) {
		if (!(anode = hpfs_map_anode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &anode->btree;
	} else {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &fnode->btree;
	}
	ranode->up = cpu_to_le32(node);
	memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
	if (fnod)
		ranode->btree.flags |= BP_fnode_parent;
	ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
	/* Children moved under @ranode must point at their new parent and
	   are no longer directly below an fnode. */
	if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
		struct anode *unode;
		if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
			unode->up = cpu_to_le32(ra);
			unode->btree.flags &= ~BP_fnode_parent;
			mark_buffer_dirty(bh1);
			brelse(bh1);
		}
	}
	btree->flags |= BP_internal;
	btree->n_free_nodes = fnod ? 10 : 58;
	btree->n_used_nodes = 2;
	btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
	btree->u.internal[0].file_secno = cpu_to_le32(fs);
	btree->u.internal[0].down = cpu_to_le32(ra);
	btree->u.internal[1].file_secno = cpu_to_le32(-1);
	btree->u.internal[1].down = cpu_to_le32(na);
	mark_buffer_dirty(bh);
	brelse(bh);
	mark_buffer_dirty(bh2);
	brelse(bh2);
	return se;
}
/*
 * Remove allocation tree. Recursion would look much nicer but
 * I want to avoid it because it can cause stack overflow.
 *
 * Iterative depth-first teardown: free every data extent referenced by
 * the leaves, then free every anode of the tree itself.  The root header
 * @btree (embedded in an fnode or anode mapped by the caller) is not
 * freed here.
 */
void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
{
	struct bplus_header *btree1 = btree;
	struct anode *anode = NULL;
	anode_secno ano = 0, oano;
	struct buffer_head *bh;
	int level = 0;	/* depth below the root; 0 means @btree itself */
	int pos = 0;	/* child index to descend through */
	int i;
	int c1, c2 = 0;	/* cycle detector for the upward walk */
	int d1, d2;	/* cycle detector for each downward walk */
	go_down:
	d2 = 0;
	while (bp_internal(btree1)) {
		/* Descend to the leftmost unvisited leaf under @pos. */
		ano = le32_to_cpu(btree1->u.internal[pos].down);
		if (level) brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
				return;
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
		level++;
		pos = 0;
	}
	/* Leaf: release the data extents it describes. */
	for (i = 0; i < btree1->n_used_nodes; i++)
		hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
	go_up:
	if (!level) return;	/* back at the root: done */
	brelse(bh);
	if (hpfs_sb(s)->sb_chk)
		if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
	/* Free the anode we just finished and climb to its parent. */
	hpfs_free_sectors(s, ano, 1);
	oano = ano;
	ano = le32_to_cpu(anode->up);
	if (--level) {
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
	} else btree1 = btree;
	/* Find the freed child in the parent; continue with its next
	   sibling, or keep climbing if it was the last one. */
	for (i = 0; i < btree1->n_used_nodes; i++) {
		if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
			if ((pos = i + 1) < btree1->n_used_nodes)
				goto go_down;
			else
				goto go_up;
		}
	}
	hpfs_error(s,
		   "reference to anode %08x not found in anode %08x "
		   "(probably bad up pointer)",
		   oano, level ? ano : -1);
	if (level)
		brelse(bh);
}
  319. /* Just a wrapper around hpfs_bplus_lookup .. used for reading eas */
  320. static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
  321. {
  322. struct anode *anode;
  323. struct buffer_head *bh;
  324. if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
  325. return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
  326. }
  327. int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
  328. unsigned len, char *buf)
  329. {
  330. struct buffer_head *bh;
  331. char *data;
  332. secno sec;
  333. unsigned l;
  334. while (len) {
  335. if (ano) {
  336. if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
  337. return -1;
  338. } else sec = a + (pos >> 9);
  339. if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1;
  340. if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
  341. return -1;
  342. l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
  343. memcpy(buf, data + (pos & 0x1ff), l);
  344. brelse(bh);
  345. buf += l; pos += l; len -= l;
  346. }
  347. return 0;
  348. }
  349. int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
  350. unsigned len, const char *buf)
  351. {
  352. struct buffer_head *bh;
  353. char *data;
  354. secno sec;
  355. unsigned l;
  356. while (len) {
  357. if (ano) {
  358. if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
  359. return -1;
  360. } else sec = a + (pos >> 9);
  361. if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1;
  362. if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
  363. return -1;
  364. l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
  365. memcpy(data + (pos & 0x1ff), buf, l);
  366. mark_buffer_dirty(bh);
  367. brelse(bh);
  368. buf += l; pos += l; len -= l;
  369. }
  370. return 0;
  371. }
  372. void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
  373. {
  374. struct anode *anode;
  375. struct buffer_head *bh;
  376. if (ano) {
  377. if (!(anode = hpfs_map_anode(s, a, &bh))) return;
  378. hpfs_remove_btree(s, &anode->btree);
  379. brelse(bh);
  380. hpfs_free_sectors(s, a, 1);
  381. } else hpfs_free_sectors(s, a, (len + 511) >> 9);
  382. }
/* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */

/*
 * Truncate the allocation tree rooted at @f (an fnode when @fno, an anode
 * otherwise) so that it covers exactly @secs file sectors.  Subtrees and
 * extents entirely past the new end are freed; a straddling extent is
 * shortened in place.
 */
void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
{
	struct fnode *fnode;
	struct anode *anode;
	struct buffer_head *bh;
	struct bplus_header *btree;
	anode_secno node = f;
	int i, j, nodes;
	int c1, c2 = 0;	/* cycle-detector state */
	/* Map the root of the tree. */
	if (fno) {
		if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, f, &bh))) return;
		btree = &anode->btree;
	}
	if (!secs) {
		/* Truncate to zero: drop the whole tree.  An fnode root is
		   reset to an empty leaf; an anode root is freed outright. */
		hpfs_remove_btree(s, btree);
		if (fno) {
			btree->n_free_nodes = 8;
			btree->n_used_nodes = 0;
			btree->first_free = cpu_to_le16(8);
			btree->flags &= ~BP_internal;
			mark_buffer_dirty(bh);
		} else hpfs_free_sectors(s, f, 1);
		brelse(bh);
		return;
	}
	while (bp_internal(btree)) {
		nodes = btree->n_used_nodes + btree->n_free_nodes;
		/* Find the first branch reaching past the new end. */
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
		brelse(bh);
		hpfs_error(s, "internal btree %08x doesn't end with -1", node);
		return;
		f:
		/* Free whole subtrees lying entirely past the new end
		   (hpfs_ea_remove with ano=1 frees an anode tree). */
		for (j = i + 1; j < btree->n_used_nodes; j++)
			hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
		btree->n_used_nodes = i + 1;
		btree->n_free_nodes = nodes - btree->n_used_nodes;
		btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
		mark_buffer_dirty(bh);
		if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
			/* Cut falls exactly on a branch boundary: done. */
			brelse(bh);
			return;
		}
		/* Descend into the branch that straddles the new end. */
		node = le32_to_cpu(btree->u.internal[i].down);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
				return;
		if (!(anode = hpfs_map_anode(s, node, &bh))) return;
		btree = &anode->btree;
	}
	nodes = btree->n_used_nodes + btree->n_free_nodes;
	/* Find the first extent reaching the new end. */
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
	brelse(bh);
	return;
	ff:
	if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
		hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
		if (i) i--;
	}
	else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
		/* Extent straddles the new end: free its tail and shorten it. */
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
			le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
			- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
		btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
	}
	/* Free every extent entirely past the new end. */
	for (j = i + 1; j < btree->n_used_nodes; j++)
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
	btree->n_used_nodes = i + 1;
	btree->n_free_nodes = nodes - btree->n_used_nodes;
	btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
	mark_buffer_dirty(bh);
	brelse(bh);
}
/* Remove file or directory and it's eas - note that directory must
   be empty when this is called. */

void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
{
	struct buffer_head *bh;
	struct fnode *fnode;
	struct extended_attribute *ea;
	struct extended_attribute *ea_end;
	if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
	/* Free the data: a file's allocation btree, or a directory's dnode tree. */
	if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
	else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
	/* Free storage referenced by indirect extended attributes stored
	   inside the fnode... */
	ea_end = fnode_end_ea(fnode);
	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
		if (ea_indirect(ea))
			hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
	/* ...then the external EA area, and finally the fnode sector itself. */
	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
	brelse(bh);
	hpfs_free_sectors(s, fno, 1);
}