  1. /*
  2. * vfsv0 quota IO operations on file
  3. */
  4. #include <linux/errno.h>
  5. #include <linux/fs.h>
  6. #include <linux/mount.h>
  7. #include <linux/dqblk_v2.h>
  8. #include <linux/kernel.h>
  9. #include <linux/init.h>
  10. #include <linux/module.h>
  11. #include <linux/slab.h>
  12. #include <linux/quotaops.h>
  13. #include <asm/byteorder.h>
  14. #include "quota_tree.h"
  15. MODULE_AUTHOR("Jan Kara");
  16. MODULE_DESCRIPTION("Quota trie support");
  17. MODULE_LICENSE("GPL");
  18. #define __QUOTA_QT_PARANOIA
  19. static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
  20. {
  21. unsigned int epb = info->dqi_usable_bs >> 2;
  22. qid_t id = from_kqid(&init_user_ns, qid);
  23. depth = info->dqi_qtree_depth - depth - 1;
  24. while (depth--)
  25. id /= epb;
  26. return id % epb;
  27. }
  28. /* Number of entries in one blocks */
  29. static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
  30. {
  31. return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
  32. / info->dqi_entry_size;
  33. }
  34. static char *getdqbuf(size_t size)
  35. {
  36. char *buf = kmalloc(size, GFP_NOFS);
  37. if (!buf)
  38. printk(KERN_WARNING
  39. "VFS: Not enough memory for quota buffers.\n");
  40. return buf;
  41. }
  42. static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
  43. {
  44. struct super_block *sb = info->dqi_sb;
  45. memset(buf, 0, info->dqi_usable_bs);
  46. return sb->s_op->quota_read(sb, info->dqi_type, buf,
  47. info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
  48. }
  49. static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
  50. {
  51. struct super_block *sb = info->dqi_sb;
  52. ssize_t ret;
  53. ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
  54. info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
  55. if (ret != info->dqi_usable_bs) {
  56. quota_error(sb, "dquota write failed");
  57. if (ret >= 0)
  58. ret = -EIO;
  59. }
  60. return ret;
  61. }
/*
 * Remove empty block from list and return it.
 * Returns the block number (>= 0) or -errno on failure.
 */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int ret, blk;

	if (!buf)
		return -ENOMEM;
	if (info->dqi_free_blk) {
		/* Reuse the head of the free-block list */
		blk = info->dqi_free_blk;
		ret = read_blk(info, blk, buf);
		if (ret < 0)
			goto out_buf;
		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
	}
	else {
		/* No cached free block - extend the file by one block */
		memset(buf, 0, info->dqi_usable_bs);
		/* Assure block allocation... */
		ret = write_blk(info, info->dqi_blocks, buf);
		if (ret < 0)
			goto out_buf;
		blk = info->dqi_blocks++;
	}
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	ret = blk;
out_buf:
	kfree(buf);
	return ret;
}
/*
 * Insert empty block to the list.
 * @buf holds the block's contents and is rewritten with free-list links.
 */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	/* Link the block in at the head of the free-block list */
	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
	dh->dqdh_prev_free = cpu_to_le32(0);
	dh->dqdh_entries = cpu_to_le16(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		return err;
	info->dqi_free_blk = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
}
/*
 * Remove given block from the list of blocks with free entries.
 * The list is doubly linked through the dqdh_next_free/dqdh_prev_free
 * fields of the on-disk block headers; dqi_free_entry caches its head.
 */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	/* Point our successor's prev link at our predecessor */
	if (nextblk) {
		err = read_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							dh->dqdh_prev_free;
		err = write_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	/* Point our predecessor's next link at our successor, or move head */
	if (prevblk) {
		err = read_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
							dh->dqdh_next_free;
		err = write_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	} else {
		/* We were the list head - successor becomes the new head */
		info->dqi_free_entry = nextblk;
		mark_info_dirty(info->dqi_sb, info->dqi_type);
	}
	kfree(tmpbuf);
	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether write succeeds block is out of list */
	if (write_blk(info, blk, buf) < 0)
		quota_error(info->dqi_sb, "Can't write block (%u) "
			    "with free entries", blk);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}
/*
 * Insert given block to the beginning of list with free entries.
 * @buf holds the block's contents; its free-list links are rewritten.
 */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	/* New head: next points at the old head, no predecessor */
	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
	dh->dqdh_prev_free = cpu_to_le32(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		goto out_buf;
	/* Fix up the back pointer of the old head, if there was one */
	if (info->dqi_free_entry) {
		err = read_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							cpu_to_le32(blk);
		err = write_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	kfree(tmpbuf);
	info->dqi_free_entry = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}
  183. /* Is the entry in the block free? */
  184. int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
  185. {
  186. int i;
  187. for (i = 0; i < info->dqi_entry_size; i++)
  188. if (disk[i])
  189. return 0;
  190. return 1;
  191. }
  192. EXPORT_SYMBOL(qtree_entry_unused);
/*
 * Find space for dquot. Returns the data block number holding the new
 * entry and sets dquot->dq_off to the entry's file offset. On failure
 * returns 0 with *err set to a negative errno.
 */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			      struct dquot *dquot, int *err)
{
	uint blk, i;
	struct qt_disk_dqdbheader *dh;
	char *buf = getdqbuf(info->dqi_usable_bs);
	char *ddquot;

	*err = 0;
	if (!buf) {
		*err = -ENOMEM;
		return 0;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	if (info->dqi_free_entry) {
		/* There is a data block with a free entry - use it */
		blk = info->dqi_free_entry;
		*err = read_blk(info, blk, buf);
		if (*err < 0)
			goto out_buf;
	} else {
		/* Otherwise allocate a brand new data block */
		blk = get_free_dqblk(info);
		if ((int)blk < 0) {
			*err = blk;
			kfree(buf);
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);
		/* This is enough as the block is already zeroed and the entry
		 * list is empty... */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
	}
	/* Block will be full? */
	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
		*err = remove_free_dqentry(info, buf, blk);
		if (*err < 0) {
			quota_error(dquot->dq_sb, "Can't remove block (%u) "
				    "from entry free list", blk);
			goto out_buf;
		}
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Find free structure in block */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
		*err = -EIO;
		goto out_buf;
	}
#endif
	*err = write_blk(info, blk, buf);
	if (*err < 0) {
		quota_error(dquot->dq_sb, "Can't write quota data block %u",
			    blk);
		goto out_buf;
	}
	/* Remember the entry's file offset for later quota_read/quota_write */
	dquot->dq_off = (blk << info->dqi_blocksize_bits) +
			sizeof(struct qt_disk_dqdbheader) +
			i * info->dqi_entry_size;
	kfree(buf);
	return blk;
out_buf:
	kfree(buf);
	return 0;
}
/*
 * Insert reference to structure into the trie.
 * Recursively descends from @depth, allocating tree blocks as needed,
 * and finally places the dquot in a leaf data block. Newly allocated
 * blocks are released again if insertion below us fails.
 */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			  uint *treeblk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;

	if (!buf)
		return -ENOMEM;
	if (!*treeblk) {
		/* No tree block at this level yet - allocate one */
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		*treeblk = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;	/* block newly allocated by this call */
	} else {
		ret = read_blk(info, *treeblk, buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", *treeblk);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!newblk)
		newson = 1;	/* must store a reference to a new child */
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				    "quota entry (block %u)",
				    le32_to_cpu(ref[get_index(info,
						dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		/* Leaf level - put the dquot structure into a data block */
		newblk = find_free_dqentry(info, dquot, &ret);
	} else {
		/* Interior level - recurse one level down */
		ret = do_insert_tree(info, dquot, &newblk, depth+1);
	}
	if (newson && ret >= 0) {
		/* Record the reference to the newly created child block */
		ref[get_index(info, dquot->dq_id, depth)] =
							cpu_to_le32(newblk);
		ret = write_blk(info, *treeblk, buf);
	} else if (newact && ret < 0) {
		/* Insertion failed - give back the block we allocated */
		put_free_dqblk(info, buf, *treeblk);
	}
out_buf:
	kfree(buf);
	return ret;
}
/* Wrapper for inserting quota structure into tree */
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot)
{
	int tmp = QT_TREEOFF;

#ifdef __QUOTA_QT_PARANOIA
	/* The root block must have been allocated when the file was created */
	if (info->dqi_blocks <= QT_TREEOFF) {
		quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
		return -EIO;
	}
#endif
	return do_insert_tree(info, dquot, &tmp, 0);
}
/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 *
 * Write the in-memory dquot to its slot in the quota file, inserting it
 * into the tree first if it has no on-disk position yet. Returns 0 or
 * a negative errno.
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = getdqbuf(info->dqi_entry_size);

	if (!ddquot)
		return -ENOMEM;
	/* dq_off is guarded by dqio_mutex */
	if (!dquot->dq_off) {
		/* Not on disk yet - allocate a slot in the tree */
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			quota_error(sb, "Error %zd occurred while creating "
				    "quota", ret);
			kfree(ddquot);
			return ret;
		}
	}
	/* Serialize the in-memory dquot into disk format under the lock */
	spin_lock(&dq_data_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dq_data_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		/* Short write is treated as running out of space */
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -ENOSPC;
	} else {
		ret = 0;
	}
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);
	return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);
  371. /* Free dquot entry in data block */
  372. static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
  373. uint blk)
  374. {
  375. struct qt_disk_dqdbheader *dh;
  376. char *buf = getdqbuf(info->dqi_usable_bs);
  377. int ret = 0;
  378. if (!buf)
  379. return -ENOMEM;
  380. if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
  381. quota_error(dquot->dq_sb, "Quota structure has offset to "
  382. "other block (%u) than it should (%u)", blk,
  383. (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
  384. goto out_buf;
  385. }
  386. ret = read_blk(info, blk, buf);
  387. if (ret < 0) {
  388. quota_error(dquot->dq_sb, "Can't read quota data block %u",
  389. blk);
  390. goto out_buf;
  391. }
  392. dh = (struct qt_disk_dqdbheader *)buf;
  393. le16_add_cpu(&dh->dqdh_entries, -1);
  394. if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */
  395. ret = remove_free_dqentry(info, buf, blk);
  396. if (ret >= 0)
  397. ret = put_free_dqblk(info, buf, blk);
  398. if (ret < 0) {
  399. quota_error(dquot->dq_sb, "Can't move quota data block "
  400. "(%u) to free list", blk);
  401. goto out_buf;
  402. }
  403. } else {
  404. memset(buf +
  405. (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
  406. 0, info->dqi_entry_size);
  407. if (le16_to_cpu(dh->dqdh_entries) ==
  408. qtree_dqstr_in_blk(info) - 1) {
  409. /* Insert will write block itself */
  410. ret = insert_free_dqentry(info, buf, blk);
  411. if (ret < 0) {
  412. quota_error(dquot->dq_sb, "Can't insert quota "
  413. "data block (%u) to free entry list", blk);
  414. goto out_buf;
  415. }
  416. } else {
  417. ret = write_blk(info, blk, buf);
  418. if (ret < 0) {
  419. quota_error(dquot->dq_sb, "Can't write quota "
  420. "data block %u", blk);
  421. goto out_buf;
  422. }
  423. }
  424. }
  425. dquot->dq_off = 0; /* Quota is now unattached */
  426. out_buf:
  427. kfree(buf);
  428. return ret;
  429. }
  430. /* Remove reference to dquot from tree */
  431. static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
  432. uint *blk, int depth)
  433. {
  434. char *buf = getdqbuf(info->dqi_usable_bs);
  435. int ret = 0;
  436. uint newblk;
  437. __le32 *ref = (__le32 *)buf;
  438. if (!buf)
  439. return -ENOMEM;
  440. ret = read_blk(info, *blk, buf);
  441. if (ret < 0) {
  442. quota_error(dquot->dq_sb, "Can't read quota data block %u",
  443. *blk);
  444. goto out_buf;
  445. }
  446. newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
  447. if (depth == info->dqi_qtree_depth - 1) {
  448. ret = free_dqentry(info, dquot, newblk);
  449. newblk = 0;
  450. } else {
  451. ret = remove_tree(info, dquot, &newblk, depth+1);
  452. }
  453. if (ret >= 0 && !newblk) {
  454. int i;
  455. ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
  456. /* Block got empty? */
  457. for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
  458. ;
  459. /* Don't put the root block into the free block list */
  460. if (i == (info->dqi_usable_bs >> 2)
  461. && *blk != QT_TREEOFF) {
  462. put_free_dqblk(info, buf, *blk);
  463. *blk = 0;
  464. } else {
  465. ret = write_blk(info, *blk, buf);
  466. if (ret < 0)
  467. quota_error(dquot->dq_sb,
  468. "Can't write quota tree block %u",
  469. *blk);
  470. }
  471. }
  472. out_buf:
  473. kfree(buf);
  474. return ret;
  475. }
  476. /* Delete dquot from tree */
  477. int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
  478. {
  479. uint tmp = QT_TREEOFF;
  480. if (!dquot->dq_off) /* Even not allocated? */
  481. return 0;
  482. return remove_tree(info, dquot, &tmp, 0);
  483. }
  484. EXPORT_SYMBOL(qtree_delete_dquot);
/*
 * Find entry in block. Returns the file offset of the entry matching
 * dquot's id, or a negative errno when not found / on I/O failure.
 */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot, uint blk)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	int i;
	char *ddquot;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree "
			    "block %u", blk);
		goto out_buf;
	}
	/* Scan all entries in the block for one carrying our id */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(ddquot, dquot))
			break;
		ddquot += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		/* Tree referenced this block but the entry is missing */
		quota_error(dquot->dq_sb,
			    "Quota for id %u referenced but not present",
			    from_kqid(&init_user_ns, dquot->dq_id));
		ret = -EIO;
		goto out_buf;
	} else {
		/* File offset of the matching entry */
		ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
		      qt_disk_dqdbheader) + i * info->dqi_entry_size;
	}
out_buf:
	kfree(buf);
	return ret;
}
  521. /* Find entry for given id in the tree */
  522. static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
  523. struct dquot *dquot, uint blk, int depth)
  524. {
  525. char *buf = getdqbuf(info->dqi_usable_bs);
  526. loff_t ret = 0;
  527. __le32 *ref = (__le32 *)buf;
  528. if (!buf)
  529. return -ENOMEM;
  530. ret = read_blk(info, blk, buf);
  531. if (ret < 0) {
  532. quota_error(dquot->dq_sb, "Can't read quota tree block %u",
  533. blk);
  534. goto out_buf;
  535. }
  536. ret = 0;
  537. blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
  538. if (!blk) /* No reference? */
  539. goto out_buf;
  540. if (depth < info->dqi_qtree_depth - 1)
  541. ret = find_tree_dqentry(info, dquot, blk, depth+1);
  542. else
  543. ret = find_block_dqentry(info, dquot, blk);
  544. out_buf:
  545. kfree(buf);
  546. return ret;
  547. }
/* Find entry for given id in the tree - wrapper function */
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
				  struct dquot *dquot)
{
	/* Always start the search at the root tree block */
	return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}
/*
 * Read the dquot from disk, locating its entry in the tree first when
 * its file offset is not yet known. A missing entry is not an error:
 * the dquot is zeroed and marked fake. Returns 0 or a negative errno.
 */
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Invalidated quota? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
		return -EIO;
	}
#endif
	/* Do we know offset of the dquot entry in the quota file? */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {	/* Entry not present? */
			if (offset < 0)
				quota_error(sb,"Can't read quota structure "
					    "for id %u",
					    from_kqid(&init_user_ns,
						      dquot->dq_id));
			/* Treat the missing entry as an all-zero fake dquot */
			dquot->dq_off = 0;
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = getdqbuf(info->dqi_entry_size);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		/* Short read - fall back to an all-zero fake dquot */
		if (ret >= 0)
			ret = -EIO;
		quota_error(sb, "Error while reading quota structure for id %u",
			    from_kqid(&init_user_ns, dquot->dq_id));
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	spin_lock(&dq_data_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	/* All limits zero means the on-disk entry is just a placeholder */
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	kfree(ddquot);
out:
	dqstats_inc(DQST_READS);
	return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);
  614. /* Check whether dquot should not be deleted. We know we are
  615. * the only one operating on dquot (thanks to dq_lock) */
  616. int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
  617. {
  618. if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
  619. !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
  620. return qtree_delete_dquot(info, dquot);
  621. return 0;
  622. }
  623. EXPORT_SYMBOL(qtree_release_dquot);