
/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"

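/*
 * An inode can keep its data inline only when the inline_data mount
 * option is set, the file is a regular file or symlink no larger than
 * MAX_INLINE_DATA, and it is neither an atomic file nor an encrypted
 * regular file.
 */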
bool f2fs_may_inline_data(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
		return false;

	if (f2fs_is_atomic_file(inode))
		return false;

	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
		return false;

	if (i_size_read(inode) > MAX_INLINE_DATA)
		return false;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return false;

	return true;
}

bool f2fs_may_inline_dentry(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
		return false;

	if (!S_ISDIR(inode->i_mode))
		return false;

	return true;
}

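/*
 * Fill a page-cache page (index 0 only) from the inline data area of
 * the inode page, zeroing the tail beyond MAX_INLINE_DATA.
 */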
void read_inline_data(struct page *page, struct page *ipage)
{
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
}

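/*
 * Zero the inline data area from @from onwards; returns false when
 * @from already lies beyond the inline area and nothing needs doing.
 */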
bool truncate_inline_inode(struct page *ipage, u64 from)
{
	void *addr;

	if (from >= MAX_INLINE_DATA)
		return false;

	addr = inline_data_addr(ipage);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memset(addr + from, 0, MAX_INLINE_DATA - from);

	return true;
}

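/*
 * Readpage path for inline inodes. Returns -EAGAIN if the inline flag
 * was cleared under us, so that the caller can fall back to the
 * regular read path.
 */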
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	if (page->index)
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	else
		read_inline_data(page, ipage);

	SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}

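/*
 * Convert the inline data of dn->inode into a regular data block:
 * reserve block 0, copy the inline bytes into @page, write the page
 * out synchronously, then wipe the inline area and clear the flag.
 */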
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
		f2fs_put_dnode(dn);
		set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
		f2fs_msg(fio.sbi->sb, KERN_WARNING,
			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
			"run fsck to fix.",
			__func__, dn->inode->i_ino, dn->data_blkaddr);
		return -EINVAL;
	}

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(dn, &fio);
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode_page, 0);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

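/*
 * Locked wrapper for the conversion above: grabs page 0 and the inode
 * page under f2fs_lock_op() before calling f2fs_convert_inline_page().
 */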
int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}

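/*
 * Writeback path for inline inodes: copy the page 0 contents back
 * into the inline data area of the inode page.
 */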
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}

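/*
 * Roll-forward recovery of inline data; the policy table inside
 * explains how the previous and next states of the inline_data flag
 * are reconciled. Returns true iff inline data was recovered.
 */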
bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		if (!truncate_inline_inode(ipage, 0))
			return false;
		f2fs_clear_inline_inode(inode);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		if (truncate_blocks(inode, 0, false))
			return false;
		goto process_inline;
	}
	return false;
}

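/*
 * Look up @fname in an inline directory. On a hit, the inode page is
 * handed back through @res_page (referenced but unlocked) together
 * with the matching entry.
 */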
struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
			struct f2fs_filename *fname, struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_inline_dentry *inline_dentry;
	struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;
	f2fs_hash_t namehash;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	namehash = f2fs_dentry_hash(&name, fname);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
	de = find_target_dentry(fname, namehash, NULL, &d);
	unlock_page(ipage);
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	/*
	 * For the most part, it should be a bug when name_len is zero.
	 * We stop here to figure out where the bug has occurred.
	 */
	f2fs_bug_on(sbi, d.max < 0);
	return de;
}

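/* ".." always occupies the second slot of the inline dentry array. */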
struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
							struct page **p)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	struct f2fs_dir_entry *de;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	dentry_blk = inline_data_addr(ipage);
	de = &dentry_blk->dentry[1];
	*p = ipage;
	unlock_page(ipage);
	return de;
}

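/*
 * Create the "." and ".." entries of a fresh inline directory and
 * raise i_size to cover the whole inline area.
 */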
int make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_inline_dentry *dentry_blk;
	struct f2fs_dentry_ptr d;

	dentry_blk = inline_data_addr(ipage);

	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
	do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA) {
		i_size_write(inode, MAX_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return 0;
}

/*
 * NOTE: ipage is grabbed by caller, but if any error occurs, we should
 * release ipage in this function.
 */
static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = grab_cache_page(dir->i_mapping, 0);
	if (!page) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
		f2fs_put_dnode(&dn);
		set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
		f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
			"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
			"run fsck to fix.",
			__func__, dir->i_ino, dn.data_blkaddr);
		err = -EINVAL;
		goto out;
	}

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
			SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
	/*
	 * we do not need to zero out the remainder of the dentry and
	 * filename fields, since the bitmap already marks which slots are
	 * in use; likewise, the reserved space of the dentry block needs
	 * no copying or zeroing because it has never been used so far.
	 */
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);
	kunmap_atomic(dentry_blk);
	SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_inode(ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
		i_size_write(dir, PAGE_CACHE_SIZE);
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	sync_inode_page(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

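/*
 * Replay every in-use entry of @inline_dentry through the regular
 * insert path; on failure, punch out whatever was added so the caller
 * can restore the inline copy.
 */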
static int f2fs_add_inline_entries(struct inode *dir,
			struct f2fs_inline_dentry *inline_dentry)
{
	struct f2fs_dentry_ptr d;
	unsigned long bit_pos = 0;
	int err = 0;

	make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);

	while (bit_pos < d.max) {
		struct f2fs_dir_entry *de;
		struct qstr new_name;
		nid_t ino;
		umode_t fake_mode;

		if (!test_bit_le(bit_pos, d.bitmap)) {
			bit_pos++;
			continue;
		}

		de = &d.dentry[bit_pos];
		new_name.name = d.filename[bit_pos];
		new_name.len = de->name_len;

		ino = le32_to_cpu(de->ino);
		fake_mode = get_de_type(de) << S_SHIFT;

		err = f2fs_add_regular_entry(dir, &new_name, NULL,
							ino, fake_mode);
		if (err)
			goto punch_dentry_pages;

		if (unlikely(!de->name_len))
			d.max = -1;

		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}
	return 0;
punch_dentry_pages:
	truncate_inode_pages(&dir->i_data, 0);
	truncate_blocks(dir, 0, false);
	remove_dirty_dir_inode(dir);
	return err;
}

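/*
 * For directories with a non-zero hash level, conversion re-adds each
 * entry through the multi-level hash insert path instead of copying
 * the inline block verbatim, keeping a backup for rollback on error.
 */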
static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct f2fs_inline_dentry *backup_dentry;
	int err;

	backup_dentry = kmalloc(sizeof(struct f2fs_inline_dentry),
							GFP_F2FS_ZERO);
	if (!backup_dentry)
		return -ENOMEM;

	memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA);
	truncate_inline_inode(ipage, 0);

	unlock_page(ipage);

	err = f2fs_add_inline_entries(dir, backup_dentry);
	if (err)
		goto recover;

	lock_page(ipage);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
	update_inode(dir, ipage);
	kfree(backup_dentry);
	return 0;
recover:
	lock_page(ipage);
	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
	i_size_write(dir, MAX_INLINE_DATA);
	update_inode(dir, ipage);
	f2fs_put_page(ipage, 1);

	kfree(backup_dentry);
	return err;
}

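/* Pick the conversion strategy based on the directory hash level. */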
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	if (!F2FS_I(dir)->i_dir_level)
		return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
	else
		return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
}

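/*
 * Insert an entry into an inline directory. When no free slots are
 * left, the directory is first converted to the regular form and
 * -EAGAIN is returned so the caller retries via the normal path.
 */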
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	size_t namelen = name->len;
	struct f2fs_inline_dentry *dentry_blk = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page = NULL;
	int err = 0;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	dentry_blk = inline_data_addr(ipage);
	bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
		if (err)
			return err;
		err = -EAGAIN;
		goto out;
	}

	if (inode) {
		down_write(&F2FS_I(inode)->i_sem);
		page = init_inode_metadata(inode, dir, name, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE);

	name_hash = f2fs_dentry_hash(name, NULL);
	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
	f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		F2FS_I(inode)->i_pino = dir->i_ino;
		update_inode(inode, page);
		f2fs_put_page(page, 1);
	}

	update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode(dir, ipage);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
out:
	f2fs_put_page(ipage, 1);
	return err;
}

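/*
 * Remove @dentry from an inline directory by clearing its bitmap
 * slots, then drop the victim's link count if an inode was given.
 */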
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_inline_dentry *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE);

	inline_dentry = inline_data_addr(page);
	bit_pos = dentry - inline_dentry->dentry;
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i,
				&inline_dentry->dentry_bitmap);

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode)
		f2fs_drop_nlink(dir, inode, page);

	f2fs_put_page(page, 1);
}

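/*
 * An inline directory is empty when no bitmap bit beyond the "." and
 * ".." slots (bit_pos 2) is set.
 */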
bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos = 2;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	dentry_blk = inline_data_addr(ipage);
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
					NR_INLINE_DENTRY,
					bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < NR_INLINE_DENTRY)
		return false;

	return true;
}

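/* readdir over the single inline dentry block */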
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
				struct f2fs_str *fstr)
{
	struct inode *inode = file_inode(file);
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;

	if (ctx->pos == NR_INLINE_DENTRY)
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);

	if (!f2fs_fill_dentries(ctx, &d, 0, fstr))
		ctx->pos = NR_INLINE_DENTRY;

	f2fs_put_page(ipage, 1);
	return 0;
}

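/*
 * FIEMAP for inline data: report a single NOT_ALIGNED, DATA_INLINE,
 * LAST extent whose byte address points into the on-disk inode block.
 */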
int f2fs_inline_data_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
{
	__u64 byteaddr, ilen;
	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
		FIEMAP_EXTENT_LAST;
	struct node_info ni;
	struct page *ipage;
	int err = 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!f2fs_has_inline_data(inode)) {
		err = -EAGAIN;
		goto out;
	}

	ilen = min_t(size_t, MAX_INLINE_DATA, i_size_read(inode));
	if (start >= ilen)
		goto out;
	if (start + len < ilen)
		ilen = start + len;
	ilen -= start;

	get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
	byteaddr += (char *)inline_data_addr(ipage) - (char *)F2FS_INODE(ipage);
	err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
out:
	f2fs_put_page(ipage, 1);
	return err;
}