  1. /*
  2. * dir.c - NILFS directory entry operations
  3. *
  4. * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  19. *
  20. * Modified for NILFS by Amagai Yoshiji <amagai@osrg.net>
  21. */
  22. /*
  23. * linux/fs/ext2/dir.c
  24. *
  25. * Copyright (C) 1992, 1993, 1994, 1995
  26. * Remy Card (card@masi.ibp.fr)
  27. * Laboratoire MASI - Institut Blaise Pascal
  28. * Universite Pierre et Marie Curie (Paris VI)
  29. *
  30. * from
  31. *
  32. * linux/fs/minix/dir.c
  33. *
  34. * Copyright (C) 1991, 1992 Linus Torvalds
  35. *
  36. * ext2 directory handling functions
  37. *
  38. * Big-endian to little-endian byte-swapping/bitmaps by
  39. * David S. Miller (davem@caip.rutgers.edu), 1995
  40. *
  41. * All code that works with directory layout had been switched to pagecache
  42. * and moved here. AV
  43. */
  44. #include <linux/pagemap.h>
  45. #include "nilfs.h"
  46. #include "page.h"
  47. /*
  48. * nilfs uses block-sized chunks. Arguably, sector-sized ones would be
  49. * more robust, but we have what we have
  50. */
  51. static inline unsigned nilfs_chunk_size(struct inode *inode)
  52. {
  53. return inode->i_sb->s_blocksize;
  54. }
/*
 * nilfs_put_page - release a directory page obtained from nilfs_get_page()
 *
 * Undoes the kmap() taken when the page was read, then drops the page
 * cache reference.
 */
static inline void nilfs_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}
/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr)
{
	/* i_size is narrowed to unsigned before the page offset is removed. */
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_CACHE_SHIFT;
	/* Pages before the last one are valid in their entirety. */
	if (last_byte > PAGE_CACHE_SIZE)
		last_byte = PAGE_CACHE_SIZE;
	return last_byte;
}
/*
 * nilfs_prepare_chunk - get the byte range [from, to) of a locked page
 * ready for writing.
 *
 * Maps buffers for the range via __block_write_begin() using
 * nilfs_get_block().  Returns 0 on success or a negative error code.
 */
static int nilfs_prepare_chunk(struct page *page, unsigned from, unsigned to)
{
	loff_t pos = page_offset(page) + from;

	return __block_write_begin(page, pos, to - from, nilfs_get_block);
}
/*
 * nilfs_commit_chunk - commit a directory chunk written into @page
 *
 * Completes a write started by nilfs_prepare_chunk(): dirties the
 * affected buffers via block_write_end(), extends i_size when the chunk
 * ends beyond it, requests a synchronous transaction when the directory
 * is DIRSYNC, and charges the newly dirtied buffers to the inode.
 * Unlocks the page before returning.
 */
static void nilfs_commit_chunk(struct page *page,
			       struct address_space *mapping,
			       unsigned from, unsigned to)
{
	struct inode *dir = mapping->host;
	loff_t pos = page_offset(page) + from;
	unsigned len = to - from;
	unsigned nr_dirty, copied;
	int err;

	/* Count still-clean buffers before block_write_end() dirties them. */
	nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
	copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
	if (pos + copied > dir->i_size)
		i_size_write(dir, pos + copied);
	if (IS_DIRSYNC(dir))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	err = nilfs_set_file_dirty(dir, nr_dirty);
	WARN_ON(err); /* do not happen */
	unlock_page(page);
}
  96. static void nilfs_check_page(struct page *page)
  97. {
  98. struct inode *dir = page->mapping->host;
  99. struct super_block *sb = dir->i_sb;
  100. unsigned chunk_size = nilfs_chunk_size(dir);
  101. char *kaddr = page_address(page);
  102. unsigned offs, rec_len;
  103. unsigned limit = PAGE_CACHE_SIZE;
  104. struct nilfs_dir_entry *p;
  105. char *error;
  106. if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
  107. limit = dir->i_size & ~PAGE_CACHE_MASK;
  108. if (limit & (chunk_size - 1))
  109. goto Ebadsize;
  110. if (!limit)
  111. goto out;
  112. }
  113. for (offs = 0; offs <= limit - NILFS_DIR_REC_LEN(1); offs += rec_len) {
  114. p = (struct nilfs_dir_entry *)(kaddr + offs);
  115. rec_len = nilfs_rec_len_from_disk(p->rec_len);
  116. if (rec_len < NILFS_DIR_REC_LEN(1))
  117. goto Eshort;
  118. if (rec_len & 3)
  119. goto Ealign;
  120. if (rec_len < NILFS_DIR_REC_LEN(p->name_len))
  121. goto Enamelen;
  122. if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
  123. goto Espan;
  124. }
  125. if (offs != limit)
  126. goto Eend;
  127. out:
  128. SetPageChecked(page);
  129. return;
  130. /* Too bad, we had an error */
  131. Ebadsize:
  132. nilfs_error(sb, "nilfs_check_page",
  133. "size of directory #%lu is not a multiple of chunk size",
  134. dir->i_ino
  135. );
  136. goto fail;
  137. Eshort:
  138. error = "rec_len is smaller than minimal";
  139. goto bad_entry;
  140. Ealign:
  141. error = "unaligned directory entry";
  142. goto bad_entry;
  143. Enamelen:
  144. error = "rec_len is too small for name_len";
  145. goto bad_entry;
  146. Espan:
  147. error = "directory entry across blocks";
  148. bad_entry:
  149. nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
  150. "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
  151. dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
  152. (unsigned long) le64_to_cpu(p->inode),
  153. rec_len, p->name_len);
  154. goto fail;
  155. Eend:
  156. p = (struct nilfs_dir_entry *)(kaddr + offs);
  157. nilfs_error(sb, "nilfs_check_page",
  158. "entry in directory #%lu spans the page boundary"
  159. "offset=%lu, inode=%lu",
  160. dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
  161. (unsigned long) le64_to_cpu(p->inode));
  162. fail:
  163. SetPageChecked(page);
  164. SetPageError(page);
  165. }
  166. static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
  167. {
  168. struct address_space *mapping = dir->i_mapping;
  169. struct page *page = read_mapping_page(mapping, n, NULL);
  170. if (!IS_ERR(page)) {
  171. kmap(page);
  172. if (!PageChecked(page))
  173. nilfs_check_page(page);
  174. if (PageError(page))
  175. goto fail;
  176. }
  177. return page;
  178. fail:
  179. nilfs_put_page(page);
  180. return ERR_PTR(-EIO);
  181. }
  182. /*
  183. * NOTE! unlike strncmp, nilfs_match returns 1 for success, 0 for failure.
  184. *
  185. * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller.
  186. */
  187. static int
  188. nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de)
  189. {
  190. if (len != de->name_len)
  191. return 0;
  192. if (!de->inode)
  193. return 0;
  194. return !memcmp(name, de->name, len);
  195. }
  196. /*
  197. * p is at least 6 bytes before the end of page
  198. */
  199. static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p)
  200. {
  201. return (struct nilfs_dir_entry *)((char *)p +
  202. nilfs_rec_len_from_disk(p->rec_len));
  203. }
/*
 * Map on-disk NILFS_FT_* directory entry file types to the DT_* values
 * reported to user space by readdir.
 */
static unsigned char
nilfs_filetype_table[NILFS_FT_MAX] = {
	[NILFS_FT_UNKNOWN]	= DT_UNKNOWN,
	[NILFS_FT_REG_FILE]	= DT_REG,
	[NILFS_FT_DIR]		= DT_DIR,
	[NILFS_FT_CHRDEV]	= DT_CHR,
	[NILFS_FT_BLKDEV]	= DT_BLK,
	[NILFS_FT_FIFO]		= DT_FIFO,
	[NILFS_FT_SOCK]		= DT_SOCK,
	[NILFS_FT_SYMLINK]	= DT_LNK,
};

/* Shift that moves the S_IFMT bits of i_mode down to a table index. */
#define S_SHIFT 12

/* Map i_mode file-type bits (shifted by S_SHIFT) to NILFS_FT_* codes. */
static unsigned char
nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= NILFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= NILFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= NILFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= NILFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= NILFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= NILFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= NILFS_FT_SYMLINK,
};
  226. static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
  227. {
  228. umode_t mode = inode->i_mode;
  229. de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
  230. }
/*
 * nilfs_readdir - iterate over directory entries (->iterate)
 * @file: open directory
 * @ctx: VFS directory context; ctx->pos encodes page index and offset
 *
 * Emits each live entry via dir_emit() and advances ctx->pos by the
 * on-disk rec_len so iteration can resume at the same entry later.
 * Returns 0 on completion or when the caller's buffer fills, -EIO on a
 * bad page or a corrupt (zero-length) entry.
 */
static int nilfs_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
	/* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */

	/* No entry can start within the last NILFS_DIR_REC_LEN(1) bytes. */
	if (pos > inode->i_size - NILFS_DIR_REC_LEN(1))
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct nilfs_dir_entry *de;
		struct page *page = nilfs_get_page(inode, n);

		if (IS_ERR(page)) {
			nilfs_error(sb, __func__, "bad page in #%lu",
				    inode->i_ino);
			/* Skip past the unreadable page before failing. */
			ctx->pos += PAGE_CACHE_SIZE - offset;
			return -EIO;
		}
		kaddr = page_address(page);
		de = (struct nilfs_dir_entry *)(kaddr + offset);
		/* Last address at which a minimal entry could still start. */
		limit = kaddr + nilfs_last_byte(inode, n) -
			NILFS_DIR_REC_LEN(1);
		for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) {
			if (de->rec_len == 0) {
				/* Corrupt entry: iteration cannot advance. */
				nilfs_error(sb, __func__,
					    "zero-length directory entry");
				nilfs_put_page(page);
				return -EIO;
			}
			if (de->inode) {
				unsigned char t;

				if (de->file_type < NILFS_FT_MAX)
					t = nilfs_filetype_table[de->file_type];
				else
					t = DT_UNKNOWN;

				if (!dir_emit(ctx, de->name, de->name_len,
						le64_to_cpu(de->inode), t)) {
					/* Caller's buffer is full; stop. */
					nilfs_put_page(page);
					return 0;
				}
			}
			ctx->pos += nilfs_rec_len_from_disk(de->rec_len);
		}
		nilfs_put_page(page);
	}
	return 0;
}
/*
 * nilfs_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_dir). Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 */
struct nilfs_dir_entry *
nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
		 struct page **res_page)
{
	const unsigned char *name = qstr->name;
	int namelen = qstr->len;
	unsigned reclen = NILFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct nilfs_inode_info *ei = NILFS_I(dir);
	struct nilfs_dir_entry *de;

	if (npages == 0)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;

	/* Resume at the page that satisfied the previous lookup. */
	start = ei->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;

		page = nilfs_get_page(dir, n);
		if (!IS_ERR(page)) {
			kaddr = page_address(page);
			de = (struct nilfs_dir_entry *)kaddr;
			/* Last address where a @reclen entry could start. */
			kaddr += nilfs_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (de->rec_len == 0) {
					nilfs_error(dir->i_sb, __func__,
						    "zero-length directory entry");
					nilfs_put_page(page);
					goto out;
				}
				if (nilfs_match(namelen, name, de))
					goto found;
				de = nilfs_next_entry(de);
			}
			nilfs_put_page(page);
		}
		/* Wrap around so every page is visited exactly once. */
		if (++n >= npages)
			n = 0;
		/* next page is past the blocks we've got */
		if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
			nilfs_error(dir->i_sb, __func__,
				    "dir %lu size %lld exceeds block count %llu",
				    dir->i_ino, dir->i_size,
				    (unsigned long long)dir->i_blocks);
			goto out;
		}
	} while (n != start);
out:
	return NULL;

found:
	*res_page = page;
	/* Remember the page index to speed up the next lookup. */
	ei->i_dir_start_lookup = n;
	return de;
}
  347. struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
  348. {
  349. struct page *page = nilfs_get_page(dir, 0);
  350. struct nilfs_dir_entry *de = NULL;
  351. if (!IS_ERR(page)) {
  352. de = nilfs_next_entry(
  353. (struct nilfs_dir_entry *)page_address(page));
  354. *p = page;
  355. }
  356. return de;
  357. }
  358. ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
  359. {
  360. ino_t res = 0;
  361. struct nilfs_dir_entry *de;
  362. struct page *page;
  363. de = nilfs_find_entry(dir, qstr, &page);
  364. if (de) {
  365. res = le64_to_cpu(de->inode);
  366. kunmap(page);
  367. page_cache_release(page);
  368. }
  369. return res;
  370. }
/*
 * nilfs_set_link - repoint an existing directory entry at @inode
 * @dir: directory containing the entry
 * @de: entry to rewrite (must lie within @page)
 * @page: mapped directory page holding @de
 * @inode: new target of the entry
 *
 * Rewrites the inode number and file type of @de in place and commits
 * the chunk.  Releases the page.
 */
void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
		    struct page *page, struct inode *inode)
{
	unsigned from = (char *) de - (char *) page_address(page);
	unsigned to = from + nilfs_rec_len_from_disk(de->rec_len);
	struct address_space *mapping = page->mapping;
	int err;

	lock_page(page);
	err = nilfs_prepare_chunk(page, from, to);
	BUG_ON(err);	/* not expected to fail for an existing chunk */
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);
	nilfs_commit_chunk(page, mapping, from, to);	/* unlocks the page */
	nilfs_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
}
/*
 * nilfs_add_link - add an entry for @inode under @dentry's name
 *
 * Scans the directory for a slot large enough for the new name: either
 * an unused entry, the slack after a live entry (which is then split),
 * or the end of the directory (which is then extended by one chunk).
 *
 * Returns 0 on success, -EEXIST if the name already exists, or another
 * negative error code.
 *
 * Parent is locked.
 */
int nilfs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const unsigned char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned chunk_size = nilfs_chunk_size(dir);
	unsigned reclen = NILFS_DIR_REC_LEN(namelen);
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	struct nilfs_dir_entry *de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	unsigned from, to;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = nilfs_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		dir_end = kaddr + nilfs_last_byte(dir, n);
		de = (struct nilfs_dir_entry *)kaddr;
		/* Last address where a @reclen entry could still start. */
		kaddr += PAGE_CACHE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->rec_len = nilfs_rec_len_to_disk(chunk_size);
				de->inode = 0;
				goto got_it;
			}
			if (de->rec_len == 0) {
				nilfs_error(dir->i_sb, __func__,
					    "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (nilfs_match(namelen, name, de))
				goto out_unlock;
			/* Bytes this entry actually needs for its name. */
			name_len = NILFS_DIR_REC_LEN(de->name_len);
			rec_len = nilfs_rec_len_from_disk(de->rec_len);
			/* An unused entry big enough for the new name? */
			if (!de->inode && rec_len >= reclen)
				goto got_it;
			/* Enough slack after this live entry to split it? */
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct nilfs_dir_entry *)((char *)de + rec_len);
		}
		unlock_page(page);
		nilfs_put_page(page);
	}
	/* The loop above must find room by extending the directory. */
	BUG();
	return -EINVAL;

got_it:
	from = (char *)de - (char *)page_address(page);
	to = from + rec_len;
	err = nilfs_prepare_chunk(page, from, to);
	if (err)
		goto out_unlock;
	if (de->inode) {
		/* Split: @de keeps its name, @de1 takes the slack. */
		struct nilfs_dir_entry *de1;

		de1 = (struct nilfs_dir_entry *)((char *)de + name_len);
		de1->rec_len = nilfs_rec_len_to_disk(rec_len - name_len);
		de->rec_len = nilfs_rec_len_to_disk(name_len);
		de = de1;
	}
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);
	nilfs_commit_chunk(page, page->mapping, from, to);	/* unlocks */
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	nilfs_mark_inode_dirty(dir);
	/* OFFSET_CACHE */
out_put:
	nilfs_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}
/*
 * nilfs_delete_entry deletes a directory entry by merging it with the
 * previous entry. Page is up-to-date. Releases the page.
 *
 * @dir here is the entry to delete, not the directory inode; the inode
 * is taken from the page's mapping.
 */
int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	char *kaddr = page_address(page);
	/* Start of the chunk containing the doomed entry. */
	unsigned from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1);
	unsigned to = ((char *)dir - kaddr) +
		nilfs_rec_len_from_disk(dir->rec_len);
	struct nilfs_dir_entry *pde = NULL;
	struct nilfs_dir_entry *de = (struct nilfs_dir_entry *)(kaddr + from);
	int err;

	/* Walk the chunk to find the entry preceding @dir, if any. */
	while ((char *)de < (char *)dir) {
		if (de->rec_len == 0) {
			nilfs_error(inode->i_sb, __func__,
				    "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = nilfs_next_entry(de);
	}
	if (pde)
		from = (char *)pde - (char *)page_address(page);
	lock_page(page);
	err = nilfs_prepare_chunk(page, from, to);
	BUG_ON(err);	/* not expected to fail for an existing chunk */
	if (pde)
		/* Grow the previous entry to swallow the deleted one. */
		pde->rec_len = nilfs_rec_len_to_disk(to - from);
	/* A zero inode marks the slot as unused. */
	dir->inode = 0;
	nilfs_commit_chunk(page, mapping, from, to);	/* unlocks the page */
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
out:
	nilfs_put_page(page);
	return err;
}
/*
 * Set the first fragment of directory.
 *
 * Writes the initial chunk of a fresh directory @inode, containing the
 * "." entry (self) and the ".." entry (pointing at @parent), with ".."
 * spanning the remainder of the chunk.  Returns 0 or a negative error.
 */
int nilfs_make_empty(struct inode *inode, struct inode *parent)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);	/* locked */
	unsigned chunk_size = nilfs_chunk_size(inode);
	struct nilfs_dir_entry *de;
	int err;
	void *kaddr;

	if (!page)
		return -ENOMEM;

	err = nilfs_prepare_chunk(page, 0, chunk_size);
	if (unlikely(err)) {
		unlock_page(page);
		goto fail;
	}
	kaddr = kmap_atomic(page);
	memset(kaddr, 0, chunk_size);

	/* "." entry, pointing at the new directory itself. */
	de = (struct nilfs_dir_entry *)kaddr;
	de->name_len = 1;
	de->rec_len = nilfs_rec_len_to_disk(NILFS_DIR_REC_LEN(1));
	memcpy(de->name, ".\0\0", 4);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);

	/* ".." entry, pointing at @parent; covers the rest of the chunk. */
	de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1));
	de->name_len = 2;
	de->rec_len = nilfs_rec_len_to_disk(chunk_size - NILFS_DIR_REC_LEN(1));
	de->inode = cpu_to_le64(parent->i_ino);
	memcpy(de->name, "..\0", 4);
	nilfs_set_de_type(de, inode);
	kunmap_atomic(kaddr);
	nilfs_commit_chunk(page, mapping, 0, chunk_size);	/* unlocks */
fail:
	page_cache_release(page);
	return err;
}
/*
 * routine to check that the specified directory is empty (for rmdir)
 *
 * Returns 1 if @inode contains only the "." and ".." entries,
 * 0 otherwise (including on a corrupt zero-length entry).
 */
int nilfs_empty_dir(struct inode *inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		struct nilfs_dir_entry *de;

		/*
		 * NOTE(review): an unreadable page is skipped and thus
		 * treated as if it held no entries — confirm intended.
		 */
		page = nilfs_get_page(inode, i);
		if (IS_ERR(page))
			continue;

		kaddr = page_address(page);
		de = (struct nilfs_dir_entry *)kaddr;
		/* Last address at which a minimal entry could start. */
		kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				nilfs_error(inode->i_sb, __func__,
					    "zero-length directory entry "
					    "(kaddr=%p, de=%p)\n", kaddr, de);
				goto not_empty;
			}
			if (de->inode != 0) {
				/* check for . and .. */
				if (de->name[0] != '.')
					goto not_empty;
				if (de->name_len > 2)
					goto not_empty;
				if (de->name_len < 2) {
					/* "." must point at @inode itself. */
					if (de->inode !=
					    cpu_to_le64(inode->i_ino))
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
			}
			de = nilfs_next_entry(de);
		}
		nilfs_put_page(page);
	}
	return 1;

not_empty:
	nilfs_put_page(page);
	return 0;
}
/*
 * File operations for NILFS directories: seeking and (refused) reads
 * use the generic helpers; iteration, ioctls and fsync are
 * NILFS-specific.
 */
const struct file_operations nilfs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= nilfs_readdir,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_compat_ioctl,
#endif	/* CONFIG_COMPAT */
	.fsync		= nilfs_sync_file,
};