/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif
#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);

	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}
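/*
 * Locking summary (added for clarity, not part of the original source):
 * callers pin the backing struct page with lock_page() before taking the
 * per-metapage META_locked bit.  When the bit is already held,
 * __lock_metapage() drops the page lock while it sleeps, because the current
 * holder also needs the page lock to finish up and wake us; the page lock is
 * re-taken before retrying.  A caller therefore looks roughly like this
 * (illustrative sketch only):
 *
 *	lock_page(mp->page);
 *	lock_metapage(mp);
 *	... examine or modify mp->data ...
 *	unlock_metapage(mp);
 *	unlock_page(mp->page);
 */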
#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif
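/*
 * Note (added for clarity, not in the original source): PSIZE is the fixed
 * 4 KiB JFS metadata page size (L2PSIZE == 12), so MPS_PER_PAGE is just the
 * VM page size divided by 4 KiB.  On the common 4 KiB-page configuration
 * MPS_PER_PAGE is 1 and the simple #else helpers above are used; on a 64 KiB
 * page machine each struct page carries a meta_anchor tracking which of its
 * metapage slots are live and how many I/Os are in flight:
 *
 *	MPS_PER_PAGE = PAGE_CACHE_SIZE >> L2PSIZE = 65536 >> 12 = 16
 */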
static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (mp) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		init_waitqueue_head(&mp->wait);
	}
	return mp;
}

static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}
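/*
 * Note (added, not in the original source): backing the jfs_mp slab with a
 * mempool of METAPOOL_MIN_PAGES elements means alloc_metapage(GFP_NOFS) can
 * fall back on a small reserve and, once that is exhausted, waits for a
 * metapage to be freed rather than failing, so metadata I/O cannot dead-end
 * on a transient allocation failure under memory pressure.
 */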
static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}
/*
 * Metapage address space operations
 */

static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}
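/*
 * Note (added, not in the original source): metadata inodes with a non-zero
 * i_ino (inode map, block map, journal, ...) are mapped through the xtree
 * with xtLookup(), which may shrink *len to the contiguous extent it found.
 * The special "direct" inode used for raw device addressing appears to use
 * i_ino == 0, so for it the logical block already is the physical block and
 * no lookup is done; a return value of 0 means "no mapping".
 */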
static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	if (bio->bi_error) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}
static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race.  Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}
static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (bio->bi_error) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(WRITE, bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(page);
		xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the page
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(GFP_NOFS, 1);
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(WRITE, bio);
		nr_underway++;
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);
err_out:
	while (bad_blocks--)
		dec_io(page, last_write_complete);
	return -EIO;
}
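/*
 * Worked example (added for clarity, not in the original source): on a 64 KiB
 * page with 4 KiB metapages and a 4 KiB filesystem block size, nbperpage is 1
 * and the loop above visits 16 metapage slots.  If slots 0-2 are dirty and
 * metapage_get_blocks() reports their blocks as physically contiguous, the
 * "Contiguous, in memory & on disk" branch keeps growing bio_bytes and a
 * single 12 KiB bio is submitted; a dirty slot whose block lives elsewhere on
 * disk ends the run and starts a fresh bio.  With 4 KiB pages there is only
 * one slot per page, so each writepage submits at most one bio.
 */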
static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(READ, bio);

			bio = bio_alloc(GFP_NOFS, 1);
			bio->bi_bdev = inode->i_sb->s_bdev;
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(READ, bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct metapage *mp;
	int ret = 1;
	int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_releasepage: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = 0;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned int offset,
				    unsigned int length)
{
	BUG_ON(offset || length < PAGE_CACHE_SIZE);

	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);
}
const struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};
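/*
 * Note (added, not in the original source): these operations are what make
 * JFS metadata flow through the ordinary page cache.  The mount/metadata-
 * inode setup code outside this file appears to install jfs_metapage_aops on
 * the mapping of the special "direct" block device inode and the other
 * metadata inodes, so memory reclaim and writeback call the
 * metapage_writepage()/metapage_releasepage() routines above instead of the
 * buffer-head paths used for regular file data.
 */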
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_CACHE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_CACHE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		mp->page = page;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}
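/*
 * Usage sketch (added for illustration, not part of the original source):
 * callers normally reach __get_metapage() through the read_metapage()/
 * get_metapage() wrappers declared in jfs_metapage.h, which fix the "new"
 * argument.  A simple read-modify-write of one metadata page, assuming a
 * pinned JFS inode "ip" and logical block "blkno", would look roughly like:
 *
 *	struct metapage *mp = read_metapage(ip, blkno, PSIZE, 0);
 *	if (mp) {
 *		... inspect or modify mp->data (PSIZE bytes) ...
 *		mark_metapage_dirty(mp);	/- if modified -/
 *		release_metapage(mp);
 *	}
 *
 * get_metapage() is used instead when the caller is initializing a brand-new
 * block and does not want the old on-disk contents read in.  (Real callers
 * usually dirty metadata through the transaction manager rather than by
 * marking it dirty directly.)
 */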
void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	page_cache_get(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;

	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	page_cache_get(page);
	lock_page(page);
	set_page_dirty(page);
	write_one_page(page, 1);
	clear_bit(META_forcewrite, &mp->flag);
	page_cache_release(page);
}
void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	page_cache_get(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}
void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		page_cache_release(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			write_one_page(page, 1);
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	page_cache_release(page);
}
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}
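/*
 * Usage note (added, not in the original source): this routine is normally
 * reached through the invalidate_pxd_metapages()/invalidate_dxd_metapages()/
 * invalidate_xad_metapages() wrappers in jfs_metapage.h when previously
 * allocated metadata blocks are freed.  Marking the cached copies
 * META_discard ensures stale contents are never written back over blocks
 * that may already have been reused for something else.
 */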
#ifdef CONFIG_JFS_STATISTICS
static int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		       "JFS Metapage statistics\n"
		       "=======================\n"
		       "page allocations = %d\n"
		       "page frees = %d\n"
		       "lock waits = %d\n",
		       mpStat.pagealloc,
		       mpStat.pagefree,
		       mpStat.lockwait);
	return 0;
}

static int jfs_mpstat_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, jfs_mpstat_proc_show, NULL);
}

const struct file_operations jfs_mpstat_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= jfs_mpstat_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif