/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to replace
 * mpage_readpages() in the general case, not just for encrypted
 * files.  It has some limitations (see below), where it will fall
 * back to block_read_full_page(), but these limitations should
 * only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end of the file lands inside a page on blocksize < PAGE_CACHE_SIZE
 * setups.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

/*
 * Call ext4_decrypt() on every page in the bio, reusing the bio's
 * encryption context.  This runs from a workqueue rather than from
 * bio completion because the decryption may sleep, which is not
 * allowed in the interrupt context that mpage_end_io() runs in.
 */
static void completion_pages(struct work_struct *work)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
        struct ext4_crypto_ctx *ctx =
                container_of(work, struct ext4_crypto_ctx, r.work);
        struct bio *bio = ctx->r.bio;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
                int ret = ext4_decrypt(page);

                if (ret) {
                        WARN_ON_ONCE(1);
                        SetPageError(page);
                } else
                        SetPageUptodate(page);
                unlock_page(page);
        }
        ext4_release_crypto_ctx(ctx);
        bio_put(bio);
#else
        BUG();
#endif
}
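
/*
 * A read bio's bi_private is only ever used to carry the crypto
 * context (see ext4_mpage_readpages() below), so a non-NULL
 * bi_private identifies an encrypted bio.
 */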
static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
        return unlikely(bio->bi_private != NULL);
#else
        return false;
#endif
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
        struct bio_vec *bv;
        int i;

        if (ext4_bio_encrypted(bio)) {
                struct ext4_crypto_ctx *ctx = bio->bi_private;

                if (bio->bi_error) {
                        ext4_release_crypto_ctx(ctx);
                } else {
                        INIT_WORK(&ctx->r.work, completion_pages);
                        ctx->r.bio = bio;
                        queue_work(ext4_read_workqueue, &ctx->r.work);
                        return;
                }
        }
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;

                if (!bio->bi_error) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                unlock_page(page);
        }
        bio_put(bio);
}
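
/*
 * Read the blocks backing either a single page (@page, with @pages ==
 * NULL) or a list of @nr_pages pages (@pages, with @page == NULL),
 * batching physically contiguous pages into as few BIOs as possible.
 *
 * For illustration only -- a minimal sketch of how a ->readpage hook
 * might invoke this function; the caller below is an assumption for
 * illustration, not part of this file:
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		return ext4_mpage_readpages(page->mapping, NULL, page, 1);
 *	}
 */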
int ext4_mpage_readpages(struct address_space *mapping,
                         struct list_head *pages, struct page *page,
                         unsigned nr_pages)
{
        struct bio *bio = NULL;
        unsigned page_idx;
        sector_t last_block_in_bio = 0;

        struct inode *inode = mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        struct block_device *bdev = inode->i_sb->s_bdev;
        int length;
        unsigned relative_block = 0;
        struct ext4_map_blocks map;

        map.m_pblk = 0;
        map.m_lblk = 0;
        map.m_len = 0;
        map.m_flags = 0;

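        /*
         * Walk the requested pages; for each one, translate its file
         * offset into disk blocks and, where the blocks are contiguous
         * on disk, batch the page into the bio under construction.
         */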
        for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
                int fully_mapped = 1;
                unsigned first_hole = blocks_per_page;

                if (pages) {
                        page = list_entry(pages->prev, struct page, lru);
                        list_del(&page->lru);
                        if (add_to_page_cache_lru(page, mapping, page->index,
                                  mapping_gfp_constraint(mapping, GFP_KERNEL)))
                                goto next_page;
                }
                /* Prefetch only once the page to be read is known. */
                prefetchw(&page->flags);

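                /*
                 * A page that already has buffer heads attached may be
                 * partially uptodate; punt it to the buffer_head-based
                 * path rather than reasoning about buffer state here.
                 */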
                if (page_has_buffers(page))
                        goto confused;

                block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
                last_block = block_in_file + nr_pages * blocks_per_page;
                last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
                if (last_block > last_block_in_file)
                        last_block = last_block_in_file;
                page_block = 0;

                /*
                 * Map blocks using the previous result first.
                 */
                if ((map.m_flags & EXT4_MAP_MAPPED) &&
                    block_in_file > map.m_lblk &&
                    block_in_file < (map.m_lblk + map.m_len)) {
                        unsigned map_offset = block_in_file - map.m_lblk;
                        unsigned last = map.m_len - map_offset;

                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == last) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                }
                                if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk + map_offset +
                                        relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }

                /*
                 * Then do more ext4_map_blocks() calls until we are
                 * done with this page.
                 */
                while (page_block < blocks_per_page) {
                        if (block_in_file < last_block) {
                                map.m_lblk = block_in_file;
                                map.m_len = last_block - block_in_file;

                                if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
                                set_error_page:
                                        SetPageError(page);
                                        zero_user_segment(page, 0,
                                                          PAGE_CACHE_SIZE);
                                        unlock_page(page);
                                        goto next_page;
                                }
                        }
                        if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
                                fully_mapped = 0;
                                if (first_hole == blocks_per_page)
                                        first_hole = page_block;
                                page_block++;
                                block_in_file++;
                                continue;
                        }
                        if (first_hole != blocks_per_page)
                                goto confused;		/* hole -> non-hole */

                        /* Contiguous blocks? */
                        if (page_block && blocks[page_block - 1] != map.m_pblk - 1)
                                goto confused;
                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == map.m_len) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                } else if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk + relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }
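
                /*
                 * Zero the part of the page that lies in a hole (or past
                 * EOF); a page that is entirely a hole becomes uptodate
                 * without any I/O at all.
                 */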
                if (first_hole != blocks_per_page) {
                        zero_user_segment(page, first_hole << blkbits,
                                          PAGE_CACHE_SIZE);
                        if (first_hole == 0) {
                                SetPageUptodate(page);
                                unlock_page(page);
                                goto next_page;
                        }
                } else if (fully_mapped) {
                        SetPageMappedToDisk(page);
                }
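
                /*
                 * If the page was found in cleancache, no read is needed;
                 * the "confused" path below will see that it is already
                 * uptodate and simply unlock it.
                 */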
                if (fully_mapped && blocks_per_page == 1 &&
                    !PageUptodate(page) && cleancache_get_page(page) == 0) {
                        SetPageUptodate(page);
                        goto confused;
                }

                /*
                 * This page will go to BIO.  Do we need to send this
                 * BIO off first?
                 */
                if (bio && (last_block_in_bio != blocks[0] - 1)) {
                submit_and_realloc:
                        submit_bio(READ, bio);
                        bio = NULL;
                }
                if (bio == NULL) {
                        struct ext4_crypto_ctx *ctx = NULL;

                        if (ext4_encrypted_inode(inode) &&
                            S_ISREG(inode->i_mode)) {
                                ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
                                if (IS_ERR(ctx))
                                        goto set_error_page;
                        }
                        bio = bio_alloc(GFP_KERNEL,
                                        min_t(int, nr_pages, BIO_MAX_PAGES));
                        if (!bio) {
                                if (ctx)
                                        ext4_release_crypto_ctx(ctx);
                                goto set_error_page;
                        }
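                        /*
                         * bi_sector is in 512-byte units: with 4KiB
                         * blocks (blkbits == 12), block N starts at
                         * sector N << 3.
                         */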
                        bio->bi_bdev = bdev;
                        bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                        bio->bi_end_io = mpage_end_io;
                        bio->bi_private = ctx;
                }
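
                /*
                 * Only the bytes up to the first hole need to be read;
                 * the tail of the page was already zeroed above.
                 */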
                length = first_hole << blkbits;
                if (bio_add_page(bio, page, length, 0) < length)
                        goto submit_and_realloc;

                if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
                     (relative_block == map.m_len)) ||
                    (first_hole != blocks_per_page)) {
                        submit_bio(READ, bio);
                        bio = NULL;
                } else
                        last_block_in_bio = blocks[blocks_per_page - 1];
                goto next_page;
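
        /*
         * The "confused" path: this page could not be expressed as a
         * single contiguous run of blocks, so punt it to the
         * buffer_head-based read described in the header comment.
         */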
        confused:
                if (bio) {
                        submit_bio(READ, bio);
                        bio = NULL;
                }
                if (!PageUptodate(page))
                        block_read_full_page(page, ext4_get_block);
                else
                        unlock_page(page);
        next_page:
                if (pages)
                        page_cache_release(page);
        }
        BUG_ON(pages && !list_empty(pages));
        if (bio)
                submit_bio(READ, bio);
        return 0;
}