author		Linus Torvalds <torvalds@linux-foundation.org>	2022-05-24 19:55:07 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-05-24 19:55:07 -0700
commit		fdaf9a5840acaab18694a19e0eb0aa51162eeeed (patch)
tree		a027770138bccf9114cc83bafaa57accc13c91a6 /fs/mpage.c
parent		8642174b52214dde4d8113f28fb4c9be5a432126 (diff)
parent		516edb456f121e819d2130571004ed82f9566c4d (diff)
download	linux-fdaf9a5840acaab18694a19e0eb0aa51162eeeed.tar.bz2
Merge tag 'folio-5.19' of git://git.infradead.org/users/willy/pagecache
Pull page cache updates from Matthew Wilcox:

 - Appoint myself page cache maintainer

 - Fix how scsicam uses the page cache

 - Use the memalloc_nofs_save() API to replace AOP_FLAG_NOFS

 - Remove the AOP flags entirely

 - Remove pagecache_write_begin() and pagecache_write_end()

 - Documentation updates

 - Convert several address_space operations to use folios:
     - is_dirty_writeback
     - readpage becomes read_folio
     - releasepage becomes release_folio
     - freepage becomes free_folio

 - Change filler_t to require a struct file pointer be the first
   argument like ->read_folio

* tag 'folio-5.19' of git://git.infradead.org/users/willy/pagecache: (107 commits)
  nilfs2: Fix some kernel-doc comments
  Appoint myself page cache maintainer
  fs: Remove aops->freepage
  secretmem: Convert to free_folio
  nfs: Convert to free_folio
  orangefs: Convert to free_folio
  fs: Add free_folio address space operation
  fs: Convert drop_buffers() to use a folio
  fs: Change try_to_free_buffers() to take a folio
  jbd2: Convert release_buffer_page() to use a folio
  jbd2: Convert jbd2_journal_try_to_free_buffers to take a folio
  reiserfs: Convert release_buffer_page() to use a folio
  fs: Remove last vestiges of releasepage
  ubifs: Convert to release_folio
  reiserfs: Convert to release_folio
  orangefs: Convert to release_folio
  ocfs2: Convert to release_folio
  nilfs2: Remove comment about releasepage
  nfs: Convert to release_folio
  jfs: Convert to release_folio
  ...
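As an aside on the renames listed above, here is a minimal sketch, assuming a hypothetical filesystem called "examplefs" (names not taken from this series), of what an address_space_operations table looks like once ->readpage, ->releasepage and ->freepage have become ->read_folio, ->release_folio and ->free_folio:

	/*
	 * Hypothetical examplefs: handler prototypes and the aops table
	 * after the folio renames.  Sketch only, not part of this merge.
	 */
	#include <linux/fs.h>
	#include <linux/pagemap.h>

	static int examplefs_read_folio(struct file *file, struct folio *folio);
	static bool examplefs_release_folio(struct folio *folio, gfp_t gfp);
	static void examplefs_free_folio(struct folio *folio);

	static const struct address_space_operations examplefs_aops = {
		.read_folio	= examplefs_read_folio,		/* was .readpage */
		.release_folio	= examplefs_release_folio,	/* was .releasepage */
		.free_folio	= examplefs_free_folio,		/* was .freepage */
	};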
Diffstat (limited to 'fs/mpage.c')
-rw-r--r--	fs/mpage.c	20	+++++++++++---------
1 file changed, 11 insertions, 9 deletions
diff --git a/fs/mpage.c b/fs/mpage.c
index 1fe56f8c495f..0d25f44f5707 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -36,7 +36,7 @@
*
* The mpage code never puts partial pages into a BIO (except for end-of-file).
* If a page does not map to a contiguous run of blocks then it simply falls
- * back to block_read_full_page().
+ * back to block_read_full_folio().
*
* Why is this? If a page's completion depends on a number of different BIOs
* which can complete in any order (or at the same time) then determining the
@@ -68,7 +68,7 @@ static struct bio *mpage_bio_submit(struct bio *bio)
/*
* support function for mpage_readahead. The fs supplied get_block might
* return an up to date buffer. This is used to map that buffer into
- * the page, which allows readpage to avoid triggering a duplicate call
+ * the page, which allows read_folio to avoid triggering a duplicate call
* to get_block.
*
* The idea is to avoid adding buffers to pages that don't already have
@@ -296,7 +296,7 @@ confused:
if (args->bio)
args->bio = mpage_bio_submit(args->bio);
if (!PageUptodate(page))
- block_read_full_page(page, args->get_block);
+ block_read_full_folio(page_folio(page), args->get_block);
else
unlock_page(page);
goto out;
@@ -364,20 +364,22 @@ EXPORT_SYMBOL(mpage_readahead);
/*
* This isn't called much at all
*/
-int mpage_readpage(struct page *page, get_block_t get_block)
+int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
struct mpage_readpage_args args = {
- .page = page,
+ .page = &folio->page,
.nr_pages = 1,
.get_block = get_block,
};
+ VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+
args.bio = do_mpage_readpage(&args);
if (args.bio)
mpage_bio_submit(args.bio);
return 0;
}
-EXPORT_SYMBOL(mpage_readpage);
+EXPORT_SYMBOL(mpage_read_folio);
/*
* Writing is not so simple.
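An aside on the hunk above: a hedged illustration, again with hypothetical examplefs names that are not part of this patch, of how a block-based filesystem's ->read_folio hook would forward to the renamed mpage_read_folio() with its own get_block callback:

	#include <linux/mpage.h>
	#include <linux/buffer_head.h>

	/* Filesystem-specific block mapping callback (declaration only). */
	static int examplefs_get_block(struct inode *inode, sector_t iblock,
				       struct buffer_head *bh_result, int create);

	/* ->read_folio replaces ->readpage; it takes a folio instead of a page. */
	static int examplefs_read_folio(struct file *file, struct folio *folio)
	{
		return mpage_read_folio(folio, examplefs_get_block);
	}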
@@ -425,11 +427,11 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
/*
* we cannot drop the bh if the page is not uptodate or a concurrent
- * readpage would fail to serialize with the bh and it would read from
+ * read_folio would fail to serialize with the bh and it would read from
* disk before we reach the platter.
*/
if (buffer_heads_over_limit && PageUptodate(page))
- try_to_free_buffers(page);
+ try_to_free_buffers(page_folio(page));
}
/*
@@ -510,7 +512,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
/*
* Page has buffers, but they are all unmapped. The page was
* created by pagein or read over a hole which was handled by
- * block_read_full_page(). If this address_space is also
+ * block_read_full_folio(). If this address_space is also
* using mpage_readahead then this can rarely happen.
*/
goto confused;
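Finally, since this series also changes try_to_free_buffers() to take a folio (visible in the clean_buffers() hunk above), a minimal sketch of a matching ->release_folio, once more using the hypothetical examplefs naming:

	#include <linux/buffer_head.h>

	/* ->release_folio replaces ->releasepage and returns bool, not int. */
	static bool examplefs_release_folio(struct folio *folio, gfp_t gfp)
	{
		/* Try to drop clean, unused buffer heads attached to this folio. */
		return try_to_free_buffers(folio);
	}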