summaryrefslogtreecommitdiffstats
path: root/mm/shmem.c
diff options
context:
space:
mode:
authorMatthew Wilcox (Oracle) <willy@infradead.org>2022-09-02 20:46:29 +0100
committerAndrew Morton <akpm@linux-foundation.org>2022-10-03 14:02:50 -0700
commit923e2f0e7c30db5c1ee5d680050ab781e6c114fb (patch)
tree20e5edd64815a3774f808f85156eea2264ea2a14 /mm/shmem.c
parent12acf4fbc4f78b24822317888b9406d56dc9ad2a (diff)
downloadlinux-923e2f0e7c30db5c1ee5d680050ab781e6c114fb.tar.bz2
shmem: remove shmem_getpage()
With all callers removed, remove this wrapper function. The flags are now mysteriously called SGP, but I think we can live with that. Link: https://lkml.kernel.org/r/20220902194653.1739778-34-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/shmem.c')
-rw-r--r-- mm/shmem.c | 15
1 file changed, 1 insertion(+), 14 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 909149b25d98..3d0b729fcc5e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -179,7 +179,7 @@ static inline int shmem_reacct_size(unsigned long flags,
/*
* ... whereas tmpfs objects are accounted incrementally as
* pages are allocated, in order to allow large sparse files.
- * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
+ * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
* so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
*/
static inline int shmem_acct_block(unsigned long flags, long pages)
@@ -2031,19 +2031,6 @@ int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}
-int shmem_getpage(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp)
-{
- struct folio *folio = NULL;
- int ret = shmem_get_folio(inode, index, &folio, sgp);
-
- if (folio)
- *pagep = folio_file_page(folio, index);
- else
- *pagep = NULL;
- return ret;
-}
-
/*
* This is like autoremove_wake_function, but it removes the wait queue
* entry unconditionally - even if something else had already woken the