author     Matthew Wilcox (Oracle) <willy@infradead.org>   2020-12-15 23:11:07 -0500
committer  Matthew Wilcox (Oracle) <willy@infradead.org>   2021-10-18 07:49:40 -0400
commit     bb3c579e25e5757bc5bac1333f4a56dfebf7cb91
tree       1a20d81732d86e00c385fc709e8fd87465144e08
parent     cc09cb134124a42fbe3bdcebefdc54e286d8f3e5
mm/filemap: Add filemap_alloc_folio
Reimplement __page_cache_alloc as a wrapper around filemap_alloc_folio
to allow filesystems to be converted at our leisure. Increases
kernel text size by 133 bytes, mostly in cachefiles_read_backing_file().
pagecache_get_page() shrinks by 32 bytes, though.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
 include/linux/pagemap.h | 11
 mm/filemap.c            | 14
 2 files changed, 15 insertions(+), 10 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index fea9615bab35..51c190ae3b0b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -262,14 +262,19 @@ static inline void *detach_page_private(struct page *page)
 }
 
 #ifdef CONFIG_NUMA
-extern struct page *__page_cache_alloc(gfp_t gfp);
+struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
 #else
-static inline struct page *__page_cache_alloc(gfp_t gfp)
+static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
 {
-	return alloc_pages(gfp, 0);
+	return folio_alloc(gfp, order);
 }
 #endif
 
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+	return &filemap_alloc_folio(gfp, 0)->page;
+}
+
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
 	return __page_cache_alloc(mapping_gfp_mask(x));
diff --git a/mm/filemap.c b/mm/filemap.c
index c4a7f3fdca12..6976fb4af390 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1006,24 +1006,24 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
 
 #ifdef CONFIG_NUMA
-struct page *__page_cache_alloc(gfp_t gfp)
+struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
 {
 	int n;
-	struct page *page;
+	struct folio *folio;
 
 	if (cpuset_do_page_mem_spread()) {
 		unsigned int cpuset_mems_cookie;
 
 		do {
 			cpuset_mems_cookie = read_mems_allowed_begin();
 			n = cpuset_mem_spread_node();
-			page = __alloc_pages_node(n, gfp, 0);
-		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
+			folio = __folio_alloc_node(gfp, order, n);
+		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
 
-		return page;
+		return folio;
 	}
-	return alloc_pages(gfp, 0);
+	return folio_alloc(gfp, order);
 }
-EXPORT_SYMBOL(__page_cache_alloc);
+EXPORT_SYMBOL(filemap_alloc_folio);
 #endif
 /*
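
Usage illustration (not part of the commit): once filemap_alloc_folio() is exported, a filesystem can ask for a multi-page folio directly instead of going through the single-page __page_cache_alloc() wrapper. The sketch below is hypothetical; example_get_cache_folio() is an invented name, and trying order 2 with __GFP_NORETRY before falling back to order 0 is just one plausible policy, not something this patch prescribes.

#include <linux/pagemap.h>
#include <linux/gfp.h>

/* Hypothetical helper: allocate a folio for @mapping, preferring order 2. */
static struct folio *example_get_cache_folio(struct address_space *mapping)
{
	gfp_t gfp = mapping_gfp_mask(mapping);
	struct folio *folio;

	/* Opportunistic large allocation; __GFP_NORETRY keeps failure cheap. */
	folio = filemap_alloc_folio(gfp | __GFP_NORETRY, 2);
	if (!folio)
		folio = filemap_alloc_folio(gfp, 0);	/* fall back to one page */

	/* Caller is expected to add the folio to the page cache or free it. */
	return folio;
}

Because __page_cache_alloc() is now just filemap_alloc_folio(gfp, 0) with the folio converted back to a page, existing callers keep their behaviour while converted filesystems can pass a non-zero order.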