author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2021-04-29 23:01:13 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-04-30 11:20:42 -0700
commit | 6e5e0f286eb0ecf12afaa3e73c321bc5bf599abb
tree | 17d2a29d0969ded08216203cc7020dd85bff1a71 /mm/page_alloc.c
parent | 8e6a930bb3ea6aa4b623eececc25465d09ee7b13
mm/page_alloc: rename gfp_mask to gfp
Shorten some overly-long lines by renaming this identifier.
Link: https://lkml.kernel.org/r/20210225150642.2582252-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 18
1 file changed, 9 insertions, 9 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5932a95830dd..c565ebad02ee 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5014,7 +5014,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
+__alloc_pages_nodemask(gfp_t gfp, unsigned int order, int preferred_nid,
                                                         nodemask_t *nodemask)
 {
         struct page *page;
@@ -5027,13 +5027,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
          * so bail out early if the request is out of bound.
          */
         if (unlikely(order >= MAX_ORDER)) {
-                WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+                WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
                 return NULL;
         }
 
-        gfp_mask &= gfp_allowed_mask;
-        alloc_gfp = gfp_mask;
-        if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac,
+        gfp &= gfp_allowed_mask;
+        alloc_gfp = gfp;
+        if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
                         &alloc_gfp, &alloc_flags))
                 return NULL;
 
@@ -5041,7 +5041,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
          * Forbid the first pass from falling back to types that fragment
          * memory until all local zones are considered.
          */
-        alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
+        alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
 
         /* First allocation attempt */
         page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
@@ -5054,7 +5054,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
          * from a particular context which has been marked by
          * memalloc_no{fs,io}_{save,restore}.
          */
-        alloc_gfp = current_gfp_context(gfp_mask);
+        alloc_gfp = current_gfp_context(gfp);
         ac.spread_dirty_pages = false;
 
         /*
@@ -5066,8 +5066,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
         page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
 
 out:
-        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
-            unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
+        if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
+            unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
                 __free_pages(page, order);
                 page = NULL;
         }
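The rename is confined to the definition of __alloc_pages_nodemask(), so call sites elsewhere in the tree are untouched. As a minimal sketch (the wrapper grab_pages() below is hypothetical, invented only to illustrate the call), a caller keeps passing GFP flags exactly as before:

#include <linux/gfp.h>
#include <linux/topology.h>

/*
 * Hypothetical caller, for illustration only: the gfp_mask -> gfp rename is
 * internal to the allocator, so this call compiles unchanged before and
 * after the patch.
 */
static struct page *grab_pages(unsigned int order)
{
        /*
         * Allocate 2^order contiguous pages on the local node; __GFP_NOWARN
         * also suppresses the WARN_ON_ONCE() for out-of-range orders.
         */
        return __alloc_pages_nodemask(GFP_KERNEL | __GFP_NOWARN, order,
                                      numa_node_id(), NULL);
}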