author		Mel Gorman <mgorman@suse.de>	2012-07-31 16:44:10 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 18:42:45 -0700
commit		cfd19c5a9ecf8e5e38de2603077c4330af21316e (patch)
tree		5ecf3f51012676c60d12e78ad5a762597ec26385 /mm
parent		907aed48f65efeecf91575397e3d79335d93a466 (diff)
mm: only set page->pfmemalloc when ALLOC_NO_WATERMARKS was used
__alloc_pages_slowpath() is called when the number of free pages is below the low watermark. If the caller is entitled to use ALLOC_NO_WATERMARKS then the page will be marked page->pfmemalloc. This protects more pages than are strictly necessary as we only need to protect pages allocated below the min watermark (the pfmemalloc reserves).

This patch only sets page->pfmemalloc when ALLOC_NO_WATERMARKS was required to allocate the page.

[rientjes@google.com: David noticed the problem during review]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: David Miller <davem@davemloft.net>
Cc: Neil Brown <neilb@suse.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	27
1 file changed, 14 insertions, 13 deletions
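For orientation before reading the hunks, here is a simplified model of the behavioural change in plain C. It is a sketch only, not the kernel code: slowpath_alloc(), try_freelist() and the cut-down struct page are stand-ins for __alloc_pages_slowpath(), get_page_from_freelist() and the real struct page, and the flag value is made up. The point it illustrates is that the retry paths now mask out ALLOC_NO_WATERMARKS, so only a page that genuinely needed the reserves gets tagged pfmemalloc.

/*
 * Simplified model of the change, for illustration only.  The names and
 * the flag value below are stand-ins, not kernel definitions.
 */
#include <stdbool.h>
#include <stddef.h>

#define ALLOC_NO_WATERMARKS 0x04	/* illustrative value only */

struct page {
	bool pfmemalloc;
};

/* Stand-in freelist allocator: returns NULL when nothing can be taken. */
static struct page *try_freelist(unsigned int alloc_flags)
{
	(void)alloc_flags;
	return NULL;	/* pretend the watermark-respecting pass found nothing */
}

static struct page *slowpath_alloc(unsigned int alloc_flags)
{
	struct page *page;

	/*
	 * Retry paths (compaction, direct reclaim) now strip
	 * ALLOC_NO_WATERMARKS, so a page found here was taken above the
	 * min watermark and is not tagged pfmemalloc.
	 */
	page = try_freelist(alloc_flags & ~ALLOC_NO_WATERMARKS);
	if (page)
		return page;

	/* Only the genuine no-watermark path taints the page. */
	if (alloc_flags & ALLOC_NO_WATERMARKS) {
		page = try_freelist(alloc_flags);
		if (page)
			page->pfmemalloc = true;
	}

	return page;
}

Previously the tag was applied once at got_pg for any caller whose alloc_flags included ALLOC_NO_WATERMARKS, even when the page was actually obtained by a watermark-respecting retry; the hunks below move the assignment to the high-priority path only.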
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cd5390f2f18d..f9d925451bfd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2116,8 +2116,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 		page = get_page_from_freelist(gfp_mask, nodemask,
 				order, zonelist, high_zoneidx,
-				alloc_flags, preferred_zone,
-				migratetype);
+				alloc_flags & ~ALLOC_NO_WATERMARKS,
+				preferred_zone, migratetype);
 		if (page) {
 			preferred_zone->compact_considered = 0;
 			preferred_zone->compact_defer_shift = 0;
@@ -2209,8 +2209,8 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 retry:
 	page = get_page_from_freelist(gfp_mask, nodemask, order,
 					zonelist, high_zoneidx,
-					alloc_flags, preferred_zone,
-					migratetype);
+					alloc_flags & ~ALLOC_NO_WATERMARKS,
+					preferred_zone, migratetype);
 
 	/*
 	 * If an allocation failed after direct reclaim, it could be because
@@ -2381,8 +2381,17 @@ rebalance:
 		page = __alloc_pages_high_priority(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
-		if (page)
+		if (page) {
+			/*
+			 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
+			 * necessary to allocate the page. The expectation is
+			 * that the caller is taking steps that will free more
+			 * memory. The caller should avoid the page being used
+			 * for !PFMEMALLOC purposes.
+			 */
+			page->pfmemalloc = true;
 			goto got_pg;
+		}
 	}
 
 	/* Atomic allocations - we can't balance anything */
@@ -2499,14 +2508,6 @@ nopage:
 	warn_alloc_failed(gfp_mask, order, NULL);
 	return page;
 got_pg:
-	/*
-	 * page->pfmemalloc is set when the caller had PFMEMALLOC set, is
-	 * been OOM killed or specified __GFP_MEMALLOC. The expectation is
-	 * that the caller is taking steps that will free more memory. The
-	 * caller should avoid the page being used for !PFMEMALLOC purposes.
-	 */
-	page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
-
 	if (kmemcheck_enabled)
 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
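As a closing note on what the tag is for (the consumer side is not part of this patch): code that caches pages is expected to reserve pfmemalloc-tagged pages for callers that are themselves helping to free memory. A minimal sketch of that policy follows, with hypothetical names (may_use_page(), struct request) that do not correspond to any kernel API.

#include <stdbool.h>

struct page {
	bool pfmemalloc;
};

/* Hypothetical caller descriptor: whether it may dip into the reserves. */
struct request {
	bool has_memalloc_rights;
};

/*
 * Policy the pfmemalloc tag exists to support: a normal page may serve
 * any request, but a page taken from below the min watermark should only
 * serve callers that are working to free memory.
 */
static bool may_use_page(const struct request *req, const struct page *page)
{
	if (!page->pfmemalloc)
		return true;
	return req->has_memalloc_rights;
}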