author	Mel Gorman <mgorman@techsingularity.net>	2016-07-28 15:46:26 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-28 16:07:41 -0700
commit	e1a556374abc0dbcc3815ba9b5a1ac2a082f23d9 (patch)
tree	ba381015cdde7722351d4a15f6f07e8278654cd0 /mm
parent	c4a25635b60d08853a3e4eaae3ab34419a36cfa2 (diff)
mm, vmscan: only wakeup kswapd once per node for the requested classzone
kswapd is woken when zones are below the low watermark, but the wakeup decision does not take the classzone into account. Now that reclaim is node-based, kswapd only needs to be woken once per node, and only if all zones are unbalanced for the requested classzone.

Note that one node might be checked multiple times if the zonelist is ordered by node, because there is no cheap way of tracking which nodes have already been visited. For zone-ordering, each node should be checked only once.

Link: http://lkml.kernel.org/r/1467970510-21195-22-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
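As an illustration of the policy described above, here is a minimal userspace sketch. Everything in it (struct node_model, should_wake(), the sample zonelist) is a hypothetical stand-in for the kernel's pg_data_t, zone_balanced() and zonelist machinery, not kernel API. It only models the walk one allocation performs: visit the zonelist, skip consecutive entries from the same node, and wake a node's kswapd only when every populated zone up to the classzone is unbalanced.

#include <stdbool.h>
#include <stdio.h>

#define MAX_ZONES 4

/* Simplified stand-in for pg_data_t: per-node zone state. */
struct node_model {
	int id;
	bool populated[MAX_ZONES];
	bool balanced[MAX_ZONES];	/* stand-in for zone_balanced() */
};

/* Wake only if every populated zone up to classzone_idx is unbalanced. */
static bool should_wake(const struct node_model *n, int classzone_idx)
{
	for (int z = 0; z <= classzone_idx; z++) {
		if (!n->populated[z])
			continue;
		if (n->balanced[z])
			return false;	/* one balanced zone cancels the wakeup */
	}
	return true;
}

int main(void)
{
	struct node_model nodes[2] = {
		{ 0, { true, true }, { false, true  } },	/* zone 1 balanced */
		{ 1, { true, true }, { false, false } },	/* all unbalanced */
	};
	/* Node-ordered zonelist: both zones of node 0, then node 1. */
	int zonelist[] = { 0, 0, 1, 1 };
	int last_node = -1, classzone_idx = 1;

	for (int i = 0; i < 4; i++) {
		int nid = zonelist[i];
		if (nid == last_node)
			continue;	/* mirrors the last_pgdat check below */
		last_node = nid;
		if (should_wake(&nodes[nid], classzone_idx))
			printf("wake kswapd on node %d\n", nid);	/* node 1 only */
	}
	return 0;
}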
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	8
-rw-r--r--	mm/vmscan.c	13
2 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0f92e04b58db..a34d9fcf1339 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3367,10 +3367,14 @@ static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
{
	struct zoneref *z;
	struct zone *zone;
+	pg_data_t *last_pgdat = NULL;

	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
-					ac->high_zoneidx, ac->nodemask)
-		wakeup_kswapd(zone, order, ac_classzone_idx(ac));
+					ac->high_zoneidx, ac->nodemask) {
+		if (last_pgdat != zone->zone_pgdat)
+			wakeup_kswapd(zone, order, ac_classzone_idx(ac));
+		last_pgdat = zone->zone_pgdat;
+	}
}

static inline unsigned int
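Because last_pgdat only remembers the immediately preceding entry, the deduplication above is exact when a node's zones appear consecutively in the zonelist; with an interleaved ordering the same node can reach wakeup_kswapd() several times, which is the caveat the changelog notes about there being no cheap way of tracking every node already visited. A small hypothetical model (plain node ids standing in for pg_data_t pointers) makes the difference visible:

#include <stdio.h>

/* Count wakeup_kswapd() calls under consecutive-only tracking. */
static int count_wakeups(const int *nodes, int nr)
{
	int last = -1, calls = 0;

	for (int i = 0; i < nr; i++) {
		if (nodes[i] != last)
			calls++;	/* wakeup_kswapd() would run here */
		last = nodes[i];
	}
	return calls;
}

int main(void)
{
	int grouped[]     = { 0, 0, 0, 1, 1, 1 };	/* zones adjacent per node */
	int interleaved[] = { 0, 1, 0, 1, 0, 1 };	/* nodes alternate */

	printf("grouped:     %d calls\n", count_wakeups(grouped, 6));		/* 2 */
	printf("interleaved: %d calls\n", count_wakeups(interleaved, 6));	/* 6 */
	return 0;
}

Repeat calls are cheap in any case: as the vmscan.c hunk below shows, wakeup_kswapd() returns early when nothing is waiting on kswapd_wait or when a zone up to the classzone is already balanced.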
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9b61a55b6e38..31edd7776289 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3421,6 +3421,7 @@ kswapd_try_sleep:
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
{
	pg_data_t *pgdat;
+	int z;

	if (!populated_zone(zone))
		return;
@@ -3432,8 +3433,16 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
	pgdat->kswapd_order = max(pgdat->kswapd_order, order);
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
-	if (zone_balanced(zone, order, 0))
-		return;
+
+	/* Only wake kswapd if all zones are unbalanced */
+	for (z = 0; z <= classzone_idx; z++) {
+		zone = pgdat->node_zones + z;
+		if (!populated_zone(zone))
+			continue;
+
+		if (zone_balanced(zone, order, classzone_idx))
+			return;
+	}
	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
	wake_up_interruptible(&pgdat->kswapd_wait);
}
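One consequence of bounding the loop at classzone_idx is that a node can stay quiet for a high classzone yet still warrant a wakeup for a lowmem-restricted request. The toy check below, again with assumed stand-ins for populated_zone() and zone_balanced() rather than the real kernel helpers, demonstrates both that and the skipping of unpopulated zones:

#include <stdbool.h>
#include <stdio.h>

/* Per-zone state of one node (model only, not kernel data). */
static const bool populated[] = { true, false, true };
static const bool balanced[]  = { false, false, true };

/* Mirrors the new loop: scan zones 0..classzone_idx, any balanced
 * populated zone cancels the wakeup; holes are skipped. */
static bool should_wake(int classzone_idx)
{
	for (int z = 0; z <= classzone_idx; z++) {
		if (!populated[z])
			continue;
		if (balanced[z])
			return false;
	}
	return true;
}

int main(void)
{
	/* Zone 2 is balanced, so a classzone-2 request stays quiet... */
	printf("classzone 2: wake=%d\n", should_wake(2));	/* 0 */
	/* ...but a lowmem-restricted request (classzone 0) still wakes. */
	printf("classzone 0: wake=%d\n", should_wake(0));	/* 1 */
	return 0;
}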