author    David Hildenbrand <david@redhat.com>            2020-06-04 16:48:38 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-06-04 19:06:23 -0700
commit    52219aeaf2dc6f7607704af2c40e3866fb04aed2 (patch)
tree      529b14014eb3c2502b28a80e5d441b19bb8f44ca
parent    c68ab18c6aee0397574afb418f6775f23379198e (diff)
download  linux-52219aeaf2dc6f7607704af2c40e3866fb04aed2.tar.bz2
mm/memory_hotplug: handle memblocks only with CONFIG_ARCH_KEEP_MEMBLOCK
The comment in add_memory_resource() is stale: hotadd_new_pgdat() will no
longer call get_pfn_range_for_nid(), as a hotadded pgdat will simply span no
pages at all, until memory is moved to the zone/node via
move_pfn_range_to_zone() - e.g., when onlining memory blocks.

The only archs that care about memblocks for hotplugged memory (either for
iterating over all system RAM or testing for memory validity) are arm64,
s390x, and powerpc - due to CONFIG_ARCH_KEEP_MEMBLOCK. Without
CONFIG_ARCH_KEEP_MEMBLOCK, we can simply stop messing with memblocks.

Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Link: http://lkml.kernel.org/r/20200422155353.25381-3-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
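A note on the mechanism used in the hunks below: IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)
expands to a compile-time constant 1 or 0, so on configs without ARCH_KEEP_MEMBLOCK the
gated memblock calls are provably dead code and the compiler drops them, while the source
stays free of #ifdef blocks. The following is a simplified, standalone sketch of that
preprocessor trick, loosely modeled on include/linux/kconfig.h (the tristate/module half
is omitted, which is fine for a bool option like ARCH_KEEP_MEMBLOCK); it is an
illustration, not the kernel's exact macro chain.

/* Simplified sketch of the IS_ENABLED() trick; not the kernel's full macro chain. */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
/* Extra indirection so CONFIG_* expands to its value (1) before token pasting. */
#define __is_defined(x)                 ___is_defined(x)
#define ___is_defined(val)              ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)    __take_second_arg(arg1_or_junk 1, 0)
/* Good enough for bool options like ARCH_KEEP_MEMBLOCK (no =m handling). */
#define IS_ENABLED(option)              __is_defined(option)

int main(void)
{
        if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
                printf("memblock kept after boot: update it on memory hot(un)plug\n");
        else
                printf("memblock discarded after boot: skip memblock updates\n");
        return 0;
}

Built plain, this prints the "discarded" branch; built with -DCONFIG_ARCH_KEEP_MEMBLOCK=1
the condition folds to 1, mirroring what Kconfig does when an arch selects the option.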
-rw-r--r--  mm/Kconfig            3
-rw-r--r--  mm/memory_hotplug.c  20
2 files changed, 13 insertions(+), 10 deletions(-)
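To make the shape of the change easier to follow before reading the hunks below, here is
a hypothetical, self-contained mock of the two affected paths. Every function in it is a
stub invented for illustration (the real helpers live in mm/memblock.c and
mm/memory_hotplug.c), and the gate is modeled as a plain compile-time constant, which is
effectively what IS_ENABLED() evaluates to; with the option off, an optimizing compiler
eliminates the memblock calls entirely.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK); see the sketch above. */
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
#define ARCH_KEEP_MEMBLOCK 1
#else
#define ARCH_KEEP_MEMBLOCK 0
#endif

/* Stubs standing in for the real memblock helpers (illustration only). */
static void memblock_add_node(uint64_t start, uint64_t size, int nid)
{
        printf("memblock: add [%#llx-%#llx) to node %d\n",
               (unsigned long long)start, (unsigned long long)(start + size), nid);
}

static void memblock_free(uint64_t start, uint64_t size)
{
        printf("memblock: free [%#llx-%#llx)\n",
               (unsigned long long)start, (unsigned long long)(start + size));
}

static void memblock_remove(uint64_t start, uint64_t size)
{
        printf("memblock: remove [%#llx-%#llx)\n",
               (unsigned long long)start, (unsigned long long)(start + size));
}

/* Hot-add path: only mirror the range into memblock if the arch keeps it. */
static void mock_add_memory(int nid, uint64_t start, uint64_t size)
{
        if (ARCH_KEEP_MEMBLOCK)
                memblock_add_node(start, size, nid);
        /* arch_add_memory(), resource and node handling elided */
}

/* Hot-remove path: mirror image of the add path. */
static void mock_remove_memory(int nid, uint64_t start, uint64_t size)
{
        (void)nid;      /* arch_remove_memory() and friends elided */
        if (ARCH_KEEP_MEMBLOCK) {
                memblock_free(start, size);
                memblock_remove(start, size);
        }
}

int main(void)
{
        /* Pretend to hot(un)plug a 128 MiB block at 4 GiB on node 0. */
        mock_add_memory(0, 0x100000000ULL, 128ULL << 20);
        mock_remove_memory(0, 0x100000000ULL, 128ULL << 20);
        return 0;
}

The point of the patch is exactly this symmetry: the hot-add and hot-remove paths only
touch memblock when CONFIG_ARCH_KEEP_MEMBLOCK says the arch still consumes memblock data
after early boot.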
diff --git a/mm/Kconfig b/mm/Kconfig
index e3490ecac839..5b28240d2af8 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -133,6 +133,9 @@ config HAVE_FAST_GUP
depends on MMU
bool
+# Don't discard allocated memory used to track "memory" and "reserved" memblocks
+# after early boot, so it can still be used to test for validity of memory.
+# Also, memblocks are updated with memory hot(un)plug.
config ARCH_KEEP_MEMBLOCK
bool
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ee3dcb5ed945..21bc3363a829 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1021,13 +1021,8 @@ int __ref add_memory_resource(int nid, struct resource *res)
mem_hotplug_begin();
- /*
- * Add new range to memblock so that when hotadd_new_pgdat() is called
- * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
- * this new range and calculate total pages correctly. The range will
- * be removed at hot-remove time.
- */
- memblock_add_node(start, size, nid);
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
+ memblock_add_node(start, size, nid);
ret = __try_online_node(nid, false);
if (ret < 0)
@@ -1076,7 +1071,8 @@ error:
/* rollback pgdat allocation and others */
if (new_node)
rollback_node_hotadd(nid);
- memblock_remove(start, size);
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
+ memblock_remove(start, size);
mem_hotplug_done();
return ret;
}
@@ -1673,8 +1669,12 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
mem_hotplug_begin();
arch_remove_memory(nid, start, size, NULL);
- memblock_free(start, size);
- memblock_remove(start, size);
+
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
+ memblock_free(start, size);
+ memblock_remove(start, size);
+ }
+
__release_memory_resource(start, size);
try_offline_node(nid);