author      Baoquan He <bhe@redhat.com>    2020-04-06 20:07:00 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>    2020-04-07 10:43:40 -0700
commit      5d87255cadde243763ca22b35e01312550114167 (patch)
tree        ad9f7e4c462f974e3d09059b94e3b7c9a8d3ae4e
parent      6cdd0b30a920b35d901c8ca6b82e9ca4f44f54d6 (diff)
download    linux-5d87255cadde243763ca22b35e01312550114167.tar.bz2
mm/sparse.c: introduce new function fill_subsection_map()
Patch series "mm/hotplug: Only use subsection map for VMEMMAP", v4.

Memory sub-section hotplug was added to fix the issue that nvdimm could be mapped at a non-section-aligned starting address. A subsection map was added to struct mem_section_usage to implement it.

However, config ZONE_DEVICE depends on SPARSEMEM_VMEMMAP, so the subsection map only makes sense when SPARSEMEM_VMEMMAP is enabled. For classic sparse, the subsection map is meaningless and confusing.

As for classic sparse not supporting subsection hotplug, Dan said that is more because the effort and maintenance burden outweighs the benefit. Besides, the current 64-bit ARCHes all enable SPARSEMEM_VMEMMAP_ENABLE by default.

This patch (of 5):

Factor out the code that fills the subsection map from section_activate() into a new function, fill_subsection_map(); this makes section_activate() cleaner and easier to follow.

Signed-off-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Link: http://lkml.kernel.org/r/20200312124414.439-2-bhe@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
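For readers less familiar with the subsection map discussed above, here is a minimal, self-contained userspace sketch of the idea, not the kernel implementation: each memory section carries a small bitmap with one bit per subsection, and hot-adding a pfn range sets the bits for the subsections that range touches (the job fill_subsection_map() now does via subsection_mask_set() and bitmap_or()). The numeric constants are assumed typical x86_64 values (2 MiB subsections, 128 MiB sections), and subsection_mask() is a hypothetical stand-in helper.

/*
 * Illustrative userspace sketch only -- not kernel code.  Models how a
 * per-section subsection bitmap records which subsections of a section
 * are populated by a pfn range.  Constants are assumed x86_64 values;
 * subsection_mask() is a hypothetical stand-in for the kernel's
 * subsection_mask_set().
 */
#include <stdio.h>

#define PAGES_PER_SUBSECTION    512UL   /* 2 MiB subsection / 4 KiB page (assumed) */
#define SUBSECTIONS_PER_SECTION 64UL    /* 128 MiB section / 2 MiB subsection (assumed) */
#define PAGES_PER_SECTION       (PAGES_PER_SUBSECTION * SUBSECTIONS_PER_SECTION)

/* One bit per subsection touched by [pfn, pfn + nr_pages) within its section. */
static unsigned long subsection_mask(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long first = (pfn % PAGES_PER_SECTION) / PAGES_PER_SUBSECTION;
        unsigned long last = ((pfn + nr_pages - 1) % PAGES_PER_SECTION) / PAGES_PER_SUBSECTION;
        unsigned long mask = 0;

        for (unsigned long i = first; i <= last; i++)
                mask |= 1UL << i;
        return mask;
}

int main(void)
{
        unsigned long subsection_map = 0;       /* the per-section map being filled */

        /* "Hot-add" a 4 MiB range starting 2 MiB into the section: subsections 1-2. */
        unsigned long map = subsection_mask(512, 1024);

        if (map & subsection_map) {
                printf("-EEXIST: range overlaps already-populated subsections\n");
        } else {
                subsection_map |= map;          /* the bitmap_or() step */
                printf("subsection_map = %#lx\n", subsection_map);
        }
        return 0;
}

Built with gcc or clang, this prints subsection_map = 0x6, i.e. bits 1 and 2 set; the kernel version differs mainly in that the map lives in struct mem_section_usage and is manipulated with the bitmap_* helpers.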
-rw-r--r--    mm/sparse.c    32
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index f1af4d4ee80b..51965d56cd39 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -777,24 +777,15 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
 	ms->section_mem_map = (unsigned long)NULL;
 }
 
-static struct page * __meminit section_activate(int nid, unsigned long pfn,
-		unsigned long nr_pages, struct vmem_altmap *altmap)
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
-	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
 	struct mem_section *ms = __pfn_to_section(pfn);
-	struct mem_section_usage *usage = NULL;
+	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
 	unsigned long *subsection_map;
-	struct page *memmap;
 	int rc = 0;
 
 	subsection_mask_set(map, pfn, nr_pages);
 
-	if (!ms->usage) {
-		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
-		if (!usage)
-			return ERR_PTR(-ENOMEM);
-		ms->usage = usage;
-	}
 	subsection_map = &ms->usage->subsection_map[0];
 
 	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
@@ -805,6 +796,25 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
 		bitmap_or(subsection_map, map, subsection_map,
 				SUBSECTIONS_PER_SECTION);
 
+	return rc;
+}
+
+static struct page * __meminit section_activate(int nid, unsigned long pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap)
+{
+	struct mem_section *ms = __pfn_to_section(pfn);
+	struct mem_section_usage *usage = NULL;
+	struct page *memmap;
+	int rc = 0;
+
+	if (!ms->usage) {
+		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
+		if (!usage)
+			return ERR_PTR(-ENOMEM);
+		ms->usage = usage;
+	}
+
+	rc = fill_subsection_map(pfn, nr_pages);
 	if (rc) {
 		if (usage)
 			ms->usage = NULL;