author    Linus Torvalds <torvalds@linux-foundation.org>  2022-07-02 09:11:44 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-07-02 09:11:44 -0700
commit    5411de073362300d99bb35d46d77d656760e4606 (patch)
tree      ce47af6c8c7706f7a57050a2d9607cdd75f52aaa /arch/powerpc/mm/mem.c
parent    089866061428ec9bf67221247c936792078c41a4 (diff)
parent    ac790d09885d36143076e7e02825c541e8eee899 (diff)
download  linux-5411de073362300d99bb35d46d77d656760e4606.tar.bz2
Merge tag 'powerpc-5.19-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:

 - Fix BPF uapi confusion about the correct type of bpf_user_pt_regs_t.

 - Fix virt_addr_valid() when memory is hotplugged above the boot-time
   high_memory value.

 - Fix a bug in 64-bit Book3E map_kernel_page() which would incorrectly
   allocate a PMD page at PUD level.

 - Fix a couple of minor issues found since we enabled KASAN for 64-bit
   Book3S.

Thanks to Aneesh Kumar K.V, Cédric Le Goater, Christophe Leroy, Kefeng
Wang, Liam Howlett, Nathan Lynch, and Naveen N. Rao.

* tag 'powerpc-5.19-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/memhotplug: Add add_pages override for PPC
  powerpc/bpf: Fix use of user_pt_regs in uapi
  powerpc/prom_init: Fix kernel config grep
  powerpc/book3e: Fix PUD allocation size in map_kernel_page()
  powerpc/xive/spapr: correct bitmap allocation size
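As background for the virt_addr_valid() item above, the following is a minimal
user-space sketch (not kernel code) of the problem the add_pages override in
this diff addresses: when memory is hotplugged above the boot-time high_memory,
a "below the end of memory" style check keeps rejecting addresses in the new
range until max_pfn/high_memory are pushed out, which is what
update_end_of_memory_vars() does in the patch below. The PAGE_SHIFT value, the
addr_valid() helper and the high_memory_end variable are illustrative
assumptions, not the kernel's actual definitions.

    /*
     * User-space sketch only: simplified stand-ins for the kernel's
     * end-of-memory bookkeeping, to show why it must follow hotplug.
     */
    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 16UL                      /* 64K pages, common on ppc64; an assumption here */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    static uint64_t max_pfn;                     /* stand-in for the kernel global */
    static uint64_t high_memory_end;             /* end of "mapped" memory, as a plain offset */

    /* Simplified stand-in for a virt_addr_valid()-style check. */
    static bool addr_valid(uint64_t offset)
    {
            return offset < high_memory_end;
    }

    /* Mirrors the shape of update_end_of_memory_vars() in the patch below. */
    static void update_end_of_memory_vars(uint64_t start, uint64_t size)
    {
            uint64_t end_pfn = PFN_UP(start + size);

            if (end_pfn > max_pfn) {
                    max_pfn = end_pfn;
                    high_memory_end = max_pfn * PAGE_SIZE;
            }
    }

    int main(void)
    {
            uint64_t boot_mem = 4ULL << 30;      /* 4 GiB present at boot */
            uint64_t hotplug_start = boot_mem;   /* hotplug 1 GiB right above it */
            uint64_t hotplug_size = 1ULL << 30;

            max_pfn = PFN_UP(boot_mem);
            high_memory_end = max_pfn * PAGE_SIZE;

            uint64_t probe = hotplug_start + PAGE_SIZE;              /* address in the new range */
            printf("before update: valid=%d\n", addr_valid(probe));  /* 0: wrongly rejected */

            update_end_of_memory_vars(hotplug_start, hotplug_size);
            printf("after update:  valid=%d\n", addr_valid(probe));  /* 1 */

            return 0;
    }

In the real kernel the update happens from add_pages() right after
__add_pages() succeeds, so every caller that adds pages gets the new end of
memory for free.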
Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r--  arch/powerpc/mm/mem.c | 33
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 52b77684acda..a97128a48817 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -105,6 +105,37 @@ void __ref arch_remove_linear_mapping(u64 start, u64 size)
         vm_unmap_aliases();
 }
 
+/*
+ * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
+ * updating.
+ */
+static void update_end_of_memory_vars(u64 start, u64 size)
+{
+        unsigned long end_pfn = PFN_UP(start + size);
+
+        if (end_pfn > max_pfn) {
+                max_pfn = end_pfn;
+                max_low_pfn = end_pfn;
+                high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
+        }
+}
+
+int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+                    struct mhp_params *params)
+{
+        int ret;
+
+        ret = __add_pages(nid, start_pfn, nr_pages, params);
+        if (ret)
+                return ret;
+
+        /* update max_pfn, max_low_pfn and high_memory */
+        update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
+                                  nr_pages << PAGE_SHIFT);
+
+        return ret;
+}
+
 int __ref arch_add_memory(int nid, u64 start, u64 size,
                           struct mhp_params *params)
 {
@@ -115,7 +146,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
         rc = arch_create_linear_mapping(nid, start, size, params);
         if (rc)
                 return rc;
-        rc = __add_pages(nid, start_pfn, nr_pages, params);
+        rc = add_pages(nid, start_pfn, nr_pages, params);
         if (rc)
                 arch_remove_linear_mapping(start, size);
         return rc;