From 19fc5bb93c6bbdce8292b4d7eed04e2fa118d2fe Mon Sep 17 00:00:00 2001
From: Nathan Lynch
Date: Thu, 23 Jun 2022 13:25:09 -0500
Subject: powerpc/xive/spapr: correct bitmap allocation size
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

kasan detects access beyond the end of the xibm->bitmap allocation:

BUG: KASAN: slab-out-of-bounds in _find_first_zero_bit+0x40/0x140
Read of size 8 at addr c00000001d1d0118 by task swapper/0/1

CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-rc2-00001-g90df023b36dd #28
Call Trace:
[c00000001d98f770] [c0000000012baab8] dump_stack_lvl+0xac/0x108 (unreliable)
[c00000001d98f7b0] [c00000000068faac] print_report+0x37c/0x710
[c00000001d98f880] [c0000000006902c0] kasan_report+0x110/0x354
[c00000001d98f950] [c000000000692324] __asan_load8+0xa4/0xe0
[c00000001d98f970] [c0000000011c6ed0] _find_first_zero_bit+0x40/0x140
[c00000001d98f9b0] [c0000000000dbfbc] xive_spapr_get_ipi+0xcc/0x260
[c00000001d98fa70] [c0000000000d6d28] xive_setup_cpu_ipi+0x1e8/0x450
[c00000001d98fb30] [c000000004032a20] pSeries_smp_probe+0x5c/0x118
[c00000001d98fb60] [c000000004018b44] smp_prepare_cpus+0x944/0x9ac
[c00000001d98fc90] [c000000004009f9c] kernel_init_freeable+0x2d4/0x640
[c00000001d98fd90] [c0000000000131e8] kernel_init+0x28/0x1d0
[c00000001d98fe10] [c00000000000cd54] ret_from_kernel_thread+0x5c/0x64

Allocated by task 0:
 kasan_save_stack+0x34/0x70
 __kasan_kmalloc+0xb4/0xf0
 __kmalloc+0x268/0x540
 xive_spapr_init+0x4d0/0x77c
 pseries_init_irq+0x40/0x27c
 init_IRQ+0x44/0x84
 start_kernel+0x2a4/0x538
 start_here_common+0x1c/0x20

The buggy address belongs to the object at c00000001d1d0118
 which belongs to the cache kmalloc-8 of size 8
The buggy address is located 0 bytes inside of
 8-byte region [c00000001d1d0118, c00000001d1d0120)

The buggy address belongs to the physical page:
page:c00c000000074740 refcount:1 mapcount:0 mapping:0000000000000000 index:0xc00000001d1d0558 pfn:0x1d1d
flags: 0x7ffff000000200(slab|node=0|zone=0|lastcpupid=0x7ffff)
raw: 007ffff000000200 c00000001d0003c8 c00000001d0003c8 c00000001d010480
raw: c00000001d1d0558 0000000001e1000a 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected

Memory state around the buggy address:
 c00000001d1d0000: fc 00 fc fc fc fc fc fc fc fc fc fc fc fc fc fc
 c00000001d1d0080: fc fc 00 fc fc fc fc fc fc fc fc fc fc fc fc fc
>c00000001d1d0100: fc fc fc 02 fc fc fc fc fc fc fc fc fc fc fc fc
                            ^
 c00000001d1d0180: fc fc fc fc 04 fc fc fc fc fc fc fc fc fc fc fc
 c00000001d1d0200: fc fc fc fc fc 04 fc fc fc fc fc fc fc fc fc fc

This happens because the allocation uses the wrong unit (bits) when it
should pass (BITS_TO_LONGS(count) * sizeof(long)) or equivalent. With
small numbers of bits, the allocated object can be smaller than
sizeof(long), which results in invalid accesses.

Use bitmap_zalloc() to allocate and initialize the irq bitmap, paired
with bitmap_free() for consistency.

Signed-off-by: Nathan Lynch
Reviewed-by: Cédric Le Goater
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20220623182509.3985625-1-nathanl@linux.ibm.com
---
 arch/powerpc/sysdev/xive/spapr.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index 7d5128676e83..d02911e78cfc 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -57,7 +58,7 @@ static int __init xive_irq_bitmap_add(int base, int count)
 	spin_lock_init(&xibm->lock);
 	xibm->base = base;
 	xibm->count = count;
-	xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
+	xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL);
 	if (!xibm->bitmap) {
 		kfree(xibm);
 		return -ENOMEM;
@@ -75,7 +76,7 @@ static void xive_irq_bitmap_remove_all(void)
 
 	list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) {
 		list_del(&xibm->list);
-		kfree(xibm->bitmap);
+		bitmap_free(xibm->bitmap);
 		kfree(xibm);
 	}
 }
-- 
cgit v1.2.3
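
A small standalone sketch of the sizing error fixed above (userspace C,
not kernel code; BITS_PER_LONG and BITS_TO_LONGS are re-derived locally
for illustration) contrasting the byte count kzalloc() was given with
the byte count the bitmap helpers actually dereference:

  #include <limits.h>
  #include <stdio.h>

  #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
  #define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

  int main(void)
  {
          unsigned long count = 2;  /* a small irq bitmap, counted in bits */

          /* Buggy sizing: "count" bits handed to kzalloc() as if it were bytes. */
          printf("kzalloc(count)       -> %lu bytes\n", count);

          /* Correct sizing: whole longs, as _find_first_zero_bit() reads them. */
          printf("bitmap_zalloc(count) -> %zu bytes\n",
                 (size_t)(BITS_TO_LONGS(count) * sizeof(unsigned long)));
          return 0;
  }

Whenever count is smaller than BITS_PER_LONG, the buggy allocation is
smaller than the 8-byte word _find_first_zero_bit() loads on 64-bit,
which is exactly the out-of-bounds read KASAN flags in the splat above.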

From 986481618023e18e187646b0fff05a3c337531cb Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Thu, 23 Jun 2022 10:56:17 +0200
Subject: powerpc/book3e: Fix PUD allocation size in map_kernel_page()

Commit 2fb4706057bc ("powerpc: add support for folded p4d page tables")
erroneously changed PUD setup to a mix of PMD and PUD. Fix it.

While at it, use PTE_TABLE_SIZE instead of PAGE_SIZE for PTE tables in
order to avoid any confusion.

Fixes: 2fb4706057bc ("powerpc: add support for folded p4d page tables")
Cc: stable@vger.kernel.org # v5.8+
Signed-off-by: Christophe Leroy
Acked-by: Mike Rapoport
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/95ddfd6176d53e6c85e13bd1c358359daa56775f.1655974558.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/mm/nohash/book3e_pgtable.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c
index 7d4368d055a6..b80fc4a91a53 100644
--- a/arch/powerpc/mm/nohash/book3e_pgtable.c
+++ b/arch/powerpc/mm/nohash/book3e_pgtable.c
@@ -96,8 +96,8 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 	pgdp = pgd_offset_k(ea);
 	p4dp = p4d_offset(pgdp, ea);
 	if (p4d_none(*p4dp)) {
-		pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
-		p4d_populate(&init_mm, p4dp, pmdp);
+		pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
+		p4d_populate(&init_mm, p4dp, pudp);
 	}
 	pudp = pud_offset(p4dp, ea);
 	if (pud_none(*pudp)) {
@@ -106,7 +106,7 @@
 	}
 	pmdp = pmd_offset(pudp, ea);
 	if (!pmd_present(*pmdp)) {
-		ptep = early_alloc_pgtable(PAGE_SIZE);
+		ptep = early_alloc_pgtable(PTE_TABLE_SIZE);
 		pmd_populate_kernel(&init_mm, pmdp, ptep);
 	}
 	ptep = pte_offset_kernel(pmdp, ea);
-- 
cgit v1.2.3
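
For context on the fix above: each level of the walk allocates the table
for the level below it, so the allocation size has to match that level's
table size, and the three sizes are generally different. A rough
userspace sketch of how such sizes come about (the index widths below
are made-up stand-ins, not the real book3e geometry):

  #include <stdio.h>

  /* Hypothetical index widths; the real values depend on the MMU geometry. */
  #define PUD_INDEX_SIZE 7
  #define PMD_INDEX_SIZE 7
  #define PTE_INDEX_SIZE 9

  typedef unsigned long pud_t;  /* each entry points to a PMD table */
  typedef unsigned long pmd_t;  /* each entry points to a PTE table */
  typedef unsigned long pte_t;  /* each entry maps one page */

  #define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
  #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
  #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)

  int main(void)
  {
          /* p4d_populate() needs a PUD-sized table, pud_populate() a
           * PMD-sized one, pmd_populate_kernel() a PTE-sized one. */
          printf("PUD table %zu, PMD table %zu, PTE table %zu bytes\n",
                 PUD_TABLE_SIZE, PMD_TABLE_SIZE, PTE_TABLE_SIZE);
          return 0;
  }

Allocating PMD_TABLE_SIZE for a PUD table (or PAGE_SIZE for a PTE table)
only works when the sizes happen to coincide; using the matching
*_TABLE_SIZE at every level removes that accident.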

From 6886da5f49e6d86aad76807a93f3eef5e4f01b10 Mon Sep 17 00:00:00 2001
From: Liam Howlett
Date: Fri, 24 Jun 2022 01:17:58 +0000
Subject: powerpc/prom_init: Fix kernel config grep

When searching for config options, use the KCONFIG_CONFIG shell variable
so that builds using non-standard config locations work.

Fixes: 26deb04342e3 ("powerpc: prepare string/mem functions for KASAN")
Cc: stable@vger.kernel.org # v5.2+
Signed-off-by: Liam R. Howlett
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20220624011745.4060795-1-Liam.Howlett@oracle.com
---
 arch/powerpc/kernel/prom_init_check.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index b183ab9c5107..dfa5f729f774 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -13,7 +13,7 @@
 # If you really need to reference something from prom_init.o add
 # it to the list below:
 
-grep "^CONFIG_KASAN=y$" .config >/dev/null
+grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null
 if [ $? -eq 0 ]
 then
 	MEM_FUNCS="__memcpy __memset"
-- 
cgit v1.2.3

From b21bd5a4b130f8370861478d2880985daace5913 Mon Sep 17 00:00:00 2001
From: "Naveen N. Rao"
Date: Tue, 28 Jun 2022 00:41:19 +0530
Subject: powerpc/bpf: Fix use of user_pt_regs in uapi
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Trying to build a .c file that includes <linux/bpf_perf_event.h>:

  $ cat test_bpf_headers.c
  #include <linux/bpf_perf_event.h>

throws the below error:

  /usr/include/linux/bpf_perf_event.h:14:28: error: field ‘regs’ has incomplete type
     14 |         bpf_user_pt_regs_t regs;
        |                            ^~~~

This is because we typedef bpf_user_pt_regs_t to 'struct user_pt_regs'
in arch/powerpc/include/uapi/asm/bpf_perf_event.h, but 'struct
user_pt_regs' is not exposed to userspace.

Powerpc has both pt_regs and user_pt_regs structures. However, unlike
arm64 and s390, we expose user_pt_regs to userspace as just 'pt_regs'.
As such, we should typedef bpf_user_pt_regs_t to 'struct pt_regs' for
userspace.

Within the kernel though, we want to typedef bpf_user_pt_regs_t to
'struct user_pt_regs'.

Remove arch/powerpc/include/uapi/asm/bpf_perf_event.h so that the
uapi/asm-generic version of the header is exposed to userspace.
Introduce arch/powerpc/include/asm/bpf_perf_event.h so that we can
typedef bpf_user_pt_regs_t to 'struct user_pt_regs' for use within the
kernel.

Note that this was not showing up with the bpf selftest build since
tools/include/uapi/asm/bpf_perf_event.h didn't include the powerpc
variant.

Fixes: a6460b03f945ee ("powerpc/bpf: Fix broken uapi for BPF_PROG_TYPE_PERF_EVENT")
Cc: stable@vger.kernel.org # v4.20+
Signed-off-by: Naveen N. Rao
[mpe: Use typical naming for header include guard]
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20220627191119.142867-1-naveen.n.rao@linux.vnet.ibm.com
---
 arch/powerpc/include/asm/bpf_perf_event.h      | 9 +++++++++
 arch/powerpc/include/uapi/asm/bpf_perf_event.h | 9 ---------
 2 files changed, 9 insertions(+), 9 deletions(-)
 create mode 100644 arch/powerpc/include/asm/bpf_perf_event.h
 delete mode 100644 arch/powerpc/include/uapi/asm/bpf_perf_event.h

diff --git a/arch/powerpc/include/asm/bpf_perf_event.h b/arch/powerpc/include/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..e8a7b4ffb58c
--- /dev/null
+++ b/arch/powerpc/include/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BPF_PERF_EVENT_H
+#define _ASM_POWERPC_BPF_PERF_EVENT_H
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _ASM_POWERPC_BPF_PERF_EVENT_H */
diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
deleted file mode 100644
index 5e1e648aeec4..000000000000
--- a/arch/powerpc/include/uapi/asm/bpf_perf_event.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
-#define _UAPI__ASM_BPF_PERF_EVENT_H__
-
-#include <asm/ptrace.h>
-
-typedef struct user_pt_regs bpf_user_pt_regs_t;
-
-#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
-- 
cgit v1.2.3
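
A quick userspace check of the effect described above (assuming
sanitized kernel headers containing this fix are installed under
/usr/include): with the powerpc uapi override gone, the asm-generic
header is used, bpf_user_pt_regs_t resolves to the exported struct
pt_regs, and the 'regs' field becomes a complete type:

  #include <stdio.h>
  #include <linux/bpf_perf_event.h>

  int main(void)
  {
          struct bpf_perf_event_data data = { 0 };

          /* Against pre-fix headers this fails with "field 'regs' has
           * incomplete type"; with the fix it compiles and runs. */
          printf("sizeof(data.regs) = %zu\n", sizeof(data.regs));
          return 0;
  }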

From ac790d09885d36143076e7e02825c541e8eee899 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Wed, 29 Jun 2022 10:39:25 +0530
Subject: powerpc/memhotplug: Add add_pages override for PPC

With commit ffa0b64e3be5 ("powerpc: Fix virt_addr_valid() for 64-bit
Book3E & 32-bit") the kernel now validates the addr against the
high_memory value. This results in the below BUG_ON with dax pfns.

[  635.798741][T26531] kernel BUG at mm/page_alloc.c:5521!
1:mon> e
cpu 0x1: Vector: 700 (Program Check) at [c000000007287630]
    pc: c00000000055ed48: free_pages.part.0+0x48/0x110
    lr: c00000000053ca70: tlb_finish_mmu+0x80/0xd0
    sp: c0000000072878d0
   msr: 800000000282b033
  current = 0xc00000000afabe00
  paca    = 0xc00000037ffff300   irqmask: 0x03   irq_happened: 0x05
    pid   = 26531, comm = 50-landscape-sy
kernel BUG at :5521!
Linux version 5.19.0-rc3-14659-g4ec05be7c2e1 (kvaneesh@ltc-boston8) (gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0, GNU ld (GNU Binutils for Ubuntu) 2.34) #625 SMP Thu Jun 23 00:35:43 CDT 2022
1:mon> t
[link register   ] c00000000053ca70 tlb_finish_mmu+0x80/0xd0
[c0000000072878d0] c00000000053ca54 tlb_finish_mmu+0x64/0xd0 (unreliable)
[c000000007287900] c000000000539424 exit_mmap+0xe4/0x2a0
[c0000000072879e0] c00000000019fc1c mmput+0xcc/0x210
[c000000007287a20] c000000000629230 begin_new_exec+0x5e0/0xf40
[c000000007287ae0] c00000000070b3cc load_elf_binary+0x3ac/0x1e00
[c000000007287c10] c000000000627af0 bprm_execve+0x3b0/0xaf0
[c000000007287cd0] c000000000628414 do_execveat_common.isra.0+0x1e4/0x310
[c000000007287d80] c00000000062858c sys_execve+0x4c/0x60
[c000000007287db0] c00000000002c1b0 system_call_exception+0x160/0x2c0
[c000000007287e10] c00000000000c53c system_call_common+0xec/0x250

The fix is to make sure we update high_memory on memory hotplug.
This is similar to what x86 does in commit 3072e413e305
("mm/memory_hotplug: introduce add_pages")

Fixes: ffa0b64e3be5 ("powerpc: Fix virt_addr_valid() for 64-bit Book3E & 32-bit")
Signed-off-by: Aneesh Kumar K.V
Reviewed-by: Kefeng Wang
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20220629050925.31447-1-aneesh.kumar@linux.ibm.com
---
 arch/powerpc/Kconfig  |  4 ++++
 arch/powerpc/mm/mem.c | 33 ++++++++++++++++++++++++++++++++-
 2 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c2ce2e60c8f0..7aa12e88c580 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -358,6 +358,10 @@ config ARCH_SUSPEND_NONZERO_CPU
 	def_bool y
 	depends on PPC_POWERNV || PPC_PSERIES
 
+config ARCH_HAS_ADD_PAGES
+	def_bool y
+	depends on ARCH_ENABLE_MEMORY_HOTPLUG
+
 config PPC_DCR_NATIVE
 	bool
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 52b77684acda..a97128a48817 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -105,6 +105,37 @@ void __ref arch_remove_linear_mapping(u64 start, u64 size)
 	vm_unmap_aliases();
 }
 
+/*
+ * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
+ * updating.
+ */
+static void update_end_of_memory_vars(u64 start, u64 size)
+{
+	unsigned long end_pfn = PFN_UP(start + size);
+
+	if (end_pfn > max_pfn) {
+		max_pfn = end_pfn;
+		max_low_pfn = end_pfn;
+		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
+	}
+}
+
+int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+		    struct mhp_params *params)
+{
+	int ret;
+
+	ret = __add_pages(nid, start_pfn, nr_pages, params);
+	if (ret)
+		return ret;
+
+	/* update max_pfn, max_low_pfn and high_memory */
+	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
+				  nr_pages << PAGE_SHIFT);
+
+	return ret;
+}
+
 int __ref arch_add_memory(int nid, u64 start, u64 size,
 			  struct mhp_params *params)
 {
@@ -115,7 +146,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
 	rc = arch_create_linear_mapping(nid, start, size, params);
 	if (rc)
 		return rc;
-	rc = __add_pages(nid, start_pfn, nr_pages, params);
+	rc = add_pages(nid, start_pfn, nr_pages, params);
 	if (rc)
 		arch_remove_linear_mapping(start, size);
 	return rc;
-- 
cgit v1.2.3
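
The bookkeeping add_pages() introduces can be illustrated with plain
arithmetic. A userspace sketch of what update_end_of_memory_vars()
computes when memory (e.g. a dax/kmem region) is hotplugged past the
old end of RAM; 64K pages are assumed here only to make the numbers
concrete, and PFN_UP is re-derived locally:

  #include <stdio.h>

  #define PAGE_SHIFT 16                 /* assumed: 64K pages */
  #define PAGE_SIZE  (1UL << PAGE_SHIFT)
  #define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

  int main(void)
  {
          /* Boot-time RAM ends at 8 GiB: max_pfn covers pfns [0, 0x20000). */
          unsigned long max_pfn = PFN_UP(8UL << 30);

          /* Hotplug 2 GiB of device memory starting at the 8 GiB mark. */
          unsigned long start = 8UL << 30, size = 2UL << 30;
          unsigned long end_pfn = PFN_UP(start + size);

          if (end_pfn > max_pfn)
                  max_pfn = end_pfn;    /* 0x20000 -> 0x28000 */

          /* high_memory follows max_pfn, so virt_addr_valid() now accepts
           * addresses in the hotplugged range. */
          printf("max_pfn = %#lx, linear map now ends at %#lx\n",
                 max_pfn, max_pfn * PAGE_SIZE);
          return 0;
  }

Without that update, a free_pages() call on an address in the new range
fails the virt_addr_valid() check added by ffa0b64e3be5, which is what
trips the BUG in the oops quoted above.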