From fda7409a8fcfa457814f8186f2861a9f00008e75 Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Mon, 8 Aug 2022 17:32:05 +0800
Subject: irqchip/loongson-pch-pic: Move find_pch_pic() into CONFIG_ACPI

MIPS doesn't declare find_pch_pic(), which causes a build warning:

>> drivers/irqchip/irq-loongson-pch-pic.c:51:5: warning: no previous
   prototype for function 'find_pch_pic' [-Wmissing-prototypes]
   int find_pch_pic(u32 gsi)
       ^
   drivers/irqchip/irq-loongson-pch-pic.c:51:1: note: declare 'static'
   if the function is not intended to be used outside of this
   translation unit
   int find_pch_pic(u32 gsi)
   ^
   static
   1 warning generated.

Move find_pch_pic() inside CONFIG_ACPI, since it is only used by
LoongArch, to fix the warning. While at it, remove the duplicated
declaration of find_pch_pic() in irq.h.

Reported-by: kernel test robot
Signed-off-by: Huacai Chen
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220808093205.3658485-1-chenhuacai@loongson.cn
---
 arch/loongarch/include/asm/irq.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/loongarch/include/asm')

diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index 149b2123e7f4..093aee99167d 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -81,7 +81,6 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
 #define GSI_MIN_PCH_IRQ		LOONGSON_PCH_IRQ_BASE
 #define GSI_MAX_PCH_IRQ		(LOONGSON_PCH_IRQ_BASE + 256 - 1)
 
-extern int find_pch_pic(u32 gsi);
 extern int eiointc_get_node(int id);
 
 struct acpi_madt_lio_pic;
--
cgit v1.2.3
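The driver-side half of this change is not shown above (this listing is
limited to arch/loongarch/include/asm). The fix is simply to fence the
ACPI-only helper with CONFIG_ACPI; a sketch of the resulting pattern,
with the lookup body abridged:

/* drivers/irqchip/irq-loongson-pch-pic.c -- sketch, body abridged */
#ifdef CONFIG_ACPI
int find_pch_pic(u32 gsi)
{
	/*
	 * ... walk the registered PCH PIC instances and return the one
	 * whose GSI range contains 'gsi', or -1 if none matches ...
	 */
	return -1;
}
#endif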
From da48b67cfb6b4f115ae652dd5995c56fe2a2cf9b Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Thu, 25 Aug 2022 19:34:59 +0800
Subject: LoongArch: Cleanup reset routines with new API

Clean up the reset routines by using the new do_kernel_power_off()
instead of the old pm_power_off(), and simplify the organization of the
whole file (reset.c) by inlining some functions. This cleanup also
fixes a poweroff error when EFI runtime is disabled.

Signed-off-by: Huacai Chen
---
 arch/loongarch/include/asm/reboot.h | 10 ------
 arch/loongarch/kernel/reset.c       | 69 +++++++++++--------------------------
 2 files changed, 21 insertions(+), 58 deletions(-)
 delete mode 100644 arch/loongarch/include/asm/reboot.h

(limited to 'arch/loongarch/include/asm')

diff --git a/arch/loongarch/include/asm/reboot.h b/arch/loongarch/include/asm/reboot.h
deleted file mode 100644
index 51151749d8f0..000000000000
--- a/arch/loongarch/include/asm/reboot.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _ASM_REBOOT_H
-#define _ASM_REBOOT_H
-
-extern void (*pm_restart)(void);
-
-#endif /* _ASM_REBOOT_H */
diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
index 800c965a17ea..8c82021eb2f4 100644
--- a/arch/loongarch/kernel/reset.c
+++ b/arch/loongarch/kernel/reset.c
@@ -15,10 +15,16 @@
 #include
 #include
 #include
-#include <asm/reboot.h>
 
-static void default_halt(void)
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_halt(void)
 {
+#ifdef CONFIG_SMP
+	preempt_disable();
+	smp_send_stop();
+#endif
 	local_irq_disable();
 	clear_csr_ecfg(ECFG0_IM);
 
@@ -30,18 +36,29 @@ static void default_halt(void)
 	}
 }
 
-static void default_poweroff(void)
+void machine_power_off(void)
 {
+#ifdef CONFIG_SMP
+	preempt_disable();
+	smp_send_stop();
+#endif
+	do_kernel_power_off();
 #ifdef CONFIG_EFI
 	efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
 #endif
+
 	while (true) {
 		__arch_cpu_idle();
 	}
 }
 
-static void default_restart(void)
+void machine_restart(char *command)
 {
+#ifdef CONFIG_SMP
+	preempt_disable();
+	smp_send_stop();
+#endif
+	do_kernel_restart(command);
 #ifdef CONFIG_EFI
 	if (efi_capsule_pending(NULL))
 		efi_reboot(REBOOT_WARM, NULL);
@@ -55,47 +72,3 @@
 		__arch_cpu_idle();
 	}
 }
-
-void (*pm_restart)(void);
-EXPORT_SYMBOL(pm_restart);
-
-void (*pm_power_off)(void);
-EXPORT_SYMBOL(pm_power_off);
-
-void machine_halt(void)
-{
-#ifdef CONFIG_SMP
-	preempt_disable();
-	smp_send_stop();
-#endif
-	default_halt();
-}
-
-void machine_power_off(void)
-{
-#ifdef CONFIG_SMP
-	preempt_disable();
-	smp_send_stop();
-#endif
-	pm_power_off();
-}
-
-void machine_restart(char *command)
-{
-#ifdef CONFIG_SMP
-	preempt_disable();
-	smp_send_stop();
-#endif
-	do_kernel_restart(command);
-	pm_restart();
-}
-
-static int __init loongarch_reboot_setup(void)
-{
-	pm_restart = default_restart;
-	pm_power_off = default_poweroff;
-
-	return 0;
-}
-
-arch_initcall(loongarch_reboot_setup);
--
cgit v1.2.3
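do_kernel_power_off() runs every handler registered on the kernel's
sys-off chain, which is why the arch code no longer needs its own
pm_restart hook. For context, a minimal sketch of how platform code
hooks into that chain (the handler name and body are illustrative, not
part of this series):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/reboot.h>

/* Invoked by do_kernel_power_off() via the sys-off handler chain. */
static int board_power_off(struct sys_off_data *data)
{
	/* ... tell the board's power controller to cut power ... */
	return NOTIFY_DONE;
}

static int __init board_poweroff_init(void)
{
	return PTR_ERR_OR_ZERO(register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
							SYS_OFF_PRIO_DEFAULT,
							board_power_off, NULL));
}
device_initcall(board_poweroff_init);

If no registered handler actually powers the machine off,
do_kernel_power_off() returns, which is why machine_power_off() above
still falls back to efi.reset_system() and an idle loop.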
From 092e9ebe52a664f9f58e2d24136ae791fe71c6db Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Thu, 25 Aug 2022 19:34:59 +0800
Subject: LoongArch: Cleanup headers to avoid circular dependency

When GENERIC_IOREMAP is enabled, there is a circular header dependency
that causes build errors. The root cause is that pgtable.h shouldn't
include io.h, but pgtable.h needs some macros defined in io.h. So move
those macros into addrspace.h and remove the unnecessary inclusions, as
other architectures do.

Signed-off-by: Huacai Chen
---
 arch/loongarch/include/asm/addrspace.h | 16 ++++++++++++++++
 arch/loongarch/include/asm/io.h        | 19 -------------------
 arch/loongarch/include/asm/page.h      |  2 +-
 arch/loongarch/include/asm/pgtable.h   |  7 +++----
 arch/loongarch/mm/mmap.c               | 11 ++---------
 5 files changed, 22 insertions(+), 33 deletions(-)

(limited to 'arch/loongarch/include/asm')

diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index b91e0733b2e5..d342935e5a72 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -109,4 +109,20 @@ extern unsigned long vm_map_base;
  */
 #define PHYSADDR(a)	((_ACAST64_(a)) & TO_PHYS_MASK)
 
+/*
+ * On LoongArch, I/O ports mappring is following:
+ *
+ *              |         ....          |
+ *              |-----------------------|
+ *              | pci io ports(16K~32M) |
+ *              |-----------------------|
+ *              | isa io ports(0  ~16K) |
+ * PCI_IOBASE ->|-----------------------|
+ *              |         ....          |
+ */
+#define PCI_IOBASE	((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
+#define PCI_IOSIZE	SZ_32M
+#define ISA_IOSIZE	SZ_16K
+#define IO_SPACE_LIMIT	(PCI_IOSIZE - 1)
+
 #endif /* _ASM_ADDRSPACE_H */
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index 884599739b36..999944ea1cea 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -7,34 +7,15 @@
 
 #define ARCH_HAS_IOREMAP_WC
 
-#include
 #include
 #include
 #include
-#include
-#include
 #include
 #include
 #include
 #include
 
-/*
- * On LoongArch, I/O ports mappring is following:
- *
- *              |         ....          |
- *              |-----------------------|
- *              | pci io ports(64K~32M) |
- *              |-----------------------|
- *              | isa io ports(0 ~16K)  |
- * PCI_IOBASE ->|-----------------------|
- *              |         ....          |
- */
-#define PCI_IOBASE	((void __iomem *)(vm_map_base + (2 * PAGE_SIZE)))
-#define PCI_IOSIZE	SZ_32M
-#define ISA_IOSIZE	SZ_16K
-#define IO_SPACE_LIMIT	(PCI_IOSIZE - 1)
-
 /*
  * Change "struct page" to physical address.
  */
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index a37324ac460b..53f284a96182 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -95,7 +95,7 @@ static inline int pfn_valid(unsigned long pfn)
 
 #endif
 
-#define virt_to_pfn(kaddr)	PFN_DOWN(virt_to_phys((void *)(kaddr)))
+#define virt_to_pfn(kaddr)	PFN_DOWN(PHYSADDR(kaddr))
 #define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 
 extern int __virt_addr_valid(volatile void *kaddr);
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index e03443abaf7d..8ea57e2f0e04 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -59,7 +59,6 @@
 #include
 #include
 #include
-#include <asm/io.h>
 
 struct mm_struct;
 struct vm_area_struct;
@@ -145,7 +144,7 @@ static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
 	*p4d = p4dval;
 }
 
-#define p4d_phys(p4d)		virt_to_phys((void *)p4d_val(p4d))
+#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
 #define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
 
 #endif
@@ -188,7 +187,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
 
 #define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
 
-#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
+#define pud_phys(pud)		PHYSADDR(pud_val(pud))
 #define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
 
 #endif
@@ -221,7 +220,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 
 #define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
 
-#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))
+#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))
 
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index 52e40f0ba732..381a569635a9 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -2,16 +2,9 @@
 /*
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
-#include
-#include
-#include
+#include
 #include
 #include
-#include
-#include
-#include
-#include
-#include
 
 unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
@@ -120,6 +113,6 @@ int __virt_addr_valid(volatile void *kaddr)
 	if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
 		return 0;
 
-	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+	return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
 }
 EXPORT_SYMBOL_GPL(__virt_addr_valid);
--
cgit v1.2.3
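The virt_to_phys() -> PHYSADDR() switch is what lets pgtable.h drop
io.h: for LoongArch's direct-mapped kernel addresses the VA->PA
conversion is a pure bit-mask, so no ioremap machinery is involved. A
standalone illustration of the arithmetic, with the window bits and the
48-bit mask as illustrative stand-ins for the real DMW window and
TO_PHYS_MASK definitions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real values live in asm/addrspace.h. */
#define DMW_BASE	0x9000000000000000ULL	/* an example direct-map window */
#define TO_PHYS_MASK	((1ULL << 48) - 1)	/* keep the low (physical) bits */

#define PHYSADDR(a)	((uint64_t)(a) & TO_PHYS_MASK)

int main(void)
{
	uint64_t vaddr = DMW_BASE | 0x90001000ULL;	/* a direct-mapped kernel VA */

	/* Masking off the window bits recovers the physical address. */
	printf("va 0x%016" PRIx64 " -> pa 0x%09" PRIx64 "\n",
	       vaddr, PHYSADDR(vaddr));
	return 0;
}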
From 720dc7ab252bbdf404cab7b909e26b31e602bf7e Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Thu, 25 Aug 2022 19:34:59 +0800
Subject: LoongArch: Add subword xchg/cmpxchg emulation

LoongArch natively supports only 32-bit/64-bit xchg/cmpxchg, but percpu
operations, qspinlock and some drivers need the 8-bit/16-bit variants.
Add subword xchg/cmpxchg emulation in this patch, because the emulation
performs better than the generic implementation (on NUMA systems) and
also fixes some build errors [1].

LoongArch's guarantee of forward progress (i.e., avoiding the situation
where many ll/sc sequences run at the same time and none succeeds): the
"exclusive access (with timeout) of ll" feature prevents simultaneous
ll (and also blocks other memory loads/stores to the same address), and
the "random delay of sc" feature prevents simultaneous sc. Implementing
these features is mandatory for multi-core LoongArch processors, except
for single-core and dual-core ones (which also don't support multi-chip
interconnection). Feature bits are introduced in CPUCFG3, bit 3 and
bit 4 [2].

[1] https://lore.kernel.org/loongarch/CAAhV-H6vvkuOzy8OemWdYK3taj5Jn3bFX0ZTwE=twM8ywpBUYA@mail.gmail.com/T/#t
[2] https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg

Reported-by: Sudip Mukherjee (Codethink)
Suggested-by: Linus Torvalds
Signed-off-by: Rui Wang
Signed-off-by: Huacai Chen
---
 arch/loongarch/include/asm/cmpxchg.h | 98 +++++++++++++++++++++++++++++++++++-
 arch/loongarch/include/asm/percpu.h  |  8 +++
 2 files changed, 105 insertions(+), 1 deletion(-)

(limited to 'arch/loongarch/include/asm')

diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
index 0a9b0fac1eee..ae19e33c7754 100644
--- a/arch/loongarch/include/asm/cmpxchg.h
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -5,8 +5,9 @@
 #ifndef __ASM_CMPXCHG_H
 #define __ASM_CMPXCHG_H
 
-#include
+#include
 #include
+#include
 
 #define __xchg_asm(amswap_db, m, val)		\
 ({						\
@@ -21,10 +22,53 @@
 	__ret;					\
 })
 
+static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
+					unsigned int size)
+{
+	unsigned int shift;
+	u32 old32, mask, temp;
+	volatile u32 *ptr32;
+
+	/* Mask value to the correct size. */
+	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+	val &= mask;
+
+	/*
+	 * Calculate a shift & mask that correspond to the value we wish to
+	 * exchange within the naturally aligned 4 byte integer that includes
+	 * it.
+	 */
+	shift = (unsigned long)ptr & 0x3;
+	shift *= BITS_PER_BYTE;
+	mask <<= shift;
+
+	/*
+	 * Calculate a pointer to the naturally aligned 4 byte integer that
+	 * includes our byte of interest, and load its value.
+	 */
+	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+
+	asm volatile (
+	"1:	ll.w		%0, %3		\n"
+	"	andn		%1, %0, %z4	\n"
+	"	or		%1, %1, %z5	\n"
+	"	sc.w		%1, %2		\n"
+	"	beqz		%1, 1b		\n"
+	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
+	: "ZC" (*ptr32), "Jr" (mask), "Jr" (val << shift)
+	: "memory");
+
+	return (old32 & mask) >> shift;
+}
+
 static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
 		int size)
 {
 	switch (size) {
+	case 1:
+	case 2:
+		return __xchg_small(ptr, x, size);
+
 	case 4:
 		return __xchg_asm("amswap_db.w", (volatile u32 *)ptr, (u32)x);
 
@@ -67,10 +111,62 @@
 	__ret;					\
 })
 
+static inline unsigned int __cmpxchg_small(volatile void *ptr, unsigned int old,
+					   unsigned int new, unsigned int size)
+{
+	unsigned int shift;
+	u32 old32, mask, temp;
+	volatile u32 *ptr32;
+
+	/* Mask inputs to the correct size. */
+	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+	old &= mask;
+	new &= mask;
+
+	/*
+	 * Calculate a shift & mask that correspond to the value we wish to
+	 * compare & exchange within the naturally aligned 4 byte integer
+	 * that includes it.
+	 */
+	shift = (unsigned long)ptr & 0x3;
+	shift *= BITS_PER_BYTE;
+	old <<= shift;
+	new <<= shift;
+	mask <<= shift;
+
+	/*
+	 * Calculate a pointer to the naturally aligned 4 byte integer that
+	 * includes our byte of interest, and load its value.
+	 */
+	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+
+	asm volatile (
+	"1:	ll.w		%0, %3		\n"
+	"	and		%1, %0, %z4	\n"
+	"	bne		%1, %z5, 2f	\n"
+	"	andn		%1, %0, %z4	\n"
+	"	or		%1, %1, %z6	\n"
+	"	sc.w		%1, %2		\n"
+	"	beqz		%1, 1b		\n"
+	"	b		3f		\n"
+	"2:					\n"
+	__WEAK_LLSC_MB
+	"3:					\n"
+	: "=&r" (old32), "=&r" (temp), "=ZC" (*ptr32)
+	: "ZC" (*ptr32), "Jr" (mask), "Jr" (old), "Jr" (new)
+	: "memory");
+
+	return (old32 & mask) >> shift;
+}
+
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, unsigned int size)
 {
 	switch (size) {
+	case 1:
+	case 2:
+		return __cmpxchg_small(ptr, old, new, size);
+
 	case 4:
 		return __cmpxchg_asm("ll.w", "sc.w", (volatile u32 *)ptr,
 				     (u32)old, new);
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index e6569f18c6dd..0bd6b0110198 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -123,6 +123,10 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 		int size)
 {
 	switch (size) {
+	case 1:
+	case 2:
+		return __xchg_small((volatile void *)ptr, val, size);
+
 	case 4:
 		return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
 
@@ -204,9 +208,13 @@ do {									\
 #define this_cpu_write_4(pcp, val)	_percpu_write(pcp, val)
 #define this_cpu_write_8(pcp, val)	_percpu_write(pcp, val)
 
+#define this_cpu_xchg_1(pcp, val)	_percpu_xchg(pcp, val)
+#define this_cpu_xchg_2(pcp, val)	_percpu_xchg(pcp, val)
 #define this_cpu_xchg_4(pcp, val)	_percpu_xchg(pcp, val)
 #define this_cpu_xchg_8(pcp, val)	_percpu_xchg(pcp, val)
 
+#define this_cpu_cmpxchg_1(ptr, o, n)	_protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n)	_protect_cmpxchg_local(ptr, o, n)
 #define this_cpu_cmpxchg_4(ptr, o, n)	_protect_cmpxchg_local(ptr, o, n)
 #define this_cpu_cmpxchg_8(ptr, o, n)	_protect_cmpxchg_local(ptr, o, n)
--
cgit v1.2.3
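The trick in __cmpxchg_small() is to operate on the aligned 32-bit word
that contains the subword: mask out the old bytes, splice in the new
ones, and let the word-sized atomic detect interference. A minimal
userspace re-creation of the same mask/shift logic, assuming a
little-endian host (like LoongArch) and substituting GCC's 32-bit
__atomic CAS builtin for the kernel's ll.w/sc.w loop:

#include <stdint.h>
#include <stdio.h>

/* Emulate a 16-bit cmpxchg with a 32-bit CAS (illustrative only). */
static uint16_t cmpxchg16_emulated(volatile uint16_t *ptr, uint16_t old,
				   uint16_t new)
{
	/* Bit offset of our halfword inside its aligned 32-bit word. */
	unsigned int shift = ((uintptr_t)ptr & 0x3) * 8;
	uint32_t mask = (uint32_t)0xffff << shift;
	volatile uint32_t *ptr32 =
		(volatile uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
	uint32_t old32 = *ptr32;

	for (;;) {
		/* Like cmpxchg: fail if the current halfword doesn't match. */
		if (((old32 & mask) >> shift) != old)
			return (old32 & mask) >> shift;
		/* Splice the new halfword into the containing word and CAS. */
		uint32_t new32 = (old32 & ~mask) | ((uint32_t)new << shift);
		if (__atomic_compare_exchange_n(ptr32, &old32, new32, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return old;
		/* A failed CAS refreshed old32 with the current value; retry. */
	}
}

int main(void)
{
	_Alignas(4) volatile uint16_t v[2] = { 0x1111, 0x2222 };

	printf("returned 0x%x, v[1] now 0x%x\n",
	       cmpxchg16_emulated(&v[1], 0x2222, 0xbeef), v[1]);
	return 0;
}

The kernel version performs the same splice inside a single ll.w/sc.w
loop, so the store fails (and retries) whenever any byte of the
containing word changed in between.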
From e0fba87c854347007fb9fc873e890b686cc61302 Mon Sep 17 00:00:00 2001
From: Huacai Chen
Date: Fri, 2 Sep 2022 22:33:42 +0800
Subject: LoongArch: Fix section mismatch due to acpi_os_ioremap()

Now acpi_os_ioremap() is marked with __init because it calls
memblock_is_memory(), which is also marked with __init in the
!ARCH_KEEP_MEMBLOCK case. However, acpi_os_ioremap() is called by
ordinary functions such as acpi_os_{read, write}_memory(), which causes
section mismatch warnings:

WARNING: modpost: vmlinux.o: section mismatch in reference:
acpi_os_read_memory (section: .text) -> acpi_os_ioremap (section: .init.text)
WARNING: modpost: vmlinux.o: section mismatch in reference:
acpi_os_write_memory (section: .text) -> acpi_os_ioremap (section: .init.text)

Fix these warnings by selecting ARCH_KEEP_MEMBLOCK unconditionally and
removing the __init modifier of acpi_os_ioremap(). This also gives us a
chance to track "memory" and "reserved" memblocks after early boot.

Signed-off-by: Huacai Chen
---
 arch/loongarch/Kconfig            | 1 +
 arch/loongarch/include/asm/acpi.h | 2 +-
 arch/loongarch/kernel/acpi.c      | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch/loongarch/include/asm')

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 3bc5b911ae1a..c7dd6ad779af 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -39,6 +39,7 @@ config LOONGARCH
 	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
 	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
 	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_KEEP_MEMBLOCK
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select ARCH_SPARSEMEM_ENABLE
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
index c5108213876c..17162f494b9b 100644
--- a/arch/loongarch/include/asm/acpi.h
+++ b/arch/loongarch/include/asm/acpi.h
@@ -15,7 +15,7 @@ extern int acpi_pci_disabled;
 extern int acpi_noirq;
 
 #define acpi_os_ioremap acpi_os_ioremap
-void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
 
 static inline void disable_acpi(void)
 {
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index f1c928648a4a..335398482038 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -48,7 +48,7 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
 	early_memunmap(map, size);
 }
 
-void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 {
 	if (!memblock_is_memory(phys))
 		return ioremap(phys, size);
--
cgit v1.2.3
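For reference, the shape of the problem modpost flags: code placed in
.init.text is freed by free_initmem() once boot completes, so a call
from ordinary .text may later jump into freed memory. A minimal sketch
with illustrative names:

#include <linux/init.h>

/* Placed in .init.text: discarded after boot. */
static void __init early_only_helper(void)
{
	/* ... work that is only valid during early boot ... */
}

/* Placed in .text: may run at any time. */
void regular_runtime_path(void)
{
	early_only_helper();	/* modpost: section mismatch */
}

Dropping __init from acpi_os_ioremap() removes exactly this pattern,
and selecting ARCH_KEEP_MEMBLOCK keeps memblock_is_memory() available
after boot so the un-annotated function remains correct.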