author    Linus Torvalds <torvalds@linux-foundation.org>  2019-05-06 16:57:52 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-05-06 16:57:52 -0700
commit    dd4e5d6106b2380e2c1238406d26df8b2fe1c42c
tree      33eb172237dd8873de02d5b881bf17cf58d05fab /arch
parent    14be4c61c205dcb0a72251c1e2790814181bd9ba
parent    9726840d9cf0d42377e1591263d7c1d9ae0988ac
download  linux-dd4e5d6106b2380e2c1238406d26df8b2fe1c42c.tar.bz2
Merge tag 'arm64-mmiowb' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull mmiowb removal from Will Deacon:
"Remove Mysterious Macro Intended to Obscure Weird Behaviours (mmiowb())
Remove mmiowb() from the kernel memory barrier API and instead, for
architectures that need it, hide the barrier inside spin_unlock() when
MMIO has been performed inside the critical section.
The only relatively recent changes have been addressing review
comments on the documentation, which is in a much better shape thanks
to the efforts of Ben and Ingo.
I was initially planning to split this into two pull requests so that
you could run the coccinelle script yourself, however it's been plain
sailing in linux-next so I've just included the whole lot here to keep
things simple"
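To make the mechanism concrete: the MMIO write accessors record a per-CPU
"pending" flag, and spin_unlock() issues the barrier only when that flag is
set. Below is a minimal, self-contained C sketch modelled on the
asm-generic/mmiowb.h helpers this series introduces; the flat globals, the
printf() stand-in for the real barrier instruction, and the main() driver are
illustrative assumptions, not kernel code (the kernel keeps this state per
CPU and nests it properly under lock contention).

/*
 * Sketch of the mmiowb tracking scheme, modelled on asm-generic/mmiowb.h.
 * Assumptions for illustration: flat globals instead of per-CPU state, and
 * printf() in place of the architecture barrier (e.g. "sync", "fence o,w").
 */
#include <stdio.h>

struct mmiowb_state {
	unsigned int nesting_count;	/* how many spinlocks this CPU holds */
	unsigned int mmiowb_pending;	/* MMIO write done under a lock? */
};

static struct mmiowb_state state;	/* the kernel keeps one per CPU */

static void mmiowb(void)
{
	/* Stands in for the architecture's I/O write barrier. */
	printf("mmiowb barrier issued\n");
}

/* Called from the MMIO write accessors (writel() and friends). */
static void mmiowb_set_pending(void)
{
	if (state.nesting_count)
		state.mmiowb_pending = state.nesting_count;
}

/* Called on spin_lock(): track nesting. */
static void mmiowb_spin_lock(void)
{
	state.nesting_count++;
}

/* Called on spin_unlock(): issue the barrier only if MMIO happened. */
static void mmiowb_spin_unlock(void)
{
	if (state.mmiowb_pending) {
		state.mmiowb_pending = 0;
		mmiowb();
	}
	state.nesting_count--;
}

int main(void)
{
	mmiowb_spin_lock();	/* spin_lock(&dev->lock);           */
	mmiowb_set_pending();	/* writel(val, dev->regs);          */
	mmiowb_spin_unlock();	/* spin_unlock(): barrier issued    */

	mmiowb_spin_lock();	/* critical section with no MMIO... */
	mmiowb_spin_unlock();	/* ...unlocks without any barrier   */
	return 0;
}

Architectures whose unlock path cannot cheaply test the flag (ia64, mips, sh
in the diff below) instead call mmiowb() unconditionally in arch_spin_unlock().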
* tag 'arm64-mmiowb' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (23 commits)
docs/memory-barriers.txt: Update I/O section to be clearer about CPU vs thread
docs/memory-barriers.txt: Fix style, spacing and grammar in I/O section
arch: Remove dummy mmiowb() definitions from arch code
net/ethernet/silan/sc92031: Remove stale comment about mmiowb()
i40iw: Redefine i40iw_mmiowb() to do nothing
scsi/qla1280: Remove stale comment about mmiowb()
drivers: Remove explicit invocations of mmiowb()
drivers: Remove useless trailing comments from mmiowb() invocations
Documentation: Kill all references to mmiowb()
riscv/mmiowb: Hook up mmwiob() implementation to asm-generic code
powerpc/mmiowb: Hook up mmwiob() implementation to asm-generic code
ia64/mmiowb: Add unconditional mmiowb() to arch_spin_unlock()
mips/mmiowb: Add unconditional mmiowb() to arch_spin_unlock()
sh/mmiowb: Add unconditional mmiowb() to arch_spin_unlock()
m68k/io: Remove useless definition of mmiowb()
nds32/io: Remove useless definition of mmiowb()
x86/io: Remove useless definition of mmiowb()
arm64/io: Remove useless definition of mmiowb()
ARM/io: Remove useless definition of mmiowb()
mmiowb: Hook up mmiowb helpers to spinlocks and generic I/O accessors
...
Diffstat (limited to 'arch')
47 files changed, 135 insertions, 103 deletions
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index 70b783333965..89e87bbc987f 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += irq_work.h
 generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 4c533fc94d62..ccf9d65166bb 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -513,8 +513,6 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
 #define writel_relaxed(b, addr) __raw_writel(b, addr)
 #define writeq_relaxed(b, addr) __raw_writeq(b, addr)
 
-#define mmiowb()
-
 /*
  * String version of IO memory access ops:
  */
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index decc306a3b52..393d4f5e1450 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += msi.h
 generic-y += parport.h
 generic-y += percpu.h
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 8fb51b7bf1d5..41deac2451af 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += msi.h
 generic-y += parport.h
 generic-y += preempt.h
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 6b51826ab3d1..7e22c81398c4 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -281,8 +281,6 @@ extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
 extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
 extern void _memset_io(volatile void __iomem *, int, size_t);
 
-#define mmiowb()
-
 /*
  * Memory access primitives
  * ------------------------
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 60a933b07001..eb0df239a759 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += msi.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 8bb7210ac286..b807cb9b517d 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -124,8 +124,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #define __io_par(v)		__iormb(v)
 #define __iowmb()		wmb()
 
-#define mmiowb()		do { } while (0)
-
 /*
  * Relaxed I/O memory access primitives. These follow the Device memory
  * ordering rules but do not guarantee any ordering relative to Normal memory
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 249c9f6f26dc..6b168d32fbff 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -23,6 +23,7 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += mmu.h
 generic-y += mmu_context.h
 generic-y += pci.h
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 2a0abe8f2a35..95f4e550db8a 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -28,6 +28,7 @@ generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += module.h
 generic-y += mutex.h
 generic-y += pci.h
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index e3dead402e5f..123d8f54be4a 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -29,6 +29,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += mmu.h
 generic-y += mmu_context.h
 generic-y += module.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 3ff5f297acda..6234a303d2a3 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -24,6 +24,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index e17262ad125e..3d0ae09c2b8e 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -184,8 +184,6 @@ static inline void writel(u32 data, volatile void __iomem *addr)
 #define writew_relaxed __raw_writew
 #define writel_relaxed __raw_writel
 
-#define mmiowb()
-
 /*
  * Need an mtype somewhere in here, for cache type deals?
  * This is probably too long for an inline.
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 1e6fef69bb01..a511d62d447a 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -113,20 +113,6 @@ extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
  */
 #define __ia64_mf_a()	ia64_mfa()
 
-/**
- * ___ia64_mmiowb - I/O write barrier
- *
- * Ensure ordering of I/O space writes. This will make sure that writes
- * following the barrier will arrive after all previous writes. For most
- * ia64 platforms, this is a simple 'mf.a' instruction.
- *
- * See Documentation/driver-api/device-io.rst for more information.
- */
-static inline void ___ia64_mmiowb(void)
-{
-	ia64_mfa();
-}
-
 static inline void*
 __ia64_mk_io_addr (unsigned long port)
 {
@@ -161,7 +147,6 @@ __ia64_mk_io_addr (unsigned long port)
 #define __ia64_writew	___ia64_writew
 #define __ia64_writel	___ia64_writel
 #define __ia64_writeq	___ia64_writeq
-#define __ia64_mmiowb	___ia64_mmiowb
 
 /*
  * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
@@ -296,7 +281,6 @@ __outsl (unsigned long port, const void *src, unsigned long count)
 #define __outb		platform_outb
 #define __outw		platform_outw
 #define __outl		platform_outl
-#define __mmiowb	platform_mmiowb
 
 #define inb(p)		__inb(p)
 #define inw(p)		__inw(p)
@@ -310,7 +294,6 @@ __outsl (unsigned long port, const void *src, unsigned long count)
 #define outsb(p,s,c)	__outsb(p,s,c)
 #define outsw(p,s,c)	__outsw(p,s,c)
 #define outsl(p,s,c)	__outsl(p,s,c)
-#define mmiowb()	__mmiowb()
 
 /*
  * The address passed to these functions are ioremap()ped already.
diff --git a/arch/ia64/include/asm/mmiowb.h b/arch/ia64/include/asm/mmiowb.h
new file mode 100644
index 000000000000..297b85ac84a0
--- /dev/null
+++ b/arch/ia64/include/asm/mmiowb.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_IA64_MMIOWB_H
+#define _ASM_IA64_MMIOWB_H
+
+#include <asm/machvec.h>
+
+/**
+ * ___ia64_mmiowb - I/O write barrier
+ *
+ * Ensure ordering of I/O space writes. This will make sure that writes
+ * following the barrier will arrive after all previous writes. For most
+ * ia64 platforms, this is a simple 'mf.a' instruction.
+ */
+static inline void ___ia64_mmiowb(void)
+{
+	ia64_mfa();
+}
+
+#define __ia64_mmiowb	___ia64_mmiowb
+#define mmiowb()	platform_mmiowb()
+
+#include <asm-generic/mmiowb.h>
+
+#endif	/* _ASM_IA64_MMIOWB_H */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index afd0b3121b4c..5f620e66384e 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -73,6 +73,8 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
+	/* This could be optimised with ARCH_HAS_MMIOWB */
+	mmiowb();
 	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
 	WRITE_ONCE(*p, (tmp + 2) & ~1);
 }
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 2c359d9e80f6..0ddae4a74adb 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -18,6 +18,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += sections.h
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index 782b78f8a048..6c03ca5bc436 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -377,8 +377,6 @@ static inline void isa_delay(void)
 #define writesw(port, buf, nr)    raw_outsw((port), (u16 *)(buf), (nr))
 #define writesl(port, buf, nr)    raw_outsl((port), (u32 *)(buf), (nr))
 
-#define mmiowb()
-
 #ifndef CONFIG_SUN3
 #define IO_SPACE_LIMIT 0xffff
 #else
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 1a8285c3f693..17a8d0a62038 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -23,6 +23,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 845fbbc7a2e3..29997e42480e 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -102,9 +102,6 @@ static inline void set_io_port_base(unsigned long base)
 #define iobarrier_w() wmb()
 #define iobarrier_sync() iob()
 
-/* Some callers use this older API instead. */
-#define mmiowb() iobarrier_w()
-
 /*
  * virt_to_phys - map virtual addresses to physical
  * @address: address to remap
diff --git a/arch/mips/include/asm/mmiowb.h b/arch/mips/include/asm/mmiowb.h
new file mode 100644
index 000000000000..a40824e3ef8e
--- /dev/null
+++ b/arch/mips/include/asm/mmiowb.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MMIOWB_H
+#define _ASM_MMIOWB_H
+
+#include <asm/io.h>
+
+#define mmiowb()	iobarrier_w()
+
+#include <asm-generic/mmiowb.h>
+
+#endif	/* _ASM_MMIOWB_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index ee81297d9117..8a88eb265516 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -11,6 +11,21 @@
 
 #include <asm/processor.h>
 #include <asm/qrwlock.h>
+
+#include <asm-generic/qspinlock_types.h>
+
+#define	queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	/* This could be optimised with ARCH_HAS_MMIOWB */
+	mmiowb();
+	smp_store_release(&lock->locked, 0);
+}
+
 #include <asm/qspinlock.h>
 
 #endif /* _ASM_SPINLOCK_H */
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index 64ceff7ab99b..688b6ed26227 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -31,6 +31,7 @@ generic-y += limits.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += parport.h
 generic-y += pci.h
 generic-y += percpu.h
diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h
index 71cd226d6863..5ef8ae5ba833 100644
--- a/arch/nds32/include/asm/io.h
+++ b/arch/nds32/include/asm/io.h
@@ -55,8 +55,6 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
 #define __iormb()	rmb()
 #define __iowmb()	wmb()
 
-#define mmiowb()	__asm__ __volatile__ ("msync all" : : : "memory");
-
 /*
  * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
  * are not guaranteed to provide ordering against spinlocks or memory
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 88a667d12aaa..d7ef3512504a 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -27,6 +27,7 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += module.h
 generic-y += pci.h
 generic-y += percpu.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 22aa97136c01..1919cc5e0f11 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -24,6 +24,7 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += module.h
 generic-y += pci.h
 generic-y += percpu.h
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 9bcd0c903dbb..b8c7db777144 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += seccomp.h
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 30a8315d5c07..93d37010b375 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -229,8 +229,6 @@ static inline void writeq(unsigned long long q, volatile void __iomem *addr)
 #define writel_relaxed(l, addr)	writel(l, addr)
 #define writeq_relaxed(q, addr)	writeq(q, addr)
 
-#define mmiowb() do { } while (0)
-
 void memset_io(volatile void __iomem *addr, unsigned char val, int count);
 void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
 void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 88a4fb3647a2..fa7219ffeadc 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -125,6 +125,7 @@ config PPC
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_KCOV
+	select ARCH_HAS_MMIOWB			if PPC64
 	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_HAS_PMEM_API		if PPC64
 	select ARCH_HAS_PTE_SPECIAL
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 4b73847e9b95..1fad67b46409 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -34,14 +34,11 @@ extern struct pci_dev *isa_bridge_pcidev;
 #include <asm/byteorder.h>
 #include <asm/synch.h>
 #include <asm/delay.h>
+#include <asm/mmiowb.h>
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
 #include <asm/pgtable.h>
 
-#ifdef CONFIG_PPC64
-#include <asm/paca.h>
-#endif
-
 #define SIO_CONFIG_RA	0x398
 #define SIO_CONFIG_RD	0x399
 
@@ -107,12 +104,6 @@ extern bool isa_io_special;
 *
 *
 */

-#ifdef CONFIG_PPC64
-#define IO_SET_SYNC_FLAG()	do { local_paca->io_sync = 1; } while(0)
-#else
-#define IO_SET_SYNC_FLAG()
-#endif
-
 #define DEF_MMIO_IN_X(name, size, insn)				\
 static inline u##size name(const volatile u##size __iomem *addr)	\
 {									\
@@ -127,7 +118,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val)	\
 {									\
 	__asm__ __volatile__("sync;"#insn" %1,%y0"			\
 		: "=Z" (*addr) : "r" (val) : "memory");			\
-	IO_SET_SYNC_FLAG();						\
+	mmiowb_set_pending();						\
 }
 
 #define DEF_MMIO_IN_D(name, size, insn)				\
@@ -144,7 +135,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val)	\
 {									\
 	__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0"			\
 		: "=m" (*addr) : "r" (val) : "memory");			\
-	IO_SET_SYNC_FLAG();						\
+	mmiowb_set_pending();						\
 }
 
 DEF_MMIO_IN_D(in_8,     8, lbz);
@@ -652,24 +643,6 @@ static inline void name at					\
 
 #include <asm-generic/iomap.h>
 
-#ifdef CONFIG_PPC32
-#define mmiowb()
-#else
-/*
- * Enforce synchronisation of stores vs. spin_unlock
- * (this does it explicitly, though our implementation of spin_unlock
- * does it implicitely too)
- */
-static inline void mmiowb(void)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
-	: "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
-	: "memory");
-}
-#endif /* !CONFIG_PPC32 */
-
 static inline void iosync(void)
 {
         __asm__ __volatile__ ("sync" : : : "memory");
diff --git a/arch/powerpc/include/asm/mmiowb.h b/arch/powerpc/include/asm/mmiowb.h
new file mode 100644
index 000000000000..74a00127eb20
--- /dev/null
+++ b/arch/powerpc/include/asm/mmiowb.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMIOWB_H
+#define _ASM_POWERPC_MMIOWB_H
+
+#ifdef CONFIG_MMIOWB
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+#include <asm/paca.h>
+
+#define arch_mmiowb_state()	(&local_paca->mmiowb_state)
+#define mmiowb()		mb()
+
+#endif /* CONFIG_MMIOWB */
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_POWERPC_MMIOWB_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e843bc5d1a0f..134e912d403f 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -34,6 +34,8 @@
 #include <asm/cpuidle.h>
 #include <asm/atomic.h>
 
+#include <asm-generic/mmiowb_types.h>
+
 register struct paca_struct *local_paca asm("r13");
 
 #if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
@@ -171,7 +173,6 @@ struct paca_struct {
 	u16 trap_save;			/* Used when bad stack is encountered */
 	u8 irq_soft_mask;		/* mask for irq soft masking */
 	u8 irq_happened;		/* irq happened while soft-disabled */
-	u8 io_sync;			/* writel() needs spin_unlock sync */
 	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disable */
 	u8 nap_state_lost;		/* NV GPR values lost in power7_idle */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -264,6 +265,9 @@ struct paca_struct {
 #ifdef CONFIG_STACKPROTECTOR
 	unsigned long canary;
 #endif
+#ifdef CONFIG_MMIOWB
+	struct mmiowb_state mmiowb_state;
+#endif
 } ____cacheline_aligned;
 
 extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 685c72310f5d..15b39c407c4e 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -39,19 +39,6 @@
 #define LOCK_TOKEN	1
 #endif
 
-#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
-#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
-#define SYNC_IO		do {						\
-				if (unlikely(get_paca()->io_sync)) {	\
-					mb();				\
-					get_paca()->io_sync = 0;	\
-				}					\
-			} while (0)
-#else
-#define CLEAR_IO_SYNC
-#define SYNC_IO
-#endif
-
 #ifdef CONFIG_PPC_PSERIES
 #define vcpu_is_preempted vcpu_is_preempted
 static inline bool vcpu_is_preempted(int cpu)
@@ -99,7 +86,6 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
-	CLEAR_IO_SYNC;
 	return __arch_spin_trylock(lock) == 0;
 }
 
@@ -130,7 +116,6 @@ extern void __rw_yield(arch_rwlock_t *lock);
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-	CLEAR_IO_SYNC;
 	while (1) {
 		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
@@ -148,7 +133,6 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	unsigned long flags_dis;
 
-	CLEAR_IO_SYNC;
 	while (1) {
 		if (likely(__arch_spin_trylock(lock) == 0))
 			break;
@@ -167,7 +151,6 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	SYNC_IO;
 	__asm__ __volatile__("# arch_spin_unlock\n\t"
 				PPC_RELEASE_BARRIER: : :"memory");
 	lock->slock = 0;
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a0f44f992360..13c6a47e6150 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2429,7 +2429,10 @@ static void dump_one_paca(int cpu)
 	DUMP(p, trap_save, "%#-*x");
 	DUMP(p, irq_soft_mask, "%#-*x");
 	DUMP(p, irq_happened, "%#-*x");
-	DUMP(p, io_sync, "%#-*x");
+#ifdef CONFIG_MMIOWB
+	DUMP(p, mmiowb_state.nesting_count, "%#-*x");
+	DUMP(p, mmiowb_state.mmiowb_pending, "%#-*x");
+#endif
 	DUMP(p, irq_work_pending, "%#-*x");
 	DUMP(p, nap_state_lost, "%#-*x");
 	DUMP(p, sprg_vdso, "%#-*llx");
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 0582260fb6c2..e66745decea1 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -48,6 +48,7 @@ config RISCV
 	select RISCV_TIMER
 	select GENERIC_IRQ_MULTI_HANDLER
 	select ARCH_HAS_PTE_SPECIAL
+	select ARCH_HAS_MMIOWB
 	select HAVE_EBPF_JIT if 64BIT
 
 config MMU
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 1d9c1376dc64..744fd92e77bc 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -20,6 +20,7 @@
 #define _ASM_RISCV_IO_H
 
 #include <linux/types.h>
+#include <asm/mmiowb.h>
 
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
@@ -100,18 +101,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #endif
 
 /*
- * FIXME: I'm flip-flopping on whether or not we should keep this or enforce
- * the ordering with I/O on spinlocks like PowerPC does. The worry is that
- * drivers won't get this correct, but I also don't want to introduce a fence
- * into the lock code that otherwise only uses AMOs (and is essentially defined
- * by the ISA to be correct). For now I'm leaving this here: "o,w" is
- * sufficient to ensure that all writes to the device have completed before the
- * write to the spinlock is allowed to commit. I surmised this from reading
- * "ACQUIRES VS I/O ACCESSES" in memory-barriers.txt.
- */
-#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory");
-
-/*
  * Unordered I/O memory access primitives. These are even more relaxed than
  * the relaxed versions, as they don't even order accesses between successive
  * operations to the I/O regions.
@@ -165,7 +154,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #define __io_br()	do {} while (0)
 #define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory");
 #define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory");
-#define __io_aw()	do {} while (0)
+#define __io_aw()	mmiowb_set_pending()
 
 #define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
 #define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
new file mode 100644
index 000000000000..5d7e3a2b4e3b
--- /dev/null
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_MMIOWB_H
+#define _ASM_RISCV_MMIOWB_H
+
+/*
+ * "o,w" is sufficient to ensure that all writes to the device have completed
+ * before the write to the spinlock is allowed to commit.
+ */
+#define mmiowb()	__asm__ __volatile__ ("fence o,w" : : : "memory");
+
+#include <asm-generic/mmiowb.h>
+
+#endif	/* ASM_RISCV_MMIOWB_H */
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index d5fadefea33c..2531f673f099 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -20,6 +20,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
 generic-y += word-at-a-time.h
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 4f7f235f15f8..c28e37a344ad 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -229,9 +229,6 @@ __BUILD_IOPORT_STRING(q, u64)
 
 #define IO_SPACE_LIMIT 0xffffffff
 
-/* synco on SH-4A, otherwise a nop */
-#define mmiowb()	wmb()
-
 /* We really want to try and get these to memcpy etc */
 void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
 void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
diff --git a/arch/sh/include/asm/mmiowb.h b/arch/sh/include/asm/mmiowb.h
new file mode 100644
index 000000000000..535d59735f1d
--- /dev/null
+++ b/arch/sh/include/asm/mmiowb.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SH_MMIOWB_H
+#define __ASM_SH_MMIOWB_H
+
+#include <asm/barrier.h>
+
+/* synco on SH-4A, otherwise a nop */
+#define mmiowb()	wmb()
+
+#include <asm-generic/mmiowb.h>
+
+#endif	/* __ASM_SH_MMIOWB_H */
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h
index 786ee0fde3b0..7fd929cd2e7a 100644
--- a/arch/sh/include/asm/spinlock-llsc.h
+++ b/arch/sh/include/asm/spinlock-llsc.h
@@ -47,6 +47,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
+	/* This could be optimised with ARCH_HAS_MMIOWB */
+	mmiowb();
 	__asm__ __volatile__ (
 		"mov		#1, %0 ! arch_spin_unlock	\n\t"
 		"mov.l		%0, @%1				\n\t"
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 2ca3200d3616..95c44380b1d6 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -15,6 +15,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += module.h
 generic-y += msi.h
 generic-y += preempt.h
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index b162c23ae8c2..688911051b44 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -396,8 +396,6 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
 	}
 }
 
-#define mmiowb()
-
 #ifdef __KERNEL__
 
 /* On sparc64 we have the whole physical IO address space accessible
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 00bcbe2326d9..b506ad06aefc 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += param.h
 generic-y += pci.h
 generic-y += percpu.h
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index d77d953c04c1..b301a0b3c0b2 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -22,6 +22,7 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += module.h
 generic-y += parport.h
 generic-y += percpu.h
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index a0ab9ab61c75..eebd05942e6c 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -11,3 +11,4 @@ generic-y += early_ioremap.h
 generic-y += export.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 686247db3106..a06a9f8294ea 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -90,8 +90,6 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
 #define __raw_writew __writew
 #define __raw_writel __writel
 
-#define mmiowb() barrier()
-
 #ifdef CONFIG_X86_64
 
 build_mmio_read(readq, "q", u64, "=r", :"memory")
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 4148090cafb0..35f83c4bf239 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -20,6 +20,7 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += param.h
 generic-y += percpu.h
 generic-y += preempt.h