author     Linus Torvalds <torvalds@linux-foundation.org>   2019-03-10 10:17:23 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-03-10 10:17:23 -0700
commit     3d8dfe75ef69f4dd4ba35c09b20a5aa58b4a5078 (patch)
tree       e5dd2ba86a027007610df67331304e083fe560ec /arch/riscv/include
parent     d6075262969321bcb5d795de25595fc2a141ac02 (diff)
parent     b855b58ac1b7891b219e1d9ef60c45c774cadefe (diff)
download   linux-3d8dfe75ef69f4dd4ba35c09b20a5aa58b4a5078.tar.bz2
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
- Pseudo NMI support for arm64 using GICv3 interrupt priorities
- uaccess macros clean-up (unsafe user accessors also merged but
reverted, waiting for objtool support on arm64)
- ptrace regsets for Pointer Authentication (ARMv8.3) key management
- inX() ordering w.r.t. delay() on arm64 and riscv (acks in place by
the riscv maintainers); a sketch of the driver-side pattern follows
the shortlog below
- arm64/perf updates: PMU bindings converted to json-schema, unused
variable and misleading comment removed
- arm64/debug fixes to ensure checking of the triggering exception
level and to avoid the propagation of the UNKNOWN FAR value into the
si_code for debug signals
- Workaround for Fujitsu A64FX erratum 010001
- lib/raid6 ARM NEON optimisations
- NR_CPUS now defaults to 256 on arm64
- Minor clean-ups (documentation/comments, Kconfig warning, unused
asm-offsets, clang warnings)
- MAINTAINERS update for list information to the ARM64 ACPI entry
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (54 commits)
arm64: mmu: drop paging_init comments
arm64: debug: Ensure debug handlers check triggering exception level
arm64: debug: Don't propagate UNKNOWN FAR into si_code for debug signals
Revert "arm64: uaccess: Implement unsafe accessors"
arm64: avoid clang warning about self-assignment
arm64: Kconfig.platforms: fix warning unmet direct dependencies
lib/raid6: arm: optimize away a mask operation in NEON recovery routine
lib/raid6: use vdupq_n_u8 to avoid endianness warnings
arm64: io: Hook up __io_par() for inX() ordering
riscv: io: Update __io_[p]ar() macros to take an argument
asm-generic/io: Pass result of I/O accessor to __io_[p]ar()
arm64: Add workaround for Fujitsu A64FX erratum 010001
arm64: Rename get_thread_info()
arm64: Remove documentation about TIF_USEDFPU
arm64: irqflags: Fix clang build warnings
arm64: Enable the support of pseudo-NMIs
arm64: Skip irqflags tracing for NMI in IRQs disabled context
arm64: Skip preemption when exiting an NMI
arm64: Handle serror in NMI context
irqchip/gic-v3: Allow interrupts to be set as pseudo-NMI
...
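Of the changes above, the inX()/readX() ordering work is what touches
arch/riscv/include (see the diffstat and diff below). As a motivating
illustration only, here is a hypothetical driver fragment; the EXDEV_*
names and the 10us settle time are invented, not taken from this merge.
It shows the pattern the ordering guarantee protects: the I/O read must
be fully complete before a subsequent delay starts counting, otherwise
the delay is silently shortened.

/*
 * Hypothetical example: register offsets, bit values and the settle
 * time are made up for illustration.
 */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/printk.h>

#define EXDEV_REG_CTRL		0x00
#define EXDEV_REG_STATUS	0x04
#define EXDEV_CTRL_RESET	0x01

static void exdev_reset(void __iomem *base)
{
	u32 status;

	writel(EXDEV_CTRL_RESET, base + EXDEV_REG_CTRL);

	/*
	 * The readl() below has to complete before udelay() starts
	 * counting; providing that ordering is the job of the
	 * __io_ar()/__io_par() hooks in the accessor definitions
	 * shown in the diff below.
	 */
	status = readl(base + EXDEV_REG_STATUS);
	udelay(10);			/* hypothetical settle time */

	if (status & EXDEV_CTRL_RESET)
		pr_warn("exdev: reset still pending\n");
}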
Diffstat (limited to 'arch/riscv/include')
-rw-r--r--   arch/riscv/include/asm/io.h   36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index b269451e7e85..1d9c1376dc64 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -163,20 +163,20 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  * doesn't define any ordering between the memory space and the I/O space.
  */
 #define __io_br()	do {} while (0)
-#define __io_ar()	__asm__ __volatile__ ("fence i,r" : : : "memory");
+#define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory");
 #define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory");
 #define __io_aw()	do {} while (0)
 
-#define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(); __v; })
-#define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(); __v; })
-#define readl(c)	({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(); __v; })
+#define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
+#define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
+#define readl(c)	({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
 
 #define writeb(v,c)	({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); })
 #define writew(v,c)	({ __io_bw(); writew_cpu((v),(c)); __io_aw(); })
 #define writel(v,c)	({ __io_bw(); writel_cpu((v),(c)); __io_aw(); })
 
 #ifdef CONFIG_64BIT
-#define readq(c)	({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(); __v; })
+#define readq(c)	({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
 #define writeq(v,c)	({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
 #endif
 
@@ -198,20 +198,20 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  * writes.
  */
 #define __io_pbr()	__asm__ __volatile__ ("fence io,i"  : : : "memory");
-#define __io_par()	__asm__ __volatile__ ("fence i,ior" : : : "memory");
+#define __io_par(v)	__asm__ __volatile__ ("fence i,ior" : : : "memory");
 #define __io_pbw()	__asm__ __volatile__ ("fence iow,o" : : : "memory");
 #define __io_paw()	__asm__ __volatile__ ("fence o,io"  : : : "memory");
 
-#define inb(c)		({ u8  __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
-#define inw(c)		({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
-#define inl(c)		({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
+#define inb(c)		({ u8  __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+#define inw(c)		({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+#define inl(c)		({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
 
 #define outb(v,c)	({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
 #define outw(v,c)	({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
 #define outl(v,c)	({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
 
 #ifdef CONFIG_64BIT
-#define inq(c)		({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(); __v; })
+#define inq(c)		({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(__v); __v; })
 #define outq(v,c)	({ __io_pbw(); writeq_cpu((v),(void*)(c)); __io_paw(); })
 #endif
 
@@ -254,16 +254,16 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 		afence;							\
 	}
 
-__io_reads_ins(reads, u8, b, __io_br(), __io_ar())
-__io_reads_ins(reads, u16, w, __io_br(), __io_ar())
-__io_reads_ins(reads, u32, l, __io_br(), __io_ar())
+__io_reads_ins(reads, u8, b, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u16, w, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
 #define readsb(addr, buffer, count) __readsb(addr, buffer, count)
 #define readsw(addr, buffer, count) __readsw(addr, buffer, count)
 #define readsl(addr, buffer, count) __readsl(addr, buffer, count)
 
-__io_reads_ins(ins, u8, b, __io_pbr(), __io_par())
-__io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
-__io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
+__io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
 #define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
 #define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
 #define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)
@@ -283,10 +283,10 @@ __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
 #define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)
 
 #ifdef CONFIG_64BIT
-__io_reads_ins(reads, u64, q, __io_br(), __io_ar())
+__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
 #define readsq(addr, buffer, count) __readsq(addr, buffer, count)
 
-__io_reads_ins(ins, u64, q, __io_pbr(), __io_par())
+__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
 #define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count)
 
 __io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
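On riscv the new value argument is not used by the fences themselves
(the fence strings are unchanged); the point of threading the accessor's
result through __io_ar()/__io_par() is that an architecture can build an
ordering dependency on it. A rough sketch of that idea, loosely modeled
on the arm64 hookup in this merge ("arm64: io: Hook up __io_par() for
inX() ordering") rather than copied from it:

/*
 * Sketch only, not the verbatim arm64 macro: consume the returned
 * value so that later instructions carry a dependency on the I/O
 * read.  The eor/cbnz pair never actually branches, but its branch
 * condition depends on the read's result, so code after the macro,
 * including a delay loop, cannot run ahead of the read completing.
 */
#define __io_par(v)						\
({								\
	unsigned long __tmp;					\
	rmb();		/* order against later memory reads */	\
	asm volatile("eor	%0, %1, %1\n"			\
		     "cbnz	%0, ."				\
		     : "=r" (__tmp)				\
		     : "r" ((unsigned long)(v))			\
		     : "memory");				\
})

The riscv side of this merge only changes the macro signatures; it is
the "asm-generic/io: Pass result of I/O accessor to __io_[p]ar()" commit
in the shortlog above that makes the read value available to
architectures that want to consume it this way.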