author     Linus Torvalds <torvalds@linux-foundation.org>  2020-06-20 12:14:29 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-20 12:14:29 -0700
commit     7fdfbe08a2b3e2bf776e2f2b341768ba93c2e158 (patch)
tree       8ecfd85b600a9c91985eeff92d8c6c0610ea500f
parent     27c2760561c0c68595b60a084c317b34e85e7c73 (diff)
parent     0e2c09011d4de4161f615ff860a605a9186cf62a (diff)
download   linux-7fdfbe08a2b3e2bf776e2f2b341768ba93c2e158.tar.bz2
Merge tag 'riscv-for-linus-5.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V fixes from Palmer Dabbelt:
- a workaround for a compiler surprise related to the "r" inline
  assembly constraint, which allows an LLVM-built kernel to boot.
- a fix to avoid WX-only mappings, which the ISA does not allow. While
  this probably manifests in many ways, the bug was found in stress-ng
  (a user-space sketch of such a request follows this list).
- a missing lock in set_direct_map_*(), which due to a recent lockdep
change started asserting.
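For the W+X case, here is a minimal user-space sketch (not part of this series; the file name and messages are illustrative only) of the kind of request that the sys_riscv.c hunk below now refuses with -EINVAL on RISC-V:

/* wx_only.c -- illustrative only: ask for a write+exec mapping with no
 * PROT_READ; with the fix below the kernel returns EINVAL instead of
 * creating a W+X-only mapping that the RISC-V page tables cannot express. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		printf("mmap(WRITE|EXEC, no READ) rejected: %s\n", strerror(errno));
	else
		printf("mmap(WRITE|EXEC, no READ) mapped at %p\n", p);
	return 0;
}

On other architectures, or on RISC-V kernels without the fix, the same call may still succeed.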
* tag 'riscv-for-linus-5.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
RISC-V: Acquire mmap lock before invoking walk_page_range
RISC-V: Don't allow write+exec only page mapping request in mmap
riscv/atomic: Fix sign extension for RV64I
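To illustrate the sign-extension problem addressed by the last patch: on RV64, lr.w sign-extends the 32-bit value it loads into a 64-bit register, so the "old" operand fed into the LR/SC loop must be sign-extended the same way, or the comparison can never match when bit 31 is set. A stand-alone sketch (assuming a 64-bit long, as on riscv64; not taken from the patch):

/* Illustrative only: models why cmpxchg's "old" operand needs the (long)
 * cast on RV64.  lr.w leaves a sign-extended 32-bit value in the register;
 * a zero-extended "old" never compares equal once bit 31 is set. */
#include <stdio.h>

int main(void)
{
	int old = -1;				/* e.g. an atomic_t currently holding -1 */

	long loaded   = (long)old;		/* what lr.w leaves in the register */
	long zero_ext = (long)(unsigned int)old;/* an un-sign-extended operand */
	long sign_ext = (long)old;		/* the patch's (long)__old */

	printf("loaded   = %#lx\n", (unsigned long)loaded);
	printf("zero_ext = %#lx  match=%d\n", (unsigned long)zero_ext, loaded == zero_ext);
	printf("sign_ext = %#lx  match=%d\n", (unsigned long)sign_ext, loaded == sign_ext);
	return 0;
}

The kernel fix itself is narrower: it simply casts __old to long in the "rJ" input operands so the compiler always materializes a sign-extended value.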
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h |  8
-rw-r--r--  arch/riscv/kernel/sys_riscv.c    |  6
-rw-r--r--  arch/riscv/mm/pageattr.c         | 14
3 files changed, 22 insertions(+), 6 deletions(-)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index d969bab4a26b..262e5bbb2776 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -179,7 +179,7 @@
 			"	bnez %1, 0b\n"			\
 			"1:\n"					\
 			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
-			: "rJ" (__old), "rJ" (__new)		\
+			: "rJ" ((long)__old), "rJ" (__new)	\
 			: "memory");				\
 		break;						\
 	case 8:						\
@@ -224,7 +224,7 @@
 			RISCV_ACQUIRE_BARRIER			\
 			"1:\n"					\
 			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
-			: "rJ" (__old), "rJ" (__new)		\
+			: "rJ" ((long)__old), "rJ" (__new)	\
 			: "memory");				\
 		break;						\
 	case 8:						\
@@ -270,7 +270,7 @@
 			"	bnez %1, 0b\n"			\
 			"1:\n"					\
 			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
-			: "rJ" (__old), "rJ" (__new)		\
+			: "rJ" ((long)__old), "rJ" (__new)	\
 			: "memory");				\
 		break;						\
 	case 8:						\
@@ -316,7 +316,7 @@
 			"	fence rw, rw\n"			\
 			"1:\n"					\
 			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
-			: "rJ" (__old), "rJ" (__new)		\
+			: "rJ" ((long)__old), "rJ" (__new)	\
 			: "memory");				\
 		break;						\
 	case 8:						\
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index f3619f59d85c..12f8a7fce78b 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -8,6 +8,7 @@
 #include <linux/syscalls.h>
 #include <asm/unistd.h>
 #include <asm/cacheflush.h>
+#include <asm-generic/mman-common.h>
 
 static long riscv_sys_mmap(unsigned long addr, unsigned long len,
 			   unsigned long prot, unsigned long flags,
@@ -16,6 +17,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
 {
 	if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
 		return -EINVAL;
+
+	if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
+		if (unlikely(!(prot & PROT_READ)))
+			return -EINVAL;
+
 	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
 			       offset >> (PAGE_SHIFT - page_shift_offset));
 }
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index ec2c70f84994..289a9a5ea5b5 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -151,6 +151,7 @@ int set_memory_nx(unsigned long addr, int numpages)
 
 int set_direct_map_invalid_noflush(struct page *page)
 {
+	int ret;
 	unsigned long start = (unsigned long)page_address(page);
 	unsigned long end = start + PAGE_SIZE;
 	struct pageattr_masks masks = {
@@ -158,11 +159,16 @@ int set_direct_map_invalid_noflush(struct page *page)
 		.clear_mask = __pgprot(_PAGE_PRESENT)
 	};
 
-	return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+	mmap_read_lock(&init_mm);
+	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+	mmap_read_unlock(&init_mm);
+
+	return ret;
 }
 
 int set_direct_map_default_noflush(struct page *page)
 {
+	int ret;
 	unsigned long start = (unsigned long)page_address(page);
 	unsigned long end = start + PAGE_SIZE;
 	struct pageattr_masks masks = {
@@ -170,7 +176,11 @@ int set_direct_map_default_noflush(struct page *page)
 		.clear_mask = __pgprot(0)
 	};
 
-	return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+	mmap_read_lock(&init_mm);
+	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+	mmap_read_unlock(&init_mm);
+
+	return ret;
 }
 
 void __kernel_map_pages(struct page *page, int numpages, int enable)