author     Khalid Aziz <khalid.aziz@oracle.com>   2018-02-21 10:15:49 -0700
committer  David S. Miller <davem@davemloft.net>  2018-03-18 07:38:47 -0700
commit     9035cf9a97e429e6b5291841da81c433879f5658 (patch)
tree       d18593bc9412efa453a2b1e24ff860bfcb70ea9f
parent     c6202ca764ac7b3728e29bca616567beb57fbac7 (diff)
download   linux-9035cf9a97e429e6b5291841da81c433879f5658.tar.bz2
mm: Add address parameter to arch_validate_prot()
A protection flag may not be valid across the entire address space, so arch_validate_prot() may need the address a protection bit is being set on in order to decide whether the flag is valid there. For example, sparc processors support a memory corruption detection flag (part of the ADI feature) on addresses mapped to physical RAM, but not on PFN-mapped pages or addresses mapped to devices. This patch adds the address to the parameters passed to arch_validate_prot() so protection bits can be validated in the relevant context.

Signed-off-by: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Khalid Aziz <khalid@gonehiking.org>
Reviewed-by: Anthony Yznaga <anthony.yznaga@oracle.com>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
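For illustration only (not part of this patch): a minimal sketch of how an architecture could use the new address argument. PROT_ADI and the VMA lookup below are assumptions made for the example; sparc's real ADI hooks land in later patches of this series, and locking around find_vma() is elided for brevity. The generic mm code simply forwards the start address, as the mm/mprotect.c hunk below shows.

/*
 * Illustrative sketch only -- not from this patch.  A hypothetical
 * architecture accepts a PROT_ADI bit, but only for addresses backed
 * by regular RAM; PFN-mapped and device mappings are rejected.
 * PROT_ADI is an assumed flag here, and mmap_sem handling around
 * find_vma() is omitted for brevity.
 */
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI))
		return false;

	if (prot & PROT_ADI) {
		struct vm_area_struct *vma = find_vma(current->mm, addr);

		/* The flag makes no sense on PFN-mapped or mixed-map VMAs */
		if (vma && (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
			return false;
	}
	return true;
}
#define arch_validate_prot arch_validate_prot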
 arch/powerpc/include/asm/mman.h | 4
 arch/powerpc/kernel/syscalls.c  | 2
 include/linux/mman.h            | 2
 mm/mprotect.c                   | 2
 4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 07e3f54de9e3..e3f1b5ba5d5c 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -43,7 +43,7 @@ static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
 }
 #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
 
-static inline bool arch_validate_prot(unsigned long prot)
+static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
 {
 	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
 		return false;
@@ -51,7 +51,7 @@ static inline bool arch_validate_prot(unsigned long prot)
 		return false;
 	return true;
 }
-#define arch_validate_prot(prot) arch_validate_prot(prot)
+#define arch_validate_prot arch_validate_prot
 
 #endif /* CONFIG_PPC64 */
 #endif /* _ASM_POWERPC_MMAN_H */
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index a877bf8269fe..6d90ddbd2d11 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -48,7 +48,7 @@ static inline long do_mmap2(unsigned long addr, size_t len,
 {
 	long ret = -EINVAL;
 
-	if (!arch_validate_prot(prot))
+	if (!arch_validate_prot(prot, addr))
 		goto out;
 
 	if (shift) {
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 6a4d1caaff5c..4b08e9c9c538 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -92,7 +92,7 @@ static inline void vm_unacct_memory(long pages)
  *
  * Returns true if the prot flags are valid
  */
-static inline bool arch_validate_prot(unsigned long prot)
+static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
 {
 	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
 }
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e3309fcf586b..088ea9c08678 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -417,7 +417,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
 	end = start + len;
 	if (end <= start)
 		return -ENOMEM;
-	if (!arch_validate_prot(prot))
+	if (!arch_validate_prot(prot, start))
 		return -EINVAL;
 
 	reqprot = prot;