author     Laura Abbott <lauraa@codeaurora.org>        2013-12-21 01:03:06 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2013-12-29 12:46:08 +0000
commit     efea3403d4b7c6d1dd5d5ac3234c161e8b314d66 (patch)
tree       588229afaf191338ca5b14672d4fd4bc40ef36b2 /arch
parent     2a7cfcbc0553365d75716f69ee7b704cac7c9248 (diff)
download   linux-efea3403d4b7c6d1dd5d5ac3234c161e8b314d66.tar.bz2
ARM: 7931/1: Correct virt_addr_valid
The definition of virt_addr_valid is that it should return true if
and only if virt_to_page returns a valid pointer. The current
definition of virt_addr_valid only checks against the virtual address
range. There's no guarantee that just because a virtual address falls
between PAGE_OFFSET and high_memory the associated physical memory
has a valid backing struct page. Follow the example of other
architectures and convert to pfn_valid to verify that the virtual
address is actually valid. The check for an address between
PAGE_OFFSET and high_memory is still necessary as vmalloc/highmem
addresses are not valid with virt_to_page.
Cc: Will Deacon <will.deacon@arm.com>
Cc: Nicolas Pitre <nico@linaro.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
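
As a sketch of the contract described above (the helper below is
hypothetical and not part of this patch), a caller should only hand an
address to virt_to_page() after virt_addr_valid() has accepted it:

	/*
	 * Hypothetical helper, not part of this patch: illustrates the
	 * intended contract that virt_to_page() is only meaningful when
	 * virt_addr_valid() returns true.
	 */
	static struct page *addr_to_page_checked(const void *addr)
	{
		/* Reject vmalloc/highmem addresses and pfns without a struct page. */
		if (!virt_addr_valid(addr))
			return NULL;

		return virt_to_page(addr);
	}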
Diffstat (limited to 'arch')
-rw-r--r--   arch/arm/include/asm/memory.h   3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 6976b03e5213..8756e4bcdba0 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -347,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+				&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT))
 
 #endif