author      Paul Mundt <lethal@linux-sh.org>    2010-02-17 13:23:00 +0900
committer   Paul Mundt <lethal@linux-sh.org>    2010-02-17 13:23:00 +0900
commit      7bdda6209f224aa784a036df54b22cb338d2e859 (patch)
tree        2c2ce99f0ec55386246379ffb8412b3a893402b0 /arch/sh/mm/ioremap.c
parent      49f3bfe9334a4cf86079d2ee1d08e674b58862a9 (diff)
sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.
Both the store queue API and the PMB remapping take unsigned long for
their pgprot flags, which cuts off the extended protection bits. In the
case of the PMB this isn't really a problem, since the cache attribute
bits that we care about are all in the lower 32 bits, but we pass the
full pgprot anyway just to be safe. The store queue remapping, on the
other hand, depends on the extended protection bits for enabling
userspace access to the mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
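To make the truncation concrete, here is a minimal userspace sketch (not
kernel code). It assumes a 64-bit pgprot_t and a 32-bit unsigned long, as
on an SH-X2 (CONFIG_X2TLB) build; the _PAGE_EXT_USER name and its bit
position are invented purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel types on an SH-X2 build:
 * with the extended TLB, pgprot_t wraps a 64-bit value, while
 * unsigned long on 32-bit sh is only 32 bits wide. */
typedef struct { uint64_t pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)

/* An extended protection bit living in the upper 32 bits, such as the
 * user-access enable the store queue mapping relies on. The name and
 * position are illustrative, not the kernel's. */
#define _PAGE_EXT_USER (1ULL << 36)

/* Mimics the old prototypes, which took the flags as a 32-bit value. */
static void remap_old(uint32_t flags)
{
	printf("old API sees: 0x%016llx\n", (unsigned long long)flags);
}

/* Mimics the fixed prototypes, which take the full pgprot_t. */
static void remap_new(pgprot_t prot)
{
	printf("new API sees: 0x%016llx\n",
	       (unsigned long long)pgprot_val(prot));
}

int main(void)
{
	pgprot_t prot = { .pgprot = 0x1ULL | _PAGE_EXT_USER };

	/* Passing pgprot_val() through a 32-bit parameter silently drops
	 * _PAGE_EXT_USER: prints 0x0000000000000001. */
	remap_old((uint32_t)pgprot_val(prot));

	/* Passing the pgprot_t itself preserves all 64 bits:
	 * prints 0x0000001000000001. */
	remap_new(prot);
	return 0;
}

The fix below follows the second pattern: the caller hands pmb_remap()
the pgprot_t itself and lets the callee extract whichever bits it needs
at full width.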
Diffstat (limited to 'arch/sh/mm/ioremap.c')
-rw-r--r--    arch/sh/mm/ioremap.c    2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 94583c5da855..c68d2d7d00a9 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -80,7 +80,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	if (unlikely(phys_addr >= P1SEG)) {
 		unsigned long mapped;
 
-		mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
+		mapped = pmb_remap(addr, phys_addr, size, pgprot);
 		if (likely(mapped)) {
 			addr += mapped;
 			phys_addr += mapped;
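For reference, the call site above implies that pmb_remap() was updated
in the same commit to accept the pgprot_t directly. A plausible
prototype, inferred from the caller rather than copied from the tree:

/* Return type and parameter names are assumptions; only the pgprot_t
 * fourth argument is implied by the diff above. */
long pmb_remap(unsigned long virt, unsigned long phys,
               unsigned long size, pgprot_t prot);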