author	Vineet Gupta <vgupta@synopsys.com>	2016-05-05 14:53:48 +0530
committer	Vineet Gupta <vgupta@synopsys.com>	2016-05-05 16:35:45 +0530
commit	2519d753676bdf2460fbbcde276d5b6ba8d6b695 (patch)
tree	4db307eac2f8efbfe9699b6c0bb449d346d6ad10 /arch
parent	e5bc0478ab6cf565619224536d75ecb2aedca43b (diff)
ARC: Fix PAE40 boot failures due to PTE truncation
So a benign looking cleanup which macro'ized PAGE_SHIFT shifts turned out to be bad (since it was done non-sensically across the board).

It caused boot failures with PAE40 as the forced cast to (unsigned long) from the newly introduced virt_to_pfn() was causing truncation of the (long long) pte/paddr values.

It is OK to use this in accessors dealing with kernel virtual addresses, pointers etc, but not for PTE values themselves.

Fixes: cJ2ff5cf2735c ("ARC: mm: Use virt_to_pfn() for addr >> PAGE_SHIFT pattern")
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
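[Editor's note] For context, a minimal standalone sketch of the truncation described above; this is an illustration and not kernel code. It assumes a 32-bit unsigned long, a 40-bit PAE40-style physical address, and an illustrative PAGE_SHIFT of 12; all names are made up for the example.

/*
 * Sketch: casting a 40-bit physical address to a 32-bit type before the
 * PAGE_SHIFT shift drops the upper bits, so the computed PFN is wrong.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* illustrative only */

int main(void)
{
	uint64_t paddr = 0x1234567890ULL;	/* 40-bit physical address */

	/* what the over-eager virt_to_pfn() use effectively did on 32-bit */
	uint32_t truncated_pfn = (uint32_t)paddr >> PAGE_SHIFT;

	/* what the fix does: shift the full-width value */
	uint64_t correct_pfn = paddr >> PAGE_SHIFT;

	printf("truncated pfn: 0x%" PRIx32 "\n", truncated_pfn);	/* prints 0x34567: upper bits lost */
	printf("correct pfn:   0x%" PRIx64 "\n", correct_pfn);		/* prints 0x1234567 */
	return 0;
}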
Diffstat (limited to 'arch')
-rw-r--r--	arch/arc/include/asm/page.h	9
-rw-r--r--	arch/arc/include/asm/pgtable.h	13
2 files changed, 14 insertions, 8 deletions
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 36da89e2c853..533e5a0c6ba5 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -72,6 +72,13 @@ typedef unsigned long pgprot_t;
typedef pte_t * pgtable_t;
+/*
+ * Use virt_to_pfn with caution:
+ * If used in pte or paddr related macros, it could cause truncation
+ * in PAE40 builds
+ * As a rule of thumb, only use it in helpers starting with virt_
+ * You have been warned !
+ */
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_LINK_BASE)
@@ -85,7 +92,7 @@ typedef pte_t * pgtable_t;
* virt here means link-address/program-address as embedded in object code.
* And for ARC, link-addr = physical address
*/
-#define __pa(vaddr) ((unsigned long)vaddr)
+#define __pa(vaddr) ((unsigned long)(vaddr))
#define __va(paddr) ((void *)((unsigned long)(paddr)))
#define virt_to_page(kaddr) \
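
[Editor's note] A hedged aside on the new comment in this hunk: on a 32-bit kernel a kernel virtual address is pointer-sized, so the unsigned long cast inside virt_to_pfn() cannot drop bits there; only the wider pte/paddr values can overflow it under PAE40. The model below is a standalone illustration with assumed types and an illustrative MODEL_PAGE_SHIFT, not the kernel headers.

/* Standalone model (assumptions: 32-bit pointers, illustrative shift). */
#include <stdint.h>

#define MODEL_PAGE_SHIFT 12	/* illustrative only */

/* Safe pattern: a kernel virtual address is pointer-sized, so the
 * narrowing cast is lossless and the resulting PFN is exact. */
static inline uint32_t model_virt_to_pfn(const void *kaddr)
{
	return (uint32_t)(uintptr_t)kaddr >> MODEL_PAGE_SHIFT;
}

/* Unsafe pattern under PAE40: a 64-bit pte/paddr value squeezed
 * through 32 bits loses its upper bits before the shift. */
static inline uint32_t model_bad_pte_pfn(uint64_t pteval)
{
	return (uint32_t)pteval >> MODEL_PAGE_SHIFT;
}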
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 7d6c93e63adf..10d4b8b8e545 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -278,14 +278,13 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
-#define pte_page(pte) \
- (mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE))
-
+#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pte_pfn(pte) virt_to_pfn(pte_val(pte))
-#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
- pgprot_val(prot)))
-#define __pte_index(addr) (virt_to_pfn(addr) & (PTRS_PER_PTE - 1))
+#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/*
* pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
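
[Editor's note] To round off, a hedged standalone model of the two macro shapes changed in this hunk, assuming a 64-bit pte_t as with PAE40, illustrative MODEL_PAGE_SHIFT and MODEL_PTRS_PER_PTE values, and made-up model_ names. It is not the kernel header, and it ignores the protection bits a real PTE carries in its low bits.

/* Standalone model, not the kernel header (64-bit pte as under PAE40,
 * illustrative shift and table size, 32-bit "unsigned long"). */
#include <assert.h>
#include <stdint.h>

typedef uint64_t model_pte_t;
#define MODEL_PAGE_SHIFT	12	/* illustrative only */
#define MODEL_PTRS_PER_PTE	256	/* illustrative only */

/* old shape: virt_to_pfn() forced the value through a 32-bit type */
#define old_pte_pfn(pte)	((uint32_t)(pte) >> MODEL_PAGE_SHIFT)

/* new shape: shift the full-width pte value directly */
#define new_pte_pfn(pte)	((pte) >> MODEL_PAGE_SHIFT)

/* the new __pte_index likewise shifts the address itself */
#define model_pte_index(addr)	(((addr) >> MODEL_PAGE_SHIFT) & (MODEL_PTRS_PER_PTE - 1))

int main(void)
{
	model_pte_t pte = 0x1234567890ULL;	/* 40-bit paddr held in the PTE */

	assert(new_pte_pfn(pte) == 0x1234567);	/* full PFN preserved */
	assert(old_pte_pfn(pte) == 0x34567);	/* upper bits truncated */
	return 0;
}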