author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-03-10 17:53:30 +0000
---|---|---
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-03-20 15:56:58 +1100
commit | a7d2dac802a7ff0677b0a5c2fdb9fe0d3fdaee0c | (patch)
tree | 55fa2e474c2827e44f1b48b57288ab3051654f97 | /arch/powerpc/include
parent | c605782b1c3f1c18a55dc1a75b19ed0288f61ac3 | (diff)
download | linux-a7d2dac802a7ff0677b0a5c2fdb9fe0d3fdaee0c.tar.bz2 |
powerpc/mm: Unify PTE_RPN_SHIFT and _PAGE_CHG_MASK definitions
This updates the 32-bit headers to use the same definitions for the RPN
shift inside the PTE as the 64-bit headers, which makes _PAGE_CHG_MASK
identical between the two.
This does introduce one runtime-visible difference: _PAGE_HASHPTE is now
part of _PAGE_CHG_MASK and is therefore preserved across pgprot changes.
However, this should have no practical effect, since that bit should have
been preserved in the first place; we only got away with leaving it out
because our PTE access functions preserved it anyway (see the sketch below).
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
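To see why the wider _PAGE_CHG_MASK matters, recall that pte_modify()-style helpers build a new PTE by keeping the bits in _PAGE_CHG_MASK and substituting the new protection bits. The sketch below is a minimal, self-contained model of that idea; the bit positions and the simplified pte_modify() are illustrative assumptions, not the real ppc32 definitions, but they show how adding _PAGE_HASHPTE to the mask makes the hash-PTE bookkeeping survive a protection change.

```c
/* Illustrative stand-ins; the real values live in pgtable-ppc32.h / pte-*.h. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PTE_RPN_SHIFT   PAGE_SHIFT
#define PTE_RPN_MASK    (~((1UL << PTE_RPN_SHIFT) - 1))

#define _PAGE_ACCESSED  0x001UL
#define _PAGE_DIRTY     0x002UL
#define _PAGE_HASHPTE   0x004UL
#define _PAGE_RW        0x008UL
#define _PAGE_USER      0x010UL
#define _PAGE_SPECIAL   0x020UL

/* Bits preserved across pgprot changes, now including _PAGE_HASHPTE. */
#define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
                         _PAGE_ACCESSED | _PAGE_SPECIAL)

/* Keep the masked bits, replace everything else with the new protection. */
static unsigned long pte_modify(unsigned long pte, unsigned long newprot)
{
        return (pte & _PAGE_CHG_MASK) | newprot;
}

int main(void)
{
        unsigned long pte = (0x1234UL << PTE_RPN_SHIFT) |
                            _PAGE_HASHPTE | _PAGE_DIRTY | _PAGE_RW;

        /* Downgrade to a read-only user mapping: HASHPTE and DIRTY survive. */
        unsigned long ro = pte_modify(pte, _PAGE_USER);

        printf("pfn=%#lx hashpte=%d dirty=%d rw=%d\n",
               ro >> PTE_RPN_SHIFT,
               !!(ro & _PAGE_HASHPTE), !!(ro & _PAGE_DIRTY),
               !!(ro & _PAGE_RW));
        return 0;
}
```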
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r-- | arch/powerpc/include/asm/pgtable-ppc32.h | 36
-rw-r--r-- | arch/powerpc/include/asm/pte-fsl-booke.h | 2
2 files changed, 26 insertions, 12 deletions
```diff
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index a9c6ecef3656..67ceffc01b43 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -146,9 +146,29 @@ extern int icache_44x_need_flush;
 
 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
 
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
-			 _PAGE_SPECIAL)
+/* Location of the PFN in the PTE. Most platforms use the same as _PAGE_SHIFT
+ * here (ie, naturally aligned). Platform who don't just pre-define the
+ * value so we don't override it here
+ */
+#ifndef PTE_RPN_SHIFT
+#define PTE_RPN_SHIFT	(PAGE_SHIFT)
+#endif
+
+#ifdef CONFIG_PTE_64BIT
+#define PTE_RPN_MAX	(1ULL << (64 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK	(~((1ULL<<PTE_RPN_SHIFT)-1))
+#else
+#define PTE_RPN_MAX	(1UL << (32 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
+#endif
+
+/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)
 
+/* Mask of bits returned by pte_pgprot() */
 #define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
 			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
 			 _PAGE_USER | _PAGE_ACCESSED | \
@@ -236,18 +256,10 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
  * Conversions between PTE values and page frame numbers.
  */
 
-/* in some case we want to additionaly adjust where the pfn is in the pte to
- * allow room for more flags */
-#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
-#define PFN_SHIFT_OFFSET	(PAGE_SHIFT + 8)
-#else
-#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)
-#endif
-
-#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)
+#define pte_pfn(x)		(pte_val(x) >> PTE_RPN_SHIFT)
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
-#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
+#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |\
 				pgprot_val(prot))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/pte-fsl-booke.h
index 0fe5de7bea3d..10820f58acf5 100644
--- a/arch/powerpc/include/asm/pte-fsl-booke.h
+++ b/arch/powerpc/include/asm/pte-fsl-booke.h
@@ -36,6 +36,8 @@
 #ifdef CONFIG_PTE_64BIT
 /* ERPN in a PTE never gets cleared, ignore it */
 #define _PTE_NONE_MASK	0xffffffffffff0000ULL
+/* We extend the size of the PTE flags area when using 64-bit PTEs */
+#define PTE_RPN_SHIFT	(PAGE_SHIFT + 8)
 #endif
 
 #define _PMD_PRESENT	0
```
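As a usage note on the unified macros above: pte_pfn() and pfn_pte() now simply shift by PTE_RPN_SHIFT, and FSL BookE with 64-bit PTEs moves that shift up by 8 bits so the low part of the PTE has room for more flag bits. The following user-space sketch (with assumed constants and simplified helpers, not the kernel's pte_basic_t machinery) just demonstrates that the pfn round-trips cleanly through either layout.

```c
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT        12

/* Generic 32-bit case: RPN naturally aligned at PAGE_SHIFT. */
#define PTE_RPN_SHIFT_GEN (PAGE_SHIFT)
/* FSL BookE + CONFIG_PTE_64BIT: flags area extended by 8 bits. */
#define PTE_RPN_SHIFT_FSL (PAGE_SHIFT + 8)

/* Simplified stand-ins for pfn_pte()/pte_pfn(), parameterised by the shift. */
static uint64_t mk_pte_val(uint64_t pfn, uint64_t prot, unsigned int shift)
{
        return (pfn << shift) | prot;
}

static uint64_t pte_to_pfn(uint64_t pte, unsigned int shift)
{
        return pte >> shift;
}

int main(void)
{
        uint64_t pfn  = 0xabcde;
        uint64_t prot = 0x1f;   /* some illustrative flag bits */

        /* The pfn survives the round trip wherever the RPN sits. */
        assert(pte_to_pfn(mk_pte_val(pfn, prot, PTE_RPN_SHIFT_GEN),
                          PTE_RPN_SHIFT_GEN) == pfn);
        assert(pte_to_pfn(mk_pte_val(pfn, prot, PTE_RPN_SHIFT_FSL),
                          PTE_RPN_SHIFT_FSL) == pfn);
        return 0;
}
```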