author | Russell King <rmk@dyn-67.arm.linux.org.uk> | 2005-11-07 10:10:28 +0000
---|---|---
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2005-11-07 10:10:28 +0000
commit | cd03adb0812fe0fb06cdb935e61ec9514254e951 (patch)
tree | 6a79344e646b6c3815a77f0eb4ed721f50c31701 /arch/arm/mm/mm-armv.c
parent | 0b154bb7d0cce80e9c0bcf11d4f9e71b59409d26 (diff)
download | linux-cd03adb0812fe0fb06cdb935e61ec9514254e951.tar.bz2
[ARM SMP] Add support for shared memory attribute
We need to set the shared memory attribute in the page tables
on SMP systems to allow cache coherency to operate.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
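As a standalone illustration of what "setting the shared memory attribute" amounts to at the bit level, the sketch below ORs a shared bit into both the user and kernel PTE protection values and a shareable bit into the section descriptor, guarded by CONFIG_SMP as in the patch. The bit positions and initial values here are placeholders chosen for illustration, not the kernel's real definitions, which live in the ARM pgtable headers; only the pattern mirrors the change.

#include <stdio.h>

/* Placeholder bit positions, for illustration only. */
#define L_PTE_SHARED (1u << 10)  /* PTE-level "shared" attribute */
#define PMD_SECT_S   (1u << 16)  /* section-level "shareable" bit */

int main(void)
{
	unsigned int user_pgprot = 0;       /* placeholder cache-policy bits */
	unsigned int kern_pgprot = 0;
	unsigned int memory_prot_sect = 0;  /* placeholder section bits */

#ifdef CONFIG_SMP
	/*
	 * On SMP, every normal-memory mapping must carry the shared
	 * attribute so the hardware keeps the CPUs' caches coherent.
	 */
	user_pgprot |= L_PTE_SHARED;
	kern_pgprot |= L_PTE_SHARED;
	memory_prot_sect |= PMD_SECT_S;
#endif

	printf("user %#x kern %#x sect %#x\n",
	       user_pgprot, kern_pgprot, memory_prot_sect);
	return 0;
}

Building the sketch with `cc -DCONFIG_SMP sketch.c` sets the bits; building without the define leaves them clear, matching the patch's behaviour on uniprocessor configurations.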
Diffstat (limited to 'arch/arm/mm/mm-armv.c')
-rw-r--r-- | arch/arm/mm/mm-armv.c | 48
1 file changed, 32 insertions, 16 deletions
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index fb5b40289de2..9e50127be635 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -354,7 +354,7 @@ void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
-	unsigned int user_pgprot;
+	unsigned int user_pgprot, kern_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -381,7 +381,7 @@ void __init build_mem_type_table(void)
 	}
 
 	cp = &cache_policies[cachepolicy];
-	user_pgprot = cp->pte;
+	kern_pgprot = user_pgprot = cp->pte;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -393,6 +393,7 @@ void __init build_mem_type_table(void)
 		 */
 		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
 		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
+
 		/*
 		 * Mark cache clean areas and XIP ROM read only
 		 * from SVC mode and no access from userspace.
@@ -412,32 +413,47 @@ void __init build_mem_type_table(void)
 		 * (iow, non-global)
 		 */
 		user_pgprot |= L_PTE_ASID;
+
+#ifdef CONFIG_SMP
+		/*
+		 * Mark memory with the "shared" attribute for SMP systems
+		 */
+		user_pgprot |= L_PTE_SHARED;
+		kern_pgprot |= L_PTE_SHARED;
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+#endif
 	}
 
+	for (i = 0; i < 16; i++) {
+		unsigned long v = pgprot_val(protection_map[i]);
+		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
+		protection_map[i] = __pgprot(v);
+	}
+
+	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
+	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+
 	if (cpu_arch >= CPU_ARCH_ARMv5) {
-		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
-		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+#ifndef CONFIG_SMP
+		/*
+		 * Only use write-through for non-SMP systems
+		 */
+		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
+		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
+#endif
 	} else {
-		mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
-		mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
 	}
 
+	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
+				 L_PTE_DIRTY | L_PTE_WRITE |
+				 L_PTE_EXEC | kern_pgprot);
+
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
-	for (i = 0; i < 16; i++) {
-		unsigned long v = pgprot_val(protection_map[i]);
-		v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
-		protection_map[i] = __pgprot(v);
-	}
-
-	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
-				 L_PTE_DIRTY | L_PTE_WRITE |
-				 L_PTE_EXEC | cp->pte);
-
 	switch (cp->pmd) {
 	case PMD_SECT_WT:
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
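To see the effect of the relocated protection_map rewrite in isolation, here is a minimal standalone C model of the same bit manipulation. The bit positions and initial table values below are placeholders for illustration, not the kernel's real definitions; only the mask-then-OR pattern mirrors the hunk above, which strips the old cacheable/bufferable bits from every entry and ORs in user_pgprot so that all sixteen user protection combinations pick up the shared attribute consistently.

#include <stdio.h>

/* Placeholder bit positions, for illustration only. */
#define L_PTE_BUFFERABLE (1u << 2)
#define L_PTE_CACHEABLE  (1u << 3)
#define L_PTE_SHARED     (1u << 10)

int main(void)
{
	/* Placeholder stand-in for the kernel's 16-entry protection_map. */
	unsigned long protection_map[16];
	unsigned long user_pgprot;
	int i;

	for (i = 0; i < 16; i++)
		protection_map[i] = L_PTE_BUFFERABLE | L_PTE_CACHEABLE;

	/* Cache-policy bits, here including the SMP "shared" attribute. */
	user_pgprot = L_PTE_CACHEABLE | L_PTE_SHARED;

	/*
	 * Same pattern as the diff: clear the existing cacheable and
	 * bufferable bits in each entry, then OR in the chosen policy.
	 */
	for (i = 0; i < 16; i++) {
		unsigned long v = protection_map[i];
		v = (v & ~(L_PTE_BUFFERABLE | L_PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = v;
	}

	printf("entry 0 is now %#lx (shared bit set: %s)\n",
	       protection_map[0],
	       (protection_map[0] & L_PTE_SHARED) ? "yes" : "no");
	return 0;
}

Note the patch also moves this loop before the vectors-page setup so that user_pgprot is fully composed, including L_PTE_SHARED, by the time it is applied; the model above only shows the rewrite itself.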