Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c  39
1 file changed, 35 insertions(+), 4 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index f86ce1a9f525..cda7c40999b6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -357,6 +358,29 @@ const struct mem_type *get_mem_type(unsigned int type)
 EXPORT_SYMBOL(get_mem_type);
 
 /*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long vaddr = __fix_to_virt(idx);
+	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	/* Make sure fixmap region does not exceed available allocation. */
+	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+		     FIXADDR_END);
+	BUG_ON(idx >= __end_of_fixed_addresses);
+
+	if (pgprot_val(prot))
+		set_pte_at(NULL, vaddr, pte,
+			   pfn_pte(phys >> PAGE_SHIFT, prot));
+	else
+		pte_clear(NULL, vaddr, pte);
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
+
+/*
  * Adjust the PMD section entries according to the CPU in use.
  */
 static void __init build_mem_type_table(void)
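
The generic wrappers in include/asm-generic/fixmap.h are the usual entry points
to this function: set_fixmap(idx, phys) passes FIXMAP_PAGE_NORMAL, while
clear_fixmap(idx) passes an all-zero pgprot (FIXMAP_PAGE_CLEAR by default),
which is what the pgprot_val(prot) test selects on. A minimal caller sketch,
assuming a hypothetical slot FIX_TEXT_POKE0 in enum fixed_addresses and a
context with preemption disabled (e.g. a stop_machine() callback), as the
comment above requires:

	/*
	 * Sketch only: maps one physical page through a hypothetical fixmap
	 * slot and returns a kernel virtual pointer to the requested byte.
	 */
	static void *poke_map(phys_addr_t phys)
	{
		/* Installs the PTE via __set_fixmap(..., FIXMAP_PAGE_NORMAL). */
		set_fixmap(FIX_TEXT_POKE0, phys & PAGE_MASK);

		/* Fixmap virtual addresses are fixed at compile time. */
		return (void *)fix_to_virt(FIX_TEXT_POKE0) + (phys & ~PAGE_MASK);
	}

	static void poke_unmap(void)
	{
		/* Takes the pte_clear() path above, then the local TLB flush. */
		clear_fixmap(FIX_TEXT_POKE0);
	}
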
@@ -1296,10 +1320,10 @@ static void __init kmap_init(void)
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
-	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
-		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
+
+	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+			_PAGE_KERNEL_TABLE);
 }
 
 static void __init map_lowmem(void)
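
With the fixmap_page_table global gone, __set_fixmap() re-derives the PTE on
every call by walking the kernel page tables, so the table allocated here only
needs to exist, not be remembered. The lookup, pulled out as an illustrative
helper (the name fixmap_pte is ours, not the patch's):

	/* How the PTEs allocated in kmap_init() are found again later. */
	static pte_t *fixmap_pte(unsigned long vaddr)
	{
		return pte_offset_kernel(pmd_off_k(vaddr), vaddr);
	}

Moving the early_pte_alloc() call out of the CONFIG_HIGHMEM block also makes
fixmap slots usable on non-highmem configurations.
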
@@ -1319,13 +1343,20 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
 
 			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
+
+			create_mapping(&map);
 		} else {
 			/* This better cover the entire kernel */
 			if (start < kernel_x_start) {
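
The rewritten branch splits what used to be a single RWX case: memblock
regions entirely above the kernel's executable image are now mapped
MT_MEMORY_RW (non-executable), while regions entirely below it keep
MT_MEMORY_RWX and anything straddling the image falls through to the else
branch, which splits it. Condensed into a hypothetical helper for
illustration:

	/*
	 * Sketch of the three cases above (helper is illustrative, not in
	 * the patch): below the kernel image stays RWX, above it becomes RW,
	 * and an overlapping region is split by the caller's else branch.
	 */
	static unsigned int lowmem_map_type(phys_addr_t start, phys_addr_t end,
					    phys_addr_t kernel_x_start,
					    phys_addr_t kernel_x_end)
	{
		if (end < kernel_x_start)
			return MT_MEMORY_RWX;
		if (start >= kernel_x_end)
			return MT_MEMORY_RW;
		return MT_MEMORY_RWX;	/* overlap: caller splits the region */
	}
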