author	Ralf Baechle <ralf@linux-mips.org>	2009-04-25 11:25:34 +0200
committer	Ralf Baechle <ralf@linux-mips.org>	2009-05-14 13:50:29 +0100
commit	bb86bf28aec6d0a207ae09f38a43e94133d4d6db (patch)
tree	34bd8f653eb10dc4eb82aa8ef2576475346070e1 /arch
parent	0b54352600b820a6d25f151cbd8975ed9b2aeb09 (diff)
MIPS: Fix highmem.
Commit 351336929ccf222ae38ff0cb7a8dd5fd5c6236a0 (kernel.org) resp. b3594a089f1c17ff919f8f78505c3f20e1f6f8ce (linux-mips.org):

> From: Chris Dearman <chris@mips.com>
> Date: Wed, 19 Sep 2007 00:58:24 +0100
> Subject: [PATCH] [MIPS] Allow setting of the cache attribute at run time.
>
> Slightly tacky, but there is a precedent in the sparc architecture code.

introduces the variable _page_cachable_default, which defaults to zero and is used to create the prototype PTE for __kmap_atomic in arch/mips/mm/init.c:kmap_init before it is initialized in arch/mips/mm/c-r4k.c:coherency_setup. As a result, the default value of 0 is used as the CCA of kmap_atomic pages, which on many processors is not a defined CCA value and may result in writes to kmap_atomic pages getting corrupted.

Debugged by Jon Fraser (jfraser@broadcom.com).

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
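The ordering problem described above can be modelled in plain user-space C. The sketch below is only an illustration under assumed names (page_cachable_default, PAGE_KERNEL_ATTRS, kmap_init_model and coherency_setup_model mirror the kernel identifiers but are not the kernel's code): on MIPS, PAGE_KERNEL folds _page_cachable_default into the protection bits at the point where the macro is expanded, so caching the value in kmap_init(), which runs before coherency_setup(), bakes in CCA 0, whereas expanding PAGE_KERNEL at mk_pte() time, as this patch does, picks up the CCA set by coherency_setup().

/*
 * Hedged user-space model of the boot-ordering bug; not kernel code.
 * The names mirror the kernel's, the constants are made up.
 */
#include <stdio.h>

static unsigned int page_cachable_default;      /* stays 0 until "coherency_setup" runs */

/* Like MIPS PAGE_KERNEL: the cache attribute is read when the macro is expanded. */
#define PAGE_KERNEL_ATTRS (0x100u | page_cachable_default)

static unsigned int cached_kmap_prot;           /* old scheme: pgprot cached early */

static void kmap_init_model(void)
{
	cached_kmap_prot = PAGE_KERNEL_ATTRS;   /* expands while the default is still 0 */
}

static void coherency_setup_model(void)
{
	page_cachable_default = 0x3;            /* e.g. a cacheable CCA chosen at boot */
}

int main(void)
{
	kmap_init_model();                      /* boot order: kmap_init first ...    */
	coherency_setup_model();                /* ... coherency_setup only later     */

	printf("old scheme (prot cached in kmap_init):     %#x\n", cached_kmap_prot);
	printf("new scheme (PAGE_KERNEL read at map time): %#x\n", PAGE_KERNEL_ATTRS);
	return 0;
}

Run, this prints 0x100 for the old scheme (cache attribute bits still 0) and 0x103 for the new one, which is the point of dropping the cached kmap_prot variable in favour of using PAGE_KERNEL directly in __kmap_atomic.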
Diffstat (limited to 'arch')
-rw-r--r--	arch/mips/include/asm/fixmap.h	3
-rw-r--r--	arch/mips/include/asm/highmem.h	6
-rw-r--r--	arch/mips/mm/highmem.c	25
-rw-r--r--	arch/mips/mm/init.c	26
4 files changed, 26 insertions(+), 34 deletions(-)
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 9cc8522a394f..0f5caa1307f1 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -108,6 +108,9 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
return __virt_to_fix(vaddr);
}
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+
/*
* Called from pgtable_init()
*/
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 4374ab2adc75..25adfb02923d 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -30,8 +30,6 @@
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
/*
@@ -62,6 +60,10 @@ extern struct page *__kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() flush_cache_all()
+extern void kmap_init(void);
+
+#define kmap_prot PAGE_KERNEL
+
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 4481656d1065..2b1309b2580a 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,7 +1,12 @@
#include <linux/module.h>
#include <linux/highmem.h>
+#include <asm/fixmap.h>
#include <asm/tlbflush.h>
+static pte_t *kmap_pte;
+
+unsigned long highstart_pfn, highend_pfn;
+
void *__kmap(struct page *page)
{
void *addr;
@@ -14,6 +19,7 @@ void *__kmap(struct page *page)
return addr;
}
+EXPORT_SYMBOL(__kmap);
void __kunmap(struct page *page)
{
@@ -22,6 +28,7 @@ void __kunmap(struct page *page)
return;
kunmap_high(page);
}
+EXPORT_SYMBOL(__kunmap);
/*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -48,11 +55,12 @@ void *__kmap_atomic(struct page *page, enum km_type type)
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
- set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+ set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
local_flush_tlb_one((unsigned long)vaddr);
return (void*) vaddr;
}
+EXPORT_SYMBOL(__kmap_atomic);
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
@@ -77,6 +85,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
pagefault_enable();
}
+EXPORT_SYMBOL(__kunmap_atomic);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
@@ -92,7 +101,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
debug_kmap_atomic(type);
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+ set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
flush_tlb_one(vaddr);
return (void*) vaddr;
@@ -111,7 +120,11 @@ struct page *__kmap_atomic_to_page(void *ptr)
return pte_page(*pte);
}
-EXPORT_SYMBOL(__kmap);
-EXPORT_SYMBOL(__kunmap);
-EXPORT_SYMBOL(__kmap_atomic);
-EXPORT_SYMBOL(__kunmap_atomic);
+void __init kmap_init(void)
+{
+ unsigned long kmap_vstart;
+
+ /* cache the first kmap pte */
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index d9348946a19e..c5511294a9ee 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -104,14 +104,6 @@ unsigned long setup_zero_pages(void)
return 1UL << order;
}
-/*
- * These are almost like kmap_atomic / kunmap_atmic except they take an
- * additional address argument as the hint.
- */
-
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
-
#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
@@ -264,24 +256,6 @@ void copy_from_user_page(struct vm_area_struct *vma,
}
}
-#ifdef CONFIG_HIGHMEM
-unsigned long highstart_pfn, highend_pfn;
-
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-static void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
- kmap_prot = PAGE_KERNEL;
-}
-#endif /* CONFIG_HIGHMEM */
-
void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{