author	Paul Mundt <lethal@linux-sh.org>	2008-11-10 20:00:45 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2008-11-10 20:00:45 +0900
commit	acca4f4d9bd657e8bc7e1665ba5077465138f133 (patch)
tree	39b107c67d9dc59e6064e528457ca97ba4396ba2 /arch/sh/mm
parent	f7160c7573615ec82c691e294cf80d920b5d588d (diff)
download	linux-acca4f4d9bd657e8bc7e1665ba5077465138f133.tar.bz2
sh: Handle fixmap TLB eviction more coherently.
There was a race in the kmap_coherent() implementation. While we guarded against preemption, there was nothing preventing eviction of the pre-faulted fixmap entry from the UTLB. Under certain workloads this would result in the fixmap entries used for cache colouring being evicted from the UTLB in the midst of a copy_page().

In addition to pre-faulting, we also make sure to preserve the PTEs in the kernel page table and introduce a cached PTE for kmap_coherent() usage. This follows a similar change on MIPS ("[MIPS] Fix aliasing bug in copy_to_user_page / copy_from_user_page").

Reported-by: Hideo Saito <saito@densan.co.jp>
Reported-by: CHIKAMA Masaki <masaki.chikama@gmail.com>
Tested-by: Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
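To make the mechanism concrete, here is a minimal sketch of the mapping path once this change is applied, assuming the usual SH-4 kmap_coherent() structure. Only update_mmu_cache(), set_pte(), kmap_coherent_pte and the FIX_CMAP_* indices are visible in the hunks below; the surrounding lines (the colour/index calculation and mk_pte()) are reconstructed for illustration and may not match the tree exactly.

	/* Sketch only: the shape of kmap_coherent() after this patch. */
	static inline void *kmap_coherent(struct page *page, unsigned long addr)
	{
		enum fixed_addresses idx;
		unsigned long vaddr;
		pte_t pte;

		inc_preempt_count();

		/*
		 * Pick the fixmap slot whose cache colour matches the user
		 * address (reconstructed; the exact computation is in pg-sh4.c).
		 */
		idx = (addr & CACHE_ALIAS) >> PAGE_SHIFT;
		vaddr = __fix_to_virt(FIX_CMAP_END - idx);
		pte = mk_pte(page, PAGE_KERNEL);

		/* Pre-fault: load the translation straight into the UTLB. */
		update_mmu_cache(NULL, vaddr, pte);

		/*
		 * New in this patch: also keep the PTE in the kernel page
		 * table (swapper_pg_dir), so a UTLB eviction in the middle of
		 * a copy_page() is refilled by the TLB miss handler instead
		 * of leaving the fixmap slot unmapped.
		 */
		set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);

		return (void *)vaddr;
	}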
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/init.c	12
-rw-r--r--	arch/sh/mm/pg-sh4.c	17
2 files changed, 26 insertions(+), 3 deletions(-)
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 4abf00031dae..6cbef8caeb56 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -137,6 +137,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
+ unsigned long vaddr;
int nid;
/* We don't need to map the kernel through the TLB, as
@@ -148,10 +149,15 @@ void __init paging_init(void)
* check for a null value. */
set_TTB(swapper_pg_dir);
- /* Populate the relevant portions of swapper_pg_dir so that
+ /*
+ * Populate the relevant portions of swapper_pg_dir so that
* we can use the fixmap entries without calling kmalloc.
- * pte's will be filled in by __set_fixmap(). */
- page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);
+ * pte's will be filled in by __set_fixmap().
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ page_table_range_init(vaddr, 0, swapper_pg_dir);
+
+ kmap_coherent_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
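A note on the new start address in paging_init(): fixmap slots are allocated top-down, so the highest index (__end_of_fixed_addresses - 1) corresponds to the lowest fixmap virtual address, and masking with PMD_MASK rounds that address down to a PMD boundary so page tables cover the whole fixmap region; the end argument of 0 wraps around to the top of the address space. The user-space snippet below only illustrates the arithmetic, with placeholder constants that are not taken from the patch.

	#include <stdio.h>

	/*
	 * Placeholder values for illustration only; the real definitions live
	 * in the sh fixmap and pgtable headers.
	 */
	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21
	#define PMD_MASK	(~((1UL << PMD_SHIFT) - 1))
	#define FIXADDR_TOP	0xffe00000UL

	/* Fixmap slots grow downward: a higher index means a lower address. */
	#define __fix_to_virt(idx)	(FIXADDR_TOP - ((unsigned long)(idx) << PAGE_SHIFT))

	int main(void)
	{
		/* Hypothetical slot count, standing in for __end_of_fixed_addresses. */
		unsigned int end_of_fixed_addresses = 64;
		unsigned long lowest = __fix_to_virt(end_of_fixed_addresses - 1);
		unsigned long start  = lowest & PMD_MASK;	/* round down to a PMD boundary */

		printf("lowest fixmap address : %#lx\n", lowest);
		printf("range_init start      : %#lx (end = 0 wraps to the address-space top)\n", start);
		return 0;
	}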
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 38870e0fc182..2fe14da1f839 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -7,6 +7,7 @@
* Released under the terms of the GNU GPL v2.0.
*/
#include <linux/mm.h>
+#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
@@ -16,6 +17,20 @@
#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
+#define kmap_get_fixmap_pte(vaddr) \
+ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+
+static pte_t *kmap_coherent_pte;
+
+void __init kmap_coherent_init(void)
+{
+ unsigned long vaddr;
+
+ /* cache the first coherent kmap pte */
+ vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
+ kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+}
+
static inline void *kmap_coherent(struct page *page, unsigned long addr)
{
enum fixed_addresses idx;
@@ -34,6 +49,8 @@ static inline void *kmap_coherent(struct page *page, unsigned long addr)
update_mmu_cache(NULL, vaddr, pte);
+ set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
+
return (void *)vaddr;
}
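For context, and not part of the patch itself: the commit message mentions evictions "in the midst of a copy_page()". The sketch below shows the kind of caller the cached PTE protects; kunmap_coherent() and the copy_user_page() body are not visible in the hunks above and are reconstructed purely for illustration.

	/*
	 * Illustrative caller pattern only (details reconstructed, not from
	 * the hunks above): map the destination page at its colour-matched
	 * fixmap slot, copy a full page, then drop the mapping.  With this
	 * patch, a UTLB eviction inside copy_page() is refilled from
	 * swapper_pg_dir rather than leaving the fixmap slot untranslated.
	 */
	void copy_user_page(void *to, void *from, unsigned long address,
			    struct page *page)
	{
		/*
		 * 'to' (the direct-mapped address) is deliberately not used
		 * here, so the copy goes through the colour-matched alias.
		 */
		void *vto = kmap_coherent(page, address);	/* pre-faults UTLB, caches PTE */

		copy_page(vto, from);
		kunmap_coherent(vto);				/* reconstructed unmap helper */
	}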