 arch/arc/include/asm/cacheflush.h | 10 +++++++++-
 arch/arc/mm/cache_arc700.c        | 14 +++-----------
 arch/arc/mm/tlb.c                 | 12 +++++++++---
 3 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index 97ee96f26505..46f13e7314dc 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -20,12 +20,20 @@
#include <linux/mm.h>
+/*
+ * Semantically we need this because the icache doesn't snoop the dcache/DMA.
+ * However, an ARC cache flush requires paddr as well as vaddr, and the latter
+ * is not available in the flush_icache_page() API. So we no-op it here but do
+ * the equivalent work in update_mmu_cache()
+ */
+#define flush_icache_page(vma, page)
+
void flush_cache_all(void);
void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
int len);
+void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
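
As a hedged illustration of the new header contract (not part of the patch): flush_icache_page() now expands to nothing, and a caller that has both the physical and the user-virtual address of an executable page invalidates its icache footprint through __inv_icache_page(). The helper name example_sync_icache() below is hypothetical.

#include <linux/mm.h>
#include <asm/cacheflush.h>

/* hypothetical caller: make the icache coherent for one freshly mapped exec page */
static void example_sync_icache(struct vm_area_struct *vma,
				unsigned long paddr, unsigned long u_vaddr)
{
	/* non-executable mappings never feed the icache */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	/* both addresses are page aligned; one whole page gets invalidated */
	__inv_icache_page(paddr & PAGE_MASK, u_vaddr & PAGE_MASK);
}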
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index c02aac649e84..a65c13942766 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -716,18 +716,10 @@ void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
__dc_line_op(paddr, len, OP_FLUSH);
}
-/*
- * XXX: This also needs to be optim using pg_arch_1
- * This is called when a page-cache page is about to be mapped into a
- * user process' address space. It offers an opportunity for a
- * port to ensure d-cache/i-cache coherency if necessary.
- */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+/* wrapper so the alignment checks in the flush loop are eliminated at compile time */
+void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
- if (!(vma->vm_flags & VM_EXEC))
- return;
-
- __ic_line_inv((unsigned long)page_address(page), PAGE_SIZE);
+ __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}
void flush_icache_all(void)
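
A hedged sketch of why the page-sized wrapper compiles leaner (this is not the real __ic_line_inv_vaddr(); ICACHE_LINE_LEN and example_line_count() are only illustrative): a generic range flush must round an arbitrary (addr, len) pair to cache-line boundaries, but with a page-aligned address and a PAGE_SIZE length both adjustments fold to compile-time constants.

#define ICACHE_LINE_LEN	64UL	/* illustrative line size, not the real one */

/* generic case: head/tail rounding cannot be avoided */
static inline unsigned long example_line_count(unsigned long addr,
					       unsigned long len)
{
	unsigned long off = addr & (ICACHE_LINE_LEN - 1);

	return (len + off + ICACHE_LINE_LEN - 1) / ICACHE_LINE_LEN;
}

/*
 * page case: addr is page aligned (off == 0) and len == PAGE_SIZE, so the
 * expression above reduces to the constant PAGE_SIZE / ICACHE_LINE_LEN and
 * the compiler can drop the rounding from the flush loop entirely.
 */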
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index c03364af9363..086be526072a 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -422,12 +422,18 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
* when a new PTE is entered in Page Tables or an existing one
* is modified. We aggressively pre-install a TLB entry
*/
-
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress,
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
pte_t *ptep)
{
- create_tlb(vma, vaddress, ptep);
+ unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+
+ create_tlb(vma, vaddr, ptep);
+ /* icache doesn't snoop dcache, thus needs to be made coherent here */
+ if (vma->vm_flags & VM_EXEC) {
+ unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+ __inv_icache_page(paddr, vaddr);
+ }
}
/* Read the Cache Build Configuration Registers, Decode them and save into
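
For concreteness, a standalone userspace sketch (not kernel code) of the masking that update_mmu_cache() now performs, assuming ARC's default 8 KB page size; the faulting address and the PTE value are made up for the example.

#include <stdio.h>

#define PAGE_SHIFT	13			/* assumed 8 KB ARC pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long vaddr_unaligned = 0x20001234UL;	/* faulting user address */
	unsigned long pte = 0x8abc2c43UL;		/* hypothetical PTE: pfn | protection bits */

	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;	/* -> 0x20000000 */
	unsigned long paddr = pte & PAGE_MASK;			/* -> 0x8abc2000 */

	printf("vaddr=%#lx paddr=%#lx\n", vaddr, paddr);
	return 0;
}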