Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/cacheflush.h   | 22
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  | 26
-rw-r--r--  arch/arm/include/asm/memory.h       | 16
-rw-r--r--  arch/arm/include/asm/pgtable.h      | 14
-rw-r--r--  arch/arm/include/asm/swab.h         | 19
-rw-r--r--  arch/arm/include/asm/system.h       | 19
6 files changed, 82 insertions(+), 34 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3d0cdd21b882..9fd6d3ab68c0 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -331,15 +331,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
* Convert calls to our calling convention.
*/
#define flush_cache_all() __cpuc_flush_kern_all()
-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_all();
}
static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
@@ -347,7 +347,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
}
static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
unsigned long addr = user_addr & PAGE_MASK;
@@ -356,7 +356,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
}
static inline void
-flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *kaddr,
unsigned long len, int write)
{
@@ -365,6 +365,16 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
__cpuc_coherent_kern_range(addr, addr + len);
}
}
+
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+ vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+ vivt_flush_cache_range(vma,start,end)
+#define flush_cache_page(vma,addr,pfn) \
+ vivt_flush_cache_page(vma,addr,pfn)
+#define flush_ptrace_access(vma,page,ua,ka,len,write) \
+ vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
@@ -410,8 +420,6 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
*/
extern void flush_dcache_page(struct page *);
-extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
static inline void __flush_icache_all(void)
{
#ifdef CONFIG_ARM_ERRATA_411920
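
Note (illustrative, not part of the patch): renaming the inline VIVT helpers to vivt_*() keeps them visible even when CONFIG_CPU_CACHE_VIPT is set, so an out-of-line VIPT implementation can fall back to them at runtime. A minimal sketch under that assumption, using cache_is_vivt() from <asm/cachetype.h>; the function name and body are hypothetical, not the kernel's actual VIPT code:

#include <linux/mm.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>

/* Sketch: a VIPT-build flush routine that defers to the VIVT inline
 * when the running CPU actually has a VIVT cache. */
static void example_vipt_aware_flush_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);	/* the renamed inline, now always available */
		return;
	}
	/* ... VIPT-specific handling would follow here ... */
}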
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index ff46dfa68a97..a96300bf83fd 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -15,20 +15,15 @@
* must not be used by drivers.
*/
#ifndef __arch_page_to_dma
-
-#if !defined(CONFIG_HIGHMEM)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
- return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+ return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}
-#elif defined(__pfn_to_bus)
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
- return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+ return pfn_to_page(__bus_to_pfn(addr));
}
-#else
-#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
-#endif
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
@@ -45,6 +40,11 @@ static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
return __arch_page_to_dma(dev, page);
}
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+{
+ return __arch_dma_to_page(dev, addr);
+}
+
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
return __arch_dma_to_virt(dev, addr);
@@ -257,9 +257,11 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
*/
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+ enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
enum dma_data_direction);
/*
@@ -352,7 +354,6 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
{
/* nothing to do */
}
-#endif /* CONFIG_DMABOUNCE */
/**
* dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
@@ -371,8 +372,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- dma_unmap_single(dev, handle, size, dir);
+ /* nothing to do */
}
+#endif /* CONFIG_DMABOUNCE */
/**
* dma_sync_single_range_for_cpu
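
Note (illustrative, not part of the patch): page_to_dma() and the new dma_to_page() now round-trip through a PFN, so highmem pages no longer need a kernel virtual address to get a bus handle. A minimal sketch of that identity with a hypothetical helper, assuming the default 1:1 pfn-to-bus mapping:

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Hypothetical check: converting a page to a bus address and back
 * should yield the same struct page. */
static int dma_addr_round_trips(struct device *dev, struct page *page)
{
	dma_addr_t handle = page_to_dma(dev, page);	/* __pfn_to_bus(page_to_pfn(page)) */

	return dma_to_page(dev, handle) == page;	/* pfn_to_page(__bus_to_pfn(handle)) */
}

The dma_unmap_page() hunks above simply turn the non-DMABOUNCE stub into a no-op (instead of forwarding to dma_unmap_single()) and declare a real extern implementation when CONFIG_DMABOUNCE is enabled.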
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index cefedf062138..5421d82a2572 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -125,8 +125,10 @@
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
+#ifndef __virt_to_phys
#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
/*
* Convert a physical address to a Page Frame Number and back
@@ -134,6 +136,12 @@
#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+/*
+ * Convert a page to/from a physical address
+ */
+#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+
#ifndef __ASSEMBLY__
/*
@@ -194,7 +202,8 @@ static inline void *phys_to_virt(unsigned long x)
#ifndef __virt_to_bus
#define __virt_to_bus __virt_to_phys
#define __bus_to_virt __phys_to_virt
-#define __pfn_to_bus(x) ((x) << PAGE_SHIFT)
+#define __pfn_to_bus(x) __pfn_to_phys(x)
+#define __bus_to_pfn(x) __phys_to_pfn(x)
#endif
static inline __deprecated unsigned long virt_to_bus(void *x)
@@ -293,11 +302,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#endif /* !CONFIG_DISCONTIGMEM */
/*
- * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die.
- */
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-
-/*
* Optional coherency support. Currently used only by selected
* Intel XSC3-based systems.
*/
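
Note (illustrative, not part of the patch): page_to_phys()/phys_to_page() are now built on the pfn conversion macros, and the default bus mapping gains a __bus_to_pfn() inverse. A minimal sketch of the resulting identities, assuming the default mapping where bus addresses equal physical addresses; the helper name is hypothetical:

#include <linux/mm.h>
#include <asm/memory.h>

/* Hypothetical round trip through the new macros. */
static int phys_macros_consistent(struct page *page)
{
	unsigned long phys = page_to_phys(page);		/* __pfn_to_phys(page_to_pfn(page)) */
	unsigned long bus  = __pfn_to_bus(page_to_pfn(page));	/* == phys with the default mapping */

	return phys_to_page(phys) == page &&
	       __bus_to_pfn(bus) == page_to_pfn(page);
}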
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 201ccaa11f61..11397687f42c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -304,13 +304,23 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+#define __pgprot_modify(prot,mask,bits) \
+ __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) \
- __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
#define pgprot_writecombine(prot) \
- __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+#if __LINUX_ARM_ARCH__ >= 7
+#define pgprot_dmacoherent(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#else
+#define pgprot_dmacoherent(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
+#endif
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
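
Note (illustrative, not part of the patch): the new pgprot_dmacoherent() clears the memory-type and exec bits, then marks the mapping Normal non-cacheable ("bufferable") on ARMv7 or strongly-ordered/uncached on earlier architectures. A minimal usage sketch, assuming pgprot_kernel as the starting protection; the helper is hypothetical:

#include <asm/pgtable.h>

/* Hypothetical example: derive page protections for a DMA-coherent
 * remapping from the kernel's default protection bits. */
static pgprot_t example_dma_prot(void)
{
	/* Drops L_PTE_MT_MASK | L_PTE_EXEC, then sets L_PTE_MT_BUFFERABLE
	 * on ARMv7+, or L_PTE_MT_UNCACHED on pre-v7 CPUs. */
	return pgprot_dmacoherent(pgprot_kernel);
}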
diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h
index ca2bf2f6d6ea..9997ad20eff1 100644
--- a/arch/arm/include/asm/swab.h
+++ b/arch/arm/include/asm/swab.h
@@ -22,6 +22,24 @@
# define __SWAB_64_THRU_32__
#endif
+#if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+ __asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x));
+ return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+ __asm__ ("rev %0, %1" : "=r" (x) : "r" (x));
+ return x;
+}
+#define __arch_swab32 __arch_swab32
+
+#else
+
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__u32 t;
@@ -48,3 +66,4 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
#endif
+#endif
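
Note (illustrative, not part of the patch): on ARMv6 and later kernels the byte swaps now compile to single rev16/rev instructions instead of the older shift-and-mask sequence. A minimal sketch of the expected results through the generic swab16()/swab32() wrappers; the self-check function is hypothetical:

#include <linux/swab.h>

/* Hypothetical self-check: the __arch_* versions added above are picked
 * up by the generic wrappers on ARMv6+ builds; the results match the
 * C fallback either way. */
static int swab_results_ok(void)
{
	return swab16(0x1234) == 0x3412 &&
	       swab32(0x12345678) == 0x78563412;
}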
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index d65b2f5bf41f..058e7e90881d 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -138,21 +138,26 @@ extern unsigned int user_debug;
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
-#ifndef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb() dmb()
+#define rmb() dmb()
+#define wmb() dmb()
+#else
#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#endif
+
+#ifndef CONFIG_SMP
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#else
-#define mb() dmb()
-#define rmb() dmb()
-#define wmb() dmb()
-#define smp_mb() dmb()
-#define smp_rmb() dmb()
-#define smp_wmb() dmb()
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
#endif
+
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
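
Note (illustrative, not part of the patch): mb()/rmb()/wmb() now expand to a real dmb on ARMv7 or SMP builds, and on SMP the smp_* variants are defined in terms of them rather than duplicating the dmb. A minimal sketch of the classic publish pattern such a barrier orders; the ring structure and field names are hypothetical:

#include <asm/system.h>

/* Hypothetical single-producer ring: the payload must be visible
 * before the index that publishes it. */
struct ring {
	unsigned int head;
	unsigned int data[16];
};

static void ring_publish(struct ring *r, unsigned int val)
{
	r->data[r->head & 15] = val;
	wmb();		/* dmb on ARMv7/SMP; a compiler barrier on older non-coherent UP builds */
	r->head++;
}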