author		Linus Torvalds <torvalds@linux-foundation.org>	2010-03-01 09:15:15 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-01 09:15:15 -0800
commit		ac0f6f927db539e03e1f3f61bcd4ed57d5cde7a9 (patch)
tree		816e5ac643b15c2050c64a7075f0f7e13d86ea09 /arch/arm/include
parent		b1bf9368407ae7e89d8a005bb40beb70a41df539 (diff)
parent		9f33be2c3a80bdc2cc08342dd77fac87652e0548 (diff)
download	linux-ac0f6f927db539e03e1f3f61bcd4ed57d5cde7a9.tar.bz2
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (100 commits)
  ARM: Eliminate decompressor -Dstatic= PIC hack
  ARM: 5958/1: ARM: U300: fix inverted clk round rate
  ARM: 5956/1: misplaced parentheses
  ARM: 5955/1: ep93xx: move timer defines into core.c and document
  ARM: 5954/1: ep93xx: move gpio interrupt support to gpio.c
  ARM: 5953/1: ep93xx: fix broken build of clock.c
  ARM: 5952/1: ARM: MM: Add ARM_L1_CACHE_SHIFT_6 for handle inside each ARCH Kconfig
  ARM: 5949/1: NUC900 add gpio virtual memory map
  ARM: 5948/1: Enable timer0 to time4 clock support for nuc910
  ARM: 5940/2: ARM: MMCI: remove custom DBG macro and printk
  ARM: make_coherent(): fix problems with highpte, part 2
  MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself
  ARM: 5945/1: ep93xx: include correct irq.h in core.c
  ARM: 5933/1: amba-pl011: support hardware flow control
  ARM: 5930/1: Add PKMAP area description to memory.txt.
  ARM: 5929/1: Add checks to detect overlap of memory regions.
  ARM: 5928/1: Change type of VMALLOC_END to unsigned long.
  ARM: 5927/1: Make delimiters of DMA area globally visibly.
  ARM: 5926/1: Add "Virtual kernel memory..." printout.
  ARM: 5920/1: OMAP4: Enable L2 Cache
  ...

Fix up trivial conflict in arch/arm/mach-mx25/clock.c
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/atomic.h         | 228
-rw-r--r--  arch/arm/include/asm/cacheflush.h     |  56
-rw-r--r--  arch/arm/include/asm/clkdev.h         |   3
-rw-r--r--  arch/arm/include/asm/dma-mapping.h    |  79
-rw-r--r--  arch/arm/include/asm/io.h             |  11
-rw-r--r--  arch/arm/include/asm/mach/time.h      |   8
-rw-r--r--  arch/arm/include/asm/memory.h         |  23
-rw-r--r--  arch/arm/include/asm/mmu.h            |   1
-rw-r--r--  arch/arm/include/asm/mmu_context.h    |  15
-rw-r--r--  arch/arm/include/asm/page.h           |   7
-rw-r--r--  arch/arm/include/asm/perf_event.h     |  31
-rw-r--r--  arch/arm/include/asm/pgtable-nommu.h  |   4
-rw-r--r--  arch/arm/include/asm/pmu.h            |  75
-rw-r--r--  arch/arm/include/asm/setup.h          |  12
-rw-r--r--  arch/arm/include/asm/smp_plat.h       |   5
-rw-r--r--  arch/arm/include/asm/spinlock.h       |  36
-rw-r--r--  arch/arm/include/asm/system.h         |   3
-rw-r--r--  arch/arm/include/asm/thread_info.h    |   3
-rw-r--r--  arch/arm/include/asm/tlbflush.h       |   3
19 files changed, 485 insertions(+), 118 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index d0daeab2234e..e8ddec2cb158 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+ u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline u64 atomic64_read(atomic64_t *v)
+{
+ u64 result;
+
+ __asm__ __volatile__("@ atomic64_read\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter)
+ );
+
+ return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+ u64 tmp;
+
+ __asm__ __volatile__("@ atomic64_set\n"
+"1: ldrexd %0, %H0, [%1]\n"
+" strexd %0, %2, %H2, [%1]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_add\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" adds %0, %0, %3\n"
+" adc %H0, %H0, %H3\n"
+" strexd %1, %0, %H0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_return\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" adds %0, %0, %3\n"
+" adc %H0, %H0, %H3\n"
+" strexd %1, %0, %H0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_sub\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" subs %0, %0, %3\n"
+" sbc %H0, %H0, %H3\n"
+" strexd %1, %0, %H0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_sub_return\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" subs %0, %0, %3\n"
+" sbc %H0, %H0, %H3\n"
+" strexd %1, %0, %H0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+ u64 oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg\n"
+ "ldrexd %1, %H1, [%2]\n"
+ "mov %0, #0\n"
+ "teq %1, %3\n"
+ "teqeq %H1, %H3\n"
+ "strexdeq %0, %4, %H4, [%2]"
+ : "=&r" (res), "=&r" (oldval)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_xchg\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %1, %3, %H3, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&ptr->counter), "r" (new)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" subs %0, %0, #1\n"
+" sbc %H0, %H0, #0\n"
+" teq %H0, #0\n"
+" bmi 2f\n"
+" strexd %1, %0, %H0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+{
+ u64 val;
+ unsigned long tmp;
+ int ret = 1;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_unless\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" teq %0, %4\n"
+" teqeq %H0, %H4\n"
+" moveq %1, #0\n"
+" beq 2f\n"
+" adds %0, %0, %5\n"
+" adc %H0, %H0, %H5\n"
+" strexd %2, %0, %H0, [%3]\n"
+" teq %2, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (val), "=&r" (ret), "=&r" (tmp)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+
+ if (ret)
+ smp_mb();
+
+ return ret;
+}
+
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+
+#else /* !CONFIG_GENERIC_ATOMIC64 */
+#include <asm-generic/atomic64.h>
+#endif
#include <asm-generic/atomic-long.h>
#endif
#endif
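
The hunk above gives ARMv6K and later a native atomic64_t built on ldrexd/strexd exclusive-pair loops, with explicit smp_mb() barriers around the value-returning operations. As a rough usage sketch only (the counter and functions below are hypothetical, not part of this patch):

/*
 * Illustrative sketch: a driver-style 64-bit byte counter using the
 * atomic64_t API added above.  All names are made up for illustration.
 */
#include <asm/atomic.h>

static atomic64_t rx_bytes = ATOMIC64_INIT(0);

static void account_rx(size_t len)
{
	/* single ldrexd/strexd retry loop, safe against concurrent updates */
	atomic64_add(len, &rx_bytes);
}

static u64 read_rx_bytes(void)
{
	/* ldrexd returns both 32-bit halves atomically, so no torn reads */
	return atomic64_read(&rx_bytes);
}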
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 5fe4a2ad7fa3..72da7e045c6b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -197,21 +197,6 @@
* DMA Cache Coherency
* ===================
*
- * dma_inv_range(start, end)
- *
- * Invalidate (discard) the specified virtual address range.
- * May not write back any entries. If 'start' or 'end'
- * are not cache line aligned, those lines must be written
- * back.
- * - start - virtual start address
- * - end - virtual end address
- *
- * dma_clean_range(start, end)
- *
- * Clean (write back) the specified virtual address range.
- * - start - virtual start address
- * - end - virtual end address
- *
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -228,8 +213,9 @@ struct cpu_cache_fns {
void (*coherent_user_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_area)(void *, size_t);
- void (*dma_inv_range)(const void *, const void *);
- void (*dma_clean_range)(const void *, const void *);
+ void (*dma_map_area)(const void *, size_t, int);
+ void (*dma_unmap_area)(const void *, size_t, int);
+
void (*dma_flush_range)(const void *, const void *);
};
@@ -259,8 +245,8 @@ extern struct cpu_cache_fns cpu_cache;
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-#define dmac_inv_range cpu_cache.dma_inv_range
-#define dmac_clean_range cpu_cache.dma_clean_range
+#define dmac_map_area cpu_cache.dma_map_area
+#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -285,12 +271,12 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
+#define dmac_map_area __glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
#endif
@@ -331,12 +317,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
* processes address space. Really, we want to allow our "user
* space" model to handle this.
*/
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
- } while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+ unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
@@ -370,17 +352,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
}
}
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- unsigned long uaddr, void *kaddr,
- unsigned long len, int write)
-{
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
- unsigned long addr = (unsigned long)kaddr;
- __cpuc_coherent_kern_range(addr, addr + len);
- }
-}
-
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
vivt_flush_cache_mm(mm)
@@ -388,15 +359,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
- vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- unsigned long uaddr, void *kaddr,
- unsigned long len, int write);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index b6ec7c627b39..7a0690da5e63 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -27,4 +27,7 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
+void clkdev_add_table(struct clk_lookup *, size_t);
+int clk_add_alias(const char *, const char *, char *, struct device *);
+
#endif
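
clkdev_add_table() registers a whole array of clock lookups in one call, and clk_add_alias() creates an extra lookup name for an existing clock. A hedged sketch of how a machine file might use the table helper; the device names and struct clk objects are assumptions for illustration:

/* Illustrative only: hypothetical platform clock table. */
extern struct clk clk_uart, clk_timer;	/* assumed platform clocks */

static struct clk_lookup board_clk_lookups[] = {
	{ .dev_id = "uart0", .con_id = NULL,    .clk = &clk_uart  },
	{ .dev_id = NULL,    .con_id = "timer", .clk = &clk_timer },
};

static void __init board_clk_init(void)
{
	clkdev_add_table(board_clk_lookups, ARRAY_SIZE(board_clk_lookups));
}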
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index a96300bf83fd..256ee1c9f51a 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,18 +57,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
#endif
/*
- * DMA-consistent mapping functions. These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices. This is the "generic" version. The PCI specific version
- * is in pci.h
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
*
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change. Drivers must not use these.
*/
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
- size_t size, int rw);
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ extern void ___dma_single_cpu_to_dev(const void *, size_t,
+ enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ extern void ___dma_single_dev_to_cpu(const void *, size_t,
+ enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_page_dev_to_cpu(page, off, size, dir);
+}
/*
* Return whether the given device DMA address mask can be supported
@@ -304,8 +344,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
{
BUG_ON(!valid_dma_direction(dir));
- if (!arch_is_coherent())
- dma_cache_maint(cpu_addr, size, dir);
+ __dma_single_cpu_to_dev(cpu_addr, size, dir);
return virt_to_dma(dev, cpu_addr);
}
@@ -329,8 +368,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
{
BUG_ON(!valid_dma_direction(dir));
- if (!arch_is_coherent())
- dma_cache_maint_page(page, offset, size, dir);
+ __dma_page_cpu_to_dev(page, offset, size, dir);
return page_to_dma(dev, page) + offset;
}
@@ -352,7 +390,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- /* nothing to do */
+ __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}
/**
@@ -372,7 +410,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- /* nothing to do */
+ __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
+ size, dir);
}
#endif /* CONFIG_DMABOUNCE */
@@ -400,7 +439,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
{
BUG_ON(!valid_dma_direction(dir));
- dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+ if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+ return;
+
+ __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -412,8 +454,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
return;
- if (!arch_is_coherent())
- dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+ __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev,
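
The comment block introduced above describes the buffer-ownership model these helpers implement: caches are cleaned on the CPU-to-device transition, and invalidation is deferred to the device-to-CPU transition (unmap/sync) to cope with speculative prefetching. In driver terms the pattern looks roughly like this; the hardware helpers named below are assumptions, not real API:

/* Illustrative only: hypothetical receive path using the streaming DMA API. */
static void example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* CPU -> device: dirty cache lines are written back before DMA starts */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	start_hw_dma(handle, len);	/* assumed hardware-specific helper */
	wait_hw_dma_done();		/* assumed hardware-specific helper */

	/* device -> CPU: speculatively fetched lines are discarded first */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	process_rx(buf, len);		/* CPU owns the buffer again */
}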
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d2a59cfc30ce..c980156f3263 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
/*
* __arm_ioremap takes CPU physical address.
* __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
+ * The _caller variety takes a __builtin_return_address(0) value for
+ * /proc/vmalloc to use - and should only be used in non-inline functions.
*/
-extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
+ size_t, unsigned int, void *);
+extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+ void *);
+
+extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
extern void __iounmap(volatile void __iomem *addr);
/*
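
The new _caller variants thread a return address through to the vmalloc bookkeeping so each ioremap region can be attributed to the function that created it; as the comment notes, they should only be called from non-inline wrappers. A sketch of such a wrapper (the wrapper itself is hypothetical; MT_DEVICE is the usual device memory type from <asm/mach/map.h>):

/* Illustrative only: hypothetical out-of-line ioremap wrapper. */
void __iomem *board_ioremap(unsigned long phys, size_t size)
{
	/*
	 * Forward our own return address so the mapping is attributed to
	 * board_ioremap()'s caller rather than to this helper.
	 */
	return __arm_ioremap_caller(phys, size, MT_DEVICE,
				    __builtin_return_address(0));
}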
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index b2cc1fcd0400..8bffc3ff3acf 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -46,12 +46,4 @@ struct sys_timer {
extern struct sys_timer *system_timer;
extern void timer_tick(void);
-/*
- * Kernel time keeping support.
- */
-struct timespec;
-extern int (*set_rtc)(void);
-extern void save_time_delta(struct timespec *delta, struct timespec *rtc);
-extern void restore_time_delta(struct timespec *delta, struct timespec *rtc);
-
#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 5421d82a2572..4312ee5e3d0b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -76,6 +76,17 @@
*/
#define IOREMAP_MAX_ORDER 24
+/*
+ * Size of DMA-consistent memory region. Must be multiple of 2M,
+ * between 2MB and 14MB inclusive.
+ */
+#ifndef CONSISTENT_DMA_SIZE
+#define CONSISTENT_DMA_SIZE SZ_2M
+#endif
+
+#define CONSISTENT_END (0xffe00000UL)
+#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE)
+
#else /* CONFIG_MMU */
/*
@@ -93,11 +104,11 @@
#endif
#ifndef PHYS_OFFSET
-#define PHYS_OFFSET (CONFIG_DRAM_BASE)
+#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
#endif
#ifndef END_MEM
-#define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
+#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
#ifndef PAGE_OFFSET
@@ -113,14 +124,6 @@
#endif /* !CONFIG_MMU */
/*
- * Size of DMA-consistent memory region. Must be multiple of 2M,
- * between 2MB and 14MB inclusive.
- */
-#ifndef CONSISTENT_DMA_SIZE
-#define CONSISTENT_DMA_SIZE SZ_2M
-#endif
-
-/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
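
With this move, CONSISTENT_DMA_SIZE and the CONSISTENT_BASE/CONSISTENT_END delimiters are only defined for MMU builds, and the default remains 2MB ending at 0xffe00000. A platform needing a larger DMA-consistent region overrides the size from its own <mach/memory.h>; a hedged sketch (the 14MB figure is just the documented upper bound, and the header guard is illustrative):

/* Illustrative only: hypothetical <mach/memory.h> override. */
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H

/* Must be a multiple of 2MB, between 2MB and 14MB inclusive. */
#define CONSISTENT_DMA_SIZE	(14 << 20)

#endif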
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b561584d04a1..68870c776671 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,7 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
unsigned int id;
+ spinlock_t id_lock;
#endif
unsigned int kvm_seq;
} mm_context_t;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb329dd..a0b3cac0547c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
#define ASID_FIRST_VERSION (1 << ASID_BITS)
extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);
static inline void check_context(struct mm_struct *mm)
{
+ /*
+ * This code is executed with interrupts enabled. Therefore,
+ * mm->context.id cannot be updated to the latest ASID version
+ * on a different CPU (and condition below not triggered)
+ * without first getting an IPI to reset the context. The
+ * alternative is to take a read_lock on mm->context.id_lock
+ * (after changing its type to rwlock_t).
+ */
if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
__new_context(mm);
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
__flush_icache_all();
#endif
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+ struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+ *crt_mm = next;
+#endif
check_context(next);
cpu_switch_mm(next->pgd, next);
if (cache_is_vivt())
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 3a32af4cce30..a485ac3c8696 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -117,11 +117,12 @@
#endif
struct page;
+struct vm_area_struct;
struct cpu_user_fns {
void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
- unsigned long vaddr);
+ unsigned long vaddr, struct vm_area_struct *vma);
};
#ifdef MULTI_USER
@@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user;
extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr);
+ unsigned long vaddr, struct vm_area_struct *vma);
#endif
#define clear_user_highpage(page,vaddr) \
@@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define copy_user_highpage(to,from,vaddr,vma) \
- __cpu_copy_user_highpage(to, from, vaddr)
+ __cpu_copy_user_highpage(to, from, vaddr, vma)
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
new file mode 100644
index 000000000000..49e3049aba32
--- /dev/null
+++ b/arch/arm/include/asm/perf_event.h
@@ -0,0 +1,31 @@
+/*
+ * linux/arch/arm/include/asm/perf_event.h
+ *
+ * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PERF_EVENT_H__
+#define __ARM_PERF_EVENT_H__
+
+/*
+ * NOP: on *most* (read: all supported) ARM platforms, the performance
+ * counter interrupts are regular interrupts and not an NMI. This
+ * means that when we receive the interrupt we can call
+ * perf_event_do_pending() that handles all of the work with
+ * interrupts enabled.
+ */
+static inline void
+set_perf_event_pending(void)
+{
+}
+
+/* ARM performance counters start from 1 (in the cp15 accesses) so use the
+ * same indexes here for consistency. */
+#define PERF_EVENT_INDEX_OFFSET 1
+
+#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index b011f2e939aa..013cfcdc4839 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -86,8 +86,8 @@ extern unsigned int kobjsize(const void *objp);
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
*/
-#define VMALLOC_START 0
-#define VMALLOC_END 0xffffffff
+#define VMALLOC_START 0UL
+#define VMALLOC_END 0xffffffffUL
#define FIRST_USER_ADDRESS (0)
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
new file mode 100644
index 000000000000..2829b9f981a1
--- /dev/null
+++ b/arch/arm/include/asm/pmu.h
@@ -0,0 +1,75 @@
+/*
+ * linux/arch/arm/include/asm/pmu.h
+ *
+ * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PMU_H__
+#define __ARM_PMU_H__
+
+#ifdef CONFIG_CPU_HAS_PMU
+
+struct pmu_irqs {
+ const int *irqs;
+ int num_irqs;
+};
+
+/**
+ * reserve_pmu() - reserve the hardware performance counters
+ *
+ * Reserve the hardware performance counters in the system for exclusive use.
+ * The 'struct pmu_irqs' for the system is returned on success, ERR_PTR()
+ * encoded error on failure.
+ */
+extern const struct pmu_irqs *
+reserve_pmu(void);
+
+/**
+ * release_pmu() - Relinquish control of the performance counters
+ *
+ * Release the performance counters and allow someone else to use them.
+ * Callers must have disabled the counters and released IRQs before calling
+ * this. The 'struct pmu_irqs' returned from reserve_pmu() must be passed as
+ * a cookie.
+ */
+extern int
+release_pmu(const struct pmu_irqs *irqs);
+
+/**
+ * init_pmu() - Initialise the PMU.
+ *
+ * Initialise the system ready for PMU enabling. This should typically set the
+ * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
+ * the actual hardware initialisation.
+ */
+extern int
+init_pmu(void);
+
+#else /* CONFIG_CPU_HAS_PMU */
+
+static inline const struct pmu_irqs *
+reserve_pmu(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int
+release_pmu(const struct pmu_irqs *irqs)
+{
+ return -ENODEV;
+}
+
+static inline int
+init_pmu(void)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_CPU_HAS_PMU */
+
+#endif /* __ARM_PMU_H__ */
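
reserve_pmu()/init_pmu()/release_pmu() arbitrate the hardware counters between competing users (e.g. perf events and oprofile). A hedged sketch of the intended call sequence; the consumer functions below are hypothetical:

/* Illustrative only: hypothetical PMU consumer. */
static const struct pmu_irqs *pmu_irqs;

static int example_pmu_start(void)
{
	int err;

	pmu_irqs = reserve_pmu();		/* take exclusive ownership */
	if (IS_ERR(pmu_irqs))
		return PTR_ERR(pmu_irqs);

	err = init_pmu();			/* e.g. set PMU IRQ affinity */
	if (err) {
		release_pmu(pmu_irqs);
		return err;
	}

	/* request pmu_irqs->irqs[0 .. num_irqs-1] and program the counters */
	return 0;
}

static void example_pmu_stop(void)
{
	/* counters must be disabled and IRQs freed before releasing */
	release_pmu(pmu_irqs);
}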
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 5ccce0a9b03c..f392fb4437af 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -223,18 +223,6 @@ extern struct meminfo meminfo;
#define bank_phys_end(bank) ((bank)->start + (bank)->size)
#define bank_phys_size(bank) (bank)->size
-/*
- * Early command line parameters.
- */
-struct early_params {
- const char *arg;
- void (*fn)(char **p);
-};
-
-#define __early_param(name,fn) \
-static struct early_params __early_##fn __used \
-__attribute__((__section__(".early_param.init"))) = { name, fn }
-
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 59303e200845..e6215305544a 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void)
return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
}
+static inline int cache_ops_need_broadcast(void)
+{
+ return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
+}
+
#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c91c64cab922..17eb355707dd 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,6 +5,22 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
+static inline void dsb_sev(void)
+{
+#if __LINUX_ARM_ARCH__ >= 7
+ __asm__ __volatile__ (
+ "dsb\n"
+ "sev"
+ );
+#elif defined(CONFIG_CPU_32v6K)
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c7, c10, 4\n"
+ "sev"
+ : : "r" (0)
+ );
+#endif
+}
+
/*
* ARMv6 Spin-locking.
*
@@ -69,13 +85,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
__asm__ __volatile__(
" str %1, [%0]\n"
-#ifdef CONFIG_CPU_32v6K
-" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */
-" sev"
-#endif
:
: "r" (&lock->lock), "r" (0)
: "cc");
+
+ dsb_sev();
}
/*
@@ -132,13 +146,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
__asm__ __volatile__(
"str %1, [%0]\n"
-#ifdef CONFIG_CPU_32v6K
-" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */
-" sev\n"
-#endif
:
: "r" (&rw->lock), "r" (0)
: "cc");
+
+ dsb_sev();
}
/* write_can_lock - would write_trylock() succeed? */
@@ -188,14 +200,12 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
-#ifdef CONFIG_CPU_32v6K
-"\n cmp %0, #0\n"
-" mcreq p15, 0, %0, c7, c10, 4\n"
-" seveq"
-#endif
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
: "cc");
+
+ if (tmp == 0)
+ dsb_sev();
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
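
dsb_sev() factors out the "drain the store, then signal" sequence the unlock paths above now share: the zeroed lock word must be visible (DSB) before SEV wakes any CPU parked in WFE inside the corresponding lock routine. For context only, a simplified sketch of such a WFE-based acquire loop; it is not part of this patch and the real lock-side code may differ in detail:

/*
 * Illustrative only: simplified exclusive-load/WFE acquire loop that the
 * SEV issued by dsb_sev() is meant to wake (v6K/v7 assumed).
 */
static inline void example_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* read the lock word exclusively */
"	teq	%0, #0\n"
"	wfene\n"			/* held: sleep until someone does SEV */
"	strexeq	%0, %2, [%1]\n"		/* free: try to claim it */
"	teqeq	%0, #0\n"
"	bne	1b"			/* store failed or lock still held */
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();			/* acquire barrier after taking the lock */
}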
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 058e7e90881d..ca88e6a84707 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -73,8 +73,7 @@ extern unsigned int mem_fclk_21285;
struct pt_regs;
-void die(const char *msg, struct pt_regs *regs, int err)
- __attribute__((noreturn));
+void die(const char *msg, struct pt_regs *regs, int err);
struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 2dfb7d7a66e9..b74970ec02c4 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *);
extern void iwmmxt_task_release(struct thread_info *);
extern void iwmmxt_task_switch(struct thread_info *);
-extern void vfp_sync_state(struct thread_info *thread);
+extern void vfp_sync_hwstate(struct thread_info *);
+extern void vfp_flush_hwstate(struct thread_info *);
#endif
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index c2f1605de359..e085e2c545eb 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -529,7 +529,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
* cache entries for the kernels virtual memory range are written
* back to the page.
*/
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep);
#endif