-rw-r--r--  arch/cris/arch-v10/mm/fault.c         26
-rw-r--r--  arch/cris/arch-v10/mm/init.c           2
-rw-r--r--  arch/cris/arch-v10/mm/tlb.c           49
-rw-r--r--  include/asm-cris/arch-v10/atomic.h     7
-rw-r--r--  include/asm-cris/atomic.h             66
-rw-r--r--  include/asm-cris/mmu_context.h         2
-rw-r--r--  include/asm-cris/semaphore.h          21
-rw-r--r--  include/asm-cris/smp.h                 7
-rw-r--r--  include/asm-cris/spinlock.h            1
-rw-r--r--  include/asm-cris/tlbflush.h           19
10 files changed, 69 insertions, 131 deletions
diff --git a/arch/cris/arch-v10/mm/fault.c b/arch/cris/arch-v10/mm/fault.c
index 6805cdb25a53..fe2615022b97 100644
--- a/arch/cris/arch-v10/mm/fault.c
+++ b/arch/cris/arch-v10/mm/fault.c
@@ -14,6 +14,7 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/arch/svinto.h>
+#include <asm/mmu_context.h>
/* debug of low-level TLB reload */
#undef DEBUG
@@ -24,8 +25,6 @@
#define D(x)
#endif
-extern volatile pgd_t *current_pgd;
-
extern const struct exception_table_entry
*search_exception_tables(unsigned long addr);
@@ -46,7 +45,7 @@ handle_mmu_bus_fault(struct pt_regs *regs)
int page_id;
int acc, inv;
#endif
- pgd_t* pgd = (pgd_t*)current_pgd;
+ pgd_t* pgd = (pgd_t*)per_cpu(current_pgd, smp_processor_id());
pmd_t *pmd;
pte_t pte;
int miss, we, writeac;
@@ -94,24 +93,3 @@ handle_mmu_bus_fault(struct pt_regs *regs)
*R_TLB_LO = pte_val(pte);
local_irq_restore(flags);
}
-
-/* Called from arch/cris/mm/fault.c to find fixup code. */
-int
-find_fixup_code(struct pt_regs *regs)
-{
- const struct exception_table_entry *fixup;
-
- if ((fixup = search_exception_tables(regs->irp)) != 0) {
- /* Adjust the instruction pointer in the stackframe. */
- regs->irp = fixup->fixup;
-
- /*
- * Don't return by restoring the CPU state, so switch
- * frame-type.
- */
- regs->frametype = CRIS_FRAME_NORMAL;
- return 1;
- }
-
- return 0;
-}
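
The hunks above replace the single global current_pgd with one pointer per CPU. A minimal sketch of the resulting definition and access pattern, assuming the stock <linux/percpu.h> helpers (the helper name cris_current_pgd is illustrative, not part of the patch):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/pgtable.h>

/* one page-directory pointer per CPU instead of a single global */
DEFINE_PER_CPU(pgd_t *, current_pgd);

static pgd_t *cris_current_pgd(void)
{
	/* read the slot that belongs to the CPU we are executing on */
	return per_cpu(current_pgd, smp_processor_id());
}
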
diff --git a/arch/cris/arch-v10/mm/init.c b/arch/cris/arch-v10/mm/init.c
index a9f975a9cfb5..ff3481e76dd4 100644
--- a/arch/cris/arch-v10/mm/init.c
+++ b/arch/cris/arch-v10/mm/init.c
@@ -42,7 +42,7 @@ paging_init(void)
* switch_mm)
*/
- current_pgd = init_mm.pgd;
+ per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
/* initialise the TLB (tlb.c) */
diff --git a/arch/cris/arch-v10/mm/tlb.c b/arch/cris/arch-v10/mm/tlb.c
index 9d06125ff5a2..70a5523eff78 100644
--- a/arch/cris/arch-v10/mm/tlb.c
+++ b/arch/cris/arch-v10/mm/tlb.c
@@ -139,53 +139,6 @@ flush_tlb_page(struct vm_area_struct *vma,
local_irq_restore(flags);
}
-/* invalidate a page range */
-
-void
-flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
- int page_id = mm->context.page_id;
- int i;
- unsigned long flags;
-
- D(printk("tlb: flush range %p<->%p in context %d (%p)\n",
- start, end, page_id, mm));
-
- if(page_id == NO_CONTEXT)
- return;
-
- start &= PAGE_MASK; /* probably not necessary */
- end &= PAGE_MASK; /* dito */
-
- /* invalidate those TLB entries that match both the mm context
- * and the virtual address range
- */
-
- local_save_flags(flags);
- local_irq_disable();
- for(i = 0; i < NUM_TLB_ENTRIES; i++) {
- unsigned long tlb_hi, vpn;
- *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
- tlb_hi = *R_TLB_HI;
- vpn = tlb_hi & PAGE_MASK;
- if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
- vpn >= start && vpn < end) {
- *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
- IO_FIELD(R_TLB_HI, vpn, i & 0xf ) );
-
- *R_TLB_LO = ( IO_STATE(R_TLB_LO, global,no ) |
- IO_STATE(R_TLB_LO, valid, no ) |
- IO_STATE(R_TLB_LO, kernel,no ) |
- IO_STATE(R_TLB_LO, we, no ) |
- IO_FIELD(R_TLB_LO, pfn, 0 ) );
- }
- }
- local_irq_restore(flags);
-}
-
/* dump the entire TLB for debug purposes */
#if 0
@@ -237,7 +190,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
* the pgd.
*/
- current_pgd = next->pgd;
+ per_cpu(current_pgd, smp_processor_id()) = next->pgd;
/* switch context in the MMU */
diff --git a/include/asm-cris/arch-v10/atomic.h b/include/asm-cris/arch-v10/atomic.h
new file mode 100644
index 000000000000..6ef5e7d09024
--- /dev/null
+++ b/include/asm-cris/arch-v10/atomic.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_CRIS_ARCH_ATOMIC__
+#define __ASM_CRIS_ARCH_ATOMIC__
+
+#define cris_atomic_save(addr, flags) local_irq_save(flags);
+#define cris_atomic_restore(addr, flags) local_irq_restore(flags);
+
+#endif
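
On arch-v10 the addr argument is unused and the macros simply disable interrupts around the critical region. The parameter leaves room for an SMP port to key a lock off the address being updated; a purely illustrative sketch of such a variant (the lock array and hash macro are assumptions, not part of this patch):

#include <linux/spinlock.h>

#define CRIS_ATOMIC_HASH_SIZE 4			/* assumption: small array of locks */
extern spinlock_t cris_atomic_locks[CRIS_ATOMIC_HASH_SIZE];

#define CRIS_ATOMIC_HASH(addr) \
	(&cris_atomic_locks[((unsigned long)(addr) >> 2) & (CRIS_ATOMIC_HASH_SIZE - 1)])

#define cris_atomic_save(addr, flags) \
	spin_lock_irqsave(CRIS_ATOMIC_HASH(addr), flags)
#define cris_atomic_restore(addr, flags) \
	spin_unlock_irqrestore(CRIS_ATOMIC_HASH(addr), flags)
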
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
index b3dfea5a71e4..70605b09e8b7 100644
--- a/include/asm-cris/atomic.h
+++ b/include/asm-cris/atomic.h
@@ -4,21 +4,14 @@
#define __ASM_CRIS_ATOMIC__
#include <asm/system.h>
+#include <asm/arch/atomic.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-
-#define __atomic_fool_gcc(x) (*(struct { int a[100]; } *)x)
-
-typedef struct { int counter; } atomic_t;
+typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
@@ -30,29 +23,26 @@ typedef struct { int counter; } atomic_t;
extern __inline__ void atomic_add(int i, volatile atomic_t *v)
{
unsigned long flags;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(v, flags);
v->counter += i;
- local_irq_restore(flags);
+ cris_atomic_restore(v, flags);
}
extern __inline__ void atomic_sub(int i, volatile atomic_t *v)
{
unsigned long flags;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(v, flags);
v->counter -= i;
- local_irq_restore(flags);
+ cris_atomic_restore(v, flags);
}
extern __inline__ int atomic_add_return(int i, volatile atomic_t *v)
{
unsigned long flags;
int retval;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(v, flags);
retval = (v->counter += i);
- local_irq_restore(flags);
+ cris_atomic_restore(v, flags);
return retval;
}
@@ -62,10 +52,9 @@ extern __inline__ int atomic_sub_return(int i, volatile atomic_t *v)
{
unsigned long flags;
int retval;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(v, flags);
retval = (v->counter -= i);
- local_irq_restore(flags);
+ cris_atomic_restore(v, flags);
return retval;
}
@@ -73,39 +62,35 @@ extern __inline__ int atomic_sub_and_test(int i, volatile atomic_t *v)
{
int retval;
unsigned long flags;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(v, flags);
retval = (v->counter -= i) == 0;
- local_irq_restore(flags);
+ cris_atomic_restore(v, flags);
return retval;
}
extern __inline__ void atomic_inc(volatile atomic_t *v)
{
unsigned long flags;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(v, flags);
(v->counter)++;
- local_irq_restore(flags);
+ cris_atomic_restore(v, flags);
}
extern __inline__ void atomic_dec(volatile atomic_t *v)
{
unsigned long flags;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(v, flags);
(v->counter)--;
- local_irq_restore(flags);
+ cris_atomic_restore(v, flags);
}
extern __inline__ int atomic_inc_return(volatile atomic_t *v)
{
unsigned long flags;
int retval;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(v, flags);
retval = (v->counter)++;
- local_irq_restore(flags);
+ cris_atomic_restore(v, flags);
return retval;
}
@@ -113,20 +98,18 @@ extern __inline__ int atomic_dec_return(volatile atomic_t *v)
{
unsigned long flags;
int retval;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(v, flags);
retval = (v->counter)--;
- local_irq_restore(flags);
+ cris_atomic_restore(v, flags);
return retval;
}
extern __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
int retval;
unsigned long flags;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(v, flags);
retval = --(v->counter) == 0;
- local_irq_restore(flags);
+ cris_atomic_restore(v, flags);
return retval;
}
@@ -134,10 +117,9 @@ extern __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
int retval;
unsigned long flags;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(v, flags);
retval = ++(v->counter) == 0;
- local_irq_restore(flags);
+ cris_atomic_restore(v, flags);
return retval;
}
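
Caller-side semantics are unchanged by the conversion; a typical reference-counting use of the primitives, for illustration only:

#include <asm/atomic.h>

static atomic_t refcount = ATOMIC_INIT(1);

static void release_object(void)
{
	/* hypothetical cleanup hook, here for illustration only */
}

static void get_ref(void)
{
	atomic_inc(&refcount);			/* take an extra reference */
}

static void put_ref(void)
{
	if (atomic_dec_and_test(&refcount))	/* true when the count hits zero */
		release_object();
}
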
diff --git a/include/asm-cris/mmu_context.h b/include/asm-cris/mmu_context.h
index f9308c5bbd99..e6e659dc757b 100644
--- a/include/asm-cris/mmu_context.h
+++ b/include/asm-cris/mmu_context.h
@@ -15,7 +15,7 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* registers like cr3 on the i386
*/
-extern volatile pgd_t *current_pgd; /* defined in arch/cris/mm/fault.c */
+extern volatile DEFINE_PER_CPU(pgd_t *,current_pgd); /* defined in arch/cris/mm/fault.c */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
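
For reference, the usual split for a per-CPU variable is a DECLARE_PER_CPU() in the header and a single DEFINE_PER_CPU() in one translation unit; a sketch under that convention (not a verbatim copy of this hunk):

/* include/asm-cris/mmu_context.h */
DECLARE_PER_CPU(pgd_t *, current_pgd);	/* declaration visible to every user */

/* arch/cris/mm/fault.c */
DEFINE_PER_CPU(pgd_t *, current_pgd);	/* the per-CPU storage, defined once */
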
diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h
index 605aa7eaaaf8..8ed7636ab311 100644
--- a/include/asm-cris/semaphore.h
+++ b/include/asm-cris/semaphore.h
@@ -72,10 +72,9 @@ extern inline void down(struct semaphore * sem)
might_sleep();
/* atomically decrement the semaphores count, and if its negative, we wait */
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(sem, flags);
failed = --(sem->count.counter) < 0;
- local_irq_restore(flags);
+ cris_atomic_restore(sem, flags);
if(failed) {
__down(sem);
}
@@ -95,10 +94,9 @@ extern inline int down_interruptible(struct semaphore * sem)
might_sleep();
/* atomically decrement the semaphores count, and if its negative, we wait */
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(sem, flags);
failed = --(sem->count.counter) < 0;
- local_irq_restore(flags);
+ cris_atomic_restore(sem, flags);
if(failed)
failed = __down_interruptible(sem);
return(failed);
@@ -109,13 +107,13 @@ extern inline int down_trylock(struct semaphore * sem)
unsigned long flags;
int failed;
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(sem, flags);
failed = --(sem->count.counter) < 0;
- local_irq_restore(flags);
+ cris_atomic_restore(sem, flags);
if(failed)
failed = __down_trylock(sem);
return(failed);
+
}
/*
@@ -130,10 +128,9 @@ extern inline void up(struct semaphore * sem)
int wakeup;
/* atomically increment the semaphores count, and if it was negative, we wake people */
- local_save_flags(flags);
- local_irq_disable();
+ cris_atomic_save(sem, flags);
wakeup = ++(sem->count.counter) <= 0;
- local_irq_restore(flags);
+ cris_atomic_restore(sem, flags);
if(wakeup) {
__up(sem);
}
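
The semaphore fast paths now share the same save/restore primitive as atomic.h, while caller-side usage is unchanged; a small sketch of typical use, assuming the 2.6-era DECLARE_MUTEX() helper:

#include <asm/semaphore.h>

static DECLARE_MUTEX(my_sem);		/* binary semaphore, count starts at 1 */

static void guarded_work(void)
{
	down(&my_sem);			/* may sleep until the semaphore is free */
	/* ... critical section ... */
	up(&my_sem);
}
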
diff --git a/include/asm-cris/smp.h b/include/asm-cris/smp.h
index c2f4feaa041d..dca5ef1d8c97 100644
--- a/include/asm-cris/smp.h
+++ b/include/asm-cris/smp.h
@@ -1,4 +1,11 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
+#include <linux/cpumask.h>
+
+extern cpumask_t phys_cpu_present_map;
+#define cpu_possible_map phys_cpu_present_map
+
+#define __smp_processor_id() (current_thread_info()->cpu)
+
#endif
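
With the physical CPU mask and the processor-id accessor in place, generic code can walk the reported CPUs; a small sketch, assuming the 2.6-era for_each_cpu_mask() iterator:

#include <linux/cpumask.h>

/* count every CPU present in the physical map */
static int count_present_cpus(void)
{
	int cpu, n = 0;

	for_each_cpu_mask(cpu, phys_cpu_present_map)
		n++;

	return n;
}
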
diff --git a/include/asm-cris/spinlock.h b/include/asm-cris/spinlock.h
new file mode 100644
index 000000000000..2e8ba8afc7af
--- /dev/null
+++ b/include/asm-cris/spinlock.h
@@ -0,0 +1 @@
+#include <asm/arch/spinlock.h>
diff --git a/include/asm-cris/tlbflush.h b/include/asm-cris/tlbflush.h
index 1781fe1a32f6..6ed7d9ae90db 100644
--- a/include/asm-cris/tlbflush.h
+++ b/include/asm-cris/tlbflush.h
@@ -18,13 +18,26 @@
*
*/
+extern void __flush_tlb_all(void);
+extern void __flush_tlb_mm(struct mm_struct *mm);
+extern void __flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr);
+
+#ifdef CONFIG_SMP
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end);
+#else
+#define flush_tlb_all __flush_tlb_all
+#define flush_tlb_mm __flush_tlb_mm
+#define flush_tlb_page __flush_tlb_page
+#endif
+
+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
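
Under CONFIG_SMP the un-prefixed wrappers declared above are expected to run the local __flush_tlb_*() variants on every processor; a minimal sketch of one such wrapper, assuming the stock four-argument smp_call_function() of this kernel generation (not the patch's actual implementation):

#include <linux/smp.h>

static void ipi_flush_tlb_all(void *unused)
{
	__flush_tlb_all();		/* local flush on each receiving CPU */
}

void flush_tlb_all(void)
{
	__flush_tlb_all();					/* this CPU */
	smp_call_function(ipi_flush_tlb_all, NULL, 1, 1);	/* every other CPU */
}
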
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)