Diffstat (limited to 'arch/ia64')
 arch/ia64/include/asm/mmu_context.h | 17 ++++-------------
 1 file changed, 4 insertions(+), 13 deletions(-)
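In short: this patch drops ia64's open-coded no-op hooks (enter_lazy_tlb(), destroy_context(), and the deactivate_mm() macro) and pulls in the generic fallbacks from asm-generic/mmu_context.h instead. The two added "#define x x" lines mark init_new_context() and activate_mm() as arch-provided so the generic header does not override them; the guard pattern is sketched after the diff.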
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
index 2da0e2eb036b..87a0d5bc11ef 100644
--- a/arch/ia64/include/asm/mmu_context.h
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -49,11 +49,6 @@ DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
 extern void mmu_context_init (void);
 extern void wrap_mmu_context (struct mm_struct *mm);
 
-static inline void
-enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
 /*
  * When the context counter wraps around all TLBs need to be flushed because
  * an old context number might have been reused. This is signalled by the
@@ -116,6 +111,7 @@ out:
  * Initialize context number to some sane value. MM is guaranteed to be a
  * brand-new address-space, so no TLB flushing is needed, ever.
  */
+#define init_new_context init_new_context
 static inline int
 init_new_context (struct task_struct *p, struct mm_struct *mm)
 {
@@ -124,12 +120,6 @@ init_new_context (struct task_struct *p, struct mm_struct *mm)
 }
 
 static inline void
-destroy_context (struct mm_struct *mm)
-{
-	/* Nothing to do. */
-}
-
-static inline void
 reload_context (nv_mm_context_t context)
 {
 	unsigned long rid;
@@ -178,11 +168,10 @@ activate_context (struct mm_struct *mm)
 	} while (unlikely(context != mm->context));
 }
 
-#define deactivate_mm(tsk,mm)	do { } while (0)
-
 /*
  * Switch from address space PREV to address space NEXT.
  */
+#define activate_mm activate_mm
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
@@ -196,5 +185,7 @@ activate_mm (struct mm_struct *prev, struct mm_struct *next)
 
 #define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)
 
+#include <asm-generic/mmu_context.h>
+
 # endif /* ! __ASSEMBLY__ */
 #endif /* _ASM_IA64_MMU_CONTEXT_H */
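
For reference, the override mechanism behind this cleanup is a plain preprocessor guard in asm-generic/mmu_context.h: the generic header supplies a no-op fallback for each hook only if the architecture has not already defined a macro of the same name. A minimal, self-contained sketch of the pattern follows (simplified, illustrative declarations, not the kernel's exact ones):

/* Sketch of the #ifndef override pattern used by asm-generic/mmu_context.h.
 * Illustrative only; types and signatures are simplified stand-ins. */

struct mm_struct;
struct task_struct;

/* --- arch header, e.g. asm/mmu_context.h --- */

/* The arch provides its own init_new_context(); defining the macro to
 * its own name tells the generic header to skip its fallback below. */
#define init_new_context init_new_context
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	/* arch-specific setup would go here */
	return 0;
}

/* --- asm-generic/mmu_context.h, included last --- */

#ifndef enter_lazy_tlb
/* The arch did not claim enter_lazy_tlb, so the generic no-op is used. */
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}
#endif

#ifndef init_new_context
/* Skipped here: the arch header defined the macro above. */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	return 0;
}
#endif

This is also why the new #include sits at the bottom of the ia64 header: the #define init_new_context init_new_context and #define activate_mm activate_mm lines must already be visible when the generic header tests them, otherwise the generic fallbacks would clash with the arch versions.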