/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
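
/*
 * Initialize the context of a freshly created mm: no CPUs are attached,
 * no TLB flush is pending, and the address-space-control element (ASCE)
 * selects a region-third table on 64-bit kernels. crst_table_init()
 * fills the top-level table with empty entries.
 */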
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
	mm->context.has_pgste = 0;
	mm->context.asce_limit = STACK_TOP_MAX;
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}

#define destroy_context(mm)		do { } while (0)
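
/*
 * Control registers are 32 bits wide on 31-bit kernels and 64 bits wide
 * on 64-bit kernels; pick the load-control instruction that matches.
 */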
#ifndef CONFIG_64BIT
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif
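
/*
 * Attach the page tables of "mm" to the current CPU: build the user
 * ASCE from the ASCE bits and the page table origin, load it into
 * control register 1 (the primary address space) and restore the
 * task's address space mode.
 */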
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
	pgd_t *pgd = mm->pgd;

	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
	/* Load primary space page table origin. */
	asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
	set_fs(current->thread.mm_segment);
}
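
/*
 * Context switch to a different mm. The upper 16 bits of attach_count
 * are used by the TLB flush code as a flush-in-progress counter; while
 * a flush is running, loading the new ASCE is postponed via TIF_TLB_WAIT
 * until finish_arch_post_lock_switch(), so that this CPU never walks
 * page tables that are still being torn down.
 */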
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (prev == next)
		return;
	if (atomic_inc_return(&next->context.attach_count) >> 16) {
		/* Delay update_mm until all TLB flushes are done. */
		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
	} else {
		cpumask_set_cpu(cpu, mm_cpumask(next));
		update_mm(next, tsk);
		if (next->context.flush_mm)
			/* Flush pending TLBs */
			__tlb_flush_mm(next);
	}
	atomic_dec(&prev->context.attach_count);
	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
}
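
/*
 * Called once the scheduler locks have been dropped: if switch_mm()
 * postponed the attach, wait until all pending TLB flushes have
 * finished, then load the new address space and flush once more if
 * a flush was requested in the meantime.
 */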
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
		return;
	preempt_disable();
	clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
	while (atomic_read(&mm->context.attach_count) >> 16)
		cpu_relax();

	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	update_mm(mm, tsk);
	if (mm->context.flush_mm)
		__tlb_flush_mm(mm);
	preempt_enable();
}

#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)
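
/*
 * activate_mm() is called by exec to switch to the newly created mm;
 * on s390 this is the same operation as an ordinary context switch.
 */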
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
}
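
/*
 * The child mm starts out with the maximum address space limit (see
 * init_new_context). If the parent runs with a smaller limit, e.g. a
 * 31-bit compat process, shrink the page table hierarchy of the new
 * mm to match.
 */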
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
	if (oldmm->context.asce_limit < mm->context.asce_limit)
		crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

#endif /* __S390_MMU_CONTEXT_H */