path: root/arch/csky/mm/asid.c
blob: b2e914745c1d0e150291fec57320a45444a48ae3
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ASID allocator.
 *
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/slab.h>
#include <linux/mm_types.h>

#include <asm/asid.h>

#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)

#define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
#define ASID_FIRST_VERSION(info)	(1UL << ((info)->bits))

#define asid2idx(info, asid)		(((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
#define idx2asid(info, idx)		(((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))
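
/*
 * The macros above split a 64-bit allocator ASID in two: the generation
 * (rollover count) lives above bit (info->bits - 1), while the hardware
 * ASID occupies the low info->bits bits, scaled by ctxt_shift so that
 * each context owns asid_per_ctxt contiguous hardware ASIDs. As an
 * illustrative example, with bits == 8 and asid_per_ctxt == 1:
 * ASID_MASK(info) is ~0xff, ASID_FIRST_VERSION(info) is 0x100, and an
 * ASID of 0x305 (generation 0x300) maps to bitmap index 5.
 */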

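/*
 * Reset the allocator state on generation rollover: rebuild the ASID
 * bitmap from each CPU's reserved ASID and queue a local TLB flush on
 * every CPU. Called from new_context() with info->lock held.
 */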
static void flush_context(struct asid_info *info)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = reserved_asid(info, i);
		__set_bit(asid2idx(info, asid), info->map);
		reserved_asid(info, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&info->flush_pending);
}

static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
				       u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (reserved_asid(info, cpu) == asid) {
			hit = true;
			reserved_asid(info, cpu) = newasid;
		}
	}

	return hit;
}

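/*
 * Allocate a fresh ASID for @mm, or revalidate its existing one.
 *
 * Returns an ASID tagged with the current generation in its upper bits.
 * Must be called with info->lock held, which also serializes access to
 * the static cur_idx allocation hint.
 */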
static u64 new_context(struct asid_info *info, atomic64_t *pasid,
		       struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(pasid);
	u64 generation = atomic64_read(&info->generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK(info));

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(info, asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(info, asid), info->map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always search from index 1, as ASID #0 is always reserved:
	 * an mm whose ASID reads 0 is treated as never having been
	 * allocated one. Each bitmap index covers asid_per_ctxt
	 * contiguous hardware ASIDs (see ctxt_shift).
	 */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
	if (asid != NUM_CTXT_ASIDS(info))
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
						 &info->generation);
	flush_context(info);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);

set_asid:
	__set_bit(asid, info->map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return idx2asid(info, asid) | generation;
}

/*
 * Generate a new ASID for the context.
 *
 * @info: Pointer to the ASID allocator structure
 * @pasid: Pointer to the current ASID batch allocated. It will be updated
 * with the new ASID batch.
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 * @mm: The mm_struct the ASID is being allocated for; its mm_cpumask is
 * updated accordingly.
 */
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu, struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	raw_spin_lock_irqsave(&info->lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(pasid);
	if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
		asid = new_context(info, pasid, mm);
		atomic64_set(pasid, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
		info->flush_cpu_ctxt_cb();

	atomic64_set(&active_asid(info, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&info->lock, flags);
}
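
/*
 * Illustrative caller (not part of this file): an architecture's
 * context-switch path normally goes through the inline fast path in
 * <asm/asid.h>, which falls back to asid_new_context() above only when
 * the mm's ASID generation is stale or it loses a race with a rollover,
 * e.g. (names as used in the csky port):
 *
 *	void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 *	{
 *		asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
 *	}
 */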

/*
 * Initialize the ASID allocator
 *
 * @info: Pointer to the asid allocator structure
 * @bits: Number of bits used to hold an ASID (i.e. up to 2^@bits ASIDs
 * exist per generation)
 * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
 * allocated contiguously for a given context. This value should be a power of
 * 2.
 * @flush_cpu_ctxt_cb: Callback invoked on a CPU to flush its local
 * context/TLB when a flush queued by a generation rollover is pending.
 *
 * Returns 0 on success, -ENOMEM if the ASID bitmap could not be allocated.
 */
int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void))
{
	info->bits = bits;
	info->ctxt_shift = ilog2(asid_per_ctxt);
	info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is always reserved.
	 */
	WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
	info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
			    sizeof(*info->map), GFP_KERNEL);
	if (!info->map)
		return -ENOMEM;

	raw_spin_lock_init(&info->lock);

	return 0;
}
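
/*
 * Usage sketch (illustrative only, not part of this file): the
 * architecture defines a struct asid_info plus the per-CPU active and
 * reserved ASID storage consumed by the active_asid()/reserved_asid()
 * accessors, supplies a local TLB flush callback, and initializes the
 * allocator early at boot, roughly:
 *
 *	static struct asid_info asid_info;
 *
 *	static void flush_cpu_ctxt(void)
 *	{
 *		// invalidate this CPU's TLB here (arch-specific)
 *	}
 *
 *	static int __init asids_init(void)
 *	{
 *		if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1,
 *					flush_cpu_ctxt))
 *			panic("Unable to initialize ASID allocator\n");
 *		return 0;
 *	}
 *	early_initcall(asids_init);
 *
 * CONFIG_CPU_ASID_BITS and the exact wiring of the per-CPU pointers are
 * architecture-specific (see arch/csky/mm/context.c for the csky user).
 */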