summaryrefslogtreecommitdiffstats
path: root/arch/arm64/kvm/hyp/nvhe/psci-relay.c
blob: e3947846ffcb9acd8d4528190741fdb48325e711 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google LLC
 * Author: David Brazdil <dbrazdil@google.com>
 */

#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <uapi/linux/psci.h>

#include <nvhe/trap_handler.h>

/*
 * Entry points passed to EL3 as the physical boot/resume addresses
 * (see psci_cpu_on() / psci_cpu_suspend()); defined in hyp assembly.
 */
void kvm_hyp_cpu_entry(unsigned long r0);
void kvm_hyp_cpu_resume(unsigned long r0);

/* Restore the host context and return to the host; never returns to hyp. */
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

/* Config options set by the host. */
struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
/* PA-minus-VA offset of the hyp linear map; set by the host at init. */
s64 __ro_after_init hyp_physvirt_offset;

/* Convert a hyp VA into a physical address using hyp_physvirt_offset. */
#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)

/* Sentinel returned by find_cpu_id() when no logical CPU matches. */
#define INVALID_CPU_ID	UINT_MAX

/*
 * Per-CPU mailbox holding the PC/r0 a booting or resuming CPU should
 * hand back to the host. 'lock' serializes concurrent CPU_ON requests
 * targeting the same CPU.
 */
struct psci_boot_args {
	atomic_t lock;
	unsigned long pc;
	unsigned long r0;
};

#define PSCI_BOOT_ARGS_UNLOCKED		0
#define PSCI_BOOT_ARGS_LOCKED		1

#define PSCI_BOOT_ARGS_INIT					\
	((struct psci_boot_args){				\
		.lock = ATOMIC_INIT(PSCI_BOOT_ARGS_UNLOCKED),	\
	})

/* Written by the CPU_ON caller on behalf of the target CPU. */
static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
/* Written by a CPU about to suspend itself; no cross-CPU writers. */
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;

/*
 * True iff the host implements the given PSCI 0.1 function AND @func_id
 * equals the host-provided function ID for it.
 */
#define	is_psci_0_1(what, func_id)					\
	(kvm_host_psci_config.psci_0_1_ ## what ## _implemented &&	\
	 (func_id) == kvm_host_psci_config.function_ids_0_1.what)

/*
 * Does @func_id match any of the host-registered PSCI 0.1 function IDs?
 * PSCI 0.1 has no fixed ID space, so each function is matched individually.
 */
static bool is_psci_0_1_call(u64 func_id)
{
	if (is_psci_0_1(cpu_suspend, func_id))
		return true;
	if (is_psci_0_1(cpu_on, func_id))
		return true;
	if (is_psci_0_1(cpu_off, func_id))
		return true;
	return is_psci_0_1(migrate, func_id);
}

/* Does @func_id fall inside the fixed PSCI >= 0.2 function ID ranges? */
static bool is_psci_0_2_call(u64 func_id)
{
	/* SMCCC reserves IDs 0x00-1F with the given 32/64-bit base for PSCI. */
	if (func_id >= PSCI_0_2_FN(0) && func_id <= PSCI_0_2_FN(31))
		return true;

	return func_id >= PSCI_0_2_FN64(0) && func_id <= PSCI_0_2_FN64(31);
}

static unsigned long psci_call(unsigned long fn, unsigned long arg0,
			       unsigned long arg1, unsigned long arg2)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(fn, arg0, arg1, arg2, &res);
	return res.a0;
}

/* Pass the host's SMC through to EL3 unmodified (r0 = ID, r1-r3 = args). */
static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
{
	unsigned long fn = cpu_reg(host_ctxt, 0);
	unsigned long a0 = cpu_reg(host_ctxt, 1);
	unsigned long a1 = cpu_reg(host_ctxt, 2);
	unsigned long a2 = cpu_reg(host_ctxt, 3);

	return psci_call(fn, a0, a1, a2);
}

/*
 * Forward a call that is expected to power off or reset the system.
 * If EL3 unexpectedly returns, there is nothing sensible left to do,
 * so bring the hypervisor down.
 */
static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
{
	psci_forward(host_ctxt);
	hyp_panic(); /* unreachable */
}

/*
 * Map an MPIDR onto the logical CPU index recorded at KVM init time.
 * Returns INVALID_CPU_ID for a malformed or unknown MPIDR.
 */
static unsigned int find_cpu_id(u64 mpidr)
{
	unsigned int idx;

	/* Reject MPIDRs with bits set outside the hardware ID mask. */
	if (mpidr & ~MPIDR_HWID_BITMASK)
		return INVALID_CPU_ID;

	/* Linear scan of the logical map built at initialization. */
	for (idx = 0; idx < NR_CPUS; idx++) {
		if (cpu_logical_map(idx) == mpidr)
			return idx;
	}

	return INVALID_CPU_ID;
}

/*
 * Try to take ownership of @args without blocking. Returns true on
 * success. The acquire ordering pairs with release_boot_args() so that
 * the owner's writes to pc/r0 are visible before the next acquisition.
 */
static __always_inline bool try_acquire_boot_args(struct psci_boot_args *args)
{
	return atomic_cmpxchg_acquire(&args->lock,
				      PSCI_BOOT_ARGS_UNLOCKED,
				      PSCI_BOOT_ARGS_LOCKED) ==
		PSCI_BOOT_ARGS_UNLOCKED;
}

/*
 * Release ownership of @args. Release ordering ensures the owner's prior
 * reads/writes complete before another CPU can re-acquire the struct.
 */
static __always_inline void release_boot_args(struct psci_boot_args *args)
{
	atomic_set_release(&args->lock, PSCI_BOOT_ARGS_UNLOCKED);
}

/*
 * PSCI CPU_ON relay: validate the target MPIDR, stash the host's requested
 * entry point (pc) and context argument (r0) in the target CPU's
 * cpu_on_args mailbox, then ask EL3 to boot the CPU into hyp's own
 * kvm_hyp_cpu_entry trampoline with hyp's init params.
 */
static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, mpidr, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	unsigned int cpu_id;
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;
	int ret;

	/*
	 * Find the logical CPU ID for the given MPIDR. The search set is
	 * the set of CPUs that were online at the point of KVM initialization.
	 * Booting other CPUs is rejected because their cpufeatures were not
	 * checked against the finalized capabilities. This could be relaxed
	 * by doing the feature checks in hyp.
	 */
	cpu_id = find_cpu_id(mpidr);
	if (cpu_id == INVALID_CPU_ID)
		return PSCI_RET_INVALID_PARAMS;

	/* Mailbox and init params of the TARGET CPU, not the caller's. */
	boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id);
	init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id);

	/* Check if the target CPU is already being booted. */
	if (!try_acquire_boot_args(boot_args))
		return PSCI_RET_ALREADY_ON;

	boot_args->pc = pc;
	boot_args->r0 = r0;
	/* Make the mailbox writes visible before EL3 starts the target CPU. */
	wmb();

	ret = psci_call(func_id, mpidr,
			__hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)),
			__hyp_pa(init_params));

	/* If successful, the lock will be released by the target CPU. */
	if (ret != PSCI_RET_SUCCESS)
		release_boot_args(boot_args);

	return ret;
}

/*
 * PSCI CPU_SUSPEND relay: record where the host wants to resume in this
 * CPU's suspend_args mailbox, then forward the request to EL3 with hyp's
 * kvm_hyp_cpu_resume trampoline as the wake-up entry point.
 */
static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, power_state, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/*
	 * Will either return if shallow sleep state, or wake up into the entry
	 * point if it is a deep sleep state.
	 */
	return psci_call(func_id, power_state,
			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
			 __hyp_pa(init_params));
}

/*
 * PSCI SYSTEM_SUSPEND relay: like psci_cpu_suspend(), but the resume
 * pc/r0 arrive in registers 1/2 (there is no power_state argument) and
 * a successful call only comes back through the resume entry point.
 */
static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, pc, host_ctxt, 1);
	DECLARE_REG(unsigned long, r0, host_ctxt, 2);

	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/* Will only return on error. */
	return psci_call(func_id,
			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
			 __hyp_pa(init_params), 0);
}

/*
 * C entry point for a CPU entering hyp after CPU_ON (@is_cpu_on == true)
 * or after waking from suspend. Loads the pc/r0 the host requested from
 * the relevant per-CPU mailbox and drops back into the host there.
 */
asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
{
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt;

	/* CPU_ON stashed its args in cpu_on_args, suspend in suspend_args. */
	if (is_cpu_on)
		boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args));
	else
		boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));

	/* Return to the host at the requested pc with the requested r0. */
	cpu_reg(host_ctxt, 0) = boot_args->r0;
	write_sysreg_el2(boot_args->pc, SYS_ELR);

	/*
	 * The CPU_ON caller left the mailbox locked until consumed here;
	 * suspend never took the lock (a core can only suspend itself).
	 */
	if (is_cpu_on)
		release_boot_args(boot_args);

	__host_enter(host_ctxt);
}

/*
 * Dispatch a PSCI 0.1 call by comparing against each host-registered
 * function ID. Anything unrecognized gets NOT_SUPPORTED.
 */
static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	/* CPU_OFF and MIGRATE are safe to pass straight through to EL3. */
	if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
		return psci_forward(host_ctxt);

	/* CPU_ON must boot the target through hyp's entry trampoline. */
	if (is_psci_0_1(cpu_on, func_id))
		return psci_cpu_on(func_id, host_ctxt);

	/* CPU_SUSPEND must wake through hyp's resume trampoline. */
	if (is_psci_0_1(cpu_suspend, func_id))
		return psci_cpu_suspend(func_id, host_ctxt);

	return PSCI_RET_NOT_SUPPORTED;
}

/* Dispatch a PSCI 0.2 function ID; unknown IDs get NOT_SUPPORTED. */
static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_0_2_FN_PSCI_VERSION:
	case PSCI_0_2_FN_CPU_OFF:
	case PSCI_0_2_FN64_AFFINITY_INFO:
	case PSCI_0_2_FN64_MIGRATE:
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
		/* Safe to pass through to EL3 unmodified. */
		return psci_forward(host_ctxt);
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_0_2_FN_SYSTEM_RESET:
		/* Not expected to return; panics if EL3 hands control back. */
		psci_forward_noreturn(host_ctxt);
		unreachable();
	case PSCI_0_2_FN64_CPU_SUSPEND:
		/* Must wake through hyp's resume trampoline. */
		return psci_cpu_suspend(func_id, host_ctxt);
	case PSCI_0_2_FN64_CPU_ON:
		/* Must boot the target through hyp's entry trampoline. */
		return psci_cpu_on(func_id, host_ctxt);
	default:
		return PSCI_RET_NOT_SUPPORTED;
	}
}

/*
 * Dispatch a PSCI 1.x function ID. Functions new in 1.x are handled
 * here; everything else falls back to the 0.2 handler.
 */
static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	if (func_id == PSCI_1_0_FN_PSCI_FEATURES ||
	    func_id == PSCI_1_0_FN_SET_SUSPEND_MODE ||
	    func_id == PSCI_1_1_FN64_SYSTEM_RESET2)
		return psci_forward(host_ctxt);

	/* SYSTEM_SUSPEND must wake through hyp's resume trampoline. */
	if (func_id == PSCI_1_0_FN64_SYSTEM_SUSPEND)
		return psci_system_suspend(func_id, host_ctxt);

	/* 1.x inherits the rest of the 0.2 function space. */
	return psci_0_2_handler(func_id, host_ctxt);
}

/*
 * Top-level host SMC filter for PSCI. Returns true when the call was a
 * PSCI call for the version the host reported at init and was handled:
 * the result is placed in r0 and r1-r3 are zeroed. Returns false so the
 * caller can treat the SMC as non-PSCI otherwise.
 */
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	unsigned long ret;

	switch (kvm_host_psci_config.version) {
	case PSCI_VERSION(0, 1):
		if (!is_psci_0_1_call(func_id))
			return false;
		ret = psci_0_1_handler(func_id, host_ctxt);
		break;
	case PSCI_VERSION(0, 2):
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_0_2_handler(func_id, host_ctxt);
		break;
	default:
		/* 1.0 and later share the fixed 0.2 function ID space. */
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_1_0_handler(func_id, host_ctxt);
		break;
	}

	cpu_reg(host_ctxt, 0) = ret;
	cpu_reg(host_ctxt, 1) = 0;
	cpu_reg(host_ctxt, 2) = 0;
	cpu_reg(host_ctxt, 3) = 0;
	return true;
}