path: root/arch/arm64/kvm/regmap.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/emulate.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/ptrace.h>

#define VCPU_NR_MODES 6
#define REG_OFFSET(_reg) \
	(offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))

#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))
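/*
 * Note (editorial, not in the original source): vcpu_reg_offsets[][] maps
 * AArch32 register numbers r0-r15 to word offsets into struct user_pt_regs,
 * one row per AArch32 mode in the order USR, FIQ, IRQ, SVC, ABT, UND.
 * Non-banked registers reuse the USR slots (compat_usr(R)); the banked
 * SP/LR copies (and r8-r12 for FIQ) are aliased onto entries of the
 * AArch64 regs[] array by the compat_* macros in <asm/ptrace.h>.
 */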

static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
	/* USR Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13),	USR_REG_OFFSET(14),
		REG_OFFSET(pc)
	},

	/* FIQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(compat_r8_fiq),  /* r8 */
		REG_OFFSET(compat_r9_fiq),  /* r9 */
		REG_OFFSET(compat_r10_fiq), /* r10 */
		REG_OFFSET(compat_r11_fiq), /* r11 */
		REG_OFFSET(compat_r12_fiq), /* r12 */
		REG_OFFSET(compat_sp_fiq),  /* r13 */
		REG_OFFSET(compat_lr_fiq),  /* r14 */
		REG_OFFSET(pc)
	},

	/* IRQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_irq), /* r13 */
		REG_OFFSET(compat_lr_irq), /* r14 */
		REG_OFFSET(pc)
	},

	/* SVC Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_svc), /* r13 */
		REG_OFFSET(compat_lr_svc), /* r14 */
		REG_OFFSET(pc)
	},

	/* ABT Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_abt), /* r13 */
		REG_OFFSET(compat_lr_abt), /* r14 */
		REG_OFFSET(pc)
	},

	/* UND Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_und), /* r13 */
		REG_OFFSET(compat_lr_und), /* r14 */
		REG_OFFSET(pc)
	},
};

/*
 * Return a pointer to the vcpu's copy of register @reg_num, as banked
 * for the current AArch32 mode of the virtual CPU.
 */
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
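	/*
	 * AArch32 mode field encodings: USR=0x10, FIQ=0x11, IRQ=0x12,
	 * SVC=0x13, ABT=0x17, UND=0x1b, SYS=0x1f.  Clearing
	 * PSR_MODE32_BIT (0x10) turns USR/FIQ/IRQ/SVC into indices 0-3,
	 * matching the row order of vcpu_reg_offsets[]; ABT, UND and SYS
	 * are remapped explicitly below.
	 */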

	switch (mode) {
	case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC:
		mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
		break;

	case PSR_AA32_MODE_ABT:
		mode = 4;
		break;

	case PSR_AA32_MODE_UND:
		mode = 5;
		break;

	case PSR_AA32_MODE_SYS:
		mode = 0;	/* SYS maps to USR */
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}
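
/*
 * Illustrative use only (not a call site in this file): emulation code
 * that needs, say, the guest's banked LR for whatever mode the guest is
 * currently in could do
 *
 *	unsigned long lr = *vcpu_reg32(vcpu, 14);
 *	*vcpu_reg32(vcpu, 14) = lr + 4;
 *
 * and transparently hit the USR, IRQ, FIQ, ... copy selected by the
 * mode bits of the guest's CPSR.
 */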

/*
 * Return the index (KVM_SPSR_xxx) of the SPSR banked for the current
 * mode of the virtual CPU.
 */
static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
	switch (mode) {
	case PSR_AA32_MODE_SVC: return KVM_SPSR_SVC;
	case PSR_AA32_MODE_ABT: return KVM_SPSR_ABT;
	case PSR_AA32_MODE_UND: return KVM_SPSR_UND;
	case PSR_AA32_MODE_IRQ: return KVM_SPSR_IRQ;
	case PSR_AA32_MODE_FIQ: return KVM_SPSR_FIQ;
	default: BUG();
	}
}
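
/*
 * When the vcpu's system registers are loaded on the physical CPU
 * (sysregs_loaded_on_cpu), the in-memory SPSR copies may be stale, so
 * the accessors below go straight to the hardware registers; otherwise
 * they use the saved copy indexed by vcpu_spsr32_mode().
 */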

unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu)
{
	int spsr_idx = vcpu_spsr32_mode(vcpu);

	if (!vcpu->arch.sysregs_loaded_on_cpu)
		return vcpu_gp_regs(vcpu)->spsr[spsr_idx];

	switch (spsr_idx) {
	case KVM_SPSR_SVC:
		return read_sysreg_el1(spsr);
	case KVM_SPSR_ABT:
		return read_sysreg(spsr_abt);
	case KVM_SPSR_UND:
		return read_sysreg(spsr_und);
	case KVM_SPSR_IRQ:
		return read_sysreg(spsr_irq);
	case KVM_SPSR_FIQ:
		return read_sysreg(spsr_fiq);
	default:
		BUG();
	}
}

void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
{
	int spsr_idx = vcpu_spsr32_mode(vcpu);

	if (!vcpu->arch.sysregs_loaded_on_cpu) {
		vcpu_gp_regs(vcpu)->spsr[spsr_idx] = v;
		return;
	}

	switch (spsr_idx) {
	case KVM_SPSR_SVC:
		write_sysreg_el1(v, spsr);
		break;
	case KVM_SPSR_ABT:
		write_sysreg(v, spsr_abt);
		break;
	case KVM_SPSR_UND:
		write_sysreg(v, spsr_und);
		break;
	case KVM_SPSR_IRQ:
		write_sysreg(v, spsr_irq);
		break;
	case KVM_SPSR_FIQ:
		write_sysreg(v, spsr_fiq);
		break;
	}
}