/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5
#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
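
/*
 * Worked example: REG_OFFSET() turns a struct kvm_regs field into a
 * word index into the register file.  Since usr_regs.uregs[] sits at
 * the start of struct kvm_regs, USR_REG_OFFSET(2) evaluates to
 * offsetof(struct kvm_regs, usr_regs.uregs[2]) / sizeof(u32) == 2, so
 * adding it to an (unsigned long *) view of the struct yields usr r2.
 */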

static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};
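
/*
 * For example, with the vcpu in FIQ mode,
 * vcpu_reg_offsets[VCPU_REG_OFFSET_FIQ][8] is REG_OFFSET(fiq_regs[0]),
 * so a lookup of r8 resolves to the banked FIQ copy rather than usr r8.
 */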

/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case USR_MODE ... SVC_MODE:
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}
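
/*
 * Typical use: the exception injection code below writes the banked LR
 * of whatever mode the vcpu has just been switched into with
 * "*vcpu_reg(vcpu, 14) = new_lr_value;".
 */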

/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}
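
/*
 * Note that USR and SYS mode have no banked SPSR, which is why they
 * fall through to BUG(): callers in this file only ask for the SPSR
 * right after switching the vcpu into an exception mode.
 */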

/*
 * A conditional instruction is allowed to trap, even though it
 * wouldn't be executed.  So let's re-implement the hardware, in
 * software!
 */
bool kvm_condition_valid(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr, cond, insn;

	/*
	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
	 * catch undefined instructions, and then we won't get past
	 * the arm_exit_handlers test anyway.
	 */
	BUG_ON(!kvm_vcpu_trap_get_class(vcpu));

	/* Top two bits non-zero?  Unconditional. */
	if (kvm_vcpu_get_hsr(vcpu) >> 30)
		return true;

	cpsr = *vcpu_cpsr(vcpu);

	/* Is condition field valid? */
	if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
		cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
	else {
		/* This can happen in Thumb mode: examine IT state. */
		unsigned long it;

		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);

		/* it == 0 => unconditional. */
		if (it == 0)
			return true;

		/* The cond for this insn works out as the top 4 bits. */
		cond = (it >> 4);
	}

	/* Shift makes it look like an ARM-mode instruction */
	insn = cond << 28;
	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
}
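
/*
 * Worked example: with HSR.CV set and HSR.COND == 0xe (AL), insn
 * becomes 0xe0000000 and arm_check_condition() passes regardless of
 * the flags; with HSR.COND == 0x0 (EQ) it passes only if CPSR.Z is
 * set.
 */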

/**
 * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu:	The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & PSR_T_BIT);

	BUG_ON(is_arm && (cpsr & PSR_IT_MASK));

	if (!(cpsr & PSR_IT_MASK))
		return;

	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}
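
/*
 * Worked example: "ITT EQ" encodes ITSTATE as IT[7:5] = 0b000 (EQ) and
 * IT[4:0] = 0b00100.  After the first instruction in the block
 * retires, (itbits & 0x7) != 0, so itbits shifts left to 0b01000;
 * after the second, (itbits & 0x7) == 0 and the whole ITSTATE is
 * cleared, ending the block.
 */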

/**
 * kvm_skip_instr - skip a trapped instruction and proceed to the next
 * @vcpu: The vcpu pointer
 * @is_wide_instr: true if the trapped instruction was a 32-bit encoding
 */
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
	if (is_thumb && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
	kvm_adjust_itstate(vcpu);
}
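
/*
 * Typical use (illustrative): after emulating a trapped access, an
 * exit handler advances the guest past the faulting instruction, e.g.
 *
 *	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 *
 * where the HSR.IL bit says whether the instruction used a 32-bit
 * encoding.
 */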

/******************************************************************************
 * Inject exceptions into the guest
 */

static u32 exc_vector_base(struct kvm_vcpu *vcpu)
{
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	u32 vbar = vcpu->arch.cp15[c12_VBAR];

	if (sctlr & SCTLR_V)
		return 0xffff0000;
	else /* always have security extensions */
		return vbar;
}
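
/*
 * For example, with SCTLR.V set (high vectors) an undefined-instruction
 * exception vectors to 0xffff0004; with SCTLR.V clear it vectors to
 * VBAR + 4.
 */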

/**
 * kvm_inject_undefined - inject an undefined exception into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Modelled after TakeUndefInstrException() pseudocode.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	unsigned long new_lr_value;
	unsigned long new_spsr_value;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset = 4;
	u32 return_offset = (is_thumb) ? 2 : 4;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) - return_offset;

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to UND banked copies */
	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
}
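
/*
 * After this returns, the vcpu state reads as if the guest itself took
 * the exception: CPSR.M is UND_MODE with IRQs masked, SPSR_und holds
 * the old CPSR, LR_und holds the return address, and the PC sits on
 * the undef vector (exc_vector_base() + 4).
 */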

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
	unsigned long new_lr_value;
	unsigned long new_spsr_value;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset;
	u32 return_offset = (is_thumb) ? 4 : 0;
	bool is_lpae;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) + return_offset;

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to ABT banked copies */
	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	if (is_pabt)
		vect_offset = 12;
	else
		vect_offset = 16;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;

	if (is_pabt) {
		/* Set IFAR and IFSR */
		vcpu->arch.cp15[c6_IFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_IFSR] = 2;
	} else { /* !iabt */
		/* Set DFAR and DFSR */
		vcpu->arch.cp15[c6_DFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_DFSR] = 2;
	}
}
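
/*
 * FSR encoding above: when the guest uses LPAE (TTBCR.EAE set),
 * 1 << 9 | 0x22 == 0x222 reports a long-descriptor fault status of
 * 0b100010 (debug event) with the LPAE format bit set; otherwise the
 * short-descriptor status 2 likewise means a debug event.
 */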

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, true, addr);
}
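
/*
 * Typical use (illustrative): a stage-2 fault handler that cannot
 * resolve a guest access reflects it back, e.g.
 *
 *	kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 *
 * so the guest sees a prefetch abort at the faulting address.
 */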