1#include <linux/irqchip/arm-gic.h>
2#include <asm/assembler.h>
3
/* Byte offset of usr-mode register _reg_nr within the VCPU struct
 * (VCPU_USR_REGS is the base offset of the usr register array). */
#define VCPU_USR_REG(_reg_nr)	(VCPU_USR_REGS + (_reg_nr * 4))
/* usr r13/r14 are the stack pointer and link register */
#define VCPU_USR_SP		(VCPU_USR_REG(13))
#define VCPU_USR_LR		(VCPU_USR_REG(14))
/* Byte offset of saved cp15 register _cp15_reg_idx within the VCPU struct */
#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))

/*
 * Many of these macros need to access the VCPU structure, which is always
 * held in r0. These macros should never clobber r1, as it is used to hold the
 * exception code on the return path (except of course the macro that switches
 * all the registers before the final jump to the VM).
 */
vcpu	.req	r0		@ vcpu pointer always in r0
16
/*
 * Save the VFP state (control regs + register bank) to the memory
 * pointed to by \vfp_base.  Clobbers {r2-r6}.
 */
.macro store_vfp_state vfp_base
	@ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
	VFPFMRX	r2, FPEXC
	@ Make sure VFP is enabled so we can touch the registers.
	orr	r6, r2, #FPEXC_EN
	VFPFMXR	FPEXC, r6

	VFPFMRX	r3, FPSCR
	tst	r2, #FPEXC_EX		@ Check for VFP Subarchitecture
	beq	1f
	@ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
	@ we only need to save them if FPEXC_EX is set.
	VFPFMRX r4, FPINST
	tst	r2, #FPEXC_FP2V		@ FPINST2 only valid when FP2V set
	VFPFMRX r5, FPINST2, ne		@ vmrsne
	bic	r6, r2, #FPEXC_EX	@ FPEXC_EX disable
	VFPFMXR	FPEXC, r6
1:
	VFPFSTMIA \vfp_base, r6		@ Save VFP registers
	stm	\vfp_base, {r2-r5}	@ Save FPEXC, FPSCR, FPINST, FPINST2
.endm
39
/*
 * Restore the VFP state previously saved by store_vfp_state at \vfp_base.
 * Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6}
 */
.macro restore_vfp_state vfp_base
	VFPFLDMIA \vfp_base, r6		@ Load VFP registers
	ldm	\vfp_base, {r2-r5}	@ Load FPEXC, FPSCR, FPINST, FPINST2

	VFPFMXR FPSCR, r3
	tst	r2, #FPEXC_EX		@ Check for VFP Subarchitecture
	beq	1f
	VFPFMXR FPINST, r4
	tst	r2, #FPEXC_FP2V		@ FPINST2 only restored when FP2V set
	VFPFMXR FPINST2, r5, ne
1:
	VFPFMXR FPEXC, r2	@ FPEXC	(last, in case !EN)
.endm
54
/* These are simply for the macros to work - values don't have meaning */
.equ usr, 0
.equ svc, 1
.equ abt, 2
.equ und, 3
.equ irq, 4
.equ fiq, 5
62
/*
 * Push the banked SP, LR and SPSR of mode \mode onto the stack.
 * Clobbers r2, r3, r4.
 */
.macro push_host_regs_mode mode
	mrs	r2, SP_\mode
	mrs	r3, LR_\mode
	mrs	r4, SPSR_\mode
	push	{r2, r3, r4}
.endm
69
70/*
71 * Store all host persistent registers on the stack.
72 * Clobbers all registers, in all modes, except r0 and r1.
73 */
74.macro save_host_regs
75	/* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
76	mrs	r2, ELR_hyp
77	push	{r2}
78
79	/* usr regs */
80	push	{r4-r12}	@ r0-r3 are always clobbered
81	mrs	r2, SP_usr
82	mov	r3, lr
83	push	{r2, r3}
84
85	push_host_regs_mode svc
86	push_host_regs_mode abt
87	push_host_regs_mode und
88	push_host_regs_mode irq
89
90	/* fiq regs */
91	mrs	r2, r8_fiq
92	mrs	r3, r9_fiq
93	mrs	r4, r10_fiq
94	mrs	r5, r11_fiq
95	mrs	r6, r12_fiq
96	mrs	r7, SP_fiq
97	mrs	r8, LR_fiq
98	mrs	r9, SPSR_fiq
99	push	{r2-r9}
100.endm
101
/*
 * Pop the banked SP, LR and SPSR of mode \mode from the stack
 * (reverse of push_host_regs_mode).  Clobbers r2, r3, r4.
 */
.macro pop_host_regs_mode mode
	pop	{r2, r3, r4}
	msr	SP_\mode, r2
	msr	LR_\mode, r3
	msr	SPSR_\mode, r4
.endm
108
109/*
110 * Restore all host registers from the stack.
111 * Clobbers all registers, in all modes, except r0 and r1.
112 */
113.macro restore_host_regs
114	pop	{r2-r9}
115	msr	r8_fiq, r2
116	msr	r9_fiq, r3
117	msr	r10_fiq, r4
118	msr	r11_fiq, r5
119	msr	r12_fiq, r6
120	msr	SP_fiq, r7
121	msr	LR_fiq, r8
122	msr	SPSR_fiq, r9
123
124	pop_host_regs_mode irq
125	pop_host_regs_mode und
126	pop_host_regs_mode abt
127	pop_host_regs_mode svc
128
129	pop	{r2, r3}
130	msr	SP_usr, r2
131	mov	lr, r3
132	pop	{r4-r12}
133
134	pop	{r2}
135	msr	ELR_hyp, r2
136.endm
137
138/*
139 * Restore SP, LR and SPSR for a given mode. offset is the offset of
140 * this mode's registers from the VCPU base.
141 *
142 * Assumes vcpu pointer in vcpu reg
143 *
144 * Clobbers r1, r2, r3, r4.
145 */
146.macro restore_guest_regs_mode mode, offset
147	add	r1, vcpu, \offset
148	ldm	r1, {r2, r3, r4}
149	msr	SP_\mode, r2
150	msr	LR_\mode, r3
151	msr	SPSR_\mode, r4
152.endm
153
154/*
155 * Restore all guest registers from the vcpu struct.
156 *
157 * Assumes vcpu pointer in vcpu reg
158 *
159 * Clobbers *all* registers.
160 */
161.macro restore_guest_regs
162	restore_guest_regs_mode svc, #VCPU_SVC_REGS
163	restore_guest_regs_mode abt, #VCPU_ABT_REGS
164	restore_guest_regs_mode und, #VCPU_UND_REGS
165	restore_guest_regs_mode irq, #VCPU_IRQ_REGS
166
167	add	r1, vcpu, #VCPU_FIQ_REGS
168	ldm	r1, {r2-r9}
169	msr	r8_fiq, r2
170	msr	r9_fiq, r3
171	msr	r10_fiq, r4
172	msr	r11_fiq, r5
173	msr	r12_fiq, r6
174	msr	SP_fiq, r7
175	msr	LR_fiq, r8
176	msr	SPSR_fiq, r9
177
178	@ Load return state
179	ldr	r2, [vcpu, #VCPU_PC]
180	ldr	r3, [vcpu, #VCPU_CPSR]
181	msr	ELR_hyp, r2
182	msr	SPSR_cxsf, r3
183
184	@ Load user registers
185	ldr	r2, [vcpu, #VCPU_USR_SP]
186	ldr	r3, [vcpu, #VCPU_USR_LR]
187	msr	SP_usr, r2
188	mov	lr, r3
189	add	vcpu, vcpu, #(VCPU_USR_REGS)
190	ldm	vcpu, {r0-r12}
191.endm
192
193/*
194 * Save SP, LR and SPSR for a given mode. offset is the offset of
195 * this mode's registers from the VCPU base.
196 *
197 * Assumes vcpu pointer in vcpu reg
198 *
199 * Clobbers r2, r3, r4, r5.
200 */
201.macro save_guest_regs_mode mode, offset
202	add	r2, vcpu, \offset
203	mrs	r3, SP_\mode
204	mrs	r4, LR_\mode
205	mrs	r5, SPSR_\mode
206	stm	r2, {r3, r4, r5}
207.endm
208
209/*
210 * Save all guest registers to the vcpu struct
211 * Expects guest's r0, r1, r2 on the stack.
212 *
213 * Assumes vcpu pointer in vcpu reg
214 *
215 * Clobbers r2, r3, r4, r5.
216 */
217.macro save_guest_regs
218	@ Store usr registers
219	add	r2, vcpu, #VCPU_USR_REG(3)
220	stm	r2, {r3-r12}
221	add	r2, vcpu, #VCPU_USR_REG(0)
222	pop	{r3, r4, r5}		@ r0, r1, r2
223	stm	r2, {r3, r4, r5}
224	mrs	r2, SP_usr
225	mov	r3, lr
226	str	r2, [vcpu, #VCPU_USR_SP]
227	str	r3, [vcpu, #VCPU_USR_LR]
228
229	@ Store return state
230	mrs	r2, ELR_hyp
231	mrs	r3, spsr
232	str	r2, [vcpu, #VCPU_PC]
233	str	r3, [vcpu, #VCPU_CPSR]
234
235	@ Store other guest registers
236	save_guest_regs_mode svc, #VCPU_SVC_REGS
237	save_guest_regs_mode abt, #VCPU_ABT_REGS
238	save_guest_regs_mode und, #VCPU_UND_REGS
239	save_guest_regs_mode irq, #VCPU_IRQ_REGS
240.endm
241
/* Reads cp15 registers from hardware and stores them in memory
 * @store_to_vcpu: If 0, registers are written in-order to the stack,
 * 		   otherwise to the VCPU struct pointed to by vcpup
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r2 - r12
 */
.macro read_cp15_state store_to_vcpu
	@ First batch: translation/access-control state
	mrc	p15, 0, r2, c1, c0, 0	@ SCTLR
	mrc	p15, 0, r3, c1, c0, 2	@ CPACR
	mrc	p15, 0, r4, c2, c0, 2	@ TTBCR
	mrc	p15, 0, r5, c3, c0, 0	@ DACR
	@ NOTE(review): these 64-bit mrrc/strd pairs do not use rr_lo_hi
	@ like the timer code below does — confirm big-endian handling.
	mrrc	p15, 0, r6, r7, c2	@ TTBR 0
	mrrc	p15, 1, r8, r9, c2	@ TTBR 1
	mrc	p15, 0, r10, c10, c2, 0	@ PRRR
	mrc	p15, 0, r11, c10, c2, 1	@ NMRR
	mrc	p15, 2, r12, c0, c0, 0	@ CSSELR

	.if \store_to_vcpu == 0
	push	{r2-r12}		@ Push CP15 registers
	.else
	str	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
	str	r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
	str	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
	str	r5, [vcpu, #CP15_OFFSET(c3_DACR)]
	add	r2, vcpu, #CP15_OFFSET(c2_TTBR0)
	strd	r6, r7, [r2]
	add	r2, vcpu, #CP15_OFFSET(c2_TTBR1)
	strd	r8, r9, [r2]
	str	r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
	str	r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
	str	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
	.endif

	@ Second batch: thread ID and fault status/address state
	mrc	p15, 0, r2, c13, c0, 1	@ CID
	mrc	p15, 0, r3, c13, c0, 2	@ TID_URW
	mrc	p15, 0, r4, c13, c0, 3	@ TID_URO
	mrc	p15, 0, r5, c13, c0, 4	@ TID_PRIV
	mrc	p15, 0, r6, c5, c0, 0	@ DFSR
	mrc	p15, 0, r7, c5, c0, 1	@ IFSR
	mrc	p15, 0, r8, c5, c1, 0	@ ADFSR
	mrc	p15, 0, r9, c5, c1, 1	@ AIFSR
	mrc	p15, 0, r10, c6, c0, 0	@ DFAR
	mrc	p15, 0, r11, c6, c0, 2	@ IFAR
	mrc	p15, 0, r12, c12, c0, 0	@ VBAR

	.if \store_to_vcpu == 0
	push	{r2-r12}		@ Push CP15 registers
	.else
	str	r2, [vcpu, #CP15_OFFSET(c13_CID)]
	str	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
	str	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
	str	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
	str	r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
	str	r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
	str	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
	str	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
	str	r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
	str	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
	str	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
	.endif

	@ Third batch: timer control, PAR and auxiliary memory attributes
	mrc	p15, 0, r2, c14, c1, 0	@ CNTKCTL
	mrrc	p15, 0, r4, r5, c7	@ PAR
	mrc	p15, 0, r6, c10, c3, 0	@ AMAIR0
	mrc	p15, 0, r7, c10, c3, 1	@ AMAIR1

	.if \store_to_vcpu == 0
	push	{r2,r4-r7}
	.else
	str	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
	add	r12, vcpu, #CP15_OFFSET(c7_PAR)
	strd	r4, r5, [r12]
	str	r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
	str	r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
	.endif
.endm
320
321/*
322 * Reads cp15 registers from memory and writes them to hardware
323 * @read_from_vcpu: If 0, registers are read in-order from the stack,
324 *		    otherwise from the VCPU struct pointed to by vcpup
325 *
326 * Assumes vcpu pointer in vcpu reg
327 */
328.macro write_cp15_state read_from_vcpu
329	.if \read_from_vcpu == 0
330	pop	{r2,r4-r7}
331	.else
332	ldr	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
333	add	r12, vcpu, #CP15_OFFSET(c7_PAR)
334	ldrd	r4, r5, [r12]
335	ldr	r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
336	ldr	r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
337	.endif
338
339	mcr	p15, 0, r2, c14, c1, 0	@ CNTKCTL
340	mcrr	p15, 0, r4, r5, c7	@ PAR
341	mcr	p15, 0, r6, c10, c3, 0	@ AMAIR0
342	mcr	p15, 0, r7, c10, c3, 1	@ AMAIR1
343
344	.if \read_from_vcpu == 0
345	pop	{r2-r12}
346	.else
347	ldr	r2, [vcpu, #CP15_OFFSET(c13_CID)]
348	ldr	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
349	ldr	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
350	ldr	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
351	ldr	r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
352	ldr	r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
353	ldr	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
354	ldr	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
355	ldr	r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
356	ldr	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
357	ldr	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
358	.endif
359
360	mcr	p15, 0, r2, c13, c0, 1	@ CID
361	mcr	p15, 0, r3, c13, c0, 2	@ TID_URW
362	mcr	p15, 0, r4, c13, c0, 3	@ TID_URO
363	mcr	p15, 0, r5, c13, c0, 4	@ TID_PRIV
364	mcr	p15, 0, r6, c5, c0, 0	@ DFSR
365	mcr	p15, 0, r7, c5, c0, 1	@ IFSR
366	mcr	p15, 0, r8, c5, c1, 0	@ ADFSR
367	mcr	p15, 0, r9, c5, c1, 1	@ AIFSR
368	mcr	p15, 0, r10, c6, c0, 0	@ DFAR
369	mcr	p15, 0, r11, c6, c0, 2	@ IFAR
370	mcr	p15, 0, r12, c12, c0, 0	@ VBAR
371
372	.if \read_from_vcpu == 0
373	pop	{r2-r12}
374	.else
375	ldr	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
376	ldr	r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
377	ldr	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
378	ldr	r5, [vcpu, #CP15_OFFSET(c3_DACR)]
379	add	r12, vcpu, #CP15_OFFSET(c2_TTBR0)
380	ldrd	r6, r7, [r12]
381	add	r12, vcpu, #CP15_OFFSET(c2_TTBR1)
382	ldrd	r8, r9, [r12]
383	ldr	r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
384	ldr	r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
385	ldr	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
386	.endif
387
388	mcr	p15, 0, r2, c1, c0, 0	@ SCTLR
389	mcr	p15, 0, r3, c1, c0, 2	@ CPACR
390	mcr	p15, 0, r4, c2, c0, 2	@ TTBCR
391	mcr	p15, 0, r5, c3, c0, 0	@ DACR
392	mcrr	p15, 0, r6, r7, c2	@ TTBR 0
393	mcrr	p15, 1, r8, r9, c2	@ TTBR 1
394	mcr	p15, 0, r10, c10, c2, 0	@ PRRR
395	mcr	p15, 0, r11, c10, c2, 1	@ NMRR
396	mcr	p15, 2, r12, c0, c0, 0	@ CSSELR
397.endm
398
399/*
400 * Save the VGIC CPU state into memory
401 *
402 * Assumes vcpu pointer in vcpu reg
403 */
404.macro save_vgic_state
405	/* Get VGIC VCTRL base into r2 */
406	ldr	r2, [vcpu, #VCPU_KVM]
407	ldr	r2, [r2, #KVM_VGIC_VCTRL]
408	cmp	r2, #0
409	beq	2f
410
411	/* Compute the address of struct vgic_cpu */
412	add	r11, vcpu, #VCPU_VGIC_CPU
413
414	/* Save all interesting registers */
415	ldr	r3, [r2, #GICH_HCR]
416	ldr	r4, [r2, #GICH_VMCR]
417	ldr	r5, [r2, #GICH_MISR]
418	ldr	r6, [r2, #GICH_EISR0]
419	ldr	r7, [r2, #GICH_EISR1]
420	ldr	r8, [r2, #GICH_ELRSR0]
421	ldr	r9, [r2, #GICH_ELRSR1]
422	ldr	r10, [r2, #GICH_APR]
423ARM_BE8(rev	r3, r3	)
424ARM_BE8(rev	r4, r4	)
425ARM_BE8(rev	r5, r5	)
426ARM_BE8(rev	r6, r6	)
427ARM_BE8(rev	r7, r7	)
428ARM_BE8(rev	r8, r8	)
429ARM_BE8(rev	r9, r9	)
430ARM_BE8(rev	r10, r10	)
431
432	str	r3, [r11, #VGIC_V2_CPU_HCR]
433	str	r4, [r11, #VGIC_V2_CPU_VMCR]
434	str	r5, [r11, #VGIC_V2_CPU_MISR]
435#ifdef CONFIG_CPU_ENDIAN_BE8
436	str	r6, [r11, #(VGIC_V2_CPU_EISR + 4)]
437	str	r7, [r11, #VGIC_V2_CPU_EISR]
438	str	r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
439	str	r9, [r11, #VGIC_V2_CPU_ELRSR]
440#else
441	str	r6, [r11, #VGIC_V2_CPU_EISR]
442	str	r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
443	str	r8, [r11, #VGIC_V2_CPU_ELRSR]
444	str	r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
445#endif
446	str	r10, [r11, #VGIC_V2_CPU_APR]
447
448	/* Clear GICH_HCR */
449	mov	r5, #0
450	str	r5, [r2, #GICH_HCR]
451
452	/* Save list registers */
453	add	r2, r2, #GICH_LR0
454	add	r3, r11, #VGIC_V2_CPU_LR
455	ldr	r4, [r11, #VGIC_CPU_NR_LR]
4561:	ldr	r6, [r2], #4
457ARM_BE8(rev	r6, r6	)
458	str	r6, [r3], #4
459	subs	r4, r4, #1
460	bne	1b
4612:
462.endm
463
464/*
465 * Restore the VGIC CPU state from memory
466 *
467 * Assumes vcpu pointer in vcpu reg
468 */
469.macro restore_vgic_state
470	/* Get VGIC VCTRL base into r2 */
471	ldr	r2, [vcpu, #VCPU_KVM]
472	ldr	r2, [r2, #KVM_VGIC_VCTRL]
473	cmp	r2, #0
474	beq	2f
475
476	/* Compute the address of struct vgic_cpu */
477	add	r11, vcpu, #VCPU_VGIC_CPU
478
479	/* We only restore a minimal set of registers */
480	ldr	r3, [r11, #VGIC_V2_CPU_HCR]
481	ldr	r4, [r11, #VGIC_V2_CPU_VMCR]
482	ldr	r8, [r11, #VGIC_V2_CPU_APR]
483ARM_BE8(rev	r3, r3	)
484ARM_BE8(rev	r4, r4	)
485ARM_BE8(rev	r8, r8	)
486
487	str	r3, [r2, #GICH_HCR]
488	str	r4, [r2, #GICH_VMCR]
489	str	r8, [r2, #GICH_APR]
490
491	/* Restore list registers */
492	add	r2, r2, #GICH_LR0
493	add	r3, r11, #VGIC_V2_CPU_LR
494	ldr	r4, [r11, #VGIC_CPU_NR_LR]
4951:	ldr	r6, [r3], #4
496ARM_BE8(rev	r6, r6  )
497	str	r6, [r2], #4
498	subs	r4, r4, #1
499	bne	1b
5002:
501.endm
502
/* CNTHCTL bits: PL1 physical counter (PCTEN) / timer (PCEN) access enable */
#define CNTHCTL_PL1PCTEN	(1 << 0)
#define CNTHCTL_PL1PCEN		(1 << 1)
505
506/*
507 * Save the timer state onto the VCPU and allow physical timer/counter access
508 * for the host.
509 *
510 * Assumes vcpu pointer in vcpu reg
511 * Clobbers r2-r5
512 */
513.macro save_timer_state
514	ldr	r4, [vcpu, #VCPU_KVM]
515	ldr	r2, [r4, #KVM_TIMER_ENABLED]
516	cmp	r2, #0
517	beq	1f
518
519	mrc	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
520	str	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
521
522	isb
523
524	mrrc	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
525	ldr	r4, =VCPU_TIMER_CNTV_CVAL
526	add	r5, vcpu, r4
527	strd	r2, r3, [r5]
528
529	@ Ensure host CNTVCT == CNTPCT
530	mov	r2, #0
531	mcrr	p15, 4, r2, r2, c14	@ CNTVOFF
532
5331:
534	mov	r2, #0			@ Clear ENABLE
535	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
536
537	@ Allow physical timer/counter access for the host
538	mrc	p15, 4, r2, c14, c1, 0	@ CNTHCTL
539	orr	r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
540	mcr	p15, 4, r2, c14, c1, 0	@ CNTHCTL
541.endm
542
543/*
544 * Load the timer state from the VCPU and deny physical timer/counter access
545 * for the host.
546 *
547 * Assumes vcpu pointer in vcpu reg
548 * Clobbers r2-r5
549 */
550.macro restore_timer_state
551	@ Disallow physical timer access for the guest
552	@ Physical counter access is allowed
553	mrc	p15, 4, r2, c14, c1, 0	@ CNTHCTL
554	orr	r2, r2, #CNTHCTL_PL1PCTEN
555	bic	r2, r2, #CNTHCTL_PL1PCEN
556	mcr	p15, 4, r2, c14, c1, 0	@ CNTHCTL
557
558	ldr	r4, [vcpu, #VCPU_KVM]
559	ldr	r2, [r4, #KVM_TIMER_ENABLED]
560	cmp	r2, #0
561	beq	1f
562
563	ldr	r2, [r4, #KVM_TIMER_CNTVOFF]
564	ldr	r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
565	mcrr	p15, 4, rr_lo_hi(r2, r3), c14	@ CNTVOFF
566
567	ldr	r4, =VCPU_TIMER_CNTV_CVAL
568	add	r5, vcpu, r4
569	ldrd	r2, r3, [r5]
570	mcrr	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
571	isb
572
573	ldr	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
574	and	r2, r2, #3
575	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
5761:
577.endm
578
/* Direction tokens for the set_* / configure_* macros below */
.equ vmentry,	0
.equ vmexit,	1
581
/* Configures the HSTR (Hyp System Trap Register) on entry/return
 * (hardware reset value is 0).  Clobbers r2, r3. */
.macro set_hstr operation
	mrc	p15, 4, r2, c1, c1, 3
	ldr	r3, =HSTR_T(15)
	.if \operation == vmentry
	orr	r2, r2, r3		@ Trap CR{15}
	.else
	bic	r2, r2, r3		@ Don't trap any CRx accesses
	.endif
	mcr	p15, 4, r2, c1, c1, 3
.endm
594
/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
 * (hardware reset value is 0). Keep previous value in r2.
 * An ISB is emitted on vmexit/vmtrap, but executed on vmexit only if
 * VFP wasn't already enabled (always executed on vmtrap).
 * If a label is specified with vmexit, it is branched to if VFP wasn't
 * enabled.
 * Clobbers r2 (previous HCPTR value preserved there) and r3.
 */
.macro set_hcptr operation, mask, label = none
	mrc	p15, 4, r2, c1, c1, 2
	ldr	r3, =\mask
	.if \operation == vmentry
	orr	r3, r2, r3		@ Trap coproc-accesses defined in mask
	.else
	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
	.endif
	mcr	p15, 4, r3, c1, c1, 2
	.if \operation != vmentry
	.if \operation == vmexit
	@ Skip the ISB if cp10/cp11 (VFP) traps were set, i.e. VFP was off
	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
	beq	1f
	.endif
	isb
	.if \label != none
	b	\label
	.endif
1:
	.endif
.endm
623
/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
 * (hardware reset value is 0).  Clobbers r2, r3. */
.macro set_hdcr operation
	mrc	p15, 4, r2, c1, c1, 1
	ldr	r3, =(HDCR_TPM|HDCR_TPMCR)
	.if \operation == vmentry
	orr	r2, r2, r3		@ Trap some perfmon accesses
	.else
	bic	r2, r2, r3		@ Don't trap any perfmon accesses
	.endif
	mcr	p15, 4, r2, c1, c1, 1
.endm
636
/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc
 *
 * On vmentry, HCR is loaded from the VCPU (OR-ed with the pending
 * virtual interrupt lines); on vmexit it is cleared to 0.
 * Assumes vcpu pointer in vcpu reg.  Clobbers r2, r3. */
.macro configure_hyp_role operation
	.if \operation == vmentry
	ldr	r2, [vcpu, #VCPU_HCR]
	ldr	r3, [vcpu, #VCPU_IRQ_LINES]
	orr	r2, r2, r3
	.else
	mov	r2, #0
	.endif
	mcr	p15, 4, r2, c1, c1, 0	@ HCR
.endm
648
/* Load the vcpu pointer (stashed in HTPIDR) into the vcpu reg (r0) */
.macro load_vcpu
	mrc	p15, 4, vcpu, c13, c0, 2	@ HTPIDR
.endm
652