/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#include <asm/tm.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
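/*
 * Note on the sequence below: SRR0 is loaded with kvmppc_call_hv_entry
 * and SRR1 with MSR & ~(MSR_IR | MSR_DR), so the RFI both branches and
 * drops translation, landing at the target in real mode.  MSR_RI is
 * cleared first because SRR0/SRR1 are live from here until the RFI
 * completes and a recoverable interrupt would clobber them.
 */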
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4
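	/*
	 * HSTATE_DECEXP holds the host decrementer expiry as an absolute
	 * timebase value, so DEC is set to (expiry - current TB): e.g.
	 * if the expiry is 1000 ticks away, DEC starts at 1000 and the
	 * host decrementer interrupt still fires on schedule.
	 */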

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f			/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
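	/*
	 * The lwarx/stwcx. loop above is the usual lock-free
	 * read-modify-write; roughly, in C:
	 *	do {
	 *		old = vc->napping_threads;
	 *	} while (cmpxchg(&vc->napping_threads, old,
	 *			 old | (1 << ptid)) != old);
	 * stwcx. fails (and we retry) if another thread updated the
	 * word between the lwarx and the stwcx.
	 */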
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpwi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, 112-4(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, 112-4(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)

	/* Order load of vcore, ptid etc. after load of vcpu */
	lwsync
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	/*
	 * Once we clear HSTATE_KVM_VCPU(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCPU(r13)

/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to power7_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
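	/*
	 * rlwimi inserts bits of r3 into r4 only under the given mask,
	 * so the one instruction above sets PECE0 and clears PECE1: the
	 * offline thread will then wake from nap on external interrupts
	 * (e.g. an IPI) but not on decrementer interrupts.
	 */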
	li	r3, 0
	b	power7_wakeup_loss

53:	HMT_LOW
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif
	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r9, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r9
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r9
	bne	21b
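	/*
	 * VCORE_ENTRY_EXIT packs two bitmaps into one word: the low
	 * 8 bits record which threads have entered the guest, and bits
	 * 8-15 record which threads have begun exiting.  Any exit bit
	 * makes the word >= 0x100, which is what this compare (and the
	 * other 0x100 checks in this file) are testing for.
	 */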

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	cmpwi	r6,0
	bne	10f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
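	/*
	 * Each tlbiel with IS = 0b10 invalidates one TLB congruence
	 * class for the current LPID; the set index sits above bit 11
	 * of the RB value, so stepping r7 by 0x1000 walks all 128 (P7)
	 * or 512 (P8) sets.  tlbiel only affects this core, which is
	 * enough here since need_tlb_flush is tracked per cpu.
	 */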

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
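	/*
	 * TBU40 writes only the upper 40 bits of the timebase; the low
	 * 24 bits keep ticking.  clrldi ...,40 isolates those low 24
	 * bits before and after the write: if they wrapped in between
	 * (new < old), a carry into the upper bits was lost, so
	 * addis r8,r8,0x100 adds 2^24 before rewriting TBU40.
	 */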

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	skip_tm
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm	/* TM not active in guest */
	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr
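	/*
	 * "reg" is an assembler symbol, not a register: the .rept block
	 * expands at assembly time into 29 loads, one per GPR, e.g.
	 *	ld	0, VCPU_GPRS_TM(0)(r31)
	 *	ld	1, VCPU_GPRS_TM(1)(r31)
	 *	...
	 * with VCPU_GPRS_TM() (defined at the top of this file) turning
	 * the GPR number into a byte offset in the checkpointed-GPR
	 * array of the vcpu.
	 */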

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
skip_tm:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r6, VCPU_VTB(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_VTB, r6
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	cmpwi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
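	/*
	 * The rldicl/rotldi pair clears MSR_HV without needing a 64-bit
	 * mask register: the first rotate puts the HV bit at the top,
	 * where the rldicl mask (MB=1) clears it, and the second rotate
	 * puts everything back (63 - MSR_HV_LG plus 1 + MSR_HV_LG is a
	 * full 64-bit rotation).  MSR_ME is then forced on, since the
	 * guest must always run with machine checks enabled.
	 */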

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

secondary_too_late:
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Check if any CPU is heading out to the host, if so head out too */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state  */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	mr	r3, r12
	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)
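	/*
	 * slbmfee/slbmfev read the ESID and VSID halves of SLB entry r6;
	 * entries whose valid bit (SLB_ESID_V) is clear are skipped, so
	 * VCPU_SLB_MAX ends up holding the count of valid entries saved
	 * (accumulated in r5).  The "add r8,r8,r6" folds the entry index
	 * into the low bits of the saved ESID dword ("put index in"
	 * above), matching the format slbmte expects on guest entry.
	 */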

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r6, SPRN_VTB
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0
	mtspr	SPRN_TCSCR, r0
	mtspr	SPRN_WORT, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)
	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
2:
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
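	/*
	 * Bit math for the workaround above: 6 counters x 9 freeze
	 * condition bits = 54 bits, so "li r3,-1; clrrdi r3,r3,10"
	 * builds a mask with the top 54 bits set (64 - 10 = 54),
	 * freezing every counter via MMCR2 without touching MMCR0.
	 */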
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync
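	/*
	 * In the loop above, srwi extracts the exit map (bits 8-15) and
	 * clrldi ...,56 keeps the entry map (low 8 bits); the primary
	 * spins until every thread that entered the guest has also
	 * marked itself as exiting, i.e. the two bitmaps match.
	 */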

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Reset PCR */
17:	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
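	/*
	 * enabled_hcalls[] is a bitmap with one bit per hcall number / 4
	 * (hcall numbers are multiples of 4).  Worked example for H_CEDE
	 * (0xe0): bit index = 0xe0 / 4 = 56, so dword 56 >> 6 = 0 and
	 * bit 56 & 0x3f = 56 within it, matching the srdi/rlwinm above.
	 */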
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but the handler punted it
	 * back to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	0		/* 0x138 */
	.long	0		/* 0x13c */
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
	.long	0		/* 0x2fc */
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr
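	/*
	 * In the emulation above, the rlwimi pair transplants the DABR's
	 * low-order control bits (read/write enable and translation) into
	 * the corresponding DAWRX fields, and clrrdi clears the low 3
	 * bits of r4 so the address written to DAWR is doubleword
	 * aligned, since the old DABR packed its control bits there.
	 */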
2072
2073_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
2074	ori	r11,r11,MSR_EE
2075	std	r11,VCPU_MSR(r3)
2076	li	r0,1
2077	stb	r0,VCPU_CEDED(r3)
2078	sync			/* order setting ceded vs. testing prodded */
2079	lbz	r5,VCPU_PRODDED(r3)
2080	cmpwi	r5,0
2081	bne	kvm_cede_prodded
2082	li	r12,0		/* set trap to 0 to say hcall is handled */
2083	stw	r12,VCPU_TRAP(r3)
2084	li	r0,H_SUCCESS
2085	std	r0,VCPU_GPR(R3)(r3)
2086
2087	/*
2088	 * Set our bit in the bitmask of napping threads unless all the
2089	 * other threads are already napping, in which case we send this
2090	 * up to the host.
2091	 */
2092	ld	r5,HSTATE_KVM_VCORE(r13)
2093	lbz	r6,HSTATE_PTID(r13)
2094	lwz	r8,VCORE_ENTRY_EXIT(r5)
2095	clrldi	r8,r8,56
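	/* r8 = low byte of entry_exit_map: threads that have entered */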
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
	cmpw	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	extsw	r3, r3
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer, external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
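	/* tell the power-save wakeup code that KVM wants this thread */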
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_LPCR,r5
	isync
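	/*
	 * The dummy store, ptesync and dependent load below make sure
	 * our preceding stores have been performed before we nap; the
	 * cmpd of r0 with itself always succeeds, so the bne is never
	 * taken and exists only to make the nap depend on the load.
	 */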
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not, go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
	b	guest_exit_cont

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* Did we handle the MCE? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).  For
	 * handled (non-fatal) errors, just go back to guest execution
	 * with the current HSRR0 instead of exiting the guest, so a
	 * fatal error injects a machine check and crashes the guest.
	 *
	 * The old code returned to the host for unhandled errors, which
	 * caused the guest to hang with soft lockups and made it
	 * difficult to recover the guest instance.
	 */
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	bne	2f	/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	ld	r11, VCPU_MSR(r9)
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies r0, r6, r7, r8.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
	/* see if it's a host IPI */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, clear it and return -1 */
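	/* construct the server doorbell message type for msgclr below */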
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	li	r3, -1
	blr

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 * Modifies r0, r6, r7, r8; returns value in r3.
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
	/*
	 * Save XIRR for later. Since we get it in reverse endian on LE
	 * systems, save it byte-reversed and fetch it back in host endian.
	 */
	li	r3, HSTATE_SAVED_XIRR
	STWX_BE	r0, r3, r13
#ifdef __LITTLE_ENDIAN__
	lwz	r3, HSTATE_SAVED_XIRR(r13)
#else
	mr	r3, r0
#endif
	rlwinm.	r3, r3, 0, 0xffffff
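	/* r3 now holds the 24-bit XISR with the CPPR byte masked off */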
	sync
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest.
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest.
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r12, 0
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host. We saved a copy of XIRR in
	 * the PACA earlier; it will be picked up by the host ICP driver.
	 */
	li	r3, 1
	b	1b

43:	/* We raced with the host; we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	li	r3, 1
	b	1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
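	/* FP (and VMX/VSX where enabled above) can now be accessed */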
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers.
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *   r11 has the guest MSR value (in/out)
 *   r9 has a vcpu pointer (in)
 *   r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2 /* Check if we are in transactional state.  */
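	/* (TS = 0b10 means transactional, 0b01 means suspended) */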
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
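	/* PMC6 = 0x7fffffff: one more counted event overflows it */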
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	5f				/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
5:	mftb	r5
	subf	r5, r6, r5
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	cmpwi	r8, 0
	beq	4f				/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
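	/* seqcount is now odd, telling readers an update is in progress */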
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif
