Lines Matching refs:vcpu

54 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
56 static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
65 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) in kvmppc_is_split_real() argument
67 ulong msr = kvmppc_get_msr(vcpu); in kvmppc_is_split_real()
71 static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) in kvmppc_fixup_split_real() argument
73 ulong msr = kvmppc_get_msr(vcpu); in kvmppc_fixup_split_real()
74 ulong pc = kvmppc_get_pc(vcpu); in kvmppc_fixup_split_real()
81 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) in kvmppc_fixup_split_real()
88 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; in kvmppc_fixup_split_real()
89 kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); in kvmppc_fixup_split_real()
92 void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) in kvmppc_unfixup_split_real() argument
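
The matches at lines 65-92 belong to the split real mode hack: kvmppc_is_split_real() checks the guest MSR, kvmppc_fixup_split_real() marks the vcpu with BOOK3S_HFLAG_SPLIT_HACK and biases the PC with SPLIT_HACK_OFFS, and kvmppc_unfixup_split_real() undoes the bias. A minimal user-space sketch of that bookkeeping follows; the constant values, the struct, and the undo step are assumptions for illustration, not taken from the listing.

#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the real SPLIT_HACK_* constants live in the kernel headers. */
#define SPLIT_HACK_MASK		0xff000000UL
#define SPLIT_HACK_OFFS		0xfb000000UL
#define HFLAG_SPLIT_HACK	0x1UL

/* Hypothetical stand-in for the vcpu fields touched around lines 81-89. */
struct guest_state {
	unsigned long hflags;
	unsigned long pc;
};

/* Mirrors the fixup step visible in the listing: set the flag, bias the PC. */
static void fixup_split_real(struct guest_state *g)
{
	if (g->hflags & HFLAG_SPLIT_HACK)	/* already fixed up */
		return;
	g->hflags |= HFLAG_SPLIT_HACK;
	g->pc |= SPLIT_HACK_OFFS;
}

/* Assumed inverse: strip the bias and clear the flag again. */
static void unfixup_split_real(struct guest_state *g)
{
	if (g->hflags & HFLAG_SPLIT_HACK) {
		g->pc &= ~SPLIT_HACK_MASK;
		g->hflags &= ~HFLAG_SPLIT_HACK;
	}
}

int main(void)
{
	struct guest_state g = { .hflags = 0, .pc = 0x100 };

	fixup_split_real(&g);
	printf("fixed up:  pc=%#lx hflags=%#lx\n", g.pc, g.hflags);
	unfixup_split_real(&g);
	printf("restored:  pc=%#lx hflags=%#lx\n", g.pc, g.hflags);
	return 0;
}
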
94 static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) in kvmppc_core_vcpu_load_pr() argument
97 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_core_vcpu_load_pr()
98 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); in kvmppc_core_vcpu_load_pr()
99 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; in kvmppc_core_vcpu_load_pr()
109 vcpu->cpu = smp_processor_id(); in kvmppc_core_vcpu_load_pr()
111 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; in kvmppc_core_vcpu_load_pr()
114 if (kvmppc_is_split_real(vcpu)) in kvmppc_core_vcpu_load_pr()
115 kvmppc_fixup_split_real(vcpu); in kvmppc_core_vcpu_load_pr()
118 static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_put_pr() argument
121 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_core_vcpu_put_pr()
123 kvmppc_copy_from_svcpu(vcpu, svcpu); in kvmppc_core_vcpu_put_pr()
125 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); in kvmppc_core_vcpu_put_pr()
126 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; in kvmppc_core_vcpu_put_pr()
130 if (kvmppc_is_split_real(vcpu)) in kvmppc_core_vcpu_put_pr()
131 kvmppc_unfixup_split_real(vcpu); in kvmppc_core_vcpu_put_pr()
133 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); in kvmppc_core_vcpu_put_pr()
134 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); in kvmppc_core_vcpu_put_pr()
141 vcpu->cpu = -1; in kvmppc_core_vcpu_put_pr()
146 struct kvm_vcpu *vcpu) in kvmppc_copy_to_svcpu() argument
148 svcpu->gpr[0] = vcpu->arch.gpr[0]; in kvmppc_copy_to_svcpu()
149 svcpu->gpr[1] = vcpu->arch.gpr[1]; in kvmppc_copy_to_svcpu()
150 svcpu->gpr[2] = vcpu->arch.gpr[2]; in kvmppc_copy_to_svcpu()
151 svcpu->gpr[3] = vcpu->arch.gpr[3]; in kvmppc_copy_to_svcpu()
152 svcpu->gpr[4] = vcpu->arch.gpr[4]; in kvmppc_copy_to_svcpu()
153 svcpu->gpr[5] = vcpu->arch.gpr[5]; in kvmppc_copy_to_svcpu()
154 svcpu->gpr[6] = vcpu->arch.gpr[6]; in kvmppc_copy_to_svcpu()
155 svcpu->gpr[7] = vcpu->arch.gpr[7]; in kvmppc_copy_to_svcpu()
156 svcpu->gpr[8] = vcpu->arch.gpr[8]; in kvmppc_copy_to_svcpu()
157 svcpu->gpr[9] = vcpu->arch.gpr[9]; in kvmppc_copy_to_svcpu()
158 svcpu->gpr[10] = vcpu->arch.gpr[10]; in kvmppc_copy_to_svcpu()
159 svcpu->gpr[11] = vcpu->arch.gpr[11]; in kvmppc_copy_to_svcpu()
160 svcpu->gpr[12] = vcpu->arch.gpr[12]; in kvmppc_copy_to_svcpu()
161 svcpu->gpr[13] = vcpu->arch.gpr[13]; in kvmppc_copy_to_svcpu()
162 svcpu->cr = vcpu->arch.cr; in kvmppc_copy_to_svcpu()
163 svcpu->xer = vcpu->arch.xer; in kvmppc_copy_to_svcpu()
164 svcpu->ctr = vcpu->arch.ctr; in kvmppc_copy_to_svcpu()
165 svcpu->lr = vcpu->arch.lr; in kvmppc_copy_to_svcpu()
166 svcpu->pc = vcpu->arch.pc; in kvmppc_copy_to_svcpu()
168 svcpu->shadow_fscr = vcpu->arch.shadow_fscr; in kvmppc_copy_to_svcpu()
174 vcpu->arch.entry_tb = get_tb(); in kvmppc_copy_to_svcpu()
175 vcpu->arch.entry_vtb = get_vtb(); in kvmppc_copy_to_svcpu()
177 vcpu->arch.entry_ic = mfspr(SPRN_IC); in kvmppc_copy_to_svcpu()
182 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, in kvmppc_copy_from_svcpu() argument
198 vcpu->arch.gpr[0] = svcpu->gpr[0]; in kvmppc_copy_from_svcpu()
199 vcpu->arch.gpr[1] = svcpu->gpr[1]; in kvmppc_copy_from_svcpu()
200 vcpu->arch.gpr[2] = svcpu->gpr[2]; in kvmppc_copy_from_svcpu()
201 vcpu->arch.gpr[3] = svcpu->gpr[3]; in kvmppc_copy_from_svcpu()
202 vcpu->arch.gpr[4] = svcpu->gpr[4]; in kvmppc_copy_from_svcpu()
203 vcpu->arch.gpr[5] = svcpu->gpr[5]; in kvmppc_copy_from_svcpu()
204 vcpu->arch.gpr[6] = svcpu->gpr[6]; in kvmppc_copy_from_svcpu()
205 vcpu->arch.gpr[7] = svcpu->gpr[7]; in kvmppc_copy_from_svcpu()
206 vcpu->arch.gpr[8] = svcpu->gpr[8]; in kvmppc_copy_from_svcpu()
207 vcpu->arch.gpr[9] = svcpu->gpr[9]; in kvmppc_copy_from_svcpu()
208 vcpu->arch.gpr[10] = svcpu->gpr[10]; in kvmppc_copy_from_svcpu()
209 vcpu->arch.gpr[11] = svcpu->gpr[11]; in kvmppc_copy_from_svcpu()
210 vcpu->arch.gpr[12] = svcpu->gpr[12]; in kvmppc_copy_from_svcpu()
211 vcpu->arch.gpr[13] = svcpu->gpr[13]; in kvmppc_copy_from_svcpu()
212 vcpu->arch.cr = svcpu->cr; in kvmppc_copy_from_svcpu()
213 vcpu->arch.xer = svcpu->xer; in kvmppc_copy_from_svcpu()
214 vcpu->arch.ctr = svcpu->ctr; in kvmppc_copy_from_svcpu()
215 vcpu->arch.lr = svcpu->lr; in kvmppc_copy_from_svcpu()
216 vcpu->arch.pc = svcpu->pc; in kvmppc_copy_from_svcpu()
217 vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; in kvmppc_copy_from_svcpu()
218 vcpu->arch.fault_dar = svcpu->fault_dar; in kvmppc_copy_from_svcpu()
219 vcpu->arch.fault_dsisr = svcpu->fault_dsisr; in kvmppc_copy_from_svcpu()
220 vcpu->arch.last_inst = svcpu->last_inst; in kvmppc_copy_from_svcpu()
222 vcpu->arch.shadow_fscr = svcpu->shadow_fscr; in kvmppc_copy_from_svcpu()
227 vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb; in kvmppc_copy_from_svcpu()
228 vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb; in kvmppc_copy_from_svcpu()
229 vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb; in kvmppc_copy_from_svcpu()
231 vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; in kvmppc_copy_from_svcpu()
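
Lines 148-166 and 198-219 show guest register state being copied field by field into the shadow vcpu before entry and back out after exit (GPRs 0-13, CR, XER, CTR, LR, PC, plus fault and last-instruction state on the way back), while lines 174-177 and 227-231 snapshot the timebase/VTB/IC counters on entry and add the deltas to PURR, SPURR, VTB and IC on exit. The unrolled GPR copies are equivalent to a plain loop; a hedged sketch with hypothetical struct names (the real structures carry many more fields):

#include <stdint.h>

#define NR_SHADOW_GPRS 14	/* gpr[0]..gpr[13], as in the matched lines */

struct guest_regs  { uint64_t gpr[32]; uint64_t cr, xer, ctr, lr, pc; };	/* hypothetical */
struct shadow_regs { uint64_t gpr[NR_SHADOW_GPRS]; uint64_t cr, xer, ctr, lr, pc; };

/* Loop-form equivalent of the unrolled copy-to-shadow sequence. */
static void copy_to_shadow(struct shadow_regs *s, const struct guest_regs *g)
{
	for (int i = 0; i < NR_SHADOW_GPRS; i++)
		s->gpr[i] = g->gpr[i];
	s->cr  = g->cr;
	s->xer = g->xer;
	s->ctr = g->ctr;
	s->lr  = g->lr;
	s->pc  = g->pc;
}

/* Mirror-image copy back out after the guest exits. */
static void copy_from_shadow(struct guest_regs *g, const struct shadow_regs *s)
{
	for (int i = 0; i < NR_SHADOW_GPRS; i++)
		g->gpr[i] = s->gpr[i];
	g->cr  = s->cr;
	g->xer = s->xer;
	g->ctr = s->ctr;
	g->lr  = s->lr;
	g->pc  = s->pc;
}

The open-coded form shown in the listing is what the kernel actually uses; the loop is only meant to summarize it.
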
238 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) in kvmppc_core_check_requests_pr() argument
244 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in kvmppc_core_check_requests_pr()
245 kvmppc_mmu_pte_flush(vcpu, 0, 0); in kvmppc_core_check_requests_pr()
255 struct kvm_vcpu *vcpu; in do_kvm_unmap_hva() local
275 kvm_for_each_vcpu(i, vcpu, kvm) in do_kvm_unmap_hva()
276 kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT, in do_kvm_unmap_hva()
319 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) in kvmppc_recalc_shadow_msr() argument
321 ulong guest_msr = kvmppc_get_msr(vcpu); in kvmppc_recalc_shadow_msr()
329 smsr |= (guest_msr & vcpu->arch.guest_owned_ext); in kvmppc_recalc_shadow_msr()
334 vcpu->arch.shadow_msr = smsr; in kvmppc_recalc_shadow_msr()
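
Lines 321-334 recompute the shadow MSR, i.e. the MSR the hardware really runs with while the guest executes: only the facility bits recorded in guest_owned_ext pass straight through from the guest MSR (line 329); the rest of the value is chosen by the host. A minimal sketch of that derivation; the host-forced base bits are an assumption, not shown in the listing.

#include <stdint.h>

/*
 * Hedged sketch of the recalculation: start from host-chosen bits, then let
 * through only the guest MSR bits the guest currently owns (line 329).
 */
static uint64_t recalc_shadow_msr(uint64_t guest_msr, uint64_t guest_owned_ext,
				  uint64_t host_forced_bits /* assumed */)
{
	uint64_t smsr = host_forced_bits;

	smsr |= guest_msr & guest_owned_ext;
	return smsr;
}
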
337 static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) in kvmppc_set_msr_pr() argument
339 ulong old_msr = kvmppc_get_msr(vcpu); in kvmppc_set_msr_pr()
345 msr &= to_book3s(vcpu)->msr_mask; in kvmppc_set_msr_pr()
346 kvmppc_set_msr_fast(vcpu, msr); in kvmppc_set_msr_pr()
347 kvmppc_recalc_shadow_msr(vcpu); in kvmppc_set_msr_pr()
350 if (!vcpu->arch.pending_exceptions) { in kvmppc_set_msr_pr()
351 kvm_vcpu_block(vcpu); in kvmppc_set_msr_pr()
352 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); in kvmppc_set_msr_pr()
353 vcpu->stat.halt_wakeup++; in kvmppc_set_msr_pr()
357 kvmppc_set_msr_fast(vcpu, msr); in kvmppc_set_msr_pr()
361 if (kvmppc_is_split_real(vcpu)) in kvmppc_set_msr_pr()
362 kvmppc_fixup_split_real(vcpu); in kvmppc_set_msr_pr()
364 kvmppc_unfixup_split_real(vcpu); in kvmppc_set_msr_pr()
366 if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != in kvmppc_set_msr_pr()
368 kvmppc_mmu_flush_segments(vcpu); in kvmppc_set_msr_pr()
369 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); in kvmppc_set_msr_pr()
372 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) { in kvmppc_set_msr_pr()
373 struct kvm_vcpu_arch *a = &vcpu->arch; in kvmppc_set_msr_pr()
376 kvmppc_mmu_map_segment(vcpu, a->magic_page_ea); in kvmppc_set_msr_pr()
378 kvmppc_mmu_map_segment(vcpu, a->magic_page_pa); in kvmppc_set_msr_pr()
390 if (vcpu->arch.magic_page_pa && in kvmppc_set_msr_pr()
393 kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, in kvmppc_set_msr_pr()
398 if (kvmppc_get_msr(vcpu) & MSR_FP) in kvmppc_set_msr_pr()
399 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); in kvmppc_set_msr_pr()
402 void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) in kvmppc_set_pvr_pr() argument
406 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; in kvmppc_set_pvr_pr()
407 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_pr()
410 kvmppc_mmu_book3s_64_init(vcpu); in kvmppc_set_pvr_pr()
411 if (!to_book3s(vcpu)->hior_explicit) in kvmppc_set_pvr_pr()
412 to_book3s(vcpu)->hior = 0xfff00000; in kvmppc_set_pvr_pr()
413 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; in kvmppc_set_pvr_pr()
414 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_set_pvr_pr()
418 kvmppc_mmu_book3s_32_init(vcpu); in kvmppc_set_pvr_pr()
419 if (!to_book3s(vcpu)->hior_explicit) in kvmppc_set_pvr_pr()
420 to_book3s(vcpu)->hior = 0; in kvmppc_set_pvr_pr()
421 to_book3s(vcpu)->msr_mask = 0xffffffffULL; in kvmppc_set_pvr_pr()
422 vcpu->arch.cpu_type = KVM_CPU_3S_32; in kvmppc_set_pvr_pr()
425 kvmppc_sanity_check(vcpu); in kvmppc_set_pvr_pr()
429 vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32; in kvmppc_set_pvr_pr()
430 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && in kvmppc_set_pvr_pr()
432 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; in kvmppc_set_pvr_pr()
437 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); in kvmppc_set_pvr_pr()
451 vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | in kvmppc_set_pvr_pr()
458 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; in kvmppc_set_pvr_pr()
473 vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS; in kvmppc_set_pvr_pr()
488 static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) in kvmppc_patch_dcbz() argument
495 hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); in kvmppc_patch_dcbz()
515 static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) in kvmppc_visible_gpa() argument
517 ulong mp_pa = vcpu->arch.magic_page_pa; in kvmppc_visible_gpa()
519 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) in kvmppc_visible_gpa()
527 return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT); in kvmppc_visible_gpa()
530 int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_handle_pagefault() argument
540 bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false; in kvmppc_handle_pagefault()
541 bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false; in kvmppc_handle_pagefault()
545 if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE)) in kvmppc_handle_pagefault()
550 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); in kvmppc_handle_pagefault()
561 switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { in kvmppc_handle_pagefault()
567 (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && in kvmppc_handle_pagefault()
572 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); in kvmppc_handle_pagefault()
574 if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR) in kvmppc_handle_pagefault()
585 if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_pagefault()
586 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { in kvmppc_handle_pagefault()
597 u64 ssrr1 = vcpu->arch.shadow_srr1; in kvmppc_handle_pagefault()
598 u64 msr = kvmppc_get_msr(vcpu); in kvmppc_handle_pagefault()
599 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); in kvmppc_handle_pagefault()
600 kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr); in kvmppc_handle_pagefault()
601 kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); in kvmppc_handle_pagefault()
602 kvmppc_book3s_queue_irqprio(vcpu, vec); in kvmppc_handle_pagefault()
605 u32 dsisr = vcpu->arch.fault_dsisr; in kvmppc_handle_pagefault()
606 u64 ssrr1 = vcpu->arch.shadow_srr1; in kvmppc_handle_pagefault()
607 u64 msr = kvmppc_get_msr(vcpu); in kvmppc_handle_pagefault()
608 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); in kvmppc_handle_pagefault()
610 kvmppc_set_dsisr(vcpu, dsisr); in kvmppc_handle_pagefault()
611 kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); in kvmppc_handle_pagefault()
612 kvmppc_book3s_queue_irqprio(vcpu, vec); in kvmppc_handle_pagefault()
615 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); in kvmppc_handle_pagefault()
616 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); in kvmppc_handle_pagefault()
618 kvmppc_visible_gpa(vcpu, pte.raddr)) { in kvmppc_handle_pagefault()
619 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { in kvmppc_handle_pagefault()
625 kvmppc_mmu_unmap_page(vcpu, &pte); in kvmppc_handle_pagefault()
628 kvmppc_mmu_map_page(vcpu, &pte, iswrite); in kvmppc_handle_pagefault()
630 vcpu->stat.sp_storage++; in kvmppc_handle_pagefault()
631 else if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_pagefault()
632 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) in kvmppc_handle_pagefault()
633 kvmppc_patch_dcbz(vcpu, &pte); in kvmppc_handle_pagefault()
636 vcpu->stat.mmio_exits++; in kvmppc_handle_pagefault()
637 vcpu->arch.paddr_accessed = pte.raddr; in kvmppc_handle_pagefault()
638 vcpu->arch.vaddr_accessed = pte.eaddr; in kvmppc_handle_pagefault()
639 r = kvmppc_emulate_mmio(run, vcpu); in kvmppc_handle_pagefault()
648 void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) in kvmppc_giveup_ext() argument
659 msr &= vcpu->arch.guest_owned_ext; in kvmppc_giveup_ext()
686 vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX); in kvmppc_giveup_ext()
687 kvmppc_recalc_shadow_msr(vcpu); in kvmppc_giveup_ext()
691 static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_giveup_fac() argument
694 if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { in kvmppc_giveup_fac()
701 vcpu->arch.tar = mfspr(SPRN_TAR); in kvmppc_giveup_fac()
703 vcpu->arch.shadow_fscr &= ~FSCR_TAR; in kvmppc_giveup_fac()
710 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, in kvmppc_handle_ext() argument
716 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) in kvmppc_handle_ext()
719 if (!(kvmppc_get_msr(vcpu) & msr)) { in kvmppc_handle_ext()
720 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_ext()
730 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); in kvmppc_handle_ext()
742 msr &= ~vcpu->arch.guest_owned_ext; in kvmppc_handle_ext()
753 load_fp_state(&vcpu->arch.fp); in kvmppc_handle_ext()
754 t->fp_save_area = &vcpu->arch.fp; in kvmppc_handle_ext()
762 load_vr_state(&vcpu->arch.vr); in kvmppc_handle_ext()
763 t->vr_save_area = &vcpu->arch.vr; in kvmppc_handle_ext()
769 vcpu->arch.guest_owned_ext |= msr; in kvmppc_handle_ext()
770 kvmppc_recalc_shadow_msr(vcpu); in kvmppc_handle_ext()
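
Lines 710-770 implement lazy facility handling for FP/VEC/VSX: on a facility-unavailable exit, if the guest's own MSR also has the facility disabled, the interrupt is simply reflected into the guest (719-720); otherwise the host register state is loaded (753/762), the facility is recorded in guest_owned_ext and the shadow MSR recalculated (769-770). A hedged control-flow sketch with stubbed helpers; everything not named in the listing is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define MSR_FP	(1ULL << 13)	/* PowerPC MSR floating-point enable */
#define MSR_VEC	(1ULL << 25)	/* Altivec enable */

/* Hypothetical stand-in for the vcpu fields used by kvmppc_handle_ext(). */
struct vcpu_sketch {
	uint64_t guest_msr;
	uint64_t guest_owned_ext;
};

static void reflect_to_guest(unsigned int exit_nr) { printf("queue %#x for the guest\n", exit_nr); }
static void load_fp_regs(void)  { puts("load FP state"); }
static void load_vec_regs(void) { puts("load VEC state"); }

/* Sketch of the decision made on a *_UNAVAIL exit, per the matched lines. */
static void handle_ext(struct vcpu_sketch *v, unsigned int exit_nr, uint64_t msr)
{
	if (!(v->guest_msr & msr)) {
		/* The guest has it disabled too: make it the guest's problem. */
		reflect_to_guest(exit_nr);
		return;
	}

	/* Grant the facility: load host state and remember guest ownership. */
	if (msr & MSR_FP)
		load_fp_regs();
	if (msr & MSR_VEC)
		load_vec_regs();

	v->guest_owned_ext |= msr;
	/* the real code then calls kvmppc_recalc_shadow_msr() */
}

int main(void)
{
	struct vcpu_sketch v = { .guest_msr = MSR_FP, .guest_owned_ext = 0 };

	handle_ext(&v, 0x800 /* BOOK3S_INTERRUPT_FP_UNAVAIL, assumed value */, MSR_FP);
	printf("guest_owned_ext = %#llx\n", (unsigned long long)v.guest_owned_ext);
	return 0;
}
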
779 static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) in kvmppc_handle_lost_ext() argument
783 lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr; in kvmppc_handle_lost_ext()
790 load_fp_state(&vcpu->arch.fp); in kvmppc_handle_lost_ext()
797 load_vr_state(&vcpu->arch.vr); in kvmppc_handle_lost_ext()
806 static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_trigger_fac_interrupt() argument
809 vcpu->arch.fscr &= ~(0xffULL << 56); in kvmppc_trigger_fac_interrupt()
810 vcpu->arch.fscr |= (fac << 56); in kvmppc_trigger_fac_interrupt()
811 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); in kvmppc_trigger_fac_interrupt()
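
Lines 806-811 reflect a facility-unavailable interrupt into the guest: the top byte of FSCR (bits 56-63) carries the number of the facility that caused the trap, so the old cause is masked out and the new one shifted in before BOOK3S_INTERRUPT_FAC_UNAVAIL is queued. A self-contained sketch of just that bit manipulation:

#include <stdint.h>
#include <stdio.h>

/* Put a facility number into the FSCR interrupt-cause byte (bits 56-63). */
static uint64_t fscr_set_cause(uint64_t fscr, uint64_t fac)
{
	fscr &= ~(0xffULL << 56);	/* clear the previous cause */
	fscr |= fac << 56;		/* record the new one */
	return fscr;
}

int main(void)
{
	uint64_t fscr = 0;

	/* FSCR_TAR_LG (8 in the kernel headers) is what the TAR path above would pass. */
	fscr = fscr_set_cause(fscr, 8);
	printf("fscr = %#llx, cause = %llu\n",
	       (unsigned long long)fscr, (unsigned long long)(fscr >> 56));
	return 0;
}
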
814 static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_emulate_fac() argument
818 if (!(kvmppc_get_msr(vcpu) & MSR_PR)) in kvmppc_emulate_fac()
819 er = kvmppc_emulate_instruction(vcpu->run, vcpu); in kvmppc_emulate_fac()
823 kvmppc_trigger_fac_interrupt(vcpu, fac); in kvmppc_emulate_fac()
828 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_handle_fac() argument
840 guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac)); in kvmppc_handle_fac()
843 guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM; in kvmppc_handle_fac()
852 kvmppc_trigger_fac_interrupt(vcpu, fac); in kvmppc_handle_fac()
860 mtspr(SPRN_TAR, vcpu->arch.tar); in kvmppc_handle_fac()
861 vcpu->arch.shadow_fscr |= FSCR_TAR; in kvmppc_handle_fac()
864 kvmppc_emulate_fac(vcpu, fac); in kvmppc_handle_fac()
871 void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr) in kvmppc_set_fscr() argument
873 if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { in kvmppc_set_fscr()
875 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); in kvmppc_set_fscr()
877 vcpu->arch.fscr = fscr; in kvmppc_set_fscr()
881 int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_handle_exit_pr() argument
887 vcpu->stat.sum_exits++; in kvmppc_handle_exit_pr()
894 trace_kvm_exit(exit_nr, vcpu); in kvmppc_handle_exit_pr()
900 ulong shadow_srr1 = vcpu->arch.shadow_srr1; in kvmppc_handle_exit_pr()
901 vcpu->stat.pf_instruc++; in kvmppc_handle_exit_pr()
903 if (kvmppc_is_split_real(vcpu)) in kvmppc_handle_exit_pr()
904 kvmppc_fixup_split_real(vcpu); in kvmppc_handle_exit_pr()
913 svcpu = svcpu_get(vcpu); in kvmppc_handle_exit_pr()
914 sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]; in kvmppc_handle_exit_pr()
917 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); in kvmppc_handle_exit_pr()
926 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_exit_pr()
927 r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); in kvmppc_handle_exit_pr()
928 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_exit_pr()
929 vcpu->stat.sp_instruc++; in kvmppc_handle_exit_pr()
930 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_exit_pr()
931 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { in kvmppc_handle_exit_pr()
937 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); in kvmppc_handle_exit_pr()
940 u64 msr = kvmppc_get_msr(vcpu); in kvmppc_handle_exit_pr()
942 kvmppc_set_msr_fast(vcpu, msr); in kvmppc_handle_exit_pr()
943 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
950 ulong dar = kvmppc_get_fault_dar(vcpu); in kvmppc_handle_exit_pr()
951 u32 fault_dsisr = vcpu->arch.fault_dsisr; in kvmppc_handle_exit_pr()
952 vcpu->stat.pf_storage++; in kvmppc_handle_exit_pr()
961 svcpu = svcpu_get(vcpu); in kvmppc_handle_exit_pr()
965 kvmppc_mmu_map_segment(vcpu, dar); in kvmppc_handle_exit_pr()
978 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_exit_pr()
979 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); in kvmppc_handle_exit_pr()
980 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_exit_pr()
982 kvmppc_set_dar(vcpu, dar); in kvmppc_handle_exit_pr()
983 kvmppc_set_dsisr(vcpu, fault_dsisr); in kvmppc_handle_exit_pr()
984 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
990 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { in kvmppc_handle_exit_pr()
991 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); in kvmppc_handle_exit_pr()
992 kvmppc_book3s_queue_irqprio(vcpu, in kvmppc_handle_exit_pr()
998 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) { in kvmppc_handle_exit_pr()
999 kvmppc_book3s_queue_irqprio(vcpu, in kvmppc_handle_exit_pr()
1009 vcpu->stat.dec_exits++; in kvmppc_handle_exit_pr()
1015 vcpu->stat.ext_intr_exits++; in kvmppc_handle_exit_pr()
1030 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; in kvmppc_handle_exit_pr()
1032 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_handle_exit_pr()
1038 if (kvmppc_get_msr(vcpu) & MSR_PR) { in kvmppc_handle_exit_pr()
1041 kvmppc_get_pc(vcpu), last_inst); in kvmppc_handle_exit_pr()
1045 kvmppc_core_queue_program(vcpu, flags); in kvmppc_handle_exit_pr()
1051 vcpu->stat.emulated_inst_exits++; in kvmppc_handle_exit_pr()
1052 er = kvmppc_emulate_instruction(run, vcpu); in kvmppc_handle_exit_pr()
1062 __func__, kvmppc_get_pc(vcpu), last_inst); in kvmppc_handle_exit_pr()
1063 kvmppc_core_queue_program(vcpu, flags); in kvmppc_handle_exit_pr()
1084 if (vcpu->arch.papr_enabled) { in kvmppc_handle_exit_pr()
1086 emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc); in kvmppc_handle_exit_pr()
1088 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4); in kvmppc_handle_exit_pr()
1094 if (vcpu->arch.papr_enabled && in kvmppc_handle_exit_pr()
1096 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_handle_exit_pr()
1098 ulong cmd = kvmppc_get_gpr(vcpu, 3); in kvmppc_handle_exit_pr()
1102 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { in kvmppc_handle_exit_pr()
1110 ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); in kvmppc_handle_exit_pr()
1114 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_pr()
1116 } else if (vcpu->arch.osi_enabled && in kvmppc_handle_exit_pr()
1117 (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) && in kvmppc_handle_exit_pr()
1118 (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) { in kvmppc_handle_exit_pr()
1125 gprs[i] = kvmppc_get_gpr(vcpu, i); in kvmppc_handle_exit_pr()
1126 vcpu->arch.osi_needed = 1; in kvmppc_handle_exit_pr()
1128 } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && in kvmppc_handle_exit_pr()
1129 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { in kvmppc_handle_exit_pr()
1131 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); in kvmppc_handle_exit_pr()
1135 vcpu->stat.syscall_exits++; in kvmppc_handle_exit_pr()
1136 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1149 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { in kvmppc_handle_exit_pr()
1151 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, in kvmppc_handle_exit_pr()
1176 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); in kvmppc_handle_exit_pr()
1182 int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_handle_exit_pr()
1188 dsisr = kvmppc_alignment_dsisr(vcpu, last_inst); in kvmppc_handle_exit_pr()
1189 dar = kvmppc_alignment_dar(vcpu, last_inst); in kvmppc_handle_exit_pr()
1191 kvmppc_set_dsisr(vcpu, dsisr); in kvmppc_handle_exit_pr()
1192 kvmppc_set_dar(vcpu, dar); in kvmppc_handle_exit_pr()
1194 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1201 kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); in kvmppc_handle_exit_pr()
1207 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1212 ulong shadow_srr1 = vcpu->arch.shadow_srr1; in kvmppc_handle_exit_pr()
1215 exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); in kvmppc_handle_exit_pr()
1233 s = kvmppc_prepare_to_enter(vcpu); in kvmppc_handle_exit_pr()
1241 kvmppc_handle_lost_ext(vcpu); in kvmppc_handle_exit_pr()
1244 trace_kvm_book3s_reenter(r, vcpu); in kvmppc_handle_exit_pr()
1249 static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs_pr() argument
1252 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvm_arch_vcpu_ioctl_get_sregs_pr()
1255 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1257 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1258 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { in kvm_arch_vcpu_ioctl_get_sregs_pr()
1260 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1261 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1265 sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i); in kvm_arch_vcpu_ioctl_get_sregs_pr()
1276 static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs_pr() argument
1279 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1282 kvmppc_set_pvr_pr(vcpu, sregs->pvr); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1285 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { in kvm_arch_vcpu_ioctl_set_sregs_pr()
1287 vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1292 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1295 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1297 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1299 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1301 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1307 kvmppc_mmu_pte_flush(vcpu, 0, 0); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1312 static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, in kvmppc_get_one_reg_pr() argument
1322 *val = get_reg_val(id, to_book3s(vcpu)->hior); in kvmppc_get_one_reg_pr()
1329 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_get_one_reg_pr()
1342 static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr) in kvmppc_set_lpcr_pr() argument
1345 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr_pr()
1347 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr_pr()
1350 static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, in kvmppc_set_one_reg_pr() argument
1357 to_book3s(vcpu)->hior = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1358 to_book3s(vcpu)->hior_explicit = true; in kvmppc_set_one_reg_pr()
1362 kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_pr()
1376 struct kvm_vcpu *vcpu; in kvmppc_core_vcpu_create_pr() local
1380 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in kvmppc_core_vcpu_create_pr()
1381 if (!vcpu) in kvmppc_core_vcpu_create_pr()
1387 vcpu->arch.book3s = vcpu_book3s; in kvmppc_core_vcpu_create_pr()
1390 vcpu->arch.shadow_vcpu = in kvmppc_core_vcpu_create_pr()
1391 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); in kvmppc_core_vcpu_create_pr()
1392 if (!vcpu->arch.shadow_vcpu) in kvmppc_core_vcpu_create_pr()
1396 err = kvm_vcpu_init(vcpu, kvm, id); in kvmppc_core_vcpu_create_pr()
1404 vcpu->arch.shared = (void *)p; in kvmppc_core_vcpu_create_pr()
1408 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_pr()
1410 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_pr()
1418 vcpu->arch.pvr = 0x3C0301; in kvmppc_core_vcpu_create_pr()
1420 vcpu->arch.pvr = mfspr(SPRN_PVR); in kvmppc_core_vcpu_create_pr()
1421 vcpu->arch.intr_msr = MSR_SF; in kvmppc_core_vcpu_create_pr()
1424 vcpu->arch.pvr = 0x84202; in kvmppc_core_vcpu_create_pr()
1426 kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); in kvmppc_core_vcpu_create_pr()
1427 vcpu->arch.slb_nr = 64; in kvmppc_core_vcpu_create_pr()
1429 vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; in kvmppc_core_vcpu_create_pr()
1431 err = kvmppc_mmu_init(vcpu); in kvmppc_core_vcpu_create_pr()
1435 return vcpu; in kvmppc_core_vcpu_create_pr()
1438 kvm_vcpu_uninit(vcpu); in kvmppc_core_vcpu_create_pr()
1441 kfree(vcpu->arch.shadow_vcpu); in kvmppc_core_vcpu_create_pr()
1446 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvmppc_core_vcpu_create_pr()
1451 static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_free_pr() argument
1453 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); in kvmppc_core_vcpu_free_pr()
1455 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); in kvmppc_core_vcpu_free_pr()
1456 kvm_vcpu_uninit(vcpu); in kvmppc_core_vcpu_free_pr()
1458 kfree(vcpu->arch.shadow_vcpu); in kvmppc_core_vcpu_free_pr()
1461 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvmppc_core_vcpu_free_pr()
1464 static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) in kvmppc_vcpu_run_pr() argument
1472 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_pr()
1484 ret = kvmppc_prepare_to_enter(vcpu); in kvmppc_vcpu_run_pr()
1506 if (kvmppc_get_msr(vcpu) & MSR_FP) in kvmppc_vcpu_run_pr()
1507 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); in kvmppc_vcpu_run_pr()
1511 ret = __kvmppc_vcpu_run(kvm_run, vcpu); in kvmppc_vcpu_run_pr()
1517 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); in kvmppc_vcpu_run_pr()
1520 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); in kvmppc_vcpu_run_pr()
1523 vcpu->mode = OUTSIDE_GUEST_MODE; in kvmppc_vcpu_run_pr()
1535 struct kvm_vcpu *vcpu; in kvm_vm_ioctl_get_dirty_log_pr() local
1555 kvm_for_each_vcpu(n, vcpu, kvm) in kvm_vm_ioctl_get_dirty_log_pr()
1556 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); in kvm_vm_ioctl_get_dirty_log_pr()
1607 struct kvm_vcpu *vcpu; in kvm_vm_ioctl_get_smmu_info_pr() local
1630 vcpu = kvm_get_vcpu(kvm, 0); in kvm_vm_ioctl_get_smmu_info_pr()
1631 if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { in kvm_vm_ioctl_get_smmu_info_pr()