Lines matching refs: vcpu (each entry gives the source line number, the matching line, and the enclosing function; "argument", "local", and "member" mark the kind of reference)
84 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
85 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
114 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) in kvmppc_fast_vcpu_kick_hv() argument
119 wqp = kvm_arch_vcpu_wq(vcpu); in kvmppc_fast_vcpu_kick_hv()
122 ++vcpu->stat.halt_wakeup; in kvmppc_fast_vcpu_kick_hv()
125 if (kvmppc_ipi_thread(vcpu->arch.thread_cpu)) in kvmppc_fast_vcpu_kick_hv()
129 cpu = vcpu->cpu; in kvmppc_fast_vcpu_kick_hv()
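
The kick path above is two-tier: if the target vCPU is idle on its wait queue it is woken directly and halt_wakeup is bumped; otherwise an IPI goes to the physical thread the vCPU last ran on (thread_cpu, with the vcpu->cpu path as fallback). A user-space analogue of the same shape, using a pthread condition variable in place of the wait queue and a flag in place of the IPI (all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct fake_vcpu {
	pthread_mutex_t lock;
	pthread_cond_t  wq;           /* stands in for the vcpu wait queue */
	bool            sleeping;     /* target is blocked in "cede" */
	bool            kick_pending; /* stands in for the IPI path */
	unsigned long   halt_wakeups;
};

static void fast_kick(struct fake_vcpu *v)
{
	pthread_mutex_lock(&v->lock);
	v->kick_pending = true;
	if (v->sleeping) {
		v->halt_wakeups++;           /* ++vcpu->stat.halt_wakeup */
		pthread_cond_signal(&v->wq); /* cheapest path: direct wake */
	}
	/* else: target is running; the kernel would IPI thread_cpu here */
	pthread_mutex_unlock(&v->lock);
}

static void *vcpu_thread(void *arg)
{
	struct fake_vcpu *v = arg;

	pthread_mutex_lock(&v->lock);
	v->sleeping = true;
	while (!v->kick_pending)
		pthread_cond_wait(&v->wq, &v->lock);
	v->sleeping = false;
	pthread_mutex_unlock(&v->lock);
	printf("woken; %lu direct wakeup(s)\n", v->halt_wakeups);
	return NULL;
}

int main(void)
{
	struct fake_vcpu v = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wq   = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, vcpu_thread, &v);
	sleep(1);        /* let the thread block */
	fast_kick(&v);
	pthread_join(t, NULL);
	return 0;
}
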
188 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) in kvmppc_core_vcpu_load_hv() argument
190 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
199 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_load_hv()
202 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
203 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
204 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
205 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
206 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
208 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
211 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_put_hv() argument
213 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
216 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_put_hv()
219 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
220 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
221 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
222 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
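
The load/put pair above implements a per-vCPU stopwatch for "stolen" time: vcpu_put timestamps the moment a busy-in-host vCPU is scheduled out (busy_preempt = mftb()), and vcpu_load adds the elapsed timebase to busy_stolen and resets the timestamp to the TB_NIL sentinel. A compact sketch of the same accounting, with clock_gettime() standing in for mftb() (names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define TB_NIL UINT64_MAX   /* "stopwatch not running" sentinel */

struct acct {
	uint64_t busy_stolen;   /* total ns stolen while busy in host */
	uint64_t busy_preempt;  /* timestamp at preemption, or TB_NIL */
};

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void acct_put(struct acct *a)   /* vcpu scheduled out */
{
	a->busy_preempt = now_ns();
}

static void acct_load(struct acct *a)  /* vcpu scheduled back in */
{
	if (a->busy_preempt != TB_NIL) {
		a->busy_stolen += now_ns() - a->busy_preempt;
		a->busy_preempt = TB_NIL;
	}
}

int main(void)
{
	struct acct a = { 0, TB_NIL };

	acct_put(&a);
	usleep(10000);          /* 10 ms "stolen" by the host */
	acct_load(&a);
	printf("stolen: %llu ns\n", (unsigned long long)a.busy_stolen);
	return 0;
}
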
225 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) in kvmppc_set_msr_hv() argument
233 vcpu->arch.shregs.msr = msr; in kvmppc_set_msr_hv()
234 kvmppc_end_cede(vcpu); in kvmppc_set_msr_hv()
237 static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) in kvmppc_set_pvr_hv() argument
239 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
242 static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) in kvmppc_set_arch_compat() argument
245 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
282 static void kvmppc_dump_regs(struct kvm_vcpu *vcpu) in kvmppc_dump_regs() argument
286 pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); in kvmppc_dump_regs()
288 vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
291 r, kvmppc_get_gpr(vcpu, r), in kvmppc_dump_regs()
292 r+16, kvmppc_get_gpr(vcpu, r+16)); in kvmppc_dump_regs()
294 vcpu->arch.ctr, vcpu->arch.lr); in kvmppc_dump_regs()
296 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
298 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
300 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
302 vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
303 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
305 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
306 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
307 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
309 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
311 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
312 vcpu->arch.last_inst); in kvmppc_dump_regs()
331 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) in init_vpa() argument
337 static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, in set_vpa() argument
343 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
349 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
369 static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, in do_h_register_vpa() argument
373 struct kvm *kvm = vcpu->kvm; in do_h_register_vpa()
479 static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) in kvmppc_update_vpa() argument
481 struct kvm *kvm = vcpu->kvm; in kvmppc_update_vpa()
496 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
501 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
529 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) in kvmppc_update_vpas() argument
531 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
532 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
533 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
536 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
537 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
538 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
539 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
540 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
542 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
543 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); in kvmppc_update_vpas()
544 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
545 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
547 if (vcpu->arch.slb_shadow.update_pending) in kvmppc_update_vpas()
548 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); in kvmppc_update_vpas()
549 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
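
kvmppc_update_vpas first tests the three update_pending flags without taking vpa_update_lock, a cheap fast path for the common case where nothing changed, and only then takes the lock and re-tests each flag before acting on it. The same lock-avoiding shape in miniature, with a pthread mutex in place of the spinlock (names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static bool vpa_pending, dtl_pending;

static void apply_vpa(void) { puts("vpa updated"); }
static void apply_dtl(void) { puts("dtl updated"); }

static void update_pending_areas(void)
{
	/* unlocked fast path: usually there is nothing to do */
	if (!(vpa_pending || dtl_pending))
		return;

	pthread_mutex_lock(&update_lock);
	/* re-test each flag under the lock before acting on it */
	if (vpa_pending) { apply_vpa(); vpa_pending = false; }
	if (dtl_pending) { apply_dtl(); dtl_pending = false; }
	pthread_mutex_unlock(&update_lock);
}

int main(void)
{
	update_pending_areas();   /* fast path: prints nothing */
	dtl_pending = true;
	update_pending_areas();   /* slow path: "dtl updated" */
	return 0;
}
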
570 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, in kvmppc_create_dtl_entry() argument
579 dt = vcpu->arch.dtl_ptr; in kvmppc_create_dtl_entry()
580 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_create_dtl_entry()
583 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_create_dtl_entry()
584 vcpu->arch.stolen_logged = core_stolen; in kvmppc_create_dtl_entry()
585 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_create_dtl_entry()
586 stolen += vcpu->arch.busy_stolen; in kvmppc_create_dtl_entry()
587 vcpu->arch.busy_stolen = 0; in kvmppc_create_dtl_entry()
588 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_create_dtl_entry()
593 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); in kvmppc_create_dtl_entry()
596 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); in kvmppc_create_dtl_entry()
597 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in kvmppc_create_dtl_entry()
599 if (dt == vcpu->arch.dtl.pinned_end) in kvmppc_create_dtl_entry()
600 dt = vcpu->arch.dtl.pinned_addr; in kvmppc_create_dtl_entry()
601 vcpu->arch.dtl_ptr = dt; in kvmppc_create_dtl_entry()
604 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in kvmppc_create_dtl_entry()
605 vcpu->arch.dtl.dirty = true; in kvmppc_create_dtl_entry()
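
The dispatch trace log written above is a ring of fixed-size entries between pinned_addr and pinned_end: the write pointer wraps at the end (source lines 599-601), while dtl_idx in the VPA counts monotonically (line 604) so the guest can tell how many entries it missed. A minimal ring of that shape:

#include <stdint.h>
#include <stdio.h>

struct dtl_entry { uint64_t timebase, stolen; };

#define DTL_SLOTS 4

struct dtl_ring {
	struct dtl_entry buf[DTL_SLOTS];
	struct dtl_entry *ptr;   /* next slot to write; wraps */
	uint64_t index;          /* monotonic count, never wraps */
};

static void dtl_push(struct dtl_ring *r, uint64_t tb, uint64_t stolen)
{
	struct dtl_entry *dt = r->ptr;

	dt->timebase = tb;
	dt->stolen = stolen;
	++dt;
	if (dt == r->buf + DTL_SLOTS)   /* dt == pinned_end */
		dt = r->buf;            /* wrap back to pinned_addr */
	r->ptr = dt;
	++r->index;   /* the consumer compares this with what it read */
}

int main(void)
{
	struct dtl_ring r = { 0 };
	r.ptr = r.buf;

	for (uint64_t i = 0; i < 6; i++)
		dtl_push(&r, i, i * 10);
	/* index (6) > slots (4): the two oldest entries were overwritten */
	printf("wrote %llu entries into %d slots\n",
	       (unsigned long long)r.index, DTL_SLOTS);
	return 0;
}

Keeping the index separate from the wrapping pointer is the design point: the producer never blocks, and a lagging consumer can detect overruns by comparing the index with its own read count.
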
608 static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu) in kvmppc_power8_compatible() argument
610 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) in kvmppc_power8_compatible()
612 if ((!vcpu->arch.vcore->arch_compat) && in kvmppc_power8_compatible()
618 static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, in kvmppc_h_set_mode() argument
624 if (!kvmppc_power8_compatible(vcpu)) in kvmppc_h_set_mode()
633 vcpu->arch.ciabr = value1; in kvmppc_h_set_mode()
636 if (!kvmppc_power8_compatible(vcpu)) in kvmppc_h_set_mode()
642 vcpu->arch.dawr = value1; in kvmppc_h_set_mode()
643 vcpu->arch.dawrx = value2; in kvmppc_h_set_mode()
672 static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) in kvmppc_get_yield_count() argument
677 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
678 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
681 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
685 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) in kvmppc_pseries_do_hcall() argument
687 unsigned long req = kvmppc_get_gpr(vcpu, 3); in kvmppc_pseries_do_hcall()
694 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
701 target = kvmppc_get_gpr(vcpu, 4); in kvmppc_pseries_do_hcall()
702 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); in kvmppc_pseries_do_hcall()
709 if (vcpu->arch.ceded) { in kvmppc_pseries_do_hcall()
710 if (waitqueue_active(&vcpu->wq)) { in kvmppc_pseries_do_hcall()
711 wake_up_interruptible(&vcpu->wq); in kvmppc_pseries_do_hcall()
712 vcpu->stat.halt_wakeup++; in kvmppc_pseries_do_hcall()
717 target = kvmppc_get_gpr(vcpu, 4); in kvmppc_pseries_do_hcall()
720 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); in kvmppc_pseries_do_hcall()
725 yield_count = kvmppc_get_gpr(vcpu, 5); in kvmppc_pseries_do_hcall()
731 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
732 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
733 kvmppc_get_gpr(vcpu, 6)); in kvmppc_pseries_do_hcall()
736 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
739 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_pseries_do_hcall()
740 rc = kvmppc_rtas_hcall(vcpu); in kvmppc_pseries_do_hcall()
741 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_pseries_do_hcall()
751 ret = kvmppc_h_logical_ci_load(vcpu); in kvmppc_pseries_do_hcall()
756 ret = kvmppc_h_logical_ci_store(vcpu); in kvmppc_pseries_do_hcall()
761 ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
762 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
763 kvmppc_get_gpr(vcpu, 6), in kvmppc_pseries_do_hcall()
764 kvmppc_get_gpr(vcpu, 7)); in kvmppc_pseries_do_hcall()
774 if (kvmppc_xics_enabled(vcpu)) { in kvmppc_pseries_do_hcall()
775 ret = kvmppc_xics_hcall(vcpu, req); in kvmppc_pseries_do_hcall()
781 kvmppc_set_gpr(vcpu, 3, ret); in kvmppc_pseries_do_hcall()
782 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
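
kvmppc_pseries_do_hcall follows the PAPR calling convention visible above: the hcall number arrives in guest GPR3, arguments in GPR4 and up, and the return token is written back into GPR3 (source line 781); anything the switch cannot handle is punted to the host. A skeleton of that convention (an illustrative subset; the resume codes here are stand-ins, not the kernel's values):

#include <stdint.h>
#include <stdio.h>

#define H_SUCCESS    0
#define H_PARAMETER  (-4L)
#define H_CEDE       0xE0    /* PAPR hcall numbers */
#define H_PROD       0x48

#define RESUME_GUEST 0       /* illustrative resume codes */
#define RESUME_HOST  1

struct regs { uint64_t gpr[32]; };

static int do_hcall(struct regs *r)
{
	unsigned long req = r->gpr[3];    /* hcall number in GPR3 */
	long ret = H_SUCCESS;

	switch (req) {
	case H_CEDE:                      /* no arguments */
		/* real code marks the vcpu ceded and lets it nap */
		break;
	case H_PROD: {
		unsigned long target = r->gpr[4];  /* args from GPR4 up */
		if (target > 31)          /* no such vcpu */
			ret = H_PARAMETER;
		break;
	}
	default:
		return RESUME_HOST;       /* punt unhandled calls */
	}
	r->gpr[3] = ret;                  /* return token back in GPR3 */
	return RESUME_GUEST;
}

int main(void)
{
	struct regs r = { .gpr = { 0 } };

	r.gpr[3] = H_PROD;
	r.gpr[4] = 7;
	if (do_hcall(&r) == RESUME_GUEST)
		printf("H_PROD -> %ld\n", (long)r.gpr[3]);
	return 0;
}
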
812 struct kvm_vcpu *vcpu) in kvmppc_emulate_debug_inst() argument
816 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != in kvmppc_emulate_debug_inst()
827 run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
830 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); in kvmppc_emulate_debug_inst()
835 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_handle_exit_hv() argument
840 vcpu->stat.sum_exits++; in kvmppc_handle_exit_hv()
844 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
847 vcpu->stat.dec_exits++; in kvmppc_handle_exit_hv()
852 vcpu->stat.ext_intr_exits++; in kvmppc_handle_exit_hv()
867 kvmppc_book3s_queue_irqprio(vcpu, in kvmppc_handle_exit_hv()
880 flags = vcpu->arch.shregs.msr & 0x1f0000ull; in kvmppc_handle_exit_hv()
881 kvmppc_core_queue_program(vcpu, flags); in kvmppc_handle_exit_hv()
894 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); in kvmppc_handle_exit_hv()
896 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); in kvmppc_handle_exit_hv()
898 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
913 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
914 vcpu->arch.fault_dsisr = 0; in kvmppc_handle_exit_hv()
925 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
926 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
927 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
928 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
929 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { in kvmppc_handle_exit_hv()
930 r = kvmppc_emulate_debug_inst(run, vcpu); in kvmppc_handle_exit_hv()
932 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); in kvmppc_handle_exit_hv()
942 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); in kvmppc_handle_exit_hv()
946 kvmppc_dump_regs(vcpu); in kvmppc_handle_exit_hv()
948 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
949 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
950 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
958 static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs_hv() argument
964 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
965 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
966 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
967 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
973 static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs_hv() argument
979 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
983 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
985 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
986 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
990 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
995 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, in kvmppc_set_lpcr() argument
998 struct kvm *kvm = vcpu->kvm; in kvmppc_set_lpcr()
999 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
1009 struct kvm_vcpu *vcpu; in kvmppc_set_lpcr() local
1012 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmppc_set_lpcr()
1013 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
1016 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
1018 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
1039 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, in kvmppc_get_one_reg_hv() argument
1053 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
1056 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
1059 *val = get_reg_val(id, vcpu->arch.dscr); in kvmppc_get_one_reg_hv()
1062 *val = get_reg_val(id, vcpu->arch.purr); in kvmppc_get_one_reg_hv()
1065 *val = get_reg_val(id, vcpu->arch.spurr); in kvmppc_get_one_reg_hv()
1068 *val = get_reg_val(id, vcpu->arch.amr); in kvmppc_get_one_reg_hv()
1071 *val = get_reg_val(id, vcpu->arch.uamor); in kvmppc_get_one_reg_hv()
1075 *val = get_reg_val(id, vcpu->arch.mmcr[i]); in kvmppc_get_one_reg_hv()
1079 *val = get_reg_val(id, vcpu->arch.pmc[i]); in kvmppc_get_one_reg_hv()
1083 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
1086 *val = get_reg_val(id, vcpu->arch.siar); in kvmppc_get_one_reg_hv()
1089 *val = get_reg_val(id, vcpu->arch.sdar); in kvmppc_get_one_reg_hv()
1092 *val = get_reg_val(id, vcpu->arch.sier); in kvmppc_get_one_reg_hv()
1095 *val = get_reg_val(id, vcpu->arch.iamr); in kvmppc_get_one_reg_hv()
1098 *val = get_reg_val(id, vcpu->arch.pspb); in kvmppc_get_one_reg_hv()
1101 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
1104 *val = get_reg_val(id, vcpu->arch.dawr); in kvmppc_get_one_reg_hv()
1107 *val = get_reg_val(id, vcpu->arch.dawrx); in kvmppc_get_one_reg_hv()
1110 *val = get_reg_val(id, vcpu->arch.ciabr); in kvmppc_get_one_reg_hv()
1113 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
1116 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
1119 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
1122 *val = get_reg_val(id, vcpu->arch.pid); in kvmppc_get_one_reg_hv()
1125 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
1128 *val = get_reg_val(id, vcpu->arch.wort); in kvmppc_get_one_reg_hv()
1131 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1132 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
1133 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1136 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1137 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
1138 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
1139 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1142 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1143 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
1144 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
1145 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1148 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
1152 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
1155 *val = get_reg_val(id, vcpu->arch.ppr); in kvmppc_get_one_reg_hv()
1159 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
1162 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
1165 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
1169 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
1177 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
1180 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
1187 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
1190 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
1193 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
1196 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
1199 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
1202 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
1205 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
1209 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
1214 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
1217 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
1221 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); in kvmppc_get_one_reg_hv()
1231 static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, in kvmppc_set_one_reg_hv() argument
1245 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1248 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
1251 vcpu->arch.dscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1254 vcpu->arch.purr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1257 vcpu->arch.spurr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1260 vcpu->arch.amr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1263 vcpu->arch.uamor = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1267 vcpu->arch.mmcr[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1271 vcpu->arch.pmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1275 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1278 vcpu->arch.siar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1281 vcpu->arch.sdar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1284 vcpu->arch.sier = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1287 vcpu->arch.iamr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1290 vcpu->arch.pspb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1293 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1296 vcpu->arch.dawr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1299 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
1302 vcpu->arch.ciabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1304 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
1305 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ in kvmppc_set_one_reg_hv()
1308 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1311 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1314 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1317 vcpu->arch.pid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1320 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1323 vcpu->arch.wort = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1328 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
1329 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
1331 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
1337 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
1339 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
1346 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
1349 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
1353 vcpu->arch.vcore->tb_offset = in kvmppc_set_one_reg_hv()
1357 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true); in kvmppc_set_one_reg_hv()
1360 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false); in kvmppc_set_one_reg_hv()
1363 vcpu->arch.ppr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1367 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1370 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1373 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1377 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1385 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
1388 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
1394 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1397 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1400 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1403 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1406 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1409 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1412 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1416 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1421 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1424 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1428 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
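
The ONE_REG accessors above are one long id-to-field mapping in each direction: get_reg_val()/set_reg_val() move the chosen field through a union (the size bits encoded in the register id select the active member), and composite state such as the VPAs is copied under vpa_update_lock. The bare shape of that pattern (ids and fields here are illustrative, not the KVM_REG_* encoding):

#include <stdint.h>
#include <stdio.h>

enum { MY_REG_DSCR = 1, MY_REG_PURR = 2 };

union reg_val { uint64_t u64; uint32_t u32; };

struct fake_vcpu { uint64_t dscr, purr; };

static int get_one_reg(struct fake_vcpu *v, int id, union reg_val *val)
{
	switch (id) {
	case MY_REG_DSCR: val->u64 = v->dscr; return 0;
	case MY_REG_PURR: val->u64 = v->purr; return 0;
	default:          return -1;   /* -EINVAL in the kernel */
	}
}

static int set_one_reg(struct fake_vcpu *v, int id, const union reg_val *val)
{
	switch (id) {
	case MY_REG_DSCR: v->dscr = val->u64; return 0;
	case MY_REG_PURR: v->purr = val->u64; return 0;
	default:          return -1;
	}
}

int main(void)
{
	struct fake_vcpu v = { .dscr = 0x10 };
	union reg_val val;

	if (get_one_reg(&v, MY_REG_DSCR, &val) == 0)
		printf("DSCR = %#llx\n", (unsigned long long)val.u64);
	val.u64 = 0x20;
	set_one_reg(&v, MY_REG_PURR, &val);
	return 0;
}
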
1475 struct kvm_vcpu *vcpu; member
1482 struct kvm_vcpu *vcpu = inode->i_private; in debugfs_timings_open() local
1489 kvm_get_kvm(vcpu->kvm); in debugfs_timings_open()
1490 p->vcpu = vcpu; in debugfs_timings_open()
1500 kvm_put_kvm(p->vcpu->kvm); in debugfs_timings_release()
1509 struct kvm_vcpu *vcpu = p->vcpu; in debugfs_timings_read() local
1525 ((unsigned long)vcpu + timings[i].offset); in debugfs_timings_read()
1586 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) in debugfs_vcpu_init() argument
1589 struct kvm *kvm = vcpu->kvm; in debugfs_vcpu_init()
1594 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); in debugfs_vcpu_init()
1595 if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir)) in debugfs_vcpu_init()
1597 vcpu->arch.debugfs_timings = in debugfs_vcpu_init()
1598 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, in debugfs_vcpu_init()
1599 vcpu, &debugfs_timings_ops); in debugfs_vcpu_init()
1603 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) in debugfs_vcpu_init() argument
1611 struct kvm_vcpu *vcpu; in kvmppc_core_vcpu_create_hv() local
1621 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in kvmppc_core_vcpu_create_hv()
1622 if (!vcpu) in kvmppc_core_vcpu_create_hv()
1625 err = kvm_vcpu_init(vcpu, kvm, id); in kvmppc_core_vcpu_create_hv()
1629 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
1636 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
1638 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
1641 vcpu->arch.mmcr[0] = MMCR0_FC; in kvmppc_core_vcpu_create_hv()
1642 vcpu->arch.ctrl = CTRL_RUNLATCH; in kvmppc_core_vcpu_create_hv()
1644 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); in kvmppc_core_vcpu_create_hv()
1645 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
1646 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
1647 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
1648 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
1650 kvmppc_mmu_book3s_hv_init(vcpu); in kvmppc_core_vcpu_create_hv()
1652 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
1654 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
1671 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
1672 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
1673 vcpu->arch.thread_cpu = -1; in kvmppc_core_vcpu_create_hv()
1675 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
1676 kvmppc_sanity_check(vcpu); in kvmppc_core_vcpu_create_hv()
1678 debugfs_vcpu_init(vcpu, id); in kvmppc_core_vcpu_create_hv()
1680 return vcpu; in kvmppc_core_vcpu_create_hv()
1683 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvmppc_core_vcpu_create_hv()
1695 static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_free_hv() argument
1697 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
1698 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
1699 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
1700 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
1701 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
1702 kvm_vcpu_uninit(vcpu); in kvmppc_core_vcpu_free_hv()
1703 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvmppc_core_vcpu_free_hv()
1706 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) in kvmppc_core_check_requests_hv() argument
1712 static void kvmppc_set_timer(struct kvm_vcpu *vcpu) in kvmppc_set_timer() argument
1717 if (now > vcpu->arch.dec_expires) { in kvmppc_set_timer()
1719 kvmppc_core_queue_dec(vcpu); in kvmppc_set_timer()
1720 kvmppc_core_prepare_to_enter(vcpu); in kvmppc_set_timer()
1723 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC in kvmppc_set_timer()
1725 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), in kvmppc_set_timer()
1727 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
1730 static void kvmppc_end_cede(struct kvm_vcpu *vcpu) in kvmppc_end_cede() argument
1732 vcpu->arch.ceded = 0; in kvmppc_end_cede()
1733 if (vcpu->arch.timer_running) { in kvmppc_end_cede()
1734 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_end_cede()
1735 vcpu->arch.timer_running = 0; in kvmppc_end_cede()
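
kvmppc_set_timer converts the remaining timebase ticks until dec_expires into nanoseconds with the multiply-then-divide on source line 1723, then arms an hrtimer for that delay; if the deadline already passed it queues the decrementer interrupt at once, and kvmppc_end_cede cancels the timer. The conversion itself, with a POWER8-style 512 MHz timebase assumed for illustration:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ull

/* POWER8's timebase runs at 512 MHz; assumed here for illustration */
static const uint64_t tb_ticks_per_sec = 512000000ull;

/* Nanoseconds until the guest decrementer fires, or 0 if overdue */
static uint64_t dec_nsec_remaining(uint64_t dec_expires, uint64_t now)
{
	if (now > dec_expires)
		return 0;   /* expired: queue the interrupt right away */
	return (dec_expires - now) * NSEC_PER_SEC / tb_ticks_per_sec;
}

int main(void)
{
	/* 512,000 timebase ticks at 512 MHz = 1,000,000 ns = 1 ms */
	printf("%llu ns\n",
	       (unsigned long long)dec_nsec_remaining(512000, 0));
	return 0;
}
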
1742 struct kvm_vcpu *vcpu) in kvmppc_remove_runnable() argument
1746 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
1748 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
1750 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
1751 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
1752 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
1753 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
1754 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
1756 list_del(&vcpu->arch.run_list); in kvmppc_remove_runnable()
1804 static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) in kvmppc_start_thread() argument
1811 if (vcpu) { in kvmppc_start_thread()
1812 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
1813 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
1814 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
1816 cpu += vcpu->arch.ptid; in kvmppc_start_thread()
1817 vcpu->cpu = mvc->pcpu; in kvmppc_start_thread()
1818 vcpu->arch.thread_cpu = cpu; in kvmppc_start_thread()
1821 tpaca->kvm_hstate.kvm_vcpu = vcpu; in kvmppc_start_thread()
2163 struct kvm_vcpu *vcpu, *vnext; in prepare_threads() local
2165 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in prepare_threads()
2167 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
2168 vcpu->arch.ret = -EINTR; in prepare_threads()
2169 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
2170 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
2171 vcpu->arch.dtl.update_pending) in prepare_threads()
2172 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
2175 kvmppc_remove_runnable(vc, vcpu); in prepare_threads()
2176 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
2216 struct kvm_vcpu *vcpu, *vnext; in post_guest_process() local
2220 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in post_guest_process()
2223 if (now < vcpu->arch.dec_expires && in post_guest_process()
2224 kvmppc_core_pending_dec(vcpu)) in post_guest_process()
2225 kvmppc_core_dequeue_dec(vcpu); in post_guest_process()
2227 trace_kvm_guest_exit(vcpu); in post_guest_process()
2230 if (vcpu->arch.trap) in post_guest_process()
2231 ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, in post_guest_process()
2232 vcpu->arch.run_task); in post_guest_process()
2234 vcpu->arch.ret = ret; in post_guest_process()
2235 vcpu->arch.trap = 0; in post_guest_process()
2237 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
2238 if (vcpu->arch.pending_exceptions) in post_guest_process()
2239 kvmppc_core_prepare_to_enter(vcpu); in post_guest_process()
2240 if (vcpu->arch.ceded) in post_guest_process()
2241 kvmppc_set_timer(vcpu); in post_guest_process()
2245 kvmppc_remove_runnable(vc, vcpu); in post_guest_process()
2246 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
2261 vcpu = list_first_entry(&vc->runnable_threads, in post_guest_process()
2263 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
2275 struct kvm_vcpu *vcpu, *vnext; in kvmppc_run_core() local
2311 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in kvmppc_run_core()
2313 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
2314 kvmppc_remove_runnable(vc, vcpu); in kvmppc_run_core()
2315 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
2389 list_for_each_entry(vcpu, &pvc->runnable_threads, in kvmppc_run_core()
2391 kvmppc_start_thread(vcpu, pvc); in kvmppc_run_core()
2392 kvmppc_create_dtl_entry(vcpu, pvc); in kvmppc_run_core()
2393 trace_kvm_guest_enter(vcpu); in kvmppc_run_core()
2394 if (!vcpu->arch.ptid) in kvmppc_run_core()
2396 active |= 1 << (thr + vcpu->arch.ptid); in kvmppc_run_core()
2501 struct kvm_vcpu *vcpu, int wait_state) in kvmppc_wait_for_exec() argument
2505 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
2506 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_wait_for_exec()
2511 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
2520 struct kvm_vcpu *vcpu; in kvmppc_vcore_blocked() local
2531 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_vcore_blocked()
2532 if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) { in kvmppc_vcore_blocked()
2553 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) in kvmppc_run_vcpu() argument
2559 trace_kvmppc_run_vcpu_enter(vcpu); in kvmppc_run_vcpu()
2562 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
2563 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
2564 kvmppc_update_vpas(vcpu); in kvmppc_run_vcpu()
2569 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
2571 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
2572 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
2573 vcpu->arch.kvm_run = kvm_run; in kvmppc_run_vcpu()
2574 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
2575 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
2576 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
2577 list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); in kvmppc_run_vcpu()
2591 kvmppc_create_dtl_entry(vcpu, vc); in kvmppc_run_vcpu()
2592 kvmppc_start_thread(vcpu, vc); in kvmppc_run_vcpu()
2593 trace_kvm_guest_enter(vcpu); in kvmppc_run_vcpu()
2599 kvmppc_create_dtl_entry(vcpu, vc); in kvmppc_run_vcpu()
2600 kvmppc_start_thread(vcpu, vc); in kvmppc_run_vcpu()
2601 trace_kvm_guest_enter(vcpu); in kvmppc_run_vcpu()
2608 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
2614 kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE); in kvmppc_run_vcpu()
2628 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
2637 vc->runner = vcpu; in kvmppc_run_vcpu()
2652 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
2656 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); in kvmppc_run_vcpu()
2661 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
2662 kvmppc_remove_runnable(vc, vcpu); in kvmppc_run_vcpu()
2663 vcpu->stat.signal_exits++; in kvmppc_run_vcpu()
2665 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
2675 trace_kvmppc_run_vcpu_exit(vcpu, kvm_run); in kvmppc_run_vcpu()
2677 return vcpu->arch.ret; in kvmppc_run_vcpu()
2680 static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) in kvmppc_vcpu_run_hv() argument
2685 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
2690 kvmppc_core_prepare_to_enter(vcpu); in kvmppc_vcpu_run_hv()
2698 atomic_inc(&vcpu->kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
2703 if (!vcpu->kvm->arch.hpte_setup_done) { in kvmppc_vcpu_run_hv()
2704 r = kvmppc_hv_setup_htab_rma(vcpu); in kvmppc_vcpu_run_hv()
2712 vcpu->arch.wqp = &vcpu->arch.vcore->wq; in kvmppc_vcpu_run_hv()
2713 vcpu->arch.pgdir = current->mm->pgd; in kvmppc_vcpu_run_hv()
2714 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
2717 r = kvmppc_run_vcpu(run, vcpu); in kvmppc_vcpu_run_hv()
2720 !(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_vcpu_run_hv()
2721 trace_kvm_hcall_enter(vcpu); in kvmppc_vcpu_run_hv()
2722 r = kvmppc_pseries_do_hcall(vcpu); in kvmppc_vcpu_run_hv()
2723 trace_kvm_hcall_exit(vcpu, r); in kvmppc_vcpu_run_hv()
2724 kvmppc_core_prepare_to_enter(vcpu); in kvmppc_vcpu_run_hv()
2726 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_vcpu_run_hv()
2727 r = kvmppc_book3s_hv_page_fault(run, vcpu, in kvmppc_vcpu_run_hv()
2728 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
2729 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_vcpu_run_hv()
2734 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
2735 atomic_dec(&vcpu->kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
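
The tail of kvmppc_vcpu_run_hv above is a resume loop: run the vCPU; if it exited with a pending hypercall made from the guest kernel (MSR_PR clear), emulate the hcall and re-enter; if the exit was a guest HPT fault, resolve it under srcu and re-enter; anything else returns to userspace. A control-flow sketch with stubbed helpers (the stubs and their fake exit sequence are invented for the demo):

#include <stdio.h>

enum { RESUME_GUEST, RESUME_PAGE_FAULT, RESUME_HOST };

/* Stubs so the sketch runs; the real versions live in the HV module.
 * They fake one page fault, then one hcall, then a final host exit. */
static int exits;
static int run_vcpu(void)
{
	++exits;
	return exits == 1 ? RESUME_PAGE_FAULT : RESUME_HOST;
}
static int hcall_pending_from_guest_kernel(void) { return exits == 2; }
static int do_hcall(void)          { puts("hcall");      return RESUME_GUEST; }
static int handle_page_fault(void) { puts("page fault"); return RESUME_GUEST; }

static int vcpu_run_hv(void)
{
	int r;

	do {
		r = run_vcpu();
		if (r == RESUME_HOST && hcall_pending_from_guest_kernel())
			r = do_hcall();          /* sc 1 from the guest kernel */
		else if (r == RESUME_PAGE_FAULT)
			r = handle_page_fault(); /* fault punted by real mode */
	} while (r == RESUME_GUEST);
	return r;
}

int main(void)
{
	printf("left run loop with %d\n", vcpu_run_hv());
	return 0;
}
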
2893 static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu) in kvmppc_mmu_destroy_hv() argument
2898 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) in kvmppc_hv_setup_htab_rma() argument
2901 struct kvm *kvm = vcpu->kvm; in kvmppc_hv_setup_htab_rma()
2957 kvmppc_map_vrma(vcpu, memslot, porder); in kvmppc_hv_setup_htab_rma()
3052 static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_core_emulate_op_hv() argument
3058 static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn, in kvmppc_core_emulate_mtspr_hv() argument
3064 static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, in kvmppc_core_emulate_mfspr_hv() argument