Lines matching refs:vcpu (symbol references to "vcpu"; all matches are in the KVM Book3S HV code, arch/powerpc/kvm/book3s_hv.c)

85 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
86 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
115 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) in kvmppc_fast_vcpu_kick_hv() argument
117 int cpu = vcpu->cpu; in kvmppc_fast_vcpu_kick_hv()
120 wqp = kvm_arch_vcpu_wq(vcpu); in kvmppc_fast_vcpu_kick_hv()
123 ++vcpu->stat.halt_wakeup; in kvmppc_fast_vcpu_kick_hv()
126 if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid)) in kvmppc_fast_vcpu_kick_hv()
167 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) in kvmppc_core_vcpu_load_hv() argument
169 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
178 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) { in kvmppc_core_vcpu_load_hv()
186 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
187 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
188 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
189 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
190 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
192 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
195 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_put_hv() argument
197 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
200 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) { in kvmppc_core_vcpu_put_hv()
205 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
206 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
207 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
208 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
211 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) in kvmppc_set_msr_hv() argument
219 vcpu->arch.shregs.msr = msr; in kvmppc_set_msr_hv()
220 kvmppc_end_cede(vcpu); in kvmppc_set_msr_hv()
223 void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) in kvmppc_set_pvr_hv() argument
225 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
228 int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) in kvmppc_set_arch_compat() argument
231 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
268 void kvmppc_dump_regs(struct kvm_vcpu *vcpu) in kvmppc_dump_regs() argument
272 pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); in kvmppc_dump_regs()
274 vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
277 r, kvmppc_get_gpr(vcpu, r), in kvmppc_dump_regs()
278 r+16, kvmppc_get_gpr(vcpu, r+16)); in kvmppc_dump_regs()
280 vcpu->arch.ctr, vcpu->arch.lr); in kvmppc_dump_regs()
282 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
284 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
286 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
288 vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
289 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
291 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
292 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
293 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
295 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
297 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
298 vcpu->arch.last_inst); in kvmppc_dump_regs()
317 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) in init_vpa() argument
323 static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v, in set_vpa() argument
329 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
335 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
355 static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, in do_h_register_vpa() argument
359 struct kvm *kvm = vcpu->kvm; in do_h_register_vpa()
465 static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap) in kvmppc_update_vpa() argument
467 struct kvm *kvm = vcpu->kvm; in kvmppc_update_vpa()
482 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
487 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
515 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) in kvmppc_update_vpas() argument
517 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
518 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
519 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
522 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
523 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
524 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
525 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
526 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
528 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
529 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); in kvmppc_update_vpas()
530 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
531 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
533 if (vcpu->arch.slb_shadow.update_pending) in kvmppc_update_vpas()
534 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); in kvmppc_update_vpas()
535 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
556 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, in kvmppc_create_dtl_entry() argument
565 dt = vcpu->arch.dtl_ptr; in kvmppc_create_dtl_entry()
566 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_create_dtl_entry()
569 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_create_dtl_entry()
570 vcpu->arch.stolen_logged = core_stolen; in kvmppc_create_dtl_entry()
571 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_create_dtl_entry()
572 stolen += vcpu->arch.busy_stolen; in kvmppc_create_dtl_entry()
573 vcpu->arch.busy_stolen = 0; in kvmppc_create_dtl_entry()
574 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_create_dtl_entry()
579 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); in kvmppc_create_dtl_entry()
582 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); in kvmppc_create_dtl_entry()
583 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in kvmppc_create_dtl_entry()
585 if (dt == vcpu->arch.dtl.pinned_end) in kvmppc_create_dtl_entry()
586 dt = vcpu->arch.dtl.pinned_addr; in kvmppc_create_dtl_entry()
587 vcpu->arch.dtl_ptr = dt; in kvmppc_create_dtl_entry()
590 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in kvmppc_create_dtl_entry()
591 vcpu->arch.dtl.dirty = true; in kvmppc_create_dtl_entry()
594 static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu) in kvmppc_power8_compatible() argument
596 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) in kvmppc_power8_compatible()
598 if ((!vcpu->arch.vcore->arch_compat) && in kvmppc_power8_compatible()
604 static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, in kvmppc_h_set_mode() argument
610 if (!kvmppc_power8_compatible(vcpu)) in kvmppc_h_set_mode()
619 vcpu->arch.ciabr = value1; in kvmppc_h_set_mode()
622 if (!kvmppc_power8_compatible(vcpu)) in kvmppc_h_set_mode()
628 vcpu->arch.dawr = value1; in kvmppc_h_set_mode()
629 vcpu->arch.dawrx = value2; in kvmppc_h_set_mode()
657 static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) in kvmppc_get_yield_count() argument
662 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
663 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
666 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
670 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) in kvmppc_pseries_do_hcall() argument
672 unsigned long req = kvmppc_get_gpr(vcpu, 3); in kvmppc_pseries_do_hcall()
679 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
686 target = kvmppc_get_gpr(vcpu, 4); in kvmppc_pseries_do_hcall()
687 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); in kvmppc_pseries_do_hcall()
694 if (vcpu->arch.ceded) { in kvmppc_pseries_do_hcall()
695 if (waitqueue_active(&vcpu->wq)) { in kvmppc_pseries_do_hcall()
696 wake_up_interruptible(&vcpu->wq); in kvmppc_pseries_do_hcall()
697 vcpu->stat.halt_wakeup++; in kvmppc_pseries_do_hcall()
702 target = kvmppc_get_gpr(vcpu, 4); in kvmppc_pseries_do_hcall()
705 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); in kvmppc_pseries_do_hcall()
710 yield_count = kvmppc_get_gpr(vcpu, 5); in kvmppc_pseries_do_hcall()
716 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
717 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
718 kvmppc_get_gpr(vcpu, 6)); in kvmppc_pseries_do_hcall()
721 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
724 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_pseries_do_hcall()
725 rc = kvmppc_rtas_hcall(vcpu); in kvmppc_pseries_do_hcall()
726 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_pseries_do_hcall()
736 ret = kvmppc_h_logical_ci_load(vcpu); in kvmppc_pseries_do_hcall()
741 ret = kvmppc_h_logical_ci_store(vcpu); in kvmppc_pseries_do_hcall()
746 ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4), in kvmppc_pseries_do_hcall()
747 kvmppc_get_gpr(vcpu, 5), in kvmppc_pseries_do_hcall()
748 kvmppc_get_gpr(vcpu, 6), in kvmppc_pseries_do_hcall()
749 kvmppc_get_gpr(vcpu, 7)); in kvmppc_pseries_do_hcall()
759 if (kvmppc_xics_enabled(vcpu)) { in kvmppc_pseries_do_hcall()
760 ret = kvmppc_xics_hcall(vcpu, req); in kvmppc_pseries_do_hcall()
766 kvmppc_set_gpr(vcpu, 3, ret); in kvmppc_pseries_do_hcall()
767 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
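
The enabled_hcalls test at line 679 above is the in-kernel gate for the KVM_CAP_PPC_ENABLE_HCALL capability: userspace chooses, per hcall, whether kvmppc_pseries_do_hcall() may handle that call directly. A minimal userspace sketch follows; vm_fd and the helper name are illustrative assumptions, while the ioctl and structure are the standard KVM_ENABLE_CAP interface.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hedged sketch: enable or disable in-kernel handling of one PAPR hcall.
 * vm_fd is assumed to be an open KVM VM file descriptor. */
static int set_hcall_in_kernel(int vm_fd, uint64_t hcall_nr, int enable)
{
	struct kvm_enable_cap cap = {
		.cap  = KVM_CAP_PPC_ENABLE_HCALL,
		.args = { hcall_nr, enable ? 1 : 0 },
	};
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
}
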
797 struct kvm_vcpu *vcpu) in kvmppc_emulate_debug_inst() argument
801 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != in kvmppc_emulate_debug_inst()
812 run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
815 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); in kvmppc_emulate_debug_inst()
820 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_handle_exit_hv() argument
825 vcpu->stat.sum_exits++; in kvmppc_handle_exit_hv()
829 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
832 vcpu->stat.dec_exits++; in kvmppc_handle_exit_hv()
837 vcpu->stat.ext_intr_exits++; in kvmppc_handle_exit_hv()
852 kvmppc_book3s_queue_irqprio(vcpu, in kvmppc_handle_exit_hv()
865 flags = vcpu->arch.shregs.msr & 0x1f0000ull; in kvmppc_handle_exit_hv()
866 kvmppc_core_queue_program(vcpu, flags); in kvmppc_handle_exit_hv()
879 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); in kvmppc_handle_exit_hv()
881 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); in kvmppc_handle_exit_hv()
883 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
898 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
899 vcpu->arch.fault_dsisr = 0; in kvmppc_handle_exit_hv()
910 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
911 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
912 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
913 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
914 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { in kvmppc_handle_exit_hv()
915 r = kvmppc_emulate_debug_inst(run, vcpu); in kvmppc_handle_exit_hv()
917 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); in kvmppc_handle_exit_hv()
927 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); in kvmppc_handle_exit_hv()
931 kvmppc_dump_regs(vcpu); in kvmppc_handle_exit_hv()
933 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
934 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
935 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
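
For hcalls the kernel does not handle itself, the run->papr_hcall lines (879-883) above show the exit path in kvmppc_handle_exit_hv(): the hcall number and arguments are copied into kvm_run and hcall_needed is set, and whatever userspace writes to run->papr_hcall.ret is loaded back into guest r3 on the next KVM_RUN. A rough userspace counterpart is sketched below; the handler body and the locally defined H_FUNCTION fallback are assumptions for illustration, the kvm_run fields are the ones used above.

#include <linux/kvm.h>

#define H_FUNCTION (-2)	/* PAPR "function not supported" status; defined locally for this sketch */

/* Hedged sketch of a userspace KVM_EXIT_PAPR_HCALL handler: nr and args[]
 * were copied from guest r3/r4.. by the kernel; ret goes back in r3. */
static void handle_papr_hcall(struct kvm_run *run)
{
	switch (run->papr_hcall.nr) {
	/* case H_SOME_HCALL: ... use run->papr_hcall.args[0..8] ... */
	default:
		run->papr_hcall.ret = H_FUNCTION;
		break;
	}
}
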
943 static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs_hv() argument
949 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
950 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
951 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
952 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
958 static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs_hv() argument
964 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
968 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
970 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
971 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
975 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
980 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, in kvmppc_set_lpcr() argument
983 struct kvm *kvm = vcpu->kvm; in kvmppc_set_lpcr()
984 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
994 struct kvm_vcpu *vcpu; in kvmppc_set_lpcr() local
997 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmppc_set_lpcr()
998 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
1001 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
1003 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
1024 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, in kvmppc_get_one_reg_hv() argument
1038 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
1041 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
1044 *val = get_reg_val(id, vcpu->arch.dscr); in kvmppc_get_one_reg_hv()
1047 *val = get_reg_val(id, vcpu->arch.purr); in kvmppc_get_one_reg_hv()
1050 *val = get_reg_val(id, vcpu->arch.spurr); in kvmppc_get_one_reg_hv()
1053 *val = get_reg_val(id, vcpu->arch.amr); in kvmppc_get_one_reg_hv()
1056 *val = get_reg_val(id, vcpu->arch.uamor); in kvmppc_get_one_reg_hv()
1060 *val = get_reg_val(id, vcpu->arch.mmcr[i]); in kvmppc_get_one_reg_hv()
1064 *val = get_reg_val(id, vcpu->arch.pmc[i]); in kvmppc_get_one_reg_hv()
1068 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
1071 *val = get_reg_val(id, vcpu->arch.siar); in kvmppc_get_one_reg_hv()
1074 *val = get_reg_val(id, vcpu->arch.sdar); in kvmppc_get_one_reg_hv()
1077 *val = get_reg_val(id, vcpu->arch.sier); in kvmppc_get_one_reg_hv()
1080 *val = get_reg_val(id, vcpu->arch.iamr); in kvmppc_get_one_reg_hv()
1083 *val = get_reg_val(id, vcpu->arch.pspb); in kvmppc_get_one_reg_hv()
1086 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
1089 *val = get_reg_val(id, vcpu->arch.dawr); in kvmppc_get_one_reg_hv()
1092 *val = get_reg_val(id, vcpu->arch.dawrx); in kvmppc_get_one_reg_hv()
1095 *val = get_reg_val(id, vcpu->arch.ciabr); in kvmppc_get_one_reg_hv()
1098 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
1101 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
1104 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
1107 *val = get_reg_val(id, vcpu->arch.pid); in kvmppc_get_one_reg_hv()
1110 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
1113 *val = get_reg_val(id, vcpu->arch.wort); in kvmppc_get_one_reg_hv()
1116 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1117 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
1118 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1121 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1122 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
1123 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
1124 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1127 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1128 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
1129 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
1130 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1133 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
1137 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
1140 *val = get_reg_val(id, vcpu->arch.ppr); in kvmppc_get_one_reg_hv()
1144 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
1147 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
1150 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
1154 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
1162 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
1165 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
1172 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
1175 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
1178 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
1181 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
1184 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
1187 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
1190 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
1194 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
1199 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
1202 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
1206 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); in kvmppc_get_one_reg_hv()
1216 static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, in kvmppc_set_one_reg_hv() argument
1230 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1233 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
1236 vcpu->arch.dscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1239 vcpu->arch.purr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1242 vcpu->arch.spurr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1245 vcpu->arch.amr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1248 vcpu->arch.uamor = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1252 vcpu->arch.mmcr[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1256 vcpu->arch.pmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1260 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1263 vcpu->arch.siar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1266 vcpu->arch.sdar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1269 vcpu->arch.sier = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1272 vcpu->arch.iamr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1275 vcpu->arch.pspb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1278 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1281 vcpu->arch.dawr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1284 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
1287 vcpu->arch.ciabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1289 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
1290 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ in kvmppc_set_one_reg_hv()
1293 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1296 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1299 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1302 vcpu->arch.pid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1305 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1308 vcpu->arch.wort = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1313 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
1314 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
1316 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
1322 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
1324 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
1331 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
1334 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
1338 vcpu->arch.vcore->tb_offset = in kvmppc_set_one_reg_hv()
1342 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true); in kvmppc_set_one_reg_hv()
1345 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false); in kvmppc_set_one_reg_hv()
1348 vcpu->arch.ppr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1352 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1355 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1358 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1362 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1370 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
1373 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
1379 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1382 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1385 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1388 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1391 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1394 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1397 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1401 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1406 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1409 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1413 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_hv()
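
kvmppc_get_one_reg_hv() (line 1024) and kvmppc_set_one_reg_hv() (line 1216) above are the HV back ends of the generic ONE_REG vcpu ioctls. A minimal sketch of driving them from userspace is below; it assumes vcpu_fd is an open vcpu descriptor, id is one of the KVM_REG_PPC_* identifiers handled above, and the register is 64 bits wide.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hedged sketch: read/write one guest register through the ONE_REG
 * interface backed by kvmppc_get_one_reg_hv()/kvmppc_set_one_reg_hv(). */
static int get_reg(int vcpu_fd, uint64_t id, uint64_t *val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)val };
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int set_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)&val };
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
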
1466 struct kvm_vcpu *vcpu; member
1473 struct kvm_vcpu *vcpu = inode->i_private; in debugfs_timings_open() local
1480 kvm_get_kvm(vcpu->kvm); in debugfs_timings_open()
1481 p->vcpu = vcpu; in debugfs_timings_open()
1491 kvm_put_kvm(p->vcpu->kvm); in debugfs_timings_release()
1500 struct kvm_vcpu *vcpu = p->vcpu; in debugfs_timings_read() local
1516 ((unsigned long)vcpu + timings[i].offset); in debugfs_timings_read()
1577 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) in debugfs_vcpu_init() argument
1580 struct kvm *kvm = vcpu->kvm; in debugfs_vcpu_init()
1585 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); in debugfs_vcpu_init()
1586 if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir)) in debugfs_vcpu_init()
1588 vcpu->arch.debugfs_timings = in debugfs_vcpu_init()
1589 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, in debugfs_vcpu_init()
1590 vcpu, &debugfs_timings_ops); in debugfs_vcpu_init()
1594 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) in debugfs_vcpu_init() argument
1602 struct kvm_vcpu *vcpu; in kvmppc_core_vcpu_create_hv() local
1612 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in kvmppc_core_vcpu_create_hv()
1613 if (!vcpu) in kvmppc_core_vcpu_create_hv()
1616 err = kvm_vcpu_init(vcpu, kvm, id); in kvmppc_core_vcpu_create_hv()
1620 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
1627 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
1629 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
1632 vcpu->arch.mmcr[0] = MMCR0_FC; in kvmppc_core_vcpu_create_hv()
1633 vcpu->arch.ctrl = CTRL_RUNLATCH; in kvmppc_core_vcpu_create_hv()
1635 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); in kvmppc_core_vcpu_create_hv()
1636 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
1637 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
1638 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
1639 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
1641 kvmppc_mmu_book3s_hv_init(vcpu); in kvmppc_core_vcpu_create_hv()
1643 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
1645 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
1662 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
1663 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
1665 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
1666 kvmppc_sanity_check(vcpu); in kvmppc_core_vcpu_create_hv()
1668 debugfs_vcpu_init(vcpu, id); in kvmppc_core_vcpu_create_hv()
1670 return vcpu; in kvmppc_core_vcpu_create_hv()
1673 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvmppc_core_vcpu_create_hv()
1685 static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_free_hv() argument
1687 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
1688 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
1689 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
1690 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
1691 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
1692 kvm_vcpu_uninit(vcpu); in kvmppc_core_vcpu_free_hv()
1693 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvmppc_core_vcpu_free_hv()
1696 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) in kvmppc_core_check_requests_hv() argument
1702 static void kvmppc_set_timer(struct kvm_vcpu *vcpu) in kvmppc_set_timer() argument
1707 if (now > vcpu->arch.dec_expires) { in kvmppc_set_timer()
1709 kvmppc_core_queue_dec(vcpu); in kvmppc_set_timer()
1710 kvmppc_core_prepare_to_enter(vcpu); in kvmppc_set_timer()
1713 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC in kvmppc_set_timer()
1715 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), in kvmppc_set_timer()
1717 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
1720 static void kvmppc_end_cede(struct kvm_vcpu *vcpu) in kvmppc_end_cede() argument
1722 vcpu->arch.ceded = 0; in kvmppc_end_cede()
1723 if (vcpu->arch.timer_running) { in kvmppc_end_cede()
1724 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_end_cede()
1725 vcpu->arch.timer_running = 0; in kvmppc_end_cede()
1732 struct kvm_vcpu *vcpu) in kvmppc_remove_runnable() argument
1736 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
1738 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
1740 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
1741 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
1742 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
1743 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
1744 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
1746 list_del(&vcpu->arch.run_list); in kvmppc_remove_runnable()
1791 static void kvmppc_start_thread(struct kvm_vcpu *vcpu) in kvmppc_start_thread() argument
1795 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_start_thread()
1797 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
1798 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
1799 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
1801 cpu = vc->pcpu + vcpu->arch.ptid; in kvmppc_start_thread()
1804 tpaca->kvm_hstate.ptid = vcpu->arch.ptid; in kvmppc_start_thread()
1805 vcpu->cpu = vc->pcpu; in kvmppc_start_thread()
1808 tpaca->kvm_hstate.kvm_vcpu = vcpu; in kvmppc_start_thread()
1901 struct kvm_vcpu *vcpu, *vnext; in prepare_threads() local
1903 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in prepare_threads()
1905 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
1906 vcpu->arch.ret = -EINTR; in prepare_threads()
1907 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
1908 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
1909 vcpu->arch.dtl.update_pending) in prepare_threads()
1910 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
1913 kvmppc_remove_runnable(vc, vcpu); in prepare_threads()
1914 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
1922 struct kvm_vcpu *vcpu, *vnext; in post_guest_process() local
1925 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in post_guest_process()
1928 if (now < vcpu->arch.dec_expires && in post_guest_process()
1929 kvmppc_core_pending_dec(vcpu)) in post_guest_process()
1930 kvmppc_core_dequeue_dec(vcpu); in post_guest_process()
1932 trace_kvm_guest_exit(vcpu); in post_guest_process()
1935 if (vcpu->arch.trap) in post_guest_process()
1936 ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, in post_guest_process()
1937 vcpu->arch.run_task); in post_guest_process()
1939 vcpu->arch.ret = ret; in post_guest_process()
1940 vcpu->arch.trap = 0; in post_guest_process()
1942 if (vcpu->arch.ceded) { in post_guest_process()
1944 kvmppc_end_cede(vcpu); in post_guest_process()
1946 kvmppc_set_timer(vcpu); in post_guest_process()
1948 if (!is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
1949 kvmppc_remove_runnable(vc, vcpu); in post_guest_process()
1950 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
1961 struct kvm_vcpu *vcpu, *vnext; in kvmppc_run_core() local
1991 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, in kvmppc_run_core()
1993 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
1994 kvmppc_remove_runnable(vc, vcpu); in kvmppc_run_core()
1995 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
2002 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_run_core()
2003 kvmppc_start_thread(vcpu); in kvmppc_run_core()
2004 kvmppc_create_dtl_entry(vcpu, vc); in kvmppc_run_core()
2005 trace_kvm_guest_enter(vcpu); in kvmppc_run_core()
2034 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) in kvmppc_run_core()
2035 vcpu->cpu = -1; in kvmppc_run_core()
2064 static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state) in kvmppc_wait_for_exec() argument
2068 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
2069 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) in kvmppc_wait_for_exec()
2071 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
2080 struct kvm_vcpu *vcpu; in kvmppc_vcore_blocked() local
2091 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_vcore_blocked()
2092 if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) { in kvmppc_vcore_blocked()
2113 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) in kvmppc_run_vcpu() argument
2119 trace_kvmppc_run_vcpu_enter(vcpu); in kvmppc_run_vcpu()
2122 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
2123 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
2124 kvmppc_update_vpas(vcpu); in kvmppc_run_vcpu()
2129 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
2131 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
2132 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
2133 vcpu->arch.kvm_run = kvm_run; in kvmppc_run_vcpu()
2134 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
2135 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
2136 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
2137 list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); in kvmppc_run_vcpu()
2147 kvmppc_create_dtl_entry(vcpu, vc); in kvmppc_run_vcpu()
2148 kvmppc_start_thread(vcpu); in kvmppc_run_vcpu()
2149 trace_kvm_guest_enter(vcpu); in kvmppc_run_vcpu()
2156 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
2160 kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE); in kvmppc_run_vcpu()
2175 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
2184 vc->runner = vcpu; in kvmppc_run_vcpu()
2198 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
2202 kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE); in kvmppc_run_vcpu()
2206 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
2207 kvmppc_remove_runnable(vc, vcpu); in kvmppc_run_vcpu()
2208 vcpu->stat.signal_exits++; in kvmppc_run_vcpu()
2210 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
2220 trace_kvmppc_run_vcpu_exit(vcpu, kvm_run); in kvmppc_run_vcpu()
2222 return vcpu->arch.ret; in kvmppc_run_vcpu()
2225 static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) in kvmppc_vcpu_run_hv() argument
2230 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
2235 kvmppc_core_prepare_to_enter(vcpu); in kvmppc_vcpu_run_hv()
2243 atomic_inc(&vcpu->kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
2248 if (!vcpu->kvm->arch.hpte_setup_done) { in kvmppc_vcpu_run_hv()
2249 r = kvmppc_hv_setup_htab_rma(vcpu); in kvmppc_vcpu_run_hv()
2257 vcpu->arch.wqp = &vcpu->arch.vcore->wq; in kvmppc_vcpu_run_hv()
2258 vcpu->arch.pgdir = current->mm->pgd; in kvmppc_vcpu_run_hv()
2259 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
2262 r = kvmppc_run_vcpu(run, vcpu); in kvmppc_vcpu_run_hv()
2265 !(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_vcpu_run_hv()
2266 trace_kvm_hcall_enter(vcpu); in kvmppc_vcpu_run_hv()
2267 r = kvmppc_pseries_do_hcall(vcpu); in kvmppc_vcpu_run_hv()
2268 trace_kvm_hcall_exit(vcpu, r); in kvmppc_vcpu_run_hv()
2269 kvmppc_core_prepare_to_enter(vcpu); in kvmppc_vcpu_run_hv()
2271 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_vcpu_run_hv()
2272 r = kvmppc_book3s_hv_page_fault(run, vcpu, in kvmppc_vcpu_run_hv()
2273 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
2274 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_vcpu_run_hv()
2279 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
2280 atomic_dec(&vcpu->kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
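
kvmppc_vcpu_run_hv() (line 2225) services the KVM_RUN ioctl for this backend and keeps re-entering the guest while the exit handler returns a resume status. The matching userspace loop is sketched below; run is assumed to be the mmap'ed kvm_run area for vcpu_fd, handle_papr_hcall() is the sketch given earlier, and the exit handling is intentionally skeletal.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hedged sketch of the userspace run loop that ends up in kvmppc_vcpu_run_hv(). */
static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			break;				/* e.g. interrupted by a signal */
		switch (run->exit_reason) {
		case KVM_EXIT_PAPR_HCALL:
			handle_papr_hcall(run);		/* see the earlier sketch */
			break;
		default:
			return;				/* hand the exit to the caller */
		}
	}
}
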
2433 static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu) in kvmppc_mmu_destroy_hv() argument
2438 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) in kvmppc_hv_setup_htab_rma() argument
2441 struct kvm *kvm = vcpu->kvm; in kvmppc_hv_setup_htab_rma()
2497 kvmppc_map_vrma(vcpu, memslot, porder); in kvmppc_hv_setup_htab_rma()
2598 static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_core_emulate_op_hv() argument
2604 static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn, in kvmppc_core_emulate_mtspr_hv() argument
2610 static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, in kvmppc_core_emulate_mfspr_hv() argument