Lines matching refs:arch (identifier cross-reference: each hit below references a vcpu->arch or kvm->arch field; the function names and line numbers correspond to the Book3S HV KVM code, arch/powerpc/kvm/book3s_hv.c)

126 if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid)) in kvmppc_fast_vcpu_kick_hv()
169 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
186 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
187 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
188 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
189 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
190 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
192 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
197 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
205 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
206 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
207 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
208 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
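The load/put pair above implements stolen-time accounting: put() timestamps the moment a busy vcpu loses its host CPU, and load() charges the elapsed gap to busy_stolen. A minimal userspace sketch of that pattern, assuming a pthread mutex in place of the kernel spinlock and clock_gettime() in place of mftb(); all names below are illustrative, not the kernel's:

    /* Model of the busy-stolen accounting in kvmppc_core_vcpu_load_hv()
     * and kvmppc_core_vcpu_put_hv().  TB_NIL stands for "no timestamp". */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define TB_NIL ((uint64_t)-1)
    enum vcpu_state { VCPU_RUNNABLE, VCPU_BUSY_IN_HOST };

    struct vcpu_acct {
        pthread_mutex_t tbacct_lock;  /* models vcpu->arch.tbacct_lock */
        enum vcpu_state state;
        uint64_t busy_stolen;         /* time lost while busy in host */
        uint64_t busy_preempt;        /* last preemption time, or TB_NIL */
    };

    static uint64_t tb_now(void)      /* stand-in for mftb() */
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    /* On load: if we were busy in the host with a preempt timestamp,
     * charge the elapsed time to busy_stolen and clear the timestamp. */
    static void vcpu_load(struct vcpu_acct *v)
    {
        pthread_mutex_lock(&v->tbacct_lock);
        if (v->state == VCPU_BUSY_IN_HOST && v->busy_preempt != TB_NIL) {
            v->busy_stolen += tb_now() - v->busy_preempt;
            v->busy_preempt = TB_NIL;
        }
        pthread_mutex_unlock(&v->tbacct_lock);
    }

    /* On put: if still busy in the host, record when we were preempted. */
    static void vcpu_put(struct vcpu_acct *v)
    {
        pthread_mutex_lock(&v->tbacct_lock);
        if (v->state == VCPU_BUSY_IN_HOST)
            v->busy_preempt = tb_now();
        pthread_mutex_unlock(&v->tbacct_lock);
    }

    int main(void)
    {
        struct vcpu_acct v = { PTHREAD_MUTEX_INITIALIZER,
                               VCPU_BUSY_IN_HOST, 0, TB_NIL };
        vcpu_put(&v);                 /* preempted */
        vcpu_load(&v);                /* resumed: delta lands in busy_stolen */
        printf("busy_stolen = %llu ns\n", (unsigned long long)v.busy_stolen);
        return 0;
    }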
219 vcpu->arch.shregs.msr = msr; in kvmppc_set_msr_hv()
225 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
231 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
274 vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
280 vcpu->arch.ctr, vcpu->arch.lr); in kvmppc_dump_regs()
282 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
284 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
286 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
288 vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
289 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
291 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
292 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
293 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
295 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
297 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
298 vcpu->arch.last_inst); in kvmppc_dump_regs()
329 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
335 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
398 spin_lock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
404 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
415 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
418 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
425 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
428 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
435 if (vpa_is_registered(&tvcpu->arch.dtl) || in do_h_register_vpa()
436 vpa_is_registered(&tvcpu->arch.slb_shadow)) in do_h_register_vpa()
439 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
444 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
449 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
460 spin_unlock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
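The vpa_is_registered() checks at lines 415-436 above encode a dependency rule: a DTL or SLB-shadow area may only be registered while a VPA is registered, and the VPA may not be unregistered while either dependant is still live. A hedged boolean model of just that check (area names and return conventions assumed, not taken from the kernel):

    #include <stdbool.h>
    #include <stdio.h>

    enum area_id { AREA_VPA, AREA_DTL, AREA_SLB_SHADOW };

    struct areas { bool vpa, dtl, slb_shadow; };

    /* True when the (un)registration respects the dependency rules. */
    static bool may_change(const struct areas *a, enum area_id id,
                           bool registering)
    {
        if (registering && id != AREA_VPA && !a->vpa)
            return false;          /* DTL/SLB shadow need a live VPA */
        if (!registering && id == AREA_VPA && (a->dtl || a->slb_shadow))
            return false;          /* VPA still pinned by dependants */
        return true;
    }

    int main(void)
    {
        struct areas a = {0};
        printf("register DTL without VPA: %s\n",
               may_change(&a, AREA_DTL, true) ? "ok" : "H_RESOURCE");
        a.vpa = a.dtl = true;
        printf("unregister VPA with DTL live: %s\n",
               may_change(&a, AREA_VPA, false) ? "ok" : "H_RESOURCE");
        return 0;
    }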
482 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
487 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
517 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
518 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
519 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
522 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
523 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
524 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
525 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
526 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
528 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
529 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); in kvmppc_update_vpas()
530 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
531 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
533 if (vcpu->arch.slb_shadow.update_pending) in kvmppc_update_vpas()
534 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); in kvmppc_update_vpas()
535 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
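kvmppc_update_vpas() above shows a "check outside, apply under the lock" shape: a cheap unlocked test of the three update_pending flags, then the lock is taken only when there is work. A hypothetical userspace model of that flow (the pin/unpin work is reduced to a pointer assignment):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct vpa_area {
        bool  update_pending;  /* set by the H_REGISTER_VPA path */
        void *pinned_addr;     /* would be the pinned guest page */
        void *next_gpa;        /* address requested by the guest */
    };

    struct vcpu_model {
        pthread_mutex_t vpa_update_lock;
        struct vpa_area vpa, slb_shadow, dtl;
    };

    static void apply_update(struct vpa_area *a)
    {
        a->pinned_addr = a->next_gpa;  /* stands in for pin/unpin work */
        a->update_pending = false;
    }

    static void update_vpas(struct vcpu_model *v)
    {
        /* Common case: nothing pending, so avoid the lock entirely. */
        if (!(v->vpa.update_pending || v->slb_shadow.update_pending ||
              v->dtl.update_pending))
            return;

        pthread_mutex_lock(&v->vpa_update_lock);
        if (v->vpa.update_pending)
            apply_update(&v->vpa);
        if (v->dtl.update_pending)
            apply_update(&v->dtl);
        if (v->slb_shadow.update_pending)
            apply_update(&v->slb_shadow);
        pthread_mutex_unlock(&v->vpa_update_lock);
    }

    int main(void)
    {
        struct vcpu_model v = { .vpa_update_lock = PTHREAD_MUTEX_INITIALIZER };
        v.dtl.update_pending = true;
        v.dtl.next_gpa = &v;           /* dummy address */
        update_vpas(&v);
        printf("dtl pinned: %s\n", v.dtl.pinned_addr ? "yes" : "no");
        return 0;
    }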
565 dt = vcpu->arch.dtl_ptr; in kvmppc_create_dtl_entry()
566 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_create_dtl_entry()
569 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_create_dtl_entry()
570 vcpu->arch.stolen_logged = core_stolen; in kvmppc_create_dtl_entry()
571 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_create_dtl_entry()
572 stolen += vcpu->arch.busy_stolen; in kvmppc_create_dtl_entry()
573 vcpu->arch.busy_stolen = 0; in kvmppc_create_dtl_entry()
574 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_create_dtl_entry()
579 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); in kvmppc_create_dtl_entry()
583 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in kvmppc_create_dtl_entry()
585 if (dt == vcpu->arch.dtl.pinned_end) in kvmppc_create_dtl_entry()
586 dt = vcpu->arch.dtl.pinned_addr; in kvmppc_create_dtl_entry()
587 vcpu->arch.dtl_ptr = dt; in kvmppc_create_dtl_entry()
590 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in kvmppc_create_dtl_entry()
591 vcpu->arch.dtl.dirty = true; in kvmppc_create_dtl_entry()
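kvmppc_create_dtl_entry() above writes into a ring: the cursor (dtl_ptr) wraps from pinned_end back to pinned_addr, while dtl_index keeps counting so the guest can detect overwritten entries. A toy model of that wraparound (entry layout and sizes are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    struct dtl_entry { uint64_t timebase; uint64_t stolen; };

    #define DTL_ENTRIES 4
    static struct dtl_entry dtl_buf[DTL_ENTRIES];

    static struct dtl_entry *dtl_ptr = dtl_buf;              /* write cursor */
    static struct dtl_entry *const dtl_end = dtl_buf + DTL_ENTRIES;
    static uint64_t dtl_index;                               /* never wraps */

    static void create_dtl_entry(uint64_t now, uint64_t stolen)
    {
        dtl_ptr->timebase = now;
        dtl_ptr->stolen = stolen;
        if (++dtl_ptr == dtl_end)    /* wrap the cursor, not the index */
            dtl_ptr = dtl_buf;
        ++dtl_index;
    }

    int main(void)
    {
        for (uint64_t i = 0; i < 6; i++)
            create_dtl_entry(1000 + i, i);
        /* 6 entries into a 4-slot ring: index says 6, slots hold the last 4 */
        printf("dtl_index = %llu, slot 0 timebase = %llu\n",
               (unsigned long long)dtl_index,
               (unsigned long long)dtl_buf[0].timebase);
        return 0;
    }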
596 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) in kvmppc_power8_compatible()
598 if ((!vcpu->arch.vcore->arch_compat) && in kvmppc_power8_compatible()
619 vcpu->arch.ciabr = value1; in kvmppc_h_set_mode()
628 vcpu->arch.dawr = value1; in kvmppc_h_set_mode()
629 vcpu->arch.dawrx = value2; in kvmppc_h_set_mode()
638 struct kvmppc_vcore *vcore = target->arch.vcore; in kvm_arch_vcpu_yield_to()
649 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && in kvm_arch_vcpu_yield_to()
662 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
663 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
666 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
679 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
692 tvcpu->arch.prodded = 1; in kvmppc_pseries_do_hcall()
694 if (vcpu->arch.ceded) { in kvmppc_pseries_do_hcall()
721 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
767 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
812 run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
829 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
865 flags = vcpu->arch.shregs.msr & 0x1f0000ull; in kvmppc_handle_exit_hv()
883 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
898 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
899 vcpu->arch.fault_dsisr = 0; in kvmppc_handle_exit_hv()
910 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
911 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
912 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
913 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
933 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
934 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
935 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
949 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
950 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
951 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
952 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
964 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
968 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
970 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
971 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
975 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
984 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
998 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
1001 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
1003 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
1038 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
1041 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
1044 *val = get_reg_val(id, vcpu->arch.dscr); in kvmppc_get_one_reg_hv()
1047 *val = get_reg_val(id, vcpu->arch.purr); in kvmppc_get_one_reg_hv()
1050 *val = get_reg_val(id, vcpu->arch.spurr); in kvmppc_get_one_reg_hv()
1053 *val = get_reg_val(id, vcpu->arch.amr); in kvmppc_get_one_reg_hv()
1056 *val = get_reg_val(id, vcpu->arch.uamor); in kvmppc_get_one_reg_hv()
1060 *val = get_reg_val(id, vcpu->arch.mmcr[i]); in kvmppc_get_one_reg_hv()
1064 *val = get_reg_val(id, vcpu->arch.pmc[i]); in kvmppc_get_one_reg_hv()
1068 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
1071 *val = get_reg_val(id, vcpu->arch.siar); in kvmppc_get_one_reg_hv()
1074 *val = get_reg_val(id, vcpu->arch.sdar); in kvmppc_get_one_reg_hv()
1077 *val = get_reg_val(id, vcpu->arch.sier); in kvmppc_get_one_reg_hv()
1080 *val = get_reg_val(id, vcpu->arch.iamr); in kvmppc_get_one_reg_hv()
1083 *val = get_reg_val(id, vcpu->arch.pspb); in kvmppc_get_one_reg_hv()
1086 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
1089 *val = get_reg_val(id, vcpu->arch.dawr); in kvmppc_get_one_reg_hv()
1092 *val = get_reg_val(id, vcpu->arch.dawrx); in kvmppc_get_one_reg_hv()
1095 *val = get_reg_val(id, vcpu->arch.ciabr); in kvmppc_get_one_reg_hv()
1098 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
1101 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
1104 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
1107 *val = get_reg_val(id, vcpu->arch.pid); in kvmppc_get_one_reg_hv()
1110 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
1113 *val = get_reg_val(id, vcpu->arch.wort); in kvmppc_get_one_reg_hv()
1116 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1117 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
1118 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1121 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1122 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
1123 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
1124 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1127 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1128 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
1129 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
1130 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1133 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
1137 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
1140 *val = get_reg_val(id, vcpu->arch.ppr); in kvmppc_get_one_reg_hv()
1144 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
1147 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
1150 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
1154 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
1162 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
1165 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
1172 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
1175 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
1178 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
1181 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
1184 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
1187 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
1190 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
1194 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
1199 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
1202 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
1206 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); in kvmppc_get_one_reg_hv()
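The get_one_reg/set_one_reg pair above (and the set side that follows) is one big symmetric switch: each register id maps to one vcpu field, get copies out, set copies in. A minimal model of that shape, with hypothetical ids rather than the real KVM_REG_PPC_* values:

    #include <stdint.h>
    #include <stdio.h>

    enum { REG_DSCR, REG_PURR, REG_SPURR };   /* illustrative ids */

    struct vcpu_regs { uint64_t dscr, purr, spurr; };

    static int get_one_reg(const struct vcpu_regs *v, int id, uint64_t *val)
    {
        switch (id) {
        case REG_DSCR:  *val = v->dscr;  break;
        case REG_PURR:  *val = v->purr;  break;
        case REG_SPURR: *val = v->spurr; break;
        default:        return -1;        /* kernel returns -EINVAL here */
        }
        return 0;
    }

    static int set_one_reg(struct vcpu_regs *v, int id, uint64_t val)
    {
        switch (id) {
        case REG_DSCR:  v->dscr = val;  break;
        case REG_PURR:  v->purr = val;  break;
        case REG_SPURR: v->spurr = val; break;
        default:        return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct vcpu_regs v = {0};
        uint64_t x;
        set_one_reg(&v, REG_DSCR, 0x2a);
        get_one_reg(&v, REG_DSCR, &x);
        printf("DSCR = 0x%llx\n", (unsigned long long)x);
        return 0;
    }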
1230 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1233 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
1236 vcpu->arch.dscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1239 vcpu->arch.purr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1242 vcpu->arch.spurr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1245 vcpu->arch.amr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1248 vcpu->arch.uamor = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1252 vcpu->arch.mmcr[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1256 vcpu->arch.pmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1260 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1263 vcpu->arch.siar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1266 vcpu->arch.sdar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1269 vcpu->arch.sier = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1272 vcpu->arch.iamr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1275 vcpu->arch.pspb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1278 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1281 vcpu->arch.dawr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1284 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
1287 vcpu->arch.ciabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1289 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
1290 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ in kvmppc_set_one_reg_hv()
1293 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1296 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1299 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1302 vcpu->arch.pid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1305 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1308 vcpu->arch.wort = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1313 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
1314 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
1316 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
1322 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
1324 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
1331 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
1334 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
1338 vcpu->arch.vcore->tb_offset = in kvmppc_set_one_reg_hv()
1348 vcpu->arch.ppr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1352 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1355 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1358 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1362 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1370 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
1373 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
1379 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1382 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1385 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1388 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1391 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1394 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1397 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1401 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1406 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1409 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1437 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
1456 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
1457 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
1458 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
1459 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
1460 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
1583 if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) in debugfs_vcpu_init()
1585 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); in debugfs_vcpu_init()
1586 if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir)) in debugfs_vcpu_init()
1588 vcpu->arch.debugfs_timings = in debugfs_vcpu_init()
1589 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, in debugfs_vcpu_init()
1620 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
1627 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
1629 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
1632 vcpu->arch.mmcr[0] = MMCR0_FC; in kvmppc_core_vcpu_create_hv()
1633 vcpu->arch.ctrl = CTRL_RUNLATCH; in kvmppc_core_vcpu_create_hv()
1636 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
1637 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
1638 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
1639 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
1643 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
1645 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
1648 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
1651 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
1652 kvm->arch.online_vcores++; in kvmppc_core_vcpu_create_hv()
1662 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
1663 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
1665 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
1687 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
1688 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
1689 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
1690 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
1691 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
1707 if (now > vcpu->arch.dec_expires) { in kvmppc_set_timer()
1713 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC in kvmppc_set_timer()
1715 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), in kvmppc_set_timer()
1717 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
1722 vcpu->arch.ceded = 0; in kvmppc_end_cede()
1723 if (vcpu->arch.timer_running) { in kvmppc_end_cede()
1724 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_end_cede()
1725 vcpu->arch.timer_running = 0; in kvmppc_end_cede()
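kvmppc_set_timer() above converts a decrementer deadline held in timebase ticks to nanoseconds before arming an hrtimer, and kvmppc_end_cede() cancels it. A sketch of that arithmetic, assuming a made-up 512 MHz timebase (the real tb_ticks_per_sec comes from the device tree):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ull
    static const uint64_t tb_ticks_per_sec = 512000000ull; /* assumed */

    static int timer_running;

    /* Expired deadlines fire immediately; future ones get scheduled. */
    static void set_timer(uint64_t now_tb, uint64_t dec_expires_tb)
    {
        if (now_tb > dec_expires_tb) {
            printf("decrementer already expired: queue interrupt now\n");
            return;
        }
        uint64_t dec_nsec = (dec_expires_tb - now_tb) * NSEC_PER_SEC
                            / tb_ticks_per_sec;
        printf("arm hrtimer for %llu ns\n", (unsigned long long)dec_nsec);
        timer_running = 1;       /* would be hrtimer_start() */
    }

    static void end_cede(void)
    {
        if (timer_running) {
            timer_running = 0;   /* would be hrtimer_try_to_cancel() */
            printf("timer cancelled on cede end\n");
        }
    }

    int main(void)
    {
        set_timer(1000, 512001000);  /* ~1 s in the future at 512 MHz */
        end_cede();
        return 0;
    }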
1736 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
1738 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
1740 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
1741 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
1742 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
1743 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
1744 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
1746 list_del(&vcpu->arch.run_list); in kvmppc_remove_runnable()
1795 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_start_thread()
1797 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
1798 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
1799 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
1801 cpu = vc->pcpu + vcpu->arch.ptid; in kvmppc_start_thread()
1804 tpaca->kvm_hstate.ptid = vcpu->arch.ptid; in kvmppc_start_thread()
1904 arch.run_list) { in prepare_threads()
1905 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
1906 vcpu->arch.ret = -EINTR; in prepare_threads()
1907 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
1908 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
1909 vcpu->arch.dtl.update_pending) in prepare_threads()
1910 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
1914 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
1926 arch.run_list) { in post_guest_process()
1928 if (now < vcpu->arch.dec_expires && in post_guest_process()
1935 if (vcpu->arch.trap) in post_guest_process()
1936 ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, in post_guest_process()
1937 vcpu->arch.run_task); in post_guest_process()
1939 vcpu->arch.ret = ret; in post_guest_process()
1940 vcpu->arch.trap = 0; in post_guest_process()
1942 if (vcpu->arch.ceded) { in post_guest_process()
1948 if (!is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
1950 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
1972 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
1992 arch.run_list) { in kvmppc_run_core()
1993 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
1995 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
2002 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_run_core()
2034 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) in kvmppc_run_core()
2068 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
2069 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) in kvmppc_wait_for_exec()
2071 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
2091 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_vcore_blocked()
2092 if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) { in kvmppc_vcore_blocked()
2122 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
2123 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
2129 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
2131 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
2132 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
2133 vcpu->arch.kvm_run = kvm_run; in kvmppc_run_vcpu()
2134 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
2135 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
2136 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
2137 list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); in kvmppc_run_vcpu()
2156 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
2165 arch.run_list) { in kvmppc_run_vcpu()
2167 if (signal_pending(v->arch.run_task)) { in kvmppc_run_vcpu()
2170 v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; in kvmppc_run_vcpu()
2171 v->arch.ret = -EINTR; in kvmppc_run_vcpu()
2172 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
2175 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
2178 list_for_each_entry(v, &vc->runnable_threads, arch.run_list) { in kvmppc_run_vcpu()
2179 if (!v->arch.pending_exceptions) in kvmppc_run_vcpu()
2180 n_ceded += v->arch.ceded; in kvmppc_run_vcpu()
2182 v->arch.ceded = 0; in kvmppc_run_vcpu()
2198 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
2206 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
2210 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
2216 struct kvm_vcpu, arch.run_list); in kvmppc_run_vcpu()
2217 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
2222 return vcpu->arch.ret; in kvmppc_run_vcpu()
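kvmppc_run_vcpu() and kvmppc_remove_runnable() above manage a per-vcore list: an entering vcpu marks itself RUNNABLE and joins runnable_threads; on removal it drops back to BUSY_IN_HOST. A toy model using a plain singly-linked list in place of the kernel's intrusive list_head (all names assumed):

    #include <stdio.h>

    enum state { NOTREADY, RUNNABLE, BUSY_IN_HOST };

    struct vcpu_m {
        int id;
        enum state state;
        struct vcpu_m *next;     /* stands in for arch.run_list */
    };

    struct vcore_m {
        struct vcpu_m *runnable; /* stands in for vc->runnable_threads */
        int n_runnable;
    };

    static void add_runnable(struct vcore_m *vc, struct vcpu_m *v)
    {
        v->state = RUNNABLE;
        v->next = vc->runnable;
        vc->runnable = v;
        vc->n_runnable++;
    }

    static void remove_runnable(struct vcore_m *vc, struct vcpu_m *v)
    {
        struct vcpu_m **p = &vc->runnable;
        while (*p && *p != v)
            p = &(*p)->next;
        if (*p) {
            *p = v->next;
            vc->n_runnable--;
            v->state = BUSY_IN_HOST;  /* back to host context */
        }
    }

    int main(void)
    {
        struct vcore_m vc = {0};
        struct vcpu_m a = { .id = 0 }, b = { .id = 1 };
        add_runnable(&vc, &a);
        add_runnable(&vc, &b);
        remove_runnable(&vc, &a);
        printf("n_runnable = %d, vcpu0 state = %d\n", vc.n_runnable, a.state);
        return 0;
    }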
2230 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
2243 atomic_inc(&vcpu->kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
2248 if (!vcpu->kvm->arch.hpte_setup_done) { in kvmppc_vcpu_run_hv()
2257 vcpu->arch.wqp = &vcpu->arch.vcore->wq; in kvmppc_vcpu_run_hv()
2258 vcpu->arch.pgdir = current->mm->pgd; in kvmppc_vcpu_run_hv()
2259 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
2265 !(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_vcpu_run_hv()
2273 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
2279 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
2280 atomic_dec(&vcpu->kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
2365 if (!dont || free->arch.rmap != dont->arch.rmap) { in kvmppc_core_free_memslot_hv()
2366 vfree(free->arch.rmap); in kvmppc_core_free_memslot_hv()
2367 free->arch.rmap = NULL; in kvmppc_core_free_memslot_hv()
2374 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); in kvmppc_core_create_memslot_hv()
2375 if (!slot->arch.rmap) in kvmppc_core_create_memslot_hv()
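The memslot hooks above size the reverse map by the slot's page count, zero it with vzalloc(), and vfree() it on teardown. Modeled with calloc()/free(); only the sizing mirrors the listing, the rest is assumed:

    #include <stdio.h>
    #include <stdlib.h>

    struct memslot_m {
        unsigned long npages;
        unsigned long *rmap;     /* stands in for slot->arch.rmap */
    };

    static int create_memslot(struct memslot_m *slot, unsigned long npages)
    {
        slot->npages = npages;
        slot->rmap = calloc(npages, sizeof(*slot->rmap)); /* zeroed */
        return slot->rmap ? 0 : -1;                       /* kernel: -ENOMEM */
    }

    static void free_memslot(struct memslot_m *slot)
    {
        free(slot->rmap);        /* kernel uses vfree() */
        slot->rmap = NULL;
    }

    int main(void)
    {
        struct memslot_m s;
        if (create_memslot(&s, 256) == 0)
            printf("rmap for %lu pages allocated\n", s.npages);
        free_memslot(&s);
        return 0;
    }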
2416 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
2419 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
2422 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr()
2428 if (++cores_done >= kvm->arch.online_vcores) in kvmppc_update_lpcr()
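kvmppc_update_lpcr() above is a masked read-modify-write broadcast: only the bits covered by the mask change, the update is skipped when those bits already match, and the new value is pushed to every existing virtual core. A sketch with illustrative constants and core count:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CORES 4

    struct vcore_s { uint64_t lpcr; };

    static uint64_t kvm_lpcr;
    static struct vcore_s *vcores[MAX_CORES];

    static void update_lpcr(uint64_t lpcr, uint64_t mask)
    {
        if ((kvm_lpcr & mask) == lpcr)
            return;                    /* nothing to do */
        kvm_lpcr = (kvm_lpcr & ~mask) | lpcr;
        for (int i = 0; i < MAX_CORES; i++) {
            if (vcores[i])
                vcores[i]->lpcr = (vcores[i]->lpcr & ~mask) | lpcr;
        }
    }

    int main(void)
    {
        struct vcore_s c0 = { .lpcr = 0xf0 };
        vcores[0] = &c0;
        update_lpcr(0x01, 0x0f);       /* change only the low nibble */
        printf("vcore0 lpcr = 0x%llx\n", (unsigned long long)c0.lpcr);
        return 0;
    }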
2450 if (kvm->arch.hpte_setup_done) in kvmppc_hv_setup_htab_rma()
2454 if (!kvm->arch.hpt_virt) { in kvmppc_hv_setup_htab_rma()
2491 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvmppc_hv_setup_htab_rma()
2503 kvm->arch.hpte_setup_done = 1; in kvmppc_hv_setup_htab_rma()
2526 kvm->arch.lpid = lpid; in kvmppc_core_init_vm_hv()
2533 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_core_init_vm_hv()
2536 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, in kvmppc_core_init_vm_hv()
2537 sizeof(kvm->arch.enabled_hcalls)); in kvmppc_core_init_vm_hv()
2539 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); in kvmppc_core_init_vm_hv()
2542 kvm->arch.host_lpid = mfspr(SPRN_LPID); in kvmppc_core_init_vm_hv()
2543 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
2547 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | in kvmppc_core_init_vm_hv()
2552 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
2564 kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); in kvmppc_core_init_vm_hv()
2565 if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) in kvmppc_core_init_vm_hv()
2576 if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) { in kvmppc_free_vcores()
2577 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_free_vcores()
2581 kfree(kvm->arch.vcores[i]); in kvmppc_free_vcores()
2583 kvm->arch.online_vcores = 0; in kvmppc_free_vcores()
2588 debugfs_remove_recursive(kvm->arch.debugfs_dir); in kvmppc_core_destroy_vm_hv()