Lines matching refs:arch — references to the arch field in arch/powerpc/kvm/book3s_hv.c (each entry: source line number, matching code, enclosing function)

125 	if (kvmppc_ipi_thread(vcpu->arch.thread_cpu)) in kvmppc_fast_vcpu_kick_hv()
190 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
202 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
203 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
204 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
205 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
206 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
208 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
213 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
219 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
220 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
221 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
222 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
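The load/put pair above is per-vCPU stolen-time accounting: put() stamps busy_preempt with the current timebase when a busy-in-host vCPU is descheduled, and load() folds the elapsed interval into busy_stolen and resets the stamp to the TB_NIL sentinel, all under tbacct_lock. A minimal user-space sketch of that pattern; read_tb() and struct vcpu_acct are hypothetical stand-ins for mftb() and the vcpu->arch fields:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define TB_NIL (~0ULL)            /* sentinel: no preemption stamp */

    struct vcpu_acct {
            pthread_spinlock_t lock;  /* plays the role of tbacct_lock */
            uint64_t busy_stolen;     /* accumulated stolen time */
            uint64_t busy_preempt;    /* timebase at last preempt, or TB_NIL */
            int busy_in_host;         /* stands in for KVMPPC_VCPU_BUSY_IN_HOST */
    };

    /* hypothetical stand-in for the PPC mftb() timebase read */
    static uint64_t read_tb(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    /* analogue of kvmppc_core_vcpu_put_hv(): stamp the preemption time */
    static void vcpu_put(struct vcpu_acct *a)
    {
            pthread_spin_lock(&a->lock);
            if (a->busy_in_host)
                    a->busy_preempt = read_tb();
            pthread_spin_unlock(&a->lock);
    }

    /* analogue of kvmppc_core_vcpu_load_hv(): fold the interval in */
    static void vcpu_load(struct vcpu_acct *a)
    {
            pthread_spin_lock(&a->lock);
            if (a->busy_in_host && a->busy_preempt != TB_NIL) {
                    a->busy_stolen += read_tb() - a->busy_preempt;
                    a->busy_preempt = TB_NIL;
            }
            pthread_spin_unlock(&a->lock);
    }

    int main(void)
    {
            struct vcpu_acct a = { .busy_stolen = 0, .busy_preempt = TB_NIL,
                                   .busy_in_host = 1 };
            pthread_spin_init(&a.lock, PTHREAD_PROCESS_PRIVATE);
            vcpu_put(&a);
            vcpu_load(&a);
            printf("stolen: %llu ns\n", (unsigned long long)a.busy_stolen);
            return 0;
    }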
233 vcpu->arch.shregs.msr = msr; in kvmppc_set_msr_hv()
239 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
245 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
288 vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
294 vcpu->arch.ctr, vcpu->arch.lr); in kvmppc_dump_regs()
296 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
298 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
300 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
302 vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
303 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
305 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
306 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
307 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
309 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
311 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
312 vcpu->arch.last_inst); in kvmppc_dump_regs()
343 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
349 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
412 spin_lock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
418 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
429 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
432 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
439 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
442 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
449 if (vpa_is_registered(&tvcpu->arch.dtl) || in do_h_register_vpa()
450 vpa_is_registered(&tvcpu->arch.slb_shadow)) in do_h_register_vpa()
453 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
458 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
463 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
474 spin_unlock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
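The vpa_is_registered() checks at 429, 439 and 449–450 encode the ordering rules of H_REGISTER_VPA: a DTL or SLB shadow buffer can only be registered while a VPA is registered, and the VPA cannot be unregistered while either dependent buffer is still live. A compact sketch of that dependency check, with hypothetical names (struct vpa_state, register_buf):

    #include <stdbool.h>
    #include <stdio.h>

    /* hypothetical per-vCPU registration state */
    struct vpa_state {
            bool vpa, dtl, slb_shadow;
    };

    /* DTL and SLB shadow require a registered VPA (cf. lines 429, 439) */
    static int register_buf(struct vpa_state *s, bool *buf)
    {
            if (buf != &s->vpa && !s->vpa)
                    return -1;          /* H_RESOURCE in the real hcall */
            *buf = true;
            return 0;
    }

    /* the VPA may not go away under a live DTL/SLB shadow (cf. 449-450) */
    static int unregister_buf(struct vpa_state *s, bool *buf)
    {
            if (buf == &s->vpa && (s->dtl || s->slb_shadow))
                    return -1;
            *buf = false;
            return 0;
    }

    int main(void)
    {
            struct vpa_state s = {0};
            printf("dtl before vpa: %d\n", register_buf(&s, &s.dtl));   /* fails */
            register_buf(&s, &s.vpa);
            printf("dtl after vpa:  %d\n", register_buf(&s, &s.dtl));   /* ok */
            printf("unreg live vpa: %d\n", unregister_buf(&s, &s.vpa)); /* fails */
            return 0;
    }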
496 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
501 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
531 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
532 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
533 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
536 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
537 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
538 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
539 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
540 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
542 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
543 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); in kvmppc_update_vpas()
544 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
545 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
547 if (vcpu->arch.slb_shadow.update_pending) in kvmppc_update_vpas()
548 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); in kvmppc_update_vpas()
549 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
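Note the shape of kvmppc_update_vpas(): lines 531–533 test all three update_pending flags without the lock, so the common no-work path never touches vpa_update_lock; the lock at 536 is taken only when an update is actually pending. That unlocked test is safe in the kernel's context; the sketch below only shows the shape, with the repinning work elided:

    #include <pthread.h>
    #include <stdbool.h>

    /* hypothetical miniature of the VPA bookkeeping */
    struct vpa { bool update_pending; };

    struct vcpu {
            pthread_mutex_t vpa_update_lock;
            struct vpa vpa, slb_shadow, dtl;
    };

    /* analogue of kvmppc_update_vpas(): unlocked fast-path test first */
    static void update_vpas(struct vcpu *v)
    {
            if (!(v->vpa.update_pending ||
                  v->slb_shadow.update_pending ||
                  v->dtl.update_pending))
                    return;             /* common case: nothing pending */

            pthread_mutex_lock(&v->vpa_update_lock);
            /* the real code repins guest pages here; we just clear flags */
            v->vpa.update_pending = false;
            v->dtl.update_pending = false;
            v->slb_shadow.update_pending = false;
            pthread_mutex_unlock(&v->vpa_update_lock);
    }

    int main(void)
    {
            struct vcpu v = { .vpa_update_lock = PTHREAD_MUTEX_INITIALIZER,
                              .vpa = { .update_pending = true } };
            update_vpas(&v);
            return v.vpa.update_pending;    /* 0 after the flush */
    }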
579 dt = vcpu->arch.dtl_ptr; in kvmppc_create_dtl_entry()
580 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_create_dtl_entry()
583 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_create_dtl_entry()
584 vcpu->arch.stolen_logged = core_stolen; in kvmppc_create_dtl_entry()
585 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_create_dtl_entry()
586 stolen += vcpu->arch.busy_stolen; in kvmppc_create_dtl_entry()
587 vcpu->arch.busy_stolen = 0; in kvmppc_create_dtl_entry()
588 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_create_dtl_entry()
593 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); in kvmppc_create_dtl_entry()
597 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in kvmppc_create_dtl_entry()
599 if (dt == vcpu->arch.dtl.pinned_end) in kvmppc_create_dtl_entry()
600 dt = vcpu->arch.dtl.pinned_addr; in kvmppc_create_dtl_entry()
601 vcpu->arch.dtl_ptr = dt; in kvmppc_create_dtl_entry()
604 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in kvmppc_create_dtl_entry()
605 vcpu->arch.dtl.dirty = true; in kvmppc_create_dtl_entry()
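Lines 579–605 are the dispatch-trace-log write: the entry at dtl_ptr is filled in, the pointer wraps from pinned_end back to pinned_addr, and only then is the guest-visible dtl_idx incremented, so the guest never observes a half-written entry. A user-space sketch of the wraparound, with hypothetical struct names and a fixed-size ring in place of the pinned guest buffer:

    #include <stdint.h>
    #include <stdio.h>

    #define DTL_ENTRIES 4

    struct dtl_entry { uint16_t processor_id; uint64_t srr1; };

    struct dtl_ring {
            struct dtl_entry buf[DTL_ENTRIES];
            struct dtl_entry *ptr;  /* next slot to write (dtl_ptr) */
            uint64_t idx;           /* monotonic index the guest polls */
    };

    static void dtl_push(struct dtl_ring *r, uint16_t cpu, uint64_t srr1)
    {
            struct dtl_entry *dt = r->ptr;

            dt->processor_id = cpu;         /* fill the entry first... */
            dt->srr1 = srr1;
            if (++dt == &r->buf[DTL_ENTRIES])
                    dt = &r->buf[0];        /* wrap: pinned_end -> pinned_addr */
            r->ptr = dt;
            r->idx++;                       /* ...then publish the new index */
    }

    int main(void)
    {
            struct dtl_ring r = { .ptr = &r.buf[0], .idx = 0 };
            for (int i = 0; i < 6; i++)
                    dtl_push(&r, 0, 0);
            printf("idx=%llu slot=%td\n",
                   (unsigned long long)r.idx, r.ptr - &r.buf[0]);
            return 0;
    }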
610 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) in kvmppc_power8_compatible()
612 if ((!vcpu->arch.vcore->arch_compat) && in kvmppc_power8_compatible()
633 vcpu->arch.ciabr = value1; in kvmppc_h_set_mode()
642 vcpu->arch.dawr = value1; in kvmppc_h_set_mode()
643 vcpu->arch.dawrx = value2; in kvmppc_h_set_mode()
652 struct kvmppc_vcore *vcore = target->arch.vcore; in kvm_arch_vcpu_yield_to()
663 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && in kvm_arch_vcpu_yield_to()
677 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
678 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
681 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
694 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
707 tvcpu->arch.prodded = 1; in kvmppc_pseries_do_hcall()
709 if (vcpu->arch.ceded) { in kvmppc_pseries_do_hcall()
736 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
782 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
827 run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
844 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
880 flags = vcpu->arch.shregs.msr & 0x1f0000ull; in kvmppc_handle_exit_hv()
898 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
913 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
914 vcpu->arch.fault_dsisr = 0; in kvmppc_handle_exit_hv()
925 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
926 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
927 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
928 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
948 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
949 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
950 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
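The switch at 844 dispatches on vcpu->arch.trap: a guest hypercall sets hcall_needed (898) and exits to the host to complete it, an instruction-fetch fault forges fault_dar from the PC with a zero fault_dsisr (913–914), and the default case dumps registers and reports the raw trap through hardware_exit_reason (948–950). A toy dispatcher in the same shape; the trap numbers and resume codes here are hypothetical, not the kernel's:

    #include <stdio.h>

    /* hypothetical trap numbers and resume codes for illustration */
    enum trap { TRAP_SYSCALL, TRAP_DSI, TRAP_UNKNOWN };
    enum resume { RESUME_GUEST, RESUME_HOST, RESUME_PAGE_FAULT };

    /* analogue of the switch in kvmppc_handle_exit_hv() */
    static enum resume handle_exit(enum trap trap, int *hcall_needed)
    {
            switch (trap) {
            case TRAP_SYSCALL:      /* hypercall: let userspace finish it */
                    *hcall_needed = 1;
                    return RESUME_HOST;
            case TRAP_DSI:          /* storage fault: resolve, re-enter */
                    return RESUME_PAGE_FAULT;
            default:                /* unhandled: report and exit to host */
                    fprintf(stderr, "unknown trap %d\n", trap);
                    return RESUME_HOST;
            }
    }

    int main(void)
    {
            int hcall = 0;
            printf("%d (hcall=%d)\n", handle_exit(TRAP_SYSCALL, &hcall), hcall);
            return 0;
    }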
964 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
965 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
966 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
967 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
979 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
983 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
985 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
986 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
990 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
999 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
1013 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
1016 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
1018 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
1053 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
1056 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
1059 *val = get_reg_val(id, vcpu->arch.dscr); in kvmppc_get_one_reg_hv()
1062 *val = get_reg_val(id, vcpu->arch.purr); in kvmppc_get_one_reg_hv()
1065 *val = get_reg_val(id, vcpu->arch.spurr); in kvmppc_get_one_reg_hv()
1068 *val = get_reg_val(id, vcpu->arch.amr); in kvmppc_get_one_reg_hv()
1071 *val = get_reg_val(id, vcpu->arch.uamor); in kvmppc_get_one_reg_hv()
1075 *val = get_reg_val(id, vcpu->arch.mmcr[i]); in kvmppc_get_one_reg_hv()
1079 *val = get_reg_val(id, vcpu->arch.pmc[i]); in kvmppc_get_one_reg_hv()
1083 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
1086 *val = get_reg_val(id, vcpu->arch.siar); in kvmppc_get_one_reg_hv()
1089 *val = get_reg_val(id, vcpu->arch.sdar); in kvmppc_get_one_reg_hv()
1092 *val = get_reg_val(id, vcpu->arch.sier); in kvmppc_get_one_reg_hv()
1095 *val = get_reg_val(id, vcpu->arch.iamr); in kvmppc_get_one_reg_hv()
1098 *val = get_reg_val(id, vcpu->arch.pspb); in kvmppc_get_one_reg_hv()
1101 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
1104 *val = get_reg_val(id, vcpu->arch.dawr); in kvmppc_get_one_reg_hv()
1107 *val = get_reg_val(id, vcpu->arch.dawrx); in kvmppc_get_one_reg_hv()
1110 *val = get_reg_val(id, vcpu->arch.ciabr); in kvmppc_get_one_reg_hv()
1113 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
1116 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
1119 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
1122 *val = get_reg_val(id, vcpu->arch.pid); in kvmppc_get_one_reg_hv()
1125 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
1128 *val = get_reg_val(id, vcpu->arch.wort); in kvmppc_get_one_reg_hv()
1131 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1132 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
1133 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1136 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1137 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
1138 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
1139 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1142 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1143 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
1144 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
1145 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1148 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
1152 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
1155 *val = get_reg_val(id, vcpu->arch.ppr); in kvmppc_get_one_reg_hv()
1159 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
1162 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
1165 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
1169 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
1177 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
1180 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
1187 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
1190 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
1193 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
1196 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
1199 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
1202 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
1205 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
1209 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
1214 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
1217 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
1221 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); in kvmppc_get_one_reg_hv()
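The long switch from 1053 to 1221 is the ONE_REG read accessor: each KVM_REG_PPC_* id is boxed with get_reg_val(), VPA-backed values are read under vpa_update_lock (1131–1154), and core-wide state is reached through vcpu->arch.vcore (1101, 1148, 1152, 1221). A miniature of the pattern with hypothetical register ids and state:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical register ids and state for illustration */
    enum reg_id { REG_DSCR, REG_PURR, REG_TB_OFFSET };

    struct vcore { int64_t tb_offset; };  /* per-core, shared by its vCPUs */
    struct vcpu  { uint64_t dscr, purr; struct vcore *vcore; };

    /* analogue of kvmppc_get_one_reg_hv(): one switch, one boxed result */
    static int get_one_reg(struct vcpu *v, enum reg_id id, uint64_t *val)
    {
            switch (id) {
            case REG_DSCR:      *val = v->dscr;             break;
            case REG_PURR:      *val = v->purr;             break;
            case REG_TB_OFFSET: *val = v->vcore->tb_offset; break; /* core-wide */
            default:            return -1;  /* -EINVAL in the real ioctl */
            }
            return 0;
    }

    int main(void)
    {
            struct vcore vc = { .tb_offset = 42 };
            struct vcpu v = { .dscr = 1, .purr = 2, .vcore = &vc };
            uint64_t val;
            get_one_reg(&v, REG_TB_OFFSET, &val);
            printf("tb_offset=%llu\n", (unsigned long long)val);
            return 0;
    }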
1245 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1248 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
1251 vcpu->arch.dscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1254 vcpu->arch.purr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1257 vcpu->arch.spurr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1260 vcpu->arch.amr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1263 vcpu->arch.uamor = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1267 vcpu->arch.mmcr[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1271 vcpu->arch.pmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1275 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1278 vcpu->arch.siar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1281 vcpu->arch.sdar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1284 vcpu->arch.sier = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1287 vcpu->arch.iamr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1290 vcpu->arch.pspb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1293 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1296 vcpu->arch.dawr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1299 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
1302 vcpu->arch.ciabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1304 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
1305 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ in kvmppc_set_one_reg_hv()
1308 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1311 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1314 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1317 vcpu->arch.pid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1320 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1323 vcpu->arch.wort = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1328 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
1329 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
1331 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
1337 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
1339 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
1346 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
1349 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
1353 vcpu->arch.vcore->tb_offset = in kvmppc_set_one_reg_hv()
1363 vcpu->arch.ppr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1367 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1370 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1373 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1377 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1385 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
1388 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
1394 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1397 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1400 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1403 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1406 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1409 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1412 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1416 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1421 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1424 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
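The set side mirrors the get side but sanitizes bits the guest must not control: DABRX drops DABRX_HYP (1248), DAWRX drops DAWRX_HYP (1299), and a CIABR whose privilege field selects hypervisor state is disabled outright (1304–1305). (One asymmetry visible in the listing itself: the TM VSCR is read from vr_tm.vscr at 1209 but written to vr.vscr at 1416.) A sketch of the masking pattern; the bit layouts below are hypothetical placeholders, not the real SPR encodings:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical bit layouts for illustration only */
    #define DAWRX_HYP        (1ull << 2)  /* match in hypervisor state */
    #define CIABR_PRIV       3ull         /* privilege field */
    #define CIABR_PRIV_HYPER 3ull

    struct dbg_regs { uint64_t dawrx, ciabr; };

    /* analogue of the sanitizing in kvmppc_set_one_reg_hv() */
    static void set_dawrx(struct dbg_regs *r, uint64_t val)
    {
            r->dawrx = val & ~DAWRX_HYP;    /* guest may not watch the host */
    }

    static void set_ciabr(struct dbg_regs *r, uint64_t val)
    {
            r->ciabr = val;
            if ((r->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
                    r->ciabr &= ~CIABR_PRIV;  /* hypervisor match: disable */
    }

    int main(void)
    {
            struct dbg_regs r;
            set_dawrx(&r, ~0ull);
            set_ciabr(&r, CIABR_PRIV_HYPER);
            printf("dawrx=%llx ciabr=%llx\n",
                   (unsigned long long)r.dawrx, (unsigned long long)r.ciabr);
            return 0;
    }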
1452 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
1465 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
1466 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
1467 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
1468 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
1469 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
1592 if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) in debugfs_vcpu_init()
1594 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); in debugfs_vcpu_init()
1595 if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir)) in debugfs_vcpu_init()
1597 vcpu->arch.debugfs_timings = in debugfs_vcpu_init()
1598 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, in debugfs_vcpu_init()
1629 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
1636 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
1638 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
1641 vcpu->arch.mmcr[0] = MMCR0_FC; in kvmppc_core_vcpu_create_hv()
1642 vcpu->arch.ctrl = CTRL_RUNLATCH; in kvmppc_core_vcpu_create_hv()
1645 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
1646 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
1647 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
1648 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
1652 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
1654 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
1657 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
1660 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
1661 kvm->arch.online_vcores++; in kvmppc_core_vcpu_create_hv()
1671 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
1672 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
1673 vcpu->arch.thread_cpu = -1; in kvmppc_core_vcpu_create_hv()
1675 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
1697 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
1698 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
1699 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
1700 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
1701 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
1717 if (now > vcpu->arch.dec_expires) { in kvmppc_set_timer()
1723 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC in kvmppc_set_timer()
1725 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), in kvmppc_set_timer()
1727 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
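kvmppc_set_timer() emulates the guest decrementer with an hrtimer: if the deadline has already passed (1717) the interrupt is delivered immediately; otherwise the remaining timebase ticks are converted to nanoseconds as (dec_expires - now) * NSEC_PER_SEC / tb_ticks_per_sec (1723) before arming the timer. A sketch of just the conversion, with a hypothetical tick rate (512 MHz is a typical POWER timebase frequency):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ull

    /* hypothetical timebase frequency for the sketch */
    static const uint64_t tb_ticks_per_sec = 512000000ull;

    /* analogue of kvmppc_set_timer(): ticks-until-deadline -> nanoseconds */
    static uint64_t dec_to_ns(uint64_t dec_expires, uint64_t now)
    {
            if (now > dec_expires)
                    return 0;       /* already expired: fire immediately */
            return (dec_expires - now) * NSEC_PER_SEC / tb_ticks_per_sec;
    }

    int main(void)
    {
            /* 512000 ticks at 512 MHz is one millisecond */
            printf("%llu ns\n", (unsigned long long)dec_to_ns(512000, 0));
            return 0;
    }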
1732 vcpu->arch.ceded = 0; in kvmppc_end_cede()
1733 if (vcpu->arch.timer_running) { in kvmppc_end_cede()
1734 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_end_cede()
1735 vcpu->arch.timer_running = 0; in kvmppc_end_cede()
1746 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
1748 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
1750 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
1751 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
1752 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
1753 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
1754 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
1756 list_del(&vcpu->arch.run_list); in kvmppc_remove_runnable()
1812 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
1813 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
1814 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
1816 cpu += vcpu->arch.ptid; in kvmppc_start_thread()
1818 vcpu->arch.thread_cpu = cpu; in kvmppc_start_thread()
2166 arch.run_list) { in prepare_threads()
2167 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
2168 vcpu->arch.ret = -EINTR; in prepare_threads()
2169 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
2170 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
2171 vcpu->arch.dtl.update_pending) in prepare_threads()
2172 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
2176 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
2221 arch.run_list) { in post_guest_process()
2223 if (now < vcpu->arch.dec_expires && in post_guest_process()
2230 if (vcpu->arch.trap) in post_guest_process()
2231 ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, in post_guest_process()
2232 vcpu->arch.run_task); in post_guest_process()
2234 vcpu->arch.ret = ret; in post_guest_process()
2235 vcpu->arch.trap = 0; in post_guest_process()
2237 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
2238 if (vcpu->arch.pending_exceptions) in post_guest_process()
2240 if (vcpu->arch.ceded) in post_guest_process()
2246 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
2262 struct kvm_vcpu, arch.run_list); in post_guest_process()
2263 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
2295 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
2312 arch.run_list) { in kvmppc_run_core()
2313 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
2315 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
2390 arch.run_list) { in kvmppc_run_core()
2394 if (!vcpu->arch.ptid) in kvmppc_run_core()
2396 active |= 1 << (thr + vcpu->arch.ptid); in kvmppc_run_core()
2505 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
2506 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_wait_for_exec()
2511 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
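Lines 2505–2511 are the classic wait-queue idiom: prepare_to_wait() on arch.cpu_run, re-test the condition (state still KVMPPC_VCPU_RUNNABLE), sleep, finish_wait(); the matching wake_up(&...cpu_run) calls appear throughout the run loop that follows (2625, 2672, among others). A pthread condition-variable sketch of the same handshake; the state names mirror KVMPPC_VCPU_* but everything else is hypothetical:

    #include <pthread.h>
    #include <stdio.h>

    enum vcpu_state { VCPU_NOTREADY, VCPU_RUNNABLE, VCPU_BUSY_IN_HOST };

    struct vcpu_wait {
            pthread_mutex_t lock;
            pthread_cond_t cpu_run;   /* plays the role of arch.cpu_run */
            enum vcpu_state state;
    };

    /* analogue of kvmppc_wait_for_exec(): sleep while still RUNNABLE,
     * i.e. until the vcore runner has run this vCPU and changed its state */
    static void wait_for_exec(struct vcpu_wait *v)
    {
            pthread_mutex_lock(&v->lock);
            while (v->state == VCPU_RUNNABLE)
                    pthread_cond_wait(&v->cpu_run, &v->lock);
            pthread_mutex_unlock(&v->lock);
    }

    /* the runner side: change the state and wake (cf. the wake_up calls) */
    static void finish_exec(struct vcpu_wait *v, enum vcpu_state next)
    {
            pthread_mutex_lock(&v->lock);
            v->state = next;
            pthread_cond_broadcast(&v->cpu_run);
            pthread_mutex_unlock(&v->lock);
    }

    int main(void)
    {
            struct vcpu_wait v = { PTHREAD_MUTEX_INITIALIZER,
                                   PTHREAD_COND_INITIALIZER, VCPU_RUNNABLE };
            finish_exec(&v, VCPU_BUSY_IN_HOST);
            wait_for_exec(&v);        /* returns at once: no longer RUNNABLE */
            puts("done");
            return 0;
    }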
2531 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_vcore_blocked()
2532 if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) { in kvmppc_vcore_blocked()
2562 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
2563 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
2569 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
2571 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
2572 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
2573 vcpu->arch.kvm_run = kvm_run; in kvmppc_run_vcpu()
2574 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
2575 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
2576 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
2577 list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); in kvmppc_run_vcpu()
2608 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
2618 arch.run_list) { in kvmppc_run_vcpu()
2620 if (signal_pending(v->arch.run_task)) { in kvmppc_run_vcpu()
2623 v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; in kvmppc_run_vcpu()
2624 v->arch.ret = -EINTR; in kvmppc_run_vcpu()
2625 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
2628 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
2631 list_for_each_entry(v, &vc->runnable_threads, arch.run_list) { in kvmppc_run_vcpu()
2632 if (!v->arch.pending_exceptions) in kvmppc_run_vcpu()
2633 n_ceded += v->arch.ceded; in kvmppc_run_vcpu()
2635 v->arch.ceded = 0; in kvmppc_run_vcpu()
2652 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
2661 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
2665 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
2671 struct kvm_vcpu, arch.run_list); in kvmppc_run_vcpu()
2672 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
2677 return vcpu->arch.ret; in kvmppc_run_vcpu()
2685 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
2698 atomic_inc(&vcpu->kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
2703 if (!vcpu->kvm->arch.hpte_setup_done) { in kvmppc_vcpu_run_hv()
2712 vcpu->arch.wqp = &vcpu->arch.vcore->wq; in kvmppc_vcpu_run_hv()
2713 vcpu->arch.pgdir = current->mm->pgd; in kvmppc_vcpu_run_hv()
2714 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
2720 !(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_vcpu_run_hv()
2728 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
2734 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
2735 atomic_dec(&vcpu->kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
2822 if (!dont || free->arch.rmap != dont->arch.rmap) { in kvmppc_core_free_memslot_hv()
2823 vfree(free->arch.rmap); in kvmppc_core_free_memslot_hv()
2824 free->arch.rmap = NULL; in kvmppc_core_free_memslot_hv()
2831 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); in kvmppc_core_create_memslot_hv()
2832 if (!slot->arch.rmap) in kvmppc_core_create_memslot_hv()
2876 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
2879 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
2882 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr()
2888 if (++cores_done >= kvm->arch.online_vcores) in kvmppc_update_lpcr()
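kvmppc_update_lpcr() (2876–2888) is a masked read-modify-write: only the bits covered by mask change in the VM-wide LPCR, and the new value is then pushed to every existing virtual core, stopping once online_vcores have been visited. A minimal sketch of the same update, with hypothetical container types:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_VCORES 4

    struct vcore_s { uint64_t lpcr; };
    struct kvm_s {
            uint64_t lpcr;
            int online_vcores;
            struct vcore_s *vcores[MAX_VCORES];
    };

    /* analogue of kvmppc_update_lpcr(): change only the bits in mask,
     * then push the new value to every live virtual core */
    static void update_lpcr(struct kvm_s *kvm, uint64_t lpcr, uint64_t mask)
    {
            if ((kvm->lpcr & mask) == lpcr)
                    return;                         /* nothing would change */
            kvm->lpcr = (kvm->lpcr & ~mask) | lpcr;
            int done = 0;
            for (int i = 0; i < MAX_VCORES; i++) {
                    struct vcore_s *vc = kvm->vcores[i];
                    if (!vc)
                            continue;
                    vc->lpcr = (vc->lpcr & ~mask) | lpcr;
                    if (++done >= kvm->online_vcores)
                            break;                  /* all live cores done */
            }
    }

    int main(void)
    {
            struct vcore_s vc = { .lpcr = 0xff };
            struct kvm_s kvm = { .lpcr = 0xff, .online_vcores = 1,
                                 .vcores = { &vc } };
            update_lpcr(&kvm, 0x01, 0x0f);
            printf("kvm=%llx vc=%llx\n",
                   (unsigned long long)kvm.lpcr, (unsigned long long)vc.lpcr);
            return 0;
    }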
2910 if (kvm->arch.hpte_setup_done) in kvmppc_hv_setup_htab_rma()
2914 if (!kvm->arch.hpt_virt) { in kvmppc_hv_setup_htab_rma()
2951 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvmppc_hv_setup_htab_rma()
2963 kvm->arch.hpte_setup_done = 1; in kvmppc_hv_setup_htab_rma()
2986 kvm->arch.lpid = lpid; in kvmppc_core_init_vm_hv()
2993 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_core_init_vm_hv()
2996 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, in kvmppc_core_init_vm_hv()
2997 sizeof(kvm->arch.enabled_hcalls)); in kvmppc_core_init_vm_hv()
2999 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); in kvmppc_core_init_vm_hv()
3002 kvm->arch.host_lpid = mfspr(SPRN_LPID); in kvmppc_core_init_vm_hv()
3003 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
3007 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | in kvmppc_core_init_vm_hv()
3012 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
3024 kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); in kvmppc_core_init_vm_hv()
3025 if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) in kvmppc_core_init_vm_hv()
3036 kfree(kvm->arch.vcores[i]); in kvmppc_free_vcores()
3037 kvm->arch.online_vcores = 0; in kvmppc_free_vcores()
3042 debugfs_remove_recursive(kvm->arch.debugfs_dir); in kvmppc_core_destroy_vm_hv()