Lines matching refs:arch (source lines that reference an arch field such as vcpu->arch or kvm->arch; each entry is shown as <line number> <source line> in <containing function>()).
73 container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
181 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
271 return vcpu->arch.apic_base; in kvm_get_apic_base()
277 u64 old_state = vcpu->arch.apic_base & in kvm_set_apic_base()
360 if (!vcpu->arch.exception.pending) { in kvm_multiple_exception()
364 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
365 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
366 vcpu->arch.exception.nr = nr; in kvm_multiple_exception()
367 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
368 vcpu->arch.exception.reinject = reinject; in kvm_multiple_exception()
373 prev_nr = vcpu->arch.exception.nr; in kvm_multiple_exception()
384 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
385 vcpu->arch.exception.has_error_code = true; in kvm_multiple_exception()
386 vcpu->arch.exception.nr = DF_VECTOR; in kvm_multiple_exception()
387 vcpu->arch.exception.error_code = 0; in kvm_multiple_exception()
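The kvm_multiple_exception() entries above show the pending-exception bookkeeping, including the path that converts a second exception into DF_VECTOR. A minimal userspace sketch of the x86 promotion rule those lines implement, with an illustrative classifier (names and helpers here are not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative exception classes, mirroring the SDM double-fault rules. */
enum exc_class { EXC_BENIGN, EXC_CONTRIBUTORY, EXC_PF };

/* #PF is vector 14; #DE(0), #TS(10), #NP(11), #SS(12), #GP(13) are contributory. */
static enum exc_class classify(int vector)
{
	if (vector == 14)
		return EXC_PF;
	if (vector == 0 || (vector >= 10 && vector <= 13))
		return EXC_CONTRIBUTORY;
	return EXC_BENIGN;
}

/* True when a pending exception plus an incoming one must become #DF. */
static bool promotes_to_double_fault(int pending, int incoming)
{
	enum exc_class c1 = classify(pending), c2 = classify(incoming);

	return (c1 == EXC_CONTRIBUTORY && c2 == EXC_CONTRIBUTORY) ||
	       (c1 == EXC_PF && c2 != EXC_BENIGN);
}

int main(void)
{
	/* #GP(13) arriving while #PF(14) is pending -> double fault. */
	printf("%d\n", promotes_to_double_fault(14, 13));
	return 0;
}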
419 vcpu->arch.cr2 = fault->address; in kvm_inject_page_fault()
427 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); in kvm_propagate_fault()
429 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in kvm_propagate_fault()
436 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
503 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
527 (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { in load_pdptrs()
536 (unsigned long *)&vcpu->arch.regs_avail); in load_pdptrs()
538 (unsigned long *)&vcpu->arch.regs_dirty); in load_pdptrs()
547 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; in pdptrs_changed()
557 (unsigned long *)&vcpu->arch.regs_avail)) in pdptrs_changed()
566 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; in pdptrs_changed()
595 if ((vcpu->arch.efer & EFER_LME)) { in kvm_set_cr0()
605 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr0()
637 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xcr0()
645 if (vcpu->arch.xcr0 != host_xcr0) in kvm_put_guest_xcr0()
654 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
670 valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; in __kvm_set_xcr()
683 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
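The __kvm_set_xcr() entries above validate a guest-requested XCR0 before storing it in vcpu->arch.xcr0. A compact sketch of the architectural constraints being enforced, using standard XCR0 bit positions; valid_bits stands in for whatever guest_supported_xcr0 permits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XFEAT_FP   (1ULL << 0)   /* x87 state, must always remain set */
#define XFEAT_SSE  (1ULL << 1)
#define XFEAT_YMM  (1ULL << 2)   /* AVX state, depends on SSE */

/* Returns true if the requested XCR0 value is acceptable. */
static bool xcr0_valid(uint64_t xcr0, uint64_t valid_bits)
{
	if (!(xcr0 & XFEAT_FP))
		return false;                  /* FP can never be cleared */
	if ((xcr0 & XFEAT_YMM) && !(xcr0 & XFEAT_SSE))
		return false;                  /* AVX requires SSE */
	return (xcr0 & ~valid_bits) == 0;      /* no unsupported features */
}

int main(void)
{
	uint64_t valid = XFEAT_FP | XFEAT_SSE | XFEAT_YMM;

	printf("%d %d\n",
	       xcr0_valid(XFEAT_FP | XFEAT_SSE | XFEAT_YMM, valid),
	       xcr0_valid(XFEAT_FP | XFEAT_YMM, valid));
	return 0;
}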
727 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr4()
770 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) in kvm_set_cr3()
773 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
774 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in kvm_set_cr3()
787 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
797 return vcpu->arch.cr8; in kvm_get_cr8()
807 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
808 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_update_dr0123()
815 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); in kvm_update_dr6()
823 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
825 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
827 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
829 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
845 vcpu->arch.db[dr] = val; in __kvm_set_dr()
847 vcpu->arch.eff_db[dr] = val; in __kvm_set_dr()
854 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in __kvm_set_dr()
862 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in __kvm_set_dr()
884 *val = vcpu->arch.db[dr]; in kvm_get_dr()
890 *val = vcpu->arch.dr6; in kvm_get_dr()
897 *val = vcpu->arch.dr7; in kvm_get_dr()
982 u64 old_efer = vcpu->arch.efer; in set_efer()
988 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
992 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1138 if (kvm->arch.kvmclock_offset) { in kvm_write_wall_clock()
1139 struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); in kvm_write_wall_clock()
1209 return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, in nsec_to_cycles()
1210 vcpu->arch.virtual_tsc_shift); in nsec_to_cycles()
1231 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
1232 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
1233 vcpu->arch.virtual_tsc_khz = this_tsc_khz; in kvm_set_tsc_khz()
1252 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
1253 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
1254 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
1255 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
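The compute_guest_tsc() and nsec_to_cycles() entries above convert elapsed nanoseconds into TSC ticks with a precomputed (virtual_tsc_mult, virtual_tsc_shift) pair. A condensed userspace model of that fixed-point scaling, assuming __int128 is available in place of the kernel's open-coded wide multiply:

#include <stdint.h>
#include <stdio.h>

/*
 * Model of pvclock-style scaling: the value is pre-shifted, then multiplied
 * by a 32-bit fixed-point fraction, keeping the upper 64 bits of the
 * resulting 96-bit product.
 */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
	/* mul_frac 0x80000000 is 0.5 in 32.32 fixed point: 1000 scales to 500. */
	printf("%llu\n", (unsigned long long)scale_delta(1000, 0x80000000u, 0));
	return 0;
}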
1263 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
1290 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; in update_ia32_tsc_adjust_msr()
1303 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_write_tsc()
1306 elapsed = ns - kvm->arch.last_tsc_nsec; in kvm_write_tsc()
1308 if (vcpu->arch.virtual_tsc_khz) { in kvm_write_tsc()
1312 usdiff = data - kvm->arch.last_tsc_write; in kvm_write_tsc()
1314 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; in kvm_write_tsc()
1329 : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); in kvm_write_tsc()
1354 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_write_tsc()
1356 offset = kvm->arch.cur_tsc_offset; in kvm_write_tsc()
1365 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_write_tsc()
1376 kvm->arch.cur_tsc_generation++; in kvm_write_tsc()
1377 kvm->arch.cur_tsc_nsec = ns; in kvm_write_tsc()
1378 kvm->arch.cur_tsc_write = data; in kvm_write_tsc()
1379 kvm->arch.cur_tsc_offset = offset; in kvm_write_tsc()
1382 kvm->arch.cur_tsc_generation, data); in kvm_write_tsc()
1389 kvm->arch.last_tsc_nsec = ns; in kvm_write_tsc()
1390 kvm->arch.last_tsc_write = data; in kvm_write_tsc()
1391 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_write_tsc()
1393 vcpu->arch.last_guest_tsc = data; in kvm_write_tsc()
1396 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_write_tsc()
1397 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_write_tsc()
1398 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_write_tsc()
1403 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_write_tsc()
1405 spin_lock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_write_tsc()
1407 kvm->arch.nr_vcpus_matched_tsc = 0; in kvm_write_tsc()
1409 kvm->arch.nr_vcpus_matched_tsc++; in kvm_write_tsc()
1413 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_write_tsc()
1538 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy()
1571 struct kvm_arch *ka = &kvm->arch; in kvm_gen_update_masterclock()
1592 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update()
1593 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update()
1662 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
1743 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_update_fn()
1757 schedule_delayed_work(&kvm->arch.kvmclock_update_work, in kvm_gen_kvmclock_update()
1768 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_sync_fn()
1770 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
1771 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvmclock_sync_fn()
1856 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; in set_msr_mtrr()
1862 vcpu->arch.mtrr_state.def_type = data; in set_msr_mtrr()
1863 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; in set_msr_mtrr()
1871 vcpu->arch.pat = data; in set_msr_mtrr()
1880 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; in set_msr_mtrr()
1883 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; in set_msr_mtrr()
1893 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
1898 vcpu->arch.mcg_status = data; in set_msr_mce()
1905 vcpu->arch.mcg_ctl = data; in set_msr_mce()
1919 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
1931 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 in xen_hvm_config()
1932 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; in xen_hvm_config()
1933 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 in xen_hvm_config()
1934 : kvm->arch.xen_hvm_config.blob_size_32; in xen_hvm_config()
1960 return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; in kvm_hv_hypercall_enabled()
1984 kvm->arch.hv_guest_os_id = data; in set_msr_hyperv_pw()
1986 if (!kvm->arch.hv_guest_os_id) in set_msr_hyperv_pw()
1987 kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; in set_msr_hyperv_pw()
1995 if (!kvm->arch.hv_guest_os_id) in set_msr_hyperv_pw()
1998 kvm->arch.hv_hypercall = data; in set_msr_hyperv_pw()
2009 kvm->arch.hv_hypercall = data; in set_msr_hyperv_pw()
2017 kvm->arch.hv_tsc_page = data; in set_msr_hyperv_pw()
2043 vcpu->arch.hv_vapic = data; in set_msr_hyperv()
2054 vcpu->arch.hv_vapic = data; in set_msr_hyperv()
2083 vcpu->arch.apf.msr_val = data; in kvm_pv_enable_async_pf()
2091 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
2095 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
2102 vcpu->arch.pv_time_enabled = false; in kvmclock_reset()
2109 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in accumulate_steal_time()
2112 delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; in accumulate_steal_time()
2113 vcpu->arch.st.last_steal = current->sched_info.run_delay; in accumulate_steal_time()
2114 vcpu->arch.st.accum_steal = delta; in accumulate_steal_time()
2121 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
2124 if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2125 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) in record_steal_time()
2128 vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; in record_steal_time()
2129 vcpu->arch.st.steal.version += 2; in record_steal_time()
2130 vcpu->arch.st.accum_steal = 0; in record_steal_time()
2132 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2133 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); in record_steal_time()
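The record_steal_time() entries above add the accumulated delta to st.steal and bump st.steal.version by two before republishing the structure; the guest side (not shown in this listing) reads it with a seqlock-style retry loop. A minimal single-threaded model of that reader, with a hypothetical struct and compiler barriers standing in for real memory barriers:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the shared steal-time record. */
struct steal_time {
	volatile uint32_t version;   /* even when stable, bumped on update */
	volatile uint64_t steal;     /* accumulated steal time in ns */
};

/* Seqlock-style read: retry while an update is in flight or raced. */
static uint64_t read_steal(struct steal_time *st)
{
	uint32_t version;
	uint64_t steal;

	do {
		version = st->version;
		__asm__ __volatile__("" ::: "memory");  /* stands in for rmb() */
		steal = st->steal;
		__asm__ __volatile__("" ::: "memory");
	} while ((version & 1) || version != st->version);

	return steal;
}

int main(void)
{
	struct steal_time st = { .version = 4, .steal = 123456 };

	printf("%llu\n", (unsigned long long)read_steal(&st));
	return 0;
}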
2195 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
2198 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
2202 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
2206 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
2212 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_set_msr_common()
2226 vcpu->arch.time = data; in kvm_set_msr_common()
2236 &vcpu->arch.pv_time, data & ~1ULL, in kvm_set_msr_common()
2238 vcpu->arch.pv_time_enabled = false; in kvm_set_msr_common()
2240 vcpu->arch.pv_time_enabled = true; in kvm_set_msr_common()
2256 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, in kvm_set_msr_common()
2261 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
2345 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
2350 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
2353 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
2385 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; in get_msr_mtrr()
2391 *pdata = vcpu->arch.mtrr_state.def_type + in get_msr_mtrr()
2392 (vcpu->arch.mtrr_state.enabled << 10); in get_msr_mtrr()
2400 *pdata = vcpu->arch.pat; in get_msr_mtrr()
2409 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; in get_msr_mtrr()
2412 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; in get_msr_mtrr()
2422 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
2431 data = vcpu->arch.mcg_cap; in get_msr_mce()
2436 data = vcpu->arch.mcg_ctl; in get_msr_mce()
2439 data = vcpu->arch.mcg_status; in get_msr_mce()
2445 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
2461 data = kvm->arch.hv_guest_os_id; in get_msr_hyperv_pw()
2464 data = kvm->arch.hv_hypercall; in get_msr_hyperv_pw()
2468 div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); in get_msr_hyperv_pw()
2472 data = kvm->arch.hv_tsc_page; in get_msr_hyperv_pw()
2506 data = vcpu->arch.hv_vapic; in get_msr_hyperv()
2588 data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
2591 data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
2600 data = vcpu->arch.efer; in kvm_get_msr_common()
2604 data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
2608 data = vcpu->arch.time; in kvm_get_msr_common()
2611 data = vcpu->arch.apf.msr_val; in kvm_get_msr_common()
2614 data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
2617 data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
2664 data = vcpu->arch.osvw.length; in kvm_get_msr_common()
2669 data = vcpu->arch.osvw.status; in kvm_get_msr_common()
2927 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
2936 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
2937 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
2938 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
2943 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
2944 native_read_tsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
2949 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
2951 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
2957 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
2971 vcpu->arch.last_host_tsc = native_read_tsc(); in kvm_arch_vcpu_put()
2978 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); in kvm_vcpu_ioctl_get_lapic()
3018 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
3034 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
3037 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
3040 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
3048 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
3050 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
3059 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
3069 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
3078 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
3098 vcpu->arch.exception.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
3099 !kvm_exception_is_soft(vcpu->arch.exception.nr); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3100 events->exception.nr = vcpu->arch.exception.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3101 events->exception.has_error_code = vcpu->arch.exception.has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3103 events->exception.error_code = vcpu->arch.exception.error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3106 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3107 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3111 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3112 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3132 vcpu->arch.exception.pending = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3133 vcpu->arch.exception.nr = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3134 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3135 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3137 vcpu->arch.interrupt.pending = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3138 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3139 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3144 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3146 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3151 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3163 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
3166 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
3182 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
3184 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
3186 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
3196 struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave; in fill_xsave()
3232 struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave; in load_xsave()
3277 &vcpu->arch.guest_fpu.state->fxsave, in kvm_vcpu_ioctl_x86_get_xsave()
3302 memcpy(&vcpu->arch.guest_fpu.state->fxsave, in kvm_vcpu_ioctl_x86_set_xsave()
3319 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
3353 if (!vcpu->arch.pv_time_enabled) in kvm_set_guest_paused()
3355 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
3377 if (!vcpu->arch.apic) in kvm_arch_vcpu_ioctl()
3395 if (!vcpu->arch.apic) in kvm_arch_vcpu_ioctl()
3613 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
3646 kvm->arch.ept_identity_map_addr = ident_addr; in kvm_vm_ioctl_set_identity_map_addr()
3659 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; in kvm_vm_ioctl_set_nr_mmu_pages()
3667 return kvm->arch.n_max_mmu_pages; in kvm_vm_ioctl_get_nr_mmu_pages()
3731 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit()
3732 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); in kvm_vm_ioctl_get_pit()
3733 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit()
3740 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit()
3741 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); in kvm_vm_ioctl_set_pit()
3744 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit()
3752 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
3753 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, in kvm_vm_ioctl_get_pit2()
3755 ps->flags = kvm->arch.vpit->pit_state.flags; in kvm_vm_ioctl_get_pit2()
3756 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
3766 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
3767 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
3771 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, in kvm_vm_ioctl_set_pit2()
3772 sizeof(kvm->arch.vpit->pit_state.channels)); in kvm_vm_ioctl_set_pit2()
3773 kvm->arch.vpit->pit_state.flags = ps->flags; in kvm_vm_ioctl_set_pit2()
3775 kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start); in kvm_vm_ioctl_set_pit2()
3776 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
3783 if (!kvm->arch.vpit) in kvm_vm_ioctl_reinject()
3785 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_reinject()
3786 kvm->arch.vpit->pit_state.reinject = control->pit_reinject; in kvm_vm_ioctl_reinject()
3787 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_reinject()
3890 if (kvm->arch.vpic) in kvm_arch_vm_ioctl()
3914 kvm->arch.vpic = vpic; in kvm_arch_vm_ioctl()
3940 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
3943 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); in kvm_arch_vm_ioctl()
3944 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
3999 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
4015 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
4022 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
4038 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
4053 if (copy_from_user(&kvm->arch.xen_hvm_config, argp, in kvm_arch_vm_ioctl()
4057 if (kvm->arch.xen_hvm_config.flags) in kvm_arch_vm_ioctl()
4080 kvm->arch.kvmclock_offset = delta; in kvm_arch_vm_ioctl()
4090 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; in kvm_arch_vm_ioctl()
4151 if (!(vcpu->arch.apic && in vcpu_mmio_write()
4152 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
4171 if (!(vcpu->arch.apic && in vcpu_mmio_read()
4172 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
4207 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa()
4216 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
4224 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
4232 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
4239 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
4250 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, in kvm_read_guest_virt_helper()
4284 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
4330 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, in kvm_write_guest_virt_system()
4362 && !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
4363 vcpu->arch.access, access)) { in vcpu_mmio_gva_to_gpa()
4364 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
4370 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
4668 if (vcpu->arch.pio.in) in kernel_pio()
4669 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, in kernel_pio()
4670 vcpu->arch.pio.size, pd); in kernel_pio()
4673 vcpu->arch.pio.port, vcpu->arch.pio.size, in kernel_pio()
4682 vcpu->arch.pio.port = port; in emulator_pio_in_out()
4683 vcpu->arch.pio.in = in; in emulator_pio_in_out()
4684 vcpu->arch.pio.count = count; in emulator_pio_in_out()
4685 vcpu->arch.pio.size = size; in emulator_pio_in_out()
4687 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { in emulator_pio_in_out()
4688 vcpu->arch.pio.count = 0; in emulator_pio_in_out()
4709 if (vcpu->arch.pio.count) in emulator_pio_in_emulated()
4715 memcpy(val, vcpu->arch.pio_data, size * count); in emulator_pio_in_emulated()
4716 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); in emulator_pio_in_emulated()
4717 vcpu->arch.pio.count = 0; in emulator_pio_in_emulated()
4730 memcpy(vcpu->arch.pio_data, val, size * count); in emulator_pio_out_emulated()
4731 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); in emulator_pio_out_emulated()
4753 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
4754 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
4757 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
4805 value = vcpu->arch.cr2; in emulator_get_cr()
4834 vcpu->arch.cr2 = val; in emulator_set_cr()
4979 emul_to_vcpu(ctxt)->arch.halt_request = 1; in emulator_halt()
5085 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in inject_emulated_exception()
5099 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
5114 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
5119 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
5137 vcpu->arch.nmi_pending = 0; in kvm_inject_realmode_interrupt()
5139 vcpu->arch.interrupt.pending = false; in kvm_inject_realmode_interrupt()
5172 if (!vcpu->arch.mmu.direct_map) { in reexecute_instruction()
5205 if (vcpu->arch.mmu.direct_map) { in reexecute_instruction()
5209 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
5239 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
5240 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
5255 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
5266 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
5267 vcpu->arch.last_retry_addr = cr2; in retry_instruction()
5269 if (!vcpu->arch.mmu.direct_map) in retry_instruction()
5309 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | in kvm_vcpu_check_singlestep()
5311 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; in kvm_vcpu_check_singlestep()
5312 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_check_singlestep()
5316 vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; in kvm_vcpu_check_singlestep()
5322 vcpu->arch.dr6 &= ~15; in kvm_vcpu_check_singlestep()
5323 vcpu->arch.dr6 |= DR6_BS | DR6_RTM; in kvm_vcpu_check_singlestep()
5332 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_breakpoint()
5336 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_breakpoint()
5337 vcpu->arch.eff_db); in kvm_vcpu_check_breakpoint()
5340 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; in kvm_vcpu_check_breakpoint()
5341 kvm_run->debug.arch.pc = eip; in kvm_vcpu_check_breakpoint()
5342 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_check_breakpoint()
5349 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_breakpoint()
5353 vcpu->arch.dr7, in kvm_vcpu_check_breakpoint()
5354 vcpu->arch.db); in kvm_vcpu_check_breakpoint()
5357 vcpu->arch.dr6 &= ~15; in kvm_vcpu_check_breakpoint()
5358 vcpu->arch.dr6 |= dr6 | DR6_RTM; in kvm_vcpu_check_breakpoint()
5375 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
5377 bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
5383 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
5433 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
5434 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
5456 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
5457 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
5459 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
5462 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
5469 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
5478 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
5495 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
5504 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, in kvm_fast_pio_out()
5507 vcpu->arch.pio.count = 0; in kvm_fast_pio_out()
5869 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_vcpu_halt()
6062 if (!vcpu->arch.apic) in update_cr8_intercept()
6065 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
6083 if (vcpu->arch.exception.pending) { in inject_pending_event()
6084 trace_kvm_inj_exception(vcpu->arch.exception.nr, in inject_pending_event()
6085 vcpu->arch.exception.has_error_code, in inject_pending_event()
6086 vcpu->arch.exception.error_code); in inject_pending_event()
6088 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) in inject_pending_event()
6092 if (vcpu->arch.exception.nr == DB_VECTOR && in inject_pending_event()
6093 (vcpu->arch.dr7 & DR7_GD)) { in inject_pending_event()
6094 vcpu->arch.dr7 &= ~DR7_GD; in inject_pending_event()
6098 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, in inject_pending_event()
6099 vcpu->arch.exception.has_error_code, in inject_pending_event()
6100 vcpu->arch.exception.error_code, in inject_pending_event()
6101 vcpu->arch.exception.reinject); in inject_pending_event()
6105 if (vcpu->arch.nmi_injected) { in inject_pending_event()
6110 if (vcpu->arch.interrupt.pending) { in inject_pending_event()
6122 if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { in inject_pending_event()
6123 --vcpu->arch.nmi_pending; in inject_pending_event()
6124 vcpu->arch.nmi_injected = true; in inject_pending_event()
6157 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
6160 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
6161 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
6170 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_scan_ioapic()
6267 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
6287 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
6296 if (vcpu->arch.nmi_pending) in vcpu_enter_guest()
6354 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
6356 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
6357 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
6358 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
6359 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
6360 set_debugreg(vcpu->arch.dr6, 6); in vcpu_enter_guest()
6361 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
6374 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
6380 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
6393 vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, in vcpu_enter_guest()
6428 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
6431 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
6439 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
6456 switch(vcpu->arch.mp_state) { in vcpu_block()
6458 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
6459 vcpu->arch.mp_state = in vcpu_block()
6462 vcpu->arch.apf.halted = false; in vcpu_block()
6481 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in vcpu_run()
6482 !vcpu->arch.apf.halted) in vcpu_run()
6533 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
6597 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
6613 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
6629 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
6630 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
6631 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
6636 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
6650 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in kvm_arch_vcpu_ioctl_get_regs()
6658 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_ioctl_get_regs()
6659 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in kvm_arch_vcpu_ioctl_get_regs()
6688 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in kvm_arch_vcpu_ioctl_set_regs()
6689 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in kvm_arch_vcpu_ioctl_set_regs()
6713 vcpu->arch.exception.pending = false; in kvm_arch_vcpu_ioctl_set_regs()
6753 sregs->cr2 = vcpu->arch.cr2; in kvm_arch_vcpu_ioctl_get_sregs()
6757 sregs->efer = vcpu->arch.efer; in kvm_arch_vcpu_ioctl_get_sregs()
6762 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) in kvm_arch_vcpu_ioctl_get_sregs()
6763 set_bit(vcpu->arch.interrupt.nr, in kvm_arch_vcpu_ioctl_get_sregs()
6773 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && in kvm_arch_vcpu_ioctl_get_mpstate()
6774 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
6777 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
6790 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
6791 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
6793 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
6801 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in kvm_task_switch()
6837 vcpu->arch.cr2 = sregs->cr2; in kvm_arch_vcpu_ioctl_set_sregs()
6839 vcpu->arch.cr3 = sregs->cr3; in kvm_arch_vcpu_ioctl_set_sregs()
6840 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in kvm_arch_vcpu_ioctl_set_sregs()
6844 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in kvm_arch_vcpu_ioctl_set_sregs()
6852 vcpu->arch.cr0 = sregs->cr0; in kvm_arch_vcpu_ioctl_set_sregs()
6861 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in kvm_arch_vcpu_ioctl_set_sregs()
6893 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_set_sregs()
6908 if (vcpu->arch.exception.pending) in kvm_arch_vcpu_ioctl_set_guest_debug()
6928 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
6929 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
6932 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
6937 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + in kvm_arch_vcpu_ioctl_set_guest_debug()
6979 &vcpu->arch.guest_fpu.state->fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
6996 &vcpu->arch.guest_fpu.state->fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
7014 err = fpu_alloc(&vcpu->arch.guest_fpu); in fx_init()
7018 fpu_finit(&vcpu->arch.guest_fpu); in fx_init()
7020 vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv = in fx_init()
7026 vcpu->arch.xcr0 = XSTATE_FP; in fx_init()
7028 vcpu->arch.cr0 |= X86_CR0_ET; in fx_init()
7036 fpu_free(&vcpu->arch.guest_fpu); in fx_free()
7051 fpu_restore_checking(&vcpu->arch.guest_fpu); in kvm_load_guest_fpu()
7061 fpu_save_init(&vcpu->arch.guest_fpu); in kvm_put_guest_fpu()
7064 if (!vcpu->arch.eager_fpu) in kvm_put_guest_fpu()
7074 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_free()
7103 vcpu->arch.mtrr_state.have_fixed = 1; in kvm_arch_vcpu_setup()
7127 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvm_arch_vcpu_postcreate()
7134 vcpu->arch.apf.msr_val = 0; in kvm_arch_vcpu_destroy()
7147 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
7148 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
7149 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
7153 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
7155 vcpu->arch.dr6 = DR6_INIT; in kvm_vcpu_reset()
7157 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
7160 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
7163 vcpu->arch.apf.msr_val = 0; in kvm_vcpu_reset()
7164 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
7170 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
7174 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
7175 vcpu->arch.regs_avail = ~0; in kvm_vcpu_reset()
7176 vcpu->arch.regs_dirty = ~0; in kvm_vcpu_reset()
7213 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
7215 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
7216 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
7264 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
7265 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
7275 kvm->arch.last_tsc_nsec = 0; in kvm_arch_hardware_enable()
7276 kvm->arch.last_tsc_write = 0; in kvm_arch_hardware_enable()
7313 return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); in kvm_vcpu_compatible()
7327 vcpu->arch.pv.pv_unhalted = false; in kvm_arch_vcpu_init()
7328 vcpu->arch.emulate_ctxt.ops = &emulate_ops; in kvm_arch_vcpu_init()
7330 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_init()
7332 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_init()
7339 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_init()
7354 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, in kvm_arch_vcpu_init()
7356 if (!vcpu->arch.mce_banks) { in kvm_arch_vcpu_init()
7360 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_init()
7362 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { in kvm_arch_vcpu_init()
7371 vcpu->arch.ia32_tsc_adjust_msr = 0x0; in kvm_arch_vcpu_init()
7372 vcpu->arch.pv_time_enabled = false; in kvm_arch_vcpu_init()
7374 vcpu->arch.guest_supported_xcr0 = 0; in kvm_arch_vcpu_init()
7375 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; in kvm_arch_vcpu_init()
7377 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_init()
7384 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_init()
7386 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_init()
7392 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_init()
7402 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_uninit()
7407 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_uninit()
7422 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); in kvm_arch_init_vm()
7423 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); in kvm_arch_init_vm()
7424 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); in kvm_arch_init_vm()
7425 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); in kvm_arch_init_vm()
7426 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
7429 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
7432 &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
7434 raw_spin_lock_init(&kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
7435 mutex_init(&kvm->arch.apic_map_lock); in kvm_arch_init_vm()
7436 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); in kvm_arch_init_vm()
7440 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); in kvm_arch_init_vm()
7441 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); in kvm_arch_init_vm()
7480 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); in kvm_arch_sync_events()
7481 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); in kvm_arch_sync_events()
7506 kfree(kvm->arch.vpic); in kvm_arch_destroy_vm()
7507 kfree(kvm->arch.vioapic); in kvm_arch_destroy_vm()
7509 kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); in kvm_arch_destroy_vm()
7518 if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { in kvm_arch_free_memslot()
7519 kvfree(free->arch.rmap[i]); in kvm_arch_free_memslot()
7520 free->arch.rmap[i] = NULL; in kvm_arch_free_memslot()
7525 if (!dont || free->arch.lpage_info[i - 1] != in kvm_arch_free_memslot()
7526 dont->arch.lpage_info[i - 1]) { in kvm_arch_free_memslot()
7527 kvfree(free->arch.lpage_info[i - 1]); in kvm_arch_free_memslot()
7528 free->arch.lpage_info[i - 1] = NULL; in kvm_arch_free_memslot()
7546 slot->arch.rmap[i] = in kvm_arch_create_memslot()
7547 kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); in kvm_arch_create_memslot()
7548 if (!slot->arch.rmap[i]) in kvm_arch_create_memslot()
7553 slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * in kvm_arch_create_memslot()
7554 sizeof(*slot->arch.lpage_info[i - 1])); in kvm_arch_create_memslot()
7555 if (!slot->arch.lpage_info[i - 1]) in kvm_arch_create_memslot()
7559 slot->arch.lpage_info[i - 1][0].write_count = 1; in kvm_arch_create_memslot()
7561 slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; in kvm_arch_create_memslot()
7573 slot->arch.lpage_info[i - 1][j].write_count = 1; in kvm_arch_create_memslot()
7581 kvfree(slot->arch.rmap[i]); in kvm_arch_create_memslot()
7582 slot->arch.rmap[i] = NULL; in kvm_arch_create_memslot()
7586 kvfree(slot->arch.lpage_info[i - 1]); in kvm_arch_create_memslot()
7587 slot->arch.lpage_info[i - 1] = NULL; in kvm_arch_create_memslot()
7699 if (!kvm->arch.n_requested_mmu_pages) in kvm_arch_commit_memory_region()
7753 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_arch_vcpu_runnable()
7754 !vcpu->arch.apf.halted) in kvm_arch_vcpu_runnable()
7757 || vcpu->arch.pv.pv_unhalted in kvm_arch_vcpu_runnable()
7758 || atomic_read(&vcpu->arch.nmi_queued) || in kvm_arch_vcpu_runnable()
7802 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
7818 if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || in kvm_arch_async_page_ready()
7826 if (!vcpu->arch.mmu.direct_map && in kvm_arch_async_page_ready()
7827 work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) in kvm_arch_async_page_ready()
7830 vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); in kvm_arch_async_page_ready()
7847 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
7850 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
7859 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
7860 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
7868 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
7877 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
7880 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
7882 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
7889 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
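The apf.gfns[] entries above (kvm_add_async_pf_gfn, kvm_async_pf_gfn_slot, kvm_del_async_pf_gfn) implement a small open-addressed hash of outstanding async-page-fault GFNs: ~0 marks an empty slot, insertion uses linear probing, and deletion back-shifts later probe-chain entries instead of leaving tombstones. A self-contained userspace sketch of the same scheme, with an arbitrary table size and a deliberately trivial hash for the collision demo (the kernel hashes the gfn properly before masking):

#include <stdint.h>
#include <stdio.h>

#define NSLOTS 64U                     /* power of two, illustrative size */
#define EMPTY  (~0ULL)

static uint64_t gfns[NSLOTS];

static uint32_t hash_fn(uint64_t gfn)
{
	return (uint32_t)gfn & (NSLOTS - 1);   /* simplified for illustration */
}

static void table_init(void)
{
	for (uint32_t i = 0; i < NSLOTS; i++)
		gfns[i] = EMPTY;
}

static void table_add(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);

	while (gfns[key] != EMPTY)             /* linear probing */
		key = (key + 1) & (NSLOTS - 1);
	gfns[key] = gfn;
}

static uint32_t table_slot(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);

	for (uint32_t i = 0; i < NSLOTS &&
	     gfns[key] != gfn && gfns[key] != EMPTY; i++)
		key = (key + 1) & (NSLOTS - 1);
	return key;
}

/* Delete by back-shifting so lookups never need tombstones. */
static void table_del(uint64_t gfn)
{
	uint32_t i, j, k;

	i = j = table_slot(gfn);
	while (1) {
		gfns[i] = EMPTY;
		do {
			j = (j + 1) & (NSLOTS - 1);
			if (gfns[j] == EMPTY)
				return;
			k = hash_fn(gfns[j]);
			/* skip entries whose home slot k lies cyclically in (i, j] */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		gfns[i] = gfns[j];             /* move entry back into the hole */
		i = j;
	}
}

int main(void)
{
	table_init();
	table_add(0x1000);
	table_add(0x1000 + NSLOTS);            /* collides with the first entry */
	table_del(0x1000);
	/* the displaced entry must still be reachable after the deletion */
	printf("%d\n", gfns[table_slot(0x1000 + NSLOTS)] == 0x1000 + NSLOTS);
	return 0;
}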
7897 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, in apf_put_user()
7906 trace_kvm_async_pf_not_present(work->arch.token, work->gva); in kvm_arch_async_page_not_present()
7907 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
7909 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || in kvm_arch_async_page_not_present()
7910 (vcpu->arch.apf.send_user_only && in kvm_arch_async_page_not_present()
7918 fault.address = work->arch.token; in kvm_arch_async_page_not_present()
7928 trace_kvm_async_pf_ready(work->arch.token, work->gva); in kvm_arch_async_page_present()
7930 work->arch.token = ~0; /* broadcast wakeup */ in kvm_arch_async_page_present()
7932 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
7934 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && in kvm_arch_async_page_present()
7940 fault.address = work->arch.token; in kvm_arch_async_page_present()
7943 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
7944 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
7949 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) in kvm_arch_can_inject_async_page_present()
7958 atomic_inc(&kvm->arch.noncoherent_dma_count); in kvm_arch_register_noncoherent_dma()
7964 atomic_dec(&kvm->arch.noncoherent_dma_count); in kvm_arch_unregister_noncoherent_dma()
7970 return atomic_read(&kvm->arch.noncoherent_dma_count); in kvm_arch_has_noncoherent_dma()