Lines matching refs: arch
76 container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
193 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
281 return vcpu->arch.apic_base; in kvm_get_apic_base()
287 u64 old_state = vcpu->arch.apic_base & in kvm_set_apic_base()
370 if (!vcpu->arch.exception.pending) { in kvm_multiple_exception()
374 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
375 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
376 vcpu->arch.exception.nr = nr; in kvm_multiple_exception()
377 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
378 vcpu->arch.exception.reinject = reinject; in kvm_multiple_exception()
383 prev_nr = vcpu->arch.exception.nr; in kvm_multiple_exception()
394 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
395 vcpu->arch.exception.has_error_code = true; in kvm_multiple_exception()
396 vcpu->arch.exception.nr = DF_VECTOR; in kvm_multiple_exception()
397 vcpu->arch.exception.error_code = 0; in kvm_multiple_exception()
429 vcpu->arch.cr2 = fault->address; in kvm_inject_page_fault()
437 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); in kvm_propagate_fault()
439 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in kvm_propagate_fault()
446 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
513 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
538 vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) { in load_pdptrs()
547 (unsigned long *)&vcpu->arch.regs_avail); in load_pdptrs()
549 (unsigned long *)&vcpu->arch.regs_dirty); in load_pdptrs()
558 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; in pdptrs_changed()
568 (unsigned long *)&vcpu->arch.regs_avail)) in pdptrs_changed()
577 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; in pdptrs_changed()
605 if ((vcpu->arch.efer & EFER_LME)) { in kvm_set_cr0()
615 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr0()
653 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xcr0()
661 if (vcpu->arch.xcr0 != host_xcr0) in kvm_put_guest_xcr0()
670 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
686 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
700 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
744 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr4()
787 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) in kvm_set_cr3()
790 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
791 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in kvm_set_cr3()
804 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
814 return vcpu->arch.cr8; in kvm_get_cr8()
824 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
825 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_update_dr0123()
832 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); in kvm_update_dr6()
840 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
842 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
844 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
846 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
862 vcpu->arch.db[dr] = val; in __kvm_set_dr()
864 vcpu->arch.eff_db[dr] = val; in __kvm_set_dr()
871 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in __kvm_set_dr()
879 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in __kvm_set_dr()
901 *val = vcpu->arch.db[dr]; in kvm_get_dr()
907 *val = vcpu->arch.dr6; in kvm_get_dr()
914 *val = vcpu->arch.dr7; in kvm_get_dr()
1008 u64 old_efer = vcpu->arch.efer; in set_efer()
1014 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1018 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1179 if (kvm->arch.kvmclock_offset) { in kvm_write_wall_clock()
1180 struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); in kvm_write_wall_clock()
1245 return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, in nsec_to_cycles()
1246 vcpu->arch.virtual_tsc_shift); in nsec_to_cycles()
1262 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in set_tsc_khz()
1269 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
1270 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
1288 vcpu->arch.tsc_scaling_ratio = ratio; in set_tsc_khz()
1300 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in kvm_set_tsc_khz()
1306 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
1307 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
1308 vcpu->arch.virtual_tsc_khz = this_tsc_khz; in kvm_set_tsc_khz()
1327 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
1328 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
1329 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
1330 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
1338 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
1365 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; in update_ia32_tsc_adjust_msr()
1386 u64 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_scale_tsc()
1420 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_write_tsc()
1423 elapsed = ns - kvm->arch.last_tsc_nsec; in kvm_write_tsc()
1425 if (vcpu->arch.virtual_tsc_khz) { in kvm_write_tsc()
1429 usdiff = data - kvm->arch.last_tsc_write; in kvm_write_tsc()
1431 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; in kvm_write_tsc()
1446 : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); in kvm_write_tsc()
1471 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_write_tsc()
1473 offset = kvm->arch.cur_tsc_offset; in kvm_write_tsc()
1482 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_write_tsc()
1493 kvm->arch.cur_tsc_generation++; in kvm_write_tsc()
1494 kvm->arch.cur_tsc_nsec = ns; in kvm_write_tsc()
1495 kvm->arch.cur_tsc_write = data; in kvm_write_tsc()
1496 kvm->arch.cur_tsc_offset = offset; in kvm_write_tsc()
1499 kvm->arch.cur_tsc_generation, data); in kvm_write_tsc()
1506 kvm->arch.last_tsc_nsec = ns; in kvm_write_tsc()
1507 kvm->arch.last_tsc_write = data; in kvm_write_tsc()
1508 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_write_tsc()
1510 vcpu->arch.last_guest_tsc = data; in kvm_write_tsc()
1513 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_write_tsc()
1514 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_write_tsc()
1515 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_write_tsc()
1520 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_write_tsc()
1522 spin_lock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_write_tsc()
1524 kvm->arch.nr_vcpus_matched_tsc = 0; in kvm_write_tsc()
1526 kvm->arch.nr_vcpus_matched_tsc++; in kvm_write_tsc()
1530 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_write_tsc()
1543 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) in adjust_tsc_offset_host()
1657 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy()
1690 struct kvm_arch *ka = &kvm->arch; in kvm_gen_update_masterclock()
1711 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update()
1712 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update()
1783 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
1864 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_update_fn()
1878 schedule_delayed_work(&kvm->arch.kvmclock_update_work, in kvm_gen_kvmclock_update()
1889 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_sync_fn()
1894 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
1895 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvmclock_sync_fn()
1901 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
1906 vcpu->arch.mcg_status = data; in set_msr_mce()
1913 vcpu->arch.mcg_ctl = data; in set_msr_mce()
1927 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
1939 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 in xen_hvm_config()
1940 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; in xen_hvm_config()
1941 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 in xen_hvm_config()
1942 : kvm->arch.xen_hvm_config.blob_size_32; in xen_hvm_config()
1974 vcpu->arch.apf.msr_val = data; in kvm_pv_enable_async_pf()
1982 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
1986 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
1993 vcpu->arch.pv_time_enabled = false; in kvmclock_reset()
2000 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in accumulate_steal_time()
2003 delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; in accumulate_steal_time()
2004 vcpu->arch.st.last_steal = current->sched_info.run_delay; in accumulate_steal_time()
2005 vcpu->arch.st.accum_steal = delta; in accumulate_steal_time()
2012 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
2015 if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2016 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) in record_steal_time()
2019 vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; in record_steal_time()
2020 vcpu->arch.st.steal.version += 2; in record_steal_time()
2021 vcpu->arch.st.accum_steal = 0; in record_steal_time()
2023 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2024 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); in record_steal_time()
2086 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
2089 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
2093 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
2098 vcpu->arch.smbase = data; in kvm_set_msr_common()
2102 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
2108 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_set_msr_common()
2122 vcpu->arch.time = data; in kvm_set_msr_common()
2132 &vcpu->arch.pv_time, data & ~1ULL, in kvm_set_msr_common()
2134 vcpu->arch.pv_time_enabled = false; in kvm_set_msr_common()
2136 vcpu->arch.pv_time_enabled = true; in kvm_set_msr_common()
2152 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, in kvm_set_msr_common()
2157 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
2211 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
2216 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
2219 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
2252 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
2261 data = vcpu->arch.mcg_cap; in get_msr_mce()
2266 data = vcpu->arch.mcg_ctl; in get_msr_mce()
2269 data = vcpu->arch.mcg_status; in get_msr_mce()
2275 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
2346 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
2349 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
2354 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
2363 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
2367 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
2371 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
2374 msr_info->data = vcpu->arch.apf.msr_val; in kvm_get_msr_common()
2377 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
2380 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
2423 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
2428 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
2700 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
2709 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
2710 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
2711 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
2716 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
2717 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
2722 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
2724 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
2730 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
2738 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_arch_vcpu_load()
2745 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
2752 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); in kvm_vcpu_ioctl_get_lapic()
2805 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
2808 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
2832 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
2848 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
2851 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
2854 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
2862 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
2864 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
2873 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
2883 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
2892 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
2912 vcpu->arch.exception.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
2913 !kvm_exception_is_soft(vcpu->arch.exception.nr); in kvm_vcpu_ioctl_x86_get_vcpu_events()
2914 events->exception.nr = vcpu->arch.exception.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2915 events->exception.has_error_code = vcpu->arch.exception.has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2917 events->exception.error_code = vcpu->arch.exception.error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2920 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2921 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2925 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2926 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2933 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2935 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
2954 vcpu->arch.exception.pending = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2955 vcpu->arch.exception.nr = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2956 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2957 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2959 vcpu->arch.interrupt.pending = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2960 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2961 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2966 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2968 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2973 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2977 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2979 vcpu->arch.hflags &= ~HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2980 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2982 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2984 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2987 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
2989 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
3003 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
3006 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
3022 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
3024 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
3026 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
3036 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; in fill_xsave()
3072 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; in load_xsave()
3116 &vcpu->arch.guest_fpu.state.fxsave, in kvm_vcpu_ioctl_x86_get_xsave()
3141 memcpy(&vcpu->arch.guest_fpu.state.fxsave, in kvm_vcpu_ioctl_x86_set_xsave()
3158 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
3192 if (!vcpu->arch.pv_time_enabled) in kvm_set_guest_paused()
3194 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
3216 if (!vcpu->arch.apic) in kvm_arch_vcpu_ioctl()
3234 if (!vcpu->arch.apic) in kvm_arch_vcpu_ioctl()
3456 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
3489 kvm->arch.ept_identity_map_addr = ident_addr; in kvm_vm_ioctl_set_identity_map_addr()
3502 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; in kvm_vm_ioctl_set_nr_mmu_pages()
3510 return kvm->arch.n_max_mmu_pages; in kvm_vm_ioctl_get_nr_mmu_pages()
3572 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit()
3573 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); in kvm_vm_ioctl_get_pit()
3574 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit()
3581 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit()
3582 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); in kvm_vm_ioctl_set_pit()
3585 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit()
3591 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
3592 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, in kvm_vm_ioctl_get_pit2()
3594 ps->flags = kvm->arch.vpit->pit_state.flags; in kvm_vm_ioctl_get_pit2()
3595 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
3605 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
3606 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
3610 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, in kvm_vm_ioctl_set_pit2()
3611 sizeof(kvm->arch.vpit->pit_state.channels)); in kvm_vm_ioctl_set_pit2()
3612 kvm->arch.vpit->pit_state.flags = ps->flags; in kvm_vm_ioctl_set_pit2()
3614 kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, in kvm_vm_ioctl_set_pit2()
3616 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
3623 if (!kvm->arch.vpit) in kvm_vm_ioctl_reinject()
3625 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_reinject()
3626 kvm->arch.vpit->pit_state.reinject = control->pit_reinject; in kvm_vm_ioctl_reinject()
3627 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_reinject()
3699 kvm->arch.disabled_quirks = cap->args[0]; in kvm_vm_ioctl_enable_cap()
3717 kvm->arch.irqchip_split = true; in kvm_vm_ioctl_enable_cap()
3718 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; in kvm_vm_ioctl_enable_cap()
3772 if (kvm->arch.vpic) in kvm_arch_vm_ioctl()
3801 kvm->arch.vpic = vpic; in kvm_arch_vm_ioctl()
3817 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
3820 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); in kvm_arch_vm_ioctl()
3821 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
3876 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
3892 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
3899 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
3915 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
3934 kvm->arch.bsp_vcpu_id = arg; in kvm_arch_vm_ioctl()
3939 if (copy_from_user(&kvm->arch.xen_hvm_config, argp, in kvm_arch_vm_ioctl()
3943 if (kvm->arch.xen_hvm_config.flags) in kvm_arch_vm_ioctl()
3966 kvm->arch.kvmclock_offset = delta; in kvm_arch_vm_ioctl()
3976 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; in kvm_arch_vm_ioctl()
4060 if (!(vcpu->arch.apic && in vcpu_mmio_write()
4061 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
4080 if (!(vcpu->arch.apic && in vcpu_mmio_read()
4081 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
4116 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa()
4125 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
4133 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
4141 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
4148 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
4159 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, in kvm_read_guest_virt_helper()
4193 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
4248 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, in kvm_write_guest_virt_system()
4280 && !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
4281 vcpu->arch.access, access)) { in vcpu_mmio_gva_to_gpa()
4282 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
4288 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
4586 if (vcpu->arch.pio.in) in kernel_pio()
4587 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, in kernel_pio()
4588 vcpu->arch.pio.size, pd); in kernel_pio()
4591 vcpu->arch.pio.port, vcpu->arch.pio.size, in kernel_pio()
4600 vcpu->arch.pio.port = port; in emulator_pio_in_out()
4601 vcpu->arch.pio.in = in; in emulator_pio_in_out()
4602 vcpu->arch.pio.count = count; in emulator_pio_in_out()
4603 vcpu->arch.pio.size = size; in emulator_pio_in_out()
4605 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { in emulator_pio_in_out()
4606 vcpu->arch.pio.count = 0; in emulator_pio_in_out()
4627 if (vcpu->arch.pio.count) in emulator_pio_in_emulated()
4633 memcpy(val, vcpu->arch.pio_data, size * count); in emulator_pio_in_emulated()
4634 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); in emulator_pio_in_emulated()
4635 vcpu->arch.pio.count = 0; in emulator_pio_in_emulated()
4648 memcpy(vcpu->arch.pio_data, val, size * count); in emulator_pio_out_emulated()
4649 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); in emulator_pio_out_emulated()
4671 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
4672 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
4675 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
4723 value = vcpu->arch.cr2; in emulator_get_cr()
4752 vcpu->arch.cr2 = val; in emulator_set_cr()
4897 return vcpu->arch.smbase; in emulator_get_smbase()
4904 vcpu->arch.smbase = smbase; in emulator_set_smbase()
4921 emul_to_vcpu(ctxt)->arch.halt_request = 1; in emulator_halt()
5030 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in inject_emulated_exception()
5044 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
5059 ctxt->emul_flags = vcpu->arch.hflags; in init_emulate_ctxt()
5062 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
5067 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
5085 vcpu->arch.nmi_pending = 0; in kvm_inject_realmode_interrupt()
5087 vcpu->arch.interrupt.pending = false; in kvm_inject_realmode_interrupt()
5120 if (!vcpu->arch.mmu.direct_map) { in reexecute_instruction()
5153 if (vcpu->arch.mmu.direct_map) { in reexecute_instruction()
5157 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
5187 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
5188 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
5203 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
5214 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
5215 vcpu->arch.last_retry_addr = cr2; in retry_instruction()
5217 if (!vcpu->arch.mmu.direct_map) in retry_instruction()
5230 if (!(vcpu->arch.hflags & HF_SMM_MASK)) { in kvm_smm_changed()
5232 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false); in kvm_smm_changed()
5234 if (unlikely(vcpu->arch.smi_pending)) { in kvm_smm_changed()
5236 vcpu->arch.smi_pending = 0; in kvm_smm_changed()
5248 unsigned changed = vcpu->arch.hflags ^ emul_flags; in kvm_set_hflags()
5250 vcpu->arch.hflags = emul_flags; in kvm_set_hflags()
5285 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | in kvm_vcpu_check_singlestep()
5287 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; in kvm_vcpu_check_singlestep()
5288 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_check_singlestep()
5292 vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; in kvm_vcpu_check_singlestep()
5298 vcpu->arch.dr6 &= ~15; in kvm_vcpu_check_singlestep()
5299 vcpu->arch.dr6 |= DR6_BS | DR6_RTM; in kvm_vcpu_check_singlestep()
5308 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_breakpoint()
5312 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_breakpoint()
5313 vcpu->arch.eff_db); in kvm_vcpu_check_breakpoint()
5316 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; in kvm_vcpu_check_breakpoint()
5317 kvm_run->debug.arch.pc = eip; in kvm_vcpu_check_breakpoint()
5318 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_check_breakpoint()
5325 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_breakpoint()
5329 vcpu->arch.dr7, in kvm_vcpu_check_breakpoint()
5330 vcpu->arch.db); in kvm_vcpu_check_breakpoint()
5333 vcpu->arch.dr6 &= ~15; in kvm_vcpu_check_breakpoint()
5334 vcpu->arch.dr6 |= dr6 | DR6_RTM; in kvm_vcpu_check_breakpoint()
5351 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
5353 bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
5359 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
5409 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
5410 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
5432 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
5433 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
5435 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
5438 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
5445 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
5454 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
5455 if (vcpu->arch.hflags != ctxt->emul_flags) in x86_emulate_instruction()
5473 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
5482 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, in kvm_fast_pio_out()
5485 vcpu->arch.pio.count = 0; in kvm_fast_pio_out()
5847 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_vcpu_halt()
5971 if (!vcpu->arch.apic) in update_cr8_intercept()
5974 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
5992 if (vcpu->arch.exception.pending) { in inject_pending_event()
5993 trace_kvm_inj_exception(vcpu->arch.exception.nr, in inject_pending_event()
5994 vcpu->arch.exception.has_error_code, in inject_pending_event()
5995 vcpu->arch.exception.error_code); in inject_pending_event()
5997 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) in inject_pending_event()
6001 if (vcpu->arch.exception.nr == DB_VECTOR && in inject_pending_event()
6002 (vcpu->arch.dr7 & DR7_GD)) { in inject_pending_event()
6003 vcpu->arch.dr7 &= ~DR7_GD; in inject_pending_event()
6007 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, in inject_pending_event()
6008 vcpu->arch.exception.has_error_code, in inject_pending_event()
6009 vcpu->arch.exception.error_code, in inject_pending_event()
6010 vcpu->arch.exception.reinject); in inject_pending_event()
6014 if (vcpu->arch.nmi_injected) { in inject_pending_event()
6019 if (vcpu->arch.interrupt.pending) { in inject_pending_event()
6031 if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { in inject_pending_event()
6032 --vcpu->arch.nmi_pending; in inject_pending_event()
6033 vcpu->arch.nmi_injected = true; in inject_pending_event()
6066 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
6069 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
6070 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
6174 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); in process_smi_save_state_32()
6200 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); in process_smi_save_state_64()
6205 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); in process_smi_save_state_64()
6242 vcpu->arch.smi_pending = true; in process_smi()
6246 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); in process_smi()
6247 vcpu->arch.hflags |= HF_SMM_MASK; in process_smi()
6254 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); in process_smi()
6257 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in process_smi()
6264 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in process_smi()
6266 vcpu->arch.cr0 = cr0; in process_smi()
6276 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in process_smi()
6277 cs.base = vcpu->arch.smbase; in process_smi()
6310 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_scan_ioapic()
6313 memset(vcpu->arch.eoi_exit_bitmap, 0, 256 / 8); in vcpu_scan_ioapic()
6316 kvm_scan_ioapic_routes(vcpu, vcpu->arch.eoi_exit_bitmap); in vcpu_scan_ioapic()
6319 kvm_ioapic_scan_entry(vcpu, vcpu->arch.eoi_exit_bitmap); in vcpu_scan_ioapic()
6412 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
6427 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
6428 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
6429 (void *) vcpu->arch.eoi_exit_bitmap)) { in vcpu_enter_guest()
6432 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
6471 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
6480 if (vcpu->arch.nmi_pending) in vcpu_enter_guest()
6533 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
6535 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
6536 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
6537 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
6538 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
6539 set_debugreg(vcpu->arch.dr6, 6); in vcpu_enter_guest()
6540 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
6551 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
6557 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
6570 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
6604 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
6607 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
6615 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
6637 switch(vcpu->arch.mp_state) { in vcpu_block()
6639 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
6640 vcpu->arch.mp_state = in vcpu_block()
6643 vcpu->arch.apf.halted = false; in vcpu_block()
6656 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
6657 !vcpu->arch.apf.halted); in kvm_vcpu_running()
6722 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
6786 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
6802 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
6818 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
6819 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
6820 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
6825 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
6839 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in kvm_arch_vcpu_ioctl_get_regs()
6847 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_ioctl_get_regs()
6848 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in kvm_arch_vcpu_ioctl_get_regs()
6877 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in kvm_arch_vcpu_ioctl_set_regs()
6878 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in kvm_arch_vcpu_ioctl_set_regs()
6902 vcpu->arch.exception.pending = false; in kvm_arch_vcpu_ioctl_set_regs()
6942 sregs->cr2 = vcpu->arch.cr2; in kvm_arch_vcpu_ioctl_get_sregs()
6946 sregs->efer = vcpu->arch.efer; in kvm_arch_vcpu_ioctl_get_sregs()
6951 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) in kvm_arch_vcpu_ioctl_get_sregs()
6952 set_bit(vcpu->arch.interrupt.nr, in kvm_arch_vcpu_ioctl_get_sregs()
6962 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && in kvm_arch_vcpu_ioctl_get_mpstate()
6963 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
6966 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
6979 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
6980 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
6982 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
6990 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in kvm_task_switch()
7026 vcpu->arch.cr2 = sregs->cr2; in kvm_arch_vcpu_ioctl_set_sregs()
7028 vcpu->arch.cr3 = sregs->cr3; in kvm_arch_vcpu_ioctl_set_sregs()
7029 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in kvm_arch_vcpu_ioctl_set_sregs()
7033 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in kvm_arch_vcpu_ioctl_set_sregs()
7041 vcpu->arch.cr0 = sregs->cr0; in kvm_arch_vcpu_ioctl_set_sregs()
7050 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in kvm_arch_vcpu_ioctl_set_sregs()
7082 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_set_sregs()
7097 if (vcpu->arch.exception.pending) in kvm_arch_vcpu_ioctl_set_guest_debug()
7117 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
7118 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
7121 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
7126 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + in kvm_arch_vcpu_ioctl_set_guest_debug()
7168 &vcpu->arch.guest_fpu.state.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
7185 &vcpu->arch.guest_fpu.state.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
7201 fpstate_init(&vcpu->arch.guest_fpu.state); in fx_init()
7203 vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv = in fx_init()
7209 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in fx_init()
7211 vcpu->arch.cr0 |= X86_CR0_ET; in fx_init()
7226 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state); in kvm_load_guest_fpu()
7238 copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); in kvm_put_guest_fpu()
7247 if (!vcpu->arch.eager_fpu) { in kvm_put_guest_fpu()
7258 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_free()
7307 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvm_arch_vcpu_postcreate()
7314 vcpu->arch.apf.msr_val = 0; in kvm_arch_vcpu_destroy()
7326 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
7328 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
7329 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
7330 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
7334 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
7336 vcpu->arch.dr6 = DR6_INIT; in kvm_vcpu_reset()
7338 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
7341 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
7344 vcpu->arch.apf.msr_val = 0; in kvm_vcpu_reset()
7345 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
7351 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
7355 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
7358 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
7359 vcpu->arch.regs_avail = ~0; in kvm_vcpu_reset()
7360 vcpu->arch.regs_dirty = ~0; in kvm_vcpu_reset()
7397 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
7399 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
7400 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
7448 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
7449 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
7459 kvm->arch.last_tsc_nsec = 0; in kvm_arch_hardware_enable()
7460 kvm->arch.last_tsc_write = 0; in kvm_arch_hardware_enable()
7511 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
7517 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
7536 vcpu->arch.pv.pv_unhalted = false; in kvm_arch_vcpu_init()
7537 vcpu->arch.emulate_ctxt.ops = &emulate_ops; in kvm_arch_vcpu_init()
7539 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_init()
7541 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_init()
7548 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_init()
7563 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, in kvm_arch_vcpu_init()
7565 if (!vcpu->arch.mce_banks) { in kvm_arch_vcpu_init()
7569 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_init()
7571 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { in kvm_arch_vcpu_init()
7578 vcpu->arch.ia32_tsc_adjust_msr = 0x0; in kvm_arch_vcpu_init()
7579 vcpu->arch.pv_time_enabled = false; in kvm_arch_vcpu_init()
7581 vcpu->arch.guest_supported_xcr0 = 0; in kvm_arch_vcpu_init()
7582 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; in kvm_arch_vcpu_init()
7584 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_init()
7586 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_init()
7591 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_init()
7596 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_init()
7602 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_init()
7612 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_uninit()
7617 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_uninit()
7632 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); in kvm_arch_init_vm()
7633 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); in kvm_arch_init_vm()
7634 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); in kvm_arch_init_vm()
7635 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); in kvm_arch_init_vm()
7636 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
7639 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
7642 &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
7644 raw_spin_lock_init(&kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
7645 mutex_init(&kvm->arch.apic_map_lock); in kvm_arch_init_vm()
7646 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); in kvm_arch_init_vm()
7650 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); in kvm_arch_init_vm()
7651 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); in kvm_arch_init_vm()
7690 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); in kvm_arch_sync_events()
7691 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); in kvm_arch_sync_events()
7775 kfree(kvm->arch.vpic); in kvm_arch_destroy_vm()
7776 kfree(kvm->arch.vioapic); in kvm_arch_destroy_vm()
7778 kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); in kvm_arch_destroy_vm()
7787 if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { in kvm_arch_free_memslot()
7788 kvfree(free->arch.rmap[i]); in kvm_arch_free_memslot()
7789 free->arch.rmap[i] = NULL; in kvm_arch_free_memslot()
7794 if (!dont || free->arch.lpage_info[i - 1] != in kvm_arch_free_memslot()
7795 dont->arch.lpage_info[i - 1]) { in kvm_arch_free_memslot()
7796 kvfree(free->arch.lpage_info[i - 1]); in kvm_arch_free_memslot()
7797 free->arch.lpage_info[i - 1] = NULL; in kvm_arch_free_memslot()
7815 slot->arch.rmap[i] = in kvm_arch_create_memslot()
7816 kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i])); in kvm_arch_create_memslot()
7817 if (!slot->arch.rmap[i]) in kvm_arch_create_memslot()
7822 slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages * in kvm_arch_create_memslot()
7823 sizeof(*slot->arch.lpage_info[i - 1])); in kvm_arch_create_memslot()
7824 if (!slot->arch.lpage_info[i - 1]) in kvm_arch_create_memslot()
7828 slot->arch.lpage_info[i - 1][0].write_count = 1; in kvm_arch_create_memslot()
7830 slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1; in kvm_arch_create_memslot()
7842 slot->arch.lpage_info[i - 1][j].write_count = 1; in kvm_arch_create_memslot()
7850 kvfree(slot->arch.rmap[i]); in kvm_arch_create_memslot()
7851 slot->arch.rmap[i] = NULL; in kvm_arch_create_memslot()
7855 kvfree(slot->arch.lpage_info[i - 1]); in kvm_arch_create_memslot()
7856 slot->arch.lpage_info[i - 1] = NULL; in kvm_arch_create_memslot()
7936 if (!kvm->arch.n_requested_mmu_pages) in kvm_arch_commit_memory_region()
7992 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
7995 if (atomic_read(&vcpu->arch.nmi_queued)) in kvm_vcpu_has_events()
8055 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
8071 if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || in kvm_arch_async_page_ready()
8079 if (!vcpu->arch.mmu.direct_map && in kvm_arch_async_page_ready()
8080 work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) in kvm_arch_async_page_ready()
8083 vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); in kvm_arch_async_page_ready()
8100 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
8103 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
8112 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
8113 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
8121 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
8130 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
8133 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
8135 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
8142 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
8150 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, in apf_put_user()
8159 trace_kvm_async_pf_not_present(work->arch.token, work->gva); in kvm_arch_async_page_not_present()
8160 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
8162 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || in kvm_arch_async_page_not_present()
8163 (vcpu->arch.apf.send_user_only && in kvm_arch_async_page_not_present()
8171 fault.address = work->arch.token; in kvm_arch_async_page_not_present()
8181 trace_kvm_async_pf_ready(work->arch.token, work->gva); in kvm_arch_async_page_present()
8183 work->arch.token = ~0; /* broadcast wakeup */ in kvm_arch_async_page_present()
8185 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
8187 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && in kvm_arch_async_page_present()
8193 fault.address = work->arch.token; in kvm_arch_async_page_present()
8196 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
8197 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
8202 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) in kvm_arch_can_inject_async_page_present()
8211 atomic_inc(&kvm->arch.assigned_device_count); in kvm_arch_start_assignment()
8217 atomic_dec(&kvm->arch.assigned_device_count); in kvm_arch_end_assignment()
8223 return atomic_read(&kvm->arch.assigned_device_count); in kvm_arch_has_assigned_device()
8229 atomic_inc(&kvm->arch.noncoherent_dma_count); in kvm_arch_register_noncoherent_dma()
8235 atomic_dec(&kvm->arch.noncoherent_dma_count); in kvm_arch_unregister_noncoherent_dma()
8241 return atomic_read(&kvm->arch.noncoherent_dma_count); in kvm_arch_has_noncoherent_dma()