Lines Matching refs:kvm
86 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
496 return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len); in kvm_read_guest_page_mmu()
784 if (irqchip_in_kernel(vcpu->kvm)) in kvm_set_cr8()
794 if (irqchip_in_kernel(vcpu->kvm)) in kvm_get_cr8()
1109 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) in kvm_write_wall_clock() argument
1119 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); in kvm_write_wall_clock()
1128 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); in kvm_write_wall_clock()
1138 if (kvm->arch.kvmclock_offset) { in kvm_write_wall_clock()
1139 struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); in kvm_write_wall_clock()
1146 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); in kvm_write_wall_clock()
1149 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); in kvm_write_wall_clock()
1263 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
1267 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
1282 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
1295 struct kvm *kvm = vcpu->kvm; in kvm_write_tsc() local
1303 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_write_tsc()
1306 elapsed = ns - kvm->arch.last_tsc_nsec; in kvm_write_tsc()
1312 usdiff = data - kvm->arch.last_tsc_write; in kvm_write_tsc()
1354 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_write_tsc()
1356 offset = kvm->arch.cur_tsc_offset; in kvm_write_tsc()
1365 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_write_tsc()
1376 kvm->arch.cur_tsc_generation++; in kvm_write_tsc()
1377 kvm->arch.cur_tsc_nsec = ns; in kvm_write_tsc()
1378 kvm->arch.cur_tsc_write = data; in kvm_write_tsc()
1379 kvm->arch.cur_tsc_offset = offset; in kvm_write_tsc()
1382 kvm->arch.cur_tsc_generation, data); in kvm_write_tsc()
1389 kvm->arch.last_tsc_nsec = ns; in kvm_write_tsc()
1390 kvm->arch.last_tsc_write = data; in kvm_write_tsc()
1391 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_write_tsc()
1396 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_write_tsc()
1397 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_write_tsc()
1398 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_write_tsc()
1403 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_write_tsc()
1405 spin_lock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_write_tsc()
1407 kvm->arch.nr_vcpus_matched_tsc = 0; in kvm_write_tsc()
1409 kvm->arch.nr_vcpus_matched_tsc++; in kvm_write_tsc()
1413 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_write_tsc()
1535 static void pvclock_update_vm_gtod_copy(struct kvm *kvm) in pvclock_update_vm_gtod_copy() argument
1538 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy()
1543 atomic_read(&kvm->online_vcpus)); in pvclock_update_vm_gtod_copy()
1566 static void kvm_gen_update_masterclock(struct kvm *kvm) in kvm_gen_update_masterclock() argument
1571 struct kvm_arch *ka = &kvm->arch; in kvm_gen_update_masterclock()
1574 kvm_make_mclock_inprogress_request(kvm); in kvm_gen_update_masterclock()
1576 pvclock_update_vm_gtod_copy(kvm); in kvm_gen_update_masterclock()
1578 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
1582 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
1593 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update()
1662 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
1665 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1686 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1708 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1715 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1743 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_update_fn() local
1746 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_update_fn()
1754 struct kvm *kvm = v->kvm; in kvm_gen_kvmclock_update() local
1757 schedule_delayed_work(&kvm->arch.kvmclock_update_work, in kvm_gen_kvmclock_update()
1768 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_sync_fn() local
1770 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
1771 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvmclock_sync_fn()
1929 struct kvm *kvm = vcpu->kvm; in xen_hvm_config() local
1931 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 in xen_hvm_config()
1932 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; in xen_hvm_config()
1933 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 in xen_hvm_config()
1934 : kvm->arch.xen_hvm_config.blob_size_32; in xen_hvm_config()
1949 if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE)) in xen_hvm_config()
1958 static bool kvm_hv_hypercall_enabled(struct kvm *kvm) in kvm_hv_hypercall_enabled() argument
1960 return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; in kvm_hv_hypercall_enabled()
1980 struct kvm *kvm = vcpu->kvm; in set_msr_hyperv_pw() local
1984 kvm->arch.hv_guest_os_id = data; in set_msr_hyperv_pw()
1986 if (!kvm->arch.hv_guest_os_id) in set_msr_hyperv_pw()
1987 kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; in set_msr_hyperv_pw()
1995 if (!kvm->arch.hv_guest_os_id) in set_msr_hyperv_pw()
1998 kvm->arch.hv_hypercall = data; in set_msr_hyperv_pw()
2002 addr = gfn_to_hva(kvm, gfn); in set_msr_hyperv_pw()
2009 kvm->arch.hv_hypercall = data; in set_msr_hyperv_pw()
2010 mark_page_dirty(kvm, gfn); in set_msr_hyperv_pw()
2017 kvm->arch.hv_tsc_page = data; in set_msr_hyperv_pw()
2021 if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, in set_msr_hyperv_pw()
2024 mark_page_dirty(kvm, gfn); in set_msr_hyperv_pw()
2049 addr = gfn_to_hva(vcpu->kvm, gfn); in set_msr_hyperv()
2055 mark_page_dirty(vcpu->kvm, gfn); in set_msr_hyperv()
2091 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
2124 if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2132 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2206 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
2207 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
2212 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_set_msr_common()
2235 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_set_msr_common()
2256 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, in kvm_set_msr_common()
2329 mutex_lock(&vcpu->kvm->lock); in kvm_set_msr_common()
2331 mutex_unlock(&vcpu->kvm->lock); in kvm_set_msr_common()
2353 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
2457 struct kvm *kvm = vcpu->kvm; in get_msr_hyperv_pw() local
2461 data = kvm->arch.hv_guest_os_id; in get_msr_hyperv_pw()
2464 data = kvm->arch.hv_hypercall; in get_msr_hyperv_pw()
2468 div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100); in get_msr_hyperv_pw()
2472 data = kvm->arch.hv_tsc_page; in get_msr_hyperv_pw()
2491 kvm_for_each_vcpu(r, v, vcpu->kvm) { in get_msr_hyperv()
2604 data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
2641 mutex_lock(&vcpu->kvm->lock); in kvm_get_msr_common()
2643 mutex_unlock(&vcpu->kvm->lock); in kvm_get_msr_common()
2700 idx = srcu_read_lock(&vcpu->kvm->srcu); in __msr_io()
2704 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __msr_io()
2755 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
2919 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
2957 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
2997 if (irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
3480 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arch_vcpu_ioctl()
3633 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) in kvm_vm_ioctl_set_tss_addr() argument
3639 ret = kvm_x86_ops->set_tss_addr(kvm, addr); in kvm_vm_ioctl_set_tss_addr()
3643 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, in kvm_vm_ioctl_set_identity_map_addr() argument
3646 kvm->arch.ept_identity_map_addr = ident_addr; in kvm_vm_ioctl_set_identity_map_addr()
3650 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, in kvm_vm_ioctl_set_nr_mmu_pages() argument
3656 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
3658 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); in kvm_vm_ioctl_set_nr_mmu_pages()
3659 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; in kvm_vm_ioctl_set_nr_mmu_pages()
3661 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
3665 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) in kvm_vm_ioctl_get_nr_mmu_pages() argument
3667 return kvm->arch.n_max_mmu_pages; in kvm_vm_ioctl_get_nr_mmu_pages()
3670 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) in kvm_vm_ioctl_get_irqchip() argument
3678 &pic_irqchip(kvm)->pics[0], in kvm_vm_ioctl_get_irqchip()
3683 &pic_irqchip(kvm)->pics[1], in kvm_vm_ioctl_get_irqchip()
3687 r = kvm_get_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_get_irqchip()
3696 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) in kvm_vm_ioctl_set_irqchip() argument
3703 spin_lock(&pic_irqchip(kvm)->lock); in kvm_vm_ioctl_set_irqchip()
3704 memcpy(&pic_irqchip(kvm)->pics[0], in kvm_vm_ioctl_set_irqchip()
3707 spin_unlock(&pic_irqchip(kvm)->lock); in kvm_vm_ioctl_set_irqchip()
3710 spin_lock(&pic_irqchip(kvm)->lock); in kvm_vm_ioctl_set_irqchip()
3711 memcpy(&pic_irqchip(kvm)->pics[1], in kvm_vm_ioctl_set_irqchip()
3714 spin_unlock(&pic_irqchip(kvm)->lock); in kvm_vm_ioctl_set_irqchip()
3717 r = kvm_set_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_set_irqchip()
3723 kvm_pic_update_irq(pic_irqchip(kvm)); in kvm_vm_ioctl_set_irqchip()
3727 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) in kvm_vm_ioctl_get_pit() argument
3731 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit()
3732 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); in kvm_vm_ioctl_get_pit()
3733 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit()
3737 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) in kvm_vm_ioctl_set_pit() argument
3740 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit()
3741 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); in kvm_vm_ioctl_set_pit()
3743 kvm_pit_load_count(kvm, i, ps->channels[i].count, 0); in kvm_vm_ioctl_set_pit()
3744 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit()
3748 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) in kvm_vm_ioctl_get_pit2() argument
3752 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
3753 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, in kvm_vm_ioctl_get_pit2()
3755 ps->flags = kvm->arch.vpit->pit_state.flags; in kvm_vm_ioctl_get_pit2()
3756 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
3761 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) in kvm_vm_ioctl_set_pit2() argument
3766 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
3767 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
3771 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, in kvm_vm_ioctl_set_pit2()
3772 sizeof(kvm->arch.vpit->pit_state.channels)); in kvm_vm_ioctl_set_pit2()
3773 kvm->arch.vpit->pit_state.flags = ps->flags; in kvm_vm_ioctl_set_pit2()
3775 kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start); in kvm_vm_ioctl_set_pit2()
3776 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
3780 static int kvm_vm_ioctl_reinject(struct kvm *kvm, in kvm_vm_ioctl_reinject() argument
3783 if (!kvm->arch.vpit) in kvm_vm_ioctl_reinject()
3785 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_reinject()
3786 kvm->arch.vpit->pit_state.reinject = control->pit_reinject; in kvm_vm_ioctl_reinject()
3787 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_reinject()
3810 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) in kvm_vm_ioctl_get_dirty_log() argument
3815 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
3821 kvm_x86_ops->flush_log_dirty(kvm); in kvm_vm_ioctl_get_dirty_log()
3823 r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); in kvm_vm_ioctl_get_dirty_log()
3829 lockdep_assert_held(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
3831 kvm_flush_remote_tlbs(kvm); in kvm_vm_ioctl_get_dirty_log()
3833 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
3837 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, in kvm_vm_ioctl_irq_line() argument
3840 if (!irqchip_in_kernel(kvm)) in kvm_vm_ioctl_irq_line()
3843 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, in kvm_vm_ioctl_irq_line()
3852 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl() local
3868 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); in kvm_arch_vm_ioctl()
3876 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); in kvm_arch_vm_ioctl()
3880 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); in kvm_arch_vm_ioctl()
3883 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); in kvm_arch_vm_ioctl()
3888 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
3890 if (kvm->arch.vpic) in kvm_arch_vm_ioctl()
3893 if (atomic_read(&kvm->online_vcpus)) in kvm_arch_vm_ioctl()
3896 vpic = kvm_create_pic(kvm); in kvm_arch_vm_ioctl()
3898 r = kvm_ioapic_init(kvm); in kvm_arch_vm_ioctl()
3900 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3901 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, in kvm_arch_vm_ioctl()
3903 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, in kvm_arch_vm_ioctl()
3905 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, in kvm_arch_vm_ioctl()
3907 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3914 kvm->arch.vpic = vpic; in kvm_arch_vm_ioctl()
3916 r = kvm_setup_default_irq_routing(kvm); in kvm_arch_vm_ioctl()
3918 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3919 mutex_lock(&kvm->irq_lock); in kvm_arch_vm_ioctl()
3920 kvm_ioapic_destroy(kvm); in kvm_arch_vm_ioctl()
3921 kvm_destroy_pic(kvm); in kvm_arch_vm_ioctl()
3922 mutex_unlock(&kvm->irq_lock); in kvm_arch_vm_ioctl()
3923 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3926 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
3938 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3940 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
3943 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); in kvm_arch_vm_ioctl()
3944 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
3947 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3960 if (!irqchip_in_kernel(kvm)) in kvm_arch_vm_ioctl()
3962 r = kvm_vm_ioctl_get_irqchip(kvm, chip); in kvm_arch_vm_ioctl()
3984 if (!irqchip_in_kernel(kvm)) in kvm_arch_vm_ioctl()
3986 r = kvm_vm_ioctl_set_irqchip(kvm, chip); in kvm_arch_vm_ioctl()
3999 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
4001 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); in kvm_arch_vm_ioctl()
4015 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
4017 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); in kvm_arch_vm_ioctl()
4022 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
4024 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); in kvm_arch_vm_ioctl()
4038 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
4040 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); in kvm_arch_vm_ioctl()
4048 r = kvm_vm_ioctl_reinject(kvm, &control); in kvm_arch_vm_ioctl()
4053 if (copy_from_user(&kvm->arch.xen_hvm_config, argp, in kvm_arch_vm_ioctl()
4057 if (kvm->arch.xen_hvm_config.flags) in kvm_arch_vm_ioctl()
4080 kvm->arch.kvmclock_offset = delta; in kvm_arch_vm_ioctl()
4081 kvm_gen_update_masterclock(kvm); in kvm_arch_vm_ioctl()
4090 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; in kvm_arch_vm_ioctl()
4103 r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg); in kvm_arch_vm_ioctl()
4258 ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data, in kvm_read_guest_virt_helper()
4292 ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val, in kvm_fetch_guest_virt()
4339 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); in kvm_write_guest_virt_system()
4392 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); in emulator_write_phys()
4426 return !kvm_read_guest(vcpu->kvm, gpa, val, bytes); in read_emulate()
4624 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); in emulator_cmpxchg_emulated()
4652 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT); in emulator_cmpxchg_emulated()
5193 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5208 spin_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
5209 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
5210 spin_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
5213 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5223 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5272 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
5535 struct kvm *kvm; in kvmclock_cpufreq_notifier() local
5586 list_for_each_entry(kvm, &vm_list, vm_list) { in kvmclock_cpufreq_notifier()
5587 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_cpufreq_notifier()
5748 struct kvm *kvm; in pvclock_gtod_update_fn() local
5754 list_for_each_entry(kvm, &vm_list, vm_list) in pvclock_gtod_update_fn()
5755 kvm_for_each_vcpu(i, vcpu, kvm) in pvclock_gtod_update_fn()
5868 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_halt()
5950 static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) in kvm_pv_kick_cpu_op() argument
5959 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); in kvm_pv_kick_cpu_op()
5969 if (kvm_hv_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
5999 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); in kvm_emulate_hypercall()
6034 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && in dm_request_for_irq_injection()
6046 if (irqchip_in_kernel(vcpu->kvm)) in post_kvm_run_save()
6191 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_reload_apic_access_page()
6197 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); in kvm_vcpu_reload_apic_access_page()
6210 void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, in kvm_arch_mmu_notifier_invalidate_page() argument
6217 if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT)) in kvm_arch_mmu_notifier_invalidate_page()
6218 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); in kvm_arch_mmu_notifier_invalidate_page()
6229 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && in vcpu_enter_guest()
6239 kvm_gen_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
6327 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in vcpu_enter_guest()
6342 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
6418 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
6445 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) in vcpu_block() argument
6448 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_block()
6450 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_block()
6476 struct kvm *kvm = vcpu->kvm; in vcpu_run() local
6478 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
6485 r = vcpu_block(kvm, vcpu); in vcpu_run()
6509 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
6511 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
6515 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
6523 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in complete_emulated_io()
6525 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in complete_emulated_io()
6622 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
6859 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_set_sregs()
6864 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_set_sregs()
6965 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
6967 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
7079 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, in kvm_arch_vcpu_create() argument
7084 if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) in kvm_arch_vcpu_create()
7089 vcpu = kvm_x86_ops->vcpu_create(kvm, id); in kvm_arch_vcpu_create()
7117 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate() local
7127 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvm_arch_vcpu_postcreate()
7194 struct kvm *kvm; in kvm_arch_hardware_enable() local
7209 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_arch_hardware_enable()
7210 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
7262 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_arch_hardware_enable()
7263 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
7275 kvm->arch.last_tsc_nsec = 0; in kvm_arch_hardware_enable()
7276 kvm->arch.last_tsc_write = 0; in kvm_arch_hardware_enable()
7313 return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); in kvm_vcpu_compatible()
7321 struct kvm *kvm; in kvm_arch_vcpu_init() local
7324 BUG_ON(vcpu->kvm == NULL); in kvm_arch_vcpu_init()
7325 kvm = vcpu->kvm; in kvm_arch_vcpu_init()
7329 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_init()
7347 if (irqchip_in_kernel(kvm)) { in kvm_arch_vcpu_init()
7404 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_uninit()
7406 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_uninit()
7408 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arch_vcpu_uninit()
7417 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
7422 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); in kvm_arch_init_vm()
7423 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); in kvm_arch_init_vm()
7424 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); in kvm_arch_init_vm()
7425 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); in kvm_arch_init_vm()
7426 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
7429 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
7432 &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
7434 raw_spin_lock_init(&kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
7435 mutex_init(&kvm->arch.apic_map_lock); in kvm_arch_init_vm()
7436 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); in kvm_arch_init_vm()
7438 pvclock_update_vm_gtod_copy(kvm); in kvm_arch_init_vm()
7440 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); in kvm_arch_init_vm()
7441 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); in kvm_arch_init_vm()
7455 static void kvm_free_vcpus(struct kvm *kvm) in kvm_free_vcpus() argument
7463 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_free_vcpus()
7467 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
7470 mutex_lock(&kvm->lock); in kvm_free_vcpus()
7471 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
7472 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
7474 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
7475 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
7478 void kvm_arch_sync_events(struct kvm *kvm) in kvm_arch_sync_events() argument
7480 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); in kvm_arch_sync_events()
7481 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); in kvm_arch_sync_events()
7482 kvm_free_all_assigned_devices(kvm); in kvm_arch_sync_events()
7483 kvm_free_pit(kvm); in kvm_arch_sync_events()
7486 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
7488 if (current->mm == kvm->mm) { in kvm_arch_destroy_vm()
7497 kvm_set_memory_region(kvm, &mem); in kvm_arch_destroy_vm()
7500 kvm_set_memory_region(kvm, &mem); in kvm_arch_destroy_vm()
7503 kvm_set_memory_region(kvm, &mem); in kvm_arch_destroy_vm()
7505 kvm_iommu_unmap_guest(kvm); in kvm_arch_destroy_vm()
7506 kfree(kvm->arch.vpic); in kvm_arch_destroy_vm()
7507 kfree(kvm->arch.vioapic); in kvm_arch_destroy_vm()
7508 kvm_free_vcpus(kvm); in kvm_arch_destroy_vm()
7509 kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); in kvm_arch_destroy_vm()
7512 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, in kvm_arch_free_memslot() argument
7533 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, in kvm_arch_create_memslot() argument
7592 void kvm_arch_memslots_updated(struct kvm *kvm) in kvm_arch_memslots_updated() argument
7598 kvm_mmu_invalidate_mmio_sptes(kvm); in kvm_arch_memslots_updated()
7601 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
7630 static void kvm_mmu_slot_apply_flags(struct kvm *kvm, in kvm_mmu_slot_apply_flags() argument
7635 kvm_mmu_slot_remove_write_access(kvm, new); in kvm_mmu_slot_apply_flags()
7671 kvm_x86_ops->slot_enable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
7673 kvm_mmu_slot_remove_write_access(kvm, new); in kvm_mmu_slot_apply_flags()
7676 kvm_x86_ops->slot_disable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
7680 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
7699 if (!kvm->arch.n_requested_mmu_pages) in kvm_arch_commit_memory_region()
7700 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); in kvm_arch_commit_memory_region()
7703 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); in kvm_arch_commit_memory_region()
7706 new = id_to_memslot(kvm->memslots, mem->slot); in kvm_arch_commit_memory_region()
7723 kvm_mmu_zap_collapsible_sptes(kvm, new); in kvm_arch_commit_memory_region()
7734 kvm_mmu_slot_apply_flags(kvm, new); in kvm_arch_commit_memory_region()
7737 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all() argument
7739 kvm_mmu_invalidate_zap_all_pages(kvm); in kvm_arch_flush_shadow_all()
7742 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, in kvm_arch_flush_shadow_memslot() argument
7745 kvm_mmu_invalidate_zap_all_pages(kvm); in kvm_arch_flush_shadow_memslot()
7897 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, in apf_put_user()
7956 void kvm_arch_register_noncoherent_dma(struct kvm *kvm) in kvm_arch_register_noncoherent_dma() argument
7958 atomic_inc(&kvm->arch.noncoherent_dma_count); in kvm_arch_register_noncoherent_dma()
7962 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) in kvm_arch_unregister_noncoherent_dma() argument
7964 atomic_dec(&kvm->arch.noncoherent_dma_count); in kvm_arch_unregister_noncoherent_dma()
7968 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) in kvm_arch_has_noncoherent_dma() argument
7970 return atomic_read(&kvm->arch.noncoherent_dma_count); in kvm_arch_has_noncoherent_dma()
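The closing entries above (lines 2919 and 7956-7970) show how x86 KVM tracks noncoherent DMA with a bare atomic counter: kvm_arch_register_noncoherent_dma() increments kvm->arch.noncoherent_dma_count, kvm_arch_unregister_noncoherent_dma() decrements it, and kvm_arch_has_noncoherent_dma() simply reads it back, which need_emulate_wbinvd() consults. As a rough, self-contained sketch of that counting pattern only -- written with C11 atomics in userspace rather than the kernel's atomic_t, and with illustrative struct and function names that are not the kernel's -- the idea looks like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the per-VM state; not the kernel's struct kvm. */
struct vm_state {
	atomic_int noncoherent_dma_count;  /* mirrors kvm->arch.noncoherent_dma_count */
};

/* Pattern of kvm_arch_register_noncoherent_dma(): one increment per device. */
static void register_noncoherent_dma(struct vm_state *vm)
{
	atomic_fetch_add(&vm->noncoherent_dma_count, 1);
}

/* Pattern of kvm_arch_unregister_noncoherent_dma(). */
static void unregister_noncoherent_dma(struct vm_state *vm)
{
	atomic_fetch_sub(&vm->noncoherent_dma_count, 1);
}

/* Pattern of kvm_arch_has_noncoherent_dma(): a nonzero count means
 * cache-flushing instructions (WBINVD) must actually be honored. */
static bool has_noncoherent_dma(struct vm_state *vm)
{
	return atomic_load(&vm->noncoherent_dma_count) != 0;
}

int main(void)
{
	struct vm_state vm = { .noncoherent_dma_count = 0 };

	register_noncoherent_dma(&vm);
	printf("needs wbinvd emulation: %d\n", has_noncoherent_dma(&vm)); /* 1 */

	unregister_noncoherent_dma(&vm);
	printf("needs wbinvd emulation: %d\n", has_noncoherent_dma(&vm)); /* 0 */
	return 0;
}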