Lines Matching refs:kvm
89 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
634 kvm_arch_has_noncoherent_dma(vcpu->kvm) && in kvm_set_cr0()
635 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in kvm_set_cr0()
636 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); in kvm_set_cr0()
1150 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) in kvm_write_wall_clock() argument
1160 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); in kvm_write_wall_clock()
1169 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); in kvm_write_wall_clock()
1179 if (kvm->arch.kvmclock_offset) { in kvm_write_wall_clock()
1180 struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); in kvm_write_wall_clock()
1187 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); in kvm_write_wall_clock()
1190 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); in kvm_write_wall_clock()
1338 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
1342 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
1357 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
1412 struct kvm *kvm = vcpu->kvm; in kvm_write_tsc() local
1420 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_write_tsc()
1423 elapsed = ns - kvm->arch.last_tsc_nsec; in kvm_write_tsc()
1429 usdiff = data - kvm->arch.last_tsc_write; in kvm_write_tsc()
1471 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_write_tsc()
1473 offset = kvm->arch.cur_tsc_offset; in kvm_write_tsc()
1482 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_write_tsc()
1493 kvm->arch.cur_tsc_generation++; in kvm_write_tsc()
1494 kvm->arch.cur_tsc_nsec = ns; in kvm_write_tsc()
1495 kvm->arch.cur_tsc_write = data; in kvm_write_tsc()
1496 kvm->arch.cur_tsc_offset = offset; in kvm_write_tsc()
1499 kvm->arch.cur_tsc_generation, data); in kvm_write_tsc()
1506 kvm->arch.last_tsc_nsec = ns; in kvm_write_tsc()
1507 kvm->arch.last_tsc_write = data; in kvm_write_tsc()
1508 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_write_tsc()
1513 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_write_tsc()
1514 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_write_tsc()
1515 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_write_tsc()
1520 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_write_tsc()
1522 spin_lock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_write_tsc()
1524 kvm->arch.nr_vcpus_matched_tsc = 0; in kvm_write_tsc()
1526 kvm->arch.nr_vcpus_matched_tsc++; in kvm_write_tsc()
1530 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_write_tsc()
1654 static void pvclock_update_vm_gtod_copy(struct kvm *kvm) in pvclock_update_vm_gtod_copy() argument
1657 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy()
1662 atomic_read(&kvm->online_vcpus)); in pvclock_update_vm_gtod_copy()
1685 static void kvm_gen_update_masterclock(struct kvm *kvm) in kvm_gen_update_masterclock() argument
1690 struct kvm_arch *ka = &kvm->arch; in kvm_gen_update_masterclock()
1693 kvm_make_mclock_inprogress_request(kvm); in kvm_gen_update_masterclock()
1695 pvclock_update_vm_gtod_copy(kvm); in kvm_gen_update_masterclock()
1697 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
1701 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
1712 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update()
1783 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
1786 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1807 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1829 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1836 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1864 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_update_fn() local
1867 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_update_fn()
1875 struct kvm *kvm = v->kvm; in kvm_gen_kvmclock_update() local
1878 schedule_delayed_work(&kvm->arch.kvmclock_update_work, in kvm_gen_kvmclock_update()
1889 struct kvm *kvm = container_of(ka, struct kvm, arch); in kvmclock_sync_fn() local
1894 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
1895 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvmclock_sync_fn()
1937 struct kvm *kvm = vcpu->kvm; in xen_hvm_config() local
1939 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 in xen_hvm_config()
1940 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; in xen_hvm_config()
1941 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 in xen_hvm_config()
1942 : kvm->arch.xen_hvm_config.blob_size_32; in xen_hvm_config()
1982 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
2015 if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2023 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2102 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
2103 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
2108 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_set_msr_common()
2131 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_set_msr_common()
2152 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, in kvm_set_msr_common()
2219 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
2367 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
2458 idx = srcu_read_lock(&vcpu->kvm->srcu); in __msr_io()
2462 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __msr_io()
2513 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
2692 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
2730 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
2792 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
2802 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
3476 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) in kvm_vm_ioctl_set_tss_addr() argument
3482 ret = kvm_x86_ops->set_tss_addr(kvm, addr); in kvm_vm_ioctl_set_tss_addr()
3486 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, in kvm_vm_ioctl_set_identity_map_addr() argument
3489 kvm->arch.ept_identity_map_addr = ident_addr; in kvm_vm_ioctl_set_identity_map_addr()
3493 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, in kvm_vm_ioctl_set_nr_mmu_pages() argument
3499 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
3501 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); in kvm_vm_ioctl_set_nr_mmu_pages()
3502 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; in kvm_vm_ioctl_set_nr_mmu_pages()
3504 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
3508 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) in kvm_vm_ioctl_get_nr_mmu_pages() argument
3510 return kvm->arch.n_max_mmu_pages; in kvm_vm_ioctl_get_nr_mmu_pages()
3513 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) in kvm_vm_ioctl_get_irqchip() argument
3521 &pic_irqchip(kvm)->pics[0], in kvm_vm_ioctl_get_irqchip()
3526 &pic_irqchip(kvm)->pics[1], in kvm_vm_ioctl_get_irqchip()
3530 r = kvm_get_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_get_irqchip()
3539 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) in kvm_vm_ioctl_set_irqchip() argument
3546 spin_lock(&pic_irqchip(kvm)->lock); in kvm_vm_ioctl_set_irqchip()
3547 memcpy(&pic_irqchip(kvm)->pics[0], in kvm_vm_ioctl_set_irqchip()
3550 spin_unlock(&pic_irqchip(kvm)->lock); in kvm_vm_ioctl_set_irqchip()
3553 spin_lock(&pic_irqchip(kvm)->lock); in kvm_vm_ioctl_set_irqchip()
3554 memcpy(&pic_irqchip(kvm)->pics[1], in kvm_vm_ioctl_set_irqchip()
3557 spin_unlock(&pic_irqchip(kvm)->lock); in kvm_vm_ioctl_set_irqchip()
3560 r = kvm_set_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_set_irqchip()
3566 kvm_pic_update_irq(pic_irqchip(kvm)); in kvm_vm_ioctl_set_irqchip()
3570 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) in kvm_vm_ioctl_get_pit() argument
3572 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit()
3573 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); in kvm_vm_ioctl_get_pit()
3574 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit()
3578 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) in kvm_vm_ioctl_set_pit() argument
3581 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit()
3582 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); in kvm_vm_ioctl_set_pit()
3584 kvm_pit_load_count(kvm, i, ps->channels[i].count, 0); in kvm_vm_ioctl_set_pit()
3585 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit()
3589 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) in kvm_vm_ioctl_get_pit2() argument
3591 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
3592 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, in kvm_vm_ioctl_get_pit2()
3594 ps->flags = kvm->arch.vpit->pit_state.flags; in kvm_vm_ioctl_get_pit2()
3595 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
3600 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) in kvm_vm_ioctl_set_pit2() argument
3605 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
3606 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
3610 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, in kvm_vm_ioctl_set_pit2()
3611 sizeof(kvm->arch.vpit->pit_state.channels)); in kvm_vm_ioctl_set_pit2()
3612 kvm->arch.vpit->pit_state.flags = ps->flags; in kvm_vm_ioctl_set_pit2()
3614 kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, in kvm_vm_ioctl_set_pit2()
3616 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
3620 static int kvm_vm_ioctl_reinject(struct kvm *kvm, in kvm_vm_ioctl_reinject() argument
3623 if (!kvm->arch.vpit) in kvm_vm_ioctl_reinject()
3625 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_reinject()
3626 kvm->arch.vpit->pit_state.reinject = control->pit_reinject; in kvm_vm_ioctl_reinject()
3627 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_reinject()
3650 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) in kvm_vm_ioctl_get_dirty_log() argument
3655 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
3661 kvm_x86_ops->flush_log_dirty(kvm); in kvm_vm_ioctl_get_dirty_log()
3663 r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); in kvm_vm_ioctl_get_dirty_log()
3669 lockdep_assert_held(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
3671 kvm_flush_remote_tlbs(kvm); in kvm_vm_ioctl_get_dirty_log()
3673 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
3677 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, in kvm_vm_ioctl_irq_line() argument
3680 if (!irqchip_in_kernel(kvm)) in kvm_vm_ioctl_irq_line()
3683 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, in kvm_vm_ioctl_irq_line()
3689 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, in kvm_vm_ioctl_enable_cap() argument
3699 kvm->arch.disabled_quirks = cap->args[0]; in kvm_vm_ioctl_enable_cap()
3703 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
3708 if (irqchip_in_kernel(kvm)) in kvm_vm_ioctl_enable_cap()
3710 if (atomic_read(&kvm->online_vcpus)) in kvm_vm_ioctl_enable_cap()
3712 r = kvm_setup_empty_irq_routing(kvm); in kvm_vm_ioctl_enable_cap()
3717 kvm->arch.irqchip_split = true; in kvm_vm_ioctl_enable_cap()
3718 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; in kvm_vm_ioctl_enable_cap()
3721 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
3734 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl() local
3750 r = kvm_vm_ioctl_set_tss_addr(kvm, arg); in kvm_arch_vm_ioctl()
3758 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); in kvm_arch_vm_ioctl()
3762 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); in kvm_arch_vm_ioctl()
3765 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); in kvm_arch_vm_ioctl()
3770 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
3772 if (kvm->arch.vpic) in kvm_arch_vm_ioctl()
3775 if (atomic_read(&kvm->online_vcpus)) in kvm_arch_vm_ioctl()
3778 vpic = kvm_create_pic(kvm); in kvm_arch_vm_ioctl()
3780 r = kvm_ioapic_init(kvm); in kvm_arch_vm_ioctl()
3782 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3784 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3789 r = kvm_setup_default_irq_routing(kvm); in kvm_arch_vm_ioctl()
3791 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3792 mutex_lock(&kvm->irq_lock); in kvm_arch_vm_ioctl()
3793 kvm_ioapic_destroy(kvm); in kvm_arch_vm_ioctl()
3795 mutex_unlock(&kvm->irq_lock); in kvm_arch_vm_ioctl()
3796 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3801 kvm->arch.vpic = vpic; in kvm_arch_vm_ioctl()
3803 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
3815 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3817 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
3820 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); in kvm_arch_vm_ioctl()
3821 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
3824 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3837 if (!irqchip_in_kernel(kvm) || irqchip_split(kvm)) in kvm_arch_vm_ioctl()
3839 r = kvm_vm_ioctl_get_irqchip(kvm, chip); in kvm_arch_vm_ioctl()
3861 if (!irqchip_in_kernel(kvm) || irqchip_split(kvm)) in kvm_arch_vm_ioctl()
3863 r = kvm_vm_ioctl_set_irqchip(kvm, chip); in kvm_arch_vm_ioctl()
3876 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
3878 r = kvm_vm_ioctl_get_pit(kvm, &u.ps); in kvm_arch_vm_ioctl()
3892 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
3894 r = kvm_vm_ioctl_set_pit(kvm, &u.ps); in kvm_arch_vm_ioctl()
3899 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
3901 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); in kvm_arch_vm_ioctl()
3915 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
3917 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); in kvm_arch_vm_ioctl()
3925 r = kvm_vm_ioctl_reinject(kvm, &control); in kvm_arch_vm_ioctl()
3930 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
3931 if (atomic_read(&kvm->online_vcpus) != 0) in kvm_arch_vm_ioctl()
3934 kvm->arch.bsp_vcpu_id = arg; in kvm_arch_vm_ioctl()
3935 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
3939 if (copy_from_user(&kvm->arch.xen_hvm_config, argp, in kvm_arch_vm_ioctl()
3943 if (kvm->arch.xen_hvm_config.flags) in kvm_arch_vm_ioctl()
3966 kvm->arch.kvmclock_offset = delta; in kvm_arch_vm_ioctl()
3967 kvm_gen_update_masterclock(kvm); in kvm_arch_vm_ioctl()
3976 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; in kvm_arch_vm_ioctl()
3993 r = kvm_vm_ioctl_enable_cap(kvm, &cap); in kvm_arch_vm_ioctl()
3997 r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg); in kvm_arch_vm_ioctl()
5141 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5156 spin_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
5157 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
5158 spin_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
5161 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5171 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5220 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
5513 struct kvm *kvm; in kvmclock_cpufreq_notifier() local
5564 list_for_each_entry(kvm, &vm_list, vm_list) { in kvmclock_cpufreq_notifier()
5565 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_cpufreq_notifier()
5726 struct kvm *kvm; in pvclock_gtod_update_fn() local
5732 list_for_each_entry(kvm, &vm_list, vm_list) in pvclock_gtod_update_fn()
5733 kvm_for_each_vcpu(i, vcpu, kvm) in pvclock_gtod_update_fn()
5868 static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) in kvm_pv_kick_cpu_op() argument
5878 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); in kvm_pv_kick_cpu_op()
5888 if (kvm_hv_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
5918 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); in kvm_emulate_hypercall()
5948 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
5960 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
6315 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
6340 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); in kvm_vcpu_reload_apic_access_page()
6353 void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, in kvm_arch_mmu_notifier_invalidate_page() argument
6360 if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT)) in kvm_arch_mmu_notifier_invalidate_page()
6361 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); in kvm_arch_mmu_notifier_invalidate_page()
6384 kvm_gen_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
6504 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in vcpu_enter_guest()
6519 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
6594 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
6621 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) in vcpu_block() argument
6625 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_block()
6627 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_block()
6663 struct kvm *kvm = vcpu->kvm; in vcpu_run() local
6665 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
6671 r = vcpu_block(kvm, vcpu); in vcpu_run()
6698 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
6700 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
6704 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
6712 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in complete_emulated_io()
6714 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in complete_emulated_io()
7048 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_set_sregs()
7053 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_set_sregs()
7154 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
7156 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
7262 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, in kvm_arch_vcpu_create() argument
7267 if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) in kvm_arch_vcpu_create()
7272 vcpu = kvm_x86_ops->vcpu_create(kvm, id); in kvm_arch_vcpu_create()
7294 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate() local
7307 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvm_arch_vcpu_postcreate()
7378 struct kvm *kvm; in kvm_arch_hardware_enable() local
7393 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_arch_hardware_enable()
7394 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
7446 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_arch_hardware_enable()
7447 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
7459 kvm->arch.last_tsc_nsec = 0; in kvm_arch_hardware_enable()
7460 kvm->arch.last_tsc_write = 0; in kvm_arch_hardware_enable()
7511 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
7522 return irqchip_in_kernel(vcpu->kvm) == lapic_in_kernel(vcpu); in kvm_vcpu_compatible()
7530 struct kvm *kvm; in kvm_arch_vcpu_init() local
7533 BUG_ON(vcpu->kvm == NULL); in kvm_arch_vcpu_init()
7534 kvm = vcpu->kvm; in kvm_arch_vcpu_init()
7538 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_init()
7556 if (irqchip_in_kernel(kvm)) { in kvm_arch_vcpu_init()
7614 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_uninit()
7616 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_uninit()
7627 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
7632 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); in kvm_arch_init_vm()
7633 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); in kvm_arch_init_vm()
7634 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); in kvm_arch_init_vm()
7635 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); in kvm_arch_init_vm()
7636 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
7639 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
7642 &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
7644 raw_spin_lock_init(&kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
7645 mutex_init(&kvm->arch.apic_map_lock); in kvm_arch_init_vm()
7646 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); in kvm_arch_init_vm()
7648 pvclock_update_vm_gtod_copy(kvm); in kvm_arch_init_vm()
7650 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); in kvm_arch_init_vm()
7651 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); in kvm_arch_init_vm()
7665 static void kvm_free_vcpus(struct kvm *kvm) in kvm_free_vcpus() argument
7673 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_free_vcpus()
7677 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
7680 mutex_lock(&kvm->lock); in kvm_free_vcpus()
7681 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
7682 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
7684 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
7685 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
7688 void kvm_arch_sync_events(struct kvm *kvm) in kvm_arch_sync_events() argument
7690 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); in kvm_arch_sync_events()
7691 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); in kvm_arch_sync_events()
7692 kvm_free_all_assigned_devices(kvm); in kvm_arch_sync_events()
7693 kvm_free_pit(kvm); in kvm_arch_sync_events()
7696 int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) in __x86_set_memory_region() argument
7700 struct kvm_memslots *slots = kvm_memslots(kvm); in __x86_set_memory_region()
7736 r = __kvm_set_memory_region(kvm, &m); in __x86_set_memory_region()
7750 int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) in x86_set_memory_region() argument
7754 mutex_lock(&kvm->slots_lock); in x86_set_memory_region()
7755 r = __x86_set_memory_region(kvm, id, gpa, size); in x86_set_memory_region()
7756 mutex_unlock(&kvm->slots_lock); in x86_set_memory_region()
7762 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
7764 if (current->mm == kvm->mm) { in kvm_arch_destroy_vm()
7770 x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0); in kvm_arch_destroy_vm()
7771 x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0); in kvm_arch_destroy_vm()
7772 x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); in kvm_arch_destroy_vm()
7774 kvm_iommu_unmap_guest(kvm); in kvm_arch_destroy_vm()
7775 kfree(kvm->arch.vpic); in kvm_arch_destroy_vm()
7776 kfree(kvm->arch.vioapic); in kvm_arch_destroy_vm()
7777 kvm_free_vcpus(kvm); in kvm_arch_destroy_vm()
7778 kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); in kvm_arch_destroy_vm()
7781 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, in kvm_arch_free_memslot() argument
7802 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, in kvm_arch_create_memslot() argument
7861 void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) in kvm_arch_memslots_updated() argument
7867 kvm_mmu_invalidate_mmio_sptes(kvm, slots); in kvm_arch_memslots_updated()
7870 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
7878 static void kvm_mmu_slot_apply_flags(struct kvm *kvm, in kvm_mmu_slot_apply_flags() argument
7883 kvm_mmu_slot_remove_write_access(kvm, new); in kvm_mmu_slot_apply_flags()
7919 kvm_x86_ops->slot_enable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
7921 kvm_mmu_slot_remove_write_access(kvm, new); in kvm_mmu_slot_apply_flags()
7924 kvm_x86_ops->slot_disable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
7928 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
7936 if (!kvm->arch.n_requested_mmu_pages) in kvm_arch_commit_memory_region()
7937 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); in kvm_arch_commit_memory_region()
7940 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); in kvm_arch_commit_memory_region()
7957 kvm_mmu_zap_collapsible_sptes(kvm, new); in kvm_arch_commit_memory_region()
7970 kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new); in kvm_arch_commit_memory_region()
7973 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all() argument
7975 kvm_mmu_invalidate_zap_all_pages(kvm); in kvm_arch_flush_shadow_all()
7978 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, in kvm_arch_flush_shadow_memslot() argument
7981 kvm_mmu_invalidate_zap_all_pages(kvm); in kvm_arch_flush_shadow_memslot()
8150 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, in apf_put_user()
8209 void kvm_arch_start_assignment(struct kvm *kvm) in kvm_arch_start_assignment() argument
8211 atomic_inc(&kvm->arch.assigned_device_count); in kvm_arch_start_assignment()
8215 void kvm_arch_end_assignment(struct kvm *kvm) in kvm_arch_end_assignment() argument
8217 atomic_dec(&kvm->arch.assigned_device_count); in kvm_arch_end_assignment()
8221 bool kvm_arch_has_assigned_device(struct kvm *kvm) in kvm_arch_has_assigned_device() argument
8223 return atomic_read(&kvm->arch.assigned_device_count); in kvm_arch_has_assigned_device()
8227 void kvm_arch_register_noncoherent_dma(struct kvm *kvm) in kvm_arch_register_noncoherent_dma() argument
8229 atomic_inc(&kvm->arch.noncoherent_dma_count); in kvm_arch_register_noncoherent_dma()
8233 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) in kvm_arch_unregister_noncoherent_dma() argument
8235 atomic_dec(&kvm->arch.noncoherent_dma_count); in kvm_arch_unregister_noncoherent_dma()
8239 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) in kvm_arch_has_noncoherent_dma() argument
8241 return atomic_read(&kvm->arch.noncoherent_dma_count); in kvm_arch_has_noncoherent_dma()
8253 return kvm_x86_ops->update_pi_irte(irqfd->kvm, in kvm_arch_irq_bypass_add_producer()
8281 ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); in kvm_arch_irq_bypass_del_producer()
8287 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, in kvm_arch_update_irqfd_routing() argument
8293 return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set); in kvm_arch_update_irqfd_routing()
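
The references above repeatedly show the same per-VM update pattern: walk every vCPU of a struct kvm with kvm_for_each_vcpu() and queue a request that the vCPU acts on at its next guest entry, as kvm_gen_update_masterclock() (lines 1685-1701) and kvmclock_update_fn() (lines 1864-1867) do. A minimal sketch of that pattern follows; the helper name mark_all_vcpus_for_clock_update() is hypothetical, but kvm_for_each_vcpu(), kvm_make_request(), and kvm_vcpu_kick() are the existing KVM APIs visible in the listing.

    #include <linux/kvm_host.h>

    /*
     * Illustrative sketch only: the helper name is made up, but the body
     * mirrors the pattern used by kvmclock_update_fn() and
     * kvm_gen_update_masterclock() in the lines listed above.
     */
    static void mark_all_vcpus_for_clock_update(struct kvm *kvm)
    {
            struct kvm_vcpu *vcpu;
            int i;

            /* Queue a clock update on every vCPU of this VM ... */
            kvm_for_each_vcpu(i, vcpu, kvm) {
                    kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                    /* ... and kick the vCPU so a running guest notices the request promptly. */
                    kvm_vcpu_kick(vcpu);
            }
    }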