Lines matching refs:vcpu in virt/kvm/kvm_main.c (each entry gives the source line number, the matching code, and the enclosing function; "argument" and "local" mark how vcpu is bound in that function)
125 int vcpu_load(struct kvm_vcpu *vcpu) in vcpu_load() argument
129 if (mutex_lock_killable(&vcpu->mutex)) in vcpu_load()
132 preempt_notifier_register(&vcpu->preempt_notifier); in vcpu_load()
133 kvm_arch_vcpu_load(vcpu, cpu); in vcpu_load()
138 void vcpu_put(struct kvm_vcpu *vcpu) in vcpu_put() argument
141 kvm_arch_vcpu_put(vcpu); in vcpu_put()
142 preempt_notifier_unregister(&vcpu->preempt_notifier); in vcpu_put()
144 mutex_unlock(&vcpu->mutex); in vcpu_put()
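
These two entries bracket every vCPU operation: vcpu_load() takes the per-vCPU mutex and registers the preempt notifier before handing off to the arch hook, and vcpu_put() undoes both in reverse order. A sketch filling the elided lines; the get_cpu()/put_cpu() and preempt_disable()/preempt_enable() brackets are inferred from the notifier registration rather than shown in the hits above:

	int vcpu_load(struct kvm_vcpu *vcpu)
	{
		int cpu;

		if (mutex_lock_killable(&vcpu->mutex))
			return -EINTR;		/* interrupted by a fatal signal */
		cpu = get_cpu();		/* stay on one CPU while loading arch state */
		preempt_notifier_register(&vcpu->preempt_notifier);
		kvm_arch_vcpu_load(vcpu, cpu);
		put_cpu();
		return 0;
	}

	void vcpu_put(struct kvm_vcpu *vcpu)
	{
		preempt_disable();		/* save arch state without migrating */
		kvm_arch_vcpu_put(vcpu);
		preempt_notifier_unregister(&vcpu->preempt_notifier);
		preempt_enable();
		mutex_unlock(&vcpu->mutex);
	}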
156 struct kvm_vcpu *vcpu; in kvm_make_all_cpus_request() local
161 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_make_all_cpus_request()
162 kvm_make_request(req, vcpu); in kvm_make_all_cpus_request()
163 cpu = vcpu->cpu; in kvm_make_all_cpus_request()
169 kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE) in kvm_make_all_cpus_request()
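
kvm_make_all_cpus_request() posts a request bit on every vCPU, then IPIs only the CPUs whose vCPU may still be inside the guest, forcing a VM exit so the request is noticed promptly. A condensed sketch of the loop; the cpumask bookkeeping and the ack_flush callback around the hits are assumptions filled in from context:

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		/* Set ->requests bit before we read ->mode */
		smp_mb();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
			cpumask_set_cpu(cpu, cpus);	/* still in guest: IPI it */
	}
	if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);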
211 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) in kvm_vcpu_init() argument
216 mutex_init(&vcpu->mutex); in kvm_vcpu_init()
217 vcpu->cpu = -1; in kvm_vcpu_init()
218 vcpu->kvm = kvm; in kvm_vcpu_init()
219 vcpu->vcpu_id = id; in kvm_vcpu_init()
220 vcpu->pid = NULL; in kvm_vcpu_init()
221 init_waitqueue_head(&vcpu->wq); in kvm_vcpu_init()
222 kvm_async_pf_vcpu_init(vcpu); in kvm_vcpu_init()
229 vcpu->run = page_address(page); in kvm_vcpu_init()
231 kvm_vcpu_set_in_spin_loop(vcpu, false); in kvm_vcpu_init()
232 kvm_vcpu_set_dy_eligible(vcpu, false); in kvm_vcpu_init()
233 vcpu->preempted = false; in kvm_vcpu_init()
235 r = kvm_arch_vcpu_init(vcpu); in kvm_vcpu_init()
241 free_page((unsigned long)vcpu->run); in kvm_vcpu_init()
247 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) in kvm_vcpu_uninit() argument
249 put_pid(vcpu->pid); in kvm_vcpu_uninit()
250 kvm_arch_vcpu_uninit(vcpu); in kvm_vcpu_uninit()
251 free_page((unsigned long)vcpu->run); in kvm_vcpu_uninit()
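
The init hits trace kvm_vcpu_init()'s straight-line setup; the two free_page() hits (source lines 241 and 251) are the error path and the final teardown respectively. A sketch of the elided control flow, assuming the usual goto-unwind style (the alloc_page() between source lines 222 and 229 is inferred from the page_address() call):

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);	/* shared kvm_run area, mmap'd by userspace */

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;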
1763 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) in kvm_vcpu_check_block() argument
1765 if (kvm_arch_vcpu_runnable(vcpu)) { in kvm_vcpu_check_block()
1766 kvm_make_request(KVM_REQ_UNHALT, vcpu); in kvm_vcpu_check_block()
1769 if (kvm_cpu_has_pending_timer(vcpu)) in kvm_vcpu_check_block()
1780 void kvm_vcpu_block(struct kvm_vcpu *vcpu) in kvm_vcpu_block() argument
1795 if (kvm_vcpu_check_block(vcpu) < 0) { in kvm_vcpu_block()
1796 ++vcpu->stat.halt_successful_poll; in kvm_vcpu_block()
1804 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); in kvm_vcpu_block()
1806 if (kvm_vcpu_check_block(vcpu) < 0) in kvm_vcpu_block()
1813 finish_wait(&vcpu->wq, &wait); in kvm_vcpu_block()
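
kvm_vcpu_check_block() returns a negative value as soon as the vCPU has a reason to run, and kvm_vcpu_block() calls it twice: once in the optional busy-poll phase (hence the halt_successful_poll counter) and again inside the classic prepare_to_wait()/schedule() loop. A sketch; the signal_pending() check is an assumption consistent with the TASK_INTERRUPTIBLE sleep:

	static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
	{
		if (kvm_arch_vcpu_runnable(vcpu)) {
			kvm_make_request(KVM_REQ_UNHALT, vcpu);
			return -EINTR;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			return -EINTR;
		if (signal_pending(current))
			return -EINTR;
		return 0;
	}

	/* in kvm_vcpu_block(), after the polling phase: */
	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
		if (kvm_vcpu_check_block(vcpu) < 0)
			break;		/* wakeup condition already true */
		schedule();		/* sleep until kvm_vcpu_kick() wakes us */
	}
	finish_wait(&vcpu->wq, &wait);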
1825 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) in kvm_vcpu_kick() argument
1828 int cpu = vcpu->cpu; in kvm_vcpu_kick()
1831 wqp = kvm_arch_vcpu_wq(vcpu); in kvm_vcpu_kick()
1834 ++vcpu->stat.halt_wakeup; in kvm_vcpu_kick()
1839 if (kvm_arch_vcpu_should_kick(vcpu)) in kvm_vcpu_kick()
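
kvm_vcpu_kick() is the other half of the blocking protocol: wake the waitqueue if the vCPU is halted, and send a reschedule IPI if it is currently running in guest mode on another CPU. A reconstruction; the range and online checks guarding the IPI are assumed from context:

	void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
	{
		int me;
		int cpu = vcpu->cpu;
		wait_queue_head_t *wqp;

		wqp = kvm_arch_vcpu_wq(vcpu);
		if (waitqueue_active(wqp)) {
			wake_up_interruptible(wqp);	/* lift kvm_vcpu_block() */
			++vcpu->stat.halt_wakeup;
		}

		me = get_cpu();
		if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
			if (kvm_arch_vcpu_should_kick(vcpu))
				smp_send_reschedule(cpu);	/* force a VM exit */
		put_cpu();
	}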
1888 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) in kvm_vcpu_eligible_for_directed_yield() argument
1893 eligible = !vcpu->spin_loop.in_spin_loop || in kvm_vcpu_eligible_for_directed_yield()
1894 vcpu->spin_loop.dy_eligible; in kvm_vcpu_eligible_for_directed_yield()
1896 if (vcpu->spin_loop.in_spin_loop) in kvm_vcpu_eligible_for_directed_yield()
1897 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); in kvm_vcpu_eligible_for_directed_yield()
1908 struct kvm_vcpu *vcpu; in kvm_vcpu_on_spin() local
1924 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vcpu_on_spin()
1930 if (!ACCESS_ONCE(vcpu->preempted)) in kvm_vcpu_on_spin()
1932 if (vcpu == me) in kvm_vcpu_on_spin()
1934 if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) in kvm_vcpu_on_spin()
1936 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) in kvm_vcpu_on_spin()
1939 yielded = kvm_vcpu_yield_to(vcpu); in kvm_vcpu_on_spin()
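
The on-spin hits show the candidate filter for directed yield (pause-loop exiting): skip vCPUs that were not preempted, skip ourselves, skip halted vCPUs with nothing pending, and skip those the eligibility heuristic rejects; the first survivor gets kvm_vcpu_yield_to(). The dy_eligible flip at source line 1897 makes a vCPU that is itself spinning eligible only on alternate probes. A condensed sketch of the scan (the two-pass rotation over last_boosted_vcpu is elided):

	kvm_vcpu_set_in_spin_loop(me, true);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!ACCESS_ONCE(vcpu->preempted))
			continue;	/* still running, no point yielding to it */
		if (vcpu == me)
			continue;
		if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
			continue;	/* halted and nothing pending */
		if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
			continue;	/* heuristic: probably spinning too */

		yielded = kvm_vcpu_yield_to(vcpu);
		if (yielded > 0)
			break;		/* likely the lock holder; done */
	}
	kvm_vcpu_set_in_spin_loop(me, false);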
1959 struct kvm_vcpu *vcpu = vma->vm_file->private_data; in kvm_vcpu_fault() local
1963 page = virt_to_page(vcpu->run); in kvm_vcpu_fault()
1966 page = virt_to_page(vcpu->arch.pio_data); in kvm_vcpu_fault()
1970 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); in kvm_vcpu_fault()
1973 return kvm_arch_vcpu_fault(vcpu, vmf); in kvm_vcpu_fault()
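
kvm_vcpu_fault() backs mmap() of the vCPU fd: page offset 0 resolves to the shared kvm_run structure, and on x86 the PIO data page and the coalesced-MMIO ring sit at fixed page offsets behind it. A reconstruction with the get_page()/vmf bookkeeping filled in:

	static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct kvm_vcpu *vcpu = vma->vm_file->private_data;
		struct page *page;

		if (vmf->pgoff == 0)
			page = virt_to_page(vcpu->run);
	#ifdef CONFIG_X86
		else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
			page = virt_to_page(vcpu->arch.pio_data);
	#endif
	#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
			page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
	#endif
		else
			return kvm_arch_vcpu_fault(vcpu, vmf);
		get_page(page);
		vmf->page = page;
		return 0;
	}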
1991 struct kvm_vcpu *vcpu = filp->private_data; in kvm_vcpu_release() local
1993 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_release()
2010 static int create_vcpu_fd(struct kvm_vcpu *vcpu) in create_vcpu_fd() argument
2012 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); in create_vcpu_fd()
2021 struct kvm_vcpu *vcpu, *v; in kvm_vm_ioctl_create_vcpu() local
2026 vcpu = kvm_arch_vcpu_create(kvm, id); in kvm_vm_ioctl_create_vcpu()
2027 if (IS_ERR(vcpu)) in kvm_vm_ioctl_create_vcpu()
2028 return PTR_ERR(vcpu); in kvm_vm_ioctl_create_vcpu()
2030 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); in kvm_vm_ioctl_create_vcpu()
2032 r = kvm_arch_vcpu_setup(vcpu); in kvm_vm_ioctl_create_vcpu()
2037 if (!kvm_vcpu_compatible(vcpu)) { in kvm_vm_ioctl_create_vcpu()
2056 r = create_vcpu_fd(vcpu); in kvm_vm_ioctl_create_vcpu()
2062 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; in kvm_vm_ioctl_create_vcpu()
2067 kvm_arch_vcpu_postcreate(vcpu); in kvm_vm_ioctl_create_vcpu()
2073 kvm_arch_vcpu_destroy(vcpu); in kvm_vm_ioctl_create_vcpu()
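
From userspace, this whole creation path is driven by the KVM_CREATE_VCPU ioctl on the VM fd, which returns the anonymous "kvm-vcpu" fd created above; mapping that fd exposes the kvm_run page served by kvm_vcpu_fault(). A minimal runnable sketch against the public KVM ABI (error handling abbreviated; assumes /dev/kvm is accessible):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);		/* VM fd */
		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);	/* vcpu_id 0 */

		/* size of the region kvm_vcpu_fault() serves (at least one page) */
		long sz = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
		struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
					   MAP_SHARED, vcpu, 0);
		return run == MAP_FAILED;
	}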
2077 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) in kvm_vcpu_ioctl_set_sigmask() argument
2081 vcpu->sigset_active = 1; in kvm_vcpu_ioctl_set_sigmask()
2082 vcpu->sigset = *sigset; in kvm_vcpu_ioctl_set_sigmask()
2084 vcpu->sigset_active = 0; in kvm_vcpu_ioctl_set_sigmask()
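
The sigmask helper only caches the set on the vCPU; it is applied around KVM_RUN so a signal can kick the vCPU out of the guest without being delivered anywhere else. The reconstruction below assumes the usual filtering of unblockable signals:

	static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
	{
		if (sigset) {
			/* SIGKILL and SIGSTOP can never be blocked */
			sigdelsetmask(sigset, sigmask(SIGKILL) | sigmask(SIGSTOP));
			vcpu->sigset_active = 1;
			vcpu->sigset = *sigset;
		} else
			vcpu->sigset_active = 0;
		return 0;
	}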
2091 struct kvm_vcpu *vcpu = filp->private_data; in kvm_vcpu_ioctl() local
2097 if (vcpu->kvm->mm != current->mm) in kvm_vcpu_ioctl()
2113 r = vcpu_load(vcpu); in kvm_vcpu_ioctl()
2121 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { in kvm_vcpu_ioctl()
2123 struct pid *oldpid = vcpu->pid; in kvm_vcpu_ioctl()
2126 rcu_assign_pointer(vcpu->pid, newpid); in kvm_vcpu_ioctl()
2131 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); in kvm_vcpu_ioctl()
2132 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); in kvm_vcpu_ioctl()
2141 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); in kvm_vcpu_ioctl()
2161 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); in kvm_vcpu_ioctl()
2170 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); in kvm_vcpu_ioctl()
2186 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); in kvm_vcpu_ioctl()
2192 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); in kvm_vcpu_ioctl()
2207 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); in kvm_vcpu_ioctl()
2216 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); in kvm_vcpu_ioctl()
2231 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); in kvm_vcpu_ioctl()
2254 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); in kvm_vcpu_ioctl()
2262 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); in kvm_vcpu_ioctl()
2278 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); in kvm_vcpu_ioctl()
2285 vcpu_put(vcpu); in kvm_vcpu_ioctl()
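
Inside kvm_vcpu_ioctl(), the KVM_RUN case also re-snapshots the caller's struct pid (source lines 2121 to 2126) so that yield_to() from other vCPUs targets the right task even if the fd changed hands, and the exit reason is traced via kvm_userspace_exit. From userspace the dispatcher reduces to a run loop; a sketch continuing the creation example above (reuses its vcpu fd and run mapping):

	for (;;) {
		if (ioctl(vcpu, KVM_RUN, 0) < 0)
			break;			/* e.g. -EINTR on a pending signal */
		switch (run->exit_reason) {
		case KVM_EXIT_HLT:
			return 0;		/* guest executed HLT */
		case KVM_EXIT_IO:
			/* PIO data sits at (char *)run + run->io.data_offset */
			break;
		default:
			return 1;		/* unhandled exit */
		}
	}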
2295 struct kvm_vcpu *vcpu = filp->private_data; in kvm_vcpu_compat_ioctl() local
2299 if (vcpu->kvm->mm != current->mm) in kvm_vcpu_compat_ioctl()
2322 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); in kvm_vcpu_compat_ioctl()
2324 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); in kvm_vcpu_compat_ioctl()
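
The compat path converts a 32-bit compat_sigset_t before reaching the same helper. From a 64-bit process, KVM_SET_SIGNAL_MASK takes a struct kvm_signal_mask header followed by the set itself; note the kernel insists on its own sigset size (8 bytes on x86-64), not glibc's much larger sigset_t. A hedged userspace sketch:

	#include <signal.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	#define KERNEL_SIGSET_SIZE 8	/* sizeof(sigset_t) in the kernel, x86-64 */

	static int set_run_sigmask(int vcpu_fd, const sigset_t *set)
	{
		struct kvm_signal_mask *mask;
		int r;

		mask = malloc(sizeof(*mask) + KERNEL_SIGSET_SIZE);
		mask->len = KERNEL_SIGSET_SIZE;	/* other lengths get -EINVAL */
		/* first 8 bytes of glibc's sigset_t cover signals 1..64 */
		memcpy(mask->sigset, set, KERNEL_SIGSET_SIZE);
		r = ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, mask);
		free(mask);
		return r;
	}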
3005 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, in __kvm_io_bus_write() argument
3016 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, in __kvm_io_bus_write()
3026 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_write() argument
3038 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write()
3039 r = __kvm_io_bus_write(vcpu, bus, &range, val); in kvm_io_bus_write()
3044 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, in kvm_io_bus_write_cookie() argument
3055 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write_cookie()
3060 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, in kvm_io_bus_write_cookie()
3068 return __kvm_io_bus_write(vcpu, bus, &range, val); in kvm_io_bus_write_cookie()
3071 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, in __kvm_io_bus_read() argument
3082 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, in __kvm_io_bus_read()
3093 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_read() argument
3105 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_read()
3106 r = __kvm_io_bus_read(vcpu, bus, &range, val); in kvm_io_bus_read()
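
Both bus helpers share one pattern: the caller builds a kvm_io_range, dereferences the bus under the VM's SRCU read lock, and the __ variant walks every device registered over that range until one accepts the access. A condensed sketch of the write side; kvm_io_bus_get_first_dev() and kvm_io_bus_cmp() are the sorted-range lookup helpers assumed from this era of the code:

	static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
				      struct kvm_io_range *range, const void *val)
	{
		int idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);

		if (idx < 0)
			return -EOPNOTSUPP;	/* no device claims this address */

		/* ranges are sorted; try each device overlapping [addr, addr+len) */
		while (idx < bus->dev_count &&
		       kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
			if (!kvm_iodevice_write(vcpu, bus->range[idx].dev,
						range->addr, range->len, val))
				return idx;	/* device handled the write */
			idx++;
		}
		return -EOPNOTSUPP;
	}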
3193 struct kvm_vcpu *vcpu; in vcpu_stat_get() local
3199 kvm_for_each_vcpu(i, vcpu, kvm) in vcpu_stat_get()
3200 *val += *(u32 *)((void *)vcpu + offset); in vcpu_stat_get()
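
vcpu_stat_get() implements the per-vCPU debugfs counters generically: the file's private data is a byte offset into struct kvm_vcpu (for example offsetof(struct kvm_vcpu, stat.halt_wakeup)), and the reader sums that u32 across every vCPU of every VM. A sketch with the locking filled in:

	static int vcpu_stat_get(void *_offset, u64 *val)
	{
		unsigned offset = (long)_offset;	/* offset into struct kvm_vcpu */
		struct kvm *kvm;
		struct kvm_vcpu *vcpu;
		int i;

		*val = 0;
		spin_lock(&kvm_lock);
		list_for_each_entry(kvm, &vm_list, vm_list)
			kvm_for_each_vcpu(i, vcpu, kvm)
				*val += *(u32 *)((void *)vcpu + offset);
		spin_unlock(&kvm_lock);
		return 0;
	}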
3275 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); in kvm_sched_in() local
3277 if (vcpu->preempted) in kvm_sched_in()
3278 vcpu->preempted = false; in kvm_sched_in()
3280 kvm_arch_sched_in(vcpu, cpu); in kvm_sched_in()
3282 kvm_arch_vcpu_load(vcpu, cpu); in kvm_sched_in()
3288 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); in kvm_sched_out() local
3291 vcpu->preempted = true; in kvm_sched_out()
3292 kvm_arch_vcpu_put(vcpu); in kvm_sched_out()
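
These notifier hooks, wired up via the preempt_notifier_init() at source line 2030, tie vCPU state to scheduler events: on sched-out the arch state is saved and, if the task was still runnable, the vCPU is marked preempted so kvm_vcpu_on_spin() can find it; on sched-in the flag clears and the arch state reloads. The sched-in side is fully visible above; the sched-out reconstruction below assumes the TASK_RUNNING test implied by the "preempted" semantics:

	static void kvm_sched_out(struct preempt_notifier *pn,
				  struct task_struct *next)
	{
		struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

		if (current->state == TASK_RUNNING)
			vcpu->preempted = true;	/* descheduled involuntarily */
		kvm_arch_vcpu_put(vcpu);
	}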