Lines Matching refs:vcpu (virt/kvm/kvm_main.c)

133 int vcpu_load(struct kvm_vcpu *vcpu)  in vcpu_load()  argument
137 if (mutex_lock_killable(&vcpu->mutex)) in vcpu_load()
140 preempt_notifier_register(&vcpu->preempt_notifier); in vcpu_load()
141 kvm_arch_vcpu_load(vcpu, cpu); in vcpu_load()
146 void vcpu_put(struct kvm_vcpu *vcpu) in vcpu_put() argument
149 kvm_arch_vcpu_put(vcpu); in vcpu_put()
150 preempt_notifier_unregister(&vcpu->preempt_notifier); in vcpu_put()
152 mutex_unlock(&vcpu->mutex); in vcpu_put()
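
These two functions bracket every per-vCPU operation: vcpu_load() takes vcpu->mutex (killably, so it can fail with -EINTR), registers the preempt notifier, and loads arch state onto the current pCPU; vcpu_put() undoes all of it. A minimal caller sketch (the function name and body are illustrative, not from the source):

    static int do_vcpu_work(struct kvm_vcpu *vcpu)
    {
            int r;

            r = vcpu_load(vcpu);    /* takes vcpu->mutex, may return -EINTR */
            if (r)
                    return r;
            /* arch state for this vCPU is now resident on this pCPU */
            vcpu_put(vcpu);         /* kvm_arch_vcpu_put() + mutex_unlock() */
            return 0;
    }
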
164 struct kvm_vcpu *vcpu; in kvm_make_all_cpus_request() local
169 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_make_all_cpus_request()
170 kvm_make_request(req, vcpu); in kvm_make_all_cpus_request()
171 cpu = vcpu->cpu; in kvm_make_all_cpus_request()
177 kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE) in kvm_make_all_cpus_request()
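
kvm_make_all_cpus_request() sets the request bit on every vCPU, then IPIs only the vCPUs still inside the guest, so each one re-checks its requests on the next entry. The consumer side is test-and-clear via kvm_check_request(); a hedged sketch of the pattern (the flush helper is a stand-in for whatever the arch does):

    /* In the vcpu entry path, before reentering the guest: */
    if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
            arch_flush_vcpu_tlb(vcpu);      /* hypothetical arch handler */
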
219 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) in kvm_vcpu_init() argument
224 mutex_init(&vcpu->mutex); in kvm_vcpu_init()
225 vcpu->cpu = -1; in kvm_vcpu_init()
226 vcpu->kvm = kvm; in kvm_vcpu_init()
227 vcpu->vcpu_id = id; in kvm_vcpu_init()
228 vcpu->pid = NULL; in kvm_vcpu_init()
229 vcpu->halt_poll_ns = 0; in kvm_vcpu_init()
230 init_waitqueue_head(&vcpu->wq); in kvm_vcpu_init()
231 kvm_async_pf_vcpu_init(vcpu); in kvm_vcpu_init()
233 vcpu->pre_pcpu = -1; in kvm_vcpu_init()
234 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list); in kvm_vcpu_init()
241 vcpu->run = page_address(page); in kvm_vcpu_init()
243 kvm_vcpu_set_in_spin_loop(vcpu, false); in kvm_vcpu_init()
244 kvm_vcpu_set_dy_eligible(vcpu, false); in kvm_vcpu_init()
245 vcpu->preempted = false; in kvm_vcpu_init()
247 r = kvm_arch_vcpu_init(vcpu); in kvm_vcpu_init()
253 free_page((unsigned long)vcpu->run); in kvm_vcpu_init()
259 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) in kvm_vcpu_uninit() argument
261 put_pid(vcpu->pid); in kvm_vcpu_uninit()
262 kvm_arch_vcpu_uninit(vcpu); in kvm_vcpu_uninit()
263 free_page((unsigned long)vcpu->run); in kvm_vcpu_uninit()
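
kvm_vcpu_init() sets up the arch-independent half of a vCPU (mutex, wait queue, halt-poll state, the shared vcpu->run page) before calling into kvm_arch_vcpu_init(); kvm_vcpu_uninit() reverses it. A rough sketch of how an arch create path uses the pair (simplified; real archs embed struct kvm_vcpu inside a larger arch-specific struct allocated from kvm_vcpu_cache):

    static struct kvm_vcpu *example_vcpu_create(struct kvm *kvm, unsigned id)
    {
            struct kvm_vcpu *vcpu;
            int r;

            vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
            if (!vcpu)
                    return ERR_PTR(-ENOMEM);

            r = kvm_vcpu_init(vcpu, kvm, id);   /* common state + run page */
            if (r) {
                    kmem_cache_free(kvm_vcpu_cache, vcpu);
                    return ERR_PTR(r);
            }
            return vcpu;
    }
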
1163 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot() argument
1165 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn); in kvm_vcpu_gfn_to_memslot()
1243 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_hva() argument
1245 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); in kvm_vcpu_gfn_to_hva()
1271 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) in kvm_vcpu_gfn_to_hva_prot() argument
1273 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_gfn_to_hva_prot()
1493 pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_pfn_atomic() argument
1495 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); in kvm_vcpu_gfn_to_pfn_atomic()
1505 pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_pfn() argument
1507 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn); in kvm_vcpu_gfn_to_pfn()
1551 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_page() argument
1555 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); in kvm_vcpu_gfn_to_page()
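
The kvm_vcpu_gfn_to_* family mirrors the plain gfn_to_* helpers but resolves through kvm_vcpu_memslots(vcpu), i.e. the memslot address space this vCPU currently sees (on x86, SMM has its own set). A hedged example of the translation chain:

    gfn_t gfn = gpa_to_gfn(gpa);                        /* GPA -> GFN */
    unsigned long hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); /* GFN -> user VA */

    if (kvm_is_error_hva(hva))
            return -EFAULT;         /* no memslot covers this GFN */
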
1647 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, in kvm_vcpu_read_guest_page() argument
1650 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_read_guest_page()
1676 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) in kvm_vcpu_read_guest() argument
1684 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); in kvm_vcpu_read_guest()
1724 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, in kvm_vcpu_read_guest_atomic() argument
1728 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_read_guest_atomic()
1760 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_vcpu_write_guest_page() argument
1763 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_write_guest_page()
1790 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, in kvm_vcpu_write_guest() argument
1799 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); in kvm_vcpu_write_guest()
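
kvm_vcpu_read_guest() and kvm_vcpu_write_guest() split the GPA range at page boundaries and call the *_page helpers once per segment, so callers can copy objects that straddle pages. Illustrative use (variable names are ours):

    u64 desc;

    if (kvm_vcpu_read_guest(vcpu, gpa, &desc, sizeof(desc)))
            return -EFAULT;         /* missing memslot or faulting copy */
    desc |= 1;                      /* illustrative in-place update */
    if (kvm_vcpu_write_guest(vcpu, gpa, &desc, sizeof(desc)))
            return -EFAULT;
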
1945 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_mark_page_dirty() argument
1949 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); in kvm_vcpu_mark_page_dirty()
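
kvm_vcpu_mark_page_dirty() is for callers that bypass the write helpers above (which update the dirty bitmap themselves) and write through a raw mapping instead. A sketch of that pattern, assuming the caller wants dirty logging to stay coherent:

    struct page *page = kvm_vcpu_gfn_to_page(vcpu, gfn);
    void *kaddr;

    if (is_error_page(page))
            return -EFAULT;
    kaddr = kmap(page);
    memset(kaddr, 0, PAGE_SIZE);            /* illustrative direct write */
    kunmap(page);
    kvm_vcpu_mark_page_dirty(vcpu, gfn);    /* dirty log would miss this */
    kvm_release_page_dirty(page);
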
1954 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) in grow_halt_poll_ns() argument
1958 old = val = vcpu->halt_poll_ns; in grow_halt_poll_ns()
1968 vcpu->halt_poll_ns = val; in grow_halt_poll_ns()
1969 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); in grow_halt_poll_ns()
1972 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) in shrink_halt_poll_ns() argument
1976 old = val = vcpu->halt_poll_ns; in shrink_halt_poll_ns()
1982 vcpu->halt_poll_ns = val; in shrink_halt_poll_ns()
1983 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); in shrink_halt_poll_ns()
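
Both helpers read-modify-write vcpu->halt_poll_ns and trace the old/new pair. Grow starts a zero window at a 10 us base and otherwise multiplies by the halt_poll_ns_grow module parameter; shrink divides by halt_poll_ns_shrink, or resets the window to zero when that parameter is zero. An illustrative trajectory, assuming both parameters are 2:

    /* grow:   0 -> 10000 -> 20000 -> 40000 ns ... (growth stops once the
     *         window reaches the halt_poll_ns limit checked by the caller)
     * shrink: 40000 -> 20000 -> 10000 ns ... (a shrink divisor of 0
     *         would instead reset the window to 0 outright) */
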
1986 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) in kvm_vcpu_check_block() argument
1988 if (kvm_arch_vcpu_runnable(vcpu)) { in kvm_vcpu_check_block()
1989 kvm_make_request(KVM_REQ_UNHALT, vcpu); in kvm_vcpu_check_block()
1992 if (kvm_cpu_has_pending_timer(vcpu)) in kvm_vcpu_check_block()
2003 void kvm_vcpu_block(struct kvm_vcpu *vcpu) in kvm_vcpu_block() argument
2011 if (vcpu->halt_poll_ns) { in kvm_vcpu_block()
2012 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); in kvm_vcpu_block()
2014 ++vcpu->stat.halt_attempted_poll; in kvm_vcpu_block()
2020 if (kvm_vcpu_check_block(vcpu) < 0) { in kvm_vcpu_block()
2021 ++vcpu->stat.halt_successful_poll; in kvm_vcpu_block()
2028 kvm_arch_vcpu_blocking(vcpu); in kvm_vcpu_block()
2031 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); in kvm_vcpu_block()
2033 if (kvm_vcpu_check_block(vcpu) < 0) in kvm_vcpu_block()
2040 finish_wait(&vcpu->wq, &wait); in kvm_vcpu_block()
2043 kvm_arch_vcpu_unblocking(vcpu); in kvm_vcpu_block()
2048 if (block_ns <= vcpu->halt_poll_ns) in kvm_vcpu_block()
2051 else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns) in kvm_vcpu_block()
2052 shrink_halt_poll_ns(vcpu); in kvm_vcpu_block()
2054 else if (vcpu->halt_poll_ns < halt_poll_ns && in kvm_vcpu_block()
2056 grow_halt_poll_ns(vcpu); in kvm_vcpu_block()
2058 vcpu->halt_poll_ns = 0; in kvm_vcpu_block()
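
Condensed shape of the adaptive halt-polling above (not verbatim): a busy-poll phase bounded by vcpu->halt_poll_ns, then a real sleep on vcpu->wq, then a window adjustment based on how long the whole block took:

    stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
    do {
            if (kvm_vcpu_check_block(vcpu) < 0)
                    goto out;       /* became runnable while polling */
    } while (single_task_running() && ktime_before(ktime_get(), stop));
    /* otherwise: prepare_to_wait()/schedule() loop until check_block()
     * reports runnable, then grow or shrink vcpu->halt_poll_ns as above */
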
2068 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) in kvm_vcpu_kick() argument
2071 int cpu = vcpu->cpu; in kvm_vcpu_kick()
2074 wqp = kvm_arch_vcpu_wq(vcpu); in kvm_vcpu_kick()
2077 ++vcpu->stat.halt_wakeup; in kvm_vcpu_kick()
2082 if (kvm_arch_vcpu_should_kick(vcpu)) in kvm_vcpu_kick()
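
kvm_vcpu_kick() wakes a halted vCPU off its wait queue and, when the target is executing guest code on another pCPU (kvm_arch_vcpu_should_kick() checks for IN_GUEST_MODE), sends a reschedule IPI. The usual producer pattern, roughly as x86 event injection does it:

    kvm_make_request(KVM_REQ_EVENT, vcpu);  /* make the vCPU runnable */
    kvm_vcpu_kick(vcpu);                    /* wake_up and/or IPI */
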
2131 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) in kvm_vcpu_eligible_for_directed_yield() argument
2136 eligible = !vcpu->spin_loop.in_spin_loop || in kvm_vcpu_eligible_for_directed_yield()
2137 vcpu->spin_loop.dy_eligible; in kvm_vcpu_eligible_for_directed_yield()
2139 if (vcpu->spin_loop.in_spin_loop) in kvm_vcpu_eligible_for_directed_yield()
2140 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); in kvm_vcpu_eligible_for_directed_yield()
2151 struct kvm_vcpu *vcpu; in kvm_vcpu_on_spin() local
2167 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vcpu_on_spin()
2173 if (!ACCESS_ONCE(vcpu->preempted)) in kvm_vcpu_on_spin()
2175 if (vcpu == me) in kvm_vcpu_on_spin()
2177 if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) in kvm_vcpu_on_spin()
2179 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) in kvm_vcpu_on_spin()
2182 yielded = kvm_vcpu_yield_to(vcpu); in kvm_vcpu_on_spin()
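
kvm_vcpu_on_spin() scans siblings starting after the last boosted one, skips vCPUs that are not preempted, are the caller itself, are idle on their wait queue without being runnable, or fail the eligibility filter above, and yields to the first survivor. Arch code calls it from the pause/spinlock exit handler, roughly like the x86/VMX one:

    static int handle_pause(struct kvm_vcpu *vcpu)
    {
            skip_emulated_instruction(vcpu);   /* step past the PAUSE insn */
            kvm_vcpu_on_spin(vcpu);            /* boost a preempted sibling */
            return 1;                          /* resume the guest */
    }
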
2202 struct kvm_vcpu *vcpu = vma->vm_file->private_data; in kvm_vcpu_fault() local
2206 page = virt_to_page(vcpu->run); in kvm_vcpu_fault()
2209 page = virt_to_page(vcpu->arch.pio_data); in kvm_vcpu_fault()
2213 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); in kvm_vcpu_fault()
2216 return kvm_arch_vcpu_fault(vcpu, vmf); in kvm_vcpu_fault()
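
kvm_vcpu_fault() backs mmap() of the vCPU fd: page 0 is the kvm_run structure, with pio_data and the coalesced-MMIO ring at fixed page offsets behind it. The userspace side looks like this (error handling shortened):

    int mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
    struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, vcpu_fd, 0);
    if (run == MAP_FAILED)
            err(1, "mmap vcpu");
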
2234 struct kvm_vcpu *vcpu = filp->private_data; in kvm_vcpu_release() local
2236 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_release()
2253 static int create_vcpu_fd(struct kvm_vcpu *vcpu) in create_vcpu_fd() argument
2255 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); in create_vcpu_fd()
2264 struct kvm_vcpu *vcpu, *v; in kvm_vm_ioctl_create_vcpu() local
2269 vcpu = kvm_arch_vcpu_create(kvm, id); in kvm_vm_ioctl_create_vcpu()
2270 if (IS_ERR(vcpu)) in kvm_vm_ioctl_create_vcpu()
2271 return PTR_ERR(vcpu); in kvm_vm_ioctl_create_vcpu()
2273 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); in kvm_vm_ioctl_create_vcpu()
2275 r = kvm_arch_vcpu_setup(vcpu); in kvm_vm_ioctl_create_vcpu()
2280 if (!kvm_vcpu_compatible(vcpu)) { in kvm_vm_ioctl_create_vcpu()
2299 r = create_vcpu_fd(vcpu); in kvm_vm_ioctl_create_vcpu()
2305 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; in kvm_vm_ioctl_create_vcpu()
2315 kvm_arch_vcpu_postcreate(vcpu); in kvm_vm_ioctl_create_vcpu()
2321 kvm_arch_vcpu_destroy(vcpu); in kvm_vm_ioctl_create_vcpu()
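
kvm_vm_ioctl_create_vcpu() drives KVM_CREATE_VCPU: arch create, preempt-notifier init, setup, a duplicate-id check against every existing vCPU (the v iterator), then publishing the new vCPU in kvm->vcpus[] only after create_vcpu_fd() succeeds, since the fd install is the point where userspace can first see it. Userspace counterpart:

    int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0 /* vcpu id */);
    if (vcpu_fd < 0)
            err(1, "KVM_CREATE_VCPU");
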
2325 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) in kvm_vcpu_ioctl_set_sigmask() argument
2329 vcpu->sigset_active = 1; in kvm_vcpu_ioctl_set_sigmask()
2330 vcpu->sigset = *sigset; in kvm_vcpu_ioctl_set_sigmask()
2332 vcpu->sigset_active = 0; in kvm_vcpu_ioctl_set_sigmask()
2339 struct kvm_vcpu *vcpu = filp->private_data; in kvm_vcpu_ioctl() local
2345 if (vcpu->kvm->mm != current->mm) in kvm_vcpu_ioctl()
2361 r = vcpu_load(vcpu); in kvm_vcpu_ioctl()
2369 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { in kvm_vcpu_ioctl()
2371 struct pid *oldpid = vcpu->pid; in kvm_vcpu_ioctl()
2374 rcu_assign_pointer(vcpu->pid, newpid); in kvm_vcpu_ioctl()
2379 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); in kvm_vcpu_ioctl()
2380 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); in kvm_vcpu_ioctl()
2389 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); in kvm_vcpu_ioctl()
2409 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); in kvm_vcpu_ioctl()
2418 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); in kvm_vcpu_ioctl()
2434 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); in kvm_vcpu_ioctl()
2440 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); in kvm_vcpu_ioctl()
2455 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); in kvm_vcpu_ioctl()
2464 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); in kvm_vcpu_ioctl()
2479 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); in kvm_vcpu_ioctl()
2502 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); in kvm_vcpu_ioctl()
2510 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); in kvm_vcpu_ioctl()
2526 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); in kvm_vcpu_ioctl()
2533 vcpu_put(vcpu); in kvm_vcpu_ioctl()
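
kvm_vcpu_ioctl() is the per-vCPU dispatcher: it rejects callers from a different mm, takes the vCPU with vcpu_load(), refreshes vcpu->pid when a new task issues KVM_RUN (feeding directed yield), fans out to the arch handlers, and drops everything with vcpu_put(). The KVM_RUN arm performs one guest entry; userspace loops on it, for example (run is the mmap()ed kvm_run from above):

    while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
            switch (run->exit_reason) {
            case KVM_EXIT_IO:
                    /* service port I/O via run->io + the pio_data page */
                    break;
            case KVM_EXIT_HLT:
                    return 0;
            default:
                    return -1;      /* unhandled exit, illustrative */
            }
    }
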
2543 struct kvm_vcpu *vcpu = filp->private_data; in kvm_vcpu_compat_ioctl() local
2547 if (vcpu->kvm->mm != current->mm) in kvm_vcpu_compat_ioctl()
2570 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); in kvm_vcpu_compat_ioctl()
2572 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); in kvm_vcpu_compat_ioctl()
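
Both the native and compat paths funnel KVM_SET_SIGNAL_MASK into kvm_vcpu_ioctl_set_sigmask(); the compat path converts a compat_sigset_t first. On the userspace side the kernel expects len to be the kernel sigset size (8 bytes on x86-64, not glibc's much larger sigset_t), roughly:

    struct kvm_signal_mask *mask = malloc(sizeof(*mask) + 8);

    mask->len = 8;                          /* kernel sigset_t size, x86-64 */
    memcpy(mask->sigmask, &blocked, 8);     /* first 8 bytes of the sigset */
    ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, mask);
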
3238 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, in __kvm_io_bus_write() argument
3249 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, in __kvm_io_bus_write()
3259 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_write() argument
3271 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write()
3272 r = __kvm_io_bus_write(vcpu, bus, &range, val); in kvm_io_bus_write()
3277 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, in kvm_io_bus_write_cookie() argument
3288 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write_cookie()
3293 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, in kvm_io_bus_write_cookie()
3301 return __kvm_io_bus_write(vcpu, bus, &range, val); in kvm_io_bus_write_cookie()
3304 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, in __kvm_io_bus_read() argument
3315 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, in __kvm_io_bus_read()
3326 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_read() argument
3338 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_read()
3339 r = __kvm_io_bus_read(vcpu, bus, &range, val); in kvm_io_bus_read()
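
__kvm_io_bus_write()/__kvm_io_bus_read() walk the bus's sorted range array and offer the access to each device registered over the range until one accepts; the cookie variant first retries a remembered index before falling back to the scan. The arch MMIO emulation path uses it roughly like:

    if (kvm_io_bus_write(vcpu, KVM_MMIO_BUS, gpa, len, data))
            return -EOPNOTSUPP;     /* no in-kernel device claimed it;
                                       punt the exit to userspace */
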
3426 struct kvm_vcpu *vcpu; in vcpu_stat_get() local
3432 kvm_for_each_vcpu(i, vcpu, kvm) in vcpu_stat_get()
3433 *val += *(u32 *)((void *)vcpu + offset); in vcpu_stat_get()
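
vcpu_stat_get() aggregates a per-vCPU counter across every vCPU of every VM by reading a u32 at a fixed byte offset into struct kvm_vcpu; the offset comes from the arch's debugfs stats table, whose entries look roughly like (x86, illustrative):

    { "halt_wakeup", VCPU_STAT(halt_wakeup) },  /* offset into kvm_vcpu */
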
3508 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); in kvm_sched_in() local
3510 if (vcpu->preempted) in kvm_sched_in()
3511 vcpu->preempted = false; in kvm_sched_in()
3513 kvm_arch_sched_in(vcpu, cpu); in kvm_sched_in()
3515 kvm_arch_vcpu_load(vcpu, cpu); in kvm_sched_in()
3521 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); in kvm_sched_out() local
3524 vcpu->preempted = true; in kvm_sched_out()
3525 kvm_arch_vcpu_put(vcpu); in kvm_sched_out()
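
These are the preempt-notifier callbacks that vcpu_load() registers: kvm_sched_out() marks the vCPU preempted (when the task was still runnable, i.e. involuntarily descheduled, so kvm_vcpu_on_spin() can prefer it) and saves arch state; kvm_sched_in() clears the flag and reloads state on the new pCPU. They are wired up once at module init, as kvm_init() does:

    kvm_preempt_ops.sched_in  = kvm_sched_in;
    kvm_preempt_ops.sched_out = kvm_sched_out;
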