Lines matching refs:vcpu (cross-reference listing: each entry gives the source line number, the matching code, and the enclosing function; "argument"/"local" notes how vcpu is declared on that line)

64 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)  in kvm_arm_set_running_vcpu()  argument
67 __this_cpu_write(kvm_arm_running_vcpu, vcpu); in kvm_arm_set_running_vcpu()
93 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
95 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
144 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
216 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_create() local
228 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in kvm_arch_vcpu_create()
229 if (!vcpu) { in kvm_arch_vcpu_create()
234 err = kvm_vcpu_init(vcpu, kvm, id); in kvm_arch_vcpu_create()
238 err = create_hyp_mappings(vcpu, vcpu + 1); in kvm_arch_vcpu_create()
242 return vcpu; in kvm_arch_vcpu_create()
244 kvm_vcpu_uninit(vcpu); in kvm_arch_vcpu_create()
246 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvm_arch_vcpu_create()
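The kvm_arch_vcpu_create() fragments above (lines 216-246) cover allocation from the vcpu slab cache, generic KVM init, and mapping the structure into HYP mode. A sketch of how those pieces likely fit together; the error-unwind labels and the ERR_PTR() return are assumptions, and the validity checks the real function performs before allocating are not shown in the listing and are omitted here:

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err;
	struct kvm_vcpu *vcpu;

	/* Allocate the vcpu from the dedicated slab cache (line 228). */
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	/* Generic KVM init: ties the vcpu to its VM and index (line 234). */
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	/* Make the vcpu struct visible to the HYP-mode code (line 238). */
	err = create_hyp_mappings(vcpu, vcpu + 1);
	if (err)
		goto vcpu_uninit;

	return vcpu;

vcpu_uninit:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}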
251 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
253 kvm_vgic_vcpu_early_init(vcpu); in kvm_arch_vcpu_postcreate()
256 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_free() argument
258 kvm_mmu_free_memory_caches(vcpu); in kvm_arch_vcpu_free()
259 kvm_timer_vcpu_terminate(vcpu); in kvm_arch_vcpu_free()
260 kvm_vgic_vcpu_destroy(vcpu); in kvm_arch_vcpu_free()
261 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvm_arch_vcpu_free()
264 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
266 kvm_arch_vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
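Teardown mirrors creation: per-vcpu MMU caches, timer state and vgic state are released before the structure goes back to the slab cache, and kvm_arch_vcpu_destroy() simply delegates to the free path. A short sketch based on the lines above:

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_caches(vcpu);	/* stage-2 page-table caches */
	kvm_timer_vcpu_terminate(vcpu);		/* arch timer state */
	kvm_vgic_vcpu_destroy(vcpu);		/* virtual GIC state */
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}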
269 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
271 return kvm_timer_should_fire(vcpu); in kvm_cpu_has_pending_timer()
274 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_blocking() argument
276 kvm_timer_schedule(vcpu); in kvm_arch_vcpu_blocking()
279 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_unblocking() argument
281 kvm_timer_unschedule(vcpu); in kvm_arch_vcpu_unblocking()
284 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_init() argument
287 vcpu->arch.target = -1; in kvm_arch_vcpu_init()
288 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); in kvm_arch_vcpu_init()
291 kvm_timer_vcpu_init(vcpu); in kvm_arch_vcpu_init()
293 kvm_arm_reset_debug_ptr(vcpu); in kvm_arch_vcpu_init()
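kvm_arch_vcpu_init() marks the vcpu as not yet targeted (target = -1, which kvm_vcpu_initialized() checks later), clears the feature bitmap, and sets up timer and debug state. A sketch; the return value is an assumption:

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* Force a KVM_ARM_VCPU_INIT before the vcpu can run (line 287). */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	/* Per-vcpu arch timer setup (line 291). */
	kvm_timer_vcpu_init(vcpu);

	/* Point the debug registers back at the vcpu's own copy (line 293). */
	kvm_arm_reset_debug_ptr(vcpu);

	return 0;
}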
298 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
300 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
301 vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); in kvm_arch_vcpu_load()
303 kvm_arm_set_running_vcpu(vcpu); in kvm_arch_vcpu_load()
306 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
313 vcpu->cpu = -1; in kvm_arch_vcpu_put()
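kvm_arch_vcpu_load()/put() track which physical CPU a vcpu is loaded on and maintain the per-cpu "running vcpu" pointer set by kvm_arm_set_running_vcpu(). The put side in the listing only shows the cpu = -1 reset; clearing the running-vcpu pointer is an assumed counterpart to the load path:

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = cpu;
	/* Cache the host register context of this physical CPU (line 301). */
	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);

	kvm_arm_set_running_vcpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	/* Generic KVM expects cpu == -1 while the vcpu is not loaded (line 313). */
	vcpu->cpu = -1;

	/* Assumption: the put path clears the per-cpu running-vcpu pointer. */
	kvm_arm_set_running_vcpu(NULL);
}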
318 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
321 if (vcpu->arch.power_off) in kvm_arch_vcpu_ioctl_get_mpstate()
329 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
334 vcpu->arch.power_off = false; in kvm_arch_vcpu_ioctl_set_mpstate()
337 vcpu->arch.power_off = true; in kvm_arch_vcpu_ioctl_set_mpstate()
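The MP-state ioctls map the per-vcpu power_off flag onto KVM_MP_STATE_STOPPED/RUNNABLE. A sketch, assuming the usual struct kvm_mp_state argument and -EINVAL for unsupported states:

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		vcpu->arch.power_off = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}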
446 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) in kvm_vcpu_first_run_init() argument
448 struct kvm *kvm = vcpu->kvm; in kvm_vcpu_first_run_init()
451 if (likely(vcpu->arch.has_run_once)) in kvm_vcpu_first_run_init()
454 vcpu->arch.has_run_once = true; in kvm_vcpu_first_run_init()
488 struct kvm_vcpu *vcpu; in kvm_arm_halt_guest() local
490 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arm_halt_guest()
491 vcpu->arch.pause = true; in kvm_arm_halt_guest()
498 struct kvm_vcpu *vcpu; in kvm_arm_resume_guest() local
500 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arm_resume_guest()
501 wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); in kvm_arm_resume_guest()
503 vcpu->arch.pause = false; in kvm_arm_resume_guest()
508 static void vcpu_sleep(struct kvm_vcpu *vcpu) in vcpu_sleep() argument
510 wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); in vcpu_sleep()
512 wait_event_interruptible(*wq, ((!vcpu->arch.power_off) && in vcpu_sleep()
513 (!vcpu->arch.pause))); in vcpu_sleep()
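kvm_arm_halt_guest() and kvm_arm_resume_guest() flip the per-vcpu pause flag for every vcpu of the VM, and vcpu_sleep() parks a vcpu on its wait queue until both power_off and pause are clear. A sketch; how the halt path forces already-running vcpus out of the guest is not in the listing and is omitted, and the wake_up_interruptible() call on resume is an assumption:

void kvm_arm_halt_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	/* The real function also kicks vcpus out of guest mode (elided). */
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);

		vcpu->arch.pause = false;
		wake_up_interruptible(wq);	/* assumed wake-up of sleepers */
	}
}

static void vcpu_sleep(struct kvm_vcpu *vcpu)
{
	wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);

	wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
				       (!vcpu->arch.pause)));
}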
516 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) in kvm_vcpu_initialized() argument
518 return vcpu->arch.target >= 0; in kvm_vcpu_initialized()
532 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_arch_vcpu_ioctl_run() argument
537 if (unlikely(!kvm_vcpu_initialized(vcpu))) in kvm_arch_vcpu_ioctl_run()
540 ret = kvm_vcpu_first_run_init(vcpu); in kvm_arch_vcpu_ioctl_run()
545 ret = kvm_handle_mmio_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
550 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
551 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); in kvm_arch_vcpu_ioctl_run()
561 update_vttbr(vcpu->kvm); in kvm_arch_vcpu_ioctl_run()
563 if (vcpu->arch.power_off || vcpu->arch.pause) in kvm_arch_vcpu_ioctl_run()
564 vcpu_sleep(vcpu); in kvm_arch_vcpu_ioctl_run()
572 kvm_timer_flush_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
573 kvm_vgic_flush_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
585 if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) || in kvm_arch_vcpu_ioctl_run()
586 vcpu->arch.power_off || vcpu->arch.pause) { in kvm_arch_vcpu_ioctl_run()
588 kvm_timer_sync_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
589 kvm_vgic_sync_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
594 kvm_arm_setup_debug(vcpu); in kvm_arch_vcpu_ioctl_run()
599 trace_kvm_entry(*vcpu_pc(vcpu)); in kvm_arch_vcpu_ioctl_run()
601 vcpu->mode = IN_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
603 ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); in kvm_arch_vcpu_ioctl_run()
605 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
610 kvm_arm_clear_debug(vcpu); in kvm_arch_vcpu_ioctl_run()
633 trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); in kvm_arch_vcpu_ioctl_run()
640 kvm_timer_sync_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
642 kvm_vgic_sync_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
646 ret = handle_exit(vcpu, run, ret); in kvm_arch_vcpu_ioctl_run()
649 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
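The kvm_arch_vcpu_ioctl_run() lines (532-649) outline the main run loop: validate and first-run-init the vcpu, complete any pending MMIO exit, then repeatedly flush timer/vgic state into hardware, world-switch into the guest through kvm_call_hyp(__kvm_vcpu_run, ...), sync state back, and let handle_exit() decide whether to loop. A condensed sketch; preemption/interrupt toggling, guest-time accounting and statistics around the world switch are elided, and the KVM_EXIT_MMIO check, initial exit_reason and error codes are assumptions:

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;
	sigset_t sigsaved;

	/* The vcpu must have been targeted via KVM_ARM_VCPU_INIT (line 537). */
	if (unlikely(!kvm_vcpu_initialized(vcpu)))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	/* Complete a pending MMIO exit before re-entering the guest (line 545). */
	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
		if (ret)
			return ret;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Make sure the VM still owns a valid VMID (line 561). */
		update_vttbr(vcpu->kvm);

		if (vcpu->arch.power_off || vcpu->arch.pause)
			vcpu_sleep(vcpu);

		/* Push virtual timer and GIC state into hardware (lines 572-573). */
		kvm_timer_flush_hwstate(vcpu);
		kvm_vgic_flush_hwstate(vcpu);

		/* Bail out (undoing the flush) if we can no longer enter the guest. */
		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
		    vcpu->arch.power_off || vcpu->arch.pause) {
			kvm_timer_sync_hwstate(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			continue;
		}

		kvm_arm_setup_debug(vcpu);

		/* Enter the guest via the HYP-mode world switch (lines 599-605). */
		trace_kvm_entry(*vcpu_pc(vcpu));
		vcpu->mode = IN_GUEST_MODE;

		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		kvm_arm_clear_debug(vcpu);

		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Pull timer and GIC state back from hardware (lines 640-642). */
		kvm_timer_sync_hwstate(vcpu);
		kvm_vgic_sync_hwstate(vcpu);

		ret = handle_exit(vcpu, run, ret);
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	return ret;
}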
654 static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) in vcpu_interrupt_line() argument
665 ptr = (unsigned long *)&vcpu->arch.irq_lines; in vcpu_interrupt_line()
682 kvm_vcpu_kick(vcpu); in vcpu_interrupt_line()
693 struct kvm_vcpu *vcpu = NULL; in kvm_vm_ioctl_irq_line() local
710 vcpu = kvm_get_vcpu(kvm, vcpu_idx); in kvm_vm_ioctl_irq_line()
711 if (!vcpu) in kvm_vm_ioctl_irq_line()
717 return vcpu_interrupt_line(vcpu, irq_num, level); in kvm_vm_ioctl_irq_line()
725 vcpu = kvm_get_vcpu(kvm, vcpu_idx); in kvm_vm_ioctl_irq_line()
726 if (!vcpu) in kvm_vm_ioctl_irq_line()
732 return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level); in kvm_vm_ioctl_irq_line()
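Interrupt injection via KVM_IRQ_LINE takes two paths: without an in-kernel GIC, vcpu_interrupt_line() sets or clears a bit in vcpu->arch.irq_lines and kicks the vcpu; with a vgic, the line is routed through kvm_vgic_inject_irq(). A sketch of both; the HCR_VI/HCR_VF bit selection, the irq-type dispatch and the error codes are assumptions beyond the listed lines:

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	/* number selects the virtual IRQ or FIQ bit of the HCR image. */
	int bit_index = (number == KVM_ARM_IRQ_CPU_IRQ) ? __ffs(HCR_VI)
							: __ffs(HCR_VF);
	unsigned long *ptr = (unsigned long *)&vcpu->arch.irq_lines;
	bool set;

	if (level)
		set = test_and_set_bit(bit_index, ptr);
	else
		set = test_and_clear_bit(bit_index, ptr);

	/* Nothing changed: no need to kick the vcpu. */
	if (set == level)
		return 0;

	/* Force a world switch so the new virtual IRQ/FIQ state is picked up. */
	kvm_vcpu_kick(vcpu);
	return 0;
}

/* Core of the dispatch in kvm_vm_ioctl_irq_line() (lines 710-732): */
	case KVM_ARM_IRQ_TYPE_CPU:		/* no in-kernel GIC */
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;
		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:		/* per-vcpu vgic interrupt */
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;
		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);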
746 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, in kvm_vcpu_set_target() argument
759 if (vcpu->arch.target != -1 && vcpu->arch.target != init->target) in kvm_vcpu_set_target()
773 if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES && in kvm_vcpu_set_target()
774 test_bit(i, vcpu->arch.features) != set) in kvm_vcpu_set_target()
778 set_bit(i, vcpu->arch.features); in kvm_vcpu_set_target()
781 vcpu->arch.target = phys_target; in kvm_vcpu_set_target()
784 return kvm_reset_vcpu(vcpu); in kvm_vcpu_set_target()
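kvm_vcpu_set_target() enforces that a second KVM_ARM_VCPU_INIT uses the same target and feature set as the first (vcpu->arch.target != -1 means the vcpu was already initialized), records the requested features, then resets the vcpu. A sketch; the check against kvm_target_cpu() and the 32-bit feature-word decode are assumptions beyond the listed lines:

static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	unsigned int i;
	int phys_target = kvm_target_cpu();

	if (init->target != phys_target)
		return -EINVAL;

	/* A re-init must not change the target (line 759). */
	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
		return -EINVAL;

	for (i = 0; i < sizeof(init->features) * 8; i++) {
		bool set = (init->features[i / 32] & (1 << (i % 32)));

		if (set && i >= KVM_VCPU_MAX_FEATURES)
			return -ENOENT;

		/* A re-init must not change the feature set (lines 773-774). */
		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
		    test_bit(i, vcpu->arch.features) != set)
			return -EINVAL;

		if (set)
			set_bit(i, vcpu->arch.features);
	}

	vcpu->arch.target = phys_target;

	/* Now that the target is known, the vcpu can be reset (line 784). */
	return kvm_reset_vcpu(vcpu);
}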
788 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_vcpu_init() argument
793 ret = kvm_vcpu_set_target(vcpu, init); in kvm_arch_vcpu_ioctl_vcpu_init()
801 if (vcpu->arch.has_run_once) in kvm_arch_vcpu_ioctl_vcpu_init()
802 stage2_unmap_vm(vcpu->kvm); in kvm_arch_vcpu_ioctl_vcpu_init()
804 vcpu_reset_hcr(vcpu); in kvm_arch_vcpu_ioctl_vcpu_init()
809 if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) in kvm_arch_vcpu_ioctl_vcpu_init()
810 vcpu->arch.power_off = true; in kvm_arch_vcpu_ioctl_vcpu_init()
812 vcpu->arch.power_off = false; in kvm_arch_vcpu_ioctl_vcpu_init()
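kvm_arch_vcpu_ioctl_vcpu_init() applies the target/features, unmaps the stage-2 tables if the vcpu has already run (so a rebooted VM faults its memory back in), resets the HCR, and honours the "start powered off" feature bit. A sketch of the control flow implied by the lines above:

static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	int ret;

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/* A re-init of a vcpu that has run means the VM is being rebooted:
	 * drop the stage-2 mappings so guest RAM is faulted in again (line 802). */
	if (vcpu->arch.has_run_once)
		stage2_unmap_vm(vcpu->kvm);

	vcpu_reset_hcr(vcpu);

	/* Handle the "start in power-off" case (lines 809-812). */
	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
		vcpu->arch.power_off = true;
	else
		vcpu->arch.power_off = false;

	return 0;
}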
820 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
830 return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init); in kvm_arch_vcpu_ioctl()
836 if (unlikely(!kvm_vcpu_initialized(vcpu))) in kvm_arch_vcpu_ioctl()
842 return kvm_arm_set_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
844 return kvm_arm_get_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
851 if (unlikely(!kvm_vcpu_initialized(vcpu))) in kvm_arch_vcpu_ioctl()
857 reg_list.n = kvm_arm_num_regs(vcpu); in kvm_arch_vcpu_ioctl()
862 return kvm_arm_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
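The kvm_arch_vcpu_ioctl() lines (820-862) show the per-vcpu ioctl dispatch: KVM_ARM_VCPU_INIT, single-register get/set, and register-list enumeration, with the latter two refused until the vcpu has been initialized. A sketch; the copy_from_user/copy_to_user plumbing and the -ENOEXEC/-E2BIG error codes are assumptions beyond the listed lines:

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		if (copy_from_user(&init, argp, sizeof(init)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		/* Register access needs KVM_ARM_VCPU_INIT first (line 836). */
		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			return -ENOEXEC;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_arm_set_reg(vcpu, &reg);
		else
			return kvm_arm_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			return -ENOEXEC;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
	}
	default:
		return -EINVAL;
	}
}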
1152 struct kvm_vcpu *vcpu; in kvm_mpidr_to_vcpu() local
1156 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_mpidr_to_vcpu()
1157 if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu)) in kvm_mpidr_to_vcpu()
1158 return vcpu; in kvm_mpidr_to_vcpu()
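Finally, kvm_mpidr_to_vcpu() walks the VM's vcpus and returns the one whose MPIDR affinity matches. A short sketch; masking the input to the affinity bits and the NULL fallthrough are assumptions beyond the listed lines:

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	int i;

	mpidr &= MPIDR_HWID_BITMASK;	/* assumed: keep only the affinity fields */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}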