Lines Matching refs:vcpu
64 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) in kvm_arm_set_running_vcpu() argument
67 __this_cpu_write(kvm_arm_running_vcpu, vcpu); in kvm_arm_set_running_vcpu()
93 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
95 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
143 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
216 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_create() local
228 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in kvm_arch_vcpu_create()
229 if (!vcpu) { in kvm_arch_vcpu_create()
234 err = kvm_vcpu_init(vcpu, kvm, id); in kvm_arch_vcpu_create()
238 err = create_hyp_mappings(vcpu, vcpu + 1); in kvm_arch_vcpu_create()
242 return vcpu; in kvm_arch_vcpu_create()
244 kvm_vcpu_uninit(vcpu); in kvm_arch_vcpu_create()
246 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvm_arch_vcpu_create()
251 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
255 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_free() argument
257 kvm_mmu_free_memory_caches(vcpu); in kvm_arch_vcpu_free()
258 kvm_timer_vcpu_terminate(vcpu); in kvm_arch_vcpu_free()
259 kvm_vgic_vcpu_destroy(vcpu); in kvm_arch_vcpu_free()
260 kmem_cache_free(kvm_vcpu_cache, vcpu); in kvm_arch_vcpu_free()
263 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
265 kvm_arch_vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
268 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
270 return kvm_timer_should_fire(vcpu); in kvm_cpu_has_pending_timer()
273 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_init() argument
276 vcpu->arch.target = -1; in kvm_arch_vcpu_init()
277 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); in kvm_arch_vcpu_init()
280 kvm_timer_vcpu_init(vcpu); in kvm_arch_vcpu_init()
285 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
287 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
288 vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); in kvm_arch_vcpu_load()
290 kvm_arm_set_running_vcpu(vcpu); in kvm_arch_vcpu_load()
293 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
300 vcpu->cpu = -1; in kvm_arch_vcpu_put()
305 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
312 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
315 if (vcpu->arch.pause) in kvm_arch_vcpu_ioctl_get_mpstate()
323 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
328 vcpu->arch.pause = false; in kvm_arch_vcpu_ioctl_set_mpstate()
331 vcpu->arch.pause = true; in kvm_arch_vcpu_ioctl_set_mpstate()
439 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) in kvm_vcpu_first_run_init() argument
441 struct kvm *kvm = vcpu->kvm; in kvm_vcpu_first_run_init()
444 if (likely(vcpu->arch.has_run_once)) in kvm_vcpu_first_run_init()
447 vcpu->arch.has_run_once = true; in kvm_vcpu_first_run_init()
475 static void vcpu_pause(struct kvm_vcpu *vcpu) in vcpu_pause() argument
477 wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); in vcpu_pause()
479 wait_event_interruptible(*wq, !vcpu->arch.pause); in vcpu_pause()
482 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) in kvm_vcpu_initialized() argument
484 return vcpu->arch.target >= 0; in kvm_vcpu_initialized()
498 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_arch_vcpu_ioctl_run() argument
503 if (unlikely(!kvm_vcpu_initialized(vcpu))) in kvm_arch_vcpu_ioctl_run()
506 ret = kvm_vcpu_first_run_init(vcpu); in kvm_arch_vcpu_ioctl_run()
511 ret = kvm_handle_mmio_return(vcpu, vcpu->run); in kvm_arch_vcpu_ioctl_run()
516 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
517 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); in kvm_arch_vcpu_ioctl_run()
527 update_vttbr(vcpu->kvm); in kvm_arch_vcpu_ioctl_run()
529 if (vcpu->arch.pause) in kvm_arch_vcpu_ioctl_run()
530 vcpu_pause(vcpu); in kvm_arch_vcpu_ioctl_run()
532 kvm_vgic_flush_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
533 kvm_timer_flush_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
545 if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
547 kvm_timer_sync_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
548 kvm_vgic_sync_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
555 trace_kvm_entry(*vcpu_pc(vcpu)); in kvm_arch_vcpu_ioctl_run()
557 vcpu->mode = IN_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
559 ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); in kvm_arch_vcpu_ioctl_run()
561 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
563 trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); in kvm_arch_vcpu_ioctl_run()
580 kvm_timer_sync_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
581 kvm_vgic_sync_hwstate(vcpu); in kvm_arch_vcpu_ioctl_run()
583 ret = handle_exit(vcpu, run, ret); in kvm_arch_vcpu_ioctl_run()
586 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
591 static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) in vcpu_interrupt_line() argument
602 ptr = (unsigned long *)&vcpu->arch.irq_lines; in vcpu_interrupt_line()
619 kvm_vcpu_kick(vcpu); in vcpu_interrupt_line()
630 struct kvm_vcpu *vcpu = NULL; in kvm_vm_ioctl_irq_line() local
647 vcpu = kvm_get_vcpu(kvm, vcpu_idx); in kvm_vm_ioctl_irq_line()
648 if (!vcpu) in kvm_vm_ioctl_irq_line()
654 return vcpu_interrupt_line(vcpu, irq_num, level); in kvm_vm_ioctl_irq_line()
662 vcpu = kvm_get_vcpu(kvm, vcpu_idx); in kvm_vm_ioctl_irq_line()
663 if (!vcpu) in kvm_vm_ioctl_irq_line()
669 return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level); in kvm_vm_ioctl_irq_line()
683 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, in kvm_vcpu_set_target() argument
696 if (vcpu->arch.target != -1 && vcpu->arch.target != init->target) in kvm_vcpu_set_target()
710 if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES && in kvm_vcpu_set_target()
711 test_bit(i, vcpu->arch.features) != set) in kvm_vcpu_set_target()
715 set_bit(i, vcpu->arch.features); in kvm_vcpu_set_target()
718 vcpu->arch.target = phys_target; in kvm_vcpu_set_target()
721 return kvm_reset_vcpu(vcpu); in kvm_vcpu_set_target()
725 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_vcpu_init() argument
730 ret = kvm_vcpu_set_target(vcpu, init); in kvm_arch_vcpu_ioctl_vcpu_init()
738 if (vcpu->arch.has_run_once) in kvm_arch_vcpu_ioctl_vcpu_init()
739 stage2_unmap_vm(vcpu->kvm); in kvm_arch_vcpu_ioctl_vcpu_init()
741 vcpu_reset_hcr(vcpu); in kvm_arch_vcpu_ioctl_vcpu_init()
746 if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) in kvm_arch_vcpu_ioctl_vcpu_init()
747 vcpu->arch.pause = true; in kvm_arch_vcpu_ioctl_vcpu_init()
749 vcpu->arch.pause = false; in kvm_arch_vcpu_ioctl_vcpu_init()
757 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
767 return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init); in kvm_arch_vcpu_ioctl()
773 if (unlikely(!kvm_vcpu_initialized(vcpu))) in kvm_arch_vcpu_ioctl()
779 return kvm_arm_set_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
781 return kvm_arm_get_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
788 if (unlikely(!kvm_vcpu_initialized(vcpu))) in kvm_arch_vcpu_ioctl()
794 reg_list.n = kvm_arm_num_regs(vcpu); in kvm_arch_vcpu_ioctl()
799 return kvm_arm_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
1087 struct kvm_vcpu *vcpu; in kvm_mpidr_to_vcpu() local
1091 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_mpidr_to_vcpu()
1092 if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu)) in kvm_mpidr_to_vcpu()
1093 return vcpu; in kvm_mpidr_to_vcpu()