Lines Matching refs:vcpu
55 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
69 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) in kvmppc_prepare_to_enter() argument
85 kvmppc_account_exit(vcpu, SIGNAL_EXITS); in kvmppc_prepare_to_enter()
86 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
91 vcpu->mode = IN_GUEST_MODE; in kvmppc_prepare_to_enter()
101 if (vcpu->requests) { in kvmppc_prepare_to_enter()
104 trace_kvm_check_requests(vcpu); in kvmppc_prepare_to_enter()
105 r = kvmppc_core_check_requests(vcpu); in kvmppc_prepare_to_enter()
112 if (kvmppc_core_prepare_to_enter(vcpu)) { in kvmppc_prepare_to_enter()
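
The ordering in kvmppc_prepare_to_enter() is deliberate: vcpu->mode is set to IN_GUEST_MODE (line 91) before vcpu->requests is re-checked (line 101), so a concurrent kvm_make_request() plus kick cannot slip through unnoticed. A minimal sketch of the two sides of that pattern, with kick() and handle_requests() as hypothetical stand-ins for kvm_vcpu_kick() and kvmppc_core_check_requests():

    /* requester: publish the request, then kick if the target is in guest mode */
    static void requester(struct kvm_vcpu *vcpu, unsigned int req)
    {
            set_bit(req, &vcpu->requests);
            smp_mb();                       /* pairs with the barrier below */
            if (vcpu->mode == IN_GUEST_MODE)
                    kick(vcpu);             /* hypothetical: IPI the hosting cpu */
    }

    /* entry path: advertise guest mode, then look for late-arriving requests */
    static void enterer(struct kvm_vcpu *vcpu)
    {
            vcpu->mode = IN_GUEST_MODE;
            smp_mb();                       /* order the mode store vs. the requests load */
            if (vcpu->requests)
                    handle_requests(vcpu);  /* hypothetical stand-in */
    }
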
129 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) in kvmppc_swab_shared() argument
131 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
149 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) in kvmppc_kvm_pv() argument
151 int nr = kvmppc_get_gpr(vcpu, 11); in kvmppc_kvm_pv()
153 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); in kvmppc_kvm_pv()
154 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); in kvmppc_kvm_pv()
155 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); in kvmppc_kvm_pv()
156 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); in kvmppc_kvm_pv()
159 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { in kvmppc_kvm_pv()
173 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
175 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
176 kvmppc_swab_shared(vcpu); in kvmppc_kvm_pv()
177 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
186 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
187 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvmppc_kvm_pv()
190 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
191 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
198 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
199 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
200 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
201 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
205 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
208 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
227 kvm_vcpu_block(vcpu); in kvmppc_kvm_pv()
228 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); in kvmppc_kvm_pv()
235 kvmppc_set_gpr(vcpu, 4, r2); in kvmppc_kvm_pv()
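
kvmppc_kvm_pv() decodes the ePAPR-style paravirtual calling convention: the hypercall number arrives in gpr11, up to four arguments in gpr3..gpr6, and the handler returns a status code in gpr3 plus a secondary result in gpr4. Line 159 shows the subtlety: a 32-bit guest (MSR_SF clear) only passes 32-bit values, so the upper register halves are stale and must be masked. A hedged sketch of just the decode step (the real handler goes on to dispatch on nr):

    static void pv_decode(struct kvm_vcpu *vcpu, unsigned long *nr, unsigned long *p1)
    {
            *nr = kvmppc_get_gpr(vcpu, 11);         /* hypercall number */
            *p1 = kvmppc_get_gpr(vcpu, 3);          /* first argument */

            if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
                    /* 32-bit guest: only the low halves are meaningful */
                    *nr &= 0xffffffff;
                    *p1 &= 0xffffffff;
            }
    }
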
241 int kvmppc_sanity_check(struct kvm_vcpu *vcpu) in kvmppc_sanity_check() argument
246 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
250 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
254 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
265 vcpu->arch.sane = r; in kvmppc_sanity_check()
270 int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio() argument
275 er = kvmppc_emulate_loadstore(vcpu); in kvmppc_emulate_mmio()
297 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_emulate_mmio()
312 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_st() argument
315 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
319 vcpu->stat.st++; in kvmppc_st()
321 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_st()
332 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_st()
334 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_st()
335 void *magic = vcpu->arch.shared; in kvmppc_st()
341 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) in kvmppc_st()
348 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, in kvmppc_ld() argument
351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
355 vcpu->stat.ld++; in kvmppc_ld()
357 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST, in kvmppc_ld()
371 if (kvmppc_supports_magic_page(vcpu) && mp_pa && in kvmppc_ld()
373 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_ld()
374 void *magic = vcpu->arch.shared; in kvmppc_ld()
380 if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size)) in kvmppc_ld()
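
kvmppc_st() and kvmppc_ld() share one shape: translate the effective address with kvmppc_xlate(), serve magic-page hits from the kernel's own copy at vcpu->arch.shared, and fall back to ordinary guest memory otherwise. A condensed load-side sketch, with the permission checks elided and magic_page_hit() as a hypothetical helper for the range test done inline above:

    static int ld_sketch(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr)
    {
            struct kvmppc_pte pte;
            void *magic = vcpu->arch.shared;

            if (kvmppc_xlate(vcpu, *eaddr, XLATE_DATA, XLATE_READ, &pte))
                    return -ENOENT;

            if (magic_page_hit(vcpu, pte.raddr)) {  /* hypothetical helper */
                    memcpy(ptr, magic + (pte.raddr & 0xfff), size);
                    return EMULATE_DONE;
            }

            if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
                    return EMULATE_DO_MMIO;         /* let MMIO emulation handle it */

            return EMULATE_DONE;
    }
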
438 struct kvm_vcpu *vcpu; in kvm_arch_destroy_vm() local
440 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_destroy_vm()
441 kvm_arch_vcpu_free(vcpu); in kvm_arch_destroy_vm()
624 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_create() local
625 vcpu = kvmppc_core_vcpu_create(kvm, id); in kvm_arch_vcpu_create()
626 if (!IS_ERR(vcpu)) { in kvm_arch_vcpu_create()
627 vcpu->arch.wqp = &vcpu->wq; in kvm_arch_vcpu_create()
628 kvmppc_create_vcpu_debugfs(vcpu, id); in kvm_arch_vcpu_create()
630 return vcpu; in kvm_arch_vcpu_create()
633 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
637 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_free() argument
640 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_free()
642 kvmppc_remove_vcpu_debugfs(vcpu); in kvm_arch_vcpu_free()
644 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_free()
646 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_free()
649 kvmppc_xics_free_icp(vcpu); in kvm_arch_vcpu_free()
653 kvmppc_core_vcpu_free(vcpu); in kvm_arch_vcpu_free()
656 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
658 kvm_arch_vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
661 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
663 return kvmppc_core_pending_dec(vcpu); in kvm_cpu_has_pending_timer()
668 struct kvm_vcpu *vcpu; in kvmppc_decrementer_wakeup() local
670 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); in kvmppc_decrementer_wakeup()
671 kvmppc_decrementer_func(vcpu); in kvmppc_decrementer_wakeup()
676 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_init() argument
680 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); in kvm_arch_vcpu_init()
681 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; in kvm_arch_vcpu_init()
682 vcpu->arch.dec_expires = ~(u64)0; in kvm_arch_vcpu_init()
685 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_init()
687 ret = kvmppc_subarch_vcpu_init(vcpu); in kvm_arch_vcpu_init()
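
kvmppc_decrementer_wakeup() recovers its vcpu from the embedded hrtimer with container_of(), the standard idiom for a timer living inside a larger object. A standalone sketch of the same pairing of init and callback:

    struct box {
            struct hrtimer timer;           /* embedded, as in kvm_vcpu_arch */
    };

    static enum hrtimer_restart box_fire(struct hrtimer *t)
    {
            struct box *b = container_of(t, struct box, timer);

            (void)b;                        /* act on the owning object here */
            return HRTIMER_NORESTART;
    }

    static void box_init(struct box *b)
    {
            hrtimer_init(&b->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
            b->timer.function = box_fire;
    }
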
691 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_uninit() argument
693 kvmppc_mmu_destroy(vcpu); in kvm_arch_vcpu_uninit()
694 kvmppc_subarch_vcpu_uninit(vcpu); in kvm_arch_vcpu_uninit()
697 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
707 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
709 kvmppc_core_vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
712 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
714 kvmppc_core_vcpu_put(vcpu); in kvm_arch_vcpu_put()
716 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
720 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, in kvmppc_complete_mmio_load() argument
730 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
746 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
762 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
764 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
766 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
769 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
773 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
776 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; in kvmppc_complete_mmio_load()
777 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
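
Before the value lands in a register, kvmppc_complete_mmio_load() applies up to two fixups: a byte swap when the guest access and host endianness disagreed (mmio_host_swabbed) and a sign extension (mmio_sign_extend, set by kvmppc_handle_loads()). An illustrative recap for a 2-byte load that needs both, assuming the usual run->mmio.data layout:

    static u64 fixup_sketch(struct kvm_run *run)
    {
            u64 gpr = *(u16 *)run->mmio.data;       /* raw bytes from userspace */

            gpr = swab16((u16)gpr);                 /* if mmio_host_swabbed */
            gpr = (s64)(s16)gpr;                    /* if mmio_sign_extend */
            return gpr;
    }
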
785 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_handle_load() argument
793 if (kvmppc_need_byteswap(vcpu)) { in kvmppc_handle_load()
804 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_load()
808 vcpu->arch.io_gpr = rt; in kvmppc_handle_load()
809 vcpu->arch.mmio_host_swabbed = host_swabbed; in kvmppc_handle_load()
810 vcpu->mmio_needed = 1; in kvmppc_handle_load()
811 vcpu->mmio_is_write = 0; in kvmppc_handle_load()
812 vcpu->arch.mmio_sign_extend = 0; in kvmppc_handle_load()
814 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_load()
816 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_load()
819 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_load()
822 kvmppc_complete_mmio_load(vcpu, run); in kvmppc_handle_load()
823 vcpu->mmio_needed = 0; in kvmppc_handle_load()
832 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_handle_loads() argument
838 vcpu->arch.mmio_sign_extend = 1; in kvmppc_handle_loads()
839 r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian); in kvmppc_handle_loads()
844 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_handle_store() argument
852 if (kvmppc_need_byteswap(vcpu)) { in kvmppc_handle_store()
863 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
866 vcpu->mmio_needed = 1; in kvmppc_handle_store()
867 vcpu->mmio_is_write = 1; in kvmppc_handle_store()
886 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_store()
888 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_store()
891 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_store()
894 vcpu->mmio_needed = 0; in kvmppc_handle_store()
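
When no in-kernel device on KVM_MMIO_BUS claims the address, both handlers return EMULATE_DO_MMIO and hand run->mmio to userspace. The matching userspace loop is roughly the following sketch; vcpu_fd, device_read() and device_write() are assumptions standing in for a real device model:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* hypothetical device-model backends */
    void device_read(__u64 addr, void *data, __u32 len);
    void device_write(__u64 addr, const void *data, __u32 len);

    /* run points at the mmap()ed kvm_run area of vcpu_fd */
    void mmio_loop(int vcpu_fd, struct kvm_run *run)
    {
            for (;;) {
                    if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                            break;
                    if (run->exit_reason != KVM_EXIT_MMIO)
                            continue;               /* other exits elided */
                    if (run->mmio.is_write)
                            device_write(run->mmio.phys_addr,
                                         run->mmio.data, run->mmio.len);
                    else
                            device_read(run->mmio.phys_addr,
                                        run->mmio.data, run->mmio.len);
                    /* on the next KVM_RUN, kvmppc_complete_mmio_load() moves
                     * run->mmio.data into the guest register */
            }
    }
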
902 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_get_one_reg() argument
912 r = kvmppc_get_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_get_one_reg()
922 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; in kvm_vcpu_ioctl_get_one_reg()
929 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); in kvm_vcpu_ioctl_get_one_reg()
932 val = get_reg_val(reg->id, vcpu->arch.vrsave); in kvm_vcpu_ioctl_get_one_reg()
950 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) in kvm_vcpu_ioctl_set_one_reg() argument
963 r = kvmppc_set_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_set_one_reg()
973 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; in kvm_vcpu_ioctl_set_one_reg()
980 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
987 vcpu->arch.vrsave = set_reg_val(reg->id, val); in kvm_vcpu_ioctl_set_one_reg()
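
Both paths are driven from userspace by the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls, where struct kvm_one_reg carries a register id and a pointer to a caller buffer. Reading VRSAVE, one of the ids handled above, might look like this sketch (vcpu_fd assumed to be an open vcpu file descriptor):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdint.h>
    #include <stdio.h>

    uint32_t read_vrsave(int vcpu_fd)
    {
            uint32_t vrsave = 0;
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_PPC_VRSAVE,
                    .addr = (uintptr_t)&vrsave,
            };

            if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                    perror("KVM_GET_ONE_REG");
            return vrsave;
    }
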
999 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_arch_vcpu_ioctl_run() argument
1004 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
1005 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); in kvm_arch_vcpu_ioctl_run()
1007 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1008 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1009 kvmppc_complete_mmio_load(vcpu, run); in kvm_arch_vcpu_ioctl_run()
1010 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1011 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1016 kvmppc_set_gpr(vcpu, i, gprs[i]); in kvm_arch_vcpu_ioctl_run()
1017 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1018 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1021 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); in kvm_arch_vcpu_ioctl_run()
1023 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); in kvm_arch_vcpu_ioctl_run()
1024 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1026 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1027 kvmppc_set_epr(vcpu, run->epr.epr); in kvm_arch_vcpu_ioctl_run()
1028 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1032 r = kvmppc_vcpu_run(run, vcpu); in kvm_arch_vcpu_ioctl_run()
1034 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
1040 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) in kvm_vcpu_ioctl_interrupt() argument
1043 kvmppc_core_dequeue_external(vcpu); in kvm_vcpu_ioctl_interrupt()
1047 kvmppc_core_queue_external(vcpu, irq); in kvm_vcpu_ioctl_interrupt()
1049 kvm_vcpu_kick(vcpu); in kvm_vcpu_ioctl_interrupt()
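
This is the backend of the KVM_INTERRUPT vcpu ioctl; on PPC the irq field doubles as a command, and KVM_INTERRUPT_UNSET takes the dequeue branch above. Userspace sketch (vcpu_fd assumed):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    int raise_external_irq(int vcpu_fd)
    {
            struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_SET };

            return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
    }
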
1054 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
1065 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1069 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1074 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1076 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1081 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1093 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); in kvm_vcpu_ioctl_enable_cap()
1110 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1129 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1141 r = kvmppc_sanity_check(vcpu); in kvm_vcpu_ioctl_enable_cap()
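
All of these per-vcpu flags are flipped through the KVM_ENABLE_CAP vcpu ioctl, and the trailing kvmppc_sanity_check() call means an inconsistent combination is rejected here rather than surfacing at KVM_RUN time. Enabling OSI hypercalls, for instance (sketch, vcpu_fd assumed):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <string.h>

    int enable_osi(int vcpu_fd)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_PPC_OSI;      /* flips vcpu->arch.osi_enabled */
            return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }
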
1146 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
1152 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
1161 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
1171 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_ioctl()
1181 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
1193 r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
1195 r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg); in kvm_arch_vcpu_ioctl()
1205 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); in kvm_arch_vcpu_ioctl()
1217 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument