Lines Matching refs:vcpu
78 static bool access_dcsw(struct kvm_vcpu *vcpu, in access_dcsw() argument
83 return read_from_write_only(vcpu, p); in access_dcsw()
85 kvm_set_way_flush(vcpu); in access_dcsw()
94 static bool access_vm_reg(struct kvm_vcpu *vcpu, in access_vm_reg() argument
99 bool was_enabled = vcpu_has_cache_enabled(vcpu); in access_vm_reg()
103 val = *vcpu_reg(vcpu, p->Rt); in access_vm_reg()
105 vcpu_sys_reg(vcpu, r->reg) = val; in access_vm_reg()
108 vcpu_cp15_64_high(vcpu, r->reg) = val >> 32; in access_vm_reg()
109 vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; in access_vm_reg()
112 kvm_toggle_cache(vcpu, was_enabled); in access_vm_reg()
122 static bool access_gic_sgi(struct kvm_vcpu *vcpu, in access_gic_sgi() argument
129 return read_from_write_only(vcpu, p); in access_gic_sgi()
131 val = *vcpu_reg(vcpu, p->Rt); in access_gic_sgi()
132 vgic_v3_dispatch_sgi(vcpu, val); in access_gic_sgi()
137 static bool trap_raz_wi(struct kvm_vcpu *vcpu, in trap_raz_wi() argument
142 return ignore_write(vcpu, p); in trap_raz_wi()
144 return read_zero(vcpu, p); in trap_raz_wi()
147 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, in trap_oslsr_el1() argument
152 return ignore_write(vcpu, p); in trap_oslsr_el1()
154 *vcpu_reg(vcpu, p->Rt) = (1 << 3); in trap_oslsr_el1()
159 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, in trap_dbgauthstatus_el1() argument
164 return ignore_write(vcpu, p); in trap_dbgauthstatus_el1()
168 *vcpu_reg(vcpu, p->Rt) = val; in trap_dbgauthstatus_el1()
200 static bool trap_debug_regs(struct kvm_vcpu *vcpu, in trap_debug_regs() argument
205 vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); in trap_debug_regs()
206 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; in trap_debug_regs()
208 *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg); in trap_debug_regs()
214 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) in reset_amair_el1() argument
219 vcpu_sys_reg(vcpu, AMAIR_EL1) = amair; in reset_amair_el1()
222 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) in reset_mpidr() argument
233 mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); in reset_mpidr()
234 mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); in reset_mpidr()
235 mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); in reset_mpidr()
236 vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr; in reset_mpidr()
486 static bool trap_dbgidr(struct kvm_vcpu *vcpu, in trap_dbgidr() argument
491 return ignore_write(vcpu, p); in trap_dbgidr()
497 *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) | in trap_dbgidr()
505 static bool trap_debug32(struct kvm_vcpu *vcpu, in trap_debug32() argument
510 vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); in trap_debug32()
511 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; in trap_debug32()
513 *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg); in trap_debug32()
745 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_cp14_load_store() argument
747 kvm_inject_undefined(vcpu); in kvm_handle_cp14_load_store()
761 static int emulate_cp(struct kvm_vcpu *vcpu, in emulate_cp() argument
782 if (likely(r->access(vcpu, params, r))) { in emulate_cp()
784 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); in emulate_cp()
795 static void unhandled_cp_access(struct kvm_vcpu *vcpu, in unhandled_cp_access() argument
798 u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); in unhandled_cp_access()
815 cp, *vcpu_pc(vcpu)); in unhandled_cp_access()
817 kvm_inject_undefined(vcpu); in unhandled_cp_access()
825 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, in kvm_handle_cp_64() argument
832 u32 hsr = kvm_vcpu_get_hsr(vcpu); in kvm_handle_cp_64()
852 u64 val = *vcpu_reg(vcpu, params.Rt); in kvm_handle_cp_64()
854 val |= *vcpu_reg(vcpu, Rt2) << 32; in kvm_handle_cp_64()
855 *vcpu_reg(vcpu, params.Rt) = val; in kvm_handle_cp_64()
858 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) in kvm_handle_cp_64()
860 	if (!emulate_cp(vcpu, &params, global, nr_global)) in kvm_handle_cp_64()
863 	unhandled_cp_access(vcpu, &params); in kvm_handle_cp_64()
868 u64 val = *vcpu_reg(vcpu, params.Rt); in kvm_handle_cp_64()
870 *vcpu_reg(vcpu, Rt2) = val; in kvm_handle_cp_64()
881 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, in kvm_handle_cp_32() argument
888 u32 hsr = kvm_vcpu_get_hsr(vcpu); in kvm_handle_cp_32()
900 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) in kvm_handle_cp_32()
902 	if (!emulate_cp(vcpu, &params, global, nr_global)) in kvm_handle_cp_32()
905 	unhandled_cp_access(vcpu, &params); in kvm_handle_cp_32()
909 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_cp15_64() argument
914 target_specific = get_target_table(vcpu->arch.target, false, &num); in kvm_handle_cp15_64()
915 return kvm_handle_cp_64(vcpu, in kvm_handle_cp15_64()
920 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_cp15_32() argument
925 target_specific = get_target_table(vcpu->arch.target, false, &num); in kvm_handle_cp15_32()
926 return kvm_handle_cp_32(vcpu, in kvm_handle_cp15_32()
931 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_cp14_64() argument
933 return kvm_handle_cp_64(vcpu, in kvm_handle_cp14_64()
938 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_cp14_32() argument
940 return kvm_handle_cp_32(vcpu, in kvm_handle_cp14_32()
945 static int emulate_sys_reg(struct kvm_vcpu *vcpu, in emulate_sys_reg() argument
951 table = get_target_table(vcpu->arch.target, true, &num); in emulate_sys_reg()
967 if (likely(r->access(vcpu, params, r))) { in emulate_sys_reg()
969 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); in emulate_sys_reg()
975 *vcpu_pc(vcpu)); in emulate_sys_reg()
978 kvm_inject_undefined(vcpu); in emulate_sys_reg()
982 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, in reset_sys_reg_descs() argument
989 table[i].reset(vcpu, &table[i]); in reset_sys_reg_descs()
997 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_sys_reg() argument
1000 unsigned long esr = kvm_vcpu_get_hsr(vcpu); in kvm_handle_sys_reg()
1012 	return emulate_sys_reg(vcpu, &params); in kvm_handle_sys_reg()
1049 static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, in index_to_sys_reg_desc() argument
1063 table = get_target_table(vcpu->arch.target, true, &num); in index_to_sys_reg_desc()
1291 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in kvm_arm_sys_reg_get_reg() argument
1302 r = index_to_sys_reg_desc(vcpu, reg->id); in kvm_arm_sys_reg_get_reg()
1306 return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id); in kvm_arm_sys_reg_get_reg()
1309 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in kvm_arm_sys_reg_set_reg() argument
1320 r = index_to_sys_reg_desc(vcpu, reg->id); in kvm_arm_sys_reg_set_reg()
1324 return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id); in kvm_arm_sys_reg_set_reg()
1378 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) in walk_sys_regs() argument
1385 i1 = get_target_table(vcpu->arch.target, true, &num); in walk_sys_regs()
1420 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) in kvm_arm_num_sys_reg_descs() argument
1424 + walk_sys_regs(vcpu, (u64 __user *)NULL); in kvm_arm_num_sys_reg_descs()
1427 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) in kvm_arm_copy_sys_reg_indices() argument
1439 err = walk_sys_regs(vcpu, uindices); in kvm_arm_copy_sys_reg_indices()
1504 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) in kvm_reset_sys_regs() argument
1510 memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); in kvm_reset_sys_regs()
1513 reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); in kvm_reset_sys_regs()
1515 table = get_target_table(vcpu->arch.target, true, &num); in kvm_reset_sys_regs()
1516 reset_sys_reg_descs(vcpu, table, num); in kvm_reset_sys_regs()
1519 if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242) in kvm_reset_sys_regs()