root/arch/arm/include/asm/kvm_emulate.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. vcpu_reg32
  2. vpcu_read_spsr
  3. vcpu_write_spsr
  4. host_spsr_to_spsr32
  5. vcpu_get_reg
  6. vcpu_set_reg
  7. kvm_inject_undefined
  8. kvm_inject_dabt
  9. kvm_inject_pabt
  10. kvm_condition_valid
  11. kvm_skip_instr
  12. vcpu_reset_hcr
  13. vcpu_hcr
  14. vcpu_clear_wfe_traps
  15. vcpu_set_wfe_traps
  16. vcpu_mode_is_32bit
  17. vcpu_pc
  18. vcpu_cpsr
  19. vcpu_set_thumb
  20. mode_has_spsr
  21. vcpu_mode_priv
  22. kvm_vcpu_get_hsr
  23. kvm_vcpu_get_condition
  24. kvm_vcpu_get_hfar
  25. kvm_vcpu_get_fault_ipa
  26. kvm_vcpu_dabt_isvalid
  27. kvm_vcpu_dabt_iswrite
  28. kvm_vcpu_dabt_issext
  29. kvm_vcpu_dabt_issf
  30. kvm_vcpu_dabt_get_rd
  31. kvm_vcpu_dabt_iss1tw
  32. kvm_vcpu_dabt_is_cm
  33. kvm_vcpu_dabt_get_as
  34. kvm_vcpu_trap_il_is32bit
  35. kvm_vcpu_trap_get_class
  36. kvm_vcpu_trap_is_iabt
  37. kvm_vcpu_trap_get_fault
  38. kvm_vcpu_trap_get_fault_type
  39. kvm_vcpu_dabt_isextabt
  40. kvm_is_write_fault
  41. kvm_vcpu_hvc_get_imm
  42. kvm_vcpu_get_mpidr_aff
  43. kvm_arm_get_vcpu_workaround_2_flag
  44. kvm_arm_set_vcpu_workaround_2_flag
  45. kvm_vcpu_set_be
  46. kvm_vcpu_is_be
  47. vcpu_data_guest_to_host
  48. vcpu_data_host_to_guest
  49. vcpu_has_ptrauth
  50. vcpu_ptrauth_disable

   1 /* SPDX-License-Identifier: GPL-2.0-only */
   2 /*
   3  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   4  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
   5  */
   6 
   7 #ifndef __ARM_KVM_EMULATE_H__
   8 #define __ARM_KVM_EMULATE_H__
   9 
  10 #include <linux/kvm_host.h>
  11 #include <asm/kvm_asm.h>
  12 #include <asm/kvm_mmio.h>
  13 #include <asm/kvm_arm.h>
  14 #include <asm/cputype.h>
  15 
  16 /* arm64 compatibility macros */
  17 #define PSR_AA32_MODE_FIQ       FIQ_MODE
  18 #define PSR_AA32_MODE_SVC       SVC_MODE
  19 #define PSR_AA32_MODE_ABT       ABT_MODE
  20 #define PSR_AA32_MODE_UND       UND_MODE
  21 #define PSR_AA32_T_BIT          PSR_T_BIT
  22 #define PSR_AA32_F_BIT          PSR_F_BIT
  23 #define PSR_AA32_I_BIT          PSR_I_BIT
  24 #define PSR_AA32_A_BIT          PSR_A_BIT
  25 #define PSR_AA32_E_BIT          PSR_E_BIT
  26 #define PSR_AA32_IT_MASK        PSR_IT_MASK
  27 #define PSR_AA32_GE_MASK        0x000f0000
  28 #define PSR_AA32_DIT_BIT        0x00200000
  29 #define PSR_AA32_PAN_BIT        0x00400000
  30 #define PSR_AA32_SSBS_BIT       0x00800000
  31 #define PSR_AA32_Q_BIT          PSR_Q_BIT
  32 #define PSR_AA32_V_BIT          PSR_V_BIT
  33 #define PSR_AA32_C_BIT          PSR_C_BIT
  34 #define PSR_AA32_Z_BIT          PSR_Z_BIT
  35 #define PSR_AA32_N_BIT          PSR_N_BIT
  36 
  37 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
  38 
/*
 * Return a pointer to 32-bit guest register @reg_num.  On 32-bit ARM this
 * is just vcpu_reg(); the separate name exists for arm64 compatibility.
 */
static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
{
	return vcpu_reg(vcpu, reg_num);
}
  43 
  44 unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
  45 
/*
 * Read the banked SPSR for the vcpu's current mode.
 * NOTE(review): "vpcu" is a historical typo; the name is kept unchanged
 * for source compatibility with existing callers.
 */
static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
{
	return *__vcpu_spsr(vcpu);
}
  50 
/* Write @v to the banked SPSR for the vcpu's current mode. */
static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	*__vcpu_spsr(vcpu) = v;
}
  55 
/*
 * Convert a host SPSR value to its AArch32 layout.  The host already runs
 * AArch32 here, so this is the identity (arm64 compatibility hook).
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	return spsr;
}
  60 
/* Read the value of guest register @reg_num. */
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return *vcpu_reg(vcpu, reg_num);
}
  66 
/* Set guest register @reg_num to @val. */
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	*vcpu_reg(vcpu, reg_num) = val;
}
  72 
  73 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
  74 void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
  75 void kvm_inject_undef32(struct kvm_vcpu *vcpu);
  76 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
  77 void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
  78 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
  79 
/* Inject an undefined-instruction exception; always 32-bit on this arch. */
static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	kvm_inject_undef32(vcpu);
}
  84 
/* Inject a data abort for faulting address @addr into the guest. */
static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_dabt32(vcpu, addr);
}
  89 
/* Inject a prefetch abort for faulting address @addr into the guest. */
static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_pabt32(vcpu, addr);
}
  94 
/* Check whether the trapped instruction's condition code passes. */
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	return kvm_condition_valid32(vcpu);
}
  99 
/* Advance the guest PC past the trapped instruction (2 or 4 bytes). */
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	kvm_skip_instr32(vcpu, is_wide_instr);
}
 104 
/* Reset the Hyp Configuration Register to the default guest trap set. */
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr = HCR_GUEST_MASK;
}
 109 
/*
 * Return a mutable pointer to the vcpu's HCR value.  The const is cast
 * away deliberately so callers holding a const vcpu can still tweak traps.
 */
static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr;
}
 114 
/* Stop trapping guest WFE instructions to the hypervisor. */
static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr &= ~HCR_TWE;
}
 119 
/* Trap guest WFE instructions to the hypervisor. */
static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr |= HCR_TWE;
}
 124 
/* A 32-bit ARM guest is always in a 32-bit mode. */
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return true;
}
 129 
/* Return a pointer to the guest PC. */
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}
 134 
/* Return a mutable pointer to the guest CPSR (const cast away on purpose). */
static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}
 139 
/* Put the guest into Thumb state by setting the CPSR T bit. */
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_T_BIT;
}
 144 
 145 static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
 146 {
 147         unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
 148         return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
 149 }
 150 
/* Return true if the guest runs in a privileged (non-USR) mode. */
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
	return cpsr_mode > USR_MODE;
}
 156 
/* Return the Hyp Syndrome Register value captured on the last trap. */
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hsr;
}
 161 
 162 static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 163 {
 164         u32 hsr = kvm_vcpu_get_hsr(vcpu);
 165 
 166         if (hsr & HSR_CV)
 167                 return (hsr & HSR_COND) >> HSR_COND_SHIFT;
 168 
 169         return -1;
 170 }
 171 
/* Return the faulting virtual address (HDFAR/HIFAR) of the last abort. */
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hxfar;
}
 176 
/* Return the faulting IPA: HPFAR holds bits [39:12] of the address >> 8. */
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}
 181 
/* True if HSR.ISV is set, i.e. the abort syndrome fields are valid. */
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}
 186 
/* True if the data abort was caused by a write (HSR.WnR). */
static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
}
 191 
/* True if the aborting load requires sign extension (HSR.SSE). */
static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}
 196 
/* 64-bit (sixty-four) register transfers never happen on 32-bit ARM. */
static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return false;
}
 201 
/* Return the transfer register number (HSR.SRT) of the aborting access. */
static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}
 206 
/* True if the abort happened during a stage-1 page-table walk (HSR.S1PTW). */
static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}
 211 
/* True if the abort came from a cache maintenance operation (HSR.CM). */
static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
}
 216 
 217 /* Get Access Size from a data abort */
 218 static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
 219 {
 220         switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
 221         case 0:
 222                 return 1;
 223         case 1:
 224                 return 2;
 225         case 2:
 226                 return 4;
 227         default:
 228                 kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
 229                 return -EFAULT;
 230         }
 231 }
 232 
/* This one is not specific to Data Abort */
/* True if the trapped instruction was 32 bits wide (HSR.IL). */
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}
 238 
/* Return the exception class (HSR.EC) of the last trap. */
static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}
 243 
/* True if the trap was an instruction (prefetch) abort. */
static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}
 248 
/* Return the full fault status code (HSR.FSC) of the last abort. */
static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
}
 253 
/* Return the fault status code with the level bits masked off. */
static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}
 258 
/*
 * True if the abort was a synchronous external abort or parity/ECC error,
 * either on the access itself or on any level of the translation table walk.
 */
static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}
 277 
 278 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 279 {
 280         if (kvm_vcpu_trap_is_iabt(vcpu))
 281                 return false;
 282 
 283         return kvm_vcpu_dabt_iswrite(vcpu);
 284 }
 285 
/* Return the immediate operand of the trapped HVC instruction. */
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}
 290 
/* Return the affinity fields of the guest's emulated MPIDR. */
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
}
 295 
/* SSBD (Spectre-v4) mitigation state is not tracked on 32-bit ARM. */
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
	return false;
}
 300 
/* No-op: SSBD mitigation state is not tracked on 32-bit ARM. */
static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
						      bool flag)
{
}
 305 
/* Switch the guest to big-endian data accesses (set CPSR.E). */
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_E_BIT;
}
 310 
/* True if the guest currently performs big-endian data accesses. */
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
}
 315 
 316 static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
 317                                                     unsigned long data,
 318                                                     unsigned int len)
 319 {
 320         if (kvm_vcpu_is_be(vcpu)) {
 321                 switch (len) {
 322                 case 1:
 323                         return data & 0xff;
 324                 case 2:
 325                         return be16_to_cpu(data & 0xffff);
 326                 default:
 327                         return be32_to_cpu(data);
 328                 }
 329         } else {
 330                 switch (len) {
 331                 case 1:
 332                         return data & 0xff;
 333                 case 2:
 334                         return le16_to_cpu(data & 0xffff);
 335                 default:
 336                         return le32_to_cpu(data);
 337                 }
 338         }
 339 }
 340 
 341 static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
 342                                                     unsigned long data,
 343                                                     unsigned int len)
 344 {
 345         if (kvm_vcpu_is_be(vcpu)) {
 346                 switch (len) {
 347                 case 1:
 348                         return data & 0xff;
 349                 case 2:
 350                         return cpu_to_be16(data & 0xffff);
 351                 default:
 352                         return cpu_to_be32(data);
 353                 }
 354         } else {
 355                 switch (len) {
 356                 case 1:
 357                         return data & 0xff;
 358                 case 2:
 359                         return cpu_to_le16(data & 0xffff);
 360                 default:
 361                         return cpu_to_le32(data);
 362                 }
 363         }
 364 }
 365 
/* Pointer authentication does not exist on 32-bit ARM. */
static inline bool vcpu_has_ptrauth(struct kvm_vcpu *vcpu) { return false; }
/* No-op: pointer authentication does not exist on 32-bit ARM. */
static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) { }
 368 
 369 #endif /* __ARM_KVM_EMULATE_H__ */

/* [<][>][^][v][top][bottom][index][help] */