root/arch/arm64/kvm/regmap.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. vcpu_reg32
  2. vcpu_spsr32_mode
  3. vcpu_read_spsr32
  4. vcpu_write_spsr32

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (C) 2012,2013 - ARM Ltd
   4  * Author: Marc Zyngier <marc.zyngier@arm.com>
   5  *
   6  * Derived from arch/arm/kvm/emulate.c:
   7  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
   8  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
   9  */
  10 
  11 #include <linux/mm.h>
  12 #include <linux/kvm_host.h>
  13 #include <asm/kvm_emulate.h>
  14 #include <asm/ptrace.h>
  15 
  16 #define VCPU_NR_MODES 6
  17 #define REG_OFFSET(_reg) \
  18         (offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))
  19 
  20 #define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))
  21 
/*
 * Per-mode mapping from AArch32 register number (r0..r15) to the offset,
 * in units of unsigned long, of the backing field inside
 * struct user_pt_regs.  Rows are indexed by the compressed mode value
 * computed in vcpu_reg32() (0 = USR/SYS, 1 = FIQ, 2 = IRQ, 3 = SVC,
 * 4 = ABT, 5 = UND); columns by the 4-bit register number.
 *
 * FIQ banks r8-r14; IRQ/SVC/ABT/UND bank only r13 (sp) and r14 (lr).
 * r15 (pc) is shared by all modes.
 */
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
	/* USR Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
		REG_OFFSET(pc)
	},

	/* FIQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(compat_r8_fiq),  /* r8 */
		REG_OFFSET(compat_r9_fiq),  /* r9 */
		REG_OFFSET(compat_r10_fiq), /* r10 */
		REG_OFFSET(compat_r11_fiq), /* r11 */
		REG_OFFSET(compat_r12_fiq), /* r12 */
		REG_OFFSET(compat_sp_fiq),  /* r13 */
		REG_OFFSET(compat_lr_fiq),  /* r14 */
		REG_OFFSET(pc)
	},

	/* IRQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_irq), /* r13 */
		REG_OFFSET(compat_lr_irq), /* r14 */
		REG_OFFSET(pc)
	},

	/* SVC Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_svc), /* r13 */
		REG_OFFSET(compat_lr_svc), /* r14 */
		REG_OFFSET(pc)
	},

	/* ABT Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_abt), /* r13 */
		REG_OFFSET(compat_lr_abt), /* r14 */
		REG_OFFSET(pc)
	},

	/* UND Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_und), /* r13 */
		REG_OFFSET(compat_lr_und), /* r14 */
		REG_OFFSET(pc)
	},
};
  96 
  97 /*
  98  * Return a pointer to the register number valid in the current mode of
  99  * the virtual CPU.
 100  */
 101 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
 102 {
 103         unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
 104         unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
 105 
 106         switch (mode) {
 107         case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC:
 108                 mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
 109                 break;
 110 
 111         case PSR_AA32_MODE_ABT:
 112                 mode = 4;
 113                 break;
 114 
 115         case PSR_AA32_MODE_UND:
 116                 mode = 5;
 117                 break;
 118 
 119         case PSR_AA32_MODE_SYS:
 120                 mode = 0;       /* SYS maps to USR */
 121                 break;
 122 
 123         default:
 124                 BUG();
 125         }
 126 
 127         return reg_array + vcpu_reg_offsets[mode][reg_num];
 128 }
 129 
 130 /*
 131  * Return the SPSR for the current mode of the virtual CPU.
 132  */
 133 static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
 134 {
 135         unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
 136         switch (mode) {
 137         case PSR_AA32_MODE_SVC: return KVM_SPSR_SVC;
 138         case PSR_AA32_MODE_ABT: return KVM_SPSR_ABT;
 139         case PSR_AA32_MODE_UND: return KVM_SPSR_UND;
 140         case PSR_AA32_MODE_IRQ: return KVM_SPSR_IRQ;
 141         case PSR_AA32_MODE_FIQ: return KVM_SPSR_FIQ;
 142         default: BUG();
 143         }
 144 }
 145 
 146 unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu)
 147 {
 148         int spsr_idx = vcpu_spsr32_mode(vcpu);
 149 
 150         if (!vcpu->arch.sysregs_loaded_on_cpu)
 151                 return vcpu_gp_regs(vcpu)->spsr[spsr_idx];
 152 
 153         switch (spsr_idx) {
 154         case KVM_SPSR_SVC:
 155                 return read_sysreg_el1(SYS_SPSR);
 156         case KVM_SPSR_ABT:
 157                 return read_sysreg(spsr_abt);
 158         case KVM_SPSR_UND:
 159                 return read_sysreg(spsr_und);
 160         case KVM_SPSR_IRQ:
 161                 return read_sysreg(spsr_irq);
 162         case KVM_SPSR_FIQ:
 163                 return read_sysreg(spsr_fiq);
 164         default:
 165                 BUG();
 166         }
 167 }
 168 
 169 void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
 170 {
 171         int spsr_idx = vcpu_spsr32_mode(vcpu);
 172 
 173         if (!vcpu->arch.sysregs_loaded_on_cpu) {
 174                 vcpu_gp_regs(vcpu)->spsr[spsr_idx] = v;
 175                 return;
 176         }
 177 
 178         switch (spsr_idx) {
 179         case KVM_SPSR_SVC:
 180                 write_sysreg_el1(v, SYS_SPSR);
 181                 break;
 182         case KVM_SPSR_ABT:
 183                 write_sysreg(v, spsr_abt);
 184                 break;
 185         case KVM_SPSR_UND:
 186                 write_sysreg(v, spsr_und);
 187                 break;
 188         case KVM_SPSR_IRQ:
 189                 write_sysreg(v, spsr_irq);
 190                 break;
 191         case KVM_SPSR_FIQ:
 192                 write_sysreg(v, spsr_fiq);
 193                 break;
 194         }
 195 }

/* [<][>][^][v][top][bottom][index][help] */