/*
 * (not much of an) Emulation layer for 32bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/esr.h>
#include <asm/kvm_emulate.h>

/*
 * stolen from arch/arm/kernel/opcodes.c
 *
 * condition code lookup table
 * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
 *
 * bit position in short is condition code: NZCV
 */
static const unsigned short cc_map[16] = {
	0xF0F0,			/* EQ == Z set            */
	0x0F0F,			/* NE                     */
	0xCCCC,			/* CS == C set            */
	0x3333,			/* CC                     */
	0xFF00,			/* MI == N set            */
	0x00FF,			/* PL                     */
	0xAAAA,			/* VS == V set            */
	0x5555,			/* VC                     */
	0x0C0C,			/* HI == C set && Z clear */
	0xF3F3,			/* LS == C clear || Z set */
	0xAA55,			/* GE == (N==V)           */
	0x55AA,			/* LT == (N!=V)           */
	0x0A05,			/* GT == (!Z && (N==V))   */
	0xF5FA,			/* LE == (Z || (N!=V))    */
	0xFFFF,			/* AL always              */
	0			/* NV                     */
};

static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

/*
 * Check if a trapped instruction should have been executed or not.
 */
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
{
	unsigned long cpsr;
	u32 cpsr_cond;
	int cond;

	/* Top two bits non-zero? Unconditional. */
	if (kvm_vcpu_get_hsr(vcpu) >> 30)
		return true;

	/* Is condition field valid? */
	cond = kvm_vcpu_get_condition(vcpu);
	if (cond == 0xE)
		return true;

	cpsr = *vcpu_cpsr(vcpu);

	if (cond < 0) {
		/* This can happen in Thumb mode: examine IT state. */
		unsigned long it;

		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);

		/* it == 0 => unconditional. */
		if (it == 0)
			return true;

		/* The cond for this insn works out as the top 4 bits. */
		cond = (it >> 4);
	}

	cpsr_cond = cpsr >> 28;

	if (!((cc_map[cond] >> cpsr_cond) & 1))
		return false;

	return true;
}
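
/*
 * Worked example of the cc_map lookup above (illustration only, not part
 * of the original file): cpsr_cond is CPSR[31:28], i.e. N at bit 3, Z at
 * bit 2, C at bit 1 and V at bit 0. For a trapped EQ-conditional
 * instruction, cond is 0 and cc_map[0] is 0xF0F0; with Z set (say
 * NZCV == 0b0100) cpsr_cond is 4 and (0xF0F0 >> 4) & 1 == 1, so the
 * condition passes. With NZCV == 0b0000 the lookup yields
 * (0xF0F0 >> 0) & 1 == 0, the instruction should not have executed, and
 * the caller skips it without emulating any side effects.
 */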
/**
 * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu:	The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);

	BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK));

	if (!(cpsr & COMPAT_PSR_IT_MASK))
		return;

	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~COMPAT_PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}

/**
 * kvm_skip_instr32 - skip a trapped instruction and proceed to the next
 * @vcpu:	The vcpu pointer
 * @is_wide_instr:	true if the trapped instruction was 32 bits wide
 */
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
	if (is_thumb && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
	kvm_adjust_itstate(vcpu);
}
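
/*
 * Usage sketch (illustrative, based on the mode-checking wrappers in
 * arch/arm64/include/asm/kvm_emulate.h rather than on anything in this
 * file): exit handlers do not normally call the two exported functions
 * above directly, they go through kvm_condition_valid() and
 * kvm_skip_instr(), roughly along these lines:
 *
 *	if (!kvm_condition_valid(vcpu)) {	// -> kvm_condition_valid32()
 *		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 *		return 1;			// trapped insn acts as a NOP
 *	}
 *
 * where the wrappers dispatch to kvm_condition_valid32() and
 * kvm_skip_instr32() only when vcpu_mode_is_32bit(vcpu) is true, and
 * otherwise treat the instruction as unconditional and 4 bytes long.
 */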