/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	if (!p->is_aarch32) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
	} else {
		if (!p->is_32bit)
			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u32 val;
		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
		p->regval = val;
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}

static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}

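/*
 * Each breakpoint/watchpoint register comes as a quartet of helpers:
 * trap_xxx handles the guest MSR/MRS traps, get_xxx/set_xxx back the
 * KVM_{GET,SET}_ONE_REG userspace interface, and reset_xxx applies the
 * reset value on vcpu reset. They all operate directly on the
 * vcpu_debug_state arrays.
 */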
static inline bool trap_bvr(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static inline void reset_bvr(struct kvm_vcpu *vcpu,
			     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static inline bool trap_bcr(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static inline void reset_bcr(struct kvm_vcpu *vcpu,
			     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static inline bool trap_wvr(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static inline void reset_wvr(struct kvm_vcpu *vcpu,
			     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static inline bool trap_wcr(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static inline void reset_wcr(struct kvm_vcpu *vcpu,
			     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

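/*
 * AMAIR_EL1 is an IMPLEMENTATION DEFINED auxiliary attribute register;
 * rather than inventing a reset value, mirror whatever the host uses.
 */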
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0
 * was NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 *
 * Debug handling: We trap most, if not all, debug-related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OSLock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	/* MDCCINT_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	/* MDSCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	/* MDRAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  trap_raz_wi },
	/* OSLAR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
	  trap_raz_wi },
	/* OSLSR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
	  trap_oslsr_el1 },
	/* OSDLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
	  trap_raz_wi },
	/* DBGPRCR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
	  trap_raz_wi },
	/* DBGCLAIMSET_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
	  trap_raz_wi },
	/* DBGCLAIMCLR_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
	  trap_raz_wi },
	/* DBGAUTHSTATUS_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
	  trap_dbgauthstatus_el1 },

	/* MDCCSR_EL1 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
	  trap_raz_wi },
	/* DBGDTR[TR]X_EL0 */
	{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
	  trap_raz_wi },

	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  trap_raz_wi },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  trap_raz_wi },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },

	/* ICC_SGI1R_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
	  access_gic_sgi },
	/* ICC_SRE_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },

	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  trap_raz_wi },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  trap_raz_wi },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  trap_raz_wi },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  trap_raz_wi },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  trap_raz_wi },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  trap_raz_wi },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  trap_raz_wi },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  trap_raz_wi },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  trap_raz_wi },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  trap_raz_wi },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  trap_raz_wi },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  trap_raz_wi },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  trap_raz_wi },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */

static inline bool trap_xvr(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
};

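/* Trapped cp15 64bit registers, accessed via MCRR/MRRC */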
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct sys_reg_desc *r = &table[i];

		if (params->Op0 != r->Op0)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

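/*
 * We don't emulate cp14 LDC/STC (load/store) accesses at all: inject
 * an UNDEF into the guest and carry on.
 */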
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               calls the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		}

		/* Handled */
		return 0;
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp;

	switch(hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON((cp = -1));
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = (hsr >> 5) & 0xf;
	int Rt2 = (hsr >> 10) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
		goto out;
	if (!emulate_cp(vcpu, &params, global, nr_global))
		goto out;

	unhandled_cp_access(vcpu, &params);

out:
	/* Split up the value between registers for the read side */
	if (!params.is_write) {
		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
	}

	return 1;
}

/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = (hsr >> 5) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

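/*
 * The four entry points below are what gets called for the various
 * cp14/cp15 trap exception classes: they pick the relevant global
 * table (and, for cp15, the target-specific one) and defer to the
 * generic handlers above.
 */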
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = (esr >> 5) & 0x1f;
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

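/*
 * index_to_params() decodes a KVM_REG_ARM64_SYSREG register index back
 * into its Op0/Op1/CRn/CRm/Op2 fields. Only 64bit wide indices are
 * accepted here; anything else (or any stray bit) is rejected.
 */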
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

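/*
 * Userspace access to the invariant registers: a read returns the host
 * value snapshotted at init time, and a write is only accepted if it
 * matches that value exactly.
 */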
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

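/*
 * The CCSIDR values for each valid CSSELR are exposed to userspace as
 * "demux" registers: the CSSELR selector lives in the low bits of the
 * register index. Like the invariant registers, they reflect the host
 * caches and cannot be changed.
 */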
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

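/*
 * Write one register index out to the userspace buffer. Called with a
 * NULL buffer pointer when we only want to count the registers.
 */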
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Give userspace all the invariant registers' indices first. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

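/*
 * Sanity check that a trap table is strictly ordered (and therefore
 * free of duplicate encodings); walk_sys_regs() relies on this ordering.
 */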
static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in a reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}