arch 31 arch/alpha/kernel/audit.c int audit_classify_arch(int arch)
arch 83 arch/alpha/kernel/module.c me->arch.gotsecindex = s - sechdrs;
arch 152 arch/alpha/kernel/module.c got = sechdrs[me->arch.gotsecindex].sh_addr;
arch 56 arch/arc/include/asm/mach_desc.h __used __section(.arch.info.init) = { \
arch 30 arch/arc/kernel/module.c mod->arch.unw_sec_idx = 0;
arch 31 arch/arc/kernel/module.c mod->arch.unw_info = NULL;
arch 33 arch/arc/kernel/module.c mod->arch.secstr = secstr;
arch 40 arch/arc/kernel/module.c if (mod->arch.unw_info)
arch 41 arch/arc/kernel/module.c unwind_remove_table(mod->arch.unw_info, 0);
arch 67 arch/arc/kernel/module.c module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
arch 86 arch/arc/kernel/module.c s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
arch 114 arch/arc/kernel/module.c if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
arch 115 arch/arc/kernel/module.c module->arch.unw_sec_idx = tgtsec;
arch 137 arch/arc/kernel/module.c int unwsec = mod->arch.unw_sec_idx;
arch 142 arch/arc/kernel/module.c mod->arch.unw_info = unw;
arch 107 arch/arm/include/asm/kvm_emulate.h vcpu->arch.hcr = HCR_GUEST_MASK;
arch 112 arch/arm/include/asm/kvm_emulate.h return (unsigned long *)&vcpu->arch.hcr;
arch 117 arch/arm/include/asm/kvm_emulate.h vcpu->arch.hcr &= ~HCR_TWE;
arch 122 arch/arm/include/asm/kvm_emulate.h vcpu->arch.hcr |= HCR_TWE;
arch 132 arch/arm/include/asm/kvm_emulate.h return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
arch 137 arch/arm/include/asm/kvm_emulate.h return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
arch 147 arch/arm/include/asm/kvm_emulate.h unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
arch 153 arch/arm/include/asm/kvm_emulate.h unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
arch 159 arch/arm/include/asm/kvm_emulate.h return vcpu->arch.fault.hsr;
arch 174 arch/arm/include/asm/kvm_emulate.h return vcpu->arch.fault.hxfar;
arch 179 arch/arm/include/asm/kvm_emulate.h return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
arch 224 arch/arm/include/asm/kvm_host.h #define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r]
arch 425 arch/arm/include/asm/kvm_mmu.h struct kvm_vmid *vmid = &kvm->arch.vmid;
arch 428 arch/arm/include/asm/kvm_mmu.h baddr = kvm->arch.pgd_phys;
arch 171 arch/arm/kernel/asm-offsets.c DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt));
arch 172 arch/arm/kernel/asm-offsets.c DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
arch 148 arch/arm/kernel/hw_breakpoint.c u8 arch = get_debug_arch();
arch 151 arch/arm/kernel/hw_breakpoint.c return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
arch 152 arch/arm/kernel/hw_breakpoint.c arch >= ARM_DEBUG_ARCH_V7_1;
arch 45 arch/arm/kernel/machine_kexec.c image->arch.kernel_r2 = image->start - KEXEC_ARM_ZIMAGE_OFFSET
arch 73 arch/arm/kernel/machine_kexec.c image->arch.kernel_r2 = current_segment->mem;
arch 184 arch/arm/kernel/machine_kexec.c kexec_boot_atags = image->arch.kernel_r2;
arch 38 arch/arm/kernel/module-plts.c struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
arch 39 arch/arm/kernel/module-plts.c &mod->arch.init;
arch 202 arch/arm/kernel/module-plts.c mod->arch.core.plt = s;
arch 204 arch/arm/kernel/module-plts.c mod->arch.init.plt = s;
arch 209 arch/arm/kernel/module-plts.c if (!mod->arch.core.plt || !mod->arch.init.plt) {
arch 241 arch/arm/kernel/module-plts.c mod->arch.core.plt->sh_type = SHT_NOBITS;
arch 242 arch/arm/kernel/module-plts.c mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
arch 243 arch/arm/kernel/module-plts.c mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
arch 244 arch/arm/kernel/module-plts.c mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
arch 246 arch/arm/kernel/module-plts.c mod->arch.core.plt_count = 0;
arch 248 arch/arm/kernel/module-plts.c mod->arch.init.plt->sh_type = SHT_NOBITS;
arch 249 arch/arm/kernel/module-plts.c mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
arch 250 arch/arm/kernel/module-plts.c mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
arch 251 arch/arm/kernel/module-plts.c mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
arch 253 arch/arm/kernel/module-plts.c mod->arch.init.plt_count = 0;
arch 256 arch/arm/kernel/module-plts.c mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
arch 385 arch/arm/kernel/module.c mod->arch.unwind[i] =
arch 413 arch/arm/kernel/module.c if (mod->arch.unwind[i])
arch 414 arch/arm/kernel/module.c unwind_table_del(mod->arch.unwind[i]);
arch 282 arch/arm/kernel/setup.c static int cpu_has_aliasing_icache(unsigned int arch)
arch 292 arch/arm/kernel/setup.c switch (arch) {
arch 314 arch/arm/kernel/setup.c unsigned int arch = cpu_architecture();
arch 316 arch/arm/kernel/setup.c if (arch >= CPU_ARCH_ARMv6) {
arch 319 arch/arm/kernel/setup.c if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
arch 323 arch/arm/kernel/setup.c arch = CPU_ARCH_ARMv7;
arch 334 arch/arm/kernel/setup.c arch = CPU_ARCH_ARMv6;
arch 340 arch/arm/kernel/setup.c if (cpu_has_aliasing_icache(arch))
arch 275 arch/arm/kvm/coproc.c *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
arch 581 arch/arm/kvm/coproc.c table = get_target_table(vcpu->arch.target, &num);
arch 780 arch/arm/kvm/coproc.c table = get_target_table(vcpu->arch.target, &num);
arch 1099 arch/arm/kvm/coproc.c return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
arch 1109 arch/arm/kvm/coproc.c return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
arch 1111 arch/arm/kvm/coproc.c return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
arch 1113 arch/arm/kvm/coproc.c return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
arch 1115 arch/arm/kvm/coproc.c return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
arch 1143 arch/arm/kvm/coproc.c return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
arch 1153 arch/arm/kvm/coproc.c return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
arch 1155 arch/arm/kvm/coproc.c return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
arch 1157 arch/arm/kvm/coproc.c return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
arch 1159 arch/arm/kvm/coproc.c return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
arch 1335 arch/arm/kvm/coproc.c i1 = get_target_table(vcpu->arch.target, &num);
arch 1449 arch/arm/kvm/coproc.c table = get_target_table(vcpu->arch.target, &num);
arch 77 arch/arm/kvm/coproc.h BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
arch 84 arch/arm/kvm/coproc.h BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
arch 92 arch/arm/kvm/coproc.h BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
arch 103 arch/arm/kvm/emulate.c unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
arch 138 arch/arm/kvm/emulate.c return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
arch 140 arch/arm/kvm/emulate.c return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
arch 142 arch/arm/kvm/emulate.c return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
arch 144 arch/arm/kvm/emulate.c return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
arch 146 arch/arm/kvm/emulate.c return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
arch 46 arch/arm/kvm/guest.c struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
arch 63 arch/arm/kvm/guest.c struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
arch 36 arch/arm/kvm/hyp/switch.c write_sysreg(vcpu->arch.hcr, HCR);
arch 56 arch/arm/kvm/hyp/switch.c if (vcpu->arch.hcr & HCR_VA)
arch 57 arch/arm/kvm/hyp/switch.c vcpu->arch.hcr = read_sysreg(HCR);
arch 70 arch/arm/kvm/hyp/switch.c write_sysreg(vcpu->arch.midr, VPIDR);
arch 102 arch/arm/kvm/hyp/switch.c vcpu->arch.fault.hsr = hsr;
arch 141 arch/arm/kvm/hyp/switch.c vcpu->arch.fault.hxfar = far;
arch 142 arch/arm/kvm/hyp/switch.c vcpu->arch.fault.hpfar = hpfar;
arch 157 arch/arm/kvm/hyp/switch.c host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
arch 158 arch/arm/kvm/hyp/switch.c guest_ctxt = &vcpu->arch.ctxt;
arch 230 arch/arm/kvm/hyp/switch.c host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
arch 45 arch/arm/kvm/reset.c switch (vcpu->arch.target) {
arch 49 arch/arm/kvm/reset.c vcpu->arch.midr = read_cpuid_id();
arch 56 arch/arm/kvm/reset.c memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs));
arch 65 arch/arm/kvm/reset.c if (READ_ONCE(vcpu->arch.reset_state.reset)) {
arch 66 arch/arm/kvm/reset.c unsigned long target_pc = vcpu->arch.reset_state.pc;
arch 75 arch/arm/kvm/reset.c if (vcpu->arch.reset_state.be)
arch 79 arch/arm/kvm/reset.c vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
arch 81 arch/arm/kvm/reset.c vcpu->arch.reset_state.reset = false;
arch 54 arch/arm/probes/decode.c int arch = cpu_architecture();
arch 55 arch/arm/probes/decode.c BUG_ON(arch == CPU_ARCH_UNKNOWN);
arch 56 arch/arm/probes/decode.c load_write_pc_interworks = arch >= CPU_ARCH_ARMv5T;
arch 68 arch/arm/probes/decode.c int arch = cpu_architecture();
arch 69 arch/arm/probes/decode.c BUG_ON(arch == CPU_ARCH_UNKNOWN);
arch 70 arch/arm/probes/decode.c alu_write_pc_interworks = arch >= CPU_ARCH_ARMv7;
arch 42 arch/arm64/include/asm/kvm_emulate.h return !(vcpu->arch.hcr_el2 & HCR_RW);
arch 47 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
arch 49 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 |= HCR_E2H;
arch 52 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 |= HCR_TEA;
arch 54 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 |= HCR_TERR;
arch 57 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 |= HCR_FWB;
arch 59 arch/arm64/include/asm/kvm_emulate.h if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
arch 60 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 &= ~HCR_RW;
arch 68 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 |= HCR_TID3;
arch 72 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 |= HCR_TID2;
arch 77 arch/arm64/include/asm/kvm_emulate.h return (unsigned long *)&vcpu->arch.hcr_el2;
arch 82 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 &= ~HCR_TWE;
arch 87 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 |= HCR_TWE;
arch 92 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
arch 97 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
arch 102 arch/arm64/include/asm/kvm_emulate.h return vcpu->arch.vsesr_el2;
arch 107 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.vsesr_el2 = vsesr;
arch 122 arch/arm64/include/asm/kvm_emulate.h if (vcpu->arch.sysregs_loaded_on_cpu)
arch 130 arch/arm64/include/asm/kvm_emulate.h if (vcpu->arch.sysregs_loaded_on_cpu)
arch 182 arch/arm64/include/asm/kvm_emulate.h if (vcpu->arch.sysregs_loaded_on_cpu)
arch 195 arch/arm64/include/asm/kvm_emulate.h if (vcpu->arch.sysregs_loaded_on_cpu)
arch 249 arch/arm64/include/asm/kvm_emulate.h return vcpu->arch.fault.esr_el2;
arch 264 arch/arm64/include/asm/kvm_emulate.h return vcpu->arch.fault.far_el2;
arch 269 arch/arm64/include/asm/kvm_emulate.h return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
arch 274 arch/arm64/include/asm/kvm_emulate.h return vcpu->arch.fault.disr_el1;
arch 389 arch/arm64/include/asm/kvm_emulate.h return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
arch 396 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
arch 398 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
arch 500 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
arch 504 arch/arm64/include/asm/kvm_emulate.h write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
arch 344 arch/arm64/include/asm/kvm_host.h #define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
arch 345 arch/arm64/include/asm/kvm_host.h sve_ffr_offset((vcpu)->arch.sve_max_vl)))
arch 351 arch/arm64/include/asm/kvm_host.h if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \
arch 354 arch/arm64/include/asm/kvm_host.h __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl); \
arch 372 arch/arm64/include/asm/kvm_host.h ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
arch 376 arch/arm64/include/asm/kvm_host.h ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
arch 378 arch/arm64/include/asm/kvm_host.h #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
arch 386 arch/arm64/include/asm/kvm_host.h #define __vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
arch 397 arch/arm64/include/asm/kvm_host.h #define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
arch 398 arch/arm64/include/asm/kvm_host.h #define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
arch 680 arch/arm64/include/asm/kvm_host.h ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
arch 682 arch/arm64/include/asm/kvm_host.h #define kvm_arm_vcpu_loaded(vcpu) ((vcpu)->arch.sysregs_loaded_on_cpu)
arch 90 arch/arm64/include/asm/kvm_hyp.h write_sysreg(kvm->arch.vtcr, vtcr_el2);
arch 135 arch/arm64/include/asm/kvm_mmu.h #define kvm_phys_shift(kvm) VTCR_EL2_IPA(kvm->arch.vtcr)
arch 597 arch/arm64/include/asm/kvm_mmu.h struct kvm_vmid *vmid = &kvm->arch.vmid;
arch 601 arch/arm64/include/asm/kvm_mmu.h baddr = kvm->arch.pgd_phys;
arch 32 arch/arm64/include/asm/stage2_pgtable.h #define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr)
arch 92 arch/arm64/kernel/asm-offsets.c DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
arch 93 arch/arm64/kernel/asm-offsets.c DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
arch 94 arch/arm64/kernel/asm-offsets.c DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
arch 95 arch/arm64/kernel/asm-offsets.c DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
arch 109 arch/arm64/kernel/ftrace.c dst = mod->arch.ftrace_trampoline;
arch 220 arch/arm64/kernel/machine_kexec.c kimage->arch.dtb_mem);
arch 40 arch/arm64/kernel/machine_kexec_file.c vfree(image->arch.dtb);
arch 41 arch/arm64/kernel/machine_kexec_file.c image->arch.dtb = NULL;
arch 233 arch/arm64/kernel/machine_kexec_file.c image->arch.dtb = dtb;
arch 234 arch/arm64/kernel/machine_kexec_file.c image->arch.dtb_mem = kbuf.mem;
arch 75 arch/arm64/kernel/module-plts.c struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
arch 76 arch/arm64/kernel/module-plts.c &mod->arch.init;
arch 106 arch/arm64/kernel/module-plts.c struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
arch 107 arch/arm64/kernel/module-plts.c &mod->arch.init;
arch 270 arch/arm64/kernel/module-plts.c mod->arch.core.plt_shndx = i;
arch 272 arch/arm64/kernel/module-plts.c mod->arch.init.plt_shndx = i;
arch 281 arch/arm64/kernel/module-plts.c if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
arch 313 arch/arm64/kernel/module-plts.c pltsec = sechdrs + mod->arch.core.plt_shndx;
arch 318 arch/arm64/kernel/module-plts.c mod->arch.core.plt_num_entries = 0;
arch 319 arch/arm64/kernel/module-plts.c mod->arch.core.plt_max_entries = core_plts;
arch 321 arch/arm64/kernel/module-plts.c pltsec = sechdrs + mod->arch.init.plt_shndx;
arch 326 arch/arm64/kernel/module-plts.c mod->arch.init.plt_num_entries = 0;
arch 327 arch/arm64/kernel/module-plts.c mod->arch.init.plt_max_entries = init_plts;
arch 486 arch/arm64/kernel/module.c me->arch.ftrace_trampoline = (void *)s->sh_addr;
arch 40 arch/arm64/kvm/debug.c vcpu->arch.guest_debug_preserved.mdscr_el1 = val;
arch 43 arch/arm64/kvm/debug.c vcpu->arch.guest_debug_preserved.mdscr_el1);
arch 48 arch/arm64/kvm/debug.c u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;
arch 77 arch/arm64/kvm/debug.c vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
arch 103 arch/arm64/kvm/debug.c bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
arch 104 arch/arm64/kvm/debug.c unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
arch 112 arch/arm64/kvm/debug.c vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
arch 113 arch/arm64/kvm/debug.c vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
arch 122 arch/arm64/kvm/debug.c vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
arch 175 arch/arm64/kvm/debug.c vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
arch 176 arch/arm64/kvm/debug.c vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
arch 180 arch/arm64/kvm/debug.c &vcpu->arch.debug_ptr->dbg_bcr[0],
arch 181 arch/arm64/kvm/debug.c &vcpu->arch.debug_ptr->dbg_bvr[0]);
arch 184 arch/arm64/kvm/debug.c &vcpu->arch.debug_ptr->dbg_wcr[0],
arch 185 arch/arm64/kvm/debug.c &vcpu->arch.debug_ptr->dbg_wvr[0]);
arch 190 arch/arm64/kvm/debug.c vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
arch 194 arch/arm64/kvm/debug.c vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
arch 198 arch/arm64/kvm/debug.c vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
arch 201 arch/arm64/kvm/debug.c if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
arch 202 arch/arm64/kvm/debug.c write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
arch 204 arch/arm64/kvm/debug.c trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
arch 223 arch/arm64/kvm/debug.c &vcpu->arch.debug_ptr->dbg_bcr[0],
arch 224 arch/arm64/kvm/debug.c &vcpu->arch.debug_ptr->dbg_bvr[0]);
arch 227 arch/arm64/kvm/debug.c &vcpu->arch.debug_ptr->dbg_wcr[0],
arch 228 arch/arm64/kvm/debug.c &vcpu->arch.debug_ptr->dbg_wvr[0]);
arch 46 arch/arm64/kvm/fpsimd.c vcpu->arch.host_thread_info = kern_hyp_va(ti);
arch 47 arch/arm64/kvm/fpsimd.c vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
arch 66 arch/arm64/kvm/fpsimd.c vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
arch 69 arch/arm64/kvm/fpsimd.c vcpu->arch.flags |= KVM_ARM64_FP_HOST;
arch 72 arch/arm64/kvm/fpsimd.c vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
arch 75 arch/arm64/kvm/fpsimd.c vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
arch 88 arch/arm64/kvm/fpsimd.c if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
arch 89 arch/arm64/kvm/fpsimd.c fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs,
arch 90 arch/arm64/kvm/fpsimd.c vcpu->arch.sve_state,
arch 91 arch/arm64/kvm/fpsimd.c vcpu->arch.sve_max_vl);
arch 112 arch/arm64/kvm/fpsimd.c if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
arch 113 arch/arm64/kvm/fpsimd.c u64 *guest_zcr = &vcpu->arch.ctxt.sys_regs[ZCR_EL1];
arch 127 arch/arm64/kvm/fpsimd.c if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
arch 134 arch/arm64/kvm/fpsimd.c vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
arch 228 arch/arm64/kvm/guest.c if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
arch 233 arch/arm64/kvm/guest.c max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
arch 255 arch/arm64/kvm/guest.c if (WARN_ON(vcpu->arch.sve_state))
arch 285 arch/arm64/kvm/guest.c vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);
arch 361 arch/arm64/kvm/guest.c vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
arch 371 arch/arm64/kvm/guest.c vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
arch 410 arch/arm64/kvm/guest.c if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
arch 436 arch/arm64/kvm/guest.c if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
arch 716 arch/arm64/kvm/guest.c events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
arch 844 arch/arm64/kvm/guest.c vcpu->arch.external_debug_state = dbg->arch;
arch 126 arch/arm64/kvm/handle_exit.c run->debug.arch.hsr = hsr;
arch 130 arch/arm64/kvm/handle_exit.c run->debug.arch.far = vcpu->arch.fault.far_el2;
arch 183 arch/arm64/kvm/hyp/debug-sr.c __debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1);
arch 185 arch/arm64/kvm/hyp/debug-sr.c if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
arch 188 arch/arm64/kvm/hyp/debug-sr.c host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
arch 189 arch/arm64/kvm/hyp/debug-sr.c guest_ctxt = &vcpu->arch.ctxt;
arch 190 arch/arm64/kvm/hyp/debug-sr.c host_dbg = &vcpu->arch.host_debug_state.regs;
arch 191 arch/arm64/kvm/hyp/debug-sr.c guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
arch 205 arch/arm64/kvm/hyp/debug-sr.c __debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1);
arch 207 arch/arm64/kvm/hyp/debug-sr.c if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
arch 210 arch/arm64/kvm/hyp/debug-sr.c host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
arch 211 arch/arm64/kvm/hyp/debug-sr.c guest_ctxt = &vcpu->arch.ctxt;
arch 212 arch/arm64/kvm/hyp/debug-sr.c host_dbg = &vcpu->arch.host_debug_state.regs;
arch 213 arch/arm64/kvm/hyp/debug-sr.c guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
arch 218 arch/arm64/kvm/hyp/debug-sr.c vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY;
arch 39 arch/arm64/kvm/hyp/switch.c vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
arch 40 arch/arm64/kvm/hyp/switch.c vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
arch 43 arch/arm64/kvm/hyp/switch.c return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
arch 52 arch/arm64/kvm/hyp/switch.c vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
arch 85 arch/arm64/kvm/hyp/switch.c write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
arch 133 arch/arm64/kvm/hyp/switch.c u64 hcr = vcpu->arch.hcr_el2;
arch 141 arch/arm64/kvm/hyp/switch.c write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
arch 188 arch/arm64/kvm/hyp/switch.c if (vcpu->arch.hcr_el2 & HCR_VSE) {
arch 189 arch/arm64/kvm/hyp/switch.c vcpu->arch.hcr_el2 &= ~HCR_VSE;
arch 190 arch/arm64/kvm/hyp/switch.c vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
arch 280 arch/arm64/kvm/hyp/switch.c esr = vcpu->arch.fault.esr_el2;
arch 308 arch/arm64/kvm/hyp/switch.c vcpu->arch.fault.far_el2 = far;
arch 309 arch/arm64/kvm/hyp/switch.c vcpu->arch.fault.hpfar_el2 = hpfar;
arch 324 arch/arm64/kvm/hyp/switch.c sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
arch 358 arch/arm64/kvm/hyp/switch.c if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
arch 365 arch/arm64/kvm/hyp/switch.c vcpu->arch.host_fpsimd_state,
arch 369 arch/arm64/kvm/hyp/switch.c &vcpu->arch.host_fpsimd_state->fpsr);
arch 371 arch/arm64/kvm/hyp/switch.c __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
arch 374 arch/arm64/kvm/hyp/switch.c vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
arch 379 arch/arm64/kvm/hyp/switch.c &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
arch 380 arch/arm64/kvm/hyp/switch.c sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
arch 381 arch/arm64/kvm/hyp/switch.c write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
arch 383 arch/arm64/kvm/hyp/switch.c __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
arch 388 arch/arm64/kvm/hyp/switch.c write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
arch 391 arch/arm64/kvm/hyp/switch.c vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
arch 406 arch/arm64/kvm/hyp/switch.c if (vcpu->arch.hcr_el2 & HCR_TVM)
arch 459 arch/arm64/kvm/hyp/switch.c vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
arch 530 arch/arm64/kvm/hyp/switch.c return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
arch 603 arch/arm64/kvm/hyp/switch.c host_ctxt = vcpu->arch.host_cpu_context;
arch 605 arch/arm64/kvm/hyp/switch.c guest_ctxt = &vcpu->arch.ctxt;
arch 643 arch/arm64/kvm/hyp/switch.c if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
arch 673 arch/arm64/kvm/hyp/switch.c host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
arch 675 arch/arm64/kvm/hyp/switch.c guest_ctxt = &vcpu->arch.ctxt;
arch 716 arch/arm64/kvm/hyp/switch.c if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
arch 198 arch/arm64/kvm/hyp/sysreg-sr.c spsr = vcpu->arch.ctxt.gp_regs.spsr;
arch 199 arch/arm64/kvm/hyp/sysreg-sr.c sysreg = vcpu->arch.ctxt.sys_regs;
arch 209 arch/arm64/kvm/hyp/sysreg-sr.c if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
arch 220 arch/arm64/kvm/hyp/sysreg-sr.c spsr = vcpu->arch.ctxt.gp_regs.spsr;
arch 221 arch/arm64/kvm/hyp/sysreg-sr.c sysreg = vcpu->arch.ctxt.sys_regs;
arch 231 arch/arm64/kvm/hyp/sysreg-sr.c if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
arch 248 arch/arm64/kvm/hyp/sysreg-sr.c struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
arch 249 arch/arm64/kvm/hyp/sysreg-sr.c struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
arch 266 arch/arm64/kvm/hyp/sysreg-sr.c vcpu->arch.sysregs_loaded_on_cpu = true;
arch 284 arch/arm64/kvm/hyp/sysreg-sr.c struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
arch 285 arch/arm64/kvm/hyp/sysreg-sr.c struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
arch 299 arch/arm64/kvm/hyp/sysreg-sr.c vcpu->arch.sysregs_loaded_on_cpu = false;
arch 38 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c struct vgic_dist *vgic = &kvm->arch.vgic;
arch 173 arch/arm64/kvm/pmu.c host_ctxt = vcpu->arch.host_cpu_context;
arch 194 arch/arm64/kvm/pmu.c host_ctxt = vcpu->arch.host_cpu_context;
arch 103 arch/arm64/kvm/regmap.c unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
arch 150 arch/arm64/kvm/regmap.c if (!vcpu->arch.sysregs_loaded_on_cpu)
arch 173 arch/arm64/kvm/regmap.c if (!vcpu->arch.sysregs_loaded_on_cpu) {
arch 142 arch/arm64/kvm/reset.c vcpu->arch.sve_max_vl = kvm_sve_max_vl;
arch 149 arch/arm64/kvm/reset.c vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;
arch 163 arch/arm64/kvm/reset.c vl = vcpu->arch.sve_max_vl;
arch 178 arch/arm64/kvm/reset.c vcpu->arch.sve_state = buf;
arch 179 arch/arm64/kvm/reset.c vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
arch 209 arch/arm64/kvm/reset.c kfree(vcpu->arch.sve_state);
arch 215 arch/arm64/kvm/reset.c memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
arch 231 arch/arm64/kvm/reset.c if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
arch 232 arch/arm64/kvm/reset.c !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features))
arch 235 arch/arm64/kvm/reset.c vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
arch 273 arch/arm64/kvm/reset.c if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
arch 282 arch/arm64/kvm/reset.c if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
arch 283 arch/arm64/kvm/reset.c test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
arch 288 arch/arm64/kvm/reset.c switch (vcpu->arch.target) {
arch 290 arch/arm64/kvm/reset.c if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
arch 311 arch/arm64/kvm/reset.c if (vcpu->arch.reset_state.reset) {
arch 312 arch/arm64/kvm/reset.c unsigned long target_pc = vcpu->arch.reset_state.pc;
arch 321 arch/arm64/kvm/reset.c if (vcpu->arch.reset_state.be)
arch 325 arch/arm64/kvm/reset.c vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
arch 327 arch/arm64/kvm/reset.c vcpu->arch.reset_state.reset = false;
arch 332 arch/arm64/kvm/reset.c vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
arch 435 arch/arm64/kvm/reset.c kvm->arch.vtcr = vtcr;
arch 70 arch/arm64/kvm/sys_regs.c if (!vcpu->arch.sysregs_loaded_on_cpu)
arch 114 arch/arm64/kvm/sys_regs.c if (!vcpu->arch.sysregs_loaded_on_cpu)
arch 292 arch/arm64/kvm/sys_regs.c p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
arch 388 arch/arm64/kvm/sys_regs.c vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
arch 419 arch/arm64/kvm/sys_regs.c vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
arch 435 arch/arm64/kvm/sys_regs.c u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
arch 450 arch/arm64/kvm/sys_regs.c __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
arch 460 arch/arm64/kvm/sys_regs.c __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
arch 470 arch/arm64/kvm/sys_regs.c vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
arch 477 arch/arm64/kvm/sys_regs.c u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
arch 492 arch/arm64/kvm/sys_regs.c __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
arch 503 arch/arm64/kvm/sys_regs.c __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
arch 513 arch/arm64/kvm/sys_regs.c vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
arch 520 arch/arm64/kvm/sys_regs.c u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
arch 528 arch/arm64/kvm/sys_regs.c vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
arch 536 arch/arm64/kvm/sys_regs.c __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
arch 546 arch/arm64/kvm/sys_regs.c __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
arch 556 arch/arm64/kvm/sys_regs.c vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
arch 563 arch/arm64/kvm/sys_regs.c u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
arch 578 arch/arm64/kvm/sys_regs.c __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
arch 588 arch/arm64/kvm/sys_regs.c __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
arch 598 arch/arm64/kvm/sys_regs.c vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
arch 1678 arch/arm64/kvm/sys_regs.c vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
arch 1701 arch/arm64/kvm/sys_regs.c u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
arch 1710 arch/arm64/kvm/sys_regs.c vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
arch 2211 arch/arm64/kvm/sys_regs.c target_specific = get_target_table(vcpu->arch.target, false, &num);
arch 2222 arch/arm64/kvm/sys_regs.c target_specific = get_target_table(vcpu->arch.target, false, &num);
arch 2248 arch/arm64/kvm/sys_regs.c table = get_target_table(vcpu->arch.target, true, &num);
arch 2372 arch/arm64/kvm/sys_regs.c table = get_target_table(vcpu->arch.target, true, &num);
arch 2681 arch/arm64/kvm/sys_regs.c i1 = get_target_table(vcpu->arch.target, true, &num);
arch 2801 arch/arm64/kvm/sys_regs.c table = get_target_table(vcpu->arch.target, true, &num);
arch 17 arch/arm64/kvm/vgic-sys-reg-v3.c struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
arch 186 arch/arm64/kvm/vgic-sys-reg-v3.c struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
arch 233 arch/arm64/kvm/vgic-sys-reg-v3.c struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
arch 32 arch/ia64/kernel/audit.c int audit_classify_arch(int arch)
arch 312 arch/ia64/kernel/module.c if (mod->arch.init_unw_table) {
arch 313 arch/ia64/kernel/module.c unw_remove_unwind_table(mod->arch.init_unw_table);
arch 314 arch/ia64/kernel/module.c mod->arch.init_unw_table = NULL;
arch 435 arch/ia64/kernel/module.c mod->arch.core_plt = s;
arch 437 arch/ia64/kernel/module.c mod->arch.init_plt = s;
arch 439 arch/ia64/kernel/module.c mod->arch.got = s;
arch 441 arch/ia64/kernel/module.c mod->arch.opd = s;
arch 443 arch/ia64/kernel/module.c mod->arch.unwind = s;
arch 445 arch/ia64/kernel/module.c if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
arch 466 arch/ia64/kernel/module.c mod->arch.core_plt->sh_type = SHT_NOBITS;
arch 467 arch/ia64/kernel/module.c mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
arch 468 arch/ia64/kernel/module.c mod->arch.core_plt->sh_addralign = 16;
arch 469 arch/ia64/kernel/module.c mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
arch 470 arch/ia64/kernel/module.c mod->arch.init_plt->sh_type = SHT_NOBITS;
arch 471 arch/ia64/kernel/module.c mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
arch 472 arch/ia64/kernel/module.c mod->arch.init_plt->sh_addralign = 16;
arch 473 arch/ia64/kernel/module.c mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
arch 474 arch/ia64/kernel/module.c mod->arch.got->sh_type = SHT_NOBITS;
arch 475 arch/ia64/kernel/module.c mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
arch 476 arch/ia64/kernel/module.c mod->arch.got->sh_addralign = 8;
arch 477 arch/ia64/kernel/module.c mod->arch.got->sh_size = gots * sizeof(struct got_entry);
arch 478 arch/ia64/kernel/module.c mod->arch.opd->sh_type = SHT_NOBITS;
arch 479 arch/ia64/kernel/module.c mod->arch.opd->sh_flags = SHF_ALLOC;
arch 480 arch/ia64/kernel/module.c mod->arch.opd->sh_addralign = 8;
arch 481 arch/ia64/kernel/module.c mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
arch 483 arch/ia64/kernel/module.c __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
arch 484 arch/ia64/kernel/module.c mod->arch.got->sh_size, mod->arch.opd->sh_size);
arch 517 arch/ia64/kernel/module.c got = (void *) mod->arch.got->sh_addr;
arch 518 arch/ia64/kernel/module.c for (e = got; e < got + mod->arch.next_got_entry; ++e)
arch 523 arch/ia64/kernel/module.c BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));
arch 526 arch/ia64/kernel/module.c ++mod->arch.next_got_entry;
arch 528 arch/ia64/kernel/module.c return (uint64_t) e - mod->arch.gp;
arch 534 arch/ia64/kernel/module.c return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
arch 548 arch/ia64/kernel/module.c plt = (void *) mod->arch.init_plt->sh_addr;
arch 549 arch/ia64/kernel/module.c plt_end = (void *) plt + mod->arch.init_plt->sh_size;
arch 551 arch/ia64/kernel/module.c plt = (void *) mod->arch.core_plt->sh_addr;
arch 552 arch/ia64/kernel/module.c plt_end = (void *) plt + mod->arch.core_plt->sh_size;
arch 587 arch/ia64/kernel/module.c struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;
arch 608 arch/ia64/kernel/module.c if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
arch 614 arch/ia64/kernel/module.c fdesc->gp = mod->arch.gp;
arch 634 arch/ia64/kernel/module.c case RV_GPREL: val -= mod->arch.gp; break;
arch 718 arch/ia64/kernel/module.c val -= mod->arch.gp;
arch 809 arch/ia64/kernel/module.c if (!mod->arch.gp) {
arch 824 arch/ia64/kernel/module.c mod->arch.gp = gp;
arch 849 arch/ia64/kernel/module.c struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
arch 850 arch/ia64/kernel/module.c struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
arch 886 arch/ia64/kernel/module.c mod->name, mod->arch.gp, num_init, num_core);
arch 892 arch/ia64/kernel/module.c mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
arch 895 arch/ia64/kernel/module.c mod->arch.core_unw_table, core, core + num_core);
arch 898 arch/ia64/kernel/module.c mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
arch 901 arch/ia64/kernel/module.c mod->arch.init_unw_table, init, init + num_init);
arch 909 arch/ia64/kernel/module.c if (mod->arch.unwind)
arch 917 arch/ia64/kernel/module.c if (mod->arch.init_unw_table) {
arch 918 arch/ia64/kernel/module.c unw_remove_unwind_table(mod->arch.init_unw_table);
arch 919 arch/ia64/kernel/module.c mod->arch.init_unw_table = NULL;
arch 921 arch/ia64/kernel/module.c if (mod->arch.core_unw_table) {
arch 922 arch/ia64/kernel/module.c unw_remove_unwind_table(mod->arch.core_unw_table);
arch 923 arch/ia64/kernel/module.c mod->arch.core_unw_table = NULL;
arch 929 arch/ia64/kernel/module.c Elf64_Shdr *opd = mod->arch.opd;
arch 106 arch/m68k/kernel/module.c module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
arch 104 arch/mips/include/asm/kvm_host.h #define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
arch 105 arch/mips/include/asm/kvm_host.h ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
arch 28 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h .set arch=octeon
arch 431 arch/mips/include/asm/stackframe.h .set arch=r4000
arch 145 arch/mips/include/asm/syscall.h int arch = AUDIT_ARCH_MIPS;
arch 148 arch/mips/include/asm/syscall.h arch |= __AUDIT_ARCH_64BIT;
arch 151 arch/mips/include/asm/syscall.h arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
arch 155 arch/mips/include/asm/syscall.h arch |= __AUDIT_ARCH_LE;
arch 157 arch/mips/include/asm/syscall.h return arch;
arch 101 arch/mips/kernel/module.c n->next = me->arch.r_mips_hi16_list;
arch 102 arch/mips/kernel/module.c me->arch.r_mips_hi16_list = n;
arch 133 arch/mips/kernel/module.c if (me->arch.r_mips_hi16_list != NULL) {
arch 134 arch/mips/kernel/module.c l = me->arch.r_mips_hi16_list;
arch 169 arch/mips/kernel/module.c me->arch.r_mips_hi16_list = NULL;
arch 183 arch/mips/kernel/module.c me->arch.r_mips_hi16_list = NULL;
arch 328 arch/mips/kernel/module.c me->arch.r_mips_hi16_list = NULL;
arch 383 arch/mips/kernel/module.c if (me->arch.r_mips_hi16_list) {
arch 384 arch/mips/kernel/module.c free_relocation_chain(me->arch.r_mips_hi16_list);
arch 385 arch/mips/kernel/module.c me->arch.r_mips_hi16_list = NULL;
arch 440 arch/mips/kernel/module.c INIT_LIST_HEAD(&me->arch.dbe_list);
arch 444 arch/mips/kernel/module.c me->arch.dbe_start = (void *)s->sh_addr;
arch 445 arch/mips/kernel/module.c me->arch.dbe_end = (void *)s->sh_addr + s->sh_size;
arch 447 arch/mips/kernel/module.c list_add(&me->arch.dbe_list, &dbe_list);
arch 456 arch/mips/kernel/module.c list_del(&mod->arch.dbe_list);
arch 1421 arch/mips/kernel/ptrace.c sd.arch = syscall_get_arch(current);
arch 28 arch/mips/kvm/commpage.c struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
arch 31 arch/mips/kvm/commpage.c vcpu->arch.cop0 = &page->cop0;
arch 116 arch/mips/kvm/dyntrans.c if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
arch 138 arch/mips/kvm/dyntrans.c if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
arch 46 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 66 arch/mips/kvm/emulate.c arch->gprs[insn.r_format.rd] = epc + 8;
arch 69 arch/mips/kvm/emulate.c nextpc = arch->gprs[insn.r_format.rs];
arch 85 arch/mips/kvm/emulate.c if ((long)arch->gprs[insn.i_format.rs] < 0)
arch 94 arch/mips/kvm/emulate.c if ((long)arch->gprs[insn.i_format.rs] >= 0)
arch 103 arch/mips/kvm/emulate.c arch->gprs[31] = epc + 8;
arch 104 arch/mips/kvm/emulate.c if ((long)arch->gprs[insn.i_format.rs] < 0)
arch 113 arch/mips/kvm/emulate.c arch->gprs[31] = epc + 8;
arch 114 arch/mips/kvm/emulate.c if ((long)arch->gprs[insn.i_format.rs] >= 0)
arch 142 arch/mips/kvm/emulate.c arch->gprs[31] = instpc + 8;
arch 155 arch/mips/kvm/emulate.c if (arch->gprs[insn.i_format.rs] ==
arch 156 arch/mips/kvm/emulate.c arch->gprs[insn.i_format.rt])
arch 165 arch/mips/kvm/emulate.c if (arch->gprs[insn.i_format.rs] !=
arch 166 arch/mips/kvm/emulate.c arch->gprs[insn.i_format.rt])
arch 179 arch/mips/kvm/emulate.c if ((long)arch->gprs[insn.i_format.rs] <= 0)
arch 192 arch/mips/kvm/emulate.c if ((long)arch->gprs[insn.i_format.rs] > 0)
arch 249 arch/mips/kvm/emulate.c err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
arch 250 arch/mips/kvm/emulate.c &vcpu->arch.pc);
arch 254 arch/mips/kvm/emulate.c vcpu->arch.pc += 4;
arch 257 arch/mips/kvm/emulate.c kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
arch 276 arch/mips/kvm/emulate.c *out = vcpu->arch.host_cp0_badinstr;
arch 297 arch/mips/kvm/emulate.c *out = vcpu->arch.host_cp0_badinstrp;
arch 314 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 316 arch/mips/kvm/emulate.c return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
arch 333 arch/mips/kvm/emulate.c delta = now_ns + vcpu->arch.count_dyn_bias;
arch 335 arch/mips/kvm/emulate.c if (delta >= vcpu->arch.count_period) {
arch 337 arch/mips/kvm/emulate.c periods = div64_s64(now_ns, vcpu->arch.count_period);
arch 338 arch/mips/kvm/emulate.c vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
arch 340 arch/mips/kvm/emulate.c delta = now_ns + vcpu->arch.count_dyn_bias;
arch 353 arch/mips/kvm/emulate.c return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
arch 368 arch/mips/kvm/emulate.c if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
arch 369 arch/mips/kvm/emulate.c return vcpu->arch.count_resume;
arch 386 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 392 arch/mips/kvm/emulate.c count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
arch 408 arch/mips/kvm/emulate.c expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
arch 409 arch/mips/kvm/emulate.c threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
arch 415 arch/mips/kvm/emulate.c running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
arch 426 arch/mips/kvm/emulate.c vcpu->arch.count_period);
arch 427 arch/mips/kvm/emulate.c hrtimer_start(&vcpu->arch.comparecount_timer, expires,
arch 446 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 476 arch/mips/kvm/emulate.c hrtimer_cancel(&vcpu->arch.comparecount_timer);
arch 504 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 512 arch/mips/kvm/emulate.c delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
arch 516 arch/mips/kvm/emulate.c hrtimer_cancel(&vcpu->arch.comparecount_timer);
arch 517 arch/mips/kvm/emulate.c hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
arch 551 arch/mips/kvm/emulate.c before_count = vcpu->arch.count_bias +
arch 564 arch/mips/kvm/emulate.c vcpu->arch.count_bias += drift;
arch 571 arch/mips/kvm/emulate.c now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
arch 580 arch/mips/kvm/emulate.c vcpu->arch.count_bias += drift;
arch 587 arch/mips/kvm/emulate.c delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
arch 605 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 610 arch/mips/kvm/emulate.c vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
arch 630 arch/mips/kvm/emulate.c vcpu->arch.count_hz = count_hz;
arch 631 arch/mips/kvm/emulate.c vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
arch 632 arch/mips/kvm/emulate.c vcpu->arch.count_dyn_bias = 0;
arch 651 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 660 arch/mips/kvm/emulate.c if (vcpu->arch.count_hz == count_hz)
arch 673 arch/mips/kvm/emulate.c vcpu->arch.count_hz = count_hz;
arch 674 arch/mips/kvm/emulate.c vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
arch 675 arch/mips/kvm/emulate.c vcpu->arch.count_dyn_bias = 0;
arch 678 arch/mips/kvm/emulate.c vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
arch 698 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 783 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 788 arch/mips/kvm/emulate.c hrtimer_cancel(&vcpu->arch.comparecount_timer);
arch 810 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 813 arch/mips/kvm/emulate.c if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
arch 830 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 856 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 857 arch/mips/kvm/emulate.c s64 changed = count_ctl ^ vcpu->arch.count_ctl;
arch 867 arch/mips/kvm/emulate.c vcpu->arch.count_ctl = count_ctl;
arch 875 arch/mips/kvm/emulate.c vcpu->arch.count_resume = ktime_get();
arch 878 arch/mips/kvm/emulate.c vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
arch 888 arch/mips/kvm/emulate.c vcpu->arch.count_hz);
arch 889 arch/mips/kvm/emulate.c expire = ktime_add_ns(vcpu->arch.count_resume, delta);
arch 926 arch/mips/kvm/emulate.c vcpu->arch.count_resume = ns_to_ktime(count_resume);
arch 941 arch/mips/kvm/emulate.c hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
arch 942 arch/mips/kvm/emulate.c vcpu->arch.count_period);
arch 948 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 953 arch/mips/kvm/emulate.c vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
arch 955 arch/mips/kvm/emulate.c kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
arch 958 arch/mips/kvm/emulate.c vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
arch 962 arch/mips/kvm/emulate.c vcpu->arch.pc);
arch 971 arch/mips/kvm/emulate.c kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
arch 972 arch/mips/kvm/emulate.c vcpu->arch.pending_exceptions);
arch 976 arch/mips/kvm/emulate.c if (!vcpu->arch.pending_exceptions) {
arch 978 arch/mips/kvm/emulate.c vcpu->arch.wait = 1;
arch 997 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 998 arch/mips/kvm/emulate.c struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
arch 1031 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 1033 arch/mips/kvm/emulate.c unsigned long pc = vcpu->arch.pc;
arch 1043 arch/mips/kvm/emulate.c tlb = &vcpu->arch.guest_tlb[index];
arch 1063 arch/mips/kvm/emulate.c struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
arch 1064 arch/mips/kvm/emulate.c struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
arch 1104 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 1107 arch/mips/kvm/emulate.c unsigned long pc = vcpu->arch.pc;
arch 1119 arch/mips/kvm/emulate.c tlb = &vcpu->arch.guest_tlb[index];
arch 1140 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 1142 arch/mips/kvm/emulate.c unsigned long pc = vcpu->arch.pc;
arch 1146 arch/mips/kvm/emulate.c tlb = &vcpu->arch.guest_tlb[index];
arch 1165 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 1167 arch/mips/kvm/emulate.c unsigned long pc = vcpu->arch.pc;
arch 1192 arch/mips/kvm/emulate.c if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
arch 1211 arch/mips/kvm/emulate.c if (kvm_mips_guest_can_have_msa(&vcpu->arch))
arch 1247 arch/mips/kvm/emulate.c if (kvm_mips_guest_has_msa(&vcpu->arch))
arch 1254 arch/mips/kvm/emulate.c if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
arch 1268 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 1277 arch/mips/kvm/emulate.c curr_pc = vcpu->arch.pc;
arch 1321 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt] =
arch 1324 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt] = 0x0;
arch 1329 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
arch 1338 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt]);
arch 1342 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
arch 1346 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt]);
arch 1355 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt]);
arch 1358 arch/mips/kvm/emulate.c && (vcpu->arch.gprs[rt] >=
arch 1361 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt]);
arch 1371 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt]);
arch 1374 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt]);
arch 1378 arch/mips/kvm/emulate.c kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
arch 1384 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt],
arch 1390 arch/mips/kvm/emulate.c val = vcpu->arch.gprs[rt];
arch 1401 arch/mips/kvm/emulate.c if (!kvm_mips_guest_has_fpu(&vcpu->arch))
arch 1432 arch/mips/kvm/emulate.c vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
arch 1443 arch/mips/kvm/emulate.c vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
arch 1455 arch/mips/kvm/emulate.c if (!kvm_mips_guest_has_fpu(&vcpu->arch))
arch 1462 arch/mips/kvm/emulate.c val = vcpu->arch.gprs[rt];
arch 1478 arch/mips/kvm/emulate.c vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
arch 1488 arch/mips/kvm/emulate.c vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
arch 1499 arch/mips/kvm/emulate.c new_cause = vcpu->arch.gprs[rt];
arch 1519 arch/mips/kvm/emulate.c cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
arch 1521 arch/mips/kvm/emulate.c cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
arch 1530 arch/mips/kvm/emulate.c vcpu->arch.pc, rt, rd, sel);
arch 1533 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt]);
arch 1542 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt] =
arch 1547 arch/mips/kvm/emulate.c vcpu->arch.pc);
arch 1551 arch/mips/kvm/emulate.c vcpu->arch.pc);
arch 1571 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt]);
arch 1572 arch/mips/kvm/emulate.c vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
arch 1577 arch/mips/kvm/emulate.c vcpu->arch.pc, inst.c0r_format.rs);
arch 1586 arch/mips/kvm/emulate.c vcpu->arch.pc = curr_pc;
arch 1612 arch/mips/kvm/emulate.c curr_pc = vcpu->arch.pc;
arch 1620 arch/mips/kvm/emulate.c vcpu->arch.host_cp0_badvaddr);
arch 1628 arch/mips/kvm/emulate.c *(u64 *)data = vcpu->arch.gprs[rt];
arch 1631 arch/mips/kvm/emulate.c vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
arch 1632 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt], *(u64 *)data);
arch 1638 arch/mips/kvm/emulate.c *(u32 *)data = vcpu->arch.gprs[rt];
arch 1641 arch/mips/kvm/emulate.c vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
arch 1642 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt], *(u32 *)data);
arch 1647 arch/mips/kvm/emulate.c *(u16 *)data = vcpu->arch.gprs[rt];
arch 1650 arch/mips/kvm/emulate.c vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
arch 1651 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt], *(u16 *)data);
arch 1656 arch/mips/kvm/emulate.c *(u8 *)data = vcpu->arch.gprs[rt];
arch 1659 arch/mips/kvm/emulate.c vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
arch 1660 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt], *(u8 *)data);
arch 1676 arch/mips/kvm/emulate.c vcpu->arch.pc = curr_pc;
arch 1696 arch/mips/kvm/emulate.c curr_pc = vcpu->arch.pc;
arch 1700 arch/mips/kvm/emulate.c vcpu->arch.io_pc = vcpu->arch.pc;
arch 1701 arch/mips/kvm/emulate.c vcpu->arch.pc = curr_pc;
arch 1703 arch/mips/kvm/emulate.c vcpu->arch.io_gpr = rt;
arch 1706 arch/mips/kvm/emulate.c vcpu->arch.host_cp0_badvaddr);
arch 1781 arch/mips/kvm/emulate.c vcpu->arch.host_cp0_badvaddr = addr;
arch 1782 arch/mips/kvm/emulate.c vcpu->arch.pc = curr_pc;
arch 1787 arch/mips/kvm/emulate.c vcpu->arch.host_cp0_badvaddr = addr;
arch 1788 arch/mips/kvm/emulate.c vcpu->arch.pc = curr_pc;
arch 1805 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 1813 arch/mips/kvm/emulate.c curr_pc = vcpu->arch.pc;
arch 1827 arch/mips/kvm/emulate.c va = arch->gprs[base] + offset;
arch 1830 arch/mips/kvm/emulate.c cache, op, base, arch->gprs[base], offset);
arch 1839 arch/mips/kvm/emulate.c vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
arch 1840 arch/mips/kvm/emulate.c arch->gprs[base], offset);
arch 1916 arch/mips/kvm/emulate.c cache, op, base, arch->gprs[base], offset);
arch 1923 arch/mips/kvm/emulate.c vcpu->arch.pc = curr_pc;
arch 1993 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2006 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2007 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2012 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2020 arch/mips/kvm/emulate.c kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
arch 2026 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2041 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2042 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2043 arch/mips/kvm/emulate.c unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
arch 2048 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2057 arch/mips/kvm/emulate.c arch->pc);
arch 2060 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
arch 2064 arch/mips/kvm/emulate.c arch->pc);
arch 2066 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2073 arch/mips/kvm/emulate.c kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
arch 2085 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2086 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2088 arch/mips/kvm/emulate.c (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
arch 2093 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2102 arch/mips/kvm/emulate.c arch->pc);
arch 2105 arch/mips/kvm/emulate.c arch->pc);
arch 2109 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2115 arch/mips/kvm/emulate.c kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
arch 2127 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2128 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2129 arch/mips/kvm/emulate.c unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
arch 2134 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2143 arch/mips/kvm/emulate.c arch->pc);
arch 2146 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
arch 2149 arch/mips/kvm/emulate.c arch->pc);
arch 2150 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2157 arch/mips/kvm/emulate.c kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
arch 2169 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2170 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2171 arch/mips/kvm/emulate.c unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
arch 2176 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2185 arch/mips/kvm/emulate.c arch->pc);
arch 2188 arch/mips/kvm/emulate.c arch->pc);
arch 2192 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2198 arch/mips/kvm/emulate.c kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
arch 2210 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2211 arch/mips/kvm/emulate.c unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
arch 2213 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2217 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2226 arch/mips/kvm/emulate.c arch->pc);
arch 2229 arch/mips/kvm/emulate.c arch->pc);
arch 2232 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2238 arch/mips/kvm/emulate.c kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
arch 2250 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2251 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2255 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2265 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2279 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2280 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2285 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2293 arch/mips/kvm/emulate.c kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
arch 2299 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2314 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2315 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2320 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2328 arch/mips/kvm/emulate.c kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
arch 2334 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2349 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2350 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2355 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2363 arch/mips/kvm/emulate.c kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
arch 2369 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2384 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2385 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2390 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2398 arch/mips/kvm/emulate.c kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
arch 2404 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2419 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2420 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2425 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2433 arch/mips/kvm/emulate.c kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
arch 2439 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2454 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2455 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2460 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2468 arch/mips/kvm/emulate.c kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
arch 2474 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2488 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2489 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2499 arch/mips/kvm/emulate.c curr_pc = vcpu->arch.pc;
arch 2530 arch/mips/kvm/emulate.c arch->gprs[rt] = vcpu->vcpu_id;
arch 2533 arch/mips/kvm/emulate.c arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
arch 2537 arch/mips/kvm/emulate.c arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
arch 2543 arch/mips/kvm/emulate.c arch->gprs[rt] = 1;
arch 2546 arch/mips/kvm/emulate.c arch->gprs[rt] = 2;
arch 2550 arch/mips/kvm/emulate.c arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
arch 2559 arch/mips/kvm/emulate.c vcpu->arch.gprs[rt]);
arch 2573 arch/mips/kvm/emulate.c vcpu->arch.pc = curr_pc;
arch 2580 arch/mips/kvm/emulate.c unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
arch 2590 arch/mips/kvm/emulate.c vcpu->arch.pc = vcpu->arch.io_pc;
arch 2629 arch/mips/kvm/emulate.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 2630 arch/mips/kvm/emulate.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 2635 arch/mips/kvm/emulate.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 2647 arch/mips/kvm/emulate.c arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
arch 2648 arch/mips/kvm/emulate.c kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
arch 2668 arch/mips/kvm/emulate.c unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
arch 2765 arch/mips/kvm/emulate.c unsigned long va = vcpu->arch.host_cp0_badvaddr;
arch 2769 arch/mips/kvm/emulate.c vcpu->arch.host_cp0_badvaddr);
arch 2779 arch/mips/kvm/emulate.c (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
arch 2792 arch/mips/kvm/emulate.c struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
arch 243 arch/mips/kvm/entry.c UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
arch 322 arch/mips/kvm/entry.c (int)offsetof(struct kvm_vcpu, arch), K1);
arch 323 arch/mips/kvm/entry.c UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
arch 366 arch/mips/kvm/entry.c offsetof(struct kvm, arch.gpa_mm.context.asid));
arch 485 arch/mips/kvm/entry.c UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
arch 522 arch/mips/kvm/entry.c UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
arch 558 arch/mips/kvm/entry.c UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
arch 839 arch/mips/kvm/entry.c UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
arch 22 arch/mips/kvm/hypcall.c kvm_debug("[%#lx] HYPCALL %#03x\n", vcpu->arch.pc, code);
arch 45 arch/mips/kvm/hypcall.c num = vcpu->arch.gprs[2]; /* v0 */
arch 46 arch/mips/kvm/hypcall.c args[0] = vcpu->arch.gprs[4]; /* a0 */
arch 47 arch/mips/kvm/hypcall.c args[1] = vcpu->arch.gprs[5]; /* a1 */
arch 48 arch/mips/kvm/hypcall.c args[2] = vcpu->arch.gprs[6]; /* a2 */
arch 49 arch/mips/kvm/hypcall.c args[3] = vcpu->arch.gprs[7]; /* a3 */
arch 52 arch/mips/kvm/hypcall.c args, &vcpu->arch.gprs[2] /* v0 */);
arch 26 arch/mips/kvm/interrupt.c set_bit(priority, &vcpu->arch.pending_exceptions);
arch 31 arch/mips/kvm/interrupt.c clear_bit(priority, &vcpu->arch.pending_exceptions);
arch 41 arch/mips/kvm/interrupt.c kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
arch 50 arch/mips/kvm/interrupt.c kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
arch 66 arch/mips/kvm/interrupt.c kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
arch 72 arch/mips/kvm/interrupt.c kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
arch 77 arch/mips/kvm/interrupt.c kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
arch 94 arch/mips/kvm/interrupt.c kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
arch 99 arch/mips/kvm/interrupt.c kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
arch 104 arch/mips/kvm/interrupt.c kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
arch 121 arch/mips/kvm/interrupt.c struct kvm_vcpu_arch *arch = &vcpu->arch;
arch 122 arch/mips/kvm/interrupt.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 169 arch/mips/kvm/interrupt.c kvm_write_c0_guest_epc(cop0, arch->pc);
arch 177 arch/mips/kvm/interrupt.c kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
arch 186 arch/mips/kvm/interrupt.c arch->pc = kvm_mips_guest_exception_base(vcpu);
arch 188 arch/mips/kvm/interrupt.c arch->pc += 0x200;
arch 190 arch/mips/kvm/interrupt.c arch->pc += 0x180;
arch 192 arch/mips/kvm/interrupt.c clear_bit(priority, &vcpu->arch.pending_exceptions);
arch 206 arch/mips/kvm/interrupt.c unsigned long *pending = &vcpu->arch.pending_exceptions;
arch 207 arch/mips/kvm/interrupt.c unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
arch 241 arch/mips/kvm/interrupt.c return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
arch 98 arch/mips/kvm/mips.c return !!(vcpu->arch.pending_exceptions);
arch 146 arch/mips/kvm/mips.c kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
arch 147 arch/mips/kvm/mips.c if (!kvm->arch.gpa_mm.pgd)
arch 176 arch/mips/kvm/mips.c pgd_free(NULL, kvm->arch.gpa_mm.pgd);
arch 334 arch/mips/kvm/mips.c vcpu->arch.guest_ebase = gebase;
arch 361 arch/mips/kvm/mips.c vcpu->arch.vcpu_run = p;
arch 368 arch/mips/kvm/mips.c dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
arch 371 arch/mips/kvm/mips.c dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
arch 381 arch/mips/kvm/mips.c vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
arch 383 arch/mips/kvm/mips.c if (!vcpu->arch.kseg0_commpage) {
arch 388 arch/mips/kvm/mips.c kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
arch 392 arch/mips/kvm/mips.c vcpu->arch.last_sched_cpu = -1;
arch 393 arch/mips/kvm/mips.c vcpu->arch.last_exec_cpu = -1;
arch 412 arch/mips/kvm/mips.c hrtimer_cancel(&vcpu->arch.comparecount_timer);
arch 419 arch/mips/kvm/mips.c kfree(vcpu->arch.guest_ebase);
arch 420 arch/mips/kvm/mips.c kfree(vcpu->arch.kseg0_commpage);
arch 505 arch/mips/kvm/mips.c dvcpu->arch.wait = 0;
arch 581 arch/mips/kvm/mips.c if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
arch 587 arch/mips/kvm/mips.c if (kvm_mips_guest_can_have_msa(&vcpu->arch))
arch 604 arch/mips/kvm/mips.c if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
arch 627 arch/mips/kvm/mips.c if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
arch 647 arch/mips/kvm/mips.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 648 arch/mips/kvm/mips.c struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
arch 657 arch/mips/kvm/mips.c v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
arch 661 arch/mips/kvm/mips.c v = (long)vcpu->arch.hi;
arch 664 arch/mips/kvm/mips.c v = (long)vcpu->arch.lo;
arch 668 arch/mips/kvm/mips.c v = (long)vcpu->arch.pc;
arch 673 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_fpu(&vcpu->arch))
arch 683 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_fpu(&vcpu->arch))
arch 692 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_fpu(&vcpu->arch))
arch 697 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_fpu(&vcpu->arch))
arch 704 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_msa(&vcpu->arch))
arch 721 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_msa(&vcpu->arch))
arch 726 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_msa(&vcpu->arch))
arch 759 arch/mips/kvm/mips.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 760 arch/mips/kvm/mips.c struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
arch 791 arch/mips/kvm/mips.c vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
arch 795 arch/mips/kvm/mips.c vcpu->arch.hi = v;
arch 798 arch/mips/kvm/mips.c vcpu->arch.lo = v;
arch 802 arch/mips/kvm/mips.c vcpu->arch.pc = v;
arch 807 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_fpu(&vcpu->arch))
arch 817 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_fpu(&vcpu->arch))
arch 826 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_fpu(&vcpu->arch))
arch 831 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_fpu(&vcpu->arch))
arch 838 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_msa(&vcpu->arch))
arch 852 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_msa(&vcpu->arch))
arch 857 arch/mips/kvm/mips.c if (!kvm_mips_guest_has_msa(&vcpu->arch))
arch 883 arch/mips/kvm/mips.c vcpu->arch.fpu_enabled = true;
arch 886 arch/mips/kvm/mips.c vcpu->arch.msa_enabled = true;
arch 1147 arch/mips/kvm/mips.c kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
arch 1159 arch/mips/kvm/mips.c kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
arch 1160 arch/mips/kvm/mips.c kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
arch 1164 arch/mips/kvm/mips.c vcpu->arch.gprs[i],
arch 1165 arch/mips/kvm/mips.c vcpu->arch.gprs[i + 1],
arch 1166 arch/mips/kvm/mips.c vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
arch 1168 arch/mips/kvm/mips.c kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
arch 1169 arch/mips/kvm/mips.c kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
arch 1171 arch/mips/kvm/mips.c cop0 = vcpu->arch.cop0;
arch 1187 arch/mips/kvm/mips.c for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
arch 1188 arch/mips/kvm/mips.c vcpu->arch.gprs[i] = regs->gpr[i];
arch 1189 arch/mips/kvm/mips.c vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
arch 1190 arch/mips/kvm/mips.c vcpu->arch.hi = regs->hi;
arch 1191 arch/mips/kvm/mips.c vcpu->arch.lo = regs->lo;
arch 1192 arch/mips/kvm/mips.c vcpu->arch.pc = regs->pc;
arch 1204 arch/mips/kvm/mips.c for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
arch 1205 arch/mips/kvm/mips.c regs->gpr[i] = vcpu->arch.gprs[i];
arch 1207 arch/mips/kvm/mips.c regs->hi = vcpu->arch.hi;
arch 1208 arch/mips/kvm/mips.c regs->lo = vcpu->arch.lo;
arch 1209 arch/mips/kvm/mips.c regs->pc = vcpu->arch.pc;
arch 1221 arch/mips/kvm/mips.c vcpu->arch.wait = 0;
arch 1231 arch/mips/kvm/mips.c vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
arch 1244 arch/mips/kvm/mips.c hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
arch 1246 arch/mips/kvm/mips.c vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
arch 1283 arch/mips/kvm/mips.c u32 cause = vcpu->arch.host_cp0_cause;
arch 1285 arch/mips/kvm/mips.c u32 __user *opc = (u32 __user *) vcpu->arch.pc;
arch 1286 arch/mips/kvm/mips.c unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
arch 1358 arch/mips/kvm/mips.c cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
arch 1430 arch/mips/kvm/mips.c kvm_read_c0_guest_status(vcpu->arch.cop0));
arch 1479 arch/mips/kvm/mips.c if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
arch 1481 arch/mips/kvm/mips.c __kvm_restore_fcsr(&vcpu->arch);
arch 1483 arch/mips/kvm/mips.c if (kvm_mips_guest_has_msa(&vcpu->arch) &&
arch 1485 arch/mips/kvm/mips.c __kvm_restore_msacsr(&vcpu->arch);
arch 1498 arch/mips/kvm/mips.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 1516 arch/mips/kvm/mips.c vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
arch 1531 arch/mips/kvm/mips.c if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
arch 1532 arch/mips/kvm/mips.c __kvm_restore_fpu(&vcpu->arch);
arch 1533 arch/mips/kvm/mips.c vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
arch 1546 arch/mips/kvm/mips.c struct mips_coproc *cop0 = vcpu->arch.cop0;
arch 1555 arch/mips/kvm/mips.c if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
arch 1563 arch/mips/kvm/mips.c
(vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | arch 1578 arch/mips/kvm/mips.c switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) { arch 1583 arch/mips/kvm/mips.c __kvm_restore_msa_upper(&vcpu->arch); arch 1584 arch/mips/kvm/mips.c vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; arch 1589 arch/mips/kvm/mips.c __kvm_restore_msa(&vcpu->arch); arch 1590 arch/mips/kvm/mips.c vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; arch 1591 arch/mips/kvm/mips.c if (kvm_mips_guest_has_fpu(&vcpu->arch)) arch 1592 arch/mips/kvm/mips.c vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; arch 1609 arch/mips/kvm/mips.c if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { arch 1612 arch/mips/kvm/mips.c vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA; arch 1614 arch/mips/kvm/mips.c if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { arch 1617 arch/mips/kvm/mips.c vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; arch 1633 arch/mips/kvm/mips.c if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { arch 1639 arch/mips/kvm/mips.c __kvm_save_msa(&vcpu->arch); arch 1644 arch/mips/kvm/mips.c if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { arch 1648 arch/mips/kvm/mips.c vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); arch 1649 arch/mips/kvm/mips.c } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { arch 1655 arch/mips/kvm/mips.c __kvm_save_fpu(&vcpu->arch); arch 1656 arch/mips/kvm/mips.c vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; arch 62 arch/mips/kvm/mmu.c mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); arch 177 arch/mips/kvm/mmu.c return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr); arch 301 arch/mips/kvm/mmu.c return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd, arch 425 arch/mips/kvm/mmu.c return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd, arch 463 arch/mips/kvm/mmu.c return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd, arch 691 arch/mips/kvm/mmu.c struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; arch 788 arch/mips/kvm/mmu.c struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; arch 799 arch/mips/kvm/mmu.c pgdp = vcpu->arch.guest_kernel_mm.pgd; arch 801 arch/mips/kvm/mmu.c pgdp = vcpu->arch.guest_user_mm.pgd; arch 814 arch/mips/kvm/mmu.c pgdp = vcpu->arch.guest_kernel_mm.pgd; arch 822 arch/mips/kvm/mmu.c pgdp = vcpu->arch.guest_user_mm.pgd; arch 1091 arch/mips/kvm/mmu.c kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, arch 1109 arch/mips/kvm/mmu.c pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage)); arch 1131 arch/mips/kvm/mmu.c if (hrtimer_cancel(&vcpu->arch.comparecount_timer)) arch 1132 arch/mips/kvm/mmu.c hrtimer_restart(&vcpu->arch.comparecount_timer); arch 1145 arch/mips/kvm/mmu.c if (vcpu->arch.last_sched_cpu != cpu) { arch 1147 arch/mips/kvm/mmu.c vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); arch 1171 arch/mips/kvm/mmu.c vcpu->arch.last_sched_cpu = cpu; arch 1200 arch/mips/kvm/mmu.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 1214 arch/mips/kvm/mmu.c tlb = &vcpu->arch.guest_tlb[index]; arch 57 arch/mips/kvm/stats.c if (vcpu->arch.cop0->stat[i][j]) arch 59 arch/mips/kvm/stats.c vcpu->arch.cop0->stat[i][j]); arch 46 arch/mips/kvm/tlb.c struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm; arch 57 arch/mips/kvm/tlb.c struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; arch 65 arch/mips/kvm/tlb.c struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; arch 90 arch/mips/kvm/tlb.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 98 arch/mips/kvm/tlb.c tlb = vcpu->arch.guest_tlb[i]; arch 122 arch/mips/kvm/tlb.c struct kvm_mips_tlb 
*tlb = vcpu->arch.guest_tlb; arch 38 arch/mips/kvm/trace.h __entry->pc = vcpu->arch.pc; arch 124 arch/mips/kvm/trace.h __entry->pc = vcpu->arch.pc; arch 263 arch/mips/kvm/trace.h __entry->pc = vcpu->arch.pc; arch 287 arch/mips/kvm/trace.h __entry->pc = vcpu->arch.pc; arch 325 arch/mips/kvm/trace.h __entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0); arch 326 arch/mips/kvm/trace.h __entry->pc = vcpu->arch.pc; arch 327 arch/mips/kvm/trace.h __entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0); arch 328 arch/mips/kvm/trace.h __entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0); arch 329 arch/mips/kvm/trace.h __entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0); arch 46 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *) vcpu->arch.pc; arch 47 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 49 arch/mips/kvm/trap_emul.c unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; arch 61 arch/mips/kvm/trap_emul.c kvm_read_c0_guest_status(vcpu->arch.cop0)); arch 69 arch/mips/kvm/trap_emul.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 71 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *) vcpu->arch.pc; arch 72 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 78 arch/mips/kvm/trap_emul.c if (!kvm_mips_guest_has_fpu(&vcpu->arch) || arch 127 arch/mips/kvm/trap_emul.c if (kvm_is_ifetch_fault(&vcpu->arch)) { arch 190 arch/mips/kvm/trap_emul.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 192 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *) vcpu->arch.pc; arch 193 arch/mips/kvm/trap_emul.c unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; arch 194 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 218 arch/mips/kvm/trap_emul.c tlb = vcpu->arch.guest_tlb + index; arch 252 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *) vcpu->arch.pc; arch 253 arch/mips/kvm/trap_emul.c unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; arch 254 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 324 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *) vcpu->arch.pc; arch 325 arch/mips/kvm/trap_emul.c unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; arch 326 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 344 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *) vcpu->arch.pc; arch 345 arch/mips/kvm/trap_emul.c unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; arch 346 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 363 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *) vcpu->arch.pc; arch 364 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 381 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *) vcpu->arch.pc; arch 382 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 399 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *) vcpu->arch.pc; arch 400 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 417 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *)vcpu->arch.pc; arch 418 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 435 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *)vcpu->arch.pc; arch 436 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 453 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *)vcpu->arch.pc; arch 454 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 476 arch/mips/kvm/trap_emul.c struct mips_coproc *cop0 = 
vcpu->arch.cop0; arch 478 arch/mips/kvm/trap_emul.c u32 __user *opc = (u32 __user *) vcpu->arch.pc; arch 479 arch/mips/kvm/trap_emul.c u32 cause = vcpu->arch.host_cp0_cause; arch 483 arch/mips/kvm/trap_emul.c if (!kvm_mips_guest_has_msa(&vcpu->arch) || arch 542 arch/mips/kvm/trap_emul.c struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; arch 543 arch/mips/kvm/trap_emul.c struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; arch 607 arch/mips/kvm/trap_emul.c kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd); arch 608 arch/mips/kvm/trap_emul.c kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd); arch 613 arch/mips/kvm/trap_emul.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 693 arch/mips/kvm/trap_emul.c vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000); arch 769 arch/mips/kvm/trap_emul.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 848 arch/mips/kvm/trap_emul.c *v = vcpu->arch.count_ctl; arch 851 arch/mips/kvm/trap_emul.c *v = ktime_to_ns(vcpu->arch.count_resume); arch 854 arch/mips/kvm/trap_emul.c *v = vcpu->arch.count_hz; arch 887 arch/mips/kvm/trap_emul.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 1049 arch/mips/kvm/trap_emul.c struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; arch 1050 arch/mips/kvm/trap_emul.c struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; arch 1084 arch/mips/kvm/trap_emul.c struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; arch 1085 arch/mips/kvm/trap_emul.c struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; arch 1185 arch/mips/kvm/trap_emul.c struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; arch 1186 arch/mips/kvm/trap_emul.c struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; arch 1188 arch/mips/kvm/trap_emul.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 1211 arch/mips/kvm/trap_emul.c if (gasid != vcpu->arch.last_user_gasid) { arch 1215 arch/mips/kvm/trap_emul.c vcpu->arch.last_user_gasid = gasid; arch 1233 arch/mips/kvm/trap_emul.c kvm_read_c0_guest_cause(vcpu->arch.cop0)); arch 1253 arch/mips/kvm/trap_emul.c r = vcpu->arch.vcpu_run(run, vcpu); arch 112 arch/mips/kvm/vz.c if (kvm_mips_guest_has_msa(&vcpu->arch)) arch 119 arch/mips/kvm/vz.c if (kvm_mips_guest_has_fpu(&vcpu->arch)) { arch 150 arch/mips/kvm/vz.c if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) arch 167 arch/mips/kvm/vz.c if (kvm_mips_guest_can_have_msa(&vcpu->arch)) arch 191 arch/mips/kvm/vz.c set_bit(priority, &vcpu->arch.pending_exceptions); arch 192 arch/mips/kvm/vz.c clear_bit(priority, &vcpu->arch.pending_exceptions_clr); arch 197 arch/mips/kvm/vz.c clear_bit(priority, &vcpu->arch.pending_exceptions); arch 198 arch/mips/kvm/vz.c set_bit(priority, &vcpu->arch.pending_exceptions_clr); arch 306 arch/mips/kvm/vz.c clear_bit(priority, &vcpu->arch.pending_exceptions); arch 348 arch/mips/kvm/vz.c clear_bit(priority, &vcpu->arch.pending_exceptions_clr); arch 370 arch/mips/kvm/vz.c if (mips_hpt_frequency != vcpu->arch.count_hz) arch 450 arch/mips/kvm/vz.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 545 arch/mips/kvm/vz.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 690 arch/mips/kvm/vz.c opc = (u32 *)vcpu->arch.pc; arch 691 arch/mips/kvm/vz.c if (vcpu->arch.host_cp0_cause & CAUSEF_BD) arch 824 arch/mips/kvm/vz.c unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 & arch 844 arch/mips/kvm/vz.c u32 *opc = (u32 *) vcpu->arch.pc; arch 845 arch/mips/kvm/vz.c u32 cause = vcpu->arch.host_cp0_cause; arch 847 arch/mips/kvm/vz.c unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; arch 891 arch/mips/kvm/vz.c struct mips_coproc 
*cop0 = vcpu->arch.cop0; arch 895 arch/mips/kvm/vz.c kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1); arch 896 arch/mips/kvm/vz.c else if (val < ARRAY_SIZE(vcpu->arch.maar)) arch 905 arch/mips/kvm/vz.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 915 arch/mips/kvm/vz.c curr_pc = vcpu->arch.pc; arch 958 arch/mips/kvm/vz.c ARRAY_SIZE(vcpu->arch.maar)); arch 959 arch/mips/kvm/vz.c val = vcpu->arch.maar[ arch 986 arch/mips/kvm/vz.c vcpu->arch.gprs[rt] = val; arch 999 arch/mips/kvm/vz.c val = vcpu->arch.gprs[rt]; arch 1007 arch/mips/kvm/vz.c kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); arch 1011 arch/mips/kvm/vz.c vcpu->arch.gprs[rt], arch 1031 arch/mips/kvm/vz.c ARRAY_SIZE(vcpu->arch.maar)); arch 1032 arch/mips/kvm/vz.c vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] = arch 1057 arch/mips/kvm/vz.c vcpu->arch.pc = curr_pc; arch 1071 arch/mips/kvm/vz.c struct kvm_vcpu_arch *arch = &vcpu->arch; arch 1078 arch/mips/kvm/vz.c curr_pc = vcpu->arch.pc; arch 1092 arch/mips/kvm/vz.c va = arch->gprs[base] + offset; arch 1095 arch/mips/kvm/vz.c cache, op, base, arch->gprs[base], offset); arch 1124 arch/mips/kvm/vz.c curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base], arch 1127 arch/mips/kvm/vz.c vcpu->arch.pc = curr_pc; arch 1136 arch/mips/kvm/vz.c struct kvm_vcpu_arch *arch = &vcpu->arch; arch 1179 arch/mips/kvm/vz.c arch->gprs[rt] = arch 1189 arch/mips/kvm/vz.c KVM_TRACE_HWR(rd, sel), arch->gprs[rt]); arch 1214 arch/mips/kvm/vz.c struct kvm_vcpu_arch *arch = &vcpu->arch; arch 1234 arch/mips/kvm/vz.c unsigned int val = arch->gprs[rt]; arch 1242 arch/mips/kvm/vz.c if (!kvm_mips_guest_has_fpu(&vcpu->arch)) arch 1272 arch/mips/kvm/vz.c vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) arch 1311 arch/mips/kvm/vz.c vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) arch 1366 arch/mips/kvm/vz.c curr_pc = vcpu->arch.pc; arch 1373 arch/mips/kvm/vz.c vcpu->arch.pc = curr_pc; arch 1400 arch/mips/kvm/vz.c u32 *opc = (u32 *) vcpu->arch.pc; arch 1401 arch/mips/kvm/vz.c u32 cause = vcpu->arch.host_cp0_cause; arch 1403 arch/mips/kvm/vz.c u32 gexccode = (vcpu->arch.host_cp0_guestctl0 & arch 1469 arch/mips/kvm/vz.c u32 cause = vcpu->arch.host_cp0_cause; arch 1479 arch/mips/kvm/vz.c if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) || arch 1480 arch/mips/kvm/vz.c vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { arch 1523 arch/mips/kvm/vz.c if (!kvm_mips_guest_has_msa(&vcpu->arch) || arch 1526 arch/mips/kvm/vz.c vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { arch 1539 arch/mips/kvm/vz.c u32 *opc = (u32 *) vcpu->arch.pc; arch 1540 arch/mips/kvm/vz.c u32 cause = vcpu->arch.host_cp0_cause; arch 1541 arch/mips/kvm/vz.c ulong badvaddr = vcpu->arch.host_cp0_badvaddr; arch 1548 arch/mips/kvm/vz.c if (kvm_is_ifetch_fault(&vcpu->arch)) { arch 1586 arch/mips/kvm/vz.c u32 *opc = (u32 *) vcpu->arch.pc; arch 1587 arch/mips/kvm/vz.c u32 cause = vcpu->arch.host_cp0_cause; arch 1588 arch/mips/kvm/vz.c ulong badvaddr = vcpu->arch.host_cp0_badvaddr; arch 1597 arch/mips/kvm/vz.c vcpu->arch.host_cp0_badvaddr = badvaddr; arch 1712 arch/mips/kvm/vz.c ret += 1 + ARRAY_SIZE(vcpu->arch.maar); arch 1765 arch/mips/kvm/vz.c for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) { arch 1826 arch/mips/kvm/vz.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 1986 arch/mips/kvm/vz.c if (idx >= ARRAY_SIZE(vcpu->arch.maar)) arch 1988 arch/mips/kvm/vz.c *v = vcpu->arch.maar[idx]; arch 1993 arch/mips/kvm/vz.c *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0); arch 2029 arch/mips/kvm/vz.c *v = vcpu->arch.count_ctl; arch 2032 arch/mips/kvm/vz.c *v = 
ktime_to_ns(vcpu->arch.count_resume); arch 2035 arch/mips/kvm/vz.c *v = vcpu->arch.count_hz; arch 2047 arch/mips/kvm/vz.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 2255 arch/mips/kvm/vz.c if (idx >= ARRAY_SIZE(vcpu->arch.maar)) arch 2257 arch/mips/kvm/vz.c vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v); arch 2347 arch/mips/kvm/vz.c vcpu->arch.vzguestid[i] = 0; arch 2370 arch/mips/kvm/vz.c if (wired > vcpu->arch.wired_tlb_limit) { arch 2371 arch/mips/kvm/vz.c tlbs = krealloc(vcpu->arch.wired_tlb, wired * arch 2372 arch/mips/kvm/vz.c sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC); arch 2375 arch/mips/kvm/vz.c wired = vcpu->arch.wired_tlb_limit; arch 2377 arch/mips/kvm/vz.c vcpu->arch.wired_tlb = tlbs; arch 2378 arch/mips/kvm/vz.c vcpu->arch.wired_tlb_limit = wired; arch 2384 arch/mips/kvm/vz.c kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired); arch 2386 arch/mips/kvm/vz.c for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) { arch 2387 arch/mips/kvm/vz.c vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i); arch 2388 arch/mips/kvm/vz.c vcpu->arch.wired_tlb[i].tlb_lo[0] = 0; arch 2389 arch/mips/kvm/vz.c vcpu->arch.wired_tlb[i].tlb_lo[1] = 0; arch 2390 arch/mips/kvm/vz.c vcpu->arch.wired_tlb[i].tlb_mask = 0; arch 2392 arch/mips/kvm/vz.c vcpu->arch.wired_tlb_used = wired; arch 2398 arch/mips/kvm/vz.c if (vcpu->arch.wired_tlb) arch 2399 arch/mips/kvm/vz.c kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0, arch 2400 arch/mips/kvm/vz.c vcpu->arch.wired_tlb_used); arch 2406 arch/mips/kvm/vz.c struct mm_struct *gpa_mm = &kvm->arch.gpa_mm; arch 2413 arch/mips/kvm/vz.c migrated = (vcpu->arch.last_exec_cpu != cpu); arch 2414 arch/mips/kvm/vz.c vcpu->arch.last_exec_cpu = cpu; arch 2431 arch/mips/kvm/vz.c (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) & arch 2434 arch/mips/kvm/vz.c vcpu->arch.vzguestid[cpu] = guestid_cache(cpu); arch 2436 arch/mips/kvm/vz.c vcpu->arch.vzguestid[cpu]); arch 2440 arch/mips/kvm/vz.c change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]); arch 2457 arch/mips/kvm/vz.c if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)) arch 2466 arch/mips/kvm/vz.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 2473 arch/mips/kvm/vz.c migrated = (vcpu->arch.last_sched_cpu != cpu); arch 2608 arch/mips/kvm/vz.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 2943 arch/mips/kvm/vz.c vcpu->arch.vzguestid[i] = 0; arch 2967 arch/mips/kvm/vz.c struct mips_coproc *cop0 = vcpu->arch.cop0; arch 3101 arch/mips/kvm/vz.c vcpu->arch.pc = CKSEG1ADDR(0x1fc00000); arch 3121 arch/mips/kvm/vz.c cpumask_setall(&kvm->arch.asid_flush_mask); arch 3161 arch/mips/kvm/vz.c r = vcpu->arch.vcpu_run(run, vcpu); arch 60 arch/parisc/include/asm/syscall.h int arch = AUDIT_ARCH_PARISC; arch 63 arch/parisc/include/asm/syscall.h arch = AUDIT_ARCH_PARISC64; arch 65 arch/parisc/include/asm/syscall.h return arch; arch 32 arch/parisc/kernel/audit.c int audit_classify_arch(int arch) arch 35 arch/parisc/kernel/audit.c if (arch == AUDIT_ARCH_PARISC) arch 80 arch/parisc/kernel/kexec.c struct kimage_arch *arch = &image->arch; arch 96 arch/parisc/kernel/kexec.c *(unsigned long *)(virt + kexec_cmdline_offset) = arch->cmdline; arch 97 arch/parisc/kernel/kexec.c *(unsigned long *)(virt + kexec_initrd_start_offset) = arch->initrd_start; arch 98 arch/parisc/kernel/kexec.c *(unsigned long *)(virt + kexec_initrd_end_offset) = arch->initrd_end; arch 55 arch/parisc/kernel/kexec_file.c image->arch.initrd_start = kbuf.mem; arch 56 arch/parisc/kernel/kexec_file.c image->arch.initrd_end = kbuf.mem + initrd_len; arch 
72 arch/parisc/kernel/kexec_file.c image->arch.cmdline = kbuf.mem; arch 283 arch/parisc/kernel/module.c kfree(mod->arch.section); arch 284 arch/parisc/kernel/module.c mod->arch.section = NULL; arch 293 arch/parisc/kernel/module.c return (mod->arch.section[section].stub_entries + 1) arch 306 arch/parisc/kernel/module.c len = hdr->e_shnum * sizeof(me->arch.section[0]); arch 307 arch/parisc/kernel/module.c me->arch.section = kzalloc(len, GFP_KERNEL); arch 308 arch/parisc/kernel/module.c if (!me->arch.section) arch 318 arch/parisc/kernel/module.c me->arch.unwind_section = i; arch 343 arch/parisc/kernel/module.c WARN_ON(me->arch.section[s].stub_entries); arch 346 arch/parisc/kernel/module.c me->arch.section[s].stub_entries += count; arch 351 arch/parisc/kernel/module.c me->arch.got_offset = me->core_layout.size; arch 355 arch/parisc/kernel/module.c me->arch.fdesc_offset = me->core_layout.size; arch 358 arch/parisc/kernel/module.c me->arch.got_max = gots; arch 359 arch/parisc/kernel/module.c me->arch.fdesc_max = fdescs; arch 374 arch/parisc/kernel/module.c got = me->core_layout.base + me->arch.got_offset; arch 379 arch/parisc/kernel/module.c BUG_ON(++me->arch.got_count > me->arch.got_max); arch 392 arch/parisc/kernel/module.c Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset; arch 406 arch/parisc/kernel/module.c BUG_ON(++me->arch.fdesc_count > me->arch.fdesc_max); arch 410 arch/parisc/kernel/module.c fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; arch 428 arch/parisc/kernel/module.c if (!me->arch.section[targetsec].stub_offset) { arch 429 arch/parisc/kernel/module.c loc0 -= (me->arch.section[targetsec].stub_entries + 1) * arch 433 arch/parisc/kernel/module.c me->arch.section[targetsec].stub_offset = loc0; arch 437 arch/parisc/kernel/module.c stub = (void *) me->arch.section[targetsec].stub_offset; arch 438 arch/parisc/kernel/module.c me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry); arch 441 arch/parisc/kernel/module.c BUG_ON(0 == me->arch.section[targetsec].stub_entries--); arch 837 arch/parisc/kernel/module.c if (!me->arch.unwind_section) arch 840 arch/parisc/kernel/module.c table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; arch 841 arch/parisc/kernel/module.c end = table + sechdrs[me->arch.unwind_section].sh_size; arch 842 arch/parisc/kernel/module.c gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; arch 845 arch/parisc/kernel/module.c me->arch.unwind_section, table, end, gp); arch 846 arch/parisc/kernel/module.c me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end); arch 852 arch/parisc/kernel/module.c if (me->arch.unwind) arch 853 arch/parisc/kernel/module.c unwind_table_remove(me->arch.unwind); arch 880 arch/parisc/kernel/module.c me->arch.got_count, me->arch.got_max, arch 881 arch/parisc/kernel/module.c me->arch.fdesc_count, me->arch.fdesc_max); arch 905 arch/parisc/kernel/module.c if(me->arch.got_count > MAX_GOTS) { arch 907 arch/parisc/kernel/module.c me->name, me->arch.got_count, MAX_GOTS); arch 911 arch/parisc/kernel/module.c kfree(me->arch.section); arch 912 arch/parisc/kernel/module.c me->arch.section = NULL; arch 978 arch/parisc/kernel/module.c mod->arch.fdesc_offset; arch 980 arch/parisc/kernel/module.c mod->arch.fdesc_count * sizeof(Elf64_Fdesc); arch 21 arch/powerpc/boot/addnote.c static const char arch[] = "PowerPC"; arch 131 arch/powerpc/boot/addnote.c nnote = 12 + ROUNDUP(strlen(arch) + 1) + sizeof(descr); arch 188 arch/powerpc/boot/addnote.c PUT_32(ns, strlen(arch) + 1); arch 191 
arch/powerpc/boot/addnote.c strcpy((char *) &buf[ns + 12], arch); arch 192 arch/powerpc/boot/addnote.c ns += 12 + strlen(arch) + 1; arch 316 arch/powerpc/include/asm/kvm_book3s.h return vcpu->arch.book3s; arch 330 arch/powerpc/include/asm/kvm_book3s.h vcpu->arch.regs.gpr[num] = val; arch 335 arch/powerpc/include/asm/kvm_book3s.h return vcpu->arch.regs.gpr[num]; arch 340 arch/powerpc/include/asm/kvm_book3s.h vcpu->arch.regs.ccr = val; arch 345 arch/powerpc/include/asm/kvm_book3s.h return vcpu->arch.regs.ccr; arch 350 arch/powerpc/include/asm/kvm_book3s.h vcpu->arch.regs.xer = val; arch 355 arch/powerpc/include/asm/kvm_book3s.h return vcpu->arch.regs.xer; arch 360 arch/powerpc/include/asm/kvm_book3s.h vcpu->arch.regs.ctr = val; arch 365 arch/powerpc/include/asm/kvm_book3s.h return vcpu->arch.regs.ctr; arch 370 arch/powerpc/include/asm/kvm_book3s.h vcpu->arch.regs.link = val; arch 375 arch/powerpc/include/asm/kvm_book3s.h return vcpu->arch.regs.link; arch 380 arch/powerpc/include/asm/kvm_book3s.h vcpu->arch.regs.nip = val; arch 385 arch/powerpc/include/asm/kvm_book3s.h return vcpu->arch.regs.nip; arch 396 arch/powerpc/include/asm/kvm_book3s.h return vcpu->arch.fault_dar; arch 461 arch/powerpc/include/asm/kvm_book3s.h int stride = kvm->arch.emul_smt_mode; arch 14 arch/powerpc/include/asm/kvm_book3s_32.h return vcpu->arch.shadow_vcpu; arch 140 arch/powerpc/include/asm/kvm_book3s_64.h return kvm->arch.radix; arch 147 arch/powerpc/include/asm/kvm_book3s_64.h if (vcpu->arch.nested) arch 148 arch/powerpc/include/asm/kvm_book3s_64.h radix = vcpu->arch.nested->radix; arch 526 arch/powerpc/include/asm/kvm_book3s_64.h if (atomic_read(&kvm->arch.hpte_mod_interest)) arch 590 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.regs.ccr = vcpu->arch.cr_tm; arch 591 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.regs.xer = vcpu->arch.xer_tm; arch 592 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.regs.link = vcpu->arch.lr_tm; arch 593 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.regs.ctr = vcpu->arch.ctr_tm; arch 594 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.amr = vcpu->arch.amr_tm; arch 595 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.ppr = vcpu->arch.ppr_tm; arch 596 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.dscr = vcpu->arch.dscr_tm; arch 597 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.tar = vcpu->arch.tar_tm; arch 598 arch/powerpc/include/asm/kvm_book3s_64.h memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm, arch 599 arch/powerpc/include/asm/kvm_book3s_64.h sizeof(vcpu->arch.regs.gpr)); arch 600 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.fp = vcpu->arch.fp_tm; arch 601 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.vr = vcpu->arch.vr_tm; arch 602 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.vrsave = vcpu->arch.vrsave_tm; arch 607 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.cr_tm = vcpu->arch.regs.ccr; arch 608 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.xer_tm = vcpu->arch.regs.xer; arch 609 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.lr_tm = vcpu->arch.regs.link; arch 610 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.ctr_tm = vcpu->arch.regs.ctr; arch 611 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.amr_tm = vcpu->arch.amr; arch 612 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.ppr_tm = vcpu->arch.ppr; arch 613 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.dscr_tm = vcpu->arch.dscr; arch 614 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.tar_tm = vcpu->arch.tar; arch 615 
arch/powerpc/include/asm/kvm_book3s_64.h memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr, arch 616 arch/powerpc/include/asm/kvm_book3s_64.h sizeof(vcpu->arch.regs.gpr)); arch 617 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.fp_tm = vcpu->arch.fp; arch 618 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.vr_tm = vcpu->arch.vr; arch 619 arch/powerpc/include/asm/kvm_book3s_64.h vcpu->arch.vrsave_tm = vcpu->arch.vrsave; arch 28 arch/powerpc/include/asm/kvm_booke.h vcpu->arch.regs.gpr[num] = val; arch 33 arch/powerpc/include/asm/kvm_booke.h return vcpu->arch.regs.gpr[num]; arch 38 arch/powerpc/include/asm/kvm_booke.h vcpu->arch.regs.ccr = val; arch 43 arch/powerpc/include/asm/kvm_booke.h return vcpu->arch.regs.ccr; arch 48 arch/powerpc/include/asm/kvm_booke.h vcpu->arch.regs.xer = val; arch 53 arch/powerpc/include/asm/kvm_booke.h return vcpu->arch.regs.xer; arch 64 arch/powerpc/include/asm/kvm_booke.h vcpu->arch.regs.ctr = val; arch 69 arch/powerpc/include/asm/kvm_booke.h return vcpu->arch.regs.ctr; arch 74 arch/powerpc/include/asm/kvm_booke.h vcpu->arch.regs.link = val; arch 79 arch/powerpc/include/asm/kvm_booke.h return vcpu->arch.regs.link; arch 84 arch/powerpc/include/asm/kvm_booke.h vcpu->arch.regs.nip = val; arch 89 arch/powerpc/include/asm/kvm_booke.h return vcpu->arch.regs.nip; arch 94 arch/powerpc/include/asm/kvm_booke.h return vcpu->arch.fault_dear; arch 831 arch/powerpc/include/asm/kvm_host.h #define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET] arch 832 arch/powerpc/include/asm/kvm_host.h #define VCPU_VSX_FPR(vcpu, i, j) ((vcpu)->arch.fp.fpr[i][j]) arch 833 arch/powerpc/include/asm/kvm_host.h #define VCPU_VSX_VR(vcpu, i) ((vcpu)->arch.vr.vr[i]) arch 337 arch/powerpc/include/asm/kvm_ppc.h if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) arch 338 arch/powerpc/include/asm/kvm_ppc.h ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst); arch 343 arch/powerpc/include/asm/kvm_ppc.h swab32(vcpu->arch.last_inst) : arch 344 arch/powerpc/include/asm/kvm_ppc.h vcpu->arch.last_inst; arch 346 arch/powerpc/include/asm/kvm_ppc.h fetched_inst = vcpu->arch.last_inst; arch 354 arch/powerpc/include/asm/kvm_ppc.h return kvm->arch.kvm_ops == kvmppc_hv_ops; arch 553 arch/powerpc/include/asm/kvm_ppc.h vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu); arch 598 arch/powerpc/include/asm/kvm_ppc.h return vcpu->arch.irq_type == KVMPPC_IRQ_XICS; arch 605 arch/powerpc/include/asm/kvm_ppc.h return kvm->arch.pimap; arch 683 arch/powerpc/include/asm/kvm_ppc.h return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE; arch 828 arch/powerpc/include/asm/kvm_ppc.h return vcpu->arch.epr; arch 839 arch/powerpc/include/asm/kvm_ppc.h vcpu->arch.epr = epr; arch 905 arch/powerpc/include/asm/kvm_ppc.h return vcpu->arch.shared_big_endian; arch 930 arch/powerpc/include/asm/kvm_ppc.h return be##size##_to_cpu(vcpu->arch.shared->reg); \ arch 932 arch/powerpc/include/asm/kvm_ppc.h return le##size##_to_cpu(vcpu->arch.shared->reg); \ arch 939 arch/powerpc/include/asm/kvm_ppc.h vcpu->arch.shared->reg = cpu_to_be##size(val); \ arch 941 arch/powerpc/include/asm/kvm_ppc.h vcpu->arch.shared->reg = cpu_to_le##size(val); \ arch 977 arch/powerpc/include/asm/kvm_ppc.h vcpu->arch.shared->msr = cpu_to_be64(val); arch 979 arch/powerpc/include/asm/kvm_ppc.h vcpu->arch.shared->msr = cpu_to_le64(val); arch 991 arch/powerpc/include/asm/kvm_ppc.h return be32_to_cpu(vcpu->arch.shared->sr[nr]); arch 993 arch/powerpc/include/asm/kvm_ppc.h return le32_to_cpu(vcpu->arch.shared->sr[nr]); arch 999 arch/powerpc/include/asm/kvm_ppc.h 
vcpu->arch.shared->sr[nr] = cpu_to_be32(val); arch 1001 arch/powerpc/include/asm/kvm_ppc.h vcpu->arch.shared->sr[nr] = cpu_to_le32(val); arch 106 arch/powerpc/include/asm/syscall.h int arch; arch 109 arch/powerpc/include/asm/syscall.h arch = AUDIT_ARCH_PPC64; arch 111 arch/powerpc/include/asm/syscall.h arch = AUDIT_ARCH_PPC; arch 114 arch/powerpc/include/asm/syscall.h arch |= __AUDIT_ARCH_LE; arch 116 arch/powerpc/include/asm/syscall.h return arch; arch 434 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack); arch 435 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid); arch 436 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_GUEST_PID, kvm_vcpu, arch.pid); arch 437 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_GPRS, kvm_vcpu, arch.regs.gpr); arch 438 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_VRSAVE, kvm_vcpu, arch.vrsave); arch 439 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_FPRS, kvm_vcpu, arch.fp.fpr); arch 441 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr); arch 443 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer); arch 444 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr); arch 445 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link); arch 447 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TAR, kvm_vcpu, arch.tar); arch 449 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr); arch 450 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip); arch 452 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr); arch 453 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0); arch 454 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SRR1, kvm_vcpu, arch.shregs.srr1); arch 455 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SPRG0, kvm_vcpu, arch.shregs.sprg0); arch 456 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SPRG1, kvm_vcpu, arch.shregs.sprg1); arch 457 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SPRG2, kvm_vcpu, arch.shregs.sprg2); arch 458 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SPRG3, kvm_vcpu, arch.shregs.sprg3); arch 461 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TB_RMENTRY, kvm_vcpu, arch.rm_entry); arch 462 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TB_RMINTR, kvm_vcpu, arch.rm_intr); arch 463 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TB_RMEXIT, kvm_vcpu, arch.rm_exit); arch 464 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TB_GUEST, kvm_vcpu, arch.guest_time); arch 465 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TB_CEDE, kvm_vcpu, arch.cede_time); arch 466 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CUR_ACTIVITY, kvm_vcpu, arch.cur_activity); arch 467 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_ACTIVITY_START, kvm_vcpu, arch.cur_tb_start); arch 478 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SHADOW_PID, kvm_vcpu, arch.shadow_pid); arch 479 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SHADOW_PID1, kvm_vcpu, arch.shadow_pid1); arch 480 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SHARED, kvm_vcpu, arch.shared); arch 482 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SHADOW_MSR, kvm_vcpu, arch.shadow_msr); arch 484 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SHAREDBE, kvm_vcpu, arch.shared_big_endian); arch 495 arch/powerpc/kernel/asm-offsets.c OFFSET(KVM_LPID, kvm, arch.lpid); arch 499 arch/powerpc/kernel/asm-offsets.c OFFSET(KVM_TLB_SETS, kvm, arch.tlb_sets); arch 500 arch/powerpc/kernel/asm-offsets.c 
OFFSET(KVM_SDR1, kvm, arch.sdr1); arch 501 arch/powerpc/kernel/asm-offsets.c OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid); arch 502 arch/powerpc/kernel/asm-offsets.c OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr); arch 503 arch/powerpc/kernel/asm-offsets.c OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1); arch 504 arch/powerpc/kernel/asm-offsets.c OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits); arch 505 arch/powerpc/kernel/asm-offsets.c OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls); arch 506 arch/powerpc/kernel/asm-offsets.c OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v); arch 507 arch/powerpc/kernel/asm-offsets.c OFFSET(KVM_RADIX, kvm, arch.radix); arch 508 arch/powerpc/kernel/asm-offsets.c OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled); arch 509 arch/powerpc/kernel/asm-offsets.c OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest); arch 510 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr); arch 511 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar); arch 512 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr); arch 513 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty); arch 514 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst); arch 515 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_NESTED, kvm_vcpu, arch.nested); arch 517 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu); arch 520 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_PURR, kvm_vcpu, arch.purr); arch 521 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SPURR, kvm_vcpu, arch.spurr); arch 522 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_IC, kvm_vcpu, arch.ic); arch 523 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_DSCR, kvm_vcpu, arch.dscr); arch 524 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_AMR, kvm_vcpu, arch.amr); arch 525 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_UAMOR, kvm_vcpu, arch.uamor); arch 526 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_IAMR, kvm_vcpu, arch.iamr); arch 527 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl); arch 528 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr); arch 529 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx); arch 530 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr); arch 531 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx); arch 532 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr); arch 533 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags); arch 534 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_DEC, kvm_vcpu, arch.dec); arch 535 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires); arch 536 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions); arch 537 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded); arch 538 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded); arch 539 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending); arch 540 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request); arch 541 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr); arch 542 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc); arch 543 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc); arch 544 
arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar); arch 545 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar); arch 546 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SIER, kvm_vcpu, arch.sier); arch 547 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SLB, kvm_vcpu, arch.slb); arch 548 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SLB_MAX, kvm_vcpu, arch.slb_max); arch 549 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr); arch 550 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr); arch 551 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar); arch 552 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa); arch 553 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr); arch 554 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst); arch 555 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap); arch 556 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CFAR, kvm_vcpu, arch.cfar); arch 557 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_PPR, kvm_vcpu, arch.ppr); arch 558 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_FSCR, kvm_vcpu, arch.fscr); arch 559 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_PSPB, kvm_vcpu, arch.pspb); arch 560 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_EBBHR, kvm_vcpu, arch.ebbhr); arch 561 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_EBBRR, kvm_vcpu, arch.ebbrr); arch 562 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_BESCR, kvm_vcpu, arch.bescr); arch 563 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CSIGR, kvm_vcpu, arch.csigr); arch 564 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TACR, kvm_vcpu, arch.tacr); arch 565 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TCSCR, kvm_vcpu, arch.tcscr); arch 566 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_ACOP, kvm_vcpu, arch.acop); arch 567 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_WORT, kvm_vcpu, arch.wort); arch 568 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TID, kvm_vcpu, arch.tid); arch 569 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_PSSCR, kvm_vcpu, arch.psscr); arch 570 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_HFSCR, kvm_vcpu, arch.hfscr); arch 585 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TFHAR, kvm_vcpu, arch.tfhar); arch 586 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TFIAR, kvm_vcpu, arch.tfiar); arch 587 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TEXASR, kvm_vcpu, arch.texasr); arch 588 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_ORIG_TEXASR, kvm_vcpu, arch.orig_texasr); arch 589 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_GPR_TM, kvm_vcpu, arch.gpr_tm); arch 590 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_FPRS_TM, kvm_vcpu, arch.fp_tm.fpr); arch 591 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_VRS_TM, kvm_vcpu, arch.vr_tm.vr); arch 592 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_VRSAVE_TM, kvm_vcpu, arch.vrsave_tm); arch 593 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CR_TM, kvm_vcpu, arch.cr_tm); arch 594 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_XER_TM, kvm_vcpu, arch.xer_tm); arch 595 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_LR_TM, kvm_vcpu, arch.lr_tm); arch 596 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CTR_TM, kvm_vcpu, arch.ctr_tm); arch 597 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_AMR_TM, kvm_vcpu, arch.amr_tm); arch 598 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_PPR_TM, kvm_vcpu, arch.ppr_tm); arch 599 
arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_DSCR_TM, kvm_vcpu, arch.dscr_tm); arch 600 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TAR_TM, kvm_vcpu, arch.tar_tm); arch 708 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr); arch 709 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer); arch 710 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link); arch 711 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr); arch 712 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip); arch 713 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9); arch 714 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst); arch 715 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear); arch 716 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_FAULT_ESR, kvm_vcpu, arch.fault_esr); arch 717 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_CRIT_SAVE, kvm_vcpu, arch.crit_save); arch 745 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_EVR, kvm_vcpu, arch.evr[0]); arch 746 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_ACC, kvm_vcpu, arch.acc); arch 747 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_SPEFSCR, kvm_vcpu, arch.spefscr); arch 748 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_HOST_SPEFSCR, kvm_vcpu, arch.host_spefscr); arch 752 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_HOST_MAS4, kvm_vcpu, arch.host_mas4); arch 753 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6); arch 758 arch/powerpc/kernel/asm-offsets.c arch.xive_saved_state)); arch 760 arch/powerpc/kernel/asm-offsets.c arch.xive_cam_word)); arch 761 arch/powerpc/kernel/asm-offsets.c DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed)); arch 762 arch/powerpc/kernel/asm-offsets.c DEFINE(VCPU_XIVE_ESC_ON, offsetof(struct kvm_vcpu, arch.xive_esc_on)); arch 763 arch/powerpc/kernel/asm-offsets.c DEFINE(VCPU_XIVE_ESC_RADDR, offsetof(struct kvm_vcpu, arch.xive_esc_raddr)); arch 764 arch/powerpc/kernel/asm-offsets.c DEFINE(VCPU_XIVE_ESC_VADDR, offsetof(struct kvm_vcpu, arch.xive_esc_vaddr)); arch 768 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu); arch 769 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl); arch 770 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TIMING_LAST_ENTER_TBU, kvm_vcpu, arch.timing_last_enter.tv32.tbu); arch 771 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_TIMING_LAST_ENTER_TBL, kvm_vcpu, arch.timing_last_enter.tv32.tbl); arch 32 arch/powerpc/kernel/audit.c int audit_classify_arch(int arch) arch 35 arch/powerpc/kernel/audit.c if (arch == AUDIT_ARCH_PPC) arch 142 arch/powerpc/kernel/ima_kexec.c image->arch.ima_buffer_addr = load_addr; arch 143 arch/powerpc/kernel/ima_kexec.c image->arch.ima_buffer_size = size; arch 183 arch/powerpc/kernel/ima_kexec.c if (!image->arch.ima_buffer_size) arch 195 arch/powerpc/kernel/ima_kexec.c ret = write_number(value, image->arch.ima_buffer_addr, addr_cells); arch 199 arch/powerpc/kernel/ima_kexec.c ret = write_number(value + 4 * addr_cells, image->arch.ima_buffer_size, arch 209 arch/powerpc/kernel/ima_kexec.c ret = fdt_add_mem_rsv(fdt, image->arch.ima_buffer_addr, arch 210 arch/powerpc/kernel/ima_kexec.c image->arch.ima_buffer_size); arch 215 arch/powerpc/kernel/ima_kexec.c image->arch.ima_buffer_addr, image->arch.ima_buffer_size); arch 68 arch/powerpc/kernel/module.c 
me->arch.start_opd = sect->sh_addr; arch 69 arch/powerpc/kernel/module.c me->arch.end_opd = sect->sh_addr + sect->sh_size; arch 144 arch/powerpc/kernel/module_32.c me->arch.init_plt_section = i; arch 146 arch/powerpc/kernel/module_32.c me->arch.core_plt_section = i; arch 148 arch/powerpc/kernel/module_32.c if (!me->arch.core_plt_section || !me->arch.init_plt_section) { arch 154 arch/powerpc/kernel/module_32.c sechdrs[me->arch.core_plt_section].sh_size arch 156 arch/powerpc/kernel/module_32.c sechdrs[me->arch.init_plt_section].sh_size arch 183 arch/powerpc/kernel/module_32.c entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; arch 185 arch/powerpc/kernel/module_32.c entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; arch 300 arch/powerpc/kernel/module_32.c module->arch.tramp = do_plt_call(module->core_layout.base, arch 303 arch/powerpc/kernel/module_32.c if (!module->arch.tramp) arch 87 arch/powerpc/kernel/module_64.c if (ptr < (void *)mod->arch.start_opd || arch 88 arch/powerpc/kernel/module_64.c ptr >= (void *)mod->arch.end_opd) arch 349 arch/powerpc/kernel/module_64.c me->arch.stubs_section = i; arch 351 arch/powerpc/kernel/module_64.c me->arch.toc_section = i; arch 370 arch/powerpc/kernel/module_64.c if (!me->arch.stubs_section) { arch 379 arch/powerpc/kernel/module_64.c if (!me->arch.toc_section) arch 380 arch/powerpc/kernel/module_64.c me->arch.toc_section = me->arch.stubs_section; arch 383 arch/powerpc/kernel/module_64.c sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs); arch 395 arch/powerpc/kernel/module_64.c return (sechdrs[me->arch.toc_section].sh_addr & ~0xfful) + 0x8000; arch 434 arch/powerpc/kernel/module_64.c num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs); arch 437 arch/powerpc/kernel/module_64.c stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr; arch 538 arch/powerpc/kernel/module_64.c if (!me->arch.toc_fixed) { arch 544 arch/powerpc/kernel/module_64.c me->arch.toc_fixed = true; arch 786 arch/powerpc/kernel/module_64.c num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*entry); arch 789 arch/powerpc/kernel/module_64.c entry = (void *)sechdrs[me->arch.stubs_section].sh_addr; arch 826 arch/powerpc/kernel/module_64.c mod->arch.tramp = create_ftrace_stub(sechdrs, mod, arch 829 arch/powerpc/kernel/module_64.c mod->arch.tramp_regs = create_ftrace_stub(sechdrs, mod, arch 831 arch/powerpc/kernel/module_64.c if (!mod->arch.tramp_regs) arch 835 arch/powerpc/kernel/module_64.c if (!mod->arch.tramp) arch 452 arch/powerpc/kernel/trace/ftrace.c if (!rec->arch.mod) { arch 457 arch/powerpc/kernel/trace/ftrace.c rec->arch.mod = mod; arch 459 arch/powerpc/kernel/trace/ftrace.c if (mod != rec->arch.mod) { arch 461 arch/powerpc/kernel/trace/ftrace.c rec->arch.mod, mod); arch 466 arch/powerpc/kernel/trace/ftrace.c mod = rec->arch.mod; arch 516 arch/powerpc/kernel/trace/ftrace.c struct module *mod = rec->arch.mod; arch 530 arch/powerpc/kernel/trace/ftrace.c if (!mod->arch.tramp || !mod->arch.tramp_regs) { arch 532 arch/powerpc/kernel/trace/ftrace.c if (!mod->arch.tramp) { arch 540 arch/powerpc/kernel/trace/ftrace.c tramp = mod->arch.tramp_regs; arch 543 arch/powerpc/kernel/trace/ftrace.c tramp = mod->arch.tramp; arch 591 arch/powerpc/kernel/trace/ftrace.c if (!rec->arch.mod->arch.tramp) { arch 598 arch/powerpc/kernel/trace/ftrace.c rec->arch.mod->arch.tramp, BRANCH_SET_LINK); arch 685 arch/powerpc/kernel/trace/ftrace.c if (!rec->arch.mod) { arch 706 arch/powerpc/kernel/trace/ftrace.c struct module *mod = rec->arch.mod; 
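The OFFSET()/DEFINE() entries from arch/powerpc/kernel/asm-offsets.c above all reduce to offsetof() on kvm_vcpu and kvm: that file is compiled at build time and the resulting constants let assembly code address fields such as arch.regs.nip or arch.shregs.msr without knowing the C struct layout. Below is a minimal standalone sketch of the pattern; the struct layout and macro names are illustrative stand-ins, not the kernel's.

    /* Model of the asm-offsets pattern: the kernel compiles a file of
     * OFFSET()/DEFINE() lines to assembly and extracts the constants
     * into a generated header; here we simply print them. */
    #include <stdio.h>
    #include <stddef.h>

    struct kvm_vcpu_arch_model {          /* stand-in for kvm_vcpu.arch */
        unsigned long pc;
        unsigned long gprs[32];
        unsigned long pending_exceptions;
    };

    struct kvm_vcpu_model {
        int vcpu_id;
        struct kvm_vcpu_arch_model arch;
    };

    /* The kernel's OFFSET(sym, type, member) boils down to offsetof(). */
    #define OFFSET(sym, type, member) \
        printf("#define %-28s %zu\n", #sym, offsetof(struct type, member))

    int main(void)
    {
        OFFSET(VCPU_PC_MODEL,      kvm_vcpu_model, arch.pc);
        OFFSET(VCPU_GPRS_MODEL,    kvm_vcpu_model, arch.gprs);
        OFFSET(VCPU_PENDING_MODEL, kvm_vcpu_model, arch.pending_exceptions);
        return 0;
    }

Running this prints the offsets directly; the kernel instead scrapes them out of the generated assembly into asm-offsets.h, but the offsetof() core is the same, which is why every OFFSET() entry above names a kvm_vcpu or kvm member under arch.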
arch 709 arch/powerpc/kernel/trace/ftrace.c if (!mod->arch.tramp || !mod->arch.tramp_regs) { arch 760 arch/powerpc/kernel/trace/ftrace.c tramp = mod->arch.tramp_regs; arch 762 arch/powerpc/kernel/trace/ftrace.c tramp = mod->arch.tramp; arch 821 arch/powerpc/kernel/trace/ftrace.c if (!rec->arch.mod) { arch 79 arch/powerpc/kvm/book3s.c if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { arch 86 arch/powerpc/kvm/book3s.c vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; arch 141 arch/powerpc/kvm/book3s.c vcpu->arch.mmu.reset_msr(vcpu); arch 174 arch/powerpc/kvm/book3s.c unsigned long old_pending = vcpu->arch.pending_exceptions; arch 177 arch/powerpc/kvm/book3s.c &vcpu->arch.pending_exceptions); arch 179 arch/powerpc/kvm/book3s.c kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions, arch 188 arch/powerpc/kvm/book3s.c &vcpu->arch.pending_exceptions); arch 235 arch/powerpc/kvm/book3s.c return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); arch 269 arch/powerpc/kvm/book3s.c vcpu->arch.external_oneshot = 1; arch 386 arch/powerpc/kvm/book3s.c if (vcpu->arch.external_oneshot) { arch 387 arch/powerpc/kvm/book3s.c vcpu->arch.external_oneshot = 0; arch 398 arch/powerpc/kvm/book3s.c unsigned long *pending = &vcpu->arch.pending_exceptions; arch 399 arch/powerpc/kvm/book3s.c unsigned long old_pending = vcpu->arch.pending_exceptions; arch 403 arch/powerpc/kvm/book3s.c if (vcpu->arch.pending_exceptions) arch 404 arch/powerpc/kvm/book3s.c printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); arch 410 arch/powerpc/kvm/book3s.c clear_bit(priority, &vcpu->arch.pending_exceptions); arch 429 arch/powerpc/kvm/book3s.c ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM; arch 438 arch/powerpc/kvm/book3s.c ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; arch 461 arch/powerpc/kvm/book3s.c r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite); arch 473 arch/powerpc/kvm/book3s.c if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && arch 519 arch/powerpc/kvm/book3s.c ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); arch 531 arch/powerpc/kvm/book3s.c ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); arch 549 arch/powerpc/kvm/book3s.c regs->pid = vcpu->arch.pid; arch 608 arch/powerpc/kvm/book3s.c r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val); arch 623 arch/powerpc/kvm/book3s.c *val = get_reg_val(id, vcpu->arch.fp.fpscr); arch 629 arch/powerpc/kvm/book3s.c val->vsxval[0] = vcpu->arch.fp.fpr[i][0]; arch 630 arch/powerpc/kvm/book3s.c val->vsxval[1] = vcpu->arch.fp.fpr[i][1]; arch 641 arch/powerpc/kvm/book3s.c if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) { arch 653 arch/powerpc/kvm/book3s.c if (!vcpu->arch.xive_vcpu) { arch 664 arch/powerpc/kvm/book3s.c *val = get_reg_val(id, vcpu->arch.fscr); arch 667 arch/powerpc/kvm/book3s.c *val = get_reg_val(id, vcpu->arch.tar); arch 670 arch/powerpc/kvm/book3s.c *val = get_reg_val(id, vcpu->arch.ebbhr); arch 673 arch/powerpc/kvm/book3s.c *val = get_reg_val(id, vcpu->arch.ebbrr); arch 676 arch/powerpc/kvm/book3s.c *val = get_reg_val(id, vcpu->arch.bescr); arch 679 arch/powerpc/kvm/book3s.c *val = get_reg_val(id, vcpu->arch.ic); arch 696 arch/powerpc/kvm/book3s.c r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val); arch 711 arch/powerpc/kvm/book3s.c vcpu->arch.fp.fpscr = set_reg_val(id, *val); arch 717 arch/powerpc/kvm/book3s.c vcpu->arch.fp.fpr[i][0] = val->vsxval[0]; arch 718 arch/powerpc/kvm/book3s.c vcpu->arch.fp.fpr[i][1] = val->vsxval[1]; arch 726 arch/powerpc/kvm/book3s.c if (!vcpu->arch.icp 
&& !vcpu->arch.xive_vcpu) { arch 738 arch/powerpc/kvm/book3s.c if (!vcpu->arch.xive_vcpu) { arch 749 arch/powerpc/kvm/book3s.c vcpu->arch.fscr = set_reg_val(id, *val); arch 752 arch/powerpc/kvm/book3s.c vcpu->arch.tar = set_reg_val(id, *val); arch 755 arch/powerpc/kvm/book3s.c vcpu->arch.ebbhr = set_reg_val(id, *val); arch 758 arch/powerpc/kvm/book3s.c vcpu->arch.ebbrr = set_reg_val(id, *val); arch 761 arch/powerpc/kvm/book3s.c vcpu->arch.bescr = set_reg_val(id, *val); arch 764 arch/powerpc/kvm/book3s.c vcpu->arch.ic = set_reg_val(id, *val); arch 777 arch/powerpc/kvm/book3s.c vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); arch 782 arch/powerpc/kvm/book3s.c vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); arch 787 arch/powerpc/kvm/book3s.c vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr); arch 793 arch/powerpc/kvm/book3s.c return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu); arch 819 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->vcpu_create(kvm, id); arch 824 arch/powerpc/kvm/book3s.c vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); arch 829 arch/powerpc/kvm/book3s.c return vcpu->kvm->arch.kvm_ops->check_requests(vcpu); arch 834 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->get_dirty_log(kvm, log); arch 840 arch/powerpc/kvm/book3s.c kvm->arch.kvm_ops->free_memslot(free, dont); arch 846 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->create_memslot(slot, npages); arch 851 arch/powerpc/kvm/book3s.c kvm->arch.kvm_ops->flush_memslot(kvm, memslot); arch 858 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem); arch 867 arch/powerpc/kvm/book3s.c kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change); arch 872 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); arch 877 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->age_hva(kvm, start, end); arch 882 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->test_age_hva(kvm, hva); arch 887 arch/powerpc/kvm/book3s.c kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte); arch 893 arch/powerpc/kvm/book3s.c vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); arch 900 arch/powerpc/kvm/book3s.c INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables); arch 901 arch/powerpc/kvm/book3s.c INIT_LIST_HEAD(&kvm->arch.rtas_tokens); arch 902 arch/powerpc/kvm/book3s.c mutex_init(&kvm->arch.rtas_token_lock); arch 905 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->init_vm(kvm); arch 910 arch/powerpc/kvm/book3s.c kvm->arch.kvm_ops->destroy_vm(kvm); arch 914 arch/powerpc/kvm/book3s.c WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); arch 922 arch/powerpc/kvm/book3s.c kfree(kvm->arch.xive_devices.native); arch 923 arch/powerpc/kvm/book3s.c kvm->arch.xive_devices.native = NULL; arch 924 arch/powerpc/kvm/book3s.c kfree(kvm->arch.xive_devices.xics_on_xive); arch 925 arch/powerpc/kvm/book3s.c kvm->arch.xive_devices.xics_on_xive = NULL; arch 1023 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->hcall_implemented(hcall); arch 43 arch/powerpc/kvm/book3s_32_mmu.c return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP; arch 300 arch/powerpc/kvm/book3s_32_mmu.c ulong mp_ea = vcpu->arch.magic_page_ea; arch 310 arch/powerpc/kvm/book3s_32_mmu.c pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff); arch 404 arch/powerpc/kvm/book3s_32_mmu.c struct kvmppc_mmu *mmu = &vcpu->arch.mmu; arch 158 arch/powerpc/kvm/book3s_32_mmu_host.c vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); arch 311 arch/powerpc/kvm/book3s_32_mmu_host.c if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { arch 29 
arch 49 arch/powerpc/kvm/book3s_64_mmu.c for (i = 0; i < vcpu->arch.slb_nr; i++) {
arch 52 arch/powerpc/kvm/book3s_64_mmu.c if (!vcpu->arch.slb[i].valid)
arch 55 arch/powerpc/kvm/book3s_64_mmu.c if (vcpu->arch.slb[i].tb)
arch 58 arch/powerpc/kvm/book3s_64_mmu.c if (vcpu->arch.slb[i].esid == cmp_esid)
arch 59 arch/powerpc/kvm/book3s_64_mmu.c return &vcpu->arch.slb[i];
arch 64 arch/powerpc/kvm/book3s_64_mmu.c for (i = 0; i < vcpu->arch.slb_nr; i++) {
arch 65 arch/powerpc/kvm/book3s_64_mmu.c if (vcpu->arch.slb[i].vsid)
arch 67 arch/powerpc/kvm/book3s_64_mmu.c vcpu->arch.slb[i].valid ? 'v' : ' ',
arch 68 arch/powerpc/kvm/book3s_64_mmu.c vcpu->arch.slb[i].large ? 'l' : ' ',
arch 69 arch/powerpc/kvm/book3s_64_mmu.c vcpu->arch.slb[i].tb ? 't' : ' ',
arch 70 arch/powerpc/kvm/book3s_64_mmu.c vcpu->arch.slb[i].esid,
arch 71 arch/powerpc/kvm/book3s_64_mmu.c vcpu->arch.slb[i].vsid);
arch 159 arch/powerpc/kvm/book3s_64_mmu.c if (vcpu->arch.papr_enabled)
arch 221 arch/powerpc/kvm/book3s_64_mmu.c ulong mp_ea = vcpu->arch.magic_page_ea;
arch 229 arch/powerpc/kvm/book3s_64_mmu.c gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
arch 258 arch/powerpc/kvm/book3s_64_mmu.c mutex_lock(&vcpu->kvm->arch.hpt_mutex);
arch 284 arch/powerpc/kvm/book3s_64_mmu.c (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
arch 315 arch/powerpc/kvm/book3s_64_mmu.c if (unlikely(vcpu->arch.disable_kernel_nx) &&
arch 363 arch/powerpc/kvm/book3s_64_mmu.c mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
arch 370 arch/powerpc/kvm/book3s_64_mmu.c mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
arch 390 arch/powerpc/kvm/book3s_64_mmu.c if (slb_nr > vcpu->arch.slb_nr)
arch 393 arch/powerpc/kvm/book3s_64_mmu.c slbe = &vcpu->arch.slb[slb_nr];
arch 407 arch/powerpc/kvm/book3s_64_mmu.c if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
arch 444 arch/powerpc/kvm/book3s_64_mmu.c if (slb_nr > vcpu->arch.slb_nr)
arch 447 arch/powerpc/kvm/book3s_64_mmu.c slbe = &vcpu->arch.slb[slb_nr];
arch 456 arch/powerpc/kvm/book3s_64_mmu.c if (slb_nr > vcpu->arch.slb_nr)
arch 459 arch/powerpc/kvm/book3s_64_mmu.c slbe = &vcpu->arch.slb[slb_nr];
arch 492 arch/powerpc/kvm/book3s_64_mmu.c for (i = 1; i < vcpu->arch.slb_nr; i++) {
arch 493 arch/powerpc/kvm/book3s_64_mmu.c vcpu->arch.slb[i].valid = false;
arch 494 arch/powerpc/kvm/book3s_64_mmu.c vcpu->arch.slb[i].orige = 0;
arch 495 arch/powerpc/kvm/book3s_64_mmu.c vcpu->arch.slb[i].origv = 0;
arch 559 arch/powerpc/kvm/book3s_64_mmu.c if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
arch 580 arch/powerpc/kvm/book3s_64_mmu.c ulong mp_ea = vcpu->arch.magic_page_ea;
arch 593 arch/powerpc/kvm/book3s_64_mmu.c ulong mp_ea = vcpu->arch.magic_page_ea;
arch 668 arch/powerpc/kvm/book3s_64_mmu.c struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
arch 685 arch/powerpc/kvm/book3s_64_mmu.c vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
arch 106 arch/powerpc/kvm/book3s_64_mmu_host.c vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
arch 217 arch/powerpc/kvm/book3s_64_mmu_host.c vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
arch 323 arch/powerpc/kvm/book3s_64_mmu_host.c if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
arch 118 arch/powerpc/kvm/book3s_64_mmu_hv.c atomic64_set(&kvm->arch.mmio_update, 0);
arch 119 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.hpt = *info;
arch 120 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);
arch 123 arch/powerpc/kvm/book3s_64_mmu_hv.c info->virt, (long)info->order, kvm->arch.lpid);
arch 131 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_lock(&kvm->arch.mmu_setup_lock);
arch 132 arch/powerpc/kvm/book3s_64_mmu_hv.c if (kvm->arch.mmu_ready) {
arch 133 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.mmu_ready = 0;
arch 136 arch/powerpc/kvm/book3s_64_mmu_hv.c if (atomic_read(&kvm->arch.vcpus_running)) {
arch 137 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.mmu_ready = 1;
arch 147 arch/powerpc/kvm/book3s_64_mmu_hv.c if (kvm->arch.hpt.order == order) {
arch 151 arch/powerpc/kvm/book3s_64_mmu_hv.c memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
arch 160 arch/powerpc/kvm/book3s_64_mmu_hv.c if (kvm->arch.hpt.virt) {
arch 161 arch/powerpc/kvm/book3s_64_mmu_hv.c kvmppc_free_hpt(&kvm->arch.hpt);
arch 173 arch/powerpc/kvm/book3s_64_mmu_hv.c cpumask_setall(&kvm->arch.need_tlb_flush);
arch 175 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_unlock(&kvm->arch.mmu_setup_lock);
arch 224 arch/powerpc/kvm/book3s_64_mmu_hv.c if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1)
arch 225 arch/powerpc/kvm/book3s_64_mmu_hv.c npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1;
arch 236 arch/powerpc/kvm/book3s_64_mmu_hv.c & kvmppc_hpt_mask(&kvm->arch.hpt);
arch 280 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long msr = vcpu->arch.intr_msr;
arch 283 arch/powerpc/kvm/book3s_64_mmu_hv.c if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
arch 286 arch/powerpc/kvm/book3s_64_mmu_hv.c msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
arch 316 arch/powerpc/kvm/book3s_64_mmu_hv.c for (i = 0; i < vcpu->arch.slb_nr; i++) {
arch 317 arch/powerpc/kvm/book3s_64_mmu_hv.c if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
arch 320 arch/powerpc/kvm/book3s_64_mmu_hv.c if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
arch 325 arch/powerpc/kvm/book3s_64_mmu_hv.c if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
arch 326 arch/powerpc/kvm/book3s_64_mmu_hv.c return &vcpu->arch.slb[i];
arch 350 arch/powerpc/kvm/book3s_64_mmu_hv.c int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
arch 363 arch/powerpc/kvm/book3s_64_mmu_hv.c slb_v = vcpu->kvm->arch.vrma_slb_v;
arch 374 arch/powerpc/kvm/book3s_64_mmu_hv.c hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
arch 378 arch/powerpc/kvm/book3s_64_mmu_hv.c gr = kvm->arch.hpt.rev[index].guest_rpte;
arch 388 arch/powerpc/kvm/book3s_64_mmu_hv.c key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
arch 398 arch/powerpc/kvm/book3s_64_mmu_hv.c int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
arch 486 arch/powerpc/kvm/book3s_64_mmu_hv.c vcpu->arch.paddr_accessed = gpa;
arch 487 arch/powerpc/kvm/book3s_64_mmu_hv.c vcpu->arch.vaddr_accessed = ea;
arch 521 arch/powerpc/kvm/book3s_64_mmu_hv.c if (ea != vcpu->arch.pgfault_addr)
arch 524 arch/powerpc/kvm/book3s_64_mmu_hv.c if (vcpu->arch.pgfault_cache) {
arch 525 arch/powerpc/kvm/book3s_64_mmu_hv.c mmio_update = atomic64_read(&kvm->arch.mmio_update);
arch 526 arch/powerpc/kvm/book3s_64_mmu_hv.c if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
arch 527 arch/powerpc/kvm/book3s_64_mmu_hv.c r = vcpu->arch.pgfault_cache->rpte;
arch 528 arch/powerpc/kvm/book3s_64_mmu_hv.c psize = kvmppc_actual_pgsz(vcpu->arch.pgfault_hpte[0],
arch 537 arch/powerpc/kvm/book3s_64_mmu_hv.c index = vcpu->arch.pgfault_index;
arch 538 arch/powerpc/kvm/book3s_64_mmu_hv.c hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
arch 539 arch/powerpc/kvm/book3s_64_mmu_hv.c rev = &kvm->arch.hpt.rev[index];
arch 553 arch/powerpc/kvm/book3s_64_mmu_hv.c if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
arch 554 arch/powerpc/kvm/book3s_64_mmu_hv.c hpte[1] != vcpu->arch.pgfault_hpte[1])
arch 677 arch/powerpc/kvm/book3s_64_mmu_hv.c if (!kvm->arch.mmu_ready)
arch 687 arch/powerpc/kvm/book3s_64_mmu_hv.c rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
arch 759 arch/powerpc/kvm/book3s_64_mmu_hv.c memset(memslot->arch.rmap, 0,
arch 760 arch/powerpc/kvm/book3s_64_mmu_hv.c memslot->npages * sizeof(*memslot->arch.rmap));
arch 816 arch/powerpc/kvm/book3s_64_mmu_hv.c __be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
arch 817 arch/powerpc/kvm/book3s_64_mmu_hv.c struct revmap_entry *rev = kvm->arch.hpt.rev;
arch 861 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
arch 875 arch/powerpc/kvm/book3s_64_mmu_hv.c hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
arch 908 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = memslot->arch.rmap;
arch 930 arch/powerpc/kvm/book3s_64_mmu_hv.c struct revmap_entry *rev = kvm->arch.hpt.rev;
arch 936 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
arch 950 arch/powerpc/kvm/book3s_64_mmu_hv.c hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
arch 993 arch/powerpc/kvm/book3s_64_mmu_hv.c struct revmap_entry *rev = kvm->arch.hpt.rev;
arch 999 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
arch 1010 arch/powerpc/kvm/book3s_64_mmu_hv.c hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
arch 1041 arch/powerpc/kvm/book3s_64_mmu_hv.c return atomic_read(&kvm->arch.vcpus_running) != 0;
arch 1050 arch/powerpc/kvm/book3s_64_mmu_hv.c struct revmap_entry *rev = kvm->arch.hpt.rev;
arch 1067 arch/powerpc/kvm/book3s_64_mmu_hv.c hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
arch 1154 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = memslot->arch.rmap;
arch 1244 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_hpt_info *old = &kvm->arch.hpt;
arch 1297 arch/powerpc/kvm/book3s_64_mmu_hv.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
arch 1401 arch/powerpc/kvm/book3s_64_mmu_hv.c for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
arch 1423 arch/powerpc/kvm/book3s_64_mmu_hv.c hpt_tmp = kvm->arch.hpt;
arch 1439 arch/powerpc/kvm/book3s_64_mmu_hv.c if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock)))
arch 1451 arch/powerpc/kvm/book3s_64_mmu_hv.c if (kvm->arch.resize_hpt == resize)
arch 1452 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.resize_hpt = NULL;
arch 1466 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_lock(&kvm->arch.mmu_setup_lock);
arch 1469 arch/powerpc/kvm/book3s_64_mmu_hv.c if (kvm->arch.resize_hpt == resize) {
arch 1473 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_unlock(&kvm->arch.mmu_setup_lock);
arch 1486 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_lock(&kvm->arch.mmu_setup_lock);
arch 1494 arch/powerpc/kvm/book3s_64_mmu_hv.c if (kvm->arch.resize_hpt != resize)
arch 1497 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_unlock(&kvm->arch.mmu_setup_lock);
arch 1514 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_lock(&kvm->arch.mmu_setup_lock);
arch 1516 arch/powerpc/kvm/book3s_64_mmu_hv.c resize = kvm->arch.resize_hpt;
arch 1550 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.resize_hpt = resize;
arch 1557 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_unlock(&kvm->arch.mmu_setup_lock);
arch 1580 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_lock(&kvm->arch.mmu_setup_lock);
arch 1582 arch/powerpc/kvm/book3s_64_mmu_hv.c resize = kvm->arch.resize_hpt;
arch 1586 arch/powerpc/kvm/book3s_64_mmu_hv.c if (WARN_ON(!kvm->arch.mmu_ready))
arch 1590 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.mmu_ready = 0;
arch 1613 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.mmu_ready = 1;
arch 1617 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_unlock(&kvm->arch.mmu_setup_lock);
arch 1763 arch/powerpc/kvm/book3s_64_mmu_hv.c hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
arch 1764 arch/powerpc/kvm/book3s_64_mmu_hv.c revp = kvm->arch.hpt.rev + i;
arch 1779 arch/powerpc/kvm/book3s_64_mmu_hv.c while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
arch 1789 arch/powerpc/kvm/book3s_64_mmu_hv.c while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
arch 1805 arch/powerpc/kvm/book3s_64_mmu_hv.c while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
arch 1826 arch/powerpc/kvm/book3s_64_mmu_hv.c if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
arch 1860 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_lock(&kvm->arch.mmu_setup_lock);
arch 1861 arch/powerpc/kvm/book3s_64_mmu_hv.c mmu_ready = kvm->arch.mmu_ready;
arch 1863 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.mmu_ready = 0; /* temporarily */
arch 1866 arch/powerpc/kvm/book3s_64_mmu_hv.c if (atomic_read(&kvm->arch.vcpus_running)) {
arch 1867 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.mmu_ready = 1;
arch 1868 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_unlock(&kvm->arch.mmu_setup_lock);
arch 1888 arch/powerpc/kvm/book3s_64_mmu_hv.c if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) ||
arch 1889 arch/powerpc/kvm/book3s_64_mmu_hv.c i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt)
arch 1892 arch/powerpc/kvm/book3s_64_mmu_hv.c hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
arch 1927 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
arch 1954 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.mmu_ready = mmu_ready;
arch 1955 arch/powerpc/kvm/book3s_64_mmu_hv.c mutex_unlock(&kvm->arch.mmu_setup_lock);
arch 1968 arch/powerpc/kvm/book3s_64_mmu_hv.c atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
arch 2009 arch/powerpc/kvm/book3s_64_mmu_hv.c atomic_inc(&kvm->arch.hpte_mod_interest);
arch 2090 arch/powerpc/kvm/book3s_64_mmu_hv.c hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
arch 2091 arch/powerpc/kvm/book3s_64_mmu_hv.c for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt);
arch 2102 arch/powerpc/kvm/book3s_64_mmu_hv.c gr = kvm->arch.hpt.rev[i].guest_rpte;
arch 2152 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
arch 2153 arch/powerpc/kvm/book3s_64_mmu_hv.c kvm->arch.debugfs_dir, kvm,
arch 2159 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
arch 2161 arch/powerpc/kvm/book3s_64_mmu_hv.c vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */
arch 2166 arch/powerpc/kvm/book3s_64_mmu_hv.c vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
arch 87 arch/powerpc/kvm/book3s_64_mmu_radix.c int lpid = vcpu->kvm->arch.lpid;
arch 88 arch/powerpc/kvm/book3s_64_mmu_radix.c int pid = vcpu->arch.pid;
arch 95 arch/powerpc/kvm/book3s_64_mmu_radix.c if (vcpu->arch.nested)
arch 96 arch/powerpc/kvm/book3s_64_mmu_radix.c lpid = vcpu->arch.nested->shadow_lpid;
arch 259 arch/powerpc/kvm/book3s_64_mmu_radix.c pid = vcpu->arch.pid;
arch 269 arch/powerpc/kvm/book3s_64_mmu_radix.c vcpu->kvm->arch.process_table, pid, &pte);
arch 283 arch/powerpc/kvm/book3s_64_mmu_radix.c if (vcpu->arch.amr & (1ul << 62))
arch 285 arch/powerpc/kvm/book3s_64_mmu_radix.c if (vcpu->arch.amr & (1ul << 63))
arch 287 arch/powerpc/kvm/book3s_64_mmu_radix.c if (vcpu->arch.iamr & (1ul << 62))
arch 390 arch/powerpc/kvm/book3s_64_mmu_radix.c if (lpid != kvm->arch.lpid)
arch 514 arch/powerpc/kvm/book3s_64_mmu_radix.c if (kvm->arch.pgtable) {
arch 515 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
arch 516 arch/powerpc/kvm/book3s_64_mmu_radix.c kvm->arch.lpid);
arch 517 arch/powerpc/kvm/book3s_64_mmu_radix.c pgd_free(kvm->mm, kvm->arch.pgtable);
arch 518 arch/powerpc/kvm/book3s_64_mmu_radix.c kvm->arch.pgtable = NULL;
arch 817 arch/powerpc/kvm/book3s_64_mmu_radix.c ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
arch 865 arch/powerpc/kvm/book3s_64_mmu_radix.c ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
arch 866 arch/powerpc/kvm/book3s_64_mmu_radix.c mmu_seq, kvm->arch.lpid, NULL, NULL);
arch 912 arch/powerpc/kvm/book3s_64_mmu_radix.c gpa = vcpu->arch.fault_gpa & ~0xfffUL;
arch 948 arch/powerpc/kvm/book3s_64_mmu_radix.c if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
arch 949 arch/powerpc/kvm/book3s_64_mmu_radix.c writing, gpa, kvm->arch.lpid))
arch 975 arch/powerpc/kvm/book3s_64_mmu_radix.c ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
arch 978 arch/powerpc/kvm/book3s_64_mmu_radix.c kvm->arch.lpid);
arch 992 arch/powerpc/kvm/book3s_64_mmu_radix.c ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
arch 998 arch/powerpc/kvm/book3s_64_mmu_radix.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
arch 1016 arch/powerpc/kvm/book3s_64_mmu_radix.c ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
arch 1033 arch/powerpc/kvm/book3s_64_mmu_radix.c ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
arch 1041 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
arch 1043 arch/powerpc/kvm/book3s_64_mmu_radix.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
arch 1088 arch/powerpc/kvm/book3s_64_mmu_radix.c ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
arch 1091 arch/powerpc/kvm/book3s_64_mmu_radix.c kvm->arch.lpid);
arch 1136 arch/powerpc/kvm/book3s_64_mmu_radix.c kvm->arch.pgtable = pgd_alloc(kvm->mm);
arch 1137 arch/powerpc/kvm/book3s_64_mmu_radix.c if (!kvm->arch.pgtable)
arch 1250 arch/powerpc/kvm/book3s_64_mmu_radix.c pgt = kvm->arch.pgtable;
arch 1356 arch/powerpc/kvm/book3s_64_mmu_radix.c kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
arch 1357 arch/powerpc/kvm/book3s_64_mmu_radix.c kvm->arch.debugfs_dir, kvm,
arch 77 arch/powerpc/kvm/book3s_64_vio.c list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
arch 109 arch/powerpc/kvm/book3s_64_vio.c list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
arch 305 arch/powerpc/kvm/book3s_64_vio.c list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
arch 318 arch/powerpc/kvm/book3s_64_vio.c list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
arch 69 arch/powerpc/kvm/book3s_64_vio_hv.c list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
arch 92 arch/powerpc/kvm/book3s_64_vio_hv.c *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
arch 456 arch/powerpc/kvm/book3s_64_vio_hv.c ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
arch 673 arch/powerpc/kvm/book3s_64_vio_hv.c vcpu->arch.regs.gpr[4] = 0;
arch 678 arch/powerpc/kvm/book3s_64_vio_hv.c vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
arch 77 arch/powerpc/kvm/book3s_emulate.c if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
arch 90 arch/powerpc/kvm/book3s_emulate.c memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
arch 91 arch/powerpc/kvm/book3s_emulate.c sizeof(vcpu->arch.gpr_tm));
arch 92 arch/powerpc/kvm/book3s_emulate.c memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
arch 94 arch/powerpc/kvm/book3s_emulate.c memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
arch 96 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.ppr_tm = vcpu->arch.ppr;
arch 97 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.dscr_tm = vcpu->arch.dscr;
arch 98 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.amr_tm = vcpu->arch.amr;
arch 99 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
arch 100 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.tar_tm = vcpu->arch.tar;
arch 101 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.lr_tm = vcpu->arch.regs.link;
arch 102 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
arch 103 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.xer_tm = vcpu->arch.regs.xer;
arch 104 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
arch 109 arch/powerpc/kvm/book3s_emulate.c memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
arch 110 arch/powerpc/kvm/book3s_emulate.c sizeof(vcpu->arch.regs.gpr));
arch 111 arch/powerpc/kvm/book3s_emulate.c memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
arch 113 arch/powerpc/kvm/book3s_emulate.c memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
arch 115 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.ppr = vcpu->arch.ppr_tm;
arch 116 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.dscr = vcpu->arch.dscr_tm;
arch 117 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.amr = vcpu->arch.amr_tm;
arch 118 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
arch 119 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.tar = vcpu->arch.tar_tm;
arch 120 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.regs.link = vcpu->arch.lr_tm;
arch 121 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
arch 122 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.regs.xer = vcpu->arch.xer_tm;
arch 123 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
arch 133 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
arch 155 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.texasr = texasr;
arch 156 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
arch 158 arch/powerpc/kvm/book3s_emulate.c mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
arch 168 arch/powerpc/kvm/book3s_emulate.c if (vcpu->arch.shadow_fscr & FSCR_TAR)
arch 169 arch/powerpc/kvm/book3s_emulate.c mtspr(SPRN_TAR, vcpu->arch.tar);
arch 212 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
arch 216 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.texasr = mfspr(SPRN_TEXASR);
arch 223 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
arch 225 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.texasr |= TEXASR_PR;
arch 228 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.texasr |= TEXASR_HV;
arch 230 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
arch 321 arch/powerpc/kvm/book3s_emulate.c if (vcpu->arch.mmu.mfsrin) {
arch 323 arch/powerpc/kvm/book3s_emulate.c sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
arch 333 arch/powerpc/kvm/book3s_emulate.c if (vcpu->arch.mmu.mfsrin) {
arch 335 arch/powerpc/kvm/book3s_emulate.c sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
arch 341 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.mmu.mtsrin(vcpu,
arch 346 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.mmu.mtsrin(vcpu,
arch 355 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.mmu.tlbie(vcpu, addr, large);
arch 366 arch/powerpc/kvm/book3s_emulate.c !vcpu->arch.papr_enabled) {
arch 381 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.hcall_needed = 1;
arch 389 arch/powerpc/kvm/book3s_emulate.c if (!vcpu->arch.mmu.slbmte)
arch 392 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.mmu.slbmte(vcpu,
arch 397 arch/powerpc/kvm/book3s_emulate.c if (!vcpu->arch.mmu.slbie)
arch 400 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.mmu.slbie(vcpu,
arch 404 arch/powerpc/kvm/book3s_emulate.c if (!vcpu->arch.mmu.slbia)
arch 407 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.mmu.slbia(vcpu);
arch 410 arch/powerpc/kvm/book3s_emulate.c if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
arch 417 arch/powerpc/kvm/book3s_emulate.c if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
arch 421 arch/powerpc/kvm/book3s_emulate.c cr |= (vcpu->arch.regs.xer & 0x80000000) >>
arch 427 arch/powerpc/kvm/book3s_emulate.c if (!vcpu->arch.mmu.slbmfee) {
arch 433 arch/powerpc/kvm/book3s_emulate.c t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
arch 438 arch/powerpc/kvm/book3s_emulate.c if (!vcpu->arch.mmu.slbmfev) {
arch 444 arch/powerpc/kvm/book3s_emulate.c t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
arch 472 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.fault_dar = vaddr;
arch 481 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.fault_dsisr = dsisr;
arch 503 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
arch 504 arch/powerpc/kvm/book3s_emulate.c (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));
arch 506 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
arch 511 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.texasr |= TEXASR_ROT;
arch 514 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.texasr |= TEXASR_HV;
arch 516 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
arch 517 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
arch 727 arch/powerpc/kvm/book3s_emulate.c switch (vcpu->arch.pvr) {
arch 737 arch/powerpc/kvm/book3s_emulate.c if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
arch 740 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
arch 743 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
arch 755 arch/powerpc/kvm/book3s_emulate.c if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
arch 757 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
arch 774 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.bescr = spr_val;
arch 777 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.ebbhr = spr_val;
arch 780 arch/powerpc/kvm/book3s_emulate.c vcpu->arch.ebbrr = spr_val;
arch 923 arch/powerpc/kvm/book3s_emulate.c *spr_val = vcpu->arch.purr;
arch 929 arch/powerpc/kvm/book3s_emulate.c *spr_val = vcpu->arch.spurr;
arch 935 arch/powerpc/kvm/book3s_emulate.c *spr_val = vcpu->arch.ic;
arch 949 arch/powerpc/kvm/book3s_emulate.c *spr_val = vcpu->arch.fscr;
arch 952 arch/powerpc/kvm/book3s_emulate.c *spr_val = vcpu->arch.bescr;
arch 955 arch/powerpc/kvm/book3s_emulate.c *spr_val = vcpu->arch.ebbhr;
arch 958 arch/powerpc/kvm/book3s_emulate.c *spr_val = vcpu->arch.ebbrr;
arch 1043 arch/powerpc/kvm/book3s_emulate.c return vcpu->arch.fault_dar;
arch 130 arch/powerpc/kvm/book3s_hv.c return kvm->arch.nested_enable && kvm_is_radix(kvm);
arch 240 arch/powerpc/kvm/book3s_hv.c cpu = READ_ONCE(vcpu->arch.thread_cpu);
arch 306 arch/powerpc/kvm/book3s_hv.c struct kvmppc_vcore *vc = vcpu->arch.vcore;
arch 318 arch/powerpc/kvm/book3s_hv.c spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
arch 319 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
arch 320 arch/powerpc/kvm/book3s_hv.c vcpu->arch.busy_preempt != TB_NIL) {
arch 321 arch/powerpc/kvm/book3s_hv.c vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
arch 322 arch/powerpc/kvm/book3s_hv.c vcpu->arch.busy_preempt = TB_NIL;
arch 324 arch/powerpc/kvm/book3s_hv.c spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
arch 329 arch/powerpc/kvm/book3s_hv.c struct kvmppc_vcore *vc = vcpu->arch.vcore;
arch 335 arch/powerpc/kvm/book3s_hv.c spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
arch 336 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
arch 337 arch/powerpc/kvm/book3s_hv.c vcpu->arch.busy_preempt = mftb();
arch 338 arch/powerpc/kvm/book3s_hv.c spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
arch 349 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.msr = msr;
arch 355 arch/powerpc/kvm/book3s_hv.c vcpu->arch.pvr = pvr;
arch 364 arch/powerpc/kvm/book3s_hv.c struct kvmppc_vcore *vc = vcpu->arch.vcore;
arch 420 arch/powerpc/kvm/book3s_hv.c vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
arch 426 arch/powerpc/kvm/book3s_hv.c vcpu->arch.regs.ctr, vcpu->arch.regs.link);
arch 428 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
arch 430 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
arch 432 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
arch 434 arch/powerpc/kvm/book3s_hv.c vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
arch 435 arch/powerpc/kvm/book3s_hv.c pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
arch 437 arch/powerpc/kvm/book3s_hv.c vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
arch 438 arch/powerpc/kvm/book3s_hv.c pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
arch 439 arch/powerpc/kvm/book3s_hv.c for (r = 0; r < vcpu->arch.slb_max; ++r)
arch 441 arch/powerpc/kvm/book3s_hv.c vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
arch 443 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
arch 444 arch/powerpc/kvm/book3s_hv.c vcpu->arch.last_inst);
arch 464 arch/powerpc/kvm/book3s_hv.c spin_lock(&vcpu->arch.vpa_update_lock);
arch 470 arch/powerpc/kvm/book3s_hv.c spin_unlock(&vcpu->arch.vpa_update_lock);
arch 533 arch/powerpc/kvm/book3s_hv.c spin_lock(&tvcpu->arch.vpa_update_lock);
arch 546 arch/powerpc/kvm/book3s_hv.c vpap = &tvcpu->arch.vpa;
arch 557 arch/powerpc/kvm/book3s_hv.c if (!vpa_is_registered(&tvcpu->arch.vpa))
arch 560 arch/powerpc/kvm/book3s_hv.c vpap = &tvcpu->arch.dtl;
arch 567 arch/powerpc/kvm/book3s_hv.c if (!vpa_is_registered(&tvcpu->arch.vpa))
arch 570 arch/powerpc/kvm/book3s_hv.c vpap = &tvcpu->arch.slb_shadow;
arch 577 arch/powerpc/kvm/book3s_hv.c if (vpa_is_registered(&tvcpu->arch.dtl) ||
arch 578 arch/powerpc/kvm/book3s_hv.c vpa_is_registered(&tvcpu->arch.slb_shadow))
arch 581 arch/powerpc/kvm/book3s_hv.c vpap = &tvcpu->arch.vpa;
arch 586 arch/powerpc/kvm/book3s_hv.c vpap = &tvcpu->arch.dtl;
arch 591 arch/powerpc/kvm/book3s_hv.c vpap = &tvcpu->arch.slb_shadow;
arch 602 arch/powerpc/kvm/book3s_hv.c spin_unlock(&tvcpu->arch.vpa_update_lock);
arch 624 arch/powerpc/kvm/book3s_hv.c spin_unlock(&vcpu->arch.vpa_update_lock);
arch 629 arch/powerpc/kvm/book3s_hv.c spin_lock(&vcpu->arch.vpa_update_lock);
arch 659 arch/powerpc/kvm/book3s_hv.c if (!(vcpu->arch.vpa.update_pending ||
arch 660 arch/powerpc/kvm/book3s_hv.c vcpu->arch.slb_shadow.update_pending ||
arch 661 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl.update_pending))
arch 664 arch/powerpc/kvm/book3s_hv.c spin_lock(&vcpu->arch.vpa_update_lock);
arch 665 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.vpa.update_pending) {
arch 666 arch/powerpc/kvm/book3s_hv.c kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
arch 667 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.vpa.pinned_addr)
arch 668 arch/powerpc/kvm/book3s_hv.c init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
arch 670 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.dtl.update_pending) {
arch 671 arch/powerpc/kvm/book3s_hv.c kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
arch 672 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
arch 673 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl_index = 0;
arch 675 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.slb_shadow.update_pending)
arch 676 arch/powerpc/kvm/book3s_hv.c kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
arch 677 arch/powerpc/kvm/book3s_hv.c spin_unlock(&vcpu->arch.vpa_update_lock);
arch 708 arch/powerpc/kvm/book3s_hv.c dt = vcpu->arch.dtl_ptr;
arch 709 arch/powerpc/kvm/book3s_hv.c vpa = vcpu->arch.vpa.pinned_addr;
arch 712 arch/powerpc/kvm/book3s_hv.c stolen = core_stolen - vcpu->arch.stolen_logged;
arch 713 arch/powerpc/kvm/book3s_hv.c vcpu->arch.stolen_logged = core_stolen;
arch 714 arch/powerpc/kvm/book3s_hv.c spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
arch 715 arch/powerpc/kvm/book3s_hv.c stolen += vcpu->arch.busy_stolen;
arch 716 arch/powerpc/kvm/book3s_hv.c vcpu->arch.busy_stolen = 0;
arch 717 arch/powerpc/kvm/book3s_hv.c spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
arch 722 arch/powerpc/kvm/book3s_hv.c dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
arch 726 arch/powerpc/kvm/book3s_hv.c dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
arch 728 arch/powerpc/kvm/book3s_hv.c if (dt == vcpu->arch.dtl.pinned_end)
arch 729 arch/powerpc/kvm/book3s_hv.c dt = vcpu->arch.dtl.pinned_addr;
arch 730 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl_ptr = dt;
arch 733 arch/powerpc/kvm/book3s_hv.c vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
arch 734 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl.dirty = true;
arch 743 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.doorbell_request)
arch 751 arch/powerpc/kvm/book3s_hv.c vc = vcpu->arch.vcore;
arch 758 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
arch 760 arch/powerpc/kvm/book3s_hv.c if ((!vcpu->arch.vcore->arch_compat) &&
arch 781 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ciabr = value1;
arch 792 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dawr = value1;
arch 793 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dawrx = value2;
arch 876 arch/powerpc/kvm/book3s_hv.c struct kvmppc_vcore *vcore = target->arch.vcore;
arch 887 arch/powerpc/kvm/book3s_hv.c if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
arch 901 arch/powerpc/kvm/book3s_hv.c spin_lock(&vcpu->arch.vpa_update_lock);
arch 902 arch/powerpc/kvm/book3s_hv.c lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
arch 905 arch/powerpc/kvm/book3s_hv.c spin_unlock(&vcpu->arch.vpa_update_lock);
arch 918 arch/powerpc/kvm/book3s_hv.c !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
arch 931 arch/powerpc/kvm/book3s_hv.c tvcpu->arch.prodded = 1;
arch 933 arch/powerpc/kvm/book3s_hv.c if (tvcpu->arch.ceded)
arch 956 arch/powerpc/kvm/book3s_hv.c if (list_empty(&vcpu->kvm->arch.rtas_tokens))
arch 1042 arch/powerpc/kvm/book3s_hv.c if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
arch 1058 arch/powerpc/kvm/book3s_hv.c vcpu->arch.hcall_needed = 0;
arch 1062 arch/powerpc/kvm/book3s_hv.c vcpu->arch.hcall_needed = 0;
arch 1085 arch/powerpc/kvm/book3s_hv.c vcpu->arch.hcall_needed = 0;
arch 1097 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.msr |= MSR_EE;
arch 1098 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ceded = 1;
arch 1100 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.prodded) {
arch 1101 arch/powerpc/kvm/book3s_hv.c vcpu->arch.prodded = 0;
arch 1103 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ceded = 0;
arch 1149 arch/powerpc/kvm/book3s_hv.c run->debug.arch.address = kvmppc_get_pc(vcpu);
arch 1167 arch/powerpc/kvm/book3s_hv.c nthreads = vcpu->kvm->arch.emul_smt_mode;
arch 1206 arch/powerpc/kvm/book3s_hv.c thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
arch 1213 arch/powerpc/kvm/book3s_hv.c if (arg >= kvm->arch.emul_smt_mode)
arch 1218 arch/powerpc/kvm/book3s_hv.c if (!tvcpu->arch.doorbell_request) {
arch 1219 arch/powerpc/kvm/book3s_hv.c tvcpu->arch.doorbell_request = 1;
arch 1227 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vcore->dpdes = 0;
arch 1228 arch/powerpc/kvm/book3s_hv.c vcpu->arch.doorbell_request = 0;
arch 1265 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.shregs.msr & MSR_HV) {
arch 1268 arch/powerpc/kvm/book3s_hv.c vcpu->arch.trap, kvmppc_get_pc(vcpu),
arch 1269 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.msr);
arch 1272 arch/powerpc/kvm/book3s_hv.c run->hw.hardware_exit_reason = vcpu->arch.trap;
arch 1277 arch/powerpc/kvm/book3s_hv.c switch (vcpu->arch.trap) {
arch 1297 arch/powerpc/kvm/book3s_hv.c machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
arch 1305 arch/powerpc/kvm/book3s_hv.c if (!vcpu->kvm->arch.fwnmi_enabled) {
arch 1306 arch/powerpc/kvm/book3s_hv.c ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
arch 1314 arch/powerpc/kvm/book3s_hv.c run->hw.hardware_exit_reason = vcpu->arch.trap;
arch 1318 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
arch 1334 arch/powerpc/kvm/book3s_hv.c flags = vcpu->arch.shregs.msr & 0x1f0000ull;
arch 1352 arch/powerpc/kvm/book3s_hv.c vcpu->arch.hcall_needed = 1;
arch 1367 arch/powerpc/kvm/book3s_hv.c vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
arch 1368 arch/powerpc/kvm/book3s_hv.c vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
arch 1370 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
arch 1371 arch/powerpc/kvm/book3s_hv.c vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
arch 1382 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
arch 1383 arch/powerpc/kvm/book3s_hv.c vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
arch 1384 arch/powerpc/kvm/book3s_hv.c swab32(vcpu->arch.emul_inst) :
arch 1385 arch/powerpc/kvm/book3s_hv.c vcpu->arch.emul_inst;
arch 1402 arch/powerpc/kvm/book3s_hv.c if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
arch 1429 arch/powerpc/kvm/book3s_hv.c vcpu->arch.trap, kvmppc_get_pc(vcpu),
arch 1430 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.msr);
arch 1431 arch/powerpc/kvm/book3s_hv.c run->hw.hardware_exit_reason = vcpu->arch.trap;
arch 1454 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.shregs.msr & MSR_HV) {
arch 1457 arch/powerpc/kvm/book3s_hv.c vcpu->arch.trap, kvmppc_get_pc(vcpu),
arch 1458 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.msr);
arch 1462 arch/powerpc/kvm/book3s_hv.c switch (vcpu->arch.trap) {
arch 1487 arch/powerpc/kvm/book3s_hv.c machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
arch 1501 arch/powerpc/kvm/book3s_hv.c vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
arch 1502 arch/powerpc/kvm/book3s_hv.c vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
arch 1504 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
arch 1505 arch/powerpc/kvm/book3s_hv.c vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
arch 1524 arch/powerpc/kvm/book3s_hv.c vcpu->arch.trap = 0;
arch 1543 arch/powerpc/kvm/book3s_hv.c sregs->pvr = vcpu->arch.pvr;
arch 1544 arch/powerpc/kvm/book3s_hv.c for (i = 0; i < vcpu->arch.slb_max; i++) {
arch 1545 arch/powerpc/kvm/book3s_hv.c sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
arch 1546 arch/powerpc/kvm/book3s_hv.c sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
arch 1558 arch/powerpc/kvm/book3s_hv.c if (sregs->pvr != vcpu->arch.pvr)
arch 1562 arch/powerpc/kvm/book3s_hv.c for (i = 0; i < vcpu->arch.slb_nr; i++) {
arch 1564 arch/powerpc/kvm/book3s_hv.c vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
arch 1565 arch/powerpc/kvm/book3s_hv.c vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
arch 1569 arch/powerpc/kvm/book3s_hv.c vcpu->arch.slb_max = j;
arch 1578 arch/powerpc/kvm/book3s_hv.c struct kvmppc_vcore *vc = vcpu->arch.vcore;
arch 1591 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.vcore != vc)
arch 1594 arch/powerpc/kvm/book3s_hv.c vcpu->arch.intr_msr |= MSR_LE;
arch 1596 arch/powerpc/kvm/book3s_hv.c vcpu->arch.intr_msr &= ~MSR_LE;
arch 1636 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.dabr);
arch 1639 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.dabrx);
arch 1642 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.dscr);
arch 1645 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.purr);
arch 1648 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.spurr);
arch 1651 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.amr);
arch 1654 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.uamor);
arch 1658 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.mmcr[i]);
arch 1662 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.pmc[i]);
arch 1666 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.spmc[i]);
arch 1669 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.siar);
arch 1672 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.sdar);
arch 1675 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.sier);
arch 1678 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.iamr);
arch 1681 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.pspb);
arch 1690 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
arch 1691 arch/powerpc/kvm/book3s_hv.c vcpu->arch.doorbell_request);
arch 1694 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.vcore->vtb);
arch 1697 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.dawr);
arch 1700 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.dawrx);
arch 1703 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.ciabr);
arch 1706 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.csigr);
arch 1709 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.tacr);
arch 1712 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.tcscr);
arch 1715 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.pid);
arch 1718 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.acop);
arch 1721 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.wort);
arch 1724 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.tid);
arch 1727 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.psscr);
arch 1730 arch/powerpc/kvm/book3s_hv.c spin_lock(&vcpu->arch.vpa_update_lock);
arch 1731 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
arch 1732 arch/powerpc/kvm/book3s_hv.c spin_unlock(&vcpu->arch.vpa_update_lock);
arch 1735 arch/powerpc/kvm/book3s_hv.c spin_lock(&vcpu->arch.vpa_update_lock);
arch 1736 arch/powerpc/kvm/book3s_hv.c val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
arch 1737 arch/powerpc/kvm/book3s_hv.c val->vpaval.length = vcpu->arch.slb_shadow.len;
arch 1738 arch/powerpc/kvm/book3s_hv.c spin_unlock(&vcpu->arch.vpa_update_lock);
arch 1741 arch/powerpc/kvm/book3s_hv.c spin_lock(&vcpu->arch.vpa_update_lock);
arch 1742 arch/powerpc/kvm/book3s_hv.c val->vpaval.addr = vcpu->arch.dtl.next_gpa;
arch 1743 arch/powerpc/kvm/book3s_hv.c val->vpaval.length = vcpu->arch.dtl.len;
arch 1744 arch/powerpc/kvm/book3s_hv.c spin_unlock(&vcpu->arch.vpa_update_lock);
arch 1747 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
arch 1751 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
arch 1754 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.ppr);
arch 1758 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.tfhar);
arch 1761 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.tfiar);
arch 1764 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.texasr);
arch 1768 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
arch 1776 arch/powerpc/kvm/book3s_hv.c val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
arch 1779 arch/powerpc/kvm/book3s_hv.c val->vval = vcpu->arch.vr_tm.vr[i-32];
arch 1786 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.cr_tm);
arch 1789 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.xer_tm);
arch 1792 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.lr_tm);
arch 1795 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.ctr_tm);
arch 1798 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
arch 1801 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.amr_tm);
arch 1804 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.ppr_tm);
arch 1807 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.vrsave_tm);
arch 1811 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
arch 1816 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.dscr_tm);
arch 1819 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.tar_tm);
arch 1823 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
arch 1826 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.dec_expires +
arch 1827 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vcore->tb_offset);
arch 1830 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->arch.online);
arch 1833 arch/powerpc/kvm/book3s_hv.c *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
arch 1857 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dabr = set_reg_val(id, *val);
arch 1860 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
arch 1863 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dscr = set_reg_val(id, *val);
arch 1866 arch/powerpc/kvm/book3s_hv.c vcpu->arch.purr = set_reg_val(id, *val);
arch 1869 arch/powerpc/kvm/book3s_hv.c vcpu->arch.spurr = set_reg_val(id, *val);
arch 1872 arch/powerpc/kvm/book3s_hv.c vcpu->arch.amr = set_reg_val(id, *val);
arch 1875 arch/powerpc/kvm/book3s_hv.c vcpu->arch.uamor = set_reg_val(id, *val);
arch 1879 arch/powerpc/kvm/book3s_hv.c vcpu->arch.mmcr[i] = set_reg_val(id, *val);
arch 1883 arch/powerpc/kvm/book3s_hv.c vcpu->arch.pmc[i] = set_reg_val(id, *val);
arch 1887 arch/powerpc/kvm/book3s_hv.c vcpu->arch.spmc[i] = set_reg_val(id, *val);
arch 1890 arch/powerpc/kvm/book3s_hv.c vcpu->arch.siar = set_reg_val(id, *val);
arch 1893 arch/powerpc/kvm/book3s_hv.c vcpu->arch.sdar = set_reg_val(id, *val);
arch 1896 arch/powerpc/kvm/book3s_hv.c vcpu->arch.sier = set_reg_val(id, *val);
arch 1899 arch/powerpc/kvm/book3s_hv.c vcpu->arch.iamr = set_reg_val(id, *val);
arch 1902 arch/powerpc/kvm/book3s_hv.c vcpu->arch.pspb = set_reg_val(id, *val);
arch 1905 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
arch 1908 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vcore->vtb = set_reg_val(id, *val);
arch 1911 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dawr = set_reg_val(id, *val);
arch 1914 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
arch 1917 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ciabr = set_reg_val(id, *val);
arch 1919 arch/powerpc/kvm/book3s_hv.c if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
arch 1920 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
arch 1923 arch/powerpc/kvm/book3s_hv.c vcpu->arch.csigr = set_reg_val(id, *val);
arch 1926 arch/powerpc/kvm/book3s_hv.c vcpu->arch.tacr = set_reg_val(id, *val);
arch 1929 arch/powerpc/kvm/book3s_hv.c vcpu->arch.tcscr = set_reg_val(id, *val);
arch 1932 arch/powerpc/kvm/book3s_hv.c vcpu->arch.pid = set_reg_val(id, *val);
arch 1935 arch/powerpc/kvm/book3s_hv.c vcpu->arch.acop = set_reg_val(id, *val);
arch 1938 arch/powerpc/kvm/book3s_hv.c vcpu->arch.wort = set_reg_val(id, *val);
arch 1941 arch/powerpc/kvm/book3s_hv.c vcpu->arch.tid = set_reg_val(id, *val);
arch 1944 arch/powerpc/kvm/book3s_hv.c vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
arch 1949 arch/powerpc/kvm/book3s_hv.c if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
arch 1950 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl.next_gpa))
arch 1952 arch/powerpc/kvm/book3s_hv.c r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
arch 1958 arch/powerpc/kvm/book3s_hv.c if (addr && !vcpu->arch.vpa.next_gpa)
arch 1960 arch/powerpc/kvm/book3s_hv.c r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
arch 1967 arch/powerpc/kvm/book3s_hv.c !vcpu->arch.vpa.next_gpa))
arch 1970 arch/powerpc/kvm/book3s_hv.c r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
arch 1974 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vcore->tb_offset =
arch 1984 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ppr = set_reg_val(id, *val);
arch 1988 arch/powerpc/kvm/book3s_hv.c vcpu->arch.tfhar = set_reg_val(id, *val);
arch 1991 arch/powerpc/kvm/book3s_hv.c vcpu->arch.tfiar = set_reg_val(id, *val);
arch 1994 arch/powerpc/kvm/book3s_hv.c vcpu->arch.texasr = set_reg_val(id, *val);
arch 1998 arch/powerpc/kvm/book3s_hv.c vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
arch 2006 arch/powerpc/kvm/book3s_hv.c vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
arch 2009 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vr_tm.vr[i-32] = val->vval;
arch 2015 arch/powerpc/kvm/book3s_hv.c vcpu->arch.cr_tm = set_reg_val(id, *val);
arch 2018 arch/powerpc/kvm/book3s_hv.c vcpu->arch.xer_tm = set_reg_val(id, *val);
arch 2021 arch/powerpc/kvm/book3s_hv.c vcpu->arch.lr_tm = set_reg_val(id, *val);
arch 2024 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ctr_tm = set_reg_val(id, *val);
arch 2027 arch/powerpc/kvm/book3s_hv.c vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
arch 2030 arch/powerpc/kvm/book3s_hv.c vcpu->arch.amr_tm = set_reg_val(id, *val);
arch 2033 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ppr_tm = set_reg_val(id, *val);
arch 2036 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vrsave_tm = set_reg_val(id, *val);
arch 2040 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
arch 2045 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dscr_tm = set_reg_val(id, *val);
arch 2048 arch/powerpc/kvm/book3s_hv.c vcpu->arch.tar_tm = set_reg_val(id, *val);
arch 2055 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dec_expires = set_reg_val(id, *val) -
arch 2056 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vcore->tb_offset;
arch 2060 arch/powerpc/kvm/book3s_hv.c if (i && !vcpu->arch.online)
arch 2061 arch/powerpc/kvm/book3s_hv.c atomic_inc(&vcpu->arch.vcore->online_count);
arch 2062 arch/powerpc/kvm/book3s_hv.c else if (!i && vcpu->arch.online)
arch 2063 arch/powerpc/kvm/book3s_hv.c atomic_dec(&vcpu->arch.vcore->online_count);
arch 2064 arch/powerpc/kvm/book3s_hv.c vcpu->arch.online = i;
arch 2067 arch/powerpc/kvm/book3s_hv.c vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
arch 2086 arch/powerpc/kvm/book3s_hv.c if (kvm->arch.threads_indep)
arch 2104 arch/powerpc/kvm/book3s_hv.c vcore->lpcr = kvm->arch.lpcr;
arch 2117 arch/powerpc/kvm/book3s_hv.c {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
arch 2118 arch/powerpc/kvm/book3s_hv.c {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
arch 2119 arch/powerpc/kvm/book3s_hv.c {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
arch 2120 arch/powerpc/kvm/book3s_hv.c {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
arch 2121 arch/powerpc/kvm/book3s_hv.c {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
arch 2244 arch/powerpc/kvm/book3s_hv.c if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
arch 2246 arch/powerpc/kvm/book3s_hv.c vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
arch 2247 arch/powerpc/kvm/book3s_hv.c if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
arch 2249 arch/powerpc/kvm/book3s_hv.c vcpu->arch.debugfs_timings =
arch 2250 arch/powerpc/kvm/book3s_hv.c debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
arch 2277 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shared = &vcpu->arch.shregs;
arch 2284 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shared_big_endian = true;
arch 2286 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shared_big_endian = false;
arch 2289 arch/powerpc/kvm/book3s_hv.c vcpu->arch.mmcr[0] = MMCR0_FC;
arch 2290 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ctrl = CTRL_RUNLATCH;
arch 2293 arch/powerpc/kvm/book3s_hv.c spin_lock_init(&vcpu->arch.vpa_update_lock);
arch 2294 arch/powerpc/kvm/book3s_hv.c spin_lock_init(&vcpu->arch.tbacct_lock);
arch 2295 arch/powerpc/kvm/book3s_hv.c vcpu->arch.busy_preempt = TB_NIL;
arch 2296 arch/powerpc/kvm/book3s_hv.c vcpu->arch.intr_msr = MSR_SF | MSR_ME;
arch 2305 arch/powerpc/kvm/book3s_hv.c vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
arch 2308 arch/powerpc/kvm/book3s_hv.c vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
arch 2310 arch/powerpc/kvm/book3s_hv.c vcpu->arch.hfscr |= HFSCR_TM;
arch 2313 arch/powerpc/kvm/book3s_hv.c vcpu->arch.hfscr |= HFSCR_TM;
arch 2317 arch/powerpc/kvm/book3s_hv.c vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
arch 2319 arch/powerpc/kvm/book3s_hv.c init_waitqueue_head(&vcpu->arch.cpu_run);
arch 2325 arch/powerpc/kvm/book3s_hv.c if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
arch 2329 arch/powerpc/kvm/book3s_hv.c BUG_ON(kvm->arch.smt_mode != 1);
arch 2333 arch/powerpc/kvm/book3s_hv.c core = id / kvm->arch.smt_mode;
arch 2336 arch/powerpc/kvm/book3s_hv.c vcore = kvm->arch.vcores[core];
arch 2347 arch/powerpc/kvm/book3s_hv.c id & ~(kvm->arch.smt_mode - 1));
arch 2348 arch/powerpc/kvm/book3s_hv.c mutex_lock(&kvm->arch.mmu_setup_lock);
arch 2349 arch/powerpc/kvm/book3s_hv.c kvm->arch.vcores[core] = vcore;
arch 2350 arch/powerpc/kvm/book3s_hv.c kvm->arch.online_vcores++;
arch 2351 arch/powerpc/kvm/book3s_hv.c mutex_unlock(&kvm->arch.mmu_setup_lock);
arch 2362 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vcore = vcore;
arch 2363 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
arch 2364 arch/powerpc/kvm/book3s_hv.c vcpu->arch.thread_cpu = -1;
arch 2365 arch/powerpc/kvm/book3s_hv.c vcpu->arch.prev_cpu = -1;
arch 2367 arch/powerpc/kvm/book3s_hv.c vcpu->arch.cpu_type = KVM_CPU_3S_64;
arch 2409 arch/powerpc/kvm/book3s_hv.c if (!kvm->arch.online_vcores) {
arch 2410 arch/powerpc/kvm/book3s_hv.c kvm->arch.smt_mode = smt_mode;
arch 2411 arch/powerpc/kvm/book3s_hv.c kvm->arch.emul_smt_mode = esmt;
arch 2428 arch/powerpc/kvm/book3s_hv.c spin_lock(&vcpu->arch.vpa_update_lock);
arch 2429 arch/powerpc/kvm/book3s_hv.c unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
arch 2430 arch/powerpc/kvm/book3s_hv.c unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
arch 2431 arch/powerpc/kvm/book3s_hv.c unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
arch 2432 arch/powerpc/kvm/book3s_hv.c spin_unlock(&vcpu->arch.vpa_update_lock);
arch 2448 arch/powerpc/kvm/book3s_hv.c if (now > vcpu->arch.dec_expires) {
arch 2454 arch/powerpc/kvm/book3s_hv.c dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
arch 2455 arch/powerpc/kvm/book3s_hv.c hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
arch 2456 arch/powerpc/kvm/book3s_hv.c vcpu->arch.timer_running = 1;
arch 2461 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ceded = 0;
arch 2462 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.timer_running) {
arch 2463 arch/powerpc/kvm/book3s_hv.c hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
arch 2464 arch/powerpc/kvm/book3s_hv.c vcpu->arch.timer_running = 0;
arch 2475 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
arch 2477 arch/powerpc/kvm/book3s_hv.c spin_lock_irq(&vcpu->arch.tbacct_lock);
arch 2479 arch/powerpc/kvm/book3s_hv.c vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
arch 2480 arch/powerpc/kvm/book3s_hv.c vcpu->arch.stolen_logged;
arch 2481 arch/powerpc/kvm/book3s_hv.c vcpu->arch.busy_preempt = now;
arch 2482 arch/powerpc/kvm/book3s_hv.c vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
arch 2483 arch/powerpc/kvm/book3s_hv.c spin_unlock_irq(&vcpu->arch.tbacct_lock);
arch 2485 arch/powerpc/kvm/book3s_hv.c WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
arch 2535 arch/powerpc/kvm/book3s_hv.c struct kvm_nested_guest *nested = vcpu->arch.nested;
arch 2544 arch/powerpc/kvm/book3s_hv.c cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
arch 2545 arch/powerpc/kvm/book3s_hv.c cpu_in_guest = &kvm->arch.cpu_in_guest;
arch 2560 arch/powerpc/kvm/book3s_hv.c struct kvm_nested_guest *nested = vcpu->arch.nested;
arch 2568 arch/powerpc/kvm/book3s_hv.c prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
arch 2570 arch/powerpc/kvm/book3s_hv.c prev_cpu = vcpu->arch.prev_cpu;
arch 2590 arch/powerpc/kvm/book3s_hv.c nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
arch 2592 arch/powerpc/kvm/book3s_hv.c vcpu->arch.prev_cpu = pcpu;
arch 2604 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.timer_running) {
arch 2605 arch/powerpc/kvm/book3s_hv.c hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
arch 2606 arch/powerpc/kvm/book3s_hv.c vcpu->arch.timer_running = 0;
arch 2608 arch/powerpc/kvm/book3s_hv.c cpu += vcpu->arch.ptid;
arch 2610 arch/powerpc/kvm/book3s_hv.c vcpu->arch.thread_cpu = cpu;
arch 2611 arch/powerpc/kvm/book3s_hv.c cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
arch 2852 arch/powerpc/kvm/book3s_hv.c if (signal_pending(vcpu->arch.run_task))
arch 2853 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = -EINTR;
arch 2854 arch/powerpc/kvm/book3s_hv.c else if (vcpu->arch.vpa.update_pending ||
arch 2855 arch/powerpc/kvm/book3s_hv.c vcpu->arch.slb_shadow.update_pending ||
arch 2856 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dtl.update_pending)
arch 2857 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = RESUME_GUEST;
arch 2861 arch/powerpc/kvm/book3s_hv.c wake_up(&vcpu->arch.cpu_run);
arch 2875 arch/powerpc/kvm/book3s_hv.c if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
arch 2904 arch/powerpc/kvm/book3s_hv.c if (!vc->kvm->arch.mmu_ready)
arch 2907 arch/powerpc/kvm/book3s_hv.c if (signal_pending(vcpu->arch.run_task))
arch 2932 arch/powerpc/kvm/book3s_hv.c if (now < vcpu->arch.dec_expires &&
arch 2939 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.trap)
arch 2940 arch/powerpc/kvm/book3s_hv.c ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
arch 2941 arch/powerpc/kvm/book3s_hv.c vcpu->arch.run_task);
arch 2943 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = ret;
arch 2944 arch/powerpc/kvm/book3s_hv.c vcpu->arch.trap = 0;
arch 2947 arch/powerpc/kvm/book3s_hv.c if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
arch 2948 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.pending_exceptions)
arch 2950 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.ceded)
arch 2956 arch/powerpc/kvm/book3s_hv.c wake_up(&vcpu->arch.cpu_run);
arch 2972 arch/powerpc/kvm/book3s_hv.c wake_up(&vcpu->arch.cpu_run);
arch 3068 arch/powerpc/kvm/book3s_hv.c if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
arch 3096 arch/powerpc/kvm/book3s_hv.c (hpt_on_radix && vc->kvm->arch.threads_indep)) {
arch 3098 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = -EBUSY;
arch 3100 arch/powerpc/kvm/book3s_hv.c wake_up(&vcpu->arch.cpu_run);
arch 3189 arch/powerpc/kvm/book3s_hv.c split_info.lpidr_req = vc->kvm->arch.lpid;
arch 3190 arch/powerpc/kvm/book3s_hv.c split_info.host_lpcr = vc->kvm->arch.host_lpcr;
split_info.host_lpcr = vc->kvm->arch.host_lpcr; arch 3254 arch/powerpc/kvm/book3s_hv.c if (!vcpu->arch.ptid) arch 3256 arch/powerpc/kvm/book3s_hv.c active |= 1 << (thr + vcpu->arch.ptid); arch 3364 arch/powerpc/kvm/book3s_hv.c cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest); arch 3392 arch/powerpc/kvm/book3s_hv.c struct kvmppc_vcore *vc = vcpu->arch.vcore; arch 3424 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_PURR, vcpu->arch.purr); arch 3425 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_SPURR, vcpu->arch.spurr); arch 3428 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_DAWR, vcpu->arch.dawr); arch 3429 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_DAWRX, vcpu->arch.dawrx); arch 3431 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_CIABR, vcpu->arch.ciabr); arch 3432 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_IC, vcpu->arch.ic); arch 3433 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_PID, vcpu->arch.pid); arch 3435 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC | arch 3438 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_HFSCR, vcpu->arch.hfscr); arch 3440 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0); arch 3441 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1); arch 3442 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2); arch 3443 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3); arch 3452 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0); arch 3453 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1); arch 3461 arch/powerpc/kvm/book3s_hv.c purr - vcpu->arch.purr); arch 3463 arch/powerpc/kvm/book3s_hv.c spurr - vcpu->arch.spurr); arch 3464 arch/powerpc/kvm/book3s_hv.c vcpu->arch.purr = purr; arch 3465 arch/powerpc/kvm/book3s_hv.c vcpu->arch.spurr = spurr; arch 3467 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ic = mfspr(SPRN_IC); arch 3468 arch/powerpc/kvm/book3s_hv.c vcpu->arch.pid = mfspr(SPRN_PID); arch 3469 arch/powerpc/kvm/book3s_hv.c vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS; arch 3471 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0); arch 3472 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1); arch 3473 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2); arch 3474 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3); arch 3491 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */ arch 3510 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr); arch 3522 arch/powerpc/kvm/book3s_hv.c struct kvmppc_vcore *vc = vcpu->arch.vcore; arch 3539 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ceded = 0; arch 3548 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.vpa.pinned_addr) { arch 3549 arch/powerpc/kvm/book3s_hv.c struct lppaca *lp = vcpu->arch.vpa.pinned_addr; arch 3552 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vpa.dirty = 1; arch 3557 arch/powerpc/kvm/book3s_hv.c kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true); arch 3562 arch/powerpc/kvm/book3s_hv.c load_fp_state(&vcpu->arch.fp); arch 3564 arch/powerpc/kvm/book3s_hv.c load_vr_state(&vcpu->arch.vr); arch 3566 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); arch 3568 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_DSCR, vcpu->arch.dscr); arch 3569 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_IAMR, vcpu->arch.iamr); arch 3570 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_PSPB, vcpu->arch.pspb); arch 3571 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_FSCR, vcpu->arch.fscr); arch 
3572 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_TAR, vcpu->arch.tar); arch 3573 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_EBBHR, vcpu->arch.ebbhr); arch 3574 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_EBBRR, vcpu->arch.ebbrr); arch 3575 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_BESCR, vcpu->arch.bescr); arch 3576 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_WORT, vcpu->arch.wort); arch 3577 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_TIDR, vcpu->arch.tid); arch 3578 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_DAR, vcpu->arch.shregs.dar); arch 3579 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); arch 3580 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_AMR, vcpu->arch.amr); arch 3581 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_UAMOR, vcpu->arch.uamor); arch 3583 arch/powerpc/kvm/book3s_hv.c if (!(vcpu->arch.ctrl & 1)) arch 3586 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb()); arch 3600 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); arch 3603 arch/powerpc/kvm/book3s_hv.c vcpu->arch.regs.msr = vcpu->arch.shregs.msr; arch 3605 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.nested) { arch 3606 arch/powerpc/kvm/book3s_hv.c hvregs.lpid = vcpu->arch.nested->shadow_lpid; arch 3607 arch/powerpc/kvm/book3s_hv.c hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; arch 3609 arch/powerpc/kvm/book3s_hv.c hvregs.lpid = vcpu->kvm->arch.lpid; arch 3614 arch/powerpc/kvm/book3s_hv.c __pa(&vcpu->arch.regs)); arch 3616 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.msr = vcpu->arch.regs.msr; arch 3617 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.dar = mfspr(SPRN_DAR); arch 3618 arch/powerpc/kvm/book3s_hv.c vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); arch 3619 arch/powerpc/kvm/book3s_hv.c vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); arch 3623 arch/powerpc/kvm/book3s_hv.c if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && arch 3633 arch/powerpc/kvm/book3s_hv.c vcpu->arch.slb_max = 0; arch 3638 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dec_expires = dec + tb; arch 3640 arch/powerpc/kvm/book3s_hv.c vcpu->arch.thread_cpu = -1; arch 3641 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ctrl = mfspr(SPRN_CTRLF); arch 3643 arch/powerpc/kvm/book3s_hv.c vcpu->arch.iamr = mfspr(SPRN_IAMR); arch 3644 arch/powerpc/kvm/book3s_hv.c vcpu->arch.pspb = mfspr(SPRN_PSPB); arch 3645 arch/powerpc/kvm/book3s_hv.c vcpu->arch.fscr = mfspr(SPRN_FSCR); arch 3646 arch/powerpc/kvm/book3s_hv.c vcpu->arch.tar = mfspr(SPRN_TAR); arch 3647 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ebbhr = mfspr(SPRN_EBBHR); arch 3648 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ebbrr = mfspr(SPRN_EBBRR); arch 3649 arch/powerpc/kvm/book3s_hv.c vcpu->arch.bescr = mfspr(SPRN_BESCR); arch 3650 arch/powerpc/kvm/book3s_hv.c vcpu->arch.wort = mfspr(SPRN_WORT); arch 3651 arch/powerpc/kvm/book3s_hv.c vcpu->arch.tid = mfspr(SPRN_TIDR); arch 3652 arch/powerpc/kvm/book3s_hv.c vcpu->arch.amr = mfspr(SPRN_AMR); arch 3653 arch/powerpc/kvm/book3s_hv.c vcpu->arch.uamor = mfspr(SPRN_UAMOR); arch 3654 arch/powerpc/kvm/book3s_hv.c vcpu->arch.dscr = mfspr(SPRN_DSCR); arch 3664 arch/powerpc/kvm/book3s_hv.c if (host_amr != vcpu->arch.amr) arch 3668 arch/powerpc/kvm/book3s_hv.c store_fp_state(&vcpu->arch.fp); arch 3670 arch/powerpc/kvm/book3s_hv.c store_vr_state(&vcpu->arch.vr); arch 3672 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); arch 3676 arch/powerpc/kvm/book3s_hv.c kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true); arch 3679 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.vpa.pinned_addr) { arch 3680 arch/powerpc/kvm/book3s_hv.c 
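/*
 * Editor's aside — the long mtspr()/mfspr() runs above form a
 * save-host / load-guest / read-back-guest / restore-host swap around
 * guest entry. A compilable rendering with the privileged ops mocked:
 * the SPR_AMR/SPR_DSCR ids and the spr_file[] backing store are stand-ins,
 * not real hardware access.
 */
#include <stdint.h>

enum { SPR_AMR, SPR_DSCR, SPR_COUNT };  /* illustrative register ids */
static uint64_t spr_file[SPR_COUNT];    /* stands in for the hardware */

static uint64_t mfspr(int n)             { return spr_file[n]; }
static void     mtspr(int n, uint64_t v) { spr_file[n] = v; }

struct spr_ctx { uint64_t amr, dscr; };

static void load_guest_sprs(const struct spr_ctx *g, struct spr_ctx *host)
{
        host->amr  = mfspr(SPR_AMR);    /* checkpoint host state ... */
        host->dscr = mfspr(SPR_DSCR);
        mtspr(SPR_AMR,  g->amr);        /* ... then install the guest's */
        mtspr(SPR_DSCR, g->dscr);
}

static void save_guest_sprs(struct spr_ctx *g, const struct spr_ctx *host)
{
        g->amr  = mfspr(SPR_AMR);       /* read back what the guest left ... */
        g->dscr = mfspr(SPR_DSCR);
        mtspr(SPR_AMR,  host->amr);     /* ... and restore the host */
        mtspr(SPR_DSCR, host->dscr);
}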
struct lppaca *lp = vcpu->arch.vpa.pinned_addr; arch 3683 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vpa.dirty = 1; arch 3713 arch/powerpc/kvm/book3s_hv.c prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); arch 3714 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { arch 3719 arch/powerpc/kvm/book3s_hv.c finish_wait(&vcpu->arch.cpu_run, &wait); arch 3745 arch/powerpc/kvm/book3s_hv.c return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < arch 3746 arch/powerpc/kvm/book3s_hv.c vcpu->arch.xive_saved_state.cppr; arch 3757 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || arch 3774 arch/powerpc/kvm/book3s_hv.c if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) arch 3892 arch/powerpc/kvm/book3s_hv.c mutex_lock(&kvm->arch.mmu_setup_lock); arch 3893 arch/powerpc/kvm/book3s_hv.c if (!kvm->arch.mmu_ready) { arch 3899 arch/powerpc/kvm/book3s_hv.c kvm->arch.mmu_ready = 1; arch 3902 arch/powerpc/kvm/book3s_hv.c mutex_unlock(&kvm->arch.mmu_setup_lock); arch 3915 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = RESUME_GUEST; arch 3916 arch/powerpc/kvm/book3s_hv.c vcpu->arch.trap = 0; arch 3922 arch/powerpc/kvm/book3s_hv.c vc = vcpu->arch.vcore; arch 3924 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ceded = 0; arch 3925 arch/powerpc/kvm/book3s_hv.c vcpu->arch.run_task = current; arch 3926 arch/powerpc/kvm/book3s_hv.c vcpu->arch.kvm_run = kvm_run; arch 3927 arch/powerpc/kvm/book3s_hv.c vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); arch 3928 arch/powerpc/kvm/book3s_hv.c vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; arch 3929 arch/powerpc/kvm/book3s_hv.c vcpu->arch.busy_preempt = TB_NIL; arch 3930 arch/powerpc/kvm/book3s_hv.c WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); arch 3951 arch/powerpc/kvm/book3s_hv.c while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && arch 3954 arch/powerpc/kvm/book3s_hv.c if (!vcpu->kvm->arch.mmu_ready) { arch 3962 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = r; arch 3976 arch/powerpc/kvm/book3s_hv.c if (signal_pending(v->arch.run_task)) { arch 3979 arch/powerpc/kvm/book3s_hv.c v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; arch 3980 arch/powerpc/kvm/book3s_hv.c v->arch.ret = -EINTR; arch 3981 arch/powerpc/kvm/book3s_hv.c wake_up(&v->arch.cpu_run); arch 3984 arch/powerpc/kvm/book3s_hv.c if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) arch 3989 arch/powerpc/kvm/book3s_hv.c n_ceded += v->arch.ceded; arch 3991 arch/powerpc/kvm/book3s_hv.c v->arch.ceded = 0; arch 4008 arch/powerpc/kvm/book3s_hv.c while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && arch 4017 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { arch 4021 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = -EINTR; arch 4028 arch/powerpc/kvm/book3s_hv.c wake_up(&v->arch.cpu_run); arch 4033 arch/powerpc/kvm/book3s_hv.c return vcpu->arch.ret; arch 4044 arch/powerpc/kvm/book3s_hv.c struct kvm_nested_guest *nested = vcpu->arch.nested; arch 4049 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = RESUME_GUEST; arch 4050 arch/powerpc/kvm/book3s_hv.c vcpu->arch.trap = 0; arch 4052 arch/powerpc/kvm/book3s_hv.c vc = vcpu->arch.vcore; arch 4053 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ceded = 0; arch 4054 arch/powerpc/kvm/book3s_hv.c vcpu->arch.run_task = current; arch 4055 arch/powerpc/kvm/book3s_hv.c vcpu->arch.kvm_run = kvm_run; arch 4056 arch/powerpc/kvm/book3s_hv.c vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); arch 4057 arch/powerpc/kvm/book3s_hv.c vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; arch 4058 
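/*
 * Editor's aside — the prepare_to_wait()/finish_wait() sequence around the
 * KVMPPC_VCPU_RUNNABLE test above is the usual "re-check the predicate,
 * then sleep" loop. A userspace analogue under pthread assumptions;
 * struct runq and its fields are invented for illustration.
 */
#include <pthread.h>

#define VCPU_RUNNABLE 1                 /* analogue of KVMPPC_VCPU_RUNNABLE */

struct runq {
        pthread_mutex_t lock;
        pthread_cond_t  cpu_run;        /* analogue of vcpu->arch.cpu_run */
        int             state;          /* analogue of vcpu->arch.state */
};

/* Sleep until the vcpu leaves the RUNNABLE state; the predicate is
 * re-tested after every wakeup, much like the schedule() loop above. */
static void wait_while_runnable(struct runq *q)
{
        pthread_mutex_lock(&q->lock);
        while (q->state == VCPU_RUNNABLE)
                pthread_cond_wait(&q->cpu_run, &q->lock);
        pthread_mutex_unlock(&q->lock);
}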
arch/powerpc/kvm/book3s_hv.c vcpu->arch.busy_preempt = TB_NIL; arch 4059 arch/powerpc/kvm/book3s_hv.c vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; arch 4065 arch/powerpc/kvm/book3s_hv.c if (!kvm->arch.mmu_ready) arch 4085 arch/powerpc/kvm/book3s_hv.c if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready) arch 4090 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.doorbell_request) { arch 4093 arch/powerpc/kvm/book3s_hv.c vcpu->arch.doorbell_request = 0; arch 4096 arch/powerpc/kvm/book3s_hv.c &vcpu->arch.pending_exceptions)) arch 4098 arch/powerpc/kvm/book3s_hv.c } else if (vcpu->arch.pending_exceptions || arch 4099 arch/powerpc/kvm/book3s_hv.c vcpu->arch.doorbell_request || arch 4101 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = RESUME_HOST; arch 4118 arch/powerpc/kvm/book3s_hv.c lpid = nested ? nested->shadow_lpid : kvm->arch.lpid; arch 4134 arch/powerpc/kvm/book3s_hv.c vcpu->arch.trap = trap; arch 4143 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_LPID, kvm->arch.host_lpid); arch 4154 arch/powerpc/kvm/book3s_hv.c cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest); arch 4164 arch/powerpc/kvm/book3s_hv.c ((get_tb() < vcpu->arch.dec_expires) || arch 4177 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = r; arch 4179 arch/powerpc/kvm/book3s_hv.c if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded && arch 4182 arch/powerpc/kvm/book3s_hv.c while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) { arch 4186 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = -EINTR; arch 4194 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ceded = 0; arch 4203 arch/powerpc/kvm/book3s_hv.c return vcpu->arch.ret; arch 4208 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ret = -EINTR; arch 4224 arch/powerpc/kvm/book3s_hv.c if (!vcpu->arch.sane) { arch 4256 arch/powerpc/kvm/book3s_hv.c if (!vcpu->arch.online) { arch 4257 arch/powerpc/kvm/book3s_hv.c atomic_inc(&vcpu->arch.vcore->online_count); arch 4258 arch/powerpc/kvm/book3s_hv.c vcpu->arch.online = 1; arch 4270 arch/powerpc/kvm/book3s_hv.c atomic_inc(&kvm->arch.vcpus_running); arch 4285 arch/powerpc/kvm/book3s_hv.c vcpu->arch.wqp = &vcpu->arch.vcore->wq; arch 4286 arch/powerpc/kvm/book3s_hv.c vcpu->arch.pgdir = current->mm->pgd; arch 4287 arch/powerpc/kvm/book3s_hv.c vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; arch 4298 arch/powerpc/kvm/book3s_hv.c if (kvm->arch.threads_indep && kvm_is_radix(kvm) && arch 4301 arch/powerpc/kvm/book3s_hv.c vcpu->arch.vcore->lpcr); arch 4306 arch/powerpc/kvm/book3s_hv.c !(vcpu->arch.shregs.msr & MSR_PR)) { arch 4314 arch/powerpc/kvm/book3s_hv.c vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); arch 4334 arch/powerpc/kvm/book3s_hv.c vcpu->arch.state = KVMPPC_VCPU_NOTREADY; arch 4335 arch/powerpc/kvm/book3s_hv.c atomic_dec(&kvm->arch.vcpus_running); arch 4442 arch/powerpc/kvm/book3s_hv.c spin_lock(&vcpu->arch.vpa_update_lock); arch 4443 arch/powerpc/kvm/book3s_hv.c kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); arch 4444 arch/powerpc/kvm/book3s_hv.c kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); arch 4445 arch/powerpc/kvm/book3s_hv.c spin_unlock(&vcpu->arch.vpa_update_lock); arch 4461 arch/powerpc/kvm/book3s_hv.c if (!dont || free->arch.rmap != dont->arch.rmap) { arch 4462 arch/powerpc/kvm/book3s_hv.c vfree(free->arch.rmap); arch 4463 arch/powerpc/kvm/book3s_hv.c free->arch.rmap = NULL; arch 4470 arch/powerpc/kvm/book3s_hv.c slot->arch.rmap = vzalloc(array_size(npages, sizeof(*slot->arch.rmap))); arch 4471 arch/powerpc/kvm/book3s_hv.c if (!slot->arch.rmap) arch 4499 arch/powerpc/kvm/book3s_hv.c atomic64_inc(&kvm->arch.mmio_update); arch 4529 
arch/powerpc/kvm/book3s_hv.c if ((kvm->arch.lpcr & mask) == lpcr) arch 4532 arch/powerpc/kvm/book3s_hv.c kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; arch 4535 arch/powerpc/kvm/book3s_hv.c struct kvmppc_vcore *vc = kvm->arch.vcores[i]; arch 4541 arch/powerpc/kvm/book3s_hv.c if (++cores_done >= kvm->arch.online_vcores) arch 4557 arch/powerpc/kvm/book3s_hv.c dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | arch 4558 arch/powerpc/kvm/book3s_hv.c ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); arch 4560 arch/powerpc/kvm/book3s_hv.c dw0 |= kvm->arch.sdr1; arch 4563 arch/powerpc/kvm/book3s_hv.c dw1 = kvm->arch.process_table; arch 4566 arch/powerpc/kvm/book3s_hv.c __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; arch 4567 arch/powerpc/kvm/book3s_hv.c dw1 = PATB_GR | kvm->arch.process_table; arch 4569 arch/powerpc/kvm/book3s_hv.c kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); arch 4588 arch/powerpc/kvm/book3s_hv.c if (!kvm->arch.hpt.virt) { arch 4637 arch/powerpc/kvm/book3s_hv.c kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | arch 4671 arch/powerpc/kvm/book3s_hv.c kvm->arch.process_table = 0; arch 4674 arch/powerpc/kvm/book3s_hv.c kvm->arch.radix = 0; arch 4696 arch/powerpc/kvm/book3s_hv.c kvm->arch.radix = 1; arch 4698 arch/powerpc/kvm/book3s_hv.c kvmppc_free_hpt(&kvm->arch.hpt); arch 4790 arch/powerpc/kvm/book3s_hv.c mutex_init(&kvm->arch.mmu_setup_lock); arch 4797 arch/powerpc/kvm/book3s_hv.c kvm->arch.lpid = lpid; arch 4811 arch/powerpc/kvm/book3s_hv.c cpumask_setall(&kvm->arch.need_tlb_flush); arch 4814 arch/powerpc/kvm/book3s_hv.c memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, arch 4815 arch/powerpc/kvm/book3s_hv.c sizeof(kvm->arch.enabled_hcalls)); arch 4818 arch/powerpc/kvm/book3s_hv.c kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); arch 4822 arch/powerpc/kvm/book3s_hv.c kvm->arch.host_lpid = mfspr(SPRN_LPID); arch 4823 arch/powerpc/kvm/book3s_hv.c kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); arch 4830 arch/powerpc/kvm/book3s_hv.c kvm->arch.vrma_slb_v = SLB_VSID_B_1T | arch 4858 arch/powerpc/kvm/book3s_hv.c kvm->arch.radix = 1; arch 4859 arch/powerpc/kvm/book3s_hv.c kvm->arch.mmu_ready = 1; arch 4864 arch/powerpc/kvm/book3s_hv.c kvmppc_free_lpid(kvm->arch.lpid); arch 4870 arch/powerpc/kvm/book3s_hv.c kvm->arch.lpcr = lpcr; arch 4873 arch/powerpc/kvm/book3s_hv.c kvm->arch.resize_hpt = NULL; arch 4880 arch/powerpc/kvm/book3s_hv.c kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ arch 4882 arch/powerpc/kvm/book3s_hv.c kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ arch 4884 arch/powerpc/kvm/book3s_hv.c kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ arch 4886 arch/powerpc/kvm/book3s_hv.c kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ arch 4897 arch/powerpc/kvm/book3s_hv.c kvm->arch.threads_indep = true; arch 4899 arch/powerpc/kvm/book3s_hv.c kvm->arch.threads_indep = indep_threads_mode; arch 4902 arch/powerpc/kvm/book3s_hv.c if (!kvm->arch.threads_indep) arch 4913 arch/powerpc/kvm/book3s_hv.c kvm->arch.smt_mode = threads_per_subcore; arch 4915 arch/powerpc/kvm/book3s_hv.c kvm->arch.smt_mode = 1; arch 4916 arch/powerpc/kvm/book3s_hv.c kvm->arch.emul_smt_mode = 1; arch 4922 arch/powerpc/kvm/book3s_hv.c kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); arch 4935 arch/powerpc/kvm/book3s_hv.c kfree(kvm->arch.vcores[i]); arch 4936 arch/powerpc/kvm/book3s_hv.c kvm->arch.online_vcores = 0; arch 4941 arch/powerpc/kvm/book3s_hv.c debugfs_remove_recursive(kvm->arch.debugfs_dir); arch 4943 arch/powerpc/kvm/book3s_hv.c if (!kvm->arch.threads_indep) arch 4952 
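/*
 * Editor's aside — kvmppc_setup_partition_table() above packs a
 * page-aligned root-table physical address, a size encoding and mode bits
 * into the two doublewords of a partition-table entry with a plain
 * or-together idiom. Sketch with invented bit positions (the real PATB
 * layout differs in detail):
 */
#include <stdint.h>

#define PATB_HR_BIT (1ULL << 63)        /* "host radix" flag (illustrative) */
#define PATB_GR_BIT (1ULL << 63)        /* "guest radix" flag (illustrative) */
#define RTS_MASK    0x1fULL             /* size encoding in the low bits */

static void make_patb_entry(uint64_t *dw0, uint64_t *dw1,
                            uint64_t pgd_pa, uint64_t size_enc,
                            uint64_t process_table)
{
        /* the root table is page aligned, so its low bits are free to
         * carry the size encoding */
        *dw0 = PATB_HR_BIT | pgd_pa | (size_enc & RTS_MASK);
        *dw1 = PATB_GR_BIT | process_table;
}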
arch/powerpc/kvm/book3s_hv.c kvmppc_free_hpt(&kvm->arch.hpt); arch 4958 arch/powerpc/kvm/book3s_hv.c kvm->arch.process_table = 0; arch 4959 arch/powerpc/kvm/book3s_hv.c kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); arch 4961 arch/powerpc/kvm/book3s_hv.c kvmppc_free_lpid(kvm->arch.lpid); arch 5002 arch/powerpc/kvm/book3s_hv.c kfree(kvm->arch.pimap); arch 5027 arch/powerpc/kvm/book3s_hv.c pimap = kvm->arch.pimap; arch 5035 arch/powerpc/kvm/book3s_hv.c kvm->arch.pimap = pimap; arch 5112 arch/powerpc/kvm/book3s_hv.c if (!kvm->arch.pimap) arch 5115 arch/powerpc/kvm/book3s_hv.c pimap = kvm->arch.pimap; arch 5317 arch/powerpc/kvm/book3s_hv.c mutex_lock(&kvm->arch.mmu_setup_lock); arch 5319 arch/powerpc/kvm/book3s_hv.c if (kvm->arch.mmu_ready) { arch 5320 arch/powerpc/kvm/book3s_hv.c kvm->arch.mmu_ready = 0; arch 5323 arch/powerpc/kvm/book3s_hv.c if (atomic_read(&kvm->arch.vcpus_running)) { arch 5324 arch/powerpc/kvm/book3s_hv.c kvm->arch.mmu_ready = 1; arch 5337 arch/powerpc/kvm/book3s_hv.c kvm->arch.process_table = cfg->process_table; arch 5345 arch/powerpc/kvm/book3s_hv.c mutex_unlock(&kvm->arch.mmu_setup_lock); arch 5358 arch/powerpc/kvm/book3s_hv.c kvm->arch.nested_enable = true; arch 5375 arch/powerpc/kvm/book3s_hv.c if (rc && vcpu->arch.nested) arch 5394 arch/powerpc/kvm/book3s_hv.c if (rc && vcpu->arch.nested) arch 212 arch/powerpc/kvm/book3s_hv_builtin.c r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]); arch 214 arch/powerpc/kvm/book3s_hv_builtin.c r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]); arch 591 arch/powerpc/kvm/book3s_hv_builtin.c vcpu->arch.regs.gpr[5] = get_tb(); arch 769 arch/powerpc/kvm/book3s_hv_builtin.c ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1; arch 775 arch/powerpc/kvm/book3s_hv_builtin.c if (vcpu->arch.shregs.msr & MSR_EE) { arch 787 arch/powerpc/kvm/book3s_hv_builtin.c unsigned long msr, old_msr = vcpu->arch.shregs.msr; arch 792 arch/powerpc/kvm/book3s_hv_builtin.c msr = vcpu->arch.intr_msr; arch 795 arch/powerpc/kvm/book3s_hv_builtin.c vcpu->arch.shregs.msr = msr; arch 798 arch/powerpc/kvm/book3s_hv_builtin.c if (vcpu->arch.doorbell_request) { arch 800 arch/powerpc/kvm/book3s_hv_builtin.c vcpu->arch.vcore->dpdes = 1; arch 802 arch/powerpc/kvm/book3s_hv_builtin.c vcpu->arch.doorbell_request = 0; arch 816 arch/powerpc/kvm/book3s_hv_builtin.c for (set = 1; set < kvm->arch.tlb_sets; ++set) { arch 826 arch/powerpc/kvm/book3s_hv_builtin.c for (set = 0; set < kvm->arch.tlb_sets; ++set) { arch 855 arch/powerpc/kvm/book3s_hv_builtin.c need_tlb_flush = &kvm->arch.need_tlb_flush; arch 30 arch/powerpc/kvm/book3s_hv_nested.c struct kvmppc_vcore *vc = vcpu->arch.vcore; arch 34 arch/powerpc/kvm/book3s_hv_nested.c hr->hfscr = vcpu->arch.hfscr; arch 36 arch/powerpc/kvm/book3s_hv_nested.c hr->dawr0 = vcpu->arch.dawr; arch 37 arch/powerpc/kvm/book3s_hv_nested.c hr->dawrx0 = vcpu->arch.dawrx; arch 38 arch/powerpc/kvm/book3s_hv_nested.c hr->ciabr = vcpu->arch.ciabr; arch 39 arch/powerpc/kvm/book3s_hv_nested.c hr->purr = vcpu->arch.purr; arch 40 arch/powerpc/kvm/book3s_hv_nested.c hr->spurr = vcpu->arch.spurr; arch 41 arch/powerpc/kvm/book3s_hv_nested.c hr->ic = vcpu->arch.ic; arch 43 arch/powerpc/kvm/book3s_hv_nested.c hr->srr0 = vcpu->arch.shregs.srr0; arch 44 arch/powerpc/kvm/book3s_hv_nested.c hr->srr1 = vcpu->arch.shregs.srr1; arch 45 arch/powerpc/kvm/book3s_hv_nested.c hr->sprg[0] = vcpu->arch.shregs.sprg0; arch 46 arch/powerpc/kvm/book3s_hv_nested.c hr->sprg[1] = vcpu->arch.shregs.sprg1; arch 47 arch/powerpc/kvm/book3s_hv_nested.c hr->sprg[2] 
= vcpu->arch.shregs.sprg2; arch 48 arch/powerpc/kvm/book3s_hv_nested.c hr->sprg[3] = vcpu->arch.shregs.sprg3; arch 49 arch/powerpc/kvm/book3s_hv_nested.c hr->pidr = vcpu->arch.pid; arch 50 arch/powerpc/kvm/book3s_hv_nested.c hr->cfar = vcpu->arch.cfar; arch 51 arch/powerpc/kvm/book3s_hv_nested.c hr->ppr = vcpu->arch.ppr; arch 99 arch/powerpc/kvm/book3s_hv_nested.c struct kvmppc_vcore *vc = vcpu->arch.vcore; arch 102 arch/powerpc/kvm/book3s_hv_nested.c hr->hfscr = vcpu->arch.hfscr; arch 103 arch/powerpc/kvm/book3s_hv_nested.c hr->purr = vcpu->arch.purr; arch 104 arch/powerpc/kvm/book3s_hv_nested.c hr->spurr = vcpu->arch.spurr; arch 105 arch/powerpc/kvm/book3s_hv_nested.c hr->ic = vcpu->arch.ic; arch 107 arch/powerpc/kvm/book3s_hv_nested.c hr->srr0 = vcpu->arch.shregs.srr0; arch 108 arch/powerpc/kvm/book3s_hv_nested.c hr->srr1 = vcpu->arch.shregs.srr1; arch 109 arch/powerpc/kvm/book3s_hv_nested.c hr->sprg[0] = vcpu->arch.shregs.sprg0; arch 110 arch/powerpc/kvm/book3s_hv_nested.c hr->sprg[1] = vcpu->arch.shregs.sprg1; arch 111 arch/powerpc/kvm/book3s_hv_nested.c hr->sprg[2] = vcpu->arch.shregs.sprg2; arch 112 arch/powerpc/kvm/book3s_hv_nested.c hr->sprg[3] = vcpu->arch.shregs.sprg3; arch 113 arch/powerpc/kvm/book3s_hv_nested.c hr->pidr = vcpu->arch.pid; arch 114 arch/powerpc/kvm/book3s_hv_nested.c hr->cfar = vcpu->arch.cfar; arch 115 arch/powerpc/kvm/book3s_hv_nested.c hr->ppr = vcpu->arch.ppr; arch 118 arch/powerpc/kvm/book3s_hv_nested.c hr->hdar = vcpu->arch.fault_dar; arch 119 arch/powerpc/kvm/book3s_hv_nested.c hr->hdsisr = vcpu->arch.fault_dsisr; arch 120 arch/powerpc/kvm/book3s_hv_nested.c hr->asdr = vcpu->arch.fault_gpa; arch 123 arch/powerpc/kvm/book3s_hv_nested.c hr->asdr = vcpu->arch.fault_gpa; arch 126 arch/powerpc/kvm/book3s_hv_nested.c hr->heir = vcpu->arch.emul_inst; arch 137 arch/powerpc/kvm/book3s_hv_nested.c hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr); arch 149 arch/powerpc/kvm/book3s_hv_nested.c struct kvmppc_vcore *vc = vcpu->arch.vcore; arch 153 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.hfscr = hr->hfscr; arch 154 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.dawr = hr->dawr0; arch 155 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.dawrx = hr->dawrx0; arch 156 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.ciabr = hr->ciabr; arch 157 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.purr = hr->purr; arch 158 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.spurr = hr->spurr; arch 159 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.ic = hr->ic; arch 161 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.srr0 = hr->srr0; arch 162 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.srr1 = hr->srr1; arch 163 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.sprg0 = hr->sprg[0]; arch 164 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.sprg1 = hr->sprg[1]; arch 165 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.sprg2 = hr->sprg[2]; arch 166 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.sprg3 = hr->sprg[3]; arch 167 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.pid = hr->pidr; arch 168 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.cfar = hr->cfar; arch 169 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.ppr = hr->ppr; arch 175 arch/powerpc/kvm/book3s_hv_nested.c struct kvmppc_vcore *vc = vcpu->arch.vcore; arch 178 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.hfscr = hr->hfscr; arch 179 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.purr = hr->purr; arch 180 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.spurr = hr->spurr; arch 181 
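/*
 * Editor's aside — the sanitisation above,
 *     hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
 * intersects what the nested guest asked for with what L0 actually
 * granted, while letting the interrupt-cause field pass through.
 * CAUSE_FIELD below is an illustrative mask:
 */
#include <stdint.h>

#define CAUSE_FIELD (0xffULL << 56)     /* pass-through cause byte */

/* Keep only the facility bits that were granted, plus the cause field. */
static uint64_t sanitise_hfscr(uint64_t requested, uint64_t granted)
{
        return requested & (CAUSE_FIELD | granted);
}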
arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.ic = hr->ic; arch 183 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.fault_dar = hr->hdar; arch 184 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.fault_dsisr = hr->hdsisr; arch 185 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.fault_gpa = hr->asdr; arch 186 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.emul_inst = hr->heir; arch 187 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.srr0 = hr->srr0; arch 188 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.srr1 = hr->srr1; arch 189 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.sprg0 = hr->sprg[0]; arch 190 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.sprg1 = hr->sprg[1]; arch 191 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.sprg2 = hr->sprg[2]; arch 192 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.sprg3 = hr->sprg[3]; arch 193 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.pid = hr->pidr; arch 194 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.cfar = hr->cfar; arch 195 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.ppr = hr->ppr; arch 201 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.trap = 0; arch 209 arch/powerpc/kvm/book3s_hv_nested.c if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR) arch 211 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr + arch 213 arch/powerpc/kvm/book3s_hv_nested.c gpr[vcpu->arch.io_gpr]); arch 214 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR; arch 224 arch/powerpc/kvm/book3s_hv_nested.c struct kvmppc_vcore *vc = vcpu->arch.vcore; arch 231 arch/powerpc/kvm/book3s_hv_nested.c if (vcpu->kvm->arch.l1_ptcr == 0) arch 266 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.regs.msr = vcpu->arch.shregs.msr; arch 267 arch/powerpc/kvm/book3s_hv_nested.c saved_l1_regs = vcpu->arch.regs; arch 275 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.nested = l2; arch 276 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token; arch 277 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.regs = l2_regs; arch 278 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.msr = vcpu->arch.regs.msr; arch 285 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.ret = RESUME_GUEST; arch 286 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.trap = 0; arch 289 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; arch 293 arch/powerpc/kvm/book3s_hv_nested.c r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp, arch 298 arch/powerpc/kvm/book3s_hv_nested.c l2_regs = vcpu->arch.regs; arch 299 arch/powerpc/kvm/book3s_hv_nested.c l2_regs.msr = vcpu->arch.shregs.msr; arch 300 arch/powerpc/kvm/book3s_hv_nested.c delta_purr = vcpu->arch.purr - l2_hv.purr; arch 301 arch/powerpc/kvm/book3s_hv_nested.c delta_spurr = vcpu->arch.spurr - l2_hv.spurr; arch 302 arch/powerpc/kvm/book3s_hv_nested.c delta_ic = vcpu->arch.ic - l2_hv.ic; arch 304 arch/powerpc/kvm/book3s_hv_nested.c save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv); arch 307 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.nested = NULL; arch 308 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.regs = saved_l1_regs; arch 309 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK; arch 312 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.msr |= MSR_TS_S; arch 315 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.purr += delta_purr; arch 316 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.spurr += delta_spurr; arch 317 
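/*
 * Editor's aside — shape of the kvmhv_enter_nested_guest() flow above:
 * checkpoint L1's register image, install L2's, run, hand the results back,
 * then fold the PURR/IC time L2 consumed into L1's counters. All names are
 * illustrative and only the two counters are modelled.
 */
#include <stdint.h>

struct regs { uint64_t purr, ic; };

static int run_l2(struct regs *vcpu, struct regs *l2,
                  int (*run)(struct regs *))
{
        struct regs saved_l1 = *vcpu;           /* checkpoint L1 */
        uint64_t purr0 = l2->purr, ic0 = l2->ic;
        int trap;

        *vcpu = *l2;                            /* install L2's image */
        trap = run(vcpu);                       /* run the nested guest */
        *l2 = *vcpu;                            /* results back to L1's view */

        *vcpu = saved_l1;                       /* restore L1 ... */
        vcpu->purr += l2->purr - purr0;         /* ... plus the time L2 used */
        vcpu->ic   += l2->ic   - ic0;
        return trap;
}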
arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.ic += delta_ic; arch 344 arch/powerpc/kvm/book3s_hv_nested.c return vcpu->arch.trap; arch 435 arch/powerpc/kvm/book3s_hv_nested.c kvm->arch.max_nested_lpid = -1; arch 460 arch/powerpc/kvm/book3s_hv_nested.c kvm->arch.l1_ptcr = ptcr; arch 551 arch/powerpc/kvm/book3s_hv_nested.c ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4); arch 552 arch/powerpc/kvm/book3s_hv_nested.c if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) arch 625 arch/powerpc/kvm/book3s_hv_nested.c if (gp == kvm->arch.nested_guests[lpid]) { arch 626 arch/powerpc/kvm/book3s_hv_nested.c kvm->arch.nested_guests[lpid] = NULL; arch 627 arch/powerpc/kvm/book3s_hv_nested.c if (lpid == kvm->arch.max_nested_lpid) { arch 628 arch/powerpc/kvm/book3s_hv_nested.c while (--lpid >= 0 && !kvm->arch.nested_guests[lpid]) arch 630 arch/powerpc/kvm/book3s_hv_nested.c kvm->arch.max_nested_lpid = lpid; arch 655 arch/powerpc/kvm/book3s_hv_nested.c for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { arch 656 arch/powerpc/kvm/book3s_hv_nested.c gp = kvm->arch.nested_guests[i]; arch 659 arch/powerpc/kvm/book3s_hv_nested.c kvm->arch.nested_guests[i] = NULL; arch 665 arch/powerpc/kvm/book3s_hv_nested.c kvm->arch.max_nested_lpid = -1; arch 698 arch/powerpc/kvm/book3s_hv_nested.c l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) arch 702 arch/powerpc/kvm/book3s_hv_nested.c gp = kvm->arch.nested_guests[l1_lpid]; arch 714 arch/powerpc/kvm/book3s_hv_nested.c if (kvm->arch.nested_guests[l1_lpid]) { arch 716 arch/powerpc/kvm/book3s_hv_nested.c gp = kvm->arch.nested_guests[l1_lpid]; arch 718 arch/powerpc/kvm/book3s_hv_nested.c kvm->arch.nested_guests[l1_lpid] = newgp; arch 722 arch/powerpc/kvm/book3s_hv_nested.c if (l1_lpid > kvm->arch.max_nested_lpid) arch 723 arch/powerpc/kvm/book3s_hv_nested.c kvm->arch.max_nested_lpid = l1_lpid; arch 748 arch/powerpc/kvm/book3s_hv_nested.c if (lpid > kvm->arch.max_nested_lpid) arch 750 arch/powerpc/kvm/book3s_hv_nested.c return kvm->arch.nested_guests[lpid]; arch 894 arch/powerpc/kvm/book3s_hv_nested.c unsigned long *rmap = &memslot->arch.rmap[gfn]; arch 904 arch/powerpc/kvm/book3s_hv_nested.c unsigned long rmap, *rmapp = &free->arch.rmap[page]; arch 1047 arch/powerpc/kvm/book3s_hv_nested.c for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { arch 1048 arch/powerpc/kvm/book3s_hv_nested.c gp = kvm->arch.nested_guests[i]; arch 1155 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.fault_gpa = fault_addr; arch 1169 arch/powerpc/kvm/book3s_hv_nested.c } else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) { arch 1187 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.fault_dsisr = flags; arch 1188 arch/powerpc/kvm/book3s_hv_nested.c if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) { arch 1189 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.msr &= ~0x783f0000ul; arch 1190 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.shregs.msr |= flags; arch 1215 arch/powerpc/kvm/book3s_hv_nested.c ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing, arch 1216 arch/powerpc/kvm/book3s_hv_nested.c gpte.raddr, kvm->arch.lpid); arch 1270 arch/powerpc/kvm/book3s_hv_nested.c unsigned long dsisr = vcpu->arch.fault_dsisr; arch 1271 arch/powerpc/kvm/book3s_hv_nested.c unsigned long ea = vcpu->arch.fault_dar; arch 1287 arch/powerpc/kvm/book3s_hv_nested.c n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL; arch 1365 arch/powerpc/kvm/book3s_hv_nested.c pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); arch 1416 
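/*
 * Editor's aside — the max_nested_lpid upkeep above: when the highest
 * occupied slot is freed, scan downward to the next occupied one, landing
 * on -1 when the table empties. lpid must be signed for the loop to
 * terminate. MAX_NESTED is an illustrative bound.
 */
#define MAX_NESTED 64

static void *nested_guests[MAX_NESTED];
static int   max_nested_lpid = -1;

static void remove_nested(int lpid)
{
        if (lpid < 0 || lpid >= MAX_NESTED || !nested_guests[lpid])
                return;
        nested_guests[lpid] = 0;
        if (lpid == max_nested_lpid) {
                while (--lpid >= 0 && !nested_guests[lpid])
                        ;
                max_nested_lpid = lpid; /* -1 once the table is empty */
        }
}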
arch/powerpc/kvm/book3s_hv_nested.c rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; arch 1433 arch/powerpc/kvm/book3s_hv_nested.c struct kvm_nested_guest *gp = vcpu->arch.nested; arch 1447 arch/powerpc/kvm/book3s_hv_nested.c while (++lpid <= kvm->arch.max_nested_lpid) { arch 1448 arch/powerpc/kvm/book3s_hv_nested.c if (kvm->arch.nested_guests[lpid]) { arch 45 arch/powerpc/kvm/book3s_hv_ras.c slb = vcpu->arch.slb_shadow.pinned_addr; arch 51 arch/powerpc/kvm/book3s_hv_ras.c if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) arch 70 arch/powerpc/kvm/book3s_hv_ras.c unsigned long srr1 = vcpu->arch.shregs.msr; arch 76 arch/powerpc/kvm/book3s_hv_ras.c unsigned long dsisr = vcpu->arch.shregs.dsisr; arch 86 arch/powerpc/kvm/book3s_hv_ras.c tlbiel_all_lpid(vcpu->kvm->arch.radix); arch 103 arch/powerpc/kvm/book3s_hv_ras.c tlbiel_all_lpid(vcpu->kvm->arch.radix); arch 122 arch/powerpc/kvm/book3s_hv_ras.c vcpu->arch.mce_evt = mce_evt; arch 55 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu) arch 63 arch/powerpc/kvm/book3s_hv_rm_mmu.c cpumask_setall(&kvm->arch.need_tlb_flush); arch 71 arch/powerpc/kvm/book3s_hv_rm_mmu.c cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush); arch 89 arch/powerpc/kvm/book3s_hv_rm_mmu.c head = &kvm->arch.hpt.rev[i]; arch 92 arch/powerpc/kvm/book3s_hv_rm_mmu.c tail = &kvm->arch.hpt.rev[head->back]; arch 155 arch/powerpc/kvm/book3s_hv_rm_mmu.c rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); arch 179 arch/powerpc/kvm/book3s_hv_rm_mmu.c next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]); arch 180 arch/powerpc/kvm/book3s_hv_rm_mmu.c prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]); arch 247 arch/powerpc/kvm/book3s_hv_rm_mmu.c rmap = &memslot->arch.rmap[slot_fn]; arch 317 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) arch 321 arch/powerpc/kvm/book3s_hv_rm_mmu.c hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); arch 352 arch/powerpc/kvm/book3s_hv_rm_mmu.c hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); arch 369 arch/powerpc/kvm/book3s_hv_rm_mmu.c rev = &kvm->arch.hpt.rev[pte_index]; arch 419 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.pgdir, true, arch 420 arch/powerpc/kvm/book3s_hv_rm_mmu.c &vcpu->arch.regs.gpr[4]); arch 482 arch/powerpc/kvm/book3s_hv_rm_mmu.c "r" (rbvalues[i]), "r" (kvm->arch.lpid)); arch 485 arch/powerpc/kvm/book3s_hv_rm_mmu.c fixup_tlbie_lpid(rbvalues[i - 1], kvm->arch.lpid); arch 509 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) arch 511 arch/powerpc/kvm/book3s_hv_rm_mmu.c hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); arch 527 arch/powerpc/kvm/book3s_hv_rm_mmu.c rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); arch 550 arch/powerpc/kvm/book3s_hv_rm_mmu.c atomic64_inc(&kvm->arch.mmio_update); arch 564 arch/powerpc/kvm/book3s_hv_rm_mmu.c &vcpu->arch.regs.gpr[4]); arch 570 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long *args = &vcpu->arch.regs.gpr[4]; arch 597 arch/powerpc/kvm/book3s_hv_rm_mmu.c pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) { arch 603 arch/powerpc/kvm/book3s_hv_rm_mmu.c hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4)); arch 640 arch/powerpc/kvm/book3s_hv_rm_mmu.c rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); arch 649 arch/powerpc/kvm/book3s_hv_rm_mmu.c atomic64_inc(&kvm->arch.mmio_update); arch 697 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) arch 700 
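/*
 * Editor's aside — the recurring
 *     hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
 * in the book3s_hv_rm_mmu.c entries above works because a hashed page
 * table entry is two 64-bit doublewords, i.e. 16 bytes, so index-to-address
 * is a shift by 4, guarded by a bounds check against the entry count
 * (kvmppc_hpt_npte()). Sketch with illustrative types:
 */
#include <stdint.h>
#include <stddef.h>

struct hpt { unsigned long virt; unsigned long npte; };

static uint64_t *hpte_addr(const struct hpt *hpt, unsigned long pte_index)
{
        if (pte_index >= hpt->npte)
                return NULL;    /* callers turn this into H_PARAMETER */
        return (uint64_t *)(hpt->virt + (pte_index << 4)); /* 16 B per HPTE */
}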
arch/powerpc/kvm/book3s_hv_rm_mmu.c hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); arch 720 arch/powerpc/kvm/book3s_hv_rm_mmu.c rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); arch 751 arch/powerpc/kvm/book3s_hv_rm_mmu.c atomic64_inc(&kvm->arch.mmio_update); arch 767 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) arch 773 arch/powerpc/kvm/book3s_hv_rm_mmu.c rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); arch 775 arch/powerpc/kvm/book3s_hv_rm_mmu.c hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); arch 790 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.regs.gpr[4 + i * 2] = v; arch 791 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.regs.gpr[5 + i * 2] = r; arch 808 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) arch 811 arch/powerpc/kvm/book3s_hv_rm_mmu.c rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); arch 812 arch/powerpc/kvm/book3s_hv_rm_mmu.c hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); arch 837 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.regs.gpr[4] = gr; arch 855 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) arch 858 arch/powerpc/kvm/book3s_hv_rm_mmu.c rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); arch 859 arch/powerpc/kvm/book3s_hv_rm_mmu.c hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); arch 884 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.regs.gpr[4] = gr; arch 911 arch/powerpc/kvm/book3s_hv_rm_mmu.c ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); arch 1088 arch/powerpc/kvm/book3s_hv_rm_mmu.c entry = &vcpu->arch.mmio_cache.entry[i]; arch 1102 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned int index = vcpu->arch.mmio_cache.index; arch 1104 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.mmio_cache.index++; arch 1105 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE) arch 1106 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.mmio_cache.index = 0; arch 1108 arch/powerpc/kvm/book3s_hv_rm_mmu.c return &vcpu->arch.mmio_cache.entry[index]; arch 1144 arch/powerpc/kvm/book3s_hv_rm_mmu.c hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt); arch 1155 arch/powerpc/kvm/book3s_hv_rm_mmu.c hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7)); arch 1191 arch/powerpc/kvm/book3s_hv_rm_mmu.c hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt); arch 1225 arch/powerpc/kvm/book3s_hv_rm_mmu.c mmio_update = atomic64_read(&kvm->arch.mmio_update); arch 1240 arch/powerpc/kvm/book3s_hv_rm_mmu.c hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); arch 1247 arch/powerpc/kvm/book3s_hv_rm_mmu.c rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]); arch 1259 arch/powerpc/kvm/book3s_hv_rm_mmu.c key = (vcpu->arch.shregs.msr & MSR_PR) ? 
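/*
 * Editor's aside — the mmio_cache indexing above is a plain wrap-around
 * cursor over a small fixed array: hand out the current slot, advance,
 * reset at the end. MMIO_CACHE_SIZE and the entry fields are illustrative.
 */
#define MMIO_CACHE_SIZE 4

struct mmio_entry { unsigned long eaddr, hpte_v, hpte_r; };

struct mmio_cache {
        unsigned int index;
        struct mmio_entry entry[MMIO_CACHE_SIZE];
};

static struct mmio_entry *next_mmio_entry(struct mmio_cache *c)
{
        unsigned int i = c->index;

        if (++c->index == MMIO_CACHE_SIZE)
                c->index = 0;
        return &c->entry[i];
}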
SLB_VSID_KP : SLB_VSID_KS; arch 1276 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (data && (vcpu->arch.shregs.msr & MSR_DR)) { arch 1277 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr); arch 1285 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.pgfault_addr = addr; arch 1286 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.pgfault_index = index; arch 1287 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.pgfault_hpte[0] = v; arch 1288 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.pgfault_hpte[1] = r; arch 1289 arch/powerpc/kvm/book3s_hv_rm_mmu.c vcpu->arch.pgfault_cache = cache_entry; arch 1312 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (data && (vcpu->arch.shregs.msr & MSR_IR)) arch 130 arch/powerpc/kvm/book3s_hv_rm_xics.c struct kvmppc_icp *this_icp = this_vcpu->arch.icp; arch 136 arch/powerpc/kvm/book3s_hv_rm_xics.c set_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); arch 156 arch/powerpc/kvm/book3s_hv_rm_xics.c cpu = vcpu->arch.thread_cpu; arch 177 arch/powerpc/kvm/book3s_hv_rm_xics.c clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); arch 215 arch/powerpc/kvm/book3s_hv_rm_xics.c this_vcpu->arch.icp->rm_dbgstate = new; arch 216 arch/powerpc/kvm/book3s_hv_rm_xics.c this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu; arch 493 arch/powerpc/kvm/book3s_hv_rm_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 494 arch/powerpc/kvm/book3s_hv_rm_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 523 arch/powerpc/kvm/book3s_hv_rm_xics.c vcpu->arch.regs.gpr[4] = xirr; arch 532 arch/powerpc/kvm/book3s_hv_rm_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 533 arch/powerpc/kvm/book3s_hv_rm_xics.c struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp; arch 618 arch/powerpc/kvm/book3s_hv_rm_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 619 arch/powerpc/kvm/book3s_hv_rm_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 679 arch/powerpc/kvm/book3s_hv_rm_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 680 arch/powerpc/kvm/book3s_hv_rm_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 737 arch/powerpc/kvm/book3s_hv_rm_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 738 arch/powerpc/kvm/book3s_hv_rm_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 868 arch/powerpc/kvm/book3s_hv_rm_xics.c xics = vcpu->kvm->arch.xics; arch 869 arch/powerpc/kvm/book3s_hv_rm_xics.c icp = vcpu->arch.icp; arch 17 arch/powerpc/kvm/book3s_hv_tm.c u64 msr = vcpu->arch.shregs.msr; arch 19 arch/powerpc/kvm/book3s_hv_tm.c tfiar = vcpu->arch.regs.nip & ~0x3ull; arch 21 arch/powerpc/kvm/book3s_hv_tm.c if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) arch 27 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.tfiar = tfiar; arch 29 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr; arch 42 arch/powerpc/kvm/book3s_hv_tm.c u32 instr = vcpu->arch.emul_inst; arch 43 arch/powerpc/kvm/book3s_hv_tm.c u64 msr = vcpu->arch.shregs.msr; arch 50 arch/powerpc/kvm/book3s_hv_tm.c newmsr = vcpu->arch.shregs.srr1; arch 56 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.shregs.msr = newmsr; arch 57 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.cfar = vcpu->arch.regs.nip - 4; arch 58 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.regs.nip = vcpu->arch.shregs.srr0; arch 62 arch/powerpc/kvm/book3s_hv_tm.c if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { arch 68 arch/powerpc/kvm/book3s_hv_tm.c if (!(vcpu->arch.hfscr & HFSCR_EBB)) { arch 73 arch/powerpc/kvm/book3s_hv_tm.c if ((msr & MSR_PR) && 
!(vcpu->arch.fscr & FSCR_EBB)) { arch 75 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) | arch 80 arch/powerpc/kvm/book3s_hv_tm.c bescr = vcpu->arch.bescr; arch 87 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.bescr = bescr; arch 89 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.shregs.msr = msr; arch 90 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.cfar = vcpu->arch.regs.nip - 4; arch 91 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.regs.nip = vcpu->arch.ebbrr; arch 105 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.shregs.msr = newmsr; arch 110 arch/powerpc/kvm/book3s_hv_tm.c if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { arch 116 arch/powerpc/kvm/book3s_hv_tm.c if (!(vcpu->arch.hfscr & HFSCR_TM)) { arch 123 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) | arch 130 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | arch 140 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.shregs.msr = msr; arch 145 arch/powerpc/kvm/book3s_hv_tm.c if (!(vcpu->arch.hfscr & HFSCR_TM)) { arch 152 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) | arch 164 arch/powerpc/kvm/book3s_hv_tm.c if (!(vcpu->arch.orig_texasr & TEXASR_FS)) { arch 174 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | arch 176 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.shregs.msr &= ~MSR_TS_MASK; arch 182 arch/powerpc/kvm/book3s_hv_tm.c if (!(vcpu->arch.hfscr & HFSCR_TM)) { arch 189 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) | arch 196 arch/powerpc/kvm/book3s_hv_tm.c if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) { arch 204 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | arch 206 arch/powerpc/kvm/book3s_hv_tm.c vcpu->arch.shregs.msr = msr | MSR_TS_S; arch 22 arch/powerpc/kvm/book3s_hv_tm_builtin.c u32 instr = vcpu->arch.emul_inst; arch 29 arch/powerpc/kvm/book3s_hv_tm_builtin.c newmsr = vcpu->arch.shregs.srr1; arch 34 arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.shregs.msr = newmsr; arch 35 arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.cfar = vcpu->arch.regs.nip - 4; arch 36 arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.regs.nip = vcpu->arch.shregs.srr0; arch 41 arch/powerpc/kvm/book3s_hv_tm_builtin.c msr = vcpu->arch.shregs.msr; arch 42 arch/powerpc/kvm/book3s_hv_tm_builtin.c if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) arch 45 arch/powerpc/kvm/book3s_hv_tm_builtin.c if (!(vcpu->arch.hfscr & HFSCR_EBB) || arch 57 arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.shregs.msr = msr; arch 58 arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.cfar = vcpu->arch.regs.nip - 4; arch 59 arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.regs.nip = mfspr(SPRN_EBBRR); arch 66 arch/powerpc/kvm/book3s_hv_tm_builtin.c msr = vcpu->arch.shregs.msr; arch 73 arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.shregs.msr = newmsr; arch 78 arch/powerpc/kvm/book3s_hv_tm_builtin.c msr = vcpu->arch.shregs.msr; arch 80 arch/powerpc/kvm/book3s_hv_tm_builtin.c if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) arch 83 arch/powerpc/kvm/book3s_hv_tm_builtin.c if (!(vcpu->arch.hfscr & HFSCR_TM) || !(msr & MSR_TM)) arch 87 arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; arch 89 arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | arch 103 
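/*
 * Editor's aside — two bit-splice idioms recur through the TM emulation
 * above: writing CR field 0 into the top nibble of the ccr image
 * ((ccr & 0x0fffffff) | crf << 28), and writing the 8-bit interrupt cause
 * into FSCR[63:56]:
 */
#include <stdint.h>

/* put a 4-bit condition code into CR field 0 (bits 31:28) */
static uint32_t set_cr0(uint32_t ccr, uint32_t crf)
{
        return (ccr & 0x0fffffff) | (crf << 28);
}

/* put an 8-bit interrupt cause into the top byte of the FSCR image */
static uint64_t set_fscr_cause(uint64_t fscr, uint64_t cause)
{
        return (fscr & ~(0xffULL << 56)) | (cause << 56);
}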
arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */ arch 104 arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.regs.nip = vcpu->arch.tfhar; arch 106 arch/powerpc/kvm/book3s_hv_tm_builtin.c vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000; arch 152 arch/powerpc/kvm/book3s_paired_singles.c kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]); arch 185 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.paddr_accessed = addr; arch 202 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[rs] = *((u32*)tmp); arch 247 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.paddr_accessed = addr; arch 277 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.paddr_accessed = addr; arch 284 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[rs] = tmp[1]; arch 296 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[rs] = tmp[1]; arch 314 arch/powerpc/kvm/book3s_paired_singles.c tmp[1] = vcpu->arch.qpr[rs]; arch 317 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.paddr_accessed = addr; arch 346 arch/powerpc/kvm/book3s_paired_singles.c if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)) arch 497 arch/powerpc/kvm/book3s_paired_singles.c u32 *qpr = vcpu->arch.qpr; arch 513 arch/powerpc/kvm/book3s_paired_singles.c func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3); arch 530 arch/powerpc/kvm/book3s_paired_singles.c func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3); arch 545 arch/powerpc/kvm/book3s_paired_singles.c u32 *qpr = vcpu->arch.qpr; arch 562 arch/powerpc/kvm/book3s_paired_singles.c func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2); arch 578 arch/powerpc/kvm/book3s_paired_singles.c func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2); arch 595 arch/powerpc/kvm/book3s_paired_singles.c u32 *qpr = vcpu->arch.qpr; arch 604 arch/powerpc/kvm/book3s_paired_singles.c func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in); arch 613 arch/powerpc/kvm/book3s_paired_singles.c func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in); arch 667 arch/powerpc/kvm/book3s_paired_singles.c for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) { arch 671 arch/powerpc/kvm/book3s_paired_singles.c i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]); arch 759 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; arch 760 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[ax_rd] ^= 0x80000000; arch 769 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; arch 779 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; arch 780 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[ax_rd] |= 0x80000000; arch 786 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; arch 787 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[ax_rd] &= ~0x80000000; arch 794 arch/powerpc/kvm/book3s_paired_singles.c &vcpu->arch.qpr[ax_rd]); arch 799 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; arch 804 arch/powerpc/kvm/book3s_paired_singles.c kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], arch 808 arch/powerpc/kvm/book3s_paired_singles.c &vcpu->arch.qpr[ax_rd]); arch 813 arch/powerpc/kvm/book3s_paired_singles.c kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], arch 815 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; arch 854 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc]; arch 1099 
arch/powerpc/kvm/book3s_paired_singles.c fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); arch 1103 arch/powerpc/kvm/book3s_paired_singles.c fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); arch 1107 arch/powerpc/kvm/book3s_paired_singles.c fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); arch 1111 arch/powerpc/kvm/book3s_paired_singles.c fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); arch 1115 arch/powerpc/kvm/book3s_paired_singles.c fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); arch 1121 arch/powerpc/kvm/book3s_paired_singles.c fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c); arch 1125 arch/powerpc/kvm/book3s_paired_singles.c fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); arch 1129 arch/powerpc/kvm/book3s_paired_singles.c fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); arch 1133 arch/powerpc/kvm/book3s_paired_singles.c fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); arch 1137 arch/powerpc/kvm/book3s_paired_singles.c fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); arch 1152 arch/powerpc/kvm/book3s_paired_singles.c *fpr_d = vcpu->arch.fp.fpscr; arch 1157 arch/powerpc/kvm/book3s_paired_singles.c vcpu->arch.fp.fpscr = *fpr_b; arch 1165 arch/powerpc/kvm/book3s_paired_singles.c fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); arch 1176 arch/powerpc/kvm/book3s_paired_singles.c fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); arch 1182 arch/powerpc/kvm/book3s_paired_singles.c fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); arch 1188 arch/powerpc/kvm/book3s_paired_singles.c fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); arch 1191 arch/powerpc/kvm/book3s_paired_singles.c fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); arch 1194 arch/powerpc/kvm/book3s_paired_singles.c fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); arch 1197 arch/powerpc/kvm/book3s_paired_singles.c fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); arch 1200 arch/powerpc/kvm/book3s_paired_singles.c fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); arch 1203 arch/powerpc/kvm/book3s_paired_singles.c fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); arch 1206 arch/powerpc/kvm/book3s_paired_singles.c fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); arch 1209 arch/powerpc/kvm/book3s_paired_singles.c fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); arch 1217 arch/powerpc/kvm/book3s_paired_singles.c fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); arch 1219 arch/powerpc/kvm/book3s_paired_singles.c fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d); arch 1225 arch/powerpc/kvm/book3s_paired_singles.c fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c); arch 1228 arch/powerpc/kvm/book3s_paired_singles.c fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); arch 1231 arch/powerpc/kvm/book3s_paired_singles.c fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); arch 1234 arch/powerpc/kvm/book3s_paired_singles.c fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); arch 1237 arch/powerpc/kvm/book3s_paired_singles.c fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); arch 1240 arch/powerpc/kvm/book3s_paired_singles.c fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); arch 1247 arch/powerpc/kvm/book3s_paired_singles.c for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) { arch 82 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) arch 89 
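/*
 * Editor's aside — in the paired-singles entries above, the FPR holds ps0
 * and vcpu->arch.qpr[] holds the 32-bit image of ps1; the ps_neg/ps_abs/
 * ps_nabs handling manipulates ps1 purely via the IEEE-754 sign bit:
 */
#include <stdint.h>

static uint32_t ps_neg (uint32_t s) { return s ^  0x80000000u; } /* flip sign */
static uint32_t ps_abs (uint32_t s) { return s & ~0x80000000u; } /* clear sign */
static uint32_t ps_nabs(uint32_t s) { return s |  0x80000000u; } /* force sign */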
arch/powerpc/kvm/book3s_pr.c vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; arch 112 arch/powerpc/kvm/book3s_pr.c current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; arch 153 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[0] = vcpu->arch.regs.gpr[0]; arch 154 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[1] = vcpu->arch.regs.gpr[1]; arch 155 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[2] = vcpu->arch.regs.gpr[2]; arch 156 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[3] = vcpu->arch.regs.gpr[3]; arch 157 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[4] = vcpu->arch.regs.gpr[4]; arch 158 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[5] = vcpu->arch.regs.gpr[5]; arch 159 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[6] = vcpu->arch.regs.gpr[6]; arch 160 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[7] = vcpu->arch.regs.gpr[7]; arch 161 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[8] = vcpu->arch.regs.gpr[8]; arch 162 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[9] = vcpu->arch.regs.gpr[9]; arch 163 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[10] = vcpu->arch.regs.gpr[10]; arch 164 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; arch 165 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; arch 166 arch/powerpc/kvm/book3s_pr.c svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; arch 167 arch/powerpc/kvm/book3s_pr.c svcpu->cr = vcpu->arch.regs.ccr; arch 168 arch/powerpc/kvm/book3s_pr.c svcpu->xer = vcpu->arch.regs.xer; arch 169 arch/powerpc/kvm/book3s_pr.c svcpu->ctr = vcpu->arch.regs.ctr; arch 170 arch/powerpc/kvm/book3s_pr.c svcpu->lr = vcpu->arch.regs.link; arch 171 arch/powerpc/kvm/book3s_pr.c svcpu->pc = vcpu->arch.regs.nip; arch 173 arch/powerpc/kvm/book3s_pr.c svcpu->shadow_fscr = vcpu->arch.shadow_fscr; arch 179 arch/powerpc/kvm/book3s_pr.c vcpu->arch.entry_tb = get_tb(); arch 180 arch/powerpc/kvm/book3s_pr.c vcpu->arch.entry_vtb = get_vtb(); arch 182 arch/powerpc/kvm/book3s_pr.c vcpu->arch.entry_ic = mfspr(SPRN_IC); arch 203 arch/powerpc/kvm/book3s_pr.c smsr |= (guest_msr & vcpu->arch.guest_owned_ext); arch 217 arch/powerpc/kvm/book3s_pr.c vcpu->arch.shadow_msr = smsr; arch 235 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[0] = svcpu->gpr[0]; arch 236 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[1] = svcpu->gpr[1]; arch 237 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[2] = svcpu->gpr[2]; arch 238 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[3] = svcpu->gpr[3]; arch 239 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[4] = svcpu->gpr[4]; arch 240 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[5] = svcpu->gpr[5]; arch 241 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[6] = svcpu->gpr[6]; arch 242 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[7] = svcpu->gpr[7]; arch 243 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[8] = svcpu->gpr[8]; arch 244 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[9] = svcpu->gpr[9]; arch 245 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[10] = svcpu->gpr[10]; arch 246 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; arch 247 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; arch 248 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; arch 249 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.ccr = svcpu->cr; arch 250 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.xer = svcpu->xer; arch 251 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.ctr = svcpu->ctr; arch 252 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.link = svcpu->lr; arch 253 arch/powerpc/kvm/book3s_pr.c vcpu->arch.regs.nip = svcpu->pc; arch 254 
arch/powerpc/kvm/book3s_pr.c vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; arch 255 arch/powerpc/kvm/book3s_pr.c vcpu->arch.fault_dar = svcpu->fault_dar; arch 256 arch/powerpc/kvm/book3s_pr.c vcpu->arch.fault_dsisr = svcpu->fault_dsisr; arch 257 arch/powerpc/kvm/book3s_pr.c vcpu->arch.last_inst = svcpu->last_inst; arch 259 arch/powerpc/kvm/book3s_pr.c vcpu->arch.shadow_fscr = svcpu->shadow_fscr; arch 264 arch/powerpc/kvm/book3s_pr.c vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb; arch 265 arch/powerpc/kvm/book3s_pr.c vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb; arch 266 arch/powerpc/kvm/book3s_pr.c to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb; arch 268 arch/powerpc/kvm/book3s_pr.c vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; arch 284 arch/powerpc/kvm/book3s_pr.c (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) != arch 287 arch/powerpc/kvm/book3s_pr.c old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)); arch 303 arch/powerpc/kvm/book3s_pr.c vcpu->arch.tfhar = mfspr(SPRN_TFHAR); arch 304 arch/powerpc/kvm/book3s_pr.c vcpu->arch.texasr = mfspr(SPRN_TEXASR); arch 305 arch/powerpc/kvm/book3s_pr.c vcpu->arch.tfiar = mfspr(SPRN_TFIAR); arch 312 arch/powerpc/kvm/book3s_pr.c mtspr(SPRN_TFHAR, vcpu->arch.tfhar); arch 313 arch/powerpc/kvm/book3s_pr.c mtspr(SPRN_TEXASR, vcpu->arch.texasr); arch 314 arch/powerpc/kvm/book3s_pr.c mtspr(SPRN_TFIAR, vcpu->arch.tfiar); arch 324 arch/powerpc/kvm/book3s_pr.c ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) & arch 361 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.fscr & FSCR_TAR) arch 373 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.fscr & FSCR_TAR) arch 456 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.papr_enabled) arch 479 arch/powerpc/kvm/book3s_pr.c if (!vcpu->arch.pending_exceptions) { arch 501 arch/powerpc/kvm/book3s_pr.c if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) { arch 502 arch/powerpc/kvm/book3s_pr.c struct kvm_vcpu_arch *a = &vcpu->arch; arch 519 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.magic_page_pa && arch 522 arch/powerpc/kvm/book3s_pr.c kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, arch 540 arch/powerpc/kvm/book3s_pr.c vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; arch 541 arch/powerpc/kvm/book3s_pr.c vcpu->arch.pvr = pvr; arch 548 arch/powerpc/kvm/book3s_pr.c vcpu->arch.cpu_type = KVM_CPU_3S_64; arch 556 arch/powerpc/kvm/book3s_pr.c vcpu->arch.cpu_type = KVM_CPU_3S_32; arch 563 arch/powerpc/kvm/book3s_pr.c vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32; arch 564 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && arch 566 arch/powerpc/kvm/book3s_pr.c vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; arch 588 arch/powerpc/kvm/book3s_pr.c vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | arch 595 arch/powerpc/kvm/book3s_pr.c vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; arch 610 arch/powerpc/kvm/book3s_pr.c vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS; arch 654 arch/powerpc/kvm/book3s_pr.c ulong mp_pa = vcpu->arch.magic_page_pa; arch 681 arch/powerpc/kvm/book3s_pr.c if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE)) arch 686 arch/powerpc/kvm/book3s_pr.c page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); arch 704 arch/powerpc/kvm/book3s_pr.c (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && arch 709 arch/powerpc/kvm/book3s_pr.c vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); arch 722 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.mmu.is_dcbz32(vcpu) && arch 723 arch/powerpc/kvm/book3s_pr.c (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { arch 
741 arch/powerpc/kvm/book3s_pr.c flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE; arch 751 arch/powerpc/kvm/book3s_pr.c if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { arch 767 arch/powerpc/kvm/book3s_pr.c else if (vcpu->arch.mmu.is_dcbz32(vcpu) && arch 768 arch/powerpc/kvm/book3s_pr.c (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) arch 773 arch/powerpc/kvm/book3s_pr.c vcpu->arch.paddr_accessed = pte.raddr; arch 774 arch/powerpc/kvm/book3s_pr.c vcpu->arch.vaddr_accessed = pte.eaddr; arch 795 arch/powerpc/kvm/book3s_pr.c msr &= vcpu->arch.guest_owned_ext; arch 822 arch/powerpc/kvm/book3s_pr.c vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX); arch 830 arch/powerpc/kvm/book3s_pr.c if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { arch 837 arch/powerpc/kvm/book3s_pr.c vcpu->arch.tar = mfspr(SPRN_TAR); arch 839 arch/powerpc/kvm/book3s_pr.c vcpu->arch.shadow_fscr &= ~FSCR_TAR; arch 852 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) arch 878 arch/powerpc/kvm/book3s_pr.c msr &= ~vcpu->arch.guest_owned_ext; arch 889 arch/powerpc/kvm/book3s_pr.c load_fp_state(&vcpu->arch.fp); arch 891 arch/powerpc/kvm/book3s_pr.c t->fp_save_area = &vcpu->arch.fp; arch 899 arch/powerpc/kvm/book3s_pr.c load_vr_state(&vcpu->arch.vr); arch 901 arch/powerpc/kvm/book3s_pr.c t->vr_save_area = &vcpu->arch.vr; arch 907 arch/powerpc/kvm/book3s_pr.c vcpu->arch.guest_owned_ext |= msr; arch 921 arch/powerpc/kvm/book3s_pr.c lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr; arch 928 arch/powerpc/kvm/book3s_pr.c load_fp_state(&vcpu->arch.fp); arch 936 arch/powerpc/kvm/book3s_pr.c load_vr_state(&vcpu->arch.vr); arch 949 arch/powerpc/kvm/book3s_pr.c vcpu->arch.fscr &= ~(0xffULL << 56); arch 950 arch/powerpc/kvm/book3s_pr.c vcpu->arch.fscr |= (fac << 56); arch 980 arch/powerpc/kvm/book3s_pr.c guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac)); arch 1000 arch/powerpc/kvm/book3s_pr.c mtspr(SPRN_TAR, vcpu->arch.tar); arch 1001 arch/powerpc/kvm/book3s_pr.c vcpu->arch.shadow_fscr |= FSCR_TAR; arch 1025 arch/powerpc/kvm/book3s_pr.c if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { arch 1028 arch/powerpc/kvm/book3s_pr.c } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) { arch 1029 arch/powerpc/kvm/book3s_pr.c vcpu->arch.fscr = fscr; arch 1034 arch/powerpc/kvm/book3s_pr.c vcpu->arch.fscr = fscr; arch 1071 arch/powerpc/kvm/book3s_pr.c flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; arch 1138 arch/powerpc/kvm/book3s_pr.c ulong shadow_srr1 = vcpu->arch.shadow_srr1; arch 1168 arch/powerpc/kvm/book3s_pr.c } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && arch 1169 arch/powerpc/kvm/book3s_pr.c (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { arch 1187 arch/powerpc/kvm/book3s_pr.c u32 fault_dsisr = vcpu->arch.fault_dsisr; arch 1267 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.papr_enabled) { arch 1277 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.papr_enabled && arch 1297 arch/powerpc/kvm/book3s_pr.c vcpu->arch.hcall_needed = 1; arch 1299 arch/powerpc/kvm/book3s_pr.c } else if (vcpu->arch.osi_enabled && arch 1309 arch/powerpc/kvm/book3s_pr.c vcpu->arch.osi_needed = 1; arch 1332 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { arch 1384 arch/powerpc/kvm/book3s_pr.c r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); arch 1402 arch/powerpc/kvm/book3s_pr.c ulong shadow_srr1 = vcpu->arch.shadow_srr1; arch 1445 arch/powerpc/kvm/book3s_pr.c sregs->pvr = vcpu->arch.pvr; arch 1448 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.hflags & 
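/*
 * Editor's aside — guest_owned_ext in the book3s_pr.c entries above is a
 * lazy-ownership bitmask: a facility's register state is loaded on first
 * guest use, the bit records that the guest now owns the live registers,
 * and giveup saves and clears. Skeleton of the idea; the actual
 * load_fp_state()/store_fp_state() calls are elided and the MSR bit
 * values are the architected ones:
 */
#include <stdint.h>

#define MSR_FP  (1ULL << 13)
#define MSR_VEC (1ULL << 25)

struct extctl { uint64_t guest_owned_ext; };

static void ext_use(struct extctl *c, uint64_t msr)
{
        msr &= ~c->guest_owned_ext;     /* only facilities not already live */
        if (!msr)
                return;
        /* load_fp_state()/load_vr_state() would run here per set bit */
        c->guest_owned_ext |= msr;      /* guest now owns them */
}

static void ext_giveup(struct extctl *c, uint64_t msr)
{
        msr &= c->guest_owned_ext;      /* only facilities the guest owns */
        /* store_fp_state()/store_vr_state() would run here */
        c->guest_owned_ext &= ~msr;
}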
BOOK3S_HFLAG_SLB) { arch 1450 arch/powerpc/kvm/book3s_pr.c sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; arch 1451 arch/powerpc/kvm/book3s_pr.c sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; arch 1476 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { arch 1478 arch/powerpc/kvm/book3s_pr.c vcpu->arch.mmu.slbmte(vcpu, 0, 0); arch 1479 arch/powerpc/kvm/book3s_pr.c vcpu->arch.mmu.slbia(vcpu); arch 1486 arch/powerpc/kvm/book3s_pr.c vcpu->arch.mmu.slbmte(vcpu, rs, rb); arch 1492 arch/powerpc/kvm/book3s_pr.c vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); arch 1532 arch/powerpc/kvm/book3s_pr.c if (vcpu->arch.intr_msr & MSR_LE) arch 1539 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.tfhar); arch 1542 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.tfiar); arch 1545 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.texasr); arch 1549 arch/powerpc/kvm/book3s_pr.c vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]); arch 1558 arch/powerpc/kvm/book3s_pr.c val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; arch 1561 arch/powerpc/kvm/book3s_pr.c val->vval = vcpu->arch.vr_tm.vr[i-32]; arch 1568 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.cr_tm); arch 1571 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.xer_tm); arch 1574 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.lr_tm); arch 1577 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.ctr_tm); arch 1580 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); arch 1583 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.amr_tm); arch 1586 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.ppr_tm); arch 1589 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.vrsave_tm); arch 1593 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); arch 1598 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.dscr_tm); arch 1601 arch/powerpc/kvm/book3s_pr.c *val = get_reg_val(id, vcpu->arch.tar_tm); arch 1615 arch/powerpc/kvm/book3s_pr.c vcpu->arch.intr_msr |= MSR_LE; arch 1617 arch/powerpc/kvm/book3s_pr.c vcpu->arch.intr_msr &= ~MSR_LE; arch 1639 arch/powerpc/kvm/book3s_pr.c vcpu->arch.tfhar = set_reg_val(id, *val); arch 1642 arch/powerpc/kvm/book3s_pr.c vcpu->arch.tfiar = set_reg_val(id, *val); arch 1645 arch/powerpc/kvm/book3s_pr.c vcpu->arch.texasr = set_reg_val(id, *val); arch 1648 arch/powerpc/kvm/book3s_pr.c vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] = arch 1658 arch/powerpc/kvm/book3s_pr.c vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; arch 1661 arch/powerpc/kvm/book3s_pr.c vcpu->arch.vr_tm.vr[i-32] = val->vval; arch 1667 arch/powerpc/kvm/book3s_pr.c vcpu->arch.cr_tm = set_reg_val(id, *val); arch 1670 arch/powerpc/kvm/book3s_pr.c vcpu->arch.xer_tm = set_reg_val(id, *val); arch 1673 arch/powerpc/kvm/book3s_pr.c vcpu->arch.lr_tm = set_reg_val(id, *val); arch 1676 arch/powerpc/kvm/book3s_pr.c vcpu->arch.ctr_tm = set_reg_val(id, *val); arch 1679 arch/powerpc/kvm/book3s_pr.c vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); arch 1682 arch/powerpc/kvm/book3s_pr.c vcpu->arch.amr_tm = set_reg_val(id, *val); arch 1685 arch/powerpc/kvm/book3s_pr.c vcpu->arch.ppr_tm = set_reg_val(id, *val); arch 1688 arch/powerpc/kvm/book3s_pr.c vcpu->arch.vrsave_tm = set_reg_val(id, *val); arch 1692 arch/powerpc/kvm/book3s_pr.c vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); arch 1697 arch/powerpc/kvm/book3s_pr.c vcpu->arch.dscr_tm = set_reg_val(id, *val); 
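
The book3s_pr.c entries around 1539-1601 above, and the set_reg_val entries that follow, all implement one pattern: the KVM ONE_REG interface maps each register id onto a field of vcpu->arch. A minimal sketch of that accessor shape, assuming kernel-tree headers (linux/kvm_host.h, asm/kvm_ppc.h); the function name is hypothetical and only two of the transactional-memory ids are shown:

	/* Hypothetical condensation of the ONE_REG "get" path shown above. */
	static int get_one_reg_sketch(struct kvm_vcpu *vcpu, u64 id,
				      union kvmppc_one_reg *val)
	{
		int r = 0;

		switch (id) {
		case KVM_REG_PPC_TFHAR:		/* TM failure handler address */
			*val = get_reg_val(id, vcpu->arch.tfhar);
			break;
		case KVM_REG_PPC_TEXASR:	/* TM exception and summary register */
			*val = get_reg_val(id, vcpu->arch.texasr);
			break;
		default:
			r = -EINVAL;		/* id not handled at this layer */
			break;
		}
		return r;
	}

The "set" path is symmetric, as in entry 1639 below: vcpu->arch.tfhar = set_reg_val(id, *val);
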
arch 1700 arch/powerpc/kvm/book3s_pr.c vcpu->arch.tar_tm = set_reg_val(id, *val); arch 1726 arch/powerpc/kvm/book3s_pr.c vcpu->arch.book3s = vcpu_book3s; arch 1729 arch/powerpc/kvm/book3s_pr.c vcpu->arch.shadow_vcpu = arch 1730 arch/powerpc/kvm/book3s_pr.c kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); arch 1731 arch/powerpc/kvm/book3s_pr.c if (!vcpu->arch.shadow_vcpu) arch 1743 arch/powerpc/kvm/book3s_pr.c vcpu->arch.shared = (void *)p; arch 1747 arch/powerpc/kvm/book3s_pr.c vcpu->arch.shared_big_endian = true; arch 1749 arch/powerpc/kvm/book3s_pr.c vcpu->arch.shared_big_endian = false; arch 1757 arch/powerpc/kvm/book3s_pr.c vcpu->arch.pvr = 0x3C0301; arch 1759 arch/powerpc/kvm/book3s_pr.c vcpu->arch.pvr = mfspr(SPRN_PVR); arch 1760 arch/powerpc/kvm/book3s_pr.c vcpu->arch.intr_msr = MSR_SF; arch 1763 arch/powerpc/kvm/book3s_pr.c vcpu->arch.pvr = 0x84202; arch 1765 arch/powerpc/kvm/book3s_pr.c kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); arch 1766 arch/powerpc/kvm/book3s_pr.c vcpu->arch.slb_nr = 64; arch 1768 arch/powerpc/kvm/book3s_pr.c vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; arch 1777 arch/powerpc/kvm/book3s_pr.c free_page((unsigned long)vcpu->arch.shared); arch 1782 arch/powerpc/kvm/book3s_pr.c kfree(vcpu->arch.shadow_vcpu); arch 1796 arch/powerpc/kvm/book3s_pr.c free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); arch 1799 arch/powerpc/kvm/book3s_pr.c kfree(vcpu->arch.shadow_vcpu); arch 1813 arch/powerpc/kvm/book3s_pr.c if (!vcpu->arch.sane) { arch 1964 arch/powerpc/kvm/book3s_pr.c if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { arch 2006 arch/powerpc/kvm/book3s_pr.c mutex_init(&kvm->arch.hpt_mutex); arch 2025 arch/powerpc/kvm/book3s_pr.c WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); arch 49 arch/powerpc/kvm/book3s_pr_papr.c mutex_lock(&vcpu->kvm->arch.hpt_mutex); arch 80 arch/powerpc/kvm/book3s_pr_papr.c mutex_unlock(&vcpu->kvm->arch.hpt_mutex); arch 96 arch/powerpc/kvm/book3s_pr_papr.c mutex_lock(&vcpu->kvm->arch.hpt_mutex); arch 114 arch/powerpc/kvm/book3s_pr_papr.c vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); arch 121 arch/powerpc/kvm/book3s_pr_papr.c mutex_unlock(&vcpu->kvm->arch.hpt_mutex); arch 151 arch/powerpc/kvm/book3s_pr_papr.c mutex_lock(&vcpu->kvm->arch.hpt_mutex); arch 202 arch/powerpc/kvm/book3s_pr_papr.c vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); arch 208 arch/powerpc/kvm/book3s_pr_papr.c mutex_unlock(&vcpu->kvm->arch.hpt_mutex); arch 224 arch/powerpc/kvm/book3s_pr_papr.c mutex_lock(&vcpu->kvm->arch.hpt_mutex); arch 247 arch/powerpc/kvm/book3s_pr_papr.c vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? 
true : false); arch 256 arch/powerpc/kvm/book3s_pr_papr.c mutex_unlock(&vcpu->kvm->arch.hpt_mutex); arch 359 arch/powerpc/kvm/book3s_pr_papr.c !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls)) arch 397 arch/powerpc/kvm/book3s_pr_papr.c if (list_empty(&vcpu->kvm->arch.rtas_tokens)) arch 469 arch/powerpc/kvm/book3s_pr_papr.c __set_bit(hcall / 4, kvm->arch.enabled_hcalls); arch 146 arch/powerpc/kvm/book3s_rtas.c lockdep_assert_held(&kvm->arch.rtas_token_lock); arch 148 arch/powerpc/kvm/book3s_rtas.c list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { arch 167 arch/powerpc/kvm/book3s_rtas.c lockdep_assert_held(&kvm->arch.rtas_token_lock); arch 169 arch/powerpc/kvm/book3s_rtas.c list_for_each_entry(d, &kvm->arch.rtas_tokens, list) { arch 193 arch/powerpc/kvm/book3s_rtas.c list_add_tail(&d->list, &kvm->arch.rtas_tokens); arch 206 arch/powerpc/kvm/book3s_rtas.c mutex_lock(&kvm->arch.rtas_token_lock); arch 213 arch/powerpc/kvm/book3s_rtas.c mutex_unlock(&kvm->arch.rtas_token_lock); arch 245 arch/powerpc/kvm/book3s_rtas.c mutex_lock(&vcpu->kvm->arch.rtas_token_lock); arch 248 arch/powerpc/kvm/book3s_rtas.c list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { arch 256 arch/powerpc/kvm/book3s_rtas.c mutex_unlock(&vcpu->kvm->arch.rtas_token_lock); arch 282 arch/powerpc/kvm/book3s_rtas.c list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) { arch 168 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = kvm->arch.xics; arch 198 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = kvm->arch.xics; arch 224 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = kvm->arch.xics; arch 251 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = kvm->arch.xics; arch 589 arch/powerpc/kvm/book3s_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 623 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 632 arch/powerpc/kvm/book3s_xics.c icp = vcpu->arch.icp; arch 709 arch/powerpc/kvm/book3s_xics.c icp = vcpu->arch.icp; arch 724 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 725 arch/powerpc/kvm/book3s_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 779 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 780 arch/powerpc/kvm/book3s_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 819 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 820 arch/powerpc/kvm/book3s_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 850 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 851 arch/powerpc/kvm/book3s_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 877 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 882 arch/powerpc/kvm/book3s_xics.c if (!xics || !vcpu->arch.icp) arch 960 arch/powerpc/kvm/book3s_xics.c xics_debugfs_irqmap(m, kvm->arch.pimap); arch 965 arch/powerpc/kvm/book3s_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 1075 arch/powerpc/kvm/book3s_xics.c if (!vcpu->kvm->arch.xics) arch 1089 arch/powerpc/kvm/book3s_xics.c vcpu->arch.icp = icp; arch 1098 arch/powerpc/kvm/book3s_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 1112 arch/powerpc/kvm/book3s_xics.c struct kvmppc_icp *icp = vcpu->arch.icp; arch 1113 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = vcpu->kvm->arch.xics; arch 1296 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = kvm->arch.xics; arch 1346 arch/powerpc/kvm/book3s_xics.c kvm->arch.xics = NULL; arch 1369 
arch/powerpc/kvm/book3s_xics.c if (kvm->arch.xics) arch 1372 arch/powerpc/kvm/book3s_xics.c kvm->arch.xics = xics; arch 1418 arch/powerpc/kvm/book3s_xics.c if (vcpu->arch.irq_type) arch 1423 arch/powerpc/kvm/book3s_xics.c vcpu->arch.irq_type = KVMPPC_IRQ_XICS; arch 1430 arch/powerpc/kvm/book3s_xics.c if (!vcpu->arch.icp) arch 1432 arch/powerpc/kvm/book3s_xics.c kfree(vcpu->arch.icp); arch 1433 arch/powerpc/kvm/book3s_xics.c vcpu->arch.icp = NULL; arch 1434 arch/powerpc/kvm/book3s_xics.c vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; arch 1440 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = kvm->arch.xics; arch 1456 arch/powerpc/kvm/book3s_xics.c struct kvmppc_xics *xics = kvm->arch.xics; arch 122 arch/powerpc/kvm/book3s_xics.h if (vcpu->arch.icp && nr == vcpu->arch.icp->server_num) arch 123 arch/powerpc/kvm/book3s_xics.h return vcpu->arch.icp; arch 75 arch/powerpc/kvm/book3s_xive.c if (!tima || !vcpu->arch.xive_cam_word) arch 79 arch/powerpc/kvm/book3s_xive.c __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS); arch 80 arch/powerpc/kvm/book3s_xive.c __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2); arch 81 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_pushed = 1; arch 91 arch/powerpc/kvm/book3s_xive.c vcpu->arch.irq_pending = 0; arch 97 arch/powerpc/kvm/book3s_xive.c if (vcpu->arch.xive_esc_on) { arch 98 arch/powerpc/kvm/book3s_xive.c pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + arch 125 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_esc_on = 0; arch 153 arch/powerpc/kvm/book3s_xive.c vcpu->arch.irq_pending = 1; arch 155 arch/powerpc/kvm/book3s_xive.c if (vcpu->arch.ceded) arch 167 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_esc_on = false; arch 178 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 197 arch/powerpc/kvm/book3s_xive.c vcpu->kvm->arch.lpid, xc->server_num); arch 200 arch/powerpc/kvm/book3s_xive.c vcpu->kvm->arch.lpid, xc->server_num, prio); arch 232 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_esc_raddr = xd->eoi_page; arch 233 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio; arch 247 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 283 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = kvm->arch.xive; arch 297 arch/powerpc/kvm/book3s_xive.c if (!vcpu->arch.xive_vcpu) arch 325 arch/powerpc/kvm/book3s_xive.c xc = vcpu->arch.xive_vcpu; arch 335 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 376 arch/powerpc/kvm/book3s_xive.c if (!vcpu->arch.xive_vcpu) arch 380 arch/powerpc/kvm/book3s_xive.c *server = vcpu->arch.xive_vcpu->server_num; arch 532 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = kvm->arch.xive; arch 616 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = kvm->arch.xive; arch 717 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = kvm->arch.xive; arch 739 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = kvm->arch.xive; arch 778 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = kvm->arch.xive; arch 826 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 839 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 840 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = vcpu->kvm->arch.xive; arch 861 arch/powerpc/kvm/book3s_xive.c if (WARN_ON(vcpu->arch.xive_pushed)) arch 865 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_saved_state.cppr = 
cppr; arch 899 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = kvm->arch.xive; arch 996 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = kvm->arch.xive; arch 1076 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 1078 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = kvm->arch.xive; arch 1110 arch/powerpc/kvm/book3s_xive.c if (vcpu->arch.xive_esc_on) { arch 1111 arch/powerpc/kvm/book3s_xive.c __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + arch 1113 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_esc_on = false; arch 1121 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_esc_vaddr = 0; arch 1122 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_esc_raddr = 0; arch 1146 arch/powerpc/kvm/book3s_xive.c if (!vcpu->arch.xive_esc_on) arch 1152 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 1153 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = vcpu->kvm->arch.xive; arch 1187 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_cam_word = 0; arch 1210 arch/powerpc/kvm/book3s_xive.c vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; arch 1211 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_vcpu = NULL; arch 1230 arch/powerpc/kvm/book3s_xive.c if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) arch 1232 arch/powerpc/kvm/book3s_xive.c if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) { arch 1253 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_vcpu = xc; arch 1266 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); arch 1267 arch/powerpc/kvm/book3s_xive.c vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); arch 1342 arch/powerpc/kvm/book3s_xive.c vcpu->arch.irq_type = KVMPPC_IRQ_XICS; arch 1455 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 1623 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 1803 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *xive = kvm->arch.xive; arch 1948 arch/powerpc/kvm/book3s_xive.c kvm->arch.xive = NULL; arch 1983 arch/powerpc/kvm/book3s_xive.c &kvm->arch.xive_devices.native : arch 1984 arch/powerpc/kvm/book3s_xive.c &kvm->arch.xive_devices.xics_on_xive; arch 2009 arch/powerpc/kvm/book3s_xive.c if (kvm->arch.xive) arch 2040 arch/powerpc/kvm/book3s_xive.c kvm->arch.xive = xive; arch 2046 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 2105 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 199 arch/powerpc/kvm/book3s_xive.h if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num) arch 229 arch/powerpc/kvm/book3s_xive.h if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id) arch 43 arch/powerpc/kvm/book3s_xive_native.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 73 arch/powerpc/kvm/book3s_xive_native.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 106 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_cam_word = 0; arch 117 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; arch 118 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_vcpu = NULL; arch 137 arch/powerpc/kvm/book3s_xive_native.c if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) arch 139 arch/powerpc/kvm/book3s_xive_native.c if (server_num >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) { arch 159 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_vcpu = xc; arch 166 arch/powerpc/kvm/book3s_xive_native.c 
vcpu->arch.irq_type = KVMPPC_IRQ_XIVE; arch 185 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); arch 186 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); arch 202 arch/powerpc/kvm/book3s_xive_native.c struct kvmppc_xive *xive = kvm->arch.xive; arch 585 arch/powerpc/kvm/book3s_xive_native.c xc = vcpu->arch.xive_vcpu; arch 730 arch/powerpc/kvm/book3s_xive_native.c xc = vcpu->arch.xive_vcpu; arch 808 arch/powerpc/kvm/book3s_xive_native.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 885 arch/powerpc/kvm/book3s_xive_native.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 1055 arch/powerpc/kvm/book3s_xive_native.c kvm->arch.xive = NULL; arch 1088 arch/powerpc/kvm/book3s_xive_native.c if (kvm->arch.xive) arch 1118 arch/powerpc/kvm/book3s_xive_native.c kvm->arch.xive = xive; arch 1130 arch/powerpc/kvm/book3s_xive_native.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 1141 arch/powerpc/kvm/book3s_xive_native.c val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01; arch 1156 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.nsr, arch 1157 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.cppr, arch 1158 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.ipb, arch 1159 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.pipr, arch 1160 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.w01, arch 1161 arch/powerpc/kvm/book3s_xive_native.c (u32) vcpu->arch.xive_cam_word, opal_state); arch 1168 arch/powerpc/kvm/book3s_xive_native.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 1169 arch/powerpc/kvm/book3s_xive_native.c struct kvmppc_xive *xive = vcpu->kvm->arch.xive; arch 1181 arch/powerpc/kvm/book3s_xive_native.c if (WARN_ON(vcpu->arch.xive_pushed)) arch 1188 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0]; arch 1216 arch/powerpc/kvm/book3s_xive_native.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 1223 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.nsr, arch 1224 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.cppr, arch 1225 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.ipb, arch 1226 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.pipr, arch 1227 arch/powerpc/kvm/book3s_xive_native.c vcpu->arch.xive_saved_state.w01, arch 1228 arch/powerpc/kvm/book3s_xive_native.c (u32) vcpu->arch.xive_cam_word); arch 271 arch/powerpc/kvm/book3s_xive_template.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 315 arch/powerpc/kvm/book3s_xive_template.c vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24); arch 322 arch/powerpc/kvm/book3s_xive_template.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 335 arch/powerpc/kvm/book3s_xive_template.c xc = vcpu->arch.xive_vcpu; arch 350 arch/powerpc/kvm/book3s_xive_template.c vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24); arch 441 arch/powerpc/kvm/book3s_xive_template.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 442 arch/powerpc/kvm/book3s_xive_template.c struct kvmppc_xive *xive = vcpu->kvm->arch.xive; arch 500 arch/powerpc/kvm/book3s_xive_template.c struct kvmppc_xive *xive = vcpu->kvm->arch.xive; arch 503 arch/powerpc/kvm/book3s_xive_template.c struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; arch 606 arch/powerpc/kvm/book3s_xive_template.c struct kvmppc_xive_vcpu *xc = 
vcpu->arch.xive_vcpu; arch 616 arch/powerpc/kvm/book3s_xive_template.c xc = vcpu->arch.xive_vcpu; arch 69 arch/powerpc/kvm/booke.c printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip, arch 70 arch/powerpc/kvm/booke.c vcpu->arch.shared->msr); arch 71 arch/powerpc/kvm/booke.c printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link, arch 72 arch/powerpc/kvm/booke.c vcpu->arch.regs.ctr); arch 73 arch/powerpc/kvm/booke.c printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, arch 74 arch/powerpc/kvm/booke.c vcpu->arch.shared->srr1); arch 76 arch/powerpc/kvm/booke.c printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); arch 94 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msr &= ~MSR_SPE; arch 104 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msr |= MSR_SPE; arch 110 arch/powerpc/kvm/booke.c if (vcpu->arch.shared->msr & MSR_SPE) { arch 111 arch/powerpc/kvm/booke.c if (!(vcpu->arch.shadow_msr & MSR_SPE)) arch 113 arch/powerpc/kvm/booke.c } else if (vcpu->arch.shadow_msr & MSR_SPE) { arch 137 arch/powerpc/kvm/booke.c load_fp_state(&vcpu->arch.fp); arch 139 arch/powerpc/kvm/booke.c current->thread.fp_save_area = &vcpu->arch.fp; arch 163 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msr &= ~MSR_FP; arch 164 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP; arch 179 arch/powerpc/kvm/booke.c load_vr_state(&vcpu->arch.vr); arch 181 arch/powerpc/kvm/booke.c current->thread.vr_save_area = &vcpu->arch.vr; arch 207 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msr &= ~MSR_DE; arch 208 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE; arch 218 arch/powerpc/kvm/booke.c vcpu->arch.shared->msr |= MSR_DE; arch 220 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msr |= MSR_DE; arch 221 arch/powerpc/kvm/booke.c vcpu->arch.shared->msr &= ~MSR_DE; arch 232 arch/powerpc/kvm/booke.c u32 old_msr = vcpu->arch.shared->msr; arch 238 arch/powerpc/kvm/booke.c vcpu->arch.shared->msr = new_msr; arch 250 arch/powerpc/kvm/booke.c set_bit(priority, &vcpu->arch.pending_exceptions); arch 256 arch/powerpc/kvm/booke.c vcpu->arch.queued_dear = dear_flags; arch 257 arch/powerpc/kvm/booke.c vcpu->arch.queued_esr = esr_flags; arch 264 arch/powerpc/kvm/booke.c vcpu->arch.queued_dear = dear_flags; arch 265 arch/powerpc/kvm/booke.c vcpu->arch.queued_esr = esr_flags; arch 276 arch/powerpc/kvm/booke.c vcpu->arch.queued_esr = esr_flags; arch 283 arch/powerpc/kvm/booke.c vcpu->arch.queued_dear = dear_flags; arch 284 arch/powerpc/kvm/booke.c vcpu->arch.queued_esr = esr_flags; arch 290 arch/powerpc/kvm/booke.c vcpu->arch.queued_esr = esr_flags; arch 313 arch/powerpc/kvm/booke.c return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); arch 318 arch/powerpc/kvm/booke.c clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); arch 334 arch/powerpc/kvm/booke.c clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); arch 335 arch/powerpc/kvm/booke.c clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); arch 345 arch/powerpc/kvm/booke.c clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions); arch 355 arch/powerpc/kvm/booke.c clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions); arch 366 arch/powerpc/kvm/booke.c vcpu->arch.csrr0 = srr0; arch 367 arch/powerpc/kvm/booke.c vcpu->arch.csrr1 = srr1; arch 373 arch/powerpc/kvm/booke.c vcpu->arch.dsrr0 = srr0; arch 374 arch/powerpc/kvm/booke.c vcpu->arch.dsrr1 = srr1; arch 382 arch/powerpc/kvm/booke.c vcpu->arch.mcsrr0 = srr0; arch 383 arch/powerpc/kvm/booke.c 
vcpu->arch.mcsrr1 = srr1; arch 393 arch/powerpc/kvm/booke.c ulong crit_raw = vcpu->arch.shared->critical; arch 398 arch/powerpc/kvm/booke.c ulong new_msr = vcpu->arch.shared->msr; arch 401 arch/powerpc/kvm/booke.c if (!(vcpu->arch.shared->msr & MSR_SF)) { arch 409 arch/powerpc/kvm/booke.c crit = crit && !(vcpu->arch.shared->msr & MSR_PR); arch 416 arch/powerpc/kvm/booke.c if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags) arch 449 arch/powerpc/kvm/booke.c allowed = vcpu->arch.shared->msr & MSR_CE; arch 455 arch/powerpc/kvm/booke.c allowed = vcpu->arch.shared->msr & MSR_ME; arch 465 arch/powerpc/kvm/booke.c allowed = vcpu->arch.shared->msr & MSR_EE; arch 471 arch/powerpc/kvm/booke.c allowed = vcpu->arch.shared->msr & MSR_DE; arch 485 arch/powerpc/kvm/booke.c set_guest_srr(vcpu, vcpu->arch.regs.nip, arch 486 arch/powerpc/kvm/booke.c vcpu->arch.shared->msr); arch 489 arch/powerpc/kvm/booke.c set_guest_csrr(vcpu, vcpu->arch.regs.nip, arch 490 arch/powerpc/kvm/booke.c vcpu->arch.shared->msr); arch 493 arch/powerpc/kvm/booke.c set_guest_dsrr(vcpu, vcpu->arch.regs.nip, arch 494 arch/powerpc/kvm/booke.c vcpu->arch.shared->msr); arch 497 arch/powerpc/kvm/booke.c set_guest_mcsrr(vcpu, vcpu->arch.regs.nip, arch 498 arch/powerpc/kvm/booke.c vcpu->arch.shared->msr); arch 502 arch/powerpc/kvm/booke.c vcpu->arch.regs.nip = vcpu->arch.ivpr | arch 503 arch/powerpc/kvm/booke.c vcpu->arch.ivor[priority]; arch 505 arch/powerpc/kvm/booke.c kvmppc_set_esr(vcpu, vcpu->arch.queued_esr); arch 507 arch/powerpc/kvm/booke.c kvmppc_set_dar(vcpu, vcpu->arch.queued_dear); arch 509 arch/powerpc/kvm/booke.c if (vcpu->arch.epr_flags & KVMPPC_EPR_USER) arch 511 arch/powerpc/kvm/booke.c else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) { arch 512 arch/powerpc/kvm/booke.c BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC); arch 519 arch/powerpc/kvm/booke.c if (vcpu->arch.epcr & SPRN_EPCR_ICM) arch 525 arch/powerpc/kvm/booke.c clear_bit(priority, &vcpu->arch.pending_exceptions); arch 534 arch/powerpc/kvm/booke.c if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE) arch 536 arch/powerpc/kvm/booke.c if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE) arch 538 arch/powerpc/kvm/booke.c if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK) arch 554 arch/powerpc/kvm/booke.c u32 period = TCR_GET_WP(vcpu->arch.tcr); arch 585 arch/powerpc/kvm/booke.c if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS)) arch 588 arch/powerpc/kvm/booke.c spin_lock_irqsave(&vcpu->arch.wdt_lock, flags); arch 595 arch/powerpc/kvm/booke.c mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies); arch 597 arch/powerpc/kvm/booke.c del_timer(&vcpu->arch.wdt_timer); arch 598 arch/powerpc/kvm/booke.c spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags); arch 603 arch/powerpc/kvm/booke.c struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer); arch 608 arch/powerpc/kvm/booke.c new_tsr = tsr = vcpu->arch.tsr; arch 620 arch/powerpc/kvm/booke.c } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr); arch 632 arch/powerpc/kvm/booke.c if (final && (vcpu->arch.tcr & TCR_WRC_MASK) && arch 633 arch/powerpc/kvm/booke.c vcpu->arch.watchdog_enabled) { arch 651 arch/powerpc/kvm/booke.c if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS)) arch 656 arch/powerpc/kvm/booke.c if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS)) arch 664 arch/powerpc/kvm/booke.c unsigned long *pending = &vcpu->arch.pending_exceptions; arch 678 arch/powerpc/kvm/booke.c vcpu->arch.shared->int_pending = !!*pending; arch 694 
arch/powerpc/kvm/booke.c if (vcpu->arch.shared->msr & MSR_WE) { arch 725 arch/powerpc/kvm/booke.c vcpu->arch.epr_needed = true; arch 738 arch/powerpc/kvm/booke.c if (!vcpu->arch.sane) { arch 773 arch/powerpc/kvm/booke.c debug = vcpu->arch.dbg_reg; arch 776 arch/powerpc/kvm/booke.c current->thread.debug = vcpu->arch.dbg_reg; arch 778 arch/powerpc/kvm/booke.c vcpu->arch.pgdir = current->mm->pgd; arch 821 arch/powerpc/kvm/booke.c __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst); arch 825 arch/powerpc/kvm/booke.c run->hw.hardware_exit_reason |= vcpu->arch.last_inst; arch 839 arch/powerpc/kvm/booke.c struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg); arch 840 arch/powerpc/kvm/booke.c u32 dbsr = vcpu->arch.dbsr; arch 853 arch/powerpc/kvm/booke.c if (dbsr && (vcpu->arch.shared->msr & MSR_DE) && arch 854 arch/powerpc/kvm/booke.c (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM)) arch 858 arch/powerpc/kvm/booke.c if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE)) arch 868 arch/powerpc/kvm/booke.c vcpu->arch.dbsr = 0; arch 869 arch/powerpc/kvm/booke.c run->debug.arch.status = 0; arch 870 arch/powerpc/kvm/booke.c run->debug.arch.address = vcpu->arch.regs.nip; arch 873 arch/powerpc/kvm/booke.c run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT; arch 876 arch/powerpc/kvm/booke.c run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE; arch 878 arch/powerpc/kvm/booke.c run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ; arch 880 arch/powerpc/kvm/booke.c run->debug.arch.address = dbg_reg->dac1; arch 882 arch/powerpc/kvm/booke.c run->debug.arch.address = dbg_reg->dac2; arch 951 arch/powerpc/kvm/booke.c vcpu->arch.dbsr = mfspr(SPRN_DBSR); arch 966 arch/powerpc/kvm/booke.c __func__, vcpu->arch.regs.nip); arch 1103 arch/powerpc/kvm/booke.c if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) { arch 1112 arch/powerpc/kvm/booke.c kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); arch 1129 arch/powerpc/kvm/booke.c if (vcpu->arch.shared->msr & MSR_SPE) arch 1164 arch/powerpc/kvm/booke.c __func__, exit_nr, vcpu->arch.regs.nip); arch 1187 arch/powerpc/kvm/booke.c kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear, arch 1188 arch/powerpc/kvm/booke.c vcpu->arch.fault_esr); arch 1194 arch/powerpc/kvm/booke.c kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr); arch 1200 arch/powerpc/kvm/booke.c kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear, arch 1201 arch/powerpc/kvm/booke.c vcpu->arch.fault_esr); arch 1207 arch/powerpc/kvm/booke.c if (!(vcpu->arch.shared->msr & MSR_PR)) { arch 1221 arch/powerpc/kvm/booke.c if (!(vcpu->arch.shared->msr & MSR_PR) && arch 1236 arch/powerpc/kvm/booke.c unsigned long eaddr = vcpu->arch.fault_dear; arch 1242 arch/powerpc/kvm/booke.c if (!(vcpu->arch.shared->msr & MSR_PR) && arch 1243 arch/powerpc/kvm/booke.c (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { arch 1257 arch/powerpc/kvm/booke.c vcpu->arch.fault_dear, arch 1258 arch/powerpc/kvm/booke.c vcpu->arch.fault_esr); arch 1283 arch/powerpc/kvm/booke.c vcpu->arch.paddr_accessed = gpaddr; arch 1284 arch/powerpc/kvm/booke.c vcpu->arch.vaddr_accessed = eaddr; arch 1294 arch/powerpc/kvm/booke.c unsigned long eaddr = vcpu->arch.regs.nip; arch 1370 arch/powerpc/kvm/booke.c u32 old_tsr = vcpu->arch.tsr; arch 1372 arch/powerpc/kvm/booke.c vcpu->arch.tsr = new_tsr; arch 1374 arch/powerpc/kvm/booke.c if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS)) arch 1386 arch/powerpc/kvm/booke.c vcpu->arch.regs.nip = 0; arch 1387 arch/powerpc/kvm/booke.c vcpu->arch.shared->pir = vcpu->vcpu_id; arch 1392 arch/powerpc/kvm/booke.c 
vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS; arch 1393 arch/powerpc/kvm/booke.c vcpu->arch.shadow_pid = 1; arch 1394 arch/powerpc/kvm/booke.c vcpu->arch.shared->msr = 0; arch 1399 arch/powerpc/kvm/booke.c vcpu->arch.ivpr = 0x55550000; arch 1401 arch/powerpc/kvm/booke.c vcpu->arch.ivor[i] = 0x7700 | i * 4; arch 1413 arch/powerpc/kvm/booke.c spin_lock_init(&vcpu->arch.wdt_lock); arch 1414 arch/powerpc/kvm/booke.c timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0); arch 1426 arch/powerpc/kvm/booke.c del_timer_sync(&vcpu->arch.wdt_timer); arch 1435 arch/powerpc/kvm/booke.c regs->pc = vcpu->arch.regs.nip; arch 1437 arch/powerpc/kvm/booke.c regs->ctr = vcpu->arch.regs.ctr; arch 1438 arch/powerpc/kvm/booke.c regs->lr = vcpu->arch.regs.link; arch 1440 arch/powerpc/kvm/booke.c regs->msr = vcpu->arch.shared->msr; arch 1443 arch/powerpc/kvm/booke.c regs->pid = vcpu->arch.pid; arch 1466 arch/powerpc/kvm/booke.c vcpu->arch.regs.nip = regs->pc; arch 1468 arch/powerpc/kvm/booke.c vcpu->arch.regs.ctr = regs->ctr; arch 1469 arch/powerpc/kvm/booke.c vcpu->arch.regs.link = regs->lr; arch 1498 arch/powerpc/kvm/booke.c sregs->u.e.csrr0 = vcpu->arch.csrr0; arch 1499 arch/powerpc/kvm/booke.c sregs->u.e.csrr1 = vcpu->arch.csrr1; arch 1500 arch/powerpc/kvm/booke.c sregs->u.e.mcsr = vcpu->arch.mcsr; arch 1503 arch/powerpc/kvm/booke.c sregs->u.e.tsr = vcpu->arch.tsr; arch 1504 arch/powerpc/kvm/booke.c sregs->u.e.tcr = vcpu->arch.tcr; arch 1507 arch/powerpc/kvm/booke.c sregs->u.e.vrsave = vcpu->arch.vrsave; arch 1516 arch/powerpc/kvm/booke.c vcpu->arch.csrr0 = sregs->u.e.csrr0; arch 1517 arch/powerpc/kvm/booke.c vcpu->arch.csrr1 = sregs->u.e.csrr1; arch 1518 arch/powerpc/kvm/booke.c vcpu->arch.mcsr = sregs->u.e.mcsr; arch 1521 arch/powerpc/kvm/booke.c vcpu->arch.vrsave = sregs->u.e.vrsave; arch 1525 arch/powerpc/kvm/booke.c vcpu->arch.dec = sregs->u.e.dec; arch 1541 arch/powerpc/kvm/booke.c sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0; arch 1542 arch/powerpc/kvm/booke.c sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1; arch 1543 arch/powerpc/kvm/booke.c sregs->u.e.decar = vcpu->arch.decar; arch 1544 arch/powerpc/kvm/booke.c sregs->u.e.ivpr = vcpu->arch.ivpr; arch 1556 arch/powerpc/kvm/booke.c vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0; arch 1557 arch/powerpc/kvm/booke.c vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1; arch 1558 arch/powerpc/kvm/booke.c vcpu->arch.decar = sregs->u.e.decar; arch 1559 arch/powerpc/kvm/booke.c vcpu->arch.ivpr = sregs->u.e.ivpr; arch 1568 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; arch 1569 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; arch 1570 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; arch 1571 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; arch 1572 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; arch 1573 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; arch 1574 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; arch 1575 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; arch 1576 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; arch 1577 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; arch 1578 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[10] = 
vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; arch 1579 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; arch 1580 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; arch 1581 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; arch 1582 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; arch 1583 arch/powerpc/kvm/booke.c sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; arch 1592 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0]; arch 1593 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1]; arch 1594 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2]; arch 1595 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3]; arch 1596 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4]; arch 1597 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5]; arch 1598 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6]; arch 1599 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7]; arch 1600 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8]; arch 1601 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9]; arch 1602 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10]; arch 1603 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11]; arch 1604 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12]; arch 1605 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13]; arch 1606 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14]; arch 1607 arch/powerpc/kvm/booke.c vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15]; arch 1619 arch/powerpc/kvm/booke.c sregs->pvr = vcpu->arch.pvr; arch 1623 arch/powerpc/kvm/booke.c ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); arch 1635 arch/powerpc/kvm/booke.c if (vcpu->arch.pvr != sregs->pvr) arch 1646 arch/powerpc/kvm/booke.c ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); arch 1660 arch/powerpc/kvm/booke.c *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1); arch 1663 arch/powerpc/kvm/booke.c *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2); arch 1667 arch/powerpc/kvm/booke.c *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3); arch 1670 arch/powerpc/kvm/booke.c *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4); arch 1674 arch/powerpc/kvm/booke.c *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1); arch 1677 arch/powerpc/kvm/booke.c *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2); arch 1686 arch/powerpc/kvm/booke.c *val = get_reg_val(id, vcpu->arch.epcr); arch 1690 arch/powerpc/kvm/booke.c *val = get_reg_val(id, vcpu->arch.tcr); arch 1693 arch/powerpc/kvm/booke.c *val = get_reg_val(id, vcpu->arch.tsr); arch 1699 arch/powerpc/kvm/booke.c *val = get_reg_val(id, vcpu->arch.vrsave); arch 1702 arch/powerpc/kvm/booke.c r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val); arch 1716 arch/powerpc/kvm/booke.c vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val); arch 1719 arch/powerpc/kvm/booke.c vcpu->arch.dbg_reg.iac2 = 
set_reg_val(id, *val); arch 1723 arch/powerpc/kvm/booke.c vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val); arch 1726 arch/powerpc/kvm/booke.c vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val); arch 1730 arch/powerpc/kvm/booke.c vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val); arch 1733 arch/powerpc/kvm/booke.c vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val); arch 1768 arch/powerpc/kvm/booke.c vcpu->arch.vrsave = set_reg_val(id, *val); arch 1771 arch/powerpc/kvm/booke.c r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val); arch 1837 arch/powerpc/kvm/booke.c vcpu->arch.epcr = new_epcr; arch 1839 arch/powerpc/kvm/booke.c vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM; arch 1840 arch/powerpc/kvm/booke.c if (vcpu->arch.epcr & SPRN_EPCR_ICM) arch 1841 arch/powerpc/kvm/booke.c vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM; arch 1848 arch/powerpc/kvm/booke.c vcpu->arch.tcr = new_tcr; arch 1855 arch/powerpc/kvm/booke.c set_bits(tsr_bits, &vcpu->arch.tsr); arch 1863 arch/powerpc/kvm/booke.c clear_bits(tsr_bits, &vcpu->arch.tsr); arch 1877 arch/powerpc/kvm/booke.c if (vcpu->arch.tcr & TCR_ARE) { arch 1878 arch/powerpc/kvm/booke.c vcpu->arch.dec = vcpu->arch.decar; arch 1947 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msrp |= MSRP_UCLEP; arch 1949 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msrp |= MSRP_DEP; arch 1951 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msrp |= MSRP_PMMP; arch 1954 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msrp &= ~MSRP_UCLEP; arch 1956 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msrp &= ~MSRP_DEP; arch 1958 arch/powerpc/kvm/booke.c vcpu->arch.shadow_msrp &= ~MSRP_PMMP; arch 1970 arch/powerpc/kvm/booke.c if (!(vcpu->arch.shared->msr & MSR_PR) && arch 1971 arch/powerpc/kvm/booke.c (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { arch 1973 arch/powerpc/kvm/booke.c pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) | arch 2024 arch/powerpc/kvm/booke.c vcpu->arch.dbg_reg.dbcr0 = 0; arch 2032 arch/powerpc/kvm/booke.c vcpu->arch.dbg_reg.dbcr0 = 0; arch 2035 arch/powerpc/kvm/booke.c vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC; arch 2038 arch/powerpc/kvm/booke.c dbg_reg = &(vcpu->arch.dbg_reg); arch 2063 arch/powerpc/kvm/booke.c uint64_t addr = dbg->arch.bp[n].addr; arch 2064 arch/powerpc/kvm/booke.c uint32_t type = dbg->arch.bp[n].type; arch 2109 arch/powerpc/kvm/booke.c vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); arch 2114 arch/powerpc/kvm/booke.c return kvm->arch.kvm_ops->init_vm(kvm); arch 2119 arch/powerpc/kvm/booke.c return kvm->arch.kvm_ops->vcpu_create(kvm, id); arch 2124 arch/powerpc/kvm/booke.c vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); arch 2129 arch/powerpc/kvm/booke.c kvm->arch.kvm_ops->destroy_vm(kvm); arch 2134 arch/powerpc/kvm/booke.c vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); arch 2139 arch/powerpc/kvm/booke.c vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); arch 26 arch/powerpc/kvm/booke_emulate.c vcpu->arch.regs.nip = vcpu->arch.shared->srr0; arch 27 arch/powerpc/kvm/booke_emulate.c kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); arch 32 arch/powerpc/kvm/booke_emulate.c vcpu->arch.regs.nip = vcpu->arch.dsrr0; arch 33 arch/powerpc/kvm/booke_emulate.c kvmppc_set_msr(vcpu, vcpu->arch.dsrr1); arch 38 arch/powerpc/kvm/booke_emulate.c vcpu->arch.regs.nip = vcpu->arch.csrr0; arch 39 arch/powerpc/kvm/booke_emulate.c kvmppc_set_msr(vcpu, vcpu->arch.csrr1); arch 80 arch/powerpc/kvm/booke_emulate.c kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); arch 90 arch/powerpc/kvm/booke_emulate.c vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) arch 96 arch/powerpc/kvm/booke_emulate.c 
vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) arch 127 arch/powerpc/kvm/booke_emulate.c vcpu->arch.shared->dar = spr_val; arch 130 arch/powerpc/kvm/booke_emulate.c vcpu->arch.shared->esr = spr_val; arch 133 arch/powerpc/kvm/booke_emulate.c vcpu->arch.csrr0 = spr_val; arch 136 arch/powerpc/kvm/booke_emulate.c vcpu->arch.csrr1 = spr_val; arch 139 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dsrr0 = spr_val; arch 142 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dsrr1 = spr_val; arch 153 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dbg_reg.iac1 = spr_val; arch 164 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dbg_reg.iac2 = spr_val; arch 176 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dbg_reg.iac3 = spr_val; arch 187 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dbg_reg.iac4 = spr_val; arch 199 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dbg_reg.dac1 = spr_val; arch 210 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dbg_reg.dac2 = spr_val; arch 225 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dbg_reg.dbcr0 = spr_val; arch 236 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dbg_reg.dbcr1 = spr_val; arch 247 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dbg_reg.dbcr2 = spr_val; arch 257 arch/powerpc/kvm/booke_emulate.c vcpu->arch.dbsr &= ~spr_val; arch 258 arch/powerpc/kvm/booke_emulate.c if (!(vcpu->arch.dbsr & ~DBSR_IDE)) arch 269 arch/powerpc/kvm/booke_emulate.c if (vcpu->arch.tcr & TCR_WRC_MASK) { arch 271 arch/powerpc/kvm/booke_emulate.c spr_val |= vcpu->arch.tcr & TCR_WRC_MASK; arch 277 arch/powerpc/kvm/booke_emulate.c vcpu->arch.decar = spr_val; arch 298 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivpr = spr_val; arch 304 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; arch 307 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val; arch 310 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; arch 316 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; arch 319 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val; arch 322 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val; arch 325 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val; arch 328 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val; arch 331 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; arch 337 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; arch 340 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val; arch 343 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val; arch 346 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val; arch 349 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val; arch 352 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val; arch 355 arch/powerpc/kvm/booke_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val; arch 358 arch/powerpc/kvm/booke_emulate.c vcpu->arch.mcsr &= ~spr_val; arch 364 arch/powerpc/kvm/booke_emulate.c mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr); arch 373 arch/powerpc/kvm/booke_emulate.c current->thread.debug = vcpu->arch.dbg_reg; arch 374 arch/powerpc/kvm/booke_emulate.c switch_booke_debug_regs(&vcpu->arch.dbg_reg); arch 385 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivpr; arch 388 
arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.shared->dar; arch 391 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.shared->esr; arch 394 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.epr; arch 397 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.csrr0; arch 400 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.csrr1; arch 403 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dsrr0; arch 406 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dsrr1; arch 409 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dbg_reg.iac1; arch 412 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dbg_reg.iac2; arch 416 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dbg_reg.iac3; arch 419 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dbg_reg.iac4; arch 423 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dbg_reg.dac1; arch 426 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dbg_reg.dac2; arch 429 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dbg_reg.dbcr0; arch 434 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dbg_reg.dbcr1; arch 437 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dbg_reg.dbcr2; arch 440 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.dbsr; arch 443 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.tsr; arch 446 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.tcr; arch 450 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; arch 453 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; arch 456 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; arch 459 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; arch 462 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; arch 465 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; arch 468 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; arch 471 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; arch 474 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; arch 477 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; arch 480 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; arch 483 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; arch 486 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; arch 489 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; arch 492 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; arch 495 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; arch 498 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.mcsr; arch 502 arch/powerpc/kvm/booke_emulate.c *spr_val = vcpu->arch.epcr; arch 139 arch/powerpc/kvm/e500.c vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, arch 143 arch/powerpc/kvm/e500.c vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500, arch 225 arch/powerpc/kvm/e500.c if (vcpu->arch.pid != pid) { arch 226 arch/powerpc/kvm/e500.c vcpu_e500->pid[0] = vcpu->arch.pid = pid; arch 310 arch/powerpc/kvm/e500.c if (vcpu->arch.shadow_msr & MSR_SPE) arch 353 arch/powerpc/kvm/e500.c vcpu->arch.pvr = mfspr(SPRN_PVR); arch 356 
arch/powerpc/kvm/e500.c vcpu->arch.cpu_type = KVM_CPU_E500V2; arch 375 arch/powerpc/kvm/e500.c sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; arch 376 arch/powerpc/kvm/e500.c sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; arch 377 arch/powerpc/kvm/e500.c sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; arch 379 arch/powerpc/kvm/e500.c vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; arch 406 arch/powerpc/kvm/e500.c vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = arch 408 arch/powerpc/kvm/e500.c vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = arch 410 arch/powerpc/kvm/e500.c vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = arch 415 arch/powerpc/kvm/e500.c vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = arch 466 arch/powerpc/kvm/e500.c vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); arch 467 arch/powerpc/kvm/e500.c if (!vcpu->arch.shared) { arch 490 arch/powerpc/kvm/e500.c free_page((unsigned long)vcpu->arch.shared); arch 210 arch/powerpc/kvm/e500.h return vcpu->arch.pid & 0xff; arch 215 arch/powerpc/kvm/e500.h return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS)); arch 220 arch/powerpc/kvm/e500.h return !!(vcpu->arch.shared->msr & MSR_PR); arch 225 arch/powerpc/kvm/e500.h return (vcpu->arch.shared->mas6 >> 16) & 0xff; arch 230 arch/powerpc/kvm/e500.h return vcpu->arch.shared->mas6 & 0x1; arch 239 arch/powerpc/kvm/e500.h return (vcpu->arch.shared->mas0 >> 28) & 0x1; arch 244 arch/powerpc/kvm/e500.h return vcpu->arch.shared->mas0 & 0xfff; arch 249 arch/powerpc/kvm/e500.h return (vcpu->arch.shared->mas0 >> 16) & 0xfff; arch 263 arch/powerpc/kvm/e500.h if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS)) arch 307 arch/powerpc/kvm/e500.h return get_thread_specific_lpid(vcpu->kvm->arch.lpid); arch 316 arch/powerpc/kvm/e500.h unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf; arch 331 arch/powerpc/kvm/e500.h has_ftr = ((vcpu->arch.mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2); arch 53 arch/powerpc/kvm/e500_emulate.c ulong param = vcpu->arch.regs.gpr[rb]; arch 59 arch/powerpc/kvm/e500_emulate.c clear_bit(prio, &vcpu->arch.pending_exceptions); arch 65 arch/powerpc/kvm/e500_emulate.c ulong param = vcpu->arch.regs.gpr[rb]; arch 75 arch/powerpc/kvm/e500_emulate.c int cpir = cvcpu->arch.shared->pir; arch 77 arch/powerpc/kvm/e500_emulate.c set_bit(prio, &cvcpu->arch.pending_exceptions); arch 94 arch/powerpc/kvm/e500_emulate.c run->debug.arch.address = vcpu->arch.regs.nip; arch 95 arch/powerpc/kvm/e500_emulate.c run->debug.arch.status = 0; arch 226 arch/powerpc/kvm/e500_emulate.c vcpu->arch.shared->mas0 = spr_val; arch 229 arch/powerpc/kvm/e500_emulate.c vcpu->arch.shared->mas1 = spr_val; arch 232 arch/powerpc/kvm/e500_emulate.c vcpu->arch.shared->mas2 = spr_val; arch 235 arch/powerpc/kvm/e500_emulate.c vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; arch 236 arch/powerpc/kvm/e500_emulate.c vcpu->arch.shared->mas7_3 |= spr_val; arch 239 arch/powerpc/kvm/e500_emulate.c vcpu->arch.shared->mas4 = spr_val; arch 242 arch/powerpc/kvm/e500_emulate.c vcpu->arch.shared->mas6 = spr_val; arch 245 arch/powerpc/kvm/e500_emulate.c vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; arch 246 arch/powerpc/kvm/e500_emulate.c vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; arch 274 arch/powerpc/kvm/e500_emulate.c vcpu->arch.pwrmgtcr0 = spr_val; arch 287 arch/powerpc/kvm/e500_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val; arch 290 arch/powerpc/kvm/e500_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val; 
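
The e500_emulate.c entries on either side of this point are the two halves of privileged-SPR emulation: a guest mtspr/mfspr traps, and KVM shuttles the value between the guest register and the shadow state kept under vcpu->arch (the MAS registers live in the shared page). A minimal mtspr-side sketch, again assuming kernel-tree context; the function name is hypothetical and only two SPRs are shown:

	/* Hypothetical condensation of the mtspr emulation shape shown above. */
	static int emulate_mtspr_sketch(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
	{
		switch (sprn) {
		case SPRN_MAS0:
			vcpu->arch.shared->mas0 = spr_val;	/* MMU assist: TLB/entry select */
			break;
		case SPRN_MAS1:
			vcpu->arch.shared->mas1 = spr_val;	/* valid/TID/TS/TSIZE bits */
			break;
		default:
			return EMULATE_FAIL;	/* defer to the generic booke layer */
		}
		return EMULATE_DONE;
	}

The mfspr side (entries 339 onward below) simply reverses the assignment: *spr_val = vcpu->arch.shared->mas0;
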
arch 293 arch/powerpc/kvm/e500_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
arch 298 arch/powerpc/kvm/e500_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
arch 301 arch/powerpc/kvm/e500_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
arch 305 arch/powerpc/kvm/e500_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
arch 309 arch/powerpc/kvm/e500_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
arch 312 arch/powerpc/kvm/e500_emulate.c vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
arch 339 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.shared->mas0;
arch 342 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.shared->mas1;
arch 345 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.shared->mas2;
arch 348 arch/powerpc/kvm/e500_emulate.c *spr_val = (u32)vcpu->arch.shared->mas7_3;
arch 351 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.shared->mas4;
arch 354 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.shared->mas6;
arch 357 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.shared->mas7_3 >> 32;
arch 361 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.decar;
arch 364 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.tlbcfg[0];
arch 367 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.tlbcfg[1];
arch 372 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.tlbps[0];
arch 377 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.tlbps[1];
arch 400 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.mmucfg;
arch 409 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.eptcfg;
arch 413 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.pwrmgtcr0;
arch 419 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
arch 422 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
arch 425 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
arch 430 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
arch 433 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
arch 437 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
arch 441 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
arch 444 arch/powerpc/kvm/e500_emulate.c *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
arch 71 arch/powerpc/kvm/e500_mmu.c esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
arch 134 arch/powerpc/kvm/e500_mmu.c tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
arch 136 arch/powerpc/kvm/e500_mmu.c tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
arch 138 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
arch 140 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
arch 143 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
arch 144 arch/powerpc/kvm/e500_mmu.c | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
arch 145 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
arch 146 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
arch 332 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
arch 333 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
arch 334 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas1 = gtlbe->mas1;
arch 335 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas2 = gtlbe->mas2;
arch 336 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
arch 360 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
arch 362 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas1 = gtlbe->mas1;
arch 363 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas2 = gtlbe->mas2;
arch 364 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
arch 369 arch/powerpc/kvm/e500_mmu.c tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
arch 372 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
arch 375 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas1 =
arch 376 arch/powerpc/kvm/e500_mmu.c (vcpu->arch.shared->mas6 & MAS6_SPID0)
arch 377 arch/powerpc/kvm/e500_mmu.c | ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
arch 378 arch/powerpc/kvm/e500_mmu.c | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
arch 379 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas2 &= MAS2_EPN;
arch 380 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
arch 382 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
arch 410 arch/powerpc/kvm/e500_mmu.c gtlbe->mas1 = vcpu->arch.shared->mas1;
arch 411 arch/powerpc/kvm/e500_mmu.c gtlbe->mas2 = vcpu->arch.shared->mas2;
arch 412 arch/powerpc/kvm/e500_mmu.c if (!(vcpu->arch.shared->msr & MSR_CM))
arch 414 arch/powerpc/kvm/e500_mmu.c gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
arch 416 arch/powerpc/kvm/e500_mmu.c trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
arch 497 arch/powerpc/kvm/e500_mmu.c unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
arch 504 arch/powerpc/kvm/e500_mmu.c unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
arch 511 arch/powerpc/kvm/e500_mmu.c unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
arch 513 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
arch 518 arch/powerpc/kvm/e500_mmu.c unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
arch 520 arch/powerpc/kvm/e500_mmu.c kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
arch 573 arch/powerpc/kvm/e500_mmu.c sregs->u.e.mas0 = vcpu->arch.shared->mas0;
arch 574 arch/powerpc/kvm/e500_mmu.c sregs->u.e.mas1 = vcpu->arch.shared->mas1;
arch 575 arch/powerpc/kvm/e500_mmu.c sregs->u.e.mas2 = vcpu->arch.shared->mas2;
arch 576 arch/powerpc/kvm/e500_mmu.c sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
arch 577 arch/powerpc/kvm/e500_mmu.c sregs->u.e.mas4 = vcpu->arch.shared->mas4;
arch 578 arch/powerpc/kvm/e500_mmu.c sregs->u.e.mas6 = vcpu->arch.shared->mas6;
arch 580 arch/powerpc/kvm/e500_mmu.c sregs->u.e.mmucfg = vcpu->arch.mmucfg;
arch 581 arch/powerpc/kvm/e500_mmu.c sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
arch 582 arch/powerpc/kvm/e500_mmu.c sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
arch 590 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 = sregs->u.e.mas0;
arch 591 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas1 = sregs->u.e.mas1;
arch 592 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas2 = sregs->u.e.mas2;
arch 593 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
arch 594 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas4 = sregs->u.e.mas4;
arch 595 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas6 = sregs->u.e.mas6;
arch 609 arch/powerpc/kvm/e500_mmu.c *val = get_reg_val(id, vcpu->arch.shared->mas0);
arch 612 arch/powerpc/kvm/e500_mmu.c *val = get_reg_val(id, vcpu->arch.shared->mas1);
arch 615 arch/powerpc/kvm/e500_mmu.c *val = get_reg_val(id, vcpu->arch.shared->mas2);
arch 618 arch/powerpc/kvm/e500_mmu.c *val = get_reg_val(id, vcpu->arch.shared->mas7_3);
arch 621 arch/powerpc/kvm/e500_mmu.c *val = get_reg_val(id, vcpu->arch.shared->mas4);
arch 624 arch/powerpc/kvm/e500_mmu.c *val = get_reg_val(id, vcpu->arch.shared->mas6);
arch 627 arch/powerpc/kvm/e500_mmu.c *val = get_reg_val(id, vcpu->arch.mmucfg);
arch 630 arch/powerpc/kvm/e500_mmu.c *val = get_reg_val(id, vcpu->arch.eptcfg);
arch 637 arch/powerpc/kvm/e500_mmu.c *val = get_reg_val(id, vcpu->arch.tlbcfg[i]);
arch 644 arch/powerpc/kvm/e500_mmu.c *val = get_reg_val(id, vcpu->arch.tlbps[i]);
arch 662 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas0 = set_reg_val(id, *val);
arch 665 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas1 = set_reg_val(id, *val);
arch 668 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas2 = set_reg_val(id, *val);
arch 671 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas7_3 = set_reg_val(id, *val);
arch 674 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas4 = set_reg_val(id, *val);
arch 677 arch/powerpc/kvm/e500_mmu.c vcpu->arch.shared->mas6 = set_reg_val(id, *val);
arch 682 arch/powerpc/kvm/e500_mmu.c if (reg != vcpu->arch.mmucfg)
arch 688 arch/powerpc/kvm/e500_mmu.c if (reg != vcpu->arch.eptcfg)
arch 699 arch/powerpc/kvm/e500_mmu.c if (reg != vcpu->arch.tlbcfg[i])
arch 709 arch/powerpc/kvm/e500_mmu.c if (reg != vcpu->arch.tlbps[i])
arch 724 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
arch 726 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0];
arch 727 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
arch 729 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
arch 730 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1];
arch 731 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
arch 874 arch/powerpc/kvm/e500_mmu.c vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;
arch 877 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
arch 879 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[0] |= params[0].entries;
arch 880 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT;
arch 882 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
arch 884 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[1] |= params[1].entries;
arch 885 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT;
arch 888 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS);
arch 889 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS);
arch 891 arch/powerpc/kvm/e500_mmu.c vcpu->arch.mmucfg &= ~MMUCFG_LRAT;
arch 894 arch/powerpc/kvm/e500_mmu.c vcpu->arch.eptcfg = 0;
arch 895 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT;
arch 896 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND;
arch 131 arch/powerpc/kvm/e500_mmu_host.c __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
arch 136 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->vcpu.kvm->arch.lpid);
arch 162 arch/powerpc/kvm/e500_mmu_host.c ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
arch 174 arch/powerpc/kvm/e500_mmu_host.c magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
arch 311 arch/powerpc/kvm/e500_mmu_host.c u32 pr = vcpu->arch.shared->msr & MSR_PR;
arch 469 arch/powerpc/kvm/e500_mmu_host.c pgdir = vcpu_e500->vcpu.arch.pgdir;
arch 642 arch/powerpc/kvm/e500_mmu_host.c addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;
arch 645 arch/powerpc/kvm/e500_mmu_host.c mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
arch 672 arch/powerpc/kvm/e500_mmu_host.c pr = vcpu->arch.shared->msr & MSR_PR;
arch 102 arch/powerpc/kvm/e500mc.c vcpu->arch.pid = pid;
arch 119 arch/powerpc/kvm/e500mc.c mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
arch 121 arch/powerpc/kvm/e500mc.c mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
arch 122 arch/powerpc/kvm/e500mc.c vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
arch 123 arch/powerpc/kvm/e500mc.c vcpu->arch.epsc = vcpu->arch.eplc;
arch 124 arch/powerpc/kvm/e500mc.c mtspr(SPRN_EPLC, vcpu->arch.eplc);
arch 125 arch/powerpc/kvm/e500mc.c mtspr(SPRN_EPSC, vcpu->arch.epsc);
arch 127 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
arch 128 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
arch 129 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
arch 130 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
arch 131 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
arch 132 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
arch 133 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);
arch 135 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
arch 136 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);
arch 138 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GEPR, vcpu->arch.epr);
arch 139 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
arch 140 arch/powerpc/kvm/e500mc.c mtspr(SPRN_GESR, vcpu->arch.shared->esr);
arch 142 arch/powerpc/kvm/e500mc.c if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
arch 151 arch/powerpc/kvm/e500mc.c vcpu->arch.eplc = mfspr(SPRN_EPLC);
arch 152 arch/powerpc/kvm/e500mc.c vcpu->arch.epsc = mfspr(SPRN_EPSC);
arch 154 arch/powerpc/kvm/e500mc.c vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
arch 155 arch/powerpc/kvm/e500mc.c vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
arch 156 arch/powerpc/kvm/e500mc.c vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
arch 157 arch/powerpc/kvm/e500mc.c vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);
arch 159 arch/powerpc/kvm/e500mc.c vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
arch 160 arch/powerpc/kvm/e500mc.c vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);
arch 162 arch/powerpc/kvm/e500mc.c vcpu->arch.epr = mfspr(SPRN_GEPR);
arch 163 arch/powerpc/kvm/e500mc.c vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
arch 164 arch/powerpc/kvm/e500mc.c vcpu->arch.shared->esr = mfspr(SPRN_GESR);
arch 166 arch/powerpc/kvm/e500mc.c vcpu->arch.oldpir = mfspr(SPRN_PIR);
arch 199 arch/powerpc/kvm/e500mc.c vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
arch 202 arch/powerpc/kvm/e500mc.c vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
arch 204 arch/powerpc/kvm/e500mc.c vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;
arch 206 arch/powerpc/kvm/e500mc.c vcpu->arch.pvr = mfspr(SPRN_PVR);
arch 209 arch/powerpc/kvm/e500mc.c vcpu->arch.cpu_type = KVM_CPU_E500MC;
arch 231 arch/powerpc/kvm/e500mc.c vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
arch 232 arch/powerpc/kvm/e500mc.c sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
arch 233 arch/powerpc/kvm/e500mc.c sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
arch 258 arch/powerpc/kvm/e500mc.c vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
arch 263 arch/powerpc/kvm/e500mc.c vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
arch 265 arch/powerpc/kvm/e500mc.c vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
arch 279 arch/powerpc/kvm/e500mc.c *val = get_reg_val(id, vcpu->arch.sprg9);
arch 295 arch/powerpc/kvm/e500mc.c vcpu->arch.sprg9 = set_reg_val(id, *val);
arch 319 arch/powerpc/kvm/e500mc.c vcpu->arch.oldpir = 0xffffffff;
arch 329 arch/powerpc/kvm/e500mc.c vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
arch 330 arch/powerpc/kvm/e500mc.c if (!vcpu->arch.shared) {
arch 352 arch/powerpc/kvm/e500mc.c free_page((unsigned long)vcpu->arch.shared);
arch 374 arch/powerpc/kvm/e500mc.c kvm->arch.lpid = lpid;
arch 380 arch/powerpc/kvm/e500mc.c int lpid = kvm->arch.lpid;
arch 31 arch/powerpc/kvm/emulate.c pr_debug("mtDEC: %lx\n", vcpu->arch.dec);
arch 32 arch/powerpc/kvm/emulate.c hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
arch 41 arch/powerpc/kvm/emulate.c if (vcpu->arch.dec == 0)
arch 51 arch/powerpc/kvm/emulate.c dec_time = vcpu->arch.dec;
arch 58 arch/powerpc/kvm/emulate.c hrtimer_start(&vcpu->arch.dec_timer,
arch 60 arch/powerpc/kvm/emulate.c vcpu->arch.dec_jiffies = get_tb();
arch 65 arch/powerpc/kvm/emulate.c u64 jd = tb - vcpu->arch.dec_jiffies;
arch 68 arch/powerpc/kvm/emulate.c if (vcpu->arch.dec < jd)
arch 72 arch/powerpc/kvm/emulate.c return vcpu->arch.dec - jd;
arch 94 arch/powerpc/kvm/emulate.c vcpu->arch.dec = (u32) spr_val;
arch 115 arch/powerpc/kvm/emulate.c emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
arch 141 arch/powerpc/kvm/emulate.c spr_val = vcpu->arch.pvr;
arch 176 arch/powerpc/kvm/emulate.c emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
arch 221 arch/powerpc/kvm/emulate.c vcpu->arch.shared->esr | ESR_PTR);
arch 237 arch/powerpc/kvm/emulate.c vcpu->arch.shared->esr | ESR_PTR);
arch 274 arch/powerpc/kvm/emulate.c run->debug.arch.status = 0;
arch 275 arch/powerpc/kvm/emulate.c run->debug.arch.address = kvmppc_get_pc(vcpu);
arch 288 arch/powerpc/kvm/emulate.c emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
arch 87 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vsx_copy_nums = 0;
arch 88 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vsx_offset = 0;
arch 89 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
arch 90 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_sp64_extend = 0;
arch 91 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_sign_extend = 0;
arch 92 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vmx_copy_nums = 0;
arch 93 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vmx_offset = 0;
arch 94 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_host_swabbed = 0;
arch 97 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.regs.msr = vcpu->arch.shared->msr;
arch 98 arch/powerpc/kvm/emulate_loadstore.c if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
arch 124 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_sp64_extend = 1;
arch 144 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
arch 145 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
arch 148 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 151 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 154 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 157 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 162 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vmx_offset =
arch 163 arch/powerpc/kvm/emulate_loadstore.c (vcpu->arch.vaddr_accessed & 0xf)/size;
arch 166 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vmx_copy_nums = 2;
arch 171 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vmx_copy_nums = 1;
arch 191 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_sp64_extend = 1;
arch 195 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 198 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 202 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 205 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 212 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vsx_copy_nums = 1;
arch 215 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vsx_copy_nums =
arch 246 arch/powerpc/kvm/emulate_loadstore.c if (vcpu->kvm->arch.kvm_ops->giveup_ext)
arch 247 arch/powerpc/kvm/emulate_loadstore.c vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
arch 251 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_sp64_extend = 1;
arch 267 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
arch 268 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
arch 270 arch/powerpc/kvm/emulate_loadstore.c if (vcpu->kvm->arch.kvm_ops->giveup_ext)
arch 271 arch/powerpc/kvm/emulate_loadstore.c vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
arch 274 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 277 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 280 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 283 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 288 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vmx_offset =
arch 289 arch/powerpc/kvm/emulate_loadstore.c (vcpu->arch.vaddr_accessed & 0xf)/size;
arch 292 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vmx_copy_nums = 2;
arch 296 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vmx_copy_nums = 1;
arch 315 arch/powerpc/kvm/emulate_loadstore.c if (vcpu->kvm->arch.kvm_ops->giveup_ext)
arch 316 arch/powerpc/kvm/emulate_loadstore.c vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
arch 320 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_sp64_extend = 1;
arch 323 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 326 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_copy_type =
arch 333 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vsx_copy_nums = 1;
arch 336 arch/powerpc/kvm/emulate_loadstore.c vcpu->arch.mmio_vsx_copy_nums =
arch 12 arch/powerpc/kvm/irq.h ret = ret || (kvm->arch.mpic != NULL);
arch 15 arch/powerpc/kvm/irq.h ret = ret || (kvm->arch.xics != NULL);
arch 16 arch/powerpc/kvm/irq.h ret = ret || (kvm->arch.xive != NULL);
arch 118 arch/powerpc/kvm/mpic.c return vcpu ? vcpu->arch.irq_cpu_id : -1;
arch 255 arch/powerpc/kvm/mpic.c pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
arch 273 arch/powerpc/kvm/mpic.c pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
arch 1179 arch/powerpc/kvm/mpic.c struct openpic *opp = vcpu->arch.mpic;
arch 1180 arch/powerpc/kvm/mpic.c int cpu = vcpu->arch.irq_cpu_id;
arch 1636 arch/powerpc/kvm/mpic.c dev->kvm->arch.mpic = NULL;
arch 1662 arch/powerpc/kvm/mpic.c if (dev->kvm->arch.mpic)
arch 1715 arch/powerpc/kvm/mpic.c dev->kvm->arch.mpic = opp;
arch 1752 arch/powerpc/kvm/mpic.c if (vcpu->arch.irq_type) {
arch 1760 arch/powerpc/kvm/mpic.c vcpu->arch.mpic = opp;
arch 1761 arch/powerpc/kvm/mpic.c vcpu->arch.irq_cpu_id = cpu;
arch 1762 arch/powerpc/kvm/mpic.c vcpu->arch.irq_type = KVMPPC_IRQ_MPIC;
arch 1766 arch/powerpc/kvm/mpic.c vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;
arch 1780 arch/powerpc/kvm/mpic.c BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);
arch 1782 arch/powerpc/kvm/mpic.c opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
arch 1796 arch/powerpc/kvm/mpic.c struct openpic *opp = kvm->arch.mpic;
arch 1810 arch/powerpc/kvm/mpic.c struct openpic *opp = kvm->arch.mpic;
arch 1819 arch/powerpc/kvm/mpic.c openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
arch 50 arch/powerpc/kvm/powerpc.c return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
arch 142 arch/powerpc/kvm/powerpc.c struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
arch 184 arch/powerpc/kvm/powerpc.c if (vcpu->arch.intr_msr & MSR_LE)
arch 186 arch/powerpc/kvm/powerpc.c if (shared_big_endian != vcpu->arch.shared_big_endian)
arch 188 arch/powerpc/kvm/powerpc.c vcpu->arch.shared_big_endian = shared_big_endian;
arch 197 arch/powerpc/kvm/powerpc.c vcpu->arch.disable_kernel_nx = true;
arch 201 arch/powerpc/kvm/powerpc.c vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
arch 202 arch/powerpc/kvm/powerpc.c vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
arch 209 arch/powerpc/kvm/powerpc.c if ((vcpu->arch.magic_page_pa & 0xf000) !=
arch 210 arch/powerpc/kvm/powerpc.c ((ulong)vcpu->arch.shared & 0xf000)) {
arch 211 arch/powerpc/kvm/powerpc.c void *old_shared = vcpu->arch.shared;
arch 212 arch/powerpc/kvm/powerpc.c ulong shared = (ulong)vcpu->arch.shared;
arch 216 arch/powerpc/kvm/powerpc.c shared |= vcpu->arch.magic_page_pa & 0xf000;
arch 219 arch/powerpc/kvm/powerpc.c vcpu->arch.shared = new_shared;
arch 257 arch/powerpc/kvm/powerpc.c if (!vcpu->arch.pvr)
arch 261 arch/powerpc/kvm/powerpc.c if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
arch 265 arch/powerpc/kvm/powerpc.c if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
arch 276 arch/powerpc/kvm/powerpc.c vcpu->arch.sane = r;
arch 326 arch/powerpc/kvm/powerpc.c ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
arch 332 arch/powerpc/kvm/powerpc.c if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
arch 333 arch/powerpc/kvm/powerpc.c r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
arch 353 arch/powerpc/kvm/powerpc.c void *magic = vcpu->arch.shared;
arch 369 arch/powerpc/kvm/powerpc.c ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
arch 375 arch/powerpc/kvm/powerpc.c if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
arch 376 arch/powerpc/kvm/powerpc.c rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
arch 399 arch/powerpc/kvm/powerpc.c void *magic = vcpu->arch.shared;
arch 454 arch/powerpc/kvm/powerpc.c kvm->arch.kvm_ops = kvm_ops;
arch 489 arch/powerpc/kvm/powerpc.c module_put(kvm->arch.kvm_ops->owner);
arch 577 arch/powerpc/kvm/powerpc.c if (kvm->arch.emul_smt_mode > 1)
arch 578 arch/powerpc/kvm/powerpc.c r = kvm->arch.emul_smt_mode;
arch 580 arch/powerpc/kvm/powerpc.c r = kvm->arch.smt_mode;
arch 724 arch/powerpc/kvm/powerpc.c vcpu->arch.wqp = &vcpu->wq;
arch 737 arch/powerpc/kvm/powerpc.c hrtimer_cancel(&vcpu->arch.dec_timer);
arch 741 arch/powerpc/kvm/powerpc.c switch (vcpu->arch.irq_type) {
arch 743 arch/powerpc/kvm/powerpc.c kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
arch 773 arch/powerpc/kvm/powerpc.c vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
arch 783 arch/powerpc/kvm/powerpc.c hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
arch 784 arch/powerpc/kvm/powerpc.c vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
arch 785 arch/powerpc/kvm/powerpc.c vcpu->arch.dec_expires = get_tb();
arch 788 arch/powerpc/kvm/powerpc.c mutex_init(&vcpu->arch.exit_timing_lock);
arch 810 arch/powerpc/kvm/powerpc.c mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
arch 819 arch/powerpc/kvm/powerpc.c vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
arch 842 arch/powerpc/kvm/powerpc.c if (kvm->arch.kvm_ops->irq_bypass_add_producer)
arch 843 arch/powerpc/kvm/powerpc.c return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
arch 855 arch/powerpc/kvm/powerpc.c if (kvm->arch.kvm_ops->irq_bypass_del_producer)
arch 856 arch/powerpc/kvm/powerpc.c kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
arch 895 arch/powerpc/kvm/powerpc.c int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
arch 896 arch/powerpc/kvm/powerpc.c int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
arch 914 arch/powerpc/kvm/powerpc.c int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
arch 931 arch/powerpc/kvm/powerpc.c int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
arch 951 arch/powerpc/kvm/powerpc.c int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
arch 952 arch/powerpc/kvm/powerpc.c int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
arch 1020 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vmx_offset);
arch 1021 arch/powerpc/kvm/powerpc.c int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
arch 1036 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vmx_offset);
arch 1037 arch/powerpc/kvm/powerpc.c int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
arch 1052 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vmx_offset);
arch 1053 arch/powerpc/kvm/powerpc.c int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
arch 1068 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vmx_offset);
arch 1069 arch/powerpc/kvm/powerpc.c int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
arch 1120 arch/powerpc/kvm/powerpc.c if (!vcpu->arch.mmio_host_swabbed) {
arch 1137 arch/powerpc/kvm/powerpc.c if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
arch 1140 arch/powerpc/kvm/powerpc.c if (vcpu->arch.mmio_sign_extend) {
arch 1156 arch/powerpc/kvm/powerpc.c switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
arch 1158 arch/powerpc/kvm/powerpc.c kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
arch 1161 arch/powerpc/kvm/powerpc.c if (vcpu->kvm->arch.kvm_ops->giveup_ext)
arch 1162 arch/powerpc/kvm/powerpc.c vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
arch 1164 arch/powerpc/kvm/powerpc.c VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
arch 1168 arch/powerpc/kvm/powerpc.c vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
arch 1171 arch/powerpc/kvm/powerpc.c VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
arch 1172 arch/powerpc/kvm/powerpc.c vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
arch 1177 arch/powerpc/kvm/powerpc.c if (vcpu->kvm->arch.kvm_ops->giveup_ext)
arch 1178 arch/powerpc/kvm/powerpc.c vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
arch 1180 arch/powerpc/kvm/powerpc.c if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
arch 1182 arch/powerpc/kvm/powerpc.c else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
arch 1184 arch/powerpc/kvm/powerpc.c else if (vcpu->arch.mmio_copy_type ==
arch 1187 arch/powerpc/kvm/powerpc.c else if (vcpu->arch.mmio_copy_type ==
arch 1194 arch/powerpc/kvm/powerpc.c if (vcpu->kvm->arch.kvm_ops->giveup_ext)
arch 1195 arch/powerpc/kvm/powerpc.c vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
arch 1197 arch/powerpc/kvm/powerpc.c if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
arch 1199 arch/powerpc/kvm/powerpc.c else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
arch 1201 arch/powerpc/kvm/powerpc.c else if (vcpu->arch.mmio_copy_type ==
arch 1204 arch/powerpc/kvm/powerpc.c else if (vcpu->arch.mmio_copy_type ==
arch 1213 arch/powerpc/kvm/powerpc.c kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
arch 1241 arch/powerpc/kvm/powerpc.c run->mmio.phys_addr = vcpu->arch.paddr_accessed;
arch 1245 arch/powerpc/kvm/powerpc.c vcpu->arch.io_gpr = rt;
arch 1246 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_host_swabbed = host_swabbed;
arch 1249 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_sign_extend = sign_extend;
arch 1291 arch/powerpc/kvm/powerpc.c if (vcpu->arch.mmio_vsx_copy_nums > 4)
arch 1294 arch/powerpc/kvm/powerpc.c while (vcpu->arch.mmio_vsx_copy_nums) {
arch 1301 arch/powerpc/kvm/powerpc.c vcpu->arch.paddr_accessed += run->mmio.len;
arch 1303 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vsx_copy_nums--;
arch 1304 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vsx_offset++;
arch 1329 arch/powerpc/kvm/powerpc.c run->mmio.phys_addr = vcpu->arch.paddr_accessed;
arch 1335 arch/powerpc/kvm/powerpc.c if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
arch 1377 arch/powerpc/kvm/powerpc.c int copy_type = vcpu->arch.mmio_copy_type;
arch 1383 arch/powerpc/kvm/powerpc.c kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
arch 1400 arch/powerpc/kvm/powerpc.c kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
arch 1432 arch/powerpc/kvm/powerpc.c vcpu->arch.io_gpr = rs;
arch 1435 arch/powerpc/kvm/powerpc.c if (vcpu->arch.mmio_vsx_copy_nums > 4)
arch 1438 arch/powerpc/kvm/powerpc.c while (vcpu->arch.mmio_vsx_copy_nums) {
arch 1448 arch/powerpc/kvm/powerpc.c vcpu->arch.paddr_accessed += run->mmio.len;
arch 1450 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vsx_copy_nums--;
arch 1451 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vsx_offset++;
arch 1463 arch/powerpc/kvm/powerpc.c vcpu->arch.paddr_accessed += run->mmio.len;
arch 1466 arch/powerpc/kvm/powerpc.c emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
arch 1467 arch/powerpc/kvm/powerpc.c run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
arch 1470 arch/powerpc/kvm/powerpc.c vcpu->arch.io_gpr, run->mmio.len, 1);
arch 1498 arch/powerpc/kvm/powerpc.c if (vcpu->arch.mmio_vsx_copy_nums > 2)
arch 1501 arch/powerpc/kvm/powerpc.c while (vcpu->arch.mmio_vmx_copy_nums) {
arch 1508 arch/powerpc/kvm/powerpc.c vcpu->arch.paddr_accessed += run->mmio.len;
arch 1509 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vmx_copy_nums--;
arch 1510 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vmx_offset++;
arch 1523 arch/powerpc/kvm/powerpc.c kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
arch 1541 arch/powerpc/kvm/powerpc.c kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
arch 1559 arch/powerpc/kvm/powerpc.c kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
arch 1577 arch/powerpc/kvm/powerpc.c kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
arch 1595 arch/powerpc/kvm/powerpc.c if (vcpu->arch.mmio_vsx_copy_nums > 2)
arch 1598 arch/powerpc/kvm/powerpc.c vcpu->arch.io_gpr = rs;
arch 1600 arch/powerpc/kvm/powerpc.c while (vcpu->arch.mmio_vmx_copy_nums) {
arch 1601 arch/powerpc/kvm/powerpc.c switch (vcpu->arch.mmio_copy_type) {
arch 1628 arch/powerpc/kvm/powerpc.c vcpu->arch.paddr_accessed += run->mmio.len;
arch 1629 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vmx_copy_nums--;
arch 1630 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vmx_offset++;
arch 1642 arch/powerpc/kvm/powerpc.c vcpu->arch.paddr_accessed += run->mmio.len;
arch 1646 arch/powerpc/kvm/powerpc.c vcpu->arch.io_gpr, run->mmio.len, 1);
arch 1649 arch/powerpc/kvm/powerpc.c vcpu->arch.io_gpr, run->mmio.len, 1);
arch 1691 arch/powerpc/kvm/powerpc.c val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
arch 1698 arch/powerpc/kvm/powerpc.c val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
arch 1701 arch/powerpc/kvm/powerpc.c val = get_reg_val(reg->id, vcpu->arch.vrsave);
arch 1742 arch/powerpc/kvm/powerpc.c vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
arch 1749 arch/powerpc/kvm/powerpc.c vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
arch 1756 arch/powerpc/kvm/powerpc.c vcpu->arch.vrsave = set_reg_val(reg->id, val);
arch 1779 arch/powerpc/kvm/powerpc.c if (vcpu->arch.mmio_vsx_copy_nums > 0) {
arch 1780 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vsx_copy_nums--;
arch 1781 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vsx_offset++;
arch 1784 arch/powerpc/kvm/powerpc.c if (vcpu->arch.mmio_vsx_copy_nums > 0) {
arch 1793 arch/powerpc/kvm/powerpc.c if (vcpu->arch.mmio_vmx_copy_nums > 0) {
arch 1794 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vmx_copy_nums--;
arch 1795 arch/powerpc/kvm/powerpc.c vcpu->arch.mmio_vmx_offset++;
arch 1798 arch/powerpc/kvm/powerpc.c if (vcpu->arch.mmio_vmx_copy_nums > 0) {
arch 1806 arch/powerpc/kvm/powerpc.c } else if (vcpu->arch.osi_needed) {
arch 1812 arch/powerpc/kvm/powerpc.c vcpu->arch.osi_needed = 0;
arch 1813 arch/powerpc/kvm/powerpc.c } else if (vcpu->arch.hcall_needed) {
arch 1819 arch/powerpc/kvm/powerpc.c vcpu->arch.hcall_needed = 0;
arch 1821 arch/powerpc/kvm/powerpc.c } else if (vcpu->arch.epr_needed) {
arch 1823 arch/powerpc/kvm/powerpc.c vcpu->arch.epr_needed = 0;
arch 1868 arch/powerpc/kvm/powerpc.c vcpu->arch.osi_enabled = true;
arch 1872 arch/powerpc/kvm/powerpc.c vcpu->arch.papr_enabled = true;
arch 1877 arch/powerpc/kvm/powerpc.c vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
arch 1879 arch/powerpc/kvm/powerpc.c vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
arch 1884 arch/powerpc/kvm/powerpc.c vcpu->arch.watchdog_enabled = true;
arch 1972 arch/powerpc/kvm/powerpc.c vcpu->kvm->arch.fwnmi_enabled = true;
arch 1989 arch/powerpc/kvm/powerpc.c if (kvm->arch.mpic)
arch 1993 arch/powerpc/kvm/powerpc.c if (kvm->arch.xics || kvm->arch.xive)
arch 2153 arch/powerpc/kvm/powerpc.c set_bit(hcall / 4, kvm->arch.enabled_hcalls);
arch 2155 arch/powerpc/kvm/powerpc.c clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
arch 2164 arch/powerpc/kvm/powerpc.c if (kvm->arch.kvm_ops->set_smt_mode)
arch 2165 arch/powerpc/kvm/powerpc.c r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
arch 2172 arch/powerpc/kvm/powerpc.c !kvm->arch.kvm_ops->enable_nested)
arch 2174 arch/powerpc/kvm/powerpc.c r = kvm->arch.kvm_ops->enable_nested(kvm);
arch 2370 arch/powerpc/kvm/powerpc.c r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
arch 2386 arch/powerpc/kvm/powerpc.c if (!kvm->arch.kvm_ops->configure_mmu)
arch 2391 arch/powerpc/kvm/powerpc.c r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
arch 2399 arch/powerpc/kvm/powerpc.c if (!kvm->arch.kvm_ops->get_rmmu_info)
arch 2401 arch/powerpc/kvm/powerpc.c r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
arch 2416 arch/powerpc/kvm/powerpc.c r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
arch 27 arch/powerpc/kvm/timing.c mutex_lock(&vcpu->arch.exit_timing_lock);
arch 29 arch/powerpc/kvm/timing.c vcpu->arch.last_exit_type = 0xDEAD;
arch 31 arch/powerpc/kvm/timing.c vcpu->arch.timing_count_type[i] = 0;
arch 32 arch/powerpc/kvm/timing.c vcpu->arch.timing_max_duration[i] = 0;
arch 33 arch/powerpc/kvm/timing.c vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
arch 34 arch/powerpc/kvm/timing.c vcpu->arch.timing_sum_duration[i] = 0;
arch 35 arch/powerpc/kvm/timing.c vcpu->arch.timing_sum_quad_duration[i] = 0;
arch 37 arch/powerpc/kvm/timing.c vcpu->arch.timing_last_exit = 0;
arch 38 arch/powerpc/kvm/timing.c vcpu->arch.timing_exit.tv64 = 0;
arch 39 arch/powerpc/kvm/timing.c vcpu->arch.timing_last_enter.tv64 = 0;
arch 41 arch/powerpc/kvm/timing.c mutex_unlock(&vcpu->arch.exit_timing_lock);
arch 48 arch/powerpc/kvm/timing.c mutex_lock(&vcpu->arch.exit_timing_lock);
arch 50 arch/powerpc/kvm/timing.c vcpu->arch.timing_count_type[type]++;
arch 53 arch/powerpc/kvm/timing.c old = vcpu->arch.timing_sum_duration[type];
arch 54 arch/powerpc/kvm/timing.c vcpu->arch.timing_sum_duration[type] += duration;
arch 55 arch/powerpc/kvm/timing.c if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
arch 58 arch/powerpc/kvm/timing.c __func__, old, vcpu->arch.timing_sum_duration[type],
arch 59 arch/powerpc/kvm/timing.c type, vcpu->arch.timing_count_type[type]);
arch 63 arch/powerpc/kvm/timing.c old = vcpu->arch.timing_sum_quad_duration[type];
arch 64 arch/powerpc/kvm/timing.c vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
arch 65 arch/powerpc/kvm/timing.c if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
arch 69 arch/powerpc/kvm/timing.c vcpu->arch.timing_sum_quad_duration[type],
arch 70 arch/powerpc/kvm/timing.c type, vcpu->arch.timing_count_type[type]);
arch 74 arch/powerpc/kvm/timing.c if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
arch 75 arch/powerpc/kvm/timing.c vcpu->arch.timing_min_duration[type] = duration;
arch 76 arch/powerpc/kvm/timing.c if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
arch 77 arch/powerpc/kvm/timing.c vcpu->arch.timing_max_duration[type] = duration;
arch 79 arch/powerpc/kvm/timing.c mutex_unlock(&vcpu->arch.exit_timing_lock);
arch 84 arch/powerpc/kvm/timing.c u64 exit = vcpu->arch.timing_last_exit;
arch 85 arch/powerpc/kvm/timing.c u64 enter = vcpu->arch.timing_last_enter.tv64;
arch 88 arch/powerpc/kvm/timing.c vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;
arch 90 arch/powerpc/kvm/timing.c if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
arch 94 arch/powerpc/kvm/timing.c add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
arch 96 arch/powerpc/kvm/timing.c add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
arch 139 arch/powerpc/kvm/timing.c min = vcpu->arch.timing_min_duration[i];
arch 141 arch/powerpc/kvm/timing.c max = vcpu->arch.timing_max_duration[i];
arch 143 arch/powerpc/kvm/timing.c sum = vcpu->arch.timing_sum_duration[i];
arch 145 arch/powerpc/kvm/timing.c sum_quad = vcpu->arch.timing_sum_quad_duration[i];
arch 150 arch/powerpc/kvm/timing.c vcpu->arch.timing_count_type[i],
arch 224 arch/powerpc/kvm/timing.c vcpu->arch.debugfs_exit_timing = debugfs_file;
arch 229 arch/powerpc/kvm/timing.c if (vcpu->arch.debugfs_exit_timing) {
arch 230 arch/powerpc/kvm/timing.c debugfs_remove(vcpu->arch.debugfs_exit_timing);
arch 231 arch/powerpc/kvm/timing.c vcpu->arch.debugfs_exit_timing = NULL;
arch 23 arch/powerpc/kvm/timing.h vcpu->arch.last_exit_type = type;
arch 54 arch/powerpc/kvm/trace_booke.h __entry->msr = vcpu->arch.shared->msr;
arch 55 arch/powerpc/kvm/trace_booke.h __entry->last_inst = vcpu->arch.last_inst;
arch 207 arch/powerpc/kvm/trace_booke.h __entry->pending = vcpu->arch.pending_exceptions;
arch 235 arch/powerpc/kvm/trace_hv.h __entry->ceded = vcpu->arch.ceded;
arch 236 arch/powerpc/kvm/trace_hv.h __entry->pending_exceptions = vcpu->arch.pending_exceptions;
arch 259 arch/powerpc/kvm/trace_hv.h __entry->trap = vcpu->arch.trap;
arch 260 arch/powerpc/kvm/trace_hv.h __entry->ceded = vcpu->arch.ceded;
arch 262 arch/powerpc/kvm/trace_hv.h __entry->msr = vcpu->arch.shregs.msr;
arch 488 arch/powerpc/kvm/trace_hv.h __entry->ret = vcpu->arch.ret;
arch 235 arch/powerpc/kvm/trace_pr.h __entry->srr1 = vcpu->arch.shadow_srr1;
arch 236 arch/powerpc/kvm/trace_pr.h __entry->last_inst = vcpu->arch.last_inst;
arch 111 arch/powerpc/platforms/powernv/vas-trace.h #define TRACE_INCLUDE_PATH ../../arch/powerpc/platforms/powernv
arch 15 arch/riscv/kernel/module-sections.c struct mod_section *got_sec = &mod->arch.got;
arch 34 arch/riscv/kernel/module-sections.c struct mod_section *got_plt_sec = &mod->arch.got_plt;
arch 36 arch/riscv/kernel/module-sections.c struct mod_section *plt_sec = &mod->arch.plt;
arch 102 arch/riscv/kernel/module-sections.c mod->arch.plt.shdr = sechdrs + i;
arch 104 arch/riscv/kernel/module-sections.c mod->arch.got.shdr = sechdrs + i;
arch 106 arch/riscv/kernel/module-sections.c mod->arch.got_plt.shdr = sechdrs + i;
arch 109 arch/riscv/kernel/module-sections.c if (!mod->arch.plt.shdr) {
arch 113 arch/riscv/kernel/module-sections.c if (!mod->arch.got.shdr) {
arch 117 arch/riscv/kernel/module-sections.c if (!mod->arch.got_plt.shdr) {
arch 138 arch/riscv/kernel/module-sections.c mod->arch.plt.shdr->sh_type = SHT_NOBITS;
arch 139 arch/riscv/kernel/module-sections.c mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
arch 140 arch/riscv/kernel/module-sections.c mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES;
arch 141 arch/riscv/kernel/module-sections.c mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
arch 142 arch/riscv/kernel/module-sections.c mod->arch.plt.num_entries = 0;
arch 143 arch/riscv/kernel/module-sections.c mod->arch.plt.max_entries = num_plts;
arch 145 arch/riscv/kernel/module-sections.c mod->arch.got.shdr->sh_type = SHT_NOBITS;
arch 146 arch/riscv/kernel/module-sections.c mod->arch.got.shdr->sh_flags = SHF_ALLOC;
arch 147 arch/riscv/kernel/module-sections.c mod->arch.got.shdr->sh_addralign = L1_CACHE_BYTES;
arch 148 arch/riscv/kernel/module-sections.c mod->arch.got.shdr->sh_size = (num_gots + 1) * sizeof(struct got_entry);
arch 149 arch/riscv/kernel/module-sections.c mod->arch.got.num_entries = 0;
arch 150 arch/riscv/kernel/module-sections.c mod->arch.got.max_entries = num_gots;
arch 152 arch/riscv/kernel/module-sections.c mod->arch.got_plt.shdr->sh_type = SHT_NOBITS;
arch 153 arch/riscv/kernel/module-sections.c mod->arch.got_plt.shdr->sh_flags = SHF_ALLOC;
arch 154 arch/riscv/kernel/module-sections.c mod->arch.got_plt.shdr->sh_addralign = L1_CACHE_BYTES;
arch 155 arch/riscv/kernel/module-sections.c mod->arch.got_plt.shdr->sh_size = (num_plts + 1) * sizeof(struct got_entry);
arch 156 arch/riscv/kernel/module-sections.c mod->arch.got_plt.num_entries = 0;
arch 157 arch/riscv/kernel/module-sections.c mod->arch.got_plt.max_entries = num_plts;
arch 33 arch/s390/kernel/audit.c int audit_classify_arch(int arch)
arch 36 arch/s390/kernel/audit.c if (arch == AUDIT_ARCH_S390)
arch 56 arch/s390/kernel/module.c vfree(mod->arch.syminfo);
arch 57 arch/s390/kernel/module.c mod->arch.syminfo = NULL;
arch 64 arch/s390/kernel/module.c info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
arch 79 arch/s390/kernel/module.c info->got_offset = me->arch.got_size;
arch 80 arch/s390/kernel/module.c me->arch.got_size += sizeof(void*);
arch 91 arch/s390/kernel/module.c info->plt_offset = me->arch.plt_size;
arch 92 arch/s390/kernel/module.c me->arch.plt_size += PLT_ENTRY_SIZE;
arch 132 arch/s390/kernel/module.c me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
arch 133 arch/s390/kernel/module.c me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo),
arch 134 arch/s390/kernel/module.c me->arch.nsyms));
arch 135 arch/s390/kernel/module.c if (!me->arch.syminfo)
arch 139 arch/s390/kernel/module.c for (i = 0; i < me->arch.nsyms; i++) {
arch 145 arch/s390/kernel/module.c me->arch.syminfo[i].got_offset = -1UL;
arch 146 arch/s390/kernel/module.c me->arch.syminfo[i].plt_offset = -1UL;
arch 147 arch/s390/kernel/module.c me->arch.syminfo[i].got_initialized = 0;
arch 148 arch/s390/kernel/module.c me->arch.syminfo[i].plt_initialized = 0;
arch 152 arch/s390/kernel/module.c me->arch.got_size = me->arch.plt_size = 0;
arch 165 arch/s390/kernel/module.c me->arch.got_offset = me->core_layout.size;
arch 166 arch/s390/kernel/module.c me->core_layout.size += me->arch.got_size;
arch 167 arch/s390/kernel/module.c me->arch.plt_offset = me->core_layout.size;
arch 168 arch/s390/kernel/module.c if (me->arch.plt_size) {
arch 170 arch/s390/kernel/module.c me->arch.plt_size += PLT_ENTRY_SIZE;
arch 171 arch/s390/kernel/module.c me->core_layout.size += me->arch.plt_size;
arch 229 arch/s390/kernel/module.c info = me->arch.syminfo + r_sym;
arch 288 arch/s390/kernel/module.c gotent = me->core_layout.base + me->arch.got_offset +
arch 324 arch/s390/kernel/module.c ip = me->core_layout.base + me->arch.plt_offset +
arch 331 arch/s390/kernel/module.c me->arch.plt_offset +
arch 332 arch/s390/kernel/module.c me->arch.plt_size - PLT_ENTRY_SIZE;
arch 347 arch/s390/kernel/module.c val = me->arch.plt_offset - me->arch.got_offset +
arch 355 arch/s390/kernel/module.c me->arch.plt_offset +
arch 376 arch/s390/kernel/module.c ((Elf_Addr) me->core_layout.base + me->arch.got_offset);
arch 386 arch/s390/kernel/module.c val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
arch 449 arch/s390/kernel/module.c !nospec_disable && me->arch.plt_size) {
arch 452 arch/s390/kernel/module.c ij = me->core_layout.base + me->arch.plt_offset +
arch 453 arch/s390/kernel/module.c me->arch.plt_size - PLT_ENTRY_SIZE;
arch 26 arch/s390/kvm/diag.c start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
arch 27 arch/s390/kvm/diag.c end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
arch 41 arch/s390/kvm/diag.c gmap_discard(vcpu->arch.gmap, start, end);
arch 49 arch/s390/kvm/diag.c gmap_discard(vcpu->arch.gmap, start, prefix);
arch 51 arch/s390/kvm/diag.c gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
arch 53 arch/s390/kvm/diag.c gmap_discard(vcpu->arch.gmap, PAGE_SIZE, 2 * PAGE_SIZE);
arch 54 arch/s390/kvm/diag.c gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
arch 73 arch/s390/kvm/diag.c u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
arch 74 arch/s390/kvm/diag.c u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
arch 92 arch/s390/kvm/diag.c if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
arch 109 arch/s390/kvm/diag.c vcpu->arch.pfault_token = parm.token_addr;
arch 110 arch/s390/kvm/diag.c vcpu->arch.pfault_select = parm.select_mask;
arch 111 arch/s390/kvm/diag.c vcpu->arch.pfault_compare = parm.compare_mask;
arch 131 arch/s390/kvm/diag.c if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
arch 134 arch/s390/kvm/diag.c vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
arch 159 arch/s390/kvm/diag.c tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
arch 174 arch/s390/kvm/diag.c unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
arch 208 arch/s390/kvm/diag.c if (!vcpu->kvm->arch.css_support ||
arch 242 arch/s390/kvm/diag.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 266 arch/s390/kvm/gaccess.c if (vcpu->arch.sie_block->eca & ECA_SII) {
arch 269 arch/s390/kvm/gaccess.c read_lock(&vcpu->kvm->arch.sca_lock);
arch 271 arch/s390/kvm/gaccess.c read_unlock(&vcpu->kvm->arch.sca_lock);
arch 274 arch/s390/kvm/gaccess.c return vcpu->kvm->arch.ipte_lock_count != 0;
arch 281 arch/s390/kvm/gaccess.c mutex_lock(&vcpu->kvm->arch.ipte_mutex);
arch 282 arch/s390/kvm/gaccess.c vcpu->kvm->arch.ipte_lock_count++;
arch 283 arch/s390/kvm/gaccess.c if (vcpu->kvm->arch.ipte_lock_count > 1)
arch 286 arch/s390/kvm/gaccess.c read_lock(&vcpu->kvm->arch.sca_lock);
arch 291 arch/s390/kvm/gaccess.c read_unlock(&vcpu->kvm->arch.sca_lock);
arch 298 arch/s390/kvm/gaccess.c read_unlock(&vcpu->kvm->arch.sca_lock);
arch 300 arch/s390/kvm/gaccess.c mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
arch 307 arch/s390/kvm/gaccess.c mutex_lock(&vcpu->kvm->arch.ipte_mutex);
arch 308 arch/s390/kvm/gaccess.c vcpu->kvm->arch.ipte_lock_count--;
arch 309 arch/s390/kvm/gaccess.c if (vcpu->kvm->arch.ipte_lock_count)
arch 311 arch/s390/kvm/gaccess.c read_lock(&vcpu->kvm->arch.sca_lock);
arch 318 arch/s390/kvm/gaccess.c read_unlock(&vcpu->kvm->arch.sca_lock);
arch 319 arch/s390/kvm/gaccess.c wake_up(&vcpu->kvm->arch.ipte_wq);
arch 321 arch/s390/kvm/gaccess.c mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
arch 329 arch/s390/kvm/gaccess.c read_lock(&vcpu->kvm->arch.sca_lock);
arch 334 arch/s390/kvm/gaccess.c read_unlock(&vcpu->kvm->arch.sca_lock);
arch 342 arch/s390/kvm/gaccess.c read_unlock(&vcpu->kvm->arch.sca_lock);
arch 349 arch/s390/kvm/gaccess.c read_lock(&vcpu->kvm->arch.sca_lock);
arch 358 arch/s390/kvm/gaccess.c read_unlock(&vcpu->kvm->arch.sca_lock);
arch 360 arch/s390/kvm/gaccess.c wake_up(&vcpu->kvm->arch.ipte_wq);
arch 365 arch/s390/kvm/gaccess.c if (vcpu->arch.sie_block->eca & ECA_SII)
arch 373 arch/s390/kvm/gaccess.c if (vcpu->arch.sie_block->eca & ECA_SII)
arch 397 arch/s390/kvm/gaccess.c asce->val = vcpu->arch.sie_block->gcr[1];
arch 400 arch/s390/kvm/gaccess.c asce->val = vcpu->arch.sie_block->gcr[7];
arch 408 arch/s390/kvm/gaccess.c ald_addr = vcpu->arch.sie_block->gcr[5];
arch 410 arch/s390/kvm/gaccess.c ald_addr = vcpu->arch.sie_block->gcr[2];
arch 443 arch/s390/kvm/gaccess.c eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
arch 496 arch/s390/kvm/gaccess.c struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
arch 536 arch/s390/kvm/gaccess.c tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
arch 558 arch/s390/kvm/gaccess.c struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
arch 571 arch/s390/kvm/gaccess.c asce->val = vcpu->arch.sie_block->gcr[1];
arch 574 arch/s390/kvm/gaccess.c asce->val = vcpu->arch.sie_block->gcr[7];
arch 577 arch/s390/kvm/gaccess.c asce->val = vcpu->arch.sie_block->gcr[13];
arch 627 arch/s390/kvm/gaccess.c ctlreg0.val = vcpu->arch.sie_block->gcr[0];
arch 787 arch/s390/kvm/gaccess.c union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
arch 788 arch/s390/kvm/gaccess.c psw_t *psw = &vcpu->arch.sie_block->gpsw;
arch 801 arch/s390/kvm/gaccess.c psw_t *psw = &vcpu->arch.sie_block->gpsw;
arch 833 arch/s390/kvm/gaccess.c psw_t *psw = &vcpu->arch.sie_block->gpsw;
arch 907 arch/s390/kvm/gaccess.c psw_t *psw = &vcpu->arch.sie_block->gpsw;
arch 968 arch/s390/kvm/gaccess.c union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
arch 55 arch/s390/kvm/gaccess.h psw_t *psw = &vcpu->arch.sie_block->gpsw;
arch 62 arch/s390/kvm/guestdbg.c u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
arch 63 arch/s390/kvm/guestdbg.c u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
arch 64 arch/s390/kvm/guestdbg.c u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
arch 67 arch/s390/kvm/guestdbg.c if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
arch 68 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.hw_bp_info == NULL)
arch 79 arch/s390/kvm/guestdbg.c for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
arch 80 arch/s390/kvm/guestdbg.c start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
arch 81 arch/s390/kvm/guestdbg.c len = vcpu->arch.guestdbg.hw_bp_info[i].len;
arch 102 arch/s390/kvm/guestdbg.c u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
arch 103 arch/s390/kvm/guestdbg.c u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
arch 104 arch/s390/kvm/guestdbg.c u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
arch 107 arch/s390/kvm/guestdbg.c if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
arch 108 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.hw_wp_info == NULL)
arch 121 arch/s390/kvm/guestdbg.c for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
arch 122 arch/s390/kvm/guestdbg.c start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
arch 123 arch/s390/kvm/guestdbg.c len = vcpu->arch.guestdbg.hw_wp_info[i].len;
arch 132 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
arch 133 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
arch 134 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
arch 135 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
arch 140 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
arch 141 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
arch 142 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
arch 143 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
arch 156 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->gcr[0] &= ~CR0_CLOCK_COMPARATOR_SUBMASK;
arch 157 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
arch 158 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->gcr[10] = 0; arch 159 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->gcr[11] = -1UL; arch 168 arch/s390/kvm/guestdbg.c if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION) arch 169 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION; arch 211 arch/s390/kvm/guestdbg.c if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp) arch 213 arch/s390/kvm/guestdbg.c else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT) arch 216 arch/s390/kvm/guestdbg.c bp_data = memdup_user(dbg->arch.hw_bp, arch 217 arch/s390/kvm/guestdbg.c sizeof(*bp_data) * dbg->arch.nr_hw_bp); arch 221 arch/s390/kvm/guestdbg.c for (i = 0; i < dbg->arch.nr_hw_bp; i++) { arch 253 arch/s390/kvm/guestdbg.c for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) { arch 270 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.nr_hw_bp = nr_bp; arch 271 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.hw_bp_info = bp_info; arch 272 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.nr_hw_wp = nr_wp; arch 273 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.hw_wp_info = wp_info; arch 287 arch/s390/kvm/guestdbg.c for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) { arch 288 arch/s390/kvm/guestdbg.c hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i]; arch 292 arch/s390/kvm/guestdbg.c kfree(vcpu->arch.guestdbg.hw_wp_info); arch 293 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.hw_wp_info = NULL; arch 295 arch/s390/kvm/guestdbg.c kfree(vcpu->arch.guestdbg.hw_bp_info); arch 296 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.hw_bp_info = NULL; arch 298 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.nr_hw_wp = 0; arch 299 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.nr_hw_bp = 0; arch 316 arch/s390/kvm/guestdbg.c struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info; arch 319 arch/s390/kvm/guestdbg.c if (vcpu->arch.guestdbg.nr_hw_bp == 0) arch 322 arch/s390/kvm/guestdbg.c for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) { arch 344 arch/s390/kvm/guestdbg.c if (vcpu->arch.guestdbg.nr_hw_wp == 0) arch 347 arch/s390/kvm/guestdbg.c for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) { arch 348 arch/s390/kvm/guestdbg.c wp_info = &vcpu->arch.guestdbg.hw_wp_info[i]; arch 391 arch/s390/kvm/guestdbg.c struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch; arch 394 arch/s390/kvm/guestdbg.c unsigned long addr = vcpu->arch.sie_block->gpsw.addr; arch 398 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.nr_hw_wp > 0) { arch 407 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.nr_hw_bp > 0) { arch 413 arch/s390/kvm/guestdbg.c vcpu->arch.guestdbg.last_bp = addr; arch 418 arch/s390/kvm/guestdbg.c if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) { arch 442 arch/s390/kvm/guestdbg.c if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) { arch 444 arch/s390/kvm/guestdbg.c *addr = vcpu->arch.sie_block->peraddr; arch 459 arch/s390/kvm/guestdbg.c *addr = __rewind_psw(vcpu->arch.sie_block->gpsw, arch 461 arch/s390/kvm/guestdbg.c if (vcpu->arch.sie_block->icptstatus & 0x01) { arch 462 arch/s390/kvm/guestdbg.c exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4; arch 495 arch/s390/kvm/guestdbg.c (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) arch 499 arch/s390/kvm/guestdbg.c const u64 cr10 = vcpu->arch.sie_block->gcr[10]; arch 500 arch/s390/kvm/guestdbg.c const u64 cr11 = vcpu->arch.sie_block->gcr[11]; arch 505 arch/s390/kvm/guestdbg.c .per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen), arch 522 arch/s390/kvm/guestdbg.c !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH)) arch 539 
arch/s390/kvm/guestdbg.c const u8 perc = vcpu->arch.sie_block->perc; arch 540 arch/s390/kvm/guestdbg.c u64 addr = vcpu->arch.sie_block->gpsw.addr; arch 541 arch/s390/kvm/guestdbg.c u64 cr9 = vcpu->arch.sie_block->gcr[9]; arch 542 arch/s390/kvm/guestdbg.c u64 cr10 = vcpu->arch.sie_block->gcr[10]; arch 543 arch/s390/kvm/guestdbg.c u64 cr11 = vcpu->arch.sie_block->gcr[11]; arch 574 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->perc = guest_perc; arch 577 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->iprcc &= ~PGM_PER; arch 581 arch/s390/kvm/guestdbg.c #define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH) arch 582 arch/s390/kvm/guestdbg.c #define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH) arch 583 arch/s390/kvm/guestdbg.c #define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1) arch 584 arch/s390/kvm/guestdbg.c #define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff) arch 590 arch/s390/kvm/guestdbg.c if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc, arch 591 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->peraddr)) arch 604 arch/s390/kvm/guestdbg.c if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) { arch 605 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->iprcc = 0; arch 606 arch/s390/kvm/guestdbg.c new_as = psw_bits(vcpu->arch.sie_block->gpsw).as; arch 615 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; arch 623 arch/s390/kvm/guestdbg.c vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; arch 27 arch/s390/kvm/intercept.c struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; arch 30 arch/s390/kvm/intercept.c switch (vcpu->arch.sie_block->icptcode) { arch 37 arch/s390/kvm/intercept.c ilen = insn_length(vcpu->arch.sie_block->ipa >> 8); arch 47 arch/s390/kvm/intercept.c ilen = vcpu->arch.sie_block->pgmilc & 0x6; arch 55 arch/s390/kvm/intercept.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; arch 89 arch/s390/kvm/intercept.c int viwhy = vcpu->arch.sie_block->ipb >> 16; arch 106 arch/s390/kvm/intercept.c vcpu->arch.sie_block->ipa, arch 107 arch/s390/kvm/intercept.c vcpu->arch.sie_block->ipb); arch 109 arch/s390/kvm/intercept.c switch (vcpu->arch.sie_block->ipa >> 8) { arch 142 arch/s390/kvm/intercept.c .code = vcpu->arch.sie_block->iprcc, arch 147 arch/s390/kvm/intercept.c switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) { arch 158 arch/s390/kvm/intercept.c pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc; arch 166 arch/s390/kvm/intercept.c pgm_info.exc_access_id = vcpu->arch.sie_block->eai; arch 174 arch/s390/kvm/intercept.c pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc; arch 175 arch/s390/kvm/intercept.c pgm_info.exc_access_id = vcpu->arch.sie_block->eai; arch 176 arch/s390/kvm/intercept.c pgm_info.op_access_id = vcpu->arch.sie_block->oai; arch 179 arch/s390/kvm/intercept.c pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn; arch 180 arch/s390/kvm/intercept.c pgm_info.mon_code = vcpu->arch.sie_block->tecmc; arch 184 arch/s390/kvm/intercept.c pgm_info.data_exc_code = vcpu->arch.sie_block->dxc; arch 187 arch/s390/kvm/intercept.c pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc; arch 188 arch/s390/kvm/intercept.c pgm_info.exc_access_id = vcpu->arch.sie_block->eai; arch 194 arch/s390/kvm/intercept.c if (vcpu->arch.sie_block->iprcc & PGM_PER) { arch 195 arch/s390/kvm/intercept.c pgm_info.per_code = vcpu->arch.sie_block->perc; arch 196 arch/s390/kvm/intercept.c pgm_info.per_atmid = vcpu->arch.sie_block->peratmid; arch 197 arch/s390/kvm/intercept.c pgm_info.per_address = 
vcpu->arch.sie_block->peraddr; arch 198 arch/s390/kvm/intercept.c pgm_info.per_access_id = vcpu->arch.sie_block->peraid; arch 216 arch/s390/kvm/intercept.c itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba; arch 225 arch/s390/kvm/intercept.c #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER) arch 239 arch/s390/kvm/intercept.c if (vcpu->arch.sie_block->iprcc == 0) arch 243 arch/s390/kvm/intercept.c trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc); arch 244 arch/s390/kvm/intercept.c if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) { arch 269 arch/s390/kvm/intercept.c u16 eic = vcpu->arch.sie_block->eic; arch 293 arch/s390/kvm/intercept.c irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr; arch 348 arch/s390/kvm/intercept.c if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */ arch 350 arch/s390/kvm/intercept.c if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */ arch 417 arch/s390/kvm/intercept.c trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa, arch 418 arch/s390/kvm/intercept.c vcpu->arch.sie_block->ipb); arch 420 arch/s390/kvm/intercept.c if (vcpu->arch.sie_block->ipa == 0xb256) arch 423 arch/s390/kvm/intercept.c if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0) arch 436 arch/s390/kvm/intercept.c oldpsw = vcpu->arch.sie_block->gpsw; arch 454 arch/s390/kvm/intercept.c switch (vcpu->arch.sie_block->icptcode) { arch 488 arch/s390/kvm/intercept.c if (vcpu->arch.sie_block->icptstatus & 0x02 && arch 50 arch/s390/kvm/interrupt.c read_lock(&vcpu->kvm->arch.sca_lock); arch 51 arch/s390/kvm/interrupt.c if (vcpu->kvm->arch.use_esca) { arch 52 arch/s390/kvm/interrupt.c struct esca_block *sca = vcpu->kvm->arch.sca; arch 59 arch/s390/kvm/interrupt.c struct bsca_block *sca = vcpu->kvm->arch.sca; arch 66 arch/s390/kvm/interrupt.c read_unlock(&vcpu->kvm->arch.sca_lock); arch 79 arch/s390/kvm/interrupt.c read_lock(&vcpu->kvm->arch.sca_lock); arch 80 arch/s390/kvm/interrupt.c if (vcpu->kvm->arch.use_esca) { arch 81 arch/s390/kvm/interrupt.c struct esca_block *sca = vcpu->kvm->arch.sca; arch 93 arch/s390/kvm/interrupt.c struct bsca_block *sca = vcpu->kvm->arch.sca; arch 105 arch/s390/kvm/interrupt.c read_unlock(&vcpu->kvm->arch.sca_lock); arch 122 arch/s390/kvm/interrupt.c read_lock(&vcpu->kvm->arch.sca_lock); arch 123 arch/s390/kvm/interrupt.c if (vcpu->kvm->arch.use_esca) { arch 124 arch/s390/kvm/interrupt.c struct esca_block *sca = vcpu->kvm->arch.sca; arch 132 arch/s390/kvm/interrupt.c struct bsca_block *sca = vcpu->kvm->arch.sca; arch 140 arch/s390/kvm/interrupt.c read_unlock(&vcpu->kvm->arch.sca_lock); arch 146 arch/s390/kvm/interrupt.c return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); arch 151 arch/s390/kvm/interrupt.c return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); arch 156 arch/s390/kvm/interrupt.c return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); arch 169 arch/s390/kvm/interrupt.c !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK)) arch 180 arch/s390/kvm/interrupt.c const u64 ckc = vcpu->arch.sie_block->ckc; arch 182 arch/s390/kvm/interrupt.c if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) { arch 194 arch/s390/kvm/interrupt.c (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK); arch 327 arch/s390/kvm/interrupt.c return vcpu->kvm->arch.float_int.pending_irqs | arch 328 arch/s390/kvm/interrupt.c vcpu->arch.local_int.pending_irqs; arch 333 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int; arch 358 arch/s390/kvm/interrupt.c if 
(!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i))) arch 378 arch/s390/kvm/interrupt.c if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK)) arch 380 arch/s390/kvm/interrupt.c if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK)) arch 382 arch/s390/kvm/interrupt.c if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK)) arch 384 arch/s390/kvm/interrupt.c if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK)) arch 386 arch/s390/kvm/interrupt.c if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) arch 394 arch/s390/kvm/interrupt.c if (!(vcpu->arch.sie_block->gcr[14] & arch 395 arch/s390/kvm/interrupt.c (vcpu->kvm->arch.float_int.mchk.cr14 | arch 396 arch/s390/kvm/interrupt.c vcpu->arch.local_int.irq.mchk.cr14))) arch 411 arch/s390/kvm/interrupt.c set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask); arch 417 arch/s390/kvm/interrupt.c clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask); arch 424 arch/s390/kvm/interrupt.c vcpu->arch.sie_block->lctl = 0x0000; arch 425 arch/s390/kvm/interrupt.c vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); arch 428 arch/s390/kvm/interrupt.c vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 | arch 430 arch/s390/kvm/interrupt.c vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); arch 441 arch/s390/kvm/interrupt.c vcpu->arch.sie_block->lctl |= LCTL_CR6; arch 451 arch/s390/kvm/interrupt.c vcpu->arch.sie_block->lctl |= LCTL_CR0; arch 459 arch/s390/kvm/interrupt.c vcpu->arch.sie_block->ictl |= ICTL_LPSW; arch 461 arch/s390/kvm/interrupt.c vcpu->arch.sie_block->lctl |= LCTL_CR14; arch 481 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; arch 492 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); arch 494 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); arch 501 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; arch 512 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); arch 514 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); arch 521 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; arch 540 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); arch 542 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); arch 560 arch/s390/kvm/interrupt.c if (MACHINE_HAS_GS && vcpu->arch.gs_enabled) arch 607 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); arch 609 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); arch 624 arch/s390/kvm/interrupt.c rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr, arch 628 arch/s390/kvm/interrupt.c rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8, arch 633 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gcr, 128); arch 647 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; arch 648 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; arch 698 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; arch 707 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); arch 709 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); arch 716 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; arch 736 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; arch 756 arch/s390/kvm/interrupt.c 
arch 758 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
arch 764 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 784 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
arch 785 arch/s390/kvm/interrupt.c rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
arch 792 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 894 arch/s390/kvm/interrupt.c rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
arch 899 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
arch 901 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
arch 907 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
arch 930 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
arch 932 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
arch 941 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
arch 969 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw,
arch 972 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw,
arch 983 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
arch 1013 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw,
arch 1016 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw,
arch 1036 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw,
arch 1039 arch/s390/kvm/interrupt.c &vcpu->arch.sie_block->gpsw,
arch 1049 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
arch 1055 arch/s390/kvm/interrupt.c fi = &vcpu->kvm->arch.float_int;
arch 1116 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1135 arch/s390/kvm/interrupt.c (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
arch 1151 arch/s390/kvm/interrupt.c const u64 ckc = vcpu->arch.sie_block->ckc;
arch 1155 arch/s390/kvm/interrupt.c if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
arch 1182 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
arch 1198 arch/s390/kvm/interrupt.c vcpu->arch.sie_block->gcr[6] >> 24))
arch 1213 arch/s390/kvm/interrupt.c hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
arch 1221 arch/s390/kvm/interrupt.c hrtimer_cancel(&vcpu->arch.ckc_timer);
arch 1242 arch/s390/kvm/interrupt.c vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
arch 1257 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1270 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1351 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1393 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1410 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1436 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1456 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1483 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1496 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1516 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1548 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1562 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1577 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
arch 1615 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
arch 1650 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
arch 1691 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
arch 1717 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
arch 1735 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
arch 1755 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
arch 1769 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
arch 1784 arch/s390/kvm/interrupt.c fi = &kvm->arch.float_int;
arch 1819 arch/s390/kvm/interrupt.c sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
arch 1822 arch/s390/kvm/interrupt.c sigcpu = kvm->arch.float_int.next_rr_cpu++;
arch 1823 arch/s390/kvm/interrupt.c kvm->arch.float_int.next_rr_cpu %= online_vcpus;
arch 1838 arch/s390/kvm/interrupt.c kvm->arch.gisa_int.origin))
arch 1980 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 1987 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 2042 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 2081 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
arch 2098 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
arch 2137 arch/s390/kvm/interrupt.c fi = &kvm->arch.float_int;
arch 2187 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
arch 2305 arch/s390/kvm/interrupt.c return kvm->arch.adapters[id];
arch 2324 arch/s390/kvm/interrupt.c if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
arch 2341 arch/s390/kvm/interrupt.c dev->kvm->arch.adapters[adapter->id] = adapter;
arch 2374 arch/s390/kvm/interrupt.c map->addr = gmap_translate(kvm->arch.gmap, addr);
arch 2429 arch/s390/kvm/interrupt.c if (!kvm->arch.adapters[i])
arch 2432 arch/s390/kvm/interrupt.c &kvm->arch.adapters[i]->maps, list) {
arch 2437 arch/s390/kvm/interrupt.c kfree(kvm->arch.adapters[i]);
arch 2498 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
arch 2538 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
arch 2579 arch/s390/kvm/interrupt.c struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
arch 2610 arch/s390/kvm/interrupt.c dev->kvm->arch.gmap->pfault_enabled = 1;
arch 2613 arch/s390/kvm/interrupt.c dev->kvm->arch.gmap->pfault_enabled = 0;
arch 2672 arch/s390/kvm/interrupt.c if (dev->kvm->arch.flic)
arch 2674 arch/s390/kvm/interrupt.c dev->kvm->arch.flic = dev;
arch 2680 arch/s390/kvm/interrupt.c dev->kvm->arch.flic = NULL;
arch 2849 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 2933 arch/s390/kvm/interrupt.c struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
arch 2952 arch/s390/kvm/interrupt.c store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
arch 2988 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
arch 2991 arch/s390/kvm/interrupt.c for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
arch 2995 arch/s390/kvm/interrupt.c deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
arch 3063 arch/s390/kvm/interrupt.c gi = &kvm->arch.gisa_int;
arch 3074 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
arch 3084 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
arch 3088 arch/s390/kvm/interrupt.c gi->origin = &kvm->arch.sie_page2->gisa;
arch 3101 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
arch 3133 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
arch 3172 arch/s390/kvm/interrupt.c struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
arch 276 arch/s390/kvm/kvm-s390.c kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
arch 278 arch/s390/kvm/kvm-s390.c kvm->arch.epoch = vcpu->arch.sie_block->epoch;
arch 279 arch/s390/kvm/kvm-s390.c kvm->arch.epdx = vcpu->arch.sie_block->epdx;
arch 281 arch/s390/kvm/kvm-s390.c if (vcpu->arch.cputm_enabled)
arch 282 arch/s390/kvm/kvm-s390.c vcpu->arch.cputm_start += *delta;
arch 283 arch/s390/kvm/kvm-s390.c if (vcpu->arch.vsie_block)
arch 284 arch/s390/kvm/kvm-s390.c kvm_clock_sync_scb(vcpu->arch.vsie_block,
arch 581 arch/s390/kvm/kvm-s390.c struct gmap *gmap = kvm->arch.gmap;
arch 672 arch/s390/kvm/kvm-s390.c kvm->arch.use_irqchip = 1;
arch 677 arch/s390/kvm/kvm-s390.c kvm->arch.user_sigp = 1;
arch 685 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 129);
arch 686 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_list, 129);
arch 688 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 134);
arch 689 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_list, 134);
arch 692 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 135);
arch 693 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_list, 135);
arch 696 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 148);
arch 697 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_list, 148);
arch 700 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 152);
arch 701 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_list, 152);
arch 716 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 64);
arch 717 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_list, 64);
arch 729 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 72);
arch 730 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_list, 72);
arch 743 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 133);
arch 744 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_list, 133);
arch 755 arch/s390/kvm/kvm-s390.c else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
arch 767 arch/s390/kvm/kvm-s390.c kvm->arch.use_skf = 0;
arch 768 arch/s390/kvm/kvm-s390.c kvm->arch.use_pfmfi = 0;
arch 776 arch/s390/kvm/kvm-s390.c kvm->arch.user_stsi = 1;
arch 781 arch/s390/kvm/kvm-s390.c kvm->arch.user_instr0 = 1;
arch 800 arch/s390/kvm/kvm-s390.c kvm->arch.mem_limit);
arch 801 arch/s390/kvm/kvm-s390.c if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
arch 828 arch/s390/kvm/kvm-s390.c kvm->arch.use_cmma = 1;
arch 830 arch/s390/kvm/kvm-s390.c kvm->arch.use_pfmfi = 0;
arch 840 arch/s390/kvm/kvm-s390.c if (!kvm->arch.use_cmma)
arch 846 arch/s390/kvm/kvm-s390.c s390_reset_cmma(kvm->arch.gmap->mm);
arch 860 arch/s390/kvm/kvm-s390.c if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
arch 861 arch/s390/kvm/kvm-s390.c new_limit > kvm->arch.mem_limit)
arch 880 arch/s390/kvm/kvm-s390.c gmap_remove(kvm->arch.gmap);
arch 882 arch/s390/kvm/kvm-s390.c kvm->arch.gmap = new;
arch 889 arch/s390/kvm/kvm-s390.c (void *) kvm->arch.gmap->asce);
arch 927 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.crycb->aes_wrapping_key_mask,
arch 928 arch/s390/kvm/kvm-s390.c sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
arch 929 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.aes_kw = 1;
arch 938 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.crycb->dea_wrapping_key_mask,
arch 939 arch/s390/kvm/kvm-s390.c sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
arch 940 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.dea_kw = 1;
arch 948 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.aes_kw = 0;
arch 949 arch/s390/kvm/kvm-s390.c memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
arch 950 arch/s390/kvm/kvm-s390.c sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
arch 958 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.dea_kw = 0;
arch 959 arch/s390/kvm/kvm-s390.c memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
arch 960 arch/s390/kvm/kvm-s390.c sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
arch 968 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.apie = 1;
arch 975 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.apie = 0;
arch 1008 arch/s390/kvm/kvm-s390.c if (kvm->arch.migration_mode)
arch 1014 arch/s390/kvm/kvm-s390.c if (!kvm->arch.use_cmma) {
arch 1015 arch/s390/kvm/kvm-s390.c kvm->arch.migration_mode = 1;
arch 1032 arch/s390/kvm/kvm-s390.c atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
arch 1033 arch/s390/kvm/kvm-s390.c kvm->arch.migration_mode = 1;
arch 1045 arch/s390/kvm/kvm-s390.c if (!kvm->arch.migration_mode)
arch 1047 arch/s390/kvm/kvm-s390.c kvm->arch.migration_mode = 0;
arch 1048 arch/s390/kvm/kvm-s390.c if (kvm->arch.use_cmma)
arch 1077 arch/s390/kvm/kvm-s390.c u64 mig = kvm->arch.migration_mode;
arch 1165 arch/s390/kvm/kvm-s390.c gtod->tod = htod.tod + kvm->arch.epoch;
arch 1168 arch/s390/kvm/kvm-s390.c gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
arch 1256 arch/s390/kvm/kvm-s390.c kvm->arch.model.cpuid = proc->cpuid;
arch 1261 arch/s390/kvm/kvm-s390.c kvm->arch.model.ibc = unblocked_ibc;
arch 1263 arch/s390/kvm/kvm-s390.c kvm->arch.model.ibc = lowest_ibc;
arch 1265 arch/s390/kvm/kvm-s390.c kvm->arch.model.ibc = proc->ibc;
arch 1267 arch/s390/kvm/kvm-s390.c memcpy(kvm->arch.model.fac_list, proc->fac_list,
arch 1270 arch/s390/kvm/kvm-s390.c kvm->arch.model.ibc,
arch 1271 arch/s390/kvm/kvm-s390.c kvm->arch.model.cpuid);
arch 1273 arch/s390/kvm/kvm-s390.c kvm->arch.model.fac_list[0],
arch 1274 arch/s390/kvm/kvm-s390.c kvm->arch.model.fac_list[1],
arch 1275 arch/s390/kvm/kvm-s390.c kvm->arch.model.fac_list[2]);
arch 1301 arch/s390/kvm/kvm-s390.c bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
arch 1320 arch/s390/kvm/kvm-s390.c if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
arch 1328 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
arch 1329 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
arch 1330 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
arch 1331 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
arch 1333 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
arch 1334 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
arch 1336 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
arch 1337 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
arch 1339 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
arch 1340 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
arch 1342 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
arch 1343 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
arch 1345 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
arch 1346 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
arch 1348 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
arch 1349 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
arch 1351 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
arch 1352 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
arch 1354 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
arch 1355 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
arch 1357 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
arch 1358 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
arch 1360 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
arch 1361 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
arch 1363 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
arch 1364 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
arch 1366 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
arch 1367 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
arch 1369 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
arch 1370 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
arch 1372 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
arch 1373 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
arch 1375 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
arch 1376 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
arch 1377 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
arch 1378 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
arch 1380 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
arch 1381 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
arch 1382 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
arch 1383 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
arch 1416 arch/s390/kvm/kvm-s390.c proc->cpuid = kvm->arch.model.cpuid;
arch 1417 arch/s390/kvm/kvm-s390.c proc->ibc = kvm->arch.model.ibc;
arch 1418 arch/s390/kvm/kvm-s390.c memcpy(&proc->fac_list, kvm->arch.model.fac_list,
arch 1421 arch/s390/kvm/kvm-s390.c kvm->arch.model.ibc,
arch 1422 arch/s390/kvm/kvm-s390.c kvm->arch.model.cpuid);
arch 1424 arch/s390/kvm/kvm-s390.c kvm->arch.model.fac_list[0],
arch 1425 arch/s390/kvm/kvm-s390.c kvm->arch.model.fac_list[1],
arch 1426 arch/s390/kvm/kvm-s390.c kvm->arch.model.fac_list[2]);
arch 1446 arch/s390/kvm/kvm-s390.c memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
arch 1451 arch/s390/kvm/kvm-s390.c kvm->arch.model.ibc,
arch 1452 arch/s390/kvm/kvm-s390.c kvm->arch.model.cpuid);
arch 1473 arch/s390/kvm/kvm-s390.c bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
arch 1504 arch/s390/kvm/kvm-s390.c if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
arch 1509 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
arch 1510 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
arch 1511 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
arch 1512 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
arch 1514 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
arch 1515 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
arch 1517 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
arch 1518 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
arch 1520 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
arch 1521 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
arch 1523 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
arch 1524 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
arch 1526 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
arch 1527 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
arch 1529 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
arch 1530 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
arch 1532 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
arch 1533 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
arch 1535 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
arch 1536 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
arch 1538 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
arch 1539 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
arch 1541 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
arch 1542 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
arch 1544 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
arch 1545 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
arch 1547 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
arch 1548 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
arch 1550 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
arch 1551 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
arch 1553 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
arch 1554 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
arch 1556 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
arch 1557 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
arch 1558 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
arch 1559 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
arch 1561 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
arch 1562 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
arch 1563 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
arch 1564 arch/s390/kvm/kvm-s390.c ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
arch 2016 arch/s390/kvm/kvm-s390.c atomic64_dec(&kvm->arch.cmma_dirty_pages);
arch 2057 arch/s390/kvm/kvm-s390.c if (!kvm->arch.use_cmma)
arch 2064 arch/s390/kvm/kvm-s390.c if (!peek && !kvm->arch.migration_mode)
arch 2073 arch/s390/kvm/kvm-s390.c if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
arch 2091 arch/s390/kvm/kvm-s390.c if (kvm->arch.migration_mode)
arch 2092 arch/s390/kvm/kvm-s390.c args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
arch 2117 arch/s390/kvm/kvm-s390.c if (!kvm->arch.use_cmma)
arch 2188 arch/s390/kvm/kvm-s390.c if (kvm->arch.use_irqchip) {
arch 2292 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
arch 2295 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
arch 2302 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
arch 2304 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
arch 2310 arch/s390/kvm/kvm-s390.c struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
arch 2315 arch/s390/kvm/kvm-s390.c switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
arch 2352 arch/s390/kvm/kvm-s390.c memset(&kvm->arch.crypto.crycb->apcb0, 0,
arch 2353 arch/s390/kvm/kvm-s390.c sizeof(kvm->arch.crypto.crycb->apcb0));
arch 2354 arch/s390/kvm/kvm-s390.c memset(&kvm->arch.crypto.crycb->apcb1, 0,
arch 2355 arch/s390/kvm/kvm-s390.c sizeof(kvm->arch.crypto.crycb->apcb1));
arch 2376 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
arch 2383 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.aes_kw = 1;
arch 2384 arch/s390/kvm/kvm-s390.c kvm->arch.crypto.dea_kw = 1;
arch 2385 arch/s390/kvm/kvm-s390.c get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
arch 2386 arch/s390/kvm/kvm-s390.c sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
arch 2387 arch/s390/kvm/kvm-s390.c get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
arch 2388 arch/s390/kvm/kvm-s390.c sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
arch 2393 arch/s390/kvm/kvm-s390.c if (kvm->arch.use_esca)
arch 2394 arch/s390/kvm/kvm-s390.c free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
arch 2396 arch/s390/kvm/kvm-s390.c free_page((unsigned long)(kvm->arch.sca));
arch 2397 arch/s390/kvm/kvm-s390.c kvm->arch.sca = NULL;
arch 2426 arch/s390/kvm/kvm-s390.c rwlock_init(&kvm->arch.sca_lock);
arch 2428 arch/s390/kvm/kvm-s390.c kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
arch 2429 arch/s390/kvm/kvm-s390.c if (!kvm->arch.sca)
arch 2435 arch/s390/kvm/kvm-s390.c kvm->arch.sca = (struct bsca_block *)
arch 2436 arch/s390/kvm/kvm-s390.c ((char *) kvm->arch.sca + sca_offset);
arch 2441 arch/s390/kvm/kvm-s390.c kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
arch 2442 arch/s390/kvm/kvm-s390.c if (!kvm->arch.dbf)
arch 2446 arch/s390/kvm/kvm-s390.c kvm->arch.sie_page2 =
arch 2448 arch/s390/kvm/kvm-s390.c if (!kvm->arch.sie_page2)
arch 2451 arch/s390/kvm/kvm-s390.c kvm->arch.sie_page2->kvm = kvm;
arch 2452 arch/s390/kvm/kvm-s390.c kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
arch 2455 arch/s390/kvm/kvm-s390.c kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
arch 2458 arch/s390/kvm/kvm-s390.c kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
arch 2461 arch/s390/kvm/kvm-s390.c kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
arch 2464 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 138);
arch 2465 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_list, 138);
arch 2467 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 74);
arch 2468 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_list, 74);
arch 2470 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 147);
arch 2471 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_list, 147);
arch 2475 arch/s390/kvm/kvm-s390.c set_kvm_facility(kvm->arch.model.fac_mask, 65);
arch 2477 arch/s390/kvm/kvm-s390.c kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
arch 2478 arch/s390/kvm/kvm-s390.c kvm->arch.model.ibc = sclp.ibc & 0x0fff;
arch 2482 arch/s390/kvm/kvm-s390.c mutex_init(&kvm->arch.float_int.ais_lock);
arch 2483 arch/s390/kvm/kvm-s390.c spin_lock_init(&kvm->arch.float_int.lock);
arch 2485 arch/s390/kvm/kvm-s390.c INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
arch 2486 arch/s390/kvm/kvm-s390.c init_waitqueue_head(&kvm->arch.ipte_wq);
arch 2487 arch/s390/kvm/kvm-s390.c mutex_init(&kvm->arch.ipte_mutex);
arch 2489 arch/s390/kvm/kvm-s390.c debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
arch 2493 arch/s390/kvm/kvm-s390.c kvm->arch.gmap = NULL;
arch 2494 arch/s390/kvm/kvm-s390.c kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
arch 2497 arch/s390/kvm/kvm-s390.c kvm->arch.mem_limit = TASK_SIZE_MAX;
arch 2499 arch/s390/kvm/kvm-s390.c kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
arch 2501 arch/s390/kvm/kvm-s390.c kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
arch 2502 arch/s390/kvm/kvm-s390.c if (!kvm->arch.gmap)
arch 2504 arch/s390/kvm/kvm-s390.c kvm->arch.gmap->private = kvm;
arch 2505 arch/s390/kvm/kvm-s390.c kvm->arch.gmap->pfault_enabled = 0;
arch 2508 arch/s390/kvm/kvm-s390.c kvm->arch.use_pfmfi = sclp.has_pfmfi;
arch 2509 arch/s390/kvm/kvm-s390.c kvm->arch.use_skf = sclp.has_skey;
arch 2510 arch/s390/kvm/kvm-s390.c spin_lock_init(&kvm->arch.start_stop_lock);
arch 2517 arch/s390/kvm/kvm-s390.c free_page((unsigned long)kvm->arch.sie_page2);
arch 2518 arch/s390/kvm/kvm-s390.c debug_unregister(kvm->arch.dbf);
arch 2534 arch/s390/kvm/kvm-s390.c gmap_remove(vcpu->arch.gmap);
arch 2536 arch/s390/kvm/kvm-s390.c if (vcpu->kvm->arch.use_cmma)
arch 2538 arch/s390/kvm/kvm-s390.c free_page((unsigned long)(vcpu->arch.sie_block));
arch 2564 arch/s390/kvm/kvm-s390.c debug_unregister(kvm->arch.dbf);
arch 2566 arch/s390/kvm/kvm-s390.c free_page((unsigned long)kvm->arch.sie_page2);
arch 2568 arch/s390/kvm/kvm-s390.c gmap_remove(kvm->arch.gmap);
arch 2578 arch/s390/kvm/kvm-s390.c vcpu->arch.gmap = gmap_create(current->mm, -1UL);
arch 2579 arch/s390/kvm/kvm-s390.c if (!vcpu->arch.gmap)
arch 2581 arch/s390/kvm/kvm-s390.c vcpu->arch.gmap->private = vcpu->kvm;
arch 2590 arch/s390/kvm/kvm-s390.c read_lock(&vcpu->kvm->arch.sca_lock);
arch 2591 arch/s390/kvm/kvm-s390.c if (vcpu->kvm->arch.use_esca) {
arch 2592 arch/s390/kvm/kvm-s390.c struct esca_block *sca = vcpu->kvm->arch.sca;
arch 2597 arch/s390/kvm/kvm-s390.c struct bsca_block *sca = vcpu->kvm->arch.sca;
arch 2602 arch/s390/kvm/kvm-s390.c read_unlock(&vcpu->kvm->arch.sca_lock);
arch 2608 arch/s390/kvm/kvm-s390.c struct bsca_block *sca = vcpu->kvm->arch.sca;
arch 2611 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
arch 2612 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
arch 2615 arch/s390/kvm/kvm-s390.c read_lock(&vcpu->kvm->arch.sca_lock);
arch 2616 arch/s390/kvm/kvm-s390.c if (vcpu->kvm->arch.use_esca) {
arch 2617 arch/s390/kvm/kvm-s390.c struct esca_block *sca = vcpu->kvm->arch.sca;
arch 2619 arch/s390/kvm/kvm-s390.c sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
arch 2620 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
arch 2621 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
arch 2622 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
arch 2625 arch/s390/kvm/kvm-s390.c struct bsca_block *sca = vcpu->kvm->arch.sca;
arch 2627 arch/s390/kvm/kvm-s390.c sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
arch 2628 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
arch 2629 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
arch 2632 arch/s390/kvm/kvm-s390.c read_unlock(&vcpu->kvm->arch.sca_lock);
arch 2655 arch/s390/kvm/kvm-s390.c struct bsca_block *old_sca = kvm->arch.sca;
arch 2669 arch/s390/kvm/kvm-s390.c write_lock(&kvm->arch.sca_lock);
arch 2674 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->scaoh = scaoh;
arch 2675 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->scaol = scaol;
arch 2676 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
arch 2678 arch/s390/kvm/kvm-s390.c kvm->arch.sca = new_sca;
arch 2679 arch/s390/kvm/kvm-s390.c kvm->arch.use_esca = 1;
arch 2681 arch/s390/kvm/kvm-s390.c write_unlock(&kvm->arch.sca_lock);
arch 2687 arch/s390/kvm/kvm-s390.c old_sca, kvm->arch.sca);
arch 2706 arch/s390/kvm/kvm-s390.c rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
arch 2714 arch/s390/kvm/kvm-s390.c vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
arch 2748 arch/s390/kvm/kvm-s390.c WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
arch 2749 arch/s390/kvm/kvm-s390.c raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
arch 2750 arch/s390/kvm/kvm-s390.c vcpu->arch.cputm_start = get_tod_clock_fast();
arch 2751 arch/s390/kvm/kvm-s390.c raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
arch 2757 arch/s390/kvm/kvm-s390.c WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
arch 2758 arch/s390/kvm/kvm-s390.c raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
arch 2759 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
arch 2760 arch/s390/kvm/kvm-s390.c vcpu->arch.cputm_start = 0;
arch 2761 arch/s390/kvm/kvm-s390.c raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
arch 2767 arch/s390/kvm/kvm-s390.c WARN_ON_ONCE(vcpu->arch.cputm_enabled);
arch 2768 arch/s390/kvm/kvm-s390.c vcpu->arch.cputm_enabled = true;
arch 2775 arch/s390/kvm/kvm-s390.c WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
arch 2777 arch/s390/kvm/kvm-s390.c vcpu->arch.cputm_enabled = false;
arch 2798 arch/s390/kvm/kvm-s390.c raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
arch 2799 arch/s390/kvm/kvm-s390.c if (vcpu->arch.cputm_enabled)
arch 2800 arch/s390/kvm/kvm-s390.c vcpu->arch.cputm_start = get_tod_clock_fast();
arch 2801 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->cputm = cputm;
arch 2802 arch/s390/kvm/kvm-s390.c raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
arch 2812 arch/s390/kvm/kvm-s390.c if (unlikely(!vcpu->arch.cputm_enabled))
arch 2813 arch/s390/kvm/kvm-s390.c return vcpu->arch.sie_block->cputm;
arch 2817 arch/s390/kvm/kvm-s390.c seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
arch 2823 arch/s390/kvm/kvm-s390.c value = vcpu->arch.sie_block->cputm;
arch 2825 arch/s390/kvm/kvm-s390.c if (likely(vcpu->arch.cputm_start))
arch 2826 arch/s390/kvm/kvm-s390.c value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
arch 2827 arch/s390/kvm/kvm-s390.c } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
arch 2835 arch/s390/kvm/kvm-s390.c gmap_enable(vcpu->arch.enabled_gmap);
arch 2837 arch/s390/kvm/kvm-s390.c if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
arch 2845 arch/s390/kvm/kvm-s390.c if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
arch 2848 arch/s390/kvm/kvm-s390.c vcpu->arch.enabled_gmap = gmap_get_enabled();
arch 2849 arch/s390/kvm/kvm-s390.c gmap_disable(vcpu->arch.enabled_gmap);
arch 2856 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gpsw.mask = 0UL;
arch 2857 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gpsw.addr = 0UL;
arch 2860 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ckc = 0UL;
arch 2861 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->todpr = 0;
arch 2862 arch/s390/kvm/kvm-s390.c memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
arch 2863 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
arch 2866 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
arch 2870 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gbea = 1;
arch 2871 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->pp = 0;
arch 2872 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
arch 2873 arch/s390/kvm/kvm-s390.c vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
arch 2884 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
arch 2885 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
arch 2889 arch/s390/kvm/kvm-s390.c vcpu->arch.gmap = vcpu->kvm->arch.gmap;
arch 2892 arch/s390/kvm/kvm-s390.c if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
arch 2893 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
arch 2895 arch/s390/kvm/kvm-s390.c vcpu->arch.enabled_gmap = vcpu->arch.gmap;
arch 2900 arch/s390/kvm/kvm-s390.c if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
arch 2923 arch/s390/kvm/kvm-s390.c if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
arch 2926 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
arch 2927 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
arch 2928 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->eca &= ~ECA_APIE;
arch 2929 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecd &= ~ECD_ECC;
arch 2931 arch/s390/kvm/kvm-s390.c if (vcpu->kvm->arch.crypto.apie)
arch 2932 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->eca |= ECA_APIE;
arch 2935 arch/s390/kvm/kvm-s390.c if (vcpu->kvm->arch.crypto.aes_kw) {
arch 2936 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb3 |= ECB3_AES;
arch 2939 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecd |= ECD_ECC;
arch 2942 arch/s390/kvm/kvm-s390.c if (vcpu->kvm->arch.crypto.dea_kw)
arch 2943 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
arch 2948 arch/s390/kvm/kvm-s390.c free_page(vcpu->arch.sie_block->cbrlo);
arch 2949 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->cbrlo = 0;
arch 2954 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
arch 2955 arch/s390/kvm/kvm-s390.c if (!vcpu->arch.sie_block->cbrlo)
arch 2962 arch/s390/kvm/kvm-s390.c struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
arch 2964 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ibc = model->ibc;
arch 2966 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
arch 2973 arch/s390/kvm/kvm-s390.c atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
arch 2986 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
arch 2988 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb |= ECB_SRSI;
arch 2990 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb |= ECB_TE;
arch 2992 arch/s390/kvm/kvm-s390.c if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
arch 2993 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
arch 2995 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
arch 2996 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
arch 2998 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->eca |= ECA_CEI;
arch 3000 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->eca |= ECA_IB;
arch 3002 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->eca |= ECA_SII;
arch 3004 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->eca |= ECA_SIGPI;
arch 3006 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->eca |= ECA_VX;
arch 3007 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
arch 3010 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecd |= ECD_MEF;
arch 3012 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
arch 3013 arch/s390/kvm/kvm-s390.c if (vcpu->arch.sie_block->gd) {
arch 3014 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->eca |= ECA_AIV;
arch 3016 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
arch 3018 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
arch 3020 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
arch 3025 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
arch 3027 arch/s390/kvm/kvm-s390.c if (vcpu->kvm->arch.use_cmma) {
arch 3032 arch/s390/kvm/kvm-s390.c hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
arch 3033 arch/s390/kvm/kvm-s390.c vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
arch 3035 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->hpid = HPID_KVM;
arch 3063 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block = &sie_page->sie_block;
arch 3064 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
arch 3067 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->mso = 0;
arch 3068 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->msl = sclp.hamax;
arch 3070 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->icpua = id;
arch 3071 arch/s390/kvm/kvm-s390.c spin_lock_init(&vcpu->arch.local_int.lock);
arch 3072 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
arch 3073 arch/s390/kvm/kvm-s390.c if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
arch 3074 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gd |= GISA_FORMAT1;
arch 3075 arch/s390/kvm/kvm-s390.c seqcount_init(&vcpu->arch.cputm_seqcount);
arch 3081 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block);
arch 3082 arch/s390/kvm/kvm-s390.c trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
arch 3086 arch/s390/kvm/kvm-s390.c free_page((unsigned long)(vcpu->arch.sie_block));
arch 3100 arch/s390/kvm/kvm-s390.c return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
arch 3105 arch/s390/kvm/kvm-s390.c atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
arch 3111 arch/s390/kvm/kvm-s390.c atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
arch 3116 arch/s390/kvm/kvm-s390.c atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
arch 3122 arch/s390/kvm/kvm-s390.c return atomic_read(&vcpu->arch.sie_block->prog20) &
arch 3128 arch/s390/kvm/kvm-s390.c atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
arch 3139 arch/s390/kvm/kvm-s390.c while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
arch 3199 arch/s390/kvm/kvm-s390.c r = put_user(vcpu->arch.sie_block->todpr,
arch 3203 arch/s390/kvm/kvm-s390.c r = put_user(vcpu->arch.sie_block->epoch,
arch 3211 arch/s390/kvm/kvm-s390.c r = put_user(vcpu->arch.sie_block->ckc,
arch 3215 arch/s390/kvm/kvm-s390.c r = put_user(vcpu->arch.pfault_token,
arch 3219 arch/s390/kvm/kvm-s390.c r = put_user(vcpu->arch.pfault_compare,
arch 3223 arch/s390/kvm/kvm-s390.c r = put_user(vcpu->arch.pfault_select,
arch 3227 arch/s390/kvm/kvm-s390.c r = put_user(vcpu->arch.sie_block->pp,
arch 3231 arch/s390/kvm/kvm-s390.c r = put_user(vcpu->arch.sie_block->gbea,
arch 3249 arch/s390/kvm/kvm-s390.c r = get_user(vcpu->arch.sie_block->todpr,
arch 3253 arch/s390/kvm/kvm-s390.c r = get_user(vcpu->arch.sie_block->epoch,
arch 3262 arch/s390/kvm/kvm-s390.c r = get_user(vcpu->arch.sie_block->ckc,
arch 3266 arch/s390/kvm/kvm-s390.c r = get_user(vcpu->arch.pfault_token,
arch 3268 arch/s390/kvm/kvm-s390.c if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
arch 3272 arch/s390/kvm/kvm-s390.c r = get_user(vcpu->arch.pfault_compare,
arch 3276 arch/s390/kvm/kvm-s390.c r = get_user(vcpu->arch.pfault_select,
arch 3280 arch/s390/kvm/kvm-s390.c r = get_user(vcpu->arch.sie_block->pp,
arch 3284 arch/s390/kvm/kvm-s390.c r = get_user(vcpu->arch.sie_block->gbea,
arch 3322 arch/s390/kvm/kvm-s390.c memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
arch 3334 arch/s390/kvm/kvm-s390.c memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
arch 3430 arch/s390/kvm/kvm-s390.c vcpu->arch.guestdbg.last_bp = 0;
arch 3467 arch/s390/kvm/kvm-s390.c vcpu->kvm->arch.user_cpu_state_ctrl = 1;
arch 3507 arch/s390/kvm/kvm-s390.c rc = gmap_mprotect_notify(vcpu->arch.gmap,
arch 3518 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ihcpu = 0xffff;
arch 3539 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
arch 3549 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
arch 3558 arch/s390/kvm/kvm-s390.c if ((vcpu->kvm->arch.use_cmma) &&
arch 3560 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
arch 3584 arch/s390/kvm/kvm-s390.c kvm->arch.epoch = gtod->tod - htod.tod;
arch 3585 arch/s390/kvm/kvm-s390.c kvm->arch.epdx = 0;
arch 3587 arch/s390/kvm/kvm-s390.c kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
arch 3588 arch/s390/kvm/kvm-s390.c if (kvm->arch.epoch > gtod->tod)
arch 3589 arch/s390/kvm/kvm-s390.c kvm->arch.epdx -= 1;
arch 3594 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->epoch = kvm->arch.epoch;
arch 3595 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->epdx = kvm->arch.epdx;
arch 3615 arch/s390/kvm/kvm-s390.c return gmap_fault(vcpu->arch.gmap, gpa,
arch 3639 arch/s390/kvm/kvm-s390.c trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
arch 3640 arch/s390/kvm/kvm-s390.c __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
arch 3646 arch/s390/kvm/kvm-s390.c trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
arch 3647 arch/s390/kvm/kvm-s390.c __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
arch 3668 arch/s390/kvm/kvm-s390.c struct kvm_arch_async_pf arch;
arch 3671 arch/s390/kvm/kvm-s390.c if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
arch 3673 arch/s390/kvm/kvm-s390.c if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
arch 3674 arch/s390/kvm/kvm-s390.c vcpu->arch.pfault_compare)
arch 3680 arch/s390/kvm/kvm-s390.c if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
arch 3682 arch/s390/kvm/kvm-s390.c if (!vcpu->arch.gmap->pfault_enabled)
arch 3687 arch/s390/kvm/kvm-s390.c if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
arch 3690 arch/s390/kvm/kvm-s390.c rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
arch 3705 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
arch 3706 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
arch 3729 arch/s390/kvm/kvm-s390.c clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
arch 3731 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->icptcode = 0;
arch 3732 arch/s390/kvm/kvm-s390.c cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
arch 3758 arch/s390/kvm/kvm-s390.c rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
arch 3767 arch/s390/kvm/kvm-s390.c pgm_info = vcpu->arch.pgm;
arch 3781 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->icptcode);
arch 3782 arch/s390/kvm/kvm-s390.c trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
arch 3787 arch/s390/kvm/kvm-s390.c vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
arch 3788 arch/s390/kvm/kvm-s390.c vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
arch 3792 arch/s390/kvm/kvm-s390.c sie_page = container_of(vcpu->arch.sie_block,
arch 3799 arch/s390/kvm/kvm-s390.c if (vcpu->arch.sie_block->icptcode > 0) {
arch 3805 arch/s390/kvm/kvm-s390.c vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
arch 3806 arch/s390/kvm/kvm-s390.c vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
arch 3807 arch/s390/kvm/kvm-s390.c vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
arch 3852 arch/s390/kvm/kvm-s390.c exit_reason = sie64a(vcpu->arch.sie_block,
arch 3874 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
arch 3875 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
arch 3879 arch/s390/kvm/kvm-s390.c memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
arch 3885 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
arch 3886 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
arch 3887 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
arch 3888 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
arch 3891 arch/s390/kvm/kvm-s390.c vcpu->arch.pfault_token = kvm_run->s.regs.pft;
arch 3892 arch/s390/kvm/kvm-s390.c vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
arch 3893 arch/s390/kvm/kvm-s390.c vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
arch 3894 arch/s390/kvm/kvm-s390.c if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
arch 3904 arch/s390/kvm/kvm-s390.c !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
arch 3906 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb3 |= ECB3_RI;
arch 3915 arch/s390/kvm/kvm-s390.c !vcpu->arch.gs_enabled) {
arch 3917 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecb |= ECB_GS;
arch 3918 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
arch 3919 arch/s390/kvm/kvm-s390.c vcpu->arch.gs_enabled = 1;
arch 3923 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
arch 3924 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
arch 3926 arch/s390/kvm/kvm-s390.c save_access_regs(vcpu->arch.host_acrs);
arch 3930 arch/s390/kvm/kvm-s390.c vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
arch 3931 arch/s390/kvm/kvm-s390.c vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
arch 3944 arch/s390/kvm/kvm-s390.c vcpu->arch.host_gscb = current->thread.gs_cb;
arch 3945 arch/s390/kvm/kvm-s390.c save_gs_cb(vcpu->arch.host_gscb);
arch 3947 arch/s390/kvm/kvm-s390.c if (vcpu->arch.gs_enabled) {
arch 3961 arch/s390/kvm/kvm-s390.c kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
arch 3962 arch/s390/kvm/kvm-s390.c kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
arch 3964 arch/s390/kvm/kvm-s390.c memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
arch 3966 arch/s390/kvm/kvm-s390.c kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
arch 3967 arch/s390/kvm/kvm-s390.c kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
arch 3968 arch/s390/kvm/kvm-s390.c kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
arch 3969 arch/s390/kvm/kvm-s390.c kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
arch 3970 arch/s390/kvm/kvm-s390.c kvm_run->s.regs.pft = vcpu->arch.pfault_token;
arch 3971 arch/s390/kvm/kvm-s390.c kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
arch 3972 arch/s390/kvm/kvm-s390.c kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
arch 3973 arch/s390/kvm/kvm-s390.c kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
arch 3975 arch/s390/kvm/kvm-s390.c restore_access_regs(vcpu->arch.host_acrs);
arch 3980 arch/s390/kvm/kvm-s390.c current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
arch 3981 arch/s390/kvm/kvm-s390.c current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
arch 3984 arch/s390/kvm/kvm-s390.c if (vcpu->arch.gs_enabled)
arch 3987 arch/s390/kvm/kvm-s390.c current->thread.gs_cb = vcpu->arch.host_gscb;
arch 3988 arch/s390/kvm/kvm-s390.c restore_gs_cb(vcpu->arch.host_gscb);
arch 3990 arch/s390/kvm/kvm-s390.c if (!vcpu->arch.host_gscb)
arch 3992 arch/s390/kvm/kvm-s390.c vcpu->arch.host_gscb = NULL;
arch 4097 arch/s390/kvm/kvm-s390.c &vcpu->arch.sie_block->gpsw, 16);
arch 4103 arch/s390/kvm/kvm-s390.c &vcpu->arch.sie_block->todpr, 4);
arch 4107 arch/s390/kvm/kvm-s390.c clkcomp = vcpu->arch.sie_block->ckc >> 8;
arch 4113 arch/s390/kvm/kvm-s390.c &vcpu->arch.sie_block->gcr, 128);
arch 4164 arch/s390/kvm/kvm-s390.c spin_lock(&vcpu->kvm->arch.start_stop_lock);
arch 4190 arch/s390/kvm/kvm-s390.c spin_unlock(&vcpu->kvm->arch.start_stop_lock);
arch 4204 arch/s390/kvm/kvm-s390.c spin_lock(&vcpu->kvm->arch.start_stop_lock);
arch 4228 arch/s390/kvm/kvm-s390.c spin_unlock(&vcpu->kvm->arch.start_stop_lock);
arch 4242 arch/s390/kvm/kvm-s390.c if (!vcpu->kvm->arch.css_support) {
arch 4243 arch/s390/kvm/kvm-s390.c vcpu->kvm->arch.css_support = 1;
arch 4311 arch/s390/kvm/kvm-s390.c kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
arch 4399 arch/s390/kvm/kvm-s390.c r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
arch 4416 arch/s390/kvm/kvm-s390.c r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
arch 4422 arch/s390/kvm/kvm-s390.c r = gmap_fault(vcpu->arch.gmap, arg, 0);
arch 4490 arch/s390/kvm/kvm-s390.c vmf->page = virt_to_page(vcpu->arch.sie_block);
arch 4521 arch/s390/kvm/kvm-s390.c if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
arch 4537 arch/s390/kvm/kvm-s390.c rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
arch 4541 arch/s390/kvm/kvm-s390.c rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
arch 4547 arch/s390/kvm/kvm-s390.c rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
arch 23 arch/s390/kvm/kvm-s390.h #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE))
arch 25 arch/s390/kvm/kvm-s390.h #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
arch 36 arch/s390/kvm/kvm-s390.h debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
arch 42 arch/s390/kvm/kvm-s390.h debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
arch 44 arch/s390/kvm/kvm-s390.h d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
arch 50 arch/s390/kvm/kvm-s390.h atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
arch 55 arch/s390/kvm/kvm-s390.h atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
arch 60 arch/s390/kvm/kvm-s390.h return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
arch 70 arch/s390/kvm/kvm-s390.h return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
arch 76 arch/s390/kvm/kvm-s390.h if (kvm->arch.gmap)
arch 87 arch/s390/kvm/kvm-s390.h return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
arch 94 arch/s390/kvm/kvm-s390.h vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
arch 101 arch/s390/kvm/kvm-s390.h u32 base2 = vcpu->arch.sie_block->ipb >> 28;
arch 102 arch/s390/kvm/kvm-s390.h u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
arch 114 arch/s390/kvm/kvm-s390.h u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
arch 115 arch/s390/kvm/kvm-s390.h u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
arch 116 arch/s390/kvm/kvm-s390.h u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
arch 117 arch/s390/kvm/kvm-s390.h u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
arch 131 arch/s390/kvm/kvm-s390.h *r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
arch 133 arch/s390/kvm/kvm-s390.h *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
arch 138 arch/s390/kvm/kvm-s390.h u32 base2 = vcpu->arch.sie_block->ipb >> 28;
arch 139 arch/s390/kvm/kvm-s390.h u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
arch 140 arch/s390/kvm/kvm-s390.h ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
arch 153 arch/s390/kvm/kvm-s390.h u32 base2 = vcpu->arch.sie_block->ipb >> 28;
arch 154 arch/s390/kvm/kvm-s390.h u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
arch 165 arch/s390/kvm/kvm-s390.h vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
arch 166 arch/s390/kvm/kvm-s390.h vcpu->arch.sie_block->gpsw.mask |= cc << 44;
arch 172 arch/s390/kvm/kvm-s390.h return __test_facility(nr, kvm->arch.model.fac_mask) &&
arch 173 arch/s390/kvm/kvm-s390.h __test_facility(nr, kvm->arch.model.fac_list);
arch 190 arch/s390/kvm/kvm-s390.h return test_bit_inv(nr, kvm->arch.cpu_feat);
arch 196 arch/s390/kvm/kvm-s390.h return kvm->arch.user_cpu_state_ctrl != 0;
arch 240 arch/s390/kvm/kvm-s390.h struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
arch 251 arch/s390/kvm/kvm-s390.h vcpu->arch.sie_block->icptstatus &= ~0x02;
arch 328 arch/s390/kvm/kvm-s390.h rc = get_tod_clock_fast() + kvm->arch.epoch;
arch 363 arch/s390/kvm/kvm-s390.h return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
arch 401 arch/s390/kvm/kvm-s390.h struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */
arch 41 arch/s390/kvm/priv.c vcpu->arch.sie_block->ecb3 |= ECB3_RI;
arch 50 arch/s390/kvm/priv.c if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
arch 67 arch/s390/kvm/priv.c vcpu->arch.sie_block->ecb |= ECB_GS;
arch 68 arch/s390/kvm/priv.c vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
arch 69 arch/s390/kvm/priv.c vcpu->arch.gs_enabled = 1;
arch 78 arch/s390/kvm/priv.c int code = vcpu->arch.sie_block->ipb & 0xff;
arch 95 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 121 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 159 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 189 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 212 arch/s390/kvm/priv.c if (vcpu->arch.skey_enabled)
arch 222 arch/s390/kvm/priv.c if (!vcpu->kvm->arch.use_skf)
arch 223 arch/s390/kvm/priv.c vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
arch 225 arch/s390/kvm/priv.c vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
arch 226 arch/s390/kvm/priv.c vcpu->arch.skey_enabled = true;
arch 237 arch/s390/kvm/priv.c if (vcpu->kvm->arch.use_skf) {
arch 256 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 303 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 345 arch/s390/kvm/priv.c unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
arch 354 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 417 arch/s390/kvm/priv.c if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
arch 430 arch/s390/kvm/priv.c if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
arch 432 arch/s390/kvm/priv.c wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
arch 445 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 452 arch/s390/kvm/priv.c return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
arch 483 arch/s390/kvm/priv.c inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
arch 562 arch/s390/kvm/priv.c vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
arch 571 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 574 arch/s390/kvm/priv.c if (vcpu->kvm->arch.css_support) {
arch 579 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->ipa == 0xb236)
arch 581 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->ipa == 0xb235)
arch 622 arch/s390/kvm/priv.c if (!(vcpu->arch.sie_block->eca & ECA_APIE))
arch 638 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 649 arch/s390/kvm/priv.c if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
arch 661 arch/s390/kvm/priv.c if (vcpu->kvm->arch.crypto.pqap_hook) {
arch 662 arch/s390/kvm/priv.c if (!try_module_get(vcpu->kvm->arch.crypto.pqap_hook->owner))
arch 664 arch/s390/kvm/priv.c ret = vcpu->kvm->arch.crypto.pqap_hook->hook(vcpu);
arch 665 arch/s390/kvm/priv.c module_put(vcpu->kvm->arch.crypto.pqap_hook->owner);
arch 688 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 695 arch/s390/kvm/priv.c fac = *vcpu->kvm->arch.model.fac_list >> 32;
arch 729 arch/s390/kvm/priv.c psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
arch 766 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 775 arch/s390/kvm/priv.c vcpu->arch.sie_block->gpsw = new_psw;
arch 776 arch/s390/kvm/priv.c if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
arch 783 arch/s390/kvm/priv.c u64 stidp_data = vcpu->kvm->arch.model.cpuid;
arch 790 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 857 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
arch 904 arch/s390/kvm/priv.c if (vcpu->kvm->arch.user_stsi) {
arch 922 arch/s390/kvm/priv.c switch (vcpu->arch.sie_block->ipa & 0x00ff) {
arch/s390/kvm/priv.c switch (vcpu->arch.sie_block->ipa & 0x00ff) { arch 988 arch/s390/kvm/priv.c vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; arch 992 arch/s390/kvm/priv.c vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL; arch 1021 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) arch 1046 arch/s390/kvm/priv.c return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); arch 1062 arch/s390/kvm/priv.c psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT) arch 1108 arch/s390/kvm/priv.c if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { arch 1136 arch/s390/kvm/priv.c entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; arch 1169 arch/s390/kvm/priv.c cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK); arch 1178 arch/s390/kvm/priv.c atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages); arch 1187 arch/s390/kvm/priv.c int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; arch 1193 arch/s390/kvm/priv.c gmap = vcpu->arch.gmap; arch 1195 arch/s390/kvm/priv.c if (!vcpu->kvm->arch.use_cmma) arch 1198 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) arch 1201 arch/s390/kvm/priv.c orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; arch 1207 arch/s390/kvm/priv.c if (!vcpu->kvm->arch.migration_mode) { arch 1231 arch/s390/kvm/priv.c vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; arch 1247 arch/s390/kvm/priv.c vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ arch 1248 arch/s390/kvm/priv.c cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); arch 1258 arch/s390/kvm/priv.c switch (vcpu->arch.sie_block->ipa & 0x00ff) { arch 1276 arch/s390/kvm/priv.c int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; arch 1277 arch/s390/kvm/priv.c int reg3 = vcpu->arch.sie_block->ipa & 0x000f; arch 1285 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) arch 1303 arch/s390/kvm/priv.c vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; arch 1304 arch/s390/kvm/priv.c vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++]; arch 1315 arch/s390/kvm/priv.c int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; arch 1316 arch/s390/kvm/priv.c int reg3 = vcpu->arch.sie_block->ipa & 0x000f; arch 1324 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) arch 1338 arch/s390/kvm/priv.c ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; arch 1349 arch/s390/kvm/priv.c int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; arch 1350 arch/s390/kvm/priv.c int reg3 = vcpu->arch.sie_block->ipa & 0x000f; arch 1358 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) arch 1376 arch/s390/kvm/priv.c vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++]; arch 1387 arch/s390/kvm/priv.c int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; arch 1388 arch/s390/kvm/priv.c int reg3 = vcpu->arch.sie_block->ipa & 0x000f; arch 1396 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) arch 1410 arch/s390/kvm/priv.c ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg]; arch 1421 arch/s390/kvm/priv.c switch (vcpu->arch.sie_block->ipb & 0x000000ff) { arch 1445 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) arch 1455 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) arch 1485 arch/s390/kvm/priv.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) arch 1492 arch/s390/kvm/priv.c switch (vcpu->arch.sie_block->ipa & 0x00ff) { arch 1506 arch/s390/kvm/priv.c if 
(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) arch 1514 arch/s390/kvm/priv.c vcpu->arch.sie_block->todpr = value; arch 1530 arch/s390/kvm/priv.c switch (vcpu->arch.sie_block->ipa & 0x00ff) { arch 76 arch/s390/kvm/sigp.c psw = &dst_vcpu->arch.sie_block->gpsw; arch 77 arch/s390/kvm/sigp.c p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */ arch 78 arch/s390/kvm/sigp.c s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */ arch 255 arch/s390/kvm/sigp.c struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; arch 362 arch/s390/kvm/sigp.c if (!vcpu->kvm->arch.user_sigp) arch 411 arch/s390/kvm/sigp.c int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; arch 412 arch/s390/kvm/sigp.c int r3 = vcpu->arch.sie_block->ipa & 0x000f; arch 419 arch/s390/kvm/sigp.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) arch 462 arch/s390/kvm/sigp.c int r3 = vcpu->arch.sie_block->ipa & 0x000f; arch 27 arch/s390/kvm/trace.h __entry->pswmask = vcpu->arch.sie_block->gpsw.mask; \ arch 28 arch/s390/kvm/trace.h __entry->pswaddr = vcpu->arch.sie_block->gpsw.addr; \ arch 296 arch/s390/kvm/vsie.c int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK; arch 301 arch/s390/kvm/vsie.c apie_h = vcpu->arch.sie_block->eca & ECA_APIE; arch 316 arch/s390/kvm/vsie.c vcpu->kvm->arch.crypto.crycb, arch 324 arch/s390/kvm/vsie.c ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 & arch 326 arch/s390/kvm/vsie.c ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC; arch 341 arch/s390/kvm/vsie.c vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask; arch 369 arch/s390/kvm/vsie.c if (vcpu->kvm->arch.model.ibc && new_ibc) { arch 375 arch/s390/kvm/vsie.c if (scb_s->ibc > vcpu->kvm->arch.model.ibc) arch 376 arch/s390/kvm/vsie.c scb_s->ibc = vcpu->kvm->arch.model.ibc; arch 579 arch/s390/kvm/vsie.c for (i = 0; i < kvm->arch.vsie.page_count; i++) { arch 580 arch/s390/kvm/vsie.c page = READ_ONCE(kvm->arch.vsie.pages[i]); arch 1020 arch/s390/kvm/vsie.c vcpu->arch.sie_block->fpf & FPF_BPBC) arch 1033 arch/s390/kvm/vsie.c vcpu->arch.sie_block->prog0c |= PROG_IN_SIE; arch 1038 arch/s390/kvm/vsie.c vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE; arch 1095 arch/s390/kvm/vsie.c asce = vcpu->arch.sie_block->gcr[1]; arch 1096 arch/s390/kvm/vsie.c cr0.val = vcpu->arch.sie_block->gcr[0]; arch 1110 arch/s390/kvm/vsie.c gmap = gmap_shadow(vcpu->arch.gmap, asce, edat); arch 1126 arch/s390/kvm/vsie.c WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s); arch 1137 arch/s390/kvm/vsie.c scb_s->epoch += vcpu->kvm->arch.epoch; arch 1140 arch/s390/kvm/vsie.c scb_s->epdx += vcpu->kvm->arch.epdx; arch 1141 arch/s390/kvm/vsie.c if (scb_s->epoch < vcpu->kvm->arch.epoch) arch 1154 arch/s390/kvm/vsie.c WRITE_ONCE(vcpu->arch.vsie_block, NULL); arch 1178 arch/s390/kvm/vsie.c gmap_enable(vcpu->arch.gmap); arch 1224 arch/s390/kvm/vsie.c page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9); arch 1238 arch/s390/kvm/vsie.c mutex_lock(&kvm->arch.vsie.mutex); arch 1239 arch/s390/kvm/vsie.c if (kvm->arch.vsie.page_count < nr_vcpus) { arch 1242 arch/s390/kvm/vsie.c mutex_unlock(&kvm->arch.vsie.mutex); arch 1246 arch/s390/kvm/vsie.c kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page; arch 1247 arch/s390/kvm/vsie.c kvm->arch.vsie.page_count++; arch 1251 arch/s390/kvm/vsie.c page = kvm->arch.vsie.pages[kvm->arch.vsie.next]; arch 1255 arch/s390/kvm/vsie.c kvm->arch.vsie.next++; arch 1256 arch/s390/kvm/vsie.c kvm->arch.vsie.next %= nr_vcpus; arch 1258 arch/s390/kvm/vsie.c 
radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9); arch 1262 arch/s390/kvm/vsie.c if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) { arch 1264 arch/s390/kvm/vsie.c mutex_unlock(&kvm->arch.vsie.mutex); arch 1267 arch/s390/kvm/vsie.c mutex_unlock(&kvm->arch.vsie.mutex); arch 1294 arch/s390/kvm/vsie.c if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) arch 1341 arch/s390/kvm/vsie.c mutex_init(&kvm->arch.vsie.mutex); arch 1342 arch/s390/kvm/vsie.c INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL); arch 1352 arch/s390/kvm/vsie.c mutex_lock(&kvm->arch.vsie.mutex); arch 1353 arch/s390/kvm/vsie.c for (i = 0; i < kvm->arch.vsie.page_count; i++) { arch 1354 arch/s390/kvm/vsie.c page = kvm->arch.vsie.pages[i]; arch 1355 arch/s390/kvm/vsie.c kvm->arch.vsie.pages[i] = NULL; arch 1359 arch/s390/kvm/vsie.c radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9); arch 1362 arch/s390/kvm/vsie.c kvm->arch.vsie.page_count = 0; arch 1363 arch/s390/kvm/vsie.c mutex_unlock(&kvm->arch.vsie.mutex); arch 1368 arch/s390/kvm/vsie.c struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block); arch 77 arch/sh/include/asm/syscall_32.h int arch = AUDIT_ARCH_SH; arch 80 arch/sh/include/asm/syscall_32.h arch |= __AUDIT_ARCH_LE; arch 82 arch/sh/include/asm/syscall_32.h return arch; arch 64 arch/sh/include/asm/syscall_64.h int arch = AUDIT_ARCH_SH; arch 67 arch/sh/include/asm/syscall_64.h arch |= __AUDIT_ARCH_64BIT; arch 70 arch/sh/include/asm/syscall_64.h arch |= __AUDIT_ARCH_LE; arch 73 arch/sh/include/asm/syscall_64.h return arch; arch 857 arch/sh/kernel/dwarf.c list_add_tail(&cie->link, &mod->arch.cie_list); arch 949 arch/sh/kernel/dwarf.c list_add_tail(&fde->link, &mod->arch.fde_list); arch 1110 arch/sh/kernel/dwarf.c INIT_LIST_HEAD(&me->arch.cie_list); arch 1111 arch/sh/kernel/dwarf.c INIT_LIST_HEAD(&me->arch.fde_list); arch 1138 arch/sh/kernel/dwarf.c list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) { arch 1148 arch/sh/kernel/dwarf.c list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) { arch 34 arch/sparc/kernel/audit.c int audit_classify_arch(int arch) arch 37 arch/sparc/kernel/audit.c if (arch == AUDIT_ARCH_SPARC) arch 14 arch/um/include/asm/mmu.h struct uml_arch_mm_context arch; arch 28 arch/um/include/asm/processor-generic.h struct arch_thread arch; arch 53 arch/um/include/asm/processor-generic.h .arch = INIT_ARCH_THREAD, \ arch 27 arch/um/kernel/exec.c arch_flush_thread(&current->thread.arch); arch 174 arch/um/kernel/process.c arch_copy_thread(&current->thread.arch, &p->thread.arch); arch 165 arch/um/kernel/trap.c current->thread.arch.faultinfo = fi; arch 269 arch/um/kernel/trap.c current->thread.arch.faultinfo = fi; arch 273 arch/um/kernel/trap.c current->thread.arch.faultinfo = fi; arch 303 arch/um/kernel/trap.c current->thread.arch.faultinfo = *fi; arch 51 arch/x86/entry/common.c static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch) arch 54 arch/x86/entry/common.c if (arch == AUDIT_ARCH_X86_64) { arch 71 arch/x86/entry/common.c u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64; arch 95 arch/x86/entry/common.c sd.arch = arch; arch 99 arch/x86/entry/common.c if (arch == AUDIT_ARCH_X86_64) { arch 126 arch/x86/entry/common.c do_audit_syscall_entry(regs, arch); arch 28 arch/x86/entry/vsyscall/vsyscall_trace.h #define TRACE_INCLUDE_PATH ../../arch/x86/entry/vsyscall/ arch 1535 arch/x86/include/asm/kvm_host.h #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
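
The syscall_32.h, syscall_64.h, and entry/common.c entries above all build the audit architecture token the same way: start from a base AUDIT_ARCH_* constant and OR in word-size and endianness flags. A minimal self-contained C sketch of that composition; the constant values are shown for illustration only, the authoritative definitions live in include/uapi/linux/audit.h:

#include <stdio.h>

/* Illustrative stand-ins; authoritative values are in uapi/linux/audit.h. */
#define AUDIT_ARCH_SH      42u          /* base value, EM_SH */
#define __AUDIT_ARCH_64BIT 0x80000000u
#define __AUDIT_ARCH_LE    0x40000000u

/* Same shape as the sh syscall_get_arch() entries indexed above:
 * compose the token from a base constant plus property flags. */
static unsigned int syscall_get_arch_sketch(int is_64bit, int is_little_endian)
{
	unsigned int arch = AUDIT_ARCH_SH;

	if (is_64bit)
		arch |= __AUDIT_ARCH_64BIT;
	if (is_little_endian)
		arch |= __AUDIT_ARCH_LE;
	return arch;
}

int main(void)
{
	/* 64-bit little-endian: 0x80000000 | 0x40000000 | 42 = 0xc000002a */
	printf("%#x\n", syscall_get_arch_sketch(1, 1));
	return 0;
}

The audit_classify_arch() entries for alpha, sparc, and x86 in this index compare against the same AUDIT_ARCH_* constants when classifying records.
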
arch 79 arch/x86/kernel/asm-offsets.c OFFSET(XEN_vcpu_info_arch_cr2, vcpu_info, arch.cr2); arch 32 arch/x86/kernel/audit_64.c int audit_classify_arch(int arch) arch 35 arch/x86/kernel/audit_64.c if (arch == AUDIT_ARCH_I386) arch 281 arch/x86/kernel/crash.c phdr->p_paddr == image->arch.backup_src_start && arch 282 arch/x86/kernel/crash.c phdr->p_memsz == image->arch.backup_src_sz) { arch 283 arch/x86/kernel/crash.c phdr->p_offset = image->arch.backup_load_addr; arch 331 arch/x86/kernel/crash.c start = image->arch.backup_load_addr; arch 332 arch/x86/kernel/crash.c end = start + image->arch.backup_src_sz - 1; arch 338 arch/x86/kernel/crash.c start = image->arch.elf_load_addr; arch 339 arch/x86/kernel/crash.c end = start + image->arch.elf_headers_sz - 1; arch 360 arch/x86/kernel/crash.c ei.addr = image->arch.backup_src_start; arch 361 arch/x86/kernel/crash.c ei.size = image->arch.backup_src_sz; arch 416 arch/x86/kernel/crash.c image->arch.backup_src_start = res->start; arch 417 arch/x86/kernel/crash.c image->arch.backup_src_sz = resource_size(res); arch 442 arch/x86/kernel/crash.c if (image->arch.backup_src_sz) { arch 445 arch/x86/kernel/crash.c kbuf.memsz = image->arch.backup_src_sz; arch 455 arch/x86/kernel/crash.c image->arch.backup_load_addr = kbuf.mem; arch 457 arch/x86/kernel/crash.c image->arch.backup_load_addr, arch 458 arch/x86/kernel/crash.c image->arch.backup_src_start, kbuf.memsz); arch 466 arch/x86/kernel/crash.c image->arch.elf_headers = kbuf.buffer; arch 467 arch/x86/kernel/crash.c image->arch.elf_headers_sz = kbuf.bufsz; arch 474 arch/x86/kernel/crash.c vfree((void *)image->arch.elf_headers); arch 477 arch/x86/kernel/crash.c image->arch.elf_load_addr = kbuf.mem; arch 479 arch/x86/kernel/crash.c image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz); arch 78 arch/x86/kernel/kexec-bzimage64.c "elfcorehdr=0x%lx ", image->arch.elf_load_addr); arch 57 arch/x86/kernel/machine_kexec_32.c free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER); arch 58 arch/x86/kernel/machine_kexec_32.c image->arch.pgd = NULL; arch 60 arch/x86/kernel/machine_kexec_32.c free_page((unsigned long)image->arch.pmd0); arch 61 arch/x86/kernel/machine_kexec_32.c image->arch.pmd0 = NULL; arch 62 arch/x86/kernel/machine_kexec_32.c free_page((unsigned long)image->arch.pmd1); arch 63 arch/x86/kernel/machine_kexec_32.c image->arch.pmd1 = NULL; arch 65 arch/x86/kernel/machine_kexec_32.c free_page((unsigned long)image->arch.pte0); arch 66 arch/x86/kernel/machine_kexec_32.c image->arch.pte0 = NULL; arch 67 arch/x86/kernel/machine_kexec_32.c free_page((unsigned long)image->arch.pte1); arch 68 arch/x86/kernel/machine_kexec_32.c image->arch.pte1 = NULL; arch 73 arch/x86/kernel/machine_kexec_32.c image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, arch 76 arch/x86/kernel/machine_kexec_32.c image->arch.pmd0 = (pmd_t *)get_zeroed_page(GFP_KERNEL); arch 77 arch/x86/kernel/machine_kexec_32.c image->arch.pmd1 = (pmd_t *)get_zeroed_page(GFP_KERNEL); arch 79 arch/x86/kernel/machine_kexec_32.c image->arch.pte0 = (pte_t *)get_zeroed_page(GFP_KERNEL); arch 80 arch/x86/kernel/machine_kexec_32.c image->arch.pte1 = (pte_t *)get_zeroed_page(GFP_KERNEL); arch 81 arch/x86/kernel/machine_kexec_32.c if (!image->arch.pgd || arch 83 arch/x86/kernel/machine_kexec_32.c !image->arch.pmd0 || !image->arch.pmd1 || arch 85 arch/x86/kernel/machine_kexec_32.c !image->arch.pte0 || !image->arch.pte1) { arch 119 arch/x86/kernel/machine_kexec_32.c pmd = image->arch.pmd0; arch 122 arch/x86/kernel/machine_kexec_32.c 
image->arch.pgd, pmd, image->arch.pte0, arch 125 arch/x86/kernel/machine_kexec_32.c pmd = image->arch.pmd1; arch 128 arch/x86/kernel/machine_kexec_32.c image->arch.pgd, pmd, image->arch.pte1, arch 215 arch/x86/kernel/machine_kexec_32.c page_list[PA_PGD] = __pa(image->arch.pgd); arch 114 arch/x86/kernel/machine_kexec_64.c free_page((unsigned long)image->arch.p4d); arch 115 arch/x86/kernel/machine_kexec_64.c image->arch.p4d = NULL; arch 116 arch/x86/kernel/machine_kexec_64.c free_page((unsigned long)image->arch.pud); arch 117 arch/x86/kernel/machine_kexec_64.c image->arch.pud = NULL; arch 118 arch/x86/kernel/machine_kexec_64.c free_page((unsigned long)image->arch.pmd); arch 119 arch/x86/kernel/machine_kexec_64.c image->arch.pmd = NULL; arch 120 arch/x86/kernel/machine_kexec_64.c free_page((unsigned long)image->arch.pte); arch 121 arch/x86/kernel/machine_kexec_64.c image->arch.pte = NULL; arch 141 arch/x86/kernel/machine_kexec_64.c image->arch.p4d = p4d; arch 149 arch/x86/kernel/machine_kexec_64.c image->arch.pud = pud; arch 157 arch/x86/kernel/machine_kexec_64.c image->arch.pmd = pmd; arch 165 arch/x86/kernel/machine_kexec_64.c image->arch.pte = pte; arch 314 arch/x86/kernel/machine_kexec_64.c &image->arch.backup_load_addr, arch 315 arch/x86/kernel/machine_kexec_64.c sizeof(image->arch.backup_load_addr), 0); arch 321 arch/x86/kernel/machine_kexec_64.c &image->arch.backup_src_start, arch 322 arch/x86/kernel/machine_kexec_64.c sizeof(image->arch.backup_src_start), 0); arch 328 arch/x86/kernel/machine_kexec_64.c &image->arch.backup_src_sz, arch 329 arch/x86/kernel/machine_kexec_64.c sizeof(image->arch.backup_src_sz), 0); arch 472 arch/x86/kernel/machine_kexec_64.c vfree(image->arch.elf_headers); arch 473 arch/x86/kernel/machine_kexec_64.c image->arch.elf_headers = NULL; arch 65 arch/x86/kernel/unwind_orc.c if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip) arch 67 arch/x86/kernel/unwind_orc.c return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, arch 68 arch/x86/kernel/unwind_orc.c mod->arch.num_orcs, ip); arch 252 arch/x86/kernel/unwind_orc.c mod->arch.orc_unwind_ip = orc_ip; arch 253 arch/x86/kernel/unwind_orc.c mod->arch.orc_unwind = orc; arch 254 arch/x86/kernel/unwind_orc.c mod->arch.num_orcs = num_entries; arch 70 arch/x86/kvm/cpuid.c struct kvm_lapic *apic = vcpu->arch.apic; arch 84 arch/x86/kvm/cpuid.c if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE) arch 106 arch/x86/kvm/cpuid.c vcpu->arch.guest_supported_xcr0 = 0; arch 107 arch/x86/kvm/cpuid.c vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; arch 109 arch/x86/kvm/cpuid.c vcpu->arch.guest_supported_xcr0 = arch 112 arch/x86/kvm/cpuid.c vcpu->arch.guest_xstate_size = best->ebx = arch 113 arch/x86/kvm/cpuid.c xstate_required_size(vcpu->arch.xcr0, false); arch 118 arch/x86/kvm/cpuid.c best->ebx = xstate_required_size(vcpu->arch.xcr0, true); arch 140 arch/x86/kvm/cpuid.c if (vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT) arch 148 arch/x86/kvm/cpuid.c vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); arch 169 arch/x86/kvm/cpuid.c for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { arch 170 arch/x86/kvm/cpuid.c e = &vcpu->arch.cpuid_entries[i]; arch 221 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; arch 222 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; arch 223 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx; arch 224 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_entries[i].ecx = 
cpuid_entries[i].ecx; arch 225 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx; arch 226 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_entries[i].index = 0; arch 227 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_entries[i].flags = 0; arch 228 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_entries[i].padding[0] = 0; arch 229 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_entries[i].padding[1] = 0; arch 230 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_entries[i].padding[2] = 0; arch 232 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_nent = cpuid->nent; arch 253 arch/x86/kvm/cpuid.c if (copy_from_user(&vcpu->arch.cpuid_entries, entries, arch 256 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_nent = cpuid->nent; arch 271 arch/x86/kvm/cpuid.c if (cpuid->nent < vcpu->arch.cpuid_nent) arch 274 arch/x86/kvm/cpuid.c if (copy_to_user(entries, &vcpu->arch.cpuid_entries, arch 275 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) arch 280 arch/x86/kvm/cpuid.c cpuid->nent = vcpu->arch.cpuid_nent; arch 930 arch/x86/kvm/cpuid.c struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; arch 933 arch/x86/kvm/cpuid.c int nent = vcpu->arch.cpuid_nent; arch 939 arch/x86/kvm/cpuid.c ej = &vcpu->arch.cpuid_entries[j]; arch 968 arch/x86/kvm/cpuid.c for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { arch 971 arch/x86/kvm/cpuid.c e = &vcpu->arch.cpuid_entries[i]; arch 32 arch/x86/kvm/cpuid.h return vcpu->arch.maxphyaddr; arch 159 arch/x86/kvm/cpuid.h return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT; arch 164 arch/x86/kvm/cpuid.h return vcpu->arch.msr_misc_features_enables & arch 14 arch/x86/kvm/debugfs.c *val = vcpu->arch.apic->lapic_timer.timer_advance_ns; arch 23 arch/x86/kvm/debugfs.c *val = vcpu->arch.tsc_offset; arch 32 arch/x86/kvm/debugfs.c *val = vcpu->arch.tsc_scaling_ratio; arch 191 arch/x86/kvm/hyperv.c struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; arch 323 arch/x86/kvm/hyperv.c ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL); arch 398 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &kvm->arch.hyperv; arch 713 arch/x86/kvm/hyperv.c if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) arch 715 arch/x86/kvm/hyperv.c return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; arch 724 arch/x86/kvm/hyperv.c return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, arch 811 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; arch 823 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; arch 831 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; arch 855 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; arch 937 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &kvm->arch.hyperv; arch 947 arch/x86/kvm/hyperv.c mutex_lock(&kvm->arch.hyperv.hv_lock); arch 991 arch/x86/kvm/hyperv.c mutex_unlock(&kvm->arch.hyperv.hv_lock); arch 998 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &kvm->arch.hyperv; arch 1082 arch/x86/kvm/hyperv.c struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; arch 1086 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; arch 1195 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &kvm->arch.hyperv; arch 1241 arch/x86/kvm/hyperv.c struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; arch 1285 arch/x86/kvm/hyperv.c data = (u64)vcpu->arch.virtual_tsc_khz * 1000; arch 1303 arch/x86/kvm/hyperv.c mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); arch 1305 arch/x86/kvm/hyperv.c mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); arch 1316 arch/x86/kvm/hyperv.c mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); arch 
1318 arch/x86/kvm/hyperv.c mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); arch 1328 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &kvm->arch.hyperv; arch 1356 arch/x86/kvm/hyperv.c struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv; arch 1531 arch/x86/kvm/hyperv.c return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE; arch 1589 arch/x86/kvm/hyperv.c eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param); arch 1665 arch/x86/kvm/hyperv.c vcpu->arch.complete_userspace_io = arch 1720 arch/x86/kvm/hyperv.c mutex_init(&kvm->arch.hyperv.hv_lock); arch 1721 arch/x86/kvm/hyperv.c idr_init(&kvm->arch.hyperv.conn_to_evt); arch 1729 arch/x86/kvm/hyperv.c idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i) arch 1731 arch/x86/kvm/hyperv.c idr_destroy(&kvm->arch.hyperv.conn_to_evt); arch 1736 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &kvm->arch.hyperv; arch 1760 arch/x86/kvm/hyperv.c struct kvm_hv *hv = &kvm->arch.hyperv; arch 28 arch/x86/kvm/hyperv.h return &vcpu->arch.hyperv; arch 33 arch/x86/kvm/hyperv.h struct kvm_vcpu_arch *arch; arch 35 arch/x86/kvm/hyperv.h arch = container_of(hv_vcpu, struct kvm_vcpu_arch, hyperv); arch 36 arch/x86/kvm/hyperv.h return container_of(arch, struct kvm_vcpu, arch); arch 41 arch/x86/kvm/hyperv.h return &vcpu->arch.hyperv.synic; arch 85 arch/x86/kvm/hyperv.h return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap, arch 220 arch/x86/kvm/i8254.c struct kvm_pit *pit = vcpu->kvm->arch.vpit; arch 262 arch/x86/kvm/i8254.c if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0) arch 724 arch/x86/kvm/i8254.c struct kvm_pit *pit = kvm->arch.vpit; arch 237 arch/x86/kvm/i8259.c struct kvm_pic *s = kvm->arch.vpic; arch 563 arch/x86/kvm/i8259.c struct kvm_pic *s = kvm->arch.vpic; arch 622 arch/x86/kvm/i8259.c kvm->arch.vpic = s; arch 642 arch/x86/kvm/i8259.c struct kvm_pic *vpic = kvm->arch.vpic; arch 653 arch/x86/kvm/i8259.c kvm->arch.vpic = NULL; arch 108 arch/x86/kvm/ioapic.c struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; arch 136 arch/x86/kvm/ioapic.c struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; arch 238 arch/x86/kvm/ioapic.c struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; arch 426 arch/x86/kvm/ioapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 483 arch/x86/kvm/ioapic.c struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; arch 618 arch/x86/kvm/ioapic.c kvm->arch.vioapic = ioapic; arch 627 arch/x86/kvm/ioapic.c kvm->arch.vioapic = NULL; arch 636 arch/x86/kvm/ioapic.c struct kvm_ioapic *ioapic = kvm->arch.vioapic; arch 645 arch/x86/kvm/ioapic.c kvm->arch.vioapic = NULL; arch 651 arch/x86/kvm/ioapic.c struct kvm_ioapic *ioapic = kvm->arch.vioapic; arch 661 arch/x86/kvm/ioapic.c struct kvm_ioapic *ioapic = kvm->arch.vioapic; arch 111 arch/x86/kvm/ioapic.h int mode = kvm->arch.irqchip_mode; arch 36 arch/x86/kvm/irq.c return v->arch.pending_external_vector != -1; arch 51 arch/x86/kvm/irq.c return v->kvm->arch.vpic->output; arch 76 arch/x86/kvm/irq.c return v->arch.interrupt.injected; arch 105 arch/x86/kvm/irq.c return v->arch.interrupt.injected; arch 122 arch/x86/kvm/irq.c int vector = v->arch.pending_external_vector; arch 124 arch/x86/kvm/irq.c v->arch.pending_external_vector = -1; arch 140 arch/x86/kvm/irq.c return v->arch.interrupt.nr; arch 71 arch/x86/kvm/irq.h int mode = kvm->arch.irqchip_mode; arch 80 arch/x86/kvm/irq.h int mode = kvm->arch.irqchip_mode; arch 89 arch/x86/kvm/irq.h int mode = kvm->arch.irqchip_mode; arch 98 arch/x86/kvm/irq.h int mode = kvm->arch.irqchip_mode; arch 34 arch/x86/kvm/irq_comm.c struct 
kvm_pic *pic = kvm->arch.vpic; arch 42 arch/x86/kvm/irq_comm.c struct kvm_ioapic *ioapic = kvm->arch.vioapic; arch 78 arch/x86/kvm/irq_comm.c } else if (kvm_apic_sw_enabled(vcpu->arch.apic)) { arch 107 arch/x86/kvm/irq_comm.c trace_kvm_msi_set_irq(e->msi.address_lo | (kvm->arch.x2apic_format ? arch 113 arch/x86/kvm/irq_comm.c if (kvm->arch.x2apic_format) arch 130 arch/x86/kvm/irq_comm.c return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff); arch 191 arch/x86/kvm/irq_comm.c unsigned long *bitmap = &kvm->arch.irq_sources_bitmap; arch 223 arch/x86/kvm/irq_comm.c clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); arch 227 arch/x86/kvm/irq_comm.c kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id); arch 228 arch/x86/kvm/irq_comm.c kvm_pic_clear_all(kvm->arch.vpic, irq_source_id); arch 238 arch/x86/kvm/irq_comm.c hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list); arch 260 arch/x86/kvm/irq_comm.c hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link) arch 409 arch/x86/kvm/irq_comm.c kvm->arch.nr_reserved_ioapic_pins); arch 15 arch/x86/kvm/kvm_cache_regs.h return vcpu->arch.regs[VCPU_REGS_##uname]; \ arch 20 arch/x86/kvm/kvm_cache_regs.h vcpu->arch.regs[VCPU_REGS_##uname] = val; \ arch 43 arch/x86/kvm/kvm_cache_regs.h if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail)) arch 46 arch/x86/kvm/kvm_cache_regs.h return vcpu->arch.regs[reg]; arch 53 arch/x86/kvm/kvm_cache_regs.h vcpu->arch.regs[reg] = val; arch 54 arch/x86/kvm/kvm_cache_regs.h __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); arch 55 arch/x86/kvm/kvm_cache_regs.h __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); arch 83 arch/x86/kvm/kvm_cache_regs.h (unsigned long *)&vcpu->arch.regs_avail)) arch 86 arch/x86/kvm/kvm_cache_regs.h return vcpu->arch.walk_mmu->pdptrs[index]; arch 92 arch/x86/kvm/kvm_cache_regs.h if (tmask & vcpu->arch.cr0_guest_owned_bits) arch 94 arch/x86/kvm/kvm_cache_regs.h return vcpu->arch.cr0 & mask; arch 105 arch/x86/kvm/kvm_cache_regs.h if (tmask & vcpu->arch.cr4_guest_owned_bits) arch 107 arch/x86/kvm/kvm_cache_regs.h return vcpu->arch.cr4 & mask; arch 112 arch/x86/kvm/kvm_cache_regs.h if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) arch 114 arch/x86/kvm/kvm_cache_regs.h return vcpu->arch.cr3; arch 130 arch/x86/kvm/kvm_cache_regs.h vcpu->arch.hflags |= HF_GUEST_MASK; arch 135 arch/x86/kvm/kvm_cache_regs.h vcpu->arch.hflags &= ~HF_GUEST_MASK; arch 137 arch/x86/kvm/kvm_cache_regs.h if (vcpu->arch.load_eoi_exitmap_pending) { arch 138 arch/x86/kvm/kvm_cache_regs.h vcpu->arch.load_eoi_exitmap_pending = false; arch 145 arch/x86/kvm/kvm_cache_regs.h return vcpu->arch.hflags & HF_GUEST_MASK; arch 150 arch/x86/kvm/kvm_cache_regs.h return vcpu->arch.hflags & HF_SMM_MASK; arch 83 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 177 arch/x86/kvm/lapic.c mutex_lock(&kvm->arch.apic_map_lock); arch 181 arch/x86/kvm/lapic.c max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic)); arch 193 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 239 arch/x86/kvm/lapic.c old = rcu_dereference_protected(kvm->arch.apic_map, arch 240 arch/x86/kvm/lapic.c lockdep_is_held(&kvm->arch.apic_map_lock)); arch 241 arch/x86/kvm/lapic.c rcu_assign_pointer(kvm->arch.apic_map, new); arch 242 arch/x86/kvm/lapic.c mutex_unlock(&kvm->arch.apic_map_lock); arch 327 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 417 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 451 arch/x86/kvm/lapic.c if 
(unlikely(vcpu->arch.apicv_active)) { arch 478 arch/x86/kvm/lapic.c if (unlikely(vcpu->arch.apicv_active)) arch 526 arch/x86/kvm/lapic.c if (unlikely(vcpu->arch.apicv_active)) arch 543 arch/x86/kvm/lapic.c return apic_find_highest_irr(vcpu->arch.apic); arch 554 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 582 arch/x86/kvm/lapic.c map = rcu_dereference(kvm->arch.apic_map); arch 621 arch/x86/kvm/lapic.c return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val, arch 628 arch/x86/kvm/lapic.c return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val, arch 634 arch/x86/kvm/lapic.c return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; arch 642 arch/x86/kvm/lapic.c (unsigned long long)vcpu->arch.pv_eoi.msr_val); arch 652 arch/x86/kvm/lapic.c (unsigned long long)vcpu->arch.pv_eoi.msr_val); arch 655 arch/x86/kvm/lapic.c __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); arch 662 arch/x86/kvm/lapic.c (unsigned long long)vcpu->arch.pv_eoi.msr_val); arch 665 arch/x86/kvm/lapic.c __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); arch 671 arch/x86/kvm/lapic.c if (apic->vcpu->arch.apicv_active) arch 713 arch/x86/kvm/lapic.c apic_update_ppr(vcpu->arch.apic); arch 796 arch/x86/kvm/lapic.c if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled && arch 806 arch/x86/kvm/lapic.c struct kvm_lapic *target = vcpu->arch.apic; arch 846 arch/x86/kvm/lapic.c if (!kvm->arch.disabled_lapic_found) { arch 847 arch/x86/kvm/lapic.c kvm->arch.disabled_lapic_found = true; arch 856 arch/x86/kvm/lapic.c if (kvm->arch.x2apic_broadcast_quirk_disabled) { arch 962 arch/x86/kvm/lapic.c map = rcu_dereference(kvm->arch.apic_map); arch 1004 arch/x86/kvm/lapic.c map = rcu_dereference(kvm->arch.apic_map); arch 1035 arch/x86/kvm/lapic.c vcpu->arch.apic_arb_prio++; arch 1070 arch/x86/kvm/lapic.c vcpu->arch.pv.pv_unhalted = 1; arch 1128 arch/x86/kvm/lapic.c return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio; arch 1133 arch/x86/kvm/lapic.c return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors); arch 1146 arch/x86/kvm/lapic.c apic->vcpu->arch.pending_ioapic_eoi = vector; arch 1189 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 1256 arch/x86/kvm/lapic.c if (apic->vcpu->arch.tpr_access_reporting) arch 1442 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 1449 arch/x86/kvm/lapic.c if (vcpu->arch.apicv_active) arch 1460 arch/x86/kvm/lapic.c u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns; arch 1468 arch/x86/kvm/lapic.c if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) { arch 1473 arch/x86/kvm/lapic.c do_div(delay_ns, vcpu->arch.virtual_tsc_khz); arch 1481 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 1493 arch/x86/kvm/lapic.c do_div(ns, vcpu->arch.virtual_tsc_khz); arch 1498 arch/x86/kvm/lapic.c do_div(ns, vcpu->arch.virtual_tsc_khz); arch 1509 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 1576 arch/x86/kvm/lapic.c unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; arch 1695 arch/x86/kvm/lapic.c return vcpu->arch.apic->lapic_timer.hv_timer_in_use; arch 1781 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 1802 arch/x86/kvm/lapic.c restart_apic_timer(vcpu->arch.apic); arch 1808 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 1820 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 1844 arch/x86/kvm/lapic.c atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); arch 1846 
arch/x86/kvm/lapic.c atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); arch 2028 arch/x86/kvm/lapic.c kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0); arch 2040 arch/x86/kvm/lapic.c kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val); arch 2043 arch/x86/kvm/lapic.c kvm_lapic_reg_write(vcpu->arch.apic, offset, val); arch 2049 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2051 arch/x86/kvm/lapic.c if (!vcpu->arch.apic) arch 2056 arch/x86/kvm/lapic.c if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)) arch 2075 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2086 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2099 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2109 arch/x86/kvm/lapic.c tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI); arch 2116 arch/x86/kvm/lapic.c u64 old_value = vcpu->arch.apic_base; arch 2117 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2122 arch/x86/kvm/lapic.c vcpu->arch.apic_base = value; arch 2147 arch/x86/kvm/lapic.c apic->base_address = apic->vcpu->arch.apic_base & arch 2157 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2197 arch/x86/kvm/lapic.c apic->irr_pending = vcpu->arch.apicv_active; arch 2198 arch/x86/kvm/lapic.c apic->isr_count = vcpu->arch.apicv_active ? 1 : 0; arch 2204 arch/x86/kvm/lapic.c vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP); arch 2205 arch/x86/kvm/lapic.c vcpu->arch.pv_eoi.msr_val = 0; arch 2207 arch/x86/kvm/lapic.c if (vcpu->arch.apicv_active) { arch 2213 arch/x86/kvm/lapic.c vcpu->arch.apic_arb_prio = 0; arch 2214 arch/x86/kvm/lapic.c vcpu->arch.apic_attention = 0; arch 2230 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2255 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2291 arch/x86/kvm/lapic.c vcpu->arch.apic = apic; arch 2316 arch/x86/kvm/lapic.c vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; arch 2323 arch/x86/kvm/lapic.c vcpu->arch.apic = NULL; arch 2330 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2342 arch/x86/kvm/lapic.c u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0); arch 2345 arch/x86/kvm/lapic.c if (!kvm_apic_hw_enabled(vcpu->arch.apic)) arch 2355 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2366 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2404 arch/x86/kvm/lapic.c if (apic_x2apic_mode(vcpu->arch.apic)) { arch 2408 arch/x86/kvm/lapic.c if (vcpu->kvm->arch.x2apic_format) { arch 2428 arch/x86/kvm/lapic.c memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s)); arch 2434 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2438 arch/x86/kvm/lapic.c kvm_lapic_set_base(vcpu, vcpu->arch.apic_base); arch 2445 arch/x86/kvm/lapic.c memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s)); arch 2457 arch/x86/kvm/lapic.c apic->isr_count = vcpu->arch.apicv_active ? 
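
A pattern worth calling out across the lapic.c entries above and the hyperv.h entries earlier: architecture-specific state hangs off each struct kvm_vcpu as an embedded member named arch, and helpers recover the enclosing vcpu from a pointer to that member with container_of(). A minimal self-contained C sketch of the idiom, using simplified stand-in types rather than the kernel's real definitions (the actual structs are far larger):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's kvm_lapic / kvm_vcpu_arch layering. */
struct kvm_lapic {
	int isr_count;
};

struct kvm_vcpu_arch {
	struct kvm_lapic *apic;
	int apicv_active;
};

struct kvm_vcpu {
	int vcpu_id;
	struct kvm_vcpu_arch arch;	/* per-architecture state lives here */
};

/* Equivalent of container_of(arch, struct kvm_vcpu, arch): recover the
 * enclosing vcpu from a pointer to its embedded arch member. */
static struct kvm_vcpu *arch_to_vcpu(struct kvm_vcpu_arch *arch)
{
	return (struct kvm_vcpu *)((char *)arch - offsetof(struct kvm_vcpu, arch));
}

int main(void)
{
	struct kvm_lapic apic = { 0 };
	struct kvm_vcpu vcpu = { .vcpu_id = 3, .arch = { &apic, 1 } };

	/* The access shape seen throughout this index: x->arch.field. */
	vcpu.arch.apic->isr_count = vcpu.arch.apicv_active ? 1 : 0;
	printf("isr_count=%d vcpu_id=%d\n", vcpu.arch.apic->isr_count,
	       arch_to_vcpu(&vcpu.arch)->vcpu_id);
	return 0;
}

That containment is why nearly every line in this index reads x->arch.field: arch is a per-object sub-struct, not a global, and the double container_of() in the hyperv.h entries (lines 33-36 above) simply walks the nesting back out from kvm_vcpu_hv to kvm_vcpu.
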
arch 2460 arch/x86/kvm/lapic.c if (vcpu->arch.apicv_active) { arch 2471 arch/x86/kvm/lapic.c vcpu->arch.apic_arb_prio = 0; arch 2484 arch/x86/kvm/lapic.c timer = &vcpu->arch.apic->lapic_timer.timer; arch 2530 arch/x86/kvm/lapic.c if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) arch 2531 arch/x86/kvm/lapic.c apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); arch 2533 arch/x86/kvm/lapic.c if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) arch 2536 arch/x86/kvm/lapic.c if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, arch 2540 arch/x86/kvm/lapic.c apic_set_tpr(vcpu->arch.apic, data & 0xff); arch 2573 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2577 arch/x86/kvm/lapic.c if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) arch 2589 arch/x86/kvm/lapic.c kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, arch 2597 arch/x86/kvm/lapic.c &vcpu->arch.apic->vapic_cache, arch 2600 arch/x86/kvm/lapic.c __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); arch 2602 arch/x86/kvm/lapic.c __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); arch 2605 arch/x86/kvm/lapic.c vcpu->arch.apic->vapic_addr = vapic_addr; arch 2611 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2628 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2649 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2662 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2681 arch/x86/kvm/lapic.c struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data; arch 2687 arch/x86/kvm/lapic.c vcpu->arch.pv_eoi.msr_val = data; arch 2701 arch/x86/kvm/lapic.c struct kvm_lapic *apic = vcpu->arch.apic; arch 2717 arch/x86/kvm/lapic.c WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED); arch 2727 arch/x86/kvm/lapic.c vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; arch 2729 arch/x86/kvm/lapic.c vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; arch 2732 arch/x86/kvm/lapic.c vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { arch 2737 arch/x86/kvm/lapic.c vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; arch 122 arch/x86/kvm/lapic.h return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; arch 167 arch/x86/kvm/lapic.h return vcpu->arch.apic; arch 176 arch/x86/kvm/lapic.h return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE; arch 191 arch/x86/kvm/lapic.h return lapic_in_kernel(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic); arch 196 arch/x86/kvm/lapic.h return kvm_apic_present(vcpu) && kvm_apic_sw_enabled(vcpu->arch.apic); arch 201 arch/x86/kvm/lapic.h return apic->vcpu->arch.apic_base & X2APIC_ENABLE; arch 206 arch/x86/kvm/lapic.h return vcpu->arch.apic && vcpu->arch.apicv_active; arch 211 arch/x86/kvm/lapic.h return lapic_in_kernel(vcpu) && vcpu->arch.apic->pending_events; arch 222 arch/x86/kvm/lapic.h return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); arch 372 arch/x86/kvm/mmu.c return vcpu->arch.mmu == &vcpu->arch.guest_mmu; arch 606 arch/x86/kvm/mmu.c return vcpu->arch.efer & EFER_NX; arch 1114 arch/x86/kvm/mmu.c r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, arch 1118 arch/x86/kvm/mmu.c r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); arch 1121 arch/x86/kvm/mmu.c r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, arch 1129 arch/x86/kvm/mmu.c mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, arch 1131 arch/x86/kvm/mmu.c 
mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); arch 1132 arch/x86/kvm/mmu.c mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, arch 1147 arch/x86/kvm/mmu.c return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); arch 1188 arch/x86/kvm/mmu.c return &slot->arch.lpage_info[level - 2][idx]; arch 1220 arch/x86/kvm/mmu.c kvm->arch.indirect_shadow_pages++; arch 1240 arch/x86/kvm/mmu.c &kvm->arch.lpage_disallowed_mmu_pages); arch 1250 arch/x86/kvm/mmu.c kvm->arch.indirect_shadow_pages--; arch 1475 arch/x86/kvm/mmu.c return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx]; arch 1493 arch/x86/kvm/mmu.c cache = &vcpu->arch.mmu_pte_list_desc_cache; arch 2136 arch/x86/kvm/mmu.c kvm->arch.n_used_mmu_pages += nr; arch 2182 arch/x86/kvm/mmu.c sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); arch 2183 arch/x86/kvm/mmu.c sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); arch 2185 arch/x86/kvm/mmu.c sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); arch 2193 arch/x86/kvm/mmu.c sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; arch 2194 arch/x86/kvm/mmu.c list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); arch 2342 arch/x86/kvm/mmu.c &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ arch 2360 arch/x86/kvm/mmu.c vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { arch 2403 arch/x86/kvm/mmu.c unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); arch 2565 arch/x86/kvm/mmu.c role = vcpu->arch.mmu->mmu_role.base; arch 2571 arch/x86/kvm/mmu.c if (!vcpu->arch.mmu->direct_map arch 2572 arch/x86/kvm/mmu.c && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { arch 2615 arch/x86/kvm/mmu.c &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); arch 2646 arch/x86/kvm/mmu.c iterator->level = vcpu->arch.mmu->shadow_root_level; arch 2649 arch/x86/kvm/mmu.c vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL && arch 2650 arch/x86/kvm/mmu.c !vcpu->arch.mmu->direct_map) arch 2658 arch/x86/kvm/mmu.c BUG_ON(root != vcpu->arch.mmu->root_hpa); arch 2661 arch/x86/kvm/mmu.c = vcpu->arch.mmu->pae_root[(addr >> 30) & 3]; arch 2672 arch/x86/kvm/mmu.c shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa, arch 2842 arch/x86/kvm/mmu.c list_move(&sp->link, &kvm->arch.active_mmu_pages); arch 2899 arch/x86/kvm/mmu.c if (list_empty(&kvm->arch.active_mmu_pages)) arch 2902 arch/x86/kvm/mmu.c sp = list_last_entry(&kvm->arch.active_mmu_pages, arch 2917 arch/x86/kvm/mmu.c if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { arch 2919 arch/x86/kvm/mmu.c while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) arch 2924 arch/x86/kvm/mmu.c goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; arch 2927 arch/x86/kvm/mmu.c kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; arch 3335 arch/x86/kvm/mmu.c if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) arch 3545 arch/x86/kvm/mmu.c if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) arch 3782 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { arch 3789 arch/x86/kvm/mmu.c vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL); arch 3792 arch/x86/kvm/mmu.c vcpu->arch.mmu->root_hpa = __pa(sp->spt); arch 3793 arch/x86/kvm/mmu.c } else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) { arch 3795 arch/x86/kvm/mmu.c hpa_t root = vcpu->arch.mmu->pae_root[i]; arch 3808 arch/x86/kvm/mmu.c vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK; arch 3810 arch/x86/kvm/mmu.c vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); arch 3813 arch/x86/kvm/mmu.c vcpu->arch.mmu->root_cr3 = 
vcpu->arch.mmu->get_cr3(vcpu); arch 3825 arch/x86/kvm/mmu.c root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); arch 3835 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { arch 3836 arch/x86/kvm/mmu.c hpa_t root = vcpu->arch.mmu->root_hpa; arch 3846 arch/x86/kvm/mmu.c vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL); arch 3850 arch/x86/kvm/mmu.c vcpu->arch.mmu->root_hpa = root; arch 3860 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) arch 3864 arch/x86/kvm/mmu.c hpa_t root = vcpu->arch.mmu->pae_root[i]; arch 3867 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) { arch 3868 arch/x86/kvm/mmu.c pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i); arch 3870 arch/x86/kvm/mmu.c vcpu->arch.mmu->pae_root[i] = 0; arch 3888 arch/x86/kvm/mmu.c vcpu->arch.mmu->pae_root[i] = root | pm_mask; arch 3890 arch/x86/kvm/mmu.c vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); arch 3896 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) { arch 3897 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->lm_root == NULL) { arch 3909 arch/x86/kvm/mmu.c lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask; arch 3911 arch/x86/kvm/mmu.c vcpu->arch.mmu->lm_root = lm_root; arch 3914 arch/x86/kvm/mmu.c vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); arch 3918 arch/x86/kvm/mmu.c vcpu->arch.mmu->root_cr3 = root_cr3; arch 3925 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->direct_map) arch 3936 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->direct_map) arch 3939 arch/x86/kvm/mmu.c if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) arch 3944 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { arch 3945 arch/x86/kvm/mmu.c hpa_t root = vcpu->arch.mmu->root_hpa; arch 3976 arch/x86/kvm/mmu.c hpa_t root = vcpu->arch.mmu->pae_root[i]; arch 4004 arch/x86/kvm/mmu.c return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); arch 4050 arch/x86/kvm/mmu.c if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) arch 4067 arch/x86/kvm/mmu.c reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte, arch 4146 arch/x86/kvm/mmu.c if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) arch 4174 arch/x86/kvm/mmu.c MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); arch 4184 arch/x86/kvm/mmu.c struct kvm_arch_async_pf arch; arch 4186 arch/x86/kvm/mmu.c arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; arch 4187 arch/x86/kvm/mmu.c arch.gfn = gfn; arch 4188 arch/x86/kvm/mmu.c arch.direct_map = vcpu->arch.mmu->direct_map; arch 4189 arch/x86/kvm/mmu.c arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu); arch 4192 arch/x86/kvm/mmu.c kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); arch 4241 arch/x86/kvm/mmu.c vcpu->arch.l1tf_flush_l1d = true; arch 4242 arch/x86/kvm/mmu.c switch (vcpu->arch.apf.host_apf_reason) { arch 4252 arch/x86/kvm/mmu.c vcpu->arch.apf.host_apf_reason = 0; arch 4258 arch/x86/kvm/mmu.c vcpu->arch.apf.host_apf_reason = 0; arch 4292 arch/x86/kvm/mmu.c MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)); arch 4367 arch/x86/kvm/mmu.c struct kvm_mmu *mmu = vcpu->arch.mmu; arch 4391 arch/x86/kvm/mmu.c struct kvm_mmu *mmu = vcpu->arch.mmu; arch 4441 arch/x86/kvm/mmu.c kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, arch 4460 arch/x86/kvm/mmu.c vcpu->arch.mmu->inject_page_fault(vcpu, fault); arch 5018 arch/x86/kvm/mmu.c struct kvm_mmu *context = vcpu->arch.mmu; arch 5090 arch/x86/kvm/mmu.c struct kvm_mmu *context = vcpu->arch.mmu; arch 5119 arch/x86/kvm/mmu.c role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm; arch 5144 arch/x86/kvm/mmu.c struct kvm_mmu *context = vcpu->arch.mmu; 
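
The mmu.c entries just below (lines 5797-5812 in this index) and the svm.c nested-NPT entries further down (lines 3032-3046) share one mechanism: each vcpu carries two preallocated MMU contexts, root_mmu and guest_mmu, and vcpu->arch.mmu is a pointer flipped between them when entering or leaving nested mode. A minimal C sketch of that pointer-swap pattern, again with simplified stand-in types rather than the kernel's real struct kvm_mmu:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct kvm_mmu. */
struct kvm_mmu {
	unsigned long root_hpa;
	const char *name;
};

struct kvm_vcpu_arch {
	struct kvm_mmu *mmu;		/* the currently active context */
	struct kvm_mmu root_mmu;	/* ordinary (L1) operation */
	struct kvm_mmu guest_mmu;	/* while running a nested guest */
};

/* Same shape as the svm.c lines indexed below: select a context by
 * repointing ->mmu rather than copying MMU state around. */
static void select_mmu_context(struct kvm_vcpu_arch *arch, bool nested)
{
	arch->mmu = nested ? &arch->guest_mmu : &arch->root_mmu;
}

int main(void)
{
	struct kvm_vcpu_arch arch = {
		.root_mmu  = { 0, "root_mmu"  },
		.guest_mmu = { 0, "guest_mmu" },
	};

	select_mmu_context(&arch, false);
	printf("active: %s\n", arch.mmu->name);
	select_mmu_context(&arch, true);
	printf("active: %s\n", arch.mmu->name);
	return 0;
}

Keeping both contexts allocated for the vcpu's lifetime means a nested transition only swings one pointer; teardown frees both, as the paired free_mmu_pages(&vcpu->arch.root_mmu) / free_mmu_pages(&vcpu->arch.guest_mmu) calls in the mmu.c entries (lines 6385-6386 below) show.
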
arch 5178 arch/x86/kvm/mmu.c struct kvm_mmu *context = vcpu->arch.mmu; arch 5190 arch/x86/kvm/mmu.c struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; arch 5241 arch/x86/kvm/mmu.c vcpu->arch.mmu->root_hpa = INVALID_PAGE; arch 5244 arch/x86/kvm/mmu.c vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; arch 5296 arch/x86/kvm/mmu.c kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL); arch 5297 arch/x86/kvm/mmu.c WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa)); arch 5298 arch/x86/kvm/mmu.c kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); arch 5299 arch/x86/kvm/mmu.c WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa)); arch 5313 arch/x86/kvm/mmu.c vcpu->arch.mmu->update_pte(vcpu, sp, spte, new); arch 5446 arch/x86/kvm/mmu.c if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) arch 5481 arch/x86/kvm/mmu.c u32 base_role = vcpu->arch.mmu->mmu_role.base.word; arch 5504 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->direct_map) arch 5539 arch/x86/kvm/mmu.c bool direct = vcpu->arch.mmu->direct_map; arch 5542 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->direct_map) { arch 5543 arch/x86/kvm/mmu.c vcpu->arch.gpa_available = true; arch 5544 arch/x86/kvm/mmu.c vcpu->arch.gpa_val = cr2_or_gpa; arch 5555 arch/x86/kvm/mmu.c r = vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, arch 5573 arch/x86/kvm/mmu.c if (vcpu->arch.mmu->direct_map && arch 5612 arch/x86/kvm/mmu.c struct kvm_mmu *mmu = vcpu->arch.mmu; arch 5643 arch/x86/kvm/mmu.c struct kvm_mmu *mmu = vcpu->arch.mmu; arch 5797 arch/x86/kvm/mmu.c vcpu->arch.mmu = &vcpu->arch.root_mmu; arch 5798 arch/x86/kvm/mmu.c vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; arch 5800 arch/x86/kvm/mmu.c vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; arch 5801 arch/x86/kvm/mmu.c vcpu->arch.root_mmu.root_cr3 = 0; arch 5802 arch/x86/kvm/mmu.c vcpu->arch.root_mmu.translate_gpa = translate_gpa; arch 5804 arch/x86/kvm/mmu.c vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; arch 5806 arch/x86/kvm/mmu.c vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; arch 5807 arch/x86/kvm/mmu.c vcpu->arch.guest_mmu.root_cr3 = 0; arch 5808 arch/x86/kvm/mmu.c vcpu->arch.guest_mmu.translate_gpa = translate_gpa; arch 5810 arch/x86/kvm/mmu.c vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; arch 5812 arch/x86/kvm/mmu.c vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; arch 5814 arch/x86/kvm/mmu.c ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu); arch 5818 arch/x86/kvm/mmu.c ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu); arch 5824 arch/x86/kvm/mmu.c free_mmu_pages(&vcpu->arch.guest_mmu); arch 5836 arch/x86/kvm/mmu.c &kvm->arch.active_mmu_pages, link) { arch 5866 arch/x86/kvm/mmu.c &kvm->arch.zapped_obsolete_pages, &nr_zapped)) { arch 5877 arch/x86/kvm/mmu.c kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages); arch 5903 arch/x86/kvm/mmu.c kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 
0 : 1; arch 5921 arch/x86/kvm/mmu.c return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages)); arch 5933 arch/x86/kvm/mmu.c struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; arch 5942 arch/x86/kvm/mmu.c struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker; arch 6129 arch/x86/kvm/mmu.c list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { arch 6194 arch/x86/kvm/mmu.c if (!kvm->arch.n_used_mmu_pages && arch 6203 arch/x86/kvm/mmu.c &kvm->arch.zapped_obsolete_pages); arch 6303 arch/x86/kvm/mmu.c wake_up_process(kvm->arch.nx_lpage_recovery_thread); arch 6385 arch/x86/kvm/mmu.c free_mmu_pages(&vcpu->arch.root_mmu); arch 6386 arch/x86/kvm/mmu.c free_mmu_pages(&vcpu->arch.guest_mmu); arch 6415 arch/x86/kvm/mmu.c wake_up_process(kvm->arch.nx_lpage_recovery_thread); arch 6436 arch/x86/kvm/mmu.c while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) { arch 6442 arch/x86/kvm/mmu.c sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages, arch 6498 arch/x86/kvm/mmu.c &kvm->arch.nx_lpage_recovery_thread); arch 6500 arch/x86/kvm/mmu.c kthread_unpark(kvm->arch.nx_lpage_recovery_thread); arch 6507 arch/x86/kvm/mmu.c if (kvm->arch.nx_lpage_recovery_thread) arch 6508 arch/x86/kvm/mmu.c kthread_stop(kvm->arch.nx_lpage_recovery_thread); arch 69 arch/x86/kvm/mmu.h if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages) arch 70 arch/x86/kvm/mmu.h return kvm->arch.n_max_mmu_pages - arch 71 arch/x86/kvm/mmu.h kvm->arch.n_used_mmu_pages; arch 78 arch/x86/kvm/mmu.h if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE)) arch 100 arch/x86/kvm/mmu.h if (VALID_PAGE(vcpu->arch.mmu->root_hpa)) arch 101 arch/x86/kvm/mmu.h vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa | arch 192 arch/x86/kvm/mmu.h pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3; arch 30 arch/x86/kvm/mmu_audit.c fmt, audit_point_name[kvm->arch.audit_point], ##args) arch 59 arch/x86/kvm/mmu_audit.c if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) arch 62 arch/x86/kvm/mmu_audit.c if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) { arch 63 arch/x86/kvm/mmu_audit.c hpa_t root = vcpu->arch.mmu->root_hpa; arch 66 arch/x86/kvm/mmu_audit.c __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level); arch 71 arch/x86/kvm/mmu_audit.c hpa_t root = vcpu->arch.mmu->pae_root[i]; arch 89 arch/x86/kvm/mmu_audit.c list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) arch 122 arch/x86/kvm/mmu_audit.c "ent %llxn", vcpu->arch.mmu->root_level, pfn, arch 170 arch/x86/kvm/mmu_audit.c if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync) arch 246 arch/x86/kvm/mmu_audit.c vcpu->kvm->arch.audit_point = point; arch 296 arch/x86/kvm/mmutrace.h __entry->mmu_valid_gen = kvm->arch.mmu_valid_gen; arch 297 arch/x86/kvm/mmutrace.h __entry->mmu_used_pages = kvm->arch.n_used_mmu_pages; arch 312 arch/x86/kvm/mtrr.c struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; arch 346 arch/x86/kvm/mtrr.c struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; arch 385 arch/x86/kvm/mtrr.c *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data; arch 387 arch/x86/kvm/mtrr.c vcpu->arch.mtrr_state.deftype = data; arch 389 arch/x86/kvm/mtrr.c vcpu->arch.pat = data; arch 418 arch/x86/kvm/mtrr.c *pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index]; arch 420 arch/x86/kvm/mtrr.c *pdata = vcpu->arch.mtrr_state.deftype; arch 422 arch/x86/kvm/mtrr.c *pdata = vcpu->arch.pat; arch 429 arch/x86/kvm/mtrr.c *pdata = vcpu->arch.mtrr_state.var_ranges[index].base; arch 431 arch/x86/kvm/mtrr.c *pdata = 
vcpu->arch.mtrr_state.var_ranges[index].mask;
arch 441 arch/x86/kvm/mtrr.c INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
arch 621 arch/x86/kvm/mtrr.c struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
arch 698 arch/x86/kvm/mtrr.c struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
arch 28 arch/x86/kvm/page_track.c if (!dont || free->arch.gfn_track[i] !=
arch 29 arch/x86/kvm/page_track.c dont->arch.gfn_track[i]) {
arch 30 arch/x86/kvm/page_track.c kvfree(free->arch.gfn_track[i]);
arch 31 arch/x86/kvm/page_track.c free->arch.gfn_track[i] = NULL;
arch 41 arch/x86/kvm/page_track.c slot->arch.gfn_track[i] =
arch 42 arch/x86/kvm/page_track.c kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
arch 44 arch/x86/kvm/page_track.c if (!slot->arch.gfn_track[i])
arch 70 arch/x86/kvm/page_track.c val = slot->arch.gfn_track[mode][index];
arch 75 arch/x86/kvm/page_track.c slot->arch.gfn_track[mode][index] += count;
arch 159 arch/x86/kvm/page_track.c return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
arch 166 arch/x86/kvm/page_track.c head = &kvm->arch.track_notifier_head;
arch 174 arch/x86/kvm/page_track.c head = &kvm->arch.track_notifier_head;
arch 189 arch/x86/kvm/page_track.c head = &kvm->arch.track_notifier_head;
arch 207 arch/x86/kvm/page_track.c head = &kvm->arch.track_notifier_head;
arch 230 arch/x86/kvm/page_track.c head = &vcpu->kvm->arch.track_notifier_head;
arch 255 arch/x86/kvm/page_track.c head = &kvm->arch.track_notifier_head;
arch 178 arch/x86/kvm/paging_tmpl.h if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
arch 185 arch/x86/kvm/paging_tmpl.h if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
arch 481 arch/x86/kvm/paging_tmpl.h vcpu->arch.exit_qualification &= 0x180;
arch 483 arch/x86/kvm/paging_tmpl.h vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
arch 485 arch/x86/kvm/paging_tmpl.h vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
arch 487 arch/x86/kvm/paging_tmpl.h vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
arch 488 arch/x86/kvm/paging_tmpl.h vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
arch 492 arch/x86/kvm/paging_tmpl.h walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
arch 501 arch/x86/kvm/paging_tmpl.h return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
arch 510 arch/x86/kvm/paging_tmpl.h return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
arch 530 arch/x86/kvm/paging_tmpl.h FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
arch 628 arch/x86/kvm/paging_tmpl.h top_level = vcpu->arch.mmu->root_level;
arch 640 arch/x86/kvm/paging_tmpl.h if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
arch 816 arch/x86/kvm/paging_tmpl.h vcpu->arch.write_fault_to_shadow_pgtable = false;
arch 819 arch/x86/kvm/paging_tmpl.h &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
arch 1048 arch/x86/kvm/paging_tmpl.h FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
arch 162 arch/x86/kvm/pmu.c filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
arch 217 arch/x86/kvm/pmu.c filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
arch 303 arch/x86/kvm/pmu.c vcpu->kvm->arch.kvmclock_offset;
arch 337 arch/x86/kvm/pmu.c kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
arch 419 arch/x86/kvm/pmu.c rcu_swap_protected(kvm->arch.pmu_event_filter, filter,
arch 7 arch/x86/kvm/pmu.h #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
arch 8 arch/x86/kvm/pmu.h #define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
arch 9 arch/x86/kvm/pmu.h #define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
arch 633 arch/x86/kvm/svm.c svm->vcpu.arch.hflags |= HF_GIF_MASK;
arch 641 arch/x86/kvm/svm.c svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
arch 649 arch/x86/kvm/svm.c return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
arch 736 arch/x86/kvm/svm.c vcpu->arch.efer = efer;
arch 803 arch/x86/kvm/svm.c unsigned nr = vcpu->arch.exception.nr;
arch 804 arch/x86/kvm/svm.c bool has_error_code = vcpu->arch.exception.has_error_code;
arch 805 arch/x86/kvm/svm.c bool reinject = vcpu->arch.exception.injected;
arch 806 arch/x86/kvm/svm.c u32 error_code = vcpu->arch.exception.error_code;
arch 871 arch/x86/kvm/svm.c vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
arch 872 arch/x86/kvm/svm.c vcpu->arch.osvw.status = osvw_status & ~(6ULL);
arch 883 arch/x86/kvm/svm.c vcpu->arch.osvw.status |= 1;
arch 1497 arch/x86/kvm/svm.c return vcpu->arch.tsc_offset;
arch 1542 arch/x86/kvm/svm.c svm->vcpu.arch.hflags = 0;
arch 1629 arch/x86/kvm/svm.c svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
arch 1648 arch/x86/kvm/svm.c save->g_pat = svm->vcpu.arch.pat;
arch 1655 arch/x86/kvm/svm.c svm->vcpu.arch.hflags = 0;
arch 1724 arch/x86/kvm/svm.c if (kvm->arch.apic_access_page_done)
arch 1734 arch/x86/kvm/svm.c kvm->arch.apic_access_page_done = true;
arch 1754 arch/x86/kvm/svm.c if (!svm->vcpu.arch.apic->regs)
arch 1757 arch/x86/kvm/svm.c svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
arch 2144 arch/x86/kvm/svm.c vcpu->arch.microcode_version = 0x01000065;
arch 2149 arch/x86/kvm/svm.c svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
arch 2152 arch/x86/kvm/svm.c svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
arch 2199 arch/x86/kvm/svm.c svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
arch 2201 arch/x86/kvm/svm.c if (!svm->vcpu.arch.user_fpu) {
arch 2207 arch/x86/kvm/svm.c svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
arch 2209 arch/x86/kvm/svm.c if (!svm->vcpu.arch.guest_fpu) {
arch 2274 arch/x86/kvm/svm.c kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
arch 2276 arch/x86/kvm/svm.c kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
arch 2307 arch/x86/kvm/svm.c kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
arch 2308 arch/x86/kvm/svm.c kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
arch 2334 arch/x86/kvm/svm.c u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
arch 2416 arch/x86/kvm/svm.c load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
arch 2582 arch/x86/kvm/svm.c ulong gcr0 = svm->vcpu.arch.cr0;
arch 2604 arch/x86/kvm/svm.c if (vcpu->arch.efer & EFER_LME) {
arch 2606 arch/x86/kvm/svm.c vcpu->arch.efer |= EFER_LMA;
arch 2611 arch/x86/kvm/svm.c vcpu->arch.efer &= ~EFER_LMA;
arch 2616 arch/x86/kvm/svm.c vcpu->arch.cr0 = cr0;
arch 2644 arch/x86/kvm/svm.c vcpu->arch.cr4 = cr4;
arch 2728 arch/x86/kvm/svm.c get_debugreg(vcpu->arch.db[0], 0);
arch 2729 arch/x86/kvm/svm.c get_debugreg(vcpu->arch.db[1], 1);
arch 2730 arch/x86/kvm/svm.c get_debugreg(vcpu->arch.db[2], 2);
arch 2731 arch/x86/kvm/svm.c get_debugreg(vcpu->arch.db[3], 3);
arch 2732 arch/x86/kvm/svm.c vcpu->arch.dr6 = svm_get_dr6(vcpu);
arch 2733 arch/x86/kvm/svm.c vcpu->arch.dr7 = svm->vmcb->save.dr7;
arch 2735 arch/x86/kvm/svm.c vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
arch 2791 arch/x86/kvm/svm.c kvm_run->debug.arch.pc =
arch 2793 arch/x86/kvm/svm.c kvm_run->debug.arch.exception = DB_VECTOR;
arch 2805 arch/x86/kvm/svm.c kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
arch 2806 arch/x86/kvm/svm.c kvm_run->debug.arch.exception = BP_VECTOR;
arch 3032 arch/x86/kvm/svm.c vcpu->arch.mmu = &vcpu->arch.guest_mmu;
arch 3034 arch/x86/kvm/svm.c vcpu->arch.mmu->set_cr3 = nested_svm_set_tdp_cr3;
arch 3035 arch/x86/kvm/svm.c vcpu->arch.mmu->get_cr3 = nested_svm_get_tdp_cr3;
arch 3036 arch/x86/kvm/svm.c vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
arch 3037 arch/x86/kvm/svm.c vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
arch 3038 arch/x86/kvm/svm.c vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
arch 3039 arch/x86/kvm/svm.c reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
arch 3040 arch/x86/kvm/svm.c vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
arch 3045 arch/x86/kvm/svm.c vcpu->arch.mmu = &vcpu->arch.root_mmu;
arch 3046 arch/x86/kvm/svm.c vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
arch 3051 arch/x86/kvm/svm.c if (!(svm->vcpu.arch.efer & EFER_SVME) ||
arch 3085 arch/x86/kvm/svm.c if (svm->vcpu.arch.exception.nested_apf)
arch 3086 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
arch 3087 arch/x86/kvm/svm.c else if (svm->vcpu.arch.exception.has_payload)
arch 3088 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
arch 3090 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
arch 3102 arch/x86/kvm/svm.c if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
arch 3105 arch/x86/kvm/svm.c if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
arch 3183 arch/x86/kvm/svm.c msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
arch 3241 arch/x86/kvm/svm.c if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason)
arch 3288 arch/x86/kvm/svm.c svm->vcpu.arch.exception.nested_apf != 0)
arch 3387 arch/x86/kvm/svm.c nested_vmcb->save.efer = svm->vcpu.arch.efer;
arch 3391 arch/x86/kvm/svm.c nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
arch 3438 arch/x86/kvm/svm.c if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
arch 3444 arch/x86/kvm/svm.c svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
arch 3463 arch/x86/kvm/svm.c svm->vcpu.arch.cr3 = hsave->save.cr3;
arch 3486 arch/x86/kvm/svm.c svm->vcpu.arch.nmi_injected = false;
arch 3545 arch/x86/kvm/svm.c svm->vcpu.arch.hflags |= HF_HIF_MASK;
arch 3547 arch/x86/kvm/svm.c svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
arch 3567 arch/x86/kvm/svm.c svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
arch 3574 arch/x86/kvm/svm.c svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
arch 3599 arch/x86/kvm/svm.c svm->vcpu.arch.hflags |= HF_VINTR_MASK;
arch 3601 arch/x86/kvm/svm.c svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
arch 3603 arch/x86/kvm/svm.c if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
arch 3612 arch/x86/kvm/svm.c svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
arch 3613 arch/x86/kvm/svm.c svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
arch 3703 arch/x86/kvm/svm.c hsave->save.efer = svm->vcpu.arch.efer;
arch 3705 arch/x86/kvm/svm.c hsave->save.cr4 = svm->vcpu.arch.cr4;
arch 3924 arch/x86/kvm/svm.c svm->vcpu.arch.nmi_injected = false;
arch 3967 arch/x86/kvm/svm.c svm->vcpu.arch.hflags |= HF_IRET_MASK;
arch 4006 arch/x86/kvm/svm.c unsigned long cr0 = svm->vcpu.arch.cr0;
arch 4079 arch/x86/kvm/svm.c val = svm->vcpu.arch.cr2;
arch 4112 arch/x86/kvm/svm.c svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
arch 4296 arch/x86/kvm/svm.c if (svm_dis && (vcpu->arch.efer & EFER_SVME))
arch 4312 arch/x86/kvm/svm.c vcpu->arch.pat = data;
arch 4527 arch/x86/kvm/svm.c struct kvm_lapic *apic = svm->vcpu.arch.apic;
arch 4551 arch/x86/kvm/svm.c struct kvm_lapic *apic = svm->vcpu.arch.apic;
arch 4617 arch/x86/kvm/svm.c flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
arch 4645 arch/x86/kvm/svm.c u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
arch 4646 arch/x86/kvm/svm.c u32 id = kvm_xapic_id(vcpu->arch.apic);
arch 4666 arch/x86/kvm/svm.c u32 id = kvm_xapic_id(vcpu->arch.apic);
arch 4694 arch/x86/kvm/svm.c u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
arch 4705 arch/x86/kvm/svm.c struct kvm_lapic *apic = svm->vcpu.arch.apic;
arch 4985 arch/x86/kvm/svm.c vcpu->arch.cr0 = svm->vmcb->save.cr0;
arch 4987 arch/x86/kvm/svm.c vcpu->arch.cr3 = svm->vmcb->save.cr3;
arch 5101 arch/x86/kvm/svm.c vcpu->arch.hflags |= HF_NMI_MASK;
arch 5125 arch/x86/kvm/svm.c trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
arch 5128 arch/x86/kvm/svm.c svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
arch 5134 arch/x86/kvm/svm.c return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
arch 5192 arch/x86/kvm/svm.c if (!vcpu->arch.apicv_active)
arch 5195 arch/x86/kvm/svm.c kvm_lapic_set_irr(vec, vcpu->arch.apic);
arch 5426 arch/x86/kvm/svm.c !(svm->vcpu.arch.hflags & HF_NMI_MASK);
arch 5436 arch/x86/kvm/svm.c return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
arch 5444 arch/x86/kvm/svm.c svm->vcpu.arch.hflags |= HF_NMI_MASK;
arch 5447 arch/x86/kvm/svm.c svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
arch 5465 arch/x86/kvm/svm.c return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
arch 5495 arch/x86/kvm/svm.c if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
arch 5588 arch/x86/kvm/svm.c if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
arch 5590 arch/x86/kvm/svm.c svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
arch 5594 arch/x86/kvm/svm.c svm->vcpu.arch.nmi_injected = false;
arch 5608 arch/x86/kvm/svm.c svm->vcpu.arch.nmi_injected = true;
arch 5654 arch/x86/kvm/svm.c svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
arch 5655 arch/x86/kvm/svm.c svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
arch 5656 arch/x86/kvm/svm.c svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
arch 5685 arch/x86/kvm/svm.c svm->vmcb->save.cr2 = vcpu->arch.cr2;
arch 5691 arch/x86/kvm/svm.c vcpu->arch.apic->lapic_timer.timer_advance_ns)
arch 5769 arch/x86/kvm/svm.c [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
arch 5770 arch/x86/kvm/svm.c [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
arch 5771 arch/x86/kvm/svm.c [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
arch 5772 arch/x86/kvm/svm.c [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
arch 5773 arch/x86/kvm/svm.c [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
arch 5774 arch/x86/kvm/svm.c [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
arch 5776 arch/x86/kvm/svm.c , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
arch 5777 arch/x86/kvm/svm.c [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
arch 5778 arch/x86/kvm/svm.c [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
arch 5779 arch/x86/kvm/svm.c [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
arch 5780 arch/x86/kvm/svm.c [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
arch 5781 arch/x86/kvm/svm.c [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
arch 5782 arch/x86/kvm/svm.c [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
arch 5783 arch/x86/kvm/svm.c [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
arch 5830 arch/x86/kvm/svm.c vcpu->arch.cr2 = svm->vmcb->save.cr2;
arch 5831 arch/x86/kvm/svm.c vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
arch 5832 arch/x86/kvm/svm.c vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
arch 5833 arch/x86/kvm/svm.c vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
arch 5854 arch/x86/kvm/svm.c svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
arch 5857 arch/x86/kvm/svm.c vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
arch 5858 arch/x86/kvm/svm.c vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
arch 6147 arch/x86/kvm/svm.c cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
arch 6252 arch/x86/kvm/svm.c vcpu->arch.mcg_cap &= 0x1ff;
arch 6285 arch/x86/kvm/svm.c svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
arch 6286 arch/x86/kvm/svm.c svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
arch 6287 arch/x86/kvm/svm.c svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
arch 748 arch/x86/kvm/trace.h __entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
arch 749 arch/x86/kvm/trace.h - vcpu->arch.emulate_ctxt.fetch.data;
arch 750 arch/x86/kvm/trace.h __entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len;
arch 752 arch/x86/kvm/trace.h vcpu->arch.emulate_ctxt.fetch.data,
arch 754 arch/x86/kvm/trace.h __entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
arch 1502 arch/x86/kvm/trace.h #define TRACE_INCLUDE_PATH ../../arch/x86/kvm
arch 267 arch/x86/kvm/vmx/nested.c kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
arch 331 arch/x86/kvm/vmx/nested.c unsigned long exit_qualification = vcpu->arch.exit_qualification;
arch 350 arch/x86/kvm/vmx/nested.c vcpu->arch.mmu = &vcpu->arch.guest_mmu;
arch 356 arch/x86/kvm/vmx/nested.c vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
arch 357 arch/x86/kvm/vmx/nested.c vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
arch 358 arch/x86/kvm/vmx/nested.c vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
arch 359 arch/x86/kvm/vmx/nested.c vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
arch 361 arch/x86/kvm/vmx/nested.c vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
arch 366 arch/x86/kvm/vmx/nested.c vcpu->arch.mmu = &vcpu->arch.root_mmu;
arch 367 arch/x86/kvm/vmx/nested.c vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
arch 390 arch/x86/kvm/vmx/nested.c unsigned int nr = vcpu->arch.exception.nr;
arch 391 arch/x86/kvm/vmx/nested.c bool has_payload = vcpu->arch.exception.has_payload;
arch 392 arch/x86/kvm/vmx/nested.c unsigned long payload = vcpu->arch.exception.payload;
arch 395 arch/x86/kvm/vmx/nested.c if (vcpu->arch.exception.nested_apf) {
arch 396 arch/x86/kvm/vmx/nested.c *exit_qual = vcpu->arch.apf.nested_apf_token;
arch 400 arch/x86/kvm/vmx/nested.c vcpu->arch.exception.error_code)) {
arch 401 arch/x86/kvm/vmx/nested.c *exit_qual = has_payload ? payload : vcpu->arch.cr2;
arch 407 arch/x86/kvm/vmx/nested.c payload = vcpu->arch.dr6;
arch 849 arch/x86/kvm/vmx/nested.c if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
arch 1004 arch/x86/kvm/vmx/nested.c if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
arch 1014 arch/x86/kvm/vmx/nested.c vcpu->arch.cr3 = cr3;
arch 1015 arch/x86/kvm/vmx/nested.c __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
arch 1951 arch/x86/kvm/vmx/nested.c if (vcpu->arch.virtual_tsc_khz == 0)
arch 1956 arch/x86/kvm/vmx/nested.c do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
arch 1967 arch/x86/kvm/vmx/nested.c return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
arch 1969 arch/x86/kvm/vmx/nested.c return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
arch 2327 arch/x86/kvm/vmx/nested.c kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
arch 2340 arch/x86/kvm/vmx/nested.c vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
arch 2341 arch/x86/kvm/vmx/nested.c vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
arch 2346 arch/x86/kvm/vmx/nested.c vcpu->arch.pat = vmcs12->guest_ia32_pat;
arch 2348 arch/x86/kvm/vmx/nested.c vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
arch 2351 arch/x86/kvm/vmx/nested.c vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
arch 2403 arch/x86/kvm/vmx/nested.c vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
arch 2405 arch/x86/kvm/vmx/nested.c vmx_set_efer(vcpu, vcpu->arch.efer);
arch 2442 arch/x86/kvm/vmx/nested.c vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
arch 2679 arch/x86/kvm/vmx/nested.c ia32e = !!(vcpu->arch.efer & EFER_LMA);
arch 3035 arch/x86/kvm/vmx/nested.c u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
arch 3090 arch/x86/kvm/vmx/nested.c vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
arch 3111 arch/x86/kvm/vmx/nested.c vcpu->arch.tsc_offset += vmcs12->tsc_offset;
arch 3175 arch/x86/kvm/vmx/nested.c vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
arch 3266 arch/x86/kvm/vmx/nested.c vmx->vcpu.arch.l1tf_flush_l1d = true;
arch 3326 arch/x86/kvm/vmx/nested.c /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
arch 3329 arch/x86/kvm/vmx/nested.c vcpu->arch.cr0_guest_owned_bits));
arch 3336 arch/x86/kvm/vmx/nested.c /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
arch 3339 arch/x86/kvm/vmx/nested.c vcpu->arch.cr4_guest_owned_bits));
arch 3348 arch/x86/kvm/vmx/nested.c if (vcpu->arch.exception.injected) {
arch 3349 arch/x86/kvm/vmx/nested.c nr = vcpu->arch.exception.nr;
arch 3354 arch/x86/kvm/vmx/nested.c vcpu->arch.event_exit_inst_len;
arch 3359 arch/x86/kvm/vmx/nested.c if (vcpu->arch.exception.has_error_code) {
arch 3362 arch/x86/kvm/vmx/nested.c vcpu->arch.exception.error_code;
arch 3366 arch/x86/kvm/vmx/nested.c } else if (vcpu->arch.nmi_injected) {
arch 3369 arch/x86/kvm/vmx/nested.c } else if (vcpu->arch.interrupt.injected) {
arch 3370 arch/x86/kvm/vmx/nested.c nr = vcpu->arch.interrupt.nr;
arch 3373 arch/x86/kvm/vmx/nested.c if (vcpu->arch.interrupt.soft) {
arch 3376 arch/x86/kvm/vmx/nested.c vcpu->arch.event_exit_inst_len;
arch 3443 arch/x86/kvm/vmx/nested.c unsigned int nr = vcpu->arch.exception.nr;
arch 3446 arch/x86/kvm/vmx/nested.c if (vcpu->arch.exception.has_error_code) {
arch 3447 arch/x86/kvm/vmx/nested.c vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
arch 3469 arch/x86/kvm/vmx/nested.c struct kvm_lapic *apic = vcpu->arch.apic;
arch 3479 arch/x86/kvm/vmx/nested.c if (vcpu->arch.exception.pending &&
arch 3495 arch/x86/kvm/vmx/nested.c if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
arch 3505 arch/x86/kvm/vmx/nested.c vcpu->arch.nmi_pending = 0;
arch 3530 arch/x86/kvm/vmx/nested.c value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
arch 3684 arch/x86/kvm/vmx/nested.c if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
arch 3725 arch/x86/kvm/vmx/nested.c vmcs12->guest_ia32_efer = vcpu->arch.efer;
arch 3782 arch/x86/kvm/vmx/nested.c vcpu->arch.nmi_injected = false;
arch 3803 arch/x86/kvm/vmx/nested.c vcpu->arch.efer = vmcs12->host_ia32_efer;
arch 3805 arch/x86/kvm/vmx/nested.c vcpu->arch.efer |= (EFER_LMA | EFER_LME);
arch 3807 arch/x86/kvm/vmx/nested.c vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
arch 3808 arch/x86/kvm/vmx/nested.c vmx_set_efer(vcpu, vcpu->arch.efer);
arch 3822 arch/x86/kvm/vmx/nested.c vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
arch 3826 arch/x86/kvm/vmx/nested.c vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
arch 3839 arch/x86/kvm/vmx/nested.c vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
arch 3874 arch/x86/kvm/vmx/nested.c vcpu->arch.pat = vmcs12->host_ia32_pat;
arch 3968 arch/x86/kvm/vmx/nested.c vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
arch 3989 arch/x86/kvm/vmx/nested.c vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
arch 3992 arch/x86/kvm/vmx/nested.c vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
arch 3996 arch/x86/kvm/vmx/nested.c vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
arch 3997 arch/x86/kvm/vmx/nested.c __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
arch 4088 arch/x86/kvm/vmx/nested.c vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
arch 4124 arch/x86/kvm/vmx/nested.c vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
arch 4157 arch/x86/kvm/vmx/nested.c vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
arch 4519 arch/x86/kvm/vmx/nested.c kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
arch 5042 arch/x86/kvm/vmx/nested.c struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
arch 5365 arch/x86/kvm/vmx/nested.c return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
arch 540 arch/x86/kvm/vmx/vmx.c &vcpu->kvm->arch.hyperv.hv_pa_pg;
arch 706 arch/x86/kvm/vmx/vmx.c if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
arch 707 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
arch 946 arch/x86/kvm/vmx/vmx.c u64 guest_efer = vmx->vcpu.arch.efer;
arch 970 arch/x86/kvm/vmx/vmx.c (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
arch 1357 arch/x86/kvm/vmx/vmx.c vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
arch 1408 arch/x86/kvm/vmx/vmx.c if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
arch 1409 arch/x86/kvm/vmx/vmx.c __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
arch 1425 arch/x86/kvm/vmx/vmx.c __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
arch 1582 arch/x86/kvm/vmx/vmx.c unsigned nr = vcpu->arch.exception.nr;
arch 1583 arch/x86/kvm/vmx/vmx.c bool has_error_code = vcpu->arch.exception.has_error_code;
arch 1584 arch/x86/kvm/vmx/vmx.c u32 error_code = vcpu->arch.exception.error_code;
arch 1597 arch/x86/kvm/vmx/vmx.c inc_eip = vcpu->arch.event_exit_inst_len;
arch 1606 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.event_exit_inst_len);
arch 1653 arch/x86/kvm/vmx/vmx.c if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
arch 1685 arch/x86/kvm/vmx/vmx.c return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
arch 1687 arch/x86/kvm/vmx/vmx.c return vcpu->arch.tsc_offset;
arch 1706 arch/x86/kvm/vmx/vmx.c vcpu->arch.tsc_offset - g_tsc_offset,
arch 1804 arch/x86/kvm/vmx/vmx.c msr_info->data = vcpu->arch.mcg_ext_ctl;
arch 1820 arch/x86/kvm/vmx/vmx.c msr_info->data = vcpu->arch.ia32_xss;
arch 2026 arch/x86/kvm/vmx/vmx.c vcpu->arch.pat = data;
arch 2040 arch/x86/kvm/vmx/vmx.c vcpu->arch.mcg_ext_ctl = data;
arch 2069 arch/x86/kvm/vmx/vmx.c vcpu->arch.ia32_xss = data;
arch 2070 arch/x86/kvm/vmx/vmx.c if (vcpu->arch.ia32_xss != host_xss)
arch 2072 arch/x86/kvm/vmx/vmx.c vcpu->arch.ia32_xss, host_xss, false);
arch 2166 arch/x86/kvm/vmx/vmx.c __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
arch 2169 arch/x86/kvm/vmx/vmx.c vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
arch 2172 arch/x86/kvm/vmx/vmx.c vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
arch 2773 arch/x86/kvm/vmx/vmx.c vcpu->arch.efer = efer;
arch 2801 arch/x86/kvm/vmx/vmx.c vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
arch 2807 arch/x86/kvm/vmx/vmx.c vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
arch 2828 arch/x86/kvm/vmx/vmx.c ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
arch 2830 arch/x86/kvm/vmx/vmx.c vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
arch 2831 arch/x86/kvm/vmx/vmx.c vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
arch 2837 arch/x86/kvm/vmx/vmx.c vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
arch 2838 arch/x86/kvm/vmx/vmx.c __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
arch 2843 arch/x86/kvm/vmx/vmx.c ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
arch 2845 arch/x86/kvm/vmx/vmx.c vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
arch 2846 arch/x86/kvm/vmx/vmx.c vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
arch 2851 arch/x86/kvm/vmx/vmx.c struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
arch 2854 arch/x86/kvm/vmx/vmx.c (unsigned long *)&vcpu->arch.regs_dirty))
arch 2867 arch/x86/kvm/vmx/vmx.c struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
arch 2877 arch/x86/kvm/vmx/vmx.c (unsigned long *)&vcpu->arch.regs_avail);
arch 2879 arch/x86/kvm/vmx/vmx.c (unsigned long *)&vcpu->arch.regs_dirty);
arch 2888 arch/x86/kvm/vmx/vmx.c if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
arch 2894 arch/x86/kvm/vmx/vmx.c vcpu->arch.cr0 = cr0;
arch 2900 arch/x86/kvm/vmx/vmx.c vcpu->arch.cr0 = cr0;
arch 2927 arch/x86/kvm/vmx/vmx.c if (vcpu->arch.efer & EFER_LME) {
arch 2940 arch/x86/kvm/vmx/vmx.c vcpu->arch.cr0 = cr0;
arch 3047 arch/x86/kvm/vmx/vmx.c vcpu->arch.cr4 = cr4;
arch 3523 arch/x86/kvm/vmx/vmx.c if (kvm->arch.apic_access_page_done)
arch 3541 arch/x86/kvm/vmx/vmx.c kvm->arch.apic_access_page_done = true;
arch 3835 arch/x86/kvm/vmx/vmx.c if (!vcpu->arch.apicv_active)
arch 3916 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
arch 3918 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
arch 3920 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.cr4_guest_owned_bits &=
arch 3922 arch/x86/kvm/vmx/vmx.c vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
arch 3965 arch/x86/kvm/vmx/vmx.c if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
arch 4208 arch/x86/kvm/vmx/vmx.c vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
arch 4230 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
arch 4265 arch/x86/kvm/vmx/vmx.c vcpu->arch.microcode_version = 0x100000000ULL;
arch 4266 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
arch 4331 arch/x86/kvm/vmx/vmx.c __pa(vcpu->arch.apic->regs));
arch 4341 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.cr0 = cr0;
arch 4373 arch/x86/kvm/vmx/vmx.c int irq = vcpu->arch.interrupt.nr;
arch 4380 arch/x86/kvm/vmx/vmx.c if (vcpu->arch.interrupt.soft)
arch 4381 arch/x86/kvm/vmx/vmx.c inc_eip = vcpu->arch.event_exit_inst_len;
arch 4386 arch/x86/kvm/vmx/vmx.c if (vcpu->arch.interrupt.soft) {
arch 4389 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.event_exit_inst_len);
arch 4518 arch/x86/kvm/vmx/vmx.c to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
arch 4551 arch/x86/kvm/vmx/vmx.c if (vcpu->arch.halt_request) {
arch 4552 arch/x86/kvm/vmx/vmx.c vcpu->arch.halt_request = 0;
arch 4649 arch/x86/kvm/vmx/vmx.c WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
arch 4666 arch/x86/kvm/vmx/vmx.c vcpu->arch.dr6 &= ~DR_TRAP_BITS;
arch 4667 arch/x86/kvm/vmx/vmx.c vcpu->arch.dr6 |= dr6 | DR6_RTM;
arch 4674 arch/x86/kvm/vmx/vmx.c kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
arch 4675 arch/x86/kvm/vmx/vmx.c kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
arch 4683 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.event_exit_inst_len =
arch 4687 arch/x86/kvm/vmx/vmx.c kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
arch 4688 arch/x86/kvm/vmx/vmx.c kvm_run->debug.arch.exception = ex_no;
arch 4797 arch/x86/kvm/vmx/vmx.c WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
arch 4904 arch/x86/kvm/vmx/vmx.c vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
arch 4905 arch/x86/kvm/vmx/vmx.c vcpu->run->debug.arch.dr7 = dr7;
arch 4906 arch/x86/kvm/vmx/vmx.c vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
arch 4907 arch/x86/kvm/vmx/vmx.c vcpu->run->debug.arch.exception = DB_VECTOR;
arch 4911 arch/x86/kvm/vmx/vmx.c vcpu->arch.dr6 &= ~DR_TRAP_BITS;
arch 4912 arch/x86/kvm/vmx/vmx.c vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
arch 4926 arch/x86/kvm/vmx/vmx.c vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
arch 4946 arch/x86/kvm/vmx/vmx.c return vcpu->arch.dr6;
arch 4955 arch/x86/kvm/vmx/vmx.c get_debugreg(vcpu->arch.db[0], 0);
arch 4956 arch/x86/kvm/vmx/vmx.c get_debugreg(vcpu->arch.db[1], 1);
arch 4957 arch/x86/kvm/vmx/vmx.c get_debugreg(vcpu->arch.db[2], 2);
arch 4958 arch/x86/kvm/vmx/vmx.c get_debugreg(vcpu->arch.db[3], 3);
arch 4959 arch/x86/kvm/vmx/vmx.c get_debugreg(vcpu->arch.dr6, 6);
arch 4960 arch/x86/kvm/vmx/vmx.c vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
arch 4962 arch/x86/kvm/vmx/vmx.c vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
arch 5109 arch/x86/kvm/vmx/vmx.c vcpu->arch.nmi_injected = false;
arch 5187 arch/x86/kvm/vmx/vmx.c vcpu->arch.exit_qualification = exit_qualification;
arch 5246 arch/x86/kvm/vmx/vmx.c vcpu->arch.exception.pending) {
arch 5254 arch/x86/kvm/vmx/vmx.c if (vcpu->arch.halt_request) {
arch 5255 arch/x86/kvm/vmx/vmx.c vcpu->arch.halt_request = 0;
arch 5453 arch/x86/kvm/vmx/vmx.c if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
arch 5457 arch/x86/kvm/vmx/vmx.c kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
arch 5902 arch/x86/kvm/vmx/vmx.c vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
arch 5916 arch/x86/kvm/vmx/vmx.c vcpu->arch.nmi_pending) {
arch 5972 arch/x86/kvm/vmx/vmx.c flush_l1d = vcpu->arch.l1tf_flush_l1d;
arch 5973 arch/x86/kvm/vmx/vmx.c vcpu->arch.l1tf_flush_l1d = false;
arch 6139 arch/x86/kvm/vmx/vmx.c WARN_ON(!vcpu->arch.apicv_active);
arch 6204 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
arch 6352 arch/x86/kvm/vmx/vmx.c vcpu->arch.nmi_injected = false;
arch 6366 arch/x86/kvm/vmx/vmx.c vcpu->arch.nmi_injected = true;
arch 6375 arch/x86/kvm/vmx/vmx.c vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
arch 6385 arch/x86/kvm/vmx/vmx.c vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
arch 6506 arch/x86/kvm/vmx/vmx.c if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
arch 6507 arch/x86/kvm/vmx/vmx.c vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
arch 6508 arch/x86/kvm/vmx/vmx.c if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
arch 6509 arch/x86/kvm/vmx/vmx.c vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
arch 6542 arch/x86/kvm/vmx/vmx.c vcpu->arch.apic->lapic_timer.timer_advance_ns)
arch 6559 arch/x86/kvm/vmx/vmx.c if (vcpu->arch.cr2 != read_cr2())
arch 6560 arch/x86/kvm/vmx/vmx.c write_cr2(vcpu->arch.cr2);
arch 6562 arch/x86/kvm/vmx/vmx.c vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
arch 6565 arch/x86/kvm/vmx/vmx.c vcpu->arch.cr2 = read_cr2();
arch 6593 arch/x86/kvm/vmx/vmx.c current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index;
arch 6612 arch/x86/kvm/vmx/vmx.c vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
arch 6617 arch/x86/kvm/vmx/vmx.c vcpu->arch.regs_dirty = 0;
arch 6654 arch/x86/kvm/vmx/vmx.c kfree(kvm->arch.hyperv.hv_pa_pg);
arch 6669 arch/x86/kvm/vmx/vmx.c kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
arch 6670 arch/x86/kvm/vmx/vmx.c kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
arch 6688 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
arch 6690 arch/x86/kvm/vmx/vmx.c if (!vmx->vcpu.arch.user_fpu) {
arch 6696 arch/x86/kvm/vmx/vmx.c vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
arch 6698 arch/x86/kvm/vmx/vmx.c if (!vmx->vcpu.arch.guest_fpu) {
arch 6802 arch/x86/kvm/vmx/vmx.c kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
arch 6804 arch/x86/kvm/vmx/vmx.c kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
arch 6818 arch/x86/kvm/vmx/vmx.c kvm->arch.pause_in_guest = true;
arch 7139 arch/x86/kvm/vmx/vmx.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
arch 7206 arch/x86/kvm/vmx/vmx.c struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
arch 7225 arch/x86/kvm/vmx/vmx.c if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
arch 7228 arch/x86/kvm/vmx/vmx.c vcpu->arch.tsc_scaling_ratio, &delta_tsc))
arch 7545 arch/x86/kvm/vmx/vmx.c if (vcpu->arch.mcg_cap & MCG_LMCE_P)
arch 495 arch/x86/kvm/vmx/vmx.h if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
arch 498 arch/x86/kvm/vmx/vmx.h vcpu->arch.mmu->root_hpa));
arch 511 arch/x86/kvm/vmx/vmx.h vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
arch 82 arch/x86/kvm/x86.c container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
arch 235 arch/x86/kvm/x86.c vcpu->arch.apf.gfns[i] = ~0;
arch 333 arch/x86/kvm/x86.c return vcpu->arch.apic_base;
arch 419 arch/x86/kvm/x86.c unsigned nr = vcpu->arch.exception.nr;
arch 420 arch/x86/kvm/x86.c bool has_payload = vcpu->arch.exception.has_payload;
arch 421 arch/x86/kvm/x86.c unsigned long payload = vcpu->arch.exception.payload;
arch 433 arch/x86/kvm/x86.c vcpu->arch.dr6 &= ~DR_TRAP_BITS;
arch 437 arch/x86/kvm/x86.c vcpu->arch.dr6 |= DR6_RTM;
arch 438 arch/x86/kvm/x86.c vcpu->arch.dr6 |= payload;
arch 447 arch/x86/kvm/x86.c vcpu->arch.dr6 ^= payload & DR6_RTM;
arch 455 arch/x86/kvm/x86.c vcpu->arch.dr6 &= ~BIT(12);
arch 458 arch/x86/kvm/x86.c vcpu->arch.cr2 = payload;
arch 462 arch/x86/kvm/x86.c vcpu->arch.exception.has_payload = false;
arch 463 arch/x86/kvm/x86.c vcpu->arch.exception.payload = 0;
arch 476 arch/x86/kvm/x86.c if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
arch 489 arch/x86/kvm/x86.c WARN_ON_ONCE(vcpu->arch.exception.pending);
arch 490 arch/x86/kvm/x86.c vcpu->arch.exception.injected = true;
arch 500 arch/x86/kvm/x86.c vcpu->arch.exception.pending = true;
arch 501 arch/x86/kvm/x86.c vcpu->arch.exception.injected = false;
arch 503 arch/x86/kvm/x86.c vcpu->arch.exception.has_error_code = has_error;
arch 504 arch/x86/kvm/x86.c vcpu->arch.exception.nr = nr;
arch 505 arch/x86/kvm/x86.c vcpu->arch.exception.error_code = error_code;
arch 506 arch/x86/kvm/x86.c vcpu->arch.exception.has_payload = has_payload;
arch 507 arch/x86/kvm/x86.c vcpu->arch.exception.payload = payload;
arch 519 arch/x86/kvm/x86.c if (!vcpu->kvm->arch.exception_payload_enabled ||
arch 526 arch/x86/kvm/x86.c prev_nr = vcpu->arch.exception.nr;
arch 541 arch/x86/kvm/x86.c vcpu->arch.exception.pending = true;
arch 542 arch/x86/kvm/x86.c vcpu->arch.exception.injected = false;
arch 543 arch/x86/kvm/x86.c vcpu->arch.exception.has_error_code = true;
arch 544 arch/x86/kvm/x86.c vcpu->arch.exception.nr = DF_VECTOR;
arch 545 arch/x86/kvm/x86.c vcpu->arch.exception.error_code = 0;
arch 546 arch/x86/kvm/x86.c vcpu->arch.exception.has_payload = false;
arch 547 arch/x86/kvm/x86.c vcpu->arch.exception.payload = 0;
arch 594 arch/x86/kvm/x86.c vcpu->arch.exception.nested_apf =
arch 596 arch/x86/kvm/x86.c if (vcpu->arch.exception.nested_apf) {
arch 597 arch/x86/kvm/x86.c vcpu->arch.apf.nested_apf_token = fault->address;
arch 609 arch/x86/kvm/x86.c vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
arch 611 arch/x86/kvm/x86.c vcpu->arch.mmu->inject_page_fault(vcpu, fault);
arch 618 arch/x86/kvm/x86.c atomic_inc(&vcpu->arch.nmi_queued);
arch 685 arch/x86/kvm/x86.c return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
arch 724 arch/x86/kvm/x86.c (unsigned long *)&vcpu->arch.regs_avail);
arch 726 arch/x86/kvm/x86.c (unsigned long *)&vcpu->arch.regs_dirty);
arch 735 arch/x86/kvm/x86.c u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
arch 745 arch/x86/kvm/x86.c (unsigned long *)&vcpu->arch.regs_avail))
arch 754 arch/x86/kvm/x86.c changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
arch 783 arch/x86/kvm/x86.c if ((vcpu->arch.efer & EFER_LME)) {
arch 793 arch/x86/kvm/x86.c if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
arch 831 arch/x86/kvm/x86.c if (vcpu->arch.xcr0 != host_xcr0)
arch 832 arch/x86/kvm/x86.c xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
arch 838 arch/x86/kvm/x86.c (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
arch 839 arch/x86/kvm/x86.c vcpu->arch.pkru != vcpu->arch.host_pkru)
arch 840 arch/x86/kvm/x86.c __write_pkru(vcpu->arch.pkru);
arch 848 arch/x86/kvm/x86.c (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
arch 849 arch/x86/kvm/x86.c vcpu->arch.pkru = rdpkru();
arch 850 arch/x86/kvm/x86.c if (vcpu->arch.pkru != vcpu->arch.host_pkru)
arch 851 arch/x86/kvm/x86.c __write_pkru(vcpu->arch.host_pkru);
arch 855 arch/x86/kvm/x86.c if (vcpu->arch.xcr0 != host_xcr0)
arch 865 arch/x86/kvm/x86.c u64 old_xcr0 = vcpu->arch.xcr0;
arch 881 arch/x86/kvm/x86.c valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
arch 895 arch/x86/kvm/x86.c vcpu->arch.xcr0 = xcr0;
arch 985 arch/x86/kvm/x86.c && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
arch 1036 arch/x86/kvm/x86.c !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
arch 1040 arch/x86/kvm/x86.c vcpu->arch.cr3 = cr3;
arch 1041 arch/x86/kvm/x86.c __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
arch 1054 arch/x86/kvm/x86.c vcpu->arch.cr8 = cr8;
arch 1064 arch/x86/kvm/x86.c return vcpu->arch.cr8;
arch 1074 arch/x86/kvm/x86.c vcpu->arch.eff_db[i] = vcpu->arch.db[i];
arch 1075 arch/x86/kvm/x86.c vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
arch 1082 arch/x86/kvm/x86.c kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
arch 1090 arch/x86/kvm/x86.c dr7 = vcpu->arch.guest_debug_dr7;
arch 1092 arch/x86/kvm/x86.c dr7 = vcpu->arch.dr7;
arch 1094 arch/x86/kvm/x86.c vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
arch 1096 arch/x86/kvm/x86.c vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
arch 1110 arch/x86/kvm/x86.c size_t size = ARRAY_SIZE(vcpu->arch.db);
arch 1114 arch/x86/kvm/x86.c vcpu->arch.db[array_index_nospec(dr, size)] = val;
arch 1116 arch/x86/kvm/x86.c vcpu->arch.eff_db[dr] = val;
arch 1123 arch/x86/kvm/x86.c vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
arch 1131 arch/x86/kvm/x86.c vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
arch 1151 arch/x86/kvm/x86.c size_t size = ARRAY_SIZE(vcpu->arch.db);
arch 1155 arch/x86/kvm/x86.c *val = vcpu->arch.db[array_index_nospec(dr, size)];
arch 1161 arch/x86/kvm/x86.c *val = vcpu->arch.dr6;
arch 1168 arch/x86/kvm/x86.c *val = vcpu->arch.dr7;
arch 1460 arch/x86/kvm/x86.c u64 old_efer = vcpu->arch.efer;
arch 1471 arch/x86/kvm/x86.c (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
arch 1476 arch/x86/kvm/x86.c efer |= vcpu->arch.efer & EFER_LMA;
arch 1698 arch/x86/kvm/x86.c if (kvm->arch.kvmclock_offset) {
arch 1699 arch/x86/kvm/x86.c struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
arch 1766 arch/x86/kvm/x86.c vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
arch 1773 arch/x86/kvm/x86.c vcpu->arch.tsc_catchup = 1;
arch 1774 arch/x86/kvm/x86.c vcpu->arch.tsc_always_catchup = 1;
arch 1792 arch/x86/kvm/x86.c vcpu->arch.tsc_scaling_ratio = ratio;
arch 1804 arch/x86/kvm/x86.c vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
arch 1810 arch/x86/kvm/x86.c &vcpu->arch.virtual_tsc_shift,
arch 1811 arch/x86/kvm/x86.c &vcpu->arch.virtual_tsc_mult);
arch 1812 arch/x86/kvm/x86.c vcpu->arch.virtual_tsc_khz = user_tsc_khz;
arch 1831 arch/x86/kvm/x86.c u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
arch 1832 arch/x86/kvm/x86.c vcpu->arch.virtual_tsc_mult,
arch 1833 arch/x86/kvm/x86.c vcpu->arch.virtual_tsc_shift);
arch 1834 arch/x86/kvm/x86.c tsc += vcpu->arch.this_tsc_write;
arch 1847 arch/x86/kvm/x86.c struct kvm_arch *ka = &vcpu->kvm->arch;
arch 1874 arch/x86/kvm/x86.c vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
arch 1895 arch/x86/kvm/x86.c u64 ratio = vcpu->arch.tsc_scaling_ratio;
arch 1923 arch/x86/kvm/x86.c vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
arch 1949 arch/x86/kvm/x86.c raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
arch 1952 arch/x86/kvm/x86.c elapsed = ns - kvm->arch.last_tsc_nsec;
arch 1954 arch/x86/kvm/x86.c if (vcpu->arch.virtual_tsc_khz) {
arch 1963 arch/x86/kvm/x86.c u64 tsc_exp = kvm->arch.last_tsc_write +
arch 1965 arch/x86/kvm/x86.c u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
arch 1983 arch/x86/kvm/x86.c vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
arch 1985 arch/x86/kvm/x86.c offset = kvm->arch.cur_tsc_offset;
arch 1992 arch/x86/kvm/x86.c already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
arch 2003 arch/x86/kvm/x86.c kvm->arch.cur_tsc_generation++;
arch 2004 arch/x86/kvm/x86.c kvm->arch.cur_tsc_nsec = ns;
arch 2005 arch/x86/kvm/x86.c kvm->arch.cur_tsc_write = data;
arch 2006 arch/x86/kvm/x86.c kvm->arch.cur_tsc_offset = offset;
arch 2014 arch/x86/kvm/x86.c kvm->arch.last_tsc_nsec = ns;
arch 2015 arch/x86/kvm/x86.c kvm->arch.last_tsc_write = data;
arch 2016 arch/x86/kvm/x86.c kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
arch 2018 arch/x86/kvm/x86.c vcpu->arch.last_guest_tsc = data;
arch 2021 arch/x86/kvm/x86.c vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
arch 2022 arch/x86/kvm/x86.c vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
arch 2023 arch/x86/kvm/x86.c vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
arch 2029 arch/x86/kvm/x86.c raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
arch 2031 arch/x86/kvm/x86.c spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
arch 2033 arch/x86/kvm/x86.c kvm->arch.nr_vcpus_matched_tsc = 0;
arch 2035 arch/x86/kvm/x86.c kvm->arch.nr_vcpus_matched_tsc++;
arch 2039 arch/x86/kvm/x86.c spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
arch 2053 arch/x86/kvm/x86.c if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
arch 2224 arch/x86/kvm/x86.c struct kvm_arch *ka = &kvm->arch;
arch 2262 arch/x86/kvm/x86.c struct kvm_arch *ka = &kvm->arch;
arch 2282 arch/x86/kvm/x86.c struct kvm_arch *ka = &kvm->arch;
arch 2314 arch/x86/kvm/x86.c struct kvm_vcpu_arch *vcpu = &v->arch;
arch 2372 arch/x86/kvm/x86.c struct kvm_vcpu_arch *vcpu = &v->arch;
arch 2373 arch/x86/kvm/x86.c struct kvm_arch *ka = &v->kvm->arch;
arch 2442 arch/x86/kvm/x86.c vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
arch 2481 arch/x86/kvm/x86.c struct kvm *kvm = container_of(ka, struct kvm, arch);
arch 2495 arch/x86/kvm/x86.c schedule_delayed_work(&kvm->arch.kvmclock_update_work,
arch 2506 arch/x86/kvm/x86.c struct kvm *kvm = container_of(ka, struct kvm, arch);
arch 2511 arch/x86/kvm/x86.c schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
arch 2512 arch/x86/kvm/x86.c schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
arch 2523 arch/x86/kvm/x86.c return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
arch 2530 arch/x86/kvm/x86.c u64 mcg_cap = vcpu->arch.mcg_cap;
arch 2537 arch/x86/kvm/x86.c vcpu->arch.mcg_status = data;
arch 2545 arch/x86/kvm/x86.c vcpu->arch.mcg_ctl = data;
arch 2570 arch/x86/kvm/x86.c vcpu->arch.mce_banks[offset] = data;
arch 2582 arch/x86/kvm/x86.c u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
arch 2583 arch/x86/kvm/x86.c : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
arch 2584 arch/x86/kvm/x86.c u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
arch 2585 arch/x86/kvm/x86.c : kvm->arch.xen_hvm_config.blob_size_32;
arch 2617 arch/x86/kvm/x86.c vcpu->arch.apf.msr_val = data;
arch 2625 arch/x86/kvm/x86.c if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
arch 2629 arch/x86/kvm/x86.c vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
arch 2630 arch/x86/kvm/x86.c vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
arch 2637 arch/x86/kvm/x86.c vcpu->arch.pv_time_enabled = false;
arch 2638 arch/x86/kvm/x86.c vcpu->arch.time = 0;
arch 2652 arch/x86/kvm/x86.c if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
arch 2656 arch/x86/kvm/x86.c if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
arch 2657 arch/x86/kvm/x86.c &map, &vcpu->arch.st.cache, false))
arch 2661 arch/x86/kvm/x86.c offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
arch 2672 arch/x86/kvm/x86.c vcpu->arch.st.preempted = 0;
arch 2682 arch/x86/kvm/x86.c vcpu->arch.st.last_steal;
arch 2683 arch/x86/kvm/x86.c vcpu->arch.st.last_steal = current->sched_info.run_delay;
arch 2689 arch/x86/kvm/x86.c kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
arch 2710 arch/x86/kvm/x86.c vcpu->arch.microcode_version = data;
arch 2715 arch/x86/kvm/x86.c vcpu->arch.arch_capabilities = data;
arch 2726 arch/x86/kvm/x86.c vcpu->arch.msr_hwcr = data;
arch 2764 arch/x86/kvm/x86.c s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
arch 2767 arch/x86/kvm/x86.c vcpu->arch.ia32_tsc_adjust_msr = data;
arch 2772 arch/x86/kvm/x86.c ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
arch 2775 arch/x86/kvm/x86.c vcpu->arch.ia32_misc_enable_msr = data;
arch 2778 arch/x86/kvm/x86.c vcpu->arch.ia32_misc_enable_msr = data;
arch 2784 arch/x86/kvm/x86.c vcpu->arch.smbase = data;
arch 2787 arch/x86/kvm/x86.c vcpu->arch.msr_ia32_power_ctl = data;
arch 2795 arch/x86/kvm/x86.c vcpu->arch.smi_count = data;
arch 2799 arch/x86/kvm/x86.c vcpu->kvm->arch.wall_clock = data;
arch 2804 arch/x86/kvm/x86.c struct kvm_arch *ka = &vcpu->kvm->arch;
arch 2815 arch/x86/kvm/x86.c vcpu->arch.time = data;
arch 2819 arch/x86/kvm/x86.c vcpu->arch.pv_time_enabled = false;
arch 2824 arch/x86/kvm/x86.c &vcpu->arch.pv_time, data & ~1ULL,
arch 2826 arch/x86/kvm/x86.c vcpu->arch.pv_time_enabled = true;
arch 2842 arch/x86/kvm/x86.c vcpu->arch.st.msr_val = data;
arch 2860 arch/x86/kvm/x86.c vcpu->arch.msr_kvm_poll_control = data;
arch 2910 arch/x86/kvm/x86.c vcpu->arch.osvw.length = data;
arch 2915 arch/x86/kvm/x86.c vcpu->arch.osvw.status = data;
arch 2922 arch/x86/kvm/x86.c vcpu->arch.msr_platform_info = data;
arch 2929 arch/x86/kvm/x86.c vcpu->arch.msr_misc_features_enables = data;
arch 2932 arch/x86/kvm/x86.c if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
arch 2955 arch/x86/kvm/x86.c u64 mcg_cap = vcpu->arch.mcg_cap;
arch 2964 arch/x86/kvm/x86.c data = vcpu->arch.mcg_cap;
arch 2969 arch/x86/kvm/x86.c data = vcpu->arch.mcg_ctl;
arch 2972 arch/x86/kvm/x86.c data = vcpu->arch.mcg_status;
arch 2981 arch/x86/kvm/x86.c data = vcpu->arch.mce_banks[offset];
arch 3023 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.microcode_version;
arch 3029 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.arch_capabilities;
arch 3032 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.msr_ia32_power_ctl;
arch 3035 arch/x86/kvm/x86.c msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
arch 3067 arch/x86/kvm/x86.c msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
arch 3070 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.ia32_misc_enable_msr;
arch 3075 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.smbase;
arch 3078 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.smi_count;
arch 3087 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.efer;
arch 3091 arch/x86/kvm/x86.c msr_info->data = vcpu->kvm->arch.wall_clock;
arch 3095 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.time;
arch 3098 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.apf.msr_val;
arch 3101 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.st.msr_val;
arch 3104 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.pv_eoi.msr_val;
arch 3107 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.msr_kvm_poll_control;
arch 3156 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.osvw.length;
arch 3161 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.osvw.status;
arch 3165 arch/x86/kvm/x86.c !vcpu->kvm->arch.guest_can_read_msr_platform_info)
arch 3167 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.msr_platform_info;
arch 3170 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.msr_misc_features_enables;
arch 3173 arch/x86/kvm/x86.c msr_info->data = vcpu->arch.msr_hwcr;
arch 3498 arch/x86/kvm/x86.c cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
arch 3507 arch/x86/kvm/x86.c if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
arch 3508 arch/x86/kvm/x86.c adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
arch 3509 arch/x86/kvm/x86.c vcpu->arch.tsc_offset_adjustment = 0;
arch 3514 arch/x86/kvm/x86.c s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
arch 3515 arch/x86/kvm/x86.c rdtsc() - vcpu->arch.last_host_tsc;
arch 3521 arch/x86/kvm/x86.c vcpu->arch.last_guest_tsc);
arch 3523 arch/x86/kvm/x86.c vcpu->arch.tsc_catchup = 1;
arch 3533 arch/x86/kvm/x86.c if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
arch 3548 arch/x86/kvm/x86.c if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
arch 3551 arch/x86/kvm/x86.c if (vcpu->arch.st.preempted)
arch 3554 arch/x86/kvm/x86.c if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
arch 3555 arch/x86/kvm/x86.c &vcpu->arch.st.cache, true))
arch 3559 arch/x86/kvm/x86.c offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
arch 3561 arch/x86/kvm/x86.c st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
arch 3563 arch/x86/kvm/x86.c kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
arch 3571 arch/x86/kvm/x86.c vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
arch 3591 arch/x86/kvm/x86.c vcpu->arch.last_host_tsc = rdtsc();
arch 3603 arch/x86/kvm/x86.c if (vcpu->arch.apicv_active)
arch 3661 arch/x86/kvm/x86.c if (vcpu->arch.pending_external_vector != -1)
arch 3664 arch/x86/kvm/x86.c vcpu->arch.pending_external_vector = irq->irq;
arch 3688 arch/x86/kvm/x86.c vcpu->arch.tpr_access_reporting = !!tac->enabled;
arch 3704 arch/x86/kvm/x86.c vcpu->arch.mcg_cap = mcg_cap;
arch 3707 arch/x86/kvm/x86.c vcpu->arch.mcg_ctl = ~(u64)0;
arch 3710 arch/x86/kvm/x86.c vcpu->arch.mce_banks[bank*4] = ~(u64)0;
arch 3720 arch/x86/kvm/x86.c u64 mcg_cap = vcpu->arch.mcg_cap;
arch 3722 arch/x86/kvm/x86.c u64 *banks = vcpu->arch.mce_banks;
arch 3731 arch/x86/kvm/x86.c vcpu->arch.mcg_ctl != ~(u64)0)
arch 3741 arch/x86/kvm/x86.c if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
arch 3750 arch/x86/kvm/x86.c vcpu->arch.mcg_status = mce->mcg_status;
arch 3776 arch/x86/kvm/x86.c if (kvm_exception_is_soft(vcpu->arch.exception.nr)) {
arch 3780 arch/x86/kvm/x86.c events->exception.injected = vcpu->arch.exception.injected;
arch 3781 arch/x86/kvm/x86.c events->exception.pending = vcpu->arch.exception.pending;
arch 3787 arch/x86/kvm/x86.c if (!vcpu->kvm->arch.exception_payload_enabled)
arch 3789 arch/x86/kvm/x86.c vcpu->arch.exception.pending;
arch 3791 arch/x86/kvm/x86.c events->exception.nr = vcpu->arch.exception.nr;
arch 3792 arch/x86/kvm/x86.c events->exception.has_error_code = vcpu->arch.exception.has_error_code;
arch 3793 arch/x86/kvm/x86.c events->exception.error_code = vcpu->arch.exception.error_code;
arch 3794 arch/x86/kvm/x86.c events->exception_has_payload = vcpu->arch.exception.has_payload;
arch 3795 arch/x86/kvm/x86.c events->exception_payload = vcpu->arch.exception.payload;
arch 3798 arch/x86/kvm/x86.c vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
arch 3799 arch/x86/kvm/x86.c events->interrupt.nr = vcpu->arch.interrupt.nr;
arch 3803 arch/x86/kvm/x86.c events->nmi.injected = vcpu->arch.nmi_injected;
arch 3804 arch/x86/kvm/x86.c events->nmi.pending = vcpu->arch.nmi_pending != 0;
arch 3811 arch/x86/kvm/x86.c events->smi.pending = vcpu->arch.smi_pending;
arch 3813 arch/x86/kvm/x86.c !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
arch 3819 arch/x86/kvm/x86.c if (vcpu->kvm->arch.exception_payload_enabled)
arch 3838 arch/x86/kvm/x86.c if (!vcpu->kvm->arch.exception_payload_enabled)
arch 3856 arch/x86/kvm/x86.c vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
arch 3860 arch/x86/kvm/x86.c vcpu->arch.exception.injected = events->exception.injected;
arch 3861 arch/x86/kvm/x86.c vcpu->arch.exception.pending = events->exception.pending;
arch 3862 arch/x86/kvm/x86.c vcpu->arch.exception.nr = events->exception.nr;
arch 3863 arch/x86/kvm/x86.c vcpu->arch.exception.has_error_code = events->exception.has_error_code;
arch 3864 arch/x86/kvm/x86.c vcpu->arch.exception.error_code = events->exception.error_code;
arch 3865 arch/x86/kvm/x86.c vcpu->arch.exception.has_payload = events->exception_has_payload;
arch 3866 arch/x86/kvm/x86.c vcpu->arch.exception.payload = events->exception_payload;
arch 3868 arch/x86/kvm/x86.c vcpu->arch.interrupt.injected = events->interrupt.injected;
arch 3869 arch/x86/kvm/x86.c vcpu->arch.interrupt.nr = events->interrupt.nr;
arch 3870 arch/x86/kvm/x86.c vcpu->arch.interrupt.soft = events->interrupt.soft;
arch 3875 arch/x86/kvm/x86.c vcpu->arch.nmi_injected = events->nmi.injected;
arch 3877 arch/x86/kvm/x86.c vcpu->arch.nmi_pending = events->nmi.pending;
arch 3882 arch/x86/kvm/x86.c vcpu->arch.apic->sipi_vector = events->sipi_vector;
arch 3885 arch/x86/kvm/x86.c if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
arch 3887 arch/x86/kvm/x86.c vcpu->arch.hflags |= HF_SMM_MASK;
arch 3889 arch/x86/kvm/x86.c vcpu->arch.hflags &= ~HF_SMM_MASK;
arch 3893 arch/x86/kvm/x86.c vcpu->arch.smi_pending = events->smi.pending;
arch 3897 arch/x86/kvm/x86.c vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
arch 3899 arch/x86/kvm/x86.c vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
arch 3902 arch/x86/kvm/x86.c set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
arch 3904 arch/x86/kvm/x86.c clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
arch 3919 arch/x86/kvm/x86.c memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
arch 3922 arch/x86/kvm/x86.c dbgregs->dr7 = vcpu->arch.dr7;
arch 3938 arch/x86/kvm/x86.c memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
arch 3940 arch/x86/kvm/x86.c vcpu->arch.dr6 = dbgregs->dr6;
arch 3942 arch/x86/kvm/x86.c vcpu->arch.dr7 = dbgregs->dr7;
arch 3952 arch/x86/kvm/x86.c struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
arch 3963 arch/x86/kvm/x86.c xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
arch 3981 arch/x86/kvm/x86.c memcpy(dest + offset, &vcpu->arch.pkru,
arch 3982 arch/x86/kvm/x86.c sizeof(vcpu->arch.pkru));
arch 3994 arch/x86/kvm/x86.c struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
arch 4024 arch/x86/kvm/x86.c memcpy(&vcpu->arch.pkru, src + offset,
arch 4025 arch/x86/kvm/x86.c sizeof(vcpu->arch.pkru));
arch 4042 arch/x86/kvm/x86.c &vcpu->arch.guest_fpu->state.fxsave,
arch 4072 arch/x86/kvm/x86.c memcpy(&vcpu->arch.guest_fpu->state.fxsave,
arch 4089 arch/x86/kvm/x86.c guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
arch 4123 arch/x86/kvm/x86.c if (!vcpu->arch.pv_time_enabled)
arch 4125 arch/x86/kvm/x86.c vcpu->arch.pvclock_set_guest_stopped_request = true;
arch 4448 arch/x86/kvm/x86.c r = vcpu->arch.virtual_tsc_khz;
arch 4584 arch/x86/kvm/x86.c kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
arch 4592 arch/x86/kvm/x86.c return kvm->arch.n_max_mmu_pages;
arch 4597 arch/x86/kvm/x86.c struct kvm_pic *pic = kvm->arch.vpic;
arch 4622 arch/x86/kvm/x86.c struct kvm_pic *pic = kvm->arch.vpic;
arch 4652 arch/x86/kvm/x86.c struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
arch 4665 arch/x86/kvm/x86.c struct kvm_pit *pit = kvm->arch.vpit;
arch 4677 arch/x86/kvm/x86.c mutex_lock(&kvm->arch.vpit->pit_state.lock);
arch 4678 arch/x86/kvm/x86.c memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
arch 4680 arch/x86/kvm/x86.c ps->flags = kvm->arch.vpit->pit_state.flags;
arch 4681 arch/x86/kvm/x86.c mutex_unlock(&kvm->arch.vpit->pit_state.lock);
arch 4691 arch/x86/kvm/x86.c struct kvm_pit *pit = kvm->arch.vpit;
arch 4711 arch/x86/kvm/x86.c struct kvm_pit *pit = kvm->arch.vpit;
arch 4822 arch/x86/kvm/x86.c kvm->arch.disabled_quirks = cap->args[0];
arch 4840 arch/x86/kvm/x86.c kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
arch 4841 arch/x86/kvm/x86.c kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
arch 4853 arch/x86/kvm/x86.c kvm->arch.x2apic_format = true;
arch 4855 arch/x86/kvm/x86.c kvm->arch.x2apic_broadcast_quirk_disabled = true;
arch 4866 arch/x86/kvm/x86.c kvm->arch.mwait_in_guest = true;
arch 4868 arch/x86/kvm/x86.c kvm->arch.hlt_in_guest = true;
arch 4870 arch/x86/kvm/x86.c kvm->arch.pause_in_guest = true;
arch 4872 arch/x86/kvm/x86.c kvm->arch.cstate_in_guest = true;
arch 4876 arch/x86/kvm/x86.c kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
arch 4880 arch/x86/kvm/x86.c kvm->arch.exception_payload_enabled = cap->args[0];
arch 4961 arch/x86/kvm/x86.c kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
arch 4977 arch/x86/kvm/x86.c if (kvm->arch.vpit)
arch 4980 arch/x86/kvm/x86.c kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
arch 4981 arch/x86/kvm/x86.c if (kvm->arch.vpit)
arch 5036 arch/x86/kvm/x86.c if (!kvm->arch.vpit)
arch 5052 arch/x86/kvm/x86.c if (!kvm->arch.vpit)
arch 5059 arch/x86/kvm/x86.c if (!kvm->arch.vpit)
arch 5075 arch/x86/kvm/x86.c if (!kvm->arch.vpit)
arch 5094 arch/x86/kvm/x86.c kvm->arch.bsp_vcpu_id = arg;
arch 5105 arch/x86/kvm/x86.c memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
arch 5129 arch/x86/kvm/x86.c kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
arch 5139 arch/x86/kvm/x86.c user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
arch 5296 arch/x86/kvm/x86.c !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
arch 5316 arch/x86/kvm/x86.c !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
arch 5351 arch/x86/kvm/x86.c t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
arch 5360 arch/x86/kvm/x86.c return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
arch 5368 arch/x86/kvm/x86.c return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
arch 5376 arch/x86/kvm/x86.c return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
arch 5383 arch/x86/kvm/x86.c return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
arch 5394 arch/x86/kvm/x86.c gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
arch 5428 arch/x86/kvm/x86.c gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
arch 5492 arch/x86/kvm/x86.c gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
arch 5533 arch/x86/kvm/x86.c vcpu->arch.l1tf_flush_l1d = true;
arch 5593 arch/x86/kvm/x86.c && !permission_fault(vcpu, vcpu->arch.walk_mmu,
arch 5594 arch/x86/kvm/x86.c vcpu->arch.mmio_access, 0, access)) {
arch 5595 arch/x86/kvm/x86.c *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
arch 5601 arch/x86/kvm/x86.c *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
arch 5703 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
arch 5712 arch/x86/kvm/x86.c if (vcpu->arch.gpa_available &&
arch 5714 arch/x86/kvm/x86.c (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) {
arch 5715 arch/x86/kvm/x86.c gpa = vcpu->arch.gpa_val;
arch 5896 arch/x86/kvm/x86.c for (i = 0; i < vcpu->arch.pio.count; i++) {
arch 5897 arch/x86/kvm/x86.c if (vcpu->arch.pio.in)
arch 5898 arch/x86/kvm/x86.c r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
arch 5899 arch/x86/kvm/x86.c vcpu->arch.pio.size, pd);
arch 5902 arch/x86/kvm/x86.c vcpu->arch.pio.port, vcpu->arch.pio.size,
arch 5906 arch/x86/kvm/x86.c pd += vcpu->arch.pio.size;
arch 5915 arch/x86/kvm/x86.c vcpu->arch.pio.port = port;
arch 5916 arch/x86/kvm/x86.c vcpu->arch.pio.in = in;
arch 5917 arch/x86/kvm/x86.c vcpu->arch.pio.count = count;
arch 5918 arch/x86/kvm/x86.c vcpu->arch.pio.size = size;
arch 5920 arch/x86/kvm/x86.c if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
arch 5921 arch/x86/kvm/x86.c vcpu->arch.pio.count = 0;
arch 5942 arch/x86/kvm/x86.c if (vcpu->arch.pio.count)
arch 5945 arch/x86/kvm/x86.c memset(vcpu->arch.pio_data, 0, size * count);
arch 5950 arch/x86/kvm/x86.c memcpy(val, vcpu->arch.pio_data, size * count);
arch 5951 arch/x86/kvm/x86.c trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
arch 5952 arch/x86/kvm/x86.c vcpu->arch.pio.count = 0;
arch 5965 arch/x86/kvm/x86.c memcpy(vcpu->arch.pio_data, val, size * count);
arch 5966 arch/x86/kvm/x86.c trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
arch 5988 arch/x86/kvm/x86.c cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
arch 5989 arch/x86/kvm/x86.c smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
arch 5992 arch/x86/kvm/x86.c cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
arch 6040 arch/x86/kvm/x86.c value = vcpu->arch.cr2;
arch 6069 arch/x86/kvm/x86.c vcpu->arch.cr2 = val;
arch 6201 arch/x86/kvm/x86.c return vcpu->arch.smbase;
arch 6208 arch/x86/kvm/x86.c vcpu->arch.smbase = smbase;
arch 6225 arch/x86/kvm/x86.c emul_to_vcpu(ctxt)->arch.halt_request = 1;
arch 6258 arch/x86/kvm/x86.c return emul_to_vcpu(ctxt)->arch.hflags;
arch 6263 arch/x86/kvm/x86.c emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
arch 6347 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
arch 6361 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
arch 6380 arch/x86/kvm/x86.c vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
arch 6385 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
arch 6447 arch/x86/kvm/x86.c if (!vcpu->arch.mmu->direct_map) {
arch 6480 arch/x86/kvm/x86.c if (vcpu->arch.mmu->direct_map) {
arch 6484 arch/x86/kvm/x86.c indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
arch 6514 arch/x86/kvm/x86.c last_retry_eip = vcpu->arch.last_retry_eip;
arch 6515 arch/x86/kvm/x86.c last_retry_addr = vcpu->arch.last_retry_addr;
arch 6530 arch/x86/kvm/x86.c vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
arch 6544 arch/x86/kvm/x86.c vcpu->arch.last_retry_eip = ctxt->eip;
arch 6545 arch/x86/kvm/x86.c vcpu->arch.last_retry_addr = cr2_or_gpa;
arch 6547 arch/x86/kvm/x86.c if (!vcpu->arch.mmu->direct_map)
arch 6560 arch/x86/kvm/x86.c if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
arch 6562 arch/x86/kvm/x86.c trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
arch 6591 arch/x86/kvm/x86.c kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
arch 6592 arch/x86/kvm/x86.c kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
arch 6593 arch/x86/kvm/x86.c kvm_run->debug.arch.exception = DB_VECTOR;
arch 6627 arch/x86/kvm/x86.c (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
arch 6631 arch/x86/kvm/x86.c vcpu->arch.guest_debug_dr7,
arch 6632 arch/x86/kvm/x86.c vcpu->arch.eff_db);
arch 6635 arch/x86/kvm/x86.c kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
arch 6636 arch/x86/kvm/x86.c kvm_run->debug.arch.pc = eip;
arch 6637 arch/x86/kvm/x86.c kvm_run->debug.arch.exception = DB_VECTOR;
arch 6644 arch/x86/kvm/x86.c if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
arch 6648 arch/x86/kvm/x86.c vcpu->arch.dr7,
arch 6649 arch/x86/kvm/x86.c vcpu->arch.db);
arch 6652 arch/x86/kvm/x86.c vcpu->arch.dr6 &= ~DR_TRAP_BITS;
arch 6653 arch/x86/kvm/x86.c vcpu->arch.dr6 |= dr6 | DR6_RTM;
arch 6698 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
arch 6700 arch/x86/kvm/x86.c bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
arch 6702 arch/x86/kvm/x86.c vcpu->arch.l1tf_flush_l1d = true;
arch 6708 arch/x86/kvm/x86.c vcpu->arch.write_fault_to_shadow_pgtable = false;
arch 6782 arch/x86/kvm/x86.c if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
arch 6783 arch/x86/kvm/x86.c vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
arch 6808 arch/x86/kvm/x86.c } else if (vcpu->arch.pio.count) {
arch 6809 arch/x86/kvm/x86.c if (!vcpu->arch.pio.in) {
arch 6811 arch/x86/kvm/x86.c vcpu->arch.pio.count = 0;
arch 6814 arch/x86/kvm/x86.c vcpu->arch.complete_userspace_io = complete_emulated_pio;
arch 6823 arch/x86/kvm/x86.c vcpu->arch.complete_userspace_io = complete_emulated_mmio;
arch 6832 arch/x86/kvm/x86.c vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
arch 6850 arch/x86/kvm/x86.c vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
arch 6870 arch/x86/kvm/x86.c vcpu->arch.pio.count = 0;
arch 6876 arch/x86/kvm/x86.c vcpu->arch.pio.count = 0;
arch 6878 arch/x86/kvm/x86.c if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
arch 6888 arch/x86/kvm/x86.c int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
arch 6899 arch/x86/kvm/x86.c vcpu->arch.complete_userspace_io =
arch 6903 arch/x86/kvm/x86.c vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
arch 6904 arch/x86/kvm/x86.c vcpu->arch.complete_userspace_io = complete_fast_pio_out;
arch 6914 arch/x86/kvm/x86.c BUG_ON(vcpu->arch.pio.count != 1);
arch 6916 arch/x86/kvm/x86.c if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
arch 6917 arch/x86/kvm/x86.c vcpu->arch.pio.count = 0;
arch 6922 arch/x86/kvm/x86.c val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
arch 6928 arch/x86/kvm/x86.c emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
arch 6929 arch/x86/kvm/x86.c vcpu->arch.pio.port, &val, 1);
arch 6944 arch/x86/kvm/x86.c ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
arch 6951 arch/x86/kvm/x86.c vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
arch 6952 arch/x86/kvm/x86.c vcpu->arch.complete_userspace_io = complete_fast_pio_in;
arch 7008 arch/x86/kvm/x86.c struct kvm_arch *ka = &kvm->arch;
arch 7187 arch/x86/kvm/x86.c (unsigned long *)&vcpu->arch.pmu.global_status);
arch 7351 arch/x86/kvm/x86.c vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
arch 7423 arch/x86/kvm/x86.c WARN_ON_ONCE(vcpu->arch.apicv_active);
arch 7426 arch/x86/kvm/x86.c if (!vcpu->arch.apicv_active)
arch 7429 arch/x86/kvm/x86.c vcpu->arch.apicv_active = false;
arch 7439 arch/x86/kvm/x86.c map = rcu_dereference(kvm->arch.apic_map);
arch 7556 arch/x86/kvm/x86.c if (vcpu->arch.apicv_active)
arch 7559 arch/x86/kvm/x86.c if (!vcpu->arch.apic->vapic_addr)
arch 7578 arch/x86/kvm/x86.c if (vcpu->arch.exception.injected)
arch 7594 arch/x86/kvm/x86.c else if (!vcpu->arch.exception.pending) {
arch 7595 arch/x86/kvm/x86.c if (vcpu->arch.nmi_injected)
arch 7597 arch/x86/kvm/x86.c else if (vcpu->arch.interrupt.injected)
arch 7614 arch/x86/kvm/x86.c if (vcpu->arch.exception.pending) {
arch 7615 arch/x86/kvm/x86.c trace_kvm_inj_exception(vcpu->arch.exception.nr,
arch 7616 arch/x86/kvm/x86.c vcpu->arch.exception.has_error_code,
arch 7617 arch/x86/kvm/x86.c vcpu->arch.exception.error_code);
arch 7619 arch/x86/kvm/x86.c WARN_ON_ONCE(vcpu->arch.exception.injected);
arch 7620 arch/x86/kvm/x86.c vcpu->arch.exception.pending = false;
arch 7621 arch/x86/kvm/x86.c vcpu->arch.exception.injected = true;
arch 7623 arch/x86/kvm/x86.c if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
arch 7627 arch/x86/kvm/x86.c if (vcpu->arch.exception.nr == DB_VECTOR) {
arch 7639 arch/x86/kvm/x86.c if (vcpu->arch.dr7 & DR7_GD) {
arch 7640 arch/x86/kvm/x86.c vcpu->arch.dr7 &= ~DR7_GD;
arch 7652 arch/x86/kvm/x86.c if (vcpu->arch.smi_pending && !is_smm(vcpu) &&
arch 7654 arch/x86/kvm/x86.c vcpu->arch.smi_pending = false;
arch 7655 arch/x86/kvm/x86.c ++vcpu->arch.smi_count;
arch 7657 arch/x86/kvm/x86.c } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
arch 7658 arch/x86/kvm/x86.c --vcpu->arch.nmi_pending;
arch 7659 arch/x86/kvm/x86.c vcpu->arch.nmi_injected = true;
arch 7693 arch/x86/kvm/x86.c if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
arch 7696 arch/x86/kvm/x86.c vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
arch 7697 arch/x86/kvm/x86.c vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
arch 7798 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
arch 7824 arch/x86/kvm/x86.c put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
arch 7829 arch/x86/kvm/x86.c put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
arch 7863 arch/x86/kvm/x86.c trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
arch 7879 arch/x86/kvm/x86.c vcpu->arch.hflags |= HF_SMM_MASK;
arch 7880 arch/x86/kvm/x86.c kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
arch 7883 arch/x86/kvm/x86.c vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
arch 7890 arch/x86/kvm/x86.c cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
arch 7892 arch/x86/kvm/x86.c vcpu->arch.cr0 = cr0;
arch 7902 arch/x86/kvm/x86.c cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
arch 7903 arch/x86/kvm/x86.c cs.base = vcpu->arch.smbase;
arch 7938 arch/x86/kvm/x86.c vcpu->arch.smi_pending = true;
arch 7952 arch/x86/kvm/x86.c bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
arch 7955 arch/x86/kvm/x86.c kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
arch 7957 arch/x86/kvm/x86.c if (vcpu->arch.apicv_active)
arch 7960 arch/x86/kvm/x86.c kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
arch 7964 arch/x86/kvm/x86.c vcpu->arch.load_eoi_exitmap_pending = true;
arch 7973 arch/x86/kvm/x86.c if (!kvm_apic_hw_enabled(vcpu->arch.apic))
arch 7976 arch/x86/kvm/x86.c bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
arch 8077 arch/x86/kvm/x86.c vcpu->arch.apf.halted = true;
arch 8092 arch/x86/kvm/x86.c BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
arch 8093 arch/x86/kvm/x86.c if (test_bit(vcpu->arch.pending_ioapic_eoi,
arch 8094 arch/x86/kvm/x86.c vcpu->arch.ioapic_handled_vectors)) {
arch 8097 arch/x86/kvm/x86.c vcpu->arch.pending_ioapic_eoi;
arch 8122 arch/x86/kvm/x86.c vcpu->run->hyperv = vcpu->arch.hyperv.exit;
arch 8139 arch/x86/kvm/x86.c if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
arch 8161 arch/x86/kvm/x86.c if (vcpu->arch.smi_pending && !is_smm(vcpu))
arch 8164 arch/x86/kvm/x86.c if (vcpu->arch.nmi_pending)
arch 8168 arch/x86/kvm/x86.c WARN_ON(vcpu->arch.exception.pending);
arch 8214 arch/x86/kvm/x86.c if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
arch 8237 arch/x86/kvm/x86.c vcpu->arch.host_pkru = read_pkru();
arch 8243 arch/x86/kvm/x86.c if (unlikely(vcpu->arch.switch_db_regs)) {
arch 8245 arch/x86/kvm/x86.c set_debugreg(vcpu->arch.eff_db[0], 0);
arch 8246 arch/x86/kvm/x86.c set_debugreg(vcpu->arch.eff_db[1], 1);
arch 8247 arch/x86/kvm/x86.c set_debugreg(vcpu->arch.eff_db[2], 2);
arch 8248 arch/x86/kvm/x86.c set_debugreg(vcpu->arch.eff_db[3], 3);
arch 8249 arch/x86/kvm/x86.c set_debugreg(vcpu->arch.dr6, 6);
arch 8250 arch/x86/kvm/x86.c vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
arch 8261 arch/x86/kvm/x86.c if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
arch 8267 arch/x86/kvm/x86.c vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
arch 8280 arch/x86/kvm/x86.c vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
arch 8302 arch/x86/kvm/x86.c s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
arch 8305 arch/x86/kvm/x86.c vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN;
arch 8322 arch/x86/kvm/x86.c if (unlikely(vcpu->arch.tsc_always_catchup))
arch 8325 arch/x86/kvm/x86.c if (vcpu->arch.apic_attention)
arch 8328 arch/x86/kvm/x86.c vcpu->arch.gpa_available = false;
arch 8334 arch/x86/kvm/x86.c if (unlikely(vcpu->arch.apic_attention))
arch 8356 arch/x86/kvm/x86.c switch(vcpu->arch.mp_state) {
arch 8358 arch/x86/kvm/x86.c vcpu->arch.pv.pv_unhalted = false;
arch 8359 arch/x86/kvm/x86.c vcpu->arch.mp_state =
arch 8363 arch/x86/kvm/x86.c vcpu->arch.apf.halted = false;
arch 8379 arch/x86/kvm/x86.c return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
arch 8380 arch/x86/kvm/x86.c !vcpu->arch.apf.halted);
arch 8389 arch/x86/kvm/x86.c
vcpu->arch.l1tf_flush_l1d = true; arch 8445 arch/x86/kvm/x86.c BUG_ON(!vcpu->arch.pio.count); arch 8509 arch/x86/kvm/x86.c vcpu->arch.complete_userspace_io = complete_emulated_mmio; arch 8531 arch/x86/kvm/x86.c kvm_save_current_fpu(vcpu->arch.user_fpu); arch 8534 arch/x86/kvm/x86.c __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state, arch 8548 arch/x86/kvm/x86.c kvm_save_current_fpu(vcpu->arch.guest_fpu); arch 8550 arch/x86/kvm/x86.c copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state); arch 8567 arch/x86/kvm/x86.c if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { arch 8603 arch/x86/kvm/x86.c if (unlikely(vcpu->arch.complete_userspace_io)) { arch 8604 arch/x86/kvm/x86.c int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; arch 8605 arch/x86/kvm/x86.c vcpu->arch.complete_userspace_io = NULL; arch 8610 arch/x86/kvm/x86.c WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); arch 8630 arch/x86/kvm/x86.c if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { arch 8638 arch/x86/kvm/x86.c emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); arch 8639 arch/x86/kvm/x86.c vcpu->arch.emulate_regs_need_sync_to_vcpu = false; arch 8674 arch/x86/kvm/x86.c vcpu->arch.emulate_regs_need_sync_from_vcpu = true; arch 8675 arch/x86/kvm/x86.c vcpu->arch.emulate_regs_need_sync_to_vcpu = false; arch 8699 arch/x86/kvm/x86.c vcpu->arch.exception.pending = false; arch 8744 arch/x86/kvm/x86.c sregs->cr2 = vcpu->arch.cr2; arch 8748 arch/x86/kvm/x86.c sregs->efer = vcpu->arch.efer; arch 8753 arch/x86/kvm/x86.c if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) arch 8754 arch/x86/kvm/x86.c set_bit(vcpu->arch.interrupt.nr, arch 8775 arch/x86/kvm/x86.c if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && arch 8776 arch/x86/kvm/x86.c vcpu->arch.pv.pv_unhalted) arch 8779 arch/x86/kvm/x86.c mp_state->mp_state = vcpu->arch.mp_state; arch 8799 arch/x86/kvm/x86.c if ((is_smm(vcpu) || vcpu->arch.smi_pending) && arch 8805 arch/x86/kvm/x86.c vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; arch 8806 arch/x86/kvm/x86.c set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); arch 8808 arch/x86/kvm/x86.c vcpu->arch.mp_state = mp_state->mp_state; arch 8820 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; arch 8888 arch/x86/kvm/x86.c vcpu->arch.cr2 = sregs->cr2; arch 8890 arch/x86/kvm/x86.c vcpu->arch.cr3 = sregs->cr3; arch 8891 arch/x86/kvm/x86.c __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); arch 8895 arch/x86/kvm/x86.c mmu_reset_needed |= vcpu->arch.efer != sregs->efer; arch 8900 arch/x86/kvm/x86.c vcpu->arch.cr0 = sregs->cr0; arch 8911 arch/x86/kvm/x86.c load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); arch 8943 arch/x86/kvm/x86.c vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; arch 8973 arch/x86/kvm/x86.c if (vcpu->arch.exception.pending) arch 8993 arch/x86/kvm/x86.c vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; arch 8994 arch/x86/kvm/x86.c vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; arch 8997 arch/x86/kvm/x86.c vcpu->arch.eff_db[i] = vcpu->arch.db[i]; arch 9002 arch/x86/kvm/x86.c vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + arch 9050 arch/x86/kvm/x86.c fxsave = &vcpu->arch.guest_fpu->state.fxsave; arch 9070 arch/x86/kvm/x86.c fxsave = &vcpu->arch.guest_fpu->state.fxsave; arch 9126 arch/x86/kvm/x86.c fpstate_init(&vcpu->arch.guest_fpu->state); arch 9128 arch/x86/kvm/x86.c vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv = arch 9134 arch/x86/kvm/x86.c vcpu->arch.xcr0 = XFEATURE_MASK_FP; arch 9136 arch/x86/kvm/x86.c 
vcpu->arch.cr0 |= X86_CR0_ET; arch 9141 arch/x86/kvm/x86.c void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; arch 9142 arch/x86/kvm/x86.c struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache; arch 9169 arch/x86/kvm/x86.c vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); arch 9170 arch/x86/kvm/x86.c vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; arch 9196 arch/x86/kvm/x86.c vcpu->arch.msr_kvm_poll_control = 1; arch 9203 arch/x86/kvm/x86.c schedule_delayed_work(&kvm->arch.kvmclock_sync_work, arch 9216 arch/x86/kvm/x86.c vcpu->arch.hflags = 0; arch 9218 arch/x86/kvm/x86.c vcpu->arch.smi_pending = 0; arch 9219 arch/x86/kvm/x86.c vcpu->arch.smi_count = 0; arch 9220 arch/x86/kvm/x86.c atomic_set(&vcpu->arch.nmi_queued, 0); arch 9221 arch/x86/kvm/x86.c vcpu->arch.nmi_pending = 0; arch 9222 arch/x86/kvm/x86.c vcpu->arch.nmi_injected = false; arch 9225 arch/x86/kvm/x86.c vcpu->arch.exception.pending = false; arch 9227 arch/x86/kvm/x86.c memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); arch 9229 arch/x86/kvm/x86.c vcpu->arch.dr6 = DR6_INIT; arch 9231 arch/x86/kvm/x86.c vcpu->arch.dr7 = DR7_FIXED_1; arch 9234 arch/x86/kvm/x86.c vcpu->arch.cr2 = 0; arch 9237 arch/x86/kvm/x86.c vcpu->arch.apf.msr_val = 0; arch 9238 arch/x86/kvm/x86.c vcpu->arch.st.msr_val = 0; arch 9244 arch/x86/kvm/x86.c vcpu->arch.apf.halted = false; arch 9255 arch/x86/kvm/x86.c mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, arch 9259 arch/x86/kvm/x86.c mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, arch 9269 arch/x86/kvm/x86.c vcpu->arch.smbase = 0x30000; arch 9271 arch/x86/kvm/x86.c vcpu->arch.msr_misc_features_enables = 0; arch 9273 arch/x86/kvm/x86.c vcpu->arch.xcr0 = XFEATURE_MASK_FP; arch 9276 arch/x86/kvm/x86.c memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); arch 9277 arch/x86/kvm/x86.c vcpu->arch.regs_avail = ~0; arch 9278 arch/x86/kvm/x86.c vcpu->arch.regs_dirty = ~0; arch 9280 arch/x86/kvm/x86.c vcpu->arch.ia32_xss = 0; arch 9317 arch/x86/kvm/x86.c if (stable && vcpu->arch.last_host_tsc > local_tsc) { arch 9319 arch/x86/kvm/x86.c if (vcpu->arch.last_host_tsc > max_tsc) arch 9320 arch/x86/kvm/x86.c max_tsc = vcpu->arch.last_host_tsc; arch 9366 arch/x86/kvm/x86.c kvm->arch.backwards_tsc_observed = true; arch 9368 arch/x86/kvm/x86.c vcpu->arch.tsc_offset_adjustment += delta_cyc; arch 9369 arch/x86/kvm/x86.c vcpu->arch.last_host_tsc = local_tsc; arch 9379 arch/x86/kvm/x86.c kvm->arch.last_tsc_nsec = 0; arch 9380 arch/x86/kvm/x86.c kvm->arch.last_tsc_write = 0; arch 9433 arch/x86/kvm/x86.c return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; arch 9439 arch/x86/kvm/x86.c return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; arch 9450 arch/x86/kvm/x86.c vcpu->arch.emulate_ctxt.ops = &emulate_ops; arch 9452 arch/x86/kvm/x86.c vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; arch 9454 arch/x86/kvm/x86.c vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; arch 9461 arch/x86/kvm/x86.c vcpu->arch.pio_data = page_address(page); arch 9470 arch/x86/kvm/x86.c vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu); arch 9477 arch/x86/kvm/x86.c vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, arch 9479 arch/x86/kvm/x86.c if (!vcpu->arch.mce_banks) { arch 9483 arch/x86/kvm/x86.c vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; arch 9485 arch/x86/kvm/x86.c if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, arch 9493 arch/x86/kvm/x86.c vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; arch 9495 arch/x86/kvm/x86.c 
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); arch 9497 arch/x86/kvm/x86.c vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; arch 9502 arch/x86/kvm/x86.c vcpu->arch.pending_external_vector = -1; arch 9503 arch/x86/kvm/x86.c vcpu->arch.preempted_in_kernel = false; arch 9510 arch/x86/kvm/x86.c kfree(vcpu->arch.mce_banks); arch 9516 arch/x86/kvm/x86.c free_page((unsigned long)vcpu->arch.pio_data); arch 9527 arch/x86/kvm/x86.c kfree(vcpu->arch.mce_banks); arch 9532 arch/x86/kvm/x86.c free_page((unsigned long)vcpu->arch.pio_data); arch 9539 arch/x86/kvm/x86.c vcpu->arch.l1tf_flush_l1d = true; arch 9548 arch/x86/kvm/x86.c INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); arch 9549 arch/x86/kvm/x86.c INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); arch 9550 arch/x86/kvm/x86.c INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); arch 9551 arch/x86/kvm/x86.c INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); arch 9552 arch/x86/kvm/x86.c INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); arch 9553 arch/x86/kvm/x86.c atomic_set(&kvm->arch.noncoherent_dma_count, 0); arch 9556 arch/x86/kvm/x86.c set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); arch 9559 arch/x86/kvm/x86.c &kvm->arch.irq_sources_bitmap); arch 9561 arch/x86/kvm/x86.c raw_spin_lock_init(&kvm->arch.tsc_write_lock); arch 9562 arch/x86/kvm/x86.c mutex_init(&kvm->arch.apic_map_lock); arch 9563 arch/x86/kvm/x86.c spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); arch 9565 arch/x86/kvm/x86.c kvm->arch.kvmclock_offset = -ktime_get_boottime_ns(); arch 9568 arch/x86/kvm/x86.c kvm->arch.guest_can_read_msr_platform_info = true; arch 9570 arch/x86/kvm/x86.c INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); arch 9571 arch/x86/kvm/x86.c INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); arch 9617 arch/x86/kvm/x86.c cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); arch 9618 arch/x86/kvm/x86.c cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); arch 9708 arch/x86/kvm/x86.c kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); arch 9709 arch/x86/kvm/x86.c kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); arch 9721 arch/x86/kvm/x86.c if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { arch 9722 arch/x86/kvm/x86.c kvfree(free->arch.rmap[i]); arch 9723 arch/x86/kvm/x86.c free->arch.rmap[i] = NULL; arch 9728 arch/x86/kvm/x86.c if (!dont || free->arch.lpage_info[i - 1] != arch 9729 arch/x86/kvm/x86.c dont->arch.lpage_info[i - 1]) { arch 9730 arch/x86/kvm/x86.c kvfree(free->arch.lpage_info[i - 1]); arch 9731 arch/x86/kvm/x86.c free->arch.lpage_info[i - 1] = NULL; arch 9748 arch/x86/kvm/x86.c memset(&slot->arch, 0, sizeof(slot->arch)); arch 9759 arch/x86/kvm/x86.c slot->arch.rmap[i] = arch 9760 arch/x86/kvm/x86.c kvcalloc(lpages, sizeof(*slot->arch.rmap[i]), arch 9762 arch/x86/kvm/x86.c if (!slot->arch.rmap[i]) arch 9771 arch/x86/kvm/x86.c slot->arch.lpage_info[i - 1] = linfo; arch 9799 arch/x86/kvm/x86.c kvfree(slot->arch.rmap[i]); arch 9800 arch/x86/kvm/x86.c slot->arch.rmap[i] = NULL; arch 9804 arch/x86/kvm/x86.c kvfree(slot->arch.lpage_info[i - 1]); arch 9805 arch/x86/kvm/x86.c slot->arch.lpage_info[i - 1] = NULL; arch 9894 arch/x86/kvm/x86.c if (!kvm->arch.n_requested_mmu_pages) arch 9960 arch/x86/kvm/x86.c if (vcpu->arch.pv.pv_unhalted) arch 9963 arch/x86/kvm/x86.c if (vcpu->arch.exception.pending) arch 9967 arch/x86/kvm/x86.c (vcpu->arch.nmi_pending && arch 9972 arch/x86/kvm/x86.c (vcpu->arch.smi_pending && !is_smm(vcpu))) arch 9993 arch/x86/kvm/x86.c if 
(READ_ONCE(vcpu->arch.pv.pv_unhalted)) arch 10001 arch/x86/kvm/x86.c if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu)) arch 10009 arch/x86/kvm/x86.c return vcpu->arch.preempted_in_kernel; arch 10051 arch/x86/kvm/x86.c kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) arch 10067 arch/x86/kvm/x86.c if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) || arch 10075 arch/x86/kvm/x86.c if (!vcpu->arch.mmu->direct_map && arch 10076 arch/x86/kvm/x86.c work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu)) arch 10079 arch/x86/kvm/x86.c vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true); arch 10096 arch/x86/kvm/x86.c while (vcpu->arch.apf.gfns[key] != ~0) arch 10099 arch/x86/kvm/x86.c vcpu->arch.apf.gfns[key] = gfn; arch 10108 arch/x86/kvm/x86.c (vcpu->arch.apf.gfns[key] != gfn && arch 10109 arch/x86/kvm/x86.c vcpu->arch.apf.gfns[key] != ~0); i++) arch 10117 arch/x86/kvm/x86.c return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; arch 10126 arch/x86/kvm/x86.c vcpu->arch.apf.gfns[i] = ~0; arch 10129 arch/x86/kvm/x86.c if (vcpu->arch.apf.gfns[j] == ~0) arch 10131 arch/x86/kvm/x86.c k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); arch 10138 arch/x86/kvm/x86.c vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; arch 10146 arch/x86/kvm/x86.c return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, arch 10153 arch/x86/kvm/x86.c return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val, arch 10159 arch/x86/kvm/x86.c if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) arch 10162 arch/x86/kvm/x86.c if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || arch 10163 arch/x86/kvm/x86.c (vcpu->arch.apf.send_user_only && arch 10174 arch/x86/kvm/x86.c vcpu->arch.exception.pending)) arch 10192 arch/x86/kvm/x86.c trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); arch 10193 arch/x86/kvm/x86.c kvm_add_async_pf_gfn(vcpu, work->arch.gfn); arch 10201 arch/x86/kvm/x86.c fault.address = work->arch.token; arch 10224 arch/x86/kvm/x86.c work->arch.token = ~0; /* broadcast wakeup */ arch 10226 arch/x86/kvm/x86.c kvm_del_async_pf_gfn(vcpu, work->arch.gfn); arch 10227 arch/x86/kvm/x86.c trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); arch 10229 arch/x86/kvm/x86.c if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED && arch 10232 arch/x86/kvm/x86.c vcpu->arch.exception.pending && arch 10233 arch/x86/kvm/x86.c vcpu->arch.exception.nr == PF_VECTOR && arch 10235 arch/x86/kvm/x86.c vcpu->arch.exception.injected = false; arch 10236 arch/x86/kvm/x86.c vcpu->arch.exception.pending = false; arch 10237 arch/x86/kvm/x86.c vcpu->arch.exception.nr = 0; arch 10238 arch/x86/kvm/x86.c vcpu->arch.exception.has_error_code = false; arch 10239 arch/x86/kvm/x86.c vcpu->arch.exception.error_code = 0; arch 10240 arch/x86/kvm/x86.c vcpu->arch.exception.has_payload = false; arch 10241 arch/x86/kvm/x86.c vcpu->arch.exception.payload = 0; arch 10247 arch/x86/kvm/x86.c fault.address = work->arch.token; arch 10252 arch/x86/kvm/x86.c vcpu->arch.apf.halted = false; arch 10253 arch/x86/kvm/x86.c vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; arch 10258 arch/x86/kvm/x86.c if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) arch 10266 arch/x86/kvm/x86.c atomic_inc(&kvm->arch.assigned_device_count); arch 10272 arch/x86/kvm/x86.c atomic_dec(&kvm->arch.assigned_device_count); arch 10278 arch/x86/kvm/x86.c return atomic_read(&kvm->arch.assigned_device_count); arch 10284 arch/x86/kvm/x86.c atomic_inc(&kvm->arch.noncoherent_dma_count); arch 10290 
arch/x86/kvm/x86.c atomic_dec(&kvm->arch.noncoherent_dma_count); arch 10296 arch/x86/kvm/x86.c return atomic_read(&kvm->arch.noncoherent_dma_count); arch 10353 arch/x86/kvm/x86.c return (vcpu->arch.msr_kvm_poll_control & 1) == 0; arch 51 arch/x86/kvm/x86.h vcpu->arch.exception.pending = false; arch 52 arch/x86/kvm/x86.h vcpu->arch.exception.injected = false; arch 58 arch/x86/kvm/x86.h vcpu->arch.interrupt.injected = true; arch 59 arch/x86/kvm/x86.h vcpu->arch.interrupt.soft = soft; arch 60 arch/x86/kvm/x86.h vcpu->arch.interrupt.nr = vector; arch 65 arch/x86/kvm/x86.h vcpu->arch.interrupt.injected = false; arch 70 arch/x86/kvm/x86.h return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected || arch 71 arch/x86/kvm/x86.h vcpu->arch.nmi_injected; arch 87 arch/x86/kvm/x86.h return vcpu->arch.efer & EFER_LMA; arch 106 arch/x86/kvm/x86.h return (vcpu->arch.efer & EFER_LMA) && arch 124 arch/x86/kvm/x86.h return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; arch 198 arch/x86/kvm/x86.h vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK; arch 199 arch/x86/kvm/x86.h vcpu->arch.mmio_access = access; arch 200 arch/x86/kvm/x86.h vcpu->arch.mmio_gfn = gfn; arch 201 arch/x86/kvm/x86.h vcpu->arch.mmio_gen = gen; arch 206 arch/x86/kvm/x86.h return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation; arch 217 arch/x86/kvm/x86.h if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK)) arch 220 arch/x86/kvm/x86.h vcpu->arch.mmio_gva = 0; arch 225 arch/x86/kvm/x86.h if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva && arch 226 arch/x86/kvm/x86.h vcpu->arch.mmio_gva == (gva & PAGE_MASK)) arch 234 arch/x86/kvm/x86.h if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn && arch 235 arch/x86/kvm/x86.h vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT) arch 260 arch/x86/kvm/x86.h return !(kvm->arch.disabled_quirks & quirk); arch 310 arch/x86/kvm/x86.h return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, arch 311 arch/x86/kvm/x86.h vcpu->arch.virtual_tsc_shift); arch 330 arch/x86/kvm/x86.h return kvm->arch.mwait_in_guest; arch 335 arch/x86/kvm/x86.h return kvm->arch.hlt_in_guest; arch 340 arch/x86/kvm/x86.h return kvm->arch.pause_in_guest; arch 345 arch/x86/kvm/x86.h return kvm->arch.cstate_in_guest; arch 171 arch/x86/um/asm/elf.h (pr_reg)[21] = current->thread.arch.fs; \ arch 58 arch/x86/um/ldt.c uml_ldt_t *ldt = &current->mm->context.arch.ldt; arch 123 arch/x86/um/ldt.c uml_ldt_t *ldt = &current->mm->context.arch.ldt; arch 307 arch/x86/um/ldt.c mutex_init(&new_mm->arch.ldt.lock); arch 324 arch/x86/um/ldt.c new_mm->arch.ldt.entry_count = 0; arch 335 arch/x86/um/ldt.c mutex_lock(&from_mm->arch.ldt.lock); arch 336 arch/x86/um/ldt.c if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES) arch 337 arch/x86/um/ldt.c memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries, arch 338 arch/x86/um/ldt.c sizeof(new_mm->arch.ldt.u.entries)); arch 340 arch/x86/um/ldt.c i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE; arch 347 arch/x86/um/ldt.c new_mm->arch.ldt.u.pages[i] = arch 349 arch/x86/um/ldt.c memcpy(new_mm->arch.ldt.u.pages[i], arch 350 arch/x86/um/ldt.c from_mm->arch.ldt.u.pages[i], PAGE_SIZE); arch 353 arch/x86/um/ldt.c new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count; arch 354 arch/x86/um/ldt.c mutex_unlock(&from_mm->arch.ldt.lock); arch 365 arch/x86/um/ldt.c if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) { arch 366 arch/x86/um/ldt.c i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE; arch 368 arch/x86/um/ldt.c free_page((long)
mm->arch.ldt.u.pages[i]); arch 370 arch/x86/um/ldt.c mm->arch.ldt.entry_count = 0; arch 137 arch/x86/um/ptrace_32.c child->thread.arch.debugregs[addr] = data; arch 191 arch/x86/um/ptrace_32.c tmp = child->thread.arch.debugregs[addr]; arch 131 arch/x86/um/ptrace_64.c child->thread.arch.debugregs[addr] = data; arch 196 arch/x86/um/ptrace_64.c tmp = child->thread.arch.debugregs[addr]; arch 241 arch/x86/um/signal.c struct faultinfo * fi = &current->thread.arch.faultinfo; arch 61 arch/x86/um/syscalls_64.c current->thread.arch.fs = (unsigned long) ptr; arch 85 arch/x86/um/syscalls_64.c if ((to->thread.arch.fs == 0) || (to->mm == NULL)) arch 88 arch/x86/um/syscalls_64.c arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs); arch 68 arch/x86/um/tls_32.c if (!t->arch.tls_array) arch 72 arch/x86/um/tls_32.c if (!t->arch.tls_array[idx].present) arch 99 arch/x86/um/tls_32.c &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN]; arch 139 arch/x86/um/tls_32.c &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; arch 163 arch/x86/um/tls_32.c &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN]; arch 211 arch/x86/um/tls_32.c t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info; arch 212 arch/x86/um/tls_32.c t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1; arch 213 arch/x86/um/tls_32.c t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed; arch 243 arch/x86/um/tls_32.c if (!t->arch.tls_array) arch 249 arch/x86/um/tls_32.c if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present) arch 252 arch/x86/um/tls_32.c *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls; arch 260 arch/x86/um/tls_32.c !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) { arch 15 arch/x86/um/tls_64.c t->thread.arch.fs = tls; arch 1106 arch/x86/xen/enlighten_pv.c &HYPERVISOR_shared_info->arch.nmi_reason)) arch 1109 arch/x86/xen/enlighten_pv.c &HYPERVISOR_shared_info->arch.nmi_reason)) arch 1307 arch/x86/xen/mmu_pv.c this_cpu_read(xen_vcpu)->arch.cr2 = cr2; arch 283 arch/x86/xen/p2m.c HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = ~0UL; arch 285 arch/x86/xen/p2m.c HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = arch 287 arch/x86/xen/p2m.c HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; arch 288 arch/x86/xen/p2m.c HYPERVISOR_shared_info->arch.p2m_generation = 0; arch 289 arch/x86/xen/p2m.c HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr; arch 290 arch/x86/xen/p2m.c HYPERVISOR_shared_info->arch.p2m_cr3 = arch 506 arch/x86/xen/p2m.c HYPERVISOR_shared_info->arch.p2m_generation++; arch 511 arch/x86/xen/p2m.c HYPERVISOR_shared_info->arch.p2m_generation++; arch 607 arch/x86/xen/p2m.c HYPERVISOR_shared_info->arch.p2m_generation++; arch 612 arch/x86/xen/p2m.c HYPERVISOR_shared_info->arch.p2m_generation++; arch 627 arch/x86/xen/p2m.c HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; arch 239 drivers/hwtracing/coresight/coresight-etm.h u8 arch; arch 718 drivers/hwtracing/coresight/coresight-etm3x.c static bool etm_arch_supported(u8 arch) arch 720 drivers/hwtracing/coresight/coresight-etm3x.c switch (arch) { arch 763 drivers/hwtracing/coresight/coresight-etm3x.c drvdata->arch = BMVAL(etmidr, 4, 11); arch 846 drivers/hwtracing/coresight/coresight-etm3x.c if (etm_arch_supported(drvdata->arch) == false) { arch 57 drivers/hwtracing/coresight/coresight-etm4x.c static bool etm4_arch_supported(u8 arch) arch 60 drivers/hwtracing/coresight/coresight-etm4x.c switch (arch & 0xf0) { arch 631 drivers/hwtracing/coresight/coresight-etm4x.c drvdata->arch = BMVAL(etmidr1, 4, 11); arch
1142 drivers/hwtracing/coresight/coresight-etm4x.c if (etm4_arch_supported(drvdata->arch) == false) { arch 1177 drivers/hwtracing/coresight/coresight-etm4x.c drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf); arch 346 drivers/hwtracing/coresight/coresight-etm4x.h u8 arch; arch 386 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < padap->params.arch.nchan; i++) { arch 1175 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c vf_count = padap->params.arch.vfcount; arch 1436 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c for (i = 0; i < padap->params.arch.vfcount; i++) arch 1440 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c sge_qbase->vfcount = padap->params.arch.vfcount; arch 2229 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) { arch 2238 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c tcam->rplc_size = padap->params.arch.mps_rplc_size; arch 2252 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c n = padap->params.arch.mps_tcam_size; arch 392 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h struct arch_specific_params arch; /* chip specific params */ arch 175 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c len = adap->params.arch.vfcount * arch 245 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c adap->params.arch.mps_tcam_size; arch 826 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c if (adap->params.arch.nchan == NCHAN) { arch 1684 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c if (adap->params.arch.mps_rplc_size > 128) arch 1801 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c if (adap->params.arch.mps_rplc_size > 128) { arch 1863 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c if (adap->params.arch.mps_rplc_size > 128) arch 1872 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c if (adap->params.arch.mps_rplc_size > 128) arch 2366 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c int vf, vfcount = adapter->params.arch.vfcount; arch 3322 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c for (i = 0; i < adap->params.arch.nchan; i++) arch 3363 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c for (i = 0; i < adap->params.arch.nchan; i++) \ arch 3372 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c for (i = 0; i < adap->params.arch.nchan; i++) \ arch 3478 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c for (i = 0; i < adap->params.arch.nchan; i++) arch 3502 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c if (adap->params.arch.nchan == NCHAN) arch 538 drivers/net/ethernet/chelsio/cxgb4/sge.c u32 val = adap->params.arch.sge_fl_db; arch 3660 drivers/net/ethernet/chelsio/cxgb4/sge.c u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; arch 5643 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int nchan = adap->params.arch.nchan; arch 5676 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int nchan = adap->params.arch.nchan; arch 5927 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c if (adap->params.arch.nchan == NCHAN) { arch 5935 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c if (adap->params.arch.nchan == NCHAN) { arch 6065 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) { arch 6092 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) { arch 7971 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c unsigned int max_naddr = adap->params.arch.mps_tcam_size; arch 8136 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c unsigned int max_mac_addr = adap->params.arch.mps_tcam_size; arch 9129 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c 
adapter->params.arch.sge_fl_db = DBPRIO_F; arch 9130 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.mps_tcam_size = arch 9132 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.mps_rplc_size = 128; arch 9133 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.nchan = NCHAN; arch 9134 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.pm_stats_cnt = PM_NSTATS; arch 9135 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.vfcount = 128; arch 9139 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.cng_ch_bits_log = 2; arch 9143 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F; arch 9144 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.mps_tcam_size = arch 9146 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.mps_rplc_size = 128; arch 9147 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.nchan = NCHAN; arch 9148 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.pm_stats_cnt = PM_NSTATS; arch 9149 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.vfcount = 128; arch 9150 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.cng_ch_bits_log = 2; arch 9154 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.sge_fl_db = 0; arch 9155 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.mps_tcam_size = arch 9157 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.mps_rplc_size = 256; arch 9158 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.nchan = 2; arch 9159 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS; arch 9160 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.vfcount = 256; arch 9164 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c adapter->params.arch.cng_ch_bits_log = 3; arch 527 drivers/net/ethernet/chelsio/cxgb4vf/sge.c u32 val = adapter->params.arch.sge_fl_db; arch 266 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h struct arch_specific_params arch; /* chip specific params */ arch 1535 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c unsigned int max_naddr = adapter->params.arch.mps_tcam_size; arch 1619 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c unsigned int max_naddr = adapter->params.arch.mps_tcam_size; arch 1698 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size; arch 2164 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c adapter->params.arch.sge_fl_db = DBPRIO_F; arch 2165 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c adapter->params.arch.mps_tcam_size = arch 2172 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F; arch 2173 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c adapter->params.arch.mps_tcam_size = arch 2180 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c adapter->params.arch.sge_fl_db = 0; arch 2181 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c adapter->params.arch.mps_tcam_size = arch 180 drivers/net/ethernet/sfc/falcon/nic.c #define REGISTER(name, arch, min_rev, max_rev) { \ arch 181 drivers/net/ethernet/sfc/falcon/nic.c arch ## R_ ## min_rev ## max_rev ## _ ## name, \ arch 182 drivers/net/ethernet/sfc/falcon/nic.c REGISTER_REVISION_ ## arch ## min_rev, \ arch 183 drivers/net/ethernet/sfc/falcon/nic.c REGISTER_REVISION_ ## arch ## max_rev \ arch 305 drivers/net/ethernet/sfc/falcon/nic.c #define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, 
step, rows) { \ arch 307 drivers/net/ethernet/sfc/falcon/nic.c REGISTER_REVISION_ ## arch ## min_rev, \ arch 308 drivers/net/ethernet/sfc/falcon/nic.c REGISTER_REVISION_ ## arch ## max_rev, \ arch 311 drivers/net/ethernet/sfc/falcon/nic.c #define REGISTER_TABLE(name, arch, min_rev, max_rev) \ arch 313 drivers/net/ethernet/sfc/falcon/nic.c name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \ arch 314 drivers/net/ethernet/sfc/falcon/nic.c arch, min_rev, max_rev, \ arch 315 drivers/net/ethernet/sfc/falcon/nic.c arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ arch 316 drivers/net/ethernet/sfc/falcon/nic.c arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS) arch 181 drivers/net/ethernet/sfc/nic.c #define REGISTER(name, arch, min_rev, max_rev) { \ arch 182 drivers/net/ethernet/sfc/nic.c arch ## R_ ## min_rev ## max_rev ## _ ## name, \ arch 183 drivers/net/ethernet/sfc/nic.c REGISTER_REVISION_ ## arch ## min_rev, \ arch 184 drivers/net/ethernet/sfc/nic.c REGISTER_REVISION_ ## arch ## max_rev \ arch 310 drivers/net/ethernet/sfc/nic.c #define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \ arch 312 drivers/net/ethernet/sfc/nic.c REGISTER_REVISION_ ## arch ## min_rev, \ arch 313 drivers/net/ethernet/sfc/nic.c REGISTER_REVISION_ ## arch ## max_rev, \ arch 316 drivers/net/ethernet/sfc/nic.c #define REGISTER_TABLE(name, arch, min_rev, max_rev) \ arch 318 drivers/net/ethernet/sfc/nic.c name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \ arch 319 drivers/net/ethernet/sfc/nic.c arch, min_rev, max_rev, \ arch 320 drivers/net/ethernet/sfc/nic.c arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ arch 321 drivers/net/ethernet/sfc/nic.c arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS) arch 288 drivers/s390/char/zcore.c unsigned char arch; arch 313 drivers/s390/char/zcore.c rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); arch 317 drivers/s390/char/zcore.c if (arch == ARCH_S390) { arch 223 drivers/s390/crypto/vfio_ap_ops.c gisa = kvm->arch.gisa_int.origin; arch 285 drivers/s390/crypto/vfio_ap_ops.c if (!(vcpu->arch.sie_block->eca & ECA_AIV)) arch 291 drivers/s390/crypto/vfio_ap_ops.c if (!vcpu->kvm->arch.crypto.pqap_hook) arch 293 drivers/s390/crypto/vfio_ap_ops.c matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook, arch 1051 drivers/s390/crypto/vfio_ap_ops.c kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; arch 1107 drivers/s390/crypto/vfio_ap_ops.c if (!matrix_mdev->kvm->arch.crypto.crycbd) arch 1227 drivers/s390/crypto/vfio_ap_ops.c matrix_mdev->kvm->arch.crypto.pqap_hook = NULL; arch 77 drivers/soc/versatile/soc-integrator.c __ATTR(arch, S_IRUGO, integrator_get_arch, NULL); arch 637 drivers/usb/dwc2/core.h unsigned arch:2; arch 728 drivers/usb/dwc2/debugfs.c print_param(seq, hw, arch); arch 104 drivers/usb/dwc2/hcd.c switch (hsotg->hw_params.arch) { arch 816 drivers/usb/dwc2/hcd.c hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) { arch 326 drivers/usb/dwc2/params.c bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH); arch 620 drivers/usb/dwc2/params.c bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH); arch 783 drivers/usb/dwc2/params.c hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >> arch 204 drivers/vfio/pci/vfio_pci_nvlink2.c kvm->arch.lpid, MSR_DR | MSR_PR)) arch 1221 drivers/video/fbdev/nvidia/nvidia.c u32 arch = 0; arch 1231 drivers/video/fbdev/nvidia/nvidia.c arch = NV_ARCH_10; arch 1236 drivers/video/fbdev/nvidia/nvidia.c arch = NV_ARCH_20; arch 1243 drivers/video/fbdev/nvidia/nvidia.c arch = NV_ARCH_30; arch 
1258 drivers/video/fbdev/nvidia/nvidia.c arch = NV_ARCH_40; arch 1261 drivers/video/fbdev/nvidia/nvidia.c arch = NV_ARCH_04; arch 1267 drivers/video/fbdev/nvidia/nvidia.c return arch; arch 1858 drivers/video/fbdev/riva/fbdev.c u32 arch = 0; arch 1868 drivers/video/fbdev/riva/fbdev.c arch = NV_ARCH_10; arch 1873 drivers/video/fbdev/riva/fbdev.c arch = NV_ARCH_20; arch 1880 drivers/video/fbdev/riva/fbdev.c arch = NV_ARCH_30; arch 1883 drivers/video/fbdev/riva/fbdev.c arch = NV_ARCH_04; arch 1886 drivers/video/fbdev/riva/fbdev.c arch = NV_ARCH_03; arch 1891 drivers/video/fbdev/riva/fbdev.c return arch; arch 97 include/kvm/arm_arch_timer.h #define vcpu_timer(v) (&(v)->arch.timer_cpu) arch 99 include/kvm/arm_arch_timer.h #define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER]) arch 100 include/kvm/arm_arch_timer.h #define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER]) arch 32 include/kvm/arm_pmu.h #define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready) arch 33 include/kvm/arm_pmu.h #define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS) arch 32 include/kvm/arm_psci.h if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) { arch 33 include/kvm/arm_psci.h if (vcpu->kvm->arch.psci_version) arch 34 include/kvm/arm_psci.h return vcpu->kvm->arch.psci_version; arch 357 include/kvm/arm_vgic.h #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) arch 358 include/kvm/arm_vgic.h #define vgic_initialized(k) ((k)->arch.vgic.initialized) arch 359 include/kvm/arm_vgic.h #define vgic_ready(k) ((k)->arch.vgic.ready) arch 361 include/kvm/arm_vgic.h ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) arch 101 include/linux/audit.h extern int audit_classify_arch(int arch); arch 249 include/linux/audit.h #define audit_is_compat(arch) (!((arch) & __AUDIT_ARCH_64BIT)) arch 251 include/linux/audit.h #define audit_is_compat(arch) false arch 361 include/linux/ftrace.h struct dyn_arch_ftrace arch; arch 293 include/linux/hp_sdc.h #error No support for device registration on this arch yet. 
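[Illustrative aside, not an index entry: nearly every hit in this listing follows one kernel convention — a generic core structure embeds an architecture- or chip-specific structure as a member named "arch" (struct kvm_vcpu, struct module, the UML thread state, the cxgb4 adapter params, and so on), so shared code stays portable while per-arch state is reached as foo->arch.*. The C sketch below shows only the shape of that pattern; the names demo_thread, demo_arch_state, and demo_set_fs are hypothetical and do not come from any file in this index.]

/* Minimal, self-contained sketch of the "embedded arch member" pattern. */
#include <stdio.h>

struct demo_arch_state {                /* each arch would supply its own */
	unsigned long fs;               /* cf. thread.arch.fs in the um hits */
	int debugregs[8];               /* cf. thread.arch.debugregs */
};

struct demo_thread {                    /* generic, arch-independent part */
	int tid;
	struct demo_arch_state arch;    /* per-arch state lives here */
};

static void demo_set_fs(struct demo_thread *t, unsigned long fs)
{
	t->arch.fs = fs;                /* generic code pokes t->arch.* */
}

int main(void)
{
	struct demo_thread t = { .tid = 1 };

	demo_set_fs(&t, 0x7f0000001000UL);
	printf("tid %d fs %#lx\n", t.tid, t.arch.fs);
	return 0;
}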
arch 273 include/linux/kexec.h struct kimage_arch arch; arch 209 include/linux/kvm_host.h struct kvm_arch_async_pf arch; arch 216 include/linux/kvm_host.h unsigned long hva, struct kvm_arch_async_pf *arch); arch 322 include/linux/kvm_host.h struct kvm_vcpu_arch arch; arch 347 include/linux/kvm_host.h struct kvm_arch_memory_slot arch; arch 472 include/linux/kvm_host.h struct kvm_arch arch; arch 946 include/linux/kvm_host.h return vcpu->arch.wqp; arch 85 include/linux/mm_types_task.h struct arch_tlbflush_unmap_batch arch; arch 416 include/linux/module.h struct mod_arch_specific arch; arch 262 include/linux/pe.h struct data_dirent arch; /* reservered */ arch 167 include/sound/sof/header.h uint32_t arch; /* Identifier of architecture */ arch 297 include/uapi/linux/kvm.h struct kvm_debug_exit_arch arch; arch 669 include/uapi/linux/kvm.h struct kvm_guest_debug_arch arch; arch 84 include/uapi/linux/ptrace.h __u32 arch __attribute__((__aligned__(sizeof(__u32)))); arch 61 include/uapi/linux/seccomp.h __u32 arch; arch 551 include/xen/interface/xen.h struct arch_vcpu_info arch; arch 602 include/xen/interface/xen.h struct arch_shared_info arch; arch 131 kernel/audit.h int arch; arch 210 kernel/auditfilter.c struct audit_field *arch = entry->rule.arch_f; arch 212 kernel/auditfilter.c if (!arch) { arch 221 kernel/auditfilter.c switch(audit_classify_arch(arch->val)) { arch 140 kernel/auditsc.c switch (audit_classify_syscall(ctx->arch, n)) { arch 522 kernel/auditsc.c result = audit_comparator(ctx->arch, f->op, f->val); arch 1458 kernel/auditsc.c context->arch, context->major); arch 1650 kernel/auditsc.c context->arch = syscall_get_arch(current); arch 77 kernel/events/uprobes.c struct arch_uprobe arch; arch 480 kernel/events/uprobes.c uprobe = container_of(auprobe, struct uprobe, arch); arch 823 kernel/events/uprobes.c void *insn = &uprobe->arch.insn; arch 824 kernel/events/uprobes.c int size = sizeof(uprobe->arch.insn); arch 863 kernel/events/uprobes.c if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn)) arch 866 kernel/events/uprobes.c ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); arch 921 kernel/events/uprobes.c ret = set_swbp(&uprobe->arch, mm, vaddr); arch 934 kernel/events/uprobes.c return set_orig_insn(&uprobe->arch, mm, vaddr); arch 1631 kernel/events/uprobes.c &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); arch 1952 kernel/events/uprobes.c err = arch_uprobe_pre_xol(&uprobe->arch, regs); arch 2247 kernel/events/uprobes.c if (arch_uprobe_ignore(&uprobe->arch, regs)) arch 2252 kernel/events/uprobes.c if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) arch 2274 kernel/events/uprobes.c err = arch_uprobe_post_xol(&uprobe->arch, regs); arch 2276 kernel/events/uprobes.c arch_uprobe_abort_xol(&uprobe->arch, regs); arch 965 kernel/ptrace.c .arch = syscall_get_arch(child), arch 151 kernel/seccomp.c sd->arch = syscall_get_arch(task); arch 32 lib/audit.c int audit_classify_arch(int arch) arch 34 lib/audit.c if (audit_is_compat(arch)) arch 589 mm/rmap.c arch_tlbbatch_flush(&tlb_ubc->arch); arch 607 mm/rmap.c arch_tlbbatch_add_mm(&tlb_ubc->arch, mm); arch 28 samples/seccomp/dropper.c static int install_filter(int nr, int arch, int error) arch 32 samples/seccomp/dropper.c (offsetof(struct seccomp_data, arch))), arch 33 samples/seccomp/dropper.c BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, arch, 0, 3), arch 68 tools/bpf/bpf_jit_disasm.c info.arch = bfd_get_arch(bfdf); arch 76 tools/bpf/bpf_jit_disasm.c disassemble = disassembler(info.arch, arch 76 tools/bpf/bpftool/jit_disasm.c const char *arch, const char 
*disassembler_options, arch 108 tools/bpf/bpftool/jit_disasm.c if (arch) { arch 109 tools/bpf/bpftool/jit_disasm.c const bfd_arch_info_type *inf = bfd_scan_arch(arch); arch 114 tools/bpf/bpftool/jit_disasm.c p_err("No libbfd support for %s", arch); arch 119 tools/bpf/bpftool/jit_disasm.c info.arch = bfd_get_arch(bfdf); arch 129 tools/bpf/bpftool/jit_disasm.c disassemble = disassembler(info.arch, arch 167 tools/bpf/bpftool/main.h const char *arch, const char *disassembler_options, arch 176 tools/bpf/bpftool/main.h const char *arch, const char *disassembler_options, arch 297 tools/include/uapi/linux/kvm.h struct kvm_debug_exit_arch arch; arch 669 tools/include/uapi/linux/kvm.h struct kvm_guest_debug_arch arch; arch 4 tools/perf/arch/arc/annotate/instructions.c static int arc__annotate_init(struct arch *arch, char *cpuid __maybe_unused) arch 6 tools/perf/arch/arc/annotate/instructions.c arch->initialized = true; arch 7 tools/perf/arch/arc/annotate/instructions.c arch->objdump.comment_char = ';'; arch 13 tools/perf/arch/arm/annotate/instructions.c static struct ins_ops *arm__associate_instruction_ops(struct arch *arch, const char *name) arch 15 tools/perf/arch/arm/annotate/instructions.c struct arm_annotate *arm = arch->priv; arch 26 tools/perf/arch/arm/annotate/instructions.c arch__associate_ins_ops(arch, name, ops); arch 30 tools/perf/arch/arm/annotate/instructions.c static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused) arch 35 tools/perf/arch/arm/annotate/instructions.c if (arch->initialized) arch 51 tools/perf/arch/arm/annotate/instructions.c arch->initialized = true; arch 52 tools/perf/arch/arm/annotate/instructions.c arch->priv = arm; arch 53 tools/perf/arch/arm/annotate/instructions.c arch->associate_instruction_ops = arm__associate_instruction_ops; arch 54 tools/perf/arch/arm/annotate/instructions.c arch->objdump.comment_char = ';'; arch 55 tools/perf/arch/arm/annotate/instructions.c arch->objdump.skip_functions_char = '+'; arch 12 tools/perf/arch/arm64/annotate/instructions.c static int arm64_mov__parse(struct arch *arch __maybe_unused, arch 69 tools/perf/arch/arm64/annotate/instructions.c static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const char *name) arch 71 tools/perf/arch/arm64/annotate/instructions.c struct arm64_annotate *arm = arch->priv; arch 84 tools/perf/arch/arm64/annotate/instructions.c arch__associate_ins_ops(arch, name, ops); arch 88 tools/perf/arch/arm64/annotate/instructions.c static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused) arch 93 tools/perf/arch/arm64/annotate/instructions.c if (arch->initialized) arch 110 tools/perf/arch/arm64/annotate/instructions.c arch->initialized = true; arch 111 tools/perf/arch/arm64/annotate/instructions.c arch->priv = arm; arch 112 tools/perf/arch/arm64/annotate/instructions.c arch->associate_instruction_ops = arm64__associate_instruction_ops; arch 113 tools/perf/arch/arm64/annotate/instructions.c arch->objdump.comment_char = '/'; arch 114 tools/perf/arch/arm64/annotate/instructions.c arch->objdump.skip_functions_char = '+'; arch 136 tools/perf/arch/common.c const char *arch = perf_env__arch(env), *cross_env; arch 144 tools/perf/arch/common.c if (!strcmp(perf_env__arch(NULL), arch)) arch 161 tools/perf/arch/common.c if (!strcmp(arch, "arc")) arch 163 tools/perf/arch/common.c else if (!strcmp(arch, "arm")) arch 165 tools/perf/arch/common.c else if (!strcmp(arch, "arm64")) arch 167 tools/perf/arch/common.c else if (!strcmp(arch, "powerpc")) arch 169 
tools/perf/arch/common.c else if (!strcmp(arch, "sh")) arch 171 tools/perf/arch/common.c else if (!strcmp(arch, "s390")) arch 173 tools/perf/arch/common.c else if (!strcmp(arch, "sparc")) arch 175 tools/perf/arch/common.c else if (!strcmp(arch, "x86")) arch 177 tools/perf/arch/common.c else if (!strcmp(arch, "mips")) arch 180 tools/perf/arch/common.c ui__error("binutils for %s not supported.\n", arch); arch 189 tools/perf/arch/common.c name, arch, name); arch 211 tools/perf/arch/common.c if (env->arch == NULL) arch 6 tools/perf/arch/csky/annotate/instructions.c static struct ins_ops *csky__associate_ins_ops(struct arch *arch, arch 37 tools/perf/arch/csky/annotate/instructions.c arch__associate_ins_ops(arch, name, ops); arch 41 tools/perf/arch/csky/annotate/instructions.c static int csky__annotate_init(struct arch *arch, char *cpuid __maybe_unused) arch 43 tools/perf/arch/csky/annotate/instructions.c arch->initialized = true; arch 44 tools/perf/arch/csky/annotate/instructions.c arch->objdump.comment_char = '/'; arch 45 tools/perf/arch/csky/annotate/instructions.c arch->associate_instruction_ops = csky__associate_ins_ops; arch 4 tools/perf/arch/powerpc/annotate/instructions.c static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, const char *name) arch 48 tools/perf/arch/powerpc/annotate/instructions.c arch__associate_ins_ops(arch, name, ops); arch 52 tools/perf/arch/powerpc/annotate/instructions.c static int powerpc__annotate_init(struct arch *arch, char *cpuid __maybe_unused) arch 54 tools/perf/arch/powerpc/annotate/instructions.c if (!arch->initialized) { arch 55 tools/perf/arch/powerpc/annotate/instructions.c arch->initialized = true; arch 56 tools/perf/arch/powerpc/annotate/instructions.c arch->associate_instruction_ops = powerpc__associate_instruction_ops; arch 57 tools/perf/arch/powerpc/annotate/instructions.c arch->objdump.comment_char = '#'; arch 4 tools/perf/arch/s390/annotate/instructions.c static int s390_call__parse(struct arch *arch, struct ins_operands *ops, arch 25 tools/perf/arch/s390/annotate/instructions.c if (arch->objdump.skip_functions_char && arch 26 tools/perf/arch/s390/annotate/instructions.c strchr(name, arch->objdump.skip_functions_char)) arch 56 tools/perf/arch/s390/annotate/instructions.c static int s390_mov__parse(struct arch *arch __maybe_unused, arch 109 tools/perf/arch/s390/annotate/instructions.c static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *name) arch 135 tools/perf/arch/s390/annotate/instructions.c arch__associate_ins_ops(arch, name, ops); arch 139 tools/perf/arch/s390/annotate/instructions.c static int s390__cpuid_parse(struct arch *arch, char *cpuid) arch 152 tools/perf/arch/s390/annotate/instructions.c arch->family = family; arch 153 tools/perf/arch/s390/annotate/instructions.c arch->model = 0; arch 160 tools/perf/arch/s390/annotate/instructions.c static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused) arch 164 tools/perf/arch/s390/annotate/instructions.c if (!arch->initialized) { arch 165 tools/perf/arch/s390/annotate/instructions.c arch->initialized = true; arch 166 tools/perf/arch/s390/annotate/instructions.c arch->associate_instruction_ops = s390__associate_ins_ops; arch 168 tools/perf/arch/s390/annotate/instructions.c if (s390__cpuid_parse(arch, cpuid)) arch 120 tools/perf/arch/sparc/annotate/instructions.c static struct ins_ops *sparc__associate_instruction_ops(struct arch *arch, const char *name) arch 155 tools/perf/arch/sparc/annotate/instructions.c 
arch__associate_ins_ops(arch, name, ops); arch 160 tools/perf/arch/sparc/annotate/instructions.c static int sparc__annotate_init(struct arch *arch, char *cpuid __maybe_unused) arch 162 tools/perf/arch/sparc/annotate/instructions.c if (!arch->initialized) { arch 163 tools/perf/arch/sparc/annotate/instructions.c arch->initialized = true; arch 164 tools/perf/arch/sparc/annotate/instructions.c arch->associate_instruction_ops = sparc__associate_instruction_ops; arch 165 tools/perf/arch/sparc/annotate/instructions.c arch->objdump.comment_char = '#'; arch 146 tools/perf/arch/x86/annotate/instructions.c static bool x86__ins_is_fused(struct arch *arch, const char *ins1, arch 149 tools/perf/arch/x86/annotate/instructions.c if (arch->family != 6 || arch->model < 0x1e || strstr(ins2, "jmp")) arch 152 tools/perf/arch/x86/annotate/instructions.c if (arch->model == 0x1e) { arch 174 tools/perf/arch/x86/annotate/instructions.c static int x86__cpuid_parse(struct arch *arch, char *cpuid) arch 184 tools/perf/arch/x86/annotate/instructions.c arch->family = family; arch 185 tools/perf/arch/x86/annotate/instructions.c arch->model = model; arch 192 tools/perf/arch/x86/annotate/instructions.c static int x86__annotate_init(struct arch *arch, char *cpuid) arch 196 tools/perf/arch/x86/annotate/instructions.c if (arch->initialized) arch 200 tools/perf/arch/x86/annotate/instructions.c if (x86__cpuid_parse(arch, cpuid)) arch 204 tools/perf/arch/x86/annotate/instructions.c arch->initialized = true; arch 3777 tools/perf/builtin-script.c !strcmp(uts.machine, session->header.env.arch) || arch 3779 tools/perf/builtin-script.c !strcmp(session->header.env.arch, "i386"))) arch 1075 tools/perf/pmu-events/jevents.c const char *arch; arch 1086 tools/perf/pmu-events/jevents.c arch = argv[1]; arch 1100 tools/perf/pmu-events/jevents.c sprintf(ldirname, "%s/%s", start_dirname, arch); arch 1104 tools/perf/pmu-events/jevents.c pr_info("%s: Arch %s has no PMU event lists\n", prog, arch); arch 212 tools/perf/trace/beauty/beauty.h const char *arch_syscalls__strerrno(const char *arch, int err); arch 27 tools/perf/ui/browsers/annotate.c struct arch; arch 34 tools/perf/ui/browsers/annotate.c struct arch *arch; arch 144 tools/perf/ui/browsers/annotate.c return ins__is_fused(ab->arch, name, cursor->ins.name); arch 921 tools/perf/ui/browsers/annotate.c err = symbol__annotate2(sym, map, evsel, opts, &browser.arch); arch 2997 tools/perf/ui/browsers/hists.c if (env->arch) arch 71 tools/perf/util/annotate.c static struct ins_ops *ins__find(struct arch *arch, const char *name); arch 72 tools/perf/util/annotate.c static void ins__sort(struct arch *arch); arch 80 tools/perf/util/annotate.c struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name); arch 86 tools/perf/util/annotate.c int (*init)(struct arch *arch, char *cpuid); arch 87 tools/perf/util/annotate.c bool (*ins_is_fused)(struct arch *arch, const char *ins1, arch 103 tools/perf/util/annotate.c static int arch__grow_instructions(struct arch *arch) arch 108 tools/perf/util/annotate.c if (arch->nr_instructions_allocated == 0 && arch->instructions) arch 111 tools/perf/util/annotate.c new_nr_allocated = arch->nr_instructions_allocated + 128; arch 112 tools/perf/util/annotate.c new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins)); arch 117 tools/perf/util/annotate.c arch->instructions = new_instructions; arch 118 tools/perf/util/annotate.c arch->nr_instructions_allocated = new_nr_allocated; arch 122 tools/perf/util/annotate.c new_nr_allocated 
= arch->nr_instructions + 128; arch 127 tools/perf/util/annotate.c memcpy(new_instructions, arch->instructions, arch->nr_instructions); arch 131 tools/perf/util/annotate.c static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops) arch 135 tools/perf/util/annotate.c if (arch->nr_instructions == arch->nr_instructions_allocated && arch 136 tools/perf/util/annotate.c arch__grow_instructions(arch)) arch 139 tools/perf/util/annotate.c ins = &arch->instructions[arch->nr_instructions]; arch 145 tools/perf/util/annotate.c arch->nr_instructions++; arch 147 tools/perf/util/annotate.c ins__sort(arch); arch 160 tools/perf/util/annotate.c static struct arch architectures[] = { arch 232 tools/perf/util/annotate.c bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2) arch 234 tools/perf/util/annotate.c if (!arch || !arch->ins_is_fused) arch 237 tools/perf/util/annotate.c return arch->ins_is_fused(arch, ins1, ins2); arch 240 tools/perf/util/annotate.c static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) arch 256 tools/perf/util/annotate.c if (arch->objdump.skip_functions_char && arch 257 tools/perf/util/annotate.c strchr(name, arch->objdump.skip_functions_char)) arch 329 tools/perf/util/annotate.c static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) arch 339 tools/perf/util/annotate.c ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char); arch 477 tools/perf/util/annotate.c static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) arch 486 tools/perf/util/annotate.c ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name); arch 492 tools/perf/util/annotate.c ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0) arch 535 tools/perf/util/annotate.c static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused) arch 550 tools/perf/util/annotate.c comment = strchr(s, arch->objdump.comment_char); arch 596 tools/perf/util/annotate.c static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms __maybe_unused) arch 613 tools/perf/util/annotate.c comment = strchr(s, arch->objdump.comment_char); arch 674 tools/perf/util/annotate.c static void ins__sort(struct arch *arch) arch 676 tools/perf/util/annotate.c const int nmemb = arch->nr_instructions; arch 678 tools/perf/util/annotate.c qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp); arch 681 tools/perf/util/annotate.c static struct ins_ops *__ins__find(struct arch *arch, const char *name) arch 684 tools/perf/util/annotate.c const int nmemb = arch->nr_instructions; arch 686 tools/perf/util/annotate.c if (!arch->sorted_instructions) { arch 687 tools/perf/util/annotate.c ins__sort(arch); arch 688 tools/perf/util/annotate.c arch->sorted_instructions = true; arch 691 tools/perf/util/annotate.c ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp); arch 695 tools/perf/util/annotate.c static struct ins_ops *ins__find(struct arch *arch, const char *name) arch 697 tools/perf/util/annotate.c struct ins_ops *ops = __ins__find(arch, name); arch 699 tools/perf/util/annotate.c if (!ops && arch->associate_instruction_ops) arch 700 tools/perf/util/annotate.c ops = arch->associate_instruction_ops(arch, name); arch 707 tools/perf/util/annotate.c const struct arch *arch = archp; arch 709 tools/perf/util/annotate.c return strcmp(name, arch->name); arch 714 tools/perf/util/annotate.c 
arch 715 tools/perf/util/annotate.c const struct arch *ab = b;
arch 724 tools/perf/util/annotate.c qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
arch 727 tools/perf/util/annotate.c static struct arch *arch__find(const char *name)
arch 737 tools/perf/util/annotate.c return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
arch 1100 tools/perf/util/annotate.c static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
arch 1102 tools/perf/util/annotate.c dl->ins.ops = ins__find(arch, dl->ins.name);
arch 1107 tools/perf/util/annotate.c if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0)
arch 1141 tools/perf/util/annotate.c struct arch *arch;
arch 1225 tools/perf/util/annotate.c disasm_line__init_ins(dl, args->arch, &args->ms);
arch 1755 tools/perf/util/annotate.c info.arch = bfd_get_arch(bfdf);
arch 1786 tools/perf/util/annotate.c disassemble = disassembler(info.arch,
arch 2077 tools/perf/util/annotate.c struct arch **parch)
arch 2087 tools/perf/util/annotate.c struct arch *arch;
arch 2093 tools/perf/util/annotate.c args.arch = arch = arch__find(arch_name);
arch 2094 tools/perf/util/annotate.c if (arch == NULL)
arch 2098 tools/perf/util/annotate.c *parch = arch;
arch 2100 tools/perf/util/annotate.c if (arch->init) {
arch 2101 tools/perf/util/annotate.c err = arch->init(arch, env ? env->cpuid : NULL);
arch 2103 tools/perf/util/annotate.c pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
arch 2983 tools/perf/util/annotate.c struct annotation_options *options, struct arch **parch)
arch 56 tools/perf/util/annotate.h struct arch;
arch 60 tools/perf/util/annotate.h int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms);
arch 70 tools/perf/util/annotate.h bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
arch 353 tools/perf/util/annotate.h struct arch **parch);
arch 357 tools/perf/util/annotate.h struct arch **parch);
arch 1376 tools/perf/util/data-convert-bt.c ADD("machine", header->env.arch);
arch 173 tools/perf/util/env.c zfree(&env->arch);
arch 263 tools/perf/util/env.c if (env->arch)
arch 267 tools/perf/util/env.c env->arch = strdup(uts.machine);
arch 269 tools/perf/util/env.c return env->arch ? 0 : -ENOMEM;
arch 282 tools/perf/util/env.c return env && !perf_env__read_arch(env) ? env->arch : "unknown";
env->arch : "unknown"; arch 301 tools/perf/util/env.c static const char *normalize_arch(char *arch) arch 303 tools/perf/util/env.c if (!strcmp(arch, "x86_64")) arch 305 tools/perf/util/env.c if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6') arch 307 tools/perf/util/env.c if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5)) arch 309 tools/perf/util/env.c if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64")) arch 311 tools/perf/util/env.c if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110")) arch 313 tools/perf/util/env.c if (!strncmp(arch, "s390", 4)) arch 315 tools/perf/util/env.c if (!strncmp(arch, "parisc", 6)) arch 317 tools/perf/util/env.c if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3)) arch 319 tools/perf/util/env.c if (!strncmp(arch, "mips", 4)) arch 321 tools/perf/util/env.c if (!strncmp(arch, "sh", 2) && isdigit(arch[2])) arch 324 tools/perf/util/env.c return arch; arch 332 tools/perf/util/env.c if (!env || !env->arch) { /* Assume local operation */ arch 337 tools/perf/util/env.c arch_name = env->arch; arch 44 tools/perf/util/env.h char *arch; arch 1410 tools/perf/util/header.c fprintf(fp, "# arch : %s\n", ff->ph->env.arch); arch 2052 tools/perf/util/header.c FEAT_PROCESS_STR_FUN(arch, arch); arch 2263 tools/perf/util/header.c if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4) arch 2264 tools/perf/util/header.c || !strncmp(ph->env.arch, "aarch64", 7))) arch 2847 tools/perf/util/header.c FEAT_OPR(ARCH, arch, false), arch 2639 tools/perf/util/machine.c bool machine__is(struct machine *machine, const char *arch) arch 2641 tools/perf/util/machine.c return machine && !strcmp(perf_env__raw_arch(machine->env), arch); arch 199 tools/perf/util/machine.h bool machine__is(struct machine *machine, const char *arch); arch 95 tools/perf/util/map.c const char *arch; arch 111 tools/perf/util/map.c arch = !strncmp(app_abi, "arm", 3) ? 
"arm" : arch 115 tools/perf/util/map.c if (!arch) arch 120 tools/perf/util/map.c + strlen(arch); arch 126 tools/perf/util/map.c ndk, app, arch, libname); arch 139 tools/perf/util/thread-stack.c const char *arch = perf_env__arch(machine->env); arch 142 tools/perf/util/thread-stack.c if (!strcmp(arch, "x86")) arch 24 tools/perf/util/unwind-libunwind.c const char *arch; arch 41 tools/perf/util/unwind-libunwind.c if (!mg->machine->env || !mg->machine->env->arch) arch 48 tools/perf/util/unwind-libunwind.c arch = perf_env__arch(mg->machine->env); arch 50 tools/perf/util/unwind-libunwind.c if (!strcmp(arch, "x86")) { arch 53 tools/perf/util/unwind-libunwind.c } else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) { arch 59 tools/perf/util/unwind-libunwind.c pr_err("unwind: target platform=%s is not supported\n", arch); arch 166 tools/testing/selftests/ptrace/get_syscall_info.c ASSERT_TRUE(info.arch) { arch 194 tools/testing/selftests/ptrace/get_syscall_info.c ASSERT_TRUE(info.arch) { arch 234 tools/testing/selftests/ptrace/get_syscall_info.c ASSERT_TRUE(info.arch) { arch 86 tools/testing/selftests/seccomp/seccomp_bpf.c __u32 arch; arch 161 virt/kvm/arm/arch_timer.c struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i]; arch 182 virt/kvm/arm/arch_timer.c vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu); arch 1133 virt/kvm/arm/arch_timer.c if (vcpu->arch.timer_cpu.enabled) arch 114 virt/kvm/arm/arm.c kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran)); arch 115 virt/kvm/arm/arm.c if (!kvm->arch.last_vcpu_ran) arch 119 virt/kvm/arm/arm.c *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1; arch 132 virt/kvm/arm/arm.c kvm->arch.vmid.vmid_gen = 0; arch 135 virt/kvm/arm/arm.c kvm->arch.max_vcpus = vgic_present ? arch 142 virt/kvm/arm/arm.c free_percpu(kvm->arch.last_vcpu_ran); arch 143 virt/kvm/arm/arm.c kvm->arch.last_vcpu_ran = NULL; arch 168 virt/kvm/arm/arm.c free_percpu(kvm->arch.last_vcpu_ran); arch 169 virt/kvm/arm/arm.c kvm->arch.last_vcpu_ran = NULL; arch 218 virt/kvm/arm/arm.c r = kvm->arch.vgic.msis_require_devid; arch 266 virt/kvm/arm/arm.c if (id >= kvm->arch.max_vcpus) { arch 300 virt/kvm/arm/arm.c if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm))) arch 344 virt/kvm/arm/arm.c vcpu->arch.target = -1; arch 345 virt/kvm/arm/arm.c bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); arch 372 virt/kvm/arm/arm.c last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran); arch 385 virt/kvm/arm/arm.c vcpu->arch.host_cpu_context = &cpu_data->host_ctxt; arch 400 virt/kvm/arm/arm.c struct kvm_cpu_context __maybe_unused *ctxt = vcpu->arch.host_cpu_context; arch 427 virt/kvm/arm/arm.c vcpu->arch.power_off = true; arch 435 virt/kvm/arm/arm.c if (vcpu->arch.power_off) arch 450 virt/kvm/arm/arm.c vcpu->arch.power_off = false; arch 473 virt/kvm/arm/arm.c && !v->arch.power_off && !v->arch.pause); arch 568 virt/kvm/arm/arm.c if (likely(vcpu->arch.has_run_once)) arch 574 virt/kvm/arm/arm.c vcpu->arch.has_run_once = true; arch 614 virt/kvm/arm/arm.c vcpu->arch.pause = true; arch 624 virt/kvm/arm/arm.c vcpu->arch.pause = false; arch 633 virt/kvm/arm/arm.c swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) && arch 634 virt/kvm/arm/arm.c (!vcpu->arch.pause))); arch 636 virt/kvm/arm/arm.c if (vcpu->arch.power_off || vcpu->arch.pause) { arch 651 virt/kvm/arm/arm.c return vcpu->arch.target >= 0; arch 714 virt/kvm/arm/arm.c update_vmid(&vcpu->kvm->arch.vmid); arch 763 virt/kvm/arm/arm.c if (ret <= 0 || 
arch 974 virt/kvm/arm/arm.c if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
arch 988 virt/kvm/arm/arm.c if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
arch 989 virt/kvm/arm/arm.c test_bit(i, vcpu->arch.features) != set)
arch 993 virt/kvm/arm/arm.c set_bit(i, vcpu->arch.features);
arch 996 virt/kvm/arm/arm.c vcpu->arch.target = phys_target;
arch 1001 virt/kvm/arm/arm.c vcpu->arch.target = -1;
arch 1002 virt/kvm/arm/arm.c bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
arch 1021 virt/kvm/arm/arm.c if (vcpu->arch.has_run_once)
arch 1029 virt/kvm/arm/arm.c if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
arch 1032 virt/kvm/arm/arm.c vcpu->arch.power_off = false;
arch 199 virt/kvm/arm/hyp/vgic-v3-sr.c struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
arch 200 virt/kvm/arm/hyp/vgic-v3-sr.c u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
arch 235 virt/kvm/arm/hyp/vgic-v3-sr.c struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
arch 236 virt/kvm/arm/hyp/vgic-v3-sr.c u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
arch 262 virt/kvm/arm/hyp/vgic-v3-sr.c struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
arch 311 virt/kvm/arm/hyp/vgic-v3-sr.c struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
arch 343 virt/kvm/arm/hyp/vgic-v3-sr.c cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
arch 380 virt/kvm/arm/hyp/vgic-v3-sr.c cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
arch 456 virt/kvm/arm/hyp/vgic-v3-sr.c unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
arch 495 virt/kvm/arm/hyp/vgic-v3-sr.c unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
arch 102 virt/kvm/arm/mmio.c if (vcpu->arch.mmio_decode.sign_extend &&
arch 108 virt/kvm/arm/mmio.c if (!vcpu->arch.mmio_decode.sixty_four)
arch 114 virt/kvm/arm/mmio.c vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
arch 149 virt/kvm/arm/mmio.c vcpu->arch.mmio_decode.sign_extend = sign_extend;
arch 150 virt/kvm/arm/mmio.c vcpu->arch.mmio_decode.rt = rt;
arch 151 virt/kvm/arm/mmio.c vcpu->arch.mmio_decode.sixty_four = sixty_four;
arch 180 virt/kvm/arm/mmio.c rt = vcpu->arch.mmio_decode.rt;
arch 344 virt/kvm/arm/mmu.c pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
arch 351 virt/kvm/arm/mmu.c if (!READ_ONCE(kvm->arch.pgd))
arch 421 virt/kvm/arm/mmu.c pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
arch 904 virt/kvm/arm/mmu.c if (kvm->arch.pgd != NULL) {
arch 918 virt/kvm/arm/mmu.c kvm->arch.pgd = pgd;
arch 919 virt/kvm/arm/mmu.c kvm->arch.pgd_phys = pgd_phys;
arch 1003 virt/kvm/arm/mmu.c if (kvm->arch.pgd) {
arch 1005 virt/kvm/arm/mmu.c pgd = READ_ONCE(kvm->arch.pgd);
arch 1006 virt/kvm/arm/mmu.c kvm->arch.pgd = NULL;
arch 1007 virt/kvm/arm/mmu.c kvm->arch.pgd_phys = 0;
arch 1022 virt/kvm/arm/mmu.c pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
arch 1507 virt/kvm/arm/mmu.c pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
arch 1519 virt/kvm/arm/mmu.c if (!READ_ONCE(kvm->arch.pgd))
arch 1679 virt/kvm/arm/mmu.c struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
arch 2054 virt/kvm/arm/mmu.c if (!kvm->arch.pgd)
arch 2085 virt/kvm/arm/mmu.c if (!kvm->arch.pgd)
arch 2139 virt/kvm/arm/mmu.c if (!kvm->arch.pgd)
arch 2147 virt/kvm/arm/mmu.c if (!kvm->arch.pgd)
arch 2156 virt/kvm/arm/mmu.c mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
arch 40 virt/kvm/arm/pmu.c return container_of(vcpu_arch, struct kvm_vcpu, arch);
arch 51 virt/kvm/arm/pmu.c return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
arch 142 virt/kvm/arm/pmu.c struct kvm_pmu *pmu = &vcpu->arch.pmu;
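
Several mmu.c entries just above read kvm->arch.pgd through READ_ONCE() before walking, because the stage-2 pgd can be torn down concurrently: the pointer is loaded once, then tested. A sketch of the guard with stand-in types; READ_ONCE is re-derived here with GNU C __typeof__ rather than the kernel's full definition:

    #include <stddef.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct kvm_arch_demo { unsigned long *pgd; };

    int stage2_ready(struct kvm_arch_demo *arch)
    {
        unsigned long *pgd = READ_ONCE(arch->pgd);

        return pgd != NULL;   /* caller bails out early when torn down */
    }
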
arch 228 virt/kvm/arm/pmu.c struct kvm_pmu *pmu = &vcpu->arch.pmu;
arch 242 virt/kvm/arm/pmu.c struct kvm_pmu *pmu = &vcpu->arch.pmu;
arch 247 virt/kvm/arm/pmu.c bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
arch 258 virt/kvm/arm/pmu.c struct kvm_pmu *pmu = &vcpu->arch.pmu;
arch 285 virt/kvm/arm/pmu.c struct kvm_pmu *pmu = &vcpu->arch.pmu;
arch 326 virt/kvm/arm/pmu.c struct kvm_pmu *pmu = &vcpu->arch.pmu;
arch 370 virt/kvm/arm/pmu.c struct kvm_pmu *pmu = &vcpu->arch.pmu;
arch 391 virt/kvm/arm/pmu.c struct kvm_pmu *pmu = &vcpu->arch.pmu;
arch 410 virt/kvm/arm/pmu.c if (vcpu->arch.pmu.irq_level)
arch 483 virt/kvm/arm/pmu.c struct kvm_pmu *pmu = &vcpu->arch.pmu;
arch 566 virt/kvm/arm/pmu.c struct kvm_pmu *pmu = &vcpu->arch.pmu;
arch 648 virt/kvm/arm/pmu.c struct kvm_pmu *pmu = &vcpu->arch.pmu;
arch 659 virt/kvm/arm/pmu.c set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
arch 661 virt/kvm/arm/pmu.c clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
arch 701 virt/kvm/arm/pmu.c if (!vcpu->arch.pmu.created)
arch 710 virt/kvm/arm/pmu.c int irq = vcpu->arch.pmu.irq_num;
arch 727 virt/kvm/arm/pmu.c vcpu->arch.pmu.ready = true;
arch 737 virt/kvm/arm/pmu.c if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
arch 740 virt/kvm/arm/pmu.c if (vcpu->arch.pmu.created)
arch 757 virt/kvm/arm/pmu.c ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
arch 758 virt/kvm/arm/pmu.c &vcpu->arch.pmu);
arch 763 virt/kvm/arm/pmu.c vcpu->arch.pmu.created = true;
arch 782 virt/kvm/arm/pmu.c if (vcpu->arch.pmu.irq_num != irq)
arch 785 virt/kvm/arm/pmu.c if (vcpu->arch.pmu.irq_num == irq)
arch 803 virt/kvm/arm/pmu.c if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
arch 820 virt/kvm/arm/pmu.c vcpu->arch.pmu.irq_num = irq;
arch 840 virt/kvm/arm/pmu.c if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
arch 846 virt/kvm/arm/pmu.c irq = vcpu->arch.pmu.irq_num;
arch 860 virt/kvm/arm/pmu.c test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
arch 89 virt/kvm/arm/psci.c vcpu->arch.power_off = true;
arch 113 virt/kvm/arm/psci.c if (!vcpu->arch.power_off) {
arch 120 virt/kvm/arm/psci.c reset_state = &vcpu->arch.reset_state;
arch 142 virt/kvm/arm/psci.c vcpu->arch.power_off = false;
arch 177 virt/kvm/arm/psci.c if (!tmp->arch.power_off)
arch 203 virt/kvm/arm/psci.c tmp->arch.power_off = true;
arch 539 virt/kvm/arm/psci.c wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
arch 545 virt/kvm/arm/psci.c vcpu->kvm->arch.psci_version = val;
arch 551 virt/kvm/arm/psci.c vcpu->kvm->arch.psci_version = val;
arch 63 virt/kvm/arm/vgic/vgic-debug.c iter->nr_spis = kvm->arch.vgic.nr_spis;
arch 64 virt/kvm/arm/vgic/vgic-debug.c if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
arch 89 virt/kvm/arm/vgic/vgic-debug.c iter = kvm->arch.vgic.iter;
arch 102 virt/kvm/arm/vgic/vgic-debug.c kvm->arch.vgic.iter = iter;
arch 114 virt/kvm/arm/vgic/vgic-debug.c struct vgic_state_iter *iter = kvm->arch.vgic.iter;
arch 136 virt/kvm/arm/vgic/vgic-debug.c iter = kvm->arch.vgic.iter;
arch 139 virt/kvm/arm/vgic/vgic-debug.c kvm->arch.vgic.iter = NULL;
arch 227 virt/kvm/arm/vgic/vgic-debug.c print_dist_state(s, &kvm->arch.vgic);
arch 231 virt/kvm/arm/vgic/vgic-debug.c if (!kvm->arch.vgic.initialized)
arch 54 virt/kvm/arm/vgic/vgic-init.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 102 virt/kvm/arm/vgic/vgic-init.c if (vcpu->arch.has_run_once)
arch 108 virt/kvm/arm/vgic/vgic-init.c kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
arch 110 virt/kvm/arm/vgic/vgic-init.c kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;
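
The pmu.c entries lean on two idioms: per-vcpu state hangs off an embedded arch member, and container_of() recovers the enclosing vcpu from a pointer to that member (the arch 40 entry earlier in this listing). A freestanding sketch with stand-in types, not the kernel definitions:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct vcpu_arch_demo { int pmu_irq; };
    struct vcpu_demo { int id; struct vcpu_arch_demo arch; };

    static struct vcpu_demo *arch_to_vcpu(struct vcpu_arch_demo *a)
    {
        return container_of(a, struct vcpu_demo, arch);
    }

    int main(void)
    {
        struct vcpu_demo v = { .id = 3, .arch = { .pmu_irq = 23 } };

        printf("vcpu %d owns this arch state\n", arch_to_vcpu(&v.arch)->id);
        return 0;
    }
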
arch 112 virt/kvm/arm/vgic/vgic-init.c if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) {
arch 117 virt/kvm/arm/vgic/vgic-init.c kvm->arch.vgic.in_kernel = true;
arch 118 virt/kvm/arm/vgic/vgic-init.c kvm->arch.vgic.vgic_model = type;
arch 120 virt/kvm/arm/vgic/vgic-init.c kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
arch 123 virt/kvm/arm/vgic/vgic-init.c kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
arch 125 virt/kvm/arm/vgic/vgic-init.c INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
arch 144 virt/kvm/arm/vgic/vgic-init.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 197 virt/kvm/arm/vgic/vgic-init.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 198 virt/kvm/arm/vgic/vgic-init.c struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
arch 265 virt/kvm/arm/vgic/vgic-init.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 286 virt/kvm/arm/vgic/vgic-init.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 331 virt/kvm/arm/vgic/vgic-init.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 341 virt/kvm/arm/vgic/vgic-init.c if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
arch 358 virt/kvm/arm/vgic/vgic-init.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 401 virt/kvm/arm/vgic/vgic-init.c if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
arch 424 virt/kvm/arm/vgic/vgic-init.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 124 virt/kvm/arm/vgic/vgic-irqfd.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 42 virt/kvm/arm/vgic/vgic-its.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 276 virt/kvm/arm/vgic/vgic-its.c u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
arch 314 virt/kvm/arm/vgic/vgic-its.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 363 virt/kvm/arm/vgic/vgic-its.c map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
arch 420 virt/kvm/arm/vgic/vgic-its.c gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
arch 572 virt/kvm/arm/vgic/vgic-its.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 587 virt/kvm/arm/vgic/vgic-its.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 638 virt/kvm/arm/vgic/vgic-its.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 676 virt/kvm/arm/vgic/vgic-its.c if (!vcpu->arch.vgic_cpu.lpis_enabled)
arch 1064 virt/kvm/arm/vgic/vgic-its.c lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
arch 1324 virt/kvm/arm/vgic/vgic-its.c if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
arch 1325 virt/kvm/arm/vgic/vgic-its.c its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
arch 1792 virt/kvm/arm/vgic/vgic-its.c if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
arch 1829 virt/kvm/arm/vgic/vgic-its.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 1853 virt/kvm/arm/vgic/vgic-its.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 1905 virt/kvm/arm/vgic/vgic-its.c dev->kvm->arch.vgic.msis_require_devid = true;
arch 1906 virt/kvm/arm/vgic/vgic-its.c dev->kvm->arch.vgic.has_its = true;
arch 1914 virt/kvm/arm/vgic/vgic-its.c dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
arch 34 virt/kvm/arm/vgic/vgic-kvm-device.c if (kvm->arch.vgic.vgic_model != type_needed)
arch 59 virt/kvm/arm/vgic/vgic-kvm-device.c struct vgic_dist *vgic = &kvm->arch.vgic;
arch 193 virt/kvm/arm/vgic/vgic-kvm-device.c if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
arch 196 virt/kvm/arm/vgic/vgic-kvm-device.c dev->kvm->arch.vgic.nr_spis =
arch 240 virt/kvm/arm/vgic/vgic-kvm-device.c r = put_user(dev->kvm->arch.vgic.nr_spis +
arch 28 virt/kvm/arm/vgic/vgic-mmio-v2.c struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
arch 56 virt/kvm/arm/vgic/vgic-mmio-v2.c struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
arch 90 virt/kvm/arm/vgic/vgic-mmio-v2.c vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
arch 102 virt/kvm/arm/vgic/vgic-mmio-v2.c if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
arch 362 virt/kvm/arm/vgic/vgic-mmio-v2.c return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
arch 364 virt/kvm/arm/vgic/vgic-mmio-v2.c struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
arch 388 virt/kvm/arm/vgic/vgic-mmio-v2.c vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
arch 390 virt/kvm/arm/vgic/vgic-mmio-v2.c struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
arch 41 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 64 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
arch 99 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
arch 177 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 187 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 208 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 387 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
arch 396 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
arch 397 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 416 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 425 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 608 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_dist *vgic = &kvm->arch.vgic;
arch 609 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 610 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
arch 656 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
arch 703 virt/kvm/arm/vgic/vgic-mmio-v3.c struct vgic_dist *d = &kvm->arch.vgic;
arch 217 virt/kvm/arm/vgic/vgic-mmio.c vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
arch 321 virt/kvm/arm/vgic/vgic-mmio.c if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
arch 329 virt/kvm/arm/vgic/vgic-mmio.c if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
arch 403 virt/kvm/arm/vgic/vgic-mmio.c u32 model = vcpu->kvm->arch.vgic.vgic_model;
arch 607 virt/kvm/arm/vgic/vgic-mmio.c int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
arch 629 virt/kvm/arm/vgic/vgic-mmio.c int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
arch 759 virt/kvm/arm/vgic/vgic-mmio.c int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
arch 922 virt/kvm/arm/vgic/vgic-mmio.c struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
arch 31 virt/kvm/arm/vgic/vgic-v2.c struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
arch 51 virt/kvm/arm/vgic/vgic-v2.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 212 virt/kvm/arm/vgic/vgic-v2.c vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
arch 217 virt/kvm/arm/vgic/vgic-v2.c vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
arch 222 virt/kvm/arm/vgic/vgic-v2.c struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
arch 249 virt/kvm/arm/vgic/vgic-v2.c struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
arch 282 virt/kvm/arm/vgic/vgic-v2.c vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
arch 285 virt/kvm/arm/vgic/vgic-v2.c vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
arch 306 virt/kvm/arm/vgic/vgic-v2.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 429 virt/kvm/arm/vgic/vgic-v2.c struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
arch 430 virt/kvm/arm/vgic/vgic-v2.c u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
arch 451 virt/kvm/arm/vgic/vgic-v2.c u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
arch 464 virt/kvm/arm/vgic/vgic-v2.c struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
arch 466 virt/kvm/arm/vgic/vgic-v2.c u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
arch 483 virt/kvm/arm/vgic/vgic-v2.c struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
arch 493 virt/kvm/arm/vgic/vgic-v2.c struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
arch 500 virt/kvm/arm/vgic/vgic-v2.c struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
arch 20 virt/kvm/arm/vgic/vgic-v3.c struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
arch 33 virt/kvm/arm/vgic/vgic-v3.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 35 virt/kvm/arm/vgic/vgic-v3.c u32 model = vcpu->kvm->arch.vgic.vgic_model;
arch 120 virt/kvm/arm/vgic/vgic-v3.c u32 model = vcpu->kvm->arch.vgic.vgic_model;
arch 197 virt/kvm/arm/vgic/vgic-v3.c vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
arch 202 virt/kvm/arm/vgic/vgic-v3.c vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
arch 207 virt/kvm/arm/vgic/vgic-v3.c struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
arch 208 virt/kvm/arm/vgic/vgic-v3.c u32 model = vcpu->kvm->arch.vgic.vgic_model;
arch 237 virt/kvm/arm/vgic/vgic-v3.c struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
arch 238 virt/kvm/arm/vgic/vgic-v3.c u32 model = vcpu->kvm->arch.vgic.vgic_model;
arch 273 virt/kvm/arm/vgic/vgic-v3.c struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
arch 288 virt/kvm/arm/vgic/vgic-v3.c if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
arch 292 virt/kvm/arm/vgic/vgic-v3.c vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
arch 297 virt/kvm/arm/vgic/vgic-v3.c vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
arch 300 virt/kvm/arm/vgic/vgic-v3.c vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
arch 329 virt/kvm/arm/vgic/vgic-v3.c pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
arch 365 virt/kvm/arm/vgic/vgic-v3.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 381 virt/kvm/arm/vgic/vgic-v3.c pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
arch 422 virt/kvm/arm/vgic/vgic-v3.c struct vgic_dist *d = &kvm->arch.vgic;
arch 439 virt/kvm/arm/vgic/vgic-v3.c struct vgic_dist *d = &kvm->arch.vgic;
arch 485 virt/kvm/arm/vgic/vgic-v3.c struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
arch 498 virt/kvm/arm/vgic/vgic-v3.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 507 virt/kvm/arm/vgic/vgic-v3.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 653 virt/kvm/arm/vgic/vgic-v3.c struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
arch 671 virt/kvm/arm/vgic/vgic-v3.c struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
arch 88 virt/kvm/arm/vgic/vgic-v4.c vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
arch 106 virt/kvm/arm/vgic/vgic-v4.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 126 virt/kvm/arm/vgic/vgic-v4.c dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
arch 175 virt/kvm/arm/vgic/vgic-v4.c struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
arch 200 virt/kvm/arm/vgic/vgic-v4.c return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
arch 205 virt/kvm/arm/vgic/vgic-v4.c int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
arch 221 virt/kvm/arm/vgic/vgic-v4.c err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
arch 282 virt/kvm/arm/vgic/vgic-v4.c .vm = &kvm->arch.vgic.its_vm,
arch 283 virt/kvm/arm/vgic/vgic-v4.c .vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
arch 342 virt/kvm/arm/vgic/vgic-v4.c int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
arch 351 virt/kvm/arm/vgic/vgic-v4.c int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
arch 60 virt/kvm/arm/vgic/vgic.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 96 virt/kvm/arm/vgic/vgic.c return &vcpu->arch.vgic_cpu.private_irqs[intid];
arch 100 virt/kvm/arm/vgic/vgic.c if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
arch 101 virt/kvm/arm/vgic/vgic.c intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
arch 102 virt/kvm/arm/vgic/vgic.c return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
arch 127 virt/kvm/arm/vgic/vgic.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 140 virt/kvm/arm/vgic/vgic.c struct vgic_dist *dist = &kvm->arch.vgic;
arch 153 virt/kvm/arm/vgic/vgic.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 231 virt/kvm/arm/vgic/vgic.c !irq->target_vcpu->kvm->arch.vgic.enabled))
arch 299 virt/kvm/arm/vgic/vgic.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 379 virt/kvm/arm/vgic/vgic.c raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
arch 396 virt/kvm/arm/vgic/vgic.c raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
arch 408 virt/kvm/arm/vgic/vgic.c list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
arch 412 virt/kvm/arm/vgic/vgic.c raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
arch 620 virt/kvm/arm/vgic/vgic.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 681 virt/kvm/arm/vgic/vgic.c raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
arch 682 virt/kvm/arm/vgic/vgic.c raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
arch 696 virt/kvm/arm/vgic/vgic.c struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;
arch 705 virt/kvm/arm/vgic/vgic.c raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
arch 706 virt/kvm/arm/vgic/vgic.c raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
arch 759 virt/kvm/arm/vgic/vgic.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 784 virt/kvm/arm/vgic/vgic.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 830 virt/kvm/arm/vgic/vgic.c vcpu->arch.vgic_cpu.used_lrs = count;
arch 858 virt/kvm/arm/vgic/vgic.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 863 virt/kvm/arm/vgic/vgic.c if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
arch 899 virt/kvm/arm/vgic/vgic.c if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
arch 905 virt/kvm/arm/vgic/vgic.c if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
arch 906 virt/kvm/arm/vgic/vgic.c raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
arch 908 virt/kvm/arm/vgic/vgic.c raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
arch 950 virt/kvm/arm/vgic/vgic.c struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
arch 956 virt/kvm/arm/vgic/vgic.c if (!vcpu->kvm->arch.vgic.enabled)
arch 959 virt/kvm/arm/vgic/vgic.c if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
arch 260 virt/kvm/arm/vgic/vgic.h struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
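
The paired lock/unlock entries from vgic.c above (arch 681/682 and 705/706) show the two-lock discipline: when two vcpus' ap_list_locks must be held at once, they are always taken in one fixed global order so concurrent migrations in opposite directions cannot deadlock. The vgic picks vcpuA/vcpuB deterministically; ordering by address, as in this userspace pthreads stand-in, is an equivalent scheme:

    #include <pthread.h>

    void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        /* fixed global order: lowest address first */
        if (a > b) {
            pthread_mutex_t *tmp = a;
            a = b;
            b = tmp;
        }
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);   /* second ("nested") acquisition */
    }

    void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        /* unlock order cannot cause deadlock; release both */
        pthread_mutex_unlock(a);
        pthread_mutex_unlock(b);
    }
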
arch 301 virt/kvm/arm/vgic/vgic.h struct vgic_dist *d = &kvm->arch.vgic;
arch 169 virt/kvm/async_pf.c unsigned long hva, struct kvm_arch_async_pf *arch)
arch 190 virt/kvm/async_pf.c work->arch = *arch;
arch 1133 virt/kvm/kvm_main.c memset(&new.arch, 0, sizeof(new.arch));
arch 2720 virt/kvm/kvm_main.c page = virt_to_page(vcpu->arch.pio_data);
arch 4449 virt/kvm/kvm_main.c offsetof(struct kvm_vcpu, arch),
arch 4450 virt/kvm/kvm_main.c sizeof_field(struct kvm_vcpu, arch),
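
The closing kvm_main.c entries hand offsetof(struct kvm_vcpu, arch) and sizeof_field(struct kvm_vcpu, arch) to a cache constructor, treating the arch-specific region of the vcpu as one opaque span. The same introspection in plain C; sizeof_field is re-derived here since it is a kernel macro, and the demo types stand in for kvm_vcpu:

    #include <stddef.h>
    #include <stdio.h>

    #define sizeof_field(type, member) sizeof(((type *)0)->member)

    struct vcpu_arch_demo { long regs[32]; };
    struct vcpu_demo { int id; struct vcpu_arch_demo arch; };

    int main(void)
    {
        printf("arch member: offset %zu, size %zu\n",
               offsetof(struct vcpu_demo, arch),
               sizeof_field(struct vcpu_demo, arch));
        return 0;
    }
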