ctxt 132 arch/arm/include/asm/kvm_emulate.h return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
ctxt 137 arch/arm/include/asm/kvm_emulate.h return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
ctxt 147 arch/arm/include/asm/kvm_emulate.h unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
ctxt 153 arch/arm/include/asm/kvm_emulate.h unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
ctxt 163 arch/arm/include/asm/kvm_host.h struct kvm_cpu_context ctxt;
ctxt 224 arch/arm/include/asm/kvm_host.h #define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r]
ctxt 102 arch/arm/include/asm/kvm_hyp.h void __sysreg_save_state(struct kvm_cpu_context *ctxt);
ctxt 103 arch/arm/include/asm/kvm_hyp.h void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
ctxt 119 arch/arm/include/asm/kvm_hyp.h void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
ctxt 120 arch/arm/include/asm/kvm_hyp.h void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
ctxt 171 arch/arm/kernel/asm-offsets.c DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt));
ctxt 1099 arch/arm/kvm/coproc.c return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
ctxt 1109 arch/arm/kvm/coproc.c return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
ctxt 1111 arch/arm/kvm/coproc.c return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
ctxt 1113 arch/arm/kvm/coproc.c return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
ctxt 1115 arch/arm/kvm/coproc.c return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
ctxt 1143 arch/arm/kvm/coproc.c return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
ctxt 1153 arch/arm/kvm/coproc.c return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
ctxt 1155 arch/arm/kvm/coproc.c return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
ctxt 1157 arch/arm/kvm/coproc.c return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
ctxt 1159 arch/arm/kvm/coproc.c return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
ctxt 77 arch/arm/kvm/coproc.h BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
ctxt 84 arch/arm/kvm/coproc.h BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
ctxt 92 arch/arm/kvm/coproc.h BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
ctxt 103 arch/arm/kvm/emulate.c unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
ctxt 138 arch/arm/kvm/emulate.c return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
ctxt 140 arch/arm/kvm/emulate.c return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
ctxt 142 arch/arm/kvm/emulate.c return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
ctxt 144 arch/arm/kvm/emulate.c return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
ctxt 146 arch/arm/kvm/emulate.c return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
ctxt 46 arch/arm/kvm/guest.c struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
ctxt 63 arch/arm/kvm/guest.c struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
ctxt 18 arch/arm/kvm/hyp/banked-sr.c void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
ctxt 20 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr);
ctxt 21 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp);
ctxt 22 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR);
ctxt 23 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc);
ctxt 24 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc);
ctxt 25 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc);
ctxt 26 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt);
ctxt 27 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt);
ctxt 28 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt);
ctxt 29 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_UND_sp = read_special(SP_und);
ctxt 30 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_UND_lr = read_special(LR_und);
ctxt 31 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_UND_spsr = read_special(SPSR_und);
ctxt 32 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_IRQ_sp = read_special(SP_irq);
ctxt 33 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_IRQ_lr = read_special(LR_irq);
ctxt 34 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_IRQ_spsr = read_special(SPSR_irq);
ctxt 35 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_FIQ_r8 = read_special(R8_fiq);
ctxt 36 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_FIQ_r9 = read_special(R9_fiq);
ctxt 37 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_FIQ_r10 = read_special(R10_fiq);
ctxt 38 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_FIQ_fp = read_special(R11_fiq);
ctxt 39 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_FIQ_ip = read_special(R12_fiq);
ctxt 40 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_FIQ_sp = read_special(SP_fiq);
ctxt 41 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_FIQ_lr = read_special(LR_fiq);
ctxt 42 arch/arm/kvm/hyp/banked-sr.c ctxt->gp_regs.KVM_ARM_FIQ_spsr = read_special(SPSR_fiq);
ctxt 45 arch/arm/kvm/hyp/banked-sr.c void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt)
ctxt 47 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.usr_regs.ARM_sp, SP_usr);
ctxt 48 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.usr_regs.ARM_pc, ELR_hyp);
ctxt 49 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.usr_regs.ARM_cpsr, SPSR_cxsf);
ctxt 50 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_SVC_sp, SP_svc);
ctxt 51 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_SVC_lr, LR_svc);
ctxt 52 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr, SPSR_svc);
ctxt 53 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_ABT_sp, SP_abt);
ctxt 54 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_ABT_lr, LR_abt);
ctxt 55 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr, SPSR_abt);
ctxt 56 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_UND_sp, SP_und);
ctxt 57 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_UND_lr, LR_und);
ctxt 58 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_UND_spsr, SPSR_und);
ctxt 59 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp, SP_irq);
ctxt 60 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr, LR_irq);
ctxt 61 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr, SPSR_irq);
ctxt 62 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8, R8_fiq);
ctxt 63 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9, R9_fiq);
ctxt 64 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10, R10_fiq);
ctxt 65 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp, R11_fiq);
ctxt 66 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip, R12_fiq);
ctxt 67 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp, SP_fiq);
ctxt 68 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr, LR_fiq);
ctxt 69 arch/arm/kvm/hyp/banked-sr.c write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr, SPSR_fiq);
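The __banked_save_state()/__banked_restore_state() occurrences above walk every ARMv7 banked register (usr, svc, abt, und, irq, fiq) through the read_special()/write_special() helpers. As a rough sketch of what those helpers expand to (the real macros live in arch/arm/include/asm/kvm_hyp.h; this is an approximation for illustration and needs the ARMv7 virtualization extensions' banked MRS/MSR forms):

/* Approximate shape of the accessors used in banked-sr.c above. */
#define read_special(r) ({                                  \
        u32 __val;                                          \
        asm volatile("mrs %0, " #r : "=r" (__val));         \
        __val;                                              \
})

#define write_special(v, r)                                 \
        asm volatile("msr " #r ", %0" : : "r" ((u32)(v)))

Stringizing the register name lets one macro cover SP_usr, SPSR_fiq, ELR_hyp and the rest, which is why the save/restore bodies above read as a flat list of one-liners.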
ctxt 12 arch/arm/kvm/hyp/cp15-sr.c static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
ctxt 14 arch/arm/kvm/hyp/cp15-sr.c return (u64 *)(ctxt->cp15 + idx);
ctxt 17 arch/arm/kvm/hyp/cp15-sr.c void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
ctxt 19 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);
ctxt 20 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);
ctxt 21 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);
ctxt 22 arch/arm/kvm/hyp/cp15-sr.c *cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0);
ctxt 23 arch/arm/kvm/hyp/cp15-sr.c *cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1);
ctxt 24 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR);
ctxt 25 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c3_DACR] = read_sysreg(DACR);
ctxt 26 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c5_DFSR] = read_sysreg(DFSR);
ctxt 27 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c5_IFSR] = read_sysreg(IFSR);
ctxt 28 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c5_ADFSR] = read_sysreg(ADFSR);
ctxt 29 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c5_AIFSR] = read_sysreg(AIFSR);
ctxt 30 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c6_DFAR] = read_sysreg(DFAR);
ctxt 31 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c6_IFAR] = read_sysreg(IFAR);
ctxt 32 arch/arm/kvm/hyp/cp15-sr.c *cp15_64(ctxt, c7_PAR) = read_sysreg(PAR);
ctxt 33 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c10_PRRR] = read_sysreg(PRRR);
ctxt 34 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c10_NMRR] = read_sysreg(NMRR);
ctxt 35 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c10_AMAIR0] = read_sysreg(AMAIR0);
ctxt 36 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c10_AMAIR1] = read_sysreg(AMAIR1);
ctxt 37 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c12_VBAR] = read_sysreg(VBAR);
ctxt 38 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c13_CID] = read_sysreg(CID);
ctxt 39 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c13_TID_URW] = read_sysreg(TID_URW);
ctxt 40 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c13_TID_URO] = read_sysreg(TID_URO);
ctxt 41 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c13_TID_PRIV] = read_sysreg(TID_PRIV);
ctxt 42 arch/arm/kvm/hyp/cp15-sr.c ctxt->cp15[c14_CNTKCTL] = read_sysreg(CNTKCTL);
ctxt 45 arch/arm/kvm/hyp/cp15-sr.c void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
ctxt 47 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c0_MPIDR], VMPIDR);
ctxt 48 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c0_CSSELR], CSSELR);
ctxt 49 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c1_SCTLR], SCTLR);
ctxt 50 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c1_CPACR], CPACR);
ctxt 51 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0);
ctxt 52 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1);
ctxt 53 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c2_TTBCR], TTBCR);
ctxt 54 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c3_DACR], DACR);
ctxt 55 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c5_DFSR], DFSR);
ctxt 56 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c5_IFSR], IFSR);
ctxt 57 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c5_ADFSR], ADFSR);
ctxt 58 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c5_AIFSR], AIFSR);
ctxt 59 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c6_DFAR], DFAR);
ctxt 60 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c6_IFAR], IFAR);
ctxt 61 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(*cp15_64(ctxt, c7_PAR), PAR);
ctxt 62 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c10_PRRR], PRRR);
ctxt 63 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c10_NMRR], NMRR);
ctxt 64 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c10_AMAIR0], AMAIR0);
ctxt 65 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c10_AMAIR1], AMAIR1);
ctxt 66 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c12_VBAR], VBAR);
ctxt 67 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c13_CID], CID);
ctxt 68 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c13_TID_URW], TID_URW);
ctxt 69 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c13_TID_URO], TID_URO);
ctxt 70 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c13_TID_PRIV], TID_PRIV);
ctxt 71 arch/arm/kvm/hyp/cp15-sr.c write_sysreg(ctxt->cp15[c14_CNTKCTL], CNTKCTL);
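cp15_64() above overlays a u64 view onto two consecutive u32 slots of the cp15[] array, so 64-bit registers such as TTBR0, TTBR1 and PAR are saved and restored in one access. A standalone sketch of the idea (the struct and index values are illustrative; the kernel builds with -fno-strict-aliasing, which is what makes this kind of cast safe there):

#include <stdint.h>
#include <stdio.h>

struct cpu_ctxt { uint32_t cp15[16]; };   /* stand-in for kvm_cpu_context */

/* 64-bit view onto two adjacent u32 slots; the index must be even so the
 * access stays naturally aligned, which the register layout guarantees. */
static uint64_t *cp15_64(struct cpu_ctxt *ctxt, int idx)
{
        return (uint64_t *)(ctxt->cp15 + idx);
}

int main(void)
{
        struct cpu_ctxt c = { { 0 } };
        *cp15_64(&c, 2) = 0x1122334455667788ULL;      /* e.g. a 64-bit TTBR0 */
        printf("%08x %08x\n", c.cp15[2], c.cp15[3]);  /* the two halves */
        return 0;
}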
ctxt 158 arch/arm/kvm/hyp/switch.c guest_ctxt = &vcpu->arch.ctxt;
ctxt 56 arch/arm/kvm/reset.c memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs));
ctxt 110 arch/arm64/include/asm/kvm_asm.h .macro get_vcpu_ptr vcpu, ctxt
ctxt 500 arch/arm64/include/asm/kvm_emulate.h vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
ctxt 504 arch/arm64/include/asm/kvm_emulate.h write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
ctxt 245 arch/arm64/include/asm/kvm_host.h struct kvm_cpu_context ctxt;
ctxt 378 arch/arm64/include/asm/kvm_host.h #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
ctxt 386 arch/arm64/include/asm/kvm_host.h #define __vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
ctxt 397 arch/arm64/include/asm/kvm_host.h #define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
ctxt 398 arch/arm64/include/asm/kvm_host.h #define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
ctxt 63 arch/arm64/include/asm/kvm_hyp.h void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
ctxt 64 arch/arm64/include/asm/kvm_hyp.h void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
ctxt 65 arch/arm64/include/asm/kvm_hyp.h void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
ctxt 66 arch/arm64/include/asm/kvm_hyp.h void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
ctxt 67 arch/arm64/include/asm/kvm_hyp.h void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
ctxt 68 arch/arm64/include/asm/kvm_hyp.h void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
ctxt 92 arch/arm64/kernel/asm-offsets.c DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
ctxt 89 arch/arm64/kvm/fpsimd.c fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs,
ctxt 113 arch/arm64/kvm/fpsimd.c u64 *guest_zcr = &vcpu->arch.ctxt.sys_regs[ZCR_EL1];
ctxt 134 arch/arm64/kvm/hyp/debug-sr.c struct kvm_cpu_context *ctxt)
ctxt 148 arch/arm64/kvm/hyp/debug-sr.c ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
ctxt 153 arch/arm64/kvm/hyp/debug-sr.c struct kvm_cpu_context *ctxt)
ctxt 168 arch/arm64/kvm/hyp/debug-sr.c write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
ctxt 189 arch/arm64/kvm/hyp/debug-sr.c guest_ctxt = &vcpu->arch.ctxt;
ctxt 211 arch/arm64/kvm/hyp/debug-sr.c guest_ctxt = &vcpu->arch.ctxt;
ctxt 52 arch/arm64/kvm/hyp/switch.c vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
ctxt 379 arch/arm64/kvm/hyp/switch.c &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
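On arm64, the vcpu_cp14()/vcpu_cp15() macros above index copro[], a 32-bit view of the same storage that backs the 64-bit sys_regs[] array, and XOR the index with CPx_BIAS. The bias appears to compensate for byte order: on a little-endian kernel the low half of sys_regs[i] is word 2*i of the 32-bit view, on big-endian it is word 2*i+1. A standalone sketch of that indexing trick (the macro name and bias value here are illustrative, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

#define CPX_BIAS 0   /* illustrative: would be 1 on a big-endian build */
#define copro_reg(sys_regs, r) (((uint32_t *)(sys_regs))[(r) ^ CPX_BIAS])

int main(void)
{
        uint64_t sys_regs[2] = { 0x1111111122222222ULL, 0 };
        /* With the bias set correctly for the build, index 0 always names
         * the architecturally low 32-bit half of sys_regs[0]. */
        printf("%08x\n", copro_reg(sys_regs, 0));
        return 0;
}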
ctxt 381 arch/arm64/kvm/hyp/switch.c write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
ctxt 383 arch/arm64/kvm/hyp/switch.c __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
ctxt 388 arch/arm64/kvm/hyp/switch.c write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
ctxt 605 arch/arm64/kvm/hyp/switch.c guest_ctxt = &vcpu->arch.ctxt;
ctxt 675 arch/arm64/kvm/hyp/switch.c guest_ctxt = &vcpu->arch.ctxt;
ctxt 26 arch/arm64/kvm/hyp/sysreg-sr.c static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
ctxt 28 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
ctxt 34 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
ctxt 37 arch/arm64/kvm/hyp/sysreg-sr.c static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
ctxt 39 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
ctxt 40 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
ctxt 43 arch/arm64/kvm/hyp/sysreg-sr.c static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt 45 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
ctxt 46 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
ctxt 47 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
ctxt 48 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR);
ctxt 49 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0);
ctxt 50 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1);
ctxt 51 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(SYS_TCR);
ctxt 52 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(SYS_ESR);
ctxt 53 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(SYS_AFSR0);
ctxt 54 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(SYS_AFSR1);
ctxt 55 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(SYS_FAR);
ctxt 56 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(SYS_MAIR);
ctxt 57 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(SYS_VBAR);
ctxt 58 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(SYS_CONTEXTIDR);
ctxt 59 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(SYS_AMAIR);
ctxt 60 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(SYS_CNTKCTL);
ctxt 61 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
ctxt 62 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
ctxt 64 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
ctxt 65 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->gp_regs.elr_el1 = read_sysreg_el1(SYS_ELR);
ctxt 66 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
ctxt 69 arch/arm64/kvm/hyp/sysreg-sr.c static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
ctxt 71 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR);
ctxt 72 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
ctxt 75 arch/arm64/kvm/hyp/sysreg-sr.c ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
ctxt 78 arch/arm64/kvm/hyp/sysreg-sr.c void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
ctxt 80 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_save_el1_state(ctxt);
ctxt 81 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_save_common_state(ctxt);
ctxt 82 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_save_user_state(ctxt);
ctxt 83 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_save_el2_return_state(ctxt);
ctxt 86 arch/arm64/kvm/hyp/sysreg-sr.c void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
ctxt 88 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_save_common_state(ctxt);
ctxt 92 arch/arm64/kvm/hyp/sysreg-sr.c void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
ctxt 94 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_save_common_state(ctxt);
ctxt 95 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_save_el2_return_state(ctxt);
ctxt 99 arch/arm64/kvm/hyp/sysreg-sr.c static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
ctxt 101 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
ctxt 107 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
ctxt 110 arch/arm64/kvm/hyp/sysreg-sr.c static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
ctxt 112 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
ctxt 113 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
ctxt 116 arch/arm64/kvm/hyp/sysreg-sr.c static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
ctxt 118 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
ctxt 119 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
ctxt 120 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
ctxt 121 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
ctxt 122 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
ctxt 123 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
ctxt 124 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
ctxt 125 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
ctxt 126 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[ESR_EL1], SYS_ESR);
ctxt 127 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], SYS_AFSR0);
ctxt 128 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], SYS_AFSR1);
ctxt 129 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[FAR_EL1], SYS_FAR);
ctxt 130 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], SYS_MAIR);
ctxt 131 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], SYS_VBAR);
ctxt 132 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],SYS_CONTEXTIDR);
ctxt 133 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], SYS_AMAIR);
ctxt 134 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], SYS_CNTKCTL);
ctxt 135 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
ctxt 136 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
ctxt 138 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
ctxt 139 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->gp_regs.elr_el1, SYS_ELR);
ctxt 140 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
ctxt 144 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
ctxt 146 arch/arm64/kvm/hyp/sysreg-sr.c u64 pstate = ctxt->gp_regs.regs.pstate;
ctxt 163 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_el2(ctxt->gp_regs.regs.pc, SYS_ELR);
ctxt 167 arch/arm64/kvm/hyp/sysreg-sr.c write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
ctxt 170 arch/arm64/kvm/hyp/sysreg-sr.c void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
ctxt 172 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_restore_el1_state(ctxt);
ctxt 173 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_restore_common_state(ctxt);
ctxt 174 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_restore_user_state(ctxt);
ctxt 175 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_restore_el2_return_state(ctxt);
ctxt 178 arch/arm64/kvm/hyp/sysreg-sr.c void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
ctxt 180 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_restore_common_state(ctxt);
ctxt 184 arch/arm64/kvm/hyp/sysreg-sr.c void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
ctxt 186 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_restore_common_state(ctxt);
ctxt 187 arch/arm64/kvm/hyp/sysreg-sr.c __sysreg_restore_el2_return_state(ctxt);
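The save/restore helpers above pair up into the world switch: on a non-VHE host the hypervisor saves the host's EL1 state, installs the guest's, runs the guest, then reverses the two, while the *_vhe variants can keep most host state live and touch only the common part. A compressed sketch of the non-VHE pairing (the control flow is heavily simplified; the real sequence in arch/arm64/kvm/hyp/switch.c also handles VGIC, timer, FP/SIMD and debug state):

/* Simplified shape of the nVHE switch built from the helpers above. */
static int run_guest(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt)
{
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
        int exit_code;

        __sysreg_save_state_nvhe(host_ctxt);        /* stash host EL1 state */
        __sysreg_restore_state_nvhe(guest_ctxt);    /* install guest state */

        exit_code = __guest_enter(vcpu, host_ctxt); /* ERET into the guest */

        __sysreg_save_state_nvhe(guest_ctxt);       /* capture guest state */
        __sysreg_restore_state_nvhe(host_ctxt);     /* put the host back */

        return exit_code;
}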
ctxt 198 arch/arm64/kvm/hyp/sysreg-sr.c spsr = vcpu->arch.ctxt.gp_regs.spsr;
ctxt 199 arch/arm64/kvm/hyp/sysreg-sr.c sysreg = vcpu->arch.ctxt.sys_regs;
ctxt 220 arch/arm64/kvm/hyp/sysreg-sr.c spsr = vcpu->arch.ctxt.gp_regs.spsr;
ctxt 221 arch/arm64/kvm/hyp/sysreg-sr.c sysreg = vcpu->arch.ctxt.sys_regs;
ctxt 249 arch/arm64/kvm/hyp/sysreg-sr.c struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
ctxt 285 arch/arm64/kvm/hyp/sysreg-sr.c struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
ctxt 103 arch/arm64/kvm/regmap.c unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
ctxt 177 arch/m68k/include/asm/openprom.h void (*pv_setctxt)(int ctxt, char *va, int pmeg);
ctxt 245 arch/m68k/include/asm/openprom.h void (*pv_setctxt)(int ctxt, char *va, int pmeg);
ctxt 142 arch/sparc/include/asm/openprom.h void (*pv_setctxt)(int ctxt, char *va, int pmeg);
ctxt 96 arch/x86/include/asm/kvm_emulate.h ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
ctxt 103 arch/x86/include/asm/kvm_emulate.h void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
ctxt 112 arch/x86/include/asm/kvm_emulate.h int (*read_std)(struct x86_emulate_ctxt *ctxt,
ctxt 124 arch/x86/include/asm/kvm_emulate.h int (*read_phys)(struct x86_emulate_ctxt *ctxt, unsigned long addr,
ctxt 135 arch/x86/include/asm/kvm_emulate.h int (*write_std)(struct x86_emulate_ctxt *ctxt,
ctxt 145 arch/x86/include/asm/kvm_emulate.h int (*fetch)(struct x86_emulate_ctxt *ctxt,
ctxt 155 arch/x86/include/asm/kvm_emulate.h int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
ctxt 166 arch/x86/include/asm/kvm_emulate.h int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
ctxt 179 arch/x86/include/asm/kvm_emulate.h int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
ctxt 185 arch/x86/include/asm/kvm_emulate.h void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);
ctxt 187 arch/x86/include/asm/kvm_emulate.h int (*pio_in_emulated)(struct x86_emulate_ctxt *ctxt,
ctxt 191 arch/x86/include/asm/kvm_emulate.h int (*pio_out_emulated)(struct x86_emulate_ctxt *ctxt,
ctxt 195 arch/x86/include/asm/kvm_emulate.h bool (*get_segment)(struct x86_emulate_ctxt *ctxt, u16 *selector,
ctxt 197 arch/x86/include/asm/kvm_emulate.h void (*set_segment)(struct x86_emulate_ctxt *ctxt, u16 selector,
ctxt 199 arch/x86/include/asm/kvm_emulate.h unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt,
ctxt 201 arch/x86/include/asm/kvm_emulate.h void (*get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
ctxt 202 arch/x86/include/asm/kvm_emulate.h void (*get_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
ctxt 203 arch/x86/include/asm/kvm_emulate.h void (*set_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
ctxt 204 arch/x86/include/asm/kvm_emulate.h void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
ctxt 205 arch/x86/include/asm/kvm_emulate.h ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
ctxt 206 arch/x86/include/asm/kvm_emulate.h int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
ctxt 207 arch/x86/include/asm/kvm_emulate.h int (*cpl)(struct x86_emulate_ctxt *ctxt);
ctxt 208 arch/x86/include/asm/kvm_emulate.h int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
ctxt 209 arch/x86/include/asm/kvm_emulate.h int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
ctxt 210 arch/x86/include/asm/kvm_emulate.h u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
ctxt 211 arch/x86/include/asm/kvm_emulate.h void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
ctxt 212 arch/x86/include/asm/kvm_emulate.h int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
ctxt 213 arch/x86/include/asm/kvm_emulate.h int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
ctxt 214 arch/x86/include/asm/kvm_emulate.h int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
ctxt 215 arch/x86/include/asm/kvm_emulate.h int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
ctxt 216 arch/x86/include/asm/kvm_emulate.h void (*halt)(struct x86_emulate_ctxt *ctxt);
ctxt 217 arch/x86/include/asm/kvm_emulate.h void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
ctxt 218 arch/x86/include/asm/kvm_emulate.h int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
ctxt 219 arch/x86/include/asm/kvm_emulate.h int (*intercept)(struct x86_emulate_ctxt *ctxt,
ctxt 223 arch/x86/include/asm/kvm_emulate.h bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx,
ctxt 225 arch/x86/include/asm/kvm_emulate.h void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
ctxt 227 arch/x86/include/asm/kvm_emulate.h unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
ctxt 228 arch/x86/include/asm/kvm_emulate.h void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
ctxt 229 arch/x86/include/asm/kvm_emulate.h int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
ctxt 231 arch/x86/include/asm/kvm_emulate.h void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
ctxt 232 arch/x86/include/asm/kvm_emulate.h int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
ctxt 323 arch/x86/include/asm/kvm_emulate.h int (*execute)(struct x86_emulate_ctxt *ctxt);
ctxt 324 arch/x86/include/asm/kvm_emulate.h int (*check_perm)(struct x86_emulate_ctxt *ctxt);
ctxt 444 arch/x86/include/asm/kvm_emulate.h int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
ctxt 445 arch/x86/include/asm/kvm_emulate.h bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
ctxt 450 arch/x86/include/asm/kvm_emulate.h void init_decode_cache(struct x86_emulate_ctxt *ctxt);
ctxt 451 arch/x86/include/asm/kvm_emulate.h int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
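The x86_emulate_ops callbacks above are the emulator's only window onto the vCPU: every GPR, segment, control register, MSR or memory access is routed through this table, which is also what makes the emulator testable in isolation. A toy illustration of the pattern, reduced to the two register hooks (the miniature types are stand-ins, not the kernel's):

#include <stdio.h>

typedef unsigned long ulong;

struct emu_ctxt;
struct emu_ops {
        ulong (*read_gpr)(struct emu_ctxt *ctxt, unsigned reg);
        void (*write_gpr)(struct emu_ctxt *ctxt, unsigned reg, ulong val);
};
struct emu_ctxt {
        const struct emu_ops *ops;
        ulong gprs[16];   /* backing store a host would map to the vCPU */
};

static ulong test_read_gpr(struct emu_ctxt *ctxt, unsigned reg)
{
        return ctxt->gprs[reg];
}

static void test_write_gpr(struct emu_ctxt *ctxt, unsigned reg, ulong val)
{
        ctxt->gprs[reg] = val;
}

static const struct emu_ops test_ops = {
        .read_gpr = test_read_gpr,
        .write_gpr = test_write_gpr,
};

int main(void)
{
        struct emu_ctxt ctxt = { .ops = &test_ops };
        ctxt.ops->write_gpr(&ctxt, 0, 42);             /* "RAX" = 42 */
        printf("%lu\n", ctxt.ops->read_gpr(&ctxt, 0));
        return 0;
}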
ctxt 452 arch/x86/include/asm/kvm_emulate.h int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
ctxt 455 arch/x86/include/asm/kvm_emulate.h int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
ctxt 456 arch/x86/include/asm/kvm_emulate.h void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt);
ctxt 457 arch/x86/include/asm/kvm_emulate.h void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt);
ctxt 458 arch/x86/include/asm/kvm_emulate.h bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt);
ctxt 49 arch/x86/kernel/cpu/mtrr/mtrr.h void set_mtrr_done(struct set_mtrr_context *ctxt);
ctxt 50 arch/x86/kernel/cpu/mtrr/mtrr.h void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
ctxt 51 arch/x86/kernel/cpu/mtrr/mtrr.h void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
ctxt 217 arch/x86/kvm/emulate.c int (*execute)(struct x86_emulate_ctxt *ctxt);
ctxt 226 arch/x86/kvm/emulate.c int (*check_perm)(struct x86_emulate_ctxt *ctxt);
ctxt 265 arch/x86/kvm/emulate.c static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
ctxt 267 arch/x86/kvm/emulate.c if (!(ctxt->regs_valid & (1 << nr))) {
ctxt 268 arch/x86/kvm/emulate.c ctxt->regs_valid |= 1 << nr;
ctxt 269 arch/x86/kvm/emulate.c ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
ctxt 271 arch/x86/kvm/emulate.c return ctxt->_regs[nr];
ctxt 274 arch/x86/kvm/emulate.c static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
ctxt 276 arch/x86/kvm/emulate.c ctxt->regs_valid |= 1 << nr;
ctxt 277 arch/x86/kvm/emulate.c ctxt->regs_dirty |= 1 << nr;
ctxt 278 arch/x86/kvm/emulate.c return &ctxt->_regs[nr];
ctxt 281 arch/x86/kvm/emulate.c static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
ctxt 283 arch/x86/kvm/emulate.c reg_read(ctxt, nr);
ctxt 284 arch/x86/kvm/emulate.c return reg_write(ctxt, nr);
ctxt 287 arch/x86/kvm/emulate.c static void writeback_registers(struct x86_emulate_ctxt *ctxt)
ctxt 291 arch/x86/kvm/emulate.c for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
ctxt 292 arch/x86/kvm/emulate.c ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
ctxt 295 arch/x86/kvm/emulate.c static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
ctxt 297 arch/x86/kvm/emulate.c ctxt->regs_dirty = 0;
ctxt 298 arch/x86/kvm/emulate.c ctxt->regs_valid = 0;
ctxt 314 arch/x86/kvm/emulate.c static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
ctxt 494 arch/x86/kvm/emulate.c static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
ctxt 500 arch/x86/kvm/emulate.c .rep_prefix = ctxt->rep_prefix,
ctxt 501 arch/x86/kvm/emulate.c .modrm_mod = ctxt->modrm_mod,
ctxt 502 arch/x86/kvm/emulate.c .modrm_reg = ctxt->modrm_reg,
ctxt 503 arch/x86/kvm/emulate.c .modrm_rm = ctxt->modrm_rm,
ctxt 504 arch/x86/kvm/emulate.c .src_val = ctxt->src.val64,
ctxt 505 arch/x86/kvm/emulate.c .dst_val = ctxt->dst.val64,
ctxt 506 arch/x86/kvm/emulate.c .src_bytes = ctxt->src.bytes,
ctxt 507 arch/x86/kvm/emulate.c .dst_bytes = ctxt->dst.bytes,
ctxt 508 arch/x86/kvm/emulate.c .ad_bytes = ctxt->ad_bytes,
ctxt 509 arch/x86/kvm/emulate.c .next_rip = ctxt->eip,
ctxt 512 arch/x86/kvm/emulate.c return ctxt->ops->intercept(ctxt, &info, stage);
ctxt 539 arch/x86/kvm/emulate.c static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
ctxt 541 arch/x86/kvm/emulate.c return (1UL << (ctxt->ad_bytes << 3)) - 1;
ctxt 544 arch/x86/kvm/emulate.c static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
ctxt 549 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64)
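reg_read(), reg_write() and reg_rmw() above implement a lazy write-back cache over the ops table: regs_valid marks GPRs already fetched through ops->read_gpr(), regs_dirty marks those that writeback_registers() must flush via ops->write_gpr(). Note that reg_rmw() reads before it writes, so the cached value is filled before the slot is handed out. A standalone model of the bookkeeping:

#include <stdint.h>
#include <stdio.h>

static unsigned long backing[16] = { [3] = 0xbeef };  /* pretend vCPU state */

struct regcache {
        uint16_t valid, dirty;   /* one bit per GPR */
        unsigned long regs[16];  /* cached copies */
};

static unsigned long cache_read(struct regcache *c, unsigned nr)
{
        if (!(c->valid & (1u << nr))) {   /* first touch: fill from vCPU */
                c->valid |= 1u << nr;
                c->regs[nr] = backing[nr];
        }
        return c->regs[nr];
}

static unsigned long *cache_write(struct regcache *c, unsigned nr)
{
        c->valid |= 1u << nr;
        c->dirty |= 1u << nr;             /* flush on writeback */
        return &c->regs[nr];
}

static void cache_writeback(struct regcache *c)
{
        for (unsigned nr = 0; nr < 16; nr++)
                if (c->dirty & (1u << nr))
                        backing[nr] = c->regs[nr];
        c->valid = c->dirty = 0;
}

int main(void)
{
        struct regcache c = { 0, 0, { 0 } };
        unsigned long v = cache_read(&c, 3);  /* read first, as reg_rmw does */
        *cache_write(&c, 3) = v + 1;
        cache_writeback(&c);
        printf("%#lx\n", backing[3]);         /* 0xbef0 */
        return 0;
}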
ctxt 551 arch/x86/kvm/emulate.c ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
ctxt 555 arch/x86/kvm/emulate.c static int stack_size(struct x86_emulate_ctxt *ctxt)
ctxt 557 arch/x86/kvm/emulate.c return (__fls(stack_mask(ctxt)) + 1) >> 3;
ctxt 562 arch/x86/kvm/emulate.c address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
ctxt 564 arch/x86/kvm/emulate.c if (ctxt->ad_bytes == sizeof(unsigned long))
ctxt 567 arch/x86/kvm/emulate.c return reg & ad_mask(ctxt);
ctxt 571 arch/x86/kvm/emulate.c register_address(struct x86_emulate_ctxt *ctxt, int reg)
ctxt 573 arch/x86/kvm/emulate.c return address_mask(ctxt, reg_read(ctxt, reg));
ctxt 582 arch/x86/kvm/emulate.c register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
ctxt 584 arch/x86/kvm/emulate.c ulong *preg = reg_rmw(ctxt, reg);
ctxt 586 arch/x86/kvm/emulate.c assign_register(preg, *preg + inc, ctxt->ad_bytes);
ctxt 589 arch/x86/kvm/emulate.c static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
ctxt 591 arch/x86/kvm/emulate.c masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
ctxt 601 arch/x86/kvm/emulate.c static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
ctxt 603 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
ctxt 606 arch/x86/kvm/emulate.c return ctxt->ops->get_cached_segment_base(ctxt, seg);
ctxt 609 arch/x86/kvm/emulate.c static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
ctxt 613 arch/x86/kvm/emulate.c ctxt->exception.vector = vec;
ctxt 614 arch/x86/kvm/emulate.c ctxt->exception.error_code = error;
ctxt 615 arch/x86/kvm/emulate.c ctxt->exception.error_code_valid = valid;
ctxt 619 arch/x86/kvm/emulate.c static int emulate_db(struct x86_emulate_ctxt *ctxt)
ctxt 621 arch/x86/kvm/emulate.c return emulate_exception(ctxt, DB_VECTOR, 0, false);
ctxt 624 arch/x86/kvm/emulate.c static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
ctxt 626 arch/x86/kvm/emulate.c return emulate_exception(ctxt, GP_VECTOR, err, true);
ctxt 629 arch/x86/kvm/emulate.c static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
ctxt 631 arch/x86/kvm/emulate.c return emulate_exception(ctxt, SS_VECTOR, err, true);
ctxt 634 arch/x86/kvm/emulate.c static int emulate_ud(struct x86_emulate_ctxt *ctxt)
ctxt 636 arch/x86/kvm/emulate.c return emulate_exception(ctxt, UD_VECTOR, 0, false);
ctxt 639 arch/x86/kvm/emulate.c static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
ctxt 641 arch/x86/kvm/emulate.c return emulate_exception(ctxt, TS_VECTOR, err, true);
ctxt 644 arch/x86/kvm/emulate.c static int emulate_de(struct x86_emulate_ctxt *ctxt)
ctxt 646 arch/x86/kvm/emulate.c return emulate_exception(ctxt, DE_VECTOR, 0, false);
ctxt 649 arch/x86/kvm/emulate.c static int emulate_nm(struct x86_emulate_ctxt *ctxt)
ctxt 651 arch/x86/kvm/emulate.c return emulate_exception(ctxt, NM_VECTOR, 0, false);
ctxt 654 arch/x86/kvm/emulate.c static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
ctxt 659 arch/x86/kvm/emulate.c ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
ctxt 663 arch/x86/kvm/emulate.c static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
ctxt 670 arch/x86/kvm/emulate.c ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
ctxt 671 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
ctxt 683 arch/x86/kvm/emulate.c static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
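ad_mask() above derives the address-size mask from ad_bytes, and address_mask() handles the 64-bit case up front because shifting a 64-bit word by 64 bits would be undefined; for 2 and 4 the mask comes out as 0xffff and 0xffffffff. The same logic as a standalone check (assumes an LP64 host where sizeof(unsigned long) is 8):

#include <stdio.h>

static unsigned long ad_mask(int ad_bytes)
{
        return (1UL << (ad_bytes << 3)) - 1;
}

static unsigned long address_mask(int ad_bytes, unsigned long reg)
{
        if (ad_bytes == sizeof(unsigned long))  /* 8: avoid an undefined shift */
                return reg;
        return reg & ad_mask(ad_bytes);
}

int main(void)
{
        unsigned long rdi = 0x123456789abcdef0UL;
        printf("%#lx\n", address_mask(2, rdi));  /* 0xdef0 (16-bit addressing) */
        printf("%#lx\n", address_mask(4, rdi));  /* 0x9abcdef0 (32-bit) */
        printf("%#lx\n", address_mask(8, rdi));  /* unchanged (64-bit) */
        return 0;
}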
ctxt 685 arch/x86/kvm/emulate.c u64 alignment = ctxt->d & AlignMask;
ctxt 702 arch/x86/kvm/emulate.c static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
ctxt 715 arch/x86/kvm/emulate.c la = seg_base(ctxt, addr.seg) + addr.ea;
ctxt 720 arch/x86/kvm/emulate.c va_bits = ctxt_virt_addr_bits(ctxt);
ctxt 730 arch/x86/kvm/emulate.c usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
ctxt 735 arch/x86/kvm/emulate.c if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
ctxt 759 arch/x86/kvm/emulate.c if (la & (insn_alignment(ctxt, size) - 1))
ctxt 760 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0);
ctxt 764 arch/x86/kvm/emulate.c return emulate_ss(ctxt, 0);
ctxt 766 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0);
ctxt 769 arch/x86/kvm/emulate.c static int linearize(struct x86_emulate_ctxt *ctxt,
ctxt 775 arch/x86/kvm/emulate.c return __linearize(ctxt, addr, &max_size, size, write, false,
ctxt 776 arch/x86/kvm/emulate.c ctxt->mode, linear);
ctxt 779 arch/x86/kvm/emulate.c static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
ctxt 788 arch/x86/kvm/emulate.c if (ctxt->op_bytes != sizeof(unsigned long))
ctxt 789 arch/x86/kvm/emulate.c addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
ctxt 790 arch/x86/kvm/emulate.c rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
ctxt 792 arch/x86/kvm/emulate.c ctxt->_eip = addr.ea;
ctxt 796 arch/x86/kvm/emulate.c static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
ctxt 798 arch/x86/kvm/emulate.c return assign_eip(ctxt, dst, ctxt->mode);
ctxt 801 arch/x86/kvm/emulate.c static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
ctxt 804 arch/x86/kvm/emulate.c enum x86emul_mode mode = ctxt->mode;
ctxt 808 arch/x86/kvm/emulate.c if (ctxt->mode >= X86EMUL_MODE_PROT16) {
ctxt 812 arch/x86/kvm/emulate.c ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
ctxt 821 arch/x86/kvm/emulate.c rc = assign_eip(ctxt, dst, mode);
ctxt 823 arch/x86/kvm/emulate.c ctxt->mode = mode;
ctxt 827 arch/x86/kvm/emulate.c static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
ctxt 829 arch/x86/kvm/emulate.c return assign_eip_near(ctxt, ctxt->_eip + rel);
ctxt 832 arch/x86/kvm/emulate.c static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
ctxt 835 arch/x86/kvm/emulate.c return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
ctxt 838 arch/x86/kvm/emulate.c static int linear_write_system(struct x86_emulate_ctxt *ctxt,
ctxt 842 arch/x86/kvm/emulate.c return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
ctxt 845 arch/x86/kvm/emulate.c static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
ctxt 853 arch/x86/kvm/emulate.c rc = linearize(ctxt, addr, size, false, &linear);
ctxt 856 arch/x86/kvm/emulate.c return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
ctxt 859 arch/x86/kvm/emulate.c static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
ctxt 867 arch/x86/kvm/emulate.c rc = linearize(ctxt, addr, size, true, &linear);
ctxt 870 arch/x86/kvm/emulate.c return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
ctxt 877 arch/x86/kvm/emulate.c static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
ctxt 882 arch/x86/kvm/emulate.c int cur_size = ctxt->fetch.end - ctxt->fetch.data;
ctxt 884 arch/x86/kvm/emulate.c .ea = ctxt->eip + cur_size };
ctxt 896 arch/x86/kvm/emulate.c rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
ctxt 911 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0);
ctxt 913 arch/x86/kvm/emulate.c rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
ctxt 914 arch/x86/kvm/emulate.c size, &ctxt->exception);
ctxt 917 arch/x86/kvm/emulate.c ctxt->fetch.end += size;
ctxt 921 arch/x86/kvm/emulate.c static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
ctxt 924 arch/x86/kvm/emulate.c unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
ctxt 927 arch/x86/kvm/emulate.c return __do_insn_fetch_bytes(ctxt, size - done_size);
ctxt 939 arch/x86/kvm/emulate.c ctxt->_eip += sizeof(_type); \
ctxt 940 arch/x86/kvm/emulate.c memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
ctxt 941 arch/x86/kvm/emulate.c ctxt->fetch.ptr += sizeof(_type); \
ctxt 950 arch/x86/kvm/emulate.c ctxt->_eip += (_size); \
ctxt 951 arch/x86/kvm/emulate.c memcpy(_arr, ctxt->fetch.ptr, _size); \
ctxt 952 arch/x86/kvm/emulate.c ctxt->fetch.ptr += (_size); \
ctxt 960 arch/x86/kvm/emulate.c static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
ctxt 964 arch/x86/kvm/emulate.c int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
ctxt 967 arch/x86/kvm/emulate.c p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
ctxt 969 arch/x86/kvm/emulate.c p = reg_rmw(ctxt, modrm_reg);
ctxt 973 arch/x86/kvm/emulate.c static int read_descriptor(struct x86_emulate_ctxt *ctxt,
ctxt 982 arch/x86/kvm/emulate.c rc = segmented_read_std(ctxt, addr, size, 2);
ctxt 986 arch/x86/kvm/emulate.c rc = segmented_read_std(ctxt, addr, address, op_bytes);
ctxt 1034 arch/x86/kvm/emulate.c static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
ctxt 1037 arch/x86/kvm/emulate.c if (ctxt->src.val == 0)
ctxt 1038 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE;
ctxt 1039 arch/x86/kvm/emulate.c return fastop(ctxt, em_bsf);
ctxt 1042 arch/x86/kvm/emulate.c static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
ctxt 1045 arch/x86/kvm/emulate.c if (ctxt->src.val == 0)
ctxt 1046 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE;
ctxt 1047 arch/x86/kvm/emulate.c return fastop(ctxt, em_bsr);
ctxt 1093 arch/x86/kvm/emulate.c static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
ctxt 1120 arch/x86/kvm/emulate.c static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
ctxt 1148 arch/x86/kvm/emulate.c static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
ctxt 1165 arch/x86/kvm/emulate.c static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
ctxt 1182 arch/x86/kvm/emulate.c static int em_fninit(struct x86_emulate_ctxt *ctxt)
ctxt 1184 arch/x86/kvm/emulate.c if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
ctxt 1185 arch/x86/kvm/emulate.c return emulate_nm(ctxt);
ctxt 1193 arch/x86/kvm/emulate.c static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
ctxt 1197 arch/x86/kvm/emulate.c if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
ctxt 1198 arch/x86/kvm/emulate.c return emulate_nm(ctxt);
ctxt 1204 arch/x86/kvm/emulate.c ctxt->dst.val = fcw;
ctxt 1209 arch/x86/kvm/emulate.c static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
ctxt 1213 arch/x86/kvm/emulate.c if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
ctxt 1214 arch/x86/kvm/emulate.c return emulate_nm(ctxt);
ctxt 1220 arch/x86/kvm/emulate.c ctxt->dst.val = fsw;
ctxt 1225 arch/x86/kvm/emulate.c static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
ctxt 1228 arch/x86/kvm/emulate.c unsigned reg = ctxt->modrm_reg;
ctxt 1230 arch/x86/kvm/emulate.c if (!(ctxt->d & ModRM))
ctxt 1231 arch/x86/kvm/emulate.c reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
ctxt 1233 arch/x86/kvm/emulate.c if (ctxt->d & Sse) {
ctxt 1237 arch/x86/kvm/emulate.c read_sse_reg(ctxt, &op->vec_val, reg);
ctxt 1240 arch/x86/kvm/emulate.c if (ctxt->d & Mmx) {
ctxt 1249 arch/x86/kvm/emulate.c op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
ctxt 1250 arch/x86/kvm/emulate.c op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
ctxt 1256 arch/x86/kvm/emulate.c static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
ctxt 1259 arch/x86/kvm/emulate.c ctxt->modrm_seg = VCPU_SREG_SS;
ctxt 1262 arch/x86/kvm/emulate.c static int decode_modrm(struct x86_emulate_ctxt *ctxt,
ctxt 1270 arch/x86/kvm/emulate.c ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
ctxt 1271 arch/x86/kvm/emulate.c index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
ctxt 1272 arch/x86/kvm/emulate.c base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
ctxt 1274 arch/x86/kvm/emulate.c ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
ctxt 1275 arch/x86/kvm/emulate.c ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
ctxt 1276 arch/x86/kvm/emulate.c ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
ctxt 1277 arch/x86/kvm/emulate.c ctxt->modrm_seg = VCPU_SREG_DS;
ctxt 1279 arch/x86/kvm/emulate.c if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
ctxt 1281 arch/x86/kvm/emulate.c op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
ctxt 1282 arch/x86/kvm/emulate.c op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
ctxt 1283 arch/x86/kvm/emulate.c ctxt->d & ByteOp);
ctxt 1284 arch/x86/kvm/emulate.c if (ctxt->d & Sse) {
ctxt 1287 arch/x86/kvm/emulate.c op->addr.xmm = ctxt->modrm_rm;
ctxt 1288 arch/x86/kvm/emulate.c read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
ctxt 1291 arch/x86/kvm/emulate.c if (ctxt->d & Mmx) {
ctxt 1294 arch/x86/kvm/emulate.c op->addr.mm = ctxt->modrm_rm & 7;
ctxt 1303 arch/x86/kvm/emulate.c if (ctxt->ad_bytes == 2) {
ctxt 1304 arch/x86/kvm/emulate.c unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
ctxt 1305 arch/x86/kvm/emulate.c unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
ctxt 1306 arch/x86/kvm/emulate.c unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
ctxt 1307 arch/x86/kvm/emulate.c unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
ctxt 1310 arch/x86/kvm/emulate.c switch (ctxt->modrm_mod) {
ctxt 1312 arch/x86/kvm/emulate.c if (ctxt->modrm_rm == 6)
ctxt 1313 arch/x86/kvm/emulate.c modrm_ea += insn_fetch(u16, ctxt);
ctxt 1316 arch/x86/kvm/emulate.c modrm_ea += insn_fetch(s8, ctxt);
ctxt 1319 arch/x86/kvm/emulate.c modrm_ea += insn_fetch(u16, ctxt);
ctxt 1322 arch/x86/kvm/emulate.c switch (ctxt->modrm_rm) {
ctxt 1342 arch/x86/kvm/emulate.c if (ctxt->modrm_mod != 0)
ctxt 1349 arch/x86/kvm/emulate.c if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
ctxt 1350 arch/x86/kvm/emulate.c (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
ctxt 1351 arch/x86/kvm/emulate.c ctxt->modrm_seg = VCPU_SREG_SS;
ctxt 1355 arch/x86/kvm/emulate.c if ((ctxt->modrm_rm & 7) == 4) {
ctxt 1356 arch/x86/kvm/emulate.c sib = insn_fetch(u8, ctxt);
ctxt 1361 arch/x86/kvm/emulate.c if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
ctxt 1362 arch/x86/kvm/emulate.c modrm_ea += insn_fetch(s32, ctxt);
ctxt 1364 arch/x86/kvm/emulate.c modrm_ea += reg_read(ctxt, base_reg);
ctxt 1365 arch/x86/kvm/emulate.c adjust_modrm_seg(ctxt, base_reg);
ctxt 1367 arch/x86/kvm/emulate.c if ((ctxt->d & IncSP) &&
ctxt 1369 arch/x86/kvm/emulate.c modrm_ea += ctxt->op_bytes;
ctxt 1372 arch/x86/kvm/emulate.c modrm_ea += reg_read(ctxt, index_reg) << scale;
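The decode_modrm() occurrences above (continued below) split the ModRM byte into mod/reg/rm and widen reg and rm with the REX prefix bits, exactly as entries 1270-1276 show. The field extraction in isolation:

#include <stdio.h>

struct modrm { unsigned mod, reg, rm; };

/* ModRM = mm rrr bbb; REX.R and REX.B each contribute a fourth bit. */
static struct modrm decode(unsigned char rex_prefix, unsigned char modrm)
{
        struct modrm d;
        d.mod = (modrm & 0xc0) >> 6;
        d.reg = ((rex_prefix << 1) & 8) | ((modrm & 0x38) >> 3);  /* REX.R */
        d.rm = ((rex_prefix << 3) & 8) | (modrm & 0x07);          /* REX.B */
        return d;
}

int main(void)
{
        /* REX 0x45 (R=1, B=1), ModRM 0xd9: mod=3, reg=11 (r11), rm=9 (r9) */
        struct modrm d = decode(0x45, 0xd9);
        printf("mod=%u reg=%u rm=%u\n", d.mod, d.reg, d.rm);
        return 0;
}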
ctxt 1373 arch/x86/kvm/emulate.c } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
ctxt 1374 arch/x86/kvm/emulate.c modrm_ea += insn_fetch(s32, ctxt);
ctxt 1375 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt 1376 arch/x86/kvm/emulate.c ctxt->rip_relative = 1;
ctxt 1378 arch/x86/kvm/emulate.c base_reg = ctxt->modrm_rm;
ctxt 1379 arch/x86/kvm/emulate.c modrm_ea += reg_read(ctxt, base_reg);
ctxt 1380 arch/x86/kvm/emulate.c adjust_modrm_seg(ctxt, base_reg);
ctxt 1382 arch/x86/kvm/emulate.c switch (ctxt->modrm_mod) {
ctxt 1384 arch/x86/kvm/emulate.c modrm_ea += insn_fetch(s8, ctxt);
ctxt 1387 arch/x86/kvm/emulate.c modrm_ea += insn_fetch(s32, ctxt);
ctxt 1392 arch/x86/kvm/emulate.c if (ctxt->ad_bytes != 8)
ctxt 1393 arch/x86/kvm/emulate.c ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
ctxt 1399 arch/x86/kvm/emulate.c static int decode_abs(struct x86_emulate_ctxt *ctxt,
ctxt 1405 arch/x86/kvm/emulate.c switch (ctxt->ad_bytes) {
ctxt 1407 arch/x86/kvm/emulate.c op->addr.mem.ea = insn_fetch(u16, ctxt);
ctxt 1410 arch/x86/kvm/emulate.c op->addr.mem.ea = insn_fetch(u32, ctxt);
ctxt 1413 arch/x86/kvm/emulate.c op->addr.mem.ea = insn_fetch(u64, ctxt);
ctxt 1420 arch/x86/kvm/emulate.c static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
ctxt 1424 arch/x86/kvm/emulate.c if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
ctxt 1425 arch/x86/kvm/emulate.c mask = ~((long)ctxt->dst.bytes * 8 - 1);
ctxt 1427 arch/x86/kvm/emulate.c if (ctxt->src.bytes == 2)
ctxt 1428 arch/x86/kvm/emulate.c sv = (s16)ctxt->src.val & (s16)mask;
ctxt 1429 arch/x86/kvm/emulate.c else if (ctxt->src.bytes == 4)
ctxt 1430 arch/x86/kvm/emulate.c sv = (s32)ctxt->src.val & (s32)mask;
ctxt 1432 arch/x86/kvm/emulate.c sv = (s64)ctxt->src.val & (s64)mask;
ctxt 1434 arch/x86/kvm/emulate.c ctxt->dst.addr.mem.ea = address_mask(ctxt,
ctxt 1435 arch/x86/kvm/emulate.c ctxt->dst.addr.mem.ea + (sv >> 3));
ctxt 1439 arch/x86/kvm/emulate.c ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
ctxt 1442 arch/x86/kvm/emulate.c static int read_emulated(struct x86_emulate_ctxt *ctxt,
ctxt 1446 arch/x86/kvm/emulate.c struct read_cache *mc = &ctxt->mem_read;
ctxt 1453 arch/x86/kvm/emulate.c rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
ctxt 1454 arch/x86/kvm/emulate.c &ctxt->exception);
ctxt 1466 arch/x86/kvm/emulate.c static int segmented_read(struct x86_emulate_ctxt *ctxt,
ctxt 1474 arch/x86/kvm/emulate.c rc = linearize(ctxt, addr, size, false, &linear);
ctxt 1477 arch/x86/kvm/emulate.c return read_emulated(ctxt, linear, data, size);
ctxt 1480 arch/x86/kvm/emulate.c static int segmented_write(struct x86_emulate_ctxt *ctxt,
ctxt 1488 arch/x86/kvm/emulate.c rc = linearize(ctxt, addr, size, true, &linear);
ctxt 1491 arch/x86/kvm/emulate.c return ctxt->ops->write_emulated(ctxt, linear, data, size,
ctxt 1492 arch/x86/kvm/emulate.c &ctxt->exception);
ctxt 1495 arch/x86/kvm/emulate.c static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
ctxt 1503 arch/x86/kvm/emulate.c rc = linearize(ctxt, addr, size, true, &linear);
ctxt 1506 arch/x86/kvm/emulate.c return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
ctxt 1507 arch/x86/kvm/emulate.c size, &ctxt->exception);
ctxt 1510 arch/x86/kvm/emulate.c static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
ctxt 1514 arch/x86/kvm/emulate.c struct read_cache *rc = &ctxt->io_read;
ctxt 1518 arch/x86/kvm/emulate.c unsigned int count = ctxt->rep_prefix ?
ctxt 1519 arch/x86/kvm/emulate.c address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
ctxt 1520 arch/x86/kvm/emulate.c in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
ctxt 1521 arch/x86/kvm/emulate.c offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
ctxt 1522 arch/x86/kvm/emulate.c PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
ctxt 1527 arch/x86/kvm/emulate.c if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
ctxt 1532 arch/x86/kvm/emulate.c if (ctxt->rep_prefix && (ctxt->d & String) &&
ctxt 1533 arch/x86/kvm/emulate.c !(ctxt->eflags & X86_EFLAGS_DF)) {
ctxt 1534 arch/x86/kvm/emulate.c ctxt->dst.data = rc->data + rc->pos;
ctxt 1535 arch/x86/kvm/emulate.c ctxt->dst.type = OP_MEM_STR;
ctxt 1536 arch/x86/kvm/emulate.c ctxt->dst.count = (rc->end - rc->pos) / size;
ctxt 1545 arch/x86/kvm/emulate.c static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
ctxt 1551 arch/x86/kvm/emulate.c ctxt->ops->get_idt(ctxt, &dt);
ctxt 1554 arch/x86/kvm/emulate.c return emulate_gp(ctxt, index << 3 | 0x2);
ctxt 1557 arch/x86/kvm/emulate.c return linear_read_system(ctxt, addr, desc, sizeof(*desc));
ctxt 1560 arch/x86/kvm/emulate.c static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
ctxt 1563 arch/x86/kvm/emulate.c const struct x86_emulate_ops *ops = ctxt->ops;
ctxt 1571 arch/x86/kvm/emulate.c if (!ops->get_segment(ctxt, &sel, &desc, &base3,
ctxt 1578 arch/x86/kvm/emulate.c ops->get_gdt(ctxt, dt);
ctxt 1581 arch/x86/kvm/emulate.c static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
ctxt 1588 arch/x86/kvm/emulate.c get_descriptor_table_ptr(ctxt, selector, &dt);
ctxt 1591 arch/x86/kvm/emulate.c return emulate_gp(ctxt, selector & 0xfffc);
ctxt 1599 arch/x86/kvm/emulate.c ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
ctxt 1610 arch/x86/kvm/emulate.c static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
ctxt 1616 arch/x86/kvm/emulate.c rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
ctxt 1620 arch/x86/kvm/emulate.c return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
ctxt 1624 arch/x86/kvm/emulate.c static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
ctxt 1630 arch/x86/kvm/emulate.c rc = get_descriptor_ptr(ctxt, selector, &addr);
ctxt 1634 arch/x86/kvm/emulate.c return linear_write_system(ctxt, addr, desc, sizeof(*desc));
ctxt 1637 arch/x86/kvm/emulate.c static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
ctxt 1654 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_REAL) {
ctxt 1657 arch/x86/kvm/emulate.c ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
ctxt 1660 arch/x86/kvm/emulate.c } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
ctxt 1683 arch/x86/kvm/emulate.c if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
ctxt 1702 arch/x86/kvm/emulate.c ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
ctxt 1750 arch/x86/kvm/emulate.c ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
ctxt 1763 arch/x86/kvm/emulate.c ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
ctxt 1764 arch/x86/kvm/emulate.c sizeof(seg_desc), &ctxt->exception);
ctxt 1789 arch/x86/kvm/emulate.c ret = write_segment_descriptor(ctxt, selector,
ctxt 1794 arch/x86/kvm/emulate.c } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
ctxt 1795 arch/x86/kvm/emulate.c ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
ctxt 1799 arch/x86/kvm/emulate.c ((u64)base3 << 32), ctxt))
ctxt 1800 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0);
ctxt 1803 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
ctxt 1808 arch/x86/kvm/emulate.c return emulate_exception(ctxt, err_vec, err_code, true);
ctxt 1811 arch/x86/kvm/emulate.c static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
ctxt 1814 arch/x86/kvm/emulate.c u8 cpl = ctxt->ops->cpl(ctxt);
ctxt 1827 arch/x86/kvm/emulate.c ctxt->mode == X86EMUL_MODE_PROT64)
ctxt 1828 arch/x86/kvm/emulate.c return emulate_exception(ctxt, GP_VECTOR, 0, true);
ctxt 1830 arch/x86/kvm/emulate.c return __load_segment_descriptor(ctxt, selector, seg, cpl,
ctxt 1839 arch/x86/kvm/emulate.c static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
ctxt 1846 arch/x86/kvm/emulate.c if (ctxt->lock_prefix)
ctxt 1847 arch/x86/kvm/emulate.c return segmented_cmpxchg(ctxt,
ctxt 1853 arch/x86/kvm/emulate.c return segmented_write(ctxt,
ctxt 1859 arch/x86/kvm/emulate.c return segmented_write(ctxt,
ctxt 1865 arch/x86/kvm/emulate.c write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
ctxt 1868 arch/x86/kvm/emulate.c write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
ctxt 1879 arch/x86/kvm/emulate.c static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
ctxt 1883 arch/x86/kvm/emulate.c rsp_increment(ctxt, -bytes);
ctxt 1884 arch/x86/kvm/emulate.c addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
ctxt 1887 arch/x86/kvm/emulate.c return segmented_write(ctxt, addr, data, bytes);
ctxt 1890 arch/x86/kvm/emulate.c static int em_push(struct x86_emulate_ctxt *ctxt)
ctxt 1893 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE;
ctxt 1894 arch/x86/kvm/emulate.c return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
ctxt 1897 arch/x86/kvm/emulate.c static int emulate_pop(struct x86_emulate_ctxt *ctxt,
ctxt 1903 arch/x86/kvm/emulate.c addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
ctxt 1905 arch/x86/kvm/emulate.c rc = segmented_read(ctxt, addr, dest, len);
ctxt 1909 arch/x86/kvm/emulate.c rsp_increment(ctxt, len);
ctxt 1913 arch/x86/kvm/emulate.c static int em_pop(struct x86_emulate_ctxt *ctxt)
ctxt 1915 arch/x86/kvm/emulate.c return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
ctxt 1918 arch/x86/kvm/emulate.c static int emulate_popf(struct x86_emulate_ctxt *ctxt,
ctxt 1923 arch/x86/kvm/emulate.c int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
ctxt 1924 arch/x86/kvm/emulate.c int cpl = ctxt->ops->cpl(ctxt);
ctxt 1926 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &val, len);
ctxt 1935 arch/x86/kvm/emulate.c switch(ctxt->mode) {
ctxt 1946 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0);
ctxt 1955 arch/x86/kvm/emulate.c (ctxt->eflags & ~change_mask) | (val & change_mask);
ctxt 1960 arch/x86/kvm/emulate.c static int em_popf(struct x86_emulate_ctxt *ctxt)
ctxt 1962 arch/x86/kvm/emulate.c ctxt->dst.type = OP_REG;
ctxt 1963 arch/x86/kvm/emulate.c ctxt->dst.addr.reg = &ctxt->eflags;
ctxt 1964 arch/x86/kvm/emulate.c ctxt->dst.bytes = ctxt->op_bytes;
ctxt 1965 arch/x86/kvm/emulate.c return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
ctxt 1968 arch/x86/kvm/emulate.c static int em_enter(struct x86_emulate_ctxt *ctxt)
ctxt 1971 arch/x86/kvm/emulate.c unsigned frame_size = ctxt->src.val;
ctxt 1972 arch/x86/kvm/emulate.c unsigned nesting_level = ctxt->src2.val & 31;
ctxt 1978 arch/x86/kvm/emulate.c rbp = reg_read(ctxt, VCPU_REGS_RBP);
ctxt 1979 arch/x86/kvm/emulate.c rc = push(ctxt, &rbp, stack_size(ctxt));
ctxt 1982 arch/x86/kvm/emulate.c assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
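push() and emulate_pop() above encode the x86 stack discipline: a push pre-decrements RSP under stack_mask() and then stores through SS, a pop loads first and post-increments afterwards. The same ordering against a flat buffer:

#include <stdio.h>
#include <string.h>

static unsigned char stack_mem[64];
static unsigned long rsp = sizeof(stack_mem);

static void push(const void *data, int bytes)
{
        rsp -= bytes;                          /* pre-decrement ... */
        memcpy(&stack_mem[rsp], data, bytes);  /* ... then store */
}

static void pop(void *dest, int bytes)
{
        memcpy(dest, &stack_mem[rsp], bytes);  /* load first ... */
        rsp += bytes;                          /* ... then release */
}

int main(void)
{
        unsigned long v = 0xcafe, out = 0;
        push(&v, sizeof(v));
        pop(&out, sizeof(out));
        printf("%#lx rsp=%lu\n", out, rsp);    /* 0xcafe rsp=64 */
        return 0;
}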
ctxt 1983 arch/x86/kvm/emulate.c stack_mask(ctxt));
ctxt 1984 arch/x86/kvm/emulate.c assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
ctxt 1985 arch/x86/kvm/emulate.c reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
ctxt 1986 arch/x86/kvm/emulate.c stack_mask(ctxt));
ctxt 1990 arch/x86/kvm/emulate.c static int em_leave(struct x86_emulate_ctxt *ctxt)
ctxt 1992 arch/x86/kvm/emulate.c assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
ctxt 1993 arch/x86/kvm/emulate.c stack_mask(ctxt));
ctxt 1994 arch/x86/kvm/emulate.c return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
ctxt 1997 arch/x86/kvm/emulate.c static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
ctxt 1999 arch/x86/kvm/emulate.c int seg = ctxt->src2.val;
ctxt 2001 arch/x86/kvm/emulate.c ctxt->src.val = get_segment_selector(ctxt, seg);
ctxt 2002 arch/x86/kvm/emulate.c if (ctxt->op_bytes == 4) {
ctxt 2003 arch/x86/kvm/emulate.c rsp_increment(ctxt, -2);
ctxt 2004 arch/x86/kvm/emulate.c ctxt->op_bytes = 2;
ctxt 2007 arch/x86/kvm/emulate.c return em_push(ctxt);
ctxt 2010 arch/x86/kvm/emulate.c static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
ctxt 2012 arch/x86/kvm/emulate.c int seg = ctxt->src2.val;
ctxt 2016 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &selector, 2);
ctxt 2020 arch/x86/kvm/emulate.c if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt 2021 arch/x86/kvm/emulate.c ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
ctxt 2022 arch/x86/kvm/emulate.c if (ctxt->op_bytes > 2)
ctxt 2023 arch/x86/kvm/emulate.c rsp_increment(ctxt, ctxt->op_bytes - 2);
ctxt 2025 arch/x86/kvm/emulate.c rc = load_segment_descriptor(ctxt, (u16)selector, seg);
ctxt 2029 arch/x86/kvm/emulate.c static int em_pusha(struct x86_emulate_ctxt *ctxt)
ctxt 2031 arch/x86/kvm/emulate.c unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
ctxt 2037 arch/x86/kvm/emulate.c (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
ctxt 2039 arch/x86/kvm/emulate.c rc = em_push(ctxt);
ctxt 2049 arch/x86/kvm/emulate.c static int em_pushf(struct x86_emulate_ctxt *ctxt)
ctxt 2051 arch/x86/kvm/emulate.c ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
ctxt 2052 arch/x86/kvm/emulate.c return em_push(ctxt);
ctxt 2055 arch/x86/kvm/emulate.c static int em_popa(struct x86_emulate_ctxt *ctxt)
ctxt 2063 arch/x86/kvm/emulate.c rsp_increment(ctxt, ctxt->op_bytes);
ctxt 2067 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
ctxt 2070 arch/x86/kvm/emulate.c assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
ctxt 2076 arch/x86/kvm/emulate.c static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
ctxt 2078 arch/x86/kvm/emulate.c const struct x86_emulate_ops *ops = ctxt->ops;
ctxt 2086 arch/x86/kvm/emulate.c ctxt->src.val = ctxt->eflags;
ctxt 2087 arch/x86/kvm/emulate.c rc = em_push(ctxt);
ctxt 2091 arch/x86/kvm/emulate.c ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
ctxt 2093 arch/x86/kvm/emulate.c ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
ctxt 2094 arch/x86/kvm/emulate.c rc = em_push(ctxt);
ctxt 2098 arch/x86/kvm/emulate.c ctxt->src.val = ctxt->_eip;
ctxt 2099 arch/x86/kvm/emulate.c rc = em_push(ctxt);
ctxt 2103 arch/x86/kvm/emulate.c ops->get_idt(ctxt, &dt);
ctxt 2108 arch/x86/kvm/emulate.c rc = linear_read_system(ctxt, cs_addr, &cs, 2);
ctxt 2112 arch/x86/kvm/emulate.c rc = linear_read_system(ctxt, eip_addr, &eip, 2);
ctxt 2116 arch/x86/kvm/emulate.c rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
ctxt 2120 arch/x86/kvm/emulate.c ctxt->_eip = eip;
arch/x86/kvm/emulate.c int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) ctxt 2129 arch/x86/kvm/emulate.c invalidate_registers(ctxt); ctxt 2130 arch/x86/kvm/emulate.c rc = __emulate_int_real(ctxt, irq); ctxt 2132 arch/x86/kvm/emulate.c writeback_registers(ctxt); ctxt 2136 arch/x86/kvm/emulate.c static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) ctxt 2138 arch/x86/kvm/emulate.c switch(ctxt->mode) { ctxt 2140 arch/x86/kvm/emulate.c return __emulate_int_real(ctxt, irq); ctxt 2151 arch/x86/kvm/emulate.c static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) ctxt 2168 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); ctxt 2174 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 2176 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); ctxt 2181 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); ctxt 2186 arch/x86/kvm/emulate.c rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); ctxt 2191 arch/x86/kvm/emulate.c ctxt->_eip = temp_eip; ctxt 2193 arch/x86/kvm/emulate.c if (ctxt->op_bytes == 4) ctxt 2194 arch/x86/kvm/emulate.c ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); ctxt 2195 arch/x86/kvm/emulate.c else if (ctxt->op_bytes == 2) { ctxt 2196 arch/x86/kvm/emulate.c ctxt->eflags &= ~0xffff; ctxt 2197 arch/x86/kvm/emulate.c ctxt->eflags |= temp_eflags; ctxt 2200 arch/x86/kvm/emulate.c ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ ctxt 2201 arch/x86/kvm/emulate.c ctxt->eflags |= X86_EFLAGS_FIXED; ctxt 2202 arch/x86/kvm/emulate.c ctxt->ops->set_nmi_mask(ctxt, false); ctxt 2207 arch/x86/kvm/emulate.c static int em_iret(struct x86_emulate_ctxt *ctxt) ctxt 2209 arch/x86/kvm/emulate.c switch(ctxt->mode) { ctxt 2211 arch/x86/kvm/emulate.c return emulate_iret_real(ctxt); ctxt 2222 arch/x86/kvm/emulate.c static int em_jmp_far(struct x86_emulate_ctxt *ctxt) ctxt 2227 arch/x86/kvm/emulate.c u8 cpl = ctxt->ops->cpl(ctxt); ctxt 2229 arch/x86/kvm/emulate.c memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); ctxt 2231 arch/x86/kvm/emulate.c rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, ctxt 2237 arch/x86/kvm/emulate.c rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); ctxt 2245 arch/x86/kvm/emulate.c static int em_jmp_abs(struct x86_emulate_ctxt *ctxt) ctxt 2247 arch/x86/kvm/emulate.c return assign_eip_near(ctxt, ctxt->src.val); ctxt 2250 arch/x86/kvm/emulate.c static int em_call_near_abs(struct x86_emulate_ctxt *ctxt) ctxt 2255 arch/x86/kvm/emulate.c old_eip = ctxt->_eip; ctxt 2256 arch/x86/kvm/emulate.c rc = assign_eip_near(ctxt, ctxt->src.val); ctxt 2259 arch/x86/kvm/emulate.c ctxt->src.val = old_eip; ctxt 2260 arch/x86/kvm/emulate.c rc = em_push(ctxt); ctxt 2264 arch/x86/kvm/emulate.c static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) ctxt 2266 arch/x86/kvm/emulate.c u64 old = ctxt->dst.orig_val64; ctxt 2268 arch/x86/kvm/emulate.c if (ctxt->dst.bytes == 16) ctxt 2271 arch/x86/kvm/emulate.c if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || ctxt 2272 arch/x86/kvm/emulate.c ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { ctxt 2273 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); ctxt 2274 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); ctxt 2275 arch/x86/kvm/emulate.c ctxt->eflags &= ~X86_EFLAGS_ZF; ctxt 2277 arch/x86/kvm/emulate.c ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | ctxt 2278 arch/x86/kvm/emulate.c (u32) reg_read(ctxt, VCPU_REGS_RBX); 
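The em_cmpxchg8b entries here (emulate.c 2264-2280) trace the emulator's CMPXCHG8B handling: EDX:EAX is compared against the 64-bit destination; on a match ECX:EBX is stored and ZF is set, otherwise the old value is loaded back into EDX:EAX and ZF is cleared. A minimal user-space sketch of that semantic follows; the struct and names are invented for the demo and are not the kernel's API, which goes through reg_read()/reg_write() on the emulation context instead.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Demo-only register file standing in for the emulation context. */
    struct regs { uint32_t eax, ebx, ecx, edx; bool zf; };

    /* Software CMPXCHG8B: compare EDX:EAX with *mem; on match store
     * ECX:EBX and set ZF, on mismatch load *mem into EDX:EAX, clear ZF. */
    static void cmpxchg8b(struct regs *r, uint64_t *mem)
    {
        uint64_t old = *mem;

        if ((uint32_t)(old >> 0)  == r->eax &&
            (uint32_t)(old >> 32) == r->edx) {
            *mem = ((uint64_t)r->ecx << 32) | r->ebx;
            r->zf = true;
        } else {
            r->eax = (uint32_t)(old >> 0);
            r->edx = (uint32_t)(old >> 32);
            r->zf = false;
        }
    }

    int main(void)
    {
        struct regs r = { .eax = 0x1111, .edx = 0x2222,
                          .ebx = 0xaaaa, .ecx = 0xbbbb };
        uint64_t mem = ((uint64_t)0x2222 << 32) | 0x1111;

        cmpxchg8b(&r, &mem);   /* match: mem becomes ECX:EBX, ZF set */
        printf("mem=%#llx zf=%d\n", (unsigned long long)mem, r.zf);
        return 0;
    }
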
ctxt 2280 arch/x86/kvm/emulate.c ctxt->eflags |= X86_EFLAGS_ZF; ctxt 2285 arch/x86/kvm/emulate.c static int em_ret(struct x86_emulate_ctxt *ctxt) ctxt 2290 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); ctxt 2294 arch/x86/kvm/emulate.c return assign_eip_near(ctxt, eip); ctxt 2297 arch/x86/kvm/emulate.c static int em_ret_far(struct x86_emulate_ctxt *ctxt) ctxt 2301 arch/x86/kvm/emulate.c int cpl = ctxt->ops->cpl(ctxt); ctxt 2304 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); ctxt 2307 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); ctxt 2311 arch/x86/kvm/emulate.c if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) ctxt 2313 arch/x86/kvm/emulate.c rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, ctxt 2318 arch/x86/kvm/emulate.c rc = assign_eip_far(ctxt, eip, &new_desc); ctxt 2326 arch/x86/kvm/emulate.c static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) ctxt 2330 arch/x86/kvm/emulate.c rc = em_ret_far(ctxt); ctxt 2333 arch/x86/kvm/emulate.c rsp_increment(ctxt, ctxt->src.val); ctxt 2337 arch/x86/kvm/emulate.c static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) ctxt 2340 arch/x86/kvm/emulate.c ctxt->dst.orig_val = ctxt->dst.val; ctxt 2341 arch/x86/kvm/emulate.c ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); ctxt 2342 arch/x86/kvm/emulate.c ctxt->src.orig_val = ctxt->src.val; ctxt 2343 arch/x86/kvm/emulate.c ctxt->src.val = ctxt->dst.orig_val; ctxt 2344 arch/x86/kvm/emulate.c fastop(ctxt, em_cmp); ctxt 2346 arch/x86/kvm/emulate.c if (ctxt->eflags & X86_EFLAGS_ZF) { ctxt 2348 arch/x86/kvm/emulate.c ctxt->src.type = OP_NONE; ctxt 2349 arch/x86/kvm/emulate.c ctxt->dst.val = ctxt->src.orig_val; ctxt 2352 arch/x86/kvm/emulate.c ctxt->src.type = OP_REG; ctxt 2353 arch/x86/kvm/emulate.c ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); ctxt 2354 arch/x86/kvm/emulate.c ctxt->src.val = ctxt->dst.orig_val; ctxt 2356 arch/x86/kvm/emulate.c ctxt->dst.val = ctxt->dst.orig_val; ctxt 2361 arch/x86/kvm/emulate.c static int em_lseg(struct x86_emulate_ctxt *ctxt) ctxt 2363 arch/x86/kvm/emulate.c int seg = ctxt->src2.val; ctxt 2367 arch/x86/kvm/emulate.c memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); ctxt 2369 arch/x86/kvm/emulate.c rc = load_segment_descriptor(ctxt, sel, seg); ctxt 2373 arch/x86/kvm/emulate.c ctxt->dst.val = ctxt->src.val; ctxt 2377 arch/x86/kvm/emulate.c static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt) ctxt 2384 arch/x86/kvm/emulate.c ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); ctxt 2403 arch/x86/kvm/emulate.c static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate, ctxt 2420 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &desc, 0, n); ctxt 2425 arch/x86/kvm/emulate.c static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate, ctxt 2441 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &desc, base3, n); ctxt 2446 arch/x86/kvm/emulate.c static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt, ctxt 2459 arch/x86/kvm/emulate.c bad = ctxt->ops->set_cr(ctxt, 3, cr3); ctxt 2468 arch/x86/kvm/emulate.c bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); ctxt 2472 arch/x86/kvm/emulate.c bad = ctxt->ops->set_cr(ctxt, 0, cr0); ctxt 2477 arch/x86/kvm/emulate.c bad = ctxt->ops->set_cr(ctxt, 4, cr4); ctxt 2481 arch/x86/kvm/emulate.c bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid); ctxt 2491 arch/x86/kvm/emulate.c static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, ctxt 2502 arch/x86/kvm/emulate.c 
ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED; ctxt 2503 arch/x86/kvm/emulate.c ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0); ctxt 2506 arch/x86/kvm/emulate.c *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4); ctxt 2509 arch/x86/kvm/emulate.c ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); ctxt 2511 arch/x86/kvm/emulate.c ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); ctxt 2517 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR); ctxt 2523 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR); ctxt 2527 arch/x86/kvm/emulate.c ctxt->ops->set_gdt(ctxt, &dt); ctxt 2531 arch/x86/kvm/emulate.c ctxt->ops->set_idt(ctxt, &dt); ctxt 2534 arch/x86/kvm/emulate.c int r = rsm_load_seg_32(ctxt, smstate, i); ctxt 2541 arch/x86/kvm/emulate.c ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8)); ctxt 2543 arch/x86/kvm/emulate.c return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); ctxt 2547 arch/x86/kvm/emulate.c static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, ctxt 2558 arch/x86/kvm/emulate.c *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8); ctxt 2560 arch/x86/kvm/emulate.c ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78); ctxt 2561 arch/x86/kvm/emulate.c ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED; ctxt 2564 arch/x86/kvm/emulate.c ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1); ctxt 2566 arch/x86/kvm/emulate.c ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1); ctxt 2571 arch/x86/kvm/emulate.c ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00)); ctxt 2573 arch/x86/kvm/emulate.c ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA); ctxt 2580 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR); ctxt 2584 arch/x86/kvm/emulate.c ctxt->ops->set_idt(ctxt, &dt); ctxt 2591 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR); ctxt 2595 arch/x86/kvm/emulate.c ctxt->ops->set_gdt(ctxt, &dt); ctxt 2597 arch/x86/kvm/emulate.c r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4); ctxt 2602 arch/x86/kvm/emulate.c r = rsm_load_seg_64(ctxt, smstate, i); ctxt 2611 arch/x86/kvm/emulate.c static int em_rsm(struct x86_emulate_ctxt *ctxt) ctxt 2618 arch/x86/kvm/emulate.c if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) ctxt 2619 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 2621 arch/x86/kvm/emulate.c smbase = ctxt->ops->get_smbase(ctxt); ctxt 2623 arch/x86/kvm/emulate.c ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf)); ctxt 2627 arch/x86/kvm/emulate.c if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) ctxt 2628 arch/x86/kvm/emulate.c ctxt->ops->set_nmi_mask(ctxt, false); ctxt 2630 arch/x86/kvm/emulate.c ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) & ctxt 2638 arch/x86/kvm/emulate.c if (emulator_has_longmode(ctxt)) { ctxt 2642 arch/x86/kvm/emulate.c cr4 = ctxt->ops->get_cr(ctxt, 4); ctxt 2644 arch/x86/kvm/emulate.c ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE); ctxt 2650 arch/x86/kvm/emulate.c ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS); ctxt 2654 arch/x86/kvm/emulate.c cr0 = ctxt->ops->get_cr(ctxt, 0); ctxt 2656 arch/x86/kvm/emulate.c ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE)); ctxt 2658 arch/x86/kvm/emulate.c if (emulator_has_longmode(ctxt)) { ctxt 2660 arch/x86/kvm/emulate.c cr4 = ctxt->ops->get_cr(ctxt, 4); ctxt 2662 
arch/x86/kvm/emulate.c ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE); ctxt 2666 arch/x86/kvm/emulate.c ctxt->ops->set_msr(ctxt, MSR_EFER, efer); ctxt 2674 arch/x86/kvm/emulate.c if (ctxt->ops->pre_leave_smm(ctxt, buf)) ctxt 2678 arch/x86/kvm/emulate.c if (emulator_has_longmode(ctxt)) ctxt 2679 arch/x86/kvm/emulate.c ret = rsm_load_state_64(ctxt, buf); ctxt 2682 arch/x86/kvm/emulate.c ret = rsm_load_state_32(ctxt, buf); ctxt 2689 arch/x86/kvm/emulate.c ctxt->ops->post_leave_smm(ctxt); ctxt 2695 arch/x86/kvm/emulate.c setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, ctxt 2721 arch/x86/kvm/emulate.c static bool vendor_intel(struct x86_emulate_ctxt *ctxt) ctxt 2726 arch/x86/kvm/emulate.c ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); ctxt 2732 arch/x86/kvm/emulate.c static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) ctxt 2734 arch/x86/kvm/emulate.c const struct x86_emulate_ops *ops = ctxt->ops; ctxt 2741 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt 2746 arch/x86/kvm/emulate.c ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); ctxt 2785 arch/x86/kvm/emulate.c static int em_syscall(struct x86_emulate_ctxt *ctxt) ctxt 2787 arch/x86/kvm/emulate.c const struct x86_emulate_ops *ops = ctxt->ops; ctxt 2794 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_REAL || ctxt 2795 arch/x86/kvm/emulate.c ctxt->mode == X86EMUL_MODE_VM86) ctxt 2796 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 2798 arch/x86/kvm/emulate.c if (!(em_syscall_is_enabled(ctxt))) ctxt 2799 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 2801 arch/x86/kvm/emulate.c ops->get_msr(ctxt, MSR_EFER, &efer); ctxt 2802 arch/x86/kvm/emulate.c setup_syscalls_segments(ctxt, &cs, &ss); ctxt 2805 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 2807 arch/x86/kvm/emulate.c ops->get_msr(ctxt, MSR_STAR, &msr_data); ctxt 2816 arch/x86/kvm/emulate.c ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ctxt 2817 arch/x86/kvm/emulate.c ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt 2819 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; ctxt 2822 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; ctxt 2824 arch/x86/kvm/emulate.c ops->get_msr(ctxt, ctxt 2825 arch/x86/kvm/emulate.c ctxt->mode == X86EMUL_MODE_PROT64 ? 
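The em_syscall entries running through here (emulate.c 2785 onward) show the 64-bit SYSCALL transition the emulator performs: CS/SS selectors are derived from MSR_STAR, the return RIP is saved in RCX and RFLAGS in R11, the new RIP comes from MSR_LSTAR (or MSR_CSTAR outside 64-bit mode), and RFLAGS is masked with MSR_SYSCALL_MASK. A hedged stand-alone sketch of that selector and flags arithmetic; the constants, struct, and exact selector math are illustrative assumptions, not the kernel's code.

    #include <stdint.h>
    #include <stdio.h>

    #define EFLAGS_FIXED 0x2ULL    /* bit 1 of EFLAGS is always set */

    /* Demo-only view of the architectural state SYSCALL touches. */
    struct syscall_state {
        uint16_t cs_sel, ss_sel;
        uint64_t rip, rcx, r11, rflags;
    };

    /* Apply the 64-bit SYSCALL transition from the relevant MSRs:
     * STAR supplies the selectors, LSTAR the target RIP, and FMASK
     * the RFLAGS bits to clear. */
    static void do_syscall64(struct syscall_state *s,
                             uint64_t star, uint64_t lstar, uint64_t fmask)
    {
        s->cs_sel = (uint16_t)(star >> 32);        /* kernel CS */
        s->ss_sel = (uint16_t)(star >> 32) + 8;    /* SS follows CS */

        s->rcx = s->rip;         /* return RIP saved in RCX */
        s->r11 = s->rflags;      /* caller's RFLAGS saved in R11 */
        s->rip = lstar;          /* new RIP from MSR_LSTAR */
        s->rflags &= ~fmask;     /* clear IF, TF, ... per SYSCALL_MASK */
        s->rflags |= EFLAGS_FIXED;
    }

    int main(void)
    {
        struct syscall_state s = { .rip = 0x401000, .rflags = 0x246 };

        do_syscall64(&s, (uint64_t)0x10 << 32,
                     0xffffffff81000000ULL, 0x47700ULL);
        printf("cs=%#x ss=%#x rcx=%#llx rip=%#llx\n",
               (unsigned)s.cs_sel, (unsigned)s.ss_sel,
               (unsigned long long)s.rcx, (unsigned long long)s.rip);
        return 0;
    }
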
ctxt 2827 arch/x86/kvm/emulate.c ctxt->_eip = msr_data; ctxt 2829 arch/x86/kvm/emulate.c ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); ctxt 2830 arch/x86/kvm/emulate.c ctxt->eflags &= ~msr_data; ctxt 2831 arch/x86/kvm/emulate.c ctxt->eflags |= X86_EFLAGS_FIXED; ctxt 2835 arch/x86/kvm/emulate.c ops->get_msr(ctxt, MSR_STAR, &msr_data); ctxt 2836 arch/x86/kvm/emulate.c ctxt->_eip = (u32)msr_data; ctxt 2838 arch/x86/kvm/emulate.c ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); ctxt 2841 arch/x86/kvm/emulate.c ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; ctxt 2845 arch/x86/kvm/emulate.c static int em_sysenter(struct x86_emulate_ctxt *ctxt) ctxt 2847 arch/x86/kvm/emulate.c const struct x86_emulate_ops *ops = ctxt->ops; ctxt 2853 arch/x86/kvm/emulate.c ops->get_msr(ctxt, MSR_EFER, &efer); ctxt 2855 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_REAL) ctxt 2856 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 2862 arch/x86/kvm/emulate.c if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) ctxt 2863 arch/x86/kvm/emulate.c && !vendor_intel(ctxt)) ctxt 2864 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 2867 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt 2870 arch/x86/kvm/emulate.c setup_syscalls_segments(ctxt, &cs, &ss); ctxt 2872 arch/x86/kvm/emulate.c ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); ctxt 2874 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 2876 arch/x86/kvm/emulate.c ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); ctxt 2884 arch/x86/kvm/emulate.c ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ctxt 2885 arch/x86/kvm/emulate.c ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt 2887 arch/x86/kvm/emulate.c ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ctxt 2888 arch/x86/kvm/emulate.c ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data; ctxt 2890 arch/x86/kvm/emulate.c ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); ctxt 2891 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? 
msr_data : ctxt 2897 arch/x86/kvm/emulate.c static int em_sysexit(struct x86_emulate_ctxt *ctxt) ctxt 2899 arch/x86/kvm/emulate.c const struct x86_emulate_ops *ops = ctxt->ops; ctxt 2906 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_REAL || ctxt 2907 arch/x86/kvm/emulate.c ctxt->mode == X86EMUL_MODE_VM86) ctxt 2908 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 2910 arch/x86/kvm/emulate.c setup_syscalls_segments(ctxt, &cs, &ss); ctxt 2912 arch/x86/kvm/emulate.c if ((ctxt->rex_prefix & 0x8) != 0x0) ctxt 2917 arch/x86/kvm/emulate.c rcx = reg_read(ctxt, VCPU_REGS_RCX); ctxt 2918 arch/x86/kvm/emulate.c rdx = reg_read(ctxt, VCPU_REGS_RDX); ctxt 2922 arch/x86/kvm/emulate.c ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); ctxt 2927 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 2935 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 2939 arch/x86/kvm/emulate.c if (emul_is_noncanonical_address(rcx, ctxt) || ctxt 2940 arch/x86/kvm/emulate.c emul_is_noncanonical_address(rdx, ctxt)) ctxt 2941 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 2947 arch/x86/kvm/emulate.c ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ctxt 2948 arch/x86/kvm/emulate.c ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt 2950 arch/x86/kvm/emulate.c ctxt->_eip = rdx; ctxt 2951 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RSP) = rcx; ctxt 2956 arch/x86/kvm/emulate.c static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) ctxt 2959 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_REAL) ctxt 2961 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_VM86) ctxt 2963 arch/x86/kvm/emulate.c iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; ctxt 2964 arch/x86/kvm/emulate.c return ctxt->ops->cpl(ctxt) > iopl; ctxt 2970 arch/x86/kvm/emulate.c static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, ctxt 2973 arch/x86/kvm/emulate.c const struct x86_emulate_ops *ops = ctxt->ops; ctxt 2989 arch/x86/kvm/emulate.c ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); ctxt 2998 arch/x86/kvm/emulate.c r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true); ctxt 3003 arch/x86/kvm/emulate.c r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true); ctxt 3011 arch/x86/kvm/emulate.c static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, ctxt 3014 arch/x86/kvm/emulate.c if (ctxt->perm_ok) ctxt 3017 arch/x86/kvm/emulate.c if (emulator_bad_iopl(ctxt)) ctxt 3018 arch/x86/kvm/emulate.c if (!emulator_io_port_access_allowed(ctxt, port, len)) ctxt 3021 arch/x86/kvm/emulate.c ctxt->perm_ok = true; ctxt 3026 arch/x86/kvm/emulate.c static void string_registers_quirk(struct x86_emulate_ctxt *ctxt) ctxt 3033 arch/x86/kvm/emulate.c if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt)) ctxt 3036 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RCX) = 0; ctxt 3038 arch/x86/kvm/emulate.c switch (ctxt->b) { ctxt 3041 arch/x86/kvm/emulate.c *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1; ctxt 3045 arch/x86/kvm/emulate.c *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1; ctxt 3050 arch/x86/kvm/emulate.c static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, ctxt 3053 arch/x86/kvm/emulate.c tss->ip = ctxt->_eip; ctxt 3054 arch/x86/kvm/emulate.c tss->flag = ctxt->eflags; ctxt 3055 arch/x86/kvm/emulate.c tss->ax = reg_read(ctxt, VCPU_REGS_RAX); ctxt 3056 arch/x86/kvm/emulate.c tss->cx = reg_read(ctxt, VCPU_REGS_RCX); ctxt 3057 arch/x86/kvm/emulate.c tss->dx = reg_read(ctxt, VCPU_REGS_RDX); ctxt 3058 arch/x86/kvm/emulate.c tss->bx = 
reg_read(ctxt, VCPU_REGS_RBX); ctxt 3059 arch/x86/kvm/emulate.c tss->sp = reg_read(ctxt, VCPU_REGS_RSP); ctxt 3060 arch/x86/kvm/emulate.c tss->bp = reg_read(ctxt, VCPU_REGS_RBP); ctxt 3061 arch/x86/kvm/emulate.c tss->si = reg_read(ctxt, VCPU_REGS_RSI); ctxt 3062 arch/x86/kvm/emulate.c tss->di = reg_read(ctxt, VCPU_REGS_RDI); ctxt 3064 arch/x86/kvm/emulate.c tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); ctxt 3065 arch/x86/kvm/emulate.c tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); ctxt 3066 arch/x86/kvm/emulate.c tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); ctxt 3067 arch/x86/kvm/emulate.c tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); ctxt 3068 arch/x86/kvm/emulate.c tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); ctxt 3071 arch/x86/kvm/emulate.c static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, ctxt 3077 arch/x86/kvm/emulate.c ctxt->_eip = tss->ip; ctxt 3078 arch/x86/kvm/emulate.c ctxt->eflags = tss->flag | 2; ctxt 3079 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; ctxt 3080 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; ctxt 3081 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; ctxt 3082 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; ctxt 3083 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; ctxt 3084 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; ctxt 3085 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; ctxt 3086 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; ctxt 3092 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); ctxt 3093 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); ctxt 3094 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); ctxt 3095 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); ctxt 3096 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); ctxt 3104 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, ctxt 3108 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, ctxt 3112 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, ctxt 3116 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, ctxt 3120 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, ctxt 3128 arch/x86/kvm/emulate.c static int task_switch_16(struct x86_emulate_ctxt *ctxt, ctxt 3136 arch/x86/kvm/emulate.c ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); ctxt 3140 arch/x86/kvm/emulate.c save_state_to_tss16(ctxt, &tss_seg); ctxt 3142 arch/x86/kvm/emulate.c ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); ctxt 3146 arch/x86/kvm/emulate.c ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); ctxt 3153 arch/x86/kvm/emulate.c ret = linear_write_system(ctxt, new_tss_base, ctxt 3160 arch/x86/kvm/emulate.c return load_state_from_tss16(ctxt, &tss_seg); ctxt 3163 arch/x86/kvm/emulate.c static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, ctxt 3167 arch/x86/kvm/emulate.c tss->eip = ctxt->_eip; ctxt 3168 arch/x86/kvm/emulate.c tss->eflags = ctxt->eflags; ctxt 3169 arch/x86/kvm/emulate.c tss->eax = reg_read(ctxt, VCPU_REGS_RAX); ctxt 3170 arch/x86/kvm/emulate.c tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); ctxt 3171 arch/x86/kvm/emulate.c tss->edx = 
reg_read(ctxt, VCPU_REGS_RDX); ctxt 3172 arch/x86/kvm/emulate.c tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); ctxt 3173 arch/x86/kvm/emulate.c tss->esp = reg_read(ctxt, VCPU_REGS_RSP); ctxt 3174 arch/x86/kvm/emulate.c tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); ctxt 3175 arch/x86/kvm/emulate.c tss->esi = reg_read(ctxt, VCPU_REGS_RSI); ctxt 3176 arch/x86/kvm/emulate.c tss->edi = reg_read(ctxt, VCPU_REGS_RDI); ctxt 3178 arch/x86/kvm/emulate.c tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); ctxt 3179 arch/x86/kvm/emulate.c tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); ctxt 3180 arch/x86/kvm/emulate.c tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); ctxt 3181 arch/x86/kvm/emulate.c tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); ctxt 3182 arch/x86/kvm/emulate.c tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); ctxt 3183 arch/x86/kvm/emulate.c tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); ctxt 3186 arch/x86/kvm/emulate.c static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, ctxt 3192 arch/x86/kvm/emulate.c if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) ctxt 3193 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3194 arch/x86/kvm/emulate.c ctxt->_eip = tss->eip; ctxt 3195 arch/x86/kvm/emulate.c ctxt->eflags = tss->eflags | 2; ctxt 3198 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; ctxt 3199 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; ctxt 3200 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; ctxt 3201 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; ctxt 3202 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; ctxt 3203 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; ctxt 3204 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; ctxt 3205 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; ctxt 3212 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); ctxt 3213 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); ctxt 3214 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); ctxt 3215 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); ctxt 3216 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); ctxt 3217 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); ctxt 3218 arch/x86/kvm/emulate.c set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); ctxt 3225 arch/x86/kvm/emulate.c if (ctxt->eflags & X86_EFLAGS_VM) { ctxt 3226 arch/x86/kvm/emulate.c ctxt->mode = X86EMUL_MODE_VM86; ctxt 3229 arch/x86/kvm/emulate.c ctxt->mode = X86EMUL_MODE_PROT32; ctxt 3237 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, ctxt 3241 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, ctxt 3245 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, ctxt 3249 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, ctxt 3253 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, ctxt 3257 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, ctxt 3261 arch/x86/kvm/emulate.c ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, ctxt 3267 arch/x86/kvm/emulate.c static int task_switch_32(struct x86_emulate_ctxt *ctxt, ctxt 3277 arch/x86/kvm/emulate.c ret = linear_read_system(ctxt, old_tss_base, 
&tss_seg, sizeof(tss_seg)); ctxt 3281 arch/x86/kvm/emulate.c save_state_to_tss32(ctxt, &tss_seg); ctxt 3284 arch/x86/kvm/emulate.c ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip, ctxt 3289 arch/x86/kvm/emulate.c ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); ctxt 3296 arch/x86/kvm/emulate.c ret = linear_write_system(ctxt, new_tss_base, ctxt 3303 arch/x86/kvm/emulate.c return load_state_from_tss32(ctxt, &tss_seg); ctxt 3306 arch/x86/kvm/emulate.c static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, ctxt 3310 arch/x86/kvm/emulate.c const struct x86_emulate_ops *ops = ctxt->ops; ctxt 3313 arch/x86/kvm/emulate.c u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); ctxt 3315 arch/x86/kvm/emulate.c ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); ctxt 3321 arch/x86/kvm/emulate.c ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); ctxt 3324 arch/x86/kvm/emulate.c ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); ctxt 3344 arch/x86/kvm/emulate.c ret = read_interrupt_descriptor(ctxt, idt_index, ctxt 3350 arch/x86/kvm/emulate.c if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) ctxt 3351 arch/x86/kvm/emulate.c return emulate_gp(ctxt, (idt_index << 3) | 0x2); ctxt 3359 arch/x86/kvm/emulate.c return emulate_ts(ctxt, tss_selector & 0xfffc); ctxt 3364 arch/x86/kvm/emulate.c write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); ctxt 3368 arch/x86/kvm/emulate.c ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; ctxt 3376 arch/x86/kvm/emulate.c ret = task_switch_32(ctxt, tss_selector, old_tss_sel, ctxt 3379 arch/x86/kvm/emulate.c ret = task_switch_16(ctxt, tss_selector, old_tss_sel, ctxt 3385 arch/x86/kvm/emulate.c ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; ctxt 3389 arch/x86/kvm/emulate.c write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); ctxt 3392 arch/x86/kvm/emulate.c ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); ctxt 3393 arch/x86/kvm/emulate.c ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); ctxt 3396 arch/x86/kvm/emulate.c ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; ctxt 3397 arch/x86/kvm/emulate.c ctxt->lock_prefix = 0; ctxt 3398 arch/x86/kvm/emulate.c ctxt->src.val = (unsigned long) error_code; ctxt 3399 arch/x86/kvm/emulate.c ret = em_push(ctxt); ctxt 3402 arch/x86/kvm/emulate.c ops->get_dr(ctxt, 7, &dr7); ctxt 3403 arch/x86/kvm/emulate.c ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN)); ctxt 3408 arch/x86/kvm/emulate.c int emulator_task_switch(struct x86_emulate_ctxt *ctxt, ctxt 3414 arch/x86/kvm/emulate.c invalidate_registers(ctxt); ctxt 3415 arch/x86/kvm/emulate.c ctxt->_eip = ctxt->eip; ctxt 3416 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3418 arch/x86/kvm/emulate.c rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, ctxt 3422 arch/x86/kvm/emulate.c ctxt->eip = ctxt->_eip; ctxt 3423 arch/x86/kvm/emulate.c writeback_registers(ctxt); ctxt 3429 arch/x86/kvm/emulate.c static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, ctxt 3432 arch/x86/kvm/emulate.c int df = (ctxt->eflags & X86_EFLAGS_DF) ? 
-op->count : op->count; ctxt 3434 arch/x86/kvm/emulate.c register_address_increment(ctxt, reg, df * op->bytes); ctxt 3435 arch/x86/kvm/emulate.c op->addr.mem.ea = register_address(ctxt, reg); ctxt 3438 arch/x86/kvm/emulate.c static int em_das(struct x86_emulate_ctxt *ctxt) ctxt 3443 arch/x86/kvm/emulate.c cf = ctxt->eflags & X86_EFLAGS_CF; ctxt 3444 arch/x86/kvm/emulate.c al = ctxt->dst.val; ctxt 3449 arch/x86/kvm/emulate.c af = ctxt->eflags & X86_EFLAGS_AF; ctxt 3462 arch/x86/kvm/emulate.c ctxt->dst.val = al; ctxt 3464 arch/x86/kvm/emulate.c ctxt->src.type = OP_IMM; ctxt 3465 arch/x86/kvm/emulate.c ctxt->src.val = 0; ctxt 3466 arch/x86/kvm/emulate.c ctxt->src.bytes = 1; ctxt 3467 arch/x86/kvm/emulate.c fastop(ctxt, em_or); ctxt 3468 arch/x86/kvm/emulate.c ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); ctxt 3470 arch/x86/kvm/emulate.c ctxt->eflags |= X86_EFLAGS_CF; ctxt 3472 arch/x86/kvm/emulate.c ctxt->eflags |= X86_EFLAGS_AF; ctxt 3476 arch/x86/kvm/emulate.c static int em_aam(struct x86_emulate_ctxt *ctxt) ctxt 3480 arch/x86/kvm/emulate.c if (ctxt->src.val == 0) ctxt 3481 arch/x86/kvm/emulate.c return emulate_de(ctxt); ctxt 3483 arch/x86/kvm/emulate.c al = ctxt->dst.val & 0xff; ctxt 3484 arch/x86/kvm/emulate.c ah = al / ctxt->src.val; ctxt 3485 arch/x86/kvm/emulate.c al %= ctxt->src.val; ctxt 3487 arch/x86/kvm/emulate.c ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); ctxt 3490 arch/x86/kvm/emulate.c ctxt->src.type = OP_IMM; ctxt 3491 arch/x86/kvm/emulate.c ctxt->src.val = 0; ctxt 3492 arch/x86/kvm/emulate.c ctxt->src.bytes = 1; ctxt 3493 arch/x86/kvm/emulate.c fastop(ctxt, em_or); ctxt 3498 arch/x86/kvm/emulate.c static int em_aad(struct x86_emulate_ctxt *ctxt) ctxt 3500 arch/x86/kvm/emulate.c u8 al = ctxt->dst.val & 0xff; ctxt 3501 arch/x86/kvm/emulate.c u8 ah = (ctxt->dst.val >> 8) & 0xff; ctxt 3503 arch/x86/kvm/emulate.c al = (al + (ah * ctxt->src.val)) & 0xff; ctxt 3505 arch/x86/kvm/emulate.c ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; ctxt 3508 arch/x86/kvm/emulate.c ctxt->src.type = OP_IMM; ctxt 3509 arch/x86/kvm/emulate.c ctxt->src.val = 0; ctxt 3510 arch/x86/kvm/emulate.c ctxt->src.bytes = 1; ctxt 3511 arch/x86/kvm/emulate.c fastop(ctxt, em_or); ctxt 3516 arch/x86/kvm/emulate.c static int em_call(struct x86_emulate_ctxt *ctxt) ctxt 3519 arch/x86/kvm/emulate.c long rel = ctxt->src.val; ctxt 3521 arch/x86/kvm/emulate.c ctxt->src.val = (unsigned long)ctxt->_eip; ctxt 3522 arch/x86/kvm/emulate.c rc = jmp_rel(ctxt, rel); ctxt 3525 arch/x86/kvm/emulate.c return em_push(ctxt); ctxt 3528 arch/x86/kvm/emulate.c static int em_call_far(struct x86_emulate_ctxt *ctxt) ctxt 3534 arch/x86/kvm/emulate.c const struct x86_emulate_ops *ops = ctxt->ops; ctxt 3535 arch/x86/kvm/emulate.c int cpl = ctxt->ops->cpl(ctxt); ctxt 3536 arch/x86/kvm/emulate.c enum x86emul_mode prev_mode = ctxt->mode; ctxt 3538 arch/x86/kvm/emulate.c old_eip = ctxt->_eip; ctxt 3539 arch/x86/kvm/emulate.c ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); ctxt 3541 arch/x86/kvm/emulate.c memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); ctxt 3542 arch/x86/kvm/emulate.c rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, ctxt 3547 arch/x86/kvm/emulate.c rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); ctxt 3551 arch/x86/kvm/emulate.c ctxt->src.val = old_cs; ctxt 3552 arch/x86/kvm/emulate.c rc = em_push(ctxt); ctxt 3556 arch/x86/kvm/emulate.c ctxt->src.val = old_eip; ctxt 3557 arch/x86/kvm/emulate.c rc = em_push(ctxt); ctxt 3566 arch/x86/kvm/emulate.c ops->set_segment(ctxt, 
old_cs, &old_desc, 0, VCPU_SREG_CS); ctxt 3567 arch/x86/kvm/emulate.c ctxt->mode = prev_mode; ctxt 3572 arch/x86/kvm/emulate.c static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) ctxt 3577 arch/x86/kvm/emulate.c rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); ctxt 3580 arch/x86/kvm/emulate.c rc = assign_eip_near(ctxt, eip); ctxt 3583 arch/x86/kvm/emulate.c rsp_increment(ctxt, ctxt->src.val); ctxt 3587 arch/x86/kvm/emulate.c static int em_xchg(struct x86_emulate_ctxt *ctxt) ctxt 3590 arch/x86/kvm/emulate.c ctxt->src.val = ctxt->dst.val; ctxt 3591 arch/x86/kvm/emulate.c write_register_operand(&ctxt->src); ctxt 3594 arch/x86/kvm/emulate.c ctxt->dst.val = ctxt->src.orig_val; ctxt 3595 arch/x86/kvm/emulate.c ctxt->lock_prefix = 1; ctxt 3599 arch/x86/kvm/emulate.c static int em_imul_3op(struct x86_emulate_ctxt *ctxt) ctxt 3601 arch/x86/kvm/emulate.c ctxt->dst.val = ctxt->src2.val; ctxt 3602 arch/x86/kvm/emulate.c return fastop(ctxt, em_imul); ctxt 3605 arch/x86/kvm/emulate.c static int em_cwd(struct x86_emulate_ctxt *ctxt) ctxt 3607 arch/x86/kvm/emulate.c ctxt->dst.type = OP_REG; ctxt 3608 arch/x86/kvm/emulate.c ctxt->dst.bytes = ctxt->src.bytes; ctxt 3609 arch/x86/kvm/emulate.c ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); ctxt 3610 arch/x86/kvm/emulate.c ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); ctxt 3615 arch/x86/kvm/emulate.c static int em_rdpid(struct x86_emulate_ctxt *ctxt) ctxt 3619 arch/x86/kvm/emulate.c if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux)) ctxt 3620 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3621 arch/x86/kvm/emulate.c ctxt->dst.val = tsc_aux; ctxt 3625 arch/x86/kvm/emulate.c static int em_rdtsc(struct x86_emulate_ctxt *ctxt) ctxt 3629 arch/x86/kvm/emulate.c ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); ctxt 3630 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; ctxt 3631 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; ctxt 3635 arch/x86/kvm/emulate.c static int em_rdpmc(struct x86_emulate_ctxt *ctxt) ctxt 3639 arch/x86/kvm/emulate.c if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) ctxt 3640 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3641 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; ctxt 3642 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; ctxt 3646 arch/x86/kvm/emulate.c static int em_mov(struct x86_emulate_ctxt *ctxt) ctxt 3648 arch/x86/kvm/emulate.c memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); ctxt 3654 arch/x86/kvm/emulate.c static int em_movbe(struct x86_emulate_ctxt *ctxt) ctxt 3662 arch/x86/kvm/emulate.c ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); ctxt 3664 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 3666 arch/x86/kvm/emulate.c switch (ctxt->op_bytes) { ctxt 3676 arch/x86/kvm/emulate.c tmp = (u16)ctxt->src.val; ctxt 3677 arch/x86/kvm/emulate.c ctxt->dst.val &= ~0xffffUL; ctxt 3678 arch/x86/kvm/emulate.c ctxt->dst.val |= (unsigned long)swab16(tmp); ctxt 3681 arch/x86/kvm/emulate.c ctxt->dst.val = swab32((u32)ctxt->src.val); ctxt 3684 arch/x86/kvm/emulate.c ctxt->dst.val = swab64(ctxt->src.val); ctxt 3692 arch/x86/kvm/emulate.c static int em_cr_write(struct x86_emulate_ctxt *ctxt) ctxt 3694 arch/x86/kvm/emulate.c if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) ctxt 3695 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3698 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3702 arch/x86/kvm/emulate.c static int em_dr_write(struct 
x86_emulate_ctxt *ctxt) ctxt 3706 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt 3707 arch/x86/kvm/emulate.c val = ctxt->src.val & ~0ULL; ctxt 3709 arch/x86/kvm/emulate.c val = ctxt->src.val & ~0U; ctxt 3712 arch/x86/kvm/emulate.c if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) ctxt 3713 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3716 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3720 arch/x86/kvm/emulate.c static int em_wrmsr(struct x86_emulate_ctxt *ctxt) ctxt 3724 arch/x86/kvm/emulate.c msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) ctxt 3725 arch/x86/kvm/emulate.c | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); ctxt 3726 arch/x86/kvm/emulate.c if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data)) ctxt 3727 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3732 arch/x86/kvm/emulate.c static int em_rdmsr(struct x86_emulate_ctxt *ctxt) ctxt 3736 arch/x86/kvm/emulate.c if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data)) ctxt 3737 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3739 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; ctxt 3740 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; ctxt 3744 arch/x86/kvm/emulate.c static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment) ctxt 3747 arch/x86/kvm/emulate.c (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && ctxt 3748 arch/x86/kvm/emulate.c ctxt->ops->cpl(ctxt) > 0) ctxt 3749 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3751 arch/x86/kvm/emulate.c ctxt->dst.val = get_segment_selector(ctxt, segment); ctxt 3752 arch/x86/kvm/emulate.c if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM) ctxt 3753 arch/x86/kvm/emulate.c ctxt->dst.bytes = 2; ctxt 3757 arch/x86/kvm/emulate.c static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) ctxt 3759 arch/x86/kvm/emulate.c if (ctxt->modrm_reg > VCPU_SREG_GS) ctxt 3760 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 3762 arch/x86/kvm/emulate.c return em_store_sreg(ctxt, ctxt->modrm_reg); ctxt 3765 arch/x86/kvm/emulate.c static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) ctxt 3767 arch/x86/kvm/emulate.c u16 sel = ctxt->src.val; ctxt 3769 arch/x86/kvm/emulate.c if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) ctxt 3770 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 3772 arch/x86/kvm/emulate.c if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt 3773 arch/x86/kvm/emulate.c ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; ctxt 3776 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3777 arch/x86/kvm/emulate.c return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); ctxt 3780 arch/x86/kvm/emulate.c static int em_sldt(struct x86_emulate_ctxt *ctxt) ctxt 3782 arch/x86/kvm/emulate.c return em_store_sreg(ctxt, VCPU_SREG_LDTR); ctxt 3785 arch/x86/kvm/emulate.c static int em_lldt(struct x86_emulate_ctxt *ctxt) ctxt 3787 arch/x86/kvm/emulate.c u16 sel = ctxt->src.val; ctxt 3790 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3791 arch/x86/kvm/emulate.c return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); ctxt 3794 arch/x86/kvm/emulate.c static int em_str(struct x86_emulate_ctxt *ctxt) ctxt 3796 arch/x86/kvm/emulate.c return em_store_sreg(ctxt, VCPU_SREG_TR); ctxt 3799 arch/x86/kvm/emulate.c static int em_ltr(struct x86_emulate_ctxt *ctxt) ctxt 3801 arch/x86/kvm/emulate.c u16 sel = ctxt->src.val; ctxt 3804 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3805 arch/x86/kvm/emulate.c return 
load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); ctxt 3808 arch/x86/kvm/emulate.c static int em_invlpg(struct x86_emulate_ctxt *ctxt) ctxt 3813 arch/x86/kvm/emulate.c rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); ctxt 3815 arch/x86/kvm/emulate.c ctxt->ops->invlpg(ctxt, linear); ctxt 3817 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3821 arch/x86/kvm/emulate.c static int em_clts(struct x86_emulate_ctxt *ctxt) ctxt 3825 arch/x86/kvm/emulate.c cr0 = ctxt->ops->get_cr(ctxt, 0); ctxt 3827 arch/x86/kvm/emulate.c ctxt->ops->set_cr(ctxt, 0, cr0); ctxt 3831 arch/x86/kvm/emulate.c static int em_hypercall(struct x86_emulate_ctxt *ctxt) ctxt 3833 arch/x86/kvm/emulate.c int rc = ctxt->ops->fix_hypercall(ctxt); ctxt 3839 arch/x86/kvm/emulate.c ctxt->_eip = ctxt->eip; ctxt 3841 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3845 arch/x86/kvm/emulate.c static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, ctxt 3846 arch/x86/kvm/emulate.c void (*get)(struct x86_emulate_ctxt *ctxt, ctxt 3851 arch/x86/kvm/emulate.c if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && ctxt 3852 arch/x86/kvm/emulate.c ctxt->ops->cpl(ctxt) > 0) ctxt 3853 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3855 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt 3856 arch/x86/kvm/emulate.c ctxt->op_bytes = 8; ctxt 3857 arch/x86/kvm/emulate.c get(ctxt, &desc_ptr); ctxt 3858 arch/x86/kvm/emulate.c if (ctxt->op_bytes == 2) { ctxt 3859 arch/x86/kvm/emulate.c ctxt->op_bytes = 4; ctxt 3863 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3864 arch/x86/kvm/emulate.c return segmented_write_std(ctxt, ctxt->dst.addr.mem, ctxt 3865 arch/x86/kvm/emulate.c &desc_ptr, 2 + ctxt->op_bytes); ctxt 3868 arch/x86/kvm/emulate.c static int em_sgdt(struct x86_emulate_ctxt *ctxt) ctxt 3870 arch/x86/kvm/emulate.c return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); ctxt 3873 arch/x86/kvm/emulate.c static int em_sidt(struct x86_emulate_ctxt *ctxt) ctxt 3875 arch/x86/kvm/emulate.c return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); ctxt 3878 arch/x86/kvm/emulate.c static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt) ctxt 3883 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt 3884 arch/x86/kvm/emulate.c ctxt->op_bytes = 8; ctxt 3885 arch/x86/kvm/emulate.c rc = read_descriptor(ctxt, ctxt->src.addr.mem, ctxt 3887 arch/x86/kvm/emulate.c ctxt->op_bytes); ctxt 3890 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64 && ctxt 3891 arch/x86/kvm/emulate.c emul_is_noncanonical_address(desc_ptr.address, ctxt)) ctxt 3892 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3894 arch/x86/kvm/emulate.c ctxt->ops->set_gdt(ctxt, &desc_ptr); ctxt 3896 arch/x86/kvm/emulate.c ctxt->ops->set_idt(ctxt, &desc_ptr); ctxt 3898 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3902 arch/x86/kvm/emulate.c static int em_lgdt(struct x86_emulate_ctxt *ctxt) ctxt 3904 arch/x86/kvm/emulate.c return em_lgdt_lidt(ctxt, true); ctxt 3907 arch/x86/kvm/emulate.c static int em_lidt(struct x86_emulate_ctxt *ctxt) ctxt 3909 arch/x86/kvm/emulate.c return em_lgdt_lidt(ctxt, false); ctxt 3912 arch/x86/kvm/emulate.c static int em_smsw(struct x86_emulate_ctxt *ctxt) ctxt 3914 arch/x86/kvm/emulate.c if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && ctxt 3915 arch/x86/kvm/emulate.c ctxt->ops->cpl(ctxt) > 0) ctxt 3916 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3918 arch/x86/kvm/emulate.c if (ctxt->dst.type == OP_MEM) ctxt 3919 arch/x86/kvm/emulate.c ctxt->dst.bytes = 2; 
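The em_store_sreg, emulate_store_desc_ptr, and em_smsw entries around this point share one guard the index makes visible: when CR4.UMIP is set and CPL > 0, the store raises #GP(0) instead of leaking CR0 or descriptor-table state, and SMSW additionally narrows a memory destination to 16 bits. A small sketch of that decision logic under those assumptions; the operand struct and the -1 return standing in for emulate_gp() are demo inventions.

    #include <stdint.h>
    #include <stdio.h>

    #define X86_CR4_UMIP (1ULL << 11)

    enum op_type { OP_REG, OP_MEM };

    /* Stand-in destination operand, mirroring ctxt->dst in spirit. */
    struct operand { enum op_type type; unsigned bytes; uint64_t val; };

    /* SMSW-style store of CR0: fail (#GP(0) in the real emulator) under
     * UMIP at CPL > 0; write only 16 bits when the destination is memory. */
    static int em_smsw_demo(uint64_t cr0, uint64_t cr4, int cpl,
                            struct operand *dst)
    {
        if ((cr4 & X86_CR4_UMIP) && cpl > 0)
            return -1;                 /* emulate_gp(ctxt, 0) equivalent */

        if (dst->type == OP_MEM)
            dst->bytes = 2;            /* memory form is always 16-bit */
        dst->val = cr0;                /* writeback truncates to bytes */
        return 0;
    }

    int main(void)
    {
        struct operand dst = { .type = OP_MEM, .bytes = 4 };

        if (em_smsw_demo(0x80050033ULL, 0, 3, &dst) == 0)
            printf("stored %u bytes, val=%#llx\n",
                   dst.bytes, (unsigned long long)dst.val);
        return 0;
    }
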
ctxt 3920 arch/x86/kvm/emulate.c ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); ctxt 3924 arch/x86/kvm/emulate.c static int em_lmsw(struct x86_emulate_ctxt *ctxt) ctxt 3926 arch/x86/kvm/emulate.c ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) ctxt 3927 arch/x86/kvm/emulate.c | (ctxt->src.val & 0x0f)); ctxt 3928 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3932 arch/x86/kvm/emulate.c static int em_loop(struct x86_emulate_ctxt *ctxt) ctxt 3936 arch/x86/kvm/emulate.c register_address_increment(ctxt, VCPU_REGS_RCX, -1); ctxt 3937 arch/x86/kvm/emulate.c if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && ctxt 3938 arch/x86/kvm/emulate.c (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) ctxt 3939 arch/x86/kvm/emulate.c rc = jmp_rel(ctxt, ctxt->src.val); ctxt 3944 arch/x86/kvm/emulate.c static int em_jcxz(struct x86_emulate_ctxt *ctxt) ctxt 3948 arch/x86/kvm/emulate.c if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) ctxt 3949 arch/x86/kvm/emulate.c rc = jmp_rel(ctxt, ctxt->src.val); ctxt 3954 arch/x86/kvm/emulate.c static int em_in(struct x86_emulate_ctxt *ctxt) ctxt 3956 arch/x86/kvm/emulate.c if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, ctxt 3957 arch/x86/kvm/emulate.c &ctxt->dst.val)) ctxt 3963 arch/x86/kvm/emulate.c static int em_out(struct x86_emulate_ctxt *ctxt) ctxt 3965 arch/x86/kvm/emulate.c ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, ctxt 3966 arch/x86/kvm/emulate.c &ctxt->src.val, 1); ctxt 3968 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 3972 arch/x86/kvm/emulate.c static int em_cli(struct x86_emulate_ctxt *ctxt) ctxt 3974 arch/x86/kvm/emulate.c if (emulator_bad_iopl(ctxt)) ctxt 3975 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3977 arch/x86/kvm/emulate.c ctxt->eflags &= ~X86_EFLAGS_IF; ctxt 3981 arch/x86/kvm/emulate.c static int em_sti(struct x86_emulate_ctxt *ctxt) ctxt 3983 arch/x86/kvm/emulate.c if (emulator_bad_iopl(ctxt)) ctxt 3984 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 3986 arch/x86/kvm/emulate.c ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; ctxt 3987 arch/x86/kvm/emulate.c ctxt->eflags |= X86_EFLAGS_IF; ctxt 3991 arch/x86/kvm/emulate.c static int em_cpuid(struct x86_emulate_ctxt *ctxt) ctxt 3996 arch/x86/kvm/emulate.c ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr); ctxt 3998 arch/x86/kvm/emulate.c ctxt->ops->cpl(ctxt)) { ctxt 3999 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4002 arch/x86/kvm/emulate.c eax = reg_read(ctxt, VCPU_REGS_RAX); ctxt 4003 arch/x86/kvm/emulate.c ecx = reg_read(ctxt, VCPU_REGS_RCX); ctxt 4004 arch/x86/kvm/emulate.c ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true); ctxt 4005 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RAX) = eax; ctxt 4006 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RBX) = ebx; ctxt 4007 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RCX) = ecx; ctxt 4008 arch/x86/kvm/emulate.c *reg_write(ctxt, VCPU_REGS_RDX) = edx; ctxt 4012 arch/x86/kvm/emulate.c static int em_sahf(struct x86_emulate_ctxt *ctxt) ctxt 4018 arch/x86/kvm/emulate.c flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; ctxt 4020 arch/x86/kvm/emulate.c ctxt->eflags &= ~0xffUL; ctxt 4021 arch/x86/kvm/emulate.c ctxt->eflags |= flags | X86_EFLAGS_FIXED; ctxt 4025 arch/x86/kvm/emulate.c static int em_lahf(struct x86_emulate_ctxt *ctxt) ctxt 4027 arch/x86/kvm/emulate.c *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; ctxt 4028 arch/x86/kvm/emulate.c *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; ctxt 
4032 arch/x86/kvm/emulate.c static int em_bswap(struct x86_emulate_ctxt *ctxt) ctxt 4034 arch/x86/kvm/emulate.c switch (ctxt->op_bytes) { ctxt 4037 arch/x86/kvm/emulate.c asm("bswap %0" : "+r"(ctxt->dst.val)); ctxt 4041 arch/x86/kvm/emulate.c asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); ctxt 4047 arch/x86/kvm/emulate.c static int em_clflush(struct x86_emulate_ctxt *ctxt) ctxt 4053 arch/x86/kvm/emulate.c static int em_movsxd(struct x86_emulate_ctxt *ctxt) ctxt 4055 arch/x86/kvm/emulate.c ctxt->dst.val = (s32) ctxt->src.val; ctxt 4059 arch/x86/kvm/emulate.c static int check_fxsr(struct x86_emulate_ctxt *ctxt) ctxt 4063 arch/x86/kvm/emulate.c ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); ctxt 4065 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 4067 arch/x86/kvm/emulate.c if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) ctxt 4068 arch/x86/kvm/emulate.c return emulate_nm(ctxt); ctxt 4074 arch/x86/kvm/emulate.c if (ctxt->mode >= X86EMUL_MODE_PROT64) ctxt 4089 arch/x86/kvm/emulate.c static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt) ctxt 4092 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt 4095 arch/x86/kvm/emulate.c cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR; ctxt 4117 arch/x86/kvm/emulate.c static int em_fxsave(struct x86_emulate_ctxt *ctxt) ctxt 4122 arch/x86/kvm/emulate.c rc = check_fxsr(ctxt); ctxt 4135 arch/x86/kvm/emulate.c return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, ctxt 4136 arch/x86/kvm/emulate.c fxstate_size(ctxt)); ctxt 4159 arch/x86/kvm/emulate.c static int em_fxrstor(struct x86_emulate_ctxt *ctxt) ctxt 4165 arch/x86/kvm/emulate.c rc = check_fxsr(ctxt); ctxt 4169 arch/x86/kvm/emulate.c size = fxstate_size(ctxt); ctxt 4170 arch/x86/kvm/emulate.c rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); ctxt 4183 arch/x86/kvm/emulate.c rc = emulate_gp(ctxt, 0); ctxt 4196 arch/x86/kvm/emulate.c static int em_xsetbv(struct x86_emulate_ctxt *ctxt) ctxt 4200 arch/x86/kvm/emulate.c eax = reg_read(ctxt, VCPU_REGS_RAX); ctxt 4201 arch/x86/kvm/emulate.c edx = reg_read(ctxt, VCPU_REGS_RDX); ctxt 4202 arch/x86/kvm/emulate.c ecx = reg_read(ctxt, VCPU_REGS_RCX); ctxt 4204 arch/x86/kvm/emulate.c if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax)) ctxt 4205 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4222 arch/x86/kvm/emulate.c static int check_cr_read(struct x86_emulate_ctxt *ctxt) ctxt 4224 arch/x86/kvm/emulate.c if (!valid_cr(ctxt->modrm_reg)) ctxt 4225 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 4230 arch/x86/kvm/emulate.c static int check_cr_write(struct x86_emulate_ctxt *ctxt) ctxt 4232 arch/x86/kvm/emulate.c u64 new_val = ctxt->src.val64; ctxt 4233 arch/x86/kvm/emulate.c int cr = ctxt->modrm_reg; ctxt 4245 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 4248 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4255 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4257 arch/x86/kvm/emulate.c cr4 = ctxt->ops->get_cr(ctxt, 4); ctxt 4258 arch/x86/kvm/emulate.c ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); ctxt 4262 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4269 arch/x86/kvm/emulate.c ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); ctxt 4276 arch/x86/kvm/emulate.c if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, ctxt 4282 arch/x86/kvm/emulate.c if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE) ctxt 4287 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4292 arch/x86/kvm/emulate.c ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); ctxt 4295 
arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4304 arch/x86/kvm/emulate.c static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) ctxt 4308 arch/x86/kvm/emulate.c ctxt->ops->get_dr(ctxt, 7, &dr7); ctxt 4314 arch/x86/kvm/emulate.c static int check_dr_read(struct x86_emulate_ctxt *ctxt) ctxt 4316 arch/x86/kvm/emulate.c int dr = ctxt->modrm_reg; ctxt 4320 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 4322 arch/x86/kvm/emulate.c cr4 = ctxt->ops->get_cr(ctxt, 4); ctxt 4324 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 4326 arch/x86/kvm/emulate.c if (check_dr7_gd(ctxt)) { ctxt 4329 arch/x86/kvm/emulate.c ctxt->ops->get_dr(ctxt, 6, &dr6); ctxt 4332 arch/x86/kvm/emulate.c ctxt->ops->set_dr(ctxt, 6, dr6); ctxt 4333 arch/x86/kvm/emulate.c return emulate_db(ctxt); ctxt 4339 arch/x86/kvm/emulate.c static int check_dr_write(struct x86_emulate_ctxt *ctxt) ctxt 4341 arch/x86/kvm/emulate.c u64 new_val = ctxt->src.val64; ctxt 4342 arch/x86/kvm/emulate.c int dr = ctxt->modrm_reg; ctxt 4345 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4347 arch/x86/kvm/emulate.c return check_dr_read(ctxt); ctxt 4350 arch/x86/kvm/emulate.c static int check_svme(struct x86_emulate_ctxt *ctxt) ctxt 4354 arch/x86/kvm/emulate.c ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); ctxt 4357 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 4362 arch/x86/kvm/emulate.c static int check_svme_pa(struct x86_emulate_ctxt *ctxt) ctxt 4364 arch/x86/kvm/emulate.c u64 rax = reg_read(ctxt, VCPU_REGS_RAX); ctxt 4368 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4370 arch/x86/kvm/emulate.c return check_svme(ctxt); ctxt 4373 arch/x86/kvm/emulate.c static int check_rdtsc(struct x86_emulate_ctxt *ctxt) ctxt 4375 arch/x86/kvm/emulate.c u64 cr4 = ctxt->ops->get_cr(ctxt, 4); ctxt 4377 arch/x86/kvm/emulate.c if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) ctxt 4378 arch/x86/kvm/emulate.c return emulate_ud(ctxt); ctxt 4383 arch/x86/kvm/emulate.c static int check_rdpmc(struct x86_emulate_ctxt *ctxt) ctxt 4385 arch/x86/kvm/emulate.c u64 cr4 = ctxt->ops->get_cr(ctxt, 4); ctxt 4386 arch/x86/kvm/emulate.c u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); ctxt 4395 arch/x86/kvm/emulate.c if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || ctxt 4396 arch/x86/kvm/emulate.c ctxt->ops->check_pmc(ctxt, rcx)) ctxt 4397 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4402 arch/x86/kvm/emulate.c static int check_perm_in(struct x86_emulate_ctxt *ctxt) ctxt 4404 arch/x86/kvm/emulate.c ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); ctxt 4405 arch/x86/kvm/emulate.c if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes)) ctxt 4406 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4411 arch/x86/kvm/emulate.c static int check_perm_out(struct x86_emulate_ctxt *ctxt) ctxt 4413 arch/x86/kvm/emulate.c ctxt->src.bytes = min(ctxt->src.bytes, 4u); ctxt 4414 arch/x86/kvm/emulate.c if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes)) ctxt 4415 arch/x86/kvm/emulate.c return emulate_gp(ctxt, 0); ctxt 4967 arch/x86/kvm/emulate.c static unsigned imm_size(struct x86_emulate_ctxt *ctxt) ctxt 4971 arch/x86/kvm/emulate.c size = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; ctxt 4977 arch/x86/kvm/emulate.c static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, ctxt 4984 arch/x86/kvm/emulate.c op->addr.mem.ea = ctxt->_eip; ctxt 4988 arch/x86/kvm/emulate.c op->val = insn_fetch(s8, ctxt); ctxt 4991 arch/x86/kvm/emulate.c op->val = insn_fetch(s16, ctxt); ctxt 4994 arch/x86/kvm/emulate.c op->val = insn_fetch(s32, ctxt); ctxt 4997 arch/x86/kvm/emulate.c op->val = insn_fetch(s64, ctxt); ctxt 5017 arch/x86/kvm/emulate.c static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, ctxt 5024 arch/x86/kvm/emulate.c decode_register_operand(ctxt, op); ctxt 5027 arch/x86/kvm/emulate.c rc = decode_imm(ctxt, op, 1, false); ctxt 5030 arch/x86/kvm/emulate.c ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; ctxt 5032 arch/x86/kvm/emulate.c *op = ctxt->memop; ctxt 5033 arch/x86/kvm/emulate.c ctxt->memopp = op; ctxt 5034 arch/x86/kvm/emulate.c if (ctxt->d & BitOp) ctxt 5035 arch/x86/kvm/emulate.c fetch_bit_operand(ctxt); ctxt 5039 arch/x86/kvm/emulate.c ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; ctxt 5043 arch/x86/kvm/emulate.c op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; ctxt 5044 arch/x86/kvm/emulate.c op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); ctxt 5050 arch/x86/kvm/emulate.c op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes; ctxt 5051 arch/x86/kvm/emulate.c op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); ctxt 5056 arch/x86/kvm/emulate.c if (ctxt->d & ByteOp) { ctxt 5061 arch/x86/kvm/emulate.c op->bytes = ctxt->op_bytes; ctxt 5062 arch/x86/kvm/emulate.c op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); ctxt 5068 arch/x86/kvm/emulate.c op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; ctxt 5070 arch/x86/kvm/emulate.c register_address(ctxt, VCPU_REGS_RDI); ctxt 5078 arch/x86/kvm/emulate.c op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); ctxt 5084 arch/x86/kvm/emulate.c op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; ctxt 5087 arch/x86/kvm/emulate.c rc = decode_imm(ctxt, op, 1, true); ctxt 5095 arch/x86/kvm/emulate.c rc = decode_imm(ctxt, op, imm_size(ctxt), true); ctxt 5098 arch/x86/kvm/emulate.c rc = decode_imm(ctxt, op, ctxt->op_bytes, true); ctxt 5101 arch/x86/kvm/emulate.c ctxt->memop.bytes = 1; ctxt 5102 arch/x86/kvm/emulate.c if (ctxt->memop.type == OP_REG) { ctxt 5103 arch/x86/kvm/emulate.c ctxt->memop.addr.reg = decode_register(ctxt, ctxt 5104 arch/x86/kvm/emulate.c ctxt->modrm_rm, true); ctxt 5105 arch/x86/kvm/emulate.c fetch_register_operand(&ctxt->memop); ctxt 5109 arch/x86/kvm/emulate.c ctxt->memop.bytes = 2; ctxt 5112 arch/x86/kvm/emulate.c ctxt->memop.bytes = 4; ctxt 5115 arch/x86/kvm/emulate.c rc = decode_imm(ctxt, op, 2, false); ctxt 5118 arch/x86/kvm/emulate.c rc = decode_imm(ctxt, op, imm_size(ctxt), false); ctxt 5122 arch/x86/kvm/emulate.c op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; ctxt 5124 arch/x86/kvm/emulate.c register_address(ctxt, VCPU_REGS_RSI); ctxt 5125 arch/x86/kvm/emulate.c op->addr.mem.seg = ctxt->seg_override; ctxt 5131 arch/x86/kvm/emulate.c op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; ctxt 5133 arch/x86/kvm/emulate.c address_mask(ctxt, ctxt 5134 arch/x86/kvm/emulate.c reg_read(ctxt, VCPU_REGS_RBX) + ctxt 5135 arch/x86/kvm/emulate.c (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); ctxt 5136 arch/x86/kvm/emulate.c op->addr.mem.seg = ctxt->seg_override; ctxt 5141 arch/x86/kvm/emulate.c op->addr.mem.ea = ctxt->_eip; ctxt 5142 arch/x86/kvm/emulate.c op->bytes = ctxt->op_bytes + 2; ctxt 5143 arch/x86/kvm/emulate.c insn_fetch_arr(op->valptr, op->bytes, ctxt); ctxt 5146 arch/x86/kvm/emulate.c ctxt->memop.bytes = ctxt->op_bytes + 2; ctxt 5183 arch/x86/kvm/emulate.c int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) ctxt 5186 arch/x86/kvm/emulate.c int mode = ctxt->mode; ctxt 5194 arch/x86/kvm/emulate.c ctxt->memop.type = OP_NONE; ctxt 5195 arch/x86/kvm/emulate.c ctxt->memopp = NULL; ctxt 5196 arch/x86/kvm/emulate.c ctxt->_eip = ctxt->eip; ctxt 5197 arch/x86/kvm/emulate.c ctxt->fetch.ptr = ctxt->fetch.data; ctxt 5198 arch/x86/kvm/emulate.c ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt 5199 arch/x86/kvm/emulate.c ctxt->opcode_len = 1; ctxt 5200 arch/x86/kvm/emulate.c ctxt->intercept = x86_intercept_none; ctxt 5202 arch/x86/kvm/emulate.c memcpy(ctxt->fetch.data, insn, insn_len); ctxt 5204 arch/x86/kvm/emulate.c rc = __do_insn_fetch_bytes(ctxt, 1); ctxt 5213 arch/x86/kvm/emulate.c ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS); ctxt 5233 arch/x86/kvm/emulate.c ctxt->op_bytes = def_op_bytes; ctxt 5234 arch/x86/kvm/emulate.c ctxt->ad_bytes = def_ad_bytes; ctxt 5238 arch/x86/kvm/emulate.c switch (ctxt->b = insn_fetch(u8, ctxt)) { ctxt 5242 arch/x86/kvm/emulate.c ctxt->op_bytes = def_op_bytes ^ 6; ctxt 5247 arch/x86/kvm/emulate.c ctxt->ad_bytes = def_ad_bytes ^ 12; ctxt 5250 arch/x86/kvm/emulate.c ctxt->ad_bytes = def_ad_bytes ^ 6; ctxt 5254 arch/x86/kvm/emulate.c ctxt->seg_override = VCPU_SREG_ES; ctxt 5258 arch/x86/kvm/emulate.c ctxt->seg_override = VCPU_SREG_CS; ctxt 5262 arch/x86/kvm/emulate.c ctxt->seg_override = VCPU_SREG_SS; ctxt 5266 arch/x86/kvm/emulate.c ctxt->seg_override = VCPU_SREG_DS; ctxt 5270 arch/x86/kvm/emulate.c ctxt->seg_override = VCPU_SREG_FS; ctxt 5274 arch/x86/kvm/emulate.c ctxt->seg_override = VCPU_SREG_GS; ctxt 5279 arch/x86/kvm/emulate.c ctxt->rex_prefix = ctxt->b; ctxt 5282 arch/x86/kvm/emulate.c ctxt->lock_prefix = 1; ctxt 5286 arch/x86/kvm/emulate.c ctxt->rep_prefix = ctxt->b; ctxt 5294 arch/x86/kvm/emulate.c ctxt->rex_prefix = 0; ctxt 5300 arch/x86/kvm/emulate.c if (ctxt->rex_prefix & 8) ctxt 5301 arch/x86/kvm/emulate.c ctxt->op_bytes = 8; /* REX.W */ ctxt 5304 arch/x86/kvm/emulate.c opcode = opcode_table[ctxt->b]; ctxt 5306 arch/x86/kvm/emulate.c if (ctxt->b == 0x0f) { ctxt 5307 arch/x86/kvm/emulate.c ctxt->opcode_len = 2; ctxt 5308 arch/x86/kvm/emulate.c ctxt->b = insn_fetch(u8, ctxt); ctxt 5309 arch/x86/kvm/emulate.c opcode = twobyte_table[ctxt->b]; ctxt 5312 arch/x86/kvm/emulate.c if (ctxt->b == 0x38) { ctxt 5313 arch/x86/kvm/emulate.c ctxt->opcode_len = 3; ctxt 5314 arch/x86/kvm/emulate.c ctxt->b = insn_fetch(u8, ctxt); ctxt 5315 arch/x86/kvm/emulate.c opcode = opcode_map_0f_38[ctxt->b]; ctxt 5318 arch/x86/kvm/emulate.c ctxt->d = opcode.flags; ctxt 5320 arch/x86/kvm/emulate.c if (ctxt->d & ModRM) ctxt 5321 arch/x86/kvm/emulate.c ctxt->modrm = insn_fetch(u8, ctxt); ctxt 5324 arch/x86/kvm/emulate.c if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && ctxt 5325 arch/x86/kvm/emulate.c (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) { ctxt 5326 
arch/x86/kvm/emulate.c ctxt->d = NotImpl; ctxt 5329 arch/x86/kvm/emulate.c while (ctxt->d & GroupMask) { ctxt 5330 arch/x86/kvm/emulate.c switch (ctxt->d & GroupMask) { ctxt 5332 arch/x86/kvm/emulate.c goffset = (ctxt->modrm >> 3) & 7; ctxt 5336 arch/x86/kvm/emulate.c goffset = (ctxt->modrm >> 3) & 7; ctxt 5337 arch/x86/kvm/emulate.c if ((ctxt->modrm >> 6) == 3) ctxt 5343 arch/x86/kvm/emulate.c goffset = ctxt->modrm & 7; ctxt 5347 arch/x86/kvm/emulate.c if (ctxt->rep_prefix && op_prefix) ctxt 5349 arch/x86/kvm/emulate.c simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; ctxt 5358 arch/x86/kvm/emulate.c if (ctxt->modrm > 0xbf) { ctxt 5361 arch/x86/kvm/emulate.c ctxt->modrm - 0xc0, size); ctxt 5365 arch/x86/kvm/emulate.c opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; ctxt 5369 arch/x86/kvm/emulate.c if ((ctxt->modrm >> 6) == 3) ctxt 5375 arch/x86/kvm/emulate.c if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt 5384 arch/x86/kvm/emulate.c ctxt->d &= ~(u64)GroupMask; ctxt 5385 arch/x86/kvm/emulate.c ctxt->d |= opcode.flags; ctxt 5389 arch/x86/kvm/emulate.c if (ctxt->d == 0) ctxt 5392 arch/x86/kvm/emulate.c ctxt->execute = opcode.u.execute; ctxt 5394 arch/x86/kvm/emulate.c if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) ctxt 5397 arch/x86/kvm/emulate.c if (unlikely(ctxt->d & ctxt 5404 arch/x86/kvm/emulate.c ctxt->check_perm = opcode.check_perm; ctxt 5405 arch/x86/kvm/emulate.c ctxt->intercept = opcode.intercept; ctxt 5407 arch/x86/kvm/emulate.c if (ctxt->d & NotImpl) ctxt 5411 arch/x86/kvm/emulate.c if (ctxt->op_bytes == 4 && (ctxt->d & Stack)) ctxt 5412 arch/x86/kvm/emulate.c ctxt->op_bytes = 8; ctxt 5413 arch/x86/kvm/emulate.c else if (ctxt->d & NearBranch) ctxt 5414 arch/x86/kvm/emulate.c ctxt->op_bytes = 8; ctxt 5417 arch/x86/kvm/emulate.c if (ctxt->d & Op3264) { ctxt 5419 arch/x86/kvm/emulate.c ctxt->op_bytes = 8; ctxt 5421 arch/x86/kvm/emulate.c ctxt->op_bytes = 4; ctxt 5424 arch/x86/kvm/emulate.c if ((ctxt->d & No16) && ctxt->op_bytes == 2) ctxt 5425 arch/x86/kvm/emulate.c ctxt->op_bytes = 4; ctxt 5427 arch/x86/kvm/emulate.c if (ctxt->d & Sse) ctxt 5428 arch/x86/kvm/emulate.c ctxt->op_bytes = 16; ctxt 5429 arch/x86/kvm/emulate.c else if (ctxt->d & Mmx) ctxt 5430 arch/x86/kvm/emulate.c ctxt->op_bytes = 8; ctxt 5434 arch/x86/kvm/emulate.c if (ctxt->d & ModRM) { ctxt 5435 arch/x86/kvm/emulate.c rc = decode_modrm(ctxt, &ctxt->memop); ctxt 5438 arch/x86/kvm/emulate.c ctxt->seg_override = ctxt->modrm_seg; ctxt 5440 arch/x86/kvm/emulate.c } else if (ctxt->d & MemAbs) ctxt 5441 arch/x86/kvm/emulate.c rc = decode_abs(ctxt, &ctxt->memop); ctxt 5446 arch/x86/kvm/emulate.c ctxt->seg_override = VCPU_SREG_DS; ctxt 5448 arch/x86/kvm/emulate.c ctxt->memop.addr.mem.seg = ctxt->seg_override; ctxt 5454 arch/x86/kvm/emulate.c rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); ctxt 5462 arch/x86/kvm/emulate.c rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); ctxt 5467 arch/x86/kvm/emulate.c rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); ctxt 5469 arch/x86/kvm/emulate.c if (ctxt->rip_relative && likely(ctxt->memopp)) ctxt 5470 arch/x86/kvm/emulate.c ctxt->memopp->addr.mem.ea = address_mask(ctxt, ctxt 5471 arch/x86/kvm/emulate.c ctxt->memopp->addr.mem.ea + ctxt->_eip); ctxt 5475 arch/x86/kvm/emulate.c ctxt->have_exception = true; ctxt 5479 arch/x86/kvm/emulate.c bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt) ctxt 5481 arch/x86/kvm/emulate.c return ctxt->d & PageTable; ctxt 5484 arch/x86/kvm/emulate.c static bool 
string_insn_completed(struct x86_emulate_ctxt *ctxt) ctxt 5493 arch/x86/kvm/emulate.c if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || ctxt 5494 arch/x86/kvm/emulate.c (ctxt->b == 0xae) || (ctxt->b == 0xaf)) ctxt 5495 arch/x86/kvm/emulate.c && (((ctxt->rep_prefix == REPE_PREFIX) && ctxt 5496 arch/x86/kvm/emulate.c ((ctxt->eflags & X86_EFLAGS_ZF) == 0)) ctxt 5497 arch/x86/kvm/emulate.c || ((ctxt->rep_prefix == REPNE_PREFIX) && ctxt 5498 arch/x86/kvm/emulate.c ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF)))) ctxt 5504 arch/x86/kvm/emulate.c static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) ctxt 5513 arch/x86/kvm/emulate.c return emulate_exception(ctxt, MF_VECTOR, 0, false); ctxt 5518 arch/x86/kvm/emulate.c static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, ctxt 5522 arch/x86/kvm/emulate.c read_mmx_reg(ctxt, &op->mm_val, op->addr.mm); ctxt 5525 arch/x86/kvm/emulate.c static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) ctxt 5527 arch/x86/kvm/emulate.c ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; ctxt 5529 arch/x86/kvm/emulate.c if (!(ctxt->d & ByteOp)) ctxt 5530 arch/x86/kvm/emulate.c fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; ctxt 5533 arch/x86/kvm/emulate.c : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), ctxt 5535 arch/x86/kvm/emulate.c : "c"(ctxt->src2.val)); ctxt 5537 arch/x86/kvm/emulate.c ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); ctxt 5539 arch/x86/kvm/emulate.c return emulate_de(ctxt); ctxt 5543 arch/x86/kvm/emulate.c void init_decode_cache(struct x86_emulate_ctxt *ctxt) ctxt 5545 arch/x86/kvm/emulate.c memset(&ctxt->rip_relative, 0, ctxt 5546 arch/x86/kvm/emulate.c (void *)&ctxt->modrm - (void *)&ctxt->rip_relative); ctxt 5548 arch/x86/kvm/emulate.c ctxt->io_read.pos = 0; ctxt 5549 arch/x86/kvm/emulate.c ctxt->io_read.end = 0; ctxt 5550 arch/x86/kvm/emulate.c ctxt->mem_read.end = 0; ctxt 5553 arch/x86/kvm/emulate.c int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ctxt 5555 arch/x86/kvm/emulate.c const struct x86_emulate_ops *ops = ctxt->ops; ctxt 5557 arch/x86/kvm/emulate.c int saved_dst_type = ctxt->dst.type; ctxt 5560 arch/x86/kvm/emulate.c ctxt->mem_read.pos = 0; ctxt 5563 arch/x86/kvm/emulate.c if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { ctxt 5564 arch/x86/kvm/emulate.c rc = emulate_ud(ctxt); ctxt 5568 arch/x86/kvm/emulate.c if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { ctxt 5569 arch/x86/kvm/emulate.c rc = emulate_ud(ctxt); ctxt 5573 arch/x86/kvm/emulate.c emul_flags = ctxt->ops->get_hflags(ctxt); ctxt 5574 arch/x86/kvm/emulate.c if (unlikely(ctxt->d & ctxt 5576 arch/x86/kvm/emulate.c if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || ctxt 5577 arch/x86/kvm/emulate.c (ctxt->d & Undefined)) { ctxt 5578 arch/x86/kvm/emulate.c rc = emulate_ud(ctxt); ctxt 5582 arch/x86/kvm/emulate.c if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) ctxt 5583 arch/x86/kvm/emulate.c || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { ctxt 5584 arch/x86/kvm/emulate.c rc = emulate_ud(ctxt); ctxt 5588 arch/x86/kvm/emulate.c if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { ctxt 5589 arch/x86/kvm/emulate.c rc = emulate_nm(ctxt); ctxt 5593 arch/x86/kvm/emulate.c if (ctxt->d & Mmx) { ctxt 5594 arch/x86/kvm/emulate.c rc = flush_pending_x87_faults(ctxt); ctxt 5601 arch/x86/kvm/emulate.c fetch_possible_mmx_operand(ctxt, &ctxt->src); ctxt 5602 arch/x86/kvm/emulate.c 
fetch_possible_mmx_operand(ctxt, &ctxt->src2); ctxt 5603 arch/x86/kvm/emulate.c if (!(ctxt->d & Mov)) ctxt 5604 arch/x86/kvm/emulate.c fetch_possible_mmx_operand(ctxt, &ctxt->dst); ctxt 5607 arch/x86/kvm/emulate.c if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { ctxt 5608 arch/x86/kvm/emulate.c rc = emulator_check_intercept(ctxt, ctxt->intercept, ctxt 5615 arch/x86/kvm/emulate.c if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { ctxt 5616 arch/x86/kvm/emulate.c rc = emulate_ud(ctxt); ctxt 5621 arch/x86/kvm/emulate.c if ((ctxt->d & Priv) && ops->cpl(ctxt)) { ctxt 5622 arch/x86/kvm/emulate.c if (ctxt->d & PrivUD) ctxt 5623 arch/x86/kvm/emulate.c rc = emulate_ud(ctxt); ctxt 5625 arch/x86/kvm/emulate.c rc = emulate_gp(ctxt, 0); ctxt 5630 arch/x86/kvm/emulate.c if (ctxt->d & CheckPerm) { ctxt 5631 arch/x86/kvm/emulate.c rc = ctxt->check_perm(ctxt); ctxt 5636 arch/x86/kvm/emulate.c if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { ctxt 5637 arch/x86/kvm/emulate.c rc = emulator_check_intercept(ctxt, ctxt->intercept, ctxt 5643 arch/x86/kvm/emulate.c if (ctxt->rep_prefix && (ctxt->d & String)) { ctxt 5645 arch/x86/kvm/emulate.c if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { ctxt 5646 arch/x86/kvm/emulate.c string_registers_quirk(ctxt); ctxt 5647 arch/x86/kvm/emulate.c ctxt->eip = ctxt->_eip; ctxt 5648 arch/x86/kvm/emulate.c ctxt->eflags &= ~X86_EFLAGS_RF; ctxt 5654 arch/x86/kvm/emulate.c if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { ctxt 5655 arch/x86/kvm/emulate.c rc = segmented_read(ctxt, ctxt->src.addr.mem, ctxt 5656 arch/x86/kvm/emulate.c ctxt->src.valptr, ctxt->src.bytes); ctxt 5659 arch/x86/kvm/emulate.c ctxt->src.orig_val64 = ctxt->src.val64; ctxt 5662 arch/x86/kvm/emulate.c if (ctxt->src2.type == OP_MEM) { ctxt 5663 arch/x86/kvm/emulate.c rc = segmented_read(ctxt, ctxt->src2.addr.mem, ctxt 5664 arch/x86/kvm/emulate.c &ctxt->src2.val, ctxt->src2.bytes); ctxt 5669 arch/x86/kvm/emulate.c if ((ctxt->d & DstMask) == ImplicitOps) ctxt 5673 arch/x86/kvm/emulate.c if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { ctxt 5675 arch/x86/kvm/emulate.c rc = segmented_read(ctxt, ctxt->dst.addr.mem, ctxt 5676 arch/x86/kvm/emulate.c &ctxt->dst.val, ctxt->dst.bytes); ctxt 5678 arch/x86/kvm/emulate.c if (!(ctxt->d & NoWrite) && ctxt 5680 arch/x86/kvm/emulate.c ctxt->exception.vector == PF_VECTOR) ctxt 5681 arch/x86/kvm/emulate.c ctxt->exception.error_code |= PFERR_WRITE_MASK; ctxt 5686 arch/x86/kvm/emulate.c ctxt->dst.orig_val64 = ctxt->dst.val64; ctxt 5690 arch/x86/kvm/emulate.c if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { ctxt 5691 arch/x86/kvm/emulate.c rc = emulator_check_intercept(ctxt, ctxt->intercept, ctxt 5697 arch/x86/kvm/emulate.c if (ctxt->rep_prefix && (ctxt->d & String)) ctxt 5698 arch/x86/kvm/emulate.c ctxt->eflags |= X86_EFLAGS_RF; ctxt 5700 arch/x86/kvm/emulate.c ctxt->eflags &= ~X86_EFLAGS_RF; ctxt 5702 arch/x86/kvm/emulate.c if (ctxt->execute) { ctxt 5703 arch/x86/kvm/emulate.c if (ctxt->d & Fastop) { ctxt 5704 arch/x86/kvm/emulate.c void (*fop)(struct fastop *) = (void *)ctxt->execute; ctxt 5705 arch/x86/kvm/emulate.c rc = fastop(ctxt, fop); ctxt 5710 arch/x86/kvm/emulate.c rc = ctxt->execute(ctxt); ctxt 5716 arch/x86/kvm/emulate.c if (ctxt->opcode_len == 2) ctxt 5718 arch/x86/kvm/emulate.c else if (ctxt->opcode_len == 3) ctxt 5721 arch/x86/kvm/emulate.c switch (ctxt->b) { ctxt 5723 arch/x86/kvm/emulate.c if (test_cc(ctxt->b, ctxt->eflags)) ctxt 5724 arch/x86/kvm/emulate.c rc = 
jmp_rel(ctxt, ctxt->src.val); ctxt 5727 arch/x86/kvm/emulate.c ctxt->dst.val = ctxt->src.addr.mem.ea; ctxt 5730 arch/x86/kvm/emulate.c if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) ctxt 5731 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; ctxt 5733 arch/x86/kvm/emulate.c rc = em_xchg(ctxt); ctxt 5736 arch/x86/kvm/emulate.c switch (ctxt->op_bytes) { ctxt 5737 arch/x86/kvm/emulate.c case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; ctxt 5738 arch/x86/kvm/emulate.c case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; ctxt 5739 arch/x86/kvm/emulate.c case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; ctxt 5743 arch/x86/kvm/emulate.c rc = emulate_int(ctxt, 3); ctxt 5746 arch/x86/kvm/emulate.c rc = emulate_int(ctxt, ctxt->src.val); ctxt 5749 arch/x86/kvm/emulate.c if (ctxt->eflags & X86_EFLAGS_OF) ctxt 5750 arch/x86/kvm/emulate.c rc = emulate_int(ctxt, 4); ctxt 5754 arch/x86/kvm/emulate.c rc = jmp_rel(ctxt, ctxt->src.val); ctxt 5755 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; /* Disable writeback. */ ctxt 5758 arch/x86/kvm/emulate.c ctxt->ops->halt(ctxt); ctxt 5762 arch/x86/kvm/emulate.c ctxt->eflags ^= X86_EFLAGS_CF; ctxt 5765 arch/x86/kvm/emulate.c ctxt->eflags &= ~X86_EFLAGS_CF; ctxt 5768 arch/x86/kvm/emulate.c ctxt->eflags |= X86_EFLAGS_CF; ctxt 5771 arch/x86/kvm/emulate.c ctxt->eflags &= ~X86_EFLAGS_DF; ctxt 5774 arch/x86/kvm/emulate.c ctxt->eflags |= X86_EFLAGS_DF; ctxt 5784 arch/x86/kvm/emulate.c if (ctxt->d & SrcWrite) { ctxt 5785 arch/x86/kvm/emulate.c BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); ctxt 5786 arch/x86/kvm/emulate.c rc = writeback(ctxt, &ctxt->src); ctxt 5790 arch/x86/kvm/emulate.c if (!(ctxt->d & NoWrite)) { ctxt 5791 arch/x86/kvm/emulate.c rc = writeback(ctxt, &ctxt->dst); ctxt 5800 arch/x86/kvm/emulate.c ctxt->dst.type = saved_dst_type; ctxt 5802 arch/x86/kvm/emulate.c if ((ctxt->d & SrcMask) == SrcSI) ctxt 5803 arch/x86/kvm/emulate.c string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src); ctxt 5805 arch/x86/kvm/emulate.c if ((ctxt->d & DstMask) == DstDI) ctxt 5806 arch/x86/kvm/emulate.c string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst); ctxt 5808 arch/x86/kvm/emulate.c if (ctxt->rep_prefix && (ctxt->d & String)) { ctxt 5810 arch/x86/kvm/emulate.c struct read_cache *r = &ctxt->io_read; ctxt 5811 arch/x86/kvm/emulate.c if ((ctxt->d & SrcMask) == SrcSI) ctxt 5812 arch/x86/kvm/emulate.c count = ctxt->src.count; ctxt 5814 arch/x86/kvm/emulate.c count = ctxt->dst.count; ctxt 5815 arch/x86/kvm/emulate.c register_address_increment(ctxt, VCPU_REGS_RCX, -count); ctxt 5817 arch/x86/kvm/emulate.c if (!string_insn_completed(ctxt)) { ctxt 5822 arch/x86/kvm/emulate.c if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) && ctxt 5829 arch/x86/kvm/emulate.c ctxt->mem_read.end = 0; ctxt 5830 arch/x86/kvm/emulate.c writeback_registers(ctxt); ctxt 5835 arch/x86/kvm/emulate.c ctxt->eflags &= ~X86_EFLAGS_RF; ctxt 5838 arch/x86/kvm/emulate.c ctxt->eip = ctxt->_eip; ctxt 5842 arch/x86/kvm/emulate.c WARN_ON(ctxt->exception.vector > 0x1f); ctxt 5843 arch/x86/kvm/emulate.c ctxt->have_exception = true; ctxt 5849 arch/x86/kvm/emulate.c writeback_registers(ctxt); ctxt 5854 arch/x86/kvm/emulate.c switch (ctxt->b) { ctxt 5856 arch/x86/kvm/emulate.c (ctxt->ops->wbinvd)(ctxt); ctxt 5864 arch/x86/kvm/emulate.c ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); ctxt 5867 arch/x86/kvm/emulate.c ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); ctxt 5870 arch/x86/kvm/emulate.c if (test_cc(ctxt->b, ctxt->eflags)) ctxt 5871 arch/x86/kvm/emulate.c ctxt->dst.val = 
ctxt->src.val; ctxt 5872 arch/x86/kvm/emulate.c else if (ctxt->op_bytes != 4) ctxt 5873 arch/x86/kvm/emulate.c ctxt->dst.type = OP_NONE; /* no writeback */ ctxt 5876 arch/x86/kvm/emulate.c if (test_cc(ctxt->b, ctxt->eflags)) ctxt 5877 arch/x86/kvm/emulate.c rc = jmp_rel(ctxt, ctxt->src.val); ctxt 5880 arch/x86/kvm/emulate.c ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); ctxt 5883 arch/x86/kvm/emulate.c ctxt->dst.bytes = ctxt->op_bytes; ctxt 5884 arch/x86/kvm/emulate.c ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val ctxt 5885 arch/x86/kvm/emulate.c : (u16) ctxt->src.val; ctxt 5888 arch/x86/kvm/emulate.c ctxt->dst.bytes = ctxt->op_bytes; ctxt 5889 arch/x86/kvm/emulate.c ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : ctxt 5890 arch/x86/kvm/emulate.c (s16) ctxt->src.val; ctxt 5907 arch/x86/kvm/emulate.c void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt) ctxt 5909 arch/x86/kvm/emulate.c invalidate_registers(ctxt); ctxt 5912 arch/x86/kvm/emulate.c void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) ctxt 5914 arch/x86/kvm/emulate.c writeback_registers(ctxt); ctxt 5917 arch/x86/kvm/emulate.c bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt) ctxt 5919 arch/x86/kvm/emulate.c if (ctxt->rep_prefix && (ctxt->d & String)) ctxt 5922 arch/x86/kvm/emulate.c if (ctxt->d & TwoMemOp) ctxt 7139 arch/x86/kvm/vmx/vmx.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; ctxt 7148 arch/x86/kvm/vmx/vmx.c ctxt->exception.vector = UD_VECTOR; ctxt 7149 arch/x86/kvm/vmx/vmx.c ctxt->exception.error_code_valid = false; ctxt 81 arch/x86/kvm/x86.c #define emul_to_vcpu(ctxt) \ ctxt 82 arch/x86/kvm/x86.c container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) ctxt 229 arch/x86/kvm/x86.c static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); ctxt 5418 arch/x86/kvm/x86.c static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, ctxt 5422 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 5462 arch/x86/kvm/x86.c static int emulator_read_std(struct x86_emulate_ctxt *ctxt, ctxt 5466 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 5475 arch/x86/kvm/x86.c static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt, ctxt 5478 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 5515 arch/x86/kvm/x86.c static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, ctxt 5519 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 5703 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; ctxt 5713 arch/x86/kvm/x86.c emulator_can_use_gpa(ctxt) && ctxt 5745 arch/x86/kvm/x86.c static int emulator_read_write(struct x86_emulate_ctxt *ctxt, ctxt 5751 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 5772 arch/x86/kvm/x86.c if (ctxt->mode != X86EMUL_MODE_PROT64) ctxt 5799 arch/x86/kvm/x86.c static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, ctxt 5805 arch/x86/kvm/x86.c return emulator_read_write(ctxt, addr, val, bytes, ctxt 5809 arch/x86/kvm/x86.c static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, ctxt 5815 arch/x86/kvm/x86.c return emulator_read_write(ctxt, addr, (void *)val, bytes, ctxt 5829 arch/x86/kvm/x86.c static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, ctxt 5837 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 5889 arch/x86/kvm/x86.c return emulator_write_emulated(ctxt, addr, new, bytes, exception); ctxt 5935 arch/x86/kvm/x86.c static int 
emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, ctxt 5939 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 5959 arch/x86/kvm/x86.c static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, ctxt 5963 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 5975 arch/x86/kvm/x86.c static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) ctxt 5977 arch/x86/kvm/x86.c kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); ctxt 6007 arch/x86/kvm/x86.c static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) ctxt 6009 arch/x86/kvm/x86.c kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); ctxt 6012 arch/x86/kvm/x86.c static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, ctxt 6015 arch/x86/kvm/x86.c return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); ctxt 6018 arch/x86/kvm/x86.c static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, ctxt 6022 arch/x86/kvm/x86.c return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); ctxt 6030 arch/x86/kvm/x86.c static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) ctxt 6032 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 6059 arch/x86/kvm/x86.c static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) ctxt 6061 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 6088 arch/x86/kvm/x86.c static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) ctxt 6090 arch/x86/kvm/x86.c return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); ctxt 6093 arch/x86/kvm/x86.c static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) ctxt 6095 arch/x86/kvm/x86.c kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); ctxt 6098 arch/x86/kvm/x86.c static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) ctxt 6100 arch/x86/kvm/x86.c kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); ctxt 6103 arch/x86/kvm/x86.c static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) ctxt 6105 arch/x86/kvm/x86.c kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); ctxt 6108 arch/x86/kvm/x86.c static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) ctxt 6110 arch/x86/kvm/x86.c kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); ctxt 6114 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt, int seg) ctxt 6116 arch/x86/kvm/x86.c return get_segment_base(emul_to_vcpu(ctxt), seg); ctxt 6119 arch/x86/kvm/x86.c static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, ctxt 6125 arch/x86/kvm/x86.c kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); ctxt 6155 arch/x86/kvm/x86.c static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, ctxt 6159 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 6185 arch/x86/kvm/x86.c static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, ctxt 6188 arch/x86/kvm/x86.c return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); ctxt 6191 arch/x86/kvm/x86.c static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, ctxt 6194 arch/x86/kvm/x86.c return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data); ctxt 6197 arch/x86/kvm/x86.c static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt) ctxt 6199 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 6204 arch/x86/kvm/x86.c static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase) ctxt 6206 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 6211 arch/x86/kvm/x86.c static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, ctxt 6214 
arch/x86/kvm/x86.c return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc); ctxt 6217 arch/x86/kvm/x86.c static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, ctxt 6220 arch/x86/kvm/x86.c return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata); ctxt 6223 arch/x86/kvm/x86.c static void emulator_halt(struct x86_emulate_ctxt *ctxt) ctxt 6225 arch/x86/kvm/x86.c emul_to_vcpu(ctxt)->arch.halt_request = 1; ctxt 6228 arch/x86/kvm/x86.c static int emulator_intercept(struct x86_emulate_ctxt *ctxt, ctxt 6232 arch/x86/kvm/x86.c return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); ctxt 6235 arch/x86/kvm/x86.c static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, ctxt 6238 arch/x86/kvm/x86.c return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, check_limit); ctxt 6241 arch/x86/kvm/x86.c static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) ctxt 6243 arch/x86/kvm/x86.c return kvm_register_read(emul_to_vcpu(ctxt), reg); ctxt 6246 arch/x86/kvm/x86.c static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) ctxt 6248 arch/x86/kvm/x86.c kvm_register_write(emul_to_vcpu(ctxt), reg, val); ctxt 6251 arch/x86/kvm/x86.c static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) ctxt 6253 arch/x86/kvm/x86.c kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked); ctxt 6256 arch/x86/kvm/x86.c static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) ctxt 6258 arch/x86/kvm/x86.c return emul_to_vcpu(ctxt)->arch.hflags; ctxt 6261 arch/x86/kvm/x86.c static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags) ctxt 6263 arch/x86/kvm/x86.c emul_to_vcpu(ctxt)->arch.hflags = emul_flags; ctxt 6266 arch/x86/kvm/x86.c static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, ctxt 6269 arch/x86/kvm/x86.c return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate); ctxt 6272 arch/x86/kvm/x86.c static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt) ctxt 6274 arch/x86/kvm/x86.c kvm_smm_changed(emul_to_vcpu(ctxt)); ctxt 6277 arch/x86/kvm/x86.c static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr) ctxt 6279 arch/x86/kvm/x86.c return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr); ctxt 6347 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; ctxt 6348 arch/x86/kvm/x86.c if (ctxt->exception.vector == PF_VECTOR) ctxt 6349 arch/x86/kvm/x86.c return kvm_propagate_fault(vcpu, &ctxt->exception); ctxt 6351 arch/x86/kvm/x86.c if (ctxt->exception.error_code_valid) ctxt 6352 arch/x86/kvm/x86.c kvm_queue_exception_e(vcpu, ctxt->exception.vector, ctxt 6353 arch/x86/kvm/x86.c ctxt->exception.error_code); ctxt 6355 arch/x86/kvm/x86.c kvm_queue_exception(vcpu, ctxt->exception.vector); ctxt 6361 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; ctxt 6366 arch/x86/kvm/x86.c ctxt->eflags = kvm_get_rflags(vcpu); ctxt 6367 arch/x86/kvm/x86.c ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; ctxt 6369 arch/x86/kvm/x86.c ctxt->eip = kvm_rip_read(vcpu); ctxt 6370 arch/x86/kvm/x86.c ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : ctxt 6371 arch/x86/kvm/x86.c (ctxt->eflags & X86_EFLAGS_VM) ? 
X86EMUL_MODE_VM86 : ctxt 6379 arch/x86/kvm/x86.c init_decode_cache(ctxt); ctxt 6385 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; ctxt 6390 arch/x86/kvm/x86.c ctxt->op_bytes = 2; ctxt 6391 arch/x86/kvm/x86.c ctxt->ad_bytes = 2; ctxt 6392 arch/x86/kvm/x86.c ctxt->_eip = ctxt->eip + inc_eip; ctxt 6393 arch/x86/kvm/x86.c ret = emulate_int_real(ctxt, irq); ctxt 6398 arch/x86/kvm/x86.c ctxt->eip = ctxt->_eip; ctxt 6399 arch/x86/kvm/x86.c kvm_rip_write(vcpu, ctxt->eip); ctxt 6400 arch/x86/kvm/x86.c kvm_set_rflags(vcpu, ctxt->eflags); ctxt 6508 arch/x86/kvm/x86.c static bool retry_instruction(struct x86_emulate_ctxt *ctxt, ctxt 6511 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 6538 arch/x86/kvm/x86.c if (x86_page_table_writing_insn(ctxt)) ctxt 6541 arch/x86/kvm/x86.c if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) ctxt 6544 arch/x86/kvm/x86.c vcpu->arch.last_retry_eip = ctxt->eip; ctxt 6663 arch/x86/kvm/x86.c static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) ctxt 6665 arch/x86/kvm/x86.c switch (ctxt->opcode_len) { ctxt 6667 arch/x86/kvm/x86.c switch (ctxt->b) { ctxt 6684 arch/x86/kvm/x86.c switch (ctxt->b) { ctxt 6698 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; ctxt 6724 arch/x86/kvm/x86.c ctxt->interruptibility = 0; ctxt 6725 arch/x86/kvm/x86.c ctxt->have_exception = false; ctxt 6726 arch/x86/kvm/x86.c ctxt->exception.vector = -1; ctxt 6727 arch/x86/kvm/x86.c ctxt->perm_ok = false; ctxt 6729 arch/x86/kvm/x86.c ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; ctxt 6731 arch/x86/kvm/x86.c r = x86_decode_insn(ctxt, insn, insn_len); ctxt 6745 arch/x86/kvm/x86.c if (ctxt->have_exception) { ctxt 6750 arch/x86/kvm/x86.c WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || ctxt 6751 arch/x86/kvm/x86.c exception_type(ctxt->exception.vector) == EXCPT_TRAP); ctxt 6760 arch/x86/kvm/x86.c !is_vmware_backdoor_opcode(ctxt)) { ctxt 6771 arch/x86/kvm/x86.c kvm_rip_write(vcpu, ctxt->_eip); ctxt 6772 arch/x86/kvm/x86.c if (ctxt->eflags & X86_EFLAGS_RF) ctxt 6773 arch/x86/kvm/x86.c kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); ctxt 6777 arch/x86/kvm/x86.c if (retry_instruction(ctxt, cr2_or_gpa, emulation_type)) ctxt 6784 arch/x86/kvm/x86.c emulator_invalidate_register_cache(ctxt); ctxt 6789 arch/x86/kvm/x86.c ctxt->exception.address = cr2_or_gpa; ctxt 6791 arch/x86/kvm/x86.c r = x86_emulate_insn(ctxt); ctxt 6804 arch/x86/kvm/x86.c if (ctxt->have_exception) { ctxt 6831 arch/x86/kvm/x86.c toggle_interruptibility(vcpu, ctxt->interruptibility); ctxt 6833 arch/x86/kvm/x86.c if (!ctxt->have_exception || ctxt 6834 arch/x86/kvm/x86.c exception_type(ctxt->exception.vector) == EXCPT_TRAP) { ctxt 6835 arch/x86/kvm/x86.c kvm_rip_write(vcpu, ctxt->eip); ctxt 6836 arch/x86/kvm/x86.c if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) ctxt 6838 arch/x86/kvm/x86.c __kvm_set_rflags(vcpu, ctxt->eflags); ctxt 6847 arch/x86/kvm/x86.c if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) ctxt 7515 arch/x86/kvm/x86.c static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) ctxt 7517 arch/x86/kvm/x86.c struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); ctxt 7523 arch/x86/kvm/x86.c return emulator_write_emulated(ctxt, rip, instruction, 3, ctxt 7524 arch/x86/kvm/x86.c &ctxt->exception); ctxt 8820 arch/x86/kvm/x86.c struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; ctxt 8825 arch/x86/kvm/x86.c ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, ctxt 8834 arch/x86/kvm/x86.c 
kvm_rip_write(vcpu, ctxt->eip); ctxt 8835 arch/x86/kvm/x86.c kvm_set_rflags(vcpu, ctxt->eflags); ctxt 157 arch/x86/kvm/x86.h static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt) ctxt 159 arch/x86/kvm/x86.h return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48; ctxt 177 arch/x86/kvm/x86.h struct x86_emulate_ctxt *ctxt) ctxt 180 arch/x86/kvm/x86.h return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la; ctxt 37 arch/x86/power/cpu.c static void msr_save_context(struct saved_context *ctxt) ctxt 39 arch/x86/power/cpu.c struct saved_msr *msr = ctxt->saved_msrs.array; ctxt 40 arch/x86/power/cpu.c struct saved_msr *end = msr + ctxt->saved_msrs.num; ctxt 48 arch/x86/power/cpu.c static void msr_restore_context(struct saved_context *ctxt) ctxt 50 arch/x86/power/cpu.c struct saved_msr *msr = ctxt->saved_msrs.array; ctxt 51 arch/x86/power/cpu.c struct saved_msr *end = msr + ctxt->saved_msrs.num; ctxt 75 arch/x86/power/cpu.c static void __save_processor_state(struct saved_context *ctxt) ctxt 85 arch/x86/power/cpu.c store_idt(&ctxt->idt); ctxt 93 arch/x86/power/cpu.c ctxt->gdt_desc.size = GDT_SIZE - 1; ctxt 94 arch/x86/power/cpu.c ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id()); ctxt 96 arch/x86/power/cpu.c store_tr(ctxt->tr); ctxt 103 arch/x86/power/cpu.c savesegment(gs, ctxt->gs); ctxt 106 arch/x86/power/cpu.c savesegment(gs, ctxt->gs); ctxt 107 arch/x86/power/cpu.c savesegment(fs, ctxt->fs); ctxt 108 arch/x86/power/cpu.c savesegment(ds, ctxt->ds); ctxt 109 arch/x86/power/cpu.c savesegment(es, ctxt->es); ctxt 111 arch/x86/power/cpu.c rdmsrl(MSR_FS_BASE, ctxt->fs_base); ctxt 112 arch/x86/power/cpu.c rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); ctxt 113 arch/x86/power/cpu.c rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); ctxt 116 arch/x86/power/cpu.c rdmsrl(MSR_EFER, ctxt->efer); ctxt 122 arch/x86/power/cpu.c ctxt->cr0 = read_cr0(); ctxt 123 arch/x86/power/cpu.c ctxt->cr2 = read_cr2(); ctxt 124 arch/x86/power/cpu.c ctxt->cr3 = __read_cr3(); ctxt 125 arch/x86/power/cpu.c ctxt->cr4 = __read_cr4(); ctxt 126 arch/x86/power/cpu.c ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE, ctxt 127 arch/x86/power/cpu.c &ctxt->misc_enable); ctxt 128 arch/x86/power/cpu.c msr_save_context(ctxt); ctxt 194 arch/x86/power/cpu.c static void notrace __restore_processor_state(struct saved_context *ctxt) ctxt 196 arch/x86/power/cpu.c if (ctxt->misc_enable_saved) ctxt 197 arch/x86/power/cpu.c wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); ctxt 203 arch/x86/power/cpu.c if (ctxt->cr4) ctxt 204 arch/x86/power/cpu.c __write_cr4(ctxt->cr4); ctxt 207 arch/x86/power/cpu.c wrmsrl(MSR_EFER, ctxt->efer); ctxt 208 arch/x86/power/cpu.c __write_cr4(ctxt->cr4); ctxt 210 arch/x86/power/cpu.c write_cr3(ctxt->cr3); ctxt 211 arch/x86/power/cpu.c write_cr2(ctxt->cr2); ctxt 212 arch/x86/power/cpu.c write_cr0(ctxt->cr0); ctxt 215 arch/x86/power/cpu.c load_idt(&ctxt->idt); ctxt 230 arch/x86/power/cpu.c wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); ctxt 244 arch/x86/power/cpu.c loadsegment(ds, ctxt->es); ctxt 245 arch/x86/power/cpu.c loadsegment(es, ctxt->es); ctxt 246 arch/x86/power/cpu.c loadsegment(fs, ctxt->fs); ctxt 247 arch/x86/power/cpu.c load_gs_index(ctxt->gs); ctxt 254 arch/x86/power/cpu.c wrmsrl(MSR_FS_BASE, ctxt->fs_base); ctxt 255 arch/x86/power/cpu.c wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); ctxt 257 arch/x86/power/cpu.c loadsegment(gs, ctxt->gs); ctxt 265 arch/x86/power/cpu.c msr_restore_context(ctxt); ctxt 30 arch/x86/xen/pmu.c #define field_offset(ctxt, 
field) ((void *)((uintptr_t)ctxt + \ ctxt 31 arch/x86/xen/pmu.c (uintptr_t)ctxt->field)) ctxt 193 arch/x86/xen/pmu.c struct xen_pmu_intel_ctxt *ctxt; ctxt 203 arch/x86/xen/pmu.c ctxt = &xenpmu_data->pmu.c.intel; ctxt 207 arch/x86/xen/pmu.c reg = &ctxt->global_ovf_ctrl; ctxt 210 arch/x86/xen/pmu.c reg = &ctxt->global_status; ctxt 213 arch/x86/xen/pmu.c reg = &ctxt->global_ctrl; ctxt 216 arch/x86/xen/pmu.c reg = &ctxt->fixed_ctrl; ctxt 221 arch/x86/xen/pmu.c fix_counters = field_offset(ctxt, fixed_counters); ctxt 225 arch/x86/xen/pmu.c arch_cntr_pair = field_offset(ctxt, arch_counters); ctxt 229 arch/x86/xen/pmu.c arch_cntr_pair = field_offset(ctxt, arch_counters); ctxt 244 arch/x86/xen/pmu.c ctxt->global_status &= (~(*val)); ctxt 256 arch/x86/xen/pmu.c struct xen_pmu_amd_ctxt *ctxt; ctxt 268 arch/x86/xen/pmu.c ctxt = &xenpmu_data->pmu.c.amd; ctxt 271 arch/x86/xen/pmu.c ctrl_regs = field_offset(ctxt, ctrls); ctxt 275 arch/x86/xen/pmu.c counter_regs = field_offset(ctxt, counters); ctxt 339 arch/x86/xen/pmu.c struct xen_pmu_amd_ctxt *ctxt; ctxt 352 arch/x86/xen/pmu.c ctxt = &xenpmu_data->pmu.c.amd; ctxt 353 arch/x86/xen/pmu.c counter_regs = field_offset(ctxt, counters); ctxt 359 arch/x86/xen/pmu.c struct xen_pmu_intel_ctxt *ctxt; ctxt 377 arch/x86/xen/pmu.c ctxt = &xenpmu_data->pmu.c.intel; ctxt 379 arch/x86/xen/pmu.c fixed_counters = field_offset(ctxt, fixed_counters); ctxt 383 arch/x86/xen/pmu.c arch_cntr_pair = field_offset(ctxt, arch_counters); ctxt 287 arch/x86/xen/smp_pv.c struct vcpu_guest_context *ctxt; ctxt 296 arch/x86/xen/smp_pv.c ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); ctxt 297 arch/x86/xen/smp_pv.c if (ctxt == NULL) ctxt 303 arch/x86/xen/smp_pv.c ctxt->user_regs.fs = __KERNEL_PERCPU; ctxt 304 arch/x86/xen/smp_pv.c ctxt->user_regs.gs = __KERNEL_STACK_CANARY; ctxt 306 arch/x86/xen/smp_pv.c memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); ctxt 313 arch/x86/xen/smp_pv.c ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; ctxt 314 arch/x86/xen/smp_pv.c ctxt->flags = VGCF_IN_KERNEL; ctxt 315 arch/x86/xen/smp_pv.c ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ ctxt 316 arch/x86/xen/smp_pv.c ctxt->user_regs.ds = __USER_DS; ctxt 317 arch/x86/xen/smp_pv.c ctxt->user_regs.es = __USER_DS; ctxt 318 arch/x86/xen/smp_pv.c ctxt->user_regs.ss = __KERNEL_DS; ctxt 319 arch/x86/xen/smp_pv.c ctxt->user_regs.cs = __KERNEL_CS; ctxt 320 arch/x86/xen/smp_pv.c ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle); ctxt 322 arch/x86/xen/smp_pv.c xen_copy_trap_info(ctxt->trap_ctxt); ctxt 324 arch/x86/xen/smp_pv.c ctxt->ldt_ents = 0; ctxt 332 arch/x86/xen/smp_pv.c ctxt->gdt_frames[0] = gdt_mfn; ctxt 333 arch/x86/xen/smp_pv.c ctxt->gdt_ents = GDT_ENTRIES; ctxt 340 arch/x86/xen/smp_pv.c ctxt->kernel_ss = __KERNEL_DS; ctxt 341 arch/x86/xen/smp_pv.c ctxt->kernel_sp = task_top_of_stack(idle); ctxt 344 arch/x86/xen/smp_pv.c ctxt->event_callback_cs = __KERNEL_CS; ctxt 345 arch/x86/xen/smp_pv.c ctxt->failsafe_callback_cs = __KERNEL_CS; ctxt 347 arch/x86/xen/smp_pv.c ctxt->gs_base_kernel = per_cpu_offset(cpu); ctxt 349 arch/x86/xen/smp_pv.c ctxt->event_callback_eip = ctxt 351 arch/x86/xen/smp_pv.c ctxt->failsafe_callback_eip = ctxt 355 arch/x86/xen/smp_pv.c ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir)); ctxt 356 arch/x86/xen/smp_pv.c if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt)) ctxt 359 arch/x86/xen/smp_pv.c kfree(ctxt); ctxt 912 drivers/infiniband/hw/hfi1/affinity.c if (rcd->ctxt == HFI1_CTRL_CTXT) ctxt 916 drivers/infiniband/hw/hfi1/affinity.c scnprintf(extra, 
64, "ctxt %u", rcd->ctxt); ctxt 987 drivers/infiniband/hw/hfi1/affinity.c if (rcd->ctxt != HFI1_CTRL_CTXT) ctxt 236 drivers/infiniband/hw/hfi1/aspm.c rcd->ctxt < rcd->dd->first_dyn_alloc_ctxt; ctxt 5247 drivers/infiniband/hw/hfi1/chip.c u32 is = IS_RCVURGENT_START + rcd->ctxt; ctxt 8420 drivers/infiniband/hw/hfi1/chip.c tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); ctxt 11799 drivers/infiniband/hw/hfi1/chip.c write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT, ctxt 11809 drivers/infiniband/hw/hfi1/chip.c u32 ctxt = rcd->ctxt; ctxt 11820 drivers/infiniband/hw/hfi1/chip.c write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg); ctxt 11825 drivers/infiniband/hw/hfi1/chip.c write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); ctxt 11832 drivers/infiniband/hw/hfi1/chip.c head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) ctxt 11838 drivers/infiniband/hw/hfi1/chip.c tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); ctxt 11884 drivers/infiniband/hw/hfi1/chip.c u16 ctxt; ctxt 11889 drivers/infiniband/hw/hfi1/chip.c ctxt = rcd->ctxt; ctxt 11891 drivers/infiniband/hw/hfi1/chip.c hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op); ctxt 11893 drivers/infiniband/hw/hfi1/chip.c rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL); ctxt 11898 drivers/infiniband/hw/hfi1/chip.c write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, ctxt 11901 drivers/infiniband/hw/hfi1/chip.c write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, ctxt 11929 drivers/infiniband/hw/hfi1/chip.c write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0); ctxt 11933 drivers/infiniband/hw/hfi1/chip.c write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0); ctxt 11942 drivers/infiniband/hw/hfi1/chip.c write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg); ctxt 11956 drivers/infiniband/hw/hfi1/chip.c write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg); ctxt 11957 drivers/infiniband/hw/hfi1/chip.c if (ctxt == HFI1_CTRL_CTXT) ctxt 11968 drivers/infiniband/hw/hfi1/chip.c write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, ctxt 11977 drivers/infiniband/hw/hfi1/chip.c set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, ctxt 11978 drivers/infiniband/hw/hfi1/chip.c IS_RCVAVAIL_START + rcd->ctxt, true); ctxt 11982 drivers/infiniband/hw/hfi1/chip.c set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, ctxt 11983 drivers/infiniband/hw/hfi1/chip.c IS_RCVAVAIL_START + rcd->ctxt, false); ctxt 12016 drivers/infiniband/hw/hfi1/chip.c set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, ctxt 12017 drivers/infiniband/hw/hfi1/chip.c IS_RCVURGENT_START + rcd->ctxt, true); ctxt 12019 drivers/infiniband/hw/hfi1/chip.c set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, ctxt 12020 drivers/infiniband/hw/hfi1/chip.c IS_RCVURGENT_START + rcd->ctxt, false); ctxt 12022 drivers/infiniband/hw/hfi1/chip.c hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl); ctxt 12023 drivers/infiniband/hw/hfi1/chip.c write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl); ctxt 12028 drivers/infiniband/hw/hfi1/chip.c reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); ctxt 12031 drivers/infiniband/hw/hfi1/chip.c ctxt, reg); ctxt 12032 drivers/infiniband/hw/hfi1/chip.c read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); ctxt 12033 drivers/infiniband/hw/hfi1/chip.c write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10); ctxt 12034 drivers/infiniband/hw/hfi1/chip.c write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00); ctxt 12035 drivers/infiniband/hw/hfi1/chip.c read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD); ctxt 12036 drivers/infiniband/hw/hfi1/chip.c reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS); ctxt 12038 drivers/infiniband/hw/hfi1/chip.c ctxt, reg, reg == 0 ? 
"not" : "still"); ctxt 12048 drivers/infiniband/hw/hfi1/chip.c write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT, ctxt 12054 drivers/infiniband/hw/hfi1/chip.c write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg); ctxt 12063 drivers/infiniband/hw/hfi1/chip.c write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, ctxt 14084 drivers/infiniband/hw/hfi1/chip.c u64 ctxt = first_ctxt; ctxt 14087 drivers/infiniband/hw/hfi1/chip.c reg |= ctxt << (8 * (i % 8)); ctxt 14088 drivers/infiniband/hw/hfi1/chip.c ctxt++; ctxt 14089 drivers/infiniband/hw/hfi1/chip.c if (ctxt > last_ctxt) ctxt 14090 drivers/infiniband/hw/hfi1/chip.c ctxt = first_ctxt; ctxt 14254 drivers/infiniband/hw/hfi1/chip.c unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m; ctxt 14271 drivers/infiniband/hw/hfi1/chip.c for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) { ctxt 14274 drivers/infiniband/hw/hfi1/chip.c for (qpn = 0, tctxt = ctxt; ctxt 14288 drivers/infiniband/hw/hfi1/chip.c if (tctxt == ctxt + krcvqs[i]) ctxt 14289 drivers/infiniband/hw/hfi1/chip.c tctxt = ctxt; ctxt 14291 drivers/infiniband/hw/hfi1/chip.c ctxt += krcvqs[i]; ctxt 14421 drivers/infiniband/hw/hfi1/chip.c reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8); ctxt 14618 drivers/infiniband/hw/hfi1/chip.c write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg); ctxt 14644 drivers/infiniband/hw/hfi1/chip.c write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0); ctxt 14670 drivers/infiniband/hw/hfi1/chip.c int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt) ctxt 14675 drivers/infiniband/hw/hfi1/chip.c if (!ctxt || !ctxt->sc) ctxt 14678 drivers/infiniband/hw/hfi1/chip.c hw_ctxt = ctxt->sc->hw_context; ctxt 623 drivers/infiniband/hw/hfi1/chip.h static inline u64 read_kctxt_csr(const struct hfi1_devdata *dd, int ctxt, ctxt 627 drivers/infiniband/hw/hfi1/chip.h return read_csr(dd, offset0 + (0x100 * ctxt)); ctxt 630 drivers/infiniband/hw/hfi1/chip.h static inline void write_kctxt_csr(struct hfi1_devdata *dd, int ctxt, ctxt 634 drivers/infiniband/hw/hfi1/chip.h write_csr(dd, offset0 + (0x100 * ctxt), value); ctxt 646 drivers/infiniband/hw/hfi1/chip.h int ctxt, ctxt 649 drivers/infiniband/hw/hfi1/chip.h return get_csr_addr(dd, offset0 + (0x100 * ctxt)); ctxt 658 drivers/infiniband/hw/hfi1/chip.h static inline u64 read_uctxt_csr(const struct hfi1_devdata *dd, int ctxt, ctxt 662 drivers/infiniband/hw/hfi1/chip.h return read_csr(dd, offset0 + (0x1000 * ctxt)); ctxt 665 drivers/infiniband/hw/hfi1/chip.h static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt, ctxt 669 drivers/infiniband/hw/hfi1/chip.h write_csr(dd, offset0 + (0x1000 * ctxt), value); ctxt 1432 drivers/infiniband/hw/hfi1/chip.h int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt); ctxt 1433 drivers/infiniband/hw/hfi1/chip.h int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt, ctxt 1435 drivers/infiniband/hw/hfi1/chip.h int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt); ctxt 556 drivers/infiniband/hw/hfi1/driver.c if (rcd->ctxt == HFI1_CTRL_CTXT) ctxt 581 drivers/infiniband/hw/hfi1/driver.c if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail)) ctxt 596 drivers/infiniband/hw/hfi1/driver.c (rcd->ctxt == HFI1_CTRL_CTXT)) { ctxt 894 drivers/infiniband/hw/hfi1/driver.c static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt) ctxt 904 drivers/infiniband/hw/hfi1/driver.c if (ctxt >= dd->first_dyn_alloc_ctxt) { ctxt 905 drivers/infiniband/hw/hfi1/driver.c rcd = hfi1_rcd_get_by_index_safe(dd, ctxt); ctxt 923 
drivers/infiniband/hw/hfi1/driver.c static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt) ctxt 933 drivers/infiniband/hw/hfi1/driver.c if (ctxt >= dd->first_dyn_alloc_ctxt) { ctxt 934 drivers/infiniband/hw/hfi1/driver.c rcd = hfi1_rcd_get_by_index_safe(dd, ctxt); ctxt 1019 drivers/infiniband/hw/hfi1/driver.c needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1; ctxt 1043 drivers/infiniband/hw/hfi1/driver.c if (rcd->ctxt == HFI1_CTRL_CTXT) { ctxt 1087 drivers/infiniband/hw/hfi1/driver.c set_nodma_rtail(dd, rcd->ctxt); ctxt 1097 drivers/infiniband/hw/hfi1/driver.c if (rcd->ctxt == HFI1_CTRL_CTXT) { ctxt 1109 drivers/infiniband/hw/hfi1/driver.c set_dma_rtail(dd, rcd->ctxt); ctxt 1586 drivers/infiniband/hw/hfi1/driver.c rcd->ctxt, packet->rhf, ctxt 1752 drivers/infiniband/hw/hfi1/driver.c rcd->ctxt, rcd->rcvhdrq_cnt, rcd->rcvhdrqentsize, ctxt 1755 drivers/infiniband/hw/hfi1/driver.c read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) & ctxt 1757 drivers/infiniband/hw/hfi1/driver.c read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL)); ctxt 171 drivers/infiniband/hw/hfi1/file_ops.c #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \ ctxt 174 drivers/infiniband/hw/hfi1/file_ops.c HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \ ctxt 323 drivers/infiniband/hw/hfi1/file_ops.c trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim); ctxt 362 drivers/infiniband/hw/hfi1/file_ops.c u16 ctxt; ctxt 370 drivers/infiniband/hw/hfi1/file_ops.c ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token); ctxt 373 drivers/infiniband/hw/hfi1/file_ops.c if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) { ctxt 476 drivers/infiniband/hw/hfi1/file_ops.c + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE); ctxt 566 drivers/infiniband/hw/hfi1/file_ops.c uctxt->ctxt, fd->subctxt, ctxt 575 drivers/infiniband/hw/hfi1/file_ops.c ctxt, subctxt, type, mapio, vmf, memaddr, memlen, ctxt 651 drivers/infiniband/hw/hfi1/file_ops.c hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt); ctxt 979 drivers/infiniband/hw/hfi1/file_ops.c uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num, ctxt 1158 drivers/infiniband/hw/hfi1/file_ops.c cinfo.ctxt = uctxt->ctxt; ctxt 1174 drivers/infiniband/hw/hfi1/file_ops.c trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, &cinfo); ctxt 1279 drivers/infiniband/hw/hfi1/file_ops.c binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt, ctxt 1281 drivers/infiniband/hw/hfi1/file_ops.c binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt, ctxt 1285 drivers/infiniband/hw/hfi1/file_ops.c uctxt->ctxt, ctxt 1288 drivers/infiniband/hw/hfi1/file_ops.c binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt, ctxt 1291 drivers/infiniband/hw/hfi1/file_ops.c binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt, ctxt 1294 drivers/infiniband/hw/hfi1/file_ops.c binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt, ctxt 1300 drivers/infiniband/hw/hfi1/file_ops.c binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt, ctxt 1304 drivers/infiniband/hw/hfi1/file_ops.c binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt, ctxt 1307 drivers/infiniband/hw/hfi1/file_ops.c binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt, ctxt 1311 drivers/infiniband/hw/hfi1/file_ops.c binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt, ctxt 1315 drivers/infiniband/hw/hfi1/file_ops.c uctxt->ctxt, ctxt 1318 drivers/infiniband/hw/hfi1/file_ops.c uctxt->ctxt, ctxt 1321 drivers/infiniband/hw/hfi1/file_ops.c uctxt->ctxt, ctxt 1499 drivers/infiniband/hw/hfi1/file_ops.c u16 ctxt; ctxt 1504 
drivers/infiniband/hw/hfi1/file_ops.c for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts; ctxt 1505 drivers/infiniband/hw/hfi1/file_ops.c ctxt++) { ctxt 1506 drivers/infiniband/hw/hfi1/file_ops.c uctxt = hfi1_rcd_get_by_index(dd, ctxt); ctxt 297 drivers/infiniband/hw/hfi1/hfi.h u8 ctxt; ctxt 1053 drivers/infiniband/hw/hfi1/hfi.h struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT]; ctxt 1469 drivers/infiniband/hw/hfi1/hfi.h return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) * ctxt 1494 drivers/infiniband/hw/hfi1/hfi.h u16 ctxt); ctxt 1495 drivers/infiniband/hw/hfi1/hfi.h struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt); ctxt 155 drivers/infiniband/hw/hfi1/init.c if (rcd->ctxt == HFI1_CTRL_CTXT) ctxt 219 drivers/infiniband/hw/hfi1/init.c rcd->dd->rcd[rcd->ctxt] = NULL; ctxt 269 drivers/infiniband/hw/hfi1/init.c u16 ctxt; ctxt 272 drivers/infiniband/hw/hfi1/init.c for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++) ctxt 273 drivers/infiniband/hw/hfi1/init.c if (!dd->rcd[ctxt]) ctxt 276 drivers/infiniband/hw/hfi1/init.c if (ctxt < dd->num_rcv_contexts) { ctxt 277 drivers/infiniband/hw/hfi1/init.c rcd->ctxt = ctxt; ctxt 278 drivers/infiniband/hw/hfi1/init.c dd->rcd[ctxt] = rcd; ctxt 283 drivers/infiniband/hw/hfi1/init.c if (ctxt >= dd->num_rcv_contexts) ctxt 286 drivers/infiniband/hw/hfi1/init.c *index = ctxt; ctxt 304 drivers/infiniband/hw/hfi1/init.c u16 ctxt) ctxt 306 drivers/infiniband/hw/hfi1/init.c if (ctxt < dd->num_rcv_contexts) ctxt 307 drivers/infiniband/hw/hfi1/init.c return hfi1_rcd_get_by_index(dd, ctxt); ctxt 324 drivers/infiniband/hw/hfi1/init.c struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt) ctxt 330 drivers/infiniband/hw/hfi1/init.c if (dd->rcd[ctxt]) { ctxt 331 drivers/infiniband/hw/hfi1/init.c rcd = dd->rcd[ctxt]; ctxt 359 drivers/infiniband/hw/hfi1/init.c u16 ctxt; ctxt 362 drivers/infiniband/hw/hfi1/init.c ret = allocate_rcd_index(dd, rcd, &ctxt); ctxt 382 drivers/infiniband/hw/hfi1/init.c hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt); ctxt 391 drivers/infiniband/hw/hfi1/init.c if (ctxt < dd->first_dyn_alloc_ctxt) { ctxt 392 drivers/infiniband/hw/hfi1/init.c if (ctxt < kctxt_ngroups) { ctxt 393 drivers/infiniband/hw/hfi1/init.c base = ctxt * (dd->rcv_entries.ngroups + 1); ctxt 397 drivers/infiniband/hw/hfi1/init.c (ctxt * dd->rcv_entries.ngroups); ctxt 400 drivers/infiniband/hw/hfi1/init.c u16 ct = ctxt - dd->first_dyn_alloc_ctxt; ctxt 436 drivers/infiniband/hw/hfi1/init.c rcd->ctxt); ctxt 441 drivers/infiniband/hw/hfi1/init.c rcd->ctxt, rcd->egrbufs.count); ctxt 473 drivers/infiniband/hw/hfi1/init.c rcd->ctxt, rcd->egrbufs.size); ctxt 478 drivers/infiniband/hw/hfi1/init.c if (ctxt < dd->first_dyn_alloc_ctxt) { ctxt 1533 drivers/infiniband/hw/hfi1/init.c int ctxt; ctxt 1570 drivers/infiniband/hw/hfi1/init.c for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) { ctxt 1571 drivers/infiniband/hw/hfi1/init.c struct hfi1_ctxtdata *rcd = dd->rcd[ctxt]; ctxt 1584 drivers/infiniband/hw/hfi1/init.c for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++) ctxt 1585 drivers/infiniband/hw/hfi1/init.c sc_free(dd->send_contexts[ctxt].sc); ctxt 1852 drivers/infiniband/hw/hfi1/init.c if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic) ctxt 1863 drivers/infiniband/hw/hfi1/init.c amt, rcd->ctxt); ctxt 1886 drivers/infiniband/hw/hfi1/init.c write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg); ctxt 1890 drivers/infiniband/hw/hfi1/init.c write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg); ctxt 1893 
drivers/infiniband/hw/hfi1/init.c write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg); ctxt 1899 drivers/infiniband/hw/hfi1/init.c write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR, ctxt 1907 drivers/infiniband/hw/hfi1/init.c rcd->ctxt); ctxt 1995 drivers/infiniband/hw/hfi1/init.c rcd->ctxt); ctxt 2043 drivers/infiniband/hw/hfi1/init.c rcd->ctxt, rcd->egrbufs.alloced, ctxt 2066 drivers/infiniband/hw/hfi1/init.c rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count, ctxt 2072 drivers/infiniband/hw/hfi1/init.c rcd->ctxt, rcd->egrbufs.rcvtid_size); ctxt 208 drivers/infiniband/hw/hfi1/msix.c receive_context_thread, rcd->ctxt, IRQ_RCVCTXT); ctxt 216 drivers/infiniband/hw/hfi1/msix.c rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64; ctxt 217 drivers/infiniband/hw/hfi1/msix.c rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64); ctxt 219 drivers/infiniband/hw/hfi1/msix.c remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr); ctxt 356 drivers/infiniband/hw/hfi1/msix.c struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i]; ctxt 2019 drivers/infiniband/hw/hfi1/pio.c u32 ctxt; ctxt 2064 drivers/infiniband/hw/hfi1/pio.c ctxt = dd->vld[15].sc->hw_context; ctxt 2066 drivers/infiniband/hw/hfi1/pio.c write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); ctxt 2069 drivers/infiniband/hw/hfi1/pio.c dd->vld[15].sc->sw_index, ctxt); ctxt 2073 drivers/infiniband/hw/hfi1/pio.c ctxt = dd->vld[i].sc->hw_context; ctxt 2075 drivers/infiniband/hw/hfi1/pio.c write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); ctxt 2079 drivers/infiniband/hw/hfi1/pio.c ctxt = dd->kernel_send_context[i + 1]->hw_context; ctxt 2081 drivers/infiniband/hw/hfi1/pio.c write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); ctxt 127 drivers/infiniband/hw/hfi1/tid_rdma.c static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx); ctxt 197 drivers/infiniband/hw/hfi1/tid_rdma.c p->qp = (kdeth_qp << 16) | priv->rcd->ctxt; ctxt 328 drivers/infiniband/hw/hfi1/tid_rdma.c unsigned int ctxt; ctxt 331 drivers/infiniband/hw/hfi1/tid_rdma.c ctxt = 0; ctxt 333 drivers/infiniband/hw/hfi1/tid_rdma.c ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift); ctxt 334 drivers/infiniband/hw/hfi1/tid_rdma.c return dd->rcd[ctxt]; ctxt 745 drivers/infiniband/hw/hfi1/tid_rdma.c write_uctxt_csr(rcd->dd, rcd->ctxt, ctxt 1754 drivers/infiniband/hw/hfi1/tid_rdma.c qpriv->rcd->ctxt); ctxt 2791 drivers/infiniband/hw/hfi1/tid_rdma.c last_psn = read_r_next_psn(dd, rcd->ctxt, ctxt 2952 drivers/infiniband/hw/hfi1/tid_rdma.c read_r_next_psn(dd, rcd->ctxt, ctxt 3915 drivers/infiniband/hw/hfi1/tid_rdma.c qpriv->rcd->ctxt); ctxt 4449 drivers/infiniband/hw/hfi1/tid_rdma.c qpriv->rcd->ctxt); ctxt 5473 drivers/infiniband/hw/hfi1/tid_rdma.c static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx) ctxt 5481 drivers/infiniband/hw/hfi1/tid_rdma.c reg = read_uctxt_csr(dd, ctxt, RCV_TID_FLOW_TABLE + (8 * fidx)); ctxt 5514 drivers/infiniband/hw/hfi1/tid_rdma.c read_r_next_psn(dd, rcd->ctxt, flow->idx); ctxt 66 drivers/infiniband/hw/hfi1/trace_ctxts.h __field(unsigned int, ctxt) ctxt 78 drivers/infiniband/hw/hfi1/trace_ctxts.h __entry->ctxt = uctxt->ctxt; ctxt 91 drivers/infiniband/hw/hfi1/trace_ctxts.h __entry->ctxt, ctxt 107 drivers/infiniband/hw/hfi1/trace_ctxts.h TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt, ctxt 110 drivers/infiniband/hw/hfi1/trace_ctxts.h TP_ARGS(dd, ctxt, subctxt, cinfo), ctxt 112 drivers/infiniband/hw/hfi1/trace_ctxts.h __field(unsigned int, ctxt) ctxt 121 drivers/infiniband/hw/hfi1/trace_ctxts.h __entry->ctxt = ctxt; ctxt 131 
drivers/infiniband/hw/hfi1/trace_ctxts.h __entry->ctxt, ctxt 116 drivers/infiniband/hw/hfi1/trace_misc.h __field(u32, ctxt) ctxt 124 drivers/infiniband/hw/hfi1/trace_misc.h __entry->ctxt = packet->rcd->ctxt; ctxt 133 drivers/infiniband/hw/hfi1/trace_misc.h __entry->ctxt, ctxt 70 drivers/infiniband/hw/hfi1/trace_rx.h __field(u32, ctxt) ctxt 79 drivers/infiniband/hw/hfi1/trace_rx.h __entry->ctxt = packet->rcd->ctxt; ctxt 89 drivers/infiniband/hw/hfi1/trace_rx.h __entry->ctxt, ctxt 103 drivers/infiniband/hw/hfi1/trace_rx.h __field(u32, ctxt) ctxt 108 drivers/infiniband/hw/hfi1/trace_rx.h __entry->ctxt = rcd->ctxt; ctxt 125 drivers/infiniband/hw/hfi1/trace_rx.h __entry->ctxt, ctxt 132 drivers/infiniband/hw/hfi1/trace_rx.h TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type, ctxt 134 drivers/infiniband/hw/hfi1/trace_rx.h TP_ARGS(ctxt, subctxt, type, start, end), ctxt 136 drivers/infiniband/hw/hfi1/trace_rx.h __field(unsigned int, ctxt) ctxt 143 drivers/infiniband/hw/hfi1/trace_rx.h __entry->ctxt = ctxt; ctxt 150 drivers/infiniband/hw/hfi1/trace_rx.h __entry->ctxt, ctxt 88 drivers/infiniband/hw/hfi1/trace_tid.h TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, ctxt 90 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma), ctxt 92 drivers/infiniband/hw/hfi1/trace_tid.h __field(unsigned int, ctxt) ctxt 101 drivers/infiniband/hw/hfi1/trace_tid.h __entry->ctxt = ctxt; ctxt 110 drivers/infiniband/hw/hfi1/trace_tid.h __entry->ctxt, ctxt 122 drivers/infiniband/hw/hfi1/trace_tid.h TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, ctxt 124 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma) ctxt 129 drivers/infiniband/hw/hfi1/trace_tid.h TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, ctxt 131 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma) ctxt 164 drivers/infiniband/hw/hfi1/trace_tid.h TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr, ctxt 166 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(ctxt, subctxt, va, rarr, npages, dma), ctxt 168 drivers/infiniband/hw/hfi1/trace_tid.h __field(unsigned int, ctxt) ctxt 176 drivers/infiniband/hw/hfi1/trace_tid.h __entry->ctxt = ctxt; ctxt 184 drivers/infiniband/hw/hfi1/trace_tid.h __entry->ctxt, ctxt 210 drivers/infiniband/hw/hfi1/trace_tx.h TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt), ctxt 211 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt), ctxt 213 drivers/infiniband/hw/hfi1/trace_tx.h __field(u16, ctxt) ctxt 217 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt = ctxt; ctxt 222 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt, ctxt 228 drivers/infiniband/hw/hfi1/trace_tx.h TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, ctxt 230 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt, comp_idx), ctxt 232 drivers/infiniband/hw/hfi1/trace_tx.h __field(u16, ctxt) ctxt 237 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt = ctxt; ctxt 243 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt, ctxt 251 drivers/infiniband/hw/hfi1/trace_tx.h TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx, ctxt 253 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt, comp_idx, value), ctxt 255 drivers/infiniband/hw/hfi1/trace_tx.h __field(u16, ctxt) ctxt 261 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt = ctxt; ctxt 268 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt, ctxt 276 drivers/infiniband/hw/hfi1/trace_tx.h 
TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, ctxt 278 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset)); ctxt 281 drivers/infiniband/hw/hfi1/trace_tx.h TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, ctxt 283 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len)); ctxt 286 drivers/infiniband/hw/hfi1/trace_tx.h TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, ctxt 288 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len)); ctxt 291 drivers/infiniband/hw/hfi1/trace_tx.h TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, ctxt 293 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift), ctxt 295 drivers/infiniband/hw/hfi1/trace_tx.h __field(u16, ctxt) ctxt 303 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt = ctxt; ctxt 312 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt, ctxt 322 drivers/infiniband/hw/hfi1/trace_tx.h TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, ctxt 324 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt, dim), ctxt 326 drivers/infiniband/hw/hfi1/trace_tx.h __field(u16, ctxt) ctxt 331 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt = ctxt; ctxt 337 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt, ctxt 506 drivers/infiniband/hw/hfi1/trace_tx.h TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req, ctxt 508 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval), ctxt 511 drivers/infiniband/hw/hfi1/trace_tx.h __field(u16, ctxt) ctxt 539 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt = ctxt; ctxt 562 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt, ctxt 588 drivers/infiniband/hw/hfi1/trace_tx.h TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i), ctxt 589 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt, i), ctxt 592 drivers/infiniband/hw/hfi1/trace_tx.h __field(u16, ctxt) ctxt 602 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt = ctxt; ctxt 612 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt, ctxt 631 drivers/infiniband/hw/hfi1/trace_tx.h TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx, ctxt 633 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt, idx, state, code), ctxt 636 drivers/infiniband/hw/hfi1/trace_tx.h __field(u16, ctxt) ctxt 644 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt = ctxt; ctxt 651 drivers/infiniband/hw/hfi1/trace_tx.h __get_str(dev), __entry->ctxt, __entry->subctxt, ctxt 660 drivers/infiniband/hw/hfi1/trace_tx.h TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req, ctxt 662 drivers/infiniband/hw/hfi1/trace_tx.h TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval), ctxt 665 drivers/infiniband/hw/hfi1/trace_tx.h __field(u16, ctxt) ctxt 676 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt = ctxt; ctxt 687 drivers/infiniband/hw/hfi1/trace_tx.h __entry->ctxt, ctxt 643 drivers/infiniband/hw/hfi1/ud.c struct send_context *ctxt = qp_to_send_context(qp, sc5); ctxt 684 drivers/infiniband/hw/hfi1/ud.c if (ctxt) { ctxt 685 drivers/infiniband/hw/hfi1/ud.c pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL); ctxt 705 drivers/infiniband/hw/hfi1/ud.c struct send_context *ctxt = qp_to_send_context(qp, sc5); ctxt 739 drivers/infiniband/hw/hfi1/ud.c if (ctxt) { ctxt 740 drivers/infiniband/hw/hfi1/ud.c pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL); ctxt 803 drivers/infiniband/hw/hfi1/user_exp_rcv.c 
ctxt 819 drivers/infiniband/hw/hfi1/user_exp_rcv.c tididx, uctxt->ctxt);
ctxt 848 drivers/infiniband/hw/hfi1/user_exp_rcv.c trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
ctxt 919 drivers/infiniband/hw/hfi1/user_exp_rcv.c trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
ctxt 185 drivers/infiniband/hw/hfi1/user_sdma.c pq->ctxt = uctxt->ctxt;
ctxt 209 drivers/infiniband/hw/hfi1/user_sdma.c snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
ctxt 218 drivers/infiniband/hw/hfi1/user_sdma.c uctxt->ctxt);
ctxt 281 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
ctxt 366 drivers/infiniband/hw/hfi1/user_sdma.c dd->unit, uctxt->ctxt, fd->subctxt,
ctxt 373 drivers/infiniband/hw/hfi1/user_sdma.c dd->unit, uctxt->ctxt, fd->subctxt, ret);
ctxt 377 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
ctxt 382 drivers/infiniband/hw/hfi1/user_sdma.c dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
ctxt 393 drivers/infiniband/hw/hfi1/user_sdma.c dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
ctxt 401 drivers/infiniband/hw/hfi1/user_sdma.c dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
ctxt 408 drivers/infiniband/hw/hfi1/user_sdma.c dd->unit, uctxt->ctxt, fd->subctxt,
ctxt 415 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_process_request(dd, uctxt->ctxt, fd->subctxt,
ctxt 517 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_initial_tidoffset(dd, uctxt->ctxt, fd->subctxt,
ctxt 535 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_data_length(dd, uctxt->ctxt, fd->subctxt,
ctxt 578 drivers/infiniband/hw/hfi1/user_sdma.c selector += uctxt->ctxt + fd->subctxt;
ctxt 680 drivers/infiniband/hw/hfi1/user_sdma.c req->pq->ctxt,
ctxt 1256 drivers/infiniband/hw/hfi1/user_sdma.c pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
ctxt 1265 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
ctxt 1375 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
ctxt 1482 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
ctxt 117 drivers/infiniband/hw/hfi1/user_sdma.h (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
ctxt 121 drivers/infiniband/hw/hfi1/user_sdma.h u16 ctxt;
ctxt 126 drivers/infiniband/hw/hfi1/vnic_main.c dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
ctxt 135 drivers/infiniband/hw/hfi1/vnic_main.c dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
ctxt 558 drivers/infiniband/hw/hfi1/vnic_main.c l4_type, vesw_id, packet->rcd->ctxt);
ctxt 697 drivers/infiniband/hw/hfi1/vnic_main.c rc = hfi1_vnic_allot_ctxt(dd, &dd->vnic.ctxt[i]);
ctxt 700 drivers/infiniband/hw/hfi1/vnic_main.c hfi1_rcd_get(dd->vnic.ctxt[i]);
ctxt 701 drivers/infiniband/hw/hfi1/vnic_main.c dd->vnic.ctxt[i]->vnic_q_idx = i;
ctxt 711 drivers/infiniband/hw/hfi1/vnic_main.c deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
ctxt 712 drivers/infiniband/hw/hfi1/vnic_main.c hfi1_rcd_put(dd->vnic.ctxt[i]);
ctxt 713 drivers/infiniband/hw/hfi1/vnic_main.c dd->vnic.ctxt[i] = NULL;
ctxt 741 drivers/infiniband/hw/hfi1/vnic_main.c deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
ctxt 742 drivers/infiniband/hw/hfi1/vnic_main.c hfi1_rcd_put(dd->vnic.ctxt[i]);
ctxt 743 drivers/infiniband/hw/hfi1/vnic_main.c dd->vnic.ctxt[i] = NULL;
ctxt 152 drivers/infiniband/hw/qib/qib.h unsigned ctxt;
ctxt 805 drivers/infiniband/hw/qib/qib.h int ctxt);
ctxt 423 drivers/infiniband/hw/qib/qib_common.h __u16 ctxt; /* ctxt on unit assigned to caller */
ctxt 292 drivers/infiniband/hw/qib/qib_driver.c u32 ctxt, u32 eflags, u32 l, u32 etail,
ctxt 508 drivers/infiniband/hw/qib/qib_driver.c crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
ctxt 193 drivers/infiniband/hw/qib/qib_file_ops.c kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
ctxt 242 drivers/infiniband/hw/qib/qib_file_ops.c kinfo->spi_ctxt = rcd->ctxt;
ctxt 308 drivers/infiniband/hw/qib/qib_file_ops.c ctxttid = rcd->ctxt * dd->rcvtidcnt;
ctxt 503 drivers/infiniband/hw/qib/qib_file_ops.c ctxttid = rcd->ctxt * dd->rcvtidcnt;
ctxt 675 drivers/infiniband/hw/qib/qib_file_ops.c dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
ctxt 745 drivers/infiniband/hw/qib/qib_file_ops.c what, rcd->ctxt, pfn, len, ret);
ctxt 1022 drivers/infiniband/hw/qib/qib_file_ops.c ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
ctxt 1115 drivers/infiniband/hw/qib/qib_file_ops.c dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
ctxt 1284 drivers/infiniband/hw/qib/qib_file_ops.c static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
ctxt 1300 drivers/infiniband/hw/qib/qib_file_ops.c rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);
ctxt 1323 drivers/infiniband/hw/qib/qib_file_ops.c init_waitqueue_head(&dd->rcd[ctxt]->wait);
ctxt 1335 drivers/infiniband/hw/qib/qib_file_ops.c dd->rcd[ctxt] = NULL;
ctxt 1358 drivers/infiniband/hw/qib/qib_file_ops.c int ret, ctxt;
ctxt 1367 drivers/infiniband/hw/qib/qib_file_ops.c for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
ctxt 1368 drivers/infiniband/hw/qib/qib_file_ops.c ctxt++)
ctxt 1370 drivers/infiniband/hw/qib/qib_file_ops.c if (ctxt == dd->cfgctxts) {
ctxt 1375 drivers/infiniband/hw/qib/qib_file_ops.c u32 pidx = ctxt % dd->num_pports;
ctxt 1386 drivers/infiniband/hw/qib/qib_file_ops.c ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
ctxt 1410 drivers/infiniband/hw/qib/qib_file_ops.c u32 port = uinfo->spu_port, ctxt;
ctxt 1441 drivers/infiniband/hw/qib/qib_file_ops.c for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
ctxt 1442 drivers/infiniband/hw/qib/qib_file_ops.c ctxt++)
ctxt 1443 drivers/infiniband/hw/qib/qib_file_ops.c if (dd->rcd[ctxt])
ctxt 1572 drivers/infiniband/hw/qib/qib_file_ops.c rcd->ctxt,
ctxt 1667 drivers/infiniband/hw/qib/qib_file_ops.c uctxt = rcd->ctxt - dd->first_user_ctxt;
ctxt 1687 drivers/infiniband/hw/qib/qib_file_ops.c dd->unit, rcd->ctxt);
ctxt 1693 drivers/infiniband/hw/qib/qib_file_ops.c rcd->ctxt, rcd->piocnt);
ctxt 1744 drivers/infiniband/hw/qib/qib_file_ops.c rcd->ctxt);
ctxt 1770 drivers/infiniband/hw/qib/qib_file_ops.c int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
ctxt 1796 drivers/infiniband/hw/qib/qib_file_ops.c unsigned ctxt;
ctxt 1836 drivers/infiniband/hw/qib/qib_file_ops.c ctxt = rcd->ctxt;
ctxt 1837 drivers/infiniband/hw/qib/qib_file_ops.c dd->rcd[ctxt] = NULL;
ctxt 1854 drivers/infiniband/hw/qib/qib_file_ops.c QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
ctxt 1891 drivers/infiniband/hw/qib/qib_file_ops.c info.ctxt = rcd->ctxt;
ctxt 1979 drivers/infiniband/hw/qib/qib_file_ops.c unsigned ctxt;
ctxt 1984 drivers/infiniband/hw/qib/qib_file_ops.c for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
ctxt 1985 drivers/infiniband/hw/qib/qib_file_ops.c ctxt++) {
ctxt 1986 drivers/infiniband/hw/qib/qib_file_ops.c rcd = ppd->dd->rcd[ctxt];
ctxt 307 drivers/infiniband/hw/qib/qib_iba6120.c enum qib_ureg regno, int ctxt)
ctxt 315 drivers/infiniband/hw/qib/qib_iba6120.c dd->ureg_align * ctxt));
ctxt 320 drivers/infiniband/hw/qib/qib_iba6120.c dd->ureg_align * ctxt));
ctxt 333 drivers/infiniband/hw/qib/qib_iba6120.c enum qib_ureg regno, u64 value, int ctxt)
ctxt 340 drivers/infiniband/hw/qib/qib_iba6120.c dd->ureg_align * ctxt);
ctxt 345 drivers/infiniband/hw/qib/qib_iba6120.c dd->ureg_align * ctxt);
ctxt 383 drivers/infiniband/hw/qib/qib_iba6120.c const u16 regno, unsigned ctxt,
ctxt 386 drivers/infiniband/hw/qib/qib_iba6120.c qib_write_kreg(dd, regno + ctxt, value);
ctxt 1948 drivers/infiniband/hw/qib/qib_iba6120.c u32 ctxt;
ctxt 1954 drivers/infiniband/hw/qib/qib_iba6120.c ctxt = rcd->ctxt;
ctxt 1960 drivers/infiniband/hw/qib/qib_iba6120.c ctxt * dd->rcvtidcnt * sizeof(*tidbase));
ctxt 2053 drivers/infiniband/hw/qib/qib_iba6120.c qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
ctxt 2054 drivers/infiniband/hw/qib/qib_iba6120.c qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
ctxt 2061 drivers/infiniband/hw/qib/qib_iba6120.c head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
ctxt 2065 drivers/infiniband/hw/qib/qib_iba6120.c tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
ctxt 2095 drivers/infiniband/hw/qib/qib_iba6120.c int ctxt)
ctxt 2111 drivers/infiniband/hw/qib/qib_iba6120.c if (ctxt < 0)
ctxt 2114 drivers/infiniband/hw/qib/qib_iba6120.c mask = (1ULL << ctxt);
ctxt 2121 drivers/infiniband/hw/qib/qib_iba6120.c qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
ctxt 2122 drivers/infiniband/hw/qib/qib_iba6120.c dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
ctxt 2123 drivers/infiniband/hw/qib/qib_iba6120.c qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
ctxt 2124 drivers/infiniband/hw/qib/qib_iba6120.c dd->rcd[ctxt]->rcvhdrq_phys);
ctxt 2126 drivers/infiniband/hw/qib/qib_iba6120.c if (ctxt == 0 && !dd->cspec->dummy_hdrq)
ctxt 2138 drivers/infiniband/hw/qib/qib_iba6120.c val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
ctxt 2140 drivers/infiniband/hw/qib/qib_iba6120.c qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
ctxt 2149 drivers/infiniband/hw/qib/qib_iba6120.c val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
ctxt 2150 drivers/infiniband/hw/qib/qib_iba6120.c qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
ctxt 2152 drivers/infiniband/hw/qib/qib_iba6120.c val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
ctxt 2153 drivers/infiniband/hw/qib/qib_iba6120.c dd->rcd[ctxt]->head = val;
ctxt 2155 drivers/infiniband/hw/qib/qib_iba6120.c if (ctxt < dd->first_user_ctxt)
ctxt 2157 drivers/infiniband/hw/qib/qib_iba6120.c qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
ctxt 2169 drivers/infiniband/hw/qib/qib_iba6120.c if (ctxt >= 0) {
ctxt 2170 drivers/infiniband/hw/qib/qib_iba6120.c qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
ctxt 2172 drivers/infiniband/hw/qib/qib_iba6120.c qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
ctxt 3390 drivers/infiniband/hw/qib/qib_iba6120.c rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
ctxt 231 drivers/infiniband/hw/qib/qib_iba7220.c enum qib_ureg regno, int ctxt)
ctxt 239 drivers/infiniband/hw/qib/qib_iba7220.c dd->ureg_align * ctxt));
ctxt 244 drivers/infiniband/hw/qib/qib_iba7220.c dd->ureg_align * ctxt));
ctxt 257 drivers/infiniband/hw/qib/qib_iba7220.c enum qib_ureg regno, u64 value, int ctxt)
ctxt 264 drivers/infiniband/hw/qib/qib_iba7220.c dd->ureg_align * ctxt);
ctxt 269 drivers/infiniband/hw/qib/qib_iba7220.c dd->ureg_align * ctxt);
ctxt 283 drivers/infiniband/hw/qib/qib_iba7220.c const u16 regno, unsigned ctxt,
ctxt 286 drivers/infiniband/hw/qib/qib_iba7220.c qib_write_kreg(dd, regno + ctxt, value);
ctxt 2195 drivers/infiniband/hw/qib/qib_iba7220.c u32 ctxt;
ctxt 2201 drivers/infiniband/hw/qib/qib_iba7220.c ctxt = rcd->ctxt;
ctxt 2207 drivers/infiniband/hw/qib/qib_iba7220.c ctxt * dd->rcvtidcnt * sizeof(*tidbase));
ctxt 2705 drivers/infiniband/hw/qib/qib_iba7220.c qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
ctxt 2706 drivers/infiniband/hw/qib/qib_iba7220.c qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
ctxt 2713 drivers/infiniband/hw/qib/qib_iba7220.c head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
ctxt 2717 drivers/infiniband/hw/qib/qib_iba7220.c tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
ctxt 2729 drivers/infiniband/hw/qib/qib_iba7220.c int ctxt)
ctxt 2744 drivers/infiniband/hw/qib/qib_iba7220.c if (ctxt < 0)
ctxt 2747 drivers/infiniband/hw/qib/qib_iba7220.c mask = (1ULL << ctxt);
ctxt 2754 drivers/infiniband/hw/qib/qib_iba7220.c qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
ctxt 2755 drivers/infiniband/hw/qib/qib_iba7220.c dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
ctxt 2756 drivers/infiniband/hw/qib/qib_iba7220.c qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
ctxt 2757 drivers/infiniband/hw/qib/qib_iba7220.c dd->rcd[ctxt]->rcvhdrq_phys);
ctxt 2758 drivers/infiniband/hw/qib/qib_iba7220.c dd->rcd[ctxt]->seq_cnt = 1;
ctxt 2769 drivers/infiniband/hw/qib/qib_iba7220.c val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
ctxt 2771 drivers/infiniband/hw/qib/qib_iba7220.c qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
ctxt 2780 drivers/infiniband/hw/qib/qib_iba7220.c val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
ctxt 2781 drivers/infiniband/hw/qib/qib_iba7220.c qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
ctxt 2783 drivers/infiniband/hw/qib/qib_iba7220.c val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
ctxt 2784 drivers/infiniband/hw/qib/qib_iba7220.c dd->rcd[ctxt]->head = val;
ctxt 2786 drivers/infiniband/hw/qib/qib_iba7220.c if (ctxt < dd->first_user_ctxt)
ctxt 2788 drivers/infiniband/hw/qib/qib_iba7220.c qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
ctxt 2791 drivers/infiniband/hw/qib/qib_iba7220.c if (ctxt >= 0) {
ctxt 2792 drivers/infiniband/hw/qib/qib_iba7220.c qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
ctxt 2793 drivers/infiniband/hw/qib/qib_iba7220.c qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
ctxt 4350 drivers/infiniband/hw/qib/qib_iba7220.c if (!rcd->ctxt) {
ctxt 4356 drivers/infiniband/hw/qib/qib_iba7220.c (rcd->ctxt - 1) * rcd->rcvegrcnt;
ctxt 783 drivers/infiniband/hw/qib/qib_iba7322.c enum qib_ureg regno, int ctxt)
ctxt 788 drivers/infiniband/hw/qib/qib_iba7322.c (dd->ureg_align * ctxt) + (dd->userbase ?
ctxt 804 drivers/infiniband/hw/qib/qib_iba7322.c enum qib_ureg regno, int ctxt)
ctxt 810 drivers/infiniband/hw/qib/qib_iba7322.c (dd->ureg_align * ctxt) + (dd->userbase ?
ctxt 825 drivers/infiniband/hw/qib/qib_iba7322.c enum qib_ureg regno, u64 value, int ctxt)
ctxt 832 drivers/infiniband/hw/qib/qib_iba7322.c dd->ureg_align * ctxt);
ctxt 837 drivers/infiniband/hw/qib/qib_iba7322.c dd->ureg_align * ctxt);
ctxt 894 drivers/infiniband/hw/qib/qib_iba7322.c const u16 regno, unsigned ctxt,
ctxt 897 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_kreg(dd, regno + ctxt, value);
ctxt 2700 drivers/infiniband/hw/qib/qib_iba7322.c if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
ctxt 2703 drivers/infiniband/hw/qib/qib_iba7322.c cspec->rhdr_cpu[rcd->ctxt] = cpu;
ctxt 2704 drivers/infiniband/hw/qib/qib_iba7322.c rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
ctxt 2709 drivers/infiniband/hw/qib/qib_iba7322.c "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
ctxt 3042 drivers/infiniband/hw/qib/qib_iba7322.c u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
ctxt 3055 drivers/infiniband/hw/qib/qib_iba7322.c dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
ctxt 3056 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
ctxt 3178 drivers/infiniband/hw/qib/qib_iba7322.c (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
ctxt 3466 drivers/infiniband/hw/qib/qib_iba7322.c unsigned ctxt;
ctxt 3468 drivers/infiniband/hw/qib/qib_iba7322.c ctxt = i - ARRAY_SIZE(irq_table);
ctxt 3470 drivers/infiniband/hw/qib/qib_iba7322.c arg = dd->rcd[ctxt];
ctxt 3473 drivers/infiniband/hw/qib/qib_iba7322.c if (qib_krcvq01_no_msi && ctxt < 2)
ctxt 3478 drivers/infiniband/hw/qib/qib_iba7322.c lsb = QIB_I_RCVAVAIL_LSB + ctxt;
ctxt 3811 drivers/infiniband/hw/qib/qib_iba7322.c u32 ctxt;
ctxt 3817 drivers/infiniband/hw/qib/qib_iba7322.c ctxt = rcd->ctxt;
ctxt 3823 drivers/infiniband/hw/qib/qib_iba7322.c ctxt * dd->rcvtidcnt * sizeof(*tidbase));
ctxt 4441 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
ctxt 4442 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
ctxt 4443 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
ctxt 4450 drivers/infiniband/hw/qib/qib_iba7322.c head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
ctxt 4454 drivers/infiniband/hw/qib/qib_iba7322.c tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
ctxt 4482 drivers/infiniband/hw/qib/qib_iba7322.c int ctxt)
ctxt 4503 drivers/infiniband/hw/qib/qib_iba7322.c if (ctxt < 0) {
ctxt 4507 drivers/infiniband/hw/qib/qib_iba7322.c mask = (1ULL << ctxt);
ctxt 4508 drivers/infiniband/hw/qib/qib_iba7322.c rcd = dd->rcd[ctxt];
ctxt 4518 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
ctxt 4520 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
ctxt 4544 drivers/infiniband/hw/qib/qib_iba7322.c if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
ctxt 4551 drivers/infiniband/hw/qib/qib_iba7322.c val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
ctxt 4552 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
ctxt 4556 drivers/infiniband/hw/qib/qib_iba7322.c val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
ctxt 4557 drivers/infiniband/hw/qib/qib_iba7322.c dd->rcd[ctxt]->head = val;
ctxt 4559 drivers/infiniband/hw/qib/qib_iba7322.c if (ctxt < dd->first_user_ctxt)
ctxt 4561 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
ctxt 4563 drivers/infiniband/hw/qib/qib_iba7322.c dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
ctxt 4565 drivers/infiniband/hw/qib/qib_iba7322.c val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
ctxt 4566 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
ctxt 4572 drivers/infiniband/hw/qib/qib_iba7322.c if (ctxt >= 0) {
ctxt 4573 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
ctxt 4574 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
ctxt 4577 drivers/infiniband/hw/qib/qib_iba7322.c TIDFLOW_ERRBITS, ctxt);
ctxt 6315 drivers/infiniband/hw/qib/qib_iba7322.c unsigned ctxt;
ctxt 6318 drivers/infiniband/hw/qib/qib_iba7322.c ctxt = (i % n) * dd->num_pports + pidx;
ctxt 6320 drivers/infiniband/hw/qib/qib_iba7322.c ctxt = (i % n) + 1;
ctxt 6322 drivers/infiniband/hw/qib/qib_iba7322.c ctxt = ppd->hw_pidx;
ctxt 6323 drivers/infiniband/hw/qib/qib_iba7322.c val |= ctxt << (5 * (i % 6));
ctxt 7009 drivers/infiniband/hw/qib/qib_iba7322.c if (rcd->ctxt < NUM_IB_PORTS) {
ctxt 7012 drivers/infiniband/hw/qib/qib_iba7322.c rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
ctxt 7020 drivers/infiniband/hw/qib/qib_iba7322.c (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
ctxt 165 drivers/infiniband/hw/qib/qib_init.c struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
ctxt 178 drivers/infiniband/hw/qib/qib_init.c rcd->ctxt = ctxt;
ctxt 179 drivers/infiniband/hw/qib/qib_init.c dd->rcd[ctxt] = rcd;
ctxt 181 drivers/infiniband/hw/qib/qib_init.c if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
ctxt 1293 drivers/infiniband/hw/qib/qib_init.c int ctxt;
ctxt 1331 drivers/infiniband/hw/qib/qib_init.c for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
ctxt 1332 drivers/infiniband/hw/qib/qib_init.c int ctxt_tidbase = ctxt * dd->rcvtidcnt;
ctxt 1362 drivers/infiniband/hw/qib/qib_init.c for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
ctxt 1363 drivers/infiniband/hw/qib/qib_init.c struct qib_ctxtdata *rcd = tmp[ctxt];
ctxt 1365 drivers/infiniband/hw/qib/qib_init.c tmp[ctxt] = NULL; /* debugging paranoia */
ctxt 1554 drivers/infiniband/hw/qib/qib_init.c gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
ctxt 1567 drivers/infiniband/hw/qib/qib_init.c amt, rcd->ctxt);
ctxt 1571 drivers/infiniband/hw/qib/qib_init.c if (rcd->ctxt >= dd->first_user_ctxt) {
ctxt 1600 drivers/infiniband/hw/qib/qib_init.c rcd->ctxt);
ctxt 206 drivers/infiniband/hw/qib/qib_intr.c rcd->ctxt);
ctxt 135 drivers/infiniband/hw/qib/qib_tx.c unsigned ctxt;
ctxt 139 drivers/infiniband/hw/qib/qib_tx.c for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
ctxt 140 drivers/infiniband/hw/qib/qib_tx.c rcd = dd->rcd[ctxt];
ctxt 458 drivers/infiniband/hw/qib/qib_tx.c unsigned ctxt;
ctxt 470 drivers/infiniband/hw/qib/qib_tx.c for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
ctxt 472 drivers/infiniband/hw/qib/qib_tx.c rcd = dd->rcd[ctxt];
ctxt 397 drivers/infiniband/hw/qib/qib_ud.c unsigned ctxt = ppd->hw_pidx;
ctxt 402 drivers/infiniband/hw/qib/qib_ud.c for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
ctxt 403 drivers/infiniband/hw/qib/qib_ud.c if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
ctxt 183 drivers/infiniband/hw/qib/qib_user_sdma.c qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
ctxt 204 drivers/infiniband/hw/qib/qib_user_sdma.c "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
ctxt 213 drivers/infiniband/hw/qib/qib_user_sdma.c "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
ctxt 1377 drivers/infiniband/hw/qib/qib_verbs.c unsigned ctxt = ppd->hw_pidx;
ctxt 1381 drivers/infiniband/hw/qib/qib_verbs.c if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
ctxt 1384 drivers/infiniband/hw/qib/qib_verbs.c ret = dd->rcd[ctxt]->pkeys[index];
ctxt 1503 drivers/infiniband/hw/qib/qib_verbs.c unsigned i, ctxt;
ctxt 1612 drivers/infiniband/hw/qib/qib_verbs.c ctxt = ppd->hw_pidx;
ctxt 1616 drivers/infiniband/hw/qib/qib_verbs.c dd->rcd[ctxt]->pkeys);
ctxt 127 drivers/media/usb/pvrusb2/pvrusb2-encoder.c static int pvr2_encoder_cmd(void *ctxt,
ctxt 141 drivers/media/usb/pvrusb2/pvrusb2-encoder.c struct pvr2_hdw *hdw = (struct pvr2_hdw *)ctxt;
ctxt 1162 drivers/net/ethernet/emulex/benet/be_cmds.c void *ctxt;
ctxt 1170 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt = &req->context;
ctxt 1179 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
ctxt 1182 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt, no_delay);
ctxt 1183 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
ctxt 1185 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
ctxt 1186 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
ctxt 1187 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
ctxt 1197 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt, coalesce_wm);
ctxt 1198 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
ctxt 1200 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
ctxt 1202 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
ctxt 1203 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
ctxt 1204 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
ctxt 1207 drivers/net/ethernet/emulex/benet/be_cmds.c be_dws_cpu_to_le(ctxt, sizeof(req->context));
ctxt 1240 drivers/net/ethernet/emulex/benet/be_cmds.c void *ctxt;
ctxt 1248 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt = &req->context;
ctxt 1256 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
ctxt 1257 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
ctxt 1259 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
ctxt 1264 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
ctxt 1266 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
ctxt 1268 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt, cq->id);
ctxt 1270 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt, 1);
ctxt 1282 drivers/net/ethernet/emulex/benet/be_cmds.c be_dws_cpu_to_le(ctxt, sizeof(req->context));
ctxt 1305 drivers/net/ethernet/emulex/benet/be_cmds.c void *ctxt;
ctxt 1313 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt = &req->context;
ctxt 1321 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
ctxt 1322 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
ctxt 1324 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
ctxt 1326 drivers/net/ethernet/emulex/benet/be_cmds.c be_dws_cpu_to_le(ctxt, sizeof(req->context));
ctxt 2342 drivers/net/ethernet/emulex/benet/be_cmds.c void *ctxt = NULL;
ctxt 2361 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt = &req->context;
ctxt 2363 drivers/net/ethernet/emulex/benet/be_cmds.c write_length, ctxt, data_size);
ctxt 2367 drivers/net/ethernet/emulex/benet/be_cmds.c eof, ctxt, 1);
ctxt 2370 drivers/net/ethernet/emulex/benet/be_cmds.c eof, ctxt, 0);
ctxt 2372 drivers/net/ethernet/emulex/benet/be_cmds.c be_dws_cpu_to_le(ctxt, sizeof(req->context));
ctxt 3882 drivers/net/ethernet/emulex/benet/be_cmds.c void *ctxt;
ctxt 3898 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt = &req->context;
ctxt 3905 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
ctxt 3907 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
ctxt 3908 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
ctxt 3912 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt, adapter->hba_port_num);
ctxt 3913 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
ctxt 3915 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt, hsw_mode);
ctxt 3921 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt, spoofchk);
ctxt 3923 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt, spoofchk);
ctxt 3940 drivers/net/ethernet/emulex/benet/be_cmds.c void *ctxt;
ctxt 3953 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt = &req->context;
ctxt 3961 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt, intf_id);
ctxt 3962 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
ctxt 3966 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt, adapter->hba_port_num);
ctxt 3967 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
ctxt 360 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c struct hinic_api_cmd_cell_ctxt *ctxt;
ctxt 376 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c ctxt = &chain->cell_ctxt[chain->prod_idx];
ctxt 378 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c chain->curr_node = ctxt->cell_vaddr;
ctxt 685 drivers/net/ethernet/intel/i40e/i40e_client.c struct i40e_vsi_context ctxt;
ctxt 693 drivers/net/ethernet/intel/i40e/i40e_client.c ctxt.seid = pf->main_vsi_seid;
ctxt 694 drivers/net/ethernet/intel/i40e/i40e_client.c ctxt.pf_num = pf->hw.pf_id;
ctxt 695 drivers/net/ethernet/intel/i40e/i40e_client.c err = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
ctxt 696 drivers/net/ethernet/intel/i40e/i40e_client.c ctxt.flags = I40E_AQ_VSI_TYPE_PF;
ctxt 708 drivers/net/ethernet/intel/i40e/i40e_client.c ctxt.info.valid_sections =
ctxt 710 drivers/net/ethernet/intel/i40e/i40e_client.c ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
ctxt 713 drivers/net/ethernet/intel/i40e/i40e_client.c ctxt.info.valid_sections =
ctxt 715 drivers/net/ethernet/intel/i40e/i40e_client.c ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
ctxt 724 drivers/net/ethernet/intel/i40e/i40e_client.c err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
ctxt 1675 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context *ctxt,
ctxt 1724 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
ctxt 1725 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
ctxt 1726 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
ctxt 1727 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.valid_sections |= cpu_to_le16(sections);
ctxt 1763 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context *ctxt,
ctxt 1857 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
ctxt 1873 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.up_enable_bits = enabled_tc;
ctxt 1876 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.mapping_flags |=
ctxt 1879 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.queue_mapping[i] =
ctxt 1882 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.mapping_flags |=
ctxt 1884 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
ctxt 1886 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.valid_sections |= cpu_to_le16(sections);
ctxt 2685 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context ctxt;
ctxt 2701 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.seid = vsi->seid;
ctxt 2702 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info = vsi->info;
ctxt 2703 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
ctxt 2719 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context ctxt;
ctxt 2736 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.seid = vsi->seid;
ctxt 2737 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info = vsi->info;
ctxt 2738 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
ctxt 2959 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context ctxt;
ctxt 2968 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.seid = vsi->seid;
ctxt 2969 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info = vsi->info;
ctxt 2970 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
ctxt 5333 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context *ctxt)
ctxt 5339 drivers/net/ethernet/intel/i40e/i40e_main.c vsi->info.mapping_flags = ctxt->info.mapping_flags;
ctxt 5341 drivers/net/ethernet/intel/i40e/i40e_main.c &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
ctxt 5342 drivers/net/ethernet/intel/i40e/i40e_main.c memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
ctxt 5364 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context ctxt;
ctxt 5418 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.seid = vsi->seid;
ctxt 5419 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.pf_num = vsi->back->hw.pf_id;
ctxt 5420 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.vf_num = 0;
ctxt 5421 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.uplink_seid = vsi->uplink_seid;
ctxt 5422 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info = vsi->info;
ctxt 5424 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
ctxt 5428 drivers/net/ethernet/intel/i40e/i40e_main.c i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
ctxt 5446 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections |=
ctxt 5448 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
ctxt 5454 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
ctxt 5463 drivers/net/ethernet/intel/i40e/i40e_main.c i40e_vsi_update_queue_map(vsi, &ctxt);
ctxt 5797 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context *ctxt,
ctxt 5819 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
ctxt 5821 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
ctxt 5822 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
ctxt 5823 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
ctxt 5824 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt->info.valid_sections |= cpu_to_le16(sections);
ctxt 5839 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context ctxt;
ctxt 5849 drivers/net/ethernet/intel/i40e/i40e_main.c memset(&ctxt, 0, sizeof(ctxt));
ctxt 5850 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.pf_num = hw->pf_id;
ctxt 5851 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.vf_num = 0;
ctxt 5852 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.uplink_seid = uplink_seid;
ctxt 5853 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt 5855 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
ctxt 5858 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections |=
ctxt 5860 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.switch_id =
ctxt 5865 drivers/net/ethernet/intel/i40e/i40e_main.c i40e_channel_setup_queue_map(pf, &ctxt, ch);
ctxt 5868 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
ctxt 5882 drivers/net/ethernet/intel/i40e/i40e_main.c ch->seid = ctxt.seid;
ctxt 5883 drivers/net/ethernet/intel/i40e/i40e_main.c ch->vsi_number = ctxt.vsi_number;
ctxt 5884 drivers/net/ethernet/intel/i40e/i40e_main.c ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
ctxt 5890 drivers/net/ethernet/intel/i40e/i40e_main.c ch->info.mapping_flags = ctxt.info.mapping_flags;
ctxt 5892 drivers/net/ethernet/intel/i40e/i40e_main.c &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
ctxt 5893 drivers/net/ethernet/intel/i40e/i40e_main.c memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
ctxt 5894 drivers/net/ethernet/intel/i40e/i40e_main.c sizeof(ctxt.info.tc_mapping));
ctxt 7098 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context ctxt;
ctxt 7118 drivers/net/ethernet/intel/i40e/i40e_main.c memset(&ctxt, 0, sizeof(ctxt));
ctxt 7119 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.seid = vsi->seid;
ctxt 7120 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.pf_num = vsi->back->hw.pf_id;
ctxt 7121 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.vf_num = 0;
ctxt 7122 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.uplink_seid = vsi->uplink_seid;
ctxt 7123 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info = vsi->info;
ctxt 7124 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
ctxt 7125 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
ctxt 7126 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
ctxt 7127 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections |= cpu_to_le16(sections);
ctxt 7147 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
ctxt 7156 drivers/net/ethernet/intel/i40e/i40e_main.c i40e_vsi_update_queue_map(vsi, &ctxt);
ctxt 9377 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context ctxt;
ctxt 9380 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.seid = pf->main_vsi_seid;
ctxt 9381 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.pf_num = pf->hw.pf_id;
ctxt 9382 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.vf_num = 0;
ctxt 9383 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
ctxt 9391 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.flags = I40E_AQ_VSI_TYPE_PF;
ctxt 9392 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt 9393 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
ctxt 9395 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
ctxt 9413 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context ctxt;
ctxt 9416 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.seid = pf->main_vsi_seid;
ctxt 9417 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.pf_num = pf->hw.pf_id;
ctxt 9418 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.vf_num = 0;
ctxt 9419 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
ctxt 9427 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.flags = I40E_AQ_VSI_TYPE_PF;
ctxt 9428 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt 9429 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
ctxt 9431 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
ctxt 13083 drivers/net/ethernet/intel/i40e/i40e_main.c struct i40e_vsi_context ctxt;
ctxt 13091 drivers/net/ethernet/intel/i40e/i40e_main.c memset(&ctxt, 0, sizeof(ctxt));
ctxt 13099 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.seid = pf->main_vsi_seid;
ctxt 13100 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.pf_num = pf->hw.pf_id;
ctxt 13101 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.vf_num = 0;
ctxt 13102 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
ctxt 13103 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.flags = I40E_AQ_VSI_TYPE_PF;
ctxt 13112 drivers/net/ethernet/intel/i40e/i40e_main.c vsi->info = ctxt.info;
ctxt 13115 drivers/net/ethernet/intel/i40e/i40e_main.c vsi->seid = ctxt.seid;
ctxt 13116 drivers/net/ethernet/intel/i40e/i40e_main.c vsi->id = ctxt.vsi_number;
ctxt 13125 drivers/net/ethernet/intel/i40e/i40e_main.c memset(&ctxt, 0, sizeof(ctxt));
ctxt 13126 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.seid = pf->main_vsi_seid;
ctxt 13127 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.pf_num = pf->hw.pf_id;
ctxt 13128 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.vf_num = 0;
ctxt 13129 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections |=
ctxt 13131 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.switch_id =
ctxt 13133 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
ctxt 13148 drivers/net/ethernet/intel/i40e/i40e_main.c memset(&ctxt, 0, sizeof(ctxt));
ctxt 13149 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.seid = pf->main_vsi_seid;
ctxt 13150 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.pf_num = pf->hw.pf_id;
ctxt 13151 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.vf_num = 0;
ctxt 13152 drivers/net/ethernet/intel/i40e/i40e_main.c i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
ctxt 13153 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
ctxt 13164 drivers/net/ethernet/intel/i40e/i40e_main.c i40e_vsi_update_queue_map(vsi, &ctxt);
ctxt 13189 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.pf_num = hw->pf_id;
ctxt 13190 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.vf_num = 0;
ctxt 13191 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.uplink_seid = vsi->uplink_seid;
ctxt 13192 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt 13193 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.flags = I40E_AQ_VSI_TYPE_PF;
ctxt 13196 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections |=
ctxt 13198 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.switch_id =
ctxt 13201 drivers/net/ethernet/intel/i40e/i40e_main.c i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
ctxt 13205 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.pf_num = hw->pf_id;
ctxt 13206 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.vf_num = 0;
ctxt 13207 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.uplink_seid = vsi->uplink_seid;
ctxt 13208 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt 13209 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
ctxt 13215 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections |=
ctxt 13217 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.switch_id =
ctxt 13222 drivers/net/ethernet/intel/i40e/i40e_main.c i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
ctxt 13226 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.pf_num = hw->pf_id;
ctxt 13227 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
ctxt 13228 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.uplink_seid = vsi->uplink_seid;
ctxt 13229 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt 13230 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.flags = I40E_AQ_VSI_TYPE_VF;
ctxt 13236 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections |=
ctxt 13238 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.switch_id =
ctxt 13243 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections |=
ctxt 13245 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.queueing_opt_flags |=
ctxt 13250 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt 13251 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
ctxt 13253 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.valid_sections |=
ctxt 13255 drivers/net/ethernet/intel/i40e/i40e_main.c ctxt.info.sec_flags |=
ctxt 13260 drivers/net/ethernet/intel/i40e/i40e_main.c i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
ctxt 13272 drivers/net/ethernet/intel/i40e/i40e_main.c ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
ctxt 13282 drivers/net/ethernet/intel/i40e/i40e_main.c vsi->info = ctxt.info;
ctxt 13284 drivers/net/ethernet/intel/i40e/i40e_main.c vsi->seid = ctxt.seid;
ctxt 13285 drivers/net/ethernet/intel/i40e/i40e_main.c vsi->id = ctxt.vsi_number;
ctxt 4445 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c struct i40e_vsi_context ctxt;
ctxt 4474 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c memset(&ctxt, 0, sizeof(ctxt));
ctxt 4475 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
ctxt 4476 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ctxt.pf_num = pf->hw.pf_id;
ctxt 4477 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
ctxt 4479 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
ctxt 4481 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
ctxt 421 drivers/net/ethernet/intel/ice/ice_lib.c struct ice_vsi_ctx *ctxt;
ctxt 424 drivers/net/ethernet/intel/ice/ice_lib.c ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
ctxt 425 drivers/net/ethernet/intel/ice/ice_lib.c if (!ctxt)
ctxt 429 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->vf_num = vsi->vf_id;
ctxt 430 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->vsi_num = vsi->vsi_num;
ctxt 432 drivers/net/ethernet/intel/ice/ice_lib.c memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
ctxt 434 drivers/net/ethernet/intel/ice/ice_lib.c status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
ctxt 439 drivers/net/ethernet/intel/ice/ice_lib.c devm_kfree(&pf->pdev->dev, ctxt);
ctxt 829 drivers/net/ethernet/intel/ice/ice_lib.c static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
ctxt 833 drivers/net/ethernet/intel/ice/ice_lib.c memset(&ctxt->info, 0, sizeof(ctxt->info));
ctxt 835 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->alloc_from_pool = true;
ctxt 837 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
ctxt 839 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
ctxt 844 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
ctxt 856 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.ingress_table = cpu_to_le32(table);
ctxt 857 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.egress_table = cpu_to_le32(table);
ctxt 859 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.outer_up_table = cpu_to_le32(table);
ctxt 868 drivers/net/ethernet/intel/ice/ice_lib.c static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
ctxt 935 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.tc_mapping[i] = 0;
ctxt 951 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
ctxt 976 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
ctxt 981 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
ctxt 982 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
ctxt 990 drivers/net/ethernet/intel/ice/ice_lib.c static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ctxt 1016 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
ctxt 1033 drivers/net/ethernet/intel/ice/ice_lib.c struct ice_vsi_ctx *ctxt;
ctxt 1036 drivers/net/ethernet/intel/ice/ice_lib.c ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
ctxt 1037 drivers/net/ethernet/intel/ice/ice_lib.c if (!ctxt)
ctxt 1040 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info = vsi->info;
ctxt 1045 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->flags = ICE_AQ_VSI_TYPE_PF;
ctxt 1048 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->flags = ICE_AQ_VSI_TYPE_VF;
ctxt 1050 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
ctxt 1056 drivers/net/ethernet/intel/ice/ice_lib.c ice_set_dflt_vsi_ctx(ctxt);
ctxt 1059 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
ctxt 1063 drivers/net/ethernet/intel/ice/ice_lib.c ice_set_rss_vsi_ctx(ctxt, vsi);
ctxt 1065 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.sw_id = vsi->port_info->sw_id;
ctxt 1066 drivers/net/ethernet/intel/ice/ice_lib.c ice_vsi_setup_q_map(vsi, ctxt);
ctxt 1070 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.valid_sections |=
ctxt 1072 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.sec_flags |=
ctxt 1078 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
ctxt 1079 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.valid_sections |=
ctxt 1083 drivers/net/ethernet/intel/ice/ice_lib.c ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
ctxt 1091 drivers/net/ethernet/intel/ice/ice_lib.c vsi->info = ctxt->info;
ctxt 1094 drivers/net/ethernet/intel/ice/ice_lib.c vsi->vsi_num = ctxt->vsi_num;
ctxt 1096 drivers/net/ethernet/intel/ice/ice_lib.c devm_kfree(&pf->pdev->dev, ctxt);
ctxt 2033 drivers/net/ethernet/intel/ice/ice_lib.c struct ice_vsi_ctx *ctxt;
ctxt 2037 drivers/net/ethernet/intel/ice/ice_lib.c ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
ctxt 2038 drivers/net/ethernet/intel/ice/ice_lib.c if (!ctxt)
ctxt 2045 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt 2048 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
ctxt 2051 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
ctxt 2053 drivers/net/ethernet/intel/ice/ice_lib.c status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
ctxt 2061 drivers/net/ethernet/intel/ice/ice_lib.c vsi->info.vlan_flags = ctxt->info.vlan_flags;
ctxt 2063 drivers/net/ethernet/intel/ice/ice_lib.c devm_kfree(dev, ctxt);
ctxt 2076 drivers/net/ethernet/intel/ice/ice_lib.c struct ice_vsi_ctx *ctxt;
ctxt 2080 drivers/net/ethernet/intel/ice/ice_lib.c ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
ctxt 2081 drivers/net/ethernet/intel/ice/ice_lib.c if (!ctxt)
ctxt 2090 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
ctxt 2093 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
ctxt 2096 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt 2098 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
ctxt 2100 drivers/net/ethernet/intel/ice/ice_lib.c status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
ctxt 2108 drivers/net/ethernet/intel/ice/ice_lib.c vsi->info.vlan_flags = ctxt->info.vlan_flags;
ctxt 2110 drivers/net/ethernet/intel/ice/ice_lib.c devm_kfree(dev, ctxt);
ctxt 2306 drivers/net/ethernet/intel/ice/ice_lib.c struct ice_vsi_ctx *ctxt;
ctxt 2316 drivers/net/ethernet/intel/ice/ice_lib.c ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
ctxt 2317 drivers/net/ethernet/intel/ice/ice_lib.c if (!ctxt)
ctxt 2320 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info = vsi->info;
ctxt 2323 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.sec_flags |=
ctxt 2326 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
ctxt 2328 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.sec_flags &=
ctxt 2331 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
ctxt 2335 drivers/net/ethernet/intel/ice/ice_lib.c ctxt->info.valid_sections =
ctxt 2339 drivers/net/ethernet/intel/ice/ice_lib.c status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
ctxt 2347 drivers/net/ethernet/intel/ice/ice_lib.c vsi->info.sec_flags = ctxt->info.sec_flags;
ctxt 2348 drivers/net/ethernet/intel/ice/ice_lib.c vsi->info.sw_flags2 = ctxt->info.sw_flags2;
ctxt 2350 drivers/net/ethernet/intel/ice/ice_lib.c devm_kfree(dev, ctxt);
ctxt 2354 drivers/net/ethernet/intel/ice/ice_lib.c devm_kfree(dev, ctxt);
ctxt 4531 drivers/net/ethernet/intel/ice/ice_main.c struct ice_vsi_ctx *ctxt;
ctxt 4537 drivers/net/ethernet/intel/ice/ice_main.c ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
ctxt 4538 drivers/net/ethernet/intel/ice/ice_main.c if (!ctxt)
ctxt 4541 drivers/net/ethernet/intel/ice/ice_main.c ctxt->info = vsi->info;
ctxt 4545 drivers/net/ethernet/intel/ice/ice_main.c ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
ctxt 4548 drivers/net/ethernet/intel/ice/ice_main.c ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
ctxt 4549 drivers/net/ethernet/intel/ice/ice_main.c ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
ctxt 4551 drivers/net/ethernet/intel/ice/ice_main.c status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
ctxt 4559 drivers/net/ethernet/intel/ice/ice_main.c vsi_props->sw_flags = ctxt->info.sw_flags;
ctxt 4562 drivers/net/ethernet/intel/ice/ice_main.c devm_kfree(dev, ctxt);
ctxt 429 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
ctxt 431 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
ctxt 434 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ctxt->info.pvid = cpu_to_le16(vid);
ctxt 435 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
ctxt 436 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
ctxt 444 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
ctxt 446 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
ctxt 447 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
ctxt 448 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
ctxt 449 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
ctxt 463 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c struct ice_vsi_ctx *ctxt;
ctxt 467 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
ctxt 468 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c if (!ctxt)
ctxt 471 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ctxt->info = vsi->info;
ctxt 473 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
ctxt 475 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ice_vsi_kill_pvid_fill_ctxt(ctxt);
ctxt 477 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
ctxt 485 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c vsi->info = ctxt->info;
ctxt 487 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c devm_kfree(dev, ctxt);
ctxt 244 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c u32 own, ctxt;
ctxt 248 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
ctxt 251 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c if (likely(!own && ctxt)) {
ctxt 1637 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt 1640 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt 1644 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h struct iwl_mvm_phy_ctxt *ctxt);
ctxt 1646 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h struct iwl_mvm_phy_ctxt *ctxt);
ctxt 126 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
ctxt 132 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
ctxt 133 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c ctxt->color));
ctxt 189 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c struct iwl_mvm_phy_ctxt *ctxt,
ctxt 199 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
ctxt 214 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt 219 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c ctxt->ref);
ctxt 222 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c ctxt->channel = chandef->chan;
ctxt 224 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
ctxt 233 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
ctxt 236 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c ctxt->ref++;
ctxt 244 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt 254 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c ctxt->channel->band != chandef->chan->band) {
ctxt 258 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
ctxt 268 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c ctxt->channel = chandef->chan;
ctxt 269 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c ctxt->width = chandef->width;
ctxt 270 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
ctxt 275 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
ctxt 279 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c if (WARN_ON_ONCE(!ctxt))
ctxt 282 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c ctxt->ref--;
ctxt 289 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c if (ctxt->ref == 0) {
ctxt 304 drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
ctxt 1111 drivers/pci/controller/pci-hyperv.c } ctxt;
ctxt 1113 drivers/pci/controller/pci-hyperv.c memset(&ctxt, 0, sizeof(ctxt));
ctxt 1114 drivers/pci/controller/pci-hyperv.c int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
ctxt 1120 drivers/pci/controller/pci-hyperv.c (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
ctxt 1368 drivers/pci/controller/pci-hyperv.c } __packed ctxt;
ctxt 1392 drivers/pci/controller/pci-hyperv.c memset(&ctxt, 0, sizeof(ctxt));
ctxt 1394 drivers/pci/controller/pci-hyperv.c ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
ctxt 1395 drivers/pci/controller/pci-hyperv.c ctxt.pci_pkt.compl_ctxt = &comp;
ctxt 1399 drivers/pci/controller/pci-hyperv.c size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
ctxt 1406 drivers/pci/controller/pci-hyperv.c size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
ctxt 1422 drivers/pci/controller/pci-hyperv.c ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
ctxt 1423 drivers/pci/controller/pci-hyperv.c size, (unsigned long)&ctxt.pci_pkt,
ctxt 2169 drivers/pci/controller/pci-hyperv.c } ctxt;
ctxt 2198 drivers/pci/controller/pci-hyperv.c memset(&ctxt, 0, sizeof(ctxt));
ctxt 2199 drivers/pci/controller/pci-hyperv.c ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
ctxt 2203 drivers/pci/controller/pci-hyperv.c sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
ctxt 1223 drivers/scsi/aacraid/aacraid.h typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
ctxt 2686 drivers/scsi/aacraid/aacraid.h int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
ctxt 2688 drivers/scsi/aacraid/aacraid.h fib_callback callback, void *ctxt);
ctxt 784 drivers/scsi/be2iscsi/be_cmds.c void *ctxt = &req->context;
ctxt 798 drivers/scsi/be2iscsi/be_cmds.c ctxt, coalesce_wm);
ctxt 799 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
ctxt 800 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
ctxt 802 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
ctxt 803 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
ctxt 804 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
ctxt 805 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
ctxt 806 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
ctxt 807 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
ctxt 813 drivers/scsi/be2iscsi/be_cmds.c ctxt, coalesce_wm);
ctxt 813 drivers/scsi/be2iscsi/be_cmds.c ctxt, coalesce_wm);
ctxt 815 drivers/scsi/be2iscsi/be_cmds.c ctxt, no_delay);
ctxt 816 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
ctxt 818 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
ctxt 819 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
ctxt 820 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
ctxt 821 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
ctxt 824 drivers/scsi/be2iscsi/be_cmds.c be_dws_cpu_to_le(ctxt, sizeof(req->context));
ctxt 858 drivers/scsi/be2iscsi/be_cmds.c void *ctxt;
ctxt 866 drivers/scsi/be2iscsi/be_cmds.c ctxt = &req->context;
ctxt 878 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
ctxt 880 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
ctxt 881 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
ctxt 883 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
ctxt 885 drivers/scsi/be2iscsi/be_cmds.c be_dws_cpu_to_le(ctxt, sizeof(req->context));
ctxt 984 drivers/scsi/be2iscsi/be_cmds.c void *ctxt = &req->context;
ctxt 1004 drivers/scsi/be2iscsi/be_cmds.c rx_pdid, ctxt, 0);
ctxt 1006 drivers/scsi/be2iscsi/be_cmds.c rx_pdid_valid, ctxt, 1);
ctxt 1008 drivers/scsi/be2iscsi/be_cmds.c pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
ctxt 1010 drivers/scsi/be2iscsi/be_cmds.c ring_size, ctxt,
ctxt 1014 drivers/scsi/be2iscsi/be_cmds.c default_buffer_size, ctxt, entry_size);
ctxt 1016 drivers/scsi/be2iscsi/be_cmds.c cq_id_recv, ctxt, cq->id);
ctxt 1019 drivers/scsi/be2iscsi/be_cmds.c rx_pdid, ctxt, 0);
ctxt 1021 drivers/scsi/be2iscsi/be_cmds.c rx_pdid_valid, ctxt, 1);
ctxt 1023 drivers/scsi/be2iscsi/be_cmds.c ring_size, ctxt,
ctxt 1027 drivers/scsi/be2iscsi/be_cmds.c default_buffer_size, ctxt, entry_size);
ctxt 1029 drivers/scsi/be2iscsi/be_cmds.c cq_id_recv, ctxt, cq->id);
ctxt 1032 drivers/scsi/be2iscsi/be_cmds.c be_dws_cpu_to_le(ctxt, sizeof(req->context));
ctxt 366 drivers/xen/xen-acpi-memhotplug.c u32 level, void *ctxt, void **retv)
ctxt 382 drivers/xen/xen-acpi-memhotplug.c u32 level, void *ctxt, void **retv)
ctxt 612 fs/cifs/smb2pdu.c static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
ctxt 614 fs/cifs/smb2pdu.c unsigned int len = le16_to_cpu(ctxt->DataLength);
ctxt 621 fs/cifs/smb2pdu.c if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
ctxt 623 fs/cifs/smb2pdu.c if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
ctxt 628 fs/cifs/smb2pdu.c struct smb2_compression_capabilities_context *ctxt)
ctxt 630 fs/cifs/smb2pdu.c unsigned int len = le16_to_cpu(ctxt->DataLength);
ctxt 637 fs/cifs/smb2pdu.c if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
ctxt 641 fs/cifs/smb2pdu.c if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) {
ctxt 645 fs/cifs/smb2pdu.c server->compress_algorithm = ctxt->CompressionAlgorithms[0];
ctxt 649 fs/cifs/smb2pdu.c struct smb2_encryption_neg_context *ctxt)
ctxt 651 fs/cifs/smb2pdu.c unsigned int len = le16_to_cpu(ctxt->DataLength);
ctxt 659 fs/cifs/smb2pdu.c if (le16_to_cpu(ctxt->CipherCount) != 1) {
ctxt 663 fs/cifs/smb2pdu.c cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
ctxt 664 fs/cifs/smb2pdu.c if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
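
The be2iscsi entries above program hardware context blobs through AMAP_SET_BITS() and then byte-swap the whole context with be_dws_cpu_to_le(). As a rough illustration of what such a bit-range setter does, here is a minimal userspace model with invented names; the driver's real macro derives the offset and width from its amap_* wrapper structs rather than taking them as arguments:

	#include <stdint.h>
	#include <stdio.h>

	/* Model: set a "size"-bit field (size <= 32) starting at bit "offset"
	 * inside a context blob viewed as 32-bit words.  The blob must keep
	 * one spare word past the last field so the two-word access below
	 * stays in bounds. */
	static void ctxt_set_bits(uint32_t *ctxt, int offset, int size, uint32_t value)
	{
		int word = offset / 32;
		int shift = offset % 32;
		uint64_t mask = ((1ULL << size) - 1) << shift;
		uint64_t cur;

		/* Work on two adjacent words so a field may straddle a
		 * word boundary. */
		cur = ctxt[word] | ((uint64_t)ctxt[word + 1] << 32);
		cur = (cur & ~mask) | (((uint64_t)value << shift) & mask);
		ctxt[word] = (uint32_t)cur;
		ctxt[word + 1] = (uint32_t)(cur >> 32);
	}

	int main(void)
	{
		uint32_t ctxt[4] = { 0 };

		ctxt_set_bits(ctxt, 30, 4, 0xf);	/* straddles words 0 and 1 */
		printf("%08x %08x\n", ctxt[0], ctxt[1]);	/* c0000000 00000003 */
		return 0;
	}

Only after every field has been set does the driver convert the finished blob to the adapter's little-endian layout in one pass, which is why be_dws_cpu_to_le() appears once at the end of each cluster of AMAP_SET_BITS() calls.
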
ctxt 665 fs/cifs/smb2pdu.c (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM)) {
ctxt 669 fs/cifs/smb2pdu.c server->cipher_type = ctxt->Ciphers[0];
ctxt 157 fs/nilfs2/btnode.c struct nilfs_btnode_chkey_ctxt *ctxt)
ctxt 161 fs/nilfs2/btnode.c __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
ctxt 167 fs/nilfs2/btnode.c obh = ctxt->bh;
ctxt 168 fs/nilfs2/btnode.c ctxt->newbh = NULL;
ctxt 207 fs/nilfs2/btnode.c ctxt->newbh = nbh;
ctxt 220 fs/nilfs2/btnode.c struct nilfs_btnode_chkey_ctxt *ctxt)
ctxt 222 fs/nilfs2/btnode.c struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
ctxt 223 fs/nilfs2/btnode.c __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
ctxt 250 fs/nilfs2/btnode.c ctxt->bh = nbh;
ctxt 260 fs/nilfs2/btnode.c struct nilfs_btnode_chkey_ctxt *ctxt)
ctxt 262 fs/nilfs2/btnode.c struct buffer_head *nbh = ctxt->newbh;
ctxt 263 fs/nilfs2/btnode.c __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
ctxt 270 fs/nilfs2/btnode.c unlock_page(ctxt->bh->b_page);
ctxt 564 fs/ocfs2/alloc.c static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
ctxt 3661 fs/ocfs2/alloc.c struct ocfs2_merge_ctxt *ctxt)
ctxt 3667 fs/ocfs2/alloc.c BUG_ON(ctxt->c_contig_type == CONTIG_NONE);
ctxt 3669 fs/ocfs2/alloc.c if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
ctxt 3694 fs/ocfs2/alloc.c if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) {
ctxt 3698 fs/ocfs2/alloc.c BUG_ON(!ctxt->c_split_covers_rec);
ctxt 3781 fs/ocfs2/alloc.c if (ctxt->c_contig_type == CONTIG_RIGHT) {
ctxt 3799 fs/ocfs2/alloc.c if (ctxt->c_split_covers_rec) {
ctxt 4355 fs/ocfs2/alloc.c struct ocfs2_merge_ctxt *ctxt)
ctxt 4474 fs/ocfs2/alloc.c ctxt->c_contig_type = ret;
ctxt 5070 fs/ocfs2/alloc.c struct ocfs2_merge_ctxt ctxt;
ctxt 5083 fs/ocfs2/alloc.c &ctxt);
ctxt 5106 fs/ocfs2/alloc.c ctxt.c_split_covers_rec = 1;
ctxt 5108 fs/ocfs2/alloc.c ctxt.c_split_covers_rec = 0;
ctxt 5110 fs/ocfs2/alloc.c ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]);
ctxt 5112 fs/ocfs2/alloc.c trace_ocfs2_split_extent(split_index, ctxt.c_contig_type,
ctxt 5113 fs/ocfs2/alloc.c ctxt.c_has_empty_extent,
ctxt 5114 fs/ocfs2/alloc.c ctxt.c_split_covers_rec);
ctxt 5116 fs/ocfs2/alloc.c if (ctxt.c_contig_type == CONTIG_NONE) {
ctxt 5117 fs/ocfs2/alloc.c if (ctxt.c_split_covers_rec)
ctxt 5129 fs/ocfs2/alloc.c dealloc, &ctxt);
ctxt 6458 fs/ocfs2/alloc.c int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
ctxt 6475 fs/ocfs2/alloc.c item->free_next = ctxt->c_global_allocator;
ctxt 6477 fs/ocfs2/alloc.c ctxt->c_global_allocator = item;
ctxt 6534 fs/ocfs2/alloc.c struct ocfs2_cached_dealloc_ctxt *ctxt)
ctxt 6539 fs/ocfs2/alloc.c if (!ctxt)
ctxt 6542 fs/ocfs2/alloc.c while (ctxt->c_first_suballocator) {
ctxt 6543 fs/ocfs2/alloc.c fl = ctxt->c_first_suballocator;
ctxt 6558 fs/ocfs2/alloc.c ctxt->c_first_suballocator = fl->f_next_suballocator;
ctxt 6562 fs/ocfs2/alloc.c if (ctxt->c_global_allocator) {
ctxt 6564 fs/ocfs2/alloc.c ctxt->c_global_allocator);
ctxt 6570 fs/ocfs2/alloc.c ctxt->c_global_allocator = NULL;
ctxt 6579 fs/ocfs2/alloc.c struct ocfs2_cached_dealloc_ctxt *ctxt)
ctxt 6581 fs/ocfs2/alloc.c struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator;
ctxt 6595 fs/ocfs2/alloc.c fl->f_next_suballocator = ctxt->c_first_suballocator;
ctxt 6597 fs/ocfs2/alloc.c ctxt->c_first_suballocator = fl;
ctxt 6606 fs/ocfs2/alloc.c struct ocfs2_cached_dealloc_ctxt *ctxt)
ctxt 6608 fs/ocfs2/alloc.c struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator;
ctxt 6622 fs/ocfs2/alloc.c fl = ctxt->c_first_suballocator;
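
The fs/nilfs2/btnode.c entries above thread a single nilfs_btnode_chkey_ctxt through a prepare/commit/abort triple, so state acquired in the prepare step (ctxt->newbh) survives until the caller either commits or aborts. A minimal sketch of that calling shape, with invented names and malloc() standing in for buffer_head handling:

	#include <stdlib.h>

	struct chkey_ctxt {
		unsigned long oldkey, newkey;
		void *old_buf;
		void *new_buf;	/* acquired in prepare, consumed or released later */
	};

	static int chkey_prepare(struct chkey_ctxt *ctxt)
	{
		ctxt->new_buf = malloc(64);	/* may fail before anything is visible */
		return ctxt->new_buf ? 0 : -1;
	}

	static void chkey_commit(struct chkey_ctxt *ctxt)
	{
		/* new_buf replaces old_buf under the new key */
		free(ctxt->old_buf);
		ctxt->old_buf = ctxt->new_buf;
		ctxt->new_buf = NULL;
	}

	static void chkey_abort(struct chkey_ctxt *ctxt)
	{
		free(ctxt->new_buf);	/* undo the prepare step */
		ctxt->new_buf = NULL;
	}

The caller runs prepare first, then commit on success or abort on failure, so neither path leaks the intermediate buffer.
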
ctxt 6750 fs/ocfs2/alloc.c int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
ctxt 6758 fs/ocfs2/alloc.c fl = ocfs2_find_per_slot_free_list(type, slot, ctxt);
ctxt 6788 fs/ocfs2/alloc.c static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
ctxt 6791 fs/ocfs2/alloc.c return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE,
ctxt 198 fs/ocfs2/alloc.h int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
ctxt 200 fs/ocfs2/alloc.h int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
ctxt 208 fs/ocfs2/alloc.h struct ocfs2_cached_dealloc_ctxt *ctxt);
ctxt 380 fs/ocfs2/cluster/netdebug.c static int sc_common_open(struct file *file, int ctxt)
ctxt 395 fs/ocfs2/cluster/netdebug.c sd->dbg_ctxt = ctxt;
ctxt 1568 fs/ocfs2/dlm/dlmdomain.c struct domain_join_ctxt *ctxt,
ctxt 1581 fs/ocfs2/dlm/dlmdomain.c ret = memcmp(ctxt->live_map, dlm->live_nodes_map,
ctxt 1594 fs/ocfs2/dlm/dlmdomain.c struct domain_join_ctxt *ctxt;
ctxt 1599 fs/ocfs2/dlm/dlmdomain.c ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
ctxt 1600 fs/ocfs2/dlm/dlmdomain.c if (!ctxt) {
ctxt 1612 fs/ocfs2/dlm/dlmdomain.c memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map));
ctxt 1619 fs/ocfs2/dlm/dlmdomain.c while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
ctxt 1633 fs/ocfs2/dlm/dlmdomain.c set_bit(node, ctxt->yes_resp_map);
ctxt 1635 fs/ocfs2/dlm/dlmdomain.c if (dlm_should_restart_join(dlm, ctxt, response)) {
ctxt 1648 fs/ocfs2/dlm/dlmdomain.c memcpy(dlm->domain_map, ctxt->yes_resp_map,
ctxt 1649 fs/ocfs2/dlm/dlmdomain.c sizeof(ctxt->yes_resp_map));
ctxt 1656 fs/ocfs2/dlm/dlmdomain.c status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map);
ctxt 1661 fs/ocfs2/dlm/dlmdomain.c status = dlm_send_regions(dlm, ctxt->yes_resp_map);
ctxt 1668 fs/ocfs2/dlm/dlmdomain.c dlm_send_join_asserts(dlm, ctxt->yes_resp_map);
ctxt 1688 fs/ocfs2/dlm/dlmdomain.c if (ctxt) {
ctxt 1692 fs/ocfs2/dlm/dlmdomain.c ctxt->yes_resp_map,
ctxt 1693 fs/ocfs2/dlm/dlmdomain.c sizeof(ctxt->yes_resp_map));
ctxt 1697 fs/ocfs2/dlm/dlmdomain.c kfree(ctxt);
ctxt 266 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt);
ctxt 271 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt);
ctxt 704 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 707 fs/ocfs2/xattr.c handle_t *handle = ctxt->handle;
ctxt 730 fs/ocfs2/xattr.c ctxt->data_ac,
ctxt 731 fs/ocfs2/xattr.c ctxt->meta_ac,
ctxt 769 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 773 fs/ocfs2/xattr.c handle_t *handle = ctxt->handle;
ctxt 785 fs/ocfs2/xattr.c ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,
ctxt 786 fs/ocfs2/xattr.c &ctxt->dealloc);
ctxt 799 fs/ocfs2/xattr.c len, ctxt->meta_ac, &ctxt->dealloc, 1);
ctxt 801 fs/ocfs2/xattr.c ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc,
ctxt 814 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 840 fs/ocfs2/xattr.c ext_flags, ctxt);
ctxt 860 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 872 fs/ocfs2/xattr.c vb, ctxt);
ctxt 876 fs/ocfs2/xattr.c vb, ctxt);
ctxt 1906 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 1913 fs/ocfs2/xattr.c ctxt);
ctxt 1925 fs/ocfs2/xattr.c access_rc = ocfs2_xa_journal_access(ctxt->handle, loc,
ctxt 2016 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 2023 fs/ocfs2/xattr.c rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
ctxt 2064 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 2086 fs/ocfs2/xattr.c rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
ctxt 2096 fs/ocfs2/xattr.c ctxt);
ctxt 2126 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 2139 fs/ocfs2/xattr.c rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
ctxt 2147 fs/ocfs2/xattr.c rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
ctxt 2171 fs/ocfs2/xattr.c rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt);
ctxt 2173 fs/ocfs2/xattr.c ctxt->set_abort = 1;
ctxt 2201 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 2213 fs/ocfs2/xattr.c ctxt->handle, &vb,
ctxt 2224 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 2230 fs/ocfs2/xattr.c ret = ocfs2_xa_journal_access(ctxt->handle, loc,
ctxt 2245 fs/ocfs2/xattr.c ret = ocfs2_xa_remove(loc, ctxt);
ctxt 2249 fs/ocfs2/xattr.c ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt);
ctxt 2256 fs/ocfs2/xattr.c ret = ocfs2_xa_store_value(loc, xi, ctxt);
ctxt 2261 fs/ocfs2/xattr.c ocfs2_xa_journal_dirty(ctxt->handle, loc);
ctxt 2370 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, };
ctxt 2373 fs/ocfs2/xattr.c ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
ctxt 2388 fs/ocfs2/xattr.c &ctxt.meta_ac,
ctxt 2391 fs/ocfs2/xattr.c ctxt.handle = ocfs2_start_trans(osb, ref_credits +
ctxt 2393 fs/ocfs2/xattr.c if (IS_ERR(ctxt.handle)) {
ctxt 2394 fs/ocfs2/xattr.c ret = PTR_ERR(ctxt.handle);
ctxt 2399 fs/ocfs2/xattr.c ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt);
ctxt 2401 fs/ocfs2/xattr.c ocfs2_commit_trans(osb, ctxt.handle);
ctxt 2402 fs/ocfs2/xattr.c if (ctxt.meta_ac) {
ctxt 2403 fs/ocfs2/xattr.c ocfs2_free_alloc_context(ctxt.meta_ac);
ctxt 2404 fs/ocfs2/xattr.c ctxt.meta_ac = NULL;
ctxt 2414 fs/ocfs2/xattr.c if (ctxt.meta_ac)
ctxt 2415 fs/ocfs2/xattr.c ocfs2_free_alloc_context(ctxt.meta_ac);
ctxt 2417 fs/ocfs2/xattr.c ocfs2_run_deallocs(osb, &ctxt.dealloc);
ctxt 2718 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 2731 fs/ocfs2/xattr.c ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh,
ctxt 2757 fs/ocfs2/xattr.c ocfs2_journal_dirty(ctxt->handle, di_bh);
ctxt 2772 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 2783 fs/ocfs2/xattr.c ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt);
ctxt 2793 fs/ocfs2/xattr.c ret = ocfs2_xa_set(&loc, xi, ctxt);
ctxt 2862 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt,
ctxt 2874 fs/ocfs2/xattr.c ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
ctxt 2881 fs/ocfs2/xattr.c ret = ocfs2_claim_metadata(ctxt->handle, ctxt->meta_ac, 1,
ctxt 2898 fs/ocfs2/xattr.c ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode),
ctxt 2910 fs/ocfs2/xattr.c xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot);
ctxt 2926 fs/ocfs2/xattr.c ocfs2_journal_dirty(ctxt->handle, new_bh);
ctxt 2936 fs/ocfs2/xattr.c ocfs2_journal_dirty(ctxt->handle, inode_bh);
ctxt 2955 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 2963 fs/ocfs2/xattr.c ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt,
ctxt 2983 fs/ocfs2/xattr.c ret = ocfs2_xa_set(&loc, xi, ctxt);
ctxt 2986 fs/ocfs2/xattr.c else if ((ret != -ENOSPC) || ctxt->set_abort)
ctxt 2989 fs/ocfs2/xattr.c ret = ocfs2_xattr_create_index_block(inode, xs, ctxt);
ctxt 2996 fs/ocfs2/xattr.c ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt);
ctxt 3252 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt,
ctxt 3259 fs/ocfs2/xattr.c memset(ctxt, 0, sizeof(struct ocfs2_xattr_set_ctxt));
ctxt 3261 fs/ocfs2/xattr.c ocfs2_init_dealloc_ctxt(&ctxt->dealloc);
ctxt 3276 fs/ocfs2/xattr.c &ctxt->meta_ac);
ctxt 3284 fs/ocfs2/xattr.c ret = ocfs2_reserve_clusters(osb, clusters_add, &ctxt->data_ac);
ctxt 3290 fs/ocfs2/xattr.c if (ctxt->meta_ac) {
ctxt 3291 fs/ocfs2/xattr.c ocfs2_free_alloc_context(ctxt->meta_ac);
ctxt 3292 fs/ocfs2/xattr.c ctxt->meta_ac = NULL;
ctxt 3308 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 3315 fs/ocfs2/xattr.c ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
ctxt 3317 fs/ocfs2/xattr.c ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
ctxt 3320 fs/ocfs2/xattr.c ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt);
ctxt 3345 fs/ocfs2/xattr.c ret = ocfs2_extend_trans(ctxt->handle, credits);
ctxt 3350 fs/ocfs2/xattr.c ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
ctxt 3351 fs/ocfs2/xattr.c } else if ((ret == -ENOSPC) && !ctxt->set_abort) {
ctxt 3375 fs/ocfs2/xattr.c ret = ocfs2_extend_trans(ctxt->handle, credits);
ctxt 3385 fs/ocfs2/xattr.c ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt);
ctxt 3409 fs/ocfs2/xattr.c ret = ocfs2_extend_trans(ctxt->handle, credits);
ctxt 3415 fs/ocfs2/xattr.c xis, ctxt);
ctxt 3422 fs/ocfs2/xattr.c ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode),
ctxt 3433 fs/ocfs2/xattr.c ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
ctxt 3474 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt ctxt = {
ctxt 3510 fs/ocfs2/xattr.c ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
ctxt 3539 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };
ctxt 3633 fs/ocfs2/xattr.c &xbs, &ctxt, ref_meta, &credits);
ctxt 3641 fs/ocfs2/xattr.c ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
ctxt 3642 fs/ocfs2/xattr.c if (IS_ERR(ctxt.handle)) {
ctxt 3643 fs/ocfs2/xattr.c ret = PTR_ERR(ctxt.handle);
ctxt 3648 fs/ocfs2/xattr.c ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
ctxt 3649 fs/ocfs2/xattr.c ocfs2_update_inode_fsync_trans(ctxt.handle, inode, 0);
ctxt 3651 fs/ocfs2/xattr.c ocfs2_commit_trans(osb, ctxt.handle);
ctxt 3654 fs/ocfs2/xattr.c if (ctxt.data_ac)
ctxt 3655 fs/ocfs2/xattr.c ocfs2_free_alloc_context(ctxt.data_ac);
ctxt 3656 fs/ocfs2/xattr.c if (ctxt.meta_ac)
ctxt 3657 fs/ocfs2/xattr.c ocfs2_free_alloc_context(ctxt.meta_ac);
ctxt 3658 fs/ocfs2/xattr.c if (ocfs2_dealloc_has_cluster(&ctxt.dealloc))
ctxt 3660 fs/ocfs2/xattr.c ocfs2_run_deallocs(osb, &ctxt.dealloc);
ctxt 4274 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 4279 fs/ocfs2/xattr.c handle_t *handle = ctxt->handle;
ctxt 4307 fs/ocfs2/xattr.c ret = __ocfs2_claim_clusters(handle, ctxt->data_ac,
ctxt 5102 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 5109 fs/ocfs2/xattr.c handle_t *handle = ctxt->handle;
ctxt 5127 fs/ocfs2/xattr.c ret = __ocfs2_claim_clusters(handle, ctxt->data_ac, 1,
ctxt 5172 fs/ocfs2/xattr.c num_bits, 0, ctxt->meta_ac);
ctxt 5272 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 5324 fs/ocfs2/xattr.c ctxt);
ctxt 5333 fs/ocfs2/xattr.c ctxt->handle,
ctxt 5358 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 5396 fs/ocfs2/xattr.c ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
ctxt 5402 fs/ocfs2/xattr.c ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
ctxt 5411 fs/ocfs2/xattr.c ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket);
ctxt 5547 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 5556 fs/ocfs2/xattr.c ret = ocfs2_xa_set(&loc, xi, ctxt);
ctxt 5567 fs/ocfs2/xattr.c ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle,
ctxt 5574 fs/ocfs2/xattr.c ret = ocfs2_xa_set(&loc, xi, ctxt);
ctxt 5590 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt *ctxt)
ctxt 5596 fs/ocfs2/xattr.c ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
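
Stitched together from the ocfs2_xattr_set() lines indexed above, the life cycle of an ocfs2_xattr_set_ctxt looks roughly like this; error handling, reservation sizing, and the exact label names are compressed, so treat it as a reading aid rather than the verbatim function:

	struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, };

	ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
	/* ... reserve allocators into ctxt.meta_ac / ctxt.data_ac and
	 * compute the journal credits the operation will need ... */

	ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
	if (IS_ERR(ctxt.handle)) {
		ret = PTR_ERR(ctxt.handle);
		goto cleanup;
	}

	ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt);
	ocfs2_commit_trans(osb, ctxt.handle);

cleanup:
	if (ctxt.data_ac)
		ocfs2_free_alloc_context(ctxt.data_ac);
	if (ctxt.meta_ac)
		ocfs2_free_alloc_context(ctxt.meta_ac);
	ocfs2_run_deallocs(osb, &ctxt.dealloc);

The context bundles everything one transaction needs: the journal handle, the metadata and data allocators, and a deferred-deallocation list that is only drained after the transaction commits.
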
ctxt 5623 fs/ocfs2/xattr.c ctxt);
ctxt 5645 fs/ocfs2/xattr.c ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt);
ctxt 5662 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,};
ctxt 5669 fs/ocfs2/xattr.c ocfs2_init_dealloc_ctxt(&ctxt.dealloc);
ctxt 5686 fs/ocfs2/xattr.c &ctxt.meta_ac,
ctxt 5689 fs/ocfs2/xattr.c ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits);
ctxt 5690 fs/ocfs2/xattr.c if (IS_ERR(ctxt.handle)) {
ctxt 5691 fs/ocfs2/xattr.c ret = PTR_ERR(ctxt.handle);
ctxt 5697 fs/ocfs2/xattr.c i, 0, &ctxt);
ctxt 5699 fs/ocfs2/xattr.c ocfs2_commit_trans(osb, ctxt.handle);
ctxt 5700 fs/ocfs2/xattr.c if (ctxt.meta_ac) {
ctxt 5701 fs/ocfs2/xattr.c ocfs2_free_alloc_context(ctxt.meta_ac);
ctxt 5702 fs/ocfs2/xattr.c ctxt.meta_ac = NULL;
ctxt 5710 fs/ocfs2/xattr.c if (ctxt.meta_ac)
ctxt 5711 fs/ocfs2/xattr.c ocfs2_free_alloc_context(ctxt.meta_ac);
ctxt 5713 fs/ocfs2/xattr.c ocfs2_run_deallocs(osb, &ctxt.dealloc);
ctxt 6553 fs/ocfs2/xattr.c struct ocfs2_xattr_set_ctxt ctxt;
ctxt 6555 fs/ocfs2/xattr.c memset(&ctxt, 0, sizeof(ctxt));
ctxt 6556 fs/ocfs2/xattr.c ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &ctxt.meta_ac);
ctxt 6562 fs/ocfs2/xattr.c ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
ctxt 6563 fs/ocfs2/xattr.c if (IS_ERR(ctxt.handle)) {
ctxt 6564 fs/ocfs2/xattr.c ret = PTR_ERR(ctxt.handle);
ctxt 6571 fs/ocfs2/xattr.c ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
ctxt 6576 fs/ocfs2/xattr.c ocfs2_commit_trans(osb, ctxt.handle);
ctxt 6578 fs/ocfs2/xattr.c ocfs2_free_alloc_context(ctxt.meta_ac);
ctxt 163 include/linux/sunrpc/svc_rdma.h struct svc_rdma_recv_ctxt *ctxt);
ctxt 184 include/linux/sunrpc/svc_rdma.h struct svc_rdma_send_ctxt *ctxt);
ctxt 187 include/linux/sunrpc/svc_rdma.h struct svc_rdma_send_ctxt *ctxt,
ctxt 190 include/linux/sunrpc/svc_rdma.h struct svc_rdma_send_ctxt *ctxt,
ctxt 86 include/uapi/rdma/hfi/hfi1_ioctl.h __u16 ctxt; /* ctxt on unit assigned to caller */
ctxt 16 net/ipv4/tcp_fastopen.c struct tcp_fastopen_context *ctxt;
ctxt 19 net/ipv4/tcp_fastopen.c ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
ctxt 20 net/ipv4/tcp_fastopen.c if (ctxt) {
ctxt 56 net/ipv4/tcp_fastopen.c struct tcp_fastopen_context *ctxt;
ctxt 60 net/ipv4/tcp_fastopen.c ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
ctxt 65 net/ipv4/tcp_fastopen.c if (ctxt)
ctxt 66 net/ipv4/tcp_fastopen.c call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
ctxt 116 net/sunrpc/xprtrdma/svc_rdma_backchannel.c struct svc_rdma_send_ctxt *ctxt)
ctxt 120 net/sunrpc/xprtrdma/svc_rdma_backchannel.c ret = svc_rdma_map_reply_msg(rdma, ctxt, &rqst->rq_snd_buf, NULL);
ctxt 128 net/sunrpc/xprtrdma/svc_rdma_backchannel.c ctxt->sc_send_wr.opcode = IB_WR_SEND;
ctxt 129 net/sunrpc/xprtrdma/svc_rdma_backchannel.c return svc_rdma_send(rdma, &ctxt->sc_send_wr);
ctxt 176 net/sunrpc/xprtrdma/svc_rdma_backchannel.c struct svc_rdma_send_ctxt *ctxt;
ctxt 180 net/sunrpc/xprtrdma/svc_rdma_backchannel.c ctxt = svc_rdma_send_ctxt_get(rdma);
ctxt 181 net/sunrpc/xprtrdma/svc_rdma_backchannel.c if (!ctxt)
ctxt 184 net/sunrpc/xprtrdma/svc_rdma_backchannel.c p = ctxt->sc_xprt_buf;
ctxt 192 net/sunrpc/xprtrdma/svc_rdma_backchannel.c svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_MIN);
ctxt 199 net/sunrpc/xprtrdma/svc_rdma_backchannel.c rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);
ctxt 201 net/sunrpc/xprtrdma/svc_rdma_backchannel.c svc_rdma_send_ctxt_put(rdma, ctxt);
ctxt 123 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt;
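
The net/ipv4/tcp_fastopen.c entries above are the two halves of a classic RCU-protected pointer swap: readers pick the context up with rcu_dereference(), while the updater replaces it under a lock and frees the old one only after a grace period. Roughly as follows; the spinlock name is from memory of the same file and may differ by kernel version:

	/* Reader side, under rcu_read_lock(). */
	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		/* ... copy the key out while the context is pinned ... */
	}
	rcu_read_unlock();

	/* Updater side: publish newctx, defer freeing the old context so
	 * readers that still hold it stay safe. */
	spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
	old = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
			lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
	rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, newctx);
	spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);
	if (old)
		call_rcu(&old->rcu, tcp_fastopen_ctx_free);
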
ctxt 127 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
ctxt 128 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (!ctxt)
ctxt 138 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_wr.next = NULL;
ctxt 139 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
ctxt 140 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
ctxt 141 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_wr.num_sge = 1;
ctxt 142 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_cqe.done = svc_rdma_wc_receive;
ctxt 143 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_sge.addr = addr;
ctxt 144 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
ctxt 145 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
ctxt 146 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_buf = buffer;
ctxt 147 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_temp = false;
ctxt 148 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c return ctxt;
ctxt 153 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c kfree(ctxt);
ctxt 159 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt)
ctxt 161 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
ctxt 162 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
ctxt 163 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c kfree(ctxt->rc_recv_buf);
ctxt 164 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c kfree(ctxt);
ctxt 174 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt;
ctxt 178 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
ctxt 179 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_destroy(rdma, ctxt);
ctxt 186 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt;
ctxt 192 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
ctxt 195 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_page_count = 0;
ctxt 196 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c return ctxt;
ctxt 199 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = svc_rdma_recv_ctxt_alloc(rdma);
ctxt 200 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (!ctxt)
ctxt 212 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt)
ctxt 216 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c for (i = 0; i < ctxt->rc_page_count; i++)
ctxt 217 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c put_page(ctxt->rc_pages[i]);
ctxt 219 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (!ctxt->rc_temp)
ctxt 220 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
ctxt 222 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_destroy(rdma, ctxt);
ctxt 235 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
ctxt 241 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (ctxt)
ctxt 242 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma, ctxt);
ctxt 246 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt)
ctxt 251 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
ctxt 252 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
ctxt 258 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma, ctxt);
ctxt 265 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt;
ctxt 267 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = svc_rdma_recv_ctxt_get(rdma);
ctxt 268 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (!ctxt)
ctxt 270 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c return __svc_rdma_post_recv(rdma, ctxt);
ctxt 281 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt;
ctxt 286 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = svc_rdma_recv_ctxt_get(rdma);
ctxt 287 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (!ctxt)
ctxt 289 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_temp = true;
ctxt 290 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ret = __svc_rdma_post_recv(rdma, ctxt);
ctxt 309 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt;
ctxt 314 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
ctxt 323 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_byte_len = wc->byte_len;
ctxt 325 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_recv_sge.addr,
ctxt 329 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
ctxt 339 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma, ctxt);
ctxt 353 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt;
ctxt 355 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
ctxt 356 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c list_del(&ctxt->rc_list);
ctxt 357 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma, ctxt);
ctxt 359 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
ctxt 360 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c list_del(&ctxt->rc_list);
ctxt 361 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma, ctxt);
ctxt 366 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt)
ctxt 370 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c arg->head[0].iov_base = ctxt->rc_recv_buf;
ctxt 371 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c arg->head[0].iov_len = ctxt->rc_byte_len;
ctxt 376 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c arg->buflen = ctxt->rc_byte_len;
ctxt 377 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c arg->len = ctxt->rc_byte_len;
ctxt 509 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt)
ctxt 514 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_inv_rkey = 0;
ctxt 520 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c p = ctxt->rc_recv_buf;
ctxt 557 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->rc_inv_rkey = be32_to_cpu(inv_rkey);
ctxt 672 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_send_ctxt *ctxt;
ctxt 677 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = svc_rdma_send_ctxt_get(xprt);
ctxt 678 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (!ctxt)
ctxt 681 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c p = ctxt->sc_xprt_buf;
ctxt 697 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
ctxt 698 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_sync_reply_hdr(xprt, ctxt, length);
ctxt 700 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt->sc_send_wr.opcode = IB_WR_SEND;
ctxt 701 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
ctxt 703 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_send_ctxt_put(xprt, ctxt);
ctxt 775 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *ctxt;
ctxt 782 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
ctxt 783 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (ctxt) {
ctxt 784 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c list_del(&ctxt->rc_list);
ctxt 786 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rdma_read_complete(rqstp, ctxt);
ctxt 789 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
ctxt 790 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c if (!ctxt) {
ctxt 796 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c list_del(&ctxt->rc_list);
ctxt 801 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_build_arg_xdr(rqstp, ctxt);
ctxt 820 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
ctxt 823 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_get_inv_rkey(rdma_xprt, ctxt);
ctxt 830 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rqstp->rq_xprt_ctxt = ctxt;
ctxt 836 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
ctxt 843 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
ctxt 849 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
ctxt 853 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
ctxt 57 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_rw_ctxt *ctxt;
ctxt 61 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
ctxt 62 net/sunrpc/xprtrdma/svc_rdma_rw.c if (ctxt) {
ctxt 63 net/sunrpc/xprtrdma/svc_rdma_rw.c list_del(&ctxt->rw_list);
ctxt 67 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
ctxt 69 net/sunrpc/xprtrdma/svc_rdma_rw.c if (!ctxt)
ctxt 71 net/sunrpc/xprtrdma/svc_rdma_rw.c INIT_LIST_HEAD(&ctxt->rw_list);
ctxt 74 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
ctxt 75 net/sunrpc/xprtrdma/svc_rdma_rw.c if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
ctxt 76 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt->rw_sg_table.sgl,
ctxt 78 net/sunrpc/xprtrdma/svc_rdma_rw.c kfree(ctxt);
ctxt 79 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt = NULL;
ctxt 82 net/sunrpc/xprtrdma/svc_rdma_rw.c return ctxt;
ctxt 86 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_rw_ctxt *ctxt)
ctxt 88 net/sunrpc/xprtrdma/svc_rdma_rw.c sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
ctxt 91 net/sunrpc/xprtrdma/svc_rdma_rw.c list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
ctxt 102 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_rw_ctxt *ctxt;
ctxt 104 net/sunrpc/xprtrdma/svc_rdma_rw.c while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
ctxt 105 net/sunrpc/xprtrdma/svc_rdma_rw.c list_del(&ctxt->rw_list);
ctxt 106 net/sunrpc/xprtrdma/svc_rdma_rw.c kfree(ctxt);
ctxt 139 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_rw_ctxt *ctxt;
ctxt 141 net/sunrpc/xprtrdma/svc_rdma_rw.c while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
ctxt 142 net/sunrpc/xprtrdma/svc_rdma_rw.c list_del(&ctxt->rw_list);
ctxt 144 net/sunrpc/xprtrdma/svc_rdma_rw.c rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
ctxt 145 net/sunrpc/xprtrdma/svc_rdma_rw.c rdma->sc_port_num, ctxt->rw_sg_table.sgl,
ctxt 146 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt->rw_nents, dir);
ctxt 147 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_put_rw_ctxt(rdma, ctxt);
ctxt 314 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_rw_ctxt *ctxt;
ctxt 316 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
ctxt 317 net/sunrpc/xprtrdma/svc_rdma_rw.c first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
ctxt 354 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_rw_ctxt *ctxt)
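
The svc_rdma_rw.c entries above show a free-list cache: the get path pops a cached context off sc_rw_ctxts and only falls back to kmalloc() when the list is empty, and the put path pushes the context back instead of freeing it. A minimal single-threaded userspace model of that pattern follows; the kernel code protects its list with the transport's lock, and the recv-context variant in svc_rdma_recvfrom.c does the same thing with a lock-free llist:

	#include <stdlib.h>

	struct rw_ctxt {
		struct rw_ctxt *next;
		/* ... per-I/O state would live here ... */
	};

	static struct rw_ctxt *free_list;

	static struct rw_ctxt *ctxt_get(void)
	{
		struct rw_ctxt *c = free_list;

		if (c) {
			free_list = c->next;	/* reuse a cached context */
			return c;
		}
		return calloc(1, sizeof(*c));	/* cache empty: allocate fresh */
	}

	static void ctxt_put(struct rw_ctxt *c)
	{
		c->next = free_list;		/* recycle instead of freeing */
		free_list = c;
	}

Recycling keeps allocation off the I/O fast path; the cache is only torn down (each context actually freed) when the transport itself is destroyed, as in svc_rdma_destroy_rw_ctxts() above.
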
ctxt 356 net/sunrpc/xprtrdma/svc_rdma_rw.c struct scatterlist *sg = ctxt->rw_sg_table.sgl;
ctxt 361 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt->rw_nents = 1;
ctxt 368 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_rw_ctxt *ctxt)
ctxt 380 net/sunrpc/xprtrdma/svc_rdma_rw.c sg = ctxt->rw_sg_table.sgl;
ctxt 394 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt->rw_nents = sge_no;
ctxt 404 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_rw_ctxt *ctxt),
ctxt 409 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_rw_ctxt *ctxt;
ctxt 428 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt = svc_rdma_get_rw_ctxt(rdma,
ctxt 430 net/sunrpc/xprtrdma/svc_rdma_rw.c if (!ctxt)
ctxt 433 net/sunrpc/xprtrdma/svc_rdma_rw.c constructor(info, write_len, ctxt);
ctxt 434 net/sunrpc/xprtrdma/svc_rdma_rw.c ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
ctxt 435 net/sunrpc/xprtrdma/svc_rdma_rw.c rdma->sc_port_num, ctxt->rw_sg_table.sgl,
ctxt 436 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt->rw_nents, 0, seg_offset,
ctxt 442 net/sunrpc/xprtrdma/svc_rdma_rw.c list_add(&ctxt->rw_list, &cc->cc_rwctxts);
ctxt 466 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_put_rw_ctxt(rdma, ctxt);
ctxt 604 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_rw_ctxt *ctxt;
ctxt 610 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
ctxt 611 net/sunrpc/xprtrdma/svc_rdma_rw.c if (!ctxt)
ctxt 613 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt->rw_nents = sge_no;
ctxt 615 net/sunrpc/xprtrdma/svc_rdma_rw.c sg = ctxt->rw_sg_table.sgl;
ctxt 616 net/sunrpc/xprtrdma/svc_rdma_rw.c for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
ctxt 642 net/sunrpc/xprtrdma/svc_rdma_rw.c ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
ctxt 644 net/sunrpc/xprtrdma/svc_rdma_rw.c ctxt->rw_sg_table.sgl, ctxt->rw_nents,
ctxt 649 net/sunrpc/xprtrdma/svc_rdma_rw.c list_add(&ctxt->rw_list, &cc->cc_rwctxts);
ctxt 663 net/sunrpc/xprtrdma/svc_rdma_rw.c svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
ctxt 129 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt;
ctxt 135 net/sunrpc/xprtrdma/svc_rdma_sendto.c size = sizeof(*ctxt);
ctxt 137 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt = kmalloc(size, GFP_KERNEL);
ctxt 138 net/sunrpc/xprtrdma/svc_rdma_sendto.c if (!ctxt)
ctxt 148 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_send_wr.next = NULL;
ctxt 149 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
ctxt 150 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
ctxt 151 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
ctxt 152 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_cqe.done = svc_rdma_wc_send;
ctxt 153 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_xprt_buf = buffer;
ctxt 154 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[0].addr = addr;
ctxt 157 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
ctxt 158 net/sunrpc/xprtrdma/svc_rdma_sendto.c return ctxt;
ctxt 163 net/sunrpc/xprtrdma/svc_rdma_sendto.c kfree(ctxt);
ctxt 175 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt;
ctxt 177 net/sunrpc/xprtrdma/svc_rdma_sendto.c while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
ctxt 178 net/sunrpc/xprtrdma/svc_rdma_sendto.c list_del(&ctxt->sc_list);
ctxt 180 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[0].addr,
ctxt 183 net/sunrpc/xprtrdma/svc_rdma_sendto.c kfree(ctxt->sc_xprt_buf);
ctxt 184 net/sunrpc/xprtrdma/svc_rdma_sendto.c kfree(ctxt);
ctxt 197 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt;
ctxt 200 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
ctxt 201 net/sunrpc/xprtrdma/svc_rdma_sendto.c if (!ctxt)
ctxt 203 net/sunrpc/xprtrdma/svc_rdma_sendto.c list_del(&ctxt->sc_list);
ctxt 207 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_send_wr.num_sge = 0;
ctxt 208 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_cur_sge_no = 0;
ctxt 209 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_page_count = 0;
ctxt 210 net/sunrpc/xprtrdma/svc_rdma_sendto.c return ctxt;
ctxt 214 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt = svc_rdma_send_ctxt_alloc(rdma);
ctxt 215 net/sunrpc/xprtrdma/svc_rdma_sendto.c if (!ctxt)
ctxt 228 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt)
ctxt 236 net/sunrpc/xprtrdma/svc_rdma_sendto.c for (i = 1; i < ctxt->sc_send_wr.num_sge; i++)
ctxt 238 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[i].addr,
ctxt 239 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[i].length,
ctxt 242 net/sunrpc/xprtrdma/svc_rdma_sendto.c for (i = 0; i < ctxt->sc_page_count; ++i)
ctxt 243 net/sunrpc/xprtrdma/svc_rdma_sendto.c put_page(ctxt->sc_pages[i]);
ctxt 246 net/sunrpc/xprtrdma/svc_rdma_sendto.c list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
ctxt 262 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt;
ctxt 269 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
ctxt 270 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_rdma_send_ctxt_put(rdma, ctxt);
ctxt 486 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt,
ctxt 498 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
ctxt 499 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
ctxt 500 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_send_wr.num_sge++;
ctxt 512 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt,
ctxt 516 net/sunrpc/xprtrdma/svc_rdma_sendto.c return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
ctxt 528 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt,
ctxt 531 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[0].length = len;
ctxt 532 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_send_wr.num_sge++;
ctxt 534 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[0].addr, len,
ctxt 579 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt,
ctxt 585 net/sunrpc/xprtrdma/svc_rdma_sendto.c dst = ctxt->sc_xprt_buf;
ctxt 586 net/sunrpc/xprtrdma/svc_rdma_sendto.c dst += ctxt->sc_sges[0].length;
ctxt 622 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[0].length += xdr->len;
ctxt 624 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[0].addr,
ctxt 625 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_sges[0].length,
ctxt 643 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt,
ctxt 654 net/sunrpc/xprtrdma/svc_rdma_sendto.c return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
ctxt 656 net/sunrpc/xprtrdma/svc_rdma_sendto.c ++ctxt->sc_cur_sge_no;
ctxt 657 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_dma_map_buf(rdma, ctxt,
ctxt 687 net/sunrpc/xprtrdma/svc_rdma_sendto.c ++ctxt->sc_cur_sge_no;
ctxt 688 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
ctxt 701 net/sunrpc/xprtrdma/svc_rdma_sendto.c ++ctxt->sc_cur_sge_no;
ctxt 702 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
ctxt 715 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt)
ctxt 719 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_page_count += pages;
ctxt 721 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_pages[i] = rqstp->rq_respages[i];
ctxt 783 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svc_rdma_send_ctxt *ctxt,
ctxt 789 net/sunrpc/xprtrdma/svc_rdma_sendto.c p = ctxt->sc_xprt_buf;
ctxt 794 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR);
ctxt 796 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_rdma_save_io_pages(rqstp, ctxt);
ctxt 798 net/sunrpc/xprtrdma/svc_rdma_sendto.c ctxt->sc_send_wr.opcode = IB_WR_SEND;
ctxt 799 net/sunrpc/xprtrdma/svc_rdma_sendto.c ret = svc_rdma_send(rdma, &ctxt->sc_send_wr);
ctxt 801 net/sunrpc/xprtrdma/svc_rdma_sendto.c svc_rdma_send_ctxt_put(rdma, ctxt);
ctxt 3407 security/selinux/ss/services.c struct context *ctxt;
ctxt 3424 security/selinux/ss/services.c ctxt = sidtab_search(state->ss->sidtab, sid);
ctxt 3425 security/selinux/ss/services.c if (unlikely(!ctxt)) {
ctxt 3439 security/selinux/ss/services.c match = (ctxt->user == rule->au_ctxt.user);
ctxt 3442 security/selinux/ss/services.c match = (ctxt->user != rule->au_ctxt.user);
ctxt 3450 security/selinux/ss/services.c match = (ctxt->role == rule->au_ctxt.role);
ctxt 3453 security/selinux/ss/services.c match = (ctxt->role != rule->au_ctxt.role);
ctxt 3461 security/selinux/ss/services.c match = (ctxt->type == rule->au_ctxt.type);
ctxt 3464 security/selinux/ss/services.c match = (ctxt->type != rule->au_ctxt.type);
ctxt 3474 security/selinux/ss/services.c &ctxt->range.level[0] : &ctxt->range.level[1]);
ctxt 27 tools/testing/selftests/powerpc/mm/segv_errors.c ucontext_t *ctxt = (ucontext_t *)ctxt_v;
ctxt 28 tools/testing/selftests/powerpc/mm/segv_errors.c struct pt_regs *regs = ctxt->uc_mcontext.regs;
ctxt 39 tools/testing/selftests/powerpc/mm/subpage_prot.c ucontext_t *ctxt = (ucontext_t *)ctxt_v;
ctxt 40 tools/testing/selftests/powerpc/mm/subpage_prot.c struct pt_regs *regs = ctxt->uc_mcontext.regs;
ctxt 26 tools/testing/selftests/powerpc/mm/wild_bctr.c static void save_regs(ucontext_t *ctxt)
ctxt 28 tools/testing/selftests/powerpc/mm/wild_bctr.c struct pt_regs *regs = ctxt->uc_mcontext.regs;
ctxt 400 virt/kvm/arm/arm.c struct kvm_cpu_context __maybe_unused *ctxt = vcpu->arch.host_cpu_context;
ctxt 402 virt/kvm/arm/arm.c __ptrauth_save_key(ctxt->sys_regs, APIA);
ctxt 403 virt/kvm/arm/arm.c __ptrauth_save_key(ctxt->sys_regs, APIB);
ctxt 404 virt/kvm/arm/arm.c __ptrauth_save_key(ctxt->sys_regs, APDA);
ctxt 405 virt/kvm/arm/arm.c __ptrauth_save_key(ctxt->sys_regs, APDB);
ctxt 406 virt/kvm/arm/arm.c __ptrauth_save_key(ctxt->sys_regs, APGA);
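
On the last lines above, __ptrauth_save_key() snapshots one pointer-authentication key pair (low and high halves) from the CPU's system registers into the host context's sys_regs array before the guest state is restored. From memory of the arm64 code of this era, the macro expands along these lines; verify against the exact tree being indexed:

	#define __ptrauth_save_key(regs, key)					\
	({									\
		regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
		regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
	})

Token pasting turns each call such as __ptrauth_save_key(ctxt->sys_regs, APIA) into two read_sysreg_s() reads of APIAKEYLO_EL1 and APIAKEYHI_EL1, stored at the matching indices of the sys_regs array.
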