Searched refs:ctxt (Results 1 - 81 of 81) sorted by relevance

/linux-4.1.27/arch/x86/kvm/
emulate.c
212 int (*execute)(struct x86_emulate_ctxt *ctxt);
221 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
260 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr) reg_read() argument
262 if (!(ctxt->regs_valid & (1 << nr))) { reg_read()
263 ctxt->regs_valid |= 1 << nr; reg_read()
264 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr); reg_read()
266 return ctxt->_regs[nr]; reg_read()
269 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr) reg_write() argument
271 ctxt->regs_valid |= 1 << nr; reg_write()
272 ctxt->regs_dirty |= 1 << nr; reg_write()
273 return &ctxt->_regs[nr]; reg_write()
276 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr) reg_rmw() argument
278 reg_read(ctxt, nr); reg_rmw()
279 return reg_write(ctxt, nr); reg_rmw()
282 static void writeback_registers(struct x86_emulate_ctxt *ctxt) writeback_registers() argument
286 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16) writeback_registers()
287 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]); writeback_registers()
290 static void invalidate_registers(struct x86_emulate_ctxt *ctxt) invalidate_registers() argument
292 ctxt->regs_dirty = 0; invalidate_registers()
293 ctxt->regs_valid = 0; invalidate_registers()
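
The four helpers above form a lazy GPR cache: reg_read() pulls a register from the vCPU only on first use and sets its bit in regs_valid, reg_write() additionally sets regs_dirty, and writeback_registers() flushes only the dirty slots back through ops->write_gpr(). A minimal standalone sketch of the same pattern, with hypothetical names and a plain array standing in for the vCPU:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_REGS 16

    struct cache {
        uint16_t valid, dirty;            /* one bit per register */
        unsigned long regs[NR_REGS];
    };

    static unsigned long backing[NR_REGS];    /* stands in for vCPU state */

    static unsigned long cache_read(struct cache *c, unsigned nr)
    {
        if (!(c->valid & (1u << nr))) {   /* first touch: fill from "vCPU" */
            c->valid |= 1u << nr;
            c->regs[nr] = backing[nr];
        }
        return c->regs[nr];
    }

    static unsigned long *cache_write(struct cache *c, unsigned nr)
    {
        c->valid |= 1u << nr;
        c->dirty |= 1u << nr;             /* remember to write back */
        return &c->regs[nr];
    }

    static unsigned long *cache_rmw(struct cache *c, unsigned nr)
    {
        cache_read(c, nr);                /* ensure the cached copy is valid */
        return cache_write(c, nr);
    }

    static void cache_writeback(struct cache *c)
    {
        for (unsigned nr = 0; nr < NR_REGS; nr++)
            if (c->dirty & (1u << nr))
                backing[nr] = c->regs[nr];
        c->valid = c->dirty = 0;
    }

    int main(void)
    {
        struct cache c = {0};

        backing[3] = 42;
        *cache_rmw(&c, 3) += 1;           /* read-modify-write one register */
        cache_writeback(&c);
        printf("%lu\n", backing[3]);      /* 43: only reg 3 was written back */
        return 0;
    }

The rmw helper mirrors reg_rmw(): a plain write would mark the slot valid without ever filling it, which is exactly why the kernel reads before handing out a writable pointer.
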
309 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
436 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, emulator_check_intercept() argument
442 .rep_prefix = ctxt->rep_prefix, emulator_check_intercept()
443 .modrm_mod = ctxt->modrm_mod, emulator_check_intercept()
444 .modrm_reg = ctxt->modrm_reg, emulator_check_intercept()
445 .modrm_rm = ctxt->modrm_rm, emulator_check_intercept()
446 .src_val = ctxt->src.val64, emulator_check_intercept()
447 .dst_val = ctxt->dst.val64, emulator_check_intercept()
448 .src_bytes = ctxt->src.bytes, emulator_check_intercept()
449 .dst_bytes = ctxt->dst.bytes, emulator_check_intercept()
450 .ad_bytes = ctxt->ad_bytes, emulator_check_intercept()
451 .next_rip = ctxt->eip, emulator_check_intercept()
454 return ctxt->ops->intercept(ctxt, &info, stage); emulator_check_intercept()
481 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) ad_mask() argument
483 return (1UL << (ctxt->ad_bytes << 3)) - 1; ad_mask()
486 static ulong stack_mask(struct x86_emulate_ctxt *ctxt) stack_mask() argument
491 if (ctxt->mode == X86EMUL_MODE_PROT64) stack_mask()
493 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS); stack_mask()
497 static int stack_size(struct x86_emulate_ctxt *ctxt) stack_size() argument
499 return (__fls(stack_mask(ctxt)) + 1) >> 3; stack_size()
504 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) address_mask() argument
506 if (ctxt->ad_bytes == sizeof(unsigned long)) address_mask()
509 return reg & ad_mask(ctxt); address_mask()
513 register_address(struct x86_emulate_ctxt *ctxt, int reg) register_address() argument
515 return address_mask(ctxt, reg_read(ctxt, reg)); register_address()
524 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc) register_address_increment() argument
528 if (ctxt->ad_bytes == sizeof(unsigned long)) register_address_increment()
531 mask = ad_mask(ctxt); register_address_increment()
532 masked_increment(reg_rmw(ctxt, reg), mask, inc); register_address_increment()
535 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) rsp_increment() argument
537 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); rsp_increment()
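
ad_mask() builds a mask from the instruction's address size (2, 4, or 8 bytes), and address_mask() short-circuits the full-width case, since shifting 1UL by 64 would be undefined behaviour. A standalone sketch, assuming a 64-bit host:

    #include <stdio.h>

    static unsigned long ad_mask(int ad_bytes)
    {
        return (1UL << (ad_bytes << 3)) - 1;   /* 2 -> 0xffff, 4 -> 0xffffffff */
    }

    static unsigned long address_mask(int ad_bytes, unsigned long reg)
    {
        if (ad_bytes == sizeof(unsigned long)) /* full width: no mask, no UB */
            return reg;
        return reg & ad_mask(ad_bytes);
    }

    int main(void)
    {
        printf("%#lx\n", address_mask(2, 0x12345678ul));   /* 0x5678 */
        printf("%#lx\n", address_mask(4, 0x1122334455ul)); /* 0x22334455 */
        return 0;
    }
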
547 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) seg_base() argument
549 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) seg_base()
552 return ctxt->ops->get_cached_segment_base(ctxt, seg); seg_base()
555 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, emulate_exception() argument
559 ctxt->exception.vector = vec; emulate_exception()
560 ctxt->exception.error_code = error; emulate_exception()
561 ctxt->exception.error_code_valid = valid; emulate_exception()
565 static int emulate_db(struct x86_emulate_ctxt *ctxt) emulate_db() argument
567 return emulate_exception(ctxt, DB_VECTOR, 0, false); emulate_db()
570 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err) emulate_gp() argument
572 return emulate_exception(ctxt, GP_VECTOR, err, true); emulate_gp()
575 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err) emulate_ss() argument
577 return emulate_exception(ctxt, SS_VECTOR, err, true); emulate_ss()
580 static int emulate_ud(struct x86_emulate_ctxt *ctxt) emulate_ud() argument
582 return emulate_exception(ctxt, UD_VECTOR, 0, false); emulate_ud()
585 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err) emulate_ts() argument
587 return emulate_exception(ctxt, TS_VECTOR, err, true); emulate_ts()
590 static int emulate_de(struct x86_emulate_ctxt *ctxt) emulate_de() argument
592 return emulate_exception(ctxt, DE_VECTOR, 0, false); emulate_de()
595 static int emulate_nm(struct x86_emulate_ctxt *ctxt) emulate_nm() argument
597 return emulate_exception(ctxt, NM_VECTOR, 0, false); emulate_nm()
600 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) get_segment_selector() argument
605 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg); get_segment_selector()
609 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector, set_segment_selector() argument
616 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg); set_segment_selector()
617 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); set_segment_selector()
628 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size) insn_aligned() argument
633 if (ctxt->d & Aligned) insn_aligned()
635 else if (ctxt->d & Unaligned) insn_aligned()
637 else if (ctxt->d & Avx) insn_aligned()
643 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, __linearize() argument
655 la = seg_base(ctxt, addr.seg) + addr.ea; __linearize()
667 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, __linearize()
672 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) __linearize()
697 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) __linearize()
698 return emulate_gp(ctxt, 0); __linearize()
703 return emulate_ss(ctxt, 0); __linearize()
705 return emulate_gp(ctxt, 0); __linearize()
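
__linearize() turns a segmented address into a linear one: add the segment base, then check limits and alignment, raising #SS or #GP on failure. A much-reduced sketch that keeps only the base+offset step and the alignment check; the segment limit and expand-down logic in the elided lines is omitted here:

    #include <stdint.h>
    #include <stdio.h>

    /* size is a power of two for the SSE/AVX cases that require alignment */
    static int linearize(uint64_t seg_base, uint64_t ea, unsigned size,
                         uint64_t *linear)
    {
        uint64_t la = seg_base + ea;
        if (size && (la & (size - 1)))    /* misaligned for this operand */
            return -1;                    /* the emulator returns emulate_gp(0) */
        *linear = la;
        return 0;
    }

    int main(void)
    {
        uint64_t la;
        if (!linearize(0x10000, 0x1234, 4, &la))
            printf("linear = %#llx\n", (unsigned long long)la);
        return 0;
    }
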
708 static int linearize(struct x86_emulate_ctxt *ctxt, linearize() argument
714 return __linearize(ctxt, addr, &max_size, size, write, false, linearize()
715 ctxt->mode, linear); linearize()
718 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst, assign_eip() argument
727 if (ctxt->op_bytes != sizeof(unsigned long)) assign_eip()
728 addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); assign_eip()
729 rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear); assign_eip()
731 ctxt->_eip = addr.ea; assign_eip()
735 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) assign_eip_near() argument
737 return assign_eip(ctxt, dst, ctxt->mode); assign_eip_near()
740 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, assign_eip_far() argument
743 enum x86emul_mode mode = ctxt->mode; assign_eip_far()
747 if (ctxt->mode >= X86EMUL_MODE_PROT16) { assign_eip_far()
751 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); assign_eip_far()
760 rc = assign_eip(ctxt, dst, mode); assign_eip_far()
762 ctxt->mode = mode; assign_eip_far()
766 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) jmp_rel() argument
768 return assign_eip_near(ctxt, ctxt->_eip + rel); jmp_rel()
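
assign_eip() masks the branch target down to the operand size before it becomes the new _eip, so 16- and 32-bit code wraps the way hardware would. A sketch of just that truncation:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t truncate_eip(uint64_t dst, int op_bytes)
    {
        if (op_bytes != sizeof(unsigned long))
            dst &= (1UL << (op_bytes << 3)) - 1;
        return dst;
    }

    int main(void)
    {
        /* a 16-bit near branch wraps within the low word */
        printf("%#llx\n", (unsigned long long)truncate_eip(0x1ffff, 2)); /* 0xffff */
        return 0;
    }
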
771 static int segmented_read_std(struct x86_emulate_ctxt *ctxt, segmented_read_std() argument
779 rc = linearize(ctxt, addr, size, false, &linear); segmented_read_std()
782 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); segmented_read_std()
789 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) __do_insn_fetch_bytes() argument
794 int cur_size = ctxt->fetch.end - ctxt->fetch.data; __do_insn_fetch_bytes()
796 .ea = ctxt->eip + cur_size }; __do_insn_fetch_bytes()
808 rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode, __do_insn_fetch_bytes()
823 return emulate_gp(ctxt, 0); __do_insn_fetch_bytes()
825 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, __do_insn_fetch_bytes()
826 size, &ctxt->exception); __do_insn_fetch_bytes()
829 ctxt->fetch.end += size; __do_insn_fetch_bytes()
833 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, do_insn_fetch_bytes() argument
836 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr; do_insn_fetch_bytes()
839 return __do_insn_fetch_bytes(ctxt, size - done_size); do_insn_fetch_bytes()
851 ctxt->_eip += sizeof(_type); \
852 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
853 ctxt->fetch.ptr += sizeof(_type); \
862 ctxt->_eip += (_size); \
863 memcpy(_arr, ctxt->fetch.ptr, _size); \
864 ctxt->fetch.ptr += (_size); \
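
The insn_fetch macros consume bytes from a prefetch window: bytes between fetch.ptr and fetch.end are already resident, and __do_insn_fetch_bytes() tops the window up with a single guest-memory access when it runs short. A toy version with a fake guest buffer and hypothetical names (no bounds checks):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static const uint8_t guest[] = { 0x90, 0xb8, 0x05, 0x00, 0x00, 0x00 };
    static size_t guest_rip;

    struct fetch {
        uint8_t data[15];             /* x86 insns are at most 15 bytes */
        uint8_t *ptr, *end;
    };

    static void fetch_bytes(struct fetch *f, size_t need)
    {
        size_t done = f->end - f->ptr;
        if (done >= need)
            return;                   /* already buffered */
        size_t n = need - done;
        memcpy(f->end, guest + guest_rip, n);  /* one "guest memory" access */
        guest_rip += n;
        f->end += n;
    }

    static uint8_t fetch_u8(struct fetch *f)
    {
        fetch_bytes(f, 1);
        return *f->ptr++;
    }

    int main(void)
    {
        struct fetch f = { .ptr = f.data, .end = f.data };
        uint8_t a = fetch_u8(&f);
        uint8_t b = fetch_u8(&f);
        printf("opcode %#x, then %#x\n", a, b);  /* 0x90, then 0xb8 */
        return 0;
    }
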
872 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg, decode_register() argument
876 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop; decode_register()
879 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1; decode_register()
881 p = reg_rmw(ctxt, modrm_reg); decode_register()
885 static int read_descriptor(struct x86_emulate_ctxt *ctxt, read_descriptor() argument
894 rc = segmented_read_std(ctxt, addr, size, 2); read_descriptor()
898 rc = segmented_read_std(ctxt, addr, address, op_bytes); read_descriptor()
946 static int em_bsf_c(struct x86_emulate_ctxt *ctxt) em_bsf_c() argument
949 if (ctxt->src.val == 0) em_bsf_c()
950 ctxt->dst.type = OP_NONE; em_bsf_c()
951 return fastop(ctxt, em_bsf); em_bsf_c()
954 static int em_bsr_c(struct x86_emulate_ctxt *ctxt) em_bsr_c() argument
957 if (ctxt->src.val == 0) em_bsr_c()
958 ctxt->dst.type = OP_NONE; em_bsr_c()
959 return fastop(ctxt, em_bsr); em_bsr_c()
991 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) read_sse_reg() argument
993 ctxt->ops->get_fpu(ctxt); read_sse_reg()
1015 ctxt->ops->put_fpu(ctxt); read_sse_reg()
1018 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, write_sse_reg() argument
1021 ctxt->ops->get_fpu(ctxt); write_sse_reg()
1043 ctxt->ops->put_fpu(ctxt); write_sse_reg()
1046 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) read_mmx_reg() argument
1048 ctxt->ops->get_fpu(ctxt); read_mmx_reg()
1060 ctxt->ops->put_fpu(ctxt); read_mmx_reg()
1063 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) write_mmx_reg() argument
1065 ctxt->ops->get_fpu(ctxt); write_mmx_reg()
1077 ctxt->ops->put_fpu(ctxt); write_mmx_reg()
1080 static int em_fninit(struct x86_emulate_ctxt *ctxt) em_fninit() argument
1082 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) em_fninit()
1083 return emulate_nm(ctxt); em_fninit()
1085 ctxt->ops->get_fpu(ctxt); em_fninit()
1087 ctxt->ops->put_fpu(ctxt); em_fninit()
1091 static int em_fnstcw(struct x86_emulate_ctxt *ctxt) em_fnstcw() argument
1095 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) em_fnstcw()
1096 return emulate_nm(ctxt); em_fnstcw()
1098 ctxt->ops->get_fpu(ctxt); em_fnstcw()
1100 ctxt->ops->put_fpu(ctxt); em_fnstcw()
1102 ctxt->dst.val = fcw; em_fnstcw()
1107 static int em_fnstsw(struct x86_emulate_ctxt *ctxt) em_fnstsw() argument
1111 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) em_fnstsw()
1112 return emulate_nm(ctxt); em_fnstsw()
1114 ctxt->ops->get_fpu(ctxt); em_fnstsw()
1116 ctxt->ops->put_fpu(ctxt); em_fnstsw()
1118 ctxt->dst.val = fsw; em_fnstsw()
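
Each FPU handler above first tests CR0.TS and CR0.EM and raises #NM if either is set, matching what hardware does before executing an x87 instruction. The guard in isolation:

    #include <stdio.h>

    #define X86_CR0_EM (1ul << 2)     /* emulate coprocessor */
    #define X86_CR0_TS (1ul << 3)     /* task switched */

    static int check_fpu_usable(unsigned long cr0)
    {
        if (cr0 & (X86_CR0_TS | X86_CR0_EM))
            return -1;                /* the emulator raises #NM here */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_fpu_usable(X86_CR0_TS));  /* -1: #NM */
        printf("%d\n", check_fpu_usable(0));           /*  0: FPU usable */
        return 0;
    }
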
1123 static void decode_register_operand(struct x86_emulate_ctxt *ctxt, decode_register_operand() argument
1126 unsigned reg = ctxt->modrm_reg; decode_register_operand()
1128 if (!(ctxt->d & ModRM)) decode_register_operand()
1129 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3); decode_register_operand()
1131 if (ctxt->d & Sse) { decode_register_operand()
1135 read_sse_reg(ctxt, &op->vec_val, reg); decode_register_operand()
1138 if (ctxt->d & Mmx) { decode_register_operand()
1147 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; decode_register_operand()
1148 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp); decode_register_operand()
1154 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg) adjust_modrm_seg() argument
1157 ctxt->modrm_seg = VCPU_SREG_SS; adjust_modrm_seg()
1160 static int decode_modrm(struct x86_emulate_ctxt *ctxt, decode_modrm() argument
1168 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */ decode_modrm()
1169 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */ decode_modrm()
1170 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */ decode_modrm()
1172 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6; decode_modrm()
1173 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; decode_modrm()
1174 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07); decode_modrm()
1175 ctxt->modrm_seg = VCPU_SREG_DS; decode_modrm()
1177 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { decode_modrm()
1179 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; decode_modrm()
1180 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, decode_modrm()
1181 ctxt->d & ByteOp); decode_modrm()
1182 if (ctxt->d & Sse) { decode_modrm()
1185 op->addr.xmm = ctxt->modrm_rm; decode_modrm()
1186 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm); decode_modrm()
1189 if (ctxt->d & Mmx) { decode_modrm()
1192 op->addr.mm = ctxt->modrm_rm & 7; decode_modrm()
1201 if (ctxt->ad_bytes == 2) { decode_modrm()
1202 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX); decode_modrm()
1203 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP); decode_modrm()
1204 unsigned si = reg_read(ctxt, VCPU_REGS_RSI); decode_modrm()
1205 unsigned di = reg_read(ctxt, VCPU_REGS_RDI); decode_modrm()
1208 switch (ctxt->modrm_mod) { decode_modrm()
1210 if (ctxt->modrm_rm == 6) decode_modrm()
1211 modrm_ea += insn_fetch(u16, ctxt); decode_modrm()
1214 modrm_ea += insn_fetch(s8, ctxt); decode_modrm()
1217 modrm_ea += insn_fetch(u16, ctxt); decode_modrm()
1220 switch (ctxt->modrm_rm) { decode_modrm()
1240 if (ctxt->modrm_mod != 0) decode_modrm()
1247 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 || decode_modrm()
1248 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0)) decode_modrm()
1249 ctxt->modrm_seg = VCPU_SREG_SS; decode_modrm()
1253 if ((ctxt->modrm_rm & 7) == 4) { decode_modrm()
1254 sib = insn_fetch(u8, ctxt); decode_modrm()
1259 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0) decode_modrm()
1260 modrm_ea += insn_fetch(s32, ctxt); decode_modrm()
1262 modrm_ea += reg_read(ctxt, base_reg); decode_modrm()
1263 adjust_modrm_seg(ctxt, base_reg); decode_modrm()
1265 if ((ctxt->d & IncSP) && decode_modrm()
1267 modrm_ea += ctxt->op_bytes; decode_modrm()
1270 modrm_ea += reg_read(ctxt, index_reg) << scale; decode_modrm()
1271 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) { decode_modrm()
1272 modrm_ea += insn_fetch(s32, ctxt); decode_modrm()
1273 if (ctxt->mode == X86EMUL_MODE_PROT64) decode_modrm()
1274 ctxt->rip_relative = 1; decode_modrm()
1276 base_reg = ctxt->modrm_rm; decode_modrm()
1277 modrm_ea += reg_read(ctxt, base_reg); decode_modrm()
1278 adjust_modrm_seg(ctxt, base_reg); decode_modrm()
1280 switch (ctxt->modrm_mod) { decode_modrm()
1282 modrm_ea += insn_fetch(s8, ctxt); decode_modrm()
1285 modrm_ea += insn_fetch(s32, ctxt); decode_modrm()
1290 if (ctxt->ad_bytes != 8) decode_modrm()
1291 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; decode_modrm()
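
decode_modrm() splits the ModRM byte into mod (bits 7:6), reg (bits 5:3) and rm (bits 2:0), with REX.R/X/B widening reg, index and base to four bits each. The field extraction on an example byte:

    #include <stdio.h>

    int main(void)
    {
        unsigned char modrm = 0xd8;            /* 11 011 000 in binary */
        unsigned mod = (modrm & 0xc0) >> 6;    /* 3: register operand */
        unsigned reg = (modrm & 0x38) >> 3;    /* 3 */
        unsigned rm  =  modrm & 0x07;          /* 0 */
        printf("mod=%u reg=%u rm=%u\n", mod, reg, rm);
        return 0;
    }
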
1297 static int decode_abs(struct x86_emulate_ctxt *ctxt, decode_abs() argument
1303 switch (ctxt->ad_bytes) { decode_abs()
1305 op->addr.mem.ea = insn_fetch(u16, ctxt); decode_abs()
1308 op->addr.mem.ea = insn_fetch(u32, ctxt); decode_abs()
1311 op->addr.mem.ea = insn_fetch(u64, ctxt); decode_abs()
1318 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) fetch_bit_operand() argument
1322 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { fetch_bit_operand()
1323 mask = ~((long)ctxt->dst.bytes * 8 - 1); fetch_bit_operand()
1325 if (ctxt->src.bytes == 2) fetch_bit_operand()
1326 sv = (s16)ctxt->src.val & (s16)mask; fetch_bit_operand()
1327 else if (ctxt->src.bytes == 4) fetch_bit_operand()
1328 sv = (s32)ctxt->src.val & (s32)mask; fetch_bit_operand()
1330 sv = (s64)ctxt->src.val & (s64)mask; fetch_bit_operand()
1332 ctxt->dst.addr.mem.ea = address_mask(ctxt, fetch_bit_operand()
1333 ctxt->dst.addr.mem.ea + (sv >> 3)); fetch_bit_operand()
1337 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; fetch_bit_operand()
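
fetch_bit_operand() implements BT-family addressing for memory destinations: the register bit offset is sign-extended, the effective address moves by whole operands, and only the in-word bit index survives in src.val. A worked example with a negative offset:

    #include <stdio.h>

    int main(void)
    {
        long bit = -9;                  /* BT m32, r: offsets may be negative */
        int bytes = 4;                  /* operand size */
        long mask = ~((long)bytes * 8 - 1);
        long sv = bit & mask;           /* whole-dword part of the offset */
        long ea_delta = sv >> 3;        /* bytes to move the effective address */
        long in_word = bit & (bytes * 8 - 1);
        /* prints "ea += -4, bit 23": dword 4 bytes back, bit 23 of it */
        printf("ea += %ld, bit %ld\n", ea_delta, in_word);
        return 0;
    }
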
1340 static int read_emulated(struct x86_emulate_ctxt *ctxt, read_emulated() argument
1344 struct read_cache *mc = &ctxt->mem_read; read_emulated()
1351 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size, read_emulated()
1352 &ctxt->exception); read_emulated()
1364 static int segmented_read(struct x86_emulate_ctxt *ctxt, segmented_read() argument
1372 rc = linearize(ctxt, addr, size, false, &linear); segmented_read()
1375 return read_emulated(ctxt, linear, data, size); segmented_read()
1378 static int segmented_write(struct x86_emulate_ctxt *ctxt, segmented_write() argument
1386 rc = linearize(ctxt, addr, size, true, &linear); segmented_write()
1389 return ctxt->ops->write_emulated(ctxt, linear, data, size, segmented_write()
1390 &ctxt->exception); segmented_write()
1393 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, segmented_cmpxchg() argument
1401 rc = linearize(ctxt, addr, size, true, &linear); segmented_cmpxchg()
1404 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data, segmented_cmpxchg()
1405 size, &ctxt->exception); segmented_cmpxchg()
1408 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, pio_in_emulated() argument
1412 struct read_cache *rc = &ctxt->io_read; pio_in_emulated()
1416 unsigned int count = ctxt->rep_prefix ? pio_in_emulated()
1417 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1; pio_in_emulated()
1418 in_page = (ctxt->eflags & X86_EFLAGS_DF) ? pio_in_emulated()
1419 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : pio_in_emulated()
1420 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); pio_in_emulated()
1425 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n)) pio_in_emulated()
1430 if (ctxt->rep_prefix && (ctxt->d & String) && pio_in_emulated()
1431 !(ctxt->eflags & X86_EFLAGS_DF)) { pio_in_emulated()
1432 ctxt->dst.data = rc->data + rc->pos; pio_in_emulated()
1433 ctxt->dst.type = OP_MEM_STR; pio_in_emulated()
1434 ctxt->dst.count = (rc->end - rc->pos) / size; pio_in_emulated()
1443 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, read_interrupt_descriptor() argument
1449 ctxt->ops->get_idt(ctxt, &dt); read_interrupt_descriptor()
1452 return emulate_gp(ctxt, index << 3 | 0x2); read_interrupt_descriptor()
1455 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc, read_interrupt_descriptor()
1456 &ctxt->exception); read_interrupt_descriptor()
1459 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, get_descriptor_table_ptr() argument
1462 const struct x86_emulate_ops *ops = ctxt->ops; get_descriptor_table_ptr()
1470 if (!ops->get_segment(ctxt, &sel, &desc, &base3, get_descriptor_table_ptr()
1477 ops->get_gdt(ctxt, dt); get_descriptor_table_ptr()
1480 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt, get_descriptor_ptr() argument
1487 get_descriptor_table_ptr(ctxt, selector, &dt); get_descriptor_ptr()
1490 return emulate_gp(ctxt, selector & 0xfffc); get_descriptor_ptr()
1498 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); get_descriptor_ptr()
1509 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, read_segment_descriptor() argument
1515 rc = get_descriptor_ptr(ctxt, selector, desc_addr_p); read_segment_descriptor()
1519 return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc), read_segment_descriptor()
1520 &ctxt->exception); read_segment_descriptor()
1524 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, write_segment_descriptor() argument
1530 rc = get_descriptor_ptr(ctxt, selector, &addr); write_segment_descriptor()
1534 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc, write_segment_descriptor()
1535 &ctxt->exception); write_segment_descriptor()
1539 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, __load_segment_descriptor() argument
1556 if (ctxt->mode == X86EMUL_MODE_REAL) { __load_segment_descriptor()
1559 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); __load_segment_descriptor()
1562 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { __load_segment_descriptor()
1578 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) __load_segment_descriptor()
1590 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); __load_segment_descriptor()
1638 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); __load_segment_descriptor()
1651 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, __load_segment_descriptor()
1652 sizeof(seg_desc), &ctxt->exception); __load_segment_descriptor()
1677 ret = write_segment_descriptor(ctxt, selector, __load_segment_descriptor()
1682 } else if (ctxt->mode == X86EMUL_MODE_PROT64) { __load_segment_descriptor()
1683 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, __load_segment_descriptor()
1684 sizeof(base3), &ctxt->exception); __load_segment_descriptor()
1689 return emulate_gp(ctxt, 0); __load_segment_descriptor()
1692 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); __load_segment_descriptor()
1697 return emulate_exception(ctxt, err_vec, err_code, true); __load_segment_descriptor()
1700 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, load_segment_descriptor() argument
1703 u8 cpl = ctxt->ops->cpl(ctxt); load_segment_descriptor()
1704 return __load_segment_descriptor(ctxt, selector, seg, cpl, load_segment_descriptor()
1713 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) writeback() argument
1720 if (ctxt->lock_prefix) writeback()
1721 return segmented_cmpxchg(ctxt, writeback()
1727 return segmented_write(ctxt, writeback()
1733 return segmented_write(ctxt, writeback()
1739 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); writeback()
1742 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm); writeback()
1753 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) push() argument
1757 rsp_increment(ctxt, -bytes); push()
1758 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); push()
1761 return segmented_write(ctxt, addr, data, bytes); push()
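
push() decrements RSP first (masked to the stack width) and then writes the data at SS:RSP, the same order real hardware uses. A flat-memory sketch without segmentation:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static uint8_t stack[64];
    static uint64_t rsp = sizeof(stack);    /* stack grows down */

    static void push(const void *data, int bytes)
    {
        rsp -= bytes;                       /* rsp_increment(ctxt, -bytes) */
        memcpy(stack + rsp, data, bytes);   /* segmented_write at SS:RSP */
    }

    int main(void)
    {
        uint32_t v = 0xdeadbeef, top;

        push(&v, sizeof(v));
        memcpy(&top, stack + rsp, sizeof(top));
        printf("rsp=%llu top=%#x\n", (unsigned long long)rsp, top);
        return 0;
    }
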
1764 static int em_push(struct x86_emulate_ctxt *ctxt) em_push() argument
1767 ctxt->dst.type = OP_NONE; em_push()
1768 return push(ctxt, &ctxt->src.val, ctxt->op_bytes); em_push()
1771 static int emulate_pop(struct x86_emulate_ctxt *ctxt, emulate_pop() argument
1777 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); emulate_pop()
1779 rc = segmented_read(ctxt, addr, dest, len); emulate_pop()
1783 rsp_increment(ctxt, len); emulate_pop()
1787 static int em_pop(struct x86_emulate_ctxt *ctxt) em_pop() argument
1789 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); em_pop()
1792 static int emulate_popf(struct x86_emulate_ctxt *ctxt, emulate_popf() argument
1797 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; emulate_popf()
1798 int cpl = ctxt->ops->cpl(ctxt); emulate_popf()
1800 rc = emulate_pop(ctxt, &val, len); emulate_popf()
1809 switch(ctxt->mode) { emulate_popf()
1820 return emulate_gp(ctxt, 0); emulate_popf()
1829 (ctxt->eflags & ~change_mask) | (val & change_mask); emulate_popf()
1834 static int em_popf(struct x86_emulate_ctxt *ctxt) em_popf() argument
1836 ctxt->dst.type = OP_REG; em_popf()
1837 ctxt->dst.addr.reg = &ctxt->eflags; em_popf()
1838 ctxt->dst.bytes = ctxt->op_bytes; em_popf()
1839 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); em_popf()
1842 static int em_enter(struct x86_emulate_ctxt *ctxt) em_enter() argument
1845 unsigned frame_size = ctxt->src.val; em_enter()
1846 unsigned nesting_level = ctxt->src2.val & 31; em_enter()
1852 rbp = reg_read(ctxt, VCPU_REGS_RBP); em_enter()
1853 rc = push(ctxt, &rbp, stack_size(ctxt)); em_enter()
1856 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP), em_enter()
1857 stack_mask(ctxt)); em_enter()
1858 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), em_enter()
1859 reg_read(ctxt, VCPU_REGS_RSP) - frame_size, em_enter()
1860 stack_mask(ctxt)); em_enter()
1864 static int em_leave(struct x86_emulate_ctxt *ctxt) em_leave() argument
1866 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP), em_leave()
1867 stack_mask(ctxt)); em_leave()
1868 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes); em_leave()
1871 static int em_push_sreg(struct x86_emulate_ctxt *ctxt) em_push_sreg() argument
1873 int seg = ctxt->src2.val; em_push_sreg()
1875 ctxt->src.val = get_segment_selector(ctxt, seg); em_push_sreg()
1876 if (ctxt->op_bytes == 4) { em_push_sreg()
1877 rsp_increment(ctxt, -2); em_push_sreg()
1878 ctxt->op_bytes = 2; em_push_sreg()
1881 return em_push(ctxt); em_push_sreg()
1884 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) em_pop_sreg() argument
1886 int seg = ctxt->src2.val; em_pop_sreg()
1890 rc = emulate_pop(ctxt, &selector, 2); em_pop_sreg()
1894 if (ctxt->modrm_reg == VCPU_SREG_SS) em_pop_sreg()
1895 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; em_pop_sreg()
1896 if (ctxt->op_bytes > 2) em_pop_sreg()
1897 rsp_increment(ctxt, ctxt->op_bytes - 2); em_pop_sreg()
1899 rc = load_segment_descriptor(ctxt, (u16)selector, seg); em_pop_sreg()
1903 static int em_pusha(struct x86_emulate_ctxt *ctxt) em_pusha() argument
1905 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP); em_pusha()
1911 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg)); em_pusha()
1913 rc = em_push(ctxt); em_pusha()
1923 static int em_pushf(struct x86_emulate_ctxt *ctxt) em_pushf() argument
1925 ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM; em_pushf()
1926 return em_push(ctxt); em_pushf()
1929 static int em_popa(struct x86_emulate_ctxt *ctxt) em_popa() argument
1937 rsp_increment(ctxt, ctxt->op_bytes); em_popa()
1941 rc = emulate_pop(ctxt, &val, ctxt->op_bytes); em_popa()
1944 assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes); em_popa()
1950 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) __emulate_int_real() argument
1952 const struct x86_emulate_ops *ops = ctxt->ops; __emulate_int_real()
1960 ctxt->src.val = ctxt->eflags; __emulate_int_real()
1961 rc = em_push(ctxt); __emulate_int_real()
1965 ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC); __emulate_int_real()
1967 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); __emulate_int_real()
1968 rc = em_push(ctxt); __emulate_int_real()
1972 ctxt->src.val = ctxt->_eip; __emulate_int_real()
1973 rc = em_push(ctxt); __emulate_int_real()
1977 ops->get_idt(ctxt, &dt); __emulate_int_real()
1982 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); __emulate_int_real()
1986 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); __emulate_int_real()
1990 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); __emulate_int_real()
1994 ctxt->_eip = eip; __emulate_int_real()
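
__emulate_int_real() performs the real-mode INT sequence: push FLAGS, CS and IP, clear IF/TF/AC, then fetch the new IP (offset 0) and CS (offset 2) from the 4-byte IVT slot at idt_base + irq*4. A sketch of just the vector lookup:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* a fake 256-entry real-mode IVT: each slot is { IP, CS } */
        uint16_t ivt[256][2] = { { 0 } };
        int irq = 0x10;
        ivt[irq][0] = 0x1234;           /* new IP */
        ivt[irq][1] = 0xf000;           /* new CS */

        uint16_t eip = ivt[irq][0];
        uint16_t cs  = ivt[irq][1];
        printf("vector %#x -> %04x:%04x\n", irq, cs, eip);
        return 0;
    }
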
1999 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) emulate_int_real() argument
2003 invalidate_registers(ctxt); emulate_int_real()
2004 rc = __emulate_int_real(ctxt, irq); emulate_int_real()
2006 writeback_registers(ctxt); emulate_int_real()
2010 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) emulate_int() argument
2012 switch(ctxt->mode) { emulate_int()
2014 return __emulate_int_real(ctxt, irq); emulate_int()
2025 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) emulate_iret_real() argument
2042 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); emulate_iret_real()
2048 return emulate_gp(ctxt, 0); emulate_iret_real()
2050 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); emulate_iret_real()
2055 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); emulate_iret_real()
2060 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); emulate_iret_real()
2065 ctxt->_eip = temp_eip; emulate_iret_real()
2067 if (ctxt->op_bytes == 4) emulate_iret_real()
2068 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); emulate_iret_real()
2069 else if (ctxt->op_bytes == 2) { emulate_iret_real()
2070 ctxt->eflags &= ~0xffff; emulate_iret_real()
2071 ctxt->eflags |= temp_eflags; emulate_iret_real()
2074 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ emulate_iret_real()
2075 ctxt->eflags |= X86_EFLAGS_FIXED; emulate_iret_real()
2076 ctxt->ops->set_nmi_mask(ctxt, false); emulate_iret_real()
2081 static int em_iret(struct x86_emulate_ctxt *ctxt) em_iret() argument
2083 switch(ctxt->mode) { em_iret()
2085 return emulate_iret_real(ctxt); em_iret()
2096 static int em_jmp_far(struct x86_emulate_ctxt *ctxt) em_jmp_far() argument
2101 const struct x86_emulate_ops *ops = ctxt->ops; em_jmp_far()
2102 u8 cpl = ctxt->ops->cpl(ctxt); em_jmp_far()
2105 if (ctxt->mode == X86EMUL_MODE_PROT64) em_jmp_far()
2106 ops->get_segment(ctxt, &old_sel, &old_desc, NULL, em_jmp_far()
2109 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); em_jmp_far()
2111 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, em_jmp_far()
2117 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); em_jmp_far()
2119 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); em_jmp_far()
2121 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); em_jmp_far()
2127 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt) em_jmp_abs() argument
2129 return assign_eip_near(ctxt, ctxt->src.val); em_jmp_abs()
2132 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt) em_call_near_abs() argument
2137 old_eip = ctxt->_eip; em_call_near_abs()
2138 rc = assign_eip_near(ctxt, ctxt->src.val); em_call_near_abs()
2141 ctxt->src.val = old_eip; em_call_near_abs()
2142 rc = em_push(ctxt); em_call_near_abs()
2146 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) em_cmpxchg8b() argument
2148 u64 old = ctxt->dst.orig_val64; em_cmpxchg8b()
2150 if (ctxt->dst.bytes == 16) em_cmpxchg8b()
2153 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || em_cmpxchg8b()
2154 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { em_cmpxchg8b()
2155 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); em_cmpxchg8b()
2156 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); em_cmpxchg8b()
2157 ctxt->eflags &= ~X86_EFLAGS_ZF; em_cmpxchg8b()
2159 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | em_cmpxchg8b()
2160 (u32) reg_read(ctxt, VCPU_REGS_RBX); em_cmpxchg8b()
2162 ctxt->eflags |= X86_EFLAGS_ZF; em_cmpxchg8b()
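
em_cmpxchg8b() implements CMPXCHG8B: compare EDX:EAX against the 64-bit destination; on mismatch load the destination into EDX:EAX and clear ZF, on match store ECX:EBX and set ZF. The same logic standalone:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t mem = 0x1111111122222222ull;          /* destination m64 */
        uint32_t eax = 0x22222222, edx = 0x11111111;   /* expected value */
        uint32_t ebx = 0x44444444, ecx = 0x33333333;   /* replacement */
        int zf;

        if ((uint32_t)mem != eax || (uint32_t)(mem >> 32) != edx) {
            eax = (uint32_t)mem;          /* mismatch: publish current value */
            edx = (uint32_t)(mem >> 32);
            zf = 0;
        } else {
            mem = ((uint64_t)ecx << 32) | ebx;   /* match: store ECX:EBX */
            zf = 1;
        }
        printf("mem=%#llx zf=%d\n", (unsigned long long)mem, zf);
        return 0;
    }
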
2167 static int em_ret(struct x86_emulate_ctxt *ctxt) em_ret() argument
2172 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); em_ret()
2176 return assign_eip_near(ctxt, eip); em_ret()
2179 static int em_ret_far(struct x86_emulate_ctxt *ctxt) em_ret_far() argument
2184 int cpl = ctxt->ops->cpl(ctxt); em_ret_far()
2186 const struct x86_emulate_ops *ops = ctxt->ops; em_ret_far()
2188 if (ctxt->mode == X86EMUL_MODE_PROT64) em_ret_far()
2189 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, em_ret_far()
2192 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); em_ret_far()
2195 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); em_ret_far()
2199 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) em_ret_far()
2201 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, em_ret_far()
2206 rc = assign_eip_far(ctxt, eip, &new_desc); em_ret_far()
2208 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); em_ret_far()
2209 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); em_ret_far()
2214 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) em_ret_far_imm() argument
2218 rc = em_ret_far(ctxt); em_ret_far_imm()
2221 rsp_increment(ctxt, ctxt->src.val); em_ret_far_imm()
2225 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) em_cmpxchg() argument
2228 ctxt->dst.orig_val = ctxt->dst.val; em_cmpxchg()
2229 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); em_cmpxchg()
2230 ctxt->src.orig_val = ctxt->src.val; em_cmpxchg()
2231 ctxt->src.val = ctxt->dst.orig_val; em_cmpxchg()
2232 fastop(ctxt, em_cmp); em_cmpxchg()
2234 if (ctxt->eflags & X86_EFLAGS_ZF) { em_cmpxchg()
2236 ctxt->src.type = OP_NONE; em_cmpxchg()
2237 ctxt->dst.val = ctxt->src.orig_val; em_cmpxchg()
2240 ctxt->src.type = OP_REG; em_cmpxchg()
2241 ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); em_cmpxchg()
2242 ctxt->src.val = ctxt->dst.orig_val; em_cmpxchg()
2244 ctxt->dst.val = ctxt->dst.orig_val; em_cmpxchg()
2249 static int em_lseg(struct x86_emulate_ctxt *ctxt) em_lseg() argument
2251 int seg = ctxt->src2.val; em_lseg()
2255 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); em_lseg()
2257 rc = load_segment_descriptor(ctxt, sel, seg); em_lseg()
2261 ctxt->dst.val = ctxt->src.val; em_lseg()
2266 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, setup_syscalls_segments() argument
2292 static bool vendor_intel(struct x86_emulate_ctxt *ctxt) vendor_intel() argument
2297 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); vendor_intel()
2303 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) em_syscall_is_enabled() argument
2305 const struct x86_emulate_ops *ops = ctxt->ops; em_syscall_is_enabled()
2312 if (ctxt->mode == X86EMUL_MODE_PROT64) em_syscall_is_enabled()
2317 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); em_syscall_is_enabled()
2347 static int em_syscall(struct x86_emulate_ctxt *ctxt) em_syscall() argument
2349 const struct x86_emulate_ops *ops = ctxt->ops; em_syscall()
2356 if (ctxt->mode == X86EMUL_MODE_REAL || em_syscall()
2357 ctxt->mode == X86EMUL_MODE_VM86) em_syscall()
2358 return emulate_ud(ctxt); em_syscall()
2360 if (!(em_syscall_is_enabled(ctxt))) em_syscall()
2361 return emulate_ud(ctxt); em_syscall()
2363 ops->get_msr(ctxt, MSR_EFER, &efer); em_syscall()
2364 setup_syscalls_segments(ctxt, &cs, &ss); em_syscall()
2367 return emulate_ud(ctxt); em_syscall()
2369 ops->get_msr(ctxt, MSR_STAR, &msr_data); em_syscall()
2378 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); em_syscall()
2379 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); em_syscall()
2381 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; em_syscall()
2384 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; em_syscall()
2386 ops->get_msr(ctxt, em_syscall()
2387 ctxt->mode == X86EMUL_MODE_PROT64 ? em_syscall()
2389 ctxt->_eip = msr_data; em_syscall()
2391 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); em_syscall()
2392 ctxt->eflags &= ~msr_data; em_syscall()
2393 ctxt->eflags |= X86_EFLAGS_FIXED; em_syscall()
2397 ops->get_msr(ctxt, MSR_STAR, &msr_data); em_syscall()
2398 ctxt->_eip = (u32)msr_data; em_syscall()
2400 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); em_syscall()
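
In the 64-bit path of em_syscall(), RCX receives the old RIP, R11 the old RFLAGS, the new RIP comes from MSR_LSTAR, and RFLAGS is masked by MSR_SYSCALL_MASK. The new CS selector is architecturally STAR[47:32] with the RPL bits forced to 0, and SS is fixed at CS+8. A sketch of the selector derivation, with an invented MSR_STAR value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t star = 0x0023001000000000ull;        /* example MSR_STAR */
        uint32_t sel_base = (uint32_t)(star >> 32);   /* SYSCALL selector field */
        uint16_t cs_sel = (uint16_t)(sel_base & 0xfffc); /* RPL forced to 0 */
        uint16_t ss_sel = (uint16_t)(sel_base + 8);      /* SS fixed at CS+8 */
        printf("cs=%#x ss=%#x\n", cs_sel, ss_sel);       /* cs=0x10 ss=0x18 */
        return 0;
    }
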
2406 static int em_sysenter(struct x86_emulate_ctxt *ctxt) em_sysenter() argument
2408 const struct x86_emulate_ops *ops = ctxt->ops; em_sysenter()
2414 ops->get_msr(ctxt, MSR_EFER, &efer); em_sysenter()
2416 if (ctxt->mode == X86EMUL_MODE_REAL) em_sysenter()
2417 return emulate_gp(ctxt, 0); em_sysenter()
2423 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) em_sysenter()
2424 && !vendor_intel(ctxt)) em_sysenter()
2425 return emulate_ud(ctxt); em_sysenter()
2428 if (ctxt->mode == X86EMUL_MODE_PROT64) em_sysenter()
2431 setup_syscalls_segments(ctxt, &cs, &ss); em_sysenter()
2433 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); em_sysenter()
2435 return emulate_gp(ctxt, 0); em_sysenter()
2437 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); em_sysenter()
2445 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); em_sysenter()
2446 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); em_sysenter()
2448 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); em_sysenter()
2449 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data; em_sysenter()
2451 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); em_sysenter()
2452 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data : em_sysenter()
2458 static int em_sysexit(struct x86_emulate_ctxt *ctxt) em_sysexit() argument
2460 const struct x86_emulate_ops *ops = ctxt->ops; em_sysexit()
2467 if (ctxt->mode == X86EMUL_MODE_REAL || em_sysexit()
2468 ctxt->mode == X86EMUL_MODE_VM86) em_sysexit()
2469 return emulate_gp(ctxt, 0); em_sysexit()
2471 setup_syscalls_segments(ctxt, &cs, &ss); em_sysexit()
2473 if ((ctxt->rex_prefix & 0x8) != 0x0) em_sysexit()
2478 rcx = reg_read(ctxt, VCPU_REGS_RCX); em_sysexit()
2479 rdx = reg_read(ctxt, VCPU_REGS_RDX); em_sysexit()
2483 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); em_sysexit()
2488 return emulate_gp(ctxt, 0); em_sysexit()
2496 return emulate_gp(ctxt, 0); em_sysexit()
2502 return emulate_gp(ctxt, 0); em_sysexit()
2508 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); em_sysexit()
2509 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); em_sysexit()
2511 ctxt->_eip = rdx; em_sysexit()
2512 *reg_write(ctxt, VCPU_REGS_RSP) = rcx; em_sysexit()
2517 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) emulator_bad_iopl() argument
2520 if (ctxt->mode == X86EMUL_MODE_REAL) emulator_bad_iopl()
2522 if (ctxt->mode == X86EMUL_MODE_VM86) emulator_bad_iopl()
2524 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; emulator_bad_iopl()
2525 return ctxt->ops->cpl(ctxt) > iopl; emulator_bad_iopl()
2528 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, emulator_io_port_access_allowed() argument
2531 const struct x86_emulate_ops *ops = ctxt->ops; emulator_io_port_access_allowed()
2539 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); emulator_io_port_access_allowed()
2548 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); emulator_io_port_access_allowed()
2553 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); emulator_io_port_access_allowed()
2561 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, emulator_io_permited() argument
2564 if (ctxt->perm_ok) emulator_io_permited()
2567 if (emulator_bad_iopl(ctxt)) emulator_io_permited()
2568 if (!emulator_io_port_access_allowed(ctxt, port, len)) emulator_io_permited()
2571 ctxt->perm_ok = true; emulator_io_permited()
2576 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, save_state_to_tss16() argument
2579 tss->ip = ctxt->_eip; save_state_to_tss16()
2580 tss->flag = ctxt->eflags; save_state_to_tss16()
2581 tss->ax = reg_read(ctxt, VCPU_REGS_RAX); save_state_to_tss16()
2582 tss->cx = reg_read(ctxt, VCPU_REGS_RCX); save_state_to_tss16()
2583 tss->dx = reg_read(ctxt, VCPU_REGS_RDX); save_state_to_tss16()
2584 tss->bx = reg_read(ctxt, VCPU_REGS_RBX); save_state_to_tss16()
2585 tss->sp = reg_read(ctxt, VCPU_REGS_RSP); save_state_to_tss16()
2586 tss->bp = reg_read(ctxt, VCPU_REGS_RBP); save_state_to_tss16()
2587 tss->si = reg_read(ctxt, VCPU_REGS_RSI); save_state_to_tss16()
2588 tss->di = reg_read(ctxt, VCPU_REGS_RDI); save_state_to_tss16()
2590 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); save_state_to_tss16()
2591 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); save_state_to_tss16()
2592 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); save_state_to_tss16()
2593 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); save_state_to_tss16()
2594 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); save_state_to_tss16()
2597 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, load_state_from_tss16() argument
2603 ctxt->_eip = tss->ip; load_state_from_tss16()
2604 ctxt->eflags = tss->flag | 2; load_state_from_tss16()
2605 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; load_state_from_tss16()
2606 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; load_state_from_tss16()
2607 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; load_state_from_tss16()
2608 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; load_state_from_tss16()
2609 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; load_state_from_tss16()
2610 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; load_state_from_tss16()
2611 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; load_state_from_tss16()
2612 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; load_state_from_tss16()
2618 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); load_state_from_tss16()
2619 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); load_state_from_tss16()
2620 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); load_state_from_tss16()
2621 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); load_state_from_tss16()
2622 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); load_state_from_tss16()
2630 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, load_state_from_tss16()
2634 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, load_state_from_tss16()
2638 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, load_state_from_tss16()
2642 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, load_state_from_tss16()
2646 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, load_state_from_tss16()
2654 static int task_switch_16(struct x86_emulate_ctxt *ctxt, task_switch_16() argument
2658 const struct x86_emulate_ops *ops = ctxt->ops; task_switch_16()
2663 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, task_switch_16()
2664 &ctxt->exception); task_switch_16()
2668 save_state_to_tss16(ctxt, &tss_seg); task_switch_16()
2670 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, task_switch_16()
2671 &ctxt->exception); task_switch_16()
2675 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, task_switch_16()
2676 &ctxt->exception); task_switch_16()
2683 ret = ops->write_std(ctxt, new_tss_base, task_switch_16()
2686 &ctxt->exception); task_switch_16()
2691 return load_state_from_tss16(ctxt, &tss_seg); task_switch_16()
2694 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, save_state_to_tss32() argument
2698 tss->eip = ctxt->_eip; save_state_to_tss32()
2699 tss->eflags = ctxt->eflags; save_state_to_tss32()
2700 tss->eax = reg_read(ctxt, VCPU_REGS_RAX); save_state_to_tss32()
2701 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); save_state_to_tss32()
2702 tss->edx = reg_read(ctxt, VCPU_REGS_RDX); save_state_to_tss32()
2703 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); save_state_to_tss32()
2704 tss->esp = reg_read(ctxt, VCPU_REGS_RSP); save_state_to_tss32()
2705 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); save_state_to_tss32()
2706 tss->esi = reg_read(ctxt, VCPU_REGS_RSI); save_state_to_tss32()
2707 tss->edi = reg_read(ctxt, VCPU_REGS_RDI); save_state_to_tss32()
2709 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); save_state_to_tss32()
2710 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); save_state_to_tss32()
2711 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); save_state_to_tss32()
2712 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); save_state_to_tss32()
2713 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); save_state_to_tss32()
2714 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); save_state_to_tss32()
2717 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, load_state_from_tss32() argument
2723 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) load_state_from_tss32()
2724 return emulate_gp(ctxt, 0); load_state_from_tss32()
2725 ctxt->_eip = tss->eip; load_state_from_tss32()
2726 ctxt->eflags = tss->eflags | 2; load_state_from_tss32()
2729 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; load_state_from_tss32()
2730 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; load_state_from_tss32()
2731 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; load_state_from_tss32()
2732 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; load_state_from_tss32()
2733 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; load_state_from_tss32()
2734 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; load_state_from_tss32()
2735 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; load_state_from_tss32()
2736 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; load_state_from_tss32()
2743 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); load_state_from_tss32()
2744 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); load_state_from_tss32()
2745 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); load_state_from_tss32()
2746 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); load_state_from_tss32()
2747 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); load_state_from_tss32()
2748 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); load_state_from_tss32()
2749 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); load_state_from_tss32()
2756 if (ctxt->eflags & X86_EFLAGS_VM) { load_state_from_tss32()
2757 ctxt->mode = X86EMUL_MODE_VM86; load_state_from_tss32()
2760 ctxt->mode = X86EMUL_MODE_PROT32; load_state_from_tss32()
2768 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, load_state_from_tss32()
2772 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, load_state_from_tss32()
2776 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, load_state_from_tss32()
2780 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, load_state_from_tss32()
2784 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, load_state_from_tss32()
2788 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, load_state_from_tss32()
2792 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, load_state_from_tss32()
2798 static int task_switch_32(struct x86_emulate_ctxt *ctxt, task_switch_32() argument
2802 const struct x86_emulate_ops *ops = ctxt->ops; task_switch_32()
2809 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, task_switch_32()
2810 &ctxt->exception); task_switch_32()
2814 save_state_to_tss32(ctxt, &tss_seg); task_switch_32()
2817 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip, task_switch_32()
2818 ldt_sel_offset - eip_offset, &ctxt->exception); task_switch_32()
2822 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, task_switch_32()
2823 &ctxt->exception); task_switch_32()
2830 ret = ops->write_std(ctxt, new_tss_base, task_switch_32()
2833 &ctxt->exception); task_switch_32()
2838 return load_state_from_tss32(ctxt, &tss_seg); task_switch_32()
2841 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, emulator_do_task_switch() argument
2845 const struct x86_emulate_ops *ops = ctxt->ops; emulator_do_task_switch()
2848 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); emulator_do_task_switch()
2850 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); emulator_do_task_switch()
2856 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); emulator_do_task_switch()
2859 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); emulator_do_task_switch()
2879 ret = read_interrupt_descriptor(ctxt, idt_index, emulator_do_task_switch()
2885 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) emulator_do_task_switch()
2886 return emulate_gp(ctxt, (idt_index << 3) | 0x2); emulator_do_task_switch()
2894 return emulate_ts(ctxt, tss_selector & 0xfffc); emulator_do_task_switch()
2899 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); emulator_do_task_switch()
2903 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; emulator_do_task_switch()
2911 ret = task_switch_32(ctxt, tss_selector, old_tss_sel, emulator_do_task_switch()
2914 ret = task_switch_16(ctxt, tss_selector, old_tss_sel, emulator_do_task_switch()
2920 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; emulator_do_task_switch()
2924 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); emulator_do_task_switch()
2927 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); emulator_do_task_switch()
2928 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); emulator_do_task_switch()
2931 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; emulator_do_task_switch()
2932 ctxt->lock_prefix = 0; emulator_do_task_switch()
2933 ctxt->src.val = (unsigned long) error_code; emulator_do_task_switch()
2934 ret = em_push(ctxt); emulator_do_task_switch()
2940 int emulator_task_switch(struct x86_emulate_ctxt *ctxt, emulator_task_switch() argument
2946 invalidate_registers(ctxt); emulator_task_switch()
2947 ctxt->_eip = ctxt->eip; emulator_task_switch()
2948 ctxt->dst.type = OP_NONE; emulator_task_switch()
2950 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, emulator_task_switch()
2954 ctxt->eip = ctxt->_eip; emulator_task_switch()
2955 writeback_registers(ctxt); emulator_task_switch()
2961 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, string_addr_inc() argument
2964 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count; string_addr_inc()
2966 register_address_increment(ctxt, reg, df * op->bytes); string_addr_inc()
2967 op->addr.mem.ea = register_address(ctxt, reg); string_addr_inc()
2970 static int em_das(struct x86_emulate_ctxt *ctxt) em_das() argument
2975 cf = ctxt->eflags & X86_EFLAGS_CF; em_das()
2976 al = ctxt->dst.val; em_das()
2981 af = ctxt->eflags & X86_EFLAGS_AF; em_das()
2994 ctxt->dst.val = al; em_das()
2996 ctxt->src.type = OP_IMM; em_das()
2997 ctxt->src.val = 0; em_das()
2998 ctxt->src.bytes = 1; em_das()
2999 fastop(ctxt, em_or); em_das()
3000 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); em_das()
3002 ctxt->eflags |= X86_EFLAGS_CF; em_das()
3004 ctxt->eflags |= X86_EFLAGS_AF; em_das()
3008 static int em_aam(struct x86_emulate_ctxt *ctxt) em_aam() argument
3012 if (ctxt->src.val == 0) em_aam()
3013 return emulate_de(ctxt); em_aam()
3015 al = ctxt->dst.val & 0xff; em_aam()
3016 ah = al / ctxt->src.val; em_aam()
3017 al %= ctxt->src.val; em_aam()
3019 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); em_aam()
3022 ctxt->src.type = OP_IMM; em_aam()
3023 ctxt->src.val = 0; em_aam()
3024 ctxt->src.bytes = 1; em_aam()
3025 fastop(ctxt, em_or); em_aam()
3030 static int em_aad(struct x86_emulate_ctxt *ctxt) em_aad() argument
3032 u8 al = ctxt->dst.val & 0xff; em_aad()
3033 u8 ah = (ctxt->dst.val >> 8) & 0xff; em_aad()
3035 al = (al + (ah * ctxt->src.val)) & 0xff; em_aad()
3037 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; em_aad()
3040 ctxt->src.type = OP_IMM; em_aad()
3041 ctxt->src.val = 0; em_aad()
3042 ctxt->src.bytes = 1; em_aad()
3043 fastop(ctxt, em_or); em_aad()
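
em_aam()/em_aad() take the divisor from the immediate (normally 10): AAM splits AL into AH = AL / base and AL = AL % base, AAD folds them back as AL = (AL + AH * base) & 0xff with AH cleared; both handlers then derive flags via em_or with a zero source. The arithmetic alone:

    #include <stdio.h>

    int main(void)
    {
        unsigned base = 10;

        unsigned al = 57;                 /* AAM: 57 -> AH=5, AL=7 */
        unsigned ah = al / base;
        al %= base;
        printf("AAM: ah=%u al=%u\n", ah, al);

        al = (al + ah * base) & 0xff;     /* AAD: back to 57, AH cleared */
        ah = 0;
        printf("AAD: ah=%u al=%u\n", ah, al);
        return 0;
    }
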
3048 static int em_call(struct x86_emulate_ctxt *ctxt) em_call() argument
3051 long rel = ctxt->src.val; em_call()
3053 ctxt->src.val = (unsigned long)ctxt->_eip; em_call()
3054 rc = jmp_rel(ctxt, rel); em_call()
3057 return em_push(ctxt); em_call()
3060 static int em_call_far(struct x86_emulate_ctxt *ctxt) em_call_far() argument
3066 const struct x86_emulate_ops *ops = ctxt->ops; em_call_far()
3067 int cpl = ctxt->ops->cpl(ctxt); em_call_far()
3068 enum x86emul_mode prev_mode = ctxt->mode; em_call_far()
3070 old_eip = ctxt->_eip; em_call_far()
3071 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); em_call_far()
3073 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); em_call_far()
3074 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, em_call_far()
3079 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); em_call_far()
3083 ctxt->src.val = old_cs; em_call_far()
3084 rc = em_push(ctxt); em_call_far()
3088 ctxt->src.val = old_eip; em_call_far()
3089 rc = em_push(ctxt); em_call_far()
3098 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); em_call_far()
3099 ctxt->mode = prev_mode; em_call_far()
3104 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) em_ret_near_imm() argument
3109 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); em_ret_near_imm()
3112 rc = assign_eip_near(ctxt, eip); em_ret_near_imm()
3115 rsp_increment(ctxt, ctxt->src.val); em_ret_near_imm()
3119 static int em_xchg(struct x86_emulate_ctxt *ctxt) em_xchg() argument
3122 ctxt->src.val = ctxt->dst.val; em_xchg()
3123 write_register_operand(&ctxt->src); em_xchg()
3126 ctxt->dst.val = ctxt->src.orig_val; em_xchg()
3127 ctxt->lock_prefix = 1; em_xchg()
3131 static int em_imul_3op(struct x86_emulate_ctxt *ctxt) em_imul_3op() argument
3133 ctxt->dst.val = ctxt->src2.val; em_imul_3op()
3134 return fastop(ctxt, em_imul); em_imul_3op()
3137 static int em_cwd(struct x86_emulate_ctxt *ctxt) em_cwd() argument
3139 ctxt->dst.type = OP_REG; em_cwd()
3140 ctxt->dst.bytes = ctxt->src.bytes; em_cwd()
3141 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); em_cwd()
3142 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); em_cwd()
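
em_cwd() sign-extends the source into RDX branchlessly: shifting the sign bit down and computing ~(sign - 1) yields all-ones for a negative source and zero otherwise. Verified standalone:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t src = 0x80000001;        /* negative as a signed 32-bit value */
        int bits = 32;
        uint64_t dx = ~(((uint64_t)src >> (bits - 1)) - 1);
        printf("%#llx\n", (unsigned long long)dx);  /* 0xffffffffffffffff */
        return 0;
    }
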
3147 static int em_rdtsc(struct x86_emulate_ctxt *ctxt) em_rdtsc() argument
3151 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); em_rdtsc()
3152 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; em_rdtsc()
3153 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; em_rdtsc()
3157 static int em_rdpmc(struct x86_emulate_ctxt *ctxt) em_rdpmc() argument
3161 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) em_rdpmc()
3162 return emulate_gp(ctxt, 0); em_rdpmc()
3163 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; em_rdpmc()
3164 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; em_rdpmc()
3168 static int em_mov(struct x86_emulate_ctxt *ctxt) em_mov() argument
3170 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); em_mov()
3176 static int em_movbe(struct x86_emulate_ctxt *ctxt) em_movbe() argument
3184 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); em_movbe()
3186 return emulate_ud(ctxt); em_movbe()
3188 switch (ctxt->op_bytes) { em_movbe()
3198 tmp = (u16)ctxt->src.val; em_movbe()
3199 ctxt->dst.val &= ~0xffffUL; em_movbe()
3200 ctxt->dst.val |= (unsigned long)swab16(tmp); em_movbe()
3203 ctxt->dst.val = swab32((u32)ctxt->src.val); em_movbe()
3206 ctxt->dst.val = swab64(ctxt->src.val); em_movbe()
3214 static int em_cr_write(struct x86_emulate_ctxt *ctxt) em_cr_write() argument
3216 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) em_cr_write()
3217 return emulate_gp(ctxt, 0); em_cr_write()
3220 ctxt->dst.type = OP_NONE; em_cr_write()
3224 static int em_dr_write(struct x86_emulate_ctxt *ctxt) em_dr_write() argument
3228 if (ctxt->mode == X86EMUL_MODE_PROT64) em_dr_write()
3229 val = ctxt->src.val & ~0ULL; em_dr_write()
3231 val = ctxt->src.val & ~0U; em_dr_write()
3234 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) em_dr_write()
3235 return emulate_gp(ctxt, 0); em_dr_write()
3238 ctxt->dst.type = OP_NONE; em_dr_write()
3242 static int em_wrmsr(struct x86_emulate_ctxt *ctxt) em_wrmsr() argument
3246 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) em_wrmsr()
3247 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); em_wrmsr()
3248 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data)) em_wrmsr()
3249 return emulate_gp(ctxt, 0); em_wrmsr()
3254 static int em_rdmsr(struct x86_emulate_ctxt *ctxt) em_rdmsr() argument
3258 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data)) em_rdmsr()
3259 return emulate_gp(ctxt, 0); em_rdmsr()
3261 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; em_rdmsr()
3262 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; em_rdmsr()
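
em_rdmsr()/em_wrmsr() follow the RDMSR/WRMSR convention of splitting the 64-bit value across EDX (high half) and EAX (low half). A round-trip check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t msr = 0xdeadbeefcafef00dull;
        uint32_t eax = (uint32_t)msr;          /* low 32 bits */
        uint32_t edx = (uint32_t)(msr >> 32);  /* high 32 bits */
        uint64_t back = ((uint64_t)edx << 32) | eax;  /* WRMSR reassembles */
        printf("eax=%#x edx=%#x ok=%d\n", eax, edx, back == msr);
        return 0;
    }
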
3266 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) em_mov_rm_sreg() argument
3268 if (ctxt->modrm_reg > VCPU_SREG_GS) em_mov_rm_sreg()
3269 return emulate_ud(ctxt); em_mov_rm_sreg()
3271 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg); em_mov_rm_sreg()
3272 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM) em_mov_rm_sreg()
3273 ctxt->dst.bytes = 2; em_mov_rm_sreg()
3277 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) em_mov_sreg_rm() argument
3279 u16 sel = ctxt->src.val; em_mov_sreg_rm()
3281 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) em_mov_sreg_rm()
3282 return emulate_ud(ctxt); em_mov_sreg_rm()
3284 if (ctxt->modrm_reg == VCPU_SREG_SS) em_mov_sreg_rm()
3285 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; em_mov_sreg_rm()
3288 ctxt->dst.type = OP_NONE; em_mov_sreg_rm()
3289 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); em_mov_sreg_rm()
3292 static int em_lldt(struct x86_emulate_ctxt *ctxt) em_lldt() argument
3294 u16 sel = ctxt->src.val; em_lldt()
3297 ctxt->dst.type = OP_NONE; em_lldt()
3298 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); em_lldt()
3301 static int em_ltr(struct x86_emulate_ctxt *ctxt) em_ltr() argument
3303 u16 sel = ctxt->src.val; em_ltr()
3306 ctxt->dst.type = OP_NONE; em_ltr()
3307 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); em_ltr()
3310 static int em_invlpg(struct x86_emulate_ctxt *ctxt) em_invlpg() argument
3315 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); em_invlpg()
3317 ctxt->ops->invlpg(ctxt, linear); em_invlpg()
3319 ctxt->dst.type = OP_NONE; em_invlpg()
3323 static int em_clts(struct x86_emulate_ctxt *ctxt) em_clts() argument
3327 cr0 = ctxt->ops->get_cr(ctxt, 0); em_clts()
3329 ctxt->ops->set_cr(ctxt, 0, cr0); em_clts()
3333 static int em_hypercall(struct x86_emulate_ctxt *ctxt) em_hypercall() argument
3335 int rc = ctxt->ops->fix_hypercall(ctxt); em_hypercall()
3341 ctxt->_eip = ctxt->eip; em_hypercall()
3343 ctxt->dst.type = OP_NONE; em_hypercall()
3347 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, emulate_store_desc_ptr() argument
3348 void (*get)(struct x86_emulate_ctxt *ctxt, emulate_store_desc_ptr()
3353 if (ctxt->mode == X86EMUL_MODE_PROT64) emulate_store_desc_ptr()
3354 ctxt->op_bytes = 8; emulate_store_desc_ptr()
3355 get(ctxt, &desc_ptr); emulate_store_desc_ptr()
3356 if (ctxt->op_bytes == 2) { emulate_store_desc_ptr()
3357 ctxt->op_bytes = 4; emulate_store_desc_ptr()
3361 ctxt->dst.type = OP_NONE; emulate_store_desc_ptr()
3362 return segmented_write(ctxt, ctxt->dst.addr.mem, emulate_store_desc_ptr()
3363 &desc_ptr, 2 + ctxt->op_bytes); emulate_store_desc_ptr()
3366 static int em_sgdt(struct x86_emulate_ctxt *ctxt) em_sgdt() argument
3368 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); em_sgdt()
3371 static int em_sidt(struct x86_emulate_ctxt *ctxt) em_sidt() argument
3373 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); em_sidt()
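
SGDT/SIDT store a 16-bit limit followed by the base address, which is why emulate_store_desc_ptr() writes 2 + ctxt->op_bytes bytes. A sketch of the in-memory image, assuming a little-endian target so copying the low bytes of the base is enough (helper name hypothetical):

#include <stdint.h>
#include <string.h>

static size_t store_desc_ptr(void *dst, uint16_t limit, uint64_t base,
                             int op_bytes)
{
        memcpy(dst, &limit, 2);                   /* limit:16 */
        memcpy((char *)dst + 2, &base, op_bytes); /* base: 4 or 8 bytes */
        return 2 + op_bytes;
}
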
3376 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt) em_lgdt_lidt() argument
3381 if (ctxt->mode == X86EMUL_MODE_PROT64) em_lgdt_lidt()
3382 ctxt->op_bytes = 8; em_lgdt_lidt()
3383 rc = read_descriptor(ctxt, ctxt->src.addr.mem, em_lgdt_lidt()
3385 ctxt->op_bytes); em_lgdt_lidt()
3388 if (ctxt->mode == X86EMUL_MODE_PROT64 && em_lgdt_lidt()
3390 return emulate_gp(ctxt, 0); em_lgdt_lidt()
3392 ctxt->ops->set_gdt(ctxt, &desc_ptr); em_lgdt_lidt()
3394 ctxt->ops->set_idt(ctxt, &desc_ptr); em_lgdt_lidt()
3396 ctxt->dst.type = OP_NONE; em_lgdt_lidt()
3400 static int em_lgdt(struct x86_emulate_ctxt *ctxt) em_lgdt() argument
3402 return em_lgdt_lidt(ctxt, true); em_lgdt()
3405 static int em_lidt(struct x86_emulate_ctxt *ctxt) em_lidt() argument
3407 return em_lgdt_lidt(ctxt, false); em_lidt()
3410 static int em_smsw(struct x86_emulate_ctxt *ctxt) em_smsw() argument
3412 if (ctxt->dst.type == OP_MEM) em_smsw()
3413 ctxt->dst.bytes = 2; em_smsw()
3414 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); em_smsw()
3418 static int em_lmsw(struct x86_emulate_ctxt *ctxt) em_lmsw() argument
3420 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) em_lmsw()
3421 | (ctxt->src.val & 0x0f)); em_lmsw()
3422 ctxt->dst.type = OP_NONE; em_lmsw()
3426 static int em_loop(struct x86_emulate_ctxt *ctxt) em_loop() argument
3430 register_address_increment(ctxt, VCPU_REGS_RCX, -1); em_loop()
3431 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && em_loop()
3432 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) em_loop()
3433 rc = jmp_rel(ctxt, ctxt->src.val); em_loop()
3438 static int em_jcxz(struct x86_emulate_ctxt *ctxt) em_jcxz() argument
3442 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) em_jcxz()
3443 rc = jmp_rel(ctxt, ctxt->src.val); em_jcxz()
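
em_loop() and em_jcxz() both test the count register through address_mask(), so an address-size prefix makes them see CX or ECX rather than RCX. A condensed sketch of the masked decrement-and-test, assuming a 64-bit unsigned long:

/* ad_bytes is 2, 4 or 8; the == 8 case avoids an undefined 64-bit
 * shift, as address_mask() does in the original. */
static unsigned long ad_mask_of(int ad_bytes)
{
        return (ad_bytes == 8) ? ~0UL : (1UL << (ad_bytes * 8)) - 1;
}

static int loop_taken(unsigned long *rcx, int ad_bytes, int cond_ok)
{
        unsigned long mask = ad_mask_of(ad_bytes);

        *rcx = (*rcx & ~mask) | ((*rcx - 1) & mask); /* masked decrement */
        return ((*rcx & mask) != 0) && cond_ok;      /* LOOP/LOOPcc test */
}
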
3448 static int em_in(struct x86_emulate_ctxt *ctxt) em_in() argument
3450 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, em_in()
3451 &ctxt->dst.val)) em_in()
3457 static int em_out(struct x86_emulate_ctxt *ctxt) em_out() argument
3459 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, em_out()
3460 &ctxt->src.val, 1); em_out()
3462 ctxt->dst.type = OP_NONE; em_out()
3466 static int em_cli(struct x86_emulate_ctxt *ctxt) em_cli() argument
3468 if (emulator_bad_iopl(ctxt)) em_cli()
3469 return emulate_gp(ctxt, 0); em_cli()
3471 ctxt->eflags &= ~X86_EFLAGS_IF; em_cli()
3475 static int em_sti(struct x86_emulate_ctxt *ctxt) em_sti() argument
3477 if (emulator_bad_iopl(ctxt)) em_sti()
3478 return emulate_gp(ctxt, 0); em_sti()
3480 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; em_sti()
3481 ctxt->eflags |= X86_EFLAGS_IF; em_sti()
3485 static int em_cpuid(struct x86_emulate_ctxt *ctxt) em_cpuid() argument
3489 eax = reg_read(ctxt, VCPU_REGS_RAX); em_cpuid()
3490 ecx = reg_read(ctxt, VCPU_REGS_RCX); em_cpuid()
3491 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); em_cpuid()
3492 *reg_write(ctxt, VCPU_REGS_RAX) = eax; em_cpuid()
3493 *reg_write(ctxt, VCPU_REGS_RBX) = ebx; em_cpuid()
3494 *reg_write(ctxt, VCPU_REGS_RCX) = ecx; em_cpuid()
3495 *reg_write(ctxt, VCPU_REGS_RDX) = edx; em_cpuid()
3499 static int em_sahf(struct x86_emulate_ctxt *ctxt) em_sahf() argument
3505 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; em_sahf()
3507 ctxt->eflags &= ~0xffUL; em_sahf()
3508 ctxt->eflags |= flags | X86_EFLAGS_FIXED; em_sahf()
3512 static int em_lahf(struct x86_emulate_ctxt *ctxt) em_lahf() argument
3514 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; em_lahf()
3515 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; em_lahf()
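
em_sahf()/em_lahf() move the low status byte of EFLAGS through AH; only SF, ZF, AF, PF and CF are architecturally loaded by SAHF, and bit 1 of EFLAGS always reads as 1. A sketch (the masks are the architectural flag bits):

#define EFLAGS_FIXED 0x2UL  /* bit 1 of EFLAGS is always set */

static unsigned long do_lahf(unsigned long rax, unsigned long eflags)
{
        return (rax & ~0xff00UL) | ((eflags & 0xff) << 8);
}

static unsigned long do_sahf(unsigned long rax, unsigned long eflags)
{
        /* SF | ZF | AF | PF | CF taken from AH */
        unsigned long ah = (rax >> 8) & (0x80 | 0x40 | 0x10 | 0x04 | 0x01);

        return (eflags & ~0xffUL) | ah | EFLAGS_FIXED;
}
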
3519 static int em_bswap(struct x86_emulate_ctxt *ctxt) em_bswap() argument
3521 switch (ctxt->op_bytes) { em_bswap()
3524 asm("bswap %0" : "+r"(ctxt->dst.val)); em_bswap()
3528 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); em_bswap()
3534 static int em_clflush(struct x86_emulate_ctxt *ctxt) em_clflush() argument
3540 static int em_movsxd(struct x86_emulate_ctxt *ctxt) em_movsxd() argument
3542 ctxt->dst.val = (s32) ctxt->src.val; em_movsxd()
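
em_movsxd() is a plain signed 32-to-64 widening; the (s32) cast above is equivalent to:

#include <stdint.h>

static uint64_t movsxd(uint32_t src)
{
        /* sign-extend bit 31 through bits 32..63 */
        return (uint64_t)(int64_t)(int32_t)src;
}
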
3558 static int check_cr_read(struct x86_emulate_ctxt *ctxt) check_cr_read() argument
3560 if (!valid_cr(ctxt->modrm_reg)) check_cr_read()
3561 return emulate_ud(ctxt); check_cr_read()
3566 static int check_cr_write(struct x86_emulate_ctxt *ctxt) check_cr_write() argument
3568 u64 new_val = ctxt->src.val64; check_cr_write()
3569 int cr = ctxt->modrm_reg; check_cr_write()
3581 return emulate_ud(ctxt); check_cr_write()
3584 return emulate_gp(ctxt, 0); check_cr_write()
3591 return emulate_gp(ctxt, 0); check_cr_write()
3593 cr4 = ctxt->ops->get_cr(ctxt, 4); check_cr_write()
3594 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); check_cr_write()
3598 return emulate_gp(ctxt, 0); check_cr_write()
3605 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); check_cr_write()
3610 return emulate_gp(ctxt, 0); check_cr_write()
3615 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); check_cr_write()
3618 return emulate_gp(ctxt, 0); check_cr_write()
3627 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) check_dr7_gd() argument
3631 ctxt->ops->get_dr(ctxt, 7, &dr7); check_dr7_gd()
3637 static int check_dr_read(struct x86_emulate_ctxt *ctxt) check_dr_read() argument
3639 int dr = ctxt->modrm_reg; check_dr_read()
3643 return emulate_ud(ctxt); check_dr_read()
3645 cr4 = ctxt->ops->get_cr(ctxt, 4); check_dr_read()
3647 return emulate_ud(ctxt); check_dr_read()
3649 if (check_dr7_gd(ctxt)) { check_dr_read()
3652 ctxt->ops->get_dr(ctxt, 6, &dr6); check_dr_read()
3655 ctxt->ops->set_dr(ctxt, 6, dr6); check_dr_read()
3656 return emulate_db(ctxt); check_dr_read()
3662 static int check_dr_write(struct x86_emulate_ctxt *ctxt) check_dr_write() argument
3664 u64 new_val = ctxt->src.val64; check_dr_write()
3665 int dr = ctxt->modrm_reg; check_dr_write()
3668 return emulate_gp(ctxt, 0); check_dr_write()
3670 return check_dr_read(ctxt); check_dr_write()
3673 static int check_svme(struct x86_emulate_ctxt *ctxt) check_svme() argument
3677 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); check_svme()
3680 return emulate_ud(ctxt); check_svme()
3685 static int check_svme_pa(struct x86_emulate_ctxt *ctxt) check_svme_pa() argument
3687 u64 rax = reg_read(ctxt, VCPU_REGS_RAX); check_svme_pa()
3691 return emulate_gp(ctxt, 0); check_svme_pa()
3693 return check_svme(ctxt); check_svme_pa()
3696 static int check_rdtsc(struct x86_emulate_ctxt *ctxt) check_rdtsc() argument
3698 u64 cr4 = ctxt->ops->get_cr(ctxt, 4); check_rdtsc()
3700 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) check_rdtsc()
3701 return emulate_ud(ctxt); check_rdtsc()
3706 static int check_rdpmc(struct x86_emulate_ctxt *ctxt) check_rdpmc() argument
3708 u64 cr4 = ctxt->ops->get_cr(ctxt, 4); check_rdpmc()
3709 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); check_rdpmc()
3711 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || check_rdpmc()
3712 ctxt->ops->check_pmc(ctxt, rcx)) check_rdpmc()
3713 return emulate_gp(ctxt, 0); check_rdpmc()
3718 static int check_perm_in(struct x86_emulate_ctxt *ctxt) check_perm_in() argument
3720 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); check_perm_in()
3721 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes)) check_perm_in()
3722 return emulate_gp(ctxt, 0); check_perm_in()
3727 static int check_perm_out(struct x86_emulate_ctxt *ctxt) check_perm_out() argument
3729 ctxt->src.bytes = min(ctxt->src.bytes, 4u); check_perm_out()
3730 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes)) check_perm_out()
3731 return emulate_gp(ctxt, 0); check_perm_out()
4258 static unsigned imm_size(struct x86_emulate_ctxt *ctxt) imm_size() argument
4262 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; imm_size()
4268 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, decode_imm() argument
4275 op->addr.mem.ea = ctxt->_eip; decode_imm()
4279 op->val = insn_fetch(s8, ctxt); decode_imm()
4282 op->val = insn_fetch(s16, ctxt); decode_imm()
4285 op->val = insn_fetch(s32, ctxt); decode_imm()
4288 op->val = insn_fetch(s64, ctxt); decode_imm()
4308 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, decode_operand() argument
4315 decode_register_operand(ctxt, op); decode_operand()
4318 rc = decode_imm(ctxt, op, 1, false); decode_operand()
4321 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; decode_operand()
4323 *op = ctxt->memop; decode_operand()
4324 ctxt->memopp = op; decode_operand()
4325 if (ctxt->d & BitOp) decode_operand()
4326 fetch_bit_operand(ctxt); decode_operand()
4330 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; decode_operand()
4334 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; decode_operand()
4335 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); decode_operand()
4341 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes; decode_operand()
4342 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); decode_operand()
4347 if (ctxt->d & ByteOp) { decode_operand()
4352 op->bytes = ctxt->op_bytes; decode_operand()
4353 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); decode_operand()
4359 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; decode_operand()
4361 register_address(ctxt, VCPU_REGS_RDI); decode_operand()
4369 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); decode_operand()
4375 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; decode_operand()
4378 rc = decode_imm(ctxt, op, 1, true); decode_operand()
4386 rc = decode_imm(ctxt, op, imm_size(ctxt), true); decode_operand()
4389 rc = decode_imm(ctxt, op, ctxt->op_bytes, true); decode_operand()
4392 ctxt->memop.bytes = 1; decode_operand()
4393 if (ctxt->memop.type == OP_REG) { decode_operand()
4394 ctxt->memop.addr.reg = decode_register(ctxt, decode_operand()
4395 ctxt->modrm_rm, true); decode_operand()
4396 fetch_register_operand(&ctxt->memop); decode_operand()
4400 ctxt->memop.bytes = 2; decode_operand()
4403 ctxt->memop.bytes = 4; decode_operand()
4406 rc = decode_imm(ctxt, op, 2, false); decode_operand()
4409 rc = decode_imm(ctxt, op, imm_size(ctxt), false); decode_operand()
4413 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; decode_operand()
4415 register_address(ctxt, VCPU_REGS_RSI); decode_operand()
4416 op->addr.mem.seg = ctxt->seg_override; decode_operand()
4422 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; decode_operand()
4424 address_mask(ctxt, decode_operand()
4425 reg_read(ctxt, VCPU_REGS_RBX) + decode_operand()
4426 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); decode_operand()
4427 op->addr.mem.seg = ctxt->seg_override; decode_operand()
4432 op->addr.mem.ea = ctxt->_eip; decode_operand()
4433 op->bytes = ctxt->op_bytes + 2; decode_operand()
4434 insn_fetch_arr(op->valptr, op->bytes, ctxt); decode_operand()
4437 ctxt->memop.bytes = ctxt->op_bytes + 2; decode_operand()
4474 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) x86_decode_insn() argument
4477 int mode = ctxt->mode; x86_decode_insn()
4483 ctxt->memop.type = OP_NONE; x86_decode_insn()
4484 ctxt->memopp = NULL; x86_decode_insn()
4485 ctxt->_eip = ctxt->eip; x86_decode_insn()
4486 ctxt->fetch.ptr = ctxt->fetch.data; x86_decode_insn()
4487 ctxt->fetch.end = ctxt->fetch.data + insn_len; x86_decode_insn()
4488 ctxt->opcode_len = 1; x86_decode_insn()
4490 memcpy(ctxt->fetch.data, insn, insn_len); x86_decode_insn()
4492 rc = __do_insn_fetch_bytes(ctxt, 1); x86_decode_insn()
4516 ctxt->op_bytes = def_op_bytes; x86_decode_insn()
4517 ctxt->ad_bytes = def_ad_bytes; x86_decode_insn()
4521 switch (ctxt->b = insn_fetch(u8, ctxt)) { x86_decode_insn()
4525 ctxt->op_bytes = def_op_bytes ^ 6; x86_decode_insn()
4530 ctxt->ad_bytes = def_ad_bytes ^ 12; x86_decode_insn()
4533 ctxt->ad_bytes = def_ad_bytes ^ 6; x86_decode_insn()
4540 ctxt->seg_override = (ctxt->b >> 3) & 3; x86_decode_insn()
4545 ctxt->seg_override = ctxt->b & 7; x86_decode_insn()
4550 ctxt->rex_prefix = ctxt->b; x86_decode_insn()
4553 ctxt->lock_prefix = 1; x86_decode_insn()
4557 ctxt->rep_prefix = ctxt->b; x86_decode_insn()
4565 ctxt->rex_prefix = 0; x86_decode_insn()
4571 if (ctxt->rex_prefix & 8) x86_decode_insn()
4572 ctxt->op_bytes = 8; /* REX.W */ x86_decode_insn()
4575 opcode = opcode_table[ctxt->b]; x86_decode_insn()
4577 if (ctxt->b == 0x0f) { x86_decode_insn()
4578 ctxt->opcode_len = 2; x86_decode_insn()
4579 ctxt->b = insn_fetch(u8, ctxt); x86_decode_insn()
4580 opcode = twobyte_table[ctxt->b]; x86_decode_insn()
4583 if (ctxt->b == 0x38) { x86_decode_insn()
4584 ctxt->opcode_len = 3; x86_decode_insn()
4585 ctxt->b = insn_fetch(u8, ctxt); x86_decode_insn()
4586 opcode = opcode_map_0f_38[ctxt->b]; x86_decode_insn()
4589 ctxt->d = opcode.flags; x86_decode_insn()
4591 if (ctxt->d & ModRM) x86_decode_insn()
4592 ctxt->modrm = insn_fetch(u8, ctxt); x86_decode_insn()
4595 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && x86_decode_insn()
4596 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) { x86_decode_insn()
4597 ctxt->d = NotImpl; x86_decode_insn()
4600 while (ctxt->d & GroupMask) { x86_decode_insn()
4601 switch (ctxt->d & GroupMask) { x86_decode_insn()
4603 goffset = (ctxt->modrm >> 3) & 7; x86_decode_insn()
4607 goffset = (ctxt->modrm >> 3) & 7; x86_decode_insn()
4608 if ((ctxt->modrm >> 6) == 3) x86_decode_insn()
4614 goffset = ctxt->modrm & 7; x86_decode_insn()
4618 if (ctxt->rep_prefix && op_prefix) x86_decode_insn()
4620 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; x86_decode_insn()
4629 if (ctxt->modrm > 0xbf) x86_decode_insn()
4630 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; x86_decode_insn()
4632 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; x86_decode_insn()
4635 if ((ctxt->modrm >> 6) == 3) x86_decode_insn()
4641 if (ctxt->mode == X86EMUL_MODE_PROT64) x86_decode_insn()
4650 ctxt->d &= ~(u64)GroupMask; x86_decode_insn()
4651 ctxt->d |= opcode.flags; x86_decode_insn()
4655 if (ctxt->d == 0) x86_decode_insn()
4658 ctxt->execute = opcode.u.execute; x86_decode_insn()
4660 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) x86_decode_insn()
4663 if (unlikely(ctxt->d & x86_decode_insn()
4670 ctxt->check_perm = opcode.check_perm; x86_decode_insn()
4671 ctxt->intercept = opcode.intercept; x86_decode_insn()
4673 if (ctxt->d & NotImpl) x86_decode_insn()
4677 if (ctxt->op_bytes == 4 && (ctxt->d & Stack)) x86_decode_insn()
4678 ctxt->op_bytes = 8; x86_decode_insn()
4679 else if (ctxt->d & NearBranch) x86_decode_insn()
4680 ctxt->op_bytes = 8; x86_decode_insn()
4683 if (ctxt->d & Op3264) { x86_decode_insn()
4685 ctxt->op_bytes = 8; x86_decode_insn()
4687 ctxt->op_bytes = 4; x86_decode_insn()
4690 if ((ctxt->d & No16) && ctxt->op_bytes == 2) x86_decode_insn()
4691 ctxt->op_bytes = 4; x86_decode_insn()
4693 if (ctxt->d & Sse) x86_decode_insn()
4694 ctxt->op_bytes = 16; x86_decode_insn()
4695 else if (ctxt->d & Mmx) x86_decode_insn()
4696 ctxt->op_bytes = 8; x86_decode_insn()
4700 if (ctxt->d & ModRM) { x86_decode_insn()
4701 rc = decode_modrm(ctxt, &ctxt->memop); x86_decode_insn()
4704 ctxt->seg_override = ctxt->modrm_seg; x86_decode_insn()
4706 } else if (ctxt->d & MemAbs) x86_decode_insn()
4707 rc = decode_abs(ctxt, &ctxt->memop); x86_decode_insn()
4712 ctxt->seg_override = VCPU_SREG_DS; x86_decode_insn()
4714 ctxt->memop.addr.mem.seg = ctxt->seg_override; x86_decode_insn()
4720 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); x86_decode_insn()
4728 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); x86_decode_insn()
4733 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); x86_decode_insn()
4735 if (ctxt->rip_relative) x86_decode_insn()
4736 ctxt->memopp->addr.mem.ea = address_mask(ctxt, x86_decode_insn()
4737 ctxt->memopp->addr.mem.ea + ctxt->_eip); x86_decode_insn()
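
The rip_relative fixup at the end of decode works because the ModRM displacement was decoded relative to the end of the instruction, and ctxt->_eip points past the instruction once fetching is done. A one-function sketch:

/* RIP-relative: disp32 is relative to the *next* instruction's address. */
static unsigned long rip_relative_ea(unsigned long next_rip, long disp,
                                     unsigned long ad_mask)
{
        return (next_rip + disp) & ad_mask;
}
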
4743 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt) x86_page_table_writing_insn() argument
4745 return ctxt->d & PageTable; x86_page_table_writing_insn()
4748 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) string_insn_completed() argument
4757 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || string_insn_completed()
4758 (ctxt->b == 0xae) || (ctxt->b == 0xaf)) string_insn_completed()
4759 && (((ctxt->rep_prefix == REPE_PREFIX) && string_insn_completed()
4760 ((ctxt->eflags & X86_EFLAGS_ZF) == 0)) string_insn_completed()
4761 || ((ctxt->rep_prefix == REPNE_PREFIX) && string_insn_completed()
4762 ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF)))) string_insn_completed()
4768 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) flush_pending_x87_faults() argument
4772 ctxt->ops->get_fpu(ctxt); flush_pending_x87_faults()
4782 ctxt->ops->put_fpu(ctxt); flush_pending_x87_faults()
4785 return emulate_exception(ctxt, MF_VECTOR, 0, false); flush_pending_x87_faults()
4790 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt, fetch_possible_mmx_operand() argument
4794 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm); fetch_possible_mmx_operand()
4797 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) fastop() argument
4799 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; fastop()
4800 if (!(ctxt->d & ByteOp)) fastop()
4801 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; fastop()
4803 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), fastop()
4805 : "c"(ctxt->src2.val)); fastop()
4806 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); fastop()
4808 return emulate_de(ctxt); fastop()
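
fastop() selects an operand-size variant by pointer arithmetic: the stubs for 1/2/4/8-byte operands are emitted FASTOP_SIZE apart, and __ffs() maps the sizes 1,2,4,8 to indexes 0,1,2,3. A sketch of the indexing (the stride value here is illustrative, and casting function pointers through char * is non-portable but mirrors the kernel's trick):

#define FASTOP_STRIDE 8  /* illustrative; the kernel derives FASTOP_SIZE */

typedef void (*fastop_fn)(void);

static fastop_fn select_fastop(fastop_fn base, unsigned bytes)
{
        /* __builtin_ctz(1,2,4,8) == 0,1,2,3, like __ffs() */
        return (fastop_fn)((char *)base +
                           __builtin_ctz(bytes) * FASTOP_STRIDE);
}
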
4812 void init_decode_cache(struct x86_emulate_ctxt *ctxt) init_decode_cache() argument
4814 memset(&ctxt->rip_relative, 0, init_decode_cache()
4815 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative); init_decode_cache()
4817 ctxt->io_read.pos = 0; init_decode_cache()
4818 ctxt->io_read.end = 0; init_decode_cache()
4819 ctxt->mem_read.end = 0; init_decode_cache()
4822 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) x86_emulate_insn() argument
4824 const struct x86_emulate_ops *ops = ctxt->ops; x86_emulate_insn()
4826 int saved_dst_type = ctxt->dst.type; x86_emulate_insn()
4828 ctxt->mem_read.pos = 0; x86_emulate_insn()
4831 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { x86_emulate_insn()
4832 rc = emulate_ud(ctxt); x86_emulate_insn()
4836 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { x86_emulate_insn()
4837 rc = emulate_ud(ctxt); x86_emulate_insn()
4841 if (unlikely(ctxt->d & x86_emulate_insn()
4843 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || x86_emulate_insn()
4844 (ctxt->d & Undefined)) { x86_emulate_insn()
4845 rc = emulate_ud(ctxt); x86_emulate_insn()
4849 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) x86_emulate_insn()
4850 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { x86_emulate_insn()
4851 rc = emulate_ud(ctxt); x86_emulate_insn()
4855 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { x86_emulate_insn()
4856 rc = emulate_nm(ctxt); x86_emulate_insn()
4860 if (ctxt->d & Mmx) { x86_emulate_insn()
4861 rc = flush_pending_x87_faults(ctxt); x86_emulate_insn()
4868 fetch_possible_mmx_operand(ctxt, &ctxt->src); x86_emulate_insn()
4869 fetch_possible_mmx_operand(ctxt, &ctxt->src2); x86_emulate_insn()
4870 if (!(ctxt->d & Mov)) x86_emulate_insn()
4871 fetch_possible_mmx_operand(ctxt, &ctxt->dst); x86_emulate_insn()
4874 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { x86_emulate_insn()
4875 rc = emulator_check_intercept(ctxt, ctxt->intercept, x86_emulate_insn()
4882 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { x86_emulate_insn()
4883 rc = emulate_ud(ctxt); x86_emulate_insn()
4888 if ((ctxt->d & Priv) && ops->cpl(ctxt)) { x86_emulate_insn()
4889 if (ctxt->d & PrivUD) x86_emulate_insn()
4890 rc = emulate_ud(ctxt); x86_emulate_insn()
4892 rc = emulate_gp(ctxt, 0); x86_emulate_insn()
4897 if (ctxt->d & CheckPerm) { x86_emulate_insn()
4898 rc = ctxt->check_perm(ctxt); x86_emulate_insn()
4903 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { x86_emulate_insn()
4904 rc = emulator_check_intercept(ctxt, ctxt->intercept, x86_emulate_insn()
4910 if (ctxt->rep_prefix && (ctxt->d & String)) { x86_emulate_insn()
4912 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { x86_emulate_insn()
4913 ctxt->eip = ctxt->_eip; x86_emulate_insn()
4914 ctxt->eflags &= ~X86_EFLAGS_RF; x86_emulate_insn()
4920 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { x86_emulate_insn()
4921 rc = segmented_read(ctxt, ctxt->src.addr.mem, x86_emulate_insn()
4922 ctxt->src.valptr, ctxt->src.bytes); x86_emulate_insn()
4925 ctxt->src.orig_val64 = ctxt->src.val64; x86_emulate_insn()
4928 if (ctxt->src2.type == OP_MEM) { x86_emulate_insn()
4929 rc = segmented_read(ctxt, ctxt->src2.addr.mem, x86_emulate_insn()
4930 &ctxt->src2.val, ctxt->src2.bytes); x86_emulate_insn()
4935 if ((ctxt->d & DstMask) == ImplicitOps) x86_emulate_insn()
4939 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { x86_emulate_insn()
4941 rc = segmented_read(ctxt, ctxt->dst.addr.mem, x86_emulate_insn()
4942 &ctxt->dst.val, ctxt->dst.bytes); x86_emulate_insn()
4944 if (!(ctxt->d & NoWrite) && x86_emulate_insn()
4946 ctxt->exception.vector == PF_VECTOR) x86_emulate_insn()
4947 ctxt->exception.error_code |= PFERR_WRITE_MASK; x86_emulate_insn()
4952 ctxt->dst.orig_val64 = ctxt->dst.val64; x86_emulate_insn()
4956 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { x86_emulate_insn()
4957 rc = emulator_check_intercept(ctxt, ctxt->intercept, x86_emulate_insn()
4963 if (ctxt->rep_prefix && (ctxt->d & String)) x86_emulate_insn()
4964 ctxt->eflags |= X86_EFLAGS_RF; x86_emulate_insn()
4966 ctxt->eflags &= ~X86_EFLAGS_RF; x86_emulate_insn()
4968 if (ctxt->execute) { x86_emulate_insn()
4969 if (ctxt->d & Fastop) { x86_emulate_insn()
4970 void (*fop)(struct fastop *) = (void *)ctxt->execute; x86_emulate_insn()
4971 rc = fastop(ctxt, fop); x86_emulate_insn()
4976 rc = ctxt->execute(ctxt); x86_emulate_insn()
4982 if (ctxt->opcode_len == 2) x86_emulate_insn()
4984 else if (ctxt->opcode_len == 3) x86_emulate_insn()
4987 switch (ctxt->b) { x86_emulate_insn()
4989 if (test_cc(ctxt->b, ctxt->eflags)) x86_emulate_insn()
4990 rc = jmp_rel(ctxt, ctxt->src.val); x86_emulate_insn()
4993 ctxt->dst.val = ctxt->src.addr.mem.ea; x86_emulate_insn()
4996 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) x86_emulate_insn()
4997 ctxt->dst.type = OP_NONE; x86_emulate_insn()
4999 rc = em_xchg(ctxt); x86_emulate_insn()
5002 switch (ctxt->op_bytes) { x86_emulate_insn()
5003 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; x86_emulate_insn()
5004 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; x86_emulate_insn()
5005 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; x86_emulate_insn()
5009 rc = emulate_int(ctxt, 3); x86_emulate_insn()
5012 rc = emulate_int(ctxt, ctxt->src.val); x86_emulate_insn()
5015 if (ctxt->eflags & X86_EFLAGS_OF) x86_emulate_insn()
5016 rc = emulate_int(ctxt, 4); x86_emulate_insn()
5020 rc = jmp_rel(ctxt, ctxt->src.val); x86_emulate_insn()
5021 ctxt->dst.type = OP_NONE; /* Disable writeback. */ x86_emulate_insn()
5024 ctxt->ops->halt(ctxt); x86_emulate_insn()
5028 ctxt->eflags ^= X86_EFLAGS_CF; x86_emulate_insn()
5031 ctxt->eflags &= ~X86_EFLAGS_CF; x86_emulate_insn()
5034 ctxt->eflags |= X86_EFLAGS_CF; x86_emulate_insn()
5037 ctxt->eflags &= ~X86_EFLAGS_DF; x86_emulate_insn()
5040 ctxt->eflags |= X86_EFLAGS_DF; x86_emulate_insn()
5050 if (ctxt->d & SrcWrite) { x86_emulate_insn()
5051 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); x86_emulate_insn()
5052 rc = writeback(ctxt, &ctxt->src); x86_emulate_insn()
5056 if (!(ctxt->d & NoWrite)) { x86_emulate_insn()
5057 rc = writeback(ctxt, &ctxt->dst); x86_emulate_insn()
5066 ctxt->dst.type = saved_dst_type; x86_emulate_insn()
5068 if ((ctxt->d & SrcMask) == SrcSI) x86_emulate_insn()
5069 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src); x86_emulate_insn()
5071 if ((ctxt->d & DstMask) == DstDI) x86_emulate_insn()
5072 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst); x86_emulate_insn()
5074 if (ctxt->rep_prefix && (ctxt->d & String)) { x86_emulate_insn()
5076 struct read_cache *r = &ctxt->io_read; x86_emulate_insn()
5077 if ((ctxt->d & SrcMask) == SrcSI) x86_emulate_insn()
5078 count = ctxt->src.count; x86_emulate_insn()
5080 count = ctxt->dst.count; x86_emulate_insn()
5081 register_address_increment(ctxt, VCPU_REGS_RCX, -count); x86_emulate_insn()
5083 if (!string_insn_completed(ctxt)) { x86_emulate_insn()
5088 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) && x86_emulate_insn()
5095 ctxt->mem_read.end = 0; x86_emulate_insn()
5096 writeback_registers(ctxt); x86_emulate_insn()
5101 ctxt->eflags &= ~X86_EFLAGS_RF; x86_emulate_insn()
5104 ctxt->eip = ctxt->_eip; x86_emulate_insn()
5108 WARN_ON(ctxt->exception.vector > 0x1f); x86_emulate_insn()
5109 ctxt->have_exception = true; x86_emulate_insn()
5115 writeback_registers(ctxt); x86_emulate_insn()
5120 switch (ctxt->b) { x86_emulate_insn()
5122 (ctxt->ops->wbinvd)(ctxt); x86_emulate_insn()
5130 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); x86_emulate_insn()
5133 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); x86_emulate_insn()
5136 if (test_cc(ctxt->b, ctxt->eflags)) x86_emulate_insn()
5137 ctxt->dst.val = ctxt->src.val; x86_emulate_insn()
5138 else if (ctxt->op_bytes != 4) x86_emulate_insn()
5139 ctxt->dst.type = OP_NONE; /* no writeback */ x86_emulate_insn()
5142 if (test_cc(ctxt->b, ctxt->eflags)) x86_emulate_insn()
5143 rc = jmp_rel(ctxt, ctxt->src.val); x86_emulate_insn()
5146 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); x86_emulate_insn()
5149 ctxt->dst.bytes = ctxt->op_bytes; x86_emulate_insn()
5150 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val x86_emulate_insn()
5151 : (u16) ctxt->src.val; x86_emulate_insn()
5154 ctxt->dst.bytes = ctxt->op_bytes; x86_emulate_insn()
5155 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : x86_emulate_insn()
5156 (s16) ctxt->src.val; x86_emulate_insn()
5173 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt) emulator_invalidate_register_cache() argument
5175 invalidate_registers(ctxt); emulator_invalidate_register_cache()
5178 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) emulator_writeback_register_cache() argument
5180 writeback_registers(ctxt); emulator_writeback_register_cache()
H A Dx86.c72 #define emul_to_vcpu(ctxt) \
73 container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
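
emul_to_vcpu() works because the emulator context is embedded in struct kvm_vcpu; container_of() subtracts the member offset to recover the enclosing object. A self-contained sketch of the pattern (struct names hypothetical):

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct emu_ctxt { int mode; };

struct vcpu {
        int id;
        struct emu_ctxt emulate_ctxt;  /* embedded, like arch.emulate_ctxt */
};

static struct vcpu *emu_to_vcpu(struct emu_ctxt *ctxt)
{
        return container_of(ctxt, struct vcpu, emulate_ctxt);
}
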
175 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
4274 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, kvm_fetch_guest_virt() argument
4278 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); kvm_fetch_guest_virt()
4300 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, kvm_read_guest_virt() argument
4304 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); kvm_read_guest_virt()
4312 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, kvm_read_guest_virt_system() argument
4316 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); kvm_read_guest_virt_system()
4320 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, kvm_write_guest_virt_system() argument
4325 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); kvm_write_guest_virt_system()
4514 static int emulator_read_write(struct x86_emulate_ctxt *ctxt, emulator_read_write() argument
4520 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); emulator_read_write()
4541 if (ctxt->mode != X86EMUL_MODE_PROT64) emulator_read_write()
4568 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, emulator_read_emulated() argument
4574 return emulator_read_write(ctxt, addr, val, bytes, emulator_read_emulated()
4578 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, emulator_write_emulated() argument
4584 return emulator_read_write(ctxt, addr, (void *)val, bytes, emulator_write_emulated()
4598 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, emulator_cmpxchg_emulated() argument
4605 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); emulator_cmpxchg_emulated()
4660 return emulator_write_emulated(ctxt, addr, new, bytes, exception); emulator_cmpxchg_emulated()
4702 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, emulator_pio_in_emulated() argument
4706 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); emulator_pio_in_emulated()
4724 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, emulator_pio_out_emulated() argument
4728 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); emulator_pio_out_emulated()
4740 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) emulator_invlpg() argument
4742 kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); emulator_invlpg()
4772 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) emulator_wbinvd() argument
4774 kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); emulator_wbinvd()
4777 static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, emulator_get_dr() argument
4780 return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); emulator_get_dr()
4783 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, emulator_set_dr() argument
4787 return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); emulator_set_dr()
4795 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) emulator_get_cr() argument
4797 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); emulator_get_cr()
4824 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) emulator_set_cr() argument
4826 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); emulator_set_cr()
4853 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) emulator_get_cpl() argument
4855 return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); emulator_get_cpl()
4858 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) emulator_get_gdt() argument
4860 kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); emulator_get_gdt()
4863 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) emulator_get_idt() argument
4865 kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); emulator_get_idt()
4868 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) emulator_set_gdt() argument
4870 kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); emulator_set_gdt()
4873 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) emulator_set_idt() argument
4875 kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); emulator_set_idt()
4879 struct x86_emulate_ctxt *ctxt, int seg) emulator_get_cached_segment_base()
4881 return get_segment_base(emul_to_vcpu(ctxt), seg); emulator_get_cached_segment_base()
4884 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, emulator_get_segment() argument
4890 kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); emulator_get_segment()
4918 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, emulator_set_segment() argument
4922 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); emulator_set_segment()
4948 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, emulator_get_msr() argument
4951 return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); emulator_get_msr()
4954 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, emulator_set_msr() argument
4962 return kvm_set_msr(emul_to_vcpu(ctxt), &msr); emulator_set_msr()
4965 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, emulator_check_pmc() argument
4968 return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc); emulator_check_pmc()
4971 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, emulator_read_pmc() argument
4974 return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata); emulator_read_pmc()
4977 static void emulator_halt(struct x86_emulate_ctxt *ctxt) emulator_halt() argument
4979 emul_to_vcpu(ctxt)->arch.halt_request = 1; emulator_halt()
4982 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt) emulator_get_fpu() argument
4985 kvm_load_guest_fpu(emul_to_vcpu(ctxt)); emulator_get_fpu()
4993 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt) emulator_put_fpu() argument
4998 static int emulator_intercept(struct x86_emulate_ctxt *ctxt, emulator_intercept() argument
5002 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); emulator_intercept()
5005 static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, emulator_get_cpuid() argument
5008 kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx); emulator_get_cpuid()
5011 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) emulator_read_gpr() argument
5013 return kvm_register_read(emul_to_vcpu(ctxt), reg); emulator_read_gpr()
5016 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val) emulator_write_gpr() argument
5018 kvm_register_write(emul_to_vcpu(ctxt), reg, val); emulator_write_gpr()
5021 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) emulator_set_nmi_mask() argument
5023 kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked); emulator_set_nmi_mask()
5085 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; inject_emulated_exception() local
5086 if (ctxt->exception.vector == PF_VECTOR) inject_emulated_exception()
5087 return kvm_propagate_fault(vcpu, &ctxt->exception); inject_emulated_exception()
5089 if (ctxt->exception.error_code_valid) inject_emulated_exception()
5090 kvm_queue_exception_e(vcpu, ctxt->exception.vector, inject_emulated_exception()
5091 ctxt->exception.error_code); inject_emulated_exception()
5093 kvm_queue_exception(vcpu, ctxt->exception.vector); inject_emulated_exception()
5099 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; init_emulate_ctxt() local
5104 ctxt->eflags = kvm_get_rflags(vcpu); init_emulate_ctxt()
5105 ctxt->eip = kvm_rip_read(vcpu); init_emulate_ctxt()
5106 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : init_emulate_ctxt()
5107 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : init_emulate_ctxt()
5111 ctxt->guest_mode = is_guest_mode(vcpu); init_emulate_ctxt()
5113 init_decode_cache(ctxt); init_emulate_ctxt()
5119 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; kvm_inject_realmode_interrupt() local
5124 ctxt->op_bytes = 2; kvm_inject_realmode_interrupt()
5125 ctxt->ad_bytes = 2; kvm_inject_realmode_interrupt()
5126 ctxt->_eip = ctxt->eip + inc_eip; kvm_inject_realmode_interrupt()
5127 ret = emulate_int_real(ctxt, irq); kvm_inject_realmode_interrupt()
5132 ctxt->eip = ctxt->_eip; kvm_inject_realmode_interrupt()
5133 kvm_rip_write(vcpu, ctxt->eip); kvm_inject_realmode_interrupt()
5134 kvm_set_rflags(vcpu, ctxt->eflags); kvm_inject_realmode_interrupt()
5233 static bool retry_instruction(struct x86_emulate_ctxt *ctxt, retry_instruction() argument
5236 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); retry_instruction()
5260 if (x86_page_table_writing_insn(ctxt)) retry_instruction()
5263 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2) retry_instruction()
5266 vcpu->arch.last_retry_eip = ctxt->eip; retry_instruction()
5375 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; x86_emulate_instruction() local
5398 ctxt->interruptibility = 0; x86_emulate_instruction()
5399 ctxt->have_exception = false; x86_emulate_instruction()
5400 ctxt->exception.vector = -1; x86_emulate_instruction()
5401 ctxt->perm_ok = false; x86_emulate_instruction()
5403 ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; x86_emulate_instruction()
5405 r = x86_decode_insn(ctxt, insn, insn_len); x86_emulate_instruction()
5422 kvm_rip_write(vcpu, ctxt->_eip); x86_emulate_instruction()
5423 if (ctxt->eflags & X86_EFLAGS_RF) x86_emulate_instruction()
5424 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); x86_emulate_instruction()
5428 if (retry_instruction(ctxt, cr2, emulation_type)) x86_emulate_instruction()
5435 emulator_invalidate_register_cache(ctxt); x86_emulate_instruction()
5439 r = x86_emulate_insn(ctxt); x86_emulate_instruction()
5452 if (ctxt->have_exception) { x86_emulate_instruction()
5477 toggle_interruptibility(vcpu, ctxt->interruptibility); x86_emulate_instruction()
5479 kvm_rip_write(vcpu, ctxt->eip); x86_emulate_instruction()
5482 if (!ctxt->have_exception || x86_emulate_instruction()
5483 exception_type(ctxt->exception.vector) == EXCPT_TRAP) x86_emulate_instruction()
5484 __kvm_set_rflags(vcpu, ctxt->eflags); x86_emulate_instruction()
5492 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) x86_emulate_instruction()
6015 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) emulator_fix_hypercall() argument
6017 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); emulator_fix_hypercall()
6023 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); emulator_fix_hypercall()
6801 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; kvm_task_switch() local
6806 ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason, kvm_task_switch()
6812 kvm_rip_write(vcpu, ctxt->eip); kvm_task_switch()
6813 kvm_set_rflags(vcpu, ctxt->eflags); kvm_task_switch()
4878 emulator_get_cached_segment_base( struct x86_emulate_ctxt *ctxt, int seg) emulator_get_cached_segment_base() argument
H A Dx86.h155 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
159 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
H A Dllog_obd.c47 struct llog_ctxt *ctxt; llog_new_ctxt() local
49 OBD_ALLOC_PTR(ctxt); llog_new_ctxt()
50 if (!ctxt) llog_new_ctxt()
53 ctxt->loc_obd = obd; llog_new_ctxt()
54 atomic_set(&ctxt->loc_refcount, 1); llog_new_ctxt()
56 return ctxt; llog_new_ctxt()
59 static void llog_ctxt_destroy(struct llog_ctxt *ctxt) llog_ctxt_destroy() argument
61 if (ctxt->loc_exp) { llog_ctxt_destroy()
62 class_export_put(ctxt->loc_exp); llog_ctxt_destroy()
63 ctxt->loc_exp = NULL; llog_ctxt_destroy()
65 if (ctxt->loc_imp) { llog_ctxt_destroy()
66 class_import_put(ctxt->loc_imp); llog_ctxt_destroy()
67 ctxt->loc_imp = NULL; llog_ctxt_destroy()
69 OBD_FREE_PTR(ctxt); llog_ctxt_destroy()
72 int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt) __llog_ctxt_put() argument
74 struct obd_llog_group *olg = ctxt->loc_olg; __llog_ctxt_put()
79 if (!atomic_dec_and_test(&ctxt->loc_refcount)) { __llog_ctxt_put()
83 olg->olg_ctxts[ctxt->loc_idx] = NULL; __llog_ctxt_put()
86 obd = ctxt->loc_obd; __llog_ctxt_put()
88 /* sync with llog ctxt user thread */ __llog_ctxt_put()
98 /* cleanup the llog ctxt here */ __llog_ctxt_put()
99 if (CTXTP(ctxt, cleanup)) __llog_ctxt_put()
100 rc = CTXTP(ctxt, cleanup)(env, ctxt); __llog_ctxt_put()
102 llog_ctxt_destroy(ctxt); __llog_ctxt_put()
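
__llog_ctxt_put() is the usual last-reference teardown: every caller decrements, and only the one that takes the count to zero detaches the context and frees it (the cleanup hook runs in between). A minimal C11 sketch of that shape, with the detach and cleanup steps omitted:

#include <stdatomic.h>
#include <stdlib.h>

struct ctx {
        atomic_int refcount;
        /* ... payload ... */
};

static void ctx_put(struct ctx *c)
{
        if (atomic_fetch_sub(&c->refcount, 1) != 1)
                return;         /* someone else still holds a reference */
        /* last reference: run cleanup, then destroy */
        free(c);
}
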
108 int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt) llog_cleanup() argument
114 LASSERT(ctxt != NULL); llog_cleanup()
115 LASSERT(ctxt != LP_POISON); llog_cleanup()
117 olg = ctxt->loc_olg; llog_cleanup()
121 idx = ctxt->loc_idx; llog_cleanup()
124 * Balance the ctxt get when calling llog_cleanup() llog_cleanup()
126 LASSERT(atomic_read(&ctxt->loc_refcount) < LI_POISON); llog_cleanup()
127 LASSERT(atomic_read(&ctxt->loc_refcount) > 1); llog_cleanup()
128 llog_ctxt_put(ctxt); llog_cleanup()
131 * Try to free the ctxt. llog_cleanup()
133 rc = __llog_ctxt_put(env, ctxt); llog_cleanup()
135 CERROR("Error %d while cleaning up ctxt %p\n", llog_cleanup()
136 rc, ctxt); llog_cleanup()
149 struct llog_ctxt *ctxt; llog_setup() local
157 ctxt = llog_new_ctxt(obd); llog_setup()
158 if (!ctxt) llog_setup()
161 ctxt->loc_obd = obd; llog_setup()
162 ctxt->loc_olg = olg; llog_setup()
163 ctxt->loc_idx = index; llog_setup()
164 ctxt->loc_logops = op; llog_setup()
165 mutex_init(&ctxt->loc_mutex); llog_setup()
166 ctxt->loc_exp = class_export_get(disk_obd->obd_self_export); llog_setup()
167 ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED; llog_setup()
169 rc = llog_group_set_ctxt(olg, ctxt, index); llog_setup()
171 llog_ctxt_destroy(ctxt); llog_setup()
173 ctxt = llog_group_get_ctxt(olg, index); llog_setup()
174 if (ctxt) { llog_setup()
180 CDEBUG(D_CONFIG, "obd %s ctxt %d already set up\n", llog_setup()
182 LASSERT(ctxt->loc_olg == olg); llog_setup()
183 LASSERT(ctxt->loc_obd == obd); llog_setup()
184 LASSERT(ctxt->loc_exp == disk_obd->obd_self_export); llog_setup()
185 LASSERT(ctxt->loc_logops == op); llog_setup()
186 llog_ctxt_put(ctxt); llog_setup()
201 CERROR("%s: ctxt %d lop_setup=%p failed: rc = %d\n", llog_setup()
204 llog_ctxt_destroy(ctxt); llog_setup()
206 CDEBUG(D_CONFIG, "obd %s ctxt %d is initialized\n", llog_setup()
208 ctxt->loc_flags &= ~LLOG_CTXT_FLAG_UNINITIALIZED; llog_setup()
215 int llog_sync(struct llog_ctxt *ctxt, struct obd_export *exp, int flags) llog_sync() argument
219 if (!ctxt) llog_sync()
222 if (CTXTP(ctxt, sync)) llog_sync()
223 rc = CTXTP(ctxt, sync)(ctxt, exp, flags); llog_sync()
229 int llog_cancel(const struct lu_env *env, struct llog_ctxt *ctxt, llog_cancel() argument
234 if (!ctxt) { llog_cancel()
235 CERROR("No ctxt\n"); llog_cancel()
239 CTXT_CHECK_OP(ctxt, cancel, -EOPNOTSUPP); llog_cancel()
240 rc = CTXTP(ctxt, cancel)(env, ctxt, cookies, flags); llog_cancel()
H A Dllog.c745 int llog_open_create(const struct lu_env *env, struct llog_ctxt *ctxt, llog_open_create() argument
753 rc = llog_open(env, ctxt, res, logid, name, LLOG_OPEN_NEW); llog_open_create()
787 int llog_erase(const struct lu_env *env, struct llog_ctxt *ctxt, llog_erase() argument
797 rc = llog_open(env, ctxt, &handle, logid, name, LLOG_OPEN_EXISTS); llog_erase()
853 int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt, llog_open() argument
860 LASSERT(ctxt); llog_open()
861 LASSERT(ctxt->loc_logops); llog_open()
863 if (ctxt->loc_logops->lop_open == NULL) { llog_open()
871 (*lgh)->lgh_ctxt = ctxt; llog_open()
872 (*lgh)->lgh_logops = ctxt->loc_logops; llog_open()
877 rc = ctxt->loc_logops->lop_open(env, *lgh, logid, name, open_param); llog_open()
907 int llog_is_empty(const struct lu_env *env, struct llog_ctxt *ctxt, llog_is_empty() argument
913 rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS); llog_is_empty()
945 struct llog_ctxt *ctxt, struct llog_ctxt *bctxt, llog_backup()
954 rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS); llog_backup()
944 llog_backup(const struct lu_env *env, struct obd_device *obd, struct llog_ctxt *ctxt, struct llog_ctxt *bctxt, char *name, char *backup) llog_backup() argument
H A Dllog_cat.c240 /* if handle was stored in ctxt, remove it too */ llog_cat_close()
439 struct llog_ctxt *ctxt; llog_cat_add() local
444 ctxt = cathandle->lgh_ctxt; llog_cat_add()
445 LASSERT(ctxt); llog_cat_add()
446 LASSERT(ctxt->loc_exp); llog_cat_add()
449 dt = ctxt->loc_exp->exp_obd->obd_lvfs_ctxt.dt; llog_cat_add()
H A Dobd_config.c1577 int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt, class_config_parse_llog() argument
1586 rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS); class_config_parse_llog()
1690 int class_config_dump_llog(const struct lu_env *env, struct llog_ctxt *ctxt, class_config_dump_llog() argument
1698 rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS); class_config_dump_llog()
/linux-4.1.27/arch/x86/power/
H A Dcpu.c39 * @ctxt - structure to store the register contents in
51 static void __save_processor_state(struct saved_context *ctxt) __save_processor_state() argument
62 store_idt(&ctxt->idt); __save_processor_state()
65 store_idt((struct desc_ptr *)&ctxt->idt_limit); __save_processor_state()
73 ctxt->gdt_desc.size = GDT_SIZE - 1; __save_processor_state()
74 ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_table(smp_processor_id()); __save_processor_state()
76 store_tr(ctxt->tr); __save_processor_state()
83 savesegment(es, ctxt->es); __save_processor_state()
84 savesegment(fs, ctxt->fs); __save_processor_state()
85 savesegment(gs, ctxt->gs); __save_processor_state()
86 savesegment(ss, ctxt->ss); __save_processor_state()
89 asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds)); __save_processor_state()
90 asm volatile ("movw %%es, %0" : "=m" (ctxt->es)); __save_processor_state()
91 asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs)); __save_processor_state()
92 asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs)); __save_processor_state()
93 asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss)); __save_processor_state()
95 rdmsrl(MSR_FS_BASE, ctxt->fs_base); __save_processor_state()
96 rdmsrl(MSR_GS_BASE, ctxt->gs_base); __save_processor_state()
97 rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); __save_processor_state()
100 rdmsrl(MSR_EFER, ctxt->efer); __save_processor_state()
106 ctxt->cr0 = read_cr0(); __save_processor_state()
107 ctxt->cr2 = read_cr2(); __save_processor_state()
108 ctxt->cr3 = read_cr3(); __save_processor_state()
109 ctxt->cr4 = __read_cr4_safe(); __save_processor_state()
111 ctxt->cr8 = read_cr8(); __save_processor_state()
113 ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE, __save_processor_state()
114 &ctxt->misc_enable); __save_processor_state()
164 * @ctxt - structure to load the register contents from
166 static void notrace __restore_processor_state(struct saved_context *ctxt) __restore_processor_state() argument
168 if (ctxt->misc_enable_saved) __restore_processor_state()
169 wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); __restore_processor_state()
175 if (ctxt->cr4) __restore_processor_state()
176 __write_cr4(ctxt->cr4); __restore_processor_state()
179 wrmsrl(MSR_EFER, ctxt->efer); __restore_processor_state()
180 write_cr8(ctxt->cr8); __restore_processor_state()
181 __write_cr4(ctxt->cr4); __restore_processor_state()
183 write_cr3(ctxt->cr3); __restore_processor_state()
184 write_cr2(ctxt->cr2); __restore_processor_state()
185 write_cr0(ctxt->cr0); __restore_processor_state()
192 load_idt(&ctxt->idt); __restore_processor_state()
195 load_idt((const struct desc_ptr *)&ctxt->idt_limit); __restore_processor_state()
202 loadsegment(es, ctxt->es); __restore_processor_state()
203 loadsegment(fs, ctxt->fs); __restore_processor_state()
204 loadsegment(gs, ctxt->gs); __restore_processor_state()
205 loadsegment(ss, ctxt->ss); __restore_processor_state()
214 asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds)); __restore_processor_state()
215 asm volatile ("movw %0, %%es" :: "r" (ctxt->es)); __restore_processor_state()
216 asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs)); __restore_processor_state()
217 load_gs_index(ctxt->gs); __restore_processor_state()
218 asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss)); __restore_processor_state()
220 wrmsrl(MSR_FS_BASE, ctxt->fs_base); __restore_processor_state()
221 wrmsrl(MSR_GS_BASE, ctxt->gs_base); __restore_processor_state()
222 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); __restore_processor_state()
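
Note the ordering in __restore_processor_state(): control registers go back before anything that depends on them, with CR4 written first so paging-feature bits (PAE, PGE, ...) are in place before CR3 reloads the page-table base. A sketch of the save/restore pairing with the privileged accessors stubbed out for illustration:

static unsigned long fake_cr[5];  /* stand-ins for the real CRn accessors */
static unsigned long read_cr(int n)          { return fake_cr[n]; }
static void write_cr(int n, unsigned long v) { fake_cr[n] = v; }

struct cpu_ctx { unsigned long cr0, cr2, cr3, cr4; };

static void save_ctx(struct cpu_ctx *c)
{
        c->cr0 = read_cr(0); c->cr2 = read_cr(2);
        c->cr3 = read_cr(3); c->cr4 = read_cr(4);
}

static void restore_ctx(const struct cpu_ctx *c)
{
        write_cr(4, c->cr4);  /* feature bits first */
        write_cr(3, c->cr3);
        write_cr(2, c->cr2);
        write_cr(0, c->cr0);  /* PE/PG bits last */
}
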
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
H A Dllog_net.c55 int llog_initiator_connect(struct llog_ctxt *ctxt) llog_initiator_connect() argument
59 LASSERT(ctxt); llog_initiator_connect()
60 new_imp = ctxt->loc_obd->u.cli.cl_import; llog_initiator_connect()
61 LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp, llog_initiator_connect()
62 "%p - %p\n", ctxt->loc_imp, new_imp); llog_initiator_connect()
63 mutex_lock(&ctxt->loc_mutex); llog_initiator_connect()
64 if (ctxt->loc_imp != new_imp) { llog_initiator_connect()
65 if (ctxt->loc_imp) llog_initiator_connect()
66 class_import_put(ctxt->loc_imp); llog_initiator_connect()
67 ctxt->loc_imp = class_import_get(new_imp); llog_initiator_connect()
69 mutex_unlock(&ctxt->loc_mutex); llog_initiator_connect()
H A Dllog_client.c52 #define LLOG_CLIENT_ENTRY(ctxt, imp) do { \
53 mutex_lock(&ctxt->loc_mutex); \
54 if (ctxt->loc_imp) { \
55 imp = class_import_get(ctxt->loc_imp); \
57 CERROR("ctxt->loc_imp == NULL for context idx %d." \
60 ctxt->loc_idx); \
62 mutex_unlock(&ctxt->loc_mutex); \
65 mutex_unlock(&ctxt->loc_mutex); \
68 #define LLOG_CLIENT_EXIT(ctxt, imp) do { \
69 mutex_lock(&ctxt->loc_mutex); \
70 if (ctxt->loc_imp != imp) \
72 ctxt->loc_imp, imp); \
74 mutex_unlock(&ctxt->loc_mutex); \
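
LLOG_CLIENT_ENTRY/EXIT pin the import under loc_mutex, use it outside the lock, and verify on exit that it was not swapped underneath (the matching class_import_put() sits on a line elided above). A userspace sketch of the same take-reference-under-lock pattern:

#include <pthread.h>
#include <stdio.h>

struct import { int refs; };

static pthread_mutex_t loc_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct import *loc_imp;

static struct import *client_entry(void)
{
        struct import *imp = NULL;

        pthread_mutex_lock(&loc_mutex);
        if (loc_imp) {
                imp = loc_imp;
                imp->refs++;            /* class_import_get() */
        }
        pthread_mutex_unlock(&loc_mutex);
        return imp;                     /* NULL: not connected */
}

static void client_exit(struct import *imp)
{
        pthread_mutex_lock(&loc_mutex);
        if (loc_imp != imp)             /* CERROR in the original */
                fprintf(stderr, "import changed: %p - %p\n",
                        (void *)loc_imp, (void *)imp);
        pthread_mutex_unlock(&loc_mutex);
        imp->refs--;                    /* class_import_put() */
}
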
85 struct llog_ctxt *ctxt = lgh->lgh_ctxt; llog_client_open() local
89 LLOG_CLIENT_ENTRY(ctxt, imp); llog_client_open()
117 body->lgd_ctxt_idx = ctxt->loc_idx - 1; llog_client_open()
138 lgh->lgh_ctxt = ctxt; llog_client_open()
140 LLOG_CLIENT_EXIT(ctxt, imp); llog_client_open()
/linux-4.1.27/Documentation/prctl/
H A DMakefile2 hostprogs-$(CONFIG_X86) := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test disable-tsc-test
6 HOSTCFLAGS_disable-tsc-ctxt-sw-stress-test.o += -I$(objtree)/usr/include
/linux-4.1.27/arch/x86/include/asm/
H A Dkvm_emulate.h94 ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
101 void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
109 int (*read_std)(struct x86_emulate_ctxt *ctxt,
121 int (*write_std)(struct x86_emulate_ctxt *ctxt,
131 int (*fetch)(struct x86_emulate_ctxt *ctxt,
141 int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
152 int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
165 int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
171 void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);
173 int (*pio_in_emulated)(struct x86_emulate_ctxt *ctxt,
177 int (*pio_out_emulated)(struct x86_emulate_ctxt *ctxt,
181 bool (*get_segment)(struct x86_emulate_ctxt *ctxt, u16 *selector,
183 void (*set_segment)(struct x86_emulate_ctxt *ctxt, u16 selector,
185 unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt,
187 void (*get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
188 void (*get_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
189 void (*set_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
190 void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
191 ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
192 int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
193 int (*cpl)(struct x86_emulate_ctxt *ctxt);
194 int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
195 int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
196 int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
197 int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
198 int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
199 int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
200 void (*halt)(struct x86_emulate_ctxt *ctxt);
201 void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
202 int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
203 void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
204 void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
205 int (*intercept)(struct x86_emulate_ctxt *ctxt,
209 void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
211 void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
297 int (*execute)(struct x86_emulate_ctxt *ctxt);
298 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
413 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
414 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
419 void init_decode_cache(struct x86_emulate_ctxt *ctxt);
420 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
421 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
424 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
425 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt);
426 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt);
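
x86_emulate_ops is a plain callback table: the core in emulate.c touches guest state only through ctxt->ops, which is what lets x86.c plug in the KVM-backed implementations listed earlier. A toy sketch of the pattern (a two-entry subset of the real table; names hypothetical):

#include <stdio.h>

struct emu_ctxt;

struct emu_ops {
        unsigned long (*read_gpr)(struct emu_ctxt *ctxt, unsigned reg);
        void (*write_gpr)(struct emu_ctxt *ctxt, unsigned reg,
                          unsigned long val);
};

struct emu_ctxt {
        const struct emu_ops *ops;
        unsigned long regs[16];
};

static unsigned long host_read_gpr(struct emu_ctxt *c, unsigned r)
{
        return c->regs[r];
}

static void host_write_gpr(struct emu_ctxt *c, unsigned r, unsigned long v)
{
        c->regs[r] = v;
}

static const struct emu_ops host_ops = { host_read_gpr, host_write_gpr };

int main(void)
{
        struct emu_ctxt c = { .ops = &host_ops };

        c.ops->write_gpr(&c, 0, 42);              /* core sees only ops */
        printf("%lu\n", c.ops->read_gpr(&c, 0));  /* -> 42 */
        return 0;
}
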
/linux-4.1.27/net/sunrpc/xprtrdma/
H A Dsvc_rdma_recvfrom.c59 struct svc_rdma_op_ctxt *ctxt, rdma_build_arg_xdr()
68 page = ctxt->pages[0]; rdma_build_arg_xdr()
75 min_t(size_t, byte_count, ctxt->sge[0].length); rdma_build_arg_xdr()
94 while (bc && sge_no < ctxt->count) { rdma_build_arg_xdr()
95 page = ctxt->pages[sge_no]; rdma_build_arg_xdr()
98 bc -= min_t(u32, bc, ctxt->sge[sge_no].length); rdma_build_arg_xdr()
99 rqstp->rq_arg.buflen += ctxt->sge[sge_no].length; rdma_build_arg_xdr()
107 while (sge_no < ctxt->count) { rdma_build_arg_xdr()
108 page = ctxt->pages[sge_no++]; rdma_build_arg_xdr()
111 ctxt->count = bc; rdma_build_arg_xdr()
140 struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt); rdma_read_chunk_lcl() local
145 ctxt->direction = DMA_FROM_DEVICE; rdma_read_chunk_lcl()
146 ctxt->read_hdr = head; rdma_read_chunk_lcl()
162 ctxt->sge[pno].addr = rdma_read_chunk_lcl()
168 ctxt->sge[pno].addr); rdma_read_chunk_lcl()
174 ctxt->sge[pno].lkey = xprt->sc_dma_lkey; rdma_read_chunk_lcl()
175 ctxt->sge[pno].length = len; rdma_read_chunk_lcl()
176 ctxt->count++; rdma_read_chunk_lcl()
188 set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags); rdma_read_chunk_lcl()
190 clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags); rdma_read_chunk_lcl()
193 read_wr.wr_id = (unsigned long)ctxt; rdma_read_chunk_lcl()
195 ctxt->wr_op = read_wr.opcode; rdma_read_chunk_lcl()
199 read_wr.sg_list = ctxt->sge; rdma_read_chunk_lcl()
216 svc_rdma_unmap_dma(ctxt); rdma_read_chunk_lcl()
217 svc_rdma_put_context(ctxt, 0); rdma_read_chunk_lcl()
237 struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt); rdma_read_chunk_frmr() local
246 ctxt->direction = DMA_FROM_DEVICE; rdma_read_chunk_frmr()
247 ctxt->frmr = frmr; rdma_read_chunk_frmr()
288 set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags); rdma_read_chunk_frmr()
290 clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags); rdma_read_chunk_frmr()
296 ctxt->sge[0].addr = (unsigned long)frmr->kva + *page_offset; rdma_read_chunk_frmr()
297 ctxt->sge[0].lkey = frmr->mr->lkey; rdma_read_chunk_frmr()
298 ctxt->sge[0].length = read; rdma_read_chunk_frmr()
299 ctxt->count = 1; rdma_read_chunk_frmr()
300 ctxt->read_hdr = head; rdma_read_chunk_frmr()
320 read_wr.sg_list = ctxt->sge; rdma_read_chunk_frmr()
324 read_wr.wr_id = (unsigned long)ctxt; rdma_read_chunk_frmr()
325 read_wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey; rdma_read_chunk_frmr()
331 inv_wr.wr_id = (unsigned long)ctxt; rdma_read_chunk_frmr()
336 ctxt->wr_op = read_wr.opcode; rdma_read_chunk_frmr()
353 svc_rdma_unmap_dma(ctxt); rdma_read_chunk_frmr()
354 svc_rdma_put_context(ctxt, 0); rdma_read_chunk_frmr()
583 struct svc_rdma_op_ctxt *ctxt = NULL; svc_rdma_recvfrom() local
592 ctxt = list_entry(rdma_xprt->sc_read_complete_q.next, svc_rdma_recvfrom()
595 list_del_init(&ctxt->dto_q); svc_rdma_recvfrom()
597 return rdma_read_complete(rqstp, ctxt); svc_rdma_recvfrom()
599 ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next, svc_rdma_recvfrom()
602 list_del_init(&ctxt->dto_q); svc_rdma_recvfrom()
606 ctxt = NULL; svc_rdma_recvfrom()
609 if (!ctxt) { svc_rdma_recvfrom()
620 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n", svc_rdma_recvfrom()
621 ctxt, rdma_xprt, rqstp, ctxt->wc_status); svc_rdma_recvfrom()
625 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len); svc_rdma_recvfrom()
639 ret = rdma_read_chunks(rdma_xprt, rmsgp, rqstp, ctxt); svc_rdma_recvfrom()
645 svc_rdma_put_context(ctxt, 1); svc_rdma_recvfrom()
652 svc_rdma_put_context(ctxt, 0); svc_rdma_recvfrom()
664 if (ctxt) svc_rdma_recvfrom()
665 svc_rdma_put_context(ctxt, 1); svc_rdma_recvfrom()
58 rdma_build_arg_xdr(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *ctxt, u32 byte_count) rdma_build_arg_xdr() argument
H A Dsvc_rdma_transport.c100 struct svc_rdma_op_ctxt *ctxt; svc_rdma_get_context() local
103 ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL); svc_rdma_get_context()
104 if (ctxt) svc_rdma_get_context()
108 ctxt->xprt = xprt; svc_rdma_get_context()
109 INIT_LIST_HEAD(&ctxt->dto_q); svc_rdma_get_context()
110 ctxt->count = 0; svc_rdma_get_context()
111 ctxt->frmr = NULL; svc_rdma_get_context()
113 return ctxt; svc_rdma_get_context()
116 void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt) svc_rdma_unmap_dma() argument
118 struct svcxprt_rdma *xprt = ctxt->xprt; svc_rdma_unmap_dma()
120 for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) { svc_rdma_unmap_dma()
127 if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) { svc_rdma_unmap_dma()
130 ctxt->sge[i].addr, svc_rdma_unmap_dma()
131 ctxt->sge[i].length, svc_rdma_unmap_dma()
132 ctxt->direction); svc_rdma_unmap_dma()
137 void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages) svc_rdma_put_context() argument
142 xprt = ctxt->xprt; svc_rdma_put_context()
144 for (i = 0; i < ctxt->count; i++) svc_rdma_put_context()
145 put_page(ctxt->pages[i]); svc_rdma_put_context()
147 kmem_cache_free(svc_rdma_ctxt_cachep, ctxt); svc_rdma_put_context()
291 struct svc_rdma_op_ctxt *ctxt = NULL; rq_cq_reap() local
300 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; rq_cq_reap()
301 ctxt->wc_status = wc.status; rq_cq_reap()
302 ctxt->byte_len = wc.byte_len; rq_cq_reap()
303 svc_rdma_unmap_dma(ctxt); rq_cq_reap()
306 dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt); rq_cq_reap()
308 svc_rdma_put_context(ctxt, 1); rq_cq_reap()
313 list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q); rq_cq_reap()
318 if (ctxt) rq_cq_reap()
335 struct svc_rdma_op_ctxt *ctxt) process_context()
337 svc_rdma_unmap_dma(ctxt); process_context()
339 switch (ctxt->wr_op) { process_context()
341 if (ctxt->frmr) process_context()
342 pr_err("svcrdma: SEND: ctxt->frmr != NULL\n"); process_context()
343 svc_rdma_put_context(ctxt, 1); process_context()
347 if (ctxt->frmr) process_context()
348 pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n"); process_context()
349 svc_rdma_put_context(ctxt, 0); process_context()
354 svc_rdma_put_frmr(xprt, ctxt->frmr); process_context()
355 if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) { process_context()
356 struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr; process_context()
364 pr_err("svcrdma: ctxt->read_hdr == NULL\n"); process_context()
368 svc_rdma_put_context(ctxt, 0); process_context()
374 ctxt->wr_op); process_context()
386 struct svc_rdma_op_ctxt *ctxt = NULL; sq_cq_reap() local
416 ctxt = (struct svc_rdma_op_ctxt *) sq_cq_reap()
418 if (ctxt) sq_cq_reap()
419 process_context(xprt, ctxt); sq_cq_reap()
425 if (ctxt) sq_cq_reap()
508 struct svc_rdma_op_ctxt *ctxt; svc_rdma_post_recv() local
515 ctxt = svc_rdma_get_context(xprt); svc_rdma_post_recv()
517 ctxt->direction = DMA_FROM_DEVICE; svc_rdma_post_recv()
524 ctxt->pages[sge_no] = page; svc_rdma_post_recv()
531 ctxt->sge[sge_no].addr = pa; svc_rdma_post_recv()
532 ctxt->sge[sge_no].length = PAGE_SIZE; svc_rdma_post_recv()
533 ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey; svc_rdma_post_recv()
534 ctxt->count = sge_no + 1; svc_rdma_post_recv()
538 recv_wr.sg_list = &ctxt->sge[0]; svc_rdma_post_recv()
539 recv_wr.num_sge = ctxt->count; svc_rdma_post_recv()
540 recv_wr.wr_id = (u64)(unsigned long)ctxt; svc_rdma_post_recv()
545 svc_rdma_unmap_dma(ctxt); svc_rdma_post_recv()
546 svc_rdma_put_context(ctxt, 1); svc_rdma_post_recv()
552 svc_rdma_unmap_dma(ctxt); svc_rdma_post_recv()
553 svc_rdma_put_context(ctxt, 1); svc_rdma_post_recv()
1145 struct svc_rdma_op_ctxt *ctxt; __svc_rdma_free() local
1146 ctxt = list_entry(rdma->sc_read_complete_q.next, __svc_rdma_free()
1149 list_del_init(&ctxt->dto_q); __svc_rdma_free()
1150 svc_rdma_put_context(ctxt, 1); __svc_rdma_free()
1155 struct svc_rdma_op_ctxt *ctxt; __svc_rdma_free() local
1156 ctxt = list_entry(rdma->sc_rq_dto_q.next, __svc_rdma_free()
1159 list_del_init(&ctxt->dto_q); __svc_rdma_free()
1160 svc_rdma_put_context(ctxt, 1); __svc_rdma_free()
1165 pr_err("svcrdma: ctxt still in use? (%d)\n", __svc_rdma_free()
1321 struct svc_rdma_op_ctxt *ctxt; svc_rdma_send_error() local
1332 ctxt = svc_rdma_get_context(xprt); svc_rdma_send_error()
1333 ctxt->direction = DMA_FROM_DEVICE; svc_rdma_send_error()
1334 ctxt->count = 1; svc_rdma_send_error()
1335 ctxt->pages[0] = p; svc_rdma_send_error()
1338 ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device, svc_rdma_send_error()
1340 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { svc_rdma_send_error()
1342 svc_rdma_put_context(ctxt, 1); svc_rdma_send_error()
1346 ctxt->sge[0].lkey = xprt->sc_dma_lkey; svc_rdma_send_error()
1347 ctxt->sge[0].length = length; svc_rdma_send_error()
1351 ctxt->wr_op = IB_WR_SEND; svc_rdma_send_error()
1352 err_wr.wr_id = (unsigned long)ctxt; svc_rdma_send_error()
1353 err_wr.sg_list = ctxt->sge; svc_rdma_send_error()
1363 svc_rdma_unmap_dma(ctxt); svc_rdma_send_error()
1364 svc_rdma_put_context(ctxt, 1); svc_rdma_send_error()
334 process_context(struct svcxprt_rdma *xprt, struct svc_rdma_op_ctxt *ctxt) process_context() argument
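
svc_rdma_get_context()/svc_rdma_put_context() above bracket every operation: allocate a context from a slab cache, reset its per-use fields, and on put optionally release the pages it references before returning it. A hedged user-space sketch of that lifecycle, where malloc/free stand in for kmem_cache_alloc/kmem_cache_free and put_page:

    #include <stdlib.h>

    #define MAX_PAGES 4

    struct op_ctxt {
            int count;                      /* pages/SGEs currently in use */
            void *pages[MAX_PAGES];
    };

    static struct op_ctxt *get_context(void)
    {
            /* kernel version pulls from svc_rdma_ctxt_cachep and resets
             * count, frmr and the dto_q list head */
            return calloc(1, sizeof(struct op_ctxt));
    }

    static void put_context(struct op_ctxt *ctxt, int free_pages)
    {
            if (free_pages)
                    for (int i = 0; i < ctxt->count; i++)
                            free(ctxt->pages[i]);   /* put_page() */
            free(ctxt);                             /* kmem_cache_free() */
    }

    int main(void)
    {
            struct op_ctxt *ctxt = get_context();

            if (!ctxt)
                    return 1;
            ctxt->pages[0] = malloc(4096);  /* a received page */
            ctxt->count = 1;
            put_context(ctxt, 1);           /* 1: release the pages too */
            return 0;
    }
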
H A Dsvc_rdma_sendto.c154 struct svc_rdma_op_ctxt *ctxt; send_write() local
166 ctxt = svc_rdma_get_context(xprt); send_write()
167 ctxt->direction = DMA_TO_DEVICE; send_write()
168 sge = ctxt->sge; send_write()
196 ctxt->count++; send_write()
211 ctxt->wr_op = IB_WR_RDMA_WRITE; send_write()
212 write_wr.wr_id = (unsigned long)ctxt; send_write()
226 svc_rdma_unmap_dma(ctxt); send_write()
227 svc_rdma_put_context(ctxt, 0); send_write()
380 struct svc_rdma_op_ctxt *ctxt, send_reply()
399 svc_rdma_put_context(ctxt, 0); send_reply()
404 ctxt->pages[0] = page; send_reply()
405 ctxt->count = 1; send_reply()
408 ctxt->sge[0].lkey = rdma->sc_dma_lkey; send_reply()
409 ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp); send_reply()
410 ctxt->sge[0].addr = send_reply()
412 ctxt->sge[0].length, DMA_TO_DEVICE); send_reply()
413 if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) send_reply()
417 ctxt->direction = DMA_TO_DEVICE; send_reply()
424 ctxt->sge[sge_no].addr = send_reply()
429 ctxt->sge[sge_no].addr)) send_reply()
432 ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey; send_reply()
433 ctxt->sge[sge_no].length = sge_bytes; send_reply()
440 /* Save all respages in the ctxt and remove them from the send_reply()
446 ctxt->pages[page_no+1] = rqstp->rq_respages[page_no]; send_reply()
447 ctxt->count++; send_reply()
455 ctxt->sge[page_no+1].length = 0; send_reply()
463 if (sge_no > ctxt->count) send_reply()
471 ctxt->wr_op = IB_WR_SEND; send_reply()
472 send_wr.wr_id = (unsigned long)ctxt; send_reply()
473 send_wr.sg_list = ctxt->sge; send_reply()
485 svc_rdma_unmap_dma(ctxt); send_reply()
486 svc_rdma_put_context(ctxt, 1); send_reply()
506 struct svc_rdma_op_ctxt *ctxt; svc_rdma_sendto() local
517 ctxt = svc_rdma_get_context(rdma); svc_rdma_sendto()
518 ctxt->direction = DMA_TO_DEVICE; svc_rdma_sendto()
556 ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec, svc_rdma_sendto()
566 svc_rdma_put_context(ctxt, 0); svc_rdma_sendto()
376 send_reply(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, struct page *page, struct rpcrdma_msg *rdma_resp, struct svc_rdma_op_ctxt *ctxt, struct svc_rdma_req_map *vec, int byte_count) send_reply() argument
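
Every error path in the sendto hits unwinds the same way: undo the DMA mappings, then drop the context (svc_rdma_unmap_dma followed by svc_rdma_put_context). A small sketch of that goto-style unwind; the helpers here are stand-ins that only print:

    #include <stdio.h>

    static int map_buffer(void)   { return -1; /* pretend the mapping failed */ }
    static void unmap_dma(void)   { puts("unmap dma"); }
    static void put_context(void) { puts("put ctxt"); }

    static int send_reply(void)
    {
            int ret = map_buffer();

            if (ret)
                    goto err;
            /* ... build SGEs and post the send work request ... */
            return 0;
    err:
            unmap_dma();            /* svc_rdma_unmap_dma() in the hits */
            put_context();          /* svc_rdma_put_context(ctxt, 1)    */
            return ret;
    }

    int main(void)
    {
            return send_reply() ? 1 : 0;
    }
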
H A Dsvc_rdma.c284 printk(KERN_INFO "Could not allocate WR ctxt cache.\n"); svc_rdma_init()
/linux-4.1.27/fs/nilfs2/
H A Dbtnode.c171 struct nilfs_btnode_chkey_ctxt *ctxt) nilfs_btnode_prepare_change_key()
175 __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; nilfs_btnode_prepare_change_key()
181 obh = ctxt->bh; nilfs_btnode_prepare_change_key()
182 ctxt->newbh = NULL; nilfs_btnode_prepare_change_key()
228 ctxt->newbh = nbh; nilfs_btnode_prepare_change_key()
241 struct nilfs_btnode_chkey_ctxt *ctxt) nilfs_btnode_commit_change_key()
243 struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh; nilfs_btnode_commit_change_key()
244 __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; nilfs_btnode_commit_change_key()
272 ctxt->bh = nbh; nilfs_btnode_commit_change_key()
282 struct nilfs_btnode_chkey_ctxt *ctxt) nilfs_btnode_abort_change_key()
284 struct buffer_head *nbh = ctxt->newbh; nilfs_btnode_abort_change_key()
285 __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; nilfs_btnode_abort_change_key()
294 unlock_page(ctxt->bh->b_page); nilfs_btnode_abort_change_key()
170 nilfs_btnode_prepare_change_key(struct address_space *btnc, struct nilfs_btnode_chkey_ctxt *ctxt) nilfs_btnode_prepare_change_key() argument
240 nilfs_btnode_commit_change_key(struct address_space *btnc, struct nilfs_btnode_chkey_ctxt *ctxt) nilfs_btnode_commit_change_key() argument
281 nilfs_btnode_abort_change_key(struct address_space *btnc, struct nilfs_btnode_chkey_ctxt *ctxt) nilfs_btnode_abort_change_key() argument
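
The btnode hits show a three-phase protocol: prepare stages a replacement buffer in ctxt->newbh, commit swaps it in as the current buffer, and abort throws the staged buffer away. A minimal sketch of the same shape, with malloc'd blocks standing in for buffer heads:

    #include <stdlib.h>

    struct chkey_ctxt {
            long oldkey, newkey;
            void *bh, *newbh;               /* buffer_head stand-ins */
    };

    static int prepare_change_key(struct chkey_ctxt *ctxt)
    {
            ctxt->newbh = malloc(16);       /* stage the replacement */
            return ctxt->newbh ? 0 : -1;
    }

    static void commit_change_key(struct chkey_ctxt *ctxt)
    {
            free(ctxt->bh);
            ctxt->bh = ctxt->newbh;         /* staged buffer becomes current */
            ctxt->newbh = NULL;
    }

    static void abort_change_key(struct chkey_ctxt *ctxt)
    {
            free(ctxt->newbh);              /* discard the staged buffer */
            ctxt->newbh = NULL;
    }

    int main(void)
    {
            struct chkey_ctxt ctxt = { .oldkey = 1, .newkey = 2 };

            if (prepare_change_key(&ctxt) == 0)
                    commit_change_key(&ctxt);
            else
                    abort_change_key(&ctxt);
            free(ctxt.bh);
            return 0;
    }
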
/linux-4.1.27/drivers/staging/lustre/lustre/include/
H A Dlustre_log.h132 int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
136 int llog_is_empty(const struct lu_env *env, struct llog_ctxt *ctxt,
139 struct llog_ctxt *ctxt, struct llog_ctxt *bak_ctxt,
203 int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt);
205 int llog_sync(struct llog_ctxt *ctxt, struct obd_export *exp, int flags);
206 int llog_cancel(const struct lu_env *env, struct llog_ctxt *ctxt,
210 int llog_initiator_connect(struct llog_ctxt *ctxt);
225 int (*lop_sync)(struct llog_ctxt *ctxt, struct obd_export *exp,
227 int (*lop_cleanup)(const struct lu_env *env, struct llog_ctxt *ctxt);
228 int (*lop_cancel)(const struct lu_env *env, struct llog_ctxt *ctxt,
230 int (*lop_connect)(struct llog_ctxt *ctxt, struct llog_logid *logid,
310 int loc_idx; /* my index the obd array of ctxt's */
312 struct obd_llog_group *loc_olg; /* group containing that ctxt */
327 static inline int llog_obd2ops(struct llog_ctxt *ctxt, llog_obd2ops() argument
330 if (ctxt == NULL) llog_obd2ops()
333 *lop = ctxt->loc_logops; llog_obd2ops()
362 static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt) llog_ctxt_get() argument
364 atomic_inc(&ctxt->loc_refcount); llog_ctxt_get()
365 CDEBUG(D_INFO, "GETting ctxt %p : new refcount %d\n", ctxt, llog_ctxt_get()
366 atomic_read(&ctxt->loc_refcount)); llog_ctxt_get()
367 return ctxt; llog_ctxt_get()
370 static inline void llog_ctxt_put(struct llog_ctxt *ctxt) llog_ctxt_put() argument
372 if (ctxt == NULL) llog_ctxt_put()
374 LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON); llog_ctxt_put()
375 CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt, llog_ctxt_put()
376 atomic_read(&ctxt->loc_refcount) - 1); llog_ctxt_put()
377 __llog_ctxt_put(NULL, ctxt); llog_ctxt_put()
389 struct llog_ctxt *ctxt, int index) llog_group_set_ctxt()
398 olg->olg_ctxts[index] = ctxt; llog_group_set_ctxt()
406 struct llog_ctxt *ctxt; llog_group_get_ctxt() local
412 ctxt = NULL; llog_group_get_ctxt()
414 ctxt = llog_ctxt_get(olg->olg_ctxts[index]); llog_group_get_ctxt()
416 return ctxt; llog_group_get_ctxt()
495 static inline int llog_connect(struct llog_ctxt *ctxt, llog_connect() argument
502 rc = llog_obd2ops(ctxt, &lop); llog_connect()
508 rc = lop->lop_connect(ctxt, logid, gen, uuid); llog_connect()
534 int llog_open_create(const struct lu_env *env, struct llog_ctxt *ctxt,
537 int llog_erase(const struct lu_env *env, struct llog_ctxt *ctxt,
388 llog_group_set_ctxt(struct obd_llog_group *olg, struct llog_ctxt *ctxt, int index) llog_group_set_ctxt() argument
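
llog_ctxt_get()/llog_ctxt_put() above are a plain atomic reference count: get increments and returns the pointer so calls can be chained, put tolerates NULL and frees on the last reference (via __llog_ctxt_put). A sketch using C11 atomics rather than the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct log_ctxt {
            atomic_int refcount;
    };

    static struct log_ctxt *ctxt_new(void)
    {
            struct log_ctxt *ctxt = malloc(sizeof(*ctxt));

            if (ctxt)
                    atomic_init(&ctxt->refcount, 1);
            return ctxt;
    }

    static struct log_ctxt *ctxt_get(struct log_ctxt *ctxt)
    {
            atomic_fetch_add(&ctxt->refcount, 1);
            return ctxt;            /* returned so calls can be chained */
    }

    static void ctxt_put(struct log_ctxt *ctxt)
    {
            if (ctxt == NULL)
                    return;         /* llog_ctxt_put() tolerates NULL too */
            if (atomic_fetch_sub(&ctxt->refcount, 1) == 1)
                    free(ctxt);     /* last reference frees the object */
    }

    int main(void)
    {
            struct log_ctxt *ctxt = ctxt_new();

            if (!ctxt)
                    return 1;
            ctxt_put(ctxt_get(ctxt));       /* ref up to 2, back to 1 */
            ctxt_put(ctxt);                 /* last put frees */
            return 0;
    }
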
H A Dobd_class.h168 int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
170 int class_config_dump_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
332 #define CTXTP(ctxt, op) (ctxt)->loc_logops->lop_##op
492 #define CTXT_CHECK_OP(ctxt, op, err) \
494 if (!OBT(ctxt->loc_obd) || !CTXTP((ctxt), op)) { \
497 ctxt->loc_obd->obd_minor); \
/linux-4.1.27/drivers/net/wireless/iwlwifi/mvm/
H A Dphy-ctxt.c127 static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt, iwl_mvm_phy_ctxt_cmd_hdr() argument
133 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, iwl_mvm_phy_ctxt_cmd_hdr()
134 ctxt->color)); iwl_mvm_phy_ctxt_cmd_hdr()
193 struct iwl_mvm_phy_ctxt *ctxt, iwl_mvm_phy_ctxt_apply()
202 iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time); iwl_mvm_phy_ctxt_apply()
212 IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret); iwl_mvm_phy_ctxt_apply()
219 int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, iwl_mvm_phy_ctxt_add() argument
224 ctxt->ref); iwl_mvm_phy_ctxt_add()
227 ctxt->channel = chandef->chan; iwl_mvm_phy_ctxt_add()
229 return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, iwl_mvm_phy_ctxt_add()
238 void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) iwl_mvm_phy_ctxt_ref() argument
241 ctxt->ref++; iwl_mvm_phy_ctxt_ref()
249 int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, iwl_mvm_phy_ctxt_changed() argument
255 ctxt->channel = chandef->chan; iwl_mvm_phy_ctxt_changed()
256 return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, iwl_mvm_phy_ctxt_changed()
261 void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) iwl_mvm_phy_ctxt_unref() argument
265 if (WARN_ON_ONCE(!ctxt)) iwl_mvm_phy_ctxt_unref()
268 ctxt->ref--; iwl_mvm_phy_ctxt_unref()
192 iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic, u32 action, u32 apply_time) iwl_mvm_phy_ctxt_apply() argument
H A Dmvm.h1111 int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
1114 int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
1118 struct iwl_mvm_phy_ctxt *ctxt);
1120 struct iwl_mvm_phy_ctxt *ctxt);
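
The mvm hits outline a PHY context lifecycle: add programs the context into firmware, changed re-sends the command with a new channel, and ref/unref track users. A self-contained model of that flow; the printf stands in for the firmware command, and nothing here is the driver's real API:

    #include <stdio.h>

    struct phy_ctxt { int id, ref, channel; };

    /* model of iwl_mvm_phy_ctxt_apply(): push current state to "firmware" */
    static int phy_ctxt_apply(struct phy_ctxt *c, const char *action)
    {
            printf("PHY ctxt %d: %s channel %d (ref %d)\n",
                   c->id, action, c->channel, c->ref);
            return 0;
    }

    static int phy_ctxt_add(struct phy_ctxt *c, int chan)
    {
            c->channel = chan;
            return phy_ctxt_apply(c, "ADD");
    }

    static int phy_ctxt_changed(struct phy_ctxt *c, int chan)
    {
            c->channel = chan;              /* re-send with the new channel */
            return phy_ctxt_apply(c, "MODIFY");
    }

    int main(void)
    {
            struct phy_ctxt c = { .id = 0 };

            phy_ctxt_add(&c, 36);
            c.ref++;                        /* iwl_mvm_phy_ctxt_ref()   */
            phy_ctxt_changed(&c, 149);
            c.ref--;                        /* iwl_mvm_phy_ctxt_unref() */
            return 0;
    }
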
/linux-4.1.27/arch/x86/xen/
H A Dsmp.c360 struct vcpu_guest_context *ctxt; cpu_initialize_context() local
369 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); cpu_initialize_context()
370 if (ctxt == NULL) cpu_initialize_context()
377 ctxt->user_regs.fs = __KERNEL_PERCPU; cpu_initialize_context()
378 ctxt->user_regs.gs = __KERNEL_STACK_CANARY; cpu_initialize_context()
380 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); cpu_initialize_context()
383 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle; cpu_initialize_context()
384 ctxt->flags = VGCF_IN_KERNEL; cpu_initialize_context()
385 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ cpu_initialize_context()
386 ctxt->user_regs.ds = __USER_DS; cpu_initialize_context()
387 ctxt->user_regs.es = __USER_DS; cpu_initialize_context()
388 ctxt->user_regs.ss = __KERNEL_DS; cpu_initialize_context()
390 xen_copy_trap_info(ctxt->trap_ctxt); cpu_initialize_context()
392 ctxt->ldt_ents = 0; cpu_initialize_context()
400 ctxt->gdt_frames[0] = gdt_mfn; cpu_initialize_context()
401 ctxt->gdt_ents = GDT_ENTRIES; cpu_initialize_context()
403 ctxt->kernel_ss = __KERNEL_DS; cpu_initialize_context()
404 ctxt->kernel_sp = idle->thread.sp0; cpu_initialize_context()
407 ctxt->event_callback_cs = __KERNEL_CS; cpu_initialize_context()
408 ctxt->failsafe_callback_cs = __KERNEL_CS; cpu_initialize_context()
410 ctxt->gs_base_kernel = per_cpu_offset(cpu); cpu_initialize_context()
412 ctxt->event_callback_eip = cpu_initialize_context()
414 ctxt->failsafe_callback_eip = cpu_initialize_context()
416 ctxt->user_regs.cs = __KERNEL_CS; cpu_initialize_context()
426 ctxt->user_regs.eip = (unsigned long)xen_pvh_early_cpu_init; cpu_initialize_context()
427 ctxt->user_regs.rdi = cpu; cpu_initialize_context()
428 ctxt->user_regs.rsi = true; /* entry == true */ cpu_initialize_context()
431 ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs); cpu_initialize_context()
432 ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir)); cpu_initialize_context()
433 if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt)) cpu_initialize_context()
436 kfree(ctxt); cpu_initialize_context()
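
cpu_initialize_context() above follows a simple shape: zero-allocate a vcpu_guest_context, fill in registers, callbacks and page tables, hand it to the hypervisor, then free the local copy (the hypervisor keeps its own). A hedged sketch of that shape with a stub in place of HYPERVISOR_vcpu_op:

    #include <stdlib.h>

    /* stand-in for struct vcpu_guest_context */
    struct guest_ctxt {
            unsigned long eip, esp, flags;
    };

    /* stand-in for HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt) */
    static int vcpu_initialise(int cpu, const struct guest_ctxt *ctxt)
    {
            (void)cpu; (void)ctxt;
            return 0;
    }

    static int initialize_context(int cpu, unsigned long entry,
                                  unsigned long stack)
    {
            struct guest_ctxt *ctxt = calloc(1, sizeof(*ctxt)); /* kzalloc() */
            int rc;

            if (!ctxt)
                    return -1;
            ctxt->eip = entry;      /* where the new vCPU starts running */
            ctxt->esp = stack;
            rc = vcpu_initialise(cpu, ctxt);
            free(ctxt);             /* the hypervisor copied it; kfree() */
            return rc;
    }

    int main(void)
    {
            return initialize_context(1, 0xffff0000ul, 0xfffe0000ul);
    }
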
/linux-4.1.27/arch/x86/kernel/cpu/mtrr/
H A Dmtrr.h48 void set_mtrr_done(struct set_mtrr_context *ctxt);
49 void set_mtrr_cache_disable(struct set_mtrr_context *ctxt);
50 void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
/linux-4.1.27/drivers/scsi/be2iscsi/
H A Dbe_cmds.c953 void *ctxt = &req->context; beiscsi_cmd_cq_create() local
967 ctxt, coalesce_wm); beiscsi_cmd_cq_create()
968 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); beiscsi_cmd_cq_create()
969 AMAP_SET_BITS(struct amap_cq_context, count, ctxt, beiscsi_cmd_cq_create()
971 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); beiscsi_cmd_cq_create()
972 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); beiscsi_cmd_cq_create()
973 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); beiscsi_cmd_cq_create()
974 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); beiscsi_cmd_cq_create()
975 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); beiscsi_cmd_cq_create()
976 AMAP_SET_BITS(struct amap_cq_context, func, ctxt, beiscsi_cmd_cq_create()
982 ctxt, coalesce_wm); beiscsi_cmd_cq_create()
984 ctxt, no_delay); beiscsi_cmd_cq_create()
985 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, beiscsi_cmd_cq_create()
987 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); beiscsi_cmd_cq_create()
988 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); beiscsi_cmd_cq_create()
989 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); beiscsi_cmd_cq_create()
990 AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1); beiscsi_cmd_cq_create()
993 be_dws_cpu_to_le(ctxt, sizeof(req->context)); beiscsi_cmd_cq_create()
1027 void *ctxt; beiscsi_cmd_mccq_create() local
1035 ctxt = &req->context; beiscsi_cmd_mccq_create()
1044 AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, beiscsi_cmd_mccq_create()
1046 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); beiscsi_cmd_mccq_create()
1047 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, beiscsi_cmd_mccq_create()
1049 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); beiscsi_cmd_mccq_create()
1051 be_dws_cpu_to_le(ctxt, sizeof(req->context)); beiscsi_cmd_mccq_create()
1151 void *ctxt = &req->context; be_cmd_create_default_pdu_queue() local
1171 rx_pdid, ctxt, 0); be_cmd_create_default_pdu_queue()
1173 rx_pdid_valid, ctxt, 1); be_cmd_create_default_pdu_queue()
1175 pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn)); be_cmd_create_default_pdu_queue()
1177 ring_size, ctxt, be_cmd_create_default_pdu_queue()
1181 default_buffer_size, ctxt, entry_size); be_cmd_create_default_pdu_queue()
1183 cq_id_recv, ctxt, cq->id); be_cmd_create_default_pdu_queue()
1186 rx_pdid, ctxt, 0); be_cmd_create_default_pdu_queue()
1188 rx_pdid_valid, ctxt, 1); be_cmd_create_default_pdu_queue()
1190 ring_size, ctxt, be_cmd_create_default_pdu_queue()
1194 default_buffer_size, ctxt, entry_size); be_cmd_create_default_pdu_queue()
1196 cq_id_recv, ctxt, cq->id); be_cmd_create_default_pdu_queue()
1199 be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_create_default_pdu_queue()
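
The be2iscsi hits (and the benet hits further down) all drive AMAP_SET_BITS: pack named fields into a context blob at fixed bit positions, then byte-swap the whole thing with be_dws_cpu_to_le before handing it to the adapter. A sketch of the underlying set-bits operation; the field offsets below are illustrative, not the hardware's layout:

    #include <stdint.h>
    #include <stdio.h>

    /* model of AMAP_SET_BITS(): place val at bits [shift, shift+width) */
    static void set_bits(uint32_t *ctxt, int shift, int width, uint32_t val)
    {
            uint32_t mask = ((1u << width) - 1) << shift;

            *ctxt = (*ctxt & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
            uint32_t ctxt = 0;

            set_bits(&ctxt, 0, 1, 1);       /* valid     */
            set_bits(&ctxt, 1, 1, 1);       /* eventable */
            set_bits(&ctxt, 16, 10, 256);   /* count     */
            printf("context word: 0x%08x\n", ctxt);
            return 0;
    }
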
/linux-4.1.27/tools/perf/scripts/python/
H A Dfutex-contention.py24 def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
34 def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
/linux-4.1.27/fs/ocfs2/
H A Dxattr.c274 struct ocfs2_xattr_set_ctxt *ctxt);
279 struct ocfs2_xattr_set_ctxt *ctxt);
716 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_extend_allocation()
719 handle_t *handle = ctxt->handle; ocfs2_xattr_extend_allocation()
742 ctxt->data_ac, ocfs2_xattr_extend_allocation()
743 ctxt->meta_ac, ocfs2_xattr_extend_allocation()
781 struct ocfs2_xattr_set_ctxt *ctxt) __ocfs2_remove_xattr_range()
785 handle_t *handle = ctxt->handle; __ocfs2_remove_xattr_range()
797 ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac, __ocfs2_remove_xattr_range()
798 &ctxt->dealloc); __ocfs2_remove_xattr_range()
811 len, ctxt->meta_ac, &ctxt->dealloc, 1); __ocfs2_remove_xattr_range()
813 ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc, __ocfs2_remove_xattr_range()
826 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_shrink_size()
852 ext_flags, ctxt); ocfs2_xattr_shrink_size()
872 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_value_truncate()
884 vb, ctxt); ocfs2_xattr_value_truncate()
888 vb, ctxt); ocfs2_xattr_value_truncate()
1895 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_value_truncate()
1902 ctxt); ocfs2_xa_value_truncate()
1914 access_rc = ocfs2_xa_journal_access(ctxt->handle, loc, ocfs2_xa_value_truncate()
2005 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_remove()
2012 rc = ocfs2_xa_value_truncate(loc, 0, ctxt); ocfs2_xa_remove()
2053 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_reuse_entry()
2075 rc = ocfs2_xa_value_truncate(loc, 0, ctxt); ocfs2_xa_reuse_entry()
2085 ctxt); ocfs2_xa_reuse_entry()
2115 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_prepare_entry()
2128 rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); ocfs2_xa_prepare_entry()
2136 rc = ocfs2_xa_value_truncate(loc, 0, ctxt); ocfs2_xa_prepare_entry()
2160 rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt); ocfs2_xa_prepare_entry()
2162 ctxt->set_abort = 1; ocfs2_xa_prepare_entry()
2190 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_store_value()
2202 ctxt->handle, &vb, ocfs2_xa_store_value()
2213 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_set()
2219 ret = ocfs2_xa_journal_access(ctxt->handle, loc, ocfs2_xa_set()
2234 ret = ocfs2_xa_remove(loc, ctxt); ocfs2_xa_set()
2238 ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt); ocfs2_xa_set()
2245 ret = ocfs2_xa_store_value(loc, xi, ctxt); ocfs2_xa_set()
2250 ocfs2_xa_journal_dirty(ctxt->handle, loc); ocfs2_xa_set()
2359 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, }; ocfs2_remove_value_outside() local
2362 ocfs2_init_dealloc_ctxt(&ctxt.dealloc); ocfs2_remove_value_outside()
2377 &ctxt.meta_ac, ocfs2_remove_value_outside()
2380 ctxt.handle = ocfs2_start_trans(osb, ref_credits + ocfs2_remove_value_outside()
2382 if (IS_ERR(ctxt.handle)) { ocfs2_remove_value_outside()
2383 ret = PTR_ERR(ctxt.handle); ocfs2_remove_value_outside()
2388 ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt); ocfs2_remove_value_outside()
2390 ocfs2_commit_trans(osb, ctxt.handle); ocfs2_remove_value_outside()
2391 if (ctxt.meta_ac) { ocfs2_remove_value_outside()
2392 ocfs2_free_alloc_context(ctxt.meta_ac); ocfs2_remove_value_outside()
2393 ctxt.meta_ac = NULL; ocfs2_remove_value_outside()
2403 if (ctxt.meta_ac) ocfs2_remove_value_outside()
2404 ocfs2_free_alloc_context(ctxt.meta_ac); ocfs2_remove_value_outside()
2406 ocfs2_run_deallocs(osb, &ctxt.dealloc); ocfs2_remove_value_outside()
2707 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_ibody_init()
2720 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh, ocfs2_xattr_ibody_init()
2746 ocfs2_journal_dirty(ctxt->handle, di_bh); ocfs2_xattr_ibody_init()
2761 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_ibody_set()
2772 ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt); ocfs2_xattr_ibody_set()
2782 ret = ocfs2_xa_set(&loc, xi, ctxt); ocfs2_xattr_ibody_set()
2851 struct ocfs2_xattr_set_ctxt *ctxt, ocfs2_create_xattr_block()
2863 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), ocfs2_create_xattr_block()
2870 ret = ocfs2_claim_metadata(ctxt->handle, ctxt->meta_ac, 1, ocfs2_create_xattr_block()
2887 ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode), ocfs2_create_xattr_block()
2899 xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot); ocfs2_create_xattr_block()
2915 ocfs2_journal_dirty(ctxt->handle, new_bh); ocfs2_create_xattr_block()
2925 ocfs2_journal_dirty(ctxt->handle, inode_bh); ocfs2_create_xattr_block()
2944 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_block_set()
2952 ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt, ocfs2_xattr_block_set()
2972 ret = ocfs2_xa_set(&loc, xi, ctxt); ocfs2_xattr_block_set()
2975 else if ((ret != -ENOSPC) || ctxt->set_abort) ocfs2_xattr_block_set()
2978 ret = ocfs2_xattr_create_index_block(inode, xs, ctxt); ocfs2_xattr_block_set()
2985 ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt); ocfs2_xattr_block_set()
3241 struct ocfs2_xattr_set_ctxt *ctxt, ocfs2_init_xattr_set_ctxt()
3248 memset(ctxt, 0, sizeof(struct ocfs2_xattr_set_ctxt)); ocfs2_init_xattr_set_ctxt()
3250 ocfs2_init_dealloc_ctxt(&ctxt->dealloc); ocfs2_init_xattr_set_ctxt()
3265 &ctxt->meta_ac); ocfs2_init_xattr_set_ctxt()
3273 ret = ocfs2_reserve_clusters(osb, clusters_add, &ctxt->data_ac); ocfs2_init_xattr_set_ctxt()
3279 if (ctxt->meta_ac) { ocfs2_init_xattr_set_ctxt()
3280 ocfs2_free_alloc_context(ctxt->meta_ac); ocfs2_init_xattr_set_ctxt()
3281 ctxt->meta_ac = NULL; ocfs2_init_xattr_set_ctxt()
3285 * We cannot have an error and a non null ctxt->data_ac. ocfs2_init_xattr_set_ctxt()
3297 struct ocfs2_xattr_set_ctxt *ctxt) __ocfs2_xattr_set_handle()
3304 ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt); __ocfs2_xattr_set_handle()
3306 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt); __ocfs2_xattr_set_handle()
3309 ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt); __ocfs2_xattr_set_handle()
3334 ret = ocfs2_extend_trans(ctxt->handle, credits); __ocfs2_xattr_set_handle()
3339 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt); __ocfs2_xattr_set_handle()
3340 } else if ((ret == -ENOSPC) && !ctxt->set_abort) { __ocfs2_xattr_set_handle()
3364 ret = ocfs2_extend_trans(ctxt->handle, credits); __ocfs2_xattr_set_handle()
3374 ret = ocfs2_xattr_block_set(inode, xi, xbs, ctxt); __ocfs2_xattr_set_handle()
3398 ret = ocfs2_extend_trans(ctxt->handle, credits); __ocfs2_xattr_set_handle()
3404 xis, ctxt); __ocfs2_xattr_set_handle()
3411 ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), __ocfs2_xattr_set_handle()
3422 ocfs2_journal_dirty(ctxt->handle, xis->inode_bh); __ocfs2_xattr_set_handle()
3463 struct ocfs2_xattr_set_ctxt ctxt = { ocfs2_xattr_set_handle() local
3499 ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt); ocfs2_xattr_set_handle()
3528 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; ocfs2_xattr_set() local
3620 &xbs, &ctxt, ref_meta, &credits); ocfs2_xattr_set()
3628 ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits); ocfs2_xattr_set()
3629 if (IS_ERR(ctxt.handle)) { ocfs2_xattr_set()
3630 ret = PTR_ERR(ctxt.handle); ocfs2_xattr_set()
3635 ret = __ocfs2_xattr_set_handle(inode, di, &xi, &xis, &xbs, &ctxt); ocfs2_xattr_set()
3636 ocfs2_update_inode_fsync_trans(ctxt.handle, inode, 0); ocfs2_xattr_set()
3638 ocfs2_commit_trans(osb, ctxt.handle); ocfs2_xattr_set()
3641 if (ctxt.data_ac) ocfs2_xattr_set()
3642 ocfs2_free_alloc_context(ctxt.data_ac); ocfs2_xattr_set()
3643 if (ctxt.meta_ac) ocfs2_xattr_set()
3644 ocfs2_free_alloc_context(ctxt.meta_ac); ocfs2_xattr_set()
3645 if (ocfs2_dealloc_has_cluster(&ctxt.dealloc)) ocfs2_xattr_set()
3647 ocfs2_run_deallocs(osb, &ctxt.dealloc); ocfs2_xattr_set()
4268 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_create_index_block()
4273 handle_t *handle = ctxt->handle; ocfs2_xattr_create_index_block()
4301 ret = __ocfs2_claim_clusters(handle, ctxt->data_ac, ocfs2_xattr_create_index_block()
5096 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_add_new_xattr_cluster()
5103 handle_t *handle = ctxt->handle; ocfs2_add_new_xattr_cluster()
5121 ret = __ocfs2_claim_clusters(handle, ctxt->data_ac, 1, ocfs2_add_new_xattr_cluster()
5166 num_bits, 0, ctxt->meta_ac); ocfs2_add_new_xattr_cluster()
5266 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_add_new_xattr_bucket()
5318 ctxt); ocfs2_add_new_xattr_bucket()
5327 ctxt->handle, ocfs2_add_new_xattr_bucket()
5352 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_bucket_value_truncate()
5390 ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt); ocfs2_xattr_bucket_value_truncate()
5396 ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket, ocfs2_xattr_bucket_value_truncate()
5405 ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket); ocfs2_xattr_bucket_value_truncate()
5541 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_set_entry_bucket()
5550 ret = ocfs2_xa_set(&loc, xi, ctxt); ocfs2_xattr_set_entry_bucket()
5561 ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle, ocfs2_xattr_set_entry_bucket()
5568 ret = ocfs2_xa_set(&loc, xi, ctxt); ocfs2_xattr_set_entry_bucket()
5584 struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_set_entry_index_block()
5590 ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); ocfs2_xattr_set_entry_index_block()
5617 ctxt); ocfs2_xattr_set_entry_index_block()
5639 ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); ocfs2_xattr_set_entry_index_block()
5656 struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,}; ocfs2_delete_xattr_in_bucket() local
5663 ocfs2_init_dealloc_ctxt(&ctxt.dealloc); ocfs2_delete_xattr_in_bucket()
5680 &ctxt.meta_ac, ocfs2_delete_xattr_in_bucket()
5683 ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits); ocfs2_delete_xattr_in_bucket()
5684 if (IS_ERR(ctxt.handle)) { ocfs2_delete_xattr_in_bucket()
5685 ret = PTR_ERR(ctxt.handle); ocfs2_delete_xattr_in_bucket()
5691 i, 0, &ctxt); ocfs2_delete_xattr_in_bucket()
5693 ocfs2_commit_trans(osb, ctxt.handle); ocfs2_delete_xattr_in_bucket()
5694 if (ctxt.meta_ac) { ocfs2_delete_xattr_in_bucket()
5695 ocfs2_free_alloc_context(ctxt.meta_ac); ocfs2_delete_xattr_in_bucket()
5696 ctxt.meta_ac = NULL; ocfs2_delete_xattr_in_bucket()
5704 if (ctxt.meta_ac) ocfs2_delete_xattr_in_bucket()
5705 ocfs2_free_alloc_context(ctxt.meta_ac); ocfs2_delete_xattr_in_bucket()
5707 ocfs2_run_deallocs(osb, &ctxt.dealloc); ocfs2_delete_xattr_in_bucket()
6547 struct ocfs2_xattr_set_ctxt ctxt; ocfs2_create_empty_xattr_block() local
6549 memset(&ctxt, 0, sizeof(ctxt)); ocfs2_create_empty_xattr_block()
6550 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &ctxt.meta_ac); ocfs2_create_empty_xattr_block()
6556 ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS); ocfs2_create_empty_xattr_block()
6557 if (IS_ERR(ctxt.handle)) { ocfs2_create_empty_xattr_block()
6558 ret = PTR_ERR(ctxt.handle); ocfs2_create_empty_xattr_block()
6565 ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed, ocfs2_create_empty_xattr_block()
6570 ocfs2_commit_trans(osb, ctxt.handle); ocfs2_create_empty_xattr_block()
6572 ocfs2_free_alloc_context(ctxt.meta_ac); ocfs2_create_empty_xattr_block()
713 ocfs2_xattr_extend_allocation(struct inode *inode, u32 clusters_to_add, struct ocfs2_xattr_value_buf *vb, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_extend_allocation() argument
777 __ocfs2_remove_xattr_range(struct inode *inode, struct ocfs2_xattr_value_buf *vb, u32 cpos, u32 phys_cpos, u32 len, unsigned int ext_flags, struct ocfs2_xattr_set_ctxt *ctxt) __ocfs2_remove_xattr_range() argument
822 ocfs2_xattr_shrink_size(struct inode *inode, u32 old_clusters, u32 new_clusters, struct ocfs2_xattr_value_buf *vb, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_shrink_size() argument
869 ocfs2_xattr_value_truncate(struct inode *inode, struct ocfs2_xattr_value_buf *vb, int len, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_value_truncate() argument
1894 ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_value_truncate() argument
2004 ocfs2_xa_remove(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_remove() argument
2051 ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_reuse_entry() argument
2112 ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi, u32 name_hash, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_prepare_entry() argument
2188 ocfs2_xa_store_value(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_store_value() argument
2211 ocfs2_xa_set(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xa_set() argument
2705 ocfs2_xattr_ibody_init(struct inode *inode, struct buffer_head *di_bh, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_ibody_init() argument
2758 ocfs2_xattr_ibody_set(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_ibody_set() argument
2849 ocfs2_create_xattr_block(struct inode *inode, struct buffer_head *inode_bh, struct ocfs2_xattr_set_ctxt *ctxt, int indexed, struct buffer_head **ret_bh) ocfs2_create_xattr_block() argument
2941 ocfs2_xattr_block_set(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_block_set() argument
3236 ocfs2_init_xattr_set_ctxt(struct inode *inode, struct ocfs2_dinode *di, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xis, struct ocfs2_xattr_search *xbs, struct ocfs2_xattr_set_ctxt *ctxt, int extra_meta, int *credits) ocfs2_init_xattr_set_ctxt() argument
3292 __ocfs2_xattr_set_handle(struct inode *inode, struct ocfs2_dinode *di, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xis, struct ocfs2_xattr_search *xbs, struct ocfs2_xattr_set_ctxt *ctxt) __ocfs2_xattr_set_handle() argument
4266 ocfs2_xattr_create_index_block(struct inode *inode, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_create_index_block() argument
5089 ocfs2_add_new_xattr_cluster(struct inode *inode, struct buffer_head *root_bh, struct ocfs2_xattr_bucket *first, struct ocfs2_xattr_bucket *target, u32 *num_clusters, u32 prev_cpos, int *extend, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_add_new_xattr_cluster() argument
5263 ocfs2_add_new_xattr_bucket(struct inode *inode, struct buffer_head *xb_bh, struct ocfs2_xattr_bucket *target, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_add_new_xattr_bucket() argument
5348 ocfs2_xattr_bucket_value_truncate(struct inode *inode, struct ocfs2_xattr_bucket *bucket, int xe_off, int len, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_bucket_value_truncate() argument
5538 ocfs2_xattr_set_entry_bucket(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_set_entry_bucket() argument
5581 ocfs2_xattr_set_entry_index_block(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_set_entry_index_block() argument
H A Dalloc.h210 int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
212 int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
220 struct ocfs2_cached_dealloc_ctxt *ctxt);
H A Dalloc.c569 static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
3659 struct ocfs2_merge_ctxt *ctxt) ocfs2_try_to_merge_extent()
3665 BUG_ON(ctxt->c_contig_type == CONTIG_NONE); ocfs2_try_to_merge_extent()
3667 if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) { ocfs2_try_to_merge_extent()
3684 if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) { ocfs2_try_to_merge_extent()
3688 BUG_ON(!ctxt->c_split_covers_rec); ocfs2_try_to_merge_extent()
3753 if (ctxt->c_contig_type == CONTIG_RIGHT) { ocfs2_try_to_merge_extent()
3771 if (ctxt->c_split_covers_rec) { ocfs2_try_to_merge_extent()
5031 struct ocfs2_merge_ctxt ctxt; ocfs2_split_extent() local
5042 ctxt.c_contig_type = ocfs2_figure_merge_contig_type(et, path, el, ocfs2_split_extent()
5069 ctxt.c_split_covers_rec = 1; ocfs2_split_extent()
5071 ctxt.c_split_covers_rec = 0; ocfs2_split_extent()
5073 ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]); ocfs2_split_extent()
5075 trace_ocfs2_split_extent(split_index, ctxt.c_contig_type, ocfs2_split_extent()
5076 ctxt.c_has_empty_extent, ocfs2_split_extent()
5077 ctxt.c_split_covers_rec); ocfs2_split_extent()
5079 if (ctxt.c_contig_type == CONTIG_NONE) { ocfs2_split_extent()
5080 if (ctxt.c_split_covers_rec) ocfs2_split_extent()
5092 dealloc, &ctxt); ocfs2_split_extent()
6394 int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, ocfs2_cache_cluster_dealloc() argument
6411 item->free_next = ctxt->c_global_allocator; ocfs2_cache_cluster_dealloc()
6413 ctxt->c_global_allocator = item; ocfs2_cache_cluster_dealloc()
6470 struct ocfs2_cached_dealloc_ctxt *ctxt) ocfs2_run_deallocs()
6475 if (!ctxt) ocfs2_run_deallocs()
6478 while (ctxt->c_first_suballocator) { ocfs2_run_deallocs()
6479 fl = ctxt->c_first_suballocator; ocfs2_run_deallocs()
6494 ctxt->c_first_suballocator = fl->f_next_suballocator; ocfs2_run_deallocs()
6498 if (ctxt->c_global_allocator) { ocfs2_run_deallocs()
6500 ctxt->c_global_allocator); ocfs2_run_deallocs()
6506 ctxt->c_global_allocator = NULL; ocfs2_run_deallocs()
6515 struct ocfs2_cached_dealloc_ctxt *ctxt) ocfs2_find_per_slot_free_list()
6517 struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator; ocfs2_find_per_slot_free_list()
6531 fl->f_next_suballocator = ctxt->c_first_suballocator; ocfs2_find_per_slot_free_list()
6533 ctxt->c_first_suballocator = fl; ocfs2_find_per_slot_free_list()
6538 int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, ocfs2_cache_block_dealloc() argument
6546 fl = ocfs2_find_per_slot_free_list(type, slot, ctxt); ocfs2_cache_block_dealloc()
6576 static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, ocfs2_cache_extent_block_free() argument
6579 return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE, ocfs2_cache_extent_block_free()
3653 ocfs2_try_to_merge_extent(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_path *path, int split_index, struct ocfs2_extent_rec *split_rec, struct ocfs2_cached_dealloc_ctxt *dealloc, struct ocfs2_merge_ctxt *ctxt) ocfs2_try_to_merge_extent() argument
6469 ocfs2_run_deallocs(struct ocfs2_super *osb, struct ocfs2_cached_dealloc_ctxt *ctxt) ocfs2_run_deallocs() argument
6513 ocfs2_find_per_slot_free_list(int type, int slot, struct ocfs2_cached_dealloc_ctxt *ctxt) ocfs2_find_per_slot_free_list() argument
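
The ocfs2 hits revolve around struct ocfs2_xattr_set_ctxt, a bag that carries the journal handle, the reserved metadata/data allocators and the deallocation queue, so every helper takes one ctxt argument and the caller brackets the work with start_trans/commit_trans and run_deallocs. A toy model of that lifecycle; the ints stand in for real handles and allocators:

    #include <stdio.h>

    struct xattr_set_ctxt {
            int handle;             /* journal transaction (handle_t)      */
            int meta_ac, data_ac;   /* reserved metadata / data allocators */
            int dealloc;            /* clusters queued for freeing         */
    };

    static void do_set(struct xattr_set_ctxt *ctxt)
    {
            printf("set xattr in transaction %d\n", ctxt->handle);
            ctxt->dealloc++;        /* truncating the old value queues clusters */
    }

    int main(void)
    {
            struct xattr_set_ctxt ctxt = { 0 };

            ctxt.meta_ac = 1;       /* ocfs2_reserve_new_metadata_blocks() */
            ctxt.handle = 42;       /* ocfs2_start_trans()                 */
            do_set(&ctxt);
            /* then: ocfs2_commit_trans(), ocfs2_free_alloc_context(),
             * ocfs2_run_deallocs(), as in the hits above */
            printf("commit %d, dealloc %d cluster(s)\n",
                   ctxt.handle, ctxt.dealloc);
            return 0;
    }
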
/linux-4.1.27/drivers/infiniband/hw/qib/
H A Dqib_file_ops.c193 kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt; qib_get_base_info()
242 kinfo->spi_ctxt = rcd->ctxt; qib_get_base_info()
308 ctxttid = rcd->ctxt * dd->rcvtidcnt; qib_tid_update()
499 ctxttid = rcd->ctxt * dd->rcvtidcnt; qib_tid_free()
674 /* atomically clear receive enable ctxt. */ qib_manage_rcvq()
689 dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt); qib_manage_rcvq()
765 "%s ctxt%u mmap of %lx, %x bytes failed: %d\n", qib_mmap_mem()
766 what, rcd->ctxt, pfn, len, ret); qib_mmap_mem()
780 * for the user process to update the head registers for their ctxt mmap_ureg()
1046 ureg = dd->uregbase + dd->ureg_align * rcd->ctxt; qib_mmapf()
1048 /* ctxt is not shared */ qib_mmapf()
1139 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt); qib_poll_next()
1308 static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, setup_ctxt() argument
1324 rcd = qib_create_ctxtdata(ppd, ctxt, numa_id); setup_ctxt()
1347 init_waitqueue_head(&dd->rcd[ctxt]->wait); setup_ctxt()
1359 dd->rcd[ctxt] = NULL; setup_ctxt()
1382 int ret, ctxt; choose_port_ctxt() local
1391 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt]; choose_port_ctxt()
1392 ctxt++) choose_port_ctxt()
1394 if (ctxt == dd->cfgctxts) { choose_port_ctxt()
1399 u32 pidx = ctxt % dd->num_pports; choose_port_ctxt()
1410 ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN; choose_port_ctxt()
1434 u32 port = uinfo->spu_port, ctxt; get_a_ctxt() local
1465 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; get_a_ctxt()
1466 ctxt++) get_a_ctxt()
1467 if (dd->rcd[ctxt]) get_a_ctxt()
1519 /* Skip ctxt if it doesn't match the requested one */ find_shared_ctxt()
1596 rcd->ctxt, do_qib_user_sdma_queue_create()
1606 * Get ctxt early, so we can set affinity prior to memory allocation.
1692 uctxt = rcd->ctxt - dd->first_user_ctxt; qib_do_user_init()
1711 "%u:ctxt%u: no 2KB buffers available\n", qib_do_user_init()
1712 dd->unit, rcd->ctxt); qib_do_user_init()
1718 rcd->ctxt, rcd->piocnt); qib_do_user_init()
1738 * array for time being. If rcd->ctxt > chip-supported, qib_do_user_init()
1740 * through ctxt 0, someday qib_do_user_init()
1755 * Now enable the ctxt for receive. qib_do_user_init()
1769 rcd->ctxt); qib_do_user_init()
1787 * @rcd: ctxt
1795 int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt; unlock_expected_tids()
1822 unsigned ctxt; qib_close() local
1863 ctxt = rcd->ctxt; qib_close()
1864 dd->rcd[ctxt] = NULL; qib_close()
1880 /* atomically clear receive enable ctxt and intr avail. */ qib_close()
1882 QIB_RCVCTRL_INTRAVAIL_DIS, ctxt); qib_close()
1884 /* clean up the pkeys for this ctxt user */ qib_close()
1919 info.ctxt = rcd->ctxt; qib_ctxt_info()
2007 unsigned ctxt; qib_set_uevent_bits() local
2012 for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts; qib_set_uevent_bits()
2013 ctxt++) { qib_set_uevent_bits()
2014 rcd = ppd->dd->rcd[ctxt]; qib_set_uevent_bits()
H A Dqib_init.c68 * buffers per ctxt, etc.) Zero means use chip value.
154 "Unable to allocate ctxtdata for Kernel ctxt, failing\n"); qib_create_ctxts()
168 struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt, qib_create_ctxtdata() argument
181 rcd->ctxt = ctxt; qib_create_ctxtdata()
182 dd->rcd[ctxt] = rcd; qib_create_ctxtdata()
184 if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */ qib_create_ctxtdata()
190 "Unable to allocate per ctxt stats buffer\n"); qib_create_ctxtdata()
375 * ctxt data structure, so we only allocate memory for ctxts actually
376 * in use, since we're at 8k per ctxt now.
477 * ctxt == -1 means "all contexts". Only really safe for init_after_reset()
692 * Need to re-create rest of ctxt 0 ctxtdata as well. qib_init()
703 "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n"); qib_init()
1347 int ctxt; cleanup_device_data() local
1385 for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) { cleanup_device_data()
1386 int ctxt_tidbase = ctxt * dd->rcvtidcnt; cleanup_device_data()
1416 for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) { cleanup_device_data()
1417 struct qib_ctxtdata *rcd = tmp[ctxt]; cleanup_device_data()
1419 tmp[ctxt] = NULL; /* debugging paranoia */ cleanup_device_data()
1603 gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ? qib_create_rcvhdrq()
1615 "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n", qib_create_rcvhdrq()
1616 amt, rcd->ctxt); qib_create_rcvhdrq()
1620 if (rcd->ctxt >= dd->first_user_ctxt) { qib_create_rcvhdrq()
1648 "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n", qib_create_rcvhdrq()
1649 rcd->ctxt); qib_create_rcvhdrq()
H A Dqib_tx.c135 unsigned ctxt; find_ctxt() local
139 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { find_ctxt()
140 rcd = dd->rcd[ctxt]; find_ctxt()
462 unsigned ctxt; qib_cancel_sends() local
474 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { qib_cancel_sends()
476 rcd = dd->rcd[ctxt]; qib_cancel_sends()
H A Dqib_iba6120.c222 void *dummy_hdrq; /* used after ctxt close */
300 * @ctxt: context number
307 enum qib_ureg regno, int ctxt) qib_read_ureg32()
315 dd->ureg_align * ctxt)); qib_read_ureg32()
320 dd->ureg_align * ctxt)); qib_read_ureg32()
328 * @ctxt: context
333 enum qib_ureg regno, u64 value, int ctxt) qib_write_ureg()
340 dd->ureg_align * ctxt); qib_write_ureg()
345 dd->ureg_align * ctxt); qib_write_ureg()
376 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
379 * @ctxt: the context containing the register
383 const u16 regno, unsigned ctxt, qib_write_kreg_ctxt()
386 qib_write_kreg(dd, regno + ctxt, value); qib_write_kreg_ctxt()
1905 * call can be done from interrupt level for the ctxt 0 eager TIDs, qib_6120_put_tid()
1973 * @ctxt: the context
1985 u32 ctxt; qib_6120_clear_tids() local
1991 ctxt = rcd->ctxt; qib_6120_clear_tids()
1997 ctxt * dd->rcvtidcnt * sizeof(*tidbase)); qib_6120_clear_tids()
2028 * specify 2KB or 4KB and/or make it per ctxt instead of per device qib_6120_tidtemplate()
2048 * @rcd: the qlogic_ib ctxt
2090 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); qib_update_6120_usrhead()
2092 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); qib_update_6120_usrhead()
2100 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); qib_6120_hdrqempty()
2104 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); qib_6120_hdrqempty()
2109 * Used when we close any ctxt, for DMA already in flight
2134 int ctxt) rcvctrl_6120_mod()
2150 if (ctxt < 0) rcvctrl_6120_mod()
2153 mask = (1ULL << ctxt); rcvctrl_6120_mod()
2155 /* always done for specific ctxt */ rcvctrl_6120_mod()
2160 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, rcvctrl_6120_mod()
2161 dd->rcd[ctxt]->rcvhdrqtailaddr_phys); rcvctrl_6120_mod()
2162 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, rcvctrl_6120_mod()
2163 dd->rcd[ctxt]->rcvhdrq_phys); rcvctrl_6120_mod()
2165 if (ctxt == 0 && !dd->cspec->dummy_hdrq) rcvctrl_6120_mod()
2177 val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) | rcvctrl_6120_mod()
2179 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); rcvctrl_6120_mod()
2188 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); rcvctrl_6120_mod()
2189 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); rcvctrl_6120_mod()
2191 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); rcvctrl_6120_mod()
2192 dd->rcd[ctxt]->head = val; rcvctrl_6120_mod()
2194 if (ctxt < dd->first_user_ctxt) rcvctrl_6120_mod()
2196 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); rcvctrl_6120_mod()
2204 * page 0, and by disabling the ctxt, it should stop "soon", rcvctrl_6120_mod()
2206 * disabled the ctxt. Only 6120 has this issue. rcvctrl_6120_mod()
2208 if (ctxt >= 0) { rcvctrl_6120_mod()
2209 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, rcvctrl_6120_mod()
2211 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, rcvctrl_6120_mod()
3451 rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt; qib_6120_init_ctxt()
306 qib_read_ureg32(const struct qib_devdata *dd, enum qib_ureg regno, int ctxt) qib_read_ureg32() argument
332 qib_write_ureg(const struct qib_devdata *dd, enum qib_ureg regno, u64 value, int ctxt) qib_write_ureg() argument
382 qib_write_kreg_ctxt(const struct qib_devdata *dd, const u16 regno, unsigned ctxt, u64 value) qib_write_kreg_ctxt() argument
2133 rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op, int ctxt) rcvctrl_6120_mod() argument
H A Dqib_iba7220.c223 * @ctxt: context number
230 enum qib_ureg regno, int ctxt) qib_read_ureg32()
238 dd->ureg_align * ctxt)); qib_read_ureg32()
243 dd->ureg_align * ctxt)); qib_read_ureg32()
251 * @ctxt: context
256 enum qib_ureg regno, u64 value, int ctxt) qib_write_ureg()
263 dd->ureg_align * ctxt); qib_write_ureg()
268 dd->ureg_align * ctxt); qib_write_ureg()
275 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
278 * @ctxt: the context containing the register
282 const u16 regno, unsigned ctxt, qib_write_kreg_ctxt()
285 qib_write_kreg(dd, regno + ctxt, value); qib_write_kreg_ctxt()
2212 * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
2214 * @ctxt: the ctxt
2216 * clear all TID entries for a ctxt, expected and eager.
2226 u32 ctxt; qib_7220_clear_tids() local
2232 ctxt = rcd->ctxt; qib_7220_clear_tids()
2238 ctxt * dd->rcvtidcnt * sizeof(*tidbase)); qib_7220_clear_tids()
2271 * @rcd: the qlogic_ib ctxt
2330 * affects number of eager TIDs per ctxt (1K, 2K, 4K). qib_7220_config_ctxts()
2736 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); qib_update_7220_usrhead()
2738 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); qib_update_7220_usrhead()
2746 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); qib_7220_hdrqempty()
2750 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); qib_7220_hdrqempty()
2762 int ctxt) rcvctrl_7220_mod()
2777 if (ctxt < 0) rcvctrl_7220_mod()
2780 mask = (1ULL << ctxt); rcvctrl_7220_mod()
2782 /* always done for specific ctxt */ rcvctrl_7220_mod()
2787 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, rcvctrl_7220_mod()
2788 dd->rcd[ctxt]->rcvhdrqtailaddr_phys); rcvctrl_7220_mod()
2789 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, rcvctrl_7220_mod()
2790 dd->rcd[ctxt]->rcvhdrq_phys); rcvctrl_7220_mod()
2791 dd->rcd[ctxt]->seq_cnt = 1; rcvctrl_7220_mod()
2802 val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) | rcvctrl_7220_mod()
2804 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); rcvctrl_7220_mod()
2813 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); rcvctrl_7220_mod()
2814 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); rcvctrl_7220_mod()
2816 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); rcvctrl_7220_mod()
2817 dd->rcd[ctxt]->head = val; rcvctrl_7220_mod()
2819 if (ctxt < dd->first_user_ctxt) rcvctrl_7220_mod()
2821 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); rcvctrl_7220_mod()
2824 if (ctxt >= 0) { rcvctrl_7220_mod()
2825 qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0); rcvctrl_7220_mod()
2826 qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0); rcvctrl_7220_mod()
4171 * want to update before we actually run out, at low pbufs/ctxt qib_init_7220_variables()
4408 if (!rcd->ctxt) { qib_7220_init_ctxt()
4414 (rcd->ctxt - 1) * rcd->rcvegrcnt; qib_7220_init_ctxt()
229 qib_read_ureg32(const struct qib_devdata *dd, enum qib_ureg regno, int ctxt) qib_read_ureg32() argument
255 qib_write_ureg(const struct qib_devdata *dd, enum qib_ureg regno, u64 value, int ctxt) qib_write_ureg() argument
281 qib_write_kreg_ctxt(const struct qib_devdata *dd, const u16 regno, unsigned ctxt, u64 value) qib_write_kreg_ctxt() argument
2761 rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op, int ctxt) rcvctrl_7220_mod() argument
H A Dqib_iba7322.c362 * those entries for ctxt 0/1 on driver load twice, for example).
769 * @ctxt: context number
776 enum qib_ureg regno, int ctxt) qib_read_ureg32()
781 (dd->ureg_align * ctxt) + (dd->userbase ? qib_read_ureg32()
790 * @ctxt: context number
797 enum qib_ureg regno, int ctxt) qib_read_ureg()
803 (dd->ureg_align * ctxt) + (dd->userbase ? qib_read_ureg()
813 * @ctxt: context
818 enum qib_ureg regno, u64 value, int ctxt) qib_write_ureg()
825 dd->ureg_align * ctxt); qib_write_ureg()
830 dd->ureg_align * ctxt); qib_write_ureg()
880 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
883 * @ctxt: the context containing the register
887 const u16 regno, unsigned ctxt, qib_write_kreg_ctxt()
890 qib_write_kreg(dd, regno + ctxt, value); qib_write_kreg_ctxt()
1304 * with ctxt-number appended
2712 if (cspec->rhdr_cpu[rcd->ctxt] != cpu) { qib_update_rhdrq_dca()
2715 cspec->rhdr_cpu[rcd->ctxt] = cpu; qib_update_rhdrq_dca()
2716 rmp = &dca_rcvhdr_reg_map[rcd->ctxt]; qib_update_rhdrq_dca()
2721 "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu, qib_update_rhdrq_dca()
3065 u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt]; adjust_rcv_timeout()
3078 dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout; adjust_rcv_timeout()
3079 qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout); adjust_rcv_timeout()
3201 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); qib_7322pintr()
3502 unsigned ctxt; qib_setup_7322_interrupt() local
3504 ctxt = i - ARRAY_SIZE(irq_table); qib_setup_7322_interrupt()
3506 arg = dd->rcd[ctxt]; qib_setup_7322_interrupt()
3509 if (qib_krcvq01_no_msi && ctxt < 2) qib_setup_7322_interrupt()
3514 lsb = QIB_I_RCVAVAIL_LSB + ctxt; qib_setup_7322_interrupt()
3849 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3851 * @ctxt: the ctxt
3853 * clear all TID entries for a ctxt, expected and eager.
3861 u32 ctxt; qib_7322_clear_tids() local
3867 ctxt = rcd->ctxt; qib_7322_clear_tids()
3873 ctxt * dd->rcvtidcnt * sizeof(*tidbase)); qib_7322_clear_tids()
3915 * @rcd: the qlogic_ib ctxt
3984 * affects number of eager TIDs per ctxt (1K, 2K, 4K). qib_7322_config_ctxts()
4491 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); qib_update_7322_usrhead()
4493 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); qib_update_7322_usrhead()
4494 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); qib_update_7322_usrhead()
4502 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); qib_7322_hdrqempty()
4506 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt); qib_7322_hdrqempty()
4534 int ctxt) rcvctrl_7322_mod()
4555 if (ctxt < 0) { rcvctrl_7322_mod()
4559 mask = (1ULL << ctxt); rcvctrl_7322_mod()
4560 rcd = dd->rcd[ctxt]; rcvctrl_7322_mod()
4570 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, rcvctrl_7322_mod()
4572 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, rcvctrl_7322_mod()
4596 if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) { rcvctrl_7322_mod()
4603 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt); rcvctrl_7322_mod()
4604 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt); rcvctrl_7322_mod()
4608 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt); rcvctrl_7322_mod()
4609 dd->rcd[ctxt]->head = val; rcvctrl_7322_mod()
4611 if (ctxt < dd->first_user_ctxt) rcvctrl_7322_mod()
4613 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); rcvctrl_7322_mod()
4615 dd->rcd[ctxt] && dd->rhdrhead_intr_off) { rcvctrl_7322_mod()
4617 val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off; rcvctrl_7322_mod()
4618 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt); rcvctrl_7322_mod()
4624 if (ctxt >= 0) { rcvctrl_7322_mod()
4625 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0); rcvctrl_7322_mod()
4626 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0); rcvctrl_7322_mod()
4629 TIDFLOW_ERRBITS, ctxt); rcvctrl_7322_mod()
6374 unsigned ctxt; write_7322_initregs() local
6377 ctxt = (i % n) * dd->num_pports + pidx; write_7322_initregs()
6379 ctxt = (i % n) + 1; write_7322_initregs()
6381 ctxt = ppd->hw_pidx; write_7322_initregs()
6382 val |= ctxt << (5 * (i % 6)); write_7322_initregs()
6746 * want to update before we actually run out, at low pbufs/ctxt qib_init_7322_variables()
7072 if (rcd->ctxt < NUM_IB_PORTS) { qib_7322_init_ctxt()
7075 rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0; qib_7322_init_ctxt()
7083 (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt; qib_7322_init_ctxt()
7350 /* reduce by ctxts < 2 */ qib_init_iba7322_funcs()
775 qib_read_ureg32(const struct qib_devdata *dd, enum qib_ureg regno, int ctxt) qib_read_ureg32() argument
796 qib_read_ureg(const struct qib_devdata *dd, enum qib_ureg regno, int ctxt) qib_read_ureg() argument
817 qib_write_ureg(const struct qib_devdata *dd, enum qib_ureg regno, u64 value, int ctxt) qib_write_ureg() argument
886 qib_write_kreg_ctxt(const struct qib_devdata *dd, const u16 regno, unsigned ctxt, u64 value) qib_write_kreg_ctxt() argument
4533 rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op, int ctxt) rcvctrl_7322_mod() argument
H A Dqib.h161 unsigned ctxt; member in struct:qib_ctxtdata
164 /* non-zero if ctxt is being shared. */
166 /* non-zero if ctxt is being shared. */
172 /* number of pio bufs for this ctxt (all procs, if shared) */
174 /* first pio buffer for this ctxt */
176 /* chip offset of PIO buffers for this ctxt */
204 /* pid of process using this ctxt */
209 /* pkeys set by this use of this ctxt */
221 /* The version of the library which opened this ctxt */
230 /* ctxt rcvhdrq head offset */
355 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
818 * (ctxt == -1) means "all contexts", only meaningful for
822 int ctxt);
876 /* pio bufs allocated per ctxt */
878 /* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
882 * supports, less gives more pio bufs/ctxt, etc.
H A Dqib_common.h250 * shared memory pages for subctxts if ctxt is shared; these cover
375 #define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */
423 __u16 ctxt; /* ctxt on unit assigned to caller */ member in struct:qib_ctxt_info
426 __u16 num_subctxts; /* number of subctxts opened on ctxt */
H A Dqib_ud.c399 unsigned ctxt = ppd->hw_pidx; qib_lookup_pkey() local
404 for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i) qib_lookup_pkey()
405 if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey) qib_lookup_pkey()
H A Dqib_intr.c206 rcd->ctxt); qib_handle_urcv()
H A Dqib_driver.c297 u32 ctxt, u32 eflags, u32 l, u32 etail, qib_rcv_hdrerr()
514 crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l, qib_kreceive()
296 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, u32 ctxt, u32 eflags, u32 l, u32 etail, __le32 *rhf_addr, struct qib_message_header *rhdr) qib_rcv_hdrerr() argument
H A Dqib_user_sdma.c183 qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt) qib_user_sdma_queue_create() argument
204 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt); qib_user_sdma_queue_create()
213 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt); qib_user_sdma_queue_create()
H A Dqib_verbs.c1934 * No need to validate rcd[ctxt]; the port is setup if we are here.
1940 unsigned ctxt = ppd->hw_pidx; qib_get_pkey() local
1944 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys)) qib_get_pkey()
1947 ret = dd->rcd[ctxt]->pkeys[index]; qib_get_pkey()
H A Dqib_mad.c981 * set_pkeys - set the PKEY table for ctxt 0
/linux-4.1.27/drivers/net/ethernet/intel/igb/
H A De1000_82575.h149 #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
154 #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
155 #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
156 /* Adv ctxt IPSec SA IDX mask */
157 /* Adv ctxt IPSec ESP len mask */
/linux-4.1.27/arch/arm64/include/asm/
H A Dkvm_host.h102 struct kvm_cpu_context ctxt; member in struct:kvm_vcpu_arch
145 #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
146 #define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
151 #define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
152 #define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
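The kvm_host.h hits show the convention: the vCPU embeds a single kvm_cpu_context named ctxt, and every register accessor is a thin macro over it, so save/restore code only needs the offset of arch.ctxt (exactly what the asm-offsets.c:107 hit further down exports). A toy reconstruction of the shape, with reduced stand-in types:

    /* toy shapes; the real ones live in arch/arm64/include/asm/kvm_host.h */
    struct toy_cpu_context { unsigned long sys_regs[16]; };
    struct toy_vcpu_arch  { struct toy_cpu_context ctxt; };
    struct toy_vcpu       { struct toy_vcpu_arch arch; };

    #define vcpu_sys_reg(v, r) ((v)->arch.ctxt.sys_regs[(r)])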
/linux-4.1.27/drivers/net/ethernet/emulex/benet/
be_cmds.c:1067 void *ctxt; be_cmd_cq_create() local
1075 ctxt = &req->context; be_cmd_cq_create()
1084 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, be_cmd_cq_create()
1087 ctxt, no_delay); be_cmd_cq_create()
1088 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, be_cmd_cq_create()
1090 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); be_cmd_cq_create()
1091 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); be_cmd_cq_create()
1092 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); be_cmd_cq_create()
1102 ctxt, coalesce_wm); be_cmd_cq_create()
1103 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, be_cmd_cq_create()
1105 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, be_cmd_cq_create()
1107 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); be_cmd_cq_create()
1108 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); be_cmd_cq_create()
1109 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); be_cmd_cq_create()
1112 be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_cq_create()
1145 void *ctxt; be_cmd_mccq_ext_create() local
1153 ctxt = &req->context; be_cmd_mccq_ext_create()
1161 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); be_cmd_mccq_ext_create()
1162 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, be_cmd_mccq_ext_create()
1164 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); be_cmd_mccq_ext_create()
1169 AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt, be_cmd_mccq_ext_create()
1171 AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1); be_cmd_mccq_ext_create()
1173 ctxt, cq->id); be_cmd_mccq_ext_create()
1175 ctxt, 1); be_cmd_mccq_ext_create()
1187 be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_mccq_ext_create()
1210 void *ctxt; be_cmd_mccq_org_create() local
1218 ctxt = &req->context; be_cmd_mccq_org_create()
1226 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); be_cmd_mccq_org_create()
1227 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, be_cmd_mccq_org_create()
1229 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); be_cmd_mccq_org_create()
1231 be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_mccq_org_create()
2260 void *ctxt = NULL; lancer_cmd_write_object() local
2279 ctxt = &req->context; lancer_cmd_write_object()
2281 write_length, ctxt, data_size); lancer_cmd_write_object()
2285 eof, ctxt, 1); lancer_cmd_write_object()
2288 eof, ctxt, 0); lancer_cmd_write_object()
2290 be_dws_cpu_to_le(ctxt, sizeof(req->context)); lancer_cmd_write_object()
3163 void *ctxt; be_cmd_set_hsw_config() local
3175 ctxt = &req->context; be_cmd_set_hsw_config()
3182 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); be_cmd_set_hsw_config()
3184 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); be_cmd_set_hsw_config()
3185 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); be_cmd_set_hsw_config()
3189 ctxt, adapter->hba_port_num); be_cmd_set_hsw_config()
3190 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); be_cmd_set_hsw_config()
3192 ctxt, hsw_mode); be_cmd_set_hsw_config()
3209 void *ctxt; be_cmd_get_hsw_config() local
3222 ctxt = &req->context; be_cmd_get_hsw_config()
3230 ctxt, intf_id); be_cmd_get_hsw_config()
3231 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); be_cmd_get_hsw_config()
3235 ctxt, adapter->hba_port_num); be_cmd_get_hsw_config()
3236 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); be_cmd_get_hsw_config()
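Every be_cmds.c hit follows the same two-step discipline: point ctxt at the request's context blob, set fields with AMAP_SET_BITS while the blob is still in CPU byte order, then convert the whole thing to little-endian once with be_dws_cpu_to_le() just before posting. A standalone sketch of that discipline (a simplified set_bits that, unlike the real AMAP macros, assumes a field never spans a dword boundary):

    #include <stdint.h>
    #include <stddef.h>
    #include <endian.h>

    /* set a 'width'-bit field at bit 'offset' inside the context blob */
    static void set_bits(uint32_t *ctxt, unsigned offset, unsigned width,
                         uint32_t val)
    {
        uint32_t mask = (width >= 32) ? 0xffffffffu : ((1u << width) - 1);

        ctxt[offset / 32] &= ~(mask << (offset % 32));
        ctxt[offset / 32] |= (val & mask) << (offset % 32);
    }

    /* convert the filled blob to little-endian in one pass (no-op on LE hosts) */
    static void dws_cpu_to_le(uint32_t *ctxt, size_t bytes)
    {
        for (size_t i = 0; i < bytes / 4; i++)
            ctxt[i] = htole32(ctxt[i]);
    }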
/linux-4.1.27/fs/ocfs2/dlm/
dlmdomain.c:862 /* Once the dlm ctxt is marked as leaving then we don't want dlm_query_join_handler()
940 /* XXX should we consider no dlm ctxt an error? */ dlm_assert_joined_handler()
1577 struct domain_join_ctxt *ctxt, dlm_should_restart_join()
1590 ret = memcmp(ctxt->live_map, dlm->live_nodes_map, dlm_should_restart_join()
1603 struct domain_join_ctxt *ctxt; dlm_try_to_join_domain() local
1608 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); dlm_try_to_join_domain()
1609 if (!ctxt) { dlm_try_to_join_domain()
1621 memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map)); dlm_try_to_join_domain()
1628 while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES, dlm_try_to_join_domain()
1642 set_bit(node, ctxt->yes_resp_map); dlm_try_to_join_domain()
1644 if (dlm_should_restart_join(dlm, ctxt, response)) { dlm_try_to_join_domain()
1657 memcpy(dlm->domain_map, ctxt->yes_resp_map, dlm_try_to_join_domain()
1658 sizeof(ctxt->yes_resp_map)); dlm_try_to_join_domain()
1665 status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map); dlm_try_to_join_domain()
1670 status = dlm_send_regions(dlm, ctxt->yes_resp_map); dlm_try_to_join_domain()
1677 dlm_send_join_asserts(dlm, ctxt->yes_resp_map); dlm_try_to_join_domain()
1697 if (ctxt) { dlm_try_to_join_domain()
1701 ctxt->yes_resp_map, dlm_try_to_join_domain()
1702 sizeof(ctxt->yes_resp_map)); dlm_try_to_join_domain()
1706 kfree(ctxt); dlm_try_to_join_domain()
2140 mlog(0, "This ctxt is not joined yet!\n"); dlm_register_domain()
1576 dlm_should_restart_join(struct dlm_ctxt *dlm, struct domain_join_ctxt *ctxt, enum dlm_query_join_response_code response) dlm_should_restart_join() argument
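dlm_try_to_join_domain() shows the lifetime of a throwaway ctxt: kzalloc it, snapshot live_nodes_map into it, record yes votes in yes_resp_map, and restart the whole join if dlm_should_restart_join() sees the live map change underneath. A condensed sketch of the restart check (toy bitmap types in place of the dlm structs):

    #include <string.h>

    #define MAX_NODES_WORDS 4

    struct join_ctxt {
        unsigned long live_map[MAX_NODES_WORDS];     /* membership snapshot */
        unsigned long yes_resp_map[MAX_NODES_WORDS]; /* nodes that said yes */
    };

    /* restart the join if membership changed since the snapshot was taken */
    static int should_restart_join(const struct join_ctxt *ctxt,
                                   const unsigned long *live_now)
    {
        return memcmp(ctxt->live_map, live_now, sizeof(ctxt->live_map)) != 0;
    }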
/linux-4.1.27/tools/testing/selftests/powerpc/mm/
subpage_prot.c:39 ucontext_t *ctxt = (ucontext_t *)ctxt_v; segv() local
40 struct pt_regs *regs = ctxt->uc_mcontext.regs; segv()
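The selftest's segv() handler works because SA_SIGINFO handlers receive the interrupted machine state as an opaque void * that is really a ucontext_t. A standalone sketch of the wiring (the uc_mcontext layout is arch-specific; the regs field exists on powerpc as in the hit above):

    #include <signal.h>
    #include <ucontext.h>
    #include <unistd.h>

    static void segv(int sig, siginfo_t *si, void *ctxt_v)
    {
        ucontext_t *ctxt = (ucontext_t *)ctxt_v;  /* machine state at fault */

        (void)sig;
        (void)si;
        (void)ctxt;  /* e.g. ctxt->uc_mcontext.regs->nip on powerpc */
        _exit(1);
    }

    int main(void)
    {
        struct sigaction sa = { .sa_sigaction = segv, .sa_flags = SA_SIGINFO };

        sigaction(SIGSEGV, &sa, NULL);
        *(volatile int *)0 = 0;  /* deliberately fault to run the handler */
        return 0;
    }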
/linux-4.1.27/drivers/net/ethernet/intel/ixgbevf/
defines.h:266 #define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
267 #define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
268 #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
269 #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
/linux-4.1.27/arch/cris/include/arch-v32/arch/hwregs/
dma_defs.h:332 unsigned int ctxt : 1; member in struct:__anon436
344 unsigned int ctxt : 1; member in struct:__anon437
356 unsigned int ctxt : 1; member in struct:__anon438
367 unsigned int ctxt : 1; member in struct:__anon439
/linux-4.1.27/drivers/net/ethernet/intel/igbvf/
vf.h:126 #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
129 #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
130 #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
/linux-4.1.27/drivers/staging/lustre/lustre/mgc/
mgc_request.c:646 struct llog_ctxt *ctxt; mgc_llog_init() local
649 /* setup only remote ctxt, the local disk context is switched per each mgc_llog_init()
656 ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT); mgc_llog_init()
657 LASSERT(ctxt); mgc_llog_init()
659 llog_initiator_connect(ctxt); mgc_llog_init()
660 llog_ctxt_put(ctxt); mgc_llog_init()
667 struct llog_ctxt *ctxt; mgc_llog_fini() local
669 ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT); mgc_llog_fini()
670 if (ctxt) mgc_llog_fini()
671 llog_cleanup(env, ctxt); mgc_llog_fini()
1478 struct llog_ctxt *ctxt; mgc_process_cfg_log() local
1505 ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT); mgc_process_cfg_log()
1506 LASSERT(ctxt); mgc_process_cfg_log()
1521 rc = class_config_parse_llog(env, ctxt, cld->cld_logname, mgc_process_cfg_log()
1525 __llog_ctxt_put(env, ctxt); mgc_process_cfg_log()
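Both mgc hits pair llog_get_context() with llog_ctxt_put(), and the mdc hits further down do the same: the context is refcounted, so every successful get must be matched by a put on every exit path. A hedged sketch of that discipline (the wrapper is illustrative; only the get/put pairing is taken from the hits above):

    #include <errno.h>

    struct obd_device;
    struct llog_ctxt;

    /* real lustre helpers, per the hits above; prototypes abbreviated */
    struct llog_ctxt *llog_get_context(struct obd_device *obd, int index);
    void llog_ctxt_put(struct llog_ctxt *ctxt);

    /* run fn with a held context reference, releasing it on all paths */
    static int with_llog_ctxt(struct obd_device *obd, int index,
                              int (*fn)(struct llog_ctxt *))
    {
        struct llog_ctxt *ctxt = llog_get_context(obd, index);
        int rc;

        if (!ctxt)
            return -ENOENT;
        rc = fn(ctxt);
        llog_ctxt_put(ctxt);
        return rc;
    }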
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
i40e_main.c:1478 * @ctxt: VSI context structure
1486 struct i40e_vsi_context *ctxt, i40e_vsi_setup_queue_map()
1491 struct i40e_vsi_context *ctxt, i40e_vsi_setup_queue_map()
1586 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); i40e_vsi_setup_queue_map()
1602 ctxt->info.up_enable_bits = enabled_tc; i40e_vsi_setup_queue_map()
1605 ctxt->info.mapping_flags |= i40e_vsi_setup_queue_map()
1608 ctxt->info.queue_mapping[i] = i40e_vsi_setup_queue_map()
1611 ctxt->info.mapping_flags |= i40e_vsi_setup_queue_map()
1613 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); i40e_vsi_setup_queue_map()
1615 ctxt->info.valid_sections |= cpu_to_le16(sections); i40e_vsi_setup_queue_map()
1975 struct i40e_vsi_context ctxt; i40e_vlan_stripping_enable() local
1987 ctxt.seid = vsi->seid; i40e_vlan_stripping_enable()
1988 ctxt.info = vsi->info; i40e_vlan_stripping_enable()
1989 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); i40e_vlan_stripping_enable()
2003 struct i40e_vsi_context ctxt; i40e_vlan_stripping_disable() local
2016 ctxt.seid = vsi->seid; i40e_vlan_stripping_disable()
2017 ctxt.info = vsi->info; i40e_vlan_stripping_disable()
2018 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); i40e_vlan_stripping_disable()
2291 struct i40e_vsi_context ctxt; i40e_vsi_add_pvid() local
2300 ctxt.seid = vsi->seid; i40e_vsi_add_pvid()
2301 ctxt.info = vsi->info; i40e_vsi_add_pvid()
2302 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); i40e_vsi_add_pvid()
4355 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4358 struct i40e_vsi_context *ctxt) i40e_vsi_update_queue_map()
4364 vsi->info.mapping_flags = ctxt->info.mapping_flags; i40e_vsi_update_queue_map()
4366 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); i40e_vsi_update_queue_map()
4367 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, i40e_vsi_update_queue_map()
4387 struct i40e_vsi_context ctxt; i40e_vsi_config_tc() local
4410 ctxt.seid = vsi->seid; i40e_vsi_config_tc()
4411 ctxt.pf_num = vsi->back->hw.pf_id; i40e_vsi_config_tc()
4412 ctxt.vf_num = 0; i40e_vsi_config_tc()
4413 ctxt.uplink_seid = vsi->uplink_seid; i40e_vsi_config_tc()
4414 ctxt.info = vsi->info; i40e_vsi_config_tc()
4415 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); i40e_vsi_config_tc()
4418 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); i40e_vsi_config_tc()
4426 i40e_vsi_update_queue_map(vsi, &ctxt); i40e_vsi_config_tc()
5975 struct i40e_vsi_context ctxt; i40e_enable_pf_switch_lb() local
5978 ctxt.seid = pf->main_vsi_seid; i40e_enable_pf_switch_lb()
5979 ctxt.pf_num = pf->hw.pf_id; i40e_enable_pf_switch_lb()
5980 ctxt.vf_num = 0; i40e_enable_pf_switch_lb()
5981 aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); i40e_enable_pf_switch_lb()
5988 ctxt.flags = I40E_AQ_VSI_TYPE_PF; i40e_enable_pf_switch_lb()
5989 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); i40e_enable_pf_switch_lb()
5990 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); i40e_enable_pf_switch_lb()
5992 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); i40e_enable_pf_switch_lb()
6009 struct i40e_vsi_context ctxt; i40e_disable_pf_switch_lb() local
6012 ctxt.seid = pf->main_vsi_seid; i40e_disable_pf_switch_lb()
6013 ctxt.pf_num = pf->hw.pf_id; i40e_disable_pf_switch_lb()
6014 ctxt.vf_num = 0; i40e_disable_pf_switch_lb()
6015 aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); i40e_disable_pf_switch_lb()
6022 ctxt.flags = I40E_AQ_VSI_TYPE_PF; i40e_disable_pf_switch_lb()
6023 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); i40e_disable_pf_switch_lb()
6024 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); i40e_disable_pf_switch_lb()
6026 aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); i40e_disable_pf_switch_lb()
8281 struct i40e_vsi_context ctxt; i40e_add_vsi() local
8285 memset(&ctxt, 0, sizeof(ctxt)); i40e_add_vsi()
8293 ctxt.seid = pf->main_vsi_seid; i40e_add_vsi()
8294 ctxt.pf_num = pf->hw.pf_id; i40e_add_vsi()
8295 ctxt.vf_num = 0; i40e_add_vsi()
8296 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); i40e_add_vsi()
8297 ctxt.flags = I40E_AQ_VSI_TYPE_PF; i40e_add_vsi()
8304 vsi->info = ctxt.info; i40e_add_vsi()
8307 vsi->seid = ctxt.seid; i40e_add_vsi()
8308 vsi->id = ctxt.vsi_number; i40e_add_vsi()
8315 memset(&ctxt, 0, sizeof(ctxt)); i40e_add_vsi()
8316 ctxt.seid = pf->main_vsi_seid; i40e_add_vsi()
8317 ctxt.pf_num = pf->hw.pf_id; i40e_add_vsi()
8318 ctxt.vf_num = 0; i40e_add_vsi()
8319 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); i40e_add_vsi()
8320 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); i40e_add_vsi()
8329 i40e_vsi_update_queue_map(vsi, &ctxt); i40e_add_vsi()
8350 ctxt.pf_num = hw->pf_id; i40e_add_vsi()
8351 ctxt.vf_num = 0; i40e_add_vsi()
8352 ctxt.uplink_seid = vsi->uplink_seid; i40e_add_vsi()
8353 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; i40e_add_vsi()
8354 ctxt.flags = I40E_AQ_VSI_TYPE_PF; i40e_add_vsi()
8357 ctxt.info.valid_sections |= i40e_add_vsi()
8359 ctxt.info.switch_id = i40e_add_vsi()
8362 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); i40e_add_vsi()
8366 ctxt.pf_num = hw->pf_id; i40e_add_vsi()
8367 ctxt.vf_num = 0; i40e_add_vsi()
8368 ctxt.uplink_seid = vsi->uplink_seid; i40e_add_vsi()
8369 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; i40e_add_vsi()
8370 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; i40e_add_vsi()
8376 ctxt.info.valid_sections |= i40e_add_vsi()
8378 ctxt.info.switch_id = i40e_add_vsi()
8383 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); i40e_add_vsi()
8387 ctxt.pf_num = hw->pf_id; i40e_add_vsi()
8388 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; i40e_add_vsi()
8389 ctxt.uplink_seid = vsi->uplink_seid; i40e_add_vsi()
8390 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; i40e_add_vsi()
8391 ctxt.flags = I40E_AQ_VSI_TYPE_VF; i40e_add_vsi()
8397 ctxt.info.valid_sections |= i40e_add_vsi()
8399 ctxt.info.switch_id = i40e_add_vsi()
8403 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); i40e_add_vsi()
8404 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; i40e_add_vsi()
8406 ctxt.info.valid_sections |= i40e_add_vsi()
8408 ctxt.info.sec_flags |= i40e_add_vsi()
8413 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); i40e_add_vsi()
8418 ret = i40e_fcoe_vsi_init(vsi, &ctxt); i40e_add_vsi()
8431 ret = i40e_aq_add_vsi(hw, &ctxt, NULL); i40e_add_vsi()
8439 vsi->info = ctxt.info; i40e_add_vsi()
8441 vsi->seid = ctxt.seid; i40e_add_vsi()
8442 vsi->id = ctxt.vsi_number; i40e_add_vsi()
1485 i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt, u8 enabled_tc, bool is_add) i40e_vsi_setup_queue_map() argument
4357 i40e_vsi_update_queue_map(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) i40e_vsi_update_queue_map() argument
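Nearly every i40e_main.c hit is an instance of one read-modify-write pattern: zero a stack ctxt, identify the VSI via seid/pf_num, fetch its current params, change only the sections flagged in valid_sections, and write the ctxt back. A condensed sketch, modeled on the i40e_enable_pf_switch_lb() hits above (return type simplified to int):

    static int vsi_allow_loopback(struct i40e_hw *hw, u16 seid, u8 pf_num)
    {
        struct i40e_vsi_context ctxt;
        int ret;

        memset(&ctxt, 0, sizeof(ctxt));
        ctxt.seid = seid;               /* which VSI we are talking about */
        ctxt.pf_num = pf_num;
        ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);    /* read */
        if (ret)
            return ret;

        /* modify: flag only the switch section as valid */
        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
        ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

        return i40e_aq_update_vsi_params(hw, &ctxt, NULL); /* write back */
    }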
i40e_fcoe.c:362 * @ctxt: pointer to the associated VSI context to be passed to HW
366 int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) i40e_fcoe_vsi_init() argument
368 struct i40e_aqc_vsi_properties_data *info = &ctxt->info; i40e_fcoe_vsi_init()
380 ctxt->pf_num = hw->pf_id; i40e_fcoe_vsi_init()
381 ctxt->vf_num = 0; i40e_fcoe_vsi_init()
382 ctxt->uplink_seid = vsi->uplink_seid; i40e_fcoe_vsi_init()
383 ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; i40e_fcoe_vsi_init()
384 ctxt->flags = I40E_AQ_VSI_TYPE_PF; i40e_fcoe_vsi_init()
403 i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); i40e_fcoe_vsi_init()
743 /* fetch xid from hw rxd wb, which should match up the sw ctxt */ i40e_fcoe_handle_offload()
1482 * @ctxt: pointer to the associated VSI context to be passed to HW
i40e_virtchnl_pf.c:2369 struct i40e_vsi_context ctxt; i40e_ndo_set_vf_spoofchk() local
2387 memset(&ctxt, 0, sizeof(ctxt)); i40e_ndo_set_vf_spoofchk()
2388 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; i40e_ndo_set_vf_spoofchk()
2389 ctxt.pf_num = pf->hw.pf_id; i40e_ndo_set_vf_spoofchk()
2390 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); i40e_ndo_set_vf_spoofchk()
2392 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | i40e_ndo_set_vf_spoofchk()
2394 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); i40e_ndo_set_vf_spoofchk()
i40e.h:663 struct i40e_vsi_context *ctxt,
723 int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
/linux-4.1.27/arch/arm64/kvm/
regmap.c:114 unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs; vcpu_reg32()
sys_regs.c:1510 memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); kvm_reset_sys_regs()
/linux-4.1.27/arch/m68k/include/asm/
openprom.h:176 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
244 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
/linux-4.1.27/drivers/staging/lustre/lustre/mdc/
mdc_request.c:1599 struct llog_ctxt *ctxt = NULL; mdc_changelog_send_thread() local
1614 ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT); mdc_changelog_send_thread()
1615 if (ctxt == NULL) { mdc_changelog_send_thread()
1619 rc = llog_open(NULL, ctxt, &llh, NULL, CHANGELOG_CATALOG, mdc_changelog_send_thread()
1645 if (ctxt) mdc_changelog_send_thread()
1646 llog_ctxt_put(ctxt); mdc_changelog_send_thread()
2403 struct llog_ctxt *ctxt; mdc_llog_init() local
2411 ctxt = llog_group_get_ctxt(olg, LLOG_CHANGELOG_REPL_CTXT); mdc_llog_init()
2412 llog_initiator_connect(ctxt); mdc_llog_init()
2413 llog_ctxt_put(ctxt); mdc_llog_init()
2420 struct llog_ctxt *ctxt; mdc_llog_finish() local
2422 ctxt = llog_get_context(obd, LLOG_CHANGELOG_REPL_CTXT); mdc_llog_finish()
2423 if (ctxt) mdc_llog_finish()
2424 llog_cleanup(NULL, ctxt); mdc_llog_finish()
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/
fault.c:161 * Clear dsisr under ctxt lock after handling the fault, so that spufs_handle_class1()
/linux-4.1.27/arch/arc/include/asm/
cmpxchg.h:154 * UP : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
mmu_context.h:30 * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
/linux-4.1.27/net/ipv4/
sysctl_net_ipv4.c:208 struct tcp_fastopen_context *ctxt; proc_tcp_fastopen_key() local
217 ctxt = rcu_dereference(tcp_fastopen_ctx); proc_tcp_fastopen_key()
218 if (ctxt) proc_tcp_fastopen_key()
219 memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH); proc_tcp_fastopen_key()
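The proc_tcp_fastopen_key() hit is the classic RCU reader: the shared ctxt pointer is only valid inside the read-side critical section, so the key is copied out before the lock is dropped. The surrounding pattern, reconstructed from the snippet (the rcu_read_lock()/unlock() lines are implied by rcu_dereference(), not shown in the hit):

    /* reconstructed shape of proc_tcp_fastopen_key()'s read side */
    static void copy_fastopen_key(u8 *user_key)
    {
        struct tcp_fastopen_context *ctxt;

        rcu_read_lock();
        ctxt = rcu_dereference(tcp_fastopen_ctx);  /* valid only under the lock */
        if (ctxt)
            memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
        rcu_read_unlock();  /* ctxt must not be used past this point */
    }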
/linux-4.1.27/scripts/
coccicheck:162 elif [ "$MODE" = "rep+ctxt" ] ; then
/linux-4.1.27/drivers/xen/
xen-acpi-memhotplug.c:376 u32 level, void *ctxt, void **retv) acpi_memory_register_notify_handler()
392 u32 level, void *ctxt, void **retv) acpi_memory_deregister_notify_handler()
375 acpi_memory_register_notify_handler(acpi_handle handle, u32 level, void *ctxt, void **retv) acpi_memory_register_notify_handler() argument
391 acpi_memory_deregister_notify_handler(acpi_handle handle, u32 level, void *ctxt, void **retv) acpi_memory_deregister_notify_handler() argument
/linux-4.1.27/drivers/media/usb/pvrusb2/
pvrusb2-encoder.c:141 static int pvr2_encoder_cmd(void *ctxt, pvr2_encoder_cmd() argument
155 struct pvr2_hdw *hdw = (struct pvr2_hdw *)ctxt; pvr2_encoder_cmd()
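pvr2_encoder_cmd() (and sti_hqvdp_start_xp70() further down) use the same opaque-cookie convention: the callback gets back the void *ctxt it was registered with and casts it to its real type. A standalone toy of the convention (all names illustrative):

    #include <stdio.h>

    struct hdw { int id; };

    /* callback signature takes an opaque cookie... */
    static int encoder_cmd(void *ctxt, int cmd)
    {
        struct hdw *hdw = ctxt;  /* ...which we cast back to the real object */

        printf("cmd %d on hdw %d\n", cmd, hdw->id);
        return 0;
    }

    int main(void)
    {
        struct hdw h = { .id = 7 };

        return encoder_cmd(&h, 1);  /* registration-time cookie == &h */
    }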
/linux-4.1.27/fs/ocfs2/cluster/
netdebug.c:399 static int sc_common_open(struct file *file, int ctxt) sc_common_open() argument
414 sd->dbg_ctxt = ctxt; sc_common_open()
/linux-4.1.27/security/selinux/ss/
services.c:2977 struct context *ctxt; selinux_audit_rule_match() local
2994 ctxt = sidtab_search(&sidtab, sid); selinux_audit_rule_match()
2995 if (unlikely(!ctxt)) { selinux_audit_rule_match()
3009 match = (ctxt->user == rule->au_ctxt.user); selinux_audit_rule_match()
3012 match = (ctxt->user != rule->au_ctxt.user); selinux_audit_rule_match()
3020 match = (ctxt->role == rule->au_ctxt.role); selinux_audit_rule_match()
3023 match = (ctxt->role != rule->au_ctxt.role); selinux_audit_rule_match()
3031 match = (ctxt->type == rule->au_ctxt.type); selinux_audit_rule_match()
3034 match = (ctxt->type != rule->au_ctxt.type); selinux_audit_rule_match()
3044 &ctxt->range.level[0] : &ctxt->range.level[1]); selinux_audit_rule_match()
/linux-4.1.27/arch/sparc/include/asm/
openprom.h:141 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
/linux-4.1.27/arch/arm64/kernel/
asm-offsets.c:107 DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); main()
/linux-4.1.27/include/linux/sunrpc/
svc_rdma.h:220 extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
/linux-4.1.27/drivers/gpu/drm/sti/
sti_hqvdp.c:844 * @ctxt: hqvdp structure
848 static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt) sti_hqvdp_start_xp70() argument
850 struct sti_hqvdp *hqvdp = ctxt; sti_hqvdp_start_xp70()
/linux-4.1.27/fs/cifs/
cifsglob.h:121 struct sdesc *sdeschmacmd5; /* ctxt to generate ntlmv2 hash, CR1 */
122 struct sdesc *sdescmd5; /* ctxt to generate cifs/smb signature */
123 struct sdesc *sdeschmacsha256; /* ctxt to generate smb2 signature */
124 struct sdesc *sdesccmacaes; /* ctxt to generate smb3 signature */
/linux-4.1.27/drivers/gpu/drm/omapdrm/
omap_crtc.c:601 /* avoid assumptions about what ctxt we are called from: */ page_flip_cb()
omap_gem.c:897 * acquired if !remap (because this can be called in atomic ctxt),
/linux-4.1.27/drivers/scsi/aacraid/
aacraid.h:851 typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
2110 int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/
ixgbe_type.h:2575 #define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
2576 #define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
2596 #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
2597 #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
/linux-4.1.27/arch/arc/mm/
tlb.c:248 * Only for fork( ) do we need to move parent to a new MMU ctxt, local_flush_tlb_mm()
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/
htc_mbox.c:428 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n", htc_tx_comp_update()
1238 "htc tx activity ctxt 0x%p dist 0x%p\n", ath6kl_htc_mbox_activity_changed()
/linux-4.1.27/drivers/hsi/controllers/
omap_ssi_port.c:429 pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n", list_for_each_safe()
/linux-4.1.27/fs/ext4/
namei.c:619 " crypto ctxt--skipping crypto\n"); dx_show_leaf()

Completed in 2816 milliseconds