Lines matching refs:seg
524 } seg[8]; member
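The seg[8] member above is the VMX segment cache kept in struct vcpu_vmx. A minimal sketch of the enclosing structure, reconstructed from the accessors listed further down (the inner struct name and the bitmask comment are assumptions):

	struct {
		u32 bitmask;	/* one valid bit per cached field, per segment */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];	/* indexed by VCPU_SREG_* */
	} segment_cache;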
815 struct kvm_segment *var, int seg);
817 struct kvm_segment *var, int seg);
866 #define VMX_SEGMENT_FIELD(seg) \ argument
867 [VCPU_SREG_##seg] = { \
868 .selector = GUEST_##seg##_SELECTOR, \
869 .base = GUEST_##seg##_BASE, \
870 .limit = GUEST_##seg##_LIMIT, \
871 .ar_bytes = GUEST_##seg##_AR_BYTES, \
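The macro above builds one entry of a lookup table translating VCPU_SREG_* indices into VMCS field encodings. A sketch of how it is typically instantiated (the struct layout is inferred from the .selector/.base/.limit/.ar_bytes accesses in the cached readers below):

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};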
1514 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, in vmx_segment_cache_test_set() argument
1518 u32 mask = 1 << (seg * SEG_FIELD_NR + field); in vmx_segment_cache_test_set()
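Each (segment, field) pair gets its own bit in the cache bitmask, computed above as seg * SEG_FIELD_NR + field. A sketch of the test-and-set helper that the cached readers rely on (only the mask computation appears in the listing; the return-old-then-mark-cached behaviour is an assumption based on how the readers use it):

static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
{
	bool ret;
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);

	/* Report whether the field is already cached, and mark it cached
	 * either way so the caller fills it in exactly once per miss. */
	ret = vmx->segment_cache.bitmask & mask;
	vmx->segment_cache.bitmask |= mask;
	return ret;
}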
1529 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_selector() argument
1531 u16 *p = &vmx->segment_cache.seg[seg].selector; in vmx_read_guest_seg_selector()
1533 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) in vmx_read_guest_seg_selector()
1534 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); in vmx_read_guest_seg_selector()
1538 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_base() argument
1540 ulong *p = &vmx->segment_cache.seg[seg].base; in vmx_read_guest_seg_base()
1542 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) in vmx_read_guest_seg_base()
1543 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); in vmx_read_guest_seg_base()
1547 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_limit() argument
1549 u32 *p = &vmx->segment_cache.seg[seg].limit; in vmx_read_guest_seg_limit()
1551 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) in vmx_read_guest_seg_limit()
1552 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); in vmx_read_guest_seg_limit()
1556 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_ar() argument
1558 u32 *p = &vmx->segment_cache.seg[seg].ar; in vmx_read_guest_seg_ar()
1560 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) in vmx_read_guest_seg_ar()
1561 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); in vmx_read_guest_seg_ar()
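All four readers above follow the same lazy pattern: if the field's cache bit is clear, read it from the VMCS into the cache slot, then return the cached value. The selector variant, assembled from the listed lines (only the final return is added here):

static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}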
3277 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, in fix_pmode_seg() argument
3288 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) in fix_pmode_seg()
3293 vmx_set_segment(vcpu, save, seg); in fix_pmode_seg()
3336 static void fix_rmode_seg(int seg, struct kvm_segment *save) in fix_rmode_seg() argument
3338 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; in fix_rmode_seg()
3342 if (seg == VCPU_SREG_CS) in fix_rmode_seg()
3360 "protected mode (seg=%d)", seg); in fix_rmode_seg()
3693 struct kvm_segment *var, int seg) in vmx_get_segment() argument
3698 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_get_segment()
3699 *var = vmx->rmode.segs[seg]; in vmx_get_segment()
3700 if (seg == VCPU_SREG_TR in vmx_get_segment()
3701 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) in vmx_get_segment()
3703 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3704 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3707 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3708 var->limit = vmx_read_guest_seg_limit(vmx, seg); in vmx_get_segment()
3709 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3710 ar = vmx_read_guest_seg_ar(vmx, seg); in vmx_get_segment()
3729 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) in vmx_get_segment_base() argument
3734 vmx_get_segment(vcpu, &s, seg); in vmx_get_segment_base()
3737 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); in vmx_get_segment_base()
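vmx_get_segment_base() shows the fast path the cache enables: outside vm86 mode a single cached base read is enough, and the full vmx_get_segment() walk is only needed when the real-mode shadow state may diverge from the VMCS. A sketch joining the three listed lines (the vm86_active test is inferred from the surrounding functions):

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment s;

	if (to_vmx(vcpu)->rmode.vm86_active) {
		vmx_get_segment(vcpu, &s, seg);
		return s.base;
	}
	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
}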
3773 struct kvm_segment *var, int seg) in vmx_set_segment() argument
3776 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; in vmx_set_segment()
3780 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_set_segment()
3781 vmx->rmode.segs[seg] = *var; in vmx_set_segment()
3782 if (seg == VCPU_SREG_TR) in vmx_set_segment()
3785 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); in vmx_set_segment()
3804 if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) in vmx_set_segment()
3845 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) in rmode_segment_valid() argument
3850 vmx_get_segment(vcpu, &var, seg); in rmode_segment_valid()
3852 if (seg == VCPU_SREG_CS) in rmode_segment_valid()
3916 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) in data_segment_valid() argument
3921 vmx_get_segment(vcpu, &var, seg); in data_segment_valid()
4116 static void seg_setup(int seg) in seg_setup() argument
4118 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; in seg_setup()
4125 if (seg == VCPU_SREG_CS) in seg_setup()
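seg_setup() gives a segment the flat shape used at vCPU reset: zero selector and base, 64 KiB limit, present writable data segment, with the code bit added for CS. A sketch consistent with the two lines shown (the literal limit and access-rights values are assumptions):

static void seg_setup(int seg)
{
	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	unsigned int ar;

	vmcs_write16(sf->selector, 0);
	vmcs_writel(sf->base, 0);
	vmcs_write32(sf->limit, 0xffff);
	ar = 0x93;			/* present, accessed, writable data */
	if (seg == VCPU_SREG_CS)
		ar |= 0x08;		/* mark CS as a code segment */

	vmcs_write32(sf->ar_bytes, ar);
}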
9877 struct kvm_segment seg; in load_vmcs12_host_state() local
9951 seg = (struct kvm_segment) { in load_vmcs12_host_state()
9961 seg.l = 1; in load_vmcs12_host_state()
9963 seg.db = 1; in load_vmcs12_host_state()
9964 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); in load_vmcs12_host_state()
9965 seg = (struct kvm_segment) { in load_vmcs12_host_state()
9974 seg.selector = vmcs12->host_ds_selector; in load_vmcs12_host_state()
9975 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); in load_vmcs12_host_state()
9976 seg.selector = vmcs12->host_es_selector; in load_vmcs12_host_state()
9977 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); in load_vmcs12_host_state()
9978 seg.selector = vmcs12->host_ss_selector; in load_vmcs12_host_state()
9979 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); in load_vmcs12_host_state()
9980 seg.selector = vmcs12->host_fs_selector; in load_vmcs12_host_state()
9981 seg.base = vmcs12->host_fs_base; in load_vmcs12_host_state()
9982 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); in load_vmcs12_host_state()
9983 seg.selector = vmcs12->host_gs_selector; in load_vmcs12_host_state()
9984 seg.base = vmcs12->host_gs_base; in load_vmcs12_host_state()
9985 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); in load_vmcs12_host_state()
9986 seg = (struct kvm_segment) { in load_vmcs12_host_state()
9993 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); in load_vmcs12_host_state()
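On a nested VM-exit the L1 host segments are rebuilt from vmcs12 rather than read back from hardware: CS gets a flat code descriptor whose L/DB bit follows the host address-space size, the data segments share one flat writable template and differ only in selector (plus base for FS and GS), and TR is loaded last. A condensed sketch of the CS and data templates built above (descriptor type values and the exit-control test are assumptions; the selectors and the seg.l/seg.db choice appear in the listing):

	struct kvm_segment seg;

	/* Flat 4 GiB code segment for the L1 host CS */
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.selector = vmcs12->host_cs_selector,
		.type = 11,		/* execute/read, accessed */
		.present = 1,
		.s = 1,
		.g = 1
	};
	if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		seg.l = 1;		/* 64-bit host */
	else
		seg.db = 1;		/* 32-bit host */
	vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);

	/* One flat writable data template, reused for DS/ES/SS/FS/GS */
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.type = 3,		/* read/write, accessed */
		.present = 1,
		.s = 1,
		.db = 1,
		.g = 1
	};
	seg.selector = vmcs12->host_ds_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);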