Lines Matching refs:seg

Each entry below gives the source-file line number, the matching source line, and either the enclosing function ("in foo()") or the identifier's role (argument, member, local). Judging by the function names (vmx_get_segment, load_vmcs12_host_state, ...), the matches appear to come from KVM's VMX code, arch/x86/kvm/vmx.c, in a tree where the nested-VMX code still lived in that one file.

574 		} seg[8];  member
870 struct kvm_segment *var, int seg);
872 struct kvm_segment *var, int seg);
928 #define VMX_SEGMENT_FIELD(seg) \ argument
929 [VCPU_SREG_##seg] = { \
930 .selector = GUEST_##seg##_SELECTOR, \
931 .base = GUEST_##seg##_BASE, \
932 .limit = GUEST_##seg##_LIMIT, \
933 .ar_bytes = GUEST_##seg##_AR_BYTES, \
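
The VMX_SEGMENT_FIELD(seg) entries above build the kvm_vmx_segment_fields[] lookup table: one macro argument is token-pasted into both the VCPU_SREG_* array index and the GUEST_*_SELECTOR/_BASE/_LIMIT/_AR_BYTES field names. A minimal standalone sketch of that pattern, with made-up SREG_* indices, only two of the fields, and field encodings used purely for illustration (not taken from the listing):

#include <stdio.h>

enum { SREG_CS, SREG_DS, NR_SREGS };

/* Illustrative stand-ins for the per-segment VMCS field encodings. */
enum {
	GUEST_CS_SELECTOR = 0x802, GUEST_CS_BASE = 0x6808,
	GUEST_DS_SELECTOR = 0x806, GUEST_DS_BASE = 0x680c,
};

struct segment_field {
	unsigned selector;
	unsigned base;
};

/* One macro argument is token-pasted into both the array index and the
 * per-field constants, mirroring the shape of VMX_SEGMENT_FIELD(seg). */
#define SEGMENT_FIELD(seg)				\
	[SREG_##seg] = {				\
		.selector = GUEST_##seg##_SELECTOR,	\
		.base = GUEST_##seg##_BASE,		\
	}

static const struct segment_field segment_fields[NR_SREGS] = {
	SEGMENT_FIELD(CS),
	SEGMENT_FIELD(DS),
};

int main(void)
{
	printf("CS selector field: 0x%x\n", segment_fields[SREG_CS].selector);
	return 0;
}
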
1588 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, in vmx_segment_cache_test_set() argument
1592 u32 mask = 1 << (seg * SEG_FIELD_NR + field); in vmx_segment_cache_test_set()
1603 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_selector() argument
1605 u16 *p = &vmx->segment_cache.seg[seg].selector; in vmx_read_guest_seg_selector()
1607 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) in vmx_read_guest_seg_selector()
1608 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector); in vmx_read_guest_seg_selector()
1612 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_base() argument
1614 ulong *p = &vmx->segment_cache.seg[seg].base; in vmx_read_guest_seg_base()
1616 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) in vmx_read_guest_seg_base()
1617 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base); in vmx_read_guest_seg_base()
1621 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_limit() argument
1623 u32 *p = &vmx->segment_cache.seg[seg].limit; in vmx_read_guest_seg_limit()
1625 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) in vmx_read_guest_seg_limit()
1626 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit); in vmx_read_guest_seg_limit()
1630 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_ar() argument
1632 u32 *p = &vmx->segment_cache.seg[seg].ar; in vmx_read_guest_seg_ar()
1634 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) in vmx_read_guest_seg_ar()
1635 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes); in vmx_read_guest_seg_ar()
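
The four vmx_read_guest_seg_*() helpers above share one pattern: vmx_segment_cache_test_set() computes a per-(segment, field) bit, reports whether that field was already cached, and marks it cached either way, so each VMCS field is read at most once until the cache is invalidated. A self-contained sketch of that test-and-set caching idea, covering only the selector field; fake_vmcs_read16() and the sizes here are assumptions for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEG_FIELD_SEL	0
#define SEG_FIELD_NR	4	/* selector, base, limit, ar */

struct seg_cache {
	uint32_t bitmask;	/* one bit per (segment, field) pair */
	uint16_t selector[8];
};

/* Stand-in for the (comparatively expensive) vmcs_read16() access. */
static uint16_t fake_vmcs_read16(unsigned seg)
{
	printf("VMCS read for seg %u\n", seg);
	return 0x10 + (uint16_t)seg;
}

/* Returns true if the (seg, field) entry was already cached, and marks
 * it cached either way - mirrors vmx_segment_cache_test_set(). */
static bool cache_test_set(struct seg_cache *c, unsigned seg, unsigned field)
{
	uint32_t mask = 1u << (seg * SEG_FIELD_NR + field);
	bool hit = c->bitmask & mask;

	c->bitmask |= mask;
	return hit;
}

static uint16_t read_seg_selector(struct seg_cache *c, unsigned seg)
{
	uint16_t *p = &c->selector[seg];

	if (!cache_test_set(c, seg, SEG_FIELD_SEL))
		*p = fake_vmcs_read16(seg);	/* miss: fill from "hardware" */
	return *p;				/* hit: return cached value */
}

int main(void)
{
	struct seg_cache c = { 0 };

	read_seg_selector(&c, 1);	/* triggers the VMCS read */
	read_seg_selector(&c, 1);	/* served from the cache */
	return 0;
}
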
3414 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, in fix_pmode_seg() argument
3425 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) in fix_pmode_seg()
3430 vmx_set_segment(vcpu, save, seg); in fix_pmode_seg()
3473 static void fix_rmode_seg(int seg, struct kvm_segment *save) in fix_rmode_seg() argument
3475 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; in fix_rmode_seg()
3479 if (seg == VCPU_SREG_CS) in fix_rmode_seg()
3497 "protected mode (seg=%d)", seg); in fix_rmode_seg()
3835 struct kvm_segment *var, int seg) in vmx_get_segment() argument
3840 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_get_segment()
3841 *var = vmx->rmode.segs[seg]; in vmx_get_segment()
3842 if (seg == VCPU_SREG_TR in vmx_get_segment()
3843 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) in vmx_get_segment()
3845 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3846 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3849 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3850 var->limit = vmx_read_guest_seg_limit(vmx, seg); in vmx_get_segment()
3851 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3852 ar = vmx_read_guest_seg_ar(vmx, seg); in vmx_get_segment()
3871 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) in vmx_get_segment_base() argument
3876 vmx_get_segment(vcpu, &s, seg); in vmx_get_segment_base()
3879 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); in vmx_get_segment_base()
3915 struct kvm_segment *var, int seg) in vmx_set_segment() argument
3918 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; in vmx_set_segment()
3922 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_set_segment()
3923 vmx->rmode.segs[seg] = *var; in vmx_set_segment()
3924 if (seg == VCPU_SREG_TR) in vmx_set_segment()
3927 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); in vmx_set_segment()
3946 if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) in vmx_set_segment()
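
vmx_get_segment() and vmx_set_segment() above treat vm86 real mode specially: the guest-visible segment state is kept in the vmx->rmode.segs[] shadow, fix_rmode_seg() derives values the hardware will accept for the VMCS, and reads in that mode come back from the shadow rather than from the VMCS. A much-simplified sketch of that shadow-plus-fixup split; the types are invented, and the fixup here just applies the real-mode base = selector * 16 rule, which is not exactly what fix_rmode_seg() does:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct segment { uint16_t selector; uint32_t base, limit; };

static bool vm86_active;
static struct segment shadow[6];	/* what the guest asked for */
static struct segment hw[6];		/* fixed-up values "in the VMCS" */

/* Stand-in for fix_rmode_seg(): derive hardware-legal values. */
static struct segment fixup(struct segment s)
{
	s.base = (uint32_t)s.selector << 4;	/* real-mode addressing rule */
	s.limit = 0xffff;
	return s;
}

static void set_segment(unsigned seg, struct segment var)
{
	if (vm86_active) {
		shadow[seg] = var;	/* remember the guest-visible state */
		hw[seg] = fixup(var);	/* load something the hardware accepts */
		return;
	}
	hw[seg] = var;
}

static struct segment get_segment(unsigned seg)
{
	/* In vm86 mode, report the shadow, not the fixed-up hardware state. */
	return vm86_active ? shadow[seg] : hw[seg];
}

int main(void)
{
	vm86_active = true;
	set_segment(0, (struct segment){ .selector = 0x1234 });
	printf("guest sees base 0x%x, hardware holds base 0x%x\n",
	       (unsigned)get_segment(0).base, (unsigned)hw[0].base);
	return 0;
}
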
3987 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) in rmode_segment_valid() argument
3992 vmx_get_segment(vcpu, &var, seg); in rmode_segment_valid()
3994 if (seg == VCPU_SREG_CS) in rmode_segment_valid()
4058 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) in data_segment_valid() argument
4063 vmx_get_segment(vcpu, &var, seg); in data_segment_valid()
4258 static void seg_setup(int seg) in seg_setup() argument
4260 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; in seg_setup()
4267 if (seg == VCPU_SREG_CS) in seg_setup()
10289 struct kvm_segment seg; in load_vmcs12_host_state() local
10363 seg = (struct kvm_segment) { in load_vmcs12_host_state()
10373 seg.l = 1; in load_vmcs12_host_state()
10375 seg.db = 1; in load_vmcs12_host_state()
10376 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); in load_vmcs12_host_state()
10377 seg = (struct kvm_segment) { in load_vmcs12_host_state()
10386 seg.selector = vmcs12->host_ds_selector; in load_vmcs12_host_state()
10387 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); in load_vmcs12_host_state()
10388 seg.selector = vmcs12->host_es_selector; in load_vmcs12_host_state()
10389 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); in load_vmcs12_host_state()
10390 seg.selector = vmcs12->host_ss_selector; in load_vmcs12_host_state()
10391 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); in load_vmcs12_host_state()
10392 seg.selector = vmcs12->host_fs_selector; in load_vmcs12_host_state()
10393 seg.base = vmcs12->host_fs_base; in load_vmcs12_host_state()
10394 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); in load_vmcs12_host_state()
10395 seg.selector = vmcs12->host_gs_selector; in load_vmcs12_host_state()
10396 seg.base = vmcs12->host_gs_base; in load_vmcs12_host_state()
10397 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); in load_vmcs12_host_state()
10398 seg = (struct kvm_segment) { in load_vmcs12_host_state()
10405 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); in load_vmcs12_host_state()
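
In the load_vmcs12_host_state() lines above, the host's segment registers are rebuilt after a nested VM-exit by filling one local struct kvm_segment with a flat-segment template and calling vmx_set_segment() once per register, changing only the selector (and, for FS/GS, the base) between calls. A small sketch of that reuse-one-template pattern; the types, selector values, and FS base are made up for illustration:

#include <stdint.h>
#include <stdio.h>

enum sreg { SREG_DS, SREG_ES, SREG_FS, NR_SREG };

struct segment {
	uint16_t selector;
	uint64_t base;
	uint32_t limit;
	unsigned present : 1, s : 1, type : 4;
};

static struct segment regs[NR_SREG];

static void set_segment(const struct segment *var, enum sreg r)
{
	regs[r] = *var;	/* stand-in for vmx_set_segment() */
}

int main(void)
{
	/* One flat data-segment template... */
	struct segment seg = (struct segment) {
		.base = 0, .limit = 0xffffffff,
		.present = 1, .s = 1, .type = 3,
	};

	/* ...reused for each register, patching only what differs. */
	seg.selector = 0x18;
	set_segment(&seg, SREG_DS);
	seg.selector = 0x20;
	set_segment(&seg, SREG_ES);
	seg.selector = 0x28;
	seg.base = 0x7fff00000000ULL;	/* FS keeps a nonzero base */
	set_segment(&seg, SREG_FS);

	printf("FS base 0x%llx\n", (unsigned long long)regs[SREG_FS].base);
	return 0;
}
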