Lines Matching defs:kvm_vcpu_arch (struct kvm_vcpu_arch, defined in arch/x86/include/asm/kvm_host.h)
383 struct kvm_vcpu_arch {
388 unsigned long regs[NR_VCPU_REGS];
389 u32 regs_avail;
390 u32 regs_dirty;
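
regs[] is the vCPU's cached general-purpose register file; regs_avail and regs_dirty are per-register bitmaps tracking which cached values are current with respect to hardware state (VMCS/VMCB) and which need to be written back before the next entry. In the kernel these are only touched through helpers such as kvm_register_read()/kvm_register_write(). A minimal user-space sketch of the caching idea; the struct and function names below are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_REGS 16

    struct vcpu_model {
        unsigned long regs[NR_REGS];
        uint32_t regs_avail;        /* bit n set: regs[n] mirrors hardware */
        uint32_t regs_dirty;        /* bit n set: regs[n] must be flushed  */
        unsigned long hw[NR_REGS];  /* stand-in for the VMCS/VMCB copy     */
    };

    static unsigned long reg_read(struct vcpu_model *v, int n)
    {
        if (!(v->regs_avail & (1u << n))) {   /* lazy fill from "hardware" */
            v->regs[n] = v->hw[n];
            v->regs_avail |= 1u << n;
        }
        return v->regs[n];
    }

    static void reg_write(struct vcpu_model *v, int n, unsigned long val)
    {
        v->regs[n] = val;
        v->regs_avail |= 1u << n;
        v->regs_dirty |= 1u << n;             /* hardware copy is now stale */
    }

    int main(void)
    {
        struct vcpu_model v = { .hw = { 0x1234 } };
        reg_write(&v, 1, reg_read(&v, 0) + 1);
        printf("dirty mask: %#x\n", (unsigned)v.regs_dirty);
        return 0;
    }
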
392 unsigned long cr0;
393 unsigned long cr0_guest_owned_bits;
394 unsigned long cr2;
395 unsigned long cr3;
396 unsigned long cr4;
397 unsigned long cr4_guest_owned_bits;
398 unsigned long cr8;
399 u32 hflags;
400 u64 efer;
401 u64 apic_base;
402 struct kvm_lapic *apic; /* kernel irqchip context */
403 u64 eoi_exit_bitmap[4];
404 unsigned long apic_attention;
405 int32_t apic_arb_prio;
406 int mp_state;
407 u64 ia32_misc_enable_msr;
408 u64 smbase;
409 bool tpr_access_reporting;
410 u64 ia32_xss;
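
cr0..cr8, efer, apic_base and the MSR-backed fields above are the architectural control state KVM tracks per vCPU; cr0_guest_owned_bits/cr4_guest_owned_bits record which bits the guest may change without causing a VM exit. From userspace, most of this block is visible through the KVM_GET_SREGS ioctl. A compilable fragment of a VMM, assuming vcpu_fd is an already-open vCPU file descriptor:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdio.h>

    /* Dump the control-register state KVM exposes for one vCPU. */
    static int dump_sregs(int vcpu_fd)
    {
        struct kvm_sregs sregs;

        if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0) {
            perror("KVM_GET_SREGS");
            return -1;
        }
        printf("cr0=%#llx cr3=%#llx cr4=%#llx cr8=%#llx\n",
               sregs.cr0, sregs.cr3, sregs.cr4, sregs.cr8);
        printf("efer=%#llx apic_base=%#llx\n", sregs.efer, sregs.apic_base);
        return 0;
    }
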
419 struct kvm_mmu mmu;
429 struct kvm_mmu nested_mmu;
435 struct kvm_mmu *walk_mmu;
437 struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
438 struct kvm_mmu_memory_cache mmu_page_cache;
439 struct kvm_mmu_memory_cache mmu_page_header_cache;
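
mmu is the paging context used to handle faults for the L1 guest (and, with nested paging, also while L2 runs); nested_mmu is only used to walk L2 page tables; walk_mmu points at whichever of the two should currently be used for gva-to-gpa translation; the three memory caches are pre-allocated pools for shadow page-table structures. A toy model of the walk_mmu indirection, with all names invented (the real contexts carry function pointers such as gva_to_gpa):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gva_t, gpa_t;

    struct mmu_ctx {
        const char *name;
        gpa_t (*gva_to_gpa)(gva_t gva);       /* translation callback */
    };

    static gpa_t l1_translate(gva_t gva) { return gva;            } /* identity, demo only */
    static gpa_t l2_translate(gva_t gva) { return gva + 0x100000; }

    struct vcpu_model {
        struct mmu_ctx mmu;         /* L1 paging state */
        struct mmu_ctx nested_mmu;  /* L2 walk-only state */
        struct mmu_ctx *walk_mmu;   /* context used for translations right now */
    };

    int main(void)
    {
        struct vcpu_model v = {
            .mmu        = { "l1", l1_translate },
            .nested_mmu = { "l2", l2_translate },
        };

        v.walk_mmu = &v.mmu;                  /* normal operation */
        printf("%s: %#llx\n", v.walk_mmu->name,
               (unsigned long long)v.walk_mmu->gva_to_gpa(0x1000));

        v.walk_mmu = &v.nested_mmu;           /* running L2 with nested paging */
        printf("%s: %#llx\n", v.walk_mmu->name,
               (unsigned long long)v.walk_mmu->gva_to_gpa(0x1000));
        return 0;
    }
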
441 struct fpu guest_fpu;
442 bool eager_fpu;
443 u64 xcr0;
444 u64 guest_supported_xcr0;
445 u32 guest_xstate_size;
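
guest_fpu holds the guest's FPU/XSAVE area, xcr0 is the guest's current XCR0 value, and guest_supported_xcr0 is the subset of host XSAVE features the guest is allowed to enable, derived from its CPUID. When the guest executes XSETBV, KVM validates the new value before accepting it; a sketch of the main checks, with the constants taken from the x86 XSAVE definition and the helper name invented:

    #include <stdint.h>
    #include <stdbool.h>

    #define XFEATURE_MASK_FP   (1ULL << 0)   /* x87 state, must always be set */
    #define XFEATURE_MASK_SSE  (1ULL << 1)
    #define XFEATURE_MASK_YMM  (1ULL << 2)   /* AVX; depends on SSE */

    /* Sketch of the XCR0 validation performed on a guest XSETBV. */
    static bool xcr0_valid(uint64_t new_xcr0, uint64_t guest_supported_xcr0)
    {
        if (!(new_xcr0 & XFEATURE_MASK_FP))
            return false;                    /* x87 can never be disabled */
        if (new_xcr0 & ~(guest_supported_xcr0 | XFEATURE_MASK_FP))
            return false;                    /* feature not offered to the guest */
        if ((new_xcr0 & XFEATURE_MASK_YMM) && !(new_xcr0 & XFEATURE_MASK_SSE))
            return false;                    /* AVX requires SSE state */
        /* the real code has further rules, e.g. for AVX-512 and MPX */
        return true;
    }
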
447 struct kvm_pio_request pio;
448 void *pio_data;
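
pio describes an in-flight port-I/O operation that had to be handed to userspace, and pio_data points at the page, shared with userspace via the vCPU mmap, that carries the data. On the userspace side the same information arrives as a KVM_EXIT_IO exit; a compilable fragment of a VMM run loop handling an OUT to a serial-style port, where run is assumed to be the mmap'ed struct kvm_run:

    #include <linux/kvm.h>
    #include <stdio.h>

    /* Handle one KVM_EXIT_IO exit; returns 0 if it was consumed. */
    static int handle_io_exit(struct kvm_run *run)
    {
        if (run->exit_reason != KVM_EXIT_IO)
            return -1;

        if (run->io.direction == KVM_EXIT_IO_OUT && run->io.port == 0x3f8) {
            /* the data lives in the same shared mapping, at data_offset */
            const char *data = (const char *)run + run->io.data_offset;
            fwrite(data, run->io.size, run->io.count, stdout);
            return 0;
        }
        return -1;
    }
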
450 u8 event_exit_inst_len;
452 struct kvm_queued_exception {
458 } exception;
460 struct kvm_queued_interrupt {
464 } interrupt;
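
exception and interrupt are one-deep queues of events that must be injected into the guest on the next VM entry: an exception carries a vector, an optional error code and a reinject flag, while an interrupt carries a vector and whether it was a soft (INT n) interrupt. A toy model of the queue-then-inject split, with all names invented:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct queued_exception {
        bool pending;
        bool has_error_code;
        uint8_t nr;                 /* exception vector */
        uint32_t error_code;
    };

    /* Emulation decides an exception is needed ... */
    static void queue_gp_fault(struct queued_exception *e, uint32_t error_code)
    {
        e->pending = true;
        e->has_error_code = true;
        e->nr = 13;                 /* #GP */
        e->error_code = error_code;
    }

    /* ... and the entry path delivers it just before resuming the guest. */
    static void inject_pending(struct queued_exception *e)
    {
        if (!e->pending)
            return;
        printf("inject vector %u, error code %#x\n", e->nr, e->error_code);
        e->pending = false;
    }

    int main(void)
    {
        struct queued_exception e = { 0 };
        queue_gp_fault(&e, 0);
        inject_pending(&e);
        return 0;
    }
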
466 int halt_request; /* real mode on Intel only */
468 int cpuid_nent;
469 struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
471 int maxphyaddr;
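
cpuid_entries is the guest CPUID table userspace installed with KVM_SET_CPUID2, and maxphyaddr caches the guest's physical-address width taken from that table (CPUID 0x80000008, EAX[7:0]); it determines which physical-address bits KVM treats as reserved in guest page-table entries. A sketch of deriving it, with the struct mirroring kvm_cpuid_entry2 only loosely and the default of 36 bits matching what KVM assumes when the leaf is absent:

    #include <stdint.h>
    #include <stddef.h>

    struct cpuid_entry {
        uint32_t function, index;
        uint32_t eax, ebx, ecx, edx;
    };

    static const struct cpuid_entry *
    find_entry(const struct cpuid_entry *e, size_t n, uint32_t function)
    {
        for (size_t i = 0; i < n; i++)
            if (e[i].function == function)
                return &e[i];
        return NULL;
    }

    /* Guest physical-address width: CPUID 0x80000008 EAX[7:0], or 36 if absent. */
    static int query_maxphyaddr(const struct cpuid_entry *e, size_t n)
    {
        const struct cpuid_entry *best = find_entry(e, n, 0x80000008);
        return best ? (int)(best->eax & 0xff) : 36;
    }
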
475 struct x86_emulate_ctxt emulate_ctxt;
476 bool emulate_regs_need_sync_to_vcpu;
477 bool emulate_regs_need_sync_from_vcpu;
478 int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
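
emulate_ctxt is the instruction-emulator state, the two sync flags track whether register content has to be copied between the vCPU and the emulator, and complete_userspace_io is a continuation: when emulation must bounce out to userspace (for example a port read), KVM stores a callback here and invokes it on the next KVM_RUN so the result can be fed back into the stalled emulation. A toy model of that continuation pattern, with invented names:

    #include <stdio.h>

    struct vcpu_model {
        int (*complete_userspace_io)(struct vcpu_model *v);
        unsigned char io_result;    /* filled in by "userspace" in the meantime */
    };

    static int complete_port_read(struct vcpu_model *v)
    {
        printf("resume emulation with value %#x\n", v->io_result);
        v->complete_userspace_io = NULL;
        return 1;
    }

    /* Called at the top of the next run, before entering the guest. */
    static int vcpu_run(struct vcpu_model *v)
    {
        if (v->complete_userspace_io)
            return v->complete_userspace_io(v);
        return 0;   /* nothing pending: go straight to guest entry */
    }

    int main(void)
    {
        struct vcpu_model v = { .complete_userspace_io = complete_port_read,
                                .io_result = 0x42 };
        vcpu_run(&v);
        return 0;
    }
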
480 gpa_t time;
481 struct pvclock_vcpu_time_info hv_clock;
482 unsigned int hw_tsc_khz;
483 struct gfn_to_hva_cache pv_time;
484 bool pv_time_enabled;
486 bool pvclock_set_guest_stopped_request;
488 struct {
494 } st;
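
time is the guest-physical address the guest registered for its pvclock page, hv_clock is the kernel's working copy of that structure, and pv_time is a cached translation used to write it; the st block tracks steal-time reporting the same way. The guest reads the published pvclock page with a version-based seqlock and a fixed-point TSC-to-nanoseconds conversion; a user-space sketch of that read protocol, with fields mirroring pvclock_vcpu_time_info and rdtsc_stub() standing in for the real instruction:

    #include <stdint.h>

    struct pvclock_time_info {
        uint32_t version;           /* odd while the host is updating the page */
        uint64_t tsc_timestamp;
        uint64_t system_time;       /* nanoseconds at tsc_timestamp */
        uint32_t tsc_to_system_mul;
        int8_t   tsc_shift;
    };

    static uint64_t rdtsc_stub(void) { return 0; }   /* placeholder for rdtsc */

    static uint64_t pvclock_read_ns(volatile struct pvclock_time_info *ti)
    {
        uint32_t version;
        uint64_t delta, ns = 0;

        do {
            version = ti->version;
            if (version & 1)
                continue;                   /* update in progress, retry */
            delta = rdtsc_stub() - ti->tsc_timestamp;
            if (ti->tsc_shift >= 0)
                delta <<= ti->tsc_shift;
            else
                delta >>= -ti->tsc_shift;
            ns = ti->system_time +
                 (uint64_t)(((unsigned __int128)delta * ti->tsc_to_system_mul) >> 32);
        } while (version != ti->version || (version & 1));

        return ns;
    }
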
496 u64 last_guest_tsc;
497 u64 last_host_tsc;
498 u64 tsc_offset_adjustment;
499 u64 this_tsc_nsec;
500 u64 this_tsc_write;
501 u64 this_tsc_generation;
502 bool tsc_catchup;
503 bool tsc_always_catchup;
504 s8 virtual_tsc_shift;
505 u32 virtual_tsc_mult;
506 u32 virtual_tsc_khz;
507 s64 ia32_tsc_adjust_msr;
508 u64 tsc_scaling_ratio;
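
The TSC block parameterizes the guest's virtual TSC: virtual_tsc_khz is the frequency advertised to the guest, virtual_tsc_mult/virtual_tsc_shift are the fixed-point factors for converting elapsed host nanoseconds into guest TSC ticks, this_tsc_nsec/this_tsc_write record the reference point of the last guest TSC write, tsc_catchup/tsc_always_catchup select software catch-up when the host cannot run the guest TSC at the requested rate, and tsc_scaling_ratio is the multiplier programmed into hardware TSC scaling when available. A hedged sketch of the reference-point conversion, using the same fixed-point scaling as the pvclock example above; the helper is invented and details differ from the kernel code:

    #include <stdint.h>

    struct tsc_params {
        uint64_t this_tsc_nsec;     /* host ns at the last guest TSC write */
        uint64_t this_tsc_write;    /* guest TSC value written at that time */
        uint32_t virtual_tsc_mult;  /* ns -> guest ticks, fixed point */
        int8_t   virtual_tsc_shift;
    };

    /* What should the guest TSC read at host time kernel_ns? */
    static uint64_t compute_guest_tsc(const struct tsc_params *p, uint64_t kernel_ns)
    {
        uint64_t delta = kernel_ns - p->this_tsc_nsec;

        if (p->virtual_tsc_shift >= 0)
            delta <<= p->virtual_tsc_shift;
        else
            delta >>= -p->virtual_tsc_shift;

        return p->this_tsc_write +
               (uint64_t)(((unsigned __int128)delta * p->virtual_tsc_mult) >> 32);
    }
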
510 atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
511 unsigned nmi_pending; /* NMI queued after currently running handler */
512 bool nmi_injected; /* Trying to inject an NMI this entry */
513 bool smi_pending; /* SMI queued after currently running handler */
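
NMIs arrive asynchronously into nmi_queued, are then folded into nmi_pending, and nmi_injected marks one being delivered on the current entry; because real hardware can hold at most one NMI latched while another is executing, KVM caps how many it keeps pending. A toy model of that collapse; the cap of two matches the kernel's process_nmi(), other details are omitted:

    #include <stdio.h>

    struct nmi_state {
        int nmi_queued;         /* async producers increment this (atomic in the kernel) */
        unsigned nmi_pending;   /* NMIs still to be injected */
    };

    static void process_nmi(struct nmi_state *s)
    {
        unsigned limit = 2;     /* one being handled + one latched, as on bare metal */

        s->nmi_pending += s->nmi_queued;
        s->nmi_queued = 0;
        if (s->nmi_pending > limit)
            s->nmi_pending = limit;
    }

    int main(void)
    {
        struct nmi_state s = { .nmi_queued = 5 };
        process_nmi(&s);
        printf("pending after collapse: %u\n", s.nmi_pending);   /* prints 2 */
        return 0;
    }
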
515 struct kvm_mtrr mtrr_state;
516 u64 pat;
518 unsigned switch_db_regs;
519 unsigned long db[KVM_NR_DB_REGS];
520 unsigned long dr6;
521 unsigned long dr7;
522 unsigned long eff_db[KVM_NR_DB_REGS];
523 unsigned long guest_debug_dr7;
525 u64 mcg_cap;
526 u64 mcg_status;
527 u64 mcg_ctl;
528 u64 *mce_banks;
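
mcg_cap/mcg_status/mcg_ctl shadow the machine-check global MSRs, and mce_banks backs the per-bank MSRs; userspace configures this via KVM_X86_SETUP_MCE, after which the low byte of mcg_cap gives the bank count and each bank occupies four consecutive u64 slots (CTL, STATUS, ADDR, MISC). A sketch of the sizing and indexing; the allocation style is illustrative, not the kernel's:

    #include <stdint.h>
    #include <stdlib.h>

    enum { MCE_CTL, MCE_STATUS, MCE_ADDR, MCE_MISC, MSRS_PER_BANK };

    /* Allocate the bank array, sized from the low byte of mcg_cap. */
    static uint64_t *alloc_mce_banks(uint64_t mcg_cap)
    {
        unsigned bank_num = mcg_cap & 0xff;       /* MCG_CAP[7:0] = bank count */
        return calloc(bank_num * MSRS_PER_BANK, sizeof(uint64_t));
    }

    /* MCi_STATUS for bank i lives at banks[i * 4 + 1]. */
    static uint64_t read_mci_status(const uint64_t *banks, unsigned bank)
    {
        return banks[bank * MSRS_PER_BANK + MCE_STATUS];
    }
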
531 u64 mmio_gva;
532 unsigned access;
533 gfn_t mmio_gfn;
534 u64 mmio_gen;
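
These four fields are a one-entry cache of the last guest virtual address that turned out to be MMIO: if a later access hits the same page while mmio_gen still matches the current memslot generation, KVM can skip the page-table walk and go straight to MMIO emulation. A toy model of the lookup, with page size and names simplified:

    #include <stdint.h>
    #include <stdbool.h>

    #define PAGE_MASK (~0xfffULL)

    struct mmio_cache {
        uint64_t mmio_gva;   /* page-aligned GVA of the cached MMIO access */
        uint64_t mmio_gfn;   /* frame it translated to */
        uint64_t mmio_gen;   /* memslot generation when the entry was made */
    };

    static void cache_mmio(struct mmio_cache *c, uint64_t gva, uint64_t gfn, uint64_t gen)
    {
        c->mmio_gva = gva & PAGE_MASK;
        c->mmio_gfn = gfn;
        c->mmio_gen = gen;
    }

    static bool match_mmio_gva(const struct mmio_cache *c, uint64_t gva, uint64_t cur_gen)
    {
        return c->mmio_gen == cur_gen &&          /* memslots unchanged since caching */
               c->mmio_gva == (gva & PAGE_MASK);
    }
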
536 struct kvm_pmu pmu;
539 unsigned long singlestep_rip;
541 struct kvm_vcpu_hv hyperv;
543 cpumask_var_t wbinvd_dirty_mask;
545 unsigned long last_retry_eip;
546 unsigned long last_retry_addr;
548 struct {
555 } apf;
558 struct {
561 } osvw;
563 struct {
566 } pv_eoi;
573 bool write_fault_to_shadow_pgtable;
576 unsigned long exit_qualification;
579 struct {
581 } pv;
583 int pending_ioapic_eoi;
584 int pending_external_vector;
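
struct kvm_vcpu_arch never exists on its own: it is embedded as the arch member of the generic struct kvm_vcpu, which is in turn embedded in the vendor structure (vcpu_vmx or vcpu_svm), so vendor code recovers its wrapper with container_of(). A minimal model of that nesting, with names shortened and contents elided:

    #include <stddef.h>
    #include <stdio.h>

    struct kvm_vcpu_arch { unsigned long cr0; /* ... the fields listed above ... */ };
    struct kvm_vcpu      { int vcpu_id; struct kvm_vcpu_arch arch; };
    struct vcpu_vmx      { struct kvm_vcpu vcpu; /* VMX-only state follows */ };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
    {
        return container_of(vcpu, struct vcpu_vmx, vcpu);
    }

    int main(void)
    {
        struct vcpu_vmx vmx = { .vcpu = { .vcpu_id = 0, .arch = { .cr0 = 0x80000011 } } };
        struct kvm_vcpu *vcpu = &vmx.vcpu;

        printf("cr0=%#lx same wrapper: %d\n",
               vcpu->arch.cr0, to_vmx(vcpu) == &vmx);
        return 0;
    }
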