Lines Matching defs:kvm_vcpu_arch

347 struct kvm_vcpu_arch {
352 unsigned long regs[NR_VCPU_REGS];
353 u32 regs_avail;
354 u32 regs_dirty;
356 unsigned long cr0;
357 unsigned long cr0_guest_owned_bits;
358 unsigned long cr2;
359 unsigned long cr3;
360 unsigned long cr4;
361 unsigned long cr4_guest_owned_bits;
362 unsigned long cr8;
363 u32 hflags;
364 u64 efer;
365 u64 apic_base;
366 struct kvm_lapic *apic; /* kernel irqchip context */
367 unsigned long apic_attention;
368 int32_t apic_arb_prio;
369 int mp_state;
370 u64 ia32_misc_enable_msr;
371 bool tpr_access_reporting;
372 u64 ia32_xss;
381 struct kvm_mmu mmu;
391 struct kvm_mmu nested_mmu;
397 struct kvm_mmu *walk_mmu;
399 struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
400 struct kvm_mmu_memory_cache mmu_page_cache;
401 struct kvm_mmu_memory_cache mmu_page_header_cache;
403 struct fpu guest_fpu;
404 bool eager_fpu;
405 u64 xcr0;
406 u64 guest_supported_xcr0;
407 u32 guest_xstate_size;
409 struct kvm_pio_request pio;
410 void *pio_data;
412 u8 event_exit_inst_len;
414 struct kvm_queued_exception {
420 } exception;
422 struct kvm_queued_interrupt {
426 } interrupt;
428 int halt_request; /* real mode on Intel only */
430 int cpuid_nent;
431 struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
433 int maxphyaddr;
437 struct x86_emulate_ctxt emulate_ctxt;
438 bool emulate_regs_need_sync_to_vcpu;
439 bool emulate_regs_need_sync_from_vcpu;
440 int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
442 gpa_t time;
443 struct pvclock_vcpu_time_info hv_clock;
444 unsigned int hw_tsc_khz;
445 struct gfn_to_hva_cache pv_time;
446 bool pv_time_enabled;
448 bool pvclock_set_guest_stopped_request;
450 struct {
456 } st;
458 u64 last_guest_tsc;
459 u64 last_host_tsc;
460 u64 tsc_offset_adjustment;
461 u64 this_tsc_nsec;
462 u64 this_tsc_write;
463 u64 this_tsc_generation;
464 bool tsc_catchup;
465 bool tsc_always_catchup;
466 s8 virtual_tsc_shift;
467 u32 virtual_tsc_mult;
468 u32 virtual_tsc_khz;
469 s64 ia32_tsc_adjust_msr;
471 atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
472 unsigned nmi_pending; /* NMI queued after currently running handler */
473 bool nmi_injected; /* Trying to inject an NMI this entry */
475 struct mtrr_state_type mtrr_state;
476 u64 pat;
478 unsigned switch_db_regs;
479 unsigned long db[KVM_NR_DB_REGS];
480 unsigned long dr6;
481 unsigned long dr7;
482 unsigned long eff_db[KVM_NR_DB_REGS];
483 unsigned long guest_debug_dr7;
485 u64 mcg_cap;
486 u64 mcg_status;
487 u64 mcg_ctl;
488 u64 *mce_banks;
491 u64 mmio_gva;
492 unsigned access;
493 gfn_t mmio_gfn;
494 u64 mmio_gen;
496 struct kvm_pmu pmu;
499 unsigned long singlestep_rip;
502 u64 hv_vapic;
504 cpumask_var_t wbinvd_dirty_mask;
506 unsigned long last_retry_eip;
507 unsigned long last_retry_addr;
509 struct {
516 } apf;
519 struct {
522 } osvw;
524 struct {
527 } pv_eoi;
534 bool write_fault_to_shadow_pgtable;
537 unsigned long exit_qualification;
540 struct {
542 } pv;