1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This header defines architecture specific interfaces, x86 version
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2.  See
7 * the COPYING file in the top-level directory.
8 *
9 */
10
11#ifndef _ASM_X86_KVM_HOST_H
12#define _ASM_X86_KVM_HOST_H
13
14#include <linux/types.h>
15#include <linux/mm.h>
16#include <linux/mmu_notifier.h>
17#include <linux/tracepoint.h>
18#include <linux/cpumask.h>
19#include <linux/irq_work.h>
20
21#include <linux/kvm.h>
22#include <linux/kvm_para.h>
23#include <linux/kvm_types.h>
24#include <linux/perf_event.h>
25#include <linux/pvclock_gtod.h>
26#include <linux/clocksource.h>
27#include <linux/irqbypass.h>
28
29#include <asm/pvclock-abi.h>
30#include <asm/desc.h>
31#include <asm/mtrr.h>
32#include <asm/msr-index.h>
33#include <asm/asm.h>
34
35#define KVM_MAX_VCPUS 255
36#define KVM_SOFT_MAX_VCPUS 160
37#define KVM_USER_MEM_SLOTS 509
38/* memory slots that are not exposed to userspace */
39#define KVM_PRIVATE_MEM_SLOTS 3
40#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
41
42#define KVM_PIO_PAGE_OFFSET 1
43#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
44#define KVM_HALT_POLL_NS_DEFAULT 400000
45
46#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
47
48#define CR0_RESERVED_BITS                                               \
49	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
50			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
51			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
52
53#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
54#define CR3_PCID_INVD		 BIT_64(63)
55#define CR4_RESERVED_BITS                                               \
56	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
57			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
58			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
59			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
60			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))
61
62#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
63
64
65
66#define INVALID_PAGE (~(hpa_t)0)
67#define VALID_PAGE(x) ((x) != INVALID_PAGE)
68
69#define UNMAPPED_GVA (~(gpa_t)0)
70
71/* KVM Hugepage definitions for x86 */
72#define KVM_NR_PAGE_SIZES	3
73#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
74#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
75#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
76#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
77#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
78
79static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
80{
81	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
82	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
83		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
84}
85
86#define KVM_PERMILLE_MMU_PAGES 20
87#define KVM_MIN_ALLOC_MMU_PAGES 64
88#define KVM_MMU_HASH_SHIFT 10
89#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
90#define KVM_MIN_FREE_MMU_PAGES 5
91#define KVM_REFILL_PAGES 25
92#define KVM_MAX_CPUID_ENTRIES 80
93#define KVM_NR_FIXED_MTRR_REGION 88
94#define KVM_NR_VAR_MTRR 8
95
96#define ASYNC_PF_PER_VCPU 64
97
98enum kvm_reg {
99	VCPU_REGS_RAX = 0,
100	VCPU_REGS_RCX = 1,
101	VCPU_REGS_RDX = 2,
102	VCPU_REGS_RBX = 3,
103	VCPU_REGS_RSP = 4,
104	VCPU_REGS_RBP = 5,
105	VCPU_REGS_RSI = 6,
106	VCPU_REGS_RDI = 7,
107#ifdef CONFIG_X86_64
108	VCPU_REGS_R8 = 8,
109	VCPU_REGS_R9 = 9,
110	VCPU_REGS_R10 = 10,
111	VCPU_REGS_R11 = 11,
112	VCPU_REGS_R12 = 12,
113	VCPU_REGS_R13 = 13,
114	VCPU_REGS_R14 = 14,
115	VCPU_REGS_R15 = 15,
116#endif
117	VCPU_REGS_RIP,
118	NR_VCPU_REGS
119};
120
121enum kvm_reg_ex {
122	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
123	VCPU_EXREG_CR3,
124	VCPU_EXREG_RFLAGS,
125	VCPU_EXREG_SEGMENTS,
126};
127
128enum {
129	VCPU_SREG_ES,
130	VCPU_SREG_CS,
131	VCPU_SREG_SS,
132	VCPU_SREG_DS,
133	VCPU_SREG_FS,
134	VCPU_SREG_GS,
135	VCPU_SREG_TR,
136	VCPU_SREG_LDTR,
137};
138
139#include <asm/kvm_emulate.h>
140
141#define KVM_NR_MEM_OBJS 40
142
143#define KVM_NR_DB_REGS	4
144
145#define DR6_BD		(1 << 13)
146#define DR6_BS		(1 << 14)
147#define DR6_RTM		(1 << 16)
148#define DR6_FIXED_1	0xfffe0ff0
149#define DR6_INIT	0xffff0ff0
150#define DR6_VOLATILE	0x0001e00f
151
152#define DR7_BP_EN_MASK	0x000000ff
153#define DR7_GE		(1 << 9)
154#define DR7_GD		(1 << 13)
155#define DR7_FIXED_1	0x00000400
156#define DR7_VOLATILE	0xffff2bff
157
158#define PFERR_PRESENT_BIT 0
159#define PFERR_WRITE_BIT 1
160#define PFERR_USER_BIT 2
161#define PFERR_RSVD_BIT 3
162#define PFERR_FETCH_BIT 4
163
164#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
165#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
166#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
167#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
168#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
169
170/* apic attention bits */
171#define KVM_APIC_CHECK_VAPIC	0
172/*
173 * The following bit is set with PV-EOI, unset on EOI.
174 * We detect PV-EOI changes by guest by comparing
175 * this bit with PV-EOI in guest memory.
176 * See the implementation in apic_update_pv_eoi.
177 */
178#define KVM_APIC_PV_EOI_PENDING	1
179
180struct kvm_kernel_irq_routing_entry;
181
182/*
183 * We don't want allocation failures within the mmu code, so we preallocate
184 * enough memory for a single page fault in a cache.
185 */
186struct kvm_mmu_memory_cache {
187	int nobjs;
188	void *objects[KVM_NR_MEM_OBJS];
189};
190
191union kvm_mmu_page_role {
192	unsigned word;
193	struct {
194		unsigned level:4;
195		unsigned cr4_pae:1;
196		unsigned quadrant:2;
197		unsigned direct:1;
198		unsigned access:3;
199		unsigned invalid:1;
200		unsigned nxe:1;
201		unsigned cr0_wp:1;
202		unsigned smep_andnot_wp:1;
203		unsigned smap_andnot_wp:1;
204		unsigned :8;
205
206		/*
207		 * This is left at the top of the word so that
208		 * kvm_memslots_for_spte_role can extract it with a
209		 * simple shift.  While there is room, give it a whole
210		 * byte so it is also faster to load it from memory.
211		 */
212		unsigned smm:8;
213	};
214};
215
216struct kvm_mmu_page {
217	struct list_head link;
218	struct hlist_node hash_link;
219
220	/*
221	 * The following two entries are used to key the shadow page in the
222	 * hash table.
223	 */
224	gfn_t gfn;
225	union kvm_mmu_page_role role;
226
227	u64 *spt;
228	/* hold the gfn of each spte inside spt */
229	gfn_t *gfns;
230	bool unsync;
231	int root_count;          /* Currently serving as active root */
232	unsigned int unsync_children;
233	unsigned long parent_ptes;	/* Reverse mapping for parent_pte */
234
235	/* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen.  */
236	unsigned long mmu_valid_gen;
237
238	DECLARE_BITMAP(unsync_child_bitmap, 512);
239
240#ifdef CONFIG_X86_32
241	/*
242	 * Used out of the mmu-lock to avoid reading spte values while an
243	 * update is in progress; see the comments in __get_spte_lockless().
244	 */
245	int clear_spte_count;
246#endif
247
248	/* Number of writes since the last time traversal visited this page.  */
249	int write_flooding_count;
250};
251
252struct kvm_pio_request {
253	unsigned long count;
254	int in;
255	int port;
256	int size;
257};
258
259struct rsvd_bits_validate {
260	u64 rsvd_bits_mask[2][4];
261	u64 bad_mt_xwr;
262};
263
264/*
265 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
266 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
267 * mode.
268 */
269struct kvm_mmu {
270	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
271	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
272	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
273	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
274			  bool prefault);
275	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
276				  struct x86_exception *fault);
277	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
278			    struct x86_exception *exception);
279	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
280			       struct x86_exception *exception);
281	int (*sync_page)(struct kvm_vcpu *vcpu,
282			 struct kvm_mmu_page *sp);
283	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
284	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
285			   u64 *spte, const void *pte);
286	hpa_t root_hpa;
287	int root_level;
288	int shadow_root_level;
289	union kvm_mmu_page_role base_role;
290	bool direct_map;
291
292	/*
293	 * Bitmap; bit set = permission fault
294	 * Byte index: page fault error code [4:1]
295	 * Bit index: pte permissions in ACC_* format
296	 */
297	u8 permissions[16];
298
299	u64 *pae_root;
300	u64 *lm_root;
301
302	/*
303	 * check zero bits on shadow page table entries, these
304	 * bits include not only hardware reserved bits but also
305	 * the bits spte never used.
306	 */
307	struct rsvd_bits_validate shadow_zero_check;
308
309	struct rsvd_bits_validate guest_rsvd_check;
310
311	/*
312	 * Bitmap: bit set = last pte in walk
313	 * index[0:1]: level (zero-based)
314	 * index[2]: pte.ps
315	 */
316	u8 last_pte_bitmap;
317
318	bool nx;
319
320	u64 pdptrs[4]; /* pae */
321};
322
323enum pmc_type {
324	KVM_PMC_GP = 0,
325	KVM_PMC_FIXED,
326};
327
328struct kvm_pmc {
329	enum pmc_type type;
330	u8 idx;
331	u64 counter;
332	u64 eventsel;
333	struct perf_event *perf_event;
334	struct kvm_vcpu *vcpu;
335};
336
337struct kvm_pmu {
338	unsigned nr_arch_gp_counters;
339	unsigned nr_arch_fixed_counters;
340	unsigned available_event_types;
341	u64 fixed_ctr_ctrl;
342	u64 global_ctrl;
343	u64 global_status;
344	u64 global_ovf_ctrl;
345	u64 counter_bitmask[2];
346	u64 global_ctrl_mask;
347	u64 reserved_bits;
348	u8 version;
349	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
350	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
351	struct irq_work irq_work;
352	u64 reprogram_pmi;
353};
354
355struct kvm_pmu_ops;
356
357enum {
358	KVM_DEBUGREG_BP_ENABLED = 1,
359	KVM_DEBUGREG_WONT_EXIT = 2,
360	KVM_DEBUGREG_RELOAD = 4,
361};
362
363struct kvm_mtrr_range {
364	u64 base;
365	u64 mask;
366	struct list_head node;
367};
368
369struct kvm_mtrr {
370	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
371	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
372	u64 deftype;
373
374	struct list_head head;
375};
376
377/* Hyper-V per vcpu emulation context */
378struct kvm_vcpu_hv {
379	u64 hv_vapic;
380	s64 runtime_offset;
381};
382
383struct kvm_vcpu_arch {
384	/*
385	 * rip and regs accesses must go through
386	 * kvm_{register,rip}_{read,write} functions.
387	 */
388	unsigned long regs[NR_VCPU_REGS];
389	u32 regs_avail;
390	u32 regs_dirty;
391
392	unsigned long cr0;
393	unsigned long cr0_guest_owned_bits;
394	unsigned long cr2;
395	unsigned long cr3;
396	unsigned long cr4;
397	unsigned long cr4_guest_owned_bits;
398	unsigned long cr8;
399	u32 hflags;
400	u64 efer;
401	u64 apic_base;
402	struct kvm_lapic *apic;    /* kernel irqchip context */
403	u64 eoi_exit_bitmap[4];
404	unsigned long apic_attention;
405	int32_t apic_arb_prio;
406	int mp_state;
407	u64 ia32_misc_enable_msr;
408	u64 smbase;
409	bool tpr_access_reporting;
410	u64 ia32_xss;
411
412	/*
413	 * Paging state of the vcpu
414	 *
415	 * If the vcpu runs in guest mode with two level paging this still saves
416	 * the paging mode of the l1 guest. This context is always used to
417	 * handle faults.
418	 */
419	struct kvm_mmu mmu;
420
421	/*
422	 * Paging state of an L2 guest (used for nested npt)
423	 *
424	 * This context will save all necessary information to walk page tables
425	 * of the an L2 guest. This context is only initialized for page table
426	 * walking and not for faulting since we never handle l2 page faults on
427	 * the host.
428	 */
429	struct kvm_mmu nested_mmu;
430
431	/*
432	 * Pointer to the mmu context currently used for
433	 * gva_to_gpa translations.
434	 */
435	struct kvm_mmu *walk_mmu;
436
437	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
438	struct kvm_mmu_memory_cache mmu_page_cache;
439	struct kvm_mmu_memory_cache mmu_page_header_cache;
440
441	struct fpu guest_fpu;
442	bool eager_fpu;
443	u64 xcr0;
444	u64 guest_supported_xcr0;
445	u32 guest_xstate_size;
446
447	struct kvm_pio_request pio;
448	void *pio_data;
449
450	u8 event_exit_inst_len;
451
452	struct kvm_queued_exception {
453		bool pending;
454		bool has_error_code;
455		bool reinject;
456		u8 nr;
457		u32 error_code;
458	} exception;
459
460	struct kvm_queued_interrupt {
461		bool pending;
462		bool soft;
463		u8 nr;
464	} interrupt;
465
466	int halt_request; /* real mode on Intel only */
467
468	int cpuid_nent;
469	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
470
471	int maxphyaddr;
472
473	/* emulate context */
474
475	struct x86_emulate_ctxt emulate_ctxt;
476	bool emulate_regs_need_sync_to_vcpu;
477	bool emulate_regs_need_sync_from_vcpu;
478	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
479
480	gpa_t time;
481	struct pvclock_vcpu_time_info hv_clock;
482	unsigned int hw_tsc_khz;
483	struct gfn_to_hva_cache pv_time;
484	bool pv_time_enabled;
485	/* set guest stopped flag in pvclock flags field */
486	bool pvclock_set_guest_stopped_request;
487
488	struct {
489		u64 msr_val;
490		u64 last_steal;
491		u64 accum_steal;
492		struct gfn_to_hva_cache stime;
493		struct kvm_steal_time steal;
494	} st;
495
496	u64 last_guest_tsc;
497	u64 last_host_tsc;
498	u64 tsc_offset_adjustment;
499	u64 this_tsc_nsec;
500	u64 this_tsc_write;
501	u64 this_tsc_generation;
502	bool tsc_catchup;
503	bool tsc_always_catchup;
504	s8 virtual_tsc_shift;
505	u32 virtual_tsc_mult;
506	u32 virtual_tsc_khz;
507	s64 ia32_tsc_adjust_msr;
508	u64 tsc_scaling_ratio;
509
510	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
511	unsigned nmi_pending; /* NMI queued after currently running handler */
512	bool nmi_injected;    /* Trying to inject an NMI this entry */
513	bool smi_pending;    /* SMI queued after currently running handler */
514
515	struct kvm_mtrr mtrr_state;
516	u64 pat;
517
518	unsigned switch_db_regs;
519	unsigned long db[KVM_NR_DB_REGS];
520	unsigned long dr6;
521	unsigned long dr7;
522	unsigned long eff_db[KVM_NR_DB_REGS];
523	unsigned long guest_debug_dr7;
524
525	u64 mcg_cap;
526	u64 mcg_status;
527	u64 mcg_ctl;
528	u64 *mce_banks;
529
530	/* Cache MMIO info */
531	u64 mmio_gva;
532	unsigned access;
533	gfn_t mmio_gfn;
534	u64 mmio_gen;
535
536	struct kvm_pmu pmu;
537
538	/* used for guest single stepping over the given code position */
539	unsigned long singlestep_rip;
540
541	struct kvm_vcpu_hv hyperv;
542
543	cpumask_var_t wbinvd_dirty_mask;
544
545	unsigned long last_retry_eip;
546	unsigned long last_retry_addr;
547
548	struct {
549		bool halted;
550		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
551		struct gfn_to_hva_cache data;
552		u64 msr_val;
553		u32 id;
554		bool send_user_only;
555	} apf;
556
557	/* OSVW MSRs (AMD only) */
558	struct {
559		u64 length;
560		u64 status;
561	} osvw;
562
563	struct {
564		u64 msr_val;
565		struct gfn_to_hva_cache data;
566	} pv_eoi;
567
568	/*
569	 * Indicate whether the access faults on its page table in guest
570	 * which is set when fix page fault and used to detect unhandeable
571	 * instruction.
572	 */
573	bool write_fault_to_shadow_pgtable;
574
575	/* set at EPT violation at this point */
576	unsigned long exit_qualification;
577
578	/* pv related host specific info */
579	struct {
580		bool pv_unhalted;
581	} pv;
582
583	int pending_ioapic_eoi;
584	int pending_external_vector;
585};
586
587struct kvm_lpage_info {
588	int write_count;
589};
590
591struct kvm_arch_memory_slot {
592	unsigned long *rmap[KVM_NR_PAGE_SIZES];
593	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
594};
595
596/*
597 * We use as the mode the number of bits allocated in the LDR for the
598 * logical processor ID.  It happens that these are all powers of two.
599 * This makes it is very easy to detect cases where the APICs are
600 * configured for multiple modes; in that case, we cannot use the map and
601 * hence cannot use kvm_irq_delivery_to_apic_fast either.
602 */
603#define KVM_APIC_MODE_XAPIC_CLUSTER          4
604#define KVM_APIC_MODE_XAPIC_FLAT             8
605#define KVM_APIC_MODE_X2APIC                16
606
607struct kvm_apic_map {
608	struct rcu_head rcu;
609	u8 mode;
610	struct kvm_lapic *phys_map[256];
611	/* first index is cluster id second is cpu id in a cluster */
612	struct kvm_lapic *logical_map[16][16];
613};
614
615/* Hyper-V emulation context */
616struct kvm_hv {
617	u64 hv_guest_os_id;
618	u64 hv_hypercall;
619	u64 hv_tsc_page;
620
621	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
622	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
623	u64 hv_crash_ctl;
624};
625
626struct kvm_arch {
627	unsigned int n_used_mmu_pages;
628	unsigned int n_requested_mmu_pages;
629	unsigned int n_max_mmu_pages;
630	unsigned int indirect_shadow_pages;
631	unsigned long mmu_valid_gen;
632	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
633	/*
634	 * Hash table of struct kvm_mmu_page.
635	 */
636	struct list_head active_mmu_pages;
637	struct list_head zapped_obsolete_pages;
638
639	struct list_head assigned_dev_head;
640	struct iommu_domain *iommu_domain;
641	bool iommu_noncoherent;
642#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
643	atomic_t noncoherent_dma_count;
644#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
645	atomic_t assigned_device_count;
646	struct kvm_pic *vpic;
647	struct kvm_ioapic *vioapic;
648	struct kvm_pit *vpit;
649	atomic_t vapics_in_nmi_mode;
650	struct mutex apic_map_lock;
651	struct kvm_apic_map *apic_map;
652
653	unsigned int tss_addr;
654	bool apic_access_page_done;
655
656	gpa_t wall_clock;
657
658	bool ept_identity_pagetable_done;
659	gpa_t ept_identity_map_addr;
660
661	unsigned long irq_sources_bitmap;
662	s64 kvmclock_offset;
663	raw_spinlock_t tsc_write_lock;
664	u64 last_tsc_nsec;
665	u64 last_tsc_write;
666	u32 last_tsc_khz;
667	u64 cur_tsc_nsec;
668	u64 cur_tsc_write;
669	u64 cur_tsc_offset;
670	u64 cur_tsc_generation;
671	int nr_vcpus_matched_tsc;
672
673	spinlock_t pvclock_gtod_sync_lock;
674	bool use_master_clock;
675	u64 master_kernel_ns;
676	cycle_t master_cycle_now;
677	struct delayed_work kvmclock_update_work;
678	struct delayed_work kvmclock_sync_work;
679
680	struct kvm_xen_hvm_config xen_hvm_config;
681
682	/* reads protected by irq_srcu, writes by irq_lock */
683	struct hlist_head mask_notifier_list;
684
685	struct kvm_hv hyperv;
686
687	#ifdef CONFIG_KVM_MMU_AUDIT
688	int audit_point;
689	#endif
690
691	bool boot_vcpu_runs_old_kvmclock;
692	u32 bsp_vcpu_id;
693
694	u64 disabled_quirks;
695
696	bool irqchip_split;
697	u8 nr_reserved_ioapic_pins;
698};
699
700struct kvm_vm_stat {
701	u32 mmu_shadow_zapped;
702	u32 mmu_pte_write;
703	u32 mmu_pte_updated;
704	u32 mmu_pde_zapped;
705	u32 mmu_flooded;
706	u32 mmu_recycled;
707	u32 mmu_cache_miss;
708	u32 mmu_unsync;
709	u32 remote_tlb_flush;
710	u32 lpages;
711};
712
713struct kvm_vcpu_stat {
714	u32 pf_fixed;
715	u32 pf_guest;
716	u32 tlb_flush;
717	u32 invlpg;
718
719	u32 exits;
720	u32 io_exits;
721	u32 mmio_exits;
722	u32 signal_exits;
723	u32 irq_window_exits;
724	u32 nmi_window_exits;
725	u32 halt_exits;
726	u32 halt_successful_poll;
727	u32 halt_attempted_poll;
728	u32 halt_wakeup;
729	u32 request_irq_exits;
730	u32 irq_exits;
731	u32 host_state_reload;
732	u32 efer_reload;
733	u32 fpu_reload;
734	u32 insn_emulation;
735	u32 insn_emulation_fail;
736	u32 hypercalls;
737	u32 irq_injections;
738	u32 nmi_injections;
739};
740
741struct x86_instruction_info;
742
743struct msr_data {
744	bool host_initiated;
745	u32 index;
746	u64 data;
747};
748
749struct kvm_lapic_irq {
750	u32 vector;
751	u16 delivery_mode;
752	u16 dest_mode;
753	bool level;
754	u16 trig_mode;
755	u32 shorthand;
756	u32 dest_id;
757	bool msi_redir_hint;
758};
759
760struct kvm_x86_ops {
761	int (*cpu_has_kvm_support)(void);          /* __init */
762	int (*disabled_by_bios)(void);             /* __init */
763	int (*hardware_enable)(void);
764	void (*hardware_disable)(void);
765	void (*check_processor_compatibility)(void *rtn);
766	int (*hardware_setup)(void);               /* __init */
767	void (*hardware_unsetup)(void);            /* __exit */
768	bool (*cpu_has_accelerated_tpr)(void);
769	bool (*cpu_has_high_real_mode_segbase)(void);
770	void (*cpuid_update)(struct kvm_vcpu *vcpu);
771
772	/* Create, but do not attach this VCPU */
773	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
774	void (*vcpu_free)(struct kvm_vcpu *vcpu);
775	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
776
777	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
778	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
779	void (*vcpu_put)(struct kvm_vcpu *vcpu);
780
781	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
782	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
783	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
784	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
785	void (*get_segment)(struct kvm_vcpu *vcpu,
786			    struct kvm_segment *var, int seg);
787	int (*get_cpl)(struct kvm_vcpu *vcpu);
788	void (*set_segment)(struct kvm_vcpu *vcpu,
789			    struct kvm_segment *var, int seg);
790	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
791	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
792	void (*decache_cr3)(struct kvm_vcpu *vcpu);
793	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
794	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
795	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
796	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
797	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
798	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
799	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
800	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
801	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
802	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
803	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
804	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
805	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
806	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
807	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
808	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
809	void (*fpu_activate)(struct kvm_vcpu *vcpu);
810	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
811
812	void (*tlb_flush)(struct kvm_vcpu *vcpu);
813
814	void (*run)(struct kvm_vcpu *vcpu);
815	int (*handle_exit)(struct kvm_vcpu *vcpu);
816	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
817	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
818	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
819	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
820				unsigned char *hypercall_addr);
821	void (*set_irq)(struct kvm_vcpu *vcpu);
822	void (*set_nmi)(struct kvm_vcpu *vcpu);
823	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
824				bool has_error_code, u32 error_code,
825				bool reinject);
826	void (*cancel_injection)(struct kvm_vcpu *vcpu);
827	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
828	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
829	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
830	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
831	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
832	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
833	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
834	int (*cpu_uses_apicv)(struct kvm_vcpu *vcpu);
835	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
836	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
837	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu);
838	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
839	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
840	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
841	void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
842	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
843	int (*get_tdp_level)(void);
844	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
845	int (*get_lpage_level)(void);
846	bool (*rdtscp_supported)(void);
847	bool (*invpcid_supported)(void);
848	void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
849
850	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
851
852	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
853
854	bool (*has_wbinvd_exit)(void);
855
856	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
857	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
858
859	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
860
861	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
862
863	int (*check_intercept)(struct kvm_vcpu *vcpu,
864			       struct x86_instruction_info *info,
865			       enum x86_intercept_stage stage);
866	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
867	bool (*mpx_supported)(void);
868	bool (*xsaves_supported)(void);
869
870	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
871
872	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
873
874	/*
875	 * Arch-specific dirty logging hooks. These hooks are only supposed to
876	 * be valid if the specific arch has hardware-accelerated dirty logging
877	 * mechanism. Currently only for PML on VMX.
878	 *
879	 *  - slot_enable_log_dirty:
880	 *	called when enabling log dirty mode for the slot.
881	 *  - slot_disable_log_dirty:
882	 *	called when disabling log dirty mode for the slot.
883	 *	also called when slot is created with log dirty disabled.
884	 *  - flush_log_dirty:
885	 *	called before reporting dirty_bitmap to userspace.
886	 *  - enable_log_dirty_pt_masked:
887	 *	called when reenabling log dirty for the GFNs in the mask after
888	 *	corresponding bits are cleared in slot->dirty_bitmap.
889	 */
890	void (*slot_enable_log_dirty)(struct kvm *kvm,
891				      struct kvm_memory_slot *slot);
892	void (*slot_disable_log_dirty)(struct kvm *kvm,
893				       struct kvm_memory_slot *slot);
894	void (*flush_log_dirty)(struct kvm *kvm);
895	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
896					   struct kvm_memory_slot *slot,
897					   gfn_t offset, unsigned long mask);
898	/* pmu operations of sub-arch */
899	const struct kvm_pmu_ops *pmu_ops;
900
901	/*
902	 * Architecture specific hooks for vCPU blocking due to
903	 * HLT instruction.
904	 * Returns for .pre_block():
905	 *    - 0 means continue to block the vCPU.
906	 *    - 1 means we cannot block the vCPU since some event
907	 *        happens during this period, such as, 'ON' bit in
908	 *        posted-interrupts descriptor is set.
909	 */
910	int (*pre_block)(struct kvm_vcpu *vcpu);
911	void (*post_block)(struct kvm_vcpu *vcpu);
912	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
913			      uint32_t guest_irq, bool set);
914};
915
916struct kvm_arch_async_pf {
917	u32 token;
918	gfn_t gfn;
919	unsigned long cr3;
920	bool direct_map;
921};
922
923extern struct kvm_x86_ops *kvm_x86_ops;
924
925int kvm_mmu_module_init(void);
926void kvm_mmu_module_exit(void);
927
928void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
929int kvm_mmu_create(struct kvm_vcpu *vcpu);
930void kvm_mmu_setup(struct kvm_vcpu *vcpu);
931void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
932		u64 dirty_mask, u64 nx_mask, u64 x_mask);
933
934void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
935void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
936				      struct kvm_memory_slot *memslot);
937void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
938				   const struct kvm_memory_slot *memslot);
939void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
940				   struct kvm_memory_slot *memslot);
941void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
942					struct kvm_memory_slot *memslot);
943void kvm_mmu_slot_set_dirty(struct kvm *kvm,
944			    struct kvm_memory_slot *memslot);
945void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
946				   struct kvm_memory_slot *slot,
947				   gfn_t gfn_offset, unsigned long mask);
948void kvm_mmu_zap_all(struct kvm *kvm);
949void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
950unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
951void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
952
953int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
954
955int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
956			  const void *val, int bytes);
957
958struct kvm_irq_mask_notifier {
959	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
960	int irq;
961	struct hlist_node link;
962};
963
964void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
965				    struct kvm_irq_mask_notifier *kimn);
966void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
967				      struct kvm_irq_mask_notifier *kimn);
968void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
969			     bool mask);
970
971extern bool tdp_enabled;
972
973u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
974
975/* control of guest tsc rate supported? */
976extern bool kvm_has_tsc_control;
977/* maximum supported tsc_khz for guests */
978extern u32  kvm_max_guest_tsc_khz;
979/* number of bits of the fractional part of the TSC scaling ratio */
980extern u8   kvm_tsc_scaling_ratio_frac_bits;
981/* maximum allowed value of TSC scaling ratio */
982extern u64  kvm_max_tsc_scaling_ratio;
983
984enum emulation_result {
985	EMULATE_DONE,         /* no further processing */
986	EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
987	EMULATE_FAIL,         /* can't emulate this instruction */
988};
989
990#define EMULTYPE_NO_DECODE	    (1 << 0)
991#define EMULTYPE_TRAP_UD	    (1 << 1)
992#define EMULTYPE_SKIP		    (1 << 2)
993#define EMULTYPE_RETRY		    (1 << 3)
994#define EMULTYPE_NO_REEXECUTE	    (1 << 4)
995int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
996			    int emulation_type, void *insn, int insn_len);
997
998static inline int emulate_instruction(struct kvm_vcpu *vcpu,
999			int emulation_type)
1000{
1001	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
1002}
1003
1004void kvm_enable_efer_bits(u64);
1005bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
1006int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
1007int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
1008
1009struct x86_emulate_ctxt;
1010
1011int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
1012void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
1013int kvm_emulate_halt(struct kvm_vcpu *vcpu);
1014int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
1015int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
1016
1017void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
1018int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
1019void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
1020
1021int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
1022		    int reason, bool has_error_code, u32 error_code);
1023
1024int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
1025int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
1026int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
1027int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
1028int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
1029int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
1030unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
1031void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
1032void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
1033int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
1034
1035int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
1036int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
1037
1038unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
1039void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
1040bool kvm_rdpmc(struct kvm_vcpu *vcpu);
1041
1042void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
1043void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
1044void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
1045void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
1046void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
1047int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
1048			    gfn_t gfn, void *data, int offset, int len,
1049			    u32 access);
1050bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
1051bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
1052
1053static inline int __kvm_irq_line_state(unsigned long *irq_state,
1054				       int irq_source_id, int level)
1055{
1056	/* Logical OR for level trig interrupt */
1057	if (level)
1058		__set_bit(irq_source_id, irq_state);
1059	else
1060		__clear_bit(irq_source_id, irq_state);
1061
1062	return !!(*irq_state);
1063}
1064
1065int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
1066void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
1067
1068void kvm_inject_nmi(struct kvm_vcpu *vcpu);
1069
1070void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1071		       const u8 *new, int bytes);
1072int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
1073int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
1074void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
1075int kvm_mmu_load(struct kvm_vcpu *vcpu);
1076void kvm_mmu_unload(struct kvm_vcpu *vcpu);
1077void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
1078gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
1079			   struct x86_exception *exception);
1080gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
1081			      struct x86_exception *exception);
1082gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
1083			       struct x86_exception *exception);
1084gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
1085			       struct x86_exception *exception);
1086gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
1087				struct x86_exception *exception);
1088
1089int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
1090
1091int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
1092		       void *insn, int insn_len);
1093void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
1094void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
1095
1096void kvm_enable_tdp(void);
1097void kvm_disable_tdp(void);
1098
1099static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
1100				  struct x86_exception *exception)
1101{
1102	return gpa;
1103}
1104
1105static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
1106{
1107	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
1108
1109	return (struct kvm_mmu_page *)page_private(page);
1110}
1111
1112static inline u16 kvm_read_ldt(void)
1113{
1114	u16 ldt;
1115	asm("sldt %0" : "=g"(ldt));
1116	return ldt;
1117}
1118
1119static inline void kvm_load_ldt(u16 sel)
1120{
1121	asm("lldt %0" : : "rm"(sel));
1122}
1123
1124#ifdef CONFIG_X86_64
1125static inline unsigned long read_msr(unsigned long msr)
1126{
1127	u64 value;
1128
1129	rdmsrl(msr, value);
1130	return value;
1131}
1132#endif
1133
1134static inline u32 get_rdx_init_val(void)
1135{
1136	return 0x600; /* P6 family */
1137}
1138
1139static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
1140{
1141	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
1142}
1143
1144static inline u64 get_canonical(u64 la)
1145{
1146	return ((int64_t)la << 16) >> 16;
1147}
1148
1149static inline bool is_noncanonical_address(u64 la)
1150{
1151#ifdef CONFIG_X86_64
1152	return get_canonical(la) != la;
1153#else
1154	return false;
1155#endif
1156}
1157
1158#define TSS_IOPB_BASE_OFFSET 0x66
1159#define TSS_BASE_SIZE 0x68
1160#define TSS_IOPB_SIZE (65536 / 8)
1161#define TSS_REDIRECTION_SIZE (256 / 8)
1162#define RMODE_TSS_SIZE							\
1163	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
1164
1165enum {
1166	TASK_SWITCH_CALL = 0,
1167	TASK_SWITCH_IRET = 1,
1168	TASK_SWITCH_JMP = 2,
1169	TASK_SWITCH_GATE = 3,
1170};
1171
1172#define HF_GIF_MASK		(1 << 0)
1173#define HF_HIF_MASK		(1 << 1)
1174#define HF_VINTR_MASK		(1 << 2)
1175#define HF_NMI_MASK		(1 << 3)
1176#define HF_IRET_MASK		(1 << 4)
1177#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
1178#define HF_SMM_MASK		(1 << 6)
1179#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)
1180
1181#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
1182#define KVM_ADDRESS_SPACE_NUM 2
1183
1184#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
1185#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
1186
1187/*
1188 * Hardware virtualization extension instructions may fault if a
1189 * reboot turns off virtualization while processes are running.
1190 * Trap the fault and ignore the instruction if that happens.
1191 */
1192asmlinkage void kvm_spurious_fault(void);
1193
1194#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
1195	"666: " insn "\n\t" \
1196	"668: \n\t"                           \
1197	".pushsection .fixup, \"ax\" \n" \
1198	"667: \n\t" \
1199	cleanup_insn "\n\t"		      \
1200	"cmpb $0, kvm_rebooting \n\t"	      \
1201	"jne 668b \n\t"      		      \
1202	__ASM_SIZE(push) " $666b \n\t"	      \
1203	"call kvm_spurious_fault \n\t"	      \
1204	".popsection \n\t" \
1205	_ASM_EXTABLE(666b, 667b)
1206
1207#define __kvm_handle_fault_on_reboot(insn)		\
1208	____kvm_handle_fault_on_reboot(insn, "")
1209
1210#define KVM_ARCH_WANT_MMU_NOTIFIER
1211int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
1212int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
1213int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
1214int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
1215void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
1216int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
1217int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
1218int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
1219int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
1220void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
1221void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
1222void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
1223					   unsigned long address);
1224
1225void kvm_define_shared_msr(unsigned index, u32 msr);
1226int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
1227
1228u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
1229u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
1230
1231unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
1232bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
1233
1234void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1235				     struct kvm_async_pf *work);
1236void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1237				 struct kvm_async_pf *work);
1238void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1239			       struct kvm_async_pf *work);
1240bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
1241extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1242
1243void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
1244
1245int kvm_is_in_guest(void);
1246
1247int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
1248int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
1249bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
1250bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
1251
1252bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
1253			     struct kvm_vcpu **dest_vcpu);
1254
1255void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
1256		     struct kvm_lapic_irq *irq);
1257
1258static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
1259static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
1260
1261#endif /* _ASM_X86_KVM_HOST_H */
1262