#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are internally used by kvm;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)
#define KVM_MEMSLOT_INCOHERENT	(1UL << 17)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 52 ~ 62 to indicate an error pfn,
 * and mask bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the translation
 * to a host pfn failed.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn cannot be translated
 * to a pfn - either it is not in any slot or the translation
 * failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfns indicate that the gfn is not in any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
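
/*
 * Illustrative sketch (assumed caller context, not part of the API):
 * the typical pattern for consuming these helpers after a gfn->pfn
 * translation; gfn_to_pfn() is declared later in this header.
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_error_noslot_pfn(pfn)) {
 *		if (is_noslot_pfn(pfn))
 *			return -ENOENT;	(gfn not covered by any memslot)
 *		return -EFAULT;		(in a slot, but translation failed)
 *	}
 *
 * The error codes above are illustrative choices, not mandated ones.
 */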

/*
 * Architectures with a KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}
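
/*
 * Usage sketch (a minimal assumption about caller context): pages
 * returned by gfn_to_page() must be checked with is_error_page()
 * before use and eventually released.
 *
 *	struct page *page = gfn_to_page(kvm, gfn);
 *
 *	if (is_error_page(page))
 *		return -EFAULT;
 *	(... access the page ...)
 *	kvm_release_page_clean(page);
 */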

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_PMU               15
#define KVM_REQ_PMI               16
#define KVM_REQ_WATCHDOG          17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS 19
#define KVM_REQ_EPR_EXIT          20
#define KVM_REQ_SCAN_IOAPIC       21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
#define KVM_REQ_ENABLE_IBS        23
#define KVM_REQ_DISABLE_IBS       24
#define KVM_REQ_APIC_PAGE_RELOAD  25
#define KVM_REQ_SMI               26
#define KVM_REQ_HV_CRASH          27
#define KVM_REQ_IOAPIC_EOI_EXIT   28
#define KVM_REQ_HV_RESET          29

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
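
/*
 * Usage sketch (hedged; constructing "dev" with kvm_iodevice_init() and
 * its ops is assumed and not shown): register a device on the MMIO bus,
 * then route a write to whatever device claims [addr, addr + len).
 *
 *	r = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, dev);
 *	...
 *	r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, len, val);
 *
 * Both return 0 on success and a negative errno on failure.
 */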

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool   wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	unsigned char fpu_counter;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU relax intercept or pause loop exit optimization.
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or a cpu relax intercept.
	 * dy_eligible: indicates whether the vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be chosen so that such limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
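
/*
 * Worked example: on a 64-bit host (BITS_PER_LONG == 64), a slot with
 * npages == 100 needs ALIGN(100, 64) == 128 bits of dirty bitmap,
 * i.e. 128 / 8 == 16 bytes.
 */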

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
		struct kvm_s390_adapter_int adapter;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains a list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
	 * the caller has read kvm->online_vcpus before (as is the case
	 * for kvm_for_each_vcpu, for example).
	 */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}
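
/*
 * Illustrative sketch: requesting a TLB flush on every online vcpu,
 * using kvm_make_request() and kvm_vcpu_kick() declared elsewhere in
 * this header (the pairing shown is the common pattern, assumed here).
 *
 *	struct kvm_vcpu *vcpu;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm) {
 *		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *		kvm_vcpu_kick(vcpu);
 *	}
 */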

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
void kvm_arch_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
static inline void kvm_arch_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	return rcu_dereference_check(kvm->memslots[as_id],
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}
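
/*
 * Usage sketch (assumes the caller holds the SRCU read lock or
 * kvm->slots_lock, as checked by __kvm_memslots() above):
 *
 *	struct kvm_memslots *slots = kvm_memslots(kvm);
 *	struct kvm_memory_slot *slot = id_to_memslot(slots, id);
 */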

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
			   bool *async, bool write_fault, bool *writable);

void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
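
/*
 * Illustrative sketch: reading a 32-bit value from guest physical
 * memory; "gpa" is assumed to be supplied and validated by the caller.
 *
 *	u32 val;
 *
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 *
 * These accessors return 0 on success and a negative errno on failure.
 */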

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					struct kvm_memory_slot *slot,
					gfn_t gfn_offset,
					unsigned long mask);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs. On some architectures the virtual
 * interrupt controller is dynamically instantiated, so this is not
 * always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			       struct kvm *kvm, int irq_source_id,
			       int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}
#endif

/* must be called with irqs disabled */
static inline void __kvm_guest_enter(void)
{
	guest_enter();
	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from the rcu point of view.
	 * In addition a CPU may stay in a guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	if (!context_tracking_cpu_is_enabled())
		rcu_virt_note_context_switch(smp_processor_id());
}

/* must be called with irqs disabled */
static inline void __kvm_guest_exit(void)
{
	guest_exit();
}

static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_enter();
	local_irq_restore(flags);
}

static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_exit();
	local_irq_restore(flags);
}
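
/*
 * Typical usage sketch (a simplification of arch vcpu run loops, not a
 * verbatim excerpt): the double-underscore variants are for callers
 * that already run with irqs disabled.
 *
 *	local_irq_disable();
 *	__kvm_guest_enter();
 *	(... enter the guest ...)
 *	__kvm_guest_exit();
 *	local_irq_enable();
 *
 * kvm_guest_enter()/kvm_guest_exit() do the irq save/restore themselves.
 */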

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}
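
/*
 * Note: the binary search above relies on memslots[] being sorted by
 * base_gfn in descending order, with unused entries (npages == 0) at
 * the tail. Worked example: for used slots with base_gfn 0x200, 0x100
 * and 0x0, a lookup of gfn 0x180 converges on the 0x100 slot and hits
 * iff 0x180 < 0x100 + that slot's npages.
 */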

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
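
/*
 * Worked example: for a slot with base_gfn == 0x100 and
 * userspace_addr == 0x7f0000000000, gfn 0x105 yields
 * hva == 0x7f0000000000 + 5 * PAGE_SIZE (0x7f0000005000 with 4K pages).
 */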

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
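
/*
 * Worked example (4K pages, PAGE_SHIFT == 12): gpa 0x12345 lies in
 * gfn 0x12, and gfn_to_gpa(0x12) == 0x12000; gpa_to_gfn() simply
 * discards the low 12 offset bits.
 */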

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
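
/*
 * Canonical usage sketch in an arch page fault path (a simplified
 * assumption mirroring the ordering comment above):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;	(an invalidation raced; retry the fault)
 *	(... install the mapping ...)
 *	spin_unlock(&kvm->mmu_lock);
 */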

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_setup_empty_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}
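
/*
 * Usage sketch of the request API, requester side then vcpu side (the
 * pairing with kvm_vcpu_kick() is the common pattern, assumed here):
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		(... flush this vcpu's TLB before reentering the guest ...)
 */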

extern bool kvm_rebooting;

struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};
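
/*
 * Minimal sketch of a device implementation (every name below is
 * hypothetical, for illustration only):
 *
 *	static int foo_create(struct kvm_device *dev, u32 type)
 *	{
 *		dev->private = kzalloc(sizeof(struct foo), GFP_KERNEL);
 *		return dev->private ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_destroy(struct kvm_device *dev)
 *	{
 *		kfree(dev->private);
 *		kfree(dev);
 *	}
 *
 *	static struct kvm_device_ops kvm_foo_ops = {
 *		.name = "foo",
 *		.create = foo_create,
 *		.destroy = foo_destroy,
 *	};
 *
 * registered at init time via kvm_register_device_ops(&kvm_foo_ops, type).
 */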

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
			   struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
			   struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#endif