root/include/linux/kvm_host.h


DEFINITIONS

This source file includes the following definitions.
  1. is_error_pfn
  2. is_error_noslot_pfn
  3. is_noslot_pfn
  4. kvm_is_error_hva
  5. is_error_page
  6. kvm_vcpu_mapped
  7. kvm_vcpu_exiting_guest_mode
  8. kvm_dirty_bitmap_bytes
  9. kvm_second_dirty_bitmap
  10. kvm_arch_vcpu_memslots_id
  11. kvm_get_bus
  12. kvm_get_vcpu
  13. kvm_get_vcpu_by_id
  14. kvm_vcpu_get_idx
  15. kvm_arch_post_irq_ack_notifier_list_update
  16. kvm_arch_post_irq_routing_update
  17. kvm_irqfd_init
  18. kvm_irqfd_exit
  19. __kvm_memslots
  20. kvm_vcpu_memslots
  21. id_to_memslot
  22. kvm_arch_alloc_vm
  23. kvm_arch_free_vm
  24. kvm_arch_flush_remote_tlb
  25. kvm_arch_register_noncoherent_dma
  26. kvm_arch_unregister_noncoherent_dma
  27. kvm_arch_has_noncoherent_dma
  28. kvm_arch_start_assignment
  29. kvm_arch_end_assignment
  30. kvm_arch_has_assigned_device
  31. kvm_arch_vcpu_wq
  32. kvm_arch_intc_initialized
  33. search_memslots
  34. __gfn_to_memslot
  35. __gfn_to_hva_memslot
  36. memslot_id
  37. hva_to_gfn_memslot
  38. gfn_to_gpa
  39. gpa_to_gfn
  40. pfn_to_hpa
  41. kvm_vcpu_gpa_to_page
  42. kvm_is_error_gpa
  43. mmu_notifier_retry
  44. kvm_free_irq_routing
  45. kvm_irqfd
  46. kvm_irqfd_release
  47. kvm_eventfd_init
  48. kvm_irqfd
  49. kvm_irqfd_release
  50. kvm_irq_routing_update
  51. kvm_ioeventfd
  52. kvm_make_request
  53. kvm_request_pending
  54. kvm_test_request
  55. kvm_clear_request
  56. kvm_check_request
  57. kvm_vcpu_set_in_spin_loop
  58. kvm_vcpu_set_dy_eligible
  59. kvm_vcpu_set_in_spin_loop
  60. kvm_vcpu_set_dy_eligible
  61. vcpu_valid_wakeup
  62. vcpu_valid_wakeup
  63. kvm_arch_no_poll
  64. kvm_arch_vcpu_async_ioctl
  65. kvm_arch_vcpu_run_pid_change

   1 /* SPDX-License-Identifier: GPL-2.0-only */
   2 #ifndef __KVM_HOST_H
   3 #define __KVM_HOST_H
   4 
   5 
   6 #include <linux/types.h>
   7 #include <linux/hardirq.h>
   8 #include <linux/list.h>
   9 #include <linux/mutex.h>
  10 #include <linux/spinlock.h>
  11 #include <linux/signal.h>
  12 #include <linux/sched.h>
  13 #include <linux/bug.h>
  14 #include <linux/mm.h>
  15 #include <linux/mmu_notifier.h>
  16 #include <linux/preempt.h>
  17 #include <linux/msi.h>
  18 #include <linux/slab.h>
  19 #include <linux/vmalloc.h>
  20 #include <linux/rcupdate.h>
  21 #include <linux/ratelimit.h>
  22 #include <linux/err.h>
  23 #include <linux/irqflags.h>
  24 #include <linux/context_tracking.h>
  25 #include <linux/irqbypass.h>
  26 #include <linux/swait.h>
  27 #include <linux/refcount.h>
  28 #include <linux/nospec.h>
  29 #include <asm/signal.h>
  30 
  31 #include <linux/kvm.h>
  32 #include <linux/kvm_para.h>
  33 
  34 #include <linux/kvm_types.h>
  35 
  36 #include <asm/kvm_host.h>
  37 
  38 #ifndef KVM_MAX_VCPU_ID
  39 #define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
  40 #endif
  41 
  42 /*
  43  * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by
  44  * kvm; the other bits are visible to userspace and are defined in
  45  * include/uapi/linux/kvm.h.
  46  */
  47 #define KVM_MEMSLOT_INVALID     (1UL << 16)
  48 
  49 /*
  50  * Bit 63 of the memslot generation number is an "update in-progress flag",
  51  * e.g. is temporarily set for the duration of install_new_memslots().
  52  * This flag effectively creates a unique generation number that is used to
  53  * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
  54  * i.e. may (or may not) have come from the previous memslots generation.
  55  *
  56  * This is necessary because the actual memslots update is not atomic with
  57  * respect to the generation number update.  Updating the generation number
  58  * first would allow a vCPU to cache a spte from the old memslots using the
  59  * new generation number, and updating the generation number after switching
  60  * to the new memslots would allow cache hits using the old generation number
  61  * to reference the defunct memslots.
  62  *
  63  * This mechanism is used to prevent getting hits in KVM's caches while a
  64  * memslot update is in-progress, and to prevent cache hits *after* updating
  65  * the actual generation number against accesses that were inserted into the
  66  * cache *before* the memslots were updated.
  67  */
  68 #define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS      BIT_ULL(63)
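
As an illustration of the scheme described above, a minimal consumer-side check might look like the sketch below; the cache structure and both identifiers are hypothetical and not part of this header.

struct hypothetical_gfn_cache {
        u64 generation;         /* memslots generation when the entry was filled */
        gfn_t gfn;
        kvm_pfn_t pfn;
};

static bool hypothetical_cache_hit(struct kvm *kvm,
                                   struct hypothetical_gfn_cache *c, gfn_t gfn)
{
        u64 gen = kvm_memslots(kvm)->generation;        /* helper defined later in this file */

        /*
         * Entries filled under an older generation, or while an update is
         * in progress (bit 63 set), must never count as hits.
         */
        if (c->generation != gen || (gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
                return false;
        return c->gfn == gfn;
}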
  69 
  70 /* Two fragments for cross MMIO pages. */
  71 #define KVM_MAX_MMIO_FRAGMENTS  2
  72 
  73 #ifndef KVM_ADDRESS_SPACE_NUM
  74 #define KVM_ADDRESS_SPACE_NUM   1
  75 #endif
  76 
  77 /*
  78  * For a normal pfn, the highest 12 bits should be zero,
  79  * so we can mask bit 62 ~ bit 52 to indicate an error pfn and
  80  * mask bit 63 to indicate a noslot pfn.
  81  */
  82 #define KVM_PFN_ERR_MASK        (0x7ffULL << 52)
  83 #define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
  84 #define KVM_PFN_NOSLOT          (0x1ULL << 63)
  85 
  86 #define KVM_PFN_ERR_FAULT       (KVM_PFN_ERR_MASK)
  87 #define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)
  88 #define KVM_PFN_ERR_RO_FAULT    (KVM_PFN_ERR_MASK + 2)
  89 
  90 /*
  91  * Error pfns indicate that the gfn is in a slot but failed to
  92  * be translated to a pfn on the host.
  93  */
  94 static inline bool is_error_pfn(kvm_pfn_t pfn)
  95 {
  96         return !!(pfn & KVM_PFN_ERR_MASK);
  97 }
  98 
  99 /*
 100  * Error_noslot pfns indicate that the gfn cannot be translated to
 101  * a pfn: either the gfn is not in any slot, or the translation to
 102  * a pfn failed.
 103  */
 104 static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
 105 {
 106         return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
 107 }
 108 
 109 /* A noslot pfn indicates that the gfn is not in any slot. */
 110 static inline bool is_noslot_pfn(kvm_pfn_t pfn)
 111 {
 112         return pfn == KVM_PFN_NOSLOT;
 113 }
 114 
 115 /*
 116  * Architectures whose KVM_HVA_ERR_BAD is other than PAGE_OFFSET (e.g. s390)
 117  * provide their own defines and their own kvm_is_error_hva().
 118  */
 119 #ifndef KVM_HVA_ERR_BAD
 120 
 121 #define KVM_HVA_ERR_BAD         (PAGE_OFFSET)
 122 #define KVM_HVA_ERR_RO_BAD      (PAGE_OFFSET + PAGE_SIZE)
 123 
 124 static inline bool kvm_is_error_hva(unsigned long addr)
 125 {
 126         return addr >= PAGE_OFFSET;
 127 }
 128 
 129 #endif
 130 
 131 #define KVM_ERR_PTR_BAD_PAGE    (ERR_PTR(-ENOENT))
 132 
 133 static inline bool is_error_page(struct page *page)
 134 {
 135         return IS_ERR(page);
 136 }
 137 
 138 #define KVM_REQUEST_MASK           GENMASK(7,0)
 139 #define KVM_REQUEST_NO_WAKEUP      BIT(8)
 140 #define KVM_REQUEST_WAIT           BIT(9)
 141 /*
 142  * Architecture-independent vcpu->requests bit members
 143  * Bits 4-7 are reserved for more arch-independent bits.
 144  */
 145 #define KVM_REQ_TLB_FLUSH         (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 146 #define KVM_REQ_MMU_RELOAD        (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 147 #define KVM_REQ_PENDING_TIMER     2
 148 #define KVM_REQ_UNHALT            3
 149 #define KVM_REQUEST_ARCH_BASE     8
 150 
 151 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
 152         BUILD_BUG_ON((unsigned)(nr) >= (FIELD_SIZEOF(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
 153         (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
 154 })
 155 #define KVM_ARCH_REQ(nr)           KVM_ARCH_REQ_FLAGS(nr, 0)
 156 
 157 #define KVM_USERSPACE_IRQ_SOURCE_ID             0
 158 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1
 159 
 160 extern struct kmem_cache *kvm_vcpu_cache;
 161 
 162 extern struct mutex kvm_lock;
 163 extern struct list_head vm_list;
 164 
 165 struct kvm_io_range {
 166         gpa_t addr;
 167         int len;
 168         struct kvm_io_device *dev;
 169 };
 170 
 171 #define NR_IOBUS_DEVS 1000
 172 
 173 struct kvm_io_bus {
 174         int dev_count;
 175         int ioeventfd_count;
 176         struct kvm_io_range range[];
 177 };
 178 
 179 enum kvm_bus {
 180         KVM_MMIO_BUS,
 181         KVM_PIO_BUS,
 182         KVM_VIRTIO_CCW_NOTIFY_BUS,
 183         KVM_FAST_MMIO_BUS,
 184         KVM_NR_BUSES
 185 };
 186 
 187 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 188                      int len, const void *val);
 189 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
 190                             gpa_t addr, int len, const void *val, long cookie);
 191 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
 192                     int len, void *val);
 193 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 194                             int len, struct kvm_io_device *dev);
 195 void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 196                                struct kvm_io_device *dev);
 197 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 198                                          gpa_t addr);
 199 
 200 #ifdef CONFIG_KVM_ASYNC_PF
 201 struct kvm_async_pf {
 202         struct work_struct work;
 203         struct list_head link;
 204         struct list_head queue;
 205         struct kvm_vcpu *vcpu;
 206         struct mm_struct *mm;
 207         gpa_t cr2_or_gpa;
 208         unsigned long addr;
 209         struct kvm_arch_async_pf arch;
 210         bool   wakeup_all;
 211 };
 212 
 213 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
 214 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
 215 int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 216                        unsigned long hva, struct kvm_arch_async_pf *arch);
 217 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 218 #endif
 219 
 220 enum {
 221         OUTSIDE_GUEST_MODE,
 222         IN_GUEST_MODE,
 223         EXITING_GUEST_MODE,
 224         READING_SHADOW_PAGE_TABLES,
 225 };
 226 
 227 #define KVM_UNMAPPED_PAGE       ((void *) 0x500 + POISON_POINTER_DELTA)
 228 
 229 struct kvm_host_map {
 230         /*
 231          * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
 232          * a 'struct page' for it). When using the mem= kernel parameter, some
 233          * memory can be used as guest memory without being managed by the host
 234          * kernel.
 235          * If 'pfn' is not managed by the host kernel, this field is
 236          * initialized to KVM_UNMAPPED_PAGE.
 237          */
 238         struct page *page;
 239         void *hva;
 240         kvm_pfn_t pfn;
 241         kvm_pfn_t gfn;
 242 };
 243 
 244 /*
 245  * Used to check if the mapping is valid or not. Never use 'kvm_host_map'
 246  * directly to check for that.
 247  */
 248 static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
 249 {
 250         return !!map->hva;
 251 }
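
A hedged sketch of the map/use/unmap pattern these helpers support, using kvm_vcpu_map()/kvm_vcpu_unmap() declared further down in this file; the wrapper function itself is hypothetical.

static int hypothetical_peek_guest_u32(struct kvm_vcpu *vcpu, gpa_t gpa, u32 *val)
{
        struct kvm_host_map map = { .hva = NULL };
        int ret = -EFAULT;

        if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))  /* callers pass a frame number here */
                goto out;

        *val = *(u32 *)(map.hva + offset_in_page(gpa));
        ret = 0;
out:
        if (kvm_vcpu_mapped(&map))                      /* only unmap a successful map */
                kvm_vcpu_unmap(vcpu, &map, false);      /* false: page was not dirtied */
        return ret;
}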
 252 
 253 /*
 254  * Sometimes a large or cross-page mmio needs to be broken up into separate
 255  * exits for userspace servicing.
 256  */
 257 struct kvm_mmio_fragment {
 258         gpa_t gpa;
 259         void *data;
 260         unsigned len;
 261 };
 262 
 263 struct kvm_vcpu {
 264         struct kvm *kvm;
 265 #ifdef CONFIG_PREEMPT_NOTIFIERS
 266         struct preempt_notifier preempt_notifier;
 267 #endif
 268         int cpu;
 269         int vcpu_id;
 270         int srcu_idx;
 271         int mode;
 272         u64 requests;
 273         unsigned long guest_debug;
 274 
 275         int pre_pcpu;
 276         struct list_head blocked_vcpu_list;
 277 
 278         struct mutex mutex;
 279         struct kvm_run *run;
 280 
 281         int guest_xcr0_loaded;
 282         struct swait_queue_head wq;
 283         struct pid __rcu *pid;
 284         int sigset_active;
 285         sigset_t sigset;
 286         struct kvm_vcpu_stat stat;
 287         unsigned int halt_poll_ns;
 288         bool valid_wakeup;
 289 
 290 #ifdef CONFIG_HAS_IOMEM
 291         int mmio_needed;
 292         int mmio_read_completed;
 293         int mmio_is_write;
 294         int mmio_cur_fragment;
 295         int mmio_nr_fragments;
 296         struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
 297 #endif
 298 
 299 #ifdef CONFIG_KVM_ASYNC_PF
 300         struct {
 301                 u32 queued;
 302                 struct list_head queue;
 303                 struct list_head done;
 304                 spinlock_t lock;
 305         } async_pf;
 306 #endif
 307 
 308 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
 309         /*
 310          * CPU relax intercept or pause loop exit optimization.
 311          * in_spin_loop: set when a vcpu does a pause loop exit
 312          *  or a cpu relax intercept.
 313          * dy_eligible: indicates whether vcpu is eligible for directed yield.
 314          */
 315         struct {
 316                 bool in_spin_loop;
 317                 bool dy_eligible;
 318         } spin_loop;
 319 #endif
 320         bool preempted;
 321         bool ready;
 322         struct kvm_vcpu_arch arch;
 323         struct dentry *debugfs_dentry;
 324 };
 325 
 326 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
 327 {
 328         /*
 329          * The memory barrier ensures a previous write to vcpu->requests cannot
 330          * be reordered with the read of vcpu->mode.  It pairs with the general
 331          * memory barrier following the write of vcpu->mode in VCPU RUN.
 332          */
 333         smp_mb__before_atomic();
 334         return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
 335 }
 336 
 337 /*
 338  * Some of the bitops functions do not support overly long bitmaps.
 339  * This number must be chosen so that those limits are not exceeded.
 340  */
 341 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 342 
 343 struct kvm_memory_slot {
 344         gfn_t base_gfn;
 345         unsigned long npages;
 346         unsigned long *dirty_bitmap;
 347         struct kvm_arch_memory_slot arch;
 348         unsigned long userspace_addr;
 349         u32 flags;
 350         short id;
 351 };
 352 
 353 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
 354 {
 355         return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
 356 }
 357 
 358 static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
 359 {
 360         unsigned long len = kvm_dirty_bitmap_bytes(memslot);
 361 
 362         return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
 363 }
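
A short worked example of the sizing above, assuming BITS_PER_LONG == 64. The allocation side in virt/kvm/kvm_main.c reserves twice this size so that the second half returned by kvm_second_dirty_bitmap() can be used as a transfer buffer for the dirty log ioctls.

/*
 * Slot of 1048576 pages (4 GiB of guest memory with 4 KiB pages):
 *   kvm_dirty_bitmap_bytes(slot)  = ALIGN(1048576, 64) / 8 = 131072 bytes
 *   allocation in kvm_main.c      = 2 * 131072             = 262144 bytes
 *   kvm_second_dirty_bitmap(slot) = slot->dirty_bitmap + 131072 / sizeof(long)
 */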
 364 
 365 struct kvm_s390_adapter_int {
 366         u64 ind_addr;
 367         u64 summary_addr;
 368         u64 ind_offset;
 369         u32 summary_offset;
 370         u32 adapter_id;
 371 };
 372 
 373 struct kvm_hv_sint {
 374         u32 vcpu;
 375         u32 sint;
 376 };
 377 
 378 struct kvm_kernel_irq_routing_entry {
 379         u32 gsi;
 380         u32 type;
 381         int (*set)(struct kvm_kernel_irq_routing_entry *e,
 382                    struct kvm *kvm, int irq_source_id, int level,
 383                    bool line_status);
 384         union {
 385                 struct {
 386                         unsigned irqchip;
 387                         unsigned pin;
 388                 } irqchip;
 389                 struct {
 390                         u32 address_lo;
 391                         u32 address_hi;
 392                         u32 data;
 393                         u32 flags;
 394                         u32 devid;
 395                 } msi;
 396                 struct kvm_s390_adapter_int adapter;
 397                 struct kvm_hv_sint hv_sint;
 398         };
 399         struct hlist_node link;
 400 };
 401 
 402 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 403 struct kvm_irq_routing_table {
 404         int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
 405         u32 nr_rt_entries;
 406         /*
 407          * Array indexed by gsi. Each entry contains the list of irq chips
 408          * the gsi is connected to.
 409          */
 410         struct hlist_head map[0];
 411 };
 412 #endif
 413 
 414 #ifndef KVM_PRIVATE_MEM_SLOTS
 415 #define KVM_PRIVATE_MEM_SLOTS 0
 416 #endif
 417 
 418 #ifndef KVM_MEM_SLOTS_NUM
 419 #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
 420 #endif
 421 
 422 #ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
 423 static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
 424 {
 425         return 0;
 426 }
 427 #endif
 428 
 429 /*
 430  * Note:
 431  * memslots are no longer sorted by id; use id_to_memslot()
 432  * to look up a memslot by its id.
 433  */
 434 struct kvm_memslots {
 435         u64 generation;
 436         struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
 437         /* The mapping table from slot id to the index in memslots[]. */
 438         short id_to_index[KVM_MEM_SLOTS_NUM];
 439         atomic_t lru_slot;
 440         int used_slots;
 441 };
 442 
 443 struct kvm {
 444         spinlock_t mmu_lock;
 445         struct mutex slots_lock;
 446         struct mm_struct *mm; /* userspace tied to this vm */
 447         struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
 448         struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 449 
 450         /*
 451          * created_vcpus is protected by kvm->lock, and is incremented
 452          * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
 453          * incremented after storing the kvm_vcpu pointer in vcpus,
 454          * and is accessed atomically.
 455          */
 456         atomic_t online_vcpus;
 457         int created_vcpus;
 458         int last_boosted_vcpu;
 459         struct list_head vm_list;
 460         struct mutex lock;
 461         struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
 462 #ifdef CONFIG_HAVE_KVM_EVENTFD
 463         struct {
 464                 spinlock_t        lock;
 465                 struct list_head  items;
 466                 struct list_head  resampler_list;
 467                 struct mutex      resampler_lock;
 468         } irqfds;
 469         struct list_head ioeventfds;
 470 #endif
 471         struct kvm_vm_stat stat;
 472         struct kvm_arch arch;
 473         refcount_t users_count;
 474 #ifdef CONFIG_KVM_MMIO
 475         struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
 476         spinlock_t ring_lock;
 477         struct list_head coalesced_zones;
 478 #endif
 479 
 480         struct mutex irq_lock;
 481 #ifdef CONFIG_HAVE_KVM_IRQCHIP
 482         /*
 483          * Update side is protected by irq_lock.
 484          */
 485         struct kvm_irq_routing_table __rcu *irq_routing;
 486 #endif
 487 #ifdef CONFIG_HAVE_KVM_IRQFD
 488         struct hlist_head irq_ack_notifier_list;
 489 #endif
 490 
 491 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 492         struct mmu_notifier mmu_notifier;
 493         unsigned long mmu_notifier_seq;
 494         long mmu_notifier_count;
 495 #endif
 496         long tlbs_dirty;
 497         struct list_head devices;
 498         bool manual_dirty_log_protect;
 499         struct dentry *debugfs_dentry;
 500         struct kvm_stat_data **debugfs_stat_data;
 501         struct srcu_struct srcu;
 502         struct srcu_struct irq_srcu;
 503         pid_t userspace_pid;
 504 };
 505 
 506 #define kvm_err(fmt, ...) \
 507         pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
 508 #define kvm_info(fmt, ...) \
 509         pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
 510 #define kvm_debug(fmt, ...) \
 511         pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
 512 #define kvm_debug_ratelimited(fmt, ...) \
 513         pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
 514                              ## __VA_ARGS__)
 515 #define kvm_pr_unimpl(fmt, ...) \
 516         pr_err_ratelimited("kvm [%i]: " fmt, \
 517                            task_tgid_nr(current), ## __VA_ARGS__)
 518 
 519 /* The guest did something we don't support. */
 520 #define vcpu_unimpl(vcpu, fmt, ...)                                     \
 521         kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,                  \
 522                         (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)
 523 
 524 #define vcpu_debug(vcpu, fmt, ...)                                      \
 525         kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
 526 #define vcpu_debug_ratelimited(vcpu, fmt, ...)                          \
 527         kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,           \
 528                               ## __VA_ARGS__)
 529 #define vcpu_err(vcpu, fmt, ...)                                        \
 530         kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
 531 
 532 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
 533 {
 534         return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
 535                                       lockdep_is_held(&kvm->slots_lock) ||
 536                                       !refcount_read(&kvm->users_count));
 537 }
 538 
 539 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 540 {
 541         int num_vcpus = atomic_read(&kvm->online_vcpus);
 542         i = array_index_nospec(i, num_vcpus);
 543 
 544         /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
 545         smp_rmb();
 546         return kvm->vcpus[i];
 547 }
 548 
 549 #define kvm_for_each_vcpu(idx, vcpup, kvm) \
 550         for (idx = 0; \
 551              idx < atomic_read(&kvm->online_vcpus) && \
 552              (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
 553              idx++)
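
A minimal sketch of the iteration idiom; the function is hypothetical and simply kicks every online vcpu using kvm_vcpu_kick(), declared later in this file.

static void hypothetical_kick_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vcpu_kick(vcpu);    /* force each vcpu out of guest mode */
}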
 554 
 555 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
 556 {
 557         struct kvm_vcpu *vcpu = NULL;
 558         int i;
 559 
 560         if (id < 0)
 561                 return NULL;
 562         if (id < KVM_MAX_VCPUS)
 563                 vcpu = kvm_get_vcpu(kvm, id);
 564         if (vcpu && vcpu->vcpu_id == id)
 565                 return vcpu;
 566         kvm_for_each_vcpu(i, vcpu, kvm)
 567                 if (vcpu->vcpu_id == id)
 568                         return vcpu;
 569         return NULL;
 570 }
 571 
 572 static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
 573 {
 574         struct kvm_vcpu *tmp;
 575         int idx;
 576 
 577         kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
 578                 if (tmp == vcpu)
 579                         return idx;
 580         BUG();
 581 }
 582 
 583 #define kvm_for_each_memslot(memslot, slots)    \
 584         for (memslot = &slots->memslots[0];     \
 585               memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
 586                 memslot++)
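
Similarly, a hypothetical walk over the populated memslots might look like this; kvm_for_each_memslot() stops at the first slot whose npages is zero, since used slots are kept at the front of the array.

static unsigned long hypothetical_total_guest_pages(struct kvm_memslots *slots)
{
        struct kvm_memory_slot *memslot;
        unsigned long pages = 0;

        kvm_for_each_memslot(memslot, slots)
                pages += memslot->npages;
        return pages;
}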
 587 
 588 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 589 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
 590 
 591 void vcpu_load(struct kvm_vcpu *vcpu);
 592 void vcpu_put(struct kvm_vcpu *vcpu);
 593 
 594 #ifdef __KVM_HAVE_IOAPIC
 595 void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
 596 void kvm_arch_post_irq_routing_update(struct kvm *kvm);
 597 #else
 598 static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
 599 {
 600 }
 601 static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
 602 {
 603 }
 604 #endif
 605 
 606 #ifdef CONFIG_HAVE_KVM_IRQFD
 607 int kvm_irqfd_init(void);
 608 void kvm_irqfd_exit(void);
 609 #else
 610 static inline int kvm_irqfd_init(void)
 611 {
 612         return 0;
 613 }
 614 
 615 static inline void kvm_irqfd_exit(void)
 616 {
 617 }
 618 #endif
 619 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 620                   struct module *module);
 621 void kvm_exit(void);
 622 
 623 void kvm_get_kvm(struct kvm *kvm);
 624 void kvm_put_kvm(struct kvm *kvm);
 625 
 626 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 627 {
 628         as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
 629         return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
 630                         lockdep_is_held(&kvm->slots_lock) ||
 631                         !refcount_read(&kvm->users_count));
 632 }
 633 
 634 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
 635 {
 636         return __kvm_memslots(kvm, 0);
 637 }
 638 
 639 static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
 640 {
 641         int as_id = kvm_arch_vcpu_memslots_id(vcpu);
 642 
 643         return __kvm_memslots(vcpu->kvm, as_id);
 644 }
 645 
 646 static inline struct kvm_memory_slot *
 647 id_to_memslot(struct kvm_memslots *slots, int id)
 648 {
 649         int index = slots->id_to_index[id];
 650         struct kvm_memory_slot *slot;
 651 
 652         slot = &slots->memslots[index];
 653 
 654         WARN_ON(slot->id != id);
 655         return slot;
 656 }
 657 
 658 /*
 659  * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 660  * - create a new memory slot
 661  * - delete an existing memory slot
 662  * - modify an existing memory slot
 663  *   -- move it in the guest physical memory space
 664  *   -- just change its flags
 665  *
 666  * Since flags can be changed by some of these operations, the following
 667  * differentiation is the best we can do for __kvm_set_memory_region():
 668  */
 669 enum kvm_mr_change {
 670         KVM_MR_CREATE,
 671         KVM_MR_DELETE,
 672         KVM_MR_MOVE,
 673         KVM_MR_FLAGS_ONLY,
 674 };
 675 
 676 int kvm_set_memory_region(struct kvm *kvm,
 677                           const struct kvm_userspace_memory_region *mem);
 678 int __kvm_set_memory_region(struct kvm *kvm,
 679                             const struct kvm_userspace_memory_region *mem);
 680 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
 681                            struct kvm_memory_slot *dont);
 682 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 683                             unsigned long npages);
 684 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
 685 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 686                                 struct kvm_memory_slot *memslot,
 687                                 const struct kvm_userspace_memory_region *mem,
 688                                 enum kvm_mr_change change);
 689 void kvm_arch_commit_memory_region(struct kvm *kvm,
 690                                 const struct kvm_userspace_memory_region *mem,
 691                                 const struct kvm_memory_slot *old,
 692                                 const struct kvm_memory_slot *new,
 693                                 enum kvm_mr_change change);
 694 bool kvm_largepages_enabled(void);
 695 void kvm_disable_largepages(void);
 696 /* flush all memory translations */
 697 void kvm_arch_flush_shadow_all(struct kvm *kvm);
 698 /* flush memory translations pointing to 'slot' */
 699 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 700                                    struct kvm_memory_slot *slot);
 701 
 702 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 703                             struct page **pages, int nr_pages);
 704 
 705 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 706 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 707 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
 708 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 709 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
 710                                       bool *writable);
 711 void kvm_release_page_clean(struct page *page);
 712 void kvm_release_page_dirty(struct page *page);
 713 void kvm_set_page_accessed(struct page *page);
 714 
 715 kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
 716 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 717 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 718                       bool *writable);
 719 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 720 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
 721 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
 722                                bool atomic, bool *async, bool write_fault,
 723                                bool *writable);
 724 
 725 void kvm_release_pfn_clean(kvm_pfn_t pfn);
 726 void kvm_release_pfn_dirty(kvm_pfn_t pfn);
 727 void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 728 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 729 void kvm_get_pfn(kvm_pfn_t pfn);
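
A hedged sketch of the lookup/check/release pattern that the pfn helpers above and the error predicates near the top of this file are meant to be combined into; the probing function is hypothetical.

static int hypothetical_probe_gfn(struct kvm *kvm, gfn_t gfn)
{
        kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);

        if (is_noslot_pfn(pfn))         /* gfn is not covered by any memslot */
                return -ENOENT;
        if (is_error_pfn(pfn))          /* slot exists, host translation failed */
                return -EFAULT;

        /* ... use the pfn ... */
        kvm_release_pfn_clean(pfn);     /* drop the reference taken by gfn_to_pfn() */
        return 0;
}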
 730 
 731 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
 732 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 733                         int len);
 734 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
 735                           unsigned long len);
 736 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
 737 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 738                            void *data, unsigned long len);
 739 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 740                          int offset, int len);
 741 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 742                     unsigned long len);
 743 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 744                            void *data, unsigned long len);
 745 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 746                                   void *data, unsigned int offset,
 747                                   unsigned long len);
 748 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 749                               gpa_t gpa, unsigned long len);
 750 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 751 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 752 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 753 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 754 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
 755 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
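
A hedged sketch of how the gfn_to_hva_cache helpers are typically combined: initialize the cache once for a fixed guest address, then write through it repeatedly without re-resolving the memslot. The structure and both functions are hypothetical.

struct hypothetical_shared_rec {
        struct gfn_to_hva_cache ghc;    /* defined in linux/kvm_types.h */
        gpa_t gpa;
};

static int hypothetical_shared_rec_init(struct kvm *kvm,
                                        struct hypothetical_shared_rec *rec, gpa_t gpa)
{
        rec->gpa = gpa;
        /* resolve and cache gpa -> hva once */
        return kvm_gfn_to_hva_cache_init(kvm, &rec->ghc, gpa, sizeof(u64));
}

static int hypothetical_shared_rec_store(struct kvm *kvm,
                                         struct hypothetical_shared_rec *rec, u64 val)
{
        /* falls back to a full gfn lookup if the memslots generation changed */
        return kvm_write_guest_cached(kvm, &rec->ghc, &val, sizeof(val));
}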
 756 
 757 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
 758 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
 759 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 760 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 761 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
 762 int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
 763                 struct gfn_to_pfn_cache *cache, bool atomic);
 764 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 765 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
 766 int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 767                   struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 768 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 769 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 770 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
 771                              int len);
 772 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
 773                                unsigned long len);
 774 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
 775                         unsigned long len);
 776 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
 777                               int offset, int len);
 778 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
 779                          unsigned long len);
 780 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
 781 
 782 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 783 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 784 
 785 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 786 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 787 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
 788 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
 789 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 790 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
 791 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
 792 
 793 void kvm_flush_remote_tlbs(struct kvm *kvm);
 794 void kvm_reload_remote_mmus(struct kvm *kvm);
 795 
 796 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 797                                  unsigned long *vcpu_bitmap, cpumask_var_t tmp);
 798 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
 799 
 800 long kvm_arch_dev_ioctl(struct file *filp,
 801                         unsigned int ioctl, unsigned long arg);
 802 long kvm_arch_vcpu_ioctl(struct file *filp,
 803                          unsigned int ioctl, unsigned long arg);
 804 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
 805 
 806 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
 807 
 808 int kvm_get_dirty_log(struct kvm *kvm,
 809                         struct kvm_dirty_log *log, int *is_dirty);
 810 
 811 int kvm_get_dirty_log_protect(struct kvm *kvm,
 812                               struct kvm_dirty_log *log, bool *flush);
 813 int kvm_clear_dirty_log_protect(struct kvm *kvm,
 814                                 struct kvm_clear_dirty_log *log, bool *flush);
 815 
 816 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 817                                         struct kvm_memory_slot *slot,
 818                                         gfn_t gfn_offset,
 819                                         unsigned long mask);
 820 
 821 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 822                                 struct kvm_dirty_log *log);
 823 int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
 824                                   struct kvm_clear_dirty_log *log);
 825 
 826 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 827                         bool line_status);
 828 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 829                             struct kvm_enable_cap *cap);
 830 long kvm_arch_vm_ioctl(struct file *filp,
 831                        unsigned int ioctl, unsigned long arg);
 832 
 833 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
 834 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
 835 
 836 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 837                                     struct kvm_translation *tr);
 838 
 839 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
 840 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
 841 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 842                                   struct kvm_sregs *sregs);
 843 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 844                                   struct kvm_sregs *sregs);
 845 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 846                                     struct kvm_mp_state *mp_state);
 847 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 848                                     struct kvm_mp_state *mp_state);
 849 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 850                                         struct kvm_guest_debug *dbg);
 851 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 852 
 853 int kvm_arch_init(void *opaque);
 854 void kvm_arch_exit(void);
 855 
 856 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
 857 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
 858 
 859 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
 860 
 861 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
 862 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 863 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
 864 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
 865 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
 866 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 867 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
 868 
 869 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
 870 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
 871 #endif
 872 
 873 int kvm_arch_hardware_enable(void);
 874 void kvm_arch_hardware_disable(void);
 875 int kvm_arch_hardware_setup(void);
 876 void kvm_arch_hardware_unsetup(void);
 877 int kvm_arch_check_processor_compat(void);
 878 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 879 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 880 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 881 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
 882 
 883 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
 884 /*
 885  * All architectures that want to use vzalloc currently also
 886  * need their own kvm_arch_alloc_vm implementation.
 887  */
 888 static inline struct kvm *kvm_arch_alloc_vm(void)
 889 {
 890         return kzalloc(sizeof(struct kvm), GFP_KERNEL);
 891 }
 892 
 893 static inline void kvm_arch_free_vm(struct kvm *kvm)
 894 {
 895         kfree(kvm);
 896 }
 897 #endif
 898 
 899 #ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
 900 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 901 {
 902         return -ENOTSUPP;
 903 }
 904 #endif
 905 
 906 #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
 907 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
 908 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
 909 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
 910 #else
 911 static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
 912 {
 913 }
 914 
 915 static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
 916 {
 917 }
 918 
 919 static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
 920 {
 921         return false;
 922 }
 923 #endif
 924 #ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
 925 void kvm_arch_start_assignment(struct kvm *kvm);
 926 void kvm_arch_end_assignment(struct kvm *kvm);
 927 bool kvm_arch_has_assigned_device(struct kvm *kvm);
 928 #else
 929 static inline void kvm_arch_start_assignment(struct kvm *kvm)
 930 {
 931 }
 932 
 933 static inline void kvm_arch_end_assignment(struct kvm *kvm)
 934 {
 935 }
 936 
 937 static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
 938 {
 939         return false;
 940 }
 941 #endif
 942 
 943 static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 944 {
 945 #ifdef __KVM_HAVE_ARCH_WQP
 946         return vcpu->arch.wqp;
 947 #else
 948         return &vcpu->wq;
 949 #endif
 950 }
 951 
 952 #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
 953 /*
 954  * Returns true if the virtual interrupt controller is initialized and
 955  * ready to accept virtual IRQs. On some architectures the virtual interrupt
 956  * controller is dynamically instantiated, so this is not always the case.
 957  */
 958 bool kvm_arch_intc_initialized(struct kvm *kvm);
 959 #else
 960 static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
 961 {
 962         return true;
 963 }
 964 #endif
 965 
 966 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
 967 void kvm_arch_destroy_vm(struct kvm *kvm);
 968 void kvm_arch_sync_events(struct kvm *kvm);
 969 
 970 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 971 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 972 
 973 bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
 974 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
 975 
 976 struct kvm_irq_ack_notifier {
 977         struct hlist_node link;
 978         unsigned gsi;
 979         void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
 980 };
 981 
 982 int kvm_irq_map_gsi(struct kvm *kvm,
 983                     struct kvm_kernel_irq_routing_entry *entries, int gsi);
 984 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
 985 
 986 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
 987                 bool line_status);
 988 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
 989                 int irq_source_id, int level, bool line_status);
 990 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
 991                                struct kvm *kvm, int irq_source_id,
 992                                int level, bool line_status);
 993 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
 994 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
 995 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 996 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 997                                    struct kvm_irq_ack_notifier *kian);
 998 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 999                                    struct kvm_irq_ack_notifier *kian);
1000 int kvm_request_irq_source_id(struct kvm *kvm);
1001 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
1002 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1003 
1004 /*
1005  * search_memslots() and __gfn_to_memslot() are here because they are
1006  * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
1007  * gfn_to_memslot() itself isn't here as an inline because that would
1008  * bloat other code too much.
1009  */
1010 static inline struct kvm_memory_slot *
1011 search_memslots(struct kvm_memslots *slots, gfn_t gfn)
1012 {
1013         int start = 0, end = slots->used_slots;
1014         int slot = atomic_read(&slots->lru_slot);
1015         struct kvm_memory_slot *memslots = slots->memslots;
1016 
1017         if (gfn >= memslots[slot].base_gfn &&
1018             gfn < memslots[slot].base_gfn + memslots[slot].npages)
1019                 return &memslots[slot];
1020 
1021         while (start < end) {
1022                 slot = start + (end - start) / 2;
1023 
1024                 if (gfn >= memslots[slot].base_gfn)
1025                         end = slot;
1026                 else
1027                         start = slot + 1;
1028         }
1029 
1030         if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
1031             gfn < memslots[start].base_gfn + memslots[start].npages) {
1032                 atomic_set(&slots->lru_slot, start);
1033                 return &memslots[start];
1034         }
1035 
1036         return NULL;
1037 }
1038 
1039 static inline struct kvm_memory_slot *
1040 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
1041 {
1042         return search_memslots(slots, gfn);
1043 }
1044 
1045 static inline unsigned long
1046 __gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
1047 {
1048         return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
1049 }
1050 
1051 static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
1052 {
1053         return gfn_to_memslot(kvm, gfn)->id;
1054 }
1055 
1056 static inline gfn_t
1057 hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
1058 {
1059         gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
1060 
1061         return slot->base_gfn + gfn_offset;
1062 }
1063 
1064 static inline gpa_t gfn_to_gpa(gfn_t gfn)
1065 {
1066         return (gpa_t)gfn << PAGE_SHIFT;
1067 }
1068 
1069 static inline gfn_t gpa_to_gfn(gpa_t gpa)
1070 {
1071         return (gfn_t)(gpa >> PAGE_SHIFT);
1072 }
1073 
1074 static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
1075 {
1076         return (hpa_t)pfn << PAGE_SHIFT;
1077 }
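
A short worked example of the conversions above, assuming 4 KiB pages (PAGE_SHIFT == 12):

/*
 *   gpa                    = 0x123456789
 *   gpa_to_gfn(gpa)        = 0x123456              (gpa >> 12)
 *   gfn_to_gpa(0x123456)   = 0x123456000           (gfn << 12, page aligned)
 *   __gfn_to_hva_memslot(slot, gfn)
 *          = slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE
 *   The in-page offset (0x789 here) is dropped by gpa_to_gfn() and must be
 *   re-added by the caller, e.g. via offset_in_page(gpa).
 */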
1078 
1079 static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
1080                                                 gpa_t gpa)
1081 {
1082         return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
1083 }
1084 
1085 static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
1086 {
1087         unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
1088 
1089         return kvm_is_error_hva(hva);
1090 }
1091 
1092 enum kvm_stat_kind {
1093         KVM_STAT_VM,
1094         KVM_STAT_VCPU,
1095 };
1096 
1097 struct kvm_stat_data {
1098         int offset;
1099         int mode;
1100         struct kvm *kvm;
1101 };
1102 
1103 struct kvm_stats_debugfs_item {
1104         const char *name;
1105         int offset;
1106         enum kvm_stat_kind kind;
1107         int mode;
1108 };
1109 extern struct kvm_stats_debugfs_item debugfs_entries[];
1110 extern struct dentry *kvm_debugfs_dir;
1111 
1112 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1113 static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
1114 {
1115         if (unlikely(kvm->mmu_notifier_count))
1116                 return 1;
1117         /*
1118          * Ensure the read of mmu_notifier_count happens before the read
1119          * of mmu_notifier_seq.  This interacts with the smp_wmb() in
1120          * mmu_notifier_invalidate_range_end to make sure that the caller
1121          * either sees the old (non-zero) value of mmu_notifier_count or
1122          * the new (incremented) value of mmu_notifier_seq.
1123          * PowerPC Book3s HV KVM calls this under a per-page lock
1124          * rather than under kvm->mmu_lock, for scalability, so
1125          * can't rely on kvm->mmu_lock to keep things ordered.
1126          */
1127         smp_rmb();
1128         if (kvm->mmu_notifier_seq != mmu_seq)
1129                 return 1;
1130         return 0;
1131 }
1132 #endif
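
A hedged sketch of the canonical retry pattern built around mmu_notifier_retry(): snapshot mmu_notifier_seq before resolving the pfn, then re-check under mmu_lock before installing the translation. The fault handler below is hypothetical, only meaningful when the mmu notifier fields above are compiled in, and the install step is arch-specific.

static int hypothetical_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long mmu_seq;
        kvm_pfn_t pfn;

        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();                              /* read the seq before translating */

        pfn = gfn_to_pfn(kvm, gfn);
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;

        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                /* an invalidation raced with us: drop the pfn and retry the fault */
                spin_unlock(&kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                return -EAGAIN;
        }
        /* ... install the translation in the shadow / stage-2 tables ... */
        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}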
1133 
1134 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
1135 
1136 #define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
1137 
1138 bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
1139 int kvm_set_irq_routing(struct kvm *kvm,
1140                         const struct kvm_irq_routing_entry *entries,
1141                         unsigned nr,
1142                         unsigned flags);
1143 int kvm_set_routing_entry(struct kvm *kvm,
1144                           struct kvm_kernel_irq_routing_entry *e,
1145                           const struct kvm_irq_routing_entry *ue);
1146 void kvm_free_irq_routing(struct kvm *kvm);
1147 
1148 #else
1149 
1150 static inline void kvm_free_irq_routing(struct kvm *kvm) {}
1151 
1152 #endif
1153 
1154 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
1155 
1156 #ifdef CONFIG_HAVE_KVM_EVENTFD
1157 
1158 void kvm_eventfd_init(struct kvm *kvm);
1159 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
1160 
1161 #ifdef CONFIG_HAVE_KVM_IRQFD
1162 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
1163 void kvm_irqfd_release(struct kvm *kvm);
1164 void kvm_irq_routing_update(struct kvm *);
1165 #else
1166 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
1167 {
1168         return -EINVAL;
1169 }
1170 
1171 static inline void kvm_irqfd_release(struct kvm *kvm) {}
1172 #endif
1173 
1174 #else
1175 
1176 static inline void kvm_eventfd_init(struct kvm *kvm) {}
1177 
1178 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
1179 {
1180         return -EINVAL;
1181 }
1182 
1183 static inline void kvm_irqfd_release(struct kvm *kvm) {}
1184 
1185 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1186 static inline void kvm_irq_routing_update(struct kvm *kvm)
1187 {
1188 }
1189 #endif
1190 
1191 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
1192 {
1193         return -ENOSYS;
1194 }
1195 
1196 #endif /* CONFIG_HAVE_KVM_EVENTFD */
1197 
1198 void kvm_arch_irq_routing_update(struct kvm *kvm);
1199 
1200 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
1201 {
1202         /*
1203          * Ensure the rest of the request is published to kvm_check_request's
1204          * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
1205          */
1206         smp_wmb();
1207         set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
1208 }
1209 
1210 static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
1211 {
1212         return READ_ONCE(vcpu->requests);
1213 }
1214 
1215 static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
1216 {
1217         return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
1218 }
1219 
1220 static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
1221 {
1222         clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
1223 }
1224 
1225 static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
1226 {
1227         if (kvm_test_request(req, vcpu)) {
1228                 kvm_clear_request(req, vcpu);
1229 
1230                 /*
1231                  * Ensure the rest of the request is visible to kvm_check_request's
1232                  * caller.  Paired with the smp_wmb in kvm_make_request.
1233                  */
1234                 smp_mb__after_atomic();
1235                 return true;
1236         } else {
1237                 return false;
1238         }
1239 }
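
A hedged sketch of the producer/consumer pairing these request helpers implement; the request name and both functions are hypothetical, and the consumer side would normally live in an architecture's vcpu run loop.

#define KVM_REQ_EXAMPLE         KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT)

static void hypothetical_post_example_request(struct kvm_vcpu *vcpu)
{
        kvm_make_request(KVM_REQ_EXAMPLE, vcpu);        /* publish data first, then set the bit */
        kvm_vcpu_kick(vcpu);                            /* force an exit from guest mode */
}

static void hypothetical_service_requests(struct kvm_vcpu *vcpu)
{
        if (!kvm_request_pending(vcpu))
                return;
        if (kvm_check_request(KVM_REQ_EXAMPLE, vcpu)) {
                /* handle the request here, before re-entering the guest */
        }
}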
1240 
1241 extern bool kvm_rebooting;
1242 
1243 extern unsigned int halt_poll_ns;
1244 extern unsigned int halt_poll_ns_grow;
1245 extern unsigned int halt_poll_ns_grow_start;
1246 extern unsigned int halt_poll_ns_shrink;
1247 
1248 struct kvm_device {
1249         struct kvm_device_ops *ops;
1250         struct kvm *kvm;
1251         void *private;
1252         struct list_head vm_node;
1253 };
1254 
1255 /* create, destroy, and name are mandatory */
1256 struct kvm_device_ops {
1257         const char *name;
1258 
1259         /*
1260          * create is called holding kvm->lock and any operations not suitable
1261          * to do while holding the lock should be deferred to init (see
1262          * below).
1263          */
1264         int (*create)(struct kvm_device *dev, u32 type);
1265 
1266         /*
1267          * init is called after create if create is successful and is called
1268          * outside of holding kvm->lock.
1269          */
1270         void (*init)(struct kvm_device *dev);
1271 
1272         /*
1273          * Destroy is responsible for freeing dev.
1274          *
1275          * Destroy may be called before or after destructors are called
1276          * on emulated I/O regions, depending on whether a reference is
1277          * held by a vcpu or other kvm component that gets destroyed
1278          * after the emulated I/O.
1279          */
1280         void (*destroy)(struct kvm_device *dev);
1281 
1282         /*
1283          * Release is an alternative method to free the device. It is
1284          * called when the device file descriptor is closed. Once
1285          * release is called, the destroy method will not be called
1286          * anymore as the device is removed from the device list of
1287          * the VM. kvm->lock is held.
1288          */
1289         void (*release)(struct kvm_device *dev);
1290 
1291         int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
1292         int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
1293         int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
1294         long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
1295                       unsigned long arg);
1296         int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
1297 };
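
A minimal hedged sketch of a device backend wiring up the mandatory callbacks; every identifier below is hypothetical.

static int hypothetical_dev_create(struct kvm_device *dev, u32 type)
{
        /* kvm->lock is held here; defer anything unsuitable to .init */
        dev->private = NULL;
        return 0;
}

static void hypothetical_dev_destroy(struct kvm_device *dev)
{
        kfree(dev->private);
        kfree(dev);             /* destroy() must free dev itself */
}

static struct kvm_device_ops hypothetical_dev_ops = {
        .name    = "hypothetical",
        .create  = hypothetical_dev_create,
        .destroy = hypothetical_dev_destroy,
};

/*
 * Registered once, e.g. from arch init code, with a device type id taken
 * from the KVM_DEV_TYPE_* enum in the uapi header:
 *      kvm_register_device_ops(&hypothetical_dev_ops, type);
 */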
1298 
1299 void kvm_device_get(struct kvm_device *dev);
1300 void kvm_device_put(struct kvm_device *dev);
1301 struct kvm_device *kvm_device_from_filp(struct file *filp);
1302 int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
1303 void kvm_unregister_device_ops(u32 type);
1304 
1305 extern struct kvm_device_ops kvm_mpic_ops;
1306 extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
1307 extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
1308 
1309 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
1310 
1311 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
1312 {
1313         vcpu->spin_loop.in_spin_loop = val;
1314 }
1315 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
1316 {
1317         vcpu->spin_loop.dy_eligible = val;
1318 }
1319 
1320 #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
1321 
1322 static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
1323 {
1324 }
1325 
1326 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
1327 {
1328 }
1329 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
1330 
1331 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
1332 bool kvm_arch_has_irq_bypass(void);
1333 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
1334                            struct irq_bypass_producer *);
1335 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
1336                            struct irq_bypass_producer *);
1337 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
1338 void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
1339 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
1340                                   uint32_t guest_irq, bool set);
1341 #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
1342 
1343 #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
1344 /* If we wake up during the poll time, was it a successful poll? */
1345 static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
1346 {
1347         return vcpu->valid_wakeup;
1348 }
1349 
1350 #else
1351 static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
1352 {
1353         return true;
1354 }
1355 #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
1356 
1357 #ifdef CONFIG_HAVE_KVM_NO_POLL
1358 /* Callback that tells if we must not poll */
1359 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
1360 #else
1361 static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
1362 {
1363         return false;
1364 }
1365 #endif /* CONFIG_HAVE_KVM_NO_POLL */
1366 
1367 #ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
1368 long kvm_arch_vcpu_async_ioctl(struct file *filp,
1369                                unsigned int ioctl, unsigned long arg);
1370 #else
1371 static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
1372                                              unsigned int ioctl,
1373                                              unsigned long arg)
1374 {
1375         return -ENOIOCTLCMD;
1376 }
1377 #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
1378 
1379 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
1380                                             unsigned long start, unsigned long end);
1381 
1382 #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
1383 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
1384 #else
1385 static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
1386 {
1387         return 0;
1388 }
1389 #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
1390 
1391 typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
1392 
1393 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
1394                                 uintptr_t data, const char *name,
1395                                 struct task_struct **thread_ptr);
1396 
1397 #endif
