1/* 2 * Kernel-based Virtual Machine driver for Linux 3 * 4 * This module enables machines with Intel VT-x extensions to run virtual 5 * machines without emulation or binary translation. 6 * 7 * Copyright (C) 2006 Qumranet, Inc. 8 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 9 * 10 * Authors: 11 * Avi Kivity <avi@qumranet.com> 12 * Yaniv Kamay <yaniv@qumranet.com> 13 * 14 * This work is licensed under the terms of the GNU GPL, version 2. See 15 * the COPYING file in the top-level directory. 16 * 17 */ 18 19#include <kvm/iodev.h> 20 21#include <linux/kvm_host.h> 22#include <linux/kvm.h> 23#include <linux/module.h> 24#include <linux/errno.h> 25#include <linux/percpu.h> 26#include <linux/mm.h> 27#include <linux/miscdevice.h> 28#include <linux/vmalloc.h> 29#include <linux/reboot.h> 30#include <linux/debugfs.h> 31#include <linux/highmem.h> 32#include <linux/file.h> 33#include <linux/syscore_ops.h> 34#include <linux/cpu.h> 35#include <linux/sched.h> 36#include <linux/cpumask.h> 37#include <linux/smp.h> 38#include <linux/anon_inodes.h> 39#include <linux/profile.h> 40#include <linux/kvm_para.h> 41#include <linux/pagemap.h> 42#include <linux/mman.h> 43#include <linux/swap.h> 44#include <linux/bitops.h> 45#include <linux/spinlock.h> 46#include <linux/compat.h> 47#include <linux/srcu.h> 48#include <linux/hugetlb.h> 49#include <linux/slab.h> 50#include <linux/sort.h> 51#include <linux/bsearch.h> 52 53#include <asm/processor.h> 54#include <asm/io.h> 55#include <asm/ioctl.h> 56#include <asm/uaccess.h> 57#include <asm/pgtable.h> 58 59#include "coalesced_mmio.h" 60#include "async_pf.h" 61#include "vfio.h" 62 63#define CREATE_TRACE_POINTS 64#include <trace/events/kvm.h> 65 66MODULE_AUTHOR("Qumranet"); 67MODULE_LICENSE("GPL"); 68 69/* Architectures should define their poll value according to the halt latency */ 70static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT; 71module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR); 72 73/* Default doubles per-vcpu halt_poll_ns. */ 74static unsigned int halt_poll_ns_grow = 2; 75module_param(halt_poll_ns_grow, int, S_IRUGO); 76 77/* Default resets per-vcpu halt_poll_ns . 
*/ 78static unsigned int halt_poll_ns_shrink; 79module_param(halt_poll_ns_shrink, int, S_IRUGO); 80 81/* 82 * Ordering of locks: 83 * 84 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock 85 */ 86 87DEFINE_SPINLOCK(kvm_lock); 88static DEFINE_RAW_SPINLOCK(kvm_count_lock); 89LIST_HEAD(vm_list); 90 91static cpumask_var_t cpus_hardware_enabled; 92static int kvm_usage_count; 93static atomic_t hardware_enable_failed; 94 95struct kmem_cache *kvm_vcpu_cache; 96EXPORT_SYMBOL_GPL(kvm_vcpu_cache); 97 98static __read_mostly struct preempt_ops kvm_preempt_ops; 99 100struct dentry *kvm_debugfs_dir; 101EXPORT_SYMBOL_GPL(kvm_debugfs_dir); 102 103static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 104 unsigned long arg); 105#ifdef CONFIG_KVM_COMPAT 106static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl, 107 unsigned long arg); 108#endif 109static int hardware_enable_all(void); 110static void hardware_disable_all(void); 111 112static void kvm_io_bus_destroy(struct kvm_io_bus *bus); 113 114static void kvm_release_pfn_dirty(pfn_t pfn); 115static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn); 116 117__visible bool kvm_rebooting; 118EXPORT_SYMBOL_GPL(kvm_rebooting); 119 120static bool largepages_enabled = true; 121 122bool kvm_is_reserved_pfn(pfn_t pfn) 123{ 124 if (pfn_valid(pfn)) 125 return PageReserved(pfn_to_page(pfn)); 126 127 return true; 128} 129 130/* 131 * Switches to specified vcpu, until a matching vcpu_put() 132 */ 133int vcpu_load(struct kvm_vcpu *vcpu) 134{ 135 int cpu; 136 137 if (mutex_lock_killable(&vcpu->mutex)) 138 return -EINTR; 139 cpu = get_cpu(); 140 preempt_notifier_register(&vcpu->preempt_notifier); 141 kvm_arch_vcpu_load(vcpu, cpu); 142 put_cpu(); 143 return 0; 144} 145 146void vcpu_put(struct kvm_vcpu *vcpu) 147{ 148 preempt_disable(); 149 kvm_arch_vcpu_put(vcpu); 150 preempt_notifier_unregister(&vcpu->preempt_notifier); 151 preempt_enable(); 152 mutex_unlock(&vcpu->mutex); 153} 154 155static void ack_flush(void *_completed) 156{ 157} 158 159bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) 160{ 161 int i, cpu, me; 162 cpumask_var_t cpus; 163 bool called = true; 164 struct kvm_vcpu *vcpu; 165 166 zalloc_cpumask_var(&cpus, GFP_ATOMIC); 167 168 me = get_cpu(); 169 kvm_for_each_vcpu(i, vcpu, kvm) { 170 kvm_make_request(req, vcpu); 171 cpu = vcpu->cpu; 172 173 /* Set ->requests bit before we read ->mode */ 174 smp_mb(); 175 176 if (cpus != NULL && cpu != -1 && cpu != me && 177 kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE) 178 cpumask_set_cpu(cpu, cpus); 179 } 180 if (unlikely(cpus == NULL)) 181 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); 182 else if (!cpumask_empty(cpus)) 183 smp_call_function_many(cpus, ack_flush, NULL, 1); 184 else 185 called = false; 186 put_cpu(); 187 free_cpumask_var(cpus); 188 return called; 189} 190 191#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 192void kvm_flush_remote_tlbs(struct kvm *kvm) 193{ 194 long dirty_count = kvm->tlbs_dirty; 195 196 smp_mb(); 197 if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 198 ++kvm->stat.remote_tlb_flush; 199 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); 200} 201EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 202#endif 203 204void kvm_reload_remote_mmus(struct kvm *kvm) 205{ 206 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); 207} 208 209void kvm_make_mclock_inprogress_request(struct kvm *kvm) 210{ 211 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS); 212} 213 214void kvm_make_scan_ioapic_request(struct kvm *kvm) 215{ 
216 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC); 217} 218 219int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) 220{ 221 struct page *page; 222 int r; 223 224 mutex_init(&vcpu->mutex); 225 vcpu->cpu = -1; 226 vcpu->kvm = kvm; 227 vcpu->vcpu_id = id; 228 vcpu->pid = NULL; 229 vcpu->halt_poll_ns = 0; 230 init_waitqueue_head(&vcpu->wq); 231 kvm_async_pf_vcpu_init(vcpu); 232 233 vcpu->pre_pcpu = -1; 234 INIT_LIST_HEAD(&vcpu->blocked_vcpu_list); 235 236 page = alloc_page(GFP_KERNEL | __GFP_ZERO); 237 if (!page) { 238 r = -ENOMEM; 239 goto fail; 240 } 241 vcpu->run = page_address(page); 242 243 kvm_vcpu_set_in_spin_loop(vcpu, false); 244 kvm_vcpu_set_dy_eligible(vcpu, false); 245 vcpu->preempted = false; 246 247 r = kvm_arch_vcpu_init(vcpu); 248 if (r < 0) 249 goto fail_free_run; 250 return 0; 251 252fail_free_run: 253 free_page((unsigned long)vcpu->run); 254fail: 255 return r; 256} 257EXPORT_SYMBOL_GPL(kvm_vcpu_init); 258 259void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) 260{ 261 put_pid(vcpu->pid); 262 kvm_arch_vcpu_uninit(vcpu); 263 free_page((unsigned long)vcpu->run); 264} 265EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); 266 267#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 268static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) 269{ 270 return container_of(mn, struct kvm, mmu_notifier); 271} 272 273static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, 274 struct mm_struct *mm, 275 unsigned long address) 276{ 277 struct kvm *kvm = mmu_notifier_to_kvm(mn); 278 int need_tlb_flush, idx; 279 280 /* 281 * When ->invalidate_page runs, the linux pte has been zapped 282 * already but the page is still allocated until 283 * ->invalidate_page returns. So if we increase the sequence 284 * here the kvm page fault will notice if the spte can't be 285 * established because the page is going to be freed. If 286 * instead the kvm page fault establishes the spte before 287 * ->invalidate_page runs, kvm_unmap_hva will release it 288 * before returning. 289 * 290 * The sequence increase only need to be seen at spin_unlock 291 * time, and not at spin_lock time. 292 * 293 * Increasing the sequence after the spin_unlock would be 294 * unsafe because the kvm page fault could then establish the 295 * pte after kvm_unmap_hva returned, without noticing the page 296 * is going to be freed. 
297 */ 298 idx = srcu_read_lock(&kvm->srcu); 299 spin_lock(&kvm->mmu_lock); 300 301 kvm->mmu_notifier_seq++; 302 need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty; 303 /* we've to flush the tlb before the pages can be freed */ 304 if (need_tlb_flush) 305 kvm_flush_remote_tlbs(kvm); 306 307 spin_unlock(&kvm->mmu_lock); 308 309 kvm_arch_mmu_notifier_invalidate_page(kvm, address); 310 311 srcu_read_unlock(&kvm->srcu, idx); 312} 313 314static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, 315 struct mm_struct *mm, 316 unsigned long address, 317 pte_t pte) 318{ 319 struct kvm *kvm = mmu_notifier_to_kvm(mn); 320 int idx; 321 322 idx = srcu_read_lock(&kvm->srcu); 323 spin_lock(&kvm->mmu_lock); 324 kvm->mmu_notifier_seq++; 325 kvm_set_spte_hva(kvm, address, pte); 326 spin_unlock(&kvm->mmu_lock); 327 srcu_read_unlock(&kvm->srcu, idx); 328} 329 330static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 331 struct mm_struct *mm, 332 unsigned long start, 333 unsigned long end) 334{ 335 struct kvm *kvm = mmu_notifier_to_kvm(mn); 336 int need_tlb_flush = 0, idx; 337 338 idx = srcu_read_lock(&kvm->srcu); 339 spin_lock(&kvm->mmu_lock); 340 /* 341 * The count increase must become visible at unlock time as no 342 * spte can be established without taking the mmu_lock and 343 * count is also read inside the mmu_lock critical section. 344 */ 345 kvm->mmu_notifier_count++; 346 need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); 347 need_tlb_flush |= kvm->tlbs_dirty; 348 /* we've to flush the tlb before the pages can be freed */ 349 if (need_tlb_flush) 350 kvm_flush_remote_tlbs(kvm); 351 352 spin_unlock(&kvm->mmu_lock); 353 srcu_read_unlock(&kvm->srcu, idx); 354} 355 356static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, 357 struct mm_struct *mm, 358 unsigned long start, 359 unsigned long end) 360{ 361 struct kvm *kvm = mmu_notifier_to_kvm(mn); 362 363 spin_lock(&kvm->mmu_lock); 364 /* 365 * This sequence increase will notify the kvm page fault that 366 * the page that is going to be mapped in the spte could have 367 * been freed. 368 */ 369 kvm->mmu_notifier_seq++; 370 smp_wmb(); 371 /* 372 * The above sequence increase must be visible before the 373 * below count decrease, which is ensured by the smp_wmb above 374 * in conjunction with the smp_rmb in mmu_notifier_retry(). 375 */ 376 kvm->mmu_notifier_count--; 377 spin_unlock(&kvm->mmu_lock); 378 379 BUG_ON(kvm->mmu_notifier_count < 0); 380} 381 382static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, 383 struct mm_struct *mm, 384 unsigned long start, 385 unsigned long end) 386{ 387 struct kvm *kvm = mmu_notifier_to_kvm(mn); 388 int young, idx; 389 390 idx = srcu_read_lock(&kvm->srcu); 391 spin_lock(&kvm->mmu_lock); 392 393 young = kvm_age_hva(kvm, start, end); 394 if (young) 395 kvm_flush_remote_tlbs(kvm); 396 397 spin_unlock(&kvm->mmu_lock); 398 srcu_read_unlock(&kvm->srcu, idx); 399 400 return young; 401} 402 403static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, 404 struct mm_struct *mm, 405 unsigned long start, 406 unsigned long end) 407{ 408 struct kvm *kvm = mmu_notifier_to_kvm(mn); 409 int young, idx; 410 411 idx = srcu_read_lock(&kvm->srcu); 412 spin_lock(&kvm->mmu_lock); 413 /* 414 * Even though we do not flush TLB, this will still adversely 415 * affect performance on pre-Haswell Intel EPT, where there is 416 * no EPT Access Bit to clear so that we have to tear down EPT 417 * tables instead. 
If we find this unacceptable, we can always 418 * add a parameter to kvm_age_hva so that it effectively doesn't 419 * do anything on clear_young. 420 * 421 * Also note that currently we never issue secondary TLB flushes 422 * from clear_young, leaving this job up to the regular system 423 * cadence. If we find this inaccurate, we might come up with a 424 * more sophisticated heuristic later. 425 */ 426 young = kvm_age_hva(kvm, start, end); 427 spin_unlock(&kvm->mmu_lock); 428 srcu_read_unlock(&kvm->srcu, idx); 429 430 return young; 431} 432 433static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, 434 struct mm_struct *mm, 435 unsigned long address) 436{ 437 struct kvm *kvm = mmu_notifier_to_kvm(mn); 438 int young, idx; 439 440 idx = srcu_read_lock(&kvm->srcu); 441 spin_lock(&kvm->mmu_lock); 442 young = kvm_test_age_hva(kvm, address); 443 spin_unlock(&kvm->mmu_lock); 444 srcu_read_unlock(&kvm->srcu, idx); 445 446 return young; 447} 448 449static void kvm_mmu_notifier_release(struct mmu_notifier *mn, 450 struct mm_struct *mm) 451{ 452 struct kvm *kvm = mmu_notifier_to_kvm(mn); 453 int idx; 454 455 idx = srcu_read_lock(&kvm->srcu); 456 kvm_arch_flush_shadow_all(kvm); 457 srcu_read_unlock(&kvm->srcu, idx); 458} 459 460static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { 461 .invalidate_page = kvm_mmu_notifier_invalidate_page, 462 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 463 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 464 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 465 .clear_young = kvm_mmu_notifier_clear_young, 466 .test_young = kvm_mmu_notifier_test_young, 467 .change_pte = kvm_mmu_notifier_change_pte, 468 .release = kvm_mmu_notifier_release, 469}; 470 471static int kvm_init_mmu_notifier(struct kvm *kvm) 472{ 473 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; 474 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); 475} 476 477#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ 478 479static int kvm_init_mmu_notifier(struct kvm *kvm) 480{ 481 return 0; 482} 483 484#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 485 486static struct kvm_memslots *kvm_alloc_memslots(void) 487{ 488 int i; 489 struct kvm_memslots *slots; 490 491 slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); 492 if (!slots) 493 return NULL; 494 495 /* 496 * Init kvm generation close to the maximum to easily test the 497 * code of handling generation number wrap-around. 498 */ 499 slots->generation = -150; 500 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++) 501 slots->id_to_index[i] = slots->memslots[i].id = i; 502 503 return slots; 504} 505 506static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) 507{ 508 if (!memslot->dirty_bitmap) 509 return; 510 511 kvfree(memslot->dirty_bitmap); 512 memslot->dirty_bitmap = NULL; 513} 514 515/* 516 * Free any memory in @free but not in @dont. 
 */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont)
{
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		kvm_destroy_dirty_bitmap(free);

	kvm_arch_free_memslot(kvm, free, dont);

	free->npages = 0;
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;

	if (!slots)
		return;

	kvm_for_each_memslot(memslot, slots)
		kvm_free_memslot(kvm, memslot, NULL);

	kvfree(slots);
}

static struct kvm *kvm_create_vm(unsigned long type)
{
	int r, i;
	struct kvm *kvm = kvm_arch_alloc_vm();

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&kvm->mmu_lock);
	atomic_inc(&current->mm->mm_count);
	kvm->mm = current->mm;
	kvm_eventfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	mutex_init(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	INIT_LIST_HEAD(&kvm->devices);

	r = kvm_arch_init_vm(kvm, type);
	if (r)
		goto out_err_no_disable;

	r = hardware_enable_all();
	if (r)
		goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

	r = -ENOMEM;
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		kvm->memslots[i] = kvm_alloc_memslots();
		if (!kvm->memslots[i])
			goto out_err_no_srcu;
	}

	if (init_srcu_struct(&kvm->srcu))
		goto out_err_no_srcu;
	if (init_srcu_struct(&kvm->irq_srcu))
		goto out_err_no_irq_srcu;
	for (i = 0; i < KVM_NR_BUSES; i++) {
		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
					GFP_KERNEL);
		if (!kvm->buses[i])
			goto out_err;
	}

	r = kvm_init_mmu_notifier(kvm);
	if (r)
		goto out_err;

	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);

	preempt_notifier_inc();

	return kvm;

out_err:
	cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
	cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
	hardware_disable_all();
out_err_no_disable:
	for (i = 0; i < KVM_NR_BUSES; i++)
		kfree(kvm->buses[i]);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
		kvm_free_memslots(kvm, kvm->memslots[i]);
	kvm_arch_free_vm(kvm);
	mmdrop(current->mm);
	return ERR_PTR(r);
}

/*
 * Avoid using vmalloc for a small buffer.
 * Should not be used when the size is statically known.
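 * (Descriptive note: as the function body below shows, allocations up to
 * PAGE_SIZE use kzalloc(); larger ones fall back to vzalloc().)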
623 */ 624void *kvm_kvzalloc(unsigned long size) 625{ 626 if (size > PAGE_SIZE) 627 return vzalloc(size); 628 else 629 return kzalloc(size, GFP_KERNEL); 630} 631 632static void kvm_destroy_devices(struct kvm *kvm) 633{ 634 struct list_head *node, *tmp; 635 636 list_for_each_safe(node, tmp, &kvm->devices) { 637 struct kvm_device *dev = 638 list_entry(node, struct kvm_device, vm_node); 639 640 list_del(node); 641 dev->ops->destroy(dev); 642 } 643} 644 645static void kvm_destroy_vm(struct kvm *kvm) 646{ 647 int i; 648 struct mm_struct *mm = kvm->mm; 649 650 kvm_arch_sync_events(kvm); 651 spin_lock(&kvm_lock); 652 list_del(&kvm->vm_list); 653 spin_unlock(&kvm_lock); 654 kvm_free_irq_routing(kvm); 655 for (i = 0; i < KVM_NR_BUSES; i++) 656 kvm_io_bus_destroy(kvm->buses[i]); 657 kvm_coalesced_mmio_free(kvm); 658#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) 659 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); 660#else 661 kvm_arch_flush_shadow_all(kvm); 662#endif 663 kvm_arch_destroy_vm(kvm); 664 kvm_destroy_devices(kvm); 665 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) 666 kvm_free_memslots(kvm, kvm->memslots[i]); 667 cleanup_srcu_struct(&kvm->irq_srcu); 668 cleanup_srcu_struct(&kvm->srcu); 669 kvm_arch_free_vm(kvm); 670 preempt_notifier_dec(); 671 hardware_disable_all(); 672 mmdrop(mm); 673} 674 675void kvm_get_kvm(struct kvm *kvm) 676{ 677 atomic_inc(&kvm->users_count); 678} 679EXPORT_SYMBOL_GPL(kvm_get_kvm); 680 681void kvm_put_kvm(struct kvm *kvm) 682{ 683 if (atomic_dec_and_test(&kvm->users_count)) 684 kvm_destroy_vm(kvm); 685} 686EXPORT_SYMBOL_GPL(kvm_put_kvm); 687 688 689static int kvm_vm_release(struct inode *inode, struct file *filp) 690{ 691 struct kvm *kvm = filp->private_data; 692 693 kvm_irqfd_release(kvm); 694 695 kvm_put_kvm(kvm); 696 return 0; 697} 698 699/* 700 * Allocation size is twice as large as the actual dirty bitmap size. 701 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed. 702 */ 703static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) 704{ 705 unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); 706 707 memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes); 708 if (!memslot->dirty_bitmap) 709 return -ENOMEM; 710 711 return 0; 712} 713 714/* 715 * Insert memslot and re-sort memslots based on their GFN, 716 * so binary search could be used to lookup GFN. 717 * Sorting algorithm takes advantage of having initially 718 * sorted array and known changed memslot position. 719 */ 720static void update_memslots(struct kvm_memslots *slots, 721 struct kvm_memory_slot *new) 722{ 723 int id = new->id; 724 int i = slots->id_to_index[id]; 725 struct kvm_memory_slot *mslots = slots->memslots; 726 727 WARN_ON(mslots[i].id != id); 728 if (!new->npages) { 729 WARN_ON(!mslots[i].npages); 730 if (mslots[i].npages) 731 slots->used_slots--; 732 } else { 733 if (!mslots[i].npages) 734 slots->used_slots++; 735 } 736 737 while (i < KVM_MEM_SLOTS_NUM - 1 && 738 new->base_gfn <= mslots[i + 1].base_gfn) { 739 if (!mslots[i + 1].npages) 740 break; 741 mslots[i] = mslots[i + 1]; 742 slots->id_to_index[mslots[i].id] = i; 743 i++; 744 } 745 746 /* 747 * The ">=" is needed when creating a slot with base_gfn == 0, 748 * so that it moves before all those with base_gfn == npages == 0. 749 * 750 * On the other hand, if new->npages is zero, the above loop has 751 * already left i pointing to the beginning of the empty part of 752 * mslots, and the ">=" would move the hole backwards in this 753 * case---which is wrong. 
So skip the loop when deleting a slot. 754 */ 755 if (new->npages) { 756 while (i > 0 && 757 new->base_gfn >= mslots[i - 1].base_gfn) { 758 mslots[i] = mslots[i - 1]; 759 slots->id_to_index[mslots[i].id] = i; 760 i--; 761 } 762 } else 763 WARN_ON_ONCE(i != slots->used_slots); 764 765 mslots[i] = *new; 766 slots->id_to_index[mslots[i].id] = i; 767} 768 769static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem) 770{ 771 u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; 772 773#ifdef __KVM_HAVE_READONLY_MEM 774 valid_flags |= KVM_MEM_READONLY; 775#endif 776 777 if (mem->flags & ~valid_flags) 778 return -EINVAL; 779 780 return 0; 781} 782 783static struct kvm_memslots *install_new_memslots(struct kvm *kvm, 784 int as_id, struct kvm_memslots *slots) 785{ 786 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); 787 788 /* 789 * Set the low bit in the generation, which disables SPTE caching 790 * until the end of synchronize_srcu_expedited. 791 */ 792 WARN_ON(old_memslots->generation & 1); 793 slots->generation = old_memslots->generation + 1; 794 795 rcu_assign_pointer(kvm->memslots[as_id], slots); 796 synchronize_srcu_expedited(&kvm->srcu); 797 798 /* 799 * Increment the new memslot generation a second time. This prevents 800 * vm exits that race with memslot updates from caching a memslot 801 * generation that will (potentially) be valid forever. 802 */ 803 slots->generation++; 804 805 kvm_arch_memslots_updated(kvm, slots); 806 807 return old_memslots; 808} 809 810/* 811 * Allocate some memory and give it an address in the guest physical address 812 * space. 813 * 814 * Discontiguous memory is allowed, mostly for framebuffers. 815 * 816 * Must be called holding kvm->slots_lock for write. 817 */ 818int __kvm_set_memory_region(struct kvm *kvm, 819 const struct kvm_userspace_memory_region *mem) 820{ 821 int r; 822 gfn_t base_gfn; 823 unsigned long npages; 824 struct kvm_memory_slot *slot; 825 struct kvm_memory_slot old, new; 826 struct kvm_memslots *slots = NULL, *old_memslots; 827 int as_id, id; 828 enum kvm_mr_change change; 829 830 r = check_memory_region_flags(mem); 831 if (r) 832 goto out; 833 834 r = -EINVAL; 835 as_id = mem->slot >> 16; 836 id = (u16)mem->slot; 837 838 /* General sanity checks */ 839 if (mem->memory_size & (PAGE_SIZE - 1)) 840 goto out; 841 if (mem->guest_phys_addr & (PAGE_SIZE - 1)) 842 goto out; 843 /* We can read the guest memory with __xxx_user() later on. */ 844 if ((id < KVM_USER_MEM_SLOTS) && 845 ((mem->userspace_addr & (PAGE_SIZE - 1)) || 846 !access_ok(VERIFY_WRITE, 847 (void __user *)(unsigned long)mem->userspace_addr, 848 mem->memory_size))) 849 goto out; 850 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM) 851 goto out; 852 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) 853 goto out; 854 855 slot = id_to_memslot(__kvm_memslots(kvm, as_id), id); 856 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; 857 npages = mem->memory_size >> PAGE_SHIFT; 858 859 if (npages > KVM_MEM_MAX_NR_PAGES) 860 goto out; 861 862 new = old = *slot; 863 864 new.id = id; 865 new.base_gfn = base_gfn; 866 new.npages = npages; 867 new.flags = mem->flags; 868 869 if (npages) { 870 if (!old.npages) 871 change = KVM_MR_CREATE; 872 else { /* Modify an existing slot. 
*/ 873 if ((mem->userspace_addr != old.userspace_addr) || 874 (npages != old.npages) || 875 ((new.flags ^ old.flags) & KVM_MEM_READONLY)) 876 goto out; 877 878 if (base_gfn != old.base_gfn) 879 change = KVM_MR_MOVE; 880 else if (new.flags != old.flags) 881 change = KVM_MR_FLAGS_ONLY; 882 else { /* Nothing to change. */ 883 r = 0; 884 goto out; 885 } 886 } 887 } else { 888 if (!old.npages) 889 goto out; 890 891 change = KVM_MR_DELETE; 892 new.base_gfn = 0; 893 new.flags = 0; 894 } 895 896 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 897 /* Check for overlaps */ 898 r = -EEXIST; 899 kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { 900 if ((slot->id >= KVM_USER_MEM_SLOTS) || 901 (slot->id == id)) 902 continue; 903 if (!((base_gfn + npages <= slot->base_gfn) || 904 (base_gfn >= slot->base_gfn + slot->npages))) 905 goto out; 906 } 907 } 908 909 /* Free page dirty bitmap if unneeded */ 910 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) 911 new.dirty_bitmap = NULL; 912 913 r = -ENOMEM; 914 if (change == KVM_MR_CREATE) { 915 new.userspace_addr = mem->userspace_addr; 916 917 if (kvm_arch_create_memslot(kvm, &new, npages)) 918 goto out_free; 919 } 920 921 /* Allocate page dirty bitmap if needed */ 922 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { 923 if (kvm_create_dirty_bitmap(&new) < 0) 924 goto out_free; 925 } 926 927 slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); 928 if (!slots) 929 goto out_free; 930 memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); 931 932 if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) { 933 slot = id_to_memslot(slots, id); 934 slot->flags |= KVM_MEMSLOT_INVALID; 935 936 old_memslots = install_new_memslots(kvm, as_id, slots); 937 938 /* slot was deleted or moved, clear iommu mapping */ 939 kvm_iommu_unmap_pages(kvm, &old); 940 /* From this point no new shadow pages pointing to a deleted, 941 * or moved, memslot will be created. 942 * 943 * validation of sp->gfn happens in: 944 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) 945 * - kvm_is_visible_gfn (mmu_check_roots) 946 */ 947 kvm_arch_flush_shadow_memslot(kvm, slot); 948 949 /* 950 * We can re-use the old_memslots from above, the only difference 951 * from the currently installed memslots is the invalid flag. This 952 * will get overwritten by update_memslots anyway. 953 */ 954 slots = old_memslots; 955 } 956 957 r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); 958 if (r) 959 goto out_slots; 960 961 /* actual memory is freed via old in kvm_free_memslot below */ 962 if (change == KVM_MR_DELETE) { 963 new.dirty_bitmap = NULL; 964 memset(&new.arch, 0, sizeof(new.arch)); 965 } 966 967 update_memslots(slots, &new); 968 old_memslots = install_new_memslots(kvm, as_id, slots); 969 970 kvm_arch_commit_memory_region(kvm, mem, &old, &new, change); 971 972 kvm_free_memslot(kvm, &old, &new); 973 kvfree(old_memslots); 974 975 /* 976 * IOMMU mapping: New slots need to be mapped. Old slots need to be 977 * un-mapped and re-mapped if their base changes. Since base change 978 * unmapping is handled above with slot deletion, mapping alone is 979 * needed here. Anything else the iommu might care about for existing 980 * slots (size changes, userspace addr changes and read-only flag 981 * changes) is disallowed above, so any other attribute changes getting 982 * here can be skipped. 
983 */ 984 if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { 985 r = kvm_iommu_map_pages(kvm, &new); 986 return r; 987 } 988 989 return 0; 990 991out_slots: 992 kvfree(slots); 993out_free: 994 kvm_free_memslot(kvm, &new, &old); 995out: 996 return r; 997} 998EXPORT_SYMBOL_GPL(__kvm_set_memory_region); 999 1000int kvm_set_memory_region(struct kvm *kvm, 1001 const struct kvm_userspace_memory_region *mem) 1002{ 1003 int r; 1004 1005 mutex_lock(&kvm->slots_lock); 1006 r = __kvm_set_memory_region(kvm, mem); 1007 mutex_unlock(&kvm->slots_lock); 1008 return r; 1009} 1010EXPORT_SYMBOL_GPL(kvm_set_memory_region); 1011 1012static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, 1013 struct kvm_userspace_memory_region *mem) 1014{ 1015 if ((u16)mem->slot >= KVM_USER_MEM_SLOTS) 1016 return -EINVAL; 1017 1018 return kvm_set_memory_region(kvm, mem); 1019} 1020 1021int kvm_get_dirty_log(struct kvm *kvm, 1022 struct kvm_dirty_log *log, int *is_dirty) 1023{ 1024 struct kvm_memslots *slots; 1025 struct kvm_memory_slot *memslot; 1026 int r, i, as_id, id; 1027 unsigned long n; 1028 unsigned long any = 0; 1029 1030 r = -EINVAL; 1031 as_id = log->slot >> 16; 1032 id = (u16)log->slot; 1033 if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) 1034 goto out; 1035 1036 slots = __kvm_memslots(kvm, as_id); 1037 memslot = id_to_memslot(slots, id); 1038 r = -ENOENT; 1039 if (!memslot->dirty_bitmap) 1040 goto out; 1041 1042 n = kvm_dirty_bitmap_bytes(memslot); 1043 1044 for (i = 0; !any && i < n/sizeof(long); ++i) 1045 any = memslot->dirty_bitmap[i]; 1046 1047 r = -EFAULT; 1048 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) 1049 goto out; 1050 1051 if (any) 1052 *is_dirty = 1; 1053 1054 r = 0; 1055out: 1056 return r; 1057} 1058EXPORT_SYMBOL_GPL(kvm_get_dirty_log); 1059 1060#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT 1061/** 1062 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages 1063 * are dirty write protect them for next write. 1064 * @kvm: pointer to kvm instance 1065 * @log: slot id and address to which we copy the log 1066 * @is_dirty: flag set if any page is dirty 1067 * 1068 * We need to keep it in mind that VCPU threads can write to the bitmap 1069 * concurrently. So, to avoid losing track of dirty pages we keep the 1070 * following order: 1071 * 1072 * 1. Take a snapshot of the bit and clear it if needed. 1073 * 2. Write protect the corresponding page. 1074 * 3. Copy the snapshot to the userspace. 1075 * 4. Upon return caller flushes TLB's if needed. 1076 * 1077 * Between 2 and 4, the guest may write to the page using the remaining TLB 1078 * entry. This is not a problem because the page is reported dirty using 1079 * the snapshot taken before and step 4 ensures that writes done after 1080 * exiting to userspace will be logged for the next call. 
 *
 */
int kvm_get_dirty_log_protect(struct kvm *kvm,
			struct kvm_dirty_log *log, bool *is_dirty)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int r, i, as_id, id;
	unsigned long n;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;

	r = -EINVAL;
	as_id = log->slot >> 16;
	id = (u16)log->slot;
	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = __kvm_memslots(kvm, as_id);
	memslot = id_to_memslot(slots, id);

	dirty_bitmap = memslot->dirty_bitmap;
	r = -ENOENT;
	if (!dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);
	*is_dirty = false;
	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		*is_dirty = true;

		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		if (mask) {
			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}

	spin_unlock(&kvm->mmu_lock);

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		goto out;

	r = 0;
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
#endif

bool kvm_largepages_enabled(void)
{
	return largepages_enabled;
}

void kvm_disable_largepages(void)
{
	largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
	      memslot->flags & KVM_MEMSLOT_INVALID)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}

static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_READONLY;
}

static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
				       gfn_t *nr_pages, bool write)
{
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return KVM_HVA_ERR_BAD;

	if (memslot_is_readonly(slot) && write)
		return KVM_HVA_ERR_RO_BAD;

	if (nr_pages)
		*nr_pages = slot->npages - (gfn - slot->base_gfn);

	return __gfn_to_hva_memslot(slot, gfn);
}

static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
gfn_t *nr_pages) 1226{ 1227 return __gfn_to_hva_many(slot, gfn, nr_pages, true); 1228} 1229 1230unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, 1231 gfn_t gfn) 1232{ 1233 return gfn_to_hva_many(slot, gfn, NULL); 1234} 1235EXPORT_SYMBOL_GPL(gfn_to_hva_memslot); 1236 1237unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) 1238{ 1239 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); 1240} 1241EXPORT_SYMBOL_GPL(gfn_to_hva); 1242 1243unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn) 1244{ 1245 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL); 1246} 1247EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva); 1248 1249/* 1250 * If writable is set to false, the hva returned by this function is only 1251 * allowed to be read. 1252 */ 1253unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, 1254 gfn_t gfn, bool *writable) 1255{ 1256 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false); 1257 1258 if (!kvm_is_error_hva(hva) && writable) 1259 *writable = !memslot_is_readonly(slot); 1260 1261 return hva; 1262} 1263 1264unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) 1265{ 1266 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1267 1268 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1269} 1270 1271unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable) 1272{ 1273 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1274 1275 return gfn_to_hva_memslot_prot(slot, gfn, writable); 1276} 1277 1278static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm, 1279 unsigned long start, int write, struct page **page) 1280{ 1281 int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET; 1282 1283 if (write) 1284 flags |= FOLL_WRITE; 1285 1286 return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL); 1287} 1288 1289static inline int check_user_page_hwpoison(unsigned long addr) 1290{ 1291 int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE; 1292 1293 rc = __get_user_pages(current, current->mm, addr, 1, 1294 flags, NULL, NULL, NULL); 1295 return rc == -EHWPOISON; 1296} 1297 1298/* 1299 * The atomic path to get the writable pfn which will be stored in @pfn, 1300 * true indicates success, otherwise false is returned. 1301 */ 1302static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async, 1303 bool write_fault, bool *writable, pfn_t *pfn) 1304{ 1305 struct page *page[1]; 1306 int npages; 1307 1308 if (!(async || atomic)) 1309 return false; 1310 1311 /* 1312 * Fast pin a writable pfn only if it is a write fault request 1313 * or the caller allows to map a writable pfn for a read fault 1314 * request. 1315 */ 1316 if (!(write_fault || writable)) 1317 return false; 1318 1319 npages = __get_user_pages_fast(addr, 1, 1, page); 1320 if (npages == 1) { 1321 *pfn = page_to_pfn(page[0]); 1322 1323 if (writable) 1324 *writable = true; 1325 return true; 1326 } 1327 1328 return false; 1329} 1330 1331/* 1332 * The slow path to get the pfn of the specified host virtual address, 1333 * 1 indicates success, -errno is returned if error is detected. 
 */
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
			   bool *writable, pfn_t *pfn)
{
	struct page *page[1];
	int npages = 0;

	might_sleep();

	if (writable)
		*writable = write_fault;

	if (async) {
		down_read(&current->mm->mmap_sem);
		npages = get_user_page_nowait(current, current->mm,
					      addr, write_fault, page);
		up_read(&current->mm->mmap_sem);
	} else
		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
						   write_fault, 0, page,
						   FOLL_TOUCH|FOLL_HWPOISON);
	if (npages != 1)
		return npages;

	/* map read fault as writable if possible */
	if (unlikely(!write_fault) && writable) {
		struct page *wpage[1];

		npages = __get_user_pages_fast(addr, 1, 1, wpage);
		if (npages == 1) {
			*writable = true;
			put_page(page[0]);
			page[0] = wpage[0];
		}

		npages = 1;
	}
	*pfn = page_to_pfn(page[0]);
	return npages;
}

static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
	if (unlikely(!(vma->vm_flags & VM_READ)))
		return false;

	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
		return false;

	return true;
}

/*
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: if true, the pfn must be obtained without sleeping
 *          (only the fast path is attempted)
 * @async: whether this function needs to wait for IO to complete if the
 *         host page is not in memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows mapping a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable; @writable will tell the caller
 *     whether the mapping is writable.
 */
static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	pfn_t pfn = 0;
	int npages;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	if (atomic)
		return KVM_PFN_ERR_FAULT;

	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;

	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	      (!async && check_user_page_hwpoison(addr))) {
		pfn = KVM_PFN_ERR_HWPOISON;
		goto exit;
	}

	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = KVM_PFN_ERR_FAULT;
	else if ((vma->vm_flags & VM_PFNMAP)) {
		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
		BUG_ON(!kvm_is_reserved_pfn(pfn));
	} else {
		if (async && vma_is_valid(vma, write_fault))
			*async = true;
		pfn = KVM_PFN_ERR_FAULT;
	}
exit:
	up_read(&current->mm->mmap_sem);
	return pfn;
}

pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
			   bool *async, bool write_fault, bool *writable)
{
	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

	if (addr == KVM_HVA_ERR_RO_BAD)
		return KVM_PFN_ERR_RO_FAULT;

	if (kvm_is_error_hva(addr))
		return KVM_PFN_NOSLOT;

	/* Do not map writable pfn in the readonly memslot. */
	if (writable && memslot_is_readonly(slot)) {
		*writable = false;
		writable = NULL;
	}

	return hva_to_pfn(addr, atomic, async, write_fault,
			  writable);
}
EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
				    write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);

pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	addr = gfn_to_hva_many(slot, gfn, &entry);
	if
(kvm_is_error_hva(addr)) 1519 return -1; 1520 1521 if (entry < nr_pages) 1522 return 0; 1523 1524 return __get_user_pages_fast(addr, nr_pages, 1, pages); 1525} 1526EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic); 1527 1528static struct page *kvm_pfn_to_page(pfn_t pfn) 1529{ 1530 if (is_error_noslot_pfn(pfn)) 1531 return KVM_ERR_PTR_BAD_PAGE; 1532 1533 if (kvm_is_reserved_pfn(pfn)) { 1534 WARN_ON(1); 1535 return KVM_ERR_PTR_BAD_PAGE; 1536 } 1537 1538 return pfn_to_page(pfn); 1539} 1540 1541struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) 1542{ 1543 pfn_t pfn; 1544 1545 pfn = gfn_to_pfn(kvm, gfn); 1546 1547 return kvm_pfn_to_page(pfn); 1548} 1549EXPORT_SYMBOL_GPL(gfn_to_page); 1550 1551struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn) 1552{ 1553 pfn_t pfn; 1554 1555 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn); 1556 1557 return kvm_pfn_to_page(pfn); 1558} 1559EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page); 1560 1561void kvm_release_page_clean(struct page *page) 1562{ 1563 WARN_ON(is_error_page(page)); 1564 1565 kvm_release_pfn_clean(page_to_pfn(page)); 1566} 1567EXPORT_SYMBOL_GPL(kvm_release_page_clean); 1568 1569void kvm_release_pfn_clean(pfn_t pfn) 1570{ 1571 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn)) 1572 put_page(pfn_to_page(pfn)); 1573} 1574EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); 1575 1576void kvm_release_page_dirty(struct page *page) 1577{ 1578 WARN_ON(is_error_page(page)); 1579 1580 kvm_release_pfn_dirty(page_to_pfn(page)); 1581} 1582EXPORT_SYMBOL_GPL(kvm_release_page_dirty); 1583 1584static void kvm_release_pfn_dirty(pfn_t pfn) 1585{ 1586 kvm_set_pfn_dirty(pfn); 1587 kvm_release_pfn_clean(pfn); 1588} 1589 1590void kvm_set_pfn_dirty(pfn_t pfn) 1591{ 1592 if (!kvm_is_reserved_pfn(pfn)) { 1593 struct page *page = pfn_to_page(pfn); 1594 1595 if (!PageReserved(page)) 1596 SetPageDirty(page); 1597 } 1598} 1599EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty); 1600 1601void kvm_set_pfn_accessed(pfn_t pfn) 1602{ 1603 if (!kvm_is_reserved_pfn(pfn)) 1604 mark_page_accessed(pfn_to_page(pfn)); 1605} 1606EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 1607 1608void kvm_get_pfn(pfn_t pfn) 1609{ 1610 if (!kvm_is_reserved_pfn(pfn)) 1611 get_page(pfn_to_page(pfn)); 1612} 1613EXPORT_SYMBOL_GPL(kvm_get_pfn); 1614 1615static int next_segment(unsigned long len, int offset) 1616{ 1617 if (len > PAGE_SIZE - offset) 1618 return PAGE_SIZE - offset; 1619 else 1620 return len; 1621} 1622 1623static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn, 1624 void *data, int offset, int len) 1625{ 1626 int r; 1627 unsigned long addr; 1628 1629 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 1630 if (kvm_is_error_hva(addr)) 1631 return -EFAULT; 1632 r = __copy_from_user(data, (void __user *)addr + offset, len); 1633 if (r) 1634 return -EFAULT; 1635 return 0; 1636} 1637 1638int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, 1639 int len) 1640{ 1641 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1642 1643 return __kvm_read_guest_page(slot, gfn, data, offset, len); 1644} 1645EXPORT_SYMBOL_GPL(kvm_read_guest_page); 1646 1647int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, 1648 int offset, int len) 1649{ 1650 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1651 1652 return __kvm_read_guest_page(slot, gfn, data, offset, len); 1653} 1654EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page); 1655 1656int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) 1657{ 1658 gfn_t gfn = gpa >> PAGE_SHIFT; 1659 int seg; 1660 
int offset = offset_in_page(gpa); 1661 int ret; 1662 1663 while ((seg = next_segment(len, offset)) != 0) { 1664 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); 1665 if (ret < 0) 1666 return ret; 1667 offset = 0; 1668 len -= seg; 1669 data += seg; 1670 ++gfn; 1671 } 1672 return 0; 1673} 1674EXPORT_SYMBOL_GPL(kvm_read_guest); 1675 1676int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len) 1677{ 1678 gfn_t gfn = gpa >> PAGE_SHIFT; 1679 int seg; 1680 int offset = offset_in_page(gpa); 1681 int ret; 1682 1683 while ((seg = next_segment(len, offset)) != 0) { 1684 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg); 1685 if (ret < 0) 1686 return ret; 1687 offset = 0; 1688 len -= seg; 1689 data += seg; 1690 ++gfn; 1691 } 1692 return 0; 1693} 1694EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest); 1695 1696static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn, 1697 void *data, int offset, unsigned long len) 1698{ 1699 int r; 1700 unsigned long addr; 1701 1702 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); 1703 if (kvm_is_error_hva(addr)) 1704 return -EFAULT; 1705 pagefault_disable(); 1706 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); 1707 pagefault_enable(); 1708 if (r) 1709 return -EFAULT; 1710 return 0; 1711} 1712 1713int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, 1714 unsigned long len) 1715{ 1716 gfn_t gfn = gpa >> PAGE_SHIFT; 1717 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1718 int offset = offset_in_page(gpa); 1719 1720 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 1721} 1722EXPORT_SYMBOL_GPL(kvm_read_guest_atomic); 1723 1724int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, 1725 void *data, unsigned long len) 1726{ 1727 gfn_t gfn = gpa >> PAGE_SHIFT; 1728 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1729 int offset = offset_in_page(gpa); 1730 1731 return __kvm_read_guest_atomic(slot, gfn, data, offset, len); 1732} 1733EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic); 1734 1735static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn, 1736 const void *data, int offset, int len) 1737{ 1738 int r; 1739 unsigned long addr; 1740 1741 addr = gfn_to_hva_memslot(memslot, gfn); 1742 if (kvm_is_error_hva(addr)) 1743 return -EFAULT; 1744 r = __copy_to_user((void __user *)addr + offset, data, len); 1745 if (r) 1746 return -EFAULT; 1747 mark_page_dirty_in_slot(memslot, gfn); 1748 return 0; 1749} 1750 1751int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, 1752 const void *data, int offset, int len) 1753{ 1754 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 1755 1756 return __kvm_write_guest_page(slot, gfn, data, offset, len); 1757} 1758EXPORT_SYMBOL_GPL(kvm_write_guest_page); 1759 1760int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, 1761 const void *data, int offset, int len) 1762{ 1763 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1764 1765 return __kvm_write_guest_page(slot, gfn, data, offset, len); 1766} 1767EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page); 1768 1769int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, 1770 unsigned long len) 1771{ 1772 gfn_t gfn = gpa >> PAGE_SHIFT; 1773 int seg; 1774 int offset = offset_in_page(gpa); 1775 int ret; 1776 1777 while ((seg = next_segment(len, offset)) != 0) { 1778 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); 1779 if (ret < 0) 1780 return ret; 1781 offset = 0; 1782 len -= seg; 1783 data += seg; 1784 
++gfn; 1785 } 1786 return 0; 1787} 1788EXPORT_SYMBOL_GPL(kvm_write_guest); 1789 1790int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, 1791 unsigned long len) 1792{ 1793 gfn_t gfn = gpa >> PAGE_SHIFT; 1794 int seg; 1795 int offset = offset_in_page(gpa); 1796 int ret; 1797 1798 while ((seg = next_segment(len, offset)) != 0) { 1799 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg); 1800 if (ret < 0) 1801 return ret; 1802 offset = 0; 1803 len -= seg; 1804 data += seg; 1805 ++gfn; 1806 } 1807 return 0; 1808} 1809EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest); 1810 1811int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1812 gpa_t gpa, unsigned long len) 1813{ 1814 struct kvm_memslots *slots = kvm_memslots(kvm); 1815 int offset = offset_in_page(gpa); 1816 gfn_t start_gfn = gpa >> PAGE_SHIFT; 1817 gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; 1818 gfn_t nr_pages_needed = end_gfn - start_gfn + 1; 1819 gfn_t nr_pages_avail; 1820 1821 ghc->gpa = gpa; 1822 ghc->generation = slots->generation; 1823 ghc->len = len; 1824 ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1825 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL); 1826 if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) { 1827 ghc->hva += offset; 1828 } else { 1829 /* 1830 * If the requested region crosses two memslots, we still 1831 * verify that the entire region is valid here. 1832 */ 1833 while (start_gfn <= end_gfn) { 1834 ghc->memslot = gfn_to_memslot(kvm, start_gfn); 1835 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, 1836 &nr_pages_avail); 1837 if (kvm_is_error_hva(ghc->hva)) 1838 return -EFAULT; 1839 start_gfn += nr_pages_avail; 1840 } 1841 /* Use the slow path for cross page reads and writes. */ 1842 ghc->memslot = NULL; 1843 } 1844 return 0; 1845} 1846EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); 1847 1848int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1849 void *data, unsigned long len) 1850{ 1851 struct kvm_memslots *slots = kvm_memslots(kvm); 1852 int r; 1853 1854 BUG_ON(len > ghc->len); 1855 1856 if (slots->generation != ghc->generation) 1857 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1858 1859 if (unlikely(!ghc->memslot)) 1860 return kvm_write_guest(kvm, ghc->gpa, data, len); 1861 1862 if (kvm_is_error_hva(ghc->hva)) 1863 return -EFAULT; 1864 1865 r = __copy_to_user((void __user *)ghc->hva, data, len); 1866 if (r) 1867 return -EFAULT; 1868 mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT); 1869 1870 return 0; 1871} 1872EXPORT_SYMBOL_GPL(kvm_write_guest_cached); 1873 1874int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, 1875 void *data, unsigned long len) 1876{ 1877 struct kvm_memslots *slots = kvm_memslots(kvm); 1878 int r; 1879 1880 BUG_ON(len > ghc->len); 1881 1882 if (slots->generation != ghc->generation) 1883 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); 1884 1885 if (unlikely(!ghc->memslot)) 1886 return kvm_read_guest(kvm, ghc->gpa, data, len); 1887 1888 if (kvm_is_error_hva(ghc->hva)) 1889 return -EFAULT; 1890 1891 r = __copy_from_user(data, (void __user *)ghc->hva, len); 1892 if (r) 1893 return -EFAULT; 1894 1895 return 0; 1896} 1897EXPORT_SYMBOL_GPL(kvm_read_guest_cached); 1898 1899int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) 1900{ 1901 const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0))); 1902 1903 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); 1904} 1905EXPORT_SYMBOL_GPL(kvm_clear_guest_page); 1906 
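/* Zero a range of guest physical memory by clearing it one page at a time. */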
1907int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) 1908{ 1909 gfn_t gfn = gpa >> PAGE_SHIFT; 1910 int seg; 1911 int offset = offset_in_page(gpa); 1912 int ret; 1913 1914 while ((seg = next_segment(len, offset)) != 0) { 1915 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); 1916 if (ret < 0) 1917 return ret; 1918 offset = 0; 1919 len -= seg; 1920 ++gfn; 1921 } 1922 return 0; 1923} 1924EXPORT_SYMBOL_GPL(kvm_clear_guest); 1925 1926static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, 1927 gfn_t gfn) 1928{ 1929 if (memslot && memslot->dirty_bitmap) { 1930 unsigned long rel_gfn = gfn - memslot->base_gfn; 1931 1932 set_bit_le(rel_gfn, memslot->dirty_bitmap); 1933 } 1934} 1935 1936void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 1937{ 1938 struct kvm_memory_slot *memslot; 1939 1940 memslot = gfn_to_memslot(kvm, gfn); 1941 mark_page_dirty_in_slot(memslot, gfn); 1942} 1943EXPORT_SYMBOL_GPL(mark_page_dirty); 1944 1945void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn) 1946{ 1947 struct kvm_memory_slot *memslot; 1948 1949 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 1950 mark_page_dirty_in_slot(memslot, gfn); 1951} 1952EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty); 1953 1954static void grow_halt_poll_ns(struct kvm_vcpu *vcpu) 1955{ 1956 int old, val; 1957 1958 old = val = vcpu->halt_poll_ns; 1959 /* 10us base */ 1960 if (val == 0 && halt_poll_ns_grow) 1961 val = 10000; 1962 else 1963 val *= halt_poll_ns_grow; 1964 1965 if (val > halt_poll_ns) 1966 val = halt_poll_ns; 1967 1968 vcpu->halt_poll_ns = val; 1969 trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old); 1970} 1971 1972static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 1973{ 1974 int old, val; 1975 1976 old = val = vcpu->halt_poll_ns; 1977 if (halt_poll_ns_shrink == 0) 1978 val = 0; 1979 else 1980 val /= halt_poll_ns_shrink; 1981 1982 vcpu->halt_poll_ns = val; 1983 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); 1984} 1985 1986static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) 1987{ 1988 if (kvm_arch_vcpu_runnable(vcpu)) { 1989 kvm_make_request(KVM_REQ_UNHALT, vcpu); 1990 return -EINTR; 1991 } 1992 if (kvm_cpu_has_pending_timer(vcpu)) 1993 return -EINTR; 1994 if (signal_pending(current)) 1995 return -EINTR; 1996 1997 return 0; 1998} 1999 2000/* 2001 * The vCPU has executed a HLT instruction with in-kernel mode enabled. 2002 */ 2003void kvm_vcpu_block(struct kvm_vcpu *vcpu) 2004{ 2005 ktime_t start, cur; 2006 DEFINE_WAIT(wait); 2007 bool waited = false; 2008 u64 block_ns; 2009 2010 start = cur = ktime_get(); 2011 if (vcpu->halt_poll_ns) { 2012 ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns); 2013 2014 ++vcpu->stat.halt_attempted_poll; 2015 do { 2016 /* 2017 * This sets KVM_REQ_UNHALT if an interrupt 2018 * arrives. 
2019 */ 2020 if (kvm_vcpu_check_block(vcpu) < 0) { 2021 ++vcpu->stat.halt_successful_poll; 2022 goto out; 2023 } 2024 cur = ktime_get(); 2025 } while (single_task_running() && ktime_before(cur, stop)); 2026 } 2027 2028 kvm_arch_vcpu_blocking(vcpu); 2029 2030 for (;;) { 2031 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 2032 2033 if (kvm_vcpu_check_block(vcpu) < 0) 2034 break; 2035 2036 waited = true; 2037 schedule(); 2038 } 2039 2040 finish_wait(&vcpu->wq, &wait); 2041 cur = ktime_get(); 2042 2043 kvm_arch_vcpu_unblocking(vcpu); 2044out: 2045 block_ns = ktime_to_ns(cur) - ktime_to_ns(start); 2046 2047 if (halt_poll_ns) { 2048 if (block_ns <= vcpu->halt_poll_ns) 2049 ; 2050 /* we had a long block, shrink polling */ 2051 else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns) 2052 shrink_halt_poll_ns(vcpu); 2053 /* we had a short halt and our poll time is too small */ 2054 else if (vcpu->halt_poll_ns < halt_poll_ns && 2055 block_ns < halt_poll_ns) 2056 grow_halt_poll_ns(vcpu); 2057 } else 2058 vcpu->halt_poll_ns = 0; 2059 2060 trace_kvm_vcpu_wakeup(block_ns, waited); 2061} 2062EXPORT_SYMBOL_GPL(kvm_vcpu_block); 2063 2064#ifndef CONFIG_S390 2065/* 2066 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode. 2067 */ 2068void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 2069{ 2070 int me; 2071 int cpu = vcpu->cpu; 2072 wait_queue_head_t *wqp; 2073 2074 wqp = kvm_arch_vcpu_wq(vcpu); 2075 if (waitqueue_active(wqp)) { 2076 wake_up_interruptible(wqp); 2077 ++vcpu->stat.halt_wakeup; 2078 } 2079 2080 me = get_cpu(); 2081 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 2082 if (kvm_arch_vcpu_should_kick(vcpu)) 2083 smp_send_reschedule(cpu); 2084 put_cpu(); 2085} 2086EXPORT_SYMBOL_GPL(kvm_vcpu_kick); 2087#endif /* !CONFIG_S390 */ 2088 2089int kvm_vcpu_yield_to(struct kvm_vcpu *target) 2090{ 2091 struct pid *pid; 2092 struct task_struct *task = NULL; 2093 int ret = 0; 2094 2095 rcu_read_lock(); 2096 pid = rcu_dereference(target->pid); 2097 if (pid) 2098 task = get_pid_task(pid, PIDTYPE_PID); 2099 rcu_read_unlock(); 2100 if (!task) 2101 return ret; 2102 ret = yield_to(task, 1); 2103 put_task_struct(task); 2104 2105 return ret; 2106} 2107EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to); 2108 2109/* 2110 * Helper that checks whether a VCPU is eligible for directed yield. 2111 * Most eligible candidate to yield is decided by following heuristics: 2112 * 2113 * (a) VCPU which has not done pl-exit or cpu relax intercepted recently 2114 * (preempted lock holder), indicated by @in_spin_loop. 2115 * Set at the beiginning and cleared at the end of interception/PLE handler. 2116 * 2117 * (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get 2118 * chance last time (mostly it has become eligible now since we have probably 2119 * yielded to lockholder in last iteration. This is done by toggling 2120 * @dy_eligible each time a VCPU checked for eligibility.) 2121 * 2122 * Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding 2123 * to preempted lock-holder could result in wrong VCPU selection and CPU 2124 * burning. Giving priority for a potential lock-holder increases lock 2125 * progress. 2126 * 2127 * Since algorithm is based on heuristics, accessing another VCPU data without 2128 * locking does not harm. It may result in trying to yield to same VCPU, fail 2129 * and continue with next VCPU and so on. 
2130 */ 2131static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu) 2132{ 2133#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 2134 bool eligible; 2135 2136 eligible = !vcpu->spin_loop.in_spin_loop || 2137 vcpu->spin_loop.dy_eligible; 2138 2139 if (vcpu->spin_loop.in_spin_loop) 2140 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible); 2141 2142 return eligible; 2143#else 2144 return true; 2145#endif 2146} 2147 2148void kvm_vcpu_on_spin(struct kvm_vcpu *me) 2149{ 2150 struct kvm *kvm = me->kvm; 2151 struct kvm_vcpu *vcpu; 2152 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; 2153 int yielded = 0; 2154 int try = 3; 2155 int pass; 2156 int i; 2157 2158 kvm_vcpu_set_in_spin_loop(me, true); 2159 /* 2160 * We boost the priority of a VCPU that is runnable but not 2161 * currently running, because it got preempted by something 2162 * else and called schedule in __vcpu_run. Hopefully that 2163 * VCPU is holding the lock that we need and will release it. 2164 * We approximate round-robin by starting at the last boosted VCPU. 2165 */ 2166 for (pass = 0; pass < 2 && !yielded && try; pass++) { 2167 kvm_for_each_vcpu(i, vcpu, kvm) { 2168 if (!pass && i <= last_boosted_vcpu) { 2169 i = last_boosted_vcpu; 2170 continue; 2171 } else if (pass && i > last_boosted_vcpu) 2172 break; 2173 if (!ACCESS_ONCE(vcpu->preempted)) 2174 continue; 2175 if (vcpu == me) 2176 continue; 2177 if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) 2178 continue; 2179 if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) 2180 continue; 2181 2182 yielded = kvm_vcpu_yield_to(vcpu); 2183 if (yielded > 0) { 2184 kvm->last_boosted_vcpu = i; 2185 break; 2186 } else if (yielded < 0) { 2187 try--; 2188 if (!try) 2189 break; 2190 } 2191 } 2192 } 2193 kvm_vcpu_set_in_spin_loop(me, false); 2194 2195 /* Ensure vcpu is not eligible during next spinloop */ 2196 kvm_vcpu_set_dy_eligible(me, false); 2197} 2198EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin); 2199 2200static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2201{ 2202 struct kvm_vcpu *vcpu = vma->vm_file->private_data; 2203 struct page *page; 2204 2205 if (vmf->pgoff == 0) 2206 page = virt_to_page(vcpu->run); 2207#ifdef CONFIG_X86 2208 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET) 2209 page = virt_to_page(vcpu->arch.pio_data); 2210#endif 2211#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2212 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET) 2213 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); 2214#endif 2215 else 2216 return kvm_arch_vcpu_fault(vcpu, vmf); 2217 get_page(page); 2218 vmf->page = page; 2219 return 0; 2220} 2221 2222static const struct vm_operations_struct kvm_vcpu_vm_ops = { 2223 .fault = kvm_vcpu_fault, 2224}; 2225 2226static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma) 2227{ 2228 vma->vm_ops = &kvm_vcpu_vm_ops; 2229 return 0; 2230} 2231 2232static int kvm_vcpu_release(struct inode *inode, struct file *filp) 2233{ 2234 struct kvm_vcpu *vcpu = filp->private_data; 2235 2236 kvm_put_kvm(vcpu->kvm); 2237 return 0; 2238} 2239 2240static struct file_operations kvm_vcpu_fops = { 2241 .release = kvm_vcpu_release, 2242 .unlocked_ioctl = kvm_vcpu_ioctl, 2243#ifdef CONFIG_KVM_COMPAT 2244 .compat_ioctl = kvm_vcpu_compat_ioctl, 2245#endif 2246 .mmap = kvm_vcpu_mmap, 2247 .llseek = noop_llseek, 2248}; 2249 2250/* 2251 * Allocates an inode for the vcpu. 
2252 */ 2253static int create_vcpu_fd(struct kvm_vcpu *vcpu) 2254{ 2255 return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC); 2256} 2257 2258/* 2259 * Creates some virtual cpus. Good luck creating more than one. 2260 */ 2261static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) 2262{ 2263 int r; 2264 struct kvm_vcpu *vcpu, *v; 2265 2266 if (id >= KVM_MAX_VCPUS) 2267 return -EINVAL; 2268 2269 vcpu = kvm_arch_vcpu_create(kvm, id); 2270 if (IS_ERR(vcpu)) 2271 return PTR_ERR(vcpu); 2272 2273 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); 2274 2275 r = kvm_arch_vcpu_setup(vcpu); 2276 if (r) 2277 goto vcpu_destroy; 2278 2279 mutex_lock(&kvm->lock); 2280 if (!kvm_vcpu_compatible(vcpu)) { 2281 r = -EINVAL; 2282 goto unlock_vcpu_destroy; 2283 } 2284 if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) { 2285 r = -EINVAL; 2286 goto unlock_vcpu_destroy; 2287 } 2288 2289 kvm_for_each_vcpu(r, v, kvm) 2290 if (v->vcpu_id == id) { 2291 r = -EEXIST; 2292 goto unlock_vcpu_destroy; 2293 } 2294 2295 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); 2296 2297 /* Now it's all set up, let userspace reach it */ 2298 kvm_get_kvm(kvm); 2299 r = create_vcpu_fd(vcpu); 2300 if (r < 0) { 2301 kvm_put_kvm(kvm); 2302 goto unlock_vcpu_destroy; 2303 } 2304 2305 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; 2306 2307 /* 2308 * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus 2309 * before kvm->online_vcpu's incremented value. 2310 */ 2311 smp_wmb(); 2312 atomic_inc(&kvm->online_vcpus); 2313 2314 mutex_unlock(&kvm->lock); 2315 kvm_arch_vcpu_postcreate(vcpu); 2316 return r; 2317 2318unlock_vcpu_destroy: 2319 mutex_unlock(&kvm->lock); 2320vcpu_destroy: 2321 kvm_arch_vcpu_destroy(vcpu); 2322 return r; 2323} 2324 2325static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset) 2326{ 2327 if (sigset) { 2328 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP)); 2329 vcpu->sigset_active = 1; 2330 vcpu->sigset = *sigset; 2331 } else 2332 vcpu->sigset_active = 0; 2333 return 0; 2334} 2335 2336static long kvm_vcpu_ioctl(struct file *filp, 2337 unsigned int ioctl, unsigned long arg) 2338{ 2339 struct kvm_vcpu *vcpu = filp->private_data; 2340 void __user *argp = (void __user *)arg; 2341 int r; 2342 struct kvm_fpu *fpu = NULL; 2343 struct kvm_sregs *kvm_sregs = NULL; 2344 2345 if (vcpu->kvm->mm != current->mm) 2346 return -EIO; 2347 2348 if (unlikely(_IOC_TYPE(ioctl) != KVMIO)) 2349 return -EINVAL; 2350 2351#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS) 2352 /* 2353 * Special cases: vcpu ioctls that are asynchronous to vcpu execution, 2354 * so vcpu_load() would break it. 2355 */ 2356 if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT) 2357 return kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2358#endif 2359 2360 2361 r = vcpu_load(vcpu); 2362 if (r) 2363 return r; 2364 switch (ioctl) { 2365 case KVM_RUN: 2366 r = -EINVAL; 2367 if (arg) 2368 goto out; 2369 if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) { 2370 /* The thread running this VCPU changed. 
*/ 2371 struct pid *oldpid = vcpu->pid; 2372 struct pid *newpid = get_task_pid(current, PIDTYPE_PID); 2373 2374 rcu_assign_pointer(vcpu->pid, newpid); 2375 if (oldpid) 2376 synchronize_rcu(); 2377 put_pid(oldpid); 2378 } 2379 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run); 2380 trace_kvm_userspace_exit(vcpu->run->exit_reason, r); 2381 break; 2382 case KVM_GET_REGS: { 2383 struct kvm_regs *kvm_regs; 2384 2385 r = -ENOMEM; 2386 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL); 2387 if (!kvm_regs) 2388 goto out; 2389 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs); 2390 if (r) 2391 goto out_free1; 2392 r = -EFAULT; 2393 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs))) 2394 goto out_free1; 2395 r = 0; 2396out_free1: 2397 kfree(kvm_regs); 2398 break; 2399 } 2400 case KVM_SET_REGS: { 2401 struct kvm_regs *kvm_regs; 2402 2403 r = -ENOMEM; 2404 kvm_regs = memdup_user(argp, sizeof(*kvm_regs)); 2405 if (IS_ERR(kvm_regs)) { 2406 r = PTR_ERR(kvm_regs); 2407 goto out; 2408 } 2409 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs); 2410 kfree(kvm_regs); 2411 break; 2412 } 2413 case KVM_GET_SREGS: { 2414 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 2415 r = -ENOMEM; 2416 if (!kvm_sregs) 2417 goto out; 2418 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 2419 if (r) 2420 goto out; 2421 r = -EFAULT; 2422 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 2423 goto out; 2424 r = 0; 2425 break; 2426 } 2427 case KVM_SET_SREGS: { 2428 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs)); 2429 if (IS_ERR(kvm_sregs)) { 2430 r = PTR_ERR(kvm_sregs); 2431 kvm_sregs = NULL; 2432 goto out; 2433 } 2434 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 2435 break; 2436 } 2437 case KVM_GET_MP_STATE: { 2438 struct kvm_mp_state mp_state; 2439 2440 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state); 2441 if (r) 2442 goto out; 2443 r = -EFAULT; 2444 if (copy_to_user(argp, &mp_state, sizeof(mp_state))) 2445 goto out; 2446 r = 0; 2447 break; 2448 } 2449 case KVM_SET_MP_STATE: { 2450 struct kvm_mp_state mp_state; 2451 2452 r = -EFAULT; 2453 if (copy_from_user(&mp_state, argp, sizeof(mp_state))) 2454 goto out; 2455 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state); 2456 break; 2457 } 2458 case KVM_TRANSLATE: { 2459 struct kvm_translation tr; 2460 2461 r = -EFAULT; 2462 if (copy_from_user(&tr, argp, sizeof(tr))) 2463 goto out; 2464 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr); 2465 if (r) 2466 goto out; 2467 r = -EFAULT; 2468 if (copy_to_user(argp, &tr, sizeof(tr))) 2469 goto out; 2470 r = 0; 2471 break; 2472 } 2473 case KVM_SET_GUEST_DEBUG: { 2474 struct kvm_guest_debug dbg; 2475 2476 r = -EFAULT; 2477 if (copy_from_user(&dbg, argp, sizeof(dbg))) 2478 goto out; 2479 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg); 2480 break; 2481 } 2482 case KVM_SET_SIGNAL_MASK: { 2483 struct kvm_signal_mask __user *sigmask_arg = argp; 2484 struct kvm_signal_mask kvm_sigmask; 2485 sigset_t sigset, *p; 2486 2487 p = NULL; 2488 if (argp) { 2489 r = -EFAULT; 2490 if (copy_from_user(&kvm_sigmask, argp, 2491 sizeof(kvm_sigmask))) 2492 goto out; 2493 r = -EINVAL; 2494 if (kvm_sigmask.len != sizeof(sigset)) 2495 goto out; 2496 r = -EFAULT; 2497 if (copy_from_user(&sigset, sigmask_arg->sigset, 2498 sizeof(sigset))) 2499 goto out; 2500 p = &sigset; 2501 } 2502 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); 2503 break; 2504 } 2505 case KVM_GET_FPU: { 2506 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 2507 r = -ENOMEM; 2508 if (!fpu) 2509 goto out; 2510 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 2511 if 
(r) 2512 goto out; 2513 r = -EFAULT; 2514 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 2515 goto out; 2516 r = 0; 2517 break; 2518 } 2519 case KVM_SET_FPU: { 2520 fpu = memdup_user(argp, sizeof(*fpu)); 2521 if (IS_ERR(fpu)) { 2522 r = PTR_ERR(fpu); 2523 fpu = NULL; 2524 goto out; 2525 } 2526 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 2527 break; 2528 } 2529 default: 2530 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 2531 } 2532out: 2533 vcpu_put(vcpu); 2534 kfree(fpu); 2535 kfree(kvm_sregs); 2536 return r; 2537} 2538 2539#ifdef CONFIG_KVM_COMPAT 2540static long kvm_vcpu_compat_ioctl(struct file *filp, 2541 unsigned int ioctl, unsigned long arg) 2542{ 2543 struct kvm_vcpu *vcpu = filp->private_data; 2544 void __user *argp = compat_ptr(arg); 2545 int r; 2546 2547 if (vcpu->kvm->mm != current->mm) 2548 return -EIO; 2549 2550 switch (ioctl) { 2551 case KVM_SET_SIGNAL_MASK: { 2552 struct kvm_signal_mask __user *sigmask_arg = argp; 2553 struct kvm_signal_mask kvm_sigmask; 2554 compat_sigset_t csigset; 2555 sigset_t sigset; 2556 2557 if (argp) { 2558 r = -EFAULT; 2559 if (copy_from_user(&kvm_sigmask, argp, 2560 sizeof(kvm_sigmask))) 2561 goto out; 2562 r = -EINVAL; 2563 if (kvm_sigmask.len != sizeof(csigset)) 2564 goto out; 2565 r = -EFAULT; 2566 if (copy_from_user(&csigset, sigmask_arg->sigset, 2567 sizeof(csigset))) 2568 goto out; 2569 sigset_from_compat(&sigset, &csigset); 2570 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset); 2571 } else 2572 r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL); 2573 break; 2574 } 2575 default: 2576 r = kvm_vcpu_ioctl(filp, ioctl, arg); 2577 } 2578 2579out: 2580 return r; 2581} 2582#endif 2583 2584static int kvm_device_ioctl_attr(struct kvm_device *dev, 2585 int (*accessor)(struct kvm_device *dev, 2586 struct kvm_device_attr *attr), 2587 unsigned long arg) 2588{ 2589 struct kvm_device_attr attr; 2590 2591 if (!accessor) 2592 return -EPERM; 2593 2594 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 2595 return -EFAULT; 2596 2597 return accessor(dev, &attr); 2598} 2599 2600static long kvm_device_ioctl(struct file *filp, unsigned int ioctl, 2601 unsigned long arg) 2602{ 2603 struct kvm_device *dev = filp->private_data; 2604 2605 switch (ioctl) { 2606 case KVM_SET_DEVICE_ATTR: 2607 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 2608 case KVM_GET_DEVICE_ATTR: 2609 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg); 2610 case KVM_HAS_DEVICE_ATTR: 2611 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg); 2612 default: 2613 if (dev->ops->ioctl) 2614 return dev->ops->ioctl(dev, ioctl, arg); 2615 2616 return -ENOTTY; 2617 } 2618} 2619 2620static int kvm_device_release(struct inode *inode, struct file *filp) 2621{ 2622 struct kvm_device *dev = filp->private_data; 2623 struct kvm *kvm = dev->kvm; 2624 2625 kvm_put_kvm(kvm); 2626 return 0; 2627} 2628 2629static const struct file_operations kvm_device_fops = { 2630 .unlocked_ioctl = kvm_device_ioctl, 2631#ifdef CONFIG_KVM_COMPAT 2632 .compat_ioctl = kvm_device_ioctl, 2633#endif 2634 .release = kvm_device_release, 2635}; 2636 2637struct kvm_device *kvm_device_from_filp(struct file *filp) 2638{ 2639 if (filp->f_op != &kvm_device_fops) 2640 return NULL; 2641 2642 return filp->private_data; 2643} 2644 2645static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = { 2646#ifdef CONFIG_KVM_MPIC 2647 [KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops, 2648 [KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops, 2649#endif 2650 2651#ifdef CONFIG_KVM_XICS 2652 [KVM_DEV_TYPE_XICS] = &kvm_xics_ops, 2653#endif 
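
	/*
	 * Other device types can be registered at run time with
	 * kvm_register_device_ops() below; e.g. the VFIO pseudo device is
	 * set up that way via kvm_vfio_ops_init() in kvm_init().
	 */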
2654}; 2655 2656int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type) 2657{ 2658 if (type >= ARRAY_SIZE(kvm_device_ops_table)) 2659 return -ENOSPC; 2660 2661 if (kvm_device_ops_table[type] != NULL) 2662 return -EEXIST; 2663 2664 kvm_device_ops_table[type] = ops; 2665 return 0; 2666} 2667 2668void kvm_unregister_device_ops(u32 type) 2669{ 2670 if (kvm_device_ops_table[type] != NULL) 2671 kvm_device_ops_table[type] = NULL; 2672} 2673 2674static int kvm_ioctl_create_device(struct kvm *kvm, 2675 struct kvm_create_device *cd) 2676{ 2677 struct kvm_device_ops *ops = NULL; 2678 struct kvm_device *dev; 2679 bool test = cd->flags & KVM_CREATE_DEVICE_TEST; 2680 int ret; 2681 2682 if (cd->type >= ARRAY_SIZE(kvm_device_ops_table)) 2683 return -ENODEV; 2684 2685 ops = kvm_device_ops_table[cd->type]; 2686 if (ops == NULL) 2687 return -ENODEV; 2688 2689 if (test) 2690 return 0; 2691 2692 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2693 if (!dev) 2694 return -ENOMEM; 2695 2696 dev->ops = ops; 2697 dev->kvm = kvm; 2698 2699 ret = ops->create(dev, cd->type); 2700 if (ret < 0) { 2701 kfree(dev); 2702 return ret; 2703 } 2704 2705 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2706 if (ret < 0) { 2707 ops->destroy(dev); 2708 return ret; 2709 } 2710 2711 list_add(&dev->vm_node, &kvm->devices); 2712 kvm_get_kvm(kvm); 2713 cd->fd = ret; 2714 return 0; 2715} 2716 2717static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) 2718{ 2719 switch (arg) { 2720 case KVM_CAP_USER_MEMORY: 2721 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 2722 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: 2723 case KVM_CAP_INTERNAL_ERROR_DATA: 2724#ifdef CONFIG_HAVE_KVM_MSI 2725 case KVM_CAP_SIGNAL_MSI: 2726#endif 2727#ifdef CONFIG_HAVE_KVM_IRQFD 2728 case KVM_CAP_IRQFD: 2729 case KVM_CAP_IRQFD_RESAMPLE: 2730#endif 2731 case KVM_CAP_IOEVENTFD_ANY_LENGTH: 2732 case KVM_CAP_CHECK_EXTENSION_VM: 2733 return 1; 2734#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2735 case KVM_CAP_IRQ_ROUTING: 2736 return KVM_MAX_IRQ_ROUTES; 2737#endif 2738#if KVM_ADDRESS_SPACE_NUM > 1 2739 case KVM_CAP_MULTI_ADDRESS_SPACE: 2740 return KVM_ADDRESS_SPACE_NUM; 2741#endif 2742 default: 2743 break; 2744 } 2745 return kvm_vm_ioctl_check_extension(kvm, arg); 2746} 2747 2748static long kvm_vm_ioctl(struct file *filp, 2749 unsigned int ioctl, unsigned long arg) 2750{ 2751 struct kvm *kvm = filp->private_data; 2752 void __user *argp = (void __user *)arg; 2753 int r; 2754 2755 if (kvm->mm != current->mm) 2756 return -EIO; 2757 switch (ioctl) { 2758 case KVM_CREATE_VCPU: 2759 r = kvm_vm_ioctl_create_vcpu(kvm, arg); 2760 break; 2761 case KVM_SET_USER_MEMORY_REGION: { 2762 struct kvm_userspace_memory_region kvm_userspace_mem; 2763 2764 r = -EFAULT; 2765 if (copy_from_user(&kvm_userspace_mem, argp, 2766 sizeof(kvm_userspace_mem))) 2767 goto out; 2768 2769 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); 2770 break; 2771 } 2772 case KVM_GET_DIRTY_LOG: { 2773 struct kvm_dirty_log log; 2774 2775 r = -EFAULT; 2776 if (copy_from_user(&log, argp, sizeof(log))) 2777 goto out; 2778 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2779 break; 2780 } 2781#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2782 case KVM_REGISTER_COALESCED_MMIO: { 2783 struct kvm_coalesced_mmio_zone zone; 2784 2785 r = -EFAULT; 2786 if (copy_from_user(&zone, argp, sizeof(zone))) 2787 goto out; 2788 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); 2789 break; 2790 } 2791 case KVM_UNREGISTER_COALESCED_MMIO: { 2792 struct kvm_coalesced_mmio_zone zone; 2793 2794 r = -EFAULT; 
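		/*
		 * As in the other cases of this switch, r is preset to
		 * -EFAULT so a failing copy_from_user() exits through "out"
		 * with the right errno.
		 */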
2795 if (copy_from_user(&zone, argp, sizeof(zone))) 2796 goto out; 2797 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); 2798 break; 2799 } 2800#endif 2801 case KVM_IRQFD: { 2802 struct kvm_irqfd data; 2803 2804 r = -EFAULT; 2805 if (copy_from_user(&data, argp, sizeof(data))) 2806 goto out; 2807 r = kvm_irqfd(kvm, &data); 2808 break; 2809 } 2810 case KVM_IOEVENTFD: { 2811 struct kvm_ioeventfd data; 2812 2813 r = -EFAULT; 2814 if (copy_from_user(&data, argp, sizeof(data))) 2815 goto out; 2816 r = kvm_ioeventfd(kvm, &data); 2817 break; 2818 } 2819#ifdef CONFIG_HAVE_KVM_MSI 2820 case KVM_SIGNAL_MSI: { 2821 struct kvm_msi msi; 2822 2823 r = -EFAULT; 2824 if (copy_from_user(&msi, argp, sizeof(msi))) 2825 goto out; 2826 r = kvm_send_userspace_msi(kvm, &msi); 2827 break; 2828 } 2829#endif 2830#ifdef __KVM_HAVE_IRQ_LINE 2831 case KVM_IRQ_LINE_STATUS: 2832 case KVM_IRQ_LINE: { 2833 struct kvm_irq_level irq_event; 2834 2835 r = -EFAULT; 2836 if (copy_from_user(&irq_event, argp, sizeof(irq_event))) 2837 goto out; 2838 2839 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, 2840 ioctl == KVM_IRQ_LINE_STATUS); 2841 if (r) 2842 goto out; 2843 2844 r = -EFAULT; 2845 if (ioctl == KVM_IRQ_LINE_STATUS) { 2846 if (copy_to_user(argp, &irq_event, sizeof(irq_event))) 2847 goto out; 2848 } 2849 2850 r = 0; 2851 break; 2852 } 2853#endif 2854#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING 2855 case KVM_SET_GSI_ROUTING: { 2856 struct kvm_irq_routing routing; 2857 struct kvm_irq_routing __user *urouting; 2858 struct kvm_irq_routing_entry *entries; 2859 2860 r = -EFAULT; 2861 if (copy_from_user(&routing, argp, sizeof(routing))) 2862 goto out; 2863 r = -EINVAL; 2864 if (routing.nr >= KVM_MAX_IRQ_ROUTES) 2865 goto out; 2866 if (routing.flags) 2867 goto out; 2868 r = -ENOMEM; 2869 entries = vmalloc(routing.nr * sizeof(*entries)); 2870 if (!entries) 2871 goto out; 2872 r = -EFAULT; 2873 urouting = argp; 2874 if (copy_from_user(entries, urouting->entries, 2875 routing.nr * sizeof(*entries))) 2876 goto out_free_irq_routing; 2877 r = kvm_set_irq_routing(kvm, entries, routing.nr, 2878 routing.flags); 2879out_free_irq_routing: 2880 vfree(entries); 2881 break; 2882 } 2883#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */ 2884 case KVM_CREATE_DEVICE: { 2885 struct kvm_create_device cd; 2886 2887 r = -EFAULT; 2888 if (copy_from_user(&cd, argp, sizeof(cd))) 2889 goto out; 2890 2891 r = kvm_ioctl_create_device(kvm, &cd); 2892 if (r) 2893 goto out; 2894 2895 r = -EFAULT; 2896 if (copy_to_user(argp, &cd, sizeof(cd))) 2897 goto out; 2898 2899 r = 0; 2900 break; 2901 } 2902 case KVM_CHECK_EXTENSION: 2903 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); 2904 break; 2905 default: 2906 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 2907 } 2908out: 2909 return r; 2910} 2911 2912#ifdef CONFIG_KVM_COMPAT 2913struct compat_kvm_dirty_log { 2914 __u32 slot; 2915 __u32 padding1; 2916 union { 2917 compat_uptr_t dirty_bitmap; /* one bit per page */ 2918 __u64 padding2; 2919 }; 2920}; 2921 2922static long kvm_vm_compat_ioctl(struct file *filp, 2923 unsigned int ioctl, unsigned long arg) 2924{ 2925 struct kvm *kvm = filp->private_data; 2926 int r; 2927 2928 if (kvm->mm != current->mm) 2929 return -EIO; 2930 switch (ioctl) { 2931 case KVM_GET_DIRTY_LOG: { 2932 struct compat_kvm_dirty_log compat_log; 2933 struct kvm_dirty_log log; 2934 2935 r = -EFAULT; 2936 if (copy_from_user(&compat_log, (void __user *)arg, 2937 sizeof(compat_log))) 2938 goto out; 2939 log.slot = compat_log.slot; 2940 log.padding1 = compat_log.padding1; 2941 log.padding2 = compat_log.padding2; 2942 
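		/*
		 * compat_ptr() widens the 32-bit user pointer from the compat
		 * layout so the native KVM_GET_DIRTY_LOG handler below can be
		 * reused unchanged.
		 */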
log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap); 2943 2944 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); 2945 break; 2946 } 2947 default: 2948 r = kvm_vm_ioctl(filp, ioctl, arg); 2949 } 2950 2951out: 2952 return r; 2953} 2954#endif 2955 2956static struct file_operations kvm_vm_fops = { 2957 .release = kvm_vm_release, 2958 .unlocked_ioctl = kvm_vm_ioctl, 2959#ifdef CONFIG_KVM_COMPAT 2960 .compat_ioctl = kvm_vm_compat_ioctl, 2961#endif 2962 .llseek = noop_llseek, 2963}; 2964 2965static int kvm_dev_ioctl_create_vm(unsigned long type) 2966{ 2967 int r; 2968 struct kvm *kvm; 2969 2970 kvm = kvm_create_vm(type); 2971 if (IS_ERR(kvm)) 2972 return PTR_ERR(kvm); 2973#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 2974 r = kvm_coalesced_mmio_init(kvm); 2975 if (r < 0) { 2976 kvm_put_kvm(kvm); 2977 return r; 2978 } 2979#endif 2980 r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC); 2981 if (r < 0) 2982 kvm_put_kvm(kvm); 2983 2984 return r; 2985} 2986 2987static long kvm_dev_ioctl(struct file *filp, 2988 unsigned int ioctl, unsigned long arg) 2989{ 2990 long r = -EINVAL; 2991 2992 switch (ioctl) { 2993 case KVM_GET_API_VERSION: 2994 if (arg) 2995 goto out; 2996 r = KVM_API_VERSION; 2997 break; 2998 case KVM_CREATE_VM: 2999 r = kvm_dev_ioctl_create_vm(arg); 3000 break; 3001 case KVM_CHECK_EXTENSION: 3002 r = kvm_vm_ioctl_check_extension_generic(NULL, arg); 3003 break; 3004 case KVM_GET_VCPU_MMAP_SIZE: 3005 if (arg) 3006 goto out; 3007 r = PAGE_SIZE; /* struct kvm_run */ 3008#ifdef CONFIG_X86 3009 r += PAGE_SIZE; /* pio data page */ 3010#endif 3011#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET 3012 r += PAGE_SIZE; /* coalesced mmio ring page */ 3013#endif 3014 break; 3015 case KVM_TRACE_ENABLE: 3016 case KVM_TRACE_PAUSE: 3017 case KVM_TRACE_DISABLE: 3018 r = -EOPNOTSUPP; 3019 break; 3020 default: 3021 return kvm_arch_dev_ioctl(filp, ioctl, arg); 3022 } 3023out: 3024 return r; 3025} 3026 3027static struct file_operations kvm_chardev_ops = { 3028 .unlocked_ioctl = kvm_dev_ioctl, 3029 .compat_ioctl = kvm_dev_ioctl, 3030 .llseek = noop_llseek, 3031}; 3032 3033static struct miscdevice kvm_dev = { 3034 KVM_MINOR, 3035 "kvm", 3036 &kvm_chardev_ops, 3037}; 3038 3039static void hardware_enable_nolock(void *junk) 3040{ 3041 int cpu = raw_smp_processor_id(); 3042 int r; 3043 3044 if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) 3045 return; 3046 3047 cpumask_set_cpu(cpu, cpus_hardware_enabled); 3048 3049 r = kvm_arch_hardware_enable(); 3050 3051 if (r) { 3052 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3053 atomic_inc(&hardware_enable_failed); 3054 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu); 3055 } 3056} 3057 3058static void hardware_enable(void) 3059{ 3060 raw_spin_lock(&kvm_count_lock); 3061 if (kvm_usage_count) 3062 hardware_enable_nolock(NULL); 3063 raw_spin_unlock(&kvm_count_lock); 3064} 3065 3066static void hardware_disable_nolock(void *junk) 3067{ 3068 int cpu = raw_smp_processor_id(); 3069 3070 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) 3071 return; 3072 cpumask_clear_cpu(cpu, cpus_hardware_enabled); 3073 kvm_arch_hardware_disable(); 3074} 3075 3076static void hardware_disable(void) 3077{ 3078 raw_spin_lock(&kvm_count_lock); 3079 if (kvm_usage_count) 3080 hardware_disable_nolock(NULL); 3081 raw_spin_unlock(&kvm_count_lock); 3082} 3083 3084static void hardware_disable_all_nolock(void) 3085{ 3086 BUG_ON(!kvm_usage_count); 3087 3088 kvm_usage_count--; 3089 if (!kvm_usage_count) 3090 on_each_cpu(hardware_disable_nolock, NULL, 1); 3091} 3092 3093static void 
hardware_disable_all(void)
{
	raw_spin_lock(&kvm_count_lock);
	hardware_disable_all_nolock();
	raw_spin_unlock(&kvm_count_lock);
}

static int hardware_enable_all(void)
{
	int r = 0;

	raw_spin_lock(&kvm_count_lock);

	kvm_usage_count++;
	if (kvm_usage_count == 1) {
		atomic_set(&hardware_enable_failed, 0);
		on_each_cpu(hardware_enable_nolock, NULL, 1);

		if (atomic_read(&hardware_enable_failed)) {
			hardware_disable_all_nolock();
			r = -EBUSY;
		}
	}

	raw_spin_unlock(&kvm_count_lock);

	return r;
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		hardware_disable();
		break;
	case CPU_STARTING:
		hardware_enable();
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT requires VMX to be off on all CPUs when the system
	 * shuts down.
	 */
	pr_info("kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->range[i].dev;

		kvm_iodevice_destructor(pos);
	}
	kfree(bus);
}

static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
				 const struct kvm_io_range *r2)
{
	gpa_t addr1 = r1->addr;
	gpa_t addr2 = r2->addr;

	if (addr1 < addr2)
		return -1;

	/* If r2->len == 0, match the exact address.  If r2->len != 0,
	 * accept any overlapping write.  Any order is acceptable for
	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
	 * we process all of them.
3182 */ 3183 if (r2->len) { 3184 addr1 += r1->len; 3185 addr2 += r2->len; 3186 } 3187 3188 if (addr1 > addr2) 3189 return 1; 3190 3191 return 0; 3192} 3193 3194static int kvm_io_bus_sort_cmp(const void *p1, const void *p2) 3195{ 3196 return kvm_io_bus_cmp(p1, p2); 3197} 3198 3199static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, 3200 gpa_t addr, int len) 3201{ 3202 bus->range[bus->dev_count++] = (struct kvm_io_range) { 3203 .addr = addr, 3204 .len = len, 3205 .dev = dev, 3206 }; 3207 3208 sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range), 3209 kvm_io_bus_sort_cmp, NULL); 3210 3211 return 0; 3212} 3213 3214static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus, 3215 gpa_t addr, int len) 3216{ 3217 struct kvm_io_range *range, key; 3218 int off; 3219 3220 key = (struct kvm_io_range) { 3221 .addr = addr, 3222 .len = len, 3223 }; 3224 3225 range = bsearch(&key, bus->range, bus->dev_count, 3226 sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp); 3227 if (range == NULL) 3228 return -ENOENT; 3229 3230 off = range - bus->range; 3231 3232 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0) 3233 off--; 3234 3235 return off; 3236} 3237 3238static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3239 struct kvm_io_range *range, const void *val) 3240{ 3241 int idx; 3242 3243 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3244 if (idx < 0) 3245 return -EOPNOTSUPP; 3246 3247 while (idx < bus->dev_count && 3248 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3249 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, 3250 range->len, val)) 3251 return idx; 3252 idx++; 3253 } 3254 3255 return -EOPNOTSUPP; 3256} 3257 3258/* kvm_io_bus_write - called under kvm->slots_lock */ 3259int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3260 int len, const void *val) 3261{ 3262 struct kvm_io_bus *bus; 3263 struct kvm_io_range range; 3264 int r; 3265 3266 range = (struct kvm_io_range) { 3267 .addr = addr, 3268 .len = len, 3269 }; 3270 3271 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3272 r = __kvm_io_bus_write(vcpu, bus, &range, val); 3273 return r < 0 ? r : 0; 3274} 3275 3276/* kvm_io_bus_write_cookie - called under kvm->slots_lock */ 3277int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 3278 gpa_t addr, int len, const void *val, long cookie) 3279{ 3280 struct kvm_io_bus *bus; 3281 struct kvm_io_range range; 3282 3283 range = (struct kvm_io_range) { 3284 .addr = addr, 3285 .len = len, 3286 }; 3287 3288 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3289 3290 /* First try the device referenced by cookie. */ 3291 if ((cookie >= 0) && (cookie < bus->dev_count) && 3292 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0)) 3293 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, 3294 val)) 3295 return cookie; 3296 3297 /* 3298 * cookie contained garbage; fall back to search and return the 3299 * correct cookie value. 
3300 */ 3301 return __kvm_io_bus_write(vcpu, bus, &range, val); 3302} 3303 3304static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus, 3305 struct kvm_io_range *range, void *val) 3306{ 3307 int idx; 3308 3309 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); 3310 if (idx < 0) 3311 return -EOPNOTSUPP; 3312 3313 while (idx < bus->dev_count && 3314 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) { 3315 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, 3316 range->len, val)) 3317 return idx; 3318 idx++; 3319 } 3320 3321 return -EOPNOTSUPP; 3322} 3323EXPORT_SYMBOL_GPL(kvm_io_bus_write); 3324 3325/* kvm_io_bus_read - called under kvm->slots_lock */ 3326int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 3327 int len, void *val) 3328{ 3329 struct kvm_io_bus *bus; 3330 struct kvm_io_range range; 3331 int r; 3332 3333 range = (struct kvm_io_range) { 3334 .addr = addr, 3335 .len = len, 3336 }; 3337 3338 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 3339 r = __kvm_io_bus_read(vcpu, bus, &range, val); 3340 return r < 0 ? r : 0; 3341} 3342 3343 3344/* Caller must hold slots_lock. */ 3345int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 3346 int len, struct kvm_io_device *dev) 3347{ 3348 struct kvm_io_bus *new_bus, *bus; 3349 3350 bus = kvm->buses[bus_idx]; 3351 /* exclude ioeventfd which is limited by maximum fd */ 3352 if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) 3353 return -ENOSPC; 3354 3355 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count + 1) * 3356 sizeof(struct kvm_io_range)), GFP_KERNEL); 3357 if (!new_bus) 3358 return -ENOMEM; 3359 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count * 3360 sizeof(struct kvm_io_range))); 3361 kvm_io_bus_insert_dev(new_bus, dev, addr, len); 3362 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3363 synchronize_srcu_expedited(&kvm->srcu); 3364 kfree(bus); 3365 3366 return 0; 3367} 3368 3369/* Caller must hold slots_lock. 
*/ 3370int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, 3371 struct kvm_io_device *dev) 3372{ 3373 int i, r; 3374 struct kvm_io_bus *new_bus, *bus; 3375 3376 bus = kvm->buses[bus_idx]; 3377 r = -ENOENT; 3378 for (i = 0; i < bus->dev_count; i++) 3379 if (bus->range[i].dev == dev) { 3380 r = 0; 3381 break; 3382 } 3383 3384 if (r) 3385 return r; 3386 3387 new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * 3388 sizeof(struct kvm_io_range)), GFP_KERNEL); 3389 if (!new_bus) 3390 return -ENOMEM; 3391 3392 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); 3393 new_bus->dev_count--; 3394 memcpy(new_bus->range + i, bus->range + i + 1, 3395 (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); 3396 3397 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 3398 synchronize_srcu_expedited(&kvm->srcu); 3399 kfree(bus); 3400 return r; 3401} 3402 3403static struct notifier_block kvm_cpu_notifier = { 3404 .notifier_call = kvm_cpu_hotplug, 3405}; 3406 3407static int vm_stat_get(void *_offset, u64 *val) 3408{ 3409 unsigned offset = (long)_offset; 3410 struct kvm *kvm; 3411 3412 *val = 0; 3413 spin_lock(&kvm_lock); 3414 list_for_each_entry(kvm, &vm_list, vm_list) 3415 *val += *(u32 *)((void *)kvm + offset); 3416 spin_unlock(&kvm_lock); 3417 return 0; 3418} 3419 3420DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n"); 3421 3422static int vcpu_stat_get(void *_offset, u64 *val) 3423{ 3424 unsigned offset = (long)_offset; 3425 struct kvm *kvm; 3426 struct kvm_vcpu *vcpu; 3427 int i; 3428 3429 *val = 0; 3430 spin_lock(&kvm_lock); 3431 list_for_each_entry(kvm, &vm_list, vm_list) 3432 kvm_for_each_vcpu(i, vcpu, kvm) 3433 *val += *(u32 *)((void *)vcpu + offset); 3434 3435 spin_unlock(&kvm_lock); 3436 return 0; 3437} 3438 3439DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); 3440 3441static const struct file_operations *stat_fops[] = { 3442 [KVM_STAT_VCPU] = &vcpu_stat_fops, 3443 [KVM_STAT_VM] = &vm_stat_fops, 3444}; 3445 3446static int kvm_init_debug(void) 3447{ 3448 int r = -EEXIST; 3449 struct kvm_stats_debugfs_item *p; 3450 3451 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); 3452 if (kvm_debugfs_dir == NULL) 3453 goto out; 3454 3455 for (p = debugfs_entries; p->name; ++p) { 3456 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir, 3457 (void *)(long)p->offset, 3458 stat_fops[p->kind]); 3459 if (p->dentry == NULL) 3460 goto out_dir; 3461 } 3462 3463 return 0; 3464 3465out_dir: 3466 debugfs_remove_recursive(kvm_debugfs_dir); 3467out: 3468 return r; 3469} 3470 3471static void kvm_exit_debug(void) 3472{ 3473 struct kvm_stats_debugfs_item *p; 3474 3475 for (p = debugfs_entries; p->name; ++p) 3476 debugfs_remove(p->dentry); 3477 debugfs_remove(kvm_debugfs_dir); 3478} 3479 3480static int kvm_suspend(void) 3481{ 3482 if (kvm_usage_count) 3483 hardware_disable_nolock(NULL); 3484 return 0; 3485} 3486 3487static void kvm_resume(void) 3488{ 3489 if (kvm_usage_count) { 3490 WARN_ON(raw_spin_is_locked(&kvm_count_lock)); 3491 hardware_enable_nolock(NULL); 3492 } 3493} 3494 3495static struct syscore_ops kvm_syscore_ops = { 3496 .suspend = kvm_suspend, 3497 .resume = kvm_resume, 3498}; 3499 3500static inline 3501struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn) 3502{ 3503 return container_of(pn, struct kvm_vcpu, preempt_notifier); 3504} 3505 3506static void kvm_sched_in(struct preempt_notifier *pn, int cpu) 3507{ 3508 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3509 3510 if (vcpu->preempted) 3511 
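		/*
		 * Scheduled back in: clear the flag so kvm_vcpu_on_spin()
		 * no longer treats this vCPU as a preempted yield candidate.
		 */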
vcpu->preempted = false; 3512 3513 kvm_arch_sched_in(vcpu, cpu); 3514 3515 kvm_arch_vcpu_load(vcpu, cpu); 3516} 3517 3518static void kvm_sched_out(struct preempt_notifier *pn, 3519 struct task_struct *next) 3520{ 3521 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn); 3522 3523 if (current->state == TASK_RUNNING) 3524 vcpu->preempted = true; 3525 kvm_arch_vcpu_put(vcpu); 3526} 3527 3528int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, 3529 struct module *module) 3530{ 3531 int r; 3532 int cpu; 3533 3534 r = kvm_arch_init(opaque); 3535 if (r) 3536 goto out_fail; 3537 3538 /* 3539 * kvm_arch_init makes sure there's at most one caller 3540 * for architectures that support multiple implementations, 3541 * like intel and amd on x86. 3542 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating 3543 * conflicts in case kvm is already setup for another implementation. 3544 */ 3545 r = kvm_irqfd_init(); 3546 if (r) 3547 goto out_irqfd; 3548 3549 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { 3550 r = -ENOMEM; 3551 goto out_free_0; 3552 } 3553 3554 r = kvm_arch_hardware_setup(); 3555 if (r < 0) 3556 goto out_free_0a; 3557 3558 for_each_online_cpu(cpu) { 3559 smp_call_function_single(cpu, 3560 kvm_arch_check_processor_compat, 3561 &r, 1); 3562 if (r < 0) 3563 goto out_free_1; 3564 } 3565 3566 r = register_cpu_notifier(&kvm_cpu_notifier); 3567 if (r) 3568 goto out_free_2; 3569 register_reboot_notifier(&kvm_reboot_notifier); 3570 3571 /* A kmem cache lets us meet the alignment requirements of fx_save. */ 3572 if (!vcpu_align) 3573 vcpu_align = __alignof__(struct kvm_vcpu); 3574 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, 3575 0, NULL); 3576 if (!kvm_vcpu_cache) { 3577 r = -ENOMEM; 3578 goto out_free_3; 3579 } 3580 3581 r = kvm_async_pf_init(); 3582 if (r) 3583 goto out_free; 3584 3585 kvm_chardev_ops.owner = module; 3586 kvm_vm_fops.owner = module; 3587 kvm_vcpu_fops.owner = module; 3588 3589 r = misc_register(&kvm_dev); 3590 if (r) { 3591 pr_err("kvm: misc device register failed\n"); 3592 goto out_unreg; 3593 } 3594 3595 register_syscore_ops(&kvm_syscore_ops); 3596 3597 kvm_preempt_ops.sched_in = kvm_sched_in; 3598 kvm_preempt_ops.sched_out = kvm_sched_out; 3599 3600 r = kvm_init_debug(); 3601 if (r) { 3602 pr_err("kvm: create debugfs files failed\n"); 3603 goto out_undebugfs; 3604 } 3605 3606 r = kvm_vfio_ops_init(); 3607 WARN_ON(r); 3608 3609 return 0; 3610 3611out_undebugfs: 3612 unregister_syscore_ops(&kvm_syscore_ops); 3613 misc_deregister(&kvm_dev); 3614out_unreg: 3615 kvm_async_pf_deinit(); 3616out_free: 3617 kmem_cache_destroy(kvm_vcpu_cache); 3618out_free_3: 3619 unregister_reboot_notifier(&kvm_reboot_notifier); 3620 unregister_cpu_notifier(&kvm_cpu_notifier); 3621out_free_2: 3622out_free_1: 3623 kvm_arch_hardware_unsetup(); 3624out_free_0a: 3625 free_cpumask_var(cpus_hardware_enabled); 3626out_free_0: 3627 kvm_irqfd_exit(); 3628out_irqfd: 3629 kvm_arch_exit(); 3630out_fail: 3631 return r; 3632} 3633EXPORT_SYMBOL_GPL(kvm_init); 3634 3635void kvm_exit(void) 3636{ 3637 kvm_exit_debug(); 3638 misc_deregister(&kvm_dev); 3639 kmem_cache_destroy(kvm_vcpu_cache); 3640 kvm_async_pf_deinit(); 3641 unregister_syscore_ops(&kvm_syscore_ops); 3642 unregister_reboot_notifier(&kvm_reboot_notifier); 3643 unregister_cpu_notifier(&kvm_cpu_notifier); 3644 on_each_cpu(hardware_disable_nolock, NULL, 1); 3645 kvm_arch_hardware_unsetup(); 3646 kvm_arch_exit(); 3647 kvm_irqfd_exit(); 3648 free_cpumask_var(cpus_hardware_enabled); 3649 
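	/* Drop the device ops registered by kvm_vfio_ops_init() in kvm_init(). */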
kvm_vfio_ops_exit(); 3650} 3651EXPORT_SYMBOL_GPL(kvm_exit); 3652
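
/*
 * Illustrative sketch (not part of this file): a minimal user-space caller of
 * the ioctl paths implemented above, shown only to make the control flow
 * concrete.  It assumes a Linux host exposing /dev/kvm and the standard
 * <linux/kvm.h> UAPI header; error handling is abbreviated and guest memory
 * and register setup (KVM_SET_USER_MEMORY_REGION, KVM_SET_REGS, ...) is
 * omitted, so a real VMM would do considerably more before KVM_RUN.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm, vm, vcpu, mmap_size;
	struct kvm_run *run;

	kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);	/* handled by kvm_dev_ioctl() */
	if (kvm < 0 || ioctl(kvm, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return 1;

	vm = ioctl(kvm, KVM_CREATE_VM, 0);		/* kvm_dev_ioctl_create_vm() */
	vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);		/* kvm_vm_ioctl_create_vcpu() */

	/* The vcpu fd is mmap'ed to reach struct kvm_run (kvm_vcpu_mmap/fault). */
	mmap_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu, 0);

	/* One KVM_RUN round trip; a real VMM loops on run->exit_reason. */
	if (ioctl(vcpu, KVM_RUN, 0) == 0)
		printf("exit_reason = %u\n", run->exit_reason);
	return 0;
}
#endif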