/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>

/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
bool tdp_enabled = false;

enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
};

#undef MMU_DEBUG

#ifdef MMU_DEBUG
static bool dbg = 0;
module_param(dbg, bool, 0644);

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...)
do { } while (0) 75#define MMU_WARN_ON(x) do { } while (0) 76#endif 77 78#define PTE_PREFETCH_NUM 8 79 80#define PT_FIRST_AVAIL_BITS_SHIFT 10 81#define PT64_SECOND_AVAIL_BITS_SHIFT 52 82 83#define PT64_LEVEL_BITS 9 84 85#define PT64_LEVEL_SHIFT(level) \ 86 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS) 87 88#define PT64_INDEX(address, level)\ 89 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1)) 90 91 92#define PT32_LEVEL_BITS 10 93 94#define PT32_LEVEL_SHIFT(level) \ 95 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS) 96 97#define PT32_LVL_OFFSET_MASK(level) \ 98 (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ 99 * PT32_LEVEL_BITS))) - 1)) 100 101#define PT32_INDEX(address, level)\ 102 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1)) 103 104 105#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) 106#define PT64_DIR_BASE_ADDR_MASK \ 107 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1)) 108#define PT64_LVL_ADDR_MASK(level) \ 109 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ 110 * PT64_LEVEL_BITS))) - 1)) 111#define PT64_LVL_OFFSET_MASK(level) \ 112 (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ 113 * PT64_LEVEL_BITS))) - 1)) 114 115#define PT32_BASE_ADDR_MASK PAGE_MASK 116#define PT32_DIR_BASE_ADDR_MASK \ 117 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1)) 118#define PT32_LVL_ADDR_MASK(level) \ 119 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ 120 * PT32_LEVEL_BITS))) - 1)) 121 122#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \ 123 | shadow_x_mask | shadow_nx_mask) 124 125#define ACC_EXEC_MASK 1 126#define ACC_WRITE_MASK PT_WRITABLE_MASK 127#define ACC_USER_MASK PT_USER_MASK 128#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) 129 130#include <trace/events/kvm.h> 131 132#define CREATE_TRACE_POINTS 133#include "mmutrace.h" 134 135#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) 136#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1)) 137 138#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 139 140/* make pte_list_desc fit well in cache line */ 141#define PTE_LIST_EXT 3 142 143struct pte_list_desc { 144 u64 *sptes[PTE_LIST_EXT]; 145 struct pte_list_desc *more; 146}; 147 148struct kvm_shadow_walk_iterator { 149 u64 addr; 150 hpa_t shadow_addr; 151 u64 *sptep; 152 int level; 153 unsigned index; 154}; 155 156#define for_each_shadow_entry(_vcpu, _addr, _walker) \ 157 for (shadow_walk_init(&(_walker), _vcpu, _addr); \ 158 shadow_walk_okay(&(_walker)); \ 159 shadow_walk_next(&(_walker))) 160 161#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ 162 for (shadow_walk_init(&(_walker), _vcpu, _addr); \ 163 shadow_walk_okay(&(_walker)) && \ 164 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \ 165 __shadow_walk_next(&(_walker), spte)) 166 167static struct kmem_cache *pte_list_desc_cache; 168static struct kmem_cache *mmu_page_header_cache; 169static struct percpu_counter kvm_total_used_mmu_pages; 170 171static u64 __read_mostly shadow_nx_mask; 172static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ 173static u64 __read_mostly shadow_user_mask; 174static u64 __read_mostly shadow_accessed_mask; 175static u64 __read_mostly shadow_dirty_mask; 176static u64 __read_mostly shadow_mmio_mask; 177 178static void mmu_spte_set(u64 *sptep, u64 spte); 179static void mmu_free_roots(struct kvm_vcpu *vcpu); 180 181void 
kvm_mmu_set_mmio_spte_mask(u64 mmio_mask) 182{ 183 shadow_mmio_mask = mmio_mask; 184} 185EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); 186 187/* 188 * the low bit of the generation number is always presumed to be zero. 189 * This disables mmio caching during memslot updates. The concept is 190 * similar to a seqcount but instead of retrying the access we just punt 191 * and ignore the cache. 192 * 193 * spte bits 3-11 are used as bits 1-9 of the generation number, 194 * the bits 52-61 are used as bits 10-19 of the generation number. 195 */ 196#define MMIO_SPTE_GEN_LOW_SHIFT 2 197#define MMIO_SPTE_GEN_HIGH_SHIFT 52 198 199#define MMIO_GEN_SHIFT 20 200#define MMIO_GEN_LOW_SHIFT 10 201#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2) 202#define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1) 203 204static u64 generation_mmio_spte_mask(unsigned int gen) 205{ 206 u64 mask; 207 208 WARN_ON(gen & ~MMIO_GEN_MASK); 209 210 mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT; 211 mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT; 212 return mask; 213} 214 215static unsigned int get_mmio_spte_generation(u64 spte) 216{ 217 unsigned int gen; 218 219 spte &= ~shadow_mmio_mask; 220 221 gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK; 222 gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT; 223 return gen; 224} 225 226static unsigned int kvm_current_mmio_generation(struct kvm *kvm) 227{ 228 return kvm_memslots(kvm)->generation & MMIO_GEN_MASK; 229} 230 231static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn, 232 unsigned access) 233{ 234 unsigned int gen = kvm_current_mmio_generation(kvm); 235 u64 mask = generation_mmio_spte_mask(gen); 236 237 access &= ACC_WRITE_MASK | ACC_USER_MASK; 238 mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT; 239 240 trace_mark_mmio_spte(sptep, gfn, access, gen); 241 mmu_spte_set(sptep, mask); 242} 243 244static bool is_mmio_spte(u64 spte) 245{ 246 return (spte & shadow_mmio_mask) == shadow_mmio_mask; 247} 248 249static gfn_t get_mmio_spte_gfn(u64 spte) 250{ 251 u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask; 252 return (spte & ~mask) >> PAGE_SHIFT; 253} 254 255static unsigned get_mmio_spte_access(u64 spte) 256{ 257 u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask; 258 return (spte & ~mask) & ~PAGE_MASK; 259} 260 261static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, 262 pfn_t pfn, unsigned access) 263{ 264 if (unlikely(is_noslot_pfn(pfn))) { 265 mark_mmio_spte(kvm, sptep, gfn, access); 266 return true; 267 } 268 269 return false; 270} 271 272static bool check_mmio_spte(struct kvm *kvm, u64 spte) 273{ 274 unsigned int kvm_gen, spte_gen; 275 276 kvm_gen = kvm_current_mmio_generation(kvm); 277 spte_gen = get_mmio_spte_generation(spte); 278 279 trace_check_mmio_spte(spte, kvm_gen, spte_gen); 280 return likely(kvm_gen == spte_gen); 281} 282 283void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, 284 u64 dirty_mask, u64 nx_mask, u64 x_mask) 285{ 286 shadow_user_mask = user_mask; 287 shadow_accessed_mask = accessed_mask; 288 shadow_dirty_mask = dirty_mask; 289 shadow_nx_mask = nx_mask; 290 shadow_x_mask = x_mask; 291} 292EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); 293 294static int is_cpuid_PSE36(void) 295{ 296 return 1; 297} 298 299static int is_nx(struct kvm_vcpu *vcpu) 300{ 301 return vcpu->arch.efer & EFER_NX; 302} 303 304static int is_shadow_present_pte(u64 pte) 305{ 306 return pte & PT_PRESENT_MASK && !is_mmio_spte(pte); 307} 308 309static int 
is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_rmap_spte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
	if (is_large_pte(pte))
		return 1;
	return 0;
}

static pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
	*sptep = spte;
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	*sptep = spte;
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}

static u64 __get_spte_lockless(u64 *sptep)
{
	return ACCESS_ONCE(*sptep);
}
#else
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};

static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp = page_header(__pa(sptep));

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, we should store
	 * the high bits first and only then set the present bit, so the CPU
	 * cannot fetch this spte while we are still setting it.
	 */
	smp_wmb();

	ssptep->spte_low = sspte.spte_low;
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_low = sspte.spte_low;

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * the present bit first so that a vcpu cannot fetch the stale high
	 * bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);

	return orig.spte;
}

/*
 * The idea behind this lightweight way of getting the spte on x86_32 comes
 * from gup_get_pte(arch/x86/mm/gup.c).
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running outside of the MMU lock. Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte. The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present sptes),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race. This is done using clear_spte_count.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp = page_header(__pa(sptep));
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
#endif

static bool spte_is_locklessly_modifiable(u64 spte)
{
	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
}

static bool spte_has_volatile_bits(u64 spte)
{
	/*
	 * Always update atomically an spte that can be updated outside of
	 * the mmu-lock: this ensures the dirty bit is not lost and it also
	 * gives us a stable is_writable_pte() so that a needed tlb flush is
	 * not missed.
	 */
	if (spte_is_locklessly_modifiable(spte))
		return true;

	if (!shadow_accessed_mask)
		return false;

	if (!is_shadow_present_pte(spte))
		return false;

	if ((spte & shadow_accessed_mask) &&
	      (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
		return false;

	return true;
}

static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
{
	return (old_spte & bit_mask) && !(new_spte & bit_mask);
}

static bool spte_is_bit_changed(u64 old_spte, u64 new_spte, u64 bit_mask)
{
	return (old_spte & bit_mask) != (new_spte & bit_mask);
}

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

/* Rules for using mmu_spte_update:
 * Update the state bits; the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might be cached on a CPU's TLB; the return value indicates this
 * case.
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;
	bool ret = false;

	WARN_ON(!is_rmap_spte(new_spte));

	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
		return ret;
	}

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, new_spte);
	else
		old_spte = __update_clear_spte_slow(sptep, new_spte);

	/*
	 * An spte updated outside of the mmu-lock is safe, since we always
	 * update it atomically; see the comments in
	 * spte_has_volatile_bits().
	 */
	if (spte_is_locklessly_modifiable(old_spte) &&
	      !is_writable_pte(new_spte))
		ret = true;

	if (!shadow_accessed_mask)
		return ret;

	/*
	 * Flush TLB when accessed/dirty bits are changed in the page tables,
	 * to guarantee consistency between TLB and page tables.
	 */
	if (spte_is_bit_changed(old_spte, new_spte,
                                shadow_accessed_mask | shadow_dirty_mask))
		ret = true;

	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	return ret;
}

/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent, and tracks the
 * state bits; it is used to clear a last level sptep.
 */
static int mmu_spte_clear_track_bits(u64 *sptep)
{
	pfn_t pfn;
	u64 old_spte = *sptep;

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, 0ull);
	else
		old_spte = __update_clear_spte_slow(sptep, 0ull);

	if (!is_rmap_spte(old_spte))
		return 0;

	pfn = spte_to_pfn(old_spte);

	/*
	 * KVM does not hold a refcount on the pages used by the kvm mmu;
	 * before reclaiming a page, we should unmap it from the mmu first.
	 */
	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));

	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(pfn);
	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
		kvm_set_pfn_dirty(pfn);
	return 1;
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear the spte without caring about its state bits;
 * it is used to clear an upper level spte.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
	__update_clear_spte_fast(sptep, 0ull);
}

static u64 mmu_spte_get_lockless(u64 *sptep)
{
	return __get_spte_lockless(sptep);
}

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
	/*
	 * Prevent page table teardown by making any free-er wait during
	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
	 */
	local_irq_disable();
	vcpu->mode = READING_SHADOW_PAGE_TABLES;
	/*
	 * Make sure a following spte read is not reordered ahead of the write
	 * to vcpu->mode.
	 */
	smp_mb();
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of
	 * reads to sptes. If it does, kvm_commit_zap_page() can see us
	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
653 */ 654 smp_mb(); 655 vcpu->mode = OUTSIDE_GUEST_MODE; 656 local_irq_enable(); 657} 658 659static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, 660 struct kmem_cache *base_cache, int min) 661{ 662 void *obj; 663 664 if (cache->nobjs >= min) 665 return 0; 666 while (cache->nobjs < ARRAY_SIZE(cache->objects)) { 667 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL); 668 if (!obj) 669 return -ENOMEM; 670 cache->objects[cache->nobjs++] = obj; 671 } 672 return 0; 673} 674 675static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache) 676{ 677 return cache->nobjs; 678} 679 680static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc, 681 struct kmem_cache *cache) 682{ 683 while (mc->nobjs) 684 kmem_cache_free(cache, mc->objects[--mc->nobjs]); 685} 686 687static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, 688 int min) 689{ 690 void *page; 691 692 if (cache->nobjs >= min) 693 return 0; 694 while (cache->nobjs < ARRAY_SIZE(cache->objects)) { 695 page = (void *)__get_free_page(GFP_KERNEL); 696 if (!page) 697 return -ENOMEM; 698 cache->objects[cache->nobjs++] = page; 699 } 700 return 0; 701} 702 703static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc) 704{ 705 while (mc->nobjs) 706 free_page((unsigned long)mc->objects[--mc->nobjs]); 707} 708 709static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) 710{ 711 int r; 712 713 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, 714 pte_list_desc_cache, 8 + PTE_PREFETCH_NUM); 715 if (r) 716 goto out; 717 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); 718 if (r) 719 goto out; 720 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, 721 mmu_page_header_cache, 4); 722out: 723 return r; 724} 725 726static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) 727{ 728 mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, 729 pte_list_desc_cache); 730 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); 731 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, 732 mmu_page_header_cache); 733} 734 735static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) 736{ 737 void *p; 738 739 BUG_ON(!mc->nobjs); 740 p = mc->objects[--mc->nobjs]; 741 return p; 742} 743 744static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu) 745{ 746 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache); 747} 748 749static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc) 750{ 751 kmem_cache_free(pte_list_desc_cache, pte_list_desc); 752} 753 754static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) 755{ 756 if (!sp->role.direct) 757 return sp->gfns[index]; 758 759 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); 760} 761 762static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) 763{ 764 if (sp->role.direct) 765 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index)); 766 else 767 sp->gfns[index] = gfn; 768} 769 770/* 771 * Return the pointer to the large page information for a given gfn, 772 * handling slots that are not large page aligned. 
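 *
 * Illustrative example (assuming 4K base pages on x86): for the 2MB level
 * (level == PT_DIRECTORY_LEVEL), gfn_to_index() scales the gfn down by 512
 * relative to slot->base_gfn, and lpage_info[level - 2] selects the per-slot
 * array for that page size.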
773 */ 774static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, 775 struct kvm_memory_slot *slot, 776 int level) 777{ 778 unsigned long idx; 779 780 idx = gfn_to_index(gfn, slot->base_gfn, level); 781 return &slot->arch.lpage_info[level - 2][idx]; 782} 783 784static void account_shadowed(struct kvm *kvm, gfn_t gfn) 785{ 786 struct kvm_memory_slot *slot; 787 struct kvm_lpage_info *linfo; 788 int i; 789 790 slot = gfn_to_memslot(kvm, gfn); 791 for (i = PT_DIRECTORY_LEVEL; 792 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { 793 linfo = lpage_info_slot(gfn, slot, i); 794 linfo->write_count += 1; 795 } 796 kvm->arch.indirect_shadow_pages++; 797} 798 799static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) 800{ 801 struct kvm_memory_slot *slot; 802 struct kvm_lpage_info *linfo; 803 int i; 804 805 slot = gfn_to_memslot(kvm, gfn); 806 for (i = PT_DIRECTORY_LEVEL; 807 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { 808 linfo = lpage_info_slot(gfn, slot, i); 809 linfo->write_count -= 1; 810 WARN_ON(linfo->write_count < 0); 811 } 812 kvm->arch.indirect_shadow_pages--; 813} 814 815static int has_wrprotected_page(struct kvm *kvm, 816 gfn_t gfn, 817 int level) 818{ 819 struct kvm_memory_slot *slot; 820 struct kvm_lpage_info *linfo; 821 822 slot = gfn_to_memslot(kvm, gfn); 823 if (slot) { 824 linfo = lpage_info_slot(gfn, slot, level); 825 return linfo->write_count; 826 } 827 828 return 1; 829} 830 831static int host_mapping_level(struct kvm *kvm, gfn_t gfn) 832{ 833 unsigned long page_size; 834 int i, ret = 0; 835 836 page_size = kvm_host_page_size(kvm, gfn); 837 838 for (i = PT_PAGE_TABLE_LEVEL; 839 i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) { 840 if (page_size >= KVM_HPAGE_SIZE(i)) 841 ret = i; 842 else 843 break; 844 } 845 846 return ret; 847} 848 849static struct kvm_memory_slot * 850gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, 851 bool no_dirty_log) 852{ 853 struct kvm_memory_slot *slot; 854 855 slot = gfn_to_memslot(vcpu->kvm, gfn); 856 if (!slot || slot->flags & KVM_MEMSLOT_INVALID || 857 (no_dirty_log && slot->dirty_bitmap)) 858 slot = NULL; 859 860 return slot; 861} 862 863static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn) 864{ 865 return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true); 866} 867 868static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) 869{ 870 int host_level, level, max_level; 871 872 host_level = host_mapping_level(vcpu->kvm, large_gfn); 873 874 if (host_level == PT_PAGE_TABLE_LEVEL) 875 return host_level; 876 877 max_level = min(kvm_x86_ops->get_lpage_level(), host_level); 878 879 for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level) 880 if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) 881 break; 882 883 return level - 1; 884} 885 886/* 887 * Pte mapping structures: 888 * 889 * If pte_list bit zero is zero, then pte_list point to the spte. 890 * 891 * If pte_list bit zero is one, (then pte_list & ~1) points to a struct 892 * pte_list_desc containing more mappings. 893 * 894 * Returns the number of pte entries before the spte was added or zero if 895 * the spte was not added. 
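 *
 * Illustrative example of the encoding: a pte_list value of 0 means no
 * mappings; a value equal to (unsigned long)spte (bit zero clear) means
 * exactly one mapping; a value of ((unsigned long)desc | 1) means a chain
 * of pte_list_desc structures, each holding up to PTE_LIST_EXT sptes.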
896 * 897 */ 898static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, 899 unsigned long *pte_list) 900{ 901 struct pte_list_desc *desc; 902 int i, count = 0; 903 904 if (!*pte_list) { 905 rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); 906 *pte_list = (unsigned long)spte; 907 } else if (!(*pte_list & 1)) { 908 rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); 909 desc = mmu_alloc_pte_list_desc(vcpu); 910 desc->sptes[0] = (u64 *)*pte_list; 911 desc->sptes[1] = spte; 912 *pte_list = (unsigned long)desc | 1; 913 ++count; 914 } else { 915 rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); 916 desc = (struct pte_list_desc *)(*pte_list & ~1ul); 917 while (desc->sptes[PTE_LIST_EXT-1] && desc->more) { 918 desc = desc->more; 919 count += PTE_LIST_EXT; 920 } 921 if (desc->sptes[PTE_LIST_EXT-1]) { 922 desc->more = mmu_alloc_pte_list_desc(vcpu); 923 desc = desc->more; 924 } 925 for (i = 0; desc->sptes[i]; ++i) 926 ++count; 927 desc->sptes[i] = spte; 928 } 929 return count; 930} 931 932static void 933pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc, 934 int i, struct pte_list_desc *prev_desc) 935{ 936 int j; 937 938 for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j) 939 ; 940 desc->sptes[i] = desc->sptes[j]; 941 desc->sptes[j] = NULL; 942 if (j != 0) 943 return; 944 if (!prev_desc && !desc->more) 945 *pte_list = (unsigned long)desc->sptes[0]; 946 else 947 if (prev_desc) 948 prev_desc->more = desc->more; 949 else 950 *pte_list = (unsigned long)desc->more | 1; 951 mmu_free_pte_list_desc(desc); 952} 953 954static void pte_list_remove(u64 *spte, unsigned long *pte_list) 955{ 956 struct pte_list_desc *desc; 957 struct pte_list_desc *prev_desc; 958 int i; 959 960 if (!*pte_list) { 961 printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte); 962 BUG(); 963 } else if (!(*pte_list & 1)) { 964 rmap_printk("pte_list_remove: %p 1->0\n", spte); 965 if ((u64 *)*pte_list != spte) { 966 printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte); 967 BUG(); 968 } 969 *pte_list = 0; 970 } else { 971 rmap_printk("pte_list_remove: %p many->many\n", spte); 972 desc = (struct pte_list_desc *)(*pte_list & ~1ul); 973 prev_desc = NULL; 974 while (desc) { 975 for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) 976 if (desc->sptes[i] == spte) { 977 pte_list_desc_remove_entry(pte_list, 978 desc, i, 979 prev_desc); 980 return; 981 } 982 prev_desc = desc; 983 desc = desc->more; 984 } 985 pr_err("pte_list_remove: %p many->many\n", spte); 986 BUG(); 987 } 988} 989 990typedef void (*pte_list_walk_fn) (u64 *spte); 991static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn) 992{ 993 struct pte_list_desc *desc; 994 int i; 995 996 if (!*pte_list) 997 return; 998 999 if (!(*pte_list & 1)) 1000 return fn((u64 *)*pte_list); 1001 1002 desc = (struct pte_list_desc *)(*pte_list & ~1ul); 1003 while (desc) { 1004 for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) 1005 fn(desc->sptes[i]); 1006 desc = desc->more; 1007 } 1008} 1009 1010static unsigned long *__gfn_to_rmap(gfn_t gfn, int level, 1011 struct kvm_memory_slot *slot) 1012{ 1013 unsigned long idx; 1014 1015 idx = gfn_to_index(gfn, slot->base_gfn, level); 1016 return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx]; 1017} 1018 1019/* 1020 * Take gfn and return the reverse mapping to it. 
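 * (The rmap heads are kept per memslot and per mapping level; see the
 * slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx] lookup in __gfn_to_rmap()
 * above.)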
1021 */ 1022static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) 1023{ 1024 struct kvm_memory_slot *slot; 1025 1026 slot = gfn_to_memslot(kvm, gfn); 1027 return __gfn_to_rmap(gfn, level, slot); 1028} 1029 1030static bool rmap_can_add(struct kvm_vcpu *vcpu) 1031{ 1032 struct kvm_mmu_memory_cache *cache; 1033 1034 cache = &vcpu->arch.mmu_pte_list_desc_cache; 1035 return mmu_memory_cache_free_objects(cache); 1036} 1037 1038static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) 1039{ 1040 struct kvm_mmu_page *sp; 1041 unsigned long *rmapp; 1042 1043 sp = page_header(__pa(spte)); 1044 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); 1045 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); 1046 return pte_list_add(vcpu, spte, rmapp); 1047} 1048 1049static void rmap_remove(struct kvm *kvm, u64 *spte) 1050{ 1051 struct kvm_mmu_page *sp; 1052 gfn_t gfn; 1053 unsigned long *rmapp; 1054 1055 sp = page_header(__pa(spte)); 1056 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); 1057 rmapp = gfn_to_rmap(kvm, gfn, sp->role.level); 1058 pte_list_remove(spte, rmapp); 1059} 1060 1061/* 1062 * Used by the following functions to iterate through the sptes linked by a 1063 * rmap. All fields are private and not assumed to be used outside. 1064 */ 1065struct rmap_iterator { 1066 /* private fields */ 1067 struct pte_list_desc *desc; /* holds the sptep if not NULL */ 1068 int pos; /* index of the sptep */ 1069}; 1070 1071/* 1072 * Iteration must be started by this function. This should also be used after 1073 * removing/dropping sptes from the rmap link because in such cases the 1074 * information in the itererator may not be valid. 1075 * 1076 * Returns sptep if found, NULL otherwise. 1077 */ 1078static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter) 1079{ 1080 if (!rmap) 1081 return NULL; 1082 1083 if (!(rmap & 1)) { 1084 iter->desc = NULL; 1085 return (u64 *)rmap; 1086 } 1087 1088 iter->desc = (struct pte_list_desc *)(rmap & ~1ul); 1089 iter->pos = 0; 1090 return iter->desc->sptes[iter->pos]; 1091} 1092 1093/* 1094 * Must be used with a valid iterator: e.g. after rmap_get_first(). 1095 * 1096 * Returns sptep if found, NULL otherwise. 1097 */ 1098static u64 *rmap_get_next(struct rmap_iterator *iter) 1099{ 1100 if (iter->desc) { 1101 if (iter->pos < PTE_LIST_EXT - 1) { 1102 u64 *sptep; 1103 1104 ++iter->pos; 1105 sptep = iter->desc->sptes[iter->pos]; 1106 if (sptep) 1107 return sptep; 1108 } 1109 1110 iter->desc = iter->desc->more; 1111 1112 if (iter->desc) { 1113 iter->pos = 0; 1114 /* desc->sptes[0] cannot be NULL */ 1115 return iter->desc->sptes[iter->pos]; 1116 } 1117 } 1118 1119 return NULL; 1120} 1121 1122static void drop_spte(struct kvm *kvm, u64 *sptep) 1123{ 1124 if (mmu_spte_clear_track_bits(sptep)) 1125 rmap_remove(kvm, sptep); 1126} 1127 1128 1129static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) 1130{ 1131 if (is_large_pte(*sptep)) { 1132 WARN_ON(page_header(__pa(sptep))->role.level == 1133 PT_PAGE_TABLE_LEVEL); 1134 drop_spte(kvm, sptep); 1135 --kvm->stat.lpages; 1136 return true; 1137 } 1138 1139 return false; 1140} 1141 1142static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) 1143{ 1144 if (__drop_large_spte(vcpu->kvm, sptep)) 1145 kvm_flush_remote_tlbs(vcpu->kvm); 1146} 1147 1148/* 1149 * Write-protect on the specified @sptep, @pt_protect indicates whether 1150 * spte write-protection is caused by protecting shadow page table. 
 *
 * Note: write protection is different for dirty logging and for spte
 * protection:
 * - for dirty logging, the spte can be set to writable at any time if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be writable only after unsync-ing
 *   the shadow page.
 *
 * Return true if the tlb needs to be flushed.
 */
static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
{
	u64 spte = *sptep;

	if (!is_writable_pte(spte) &&
	      !(pt_protect && spte_is_locklessly_modifiable(spte)))
		return false;

	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);

	if (pt_protect)
		spte &= ~SPTE_MMU_WRITEABLE;
	spte = spte & ~PT_WRITABLE_MASK;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
				 bool pt_protect)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
		BUG_ON(!(*sptep & PT_PRESENT_MASK));

		flush |= spte_write_protect(kvm, sptep, pt_protect);
		sptep = rmap_get_next(&iter);
	}

	return flush;
}

static bool spte_clear_dirty(struct kvm *kvm, u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);

	spte &= ~shadow_dirty_mask;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
		BUG_ON(!(*sptep & PT_PRESENT_MASK));

		flush |= spte_clear_dirty(kvm, sptep);
		sptep = rmap_get_next(&iter);
	}

	return flush;
}

static bool spte_set_dirty(struct kvm *kvm, u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);

	spte |= shadow_dirty_mask;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_set_dirty(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
		BUG_ON(!(*sptep & PT_PRESENT_MASK));

		flush |= spte_set_dirty(kvm, sptep);
		sptep = rmap_get_next(&iter);
	}

	return flush;
}

/**
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
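 *
 * Illustrative example: if bit i of @mask is set, the 4K page mapping
 * gfn (slot->base_gfn + gfn_offset + i) is write-protected.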
1258 */ 1259static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, 1260 struct kvm_memory_slot *slot, 1261 gfn_t gfn_offset, unsigned long mask) 1262{ 1263 unsigned long *rmapp; 1264 1265 while (mask) { 1266 rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), 1267 PT_PAGE_TABLE_LEVEL, slot); 1268 __rmap_write_protect(kvm, rmapp, false); 1269 1270 /* clear the first set bit */ 1271 mask &= mask - 1; 1272 } 1273} 1274 1275/** 1276 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages 1277 * @kvm: kvm instance 1278 * @slot: slot to clear D-bit 1279 * @gfn_offset: start of the BITS_PER_LONG pages we care about 1280 * @mask: indicates which pages we should clear D-bit 1281 * 1282 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap. 1283 */ 1284void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, 1285 struct kvm_memory_slot *slot, 1286 gfn_t gfn_offset, unsigned long mask) 1287{ 1288 unsigned long *rmapp; 1289 1290 while (mask) { 1291 rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), 1292 PT_PAGE_TABLE_LEVEL, slot); 1293 __rmap_clear_dirty(kvm, rmapp); 1294 1295 /* clear the first set bit */ 1296 mask &= mask - 1; 1297 } 1298} 1299EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked); 1300 1301/** 1302 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected 1303 * PT level pages. 1304 * 1305 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to 1306 * enable dirty logging for them. 1307 * 1308 * Used when we do not need to care about huge page mappings: e.g. during dirty 1309 * logging we do not have any such mappings. 1310 */ 1311void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, 1312 struct kvm_memory_slot *slot, 1313 gfn_t gfn_offset, unsigned long mask) 1314{ 1315 if (kvm_x86_ops->enable_log_dirty_pt_masked) 1316 kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset, 1317 mask); 1318 else 1319 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); 1320} 1321 1322static bool rmap_write_protect(struct kvm *kvm, u64 gfn) 1323{ 1324 struct kvm_memory_slot *slot; 1325 unsigned long *rmapp; 1326 int i; 1327 bool write_protected = false; 1328 1329 slot = gfn_to_memslot(kvm, gfn); 1330 1331 for (i = PT_PAGE_TABLE_LEVEL; 1332 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { 1333 rmapp = __gfn_to_rmap(gfn, i, slot); 1334 write_protected |= __rmap_write_protect(kvm, rmapp, true); 1335 } 1336 1337 return write_protected; 1338} 1339 1340static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, 1341 struct kvm_memory_slot *slot, gfn_t gfn, int level, 1342 unsigned long data) 1343{ 1344 u64 *sptep; 1345 struct rmap_iterator iter; 1346 int need_tlb_flush = 0; 1347 1348 while ((sptep = rmap_get_first(*rmapp, &iter))) { 1349 BUG_ON(!(*sptep & PT_PRESENT_MASK)); 1350 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx gfn %llx (%d)\n", 1351 sptep, *sptep, gfn, level); 1352 1353 drop_spte(kvm, sptep); 1354 need_tlb_flush = 1; 1355 } 1356 1357 return need_tlb_flush; 1358} 1359 1360static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, 1361 struct kvm_memory_slot *slot, gfn_t gfn, int level, 1362 unsigned long data) 1363{ 1364 u64 *sptep; 1365 struct rmap_iterator iter; 1366 int need_flush = 0; 1367 u64 new_spte; 1368 pte_t *ptep = (pte_t *)data; 1369 pfn_t new_pfn; 1370 1371 WARN_ON(pte_huge(*ptep)); 1372 new_pfn = pte_pfn(*ptep); 1373 1374 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { 1375 BUG_ON(!is_shadow_present_pte(*sptep)); 1376 
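		/*
		 * If the new host pte is writable, zap the spte below and
		 * restart the rmap walk; otherwise rewrite the spte to point
		 * at the new pfn with write access removed.
		 */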
rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n", 1377 sptep, *sptep, gfn, level); 1378 1379 need_flush = 1; 1380 1381 if (pte_write(*ptep)) { 1382 drop_spte(kvm, sptep); 1383 sptep = rmap_get_first(*rmapp, &iter); 1384 } else { 1385 new_spte = *sptep & ~PT64_BASE_ADDR_MASK; 1386 new_spte |= (u64)new_pfn << PAGE_SHIFT; 1387 1388 new_spte &= ~PT_WRITABLE_MASK; 1389 new_spte &= ~SPTE_HOST_WRITEABLE; 1390 new_spte &= ~shadow_accessed_mask; 1391 1392 mmu_spte_clear_track_bits(sptep); 1393 mmu_spte_set(sptep, new_spte); 1394 sptep = rmap_get_next(&iter); 1395 } 1396 } 1397 1398 if (need_flush) 1399 kvm_flush_remote_tlbs(kvm); 1400 1401 return 0; 1402} 1403 1404static int kvm_handle_hva_range(struct kvm *kvm, 1405 unsigned long start, 1406 unsigned long end, 1407 unsigned long data, 1408 int (*handler)(struct kvm *kvm, 1409 unsigned long *rmapp, 1410 struct kvm_memory_slot *slot, 1411 gfn_t gfn, 1412 int level, 1413 unsigned long data)) 1414{ 1415 int j; 1416 int ret = 0; 1417 struct kvm_memslots *slots; 1418 struct kvm_memory_slot *memslot; 1419 1420 slots = kvm_memslots(kvm); 1421 1422 kvm_for_each_memslot(memslot, slots) { 1423 unsigned long hva_start, hva_end; 1424 gfn_t gfn_start, gfn_end; 1425 1426 hva_start = max(start, memslot->userspace_addr); 1427 hva_end = min(end, memslot->userspace_addr + 1428 (memslot->npages << PAGE_SHIFT)); 1429 if (hva_start >= hva_end) 1430 continue; 1431 /* 1432 * {gfn(page) | page intersects with [hva_start, hva_end)} = 1433 * {gfn_start, gfn_start+1, ..., gfn_end-1}. 1434 */ 1435 gfn_start = hva_to_gfn_memslot(hva_start, memslot); 1436 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); 1437 1438 for (j = PT_PAGE_TABLE_LEVEL; 1439 j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) { 1440 unsigned long idx, idx_end; 1441 unsigned long *rmapp; 1442 gfn_t gfn = gfn_start; 1443 1444 /* 1445 * {idx(page_j) | page_j intersects with 1446 * [hva_start, hva_end)} = {idx, idx+1, ..., idx_end}. 
1447 */ 1448 idx = gfn_to_index(gfn_start, memslot->base_gfn, j); 1449 idx_end = gfn_to_index(gfn_end - 1, memslot->base_gfn, j); 1450 1451 rmapp = __gfn_to_rmap(gfn_start, j, memslot); 1452 1453 for (; idx <= idx_end; 1454 ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j))) 1455 ret |= handler(kvm, rmapp++, memslot, 1456 gfn, j, data); 1457 } 1458 } 1459 1460 return ret; 1461} 1462 1463static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, 1464 unsigned long data, 1465 int (*handler)(struct kvm *kvm, unsigned long *rmapp, 1466 struct kvm_memory_slot *slot, 1467 gfn_t gfn, int level, 1468 unsigned long data)) 1469{ 1470 return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); 1471} 1472 1473int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 1474{ 1475 return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); 1476} 1477 1478int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) 1479{ 1480 return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); 1481} 1482 1483void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) 1484{ 1485 kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); 1486} 1487 1488static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, 1489 struct kvm_memory_slot *slot, gfn_t gfn, int level, 1490 unsigned long data) 1491{ 1492 u64 *sptep; 1493 struct rmap_iterator uninitialized_var(iter); 1494 int young = 0; 1495 1496 BUG_ON(!shadow_accessed_mask); 1497 1498 for (sptep = rmap_get_first(*rmapp, &iter); sptep; 1499 sptep = rmap_get_next(&iter)) { 1500 BUG_ON(!is_shadow_present_pte(*sptep)); 1501 1502 if (*sptep & shadow_accessed_mask) { 1503 young = 1; 1504 clear_bit((ffs(shadow_accessed_mask) - 1), 1505 (unsigned long *)sptep); 1506 } 1507 } 1508 trace_kvm_age_page(gfn, level, slot, young); 1509 return young; 1510} 1511 1512static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, 1513 struct kvm_memory_slot *slot, gfn_t gfn, 1514 int level, unsigned long data) 1515{ 1516 u64 *sptep; 1517 struct rmap_iterator iter; 1518 int young = 0; 1519 1520 /* 1521 * If there's no access bit in the secondary pte set by the 1522 * hardware it's up to gup-fast/gup to set the access bit in 1523 * the primary pte or in the page structure. 1524 */ 1525 if (!shadow_accessed_mask) 1526 goto out; 1527 1528 for (sptep = rmap_get_first(*rmapp, &iter); sptep; 1529 sptep = rmap_get_next(&iter)) { 1530 BUG_ON(!is_shadow_present_pte(*sptep)); 1531 1532 if (*sptep & shadow_accessed_mask) { 1533 young = 1; 1534 break; 1535 } 1536 } 1537out: 1538 return young; 1539} 1540 1541#define RMAP_RECYCLE_THRESHOLD 1000 1542 1543static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) 1544{ 1545 unsigned long *rmapp; 1546 struct kvm_mmu_page *sp; 1547 1548 sp = page_header(__pa(spte)); 1549 1550 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); 1551 1552 kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); 1553 kvm_flush_remote_tlbs(vcpu->kvm); 1554} 1555 1556int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) 1557{ 1558 /* 1559 * In case of absence of EPT Access and Dirty Bits supports, 1560 * emulate the accessed bit for EPT, by checking if this page has 1561 * an EPT mapping, and clearing it if it does. On the next access, 1562 * a new EPT mapping will be established. 1563 * This has some overhead, but not as much as the cost of swapping 1564 * out actively used pages or breaking up actively used hugepages. 
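	 *
	 * (shadow_accessed_mask is zero when EPT is in use without Access and
	 * Dirty bit support; see kvm_mmu_set_mask_ptes(), which is where the
	 * mask is configured.)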
1565 */ 1566 if (!shadow_accessed_mask) { 1567 /* 1568 * We are holding the kvm->mmu_lock, and we are blowing up 1569 * shadow PTEs. MMU notifier consumers need to be kept at bay. 1570 * This is correct as long as we don't decouple the mmu_lock 1571 * protected regions (like invalidate_range_start|end does). 1572 */ 1573 kvm->mmu_notifier_seq++; 1574 return kvm_handle_hva_range(kvm, start, end, 0, 1575 kvm_unmap_rmapp); 1576 } 1577 1578 return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp); 1579} 1580 1581int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) 1582{ 1583 return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); 1584} 1585 1586#ifdef MMU_DEBUG 1587static int is_empty_shadow_page(u64 *spt) 1588{ 1589 u64 *pos; 1590 u64 *end; 1591 1592 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) 1593 if (is_shadow_present_pte(*pos)) { 1594 printk(KERN_ERR "%s: %p %llx\n", __func__, 1595 pos, *pos); 1596 return 0; 1597 } 1598 return 1; 1599} 1600#endif 1601 1602/* 1603 * This value is the sum of all of the kvm instances's 1604 * kvm->arch.n_used_mmu_pages values. We need a global, 1605 * aggregate version in order to make the slab shrinker 1606 * faster 1607 */ 1608static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr) 1609{ 1610 kvm->arch.n_used_mmu_pages += nr; 1611 percpu_counter_add(&kvm_total_used_mmu_pages, nr); 1612} 1613 1614static void kvm_mmu_free_page(struct kvm_mmu_page *sp) 1615{ 1616 MMU_WARN_ON(!is_empty_shadow_page(sp->spt)); 1617 hlist_del(&sp->hash_link); 1618 list_del(&sp->link); 1619 free_page((unsigned long)sp->spt); 1620 if (!sp->role.direct) 1621 free_page((unsigned long)sp->gfns); 1622 kmem_cache_free(mmu_page_header_cache, sp); 1623} 1624 1625static unsigned kvm_page_table_hashfn(gfn_t gfn) 1626{ 1627 return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1); 1628} 1629 1630static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, 1631 struct kvm_mmu_page *sp, u64 *parent_pte) 1632{ 1633 if (!parent_pte) 1634 return; 1635 1636 pte_list_add(vcpu, parent_pte, &sp->parent_ptes); 1637} 1638 1639static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, 1640 u64 *parent_pte) 1641{ 1642 pte_list_remove(parent_pte, &sp->parent_ptes); 1643} 1644 1645static void drop_parent_pte(struct kvm_mmu_page *sp, 1646 u64 *parent_pte) 1647{ 1648 mmu_page_remove_parent_pte(sp, parent_pte); 1649 mmu_spte_clear_no_track(parent_pte); 1650} 1651 1652static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, 1653 u64 *parent_pte, int direct) 1654{ 1655 struct kvm_mmu_page *sp; 1656 1657 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache); 1658 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); 1659 if (!direct) 1660 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache); 1661 set_page_private(virt_to_page(sp->spt), (unsigned long)sp); 1662 1663 /* 1664 * The active_mmu_pages list is the FIFO list, do not move the 1665 * page until it is zapped. kvm_zap_obsolete_pages depends on 1666 * this feature. See the comments in kvm_zap_obsolete_pages(). 
1667 */ 1668 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); 1669 sp->parent_ptes = 0; 1670 mmu_page_add_parent_pte(vcpu, sp, parent_pte); 1671 kvm_mod_used_mmu_pages(vcpu->kvm, +1); 1672 return sp; 1673} 1674 1675static void mark_unsync(u64 *spte); 1676static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) 1677{ 1678 pte_list_walk(&sp->parent_ptes, mark_unsync); 1679} 1680 1681static void mark_unsync(u64 *spte) 1682{ 1683 struct kvm_mmu_page *sp; 1684 unsigned int index; 1685 1686 sp = page_header(__pa(spte)); 1687 index = spte - sp->spt; 1688 if (__test_and_set_bit(index, sp->unsync_child_bitmap)) 1689 return; 1690 if (sp->unsync_children++) 1691 return; 1692 kvm_mmu_mark_parents_unsync(sp); 1693} 1694 1695static int nonpaging_sync_page(struct kvm_vcpu *vcpu, 1696 struct kvm_mmu_page *sp) 1697{ 1698 return 1; 1699} 1700 1701static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) 1702{ 1703} 1704 1705static void nonpaging_update_pte(struct kvm_vcpu *vcpu, 1706 struct kvm_mmu_page *sp, u64 *spte, 1707 const void *pte) 1708{ 1709 WARN_ON(1); 1710} 1711 1712#define KVM_PAGE_ARRAY_NR 16 1713 1714struct kvm_mmu_pages { 1715 struct mmu_page_and_offset { 1716 struct kvm_mmu_page *sp; 1717 unsigned int idx; 1718 } page[KVM_PAGE_ARRAY_NR]; 1719 unsigned int nr; 1720}; 1721 1722static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, 1723 int idx) 1724{ 1725 int i; 1726 1727 if (sp->unsync) 1728 for (i=0; i < pvec->nr; i++) 1729 if (pvec->page[i].sp == sp) 1730 return 0; 1731 1732 pvec->page[pvec->nr].sp = sp; 1733 pvec->page[pvec->nr].idx = idx; 1734 pvec->nr++; 1735 return (pvec->nr == KVM_PAGE_ARRAY_NR); 1736} 1737 1738static int __mmu_unsync_walk(struct kvm_mmu_page *sp, 1739 struct kvm_mmu_pages *pvec) 1740{ 1741 int i, ret, nr_unsync_leaf = 0; 1742 1743 for_each_set_bit(i, sp->unsync_child_bitmap, 512) { 1744 struct kvm_mmu_page *child; 1745 u64 ent = sp->spt[i]; 1746 1747 if (!is_shadow_present_pte(ent) || is_large_pte(ent)) 1748 goto clear_child_bitmap; 1749 1750 child = page_header(ent & PT64_BASE_ADDR_MASK); 1751 1752 if (child->unsync_children) { 1753 if (mmu_pages_add(pvec, child, i)) 1754 return -ENOSPC; 1755 1756 ret = __mmu_unsync_walk(child, pvec); 1757 if (!ret) 1758 goto clear_child_bitmap; 1759 else if (ret > 0) 1760 nr_unsync_leaf += ret; 1761 else 1762 return ret; 1763 } else if (child->unsync) { 1764 nr_unsync_leaf++; 1765 if (mmu_pages_add(pvec, child, i)) 1766 return -ENOSPC; 1767 } else 1768 goto clear_child_bitmap; 1769 1770 continue; 1771 1772clear_child_bitmap: 1773 __clear_bit(i, sp->unsync_child_bitmap); 1774 sp->unsync_children--; 1775 WARN_ON((int)sp->unsync_children < 0); 1776 } 1777 1778 1779 return nr_unsync_leaf; 1780} 1781 1782static int mmu_unsync_walk(struct kvm_mmu_page *sp, 1783 struct kvm_mmu_pages *pvec) 1784{ 1785 if (!sp->unsync_children) 1786 return 0; 1787 1788 mmu_pages_add(pvec, sp, 0); 1789 return __mmu_unsync_walk(sp, pvec); 1790} 1791 1792static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) 1793{ 1794 WARN_ON(!sp->unsync); 1795 trace_kvm_mmu_sync_page(sp); 1796 sp->unsync = 0; 1797 --kvm->stat.mmu_unsync; 1798} 1799 1800static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, 1801 struct list_head *invalid_list); 1802static void kvm_mmu_commit_zap_page(struct kvm *kvm, 1803 struct list_head *invalid_list); 1804 1805/* 1806 * NOTE: we should pay more attention on the zapped-obsolete page 1807 * (is_obsolete_sp(sp) && sp->role.invalid) when you do hash list 
walk 1808 * since it has been deleted from active_mmu_pages but still can be found 1809 * at hast list. 1810 * 1811 * for_each_gfn_indirect_valid_sp has skipped that kind of page and 1812 * kvm_mmu_get_page(), the only user of for_each_gfn_sp(), has skipped 1813 * all the obsolete pages. 1814 */ 1815#define for_each_gfn_sp(_kvm, _sp, _gfn) \ 1816 hlist_for_each_entry(_sp, \ 1817 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ 1818 if ((_sp)->gfn != (_gfn)) {} else 1819 1820#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \ 1821 for_each_gfn_sp(_kvm, _sp, _gfn) \ 1822 if ((_sp)->role.direct || (_sp)->role.invalid) {} else 1823 1824/* @sp->gfn should be write-protected at the call site */ 1825static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 1826 struct list_head *invalid_list, bool clear_unsync) 1827{ 1828 if (sp->role.cr4_pae != !!is_pae(vcpu)) { 1829 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); 1830 return 1; 1831 } 1832 1833 if (clear_unsync) 1834 kvm_unlink_unsync_page(vcpu->kvm, sp); 1835 1836 if (vcpu->arch.mmu.sync_page(vcpu, sp)) { 1837 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); 1838 return 1; 1839 } 1840 1841 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1842 return 0; 1843} 1844 1845static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, 1846 struct kvm_mmu_page *sp) 1847{ 1848 LIST_HEAD(invalid_list); 1849 int ret; 1850 1851 ret = __kvm_sync_page(vcpu, sp, &invalid_list, false); 1852 if (ret) 1853 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 1854 1855 return ret; 1856} 1857 1858#ifdef CONFIG_KVM_MMU_AUDIT 1859#include "mmu_audit.c" 1860#else 1861static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } 1862static void mmu_audit_disable(void) { } 1863#endif 1864 1865static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 1866 struct list_head *invalid_list) 1867{ 1868 return __kvm_sync_page(vcpu, sp, invalid_list, true); 1869} 1870 1871/* @gfn should be write-protected at the call site */ 1872static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) 1873{ 1874 struct kvm_mmu_page *s; 1875 LIST_HEAD(invalid_list); 1876 bool flush = false; 1877 1878 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { 1879 if (!s->unsync) 1880 continue; 1881 1882 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); 1883 kvm_unlink_unsync_page(vcpu->kvm, s); 1884 if ((s->role.cr4_pae != !!is_pae(vcpu)) || 1885 (vcpu->arch.mmu.sync_page(vcpu, s))) { 1886 kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); 1887 continue; 1888 } 1889 flush = true; 1890 } 1891 1892 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 1893 if (flush) 1894 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1895} 1896 1897struct mmu_page_path { 1898 struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1]; 1899 unsigned int idx[PT64_ROOT_LEVEL-1]; 1900}; 1901 1902#define for_each_sp(pvec, sp, parents, i) \ 1903 for (i = mmu_pages_next(&pvec, &parents, -1), \ 1904 sp = pvec.page[i].sp; \ 1905 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ 1906 i = mmu_pages_next(&pvec, &parents, i)) 1907 1908static int mmu_pages_next(struct kvm_mmu_pages *pvec, 1909 struct mmu_page_path *parents, 1910 int i) 1911{ 1912 int n; 1913 1914 for (n = i+1; n < pvec->nr; n++) { 1915 struct kvm_mmu_page *sp = pvec->page[n].sp; 1916 1917 if (sp->role.level == PT_PAGE_TABLE_LEVEL) { 1918 parents->idx[0] = pvec->page[n].idx; 1919 return n; 1920 } 1921 1922 parents->parent[sp->role.level-2] = sp; 1923 parents->idx[sp->role.level-1] = pvec->page[n].idx; 1924 } 1925 1926 
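	/* The loop above found no leaf page; n == pvec->nr here, which ends
	 * the for_each_sp() walk. */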
return n; 1927} 1928 1929static void mmu_pages_clear_parents(struct mmu_page_path *parents) 1930{ 1931 struct kvm_mmu_page *sp; 1932 unsigned int level = 0; 1933 1934 do { 1935 unsigned int idx = parents->idx[level]; 1936 1937 sp = parents->parent[level]; 1938 if (!sp) 1939 return; 1940 1941 --sp->unsync_children; 1942 WARN_ON((int)sp->unsync_children < 0); 1943 __clear_bit(idx, sp->unsync_child_bitmap); 1944 level++; 1945 } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children); 1946} 1947 1948static void kvm_mmu_pages_init(struct kvm_mmu_page *parent, 1949 struct mmu_page_path *parents, 1950 struct kvm_mmu_pages *pvec) 1951{ 1952 parents->parent[parent->role.level-1] = NULL; 1953 pvec->nr = 0; 1954} 1955 1956static void mmu_sync_children(struct kvm_vcpu *vcpu, 1957 struct kvm_mmu_page *parent) 1958{ 1959 int i; 1960 struct kvm_mmu_page *sp; 1961 struct mmu_page_path parents; 1962 struct kvm_mmu_pages pages; 1963 LIST_HEAD(invalid_list); 1964 1965 kvm_mmu_pages_init(parent, &parents, &pages); 1966 while (mmu_unsync_walk(parent, &pages)) { 1967 bool protected = false; 1968 1969 for_each_sp(pages, sp, parents, i) 1970 protected |= rmap_write_protect(vcpu->kvm, sp->gfn); 1971 1972 if (protected) 1973 kvm_flush_remote_tlbs(vcpu->kvm); 1974 1975 for_each_sp(pages, sp, parents, i) { 1976 kvm_sync_page(vcpu, sp, &invalid_list); 1977 mmu_pages_clear_parents(&parents); 1978 } 1979 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 1980 cond_resched_lock(&vcpu->kvm->mmu_lock); 1981 kvm_mmu_pages_init(parent, &parents, &pages); 1982 } 1983} 1984 1985static void init_shadow_page_table(struct kvm_mmu_page *sp) 1986{ 1987 int i; 1988 1989 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) 1990 sp->spt[i] = 0ull; 1991} 1992 1993static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) 1994{ 1995 sp->write_flooding_count = 0; 1996} 1997 1998static void clear_sp_write_flooding_count(u64 *spte) 1999{ 2000 struct kvm_mmu_page *sp = page_header(__pa(spte)); 2001 2002 __clear_sp_write_flooding_count(sp); 2003} 2004 2005static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) 2006{ 2007 return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen); 2008} 2009 2010static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, 2011 gfn_t gfn, 2012 gva_t gaddr, 2013 unsigned level, 2014 int direct, 2015 unsigned access, 2016 u64 *parent_pte) 2017{ 2018 union kvm_mmu_page_role role; 2019 unsigned quadrant; 2020 struct kvm_mmu_page *sp; 2021 bool need_sync = false; 2022 2023 role = vcpu->arch.mmu.base_role; 2024 role.level = level; 2025 role.direct = direct; 2026 if (role.direct) 2027 role.cr4_pae = 0; 2028 role.access = access; 2029 if (!vcpu->arch.mmu.direct_map 2030 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { 2031 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); 2032 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; 2033 role.quadrant = quadrant; 2034 } 2035 for_each_gfn_sp(vcpu->kvm, sp, gfn) { 2036 if (is_obsolete_sp(vcpu->kvm, sp)) 2037 continue; 2038 2039 if (!need_sync && sp->unsync) 2040 need_sync = true; 2041 2042 if (sp->role.word != role.word) 2043 continue; 2044 2045 if (sp->unsync && kvm_sync_page_transient(vcpu, sp)) 2046 break; 2047 2048 mmu_page_add_parent_pte(vcpu, sp, parent_pte); 2049 if (sp->unsync_children) { 2050 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); 2051 kvm_mmu_mark_parents_unsync(sp); 2052 } else if (sp->unsync) 2053 kvm_mmu_mark_parents_unsync(sp); 2054 2055 __clear_sp_write_flooding_count(sp); 2056 trace_kvm_mmu_get_page(sp, false); 
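		/* An existing shadow page with a matching gfn and role was
		 * found in the hash table; reuse it. */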
2057 return sp; 2058 } 2059 ++vcpu->kvm->stat.mmu_cache_miss; 2060 sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct); 2061 if (!sp) 2062 return sp; 2063 sp->gfn = gfn; 2064 sp->role = role; 2065 hlist_add_head(&sp->hash_link, 2066 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); 2067 if (!direct) { 2068 if (rmap_write_protect(vcpu->kvm, gfn)) 2069 kvm_flush_remote_tlbs(vcpu->kvm); 2070 if (level > PT_PAGE_TABLE_LEVEL && need_sync) 2071 kvm_sync_pages(vcpu, gfn); 2072 2073 account_shadowed(vcpu->kvm, gfn); 2074 } 2075 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen; 2076 init_shadow_page_table(sp); 2077 trace_kvm_mmu_get_page(sp, true); 2078 return sp; 2079} 2080 2081static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, 2082 struct kvm_vcpu *vcpu, u64 addr) 2083{ 2084 iterator->addr = addr; 2085 iterator->shadow_addr = vcpu->arch.mmu.root_hpa; 2086 iterator->level = vcpu->arch.mmu.shadow_root_level; 2087 2088 if (iterator->level == PT64_ROOT_LEVEL && 2089 vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && 2090 !vcpu->arch.mmu.direct_map) 2091 --iterator->level; 2092 2093 if (iterator->level == PT32E_ROOT_LEVEL) { 2094 iterator->shadow_addr 2095 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; 2096 iterator->shadow_addr &= PT64_BASE_ADDR_MASK; 2097 --iterator->level; 2098 if (!iterator->shadow_addr) 2099 iterator->level = 0; 2100 } 2101} 2102 2103static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) 2104{ 2105 if (iterator->level < PT_PAGE_TABLE_LEVEL) 2106 return false; 2107 2108 iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); 2109 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; 2110 return true; 2111} 2112 2113static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, 2114 u64 spte) 2115{ 2116 if (is_last_spte(spte, iterator->level)) { 2117 iterator->level = 0; 2118 return; 2119 } 2120 2121 iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; 2122 --iterator->level; 2123} 2124 2125static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) 2126{ 2127 return __shadow_walk_next(iterator, *iterator->sptep); 2128} 2129 2130static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed) 2131{ 2132 u64 spte; 2133 2134 BUILD_BUG_ON(VMX_EPT_READABLE_MASK != PT_PRESENT_MASK || 2135 VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK); 2136 2137 spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK | 2138 shadow_user_mask | shadow_x_mask; 2139 2140 if (accessed) 2141 spte |= shadow_accessed_mask; 2142 2143 mmu_spte_set(sptep, spte); 2144} 2145 2146static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, 2147 unsigned direct_access) 2148{ 2149 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { 2150 struct kvm_mmu_page *child; 2151 2152 /* 2153 * For the direct sp, if the guest pte's dirty bit 2154 * changed form clean to dirty, it will corrupt the 2155 * sp's access: allow writable in the read-only sp, 2156 * so we should update the spte at this point to get 2157 * a new sp with the correct access. 
2158 */ 2159 child = page_header(*sptep & PT64_BASE_ADDR_MASK); 2160 if (child->role.access == direct_access) 2161 return; 2162 2163 drop_parent_pte(child, sptep); 2164 kvm_flush_remote_tlbs(vcpu->kvm); 2165 } 2166} 2167 2168static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, 2169 u64 *spte) 2170{ 2171 u64 pte; 2172 struct kvm_mmu_page *child; 2173 2174 pte = *spte; 2175 if (is_shadow_present_pte(pte)) { 2176 if (is_last_spte(pte, sp->role.level)) { 2177 drop_spte(kvm, spte); 2178 if (is_large_pte(pte)) 2179 --kvm->stat.lpages; 2180 } else { 2181 child = page_header(pte & PT64_BASE_ADDR_MASK); 2182 drop_parent_pte(child, spte); 2183 } 2184 return true; 2185 } 2186 2187 if (is_mmio_spte(pte)) 2188 mmu_spte_clear_no_track(spte); 2189 2190 return false; 2191} 2192 2193static void kvm_mmu_page_unlink_children(struct kvm *kvm, 2194 struct kvm_mmu_page *sp) 2195{ 2196 unsigned i; 2197 2198 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) 2199 mmu_page_zap_pte(kvm, sp, sp->spt + i); 2200} 2201 2202static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte) 2203{ 2204 mmu_page_remove_parent_pte(sp, parent_pte); 2205} 2206 2207static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) 2208{ 2209 u64 *sptep; 2210 struct rmap_iterator iter; 2211 2212 while ((sptep = rmap_get_first(sp->parent_ptes, &iter))) 2213 drop_parent_pte(sp, sptep); 2214} 2215 2216static int mmu_zap_unsync_children(struct kvm *kvm, 2217 struct kvm_mmu_page *parent, 2218 struct list_head *invalid_list) 2219{ 2220 int i, zapped = 0; 2221 struct mmu_page_path parents; 2222 struct kvm_mmu_pages pages; 2223 2224 if (parent->role.level == PT_PAGE_TABLE_LEVEL) 2225 return 0; 2226 2227 kvm_mmu_pages_init(parent, &parents, &pages); 2228 while (mmu_unsync_walk(parent, &pages)) { 2229 struct kvm_mmu_page *sp; 2230 2231 for_each_sp(pages, sp, parents, i) { 2232 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); 2233 mmu_pages_clear_parents(&parents); 2234 zapped++; 2235 } 2236 kvm_mmu_pages_init(parent, &parents, &pages); 2237 } 2238 2239 return zapped; 2240} 2241 2242static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, 2243 struct list_head *invalid_list) 2244{ 2245 int ret; 2246 2247 trace_kvm_mmu_prepare_zap_page(sp); 2248 ++kvm->stat.mmu_shadow_zapped; 2249 ret = mmu_zap_unsync_children(kvm, sp, invalid_list); 2250 kvm_mmu_page_unlink_children(kvm, sp); 2251 kvm_mmu_unlink_parents(kvm, sp); 2252 2253 if (!sp->role.invalid && !sp->role.direct) 2254 unaccount_shadowed(kvm, sp->gfn); 2255 2256 if (sp->unsync) 2257 kvm_unlink_unsync_page(kvm, sp); 2258 if (!sp->root_count) { 2259 /* Count self */ 2260 ret++; 2261 list_move(&sp->link, invalid_list); 2262 kvm_mod_used_mmu_pages(kvm, -1); 2263 } else { 2264 list_move(&sp->link, &kvm->arch.active_mmu_pages); 2265 2266 /* 2267 * The obsolete pages can not be used on any vcpus. 2268 * See the comments in kvm_mmu_invalidate_zap_all_pages(). 
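	 * A page that is neither invalid nor obsolete, on the other hand,
	 * may still be in use as a root on some vcpu, which is why
	 * kvm_reload_remote_mmus() is called below: every vcpu is asked to
	 * drop and reload its roots, and once the last root_count reference
	 * goes away the page can be zapped for good.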
2269 */ 2270 if (!sp->role.invalid && !is_obsolete_sp(kvm, sp)) 2271 kvm_reload_remote_mmus(kvm); 2272 } 2273 2274 sp->role.invalid = 1; 2275 return ret; 2276} 2277 2278static void kvm_mmu_commit_zap_page(struct kvm *kvm, 2279 struct list_head *invalid_list) 2280{ 2281 struct kvm_mmu_page *sp, *nsp; 2282 2283 if (list_empty(invalid_list)) 2284 return; 2285 2286 /* 2287 * wmb: make sure everyone sees our modifications to the page tables 2288 * rmb: make sure we see changes to vcpu->mode 2289 */ 2290 smp_mb(); 2291 2292 /* 2293 * Wait for all vcpus to exit guest mode and/or lockless shadow 2294 * page table walks. 2295 */ 2296 kvm_flush_remote_tlbs(kvm); 2297 2298 list_for_each_entry_safe(sp, nsp, invalid_list, link) { 2299 WARN_ON(!sp->role.invalid || sp->root_count); 2300 kvm_mmu_free_page(sp); 2301 } 2302} 2303 2304static bool prepare_zap_oldest_mmu_page(struct kvm *kvm, 2305 struct list_head *invalid_list) 2306{ 2307 struct kvm_mmu_page *sp; 2308 2309 if (list_empty(&kvm->arch.active_mmu_pages)) 2310 return false; 2311 2312 sp = list_entry(kvm->arch.active_mmu_pages.prev, 2313 struct kvm_mmu_page, link); 2314 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); 2315 2316 return true; 2317} 2318 2319/* 2320 * Changing the number of mmu pages allocated to the vm 2321 * Note: if goal_nr_mmu_pages is too small, you will get dead lock 2322 */ 2323void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) 2324{ 2325 LIST_HEAD(invalid_list); 2326 2327 spin_lock(&kvm->mmu_lock); 2328 2329 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { 2330 /* Need to free some mmu pages to achieve the goal. */ 2331 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) 2332 if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list)) 2333 break; 2334 2335 kvm_mmu_commit_zap_page(kvm, &invalid_list); 2336 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; 2337 } 2338 2339 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; 2340 2341 spin_unlock(&kvm->mmu_lock); 2342} 2343 2344int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) 2345{ 2346 struct kvm_mmu_page *sp; 2347 LIST_HEAD(invalid_list); 2348 int r; 2349 2350 pgprintk("%s: looking for gfn %llx\n", __func__, gfn); 2351 r = 0; 2352 spin_lock(&kvm->mmu_lock); 2353 for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { 2354 pgprintk("%s: gfn %llx role %x\n", __func__, gfn, 2355 sp->role.word); 2356 r = 1; 2357 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); 2358 } 2359 kvm_mmu_commit_zap_page(kvm, &invalid_list); 2360 spin_unlock(&kvm->mmu_lock); 2361 2362 return r; 2363} 2364EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page); 2365 2366/* 2367 * The function is based on mtrr_type_lookup() in 2368 * arch/x86/kernel/cpu/mtrr/generic.c 2369 */ 2370static int get_mtrr_type(struct mtrr_state_type *mtrr_state, 2371 u64 start, u64 end) 2372{ 2373 int i; 2374 u64 base, mask; 2375 u8 prev_match, curr_match; 2376 int num_var_ranges = KVM_NR_VAR_MTRR; 2377 2378 if (!mtrr_state->enabled) 2379 return 0xFF; 2380 2381 /* Make end inclusive end, instead of exclusive */ 2382 end--; 2383 2384 /* Look in fixed ranges. 
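	   (The fixed-range MTRRs describe only the first 1MB of physical
	   memory: eight 64KB slots below 512KB, sixteen 16KB slots from
	   512KB to 768KB, and sixty-four 4KB slots from 768KB to 1MB,
	   which is what the shift-based index arithmetic below encodes.)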
Just return the type as per start */ 2385 if (mtrr_state->have_fixed && (start < 0x100000)) { 2386 int idx; 2387 2388 if (start < 0x80000) { 2389 idx = 0; 2390 idx += (start >> 16); 2391 return mtrr_state->fixed_ranges[idx]; 2392 } else if (start < 0xC0000) { 2393 idx = 1 * 8; 2394 idx += ((start - 0x80000) >> 14); 2395 return mtrr_state->fixed_ranges[idx]; 2396 } else if (start < 0x1000000) { 2397 idx = 3 * 8; 2398 idx += ((start - 0xC0000) >> 12); 2399 return mtrr_state->fixed_ranges[idx]; 2400 } 2401 } 2402 2403 /* 2404 * Look in variable ranges 2405 * Look of multiple ranges matching this address and pick type 2406 * as per MTRR precedence 2407 */ 2408 if (!(mtrr_state->enabled & 2)) 2409 return mtrr_state->def_type; 2410 2411 prev_match = 0xFF; 2412 for (i = 0; i < num_var_ranges; ++i) { 2413 unsigned short start_state, end_state; 2414 2415 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11))) 2416 continue; 2417 2418 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) + 2419 (mtrr_state->var_ranges[i].base_lo & PAGE_MASK); 2420 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) + 2421 (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK); 2422 2423 start_state = ((start & mask) == (base & mask)); 2424 end_state = ((end & mask) == (base & mask)); 2425 if (start_state != end_state) 2426 return 0xFE; 2427 2428 if ((start & mask) != (base & mask)) 2429 continue; 2430 2431 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff; 2432 if (prev_match == 0xFF) { 2433 prev_match = curr_match; 2434 continue; 2435 } 2436 2437 if (prev_match == MTRR_TYPE_UNCACHABLE || 2438 curr_match == MTRR_TYPE_UNCACHABLE) 2439 return MTRR_TYPE_UNCACHABLE; 2440 2441 if ((prev_match == MTRR_TYPE_WRBACK && 2442 curr_match == MTRR_TYPE_WRTHROUGH) || 2443 (prev_match == MTRR_TYPE_WRTHROUGH && 2444 curr_match == MTRR_TYPE_WRBACK)) { 2445 prev_match = MTRR_TYPE_WRTHROUGH; 2446 curr_match = MTRR_TYPE_WRTHROUGH; 2447 } 2448 2449 if (prev_match != curr_match) 2450 return MTRR_TYPE_UNCACHABLE; 2451 } 2452 2453 if (prev_match != 0xFF) 2454 return prev_match; 2455 2456 return mtrr_state->def_type; 2457} 2458 2459u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) 2460{ 2461 u8 mtrr; 2462 2463 mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT, 2464 (gfn << PAGE_SHIFT) + PAGE_SIZE); 2465 if (mtrr == 0xfe || mtrr == 0xff) 2466 mtrr = MTRR_TYPE_WRBACK; 2467 return mtrr; 2468} 2469EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type); 2470 2471static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) 2472{ 2473 trace_kvm_mmu_unsync_page(sp); 2474 ++vcpu->kvm->stat.mmu_unsync; 2475 sp->unsync = 1; 2476 2477 kvm_mmu_mark_parents_unsync(sp); 2478} 2479 2480static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) 2481{ 2482 struct kvm_mmu_page *s; 2483 2484 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { 2485 if (s->unsync) 2486 continue; 2487 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); 2488 __kvm_unsync_page(vcpu, s); 2489 } 2490} 2491 2492static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, 2493 bool can_unsync) 2494{ 2495 struct kvm_mmu_page *s; 2496 bool need_unsync = false; 2497 2498 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) { 2499 if (!can_unsync) 2500 return 1; 2501 2502 if (s->role.level != PT_PAGE_TABLE_LEVEL) 2503 return 1; 2504 2505 if (!s->unsync) 2506 need_unsync = true; 2507 } 2508 if (need_unsync) 2509 kvm_unsync_pages(vcpu, gfn); 2510 return 0; 2511} 2512 2513static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, 2514 unsigned pte_access, int 
level, 2515 gfn_t gfn, pfn_t pfn, bool speculative, 2516 bool can_unsync, bool host_writable) 2517{ 2518 u64 spte; 2519 int ret = 0; 2520 2521 if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access)) 2522 return 0; 2523 2524 spte = PT_PRESENT_MASK; 2525 if (!speculative) 2526 spte |= shadow_accessed_mask; 2527 2528 if (pte_access & ACC_EXEC_MASK) 2529 spte |= shadow_x_mask; 2530 else 2531 spte |= shadow_nx_mask; 2532 2533 if (pte_access & ACC_USER_MASK) 2534 spte |= shadow_user_mask; 2535 2536 if (level > PT_PAGE_TABLE_LEVEL) 2537 spte |= PT_PAGE_SIZE_MASK; 2538 if (tdp_enabled) 2539 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, 2540 kvm_is_reserved_pfn(pfn)); 2541 2542 if (host_writable) 2543 spte |= SPTE_HOST_WRITEABLE; 2544 else 2545 pte_access &= ~ACC_WRITE_MASK; 2546 2547 spte |= (u64)pfn << PAGE_SHIFT; 2548 2549 if (pte_access & ACC_WRITE_MASK) { 2550 2551 /* 2552 * Other vcpu creates new sp in the window between 2553 * mapping_level() and acquiring mmu-lock. We can 2554 * allow guest to retry the access, the mapping can 2555 * be fixed if guest refault. 2556 */ 2557 if (level > PT_PAGE_TABLE_LEVEL && 2558 has_wrprotected_page(vcpu->kvm, gfn, level)) 2559 goto done; 2560 2561 spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; 2562 2563 /* 2564 * Optimization: for pte sync, if spte was writable the hash 2565 * lookup is unnecessary (and expensive). Write protection 2566 * is responsibility of mmu_get_page / kvm_sync_page. 2567 * Same reasoning can be applied to dirty page accounting. 2568 */ 2569 if (!can_unsync && is_writable_pte(*sptep)) 2570 goto set_pte; 2571 2572 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { 2573 pgprintk("%s: found shadow page for %llx, marking ro\n", 2574 __func__, gfn); 2575 ret = 1; 2576 pte_access &= ~ACC_WRITE_MASK; 2577 spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); 2578 } 2579 } 2580 2581 if (pte_access & ACC_WRITE_MASK) { 2582 mark_page_dirty(vcpu->kvm, gfn); 2583 spte |= shadow_dirty_mask; 2584 } 2585 2586set_pte: 2587 if (mmu_spte_update(sptep, spte)) 2588 kvm_flush_remote_tlbs(vcpu->kvm); 2589done: 2590 return ret; 2591} 2592 2593static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, 2594 unsigned pte_access, int write_fault, int *emulate, 2595 int level, gfn_t gfn, pfn_t pfn, bool speculative, 2596 bool host_writable) 2597{ 2598 int was_rmapped = 0; 2599 int rmap_count; 2600 2601 pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__, 2602 *sptep, write_fault, gfn); 2603 2604 if (is_rmap_spte(*sptep)) { 2605 /* 2606 * If we overwrite a PTE page pointer with a 2MB PMD, unlink 2607 * the parent of the now unreachable PTE. 
2608 	 */
2609 		if (level > PT_PAGE_TABLE_LEVEL &&
2610 		    !is_large_pte(*sptep)) {
2611 			struct kvm_mmu_page *child;
2612 			u64 pte = *sptep;
2613 
2614 			child = page_header(pte & PT64_BASE_ADDR_MASK);
2615 			drop_parent_pte(child, sptep);
2616 			kvm_flush_remote_tlbs(vcpu->kvm);
2617 		} else if (pfn != spte_to_pfn(*sptep)) {
2618 			pgprintk("hfn old %llx new %llx\n",
2619 				 spte_to_pfn(*sptep), pfn);
2620 			drop_spte(vcpu->kvm, sptep);
2621 			kvm_flush_remote_tlbs(vcpu->kvm);
2622 		} else
2623 			was_rmapped = 1;
2624 	}
2625 
2626 	if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
2627 	      true, host_writable)) {
2628 		if (write_fault)
2629 			*emulate = 1;
2630 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2631 	}
2632 
2633 	if (unlikely(is_mmio_spte(*sptep) && emulate))
2634 		*emulate = 1;
2635 
2636 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2637 	pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
2638 		 is_large_pte(*sptep)? "2MB" : "4kB",
2639 		 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
2640 		 *sptep, sptep);
2641 	if (!was_rmapped && is_large_pte(*sptep))
2642 		++vcpu->kvm->stat.lpages;
2643 
2644 	if (is_shadow_present_pte(*sptep)) {
2645 		if (!was_rmapped) {
2646 			rmap_count = rmap_add(vcpu, sptep, gfn);
2647 			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2648 				rmap_recycle(vcpu, sptep, gfn);
2649 		}
2650 	}
2651 
2652 	kvm_release_pfn_clean(pfn);
2653 }
2654 
2655 static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2656 				     bool no_dirty_log)
2657 {
2658 	struct kvm_memory_slot *slot;
2659 
2660 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2661 	if (!slot)
2662 		return KVM_PFN_ERR_FAULT;
2663 
2664 	return gfn_to_pfn_memslot_atomic(slot, gfn);
2665 }
2666 
2667 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2668 				    struct kvm_mmu_page *sp,
2669 				    u64 *start, u64 *end)
2670 {
2671 	struct page *pages[PTE_PREFETCH_NUM];
2672 	unsigned access = sp->role.access;
2673 	int i, ret;
2674 	gfn_t gfn;
2675 
2676 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2677 	if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK))
2678 		return -1;
2679 
2680 	ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
2681 	if (ret <= 0)
2682 		return -1;
2683 
2684 	for (i = 0; i < ret; i++, gfn++, start++)
2685 		mmu_set_spte(vcpu, start, access, 0, NULL,
2686 			     sp->role.level, gfn, page_to_pfn(pages[i]),
2687 			     true, true);
2688 
2689 	return 0;
2690 }
2691 
2692 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2693 				  struct kvm_mmu_page *sp, u64 *sptep)
2694 {
2695 	u64 *spte, *start = NULL;
2696 	int i;
2697 
2698 	WARN_ON(!sp->role.direct);
2699 
2700 	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2701 	spte = sp->spt + i;
2702 
2703 	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2704 		if (is_shadow_present_pte(*spte) || spte == sptep) {
2705 			if (!start)
2706 				continue;
2707 			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2708 				break;
2709 			start = NULL;
2710 		} else if (!start)
2711 			start = spte;
2712 	}
2713 }
2714 
2715 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2716 {
2717 	struct kvm_mmu_page *sp;
2718 
2719 	/*
2720 	 * Since there is no accessed bit on EPT, there is no way to
2721 	 * distinguish between actually accessed translations and
2722 	 * prefetched ones, so disable pte prefetch if EPT is
2723 	 * enabled.
2724 */ 2725 if (!shadow_accessed_mask) 2726 return; 2727 2728 sp = page_header(__pa(sptep)); 2729 if (sp->role.level > PT_PAGE_TABLE_LEVEL) 2730 return; 2731 2732 __direct_pte_prefetch(vcpu, sp, sptep); 2733} 2734 2735static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, 2736 int map_writable, int level, gfn_t gfn, pfn_t pfn, 2737 bool prefault) 2738{ 2739 struct kvm_shadow_walk_iterator iterator; 2740 struct kvm_mmu_page *sp; 2741 int emulate = 0; 2742 gfn_t pseudo_gfn; 2743 2744 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 2745 return 0; 2746 2747 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { 2748 if (iterator.level == level) { 2749 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, 2750 write, &emulate, level, gfn, pfn, 2751 prefault, map_writable); 2752 direct_pte_prefetch(vcpu, iterator.sptep); 2753 ++vcpu->stat.pf_fixed; 2754 break; 2755 } 2756 2757 drop_large_spte(vcpu, iterator.sptep); 2758 if (!is_shadow_present_pte(*iterator.sptep)) { 2759 u64 base_addr = iterator.addr; 2760 2761 base_addr &= PT64_LVL_ADDR_MASK(iterator.level); 2762 pseudo_gfn = base_addr >> PAGE_SHIFT; 2763 sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, 2764 iterator.level - 1, 2765 1, ACC_ALL, iterator.sptep); 2766 2767 link_shadow_page(iterator.sptep, sp, true); 2768 } 2769 } 2770 return emulate; 2771} 2772 2773static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) 2774{ 2775 siginfo_t info; 2776 2777 info.si_signo = SIGBUS; 2778 info.si_errno = 0; 2779 info.si_code = BUS_MCEERR_AR; 2780 info.si_addr = (void __user *)address; 2781 info.si_addr_lsb = PAGE_SHIFT; 2782 2783 send_sig_info(SIGBUS, &info, tsk); 2784} 2785 2786static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) 2787{ 2788 /* 2789 * Do not cache the mmio info caused by writing the readonly gfn 2790 * into the spte otherwise read access on readonly gfn also can 2791 * caused mmio page fault and treat it as mmio access. 2792 * Return 1 to tell kvm to emulate it. 2793 */ 2794 if (pfn == KVM_PFN_ERR_RO_FAULT) 2795 return 1; 2796 2797 if (pfn == KVM_PFN_ERR_HWPOISON) { 2798 kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current); 2799 return 0; 2800 } 2801 2802 return -EFAULT; 2803} 2804 2805static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, 2806 gfn_t *gfnp, pfn_t *pfnp, int *levelp) 2807{ 2808 pfn_t pfn = *pfnp; 2809 gfn_t gfn = *gfnp; 2810 int level = *levelp; 2811 2812 /* 2813 * Check if it's a transparent hugepage. If this would be an 2814 * hugetlbfs page, level wouldn't be set to 2815 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done 2816 * here. 2817 */ 2818 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && 2819 level == PT_PAGE_TABLE_LEVEL && 2820 PageTransCompound(pfn_to_page(pfn)) && 2821 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { 2822 unsigned long mask; 2823 /* 2824 * mmu_notifier_retry was successful and we hold the 2825 * mmu_lock here, so the pmd can't become splitting 2826 * from under us, and in turn 2827 * __split_huge_page_refcount() can't run from under 2828 * us and we can safely transfer the refcount from 2829 * PG_tail to PG_head as we switch the pfn to tail to 2830 * head. 
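	 * As a purely illustrative example (hypothetical numbers): with a
	 * 2MB compound page the mask below is 511, so a gfn of 0x10203
	 * backed by pfn 0x45603 becomes gfn 0x10200 and pfn 0x45600, i.e.
	 * both are moved back to the head of the huge frame; their low
	 * 9 bits must already agree, which the VM_BUG_ON below checks.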
2831 */ 2832 *levelp = level = PT_DIRECTORY_LEVEL; 2833 mask = KVM_PAGES_PER_HPAGE(level) - 1; 2834 VM_BUG_ON((gfn & mask) != (pfn & mask)); 2835 if (pfn & mask) { 2836 gfn &= ~mask; 2837 *gfnp = gfn; 2838 kvm_release_pfn_clean(pfn); 2839 pfn &= ~mask; 2840 kvm_get_pfn(pfn); 2841 *pfnp = pfn; 2842 } 2843 } 2844} 2845 2846static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, 2847 pfn_t pfn, unsigned access, int *ret_val) 2848{ 2849 bool ret = true; 2850 2851 /* The pfn is invalid, report the error! */ 2852 if (unlikely(is_error_pfn(pfn))) { 2853 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); 2854 goto exit; 2855 } 2856 2857 if (unlikely(is_noslot_pfn(pfn))) 2858 vcpu_cache_mmio_info(vcpu, gva, gfn, access); 2859 2860 ret = false; 2861exit: 2862 return ret; 2863} 2864 2865static bool page_fault_can_be_fast(u32 error_code) 2866{ 2867 /* 2868 * Do not fix the mmio spte with invalid generation number which 2869 * need to be updated by slow page fault path. 2870 */ 2871 if (unlikely(error_code & PFERR_RSVD_MASK)) 2872 return false; 2873 2874 /* 2875 * #PF can be fast only if the shadow page table is present and it 2876 * is caused by write-protect, that means we just need change the 2877 * W bit of the spte which can be done out of mmu-lock. 2878 */ 2879 if (!(error_code & PFERR_PRESENT_MASK) || 2880 !(error_code & PFERR_WRITE_MASK)) 2881 return false; 2882 2883 return true; 2884} 2885 2886static bool 2887fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 2888 u64 *sptep, u64 spte) 2889{ 2890 gfn_t gfn; 2891 2892 WARN_ON(!sp->role.direct); 2893 2894 /* 2895 * The gfn of direct spte is stable since it is calculated 2896 * by sp->gfn. 2897 */ 2898 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); 2899 2900 /* 2901 * Theoretically we could also set dirty bit (and flush TLB) here in 2902 * order to eliminate unnecessary PML logging. See comments in 2903 * set_spte. But fast_page_fault is very unlikely to happen with PML 2904 * enabled, so we do not do this. This might result in the same GPA 2905 * to be logged in PML buffer again when the write really happens, and 2906 * eventually to be called by mark_page_dirty twice. But it's also no 2907 * harm. This also avoids the TLB flush needed after setting dirty bit 2908 * so non-PML cases won't be impacted. 2909 * 2910 * Compare with set_spte where instead shadow_dirty_mask is set. 2911 */ 2912 if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte) 2913 mark_page_dirty(vcpu->kvm, gfn); 2914 2915 return true; 2916} 2917 2918/* 2919 * Return value: 2920 * - true: let the vcpu to access on the same address again. 2921 * - false: let the real page fault path to fix it. 2922 */ 2923static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, 2924 u32 error_code) 2925{ 2926 struct kvm_shadow_walk_iterator iterator; 2927 struct kvm_mmu_page *sp; 2928 bool ret = false; 2929 u64 spte = 0ull; 2930 2931 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 2932 return false; 2933 2934 if (!page_fault_can_be_fast(error_code)) 2935 return false; 2936 2937 walk_shadow_page_lockless_begin(vcpu); 2938 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte) 2939 if (!is_shadow_present_pte(spte) || iterator.level < level) 2940 break; 2941 2942 /* 2943 * If the mapping has been changed, let the vcpu fault on the 2944 * same address again. 
2945 */ 2946 if (!is_rmap_spte(spte)) { 2947 ret = true; 2948 goto exit; 2949 } 2950 2951 sp = page_header(__pa(iterator.sptep)); 2952 if (!is_last_spte(spte, sp->role.level)) 2953 goto exit; 2954 2955 /* 2956 * Check if it is a spurious fault caused by TLB lazily flushed. 2957 * 2958 * Need not check the access of upper level table entries since 2959 * they are always ACC_ALL. 2960 */ 2961 if (is_writable_pte(spte)) { 2962 ret = true; 2963 goto exit; 2964 } 2965 2966 /* 2967 * Currently, to simplify the code, only the spte write-protected 2968 * by dirty-log can be fast fixed. 2969 */ 2970 if (!spte_is_locklessly_modifiable(spte)) 2971 goto exit; 2972 2973 /* 2974 * Do not fix write-permission on the large spte since we only dirty 2975 * the first page into the dirty-bitmap in fast_pf_fix_direct_spte() 2976 * that means other pages are missed if its slot is dirty-logged. 2977 * 2978 * Instead, we let the slow page fault path create a normal spte to 2979 * fix the access. 2980 * 2981 * See the comments in kvm_arch_commit_memory_region(). 2982 */ 2983 if (sp->role.level > PT_PAGE_TABLE_LEVEL) 2984 goto exit; 2985 2986 /* 2987 * Currently, fast page fault only works for direct mapping since 2988 * the gfn is not stable for indirect shadow page. 2989 * See Documentation/virtual/kvm/locking.txt to get more detail. 2990 */ 2991 ret = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte); 2992exit: 2993 trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep, 2994 spte, ret); 2995 walk_shadow_page_lockless_end(vcpu); 2996 2997 return ret; 2998} 2999 3000static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, 3001 gva_t gva, pfn_t *pfn, bool write, bool *writable); 3002static void make_mmu_pages_available(struct kvm_vcpu *vcpu); 3003 3004static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code, 3005 gfn_t gfn, bool prefault) 3006{ 3007 int r; 3008 int level; 3009 int force_pt_level; 3010 pfn_t pfn; 3011 unsigned long mmu_seq; 3012 bool map_writable, write = error_code & PFERR_WRITE_MASK; 3013 3014 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); 3015 if (likely(!force_pt_level)) { 3016 level = mapping_level(vcpu, gfn); 3017 /* 3018 * This path builds a PAE pagetable - so we can map 3019 * 2mb pages at maximum. Therefore check if the level 3020 * is larger than that. 
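	 * (In PAE format only PDEs can be large leaf entries, so even a
	 * 1GB host-side backing has to be shadowed with 2MB mappings
	 * here.)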
3021 */ 3022 if (level > PT_DIRECTORY_LEVEL) 3023 level = PT_DIRECTORY_LEVEL; 3024 3025 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); 3026 } else 3027 level = PT_PAGE_TABLE_LEVEL; 3028 3029 if (fast_page_fault(vcpu, v, level, error_code)) 3030 return 0; 3031 3032 mmu_seq = vcpu->kvm->mmu_notifier_seq; 3033 smp_rmb(); 3034 3035 if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) 3036 return 0; 3037 3038 if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) 3039 return r; 3040 3041 spin_lock(&vcpu->kvm->mmu_lock); 3042 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) 3043 goto out_unlock; 3044 make_mmu_pages_available(vcpu); 3045 if (likely(!force_pt_level)) 3046 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); 3047 r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, 3048 prefault); 3049 spin_unlock(&vcpu->kvm->mmu_lock); 3050 3051 3052 return r; 3053 3054out_unlock: 3055 spin_unlock(&vcpu->kvm->mmu_lock); 3056 kvm_release_pfn_clean(pfn); 3057 return 0; 3058} 3059 3060 3061static void mmu_free_roots(struct kvm_vcpu *vcpu) 3062{ 3063 int i; 3064 struct kvm_mmu_page *sp; 3065 LIST_HEAD(invalid_list); 3066 3067 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 3068 return; 3069 3070 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && 3071 (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || 3072 vcpu->arch.mmu.direct_map)) { 3073 hpa_t root = vcpu->arch.mmu.root_hpa; 3074 3075 spin_lock(&vcpu->kvm->mmu_lock); 3076 sp = page_header(root); 3077 --sp->root_count; 3078 if (!sp->root_count && sp->role.invalid) { 3079 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); 3080 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 3081 } 3082 spin_unlock(&vcpu->kvm->mmu_lock); 3083 vcpu->arch.mmu.root_hpa = INVALID_PAGE; 3084 return; 3085 } 3086 3087 spin_lock(&vcpu->kvm->mmu_lock); 3088 for (i = 0; i < 4; ++i) { 3089 hpa_t root = vcpu->arch.mmu.pae_root[i]; 3090 3091 if (root) { 3092 root &= PT64_BASE_ADDR_MASK; 3093 sp = page_header(root); 3094 --sp->root_count; 3095 if (!sp->root_count && sp->role.invalid) 3096 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, 3097 &invalid_list); 3098 } 3099 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; 3100 } 3101 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); 3102 spin_unlock(&vcpu->kvm->mmu_lock); 3103 vcpu->arch.mmu.root_hpa = INVALID_PAGE; 3104} 3105 3106static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) 3107{ 3108 int ret = 0; 3109 3110 if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { 3111 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 3112 ret = 1; 3113 } 3114 3115 return ret; 3116} 3117 3118static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) 3119{ 3120 struct kvm_mmu_page *sp; 3121 unsigned i; 3122 3123 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { 3124 spin_lock(&vcpu->kvm->mmu_lock); 3125 make_mmu_pages_available(vcpu); 3126 sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 3127 1, ACC_ALL, NULL); 3128 ++sp->root_count; 3129 spin_unlock(&vcpu->kvm->mmu_lock); 3130 vcpu->arch.mmu.root_hpa = __pa(sp->spt); 3131 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { 3132 for (i = 0; i < 4; ++i) { 3133 hpa_t root = vcpu->arch.mmu.pae_root[i]; 3134 3135 MMU_WARN_ON(VALID_PAGE(root)); 3136 spin_lock(&vcpu->kvm->mmu_lock); 3137 make_mmu_pages_available(vcpu); 3138 sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), 3139 i << 30, 3140 PT32_ROOT_LEVEL, 1, ACC_ALL, 3141 NULL); 3142 root = __pa(sp->spt); 3143 ++sp->root_count; 3144 spin_unlock(&vcpu->kvm->mmu_lock); 3145 vcpu->arch.mmu.pae_root[i] = root | 
PT_PRESENT_MASK; 3146 } 3147 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); 3148 } else 3149 BUG(); 3150 3151 return 0; 3152} 3153 3154static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) 3155{ 3156 struct kvm_mmu_page *sp; 3157 u64 pdptr, pm_mask; 3158 gfn_t root_gfn; 3159 int i; 3160 3161 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; 3162 3163 if (mmu_check_root(vcpu, root_gfn)) 3164 return 1; 3165 3166 /* 3167 * Do we shadow a long mode page table? If so we need to 3168 * write-protect the guests page table root. 3169 */ 3170 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { 3171 hpa_t root = vcpu->arch.mmu.root_hpa; 3172 3173 MMU_WARN_ON(VALID_PAGE(root)); 3174 3175 spin_lock(&vcpu->kvm->mmu_lock); 3176 make_mmu_pages_available(vcpu); 3177 sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL, 3178 0, ACC_ALL, NULL); 3179 root = __pa(sp->spt); 3180 ++sp->root_count; 3181 spin_unlock(&vcpu->kvm->mmu_lock); 3182 vcpu->arch.mmu.root_hpa = root; 3183 return 0; 3184 } 3185 3186 /* 3187 * We shadow a 32 bit page table. This may be a legacy 2-level 3188 * or a PAE 3-level page table. In either case we need to be aware that 3189 * the shadow page table may be a PAE or a long mode page table. 3190 */ 3191 pm_mask = PT_PRESENT_MASK; 3192 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) 3193 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; 3194 3195 for (i = 0; i < 4; ++i) { 3196 hpa_t root = vcpu->arch.mmu.pae_root[i]; 3197 3198 MMU_WARN_ON(VALID_PAGE(root)); 3199 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { 3200 pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); 3201 if (!is_present_gpte(pdptr)) { 3202 vcpu->arch.mmu.pae_root[i] = 0; 3203 continue; 3204 } 3205 root_gfn = pdptr >> PAGE_SHIFT; 3206 if (mmu_check_root(vcpu, root_gfn)) 3207 return 1; 3208 } 3209 spin_lock(&vcpu->kvm->mmu_lock); 3210 make_mmu_pages_available(vcpu); 3211 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, 3212 PT32_ROOT_LEVEL, 0, 3213 ACC_ALL, NULL); 3214 root = __pa(sp->spt); 3215 ++sp->root_count; 3216 spin_unlock(&vcpu->kvm->mmu_lock); 3217 3218 vcpu->arch.mmu.pae_root[i] = root | pm_mask; 3219 } 3220 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); 3221 3222 /* 3223 * If we shadow a 32 bit page table with a long mode page 3224 * table we enter this path. 3225 */ 3226 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { 3227 if (vcpu->arch.mmu.lm_root == NULL) { 3228 /* 3229 * The additional page necessary for this is only 3230 * allocated on demand. 
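			 * With a 4-level shadow the four pae_root entries act
			 * as the PDPTEs, so one extra top-level page (lm_root)
			 * is needed whose first entry simply points back at
			 * pae_root; that page then becomes the root loaded
			 * into hardware.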
3231 */ 3232 3233 u64 *lm_root; 3234 3235 lm_root = (void*)get_zeroed_page(GFP_KERNEL); 3236 if (lm_root == NULL) 3237 return 1; 3238 3239 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; 3240 3241 vcpu->arch.mmu.lm_root = lm_root; 3242 } 3243 3244 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); 3245 } 3246 3247 return 0; 3248} 3249 3250static int mmu_alloc_roots(struct kvm_vcpu *vcpu) 3251{ 3252 if (vcpu->arch.mmu.direct_map) 3253 return mmu_alloc_direct_roots(vcpu); 3254 else 3255 return mmu_alloc_shadow_roots(vcpu); 3256} 3257 3258static void mmu_sync_roots(struct kvm_vcpu *vcpu) 3259{ 3260 int i; 3261 struct kvm_mmu_page *sp; 3262 3263 if (vcpu->arch.mmu.direct_map) 3264 return; 3265 3266 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 3267 return; 3268 3269 vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); 3270 kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); 3271 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { 3272 hpa_t root = vcpu->arch.mmu.root_hpa; 3273 sp = page_header(root); 3274 mmu_sync_children(vcpu, sp); 3275 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); 3276 return; 3277 } 3278 for (i = 0; i < 4; ++i) { 3279 hpa_t root = vcpu->arch.mmu.pae_root[i]; 3280 3281 if (root && VALID_PAGE(root)) { 3282 root &= PT64_BASE_ADDR_MASK; 3283 sp = page_header(root); 3284 mmu_sync_children(vcpu, sp); 3285 } 3286 } 3287 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); 3288} 3289 3290void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) 3291{ 3292 spin_lock(&vcpu->kvm->mmu_lock); 3293 mmu_sync_roots(vcpu); 3294 spin_unlock(&vcpu->kvm->mmu_lock); 3295} 3296EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); 3297 3298static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, 3299 u32 access, struct x86_exception *exception) 3300{ 3301 if (exception) 3302 exception->error_code = 0; 3303 return vaddr; 3304} 3305 3306static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, 3307 u32 access, 3308 struct x86_exception *exception) 3309{ 3310 if (exception) 3311 exception->error_code = 0; 3312 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); 3313} 3314 3315static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct) 3316{ 3317 if (direct) 3318 return vcpu_match_mmio_gpa(vcpu, addr); 3319 3320 return vcpu_match_mmio_gva(vcpu, addr); 3321} 3322 3323static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr) 3324{ 3325 struct kvm_shadow_walk_iterator iterator; 3326 u64 spte = 0ull; 3327 3328 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 3329 return spte; 3330 3331 walk_shadow_page_lockless_begin(vcpu); 3332 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) 3333 if (!is_shadow_present_pte(spte)) 3334 break; 3335 walk_shadow_page_lockless_end(vcpu); 3336 3337 return spte; 3338} 3339 3340int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct) 3341{ 3342 u64 spte; 3343 3344 if (quickly_check_mmio_pf(vcpu, addr, direct)) 3345 return RET_MMIO_PF_EMULATE; 3346 3347 spte = walk_shadow_page_get_mmio_spte(vcpu, addr); 3348 3349 if (is_mmio_spte(spte)) { 3350 gfn_t gfn = get_mmio_spte_gfn(spte); 3351 unsigned access = get_mmio_spte_access(spte); 3352 3353 if (!check_mmio_spte(vcpu->kvm, spte)) 3354 return RET_MMIO_PF_INVALID; 3355 3356 if (direct) 3357 addr = 0; 3358 3359 trace_handle_mmio_page_fault(addr, gfn, access); 3360 vcpu_cache_mmio_info(vcpu, addr, gfn, access); 3361 return RET_MMIO_PF_EMULATE; 3362 } 3363 3364 /* 3365 * If the page table is zapped by other cpus, let CPU fault again on 3366 * the address. 
3367 */ 3368 return RET_MMIO_PF_RETRY; 3369} 3370EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common); 3371 3372static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, 3373 u32 error_code, bool direct) 3374{ 3375 int ret; 3376 3377 ret = handle_mmio_page_fault_common(vcpu, addr, direct); 3378 WARN_ON(ret == RET_MMIO_PF_BUG); 3379 return ret; 3380} 3381 3382static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, 3383 u32 error_code, bool prefault) 3384{ 3385 gfn_t gfn; 3386 int r; 3387 3388 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); 3389 3390 if (unlikely(error_code & PFERR_RSVD_MASK)) { 3391 r = handle_mmio_page_fault(vcpu, gva, error_code, true); 3392 3393 if (likely(r != RET_MMIO_PF_INVALID)) 3394 return r; 3395 } 3396 3397 r = mmu_topup_memory_caches(vcpu); 3398 if (r) 3399 return r; 3400 3401 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); 3402 3403 gfn = gva >> PAGE_SHIFT; 3404 3405 return nonpaging_map(vcpu, gva & PAGE_MASK, 3406 error_code, gfn, prefault); 3407} 3408 3409static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) 3410{ 3411 struct kvm_arch_async_pf arch; 3412 3413 arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; 3414 arch.gfn = gfn; 3415 arch.direct_map = vcpu->arch.mmu.direct_map; 3416 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); 3417 3418 return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch); 3419} 3420 3421static bool can_do_async_pf(struct kvm_vcpu *vcpu) 3422{ 3423 if (unlikely(!irqchip_in_kernel(vcpu->kvm) || 3424 kvm_event_needs_reinjection(vcpu))) 3425 return false; 3426 3427 return kvm_x86_ops->interrupt_allowed(vcpu); 3428} 3429 3430static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, 3431 gva_t gva, pfn_t *pfn, bool write, bool *writable) 3432{ 3433 bool async; 3434 3435 *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable); 3436 3437 if (!async) 3438 return false; /* *pfn has correct page already */ 3439 3440 if (!prefault && can_do_async_pf(vcpu)) { 3441 trace_kvm_try_async_get_page(gva, gfn); 3442 if (kvm_find_async_pf_gfn(vcpu, gfn)) { 3443 trace_kvm_async_pf_doublefault(gva, gfn); 3444 kvm_make_request(KVM_REQ_APF_HALT, vcpu); 3445 return true; 3446 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) 3447 return true; 3448 } 3449 3450 *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable); 3451 3452 return false; 3453} 3454 3455static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, 3456 bool prefault) 3457{ 3458 pfn_t pfn; 3459 int r; 3460 int level; 3461 int force_pt_level; 3462 gfn_t gfn = gpa >> PAGE_SHIFT; 3463 unsigned long mmu_seq; 3464 int write = error_code & PFERR_WRITE_MASK; 3465 bool map_writable; 3466 3467 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); 3468 3469 if (unlikely(error_code & PFERR_RSVD_MASK)) { 3470 r = handle_mmio_page_fault(vcpu, gpa, error_code, true); 3471 3472 if (likely(r != RET_MMIO_PF_INVALID)) 3473 return r; 3474 } 3475 3476 r = mmu_topup_memory_caches(vcpu); 3477 if (r) 3478 return r; 3479 3480 force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); 3481 if (likely(!force_pt_level)) { 3482 level = mapping_level(vcpu, gfn); 3483 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); 3484 } else 3485 level = PT_PAGE_TABLE_LEVEL; 3486 3487 if (fast_page_fault(vcpu, gpa, level, error_code)) 3488 return 0; 3489 3490 mmu_seq = vcpu->kvm->mmu_notifier_seq; 3491 smp_rmb(); 3492 3493 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) 3494 return 0; 3495 3496 if 
(handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) 3497 return r; 3498 3499 spin_lock(&vcpu->kvm->mmu_lock); 3500 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) 3501 goto out_unlock; 3502 make_mmu_pages_available(vcpu); 3503 if (likely(!force_pt_level)) 3504 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); 3505 r = __direct_map(vcpu, gpa, write, map_writable, 3506 level, gfn, pfn, prefault); 3507 spin_unlock(&vcpu->kvm->mmu_lock); 3508 3509 return r; 3510 3511out_unlock: 3512 spin_unlock(&vcpu->kvm->mmu_lock); 3513 kvm_release_pfn_clean(pfn); 3514 return 0; 3515} 3516 3517static void nonpaging_init_context(struct kvm_vcpu *vcpu, 3518 struct kvm_mmu *context) 3519{ 3520 context->page_fault = nonpaging_page_fault; 3521 context->gva_to_gpa = nonpaging_gva_to_gpa; 3522 context->sync_page = nonpaging_sync_page; 3523 context->invlpg = nonpaging_invlpg; 3524 context->update_pte = nonpaging_update_pte; 3525 context->root_level = 0; 3526 context->shadow_root_level = PT32E_ROOT_LEVEL; 3527 context->root_hpa = INVALID_PAGE; 3528 context->direct_map = true; 3529 context->nx = false; 3530} 3531 3532void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu) 3533{ 3534 mmu_free_roots(vcpu); 3535} 3536 3537static unsigned long get_cr3(struct kvm_vcpu *vcpu) 3538{ 3539 return kvm_read_cr3(vcpu); 3540} 3541 3542static void inject_page_fault(struct kvm_vcpu *vcpu, 3543 struct x86_exception *fault) 3544{ 3545 vcpu->arch.mmu.inject_page_fault(vcpu, fault); 3546} 3547 3548static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, 3549 unsigned access, int *nr_present) 3550{ 3551 if (unlikely(is_mmio_spte(*sptep))) { 3552 if (gfn != get_mmio_spte_gfn(*sptep)) { 3553 mmu_spte_clear_no_track(sptep); 3554 return true; 3555 } 3556 3557 (*nr_present)++; 3558 mark_mmio_spte(kvm, sptep, gfn, access); 3559 return true; 3560 } 3561 3562 return false; 3563} 3564 3565static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte) 3566{ 3567 unsigned index; 3568 3569 index = level - 1; 3570 index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2); 3571 return mmu->last_pte_bitmap & (1 << index); 3572} 3573 3574#define PTTYPE_EPT 18 /* arbitrary */ 3575#define PTTYPE PTTYPE_EPT 3576#include "paging_tmpl.h" 3577#undef PTTYPE 3578 3579#define PTTYPE 64 3580#include "paging_tmpl.h" 3581#undef PTTYPE 3582 3583#define PTTYPE 32 3584#include "paging_tmpl.h" 3585#undef PTTYPE 3586 3587static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, 3588 struct kvm_mmu *context) 3589{ 3590 int maxphyaddr = cpuid_maxphyaddr(vcpu); 3591 u64 exb_bit_rsvd = 0; 3592 u64 gbpages_bit_rsvd = 0; 3593 u64 nonleaf_bit8_rsvd = 0; 3594 3595 context->bad_mt_xwr = 0; 3596 3597 if (!context->nx) 3598 exb_bit_rsvd = rsvd_bits(63, 63); 3599 if (!guest_cpuid_has_gbpages(vcpu)) 3600 gbpages_bit_rsvd = rsvd_bits(7, 7); 3601 3602 /* 3603 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for 3604 * leaf entries) on AMD CPUs only. 
3605 */ 3606 if (guest_cpuid_is_amd(vcpu)) 3607 nonleaf_bit8_rsvd = rsvd_bits(8, 8); 3608 3609 switch (context->root_level) { 3610 case PT32_ROOT_LEVEL: 3611 /* no rsvd bits for 2 level 4K page table entries */ 3612 context->rsvd_bits_mask[0][1] = 0; 3613 context->rsvd_bits_mask[0][0] = 0; 3614 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; 3615 3616 if (!is_pse(vcpu)) { 3617 context->rsvd_bits_mask[1][1] = 0; 3618 break; 3619 } 3620 3621 if (is_cpuid_PSE36()) 3622 /* 36bits PSE 4MB page */ 3623 context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); 3624 else 3625 /* 32 bits PSE 4MB page */ 3626 context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); 3627 break; 3628 case PT32E_ROOT_LEVEL: 3629 context->rsvd_bits_mask[0][2] = 3630 rsvd_bits(maxphyaddr, 63) | 3631 rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ 3632 context->rsvd_bits_mask[0][1] = exb_bit_rsvd | 3633 rsvd_bits(maxphyaddr, 62); /* PDE */ 3634 context->rsvd_bits_mask[0][0] = exb_bit_rsvd | 3635 rsvd_bits(maxphyaddr, 62); /* PTE */ 3636 context->rsvd_bits_mask[1][1] = exb_bit_rsvd | 3637 rsvd_bits(maxphyaddr, 62) | 3638 rsvd_bits(13, 20); /* large page */ 3639 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; 3640 break; 3641 case PT64_ROOT_LEVEL: 3642 context->rsvd_bits_mask[0][3] = exb_bit_rsvd | 3643 nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51); 3644 context->rsvd_bits_mask[0][2] = exb_bit_rsvd | 3645 nonleaf_bit8_rsvd | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51); 3646 context->rsvd_bits_mask[0][1] = exb_bit_rsvd | 3647 rsvd_bits(maxphyaddr, 51); 3648 context->rsvd_bits_mask[0][0] = exb_bit_rsvd | 3649 rsvd_bits(maxphyaddr, 51); 3650 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; 3651 context->rsvd_bits_mask[1][2] = exb_bit_rsvd | 3652 gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) | 3653 rsvd_bits(13, 29); 3654 context->rsvd_bits_mask[1][1] = exb_bit_rsvd | 3655 rsvd_bits(maxphyaddr, 51) | 3656 rsvd_bits(13, 20); /* large page */ 3657 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; 3658 break; 3659 } 3660} 3661 3662static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, 3663 struct kvm_mmu *context, bool execonly) 3664{ 3665 int maxphyaddr = cpuid_maxphyaddr(vcpu); 3666 int pte; 3667 3668 context->rsvd_bits_mask[0][3] = 3669 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); 3670 context->rsvd_bits_mask[0][2] = 3671 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); 3672 context->rsvd_bits_mask[0][1] = 3673 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); 3674 context->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); 3675 3676 /* large page */ 3677 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; 3678 context->rsvd_bits_mask[1][2] = 3679 rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); 3680 context->rsvd_bits_mask[1][1] = 3681 rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); 3682 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; 3683 3684 for (pte = 0; pte < 64; pte++) { 3685 int rwx_bits = pte & 7; 3686 int mt = pte >> 3; 3687 if (mt == 0x2 || mt == 0x3 || mt == 0x7 || 3688 rwx_bits == 0x2 || rwx_bits == 0x6 || 3689 (rwx_bits == 0x4 && !execonly)) 3690 context->bad_mt_xwr |= (1ull << pte); 3691 } 3692} 3693 3694static void update_permission_bitmask(struct kvm_vcpu *vcpu, 3695 struct kvm_mmu *mmu, bool ept) 3696{ 3697 unsigned bit, byte, pfec; 3698 u8 map; 3699 bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, cr4_smep, smap = 0; 3700 3701 cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); 3702 cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); 
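	/*
	 * A rough sketch of how the table built below is consumed (the real
	 * lookup lives in permission_fault() and also folds in the current
	 * SMAP/EFLAGS.AC state): the page-fault error code selects the byte
	 * and the pte's access bits select the bit, conceptually
	 *
	 *	fault = (mmu->permissions[pfec >> 1] >> pte_access) & 1;
	 *
	 * For example, a user-mode write (pfec = PFERR_USER_MASK |
	 * PFERR_WRITE_MASK) hitting a supervisor-only, read-only pte has
	 * uf && !u and wf && !w, so its fault bit ends up set.
	 */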
3703 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
3704 		pfec = byte << 1;
3705 		map = 0;
3706 		wf = pfec & PFERR_WRITE_MASK;
3707 		uf = pfec & PFERR_USER_MASK;
3708 		ff = pfec & PFERR_FETCH_MASK;
3709 		/*
3710 		 * PFERR_RSVD_MASK bit is set in PFEC if the access is not
3711 		 * subject to SMAP restrictions, and cleared otherwise. The
3712 		 * bit is only meaningful if the SMAP bit is set in CR4.
3713 		 */
3714 		smapf = !(pfec & PFERR_RSVD_MASK);
3715 		for (bit = 0; bit < 8; ++bit) {
3716 			x = bit & ACC_EXEC_MASK;
3717 			w = bit & ACC_WRITE_MASK;
3718 			u = bit & ACC_USER_MASK;
3719 
3720 			if (!ept) {
3721 				/* Not really needed: !nx will cause pte.nx to fault */
3722 				x |= !mmu->nx;
3723 				/* Allow supervisor writes if !cr0.wp */
3724 				w |= !is_write_protection(vcpu) && !uf;
3725 				/* Disallow supervisor fetches of user code if cr4.smep */
3726 				x &= !(cr4_smep && u && !uf);
3727 
3728 				/*
3729 				 * SMAP: kernel-mode data accesses from user-mode
3730 				 * mappings should fault. A fault is considered
3731 				 * as a SMAP violation if all of the following
3732 				 * conditions are true:
3733 				 * - X86_CR4_SMAP is set in CR4
3734 				 * - A user page is accessed
3735 				 * - Page fault in kernel mode
3736 				 * - CPL = 3 or X86_EFLAGS_AC is clear
3737 				 *
3738 				 * Here, we cover the first three conditions.
3739 				 * The fourth is computed dynamically in
3740 				 * permission_fault() and is in smapf.
3741 				 *
3742 				 * Also, SMAP does not affect instruction
3743 				 * fetches, so add the !ff check here to make
3744 				 * that clearer.
3745 				 */
3746 				smap = cr4_smap && u && !uf && !ff;
3747 			} else
3748 				/* Not really needed: no U/S accesses on ept */
3749 				u = 1;
3750 
3751 			fault = (ff && !x) || (uf && !u) || (wf && !w) ||
3752 				(smapf && smap);
3753 			map |= fault << bit;
3754 		}
3755 		mmu->permissions[byte] = map;
3756 	}
3757 }
3758 
3759 static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
3760 {
3761 	u8 map;
3762 	unsigned level, root_level = mmu->root_level;
3763 	const unsigned ps_set_index = 1 << 2;  /* bit 2 of index: ps */
3764 
3765 	if (root_level == PT32E_ROOT_LEVEL)
3766 		--root_level;
3767 	/* PT_PAGE_TABLE_LEVEL always terminates */
3768 	map = 1 | (1 << ps_set_index);
3769 	for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level) {
3770 		if (level <= PT_PDPE_LEVEL
3771 		    && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu)))
3772 			map |= 1 << (ps_set_index | (level - 1));
3773 	}
3774 	mmu->last_pte_bitmap = map;
3775 }
3776 
3777 static void paging64_init_context_common(struct kvm_vcpu *vcpu,
3778 					 struct kvm_mmu *context,
3779 					 int level)
3780 {
3781 	context->nx = is_nx(vcpu);
3782 	context->root_level = level;
3783 
3784 	reset_rsvds_bits_mask(vcpu, context);
3785 	update_permission_bitmask(vcpu, context, false);
3786 	update_last_pte_bitmap(vcpu, context);
3787 
3788 	MMU_WARN_ON(!is_pae(vcpu));
3789 	context->page_fault = paging64_page_fault;
3790 	context->gva_to_gpa = paging64_gva_to_gpa;
3791 	context->sync_page = paging64_sync_page;
3792 	context->invlpg = paging64_invlpg;
3793 	context->update_pte = paging64_update_pte;
3794 	context->shadow_root_level = level;
3795 	context->root_hpa = INVALID_PAGE;
3796 	context->direct_map = false;
3797 }
3798 
3799 static void paging64_init_context(struct kvm_vcpu *vcpu,
3800 				  struct kvm_mmu *context)
3801 {
3802 	paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
3803 }
3804 
3805 static void paging32_init_context(struct kvm_vcpu *vcpu,
3806 				  struct kvm_mmu *context)
3807 {
3808 	context->nx = false;
3809 	context->root_level = PT32_ROOT_LEVEL;
3810 
3811 	reset_rsvds_bits_mask(vcpu, context);
3812 
update_permission_bitmask(vcpu, context, false); 3813 update_last_pte_bitmap(vcpu, context); 3814 3815 context->page_fault = paging32_page_fault; 3816 context->gva_to_gpa = paging32_gva_to_gpa; 3817 context->sync_page = paging32_sync_page; 3818 context->invlpg = paging32_invlpg; 3819 context->update_pte = paging32_update_pte; 3820 context->shadow_root_level = PT32E_ROOT_LEVEL; 3821 context->root_hpa = INVALID_PAGE; 3822 context->direct_map = false; 3823} 3824 3825static void paging32E_init_context(struct kvm_vcpu *vcpu, 3826 struct kvm_mmu *context) 3827{ 3828 paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); 3829} 3830 3831static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) 3832{ 3833 struct kvm_mmu *context = &vcpu->arch.mmu; 3834 3835 context->base_role.word = 0; 3836 context->page_fault = tdp_page_fault; 3837 context->sync_page = nonpaging_sync_page; 3838 context->invlpg = nonpaging_invlpg; 3839 context->update_pte = nonpaging_update_pte; 3840 context->shadow_root_level = kvm_x86_ops->get_tdp_level(); 3841 context->root_hpa = INVALID_PAGE; 3842 context->direct_map = true; 3843 context->set_cr3 = kvm_x86_ops->set_tdp_cr3; 3844 context->get_cr3 = get_cr3; 3845 context->get_pdptr = kvm_pdptr_read; 3846 context->inject_page_fault = kvm_inject_page_fault; 3847 3848 if (!is_paging(vcpu)) { 3849 context->nx = false; 3850 context->gva_to_gpa = nonpaging_gva_to_gpa; 3851 context->root_level = 0; 3852 } else if (is_long_mode(vcpu)) { 3853 context->nx = is_nx(vcpu); 3854 context->root_level = PT64_ROOT_LEVEL; 3855 reset_rsvds_bits_mask(vcpu, context); 3856 context->gva_to_gpa = paging64_gva_to_gpa; 3857 } else if (is_pae(vcpu)) { 3858 context->nx = is_nx(vcpu); 3859 context->root_level = PT32E_ROOT_LEVEL; 3860 reset_rsvds_bits_mask(vcpu, context); 3861 context->gva_to_gpa = paging64_gva_to_gpa; 3862 } else { 3863 context->nx = false; 3864 context->root_level = PT32_ROOT_LEVEL; 3865 reset_rsvds_bits_mask(vcpu, context); 3866 context->gva_to_gpa = paging32_gva_to_gpa; 3867 } 3868 3869 update_permission_bitmask(vcpu, context, false); 3870 update_last_pte_bitmap(vcpu, context); 3871} 3872 3873void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) 3874{ 3875 bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); 3876 bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); 3877 struct kvm_mmu *context = &vcpu->arch.mmu; 3878 3879 MMU_WARN_ON(VALID_PAGE(context->root_hpa)); 3880 3881 if (!is_paging(vcpu)) 3882 nonpaging_init_context(vcpu, context); 3883 else if (is_long_mode(vcpu)) 3884 paging64_init_context(vcpu, context); 3885 else if (is_pae(vcpu)) 3886 paging32E_init_context(vcpu, context); 3887 else 3888 paging32_init_context(vcpu, context); 3889 3890 context->base_role.nxe = is_nx(vcpu); 3891 context->base_role.cr4_pae = !!is_pae(vcpu); 3892 context->base_role.cr0_wp = is_write_protection(vcpu); 3893 context->base_role.smep_andnot_wp 3894 = smep && !is_write_protection(vcpu); 3895 context->base_role.smap_andnot_wp 3896 = smap && !is_write_protection(vcpu); 3897} 3898EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); 3899 3900void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly) 3901{ 3902 struct kvm_mmu *context = &vcpu->arch.mmu; 3903 3904 MMU_WARN_ON(VALID_PAGE(context->root_hpa)); 3905 3906 context->shadow_root_level = kvm_x86_ops->get_tdp_level(); 3907 3908 context->nx = true; 3909 context->page_fault = ept_page_fault; 3910 context->gva_to_gpa = ept_gva_to_gpa; 3911 context->sync_page = ept_sync_page; 3912 context->invlpg = ept_invlpg; 3913 context->update_pte = ept_update_pte; 3914 
context->root_level = context->shadow_root_level; 3915 context->root_hpa = INVALID_PAGE; 3916 context->direct_map = false; 3917 3918 update_permission_bitmask(vcpu, context, true); 3919 reset_rsvds_bits_mask_ept(vcpu, context, execonly); 3920} 3921EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); 3922 3923static void init_kvm_softmmu(struct kvm_vcpu *vcpu) 3924{ 3925 struct kvm_mmu *context = &vcpu->arch.mmu; 3926 3927 kvm_init_shadow_mmu(vcpu); 3928 context->set_cr3 = kvm_x86_ops->set_cr3; 3929 context->get_cr3 = get_cr3; 3930 context->get_pdptr = kvm_pdptr_read; 3931 context->inject_page_fault = kvm_inject_page_fault; 3932} 3933 3934static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) 3935{ 3936 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; 3937 3938 g_context->get_cr3 = get_cr3; 3939 g_context->get_pdptr = kvm_pdptr_read; 3940 g_context->inject_page_fault = kvm_inject_page_fault; 3941 3942 /* 3943 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The 3944 * translation of l2_gpa to l1_gpa addresses is done using the 3945 * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa 3946 * functions between mmu and nested_mmu are swapped. 3947 */ 3948 if (!is_paging(vcpu)) { 3949 g_context->nx = false; 3950 g_context->root_level = 0; 3951 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; 3952 } else if (is_long_mode(vcpu)) { 3953 g_context->nx = is_nx(vcpu); 3954 g_context->root_level = PT64_ROOT_LEVEL; 3955 reset_rsvds_bits_mask(vcpu, g_context); 3956 g_context->gva_to_gpa = paging64_gva_to_gpa_nested; 3957 } else if (is_pae(vcpu)) { 3958 g_context->nx = is_nx(vcpu); 3959 g_context->root_level = PT32E_ROOT_LEVEL; 3960 reset_rsvds_bits_mask(vcpu, g_context); 3961 g_context->gva_to_gpa = paging64_gva_to_gpa_nested; 3962 } else { 3963 g_context->nx = false; 3964 g_context->root_level = PT32_ROOT_LEVEL; 3965 reset_rsvds_bits_mask(vcpu, g_context); 3966 g_context->gva_to_gpa = paging32_gva_to_gpa_nested; 3967 } 3968 3969 update_permission_bitmask(vcpu, g_context, false); 3970 update_last_pte_bitmap(vcpu, g_context); 3971} 3972 3973static void init_kvm_mmu(struct kvm_vcpu *vcpu) 3974{ 3975 if (mmu_is_nested(vcpu)) 3976 init_kvm_nested_mmu(vcpu); 3977 else if (tdp_enabled) 3978 init_kvm_tdp_mmu(vcpu); 3979 else 3980 init_kvm_softmmu(vcpu); 3981} 3982 3983void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) 3984{ 3985 kvm_mmu_unload(vcpu); 3986 init_kvm_mmu(vcpu); 3987} 3988EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); 3989 3990int kvm_mmu_load(struct kvm_vcpu *vcpu) 3991{ 3992 int r; 3993 3994 r = mmu_topup_memory_caches(vcpu); 3995 if (r) 3996 goto out; 3997 r = mmu_alloc_roots(vcpu); 3998 kvm_mmu_sync_roots(vcpu); 3999 if (r) 4000 goto out; 4001 /* set_cr3() should ensure TLB has been flushed */ 4002 vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); 4003out: 4004 return r; 4005} 4006EXPORT_SYMBOL_GPL(kvm_mmu_load); 4007 4008void kvm_mmu_unload(struct kvm_vcpu *vcpu) 4009{ 4010 mmu_free_roots(vcpu); 4011 WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); 4012} 4013EXPORT_SYMBOL_GPL(kvm_mmu_unload); 4014 4015static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, 4016 struct kvm_mmu_page *sp, u64 *spte, 4017 const void *new) 4018{ 4019 if (sp->role.level != PT_PAGE_TABLE_LEVEL) { 4020 ++vcpu->kvm->stat.mmu_pde_zapped; 4021 return; 4022 } 4023 4024 ++vcpu->kvm->stat.mmu_pte_updated; 4025 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); 4026} 4027 4028static bool need_remote_flush(u64 old, u64 new) 4029{ 4030 if (!is_shadow_present_pte(old)) 4031 return false; 4032 if 
(!is_shadow_present_pte(new))
                return true;
        if ((old ^ new) & PT64_BASE_ADDR_MASK)
                return true;
        old ^= shadow_nx_mask;
        new ^= shadow_nx_mask;
        return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
                                    bool remote_flush, bool local_flush)
{
        if (zap_page)
                return;

        if (remote_flush)
                kvm_flush_remote_tlbs(vcpu->kvm);
        else if (local_flush)
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}

static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
                                    const u8 *new, int *bytes)
{
        u64 gentry;
        int r;

        /*
         * Assume that the pte write is on a page table of the same type
         * as the current vcpu paging mode, since we update the sptes only
         * when they have the same mode.
         */
        if (is_pae(vcpu) && *bytes == 4) {
                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
                *gpa &= ~(gpa_t)7;
                *bytes = 8;
                r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, 8);
                if (r)
                        gentry = 0;
                new = (const u8 *)&gentry;
        }

        switch (*bytes) {
        case 4:
                gentry = *(const u32 *)new;
                break;
        case 8:
                gentry = *(const u64 *)new;
                break;
        default:
                gentry = 0;
                break;
        }

        return gentry;
}

/*
 * If we're seeing too many writes to a page, it may no longer be a page table,
 * or we may be forking, in which case it is better to unmap the page.
 */
static bool detect_write_flooding(struct kvm_mmu_page *sp)
{
        /*
         * Skip write-flooding detection for sps whose level is 1: such sps
         * can become unsync, and then the guest page is not write-protected.
         */
        if (sp->role.level == PT_PAGE_TABLE_LEVEL)
                return false;

        return ++sp->write_flooding_count >= 3;
}

/*
 * Misaligned accesses are too much trouble to fix up; also, they usually
 * indicate a page is not used as a page table.
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
                                    int bytes)
{
        unsigned offset, pte_size, misaligned;

        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                 gpa, bytes, sp->role.word);

        offset = offset_in_page(gpa);
        pte_size = sp->role.cr4_pae ? 8 : 4;

        /*
         * Sometimes the OS writes only a single byte of a pte to update
         * status bits; for example, Linux's clear_bit() uses an andb
         * instruction.
         */
        if (!(offset & (pte_size - 1)) && bytes == 1)
                return false;

        misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
        misaligned |= bytes < 4;

        return misaligned;
}

static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
        unsigned page_offset, quadrant;
        u64 *spte;
        int level;

        page_offset = offset_in_page(gpa);
        level = sp->role.level;
        *nspte = 1;
        if (!sp->role.cr4_pae) {
                page_offset <<= 1;      /* 32->64 */
                /*
                 * A 32-bit pde maps 4MB while the shadow pdes map
                 * only 2MB.  So we need to double the offset again
                 * and zap two pdes instead of one.
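                 *
                 * For instance (a worked example of this quadrant
                 * arithmetic, added for illustration): a non-PAE guest
                 * write at byte offset 0x804 of its page-table page hits
                 * guest pte index 513 (0x804 / 4).  Doubling the offset
                 * gives 0x1008, so quadrant = 1 and the in-page offset
                 * becomes 0x008, i.e. spt[1] of the shadow page that
                 * covers guest entries 512-1023.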
                 */
                if (level == PT32_ROOT_LEVEL) {
                        page_offset &= ~7; /* kill rounding error */
                        page_offset <<= 1;
                        *nspte = 2;
                }
                quadrant = page_offset >> PAGE_SHIFT;
                page_offset &= ~PAGE_MASK;
                if (quadrant != sp->role.quadrant)
                        return NULL;
        }

        spte = &sp->spt[page_offset / sizeof(*spte)];
        return spte;
}

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *sp;
        LIST_HEAD(invalid_list);
        u64 entry, gentry, *spte;
        int npte;
        bool remote_flush, local_flush, zap_page;
        union kvm_mmu_page_role mask = { };

        mask.cr0_wp = 1;
        mask.cr4_pae = 1;
        mask.nxe = 1;
        mask.smep_andnot_wp = 1;
        mask.smap_andnot_wp = 1;

        /*
         * If there are no indirect shadow pages, no page is
         * write-protected, so we can simply exit.
         */
        if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
                return;

        zap_page = remote_flush = local_flush = false;

        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

        gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);

        /*
         * No need to check whether the memory cache allocation succeeded,
         * since pte prefetch is skipped if the cache does not hold enough
         * objects.
         */
        mmu_topup_memory_caches(vcpu);

        spin_lock(&vcpu->kvm->mmu_lock);
        ++vcpu->kvm->stat.mmu_pte_write;
        kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);

        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
                if (detect_write_misaligned(sp, gpa, bytes) ||
                    detect_write_flooding(sp)) {
                        zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
                                                               &invalid_list);
                        ++vcpu->kvm->stat.mmu_flooded;
                        continue;
                }

                spte = get_written_sptes(sp, gpa, &npte);
                if (!spte)
                        continue;

                local_flush = true;
                while (npte--) {
                        entry = *spte;
                        mmu_page_zap_pte(vcpu->kvm, sp, spte);
                        if (gentry &&
                            !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
                              & mask.word) && rmap_can_add(vcpu))
                                mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
                        if (need_remote_flush(entry, *spte))
                                remote_flush = true;
                        ++spte;
                }
        }
        mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
        kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
        spin_unlock(&vcpu->kvm->mmu_lock);
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa;
        int r;

        if (vcpu->arch.mmu.direct_map)
                return 0;

        gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);

        return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
        LIST_HEAD(invalid_list);

        if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
                return;

        while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
                if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
                        break;

                ++vcpu->kvm->stat.mmu_recycled;
        }
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}

static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
{
        if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
                return
vcpu_match_mmio_gpa(vcpu, addr); 4273 4274 return vcpu_match_mmio_gva(vcpu, addr); 4275} 4276 4277int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, 4278 void *insn, int insn_len) 4279{ 4280 int r, emulation_type = EMULTYPE_RETRY; 4281 enum emulation_result er; 4282 4283 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); 4284 if (r < 0) 4285 goto out; 4286 4287 if (!r) { 4288 r = 1; 4289 goto out; 4290 } 4291 4292 if (is_mmio_page_fault(vcpu, cr2)) 4293 emulation_type = 0; 4294 4295 er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); 4296 4297 switch (er) { 4298 case EMULATE_DONE: 4299 return 1; 4300 case EMULATE_USER_EXIT: 4301 ++vcpu->stat.mmio_exits; 4302 /* fall through */ 4303 case EMULATE_FAIL: 4304 return 0; 4305 default: 4306 BUG(); 4307 } 4308out: 4309 return r; 4310} 4311EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); 4312 4313void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) 4314{ 4315 vcpu->arch.mmu.invlpg(vcpu, gva); 4316 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 4317 ++vcpu->stat.invlpg; 4318} 4319EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); 4320 4321void kvm_enable_tdp(void) 4322{ 4323 tdp_enabled = true; 4324} 4325EXPORT_SYMBOL_GPL(kvm_enable_tdp); 4326 4327void kvm_disable_tdp(void) 4328{ 4329 tdp_enabled = false; 4330} 4331EXPORT_SYMBOL_GPL(kvm_disable_tdp); 4332 4333static void free_mmu_pages(struct kvm_vcpu *vcpu) 4334{ 4335 free_page((unsigned long)vcpu->arch.mmu.pae_root); 4336 if (vcpu->arch.mmu.lm_root != NULL) 4337 free_page((unsigned long)vcpu->arch.mmu.lm_root); 4338} 4339 4340static int alloc_mmu_pages(struct kvm_vcpu *vcpu) 4341{ 4342 struct page *page; 4343 int i; 4344 4345 /* 4346 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. 4347 * Therefore we need to allocate shadow page tables in the first 4348 * 4GB of memory, which happens to fit the DMA32 zone. 
         */
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!page)
                return -ENOMEM;

        vcpu->arch.mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

        return 0;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
        vcpu->arch.walk_mmu = &vcpu->arch.mmu;
        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        vcpu->arch.mmu.translate_gpa = translate_gpa;
        vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;

        return alloc_mmu_pages(vcpu);
}

void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
        MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));

        init_kvm_mmu(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot)
{
        gfn_t last_gfn;
        int i;
        bool flush = false;

        last_gfn = memslot->base_gfn + memslot->npages - 1;

        spin_lock(&kvm->mmu_lock);

        for (i = PT_PAGE_TABLE_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                unsigned long *rmapp;
                unsigned long last_index, index;

                rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
                last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);

                for (index = 0; index <= last_index; ++index, ++rmapp) {
                        if (*rmapp)
                                flush |= __rmap_write_protect(kvm, rmapp,
                                                              false);

                        if (need_resched() || spin_needbreak(&kvm->mmu_lock))
                                cond_resched_lock(&kvm->mmu_lock);
                }
        }

        spin_unlock(&kvm->mmu_lock);

        /*
         * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log()
         * both flush the TLB outside of mmu_lock, so they must be serialized
         * by kvm->slots_lock; otherwise a TLB flush could be missed.
         */
        lockdep_assert_held(&kvm->slots_lock);

        /*
         * We can flush all the TLBs outside of mmu_lock without TLB
         * corruption because we only change sptes from writable to
         * read-only, so the only case to worry about is a spte changing
         * from present to present (changing a spte from present to
         * nonpresent flushes all the TLBs immediately).  In other words,
         * the only path that matters is mmu_spte_update(), and it checks
         * SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE instead of
         * PT_WRITABLE_MASK, so it no longer depends on PT_WRITABLE_MASK.
         */
        if (flush)
                kvm_flush_remote_tlbs(kvm);
}

static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
                                         unsigned long *rmapp)
{
        u64 *sptep;
        struct rmap_iterator iter;
        int need_tlb_flush = 0;
        pfn_t pfn;
        struct kvm_mmu_page *sp;

        for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
                BUG_ON(!(*sptep & PT_PRESENT_MASK));

                sp = page_header(__pa(sptep));
                pfn = spte_to_pfn(*sptep);

                /*
                 * We cannot map huge pages through indirect shadow pages,
                 * which are found on the last rmap (level = 1) when not
                 * using tdp; such shadow pages are kept in sync with the
                 * guest page table, and a guest page table shadowed by a
                 * level-1 indirect sp uses 4K mappings.
                 */
                if (sp->role.direct &&
                    !kvm_is_reserved_pfn(pfn) &&
                    PageTransCompound(pfn_to_page(pfn))) {
                        drop_spte(kvm, sptep);
                        sptep = rmap_get_first(*rmapp, &iter);
                        need_tlb_flush = 1;
                } else
                        sptep = rmap_get_next(&iter);
        }

        return need_tlb_flush;
}

void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot)
{
        bool flush = false;
        unsigned long *rmapp;
        unsigned long last_index, index;

        spin_lock(&kvm->mmu_lock);

        rmapp = memslot->arch.rmap[0];
        last_index = gfn_to_index(memslot->base_gfn + memslot->npages - 1,
                                  memslot->base_gfn, PT_PAGE_TABLE_LEVEL);

        for (index = 0; index <= last_index; ++index, ++rmapp) {
                if (*rmapp)
                        flush |= kvm_mmu_zap_collapsible_spte(kvm, rmapp);

                if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                        if (flush) {
                                kvm_flush_remote_tlbs(kvm);
                                flush = false;
                        }
                        cond_resched_lock(&kvm->mmu_lock);
                }
        }

        if (flush)
                kvm_flush_remote_tlbs(kvm);

        spin_unlock(&kvm->mmu_lock);
}

void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot)
{
        gfn_t last_gfn;
        unsigned long *rmapp;
        unsigned long last_index, index;
        bool flush = false;

        last_gfn = memslot->base_gfn + memslot->npages - 1;

        spin_lock(&kvm->mmu_lock);

        rmapp = memslot->arch.rmap[PT_PAGE_TABLE_LEVEL - 1];
        last_index = gfn_to_index(last_gfn, memslot->base_gfn,
                                  PT_PAGE_TABLE_LEVEL);

        for (index = 0; index <= last_index; ++index, ++rmapp) {
                if (*rmapp)
                        flush |= __rmap_clear_dirty(kvm, rmapp);

                if (need_resched() || spin_needbreak(&kvm->mmu_lock))
                        cond_resched_lock(&kvm->mmu_lock);
        }

        spin_unlock(&kvm->mmu_lock);

        lockdep_assert_held(&kvm->slots_lock);

        /*
         * It is also safe to flush TLBs outside of mmu_lock here: this
         * function is currently only used for dirty logging, and flushing
         * outside of mmu_lock still guarantees that no dirty pages are
         * lost from the dirty_bitmap.
         */
        if (flush)
                kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);

void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
                                                struct kvm_memory_slot *memslot)
{
        gfn_t last_gfn;
        int i;
        bool flush = false;

        last_gfn = memslot->base_gfn + memslot->npages - 1;

        spin_lock(&kvm->mmu_lock);

        for (i = PT_PAGE_TABLE_LEVEL + 1; /* skip rmap for 4K page */
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                unsigned long *rmapp;
                unsigned long last_index, index;

                rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
                last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);

                for (index = 0; index <= last_index; ++index, ++rmapp) {
                        if (*rmapp)
                                flush |= __rmap_write_protect(kvm, rmapp,
                                                              false);

                        if (need_resched() || spin_needbreak(&kvm->mmu_lock))
                                cond_resched_lock(&kvm->mmu_lock);
                }
        }
        spin_unlock(&kvm->mmu_lock);

        /* see kvm_mmu_slot_remove_write_access */
        lockdep_assert_held(&kvm->slots_lock);

        if (flush)
                kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);

void kvm_mmu_slot_set_dirty(struct kvm *kvm,
                            struct kvm_memory_slot *memslot)
{
        gfn_t last_gfn;
        int i;
        bool flush = false;

        last_gfn = memslot->base_gfn + memslot->npages - 1;

        spin_lock(&kvm->mmu_lock);

        for (i = PT_PAGE_TABLE_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                unsigned long *rmapp;
                unsigned long last_index, index;

                rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
                last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);

                for (index = 0; index <= last_index; ++index, ++rmapp) {
                        if (*rmapp)
                                flush |= __rmap_set_dirty(kvm, rmapp);

                        if (need_resched() || spin_needbreak(&kvm->mmu_lock))
                                cond_resched_lock(&kvm->mmu_lock);
                }
        }

        spin_unlock(&kvm->mmu_lock);

        lockdep_assert_held(&kvm->slots_lock);

        /* see kvm_mmu_slot_leaf_clear_dirty */
        if (flush)
                kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);

#define BATCH_ZAP_PAGES 10
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
        struct kvm_mmu_page *sp, *node;
        int batch = 0;

restart:
        list_for_each_entry_safe_reverse(sp, node,
                                         &kvm->arch.active_mmu_pages, link) {
                int ret;

                /*
                 * No obsolete pages precede newly created pages, since
                 * active_mmu_pages is a FIFO list.
                 */
                if (!is_obsolete_sp(kvm, sp))
                        break;

                /*
                 * Since we walk the list in reverse and invalid pages are
                 * moved to its head, skipping invalid pages avoids walking
                 * the list forever.
                 */
                if (sp->role.invalid)
                        continue;

                /*
                 * No need to flush the TLB, since we only zap sps with an
                 * invalid generation number.
                 */
                if (batch >= BATCH_ZAP_PAGES &&
                    cond_resched_lock(&kvm->mmu_lock)) {
                        batch = 0;
                        goto restart;
                }

                ret = kvm_mmu_prepare_zap_page(kvm, sp,
                                               &kvm->arch.zapped_obsolete_pages);
                batch += ret;

                if (ret)
                        goto restart;
        }

        /*
         * We should flush the TLB before freeing the page tables, since
         * lockless walkers may still be using them.
         */
        kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
}

/*
 * Fast-invalidate all shadow pages, using a lock-break technique to zap
 * the obsolete pages.
 *
 * This is required when a memslot is being deleted or the VM is being
 * destroyed; in these cases we must ensure that, once the function
 * returns, the KVM MMU no longer uses any resource of the slot being
 * deleted (or of any slot).
 */
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
{
        spin_lock(&kvm->mmu_lock);
        trace_kvm_mmu_invalidate_zap_all_pages(kvm);
        kvm->arch.mmu_valid_gen++;

        /*
         * Notify all vcpus to reload their shadow page tables and flush
         * their TLBs.  All vcpus will then switch to a new shadow page
         * table carrying the new mmu_valid_gen.
         *
         * Note: this must be done under the protection of mmu_lock;
         * otherwise a vcpu could purge a shadow page but miss the TLB
         * flush.
         */
        kvm_reload_remote_mmus(kvm);

        kvm_zap_obsolete_pages(kvm);
        spin_unlock(&kvm->mmu_lock);
}

static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{
        return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}

void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
{
        /*
         * The very rare case: if the mmio generation number has wrapped
         * around, zap all shadow pages.
         */
        if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
                printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
                kvm_mmu_invalidate_zap_all_pages(kvm);
        }
}

static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct kvm *kvm;
        int nr_to_scan = sc->nr_to_scan;
        unsigned long freed = 0;

        spin_lock(&kvm_lock);

        list_for_each_entry(kvm, &vm_list, vm_list) {
                int idx;
                LIST_HEAD(invalid_list);

                /*
                 * Never scan more than sc->nr_to_scan VM instances.
                 * In practice this condition is never hit, since we do not
                 * try to shrink more than one VM and it is very unlikely to
                 * see !n_used_mmu_pages that many times.
                 */
                if (!nr_to_scan--)
                        break;
                /*
                 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
                 * here.  We may skip a VM instance erroneously, but we do
                 * not want to shrink a VM that has only started to populate
                 * its MMU anyway.
                 */
                if (!kvm->arch.n_used_mmu_pages &&
                    !kvm_has_zapped_obsolete_pages(kvm))
                        continue;

                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);

                if (kvm_has_zapped_obsolete_pages(kvm)) {
                        kvm_mmu_commit_zap_page(kvm,
                                                &kvm->arch.zapped_obsolete_pages);
                        goto unlock;
                }

                if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
                        freed++;
                kvm_mmu_commit_zap_page(kvm, &invalid_list);

unlock:
                spin_unlock(&kvm->mmu_lock);
                srcu_read_unlock(&kvm->srcu, idx);

                /*
                 * unfair on small ones
                 * per-vm shrinkers cry out
                 * sadness comes quickly
                 */
                list_move_tail(&kvm->vm_list, &vm_list);
                break;
        }

        spin_unlock(&kvm_lock);
        return freed;
}

static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
        .count_objects = mmu_shrink_count,
        .scan_objects = mmu_shrink_scan,
        .seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
        if (pte_list_desc_cache)
                kmem_cache_destroy(pte_list_desc_cache);
        if (mmu_page_header_cache)
                kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
        pte_list_desc_cache = kmem_cache_create("pte_list_desc",
                                                sizeof(struct pte_list_desc),
                                                0, 0, NULL);
        if (!pte_list_desc_cache)
                goto nomem;

        mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
                                                  sizeof(struct kvm_mmu_page),
                                                  0, 0, NULL);
        if (!mmu_page_header_cache)
                goto nomem;

        if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
                goto nomem;

        register_shrinker(&mmu_shrinker);

        return 0;

nomem:
        mmu_destroy_caches();
        return -ENOMEM;
}

/*
 * Calculate the number of mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
        unsigned int nr_mmu_pages;
        unsigned int nr_pages = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots)
                nr_pages += memslot->npages;

        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
        nr_mmu_pages = max(nr_mmu_pages,
                           (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

        return nr_mmu_pages;
}

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
        struct kvm_shadow_walk_iterator iterator;
        u64 spte;
        int nr_sptes = 0;

        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return nr_sptes;

        walk_shadow_page_lockless_begin(vcpu);
        for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
                sptes[iterator.level-1] = spte;
                nr_sptes++;
                if (!is_shadow_present_pte(spte))
                        break;
        }
        walk_shadow_page_lockless_end(vcpu);

        return nr_sptes;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_mmu_unload(vcpu);
        free_mmu_pages(vcpu);
        mmu_free_memory_caches(vcpu);
}

void kvm_mmu_module_exit(void)
{
        mmu_destroy_caches();
        percpu_counter_destroy(&kvm_total_used_mmu_pages);
        unregister_shrinker(&mmu_shrinker);
        mmu_audit_disable();
}
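
/*
 * A minimal usage sketch for kvm_mmu_get_spte_hierarchy(), assuming a
 * debugging caller similar to an EPT-misconfiguration handler.  The helper
 * name dump_spte_hierarchy() is hypothetical, and the block is not built
 * (hence the "#if 0").  The exported function fills sptes[level - 1] for
 * every level it walks, starting from the shadow root and stopping at the
 * first non-present entry.
 */
#if 0
static void dump_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr)
{
        u64 sptes[4];
        int root = vcpu->arch.mmu.shadow_root_level;
        int nr_sptes, level;

        nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, addr, sptes);

        /* sptes[root - 1] is the root entry; lower indexes follow the walk. */
        for (level = root; level > root - nr_sptes; --level)
                pr_info("addr 0x%llx level %d: spte 0x%llx (%spresent)\n",
                        addr, level, sptes[level - 1],
                        is_shadow_present_pte(sptes[level - 1]) ? "" : "not ");
}
#endif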