Lines matching refs: level (arch/x86/kvm/mmu.c)
85 #define PT64_LEVEL_SHIFT(level) \ argument
86 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
88 #define PT64_INDEX(address, level)\ argument
89 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
94 #define PT32_LEVEL_SHIFT(level) \ argument
95 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
97 #define PT32_LVL_OFFSET_MASK(level) \ argument
98 (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) * PT32_LEVEL_BITS))) - 1))
101 #define PT32_INDEX(address, level)\ argument
102 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
108 #define PT64_LVL_ADDR_MASK(level) \ argument
109 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * PT64_LEVEL_BITS))) - 1))
111 #define PT64_LVL_OFFSET_MASK(level) \ argument
112 (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) * PT64_LEVEL_BITS))) - 1))
118 #define PT32_LVL_ADDR_MASK(level) \ argument
119 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * PT32_LEVEL_BITS))) - 1))
138 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) argument
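
Taken together, these macros peel a guest address into per-level table indices: PT64_LEVEL_SHIFT() locates each 9-bit index group above the 12-bit page offset, and PT64_INDEX() masks it out. A minimal standalone sketch of the decomposition (plain user-space C; PAGE_SHIFT = 12 and PT64_LEVEL_BITS = 9 as in the kernel, the example address is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT      12
    #define PT64_LEVEL_BITS 9
    #define PT64_LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * PT64_LEVEL_BITS)
    #define PT64_INDEX(address, level) \
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

    int main(void)
    {
        uint64_t addr = 0x00007f1234567000ULL;  /* arbitrary example address */

        /* level 4 = PML4, 3 = PDPT, 2 = PD, 1 = PT */
        for (int level = 4; level >= 1; --level)
            printf("level %d index: %llu\n", level,
                   (unsigned long long)PT64_INDEX(addr, level));
        return 0;
    }
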
152 int level; member
319 static int is_last_spte(u64 pte, int level) in is_last_spte() argument
321 if (level == PT_PAGE_TABLE_LEVEL) in is_last_spte()
759 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
776 int level) in lpage_info_slot() argument
780 idx = gfn_to_index(gfn, slot->base_gfn, level); in lpage_info_slot()
781 return &slot->arch.lpage_info[level - 2][idx]; in lpage_info_slot()
821 static int __has_wrprotected_page(gfn_t gfn, int level, in __has_wrprotected_page() argument
827 linfo = lpage_info_slot(gfn, slot, level); in __has_wrprotected_page()
834 static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level) in has_wrprotected_page() argument
839 return __has_wrprotected_page(gfn, level, slot); in has_wrprotected_page()
886 int host_level, level, max_level; in mapping_level() local
904 for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level) in mapping_level()
905 if (__has_wrprotected_page(large_gfn, level, slot)) in mapping_level()
908 return level - 1; in mapping_level()
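
mapping_level() settles one step below the first level whose range around large_gfn contains a write-protected page, which is why it returns level - 1 once the loop breaks. A standalone sketch of that control flow (the wrprotected() stub stands in for __has_wrprotected_page(); names other than the level constants are mine):

    #include <stdbool.h>
    #include <stdio.h>

    #define PT_DIRECTORY_LEVEL 2

    /* stand-in for __has_wrprotected_page(): pretend the 1 GiB range
     * (level 3) contains a write-protected page */
    static bool wrprotected(int level)
    {
        return level >= 3;
    }

    static int pick_mapping_level(int max_level)
    {
        int level;

        for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
            if (wrprotected(level))
                break;
        return level - 1;  /* largest level with no write-protected page */
    }

    int main(void)
    {
        printf("chosen level: %d\n", pick_mapping_level(3));  /* prints 2 */
        return 0;
    }
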
1035 static unsigned long *__gfn_to_rmap(gfn_t gfn, int level, in __gfn_to_rmap() argument
1040 idx = gfn_to_index(gfn, slot->base_gfn, level); in __gfn_to_rmap()
1041 return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx]; in __gfn_to_rmap()
1054 return __gfn_to_rmap(gfn, sp->role.level, slot); in gfn_to_rmap()
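
lpage_info_slot() and __gfn_to_rmap() index per-level arrays with the same gfn_to_index() helper, which scales the offset from the slot base down by the level's huge-page shift; only the first array dimension differs (rmap starts at the 4 KiB level, hence [level - PT_PAGE_TABLE_LEVEL], while lpage_info starts at the 2 MiB level, hence [level - 2]). A standalone sketch of the index arithmetic (gfn_to_index() reproduced here under the assumption of x86's 9-bits-per-level layout; treat it as illustrative):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;

    /* x86: each level covers 9 more gfn bits than the one below it */
    #define KVM_HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)

    static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
    }

    int main(void)
    {
        gfn_t base = 0x1000, gfn = 0x1407;

        /* level 1: one slot per page; level 2: one per 512 pages; ... */
        for (int level = 1; level <= 3; ++level)
            printf("level %d index: %llu\n", level,
                   (unsigned long long)gfn_to_index(gfn, base, level));
        return 0;
    }
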
1164 WARN_ON(page_header(__pa(sptep))->role.level == PT_PAGE_TABLE_LEVEL); in __drop_large_spte()
1377 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_unmap_rmapp() argument
1384 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_set_pte_rmapp() argument
1400 sptep, *sptep, gfn, level); in kvm_set_pte_rmapp()
1437 int level; member
1444 rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level) in rmap_walk_init_level() argument
1446 iterator->level = level; in rmap_walk_init_level()
1448 iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot); in rmap_walk_init_level()
1449 iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level, in rmap_walk_init_level()
1475 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level)); in slot_rmap_walk_next()
1479 if (++iterator->level > iterator->end_level) { in slot_rmap_walk_next()
1484 rmap_walk_init_level(iterator, iterator->level); in slot_rmap_walk_next()
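
The iterator performs a two-dimensional sweep: gfns at one level's stride, then the next level after rmap_walk_init_level() re-derives the rmap bounds. A standalone analogue of just that loop shape (the real iterator also carries slot and rmap pointers; this keeps only the gfn/level arithmetic):

    #include <stdio.h>

    #define KVM_HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)

    int main(void)
    {
        unsigned long start_gfn = 0, end_gfn = 1024;

        /* outer sweep over levels, inner sweep over gfns at that level's
         * stride: the shape of slot_rmap_walk_init/_okay/_next */
        for (int level = 1; level <= 2; ++level) {
            unsigned long stride = 1UL << KVM_HPAGE_GFN_SHIFT(level);

            for (unsigned long gfn = start_gfn; gfn <= end_gfn; gfn += stride)
                printf("level %d gfn %lu\n", level, gfn);
        }
        return 0;
    }
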
1502 int level, in kvm_handle_hva_range() argument
1534 iterator.gfn, iterator.level, data); in kvm_handle_hva_range()
1545 gfn_t gfn, int level, in kvm_handle_hva() argument
1567 struct kvm_memory_slot *slot, gfn_t gfn, int level, in kvm_age_rmapp() argument
1583 trace_kvm_age_page(gfn, level, slot, young); in kvm_age_rmapp()
1589 int level, unsigned long data) in kvm_test_age_rmapp() argument
1623 kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0); in rmap_recycle()
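
kvm_handle_hva_range() funnels kvm_unmap_rmapp(), kvm_set_pte_rmapp(), kvm_age_rmapp() and kvm_test_age_rmapp() through one handler pointer, so all of them share the shape visible in the signatures above. A non-compilable sketch of that callback type as those signatures suggest it (the typedef name is mine, not the kernel's):

    /* one shape for every *_rmapp handler: called per rmap bucket with
     * the gfn and level the bucket covers */
    typedef int (*rmapp_handler_t)(struct kvm *kvm, unsigned long *rmapp,
                                   struct kvm_memory_slot *slot,
                                   gfn_t gfn, int level, unsigned long data);
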
1953 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); in kvm_sync_pages()
1988 if (sp->role.level == PT_PAGE_TABLE_LEVEL) { in mmu_pages_next()
1993 parents->parent[sp->role.level-2] = sp; in mmu_pages_next()
1994 parents->idx[sp->role.level-1] = pvec->page[n].idx; in mmu_pages_next()
2003 unsigned int level = 0; in mmu_pages_clear_parents() local
2006 unsigned int idx = parents->idx[level]; in mmu_pages_clear_parents()
2008 sp = parents->parent[level]; in mmu_pages_clear_parents()
2015 level++; in mmu_pages_clear_parents()
2016 } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children); in mmu_pages_clear_parents()
2023 parents->parent[parent->role.level-1] = NULL; in kvm_mmu_pages_init()
2084 unsigned level, in kvm_mmu_get_page() argument
2095 role.level = level; in kvm_mmu_get_page()
2102 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); in kvm_mmu_get_page()
2103 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; in kvm_mmu_get_page()
2141 if (level > PT_PAGE_TABLE_LEVEL && need_sync) in kvm_mmu_get_page()
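
The quadrant math at lines 2102-2103 records which slice of a wider guest table a narrower shadow table maps: with PT32_PT_BITS = 10 and PT64_PT_BITS = 9, a 1024-entry 32-bit guest table needs two 512-entry shadow tables at level 1 and four at level 2, and quadrant selects among them. A standalone check of that arithmetic (user-space C, constants copied from the macros above):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT   12
    #define PT64_PT_BITS 9
    #define PT32_PT_BITS 10

    static unsigned quadrant(uint64_t gaddr, int level)
    {
        unsigned q = gaddr >> (PAGE_SHIFT + PT64_PT_BITS * level);

        return q & ((1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1);
    }

    int main(void)
    {
        /* level 1: 2 quadrants (1 spare guest index bit);
         * level 2: 4 quadrants (2 spare bits) */
        for (int level = 1; level <= 2; ++level)
            printf("level %d quadrant of 0xc0000000: %u\n",
                   level, quadrant(0xc0000000ULL, level));
        return 0;
    }
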
2157 iterator->level = vcpu->arch.mmu.shadow_root_level; in shadow_walk_init()
2159 if (iterator->level == PT64_ROOT_LEVEL && vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && !vcpu->arch.mmu.direct_map) in shadow_walk_init()
2162 --iterator->level; in shadow_walk_init()
2164 if (iterator->level == PT32E_ROOT_LEVEL) { in shadow_walk_init()
2168 --iterator->level; in shadow_walk_init()
2170 iterator->level = 0; in shadow_walk_init()
2176 if (iterator->level < PT_PAGE_TABLE_LEVEL) in shadow_walk_okay()
2179 iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); in shadow_walk_okay()
2187 if (is_last_spte(spte, iterator->level)) { in __shadow_walk_next()
2188 iterator->level = 0; in __shadow_walk_next()
2193 --iterator->level; in __shadow_walk_next()
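
shadow_walk_init(), shadow_walk_okay() and __shadow_walk_next() implement a top-down walk of the shadow table for one address, counting level down from shadow_root_level; in mmu.c the triple is normally driven through the for_each_shadow_entry() macro. A hedged usage sketch (kernel context, not standalone; the loop body is illustrative):

    struct kvm_shadow_walk_iterator it;

    for_each_shadow_entry(vcpu, addr, it) {
        /* it.sptep points at the entry covering addr at it.level */
        if (is_last_spte(*it.sptep, it.level))
            break;  /* reached the leaf mapping */
    }
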
2247 if (is_last_spte(pte, sp->role.level)) { in mmu_page_zap_pte()
2295 if (parent->role.level == PT_PAGE_TABLE_LEVEL) in mmu_zap_unsync_children()
2453 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); in kvm_unsync_pages()
2468 if (s->role.level != PT_PAGE_TABLE_LEVEL) in mmu_need_write_protect()
2488 unsigned pte_access, int level, in set_spte() argument
2510 if (level > PT_PAGE_TABLE_LEVEL) in set_spte()
2531 if (level > PT_PAGE_TABLE_LEVEL && in set_spte()
2532 has_wrprotected_page(vcpu, gfn, level)) in set_spte()
2569 int level, gfn_t gfn, pfn_t pfn, bool speculative, in mmu_set_spte() argument
2583 if (level > PT_PAGE_TABLE_LEVEL && !is_large_pte(*sptep)) { in mmu_set_spte()
2600 if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative, in mmu_set_spte()
2662 sp->role.level, gfn, page_to_pfn(pages[i]), in direct_pte_prefetch_many()
2705 if (sp->role.level > PT_PAGE_TABLE_LEVEL) in direct_pte_prefetch()
2712 int map_writable, int level, gfn_t gfn, pfn_t pfn, in __direct_map() argument
2724 if (iterator.level == level) { in __direct_map()
2726 write, &emulate, level, gfn, pfn, in __direct_map()
2737 base_addr &= PT64_LVL_ADDR_MASK(iterator.level); in __direct_map()
2740 iterator.level - 1, in __direct_map()
2786 int level = *levelp; in transparent_hugepage_adjust() local
2795 level == PT_PAGE_TABLE_LEVEL && in transparent_hugepage_adjust()
2808 *levelp = level = PT_DIRECTORY_LEVEL; in transparent_hugepage_adjust()
2809 mask = KVM_PAGES_PER_HPAGE(level) - 1; in transparent_hugepage_adjust()
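
The adjustment at lines 2808-2809 retargets the fault at the 2 MiB level: with mask = KVM_PAGES_PER_HPAGE(2) - 1 = 511, both gfn and pfn are rounded down to a 512-page boundary, which is safe because a transparent huge page guarantees gfn and pfn agree in their low 9 bits. A standalone check of the rounding (constants assume x86's 512-entries-per-table layout; values are invented):

    #include <stdio.h>
    #include <stdint.h>

    #define KVM_PAGES_PER_HPAGE(level) (1UL << (((level) - 1) * 9))

    int main(void)
    {
        uint64_t gfn = 0x12345, pfn = 0xabd45;  /* low 9 bits agree: 0x145 */
        uint64_t mask = KVM_PAGES_PER_HPAGE(2) - 1;  /* 511 */

        if ((gfn & mask) == (pfn & mask)) {  /* the VM_BUG_ON() condition */
            gfn &= ~mask;
            pfn &= ~mask;
        }
        printf("gfn 0x%llx pfn 0x%llx\n",
               (unsigned long long)gfn, (unsigned long long)pfn);
        return 0;
    }
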
2899 static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, in fast_page_fault() argument
2915 if (!is_shadow_present_pte(spte) || iterator.level < level) in fast_page_fault()
2928 if (!is_last_spte(spte, sp->role.level)) in fast_page_fault()
2959 if (sp->role.level > PT_PAGE_TABLE_LEVEL) in fast_page_fault()
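
fast_page_fault() only handles write-protection faults on last-level sptes and fixes them without mmu_lock by flipping the writable bit with an atomic compare-and-swap, retrying or falling back if the spte changed underneath. A standalone sketch of just that cmpxchg step (GCC builtins stand in for the kernel's cmpxchg64(); PT_WRITABLE_MASK is the architectural W bit):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PT_WRITABLE_MASK (1ULL << 1)

    /* returns true if we won the race and made the spte writable */
    static bool fast_fix_spte(uint64_t *sptep, uint64_t old_spte)
    {
        return __sync_bool_compare_and_swap(sptep, old_spte,
                                            old_spte | PT_WRITABLE_MASK);
    }

    int main(void)
    {
        uint64_t spte = 0x8000000000000ULL;  /* fake read-only spte */
        bool fixed = fast_fix_spte(&spte, spte);

        printf("fixed: %d, spte now 0x%llx\n",
               fixed, (unsigned long long)spte);
        return 0;
    }
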
2984 int level; in nonpaging_map() local
2990 level = mapping_level(vcpu, gfn, &force_pt_level); in nonpaging_map()
2997 if (level > PT_DIRECTORY_LEVEL) in nonpaging_map()
2998 level = PT_DIRECTORY_LEVEL; in nonpaging_map()
3000 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); in nonpaging_map()
3003 if (fast_page_fault(vcpu, v, level, error_code)) in nonpaging_map()
3020 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in nonpaging_map()
3021 r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, in nonpaging_map()
3290 __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) in __is_rsvd_bits_set() argument
3294 return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) | ((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0); in __is_rsvd_bits_set()
3298 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) in is_rsvd_bits_set() argument
3300 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level); in is_rsvd_bits_set()
3303 static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) in is_shadow_zero_bits_set() argument
3305 return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); in is_shadow_zero_bits_set()
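
__is_rsvd_bits_set() keys its mask table on bit 7 of the pte (the PS bit at the levels where it matters) and on the level, so one table validates reserved bits for both leaf and non-leaf entries; the real line 3294 also ORs in a bad_mt_xwr memtype check, omitted here. A standalone sketch of the lookup with a toy mask table (the mask values are invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rsvd_bits_validate {
        uint64_t rsvd_bits_mask[2][4];  /* [pte bit 7][level - 1] */
    };

    static bool is_rsvd_bits_set(const struct rsvd_bits_validate *c,
                                 uint64_t pte, int level)
    {
        int bit7 = (pte >> 7) & 1;

        return pte & c->rsvd_bits_mask[bit7][level - 1];
    }

    int main(void)
    {
        /* toy table: pretend bits 51+ are reserved everywhere, and bits
         * 20:13 are reserved in a level-2 large page (bit 7 set) */
        struct rsvd_bits_validate c = { .rsvd_bits_mask = {
            [0] = { [0 ... 3] = ~0ULL << 51 },
            [1] = { [1] = (~0ULL << 51) | 0x1fe000ULL },
        } };

        printf("%d\n", is_rsvd_bits_set(&c, (1ULL << 7) | (1ULL << 13), 2));
        return 0;
    }
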
3331 leaf = root = iterator.level; in walk_shadow_page_get_mmio_spte()
3343 iterator.level); in walk_shadow_page_get_mmio_spte()
3472 check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level) in check_hugepage_cache_consistency() argument
3474 int page_num = KVM_PAGES_PER_HPAGE(level); in check_hugepage_cache_consistency()
3486 int level; in tdp_page_fault() local
3508 level = mapping_level(vcpu, gfn, &force_pt_level); in tdp_page_fault()
3510 if (level > PT_DIRECTORY_LEVEL && in tdp_page_fault()
3511 !check_hugepage_cache_consistency(vcpu, gfn, level)) in tdp_page_fault()
3512 level = PT_DIRECTORY_LEVEL; in tdp_page_fault()
3513 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); in tdp_page_fault()
3516 if (fast_page_fault(vcpu, gpa, level, error_code)) in tdp_page_fault()
3533 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); in tdp_page_fault()
3535 level, gfn, pfn, prefault); in tdp_page_fault()
3594 static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte) in is_last_gpte() argument
3598 index = level - 1; in is_last_gpte()
3619 int maxphyaddr, int level, bool nx, bool gbpages, in __reset_rsvds_bits_mask() argument
3640 switch (level) { in __reset_rsvds_bits_mask()
3877 unsigned level, root_level = mmu->root_level; in update_last_pte_bitmap() local
3884 for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level) { in update_last_pte_bitmap()
3885 if (level <= PT_PDPE_LEVEL && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu))) in update_last_pte_bitmap()
3887 map |= 1 << (ps_set_index | (level - 1)); in update_last_pte_bitmap()
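
update_last_pte_bitmap() precomputes an 8-bit map consumed by is_last_gpte(): the index is (PS bit << 2) | (level - 1), so a single bitmap lookup answers whether a gpte terminates the walk at a given level. A standalone sketch of the encode/decode pair (ps_set_index = 1 << 2 as in the kernel; the table below assumes a guest allowing large pages at levels 2 and 3):

    #include <stdbool.h>
    #include <stdio.h>

    #define PS_SET_INDEX (1 << 2)  /* bit 2 of the index mirrors the PS bit */

    static bool is_last(unsigned char map, int level, bool ps)
    {
        unsigned index = (level - 1) | (ps ? PS_SET_INDEX : 0);

        return map & (1 << index);
    }

    int main(void)
    {
        /* level 1 always terminates; with PS set, levels 2 and 3 do too */
        unsigned char map = 1 | (1 << PS_SET_INDEX);

        for (int level = 2; level <= 3; ++level)
            map |= 1 << (PS_SET_INDEX | (level - 1));

        printf("%d %d %d\n",
               is_last(map, 1, false),   /* 1: a 4 KiB pte */
               is_last(map, 2, true),    /* 1: a 2 MiB pde */
               is_last(map, 2, false));  /* 0: walk continues */
        return 0;
    }
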
3894 int level) in paging64_init_context_common() argument
3897 context->root_level = level; in paging64_init_context_common()
3909 context->shadow_root_level = level; in paging64_init_context_common()
4139 if (sp->role.level != PT_PAGE_TABLE_LEVEL) { in mmu_pte_write_new_pte()
4219 if (sp->role.level == PT_PAGE_TABLE_LEVEL) in detect_write_flooding()
4257 int level; in get_written_sptes() local
4260 level = sp->role.level; in get_written_sptes()
4269 if (level == PT32_ROOT_LEVEL) { in get_written_sptes()