Lines Matching refs:level

40 #define PT_INDEX(addr, level) PT64_INDEX(addr, level) argument
60 #define PT_INDEX(addr, level) PT32_INDEX(addr, level) argument
75 #define PT_INDEX(addr, level) PT64_INDEX(addr, level) argument
95 int level; member
204 unsigned level, index; in FNAME() local
214 for (level = walker->max_level; level >= walker->level; --level) { in FNAME()
215 pte = orig_pte = walker->ptes[level - 1]; in FNAME()
216 table_gfn = walker->table_gfn[level - 1]; in FNAME()
217 ptep_user = walker->ptep_user[level - 1]; in FNAME()
223 if (level == walker->level && write_fault && in FNAME()
244 if (unlikely(!walker->pte_writable[level - 1])) in FNAME()
252 walker->ptes[level - 1] = pte; in FNAME()
280 walker->level = mmu->root_level; in FNAME()
284 if (walker->level == PT32E_ROOT_LEVEL) { in FNAME()
286 trace_kvm_mmu_paging_element(pte, walker->level); in FNAME()
289 --walker->level; in FNAME()
292 walker->max_level = walker->level; in FNAME()
297 ++walker->level; in FNAME()
304 --walker->level; in FNAME()
306 index = PT_INDEX(addr, walker->level); in FNAME()
311 walker->table_gfn[walker->level - 1] = table_gfn; in FNAME()
312 walker->pte_gpa[walker->level - 1] = pte_gpa; in FNAME()
334 &walker->pte_writable[walker->level - 1]); in FNAME()
341 walker->ptep_user[walker->level - 1] = ptep_user; in FNAME()
343 trace_kvm_mmu_paging_element(pte, walker->level); in FNAME()
348 if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) { in FNAME()
356 walker->ptes[walker->level - 1] = pte; in FNAME()
357 } while (!is_last_gpte(mmu, walker->level, pte)); in FNAME()
364 gfn = gpte_to_gfn_lvl(pte, walker->level); in FNAME()
365 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; in FNAME()
367 if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36()) in FNAME()
493 struct guest_walker *gw, int level) in FNAME()
496 gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1]; in FNAME()
500 if (level == PT_PAGE_TABLE_LEVEL) { in FNAME()
512 return r || curr_pte != gw->ptes[level - 1]; in FNAME()
525 if (sp->role.level > PT_PAGE_TABLE_LEVEL) in FNAME()
579 shadow_walk_okay(&it) && it.level > gw->level; in FNAME()
588 table_gfn = gw->table_gfn[it.level - 2]; in FNAME()
589 sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1, in FNAME()
597 if (FNAME(gpte_changed)(vcpu, gw, it.level - 1)) in FNAME()
605 shadow_walk_okay(&it) && it.level > hlevel; in FNAME()
617 direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); in FNAME()
619 sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1, in FNAME()
626 it.level, gw->gfn, pfn, prefault, map_writable); in FNAME()
660 int level; in FNAME() local
661 gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1); in FNAME()
668 for (level = walker->level; level <= walker->max_level; level++) { in FNAME()
669 gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1]; in FNAME()
700 int level = PT_PAGE_TABLE_LEVEL; in FNAME() local
745 if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) { in FNAME()
746 level = mapping_level(vcpu, walker.gfn, &force_pt_level); in FNAME()
748 level = min(walker.level, level); in FNAME()
749 walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1); in FNAME()
792 transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); in FNAME()
794 level, pfn, map_writable, prefault); in FNAME()
811 WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL); in FNAME()
823 int level; in FNAME() local
841 level = iterator.level; in FNAME()
845 if (is_last_spte(*sptep, level)) { in FNAME()
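
The references above are dominated by the guest page-table walker in paging_tmpl.h, where the paging levels are 1-based but the per-level bookkeeping arrays (ptes[], table_gfn[], pte_gpa[], ptep_user[], pte_writable[]) are 0-based, hence the recurring level - 1 indexing. The standalone sketch below only models that indexing convention; toy_walker, toy_walk and all constants are invented for illustration and are not kernel APIs, and the walk itself is assumed to be a plain 4-level descent rather than the real FNAME(walk_addr_generic) logic.

/*
 * Minimal standalone sketch (not kernel code): one entry per paging level,
 * indexed with level - 1, mirroring walker->ptes[level - 1] and
 * walker->table_gfn[level - 1] in the listing above.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_LEVELS 5                      /* room for up to 5-level paging */

struct toy_walker {
	int max_level;                    /* level the walk starts at */
	int level;                        /* level the walk stops at  */
	uint64_t ptes[MAX_LEVELS];        /* pte recorded at each level   */
	uint64_t table_gfn[MAX_LEVELS];   /* table gfn recorded per level */
};

/* Descend from max_level toward level 1, recording one entry per level. */
static void toy_walk(struct toy_walker *w, int max_level)
{
	w->max_level = max_level;
	w->level = max_level + 1;

	do {
		--w->level;                             /* step one level down      */
		int idx = w->level - 1;                 /* 1-based level, 0-based array */

		w->table_gfn[idx] = 0x1000u * (unsigned)w->level;     /* fake table gfn */
		w->ptes[idx]      = 0xabcd0000u | (unsigned)w->level; /* fake pte value */
	} while (w->level > 1);                         /* stop at the leaf level   */
}

int main(void)
{
	struct toy_walker w = { 0 };

	toy_walk(&w, 4);                                /* e.g. 4-level long mode   */

	/* Replay the recorded levels the way lines 214-216 above walk them. */
	for (int level = w.max_level; level >= w.level; --level)
		printf("level %d: table_gfn=%#llx pte=%#llx\n", level,
		       (unsigned long long)w.table_gfn[level - 1],
		       (unsigned long long)w.ptes[level - 1]);
	return 0;
}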