Lines Matching refs:level

40 #define PT_INDEX(addr, level) PT64_INDEX(addr, level) argument
60 #define PT_INDEX(addr, level) PT32_INDEX(addr, level) argument
75 #define PT_INDEX(addr, level) PT64_INDEX(addr, level) argument
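
The three PT_INDEX matches (40, 60, 75) are the per-mode wrappers the walker uses to turn a guest address and a paging level into an index into the table at that level: 64-bit and EPT tables use 9 index bits per level, legacy 32-bit non-PAE tables use 10. A minimal standalone sketch of that arithmetic, assuming 4 KiB pages; the macros below are written from scratch for illustration, not copied from the kernel.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT       12
#define PT64_LEVEL_BITS   9    /* 512 entries per 64-bit/EPT page table */
#define PT32_LEVEL_BITS  10    /* 1024 entries per 32-bit non-PAE page table */

/* Shift that isolates the index bits for a given level (1 = lowest). */
#define PT64_LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * PT64_LEVEL_BITS)
#define PT32_LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * PT32_LEVEL_BITS)

/* Index of 'addr' into the page table at 'level'. */
#define PT64_INDEX(addr, level) \
	(((addr) >> PT64_LEVEL_SHIFT(level)) & ((1ULL << PT64_LEVEL_BITS) - 1))
#define PT32_INDEX(addr, level) \
	(((addr) >> PT32_LEVEL_SHIFT(level)) & ((1ULL << PT32_LEVEL_BITS) - 1))

int main(void)
{
	uint64_t addr = 0x00007f1234567000ULL;
	int level;

	for (level = 4; level >= 1; --level)
		printf("64-bit level %d index: %llu\n", level,
		       (unsigned long long)PT64_INDEX(addr, level));
	return 0;
}
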
95 int level; member
131 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) in FNAME()
135 return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) | in FNAME()
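
The reserved-bits check at 131/135 indexes a two-dimensional mask table by bit 7 of the gpte (the PSE/large-page bit changes which bits are reserved) and by level - 1. A hedged sketch of just that lookup; the mask contents are placeholders, and the extra EPT memory-type term on the real line 135 is left out.

#include <stdbool.h>
#include <stdint.h>

#define PT_MAX_LEVELS 4

/* rsvd_bits_mask[bit7][level - 1]: bits that must be clear in a guest PTE
 * at that level.  Placeholder values; KVM derives the real masks from the
 * guest's physical-address width and paging mode.
 */
static uint64_t rsvd_bits_mask[2][PT_MAX_LEVELS];

static bool is_rsvd_bits_set(uint64_t gpte, int level)
{
	int bit7 = (gpte >> 7) & 1;   /* PSE/large-page bit selects the row */

	return (gpte & rsvd_bits_mask[bit7][level - 1]) != 0;
}
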
212 unsigned level, index; in FNAME() local
222 for (level = walker->max_level; level >= walker->level; --level) { in FNAME()
223 pte = orig_pte = walker->ptes[level - 1]; in FNAME()
224 table_gfn = walker->table_gfn[level - 1]; in FNAME()
225 ptep_user = walker->ptep_user[level - 1]; in FNAME()
231 if (level == walker->level && write_fault && in FNAME()
252 if (unlikely(!walker->pte_writable[level - 1])) in FNAME()
260 walker->ptes[level - 1] = pte; in FNAME()
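
The loop matched at 222-260 runs after a successful walk and writes the accessed/dirty bits back into the guest's own page tables, level by level from the root (max_level) down to the leaf, using the per-level arrays the walk filled in. A self-contained sketch of that pattern, assuming the usual x86 A/D bit positions; the struct and return conventions here are illustrative, not the kernel's exact ones.

#include <stdbool.h>
#include <stdint.h>

#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK    (1ULL << 6)
#define PT_MAX_LEVELS 5

struct walker {
	int level, max_level;                 /* leaf and root levels of the walk */
	uint64_t ptes[PT_MAX_LEVELS];         /* gptes cached during the walk */
	uint64_t *ptep_user[PT_MAX_LEVELS];   /* where each gpte lives in memory */
	bool pte_writable[PT_MAX_LEVELS];
};

/* Set the accessed bit on every gpte used by the walk, plus the dirty bit
 * on the leaf for a write fault.  Compare-and-swap against the cached copy
 * so a concurrent guest update is never silently overwritten; a failed CAS
 * or a read-only mapping makes the caller retry the whole walk.
 */
static int update_accessed_dirty_bits(struct walker *w, bool write_fault)
{
	for (int level = w->max_level; level >= w->level; --level) {
		uint64_t pte = w->ptes[level - 1], orig = pte;

		pte |= PT_ACCESSED_MASK;
		if (level == w->level && write_fault)
			pte |= PT_DIRTY_MASK;
		if (pte == orig)
			continue;                     /* nothing to write back */
		if (!w->pte_writable[level - 1])
			return -1;
		if (!__atomic_compare_exchange_n(w->ptep_user[level - 1], &orig,
						 pte, false, __ATOMIC_SEQ_CST,
						 __ATOMIC_SEQ_CST))
			return 1;                     /* gpte changed: retry */
		w->ptes[level - 1] = pte;
	}
	return 0;
}
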
288 walker->level = mmu->root_level; in FNAME()
292 if (walker->level == PT32E_ROOT_LEVEL) { in FNAME()
294 trace_kvm_mmu_paging_element(pte, walker->level); in FNAME()
297 --walker->level; in FNAME()
300 walker->max_level = walker->level; in FNAME()
305 ++walker->level; in FNAME()
312 --walker->level; in FNAME()
314 index = PT_INDEX(addr, walker->level); in FNAME()
319 walker->table_gfn[walker->level - 1] = table_gfn; in FNAME()
320 walker->pte_gpa[walker->level - 1] = pte_gpa; in FNAME()
342 &walker->pte_writable[walker->level - 1]); in FNAME()
349 walker->ptep_user[walker->level - 1] = ptep_user; in FNAME()
351 trace_kvm_mmu_paging_element(pte, walker->level); in FNAME()
357 walker->level))) { in FNAME()
365 walker->ptes[walker->level - 1] = pte; in FNAME()
366 } while (!is_last_gpte(mmu, walker->level, pte)); in FNAME()
373 gfn = gpte_to_gfn_lvl(pte, walker->level); in FNAME()
374 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; in FNAME()
376 if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36()) in FNAME()
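
The block of matches from 288 to 376 is the guest walk itself: walker->level starts at the root level (with the extra PDPTE step for PAE at 292-297), then the do/while loop decrements the level, picks the index with PT_INDEX, reads the gpte from guest memory while recording table_gfn/pte_gpa/ptep_user per level, checks the present and reserved bits, and stops at the last gpte; 373-374 then build the final gfn from the leaf plus the address bits that level does not translate. Below is a compact stand-alone model of that descent, assuming 4-level 64-bit paging over a toy in-memory guest; it skips the access-rights, A/D and tracing work, and its names are illustrative.

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define LEVEL_BITS  9
#define PRESENT    (1ULL << 0)
#define PSE_BIT    (1ULL << 7)                 /* large page at levels 2/3 */
#define PFN_MASK   0x000ffffffffff000ULL
#define LVL_OFFSET_MASK(level) \
	((1ULL << (PAGE_SHIFT + ((level) - 1) * LEVEL_BITS)) - 1)

static inline unsigned pt_index(uint64_t addr, int level)
{
	return (addr >> (PAGE_SHIFT + (level - 1) * LEVEL_BITS)) & 511;
}

/* Toy model: "guest memory" is an array of 512-entry page tables and
 * table_gfn indexes directly into it.  Returns true and the leaf gfn on
 * success, false when a non-present entry should raise a guest #PF.
 */
static bool walk_addr(uint64_t tables[][512], uint64_t root_gfn,
		      uint64_t addr, uint64_t *gfn_out)
{
	uint64_t table_gfn = root_gfn, pte;
	int level;

	for (level = 4; ; --level) {
		pte = tables[table_gfn][pt_index(addr, level)];
		if (!(pte & PRESENT))
			return false;
		if (level == 1 || ((pte & PSE_BIT) && level <= 3))
			break;                  /* "last gpte": leaf reached */
		table_gfn = (pte & PFN_MASK) >> PAGE_SHIFT;
	}

	/* gfn mapped by the leaf, plus the bits the leaf does not translate
	 * (non-zero only for large pages), as on lines 373-374.
	 */
	*gfn_out = ((pte & PFN_MASK & ~LVL_OFFSET_MASK(level)) >> PAGE_SHIFT) +
		   ((addr & LVL_OFFSET_MASK(level)) >> PAGE_SHIFT);
	return true;
}
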
502 struct guest_walker *gw, int level) in FNAME()
505 gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1]; in FNAME()
509 if (level == PT_PAGE_TABLE_LEVEL) { in FNAME()
521 return r || curr_pte != gw->ptes[level - 1]; in FNAME()
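
gpte_changed() (502-521) is used by the fetch path after the mmu_lock was dropped to fault in the host page: for a given level it re-reads the guest PTE at the gpa recorded during the walk and reports whether it still matches the cached copy (at the lowest level the real code re-reads the whole surrounding group of gptes for prefetch). A hedged sketch of the comparison; read_guest_gpte() is a hypothetical stand-in for the guest-memory read.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the guest-memory read: non-zero on failure. */
extern int read_guest_gpte(uint64_t pte_gpa, uint64_t *pte);

/* True if the gpte cached for 'level' no longer matches guest memory,
 * in which case the fault is retried instead of installed.
 */
static bool gpte_changed(const uint64_t *cached_ptes,
			 const uint64_t *pte_gpa, int level)
{
	uint64_t curr_pte;
	int r = read_guest_gpte(pte_gpa[level - 1], &curr_pte);

	return r || curr_pte != cached_ptes[level - 1];
}
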
534 if (sp->role.level > PT_PAGE_TABLE_LEVEL) in FNAME()
588 shadow_walk_okay(&it) && it.level > gw->level; in FNAME()
597 table_gfn = gw->table_gfn[it.level - 2]; in FNAME()
598 sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1, in FNAME()
606 if (FNAME(gpte_changed)(vcpu, gw, it.level - 1)) in FNAME()
614 shadow_walk_okay(&it) && it.level > hlevel; in FNAME()
626 direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); in FNAME()
628 sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1, in FNAME()
635 it.level, gw->gfn, pfn, prefault, map_writable); in FNAME()
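
In fetch() (588-635) the shadow walk first mirrors the guest tables down to gw->level, then continues with direct shadow pages down to the final hugepage level; line 626 aligns the target gfn down to the region covered by one mapping at it.level, and the same trick appears again at 762. A small runnable sketch of that alignment, with PAGES_PER_HPAGE standing in for KVM_PAGES_PER_HPAGE.

#include <stdint.h>
#include <stdio.h>

/* 4 KiB pages covered by one mapping at 'level' (1 = 4K, 2 = 2M, 3 = 1G). */
#define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

int main(void)
{
	uint64_t gfn = 0x12345;
	int level;

	for (level = 1; level <= 3; level++) {
		uint64_t direct_gfn = gfn & ~(PAGES_PER_HPAGE(level) - 1);

		printf("level %d: gfn 0x%llx aligns to 0x%llx\n", level,
		       (unsigned long long)gfn,
		       (unsigned long long)direct_gfn);
	}
	return 0;
}
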
669 int level; in FNAME() local
670 gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1); in FNAME()
677 for (level = walker->level; level <= walker->max_level; level++) { in FNAME()
678 gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1]; in FNAME()
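
The loop at 669-678 decides whether the faulting write lands on one of the guest page tables used in the walk: walker->gfn XOR table_gfn, masked by the region mask built at 670, is zero exactly when the two gfns share that hugepage-sized region (and the raw XOR is zero when the write hits the table's own gfn). The helper below isolates that XOR-and-mask containment test; name and signature are illustrative.

#include <stdbool.h>
#include <stdint.h>

/* Two gfns share the same naturally aligned 'region_pages'-sized region
 * iff their high bits agree, i.e. the XOR has no bits above the region
 * offset (the trick used at line 678).
 */
static bool same_region(uint64_t gfn_a, uint64_t gfn_b, uint64_t region_pages)
{
	uint64_t mask = ~(region_pages - 1);   /* keep only the high bits */

	return !((gfn_a ^ gfn_b) & mask);
}
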
709 int level = PT_PAGE_TABLE_LEVEL; in FNAME() local
755 if (walker.level >= PT_DIRECTORY_LEVEL) in FNAME()
761 level = min(walker.level, mapping_level(vcpu, walker.gfn)); in FNAME()
762 walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1); in FNAME()
803 transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); in FNAME()
805 level, pfn, map_writable, prefault); in FNAME()
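
In the page-fault handler (709-805) the mapping level starts at PT_PAGE_TABLE_LEVEL; only when the guest leaf sits at PT_DIRECTORY_LEVEL or higher (755) may a larger host mapping be used, capped at 761 by what the host side can back, after which the gfn is aligned to that level (762) and transparent_hugepage_adjust() may still bump the level at 803. A sketch of the capping decision, ignoring the force-4K cases (dirty logging, self-modifying page tables); host_mapping_level() is a hypothetical stand-in for mapping_level().

#include <stdint.h>

enum { PT_PAGE_TABLE_LEVEL = 1, PT_DIRECTORY_LEVEL = 2 };

/* Hypothetical stand-in: the largest level the host side (memslot layout,
 * host page size) can back for this gfn.
 */
extern int host_mapping_level(uint64_t gfn);

/* Pick the shadow mapping level: never larger than the guest's own leaf,
 * never larger than what the host can back, then align the gfn to it.
 */
static int pick_level(int guest_level, uint64_t *gfn)
{
	int level = PT_PAGE_TABLE_LEVEL;

	if (guest_level >= PT_DIRECTORY_LEVEL) {
		int host = host_mapping_level(*gfn);

		level = guest_level < host ? guest_level : host;
		*gfn &= ~((1ULL << ((level - 1) * 9)) - 1);
	}
	return level;
}
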
822 WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL); in FNAME()
834 int level; in FNAME() local
852 level = iterator.level; in FNAME()
856 if (is_last_spte(*sptep, level)) { in FNAME()
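
Finally, the invlpg path (822-856) walks the shadow table for the flushed address and acts when it reaches a last-level SPTE (856): a shadow PTE is "last" when it sits at the 4 KiB level or carries the large-page bit. A one-line sketch of that predicate, assuming the usual x86 PS bit position.

#include <stdbool.h>
#include <stdint.h>

#define PT_PAGE_TABLE_LEVEL 1
#define PT_PAGE_SIZE_MASK  (1ULL << 7)   /* PS/large-page bit */

/* A shadow PTE maps memory (rather than a lower-level table) when it is
 * at the 4 KiB level or is a large-page mapping.
 */
static bool is_last_spte(uint64_t spte, int level)
{
	return level == PT_PAGE_TABLE_LEVEL || (spte & PT_PAGE_SIZE_MASK);
}
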