kpte              163 arch/powerpc/mm/pgtable_32.c 	pte_t *kpte;
kpte              172 arch/powerpc/mm/pgtable_32.c 	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
kpte              174 arch/powerpc/mm/pgtable_32.c 	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
kpte              175 arch/powerpc/mm/pgtable_32.c 	pte_unmap(kpte);
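
The powerpc lines above show the 32-bit pattern: walk init_mm with the file-local get_pteptr() helper to find the kernel PTE for an address, then rewrite it. A minimal sketch of that pattern follows; the function name set_kernel_page_prot is hypothetical, the surrounding in-tree code has more checks, and it presumably flushes the TLB for the page afterwards (omitted here).

static int set_kernel_page_prot(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	pmd_t *kpmd;
	unsigned long address = (unsigned long)page_address(page);

	/* Walk init_mm to locate the PTE (and its PMD) mapping this kernel address. */
	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
		return -EINVAL;

	/* Install a PTE for the same page with the new protection bits. */
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
	pte_unmap(kpte);

	return 0;
}
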
kpte              199 arch/x86/mm/mem_encrypt.c static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
kpte              207 arch/x86/mm/mem_encrypt.c 		pfn = pte_pfn(*kpte);
kpte              208 arch/x86/mm/mem_encrypt.c 		old_prot = pte_pgprot(*kpte);
kpte              211 arch/x86/mm/mem_encrypt.c 		pfn = pmd_pfn(*(pmd_t *)kpte);
kpte              212 arch/x86/mm/mem_encrypt.c 		old_prot = pmd_pgprot(*(pmd_t *)kpte);
kpte              215 arch/x86/mm/mem_encrypt.c 		pfn = pud_pfn(*(pud_t *)kpte);
kpte              216 arch/x86/mm/mem_encrypt.c 		old_prot = pud_pgprot(*(pud_t *)kpte);
kpte              250 arch/x86/mm/mem_encrypt.c 	set_pte_atomic(kpte, new_pte);
kpte              260 arch/x86/mm/mem_encrypt.c 	pte_t *kpte;
kpte              266 arch/x86/mm/mem_encrypt.c 		kpte = lookup_address(vaddr, &level);
kpte              267 arch/x86/mm/mem_encrypt.c 		if (!kpte || pte_none(*kpte)) {
kpte              273 arch/x86/mm/mem_encrypt.c 			__set_clr_pte_enc(kpte, level, enc);
kpte              289 arch/x86/mm/mem_encrypt.c 			__set_clr_pte_enc(kpte, level, enc);
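
The mem_encrypt.c hits illustrate the x86 convention that lookup_address() returns a pte_t * regardless of mapping size, and the returned level tells the caller whether kpte actually points at a PTE, PMD, or PUD entry. A sketch of that dispatch, under the assumption it lives in x86 kernel code; dump_kernel_mapping is a made-up name, not a kernel function:

static void dump_kernel_mapping(unsigned long vaddr)
{
	pte_t *kpte;
	unsigned int level;
	unsigned long pfn;
	pgprot_t prot;

	kpte = lookup_address(vaddr, &level);
	if (!kpte || pte_none(*kpte))
		return;				/* address is not mapped */

	switch (level) {
	case PG_LEVEL_4K:
		pfn  = pte_pfn(*kpte);
		prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		/* For a 2M mapping, kpte really points at a PMD entry. */
		pfn  = pmd_pfn(*(pmd_t *)kpte);
		prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		/* For a 1G mapping, it points at a PUD entry. */
		pfn  = pud_pfn(*(pud_t *)kpte);
		prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		return;
	}

	pr_info("%#lx maps pfn %#lx, prot %#lx\n",
		vaddr, pfn, (unsigned long)pgprot_val(prot));
}
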
kpte              705 arch/x86/mm/pageattr.c static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
kpte              708 arch/x86/mm/pageattr.c 	set_pte_atomic(kpte, pte);
kpte              746 arch/x86/mm/pageattr.c static int __should_split_large_page(pte_t *kpte, unsigned long address,
kpte              759 arch/x86/mm/pageattr.c 	if (tmp != kpte)
kpte              764 arch/x86/mm/pageattr.c 		old_prot = pmd_pgprot(*(pmd_t *)kpte);
kpte              765 arch/x86/mm/pageattr.c 		old_pfn = pmd_pfn(*(pmd_t *)kpte);
kpte              769 arch/x86/mm/pageattr.c 		old_prot = pud_pgprot(*(pud_t *)kpte);
kpte              770 arch/x86/mm/pageattr.c 		old_pfn = pud_pfn(*(pud_t *)kpte);
kpte              883 arch/x86/mm/pageattr.c 	__set_pmd_pte(kpte, address, new_pte);
kpte              889 arch/x86/mm/pageattr.c static int should_split_large_page(pte_t *kpte, unsigned long address,
kpte              898 arch/x86/mm/pageattr.c 	do_split = __should_split_large_page(kpte, address, cpa);
kpte              941 arch/x86/mm/pageattr.c __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
kpte              956 arch/x86/mm/pageattr.c 	if (tmp != kpte) {
kpte              965 arch/x86/mm/pageattr.c 		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
kpte              971 arch/x86/mm/pageattr.c 		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
kpte              977 arch/x86/mm/pageattr.c 		ref_prot = pud_pgprot(*(pud_t *)kpte);
kpte              978 arch/x86/mm/pageattr.c 		ref_pfn = pud_pfn(*(pud_t *)kpte);
kpte             1019 arch/x86/mm/pageattr.c 	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
kpte             1045 arch/x86/mm/pageattr.c static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
kpte             1058 arch/x86/mm/pageattr.c 	if (__split_large_page(cpa, kpte, address, base))
kpte             1495 arch/x86/mm/pageattr.c 	pte_t *kpte, old_pte;
kpte             1499 arch/x86/mm/pageattr.c 	kpte = _lookup_address_cpa(cpa, address, &level);
kpte             1500 arch/x86/mm/pageattr.c 	if (!kpte)
kpte             1503 arch/x86/mm/pageattr.c 	old_pte = *kpte;
kpte             1533 arch/x86/mm/pageattr.c 			set_pte_atomic(kpte, new_pte);
kpte             1544 arch/x86/mm/pageattr.c 	do_split = should_split_large_page(kpte, address, cpa);
kpte             1556 arch/x86/mm/pageattr.c 	err = split_large_page(cpa, kpte, address);
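
The pageattr.c hits trace the change_page_attr flow: look up kpte, rewrite a 4K entry atomically, or decide whether a 2M/1G mapping must be split first. A simplified sketch of that flow, with a hypothetical name and without the struct cpa_data context, retry loop, or TLB flushing that the in-tree code carries:

static int change_one_page_attr(unsigned long address, pgprot_t new_prot)
{
	unsigned int level;
	pte_t *kpte, old_pte;

	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	old_pte = *kpte;
	if (pte_none(old_pte))
		return -EINVAL;

	if (level == PG_LEVEL_4K) {
		/* 4K mapping: atomically replace the PTE, keeping the old pfn. */
		set_pte_atomic(kpte, pfn_pte(pte_pfn(old_pte), new_prot));
		return 0;
	}

	/*
	 * 2M/1G mapping: the in-tree code first asks whether the whole
	 * large page can carry the new attribute (should_split_large_page)
	 * and only splits it into 4K entries (split_large_page) when it
	 * cannot, then retries.  Both helpers need the struct cpa_data
	 * context, so that path is only indicated here.
	 */
	return -EAGAIN;
}
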