new_pte 35 arch/arm/mm/pgd.c pte_t *new_pte, *init_pte;
new_pte 80 arch/arm/mm/pgd.c new_pte = pte_alloc_map(mm, new_pmd, 0);
new_pte 81 arch/arm/mm/pgd.c if (!new_pte)
new_pte 97 arch/arm/mm/pgd.c set_pte_ext(new_pte + 0, init_pte[0], 0);
new_pte 98 arch/arm/mm/pgd.c set_pte_ext(new_pte + 1, init_pte[1], 0);
new_pte 100 arch/arm/mm/pgd.c pte_unmap(new_pte);
new_pte 161 arch/mips/kvm/mmu.c pte_t *new_pte;
new_pte 165 arch/mips/kvm/mmu.c new_pte = mmu_memory_cache_alloc(cache);
new_pte 166 arch/mips/kvm/mmu.c clear_page(new_pte);
new_pte 167 arch/mips/kvm/mmu.c pmd_populate_kernel(NULL, pmd, new_pte);
new_pte 63 arch/powerpc/include/asm/book3s/64/hugetlb.h pte_t old_pte, pte_t new_pte);
new_pte 422 arch/powerpc/include/asm/kvm_book3s_64.h pte_t old_pte, new_pte = __pte(0);
new_pte 440 arch/powerpc/include/asm/kvm_book3s_64.h new_pte = pte_mkyoung(old_pte);
new_pte 442 arch/powerpc/include/asm/kvm_book3s_64.h new_pte = pte_mkdirty(new_pte);
new_pte 444 arch/powerpc/include/asm/kvm_book3s_64.h if (pte_xchg(ptep, old_pte, new_pte))
new_pte 447 arch/powerpc/include/asm/kvm_book3s_64.h return new_pte;
new_pte 26 arch/powerpc/mm/book3s64/hash_4k.c unsigned long old_pte, new_pte;
new_pte 48 arch/powerpc/mm/book3s64/hash_4k.c new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
new_pte 50 arch/powerpc/mm/book3s64/hash_4k.c new_pte |= _PAGE_DIRTY;
new_pte 51 arch/powerpc/mm/book3s64/hash_4k.c } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
new_pte 57 arch/powerpc/mm/book3s64/hash_4k.c rflags = htab_convert_pte_flags(new_pte);
new_pte 119 arch/powerpc/mm/book3s64/hash_4k.c new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
new_pte 120 arch/powerpc/mm/book3s64/hash_4k.c new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
new_pte 122 arch/powerpc/mm/book3s64/hash_4k.c *ptep = __pte(new_pte & ~H_PAGE_BUSY);
new_pte 44 arch/powerpc/mm/book3s64/hash_64k.c unsigned long old_pte, new_pte, subpg_pte;
new_pte 66 arch/powerpc/mm/book3s64/hash_64k.c new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED | H_PAGE_COMBO;
new_pte 68 arch/powerpc/mm/book3s64/hash_64k.c new_pte |= _PAGE_DIRTY;
new_pte 69 arch/powerpc/mm/book3s64/hash_64k.c } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
new_pte 74 arch/powerpc/mm/book3s64/hash_64k.c subpg_pte = new_pte & ~subpg_prot;
new_pte 107 arch/powerpc/mm/book3s64/hash_64k.c new_pte &= ~H_PAGE_HASHPTE;
new_pte 129 arch/powerpc/mm/book3s64/hash_64k.c *ptep = __pte(new_pte & ~H_PAGE_BUSY);
new_pte 216 arch/powerpc/mm/book3s64/hash_64k.c new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE);
new_pte 217 arch/powerpc/mm/book3s64/hash_64k.c new_pte |= H_PAGE_HASHPTE;
new_pte 219 arch/powerpc/mm/book3s64/hash_64k.c *ptep = __pte(new_pte & ~H_PAGE_BUSY);
new_pte 230 arch/powerpc/mm/book3s64/hash_64k.c unsigned long old_pte, new_pte;
new_pte 258 arch/powerpc/mm/book3s64/hash_64k.c new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
new_pte 260 arch/powerpc/mm/book3s64/hash_64k.c new_pte |= _PAGE_DIRTY;
new_pte 261 arch/powerpc/mm/book3s64/hash_64k.c } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
new_pte 263 arch/powerpc/mm/book3s64/hash_64k.c rflags = htab_convert_pte_flags(new_pte);
new_pte 328 arch/powerpc/mm/book3s64/hash_64k.c new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
new_pte 329 arch/powerpc/mm/book3s64/hash_64k.c new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
new_pte 331 arch/powerpc/mm/book3s64/hash_64k.c *ptep = __pte(new_pte & ~H_PAGE_BUSY);
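The book3s64 hash fault paths above (hash_4k.c, hash_64k.c, and hash_hugetlbpage.c below) all build new_pte inside the same lock-free retry loop: set H_PAGE_BUSY and _PAGE_ACCESSED (plus _PAGE_DIRTY on a write fault) on a copy of the entry, then publish it with pte_xchg() until no concurrent update intervenes. A minimal standalone sketch of that loop shape follows; pte_t, the flag values, pte_xchg(), and hash_lock_pte() here are simplified stand-ins for illustration, not the kernel's definitions.

/*
 * Sketch of the pte_xchg() retry loop seen at hash_4k.c:48-51 and
 * its 64k/hugetlb twins. All types and bit values are illustrative
 * stand-ins, not the kernel's.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef _Atomic unsigned long pte_t;        /* stand-in for the kernel pte_t */

#define H_PAGE_BUSY    (1UL << 0)           /* illustrative bit positions only */
#define _PAGE_ACCESSED (1UL << 1)
#define _PAGE_DIRTY    (1UL << 2)
#define _PAGE_PRESENT  (1UL << 3)

/* Compare-and-exchange one PTE; mirrors what pte_xchg() does. */
static bool pte_xchg(pte_t *ptep, unsigned long old, unsigned long new)
{
	return atomic_compare_exchange_strong(ptep, &old, new);
}

/*
 * Mark a present PTE busy+accessed (and dirty on a write fault),
 * retrying until no other CPU has changed it under us.
 */
static bool hash_lock_pte(pte_t *ptep, bool is_write)
{
	unsigned long old_pte, new_pte;

	do {
		old_pte = atomic_load(ptep);
		/* Bail out if the PTE is gone or another CPU holds it busy. */
		if (!(old_pte & _PAGE_PRESENT) || (old_pte & H_PAGE_BUSY))
			return false;
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (is_write)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, old_pte, new_pte));

	return true;
}

int main(void)
{
	pte_t pte = _PAGE_PRESENT;

	if (hash_lock_pte(&pte, true))
		printf("pte now %#lx (busy, accessed, dirty set)\n",
		       (unsigned long)atomic_load(&pte));
	return 0;
}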
new_pte 31 arch/powerpc/mm/book3s64/hash_hugetlbpage.c unsigned long old_pte, new_pte;
new_pte 66 arch/powerpc/mm/book3s64/hash_hugetlbpage.c new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
new_pte 68 arch/powerpc/mm/book3s64/hash_hugetlbpage.c new_pte |= _PAGE_DIRTY;
new_pte 69 arch/powerpc/mm/book3s64/hash_hugetlbpage.c } while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
new_pte 75 arch/powerpc/mm/book3s64/hash_hugetlbpage.c rflags = htab_convert_pte_flags(new_pte);
new_pte 106 arch/powerpc/mm/book3s64/hash_hugetlbpage.c new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
new_pte 122 arch/powerpc/mm/book3s64/hash_hugetlbpage.c new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
new_pte 128 arch/powerpc/mm/book3s64/hash_hugetlbpage.c *ptep = __pte(new_pte & ~H_PAGE_BUSY);
new_pte 1045 arch/powerpc/mm/book3s64/radix_pgtable.c unsigned long old_pte, new_pte;
new_pte 1051 arch/powerpc/mm/book3s64/radix_pgtable.c new_pte = old_pte | set;
new_pte 1053 arch/powerpc/mm/book3s64/radix_pgtable.c __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
new_pte 28 arch/unicore32/mm/pgd.c pte_t *new_pte, *init_pte;
new_pte 54 arch/unicore32/mm/pgd.c new_pte = pte_alloc_map(mm, new_pmd, 0);
new_pte 55 arch/unicore32/mm/pgd.c if (!new_pte)
new_pte 60 arch/unicore32/mm/pgd.c set_pte(new_pte, *init_pte);
new_pte 62 arch/unicore32/mm/pgd.c pte_unmap(new_pte);
new_pte 56 arch/x86/include/asm/pgtable_64.h void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
new_pte 57 arch/x86/include/asm/pgtable_64.h void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
new_pte 133 arch/x86/kvm/paging_tmpl.h pt_element_t orig_pte, pt_element_t new_pte)
new_pte 143 arch/x86/kvm/paging_tmpl.h ret = CMPXCHG(&table[index], orig_pte, new_pte);
new_pte 166 arch/x86/kvm/paging_tmpl.h ret = CMPXCHG(&table[index], orig_pte, new_pte);
new_pte 290 arch/x86/mm/init_64.c static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
new_pte 295 arch/x86/mm/init_64.c set_pte(pte, new_pte);
new_pte 304 arch/x86/mm/init_64.c void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
new_pte 309 arch/x86/mm/init_64.c __set_pte_vaddr(pud, vaddr, new_pte);
new_pte 312 arch/x86/mm/init_64.c void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
new_pte 316 arch/x86/mm/init_64.c __set_pte_vaddr(pud, vaddr, new_pte);
new_pte 203 arch/x86/mm/mem_encrypt.c pte_t new_pte;
new_pte 249 arch/x86/mm/mem_encrypt.c new_pte = pfn_pte(pfn, new_prot);
new_pte 250 arch/x86/mm/mem_encrypt.c set_pte_atomic(kpte, new_pte);
new_pte 751 arch/x86/mm/pageattr.c pte_t new_pte, *tmp;
new_pte 882 arch/x86/mm/pageattr.c new_pte = pfn_pte(old_pfn, new_prot);
new_pte 883 arch/x86/mm/pageattr.c __set_pmd_pte(kpte, address, new_pte);
new_pte 1508 arch/x86/mm/pageattr.c pte_t new_pte;
new_pte 1527 arch/x86/mm/pageattr.c new_pte = pfn_pte(pfn, new_prot);
new_pte 1532 arch/x86/mm/pageattr.c if (pte_val(old_pte) != pte_val(new_pte)) {
new_pte 1533 arch/x86/mm/pageattr.c set_pte_atomic(kpte, new_pte);
new_pte 3917 mm/hugetlb.c pte_t new_pte;
new_pte 4060 mm/hugetlb.c new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
new_pte 4062 mm/hugetlb.c set_huge_pte_at(mm, haddr, ptep, new_pte);
new_pte 121 mm/mremap.c pte_t *old_pte, *new_pte, pte;
new_pte 152 mm/mremap.c new_pte = pte_offset_map(new_pmd, new_addr);
new_pte 160 mm/mremap.c new_pte++, new_addr += PAGE_SIZE) {
new_pte 180 mm/mremap.c set_pte_at(mm, new_addr, new_pte, pte);
new_pte 188 mm/mremap.c pte_unmap(new_pte - 1);
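The mm/mremap.c hits above belong to move_ptes(): old_pte and new_pte walk the source and destination PTE arrays in lockstep, each source entry is cleared and reinstalled at the new address. A standalone sketch of that loop shape follows; PAGE_SIZE, pte_t, pte_get_and_clear(), and move_ptes() here are simplified stand-ins for illustration, not the kernel's definitions (which also take the page-table locks and flush the TLB).

/*
 * Sketch of the PTE move loop at mm/mremap.c:152-188. Types and
 * helpers are illustrative stand-ins, not the kernel's.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
typedef unsigned long pte_t;
#define pte_none(p) ((p) == 0)

/* Stand-in for ptep_get_and_clear(): return the entry, zero the slot. */
static pte_t pte_get_and_clear(pte_t *ptep)
{
	pte_t pte = *ptep;
	*ptep = 0;
	return pte;
}

static void move_ptes(pte_t *old_pte, pte_t *new_pte,
		      unsigned long old_addr, unsigned long new_addr,
		      unsigned long old_end)
{
	pte_t pte;

	/* Same lockstep loop shape as mremap.c:160. */
	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = pte_get_and_clear(old_pte);
		*new_pte = pte;	/* kernel: set_pte_at(mm, new_addr, new_pte, pte) */
	}
}

int main(void)
{
	pte_t src[4] = { 0x1001, 0, 0x3001, 0x4001 };
	pte_t dst[4] = { 0 };

	move_ptes(src, dst, 0x10000, 0x20000, 0x10000 + 4 * PAGE_SIZE);
	for (int i = 0; i < 4; i++)
		printf("dst[%d] = %#lx\n", i, dst[i]);
	return 0;
}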
new_pte 189 virt/kvm/arm/mmu.c static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
new_pte 191 virt/kvm/arm/mmu.c WRITE_ONCE(*ptep, new_pte);
new_pte 1222 virt/kvm/arm/mmu.c phys_addr_t addr, const pte_t *new_pte,
new_pte 1292 virt/kvm/arm/mmu.c if (pte_val(old_pte) == pte_val(*new_pte))
new_pte 1301 virt/kvm/arm/mmu.c kvm_set_pte(pte, *new_pte);
new_pte 1834 virt/kvm/arm/mmu.c pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
new_pte 1837 virt/kvm/arm/mmu.c new_pte = kvm_s2pte_mkwrite(new_pte);
new_pte 1842 virt/kvm/arm/mmu.c new_pte = kvm_s2pte_mkexec(new_pte);
new_pte 1844 virt/kvm/arm/mmu.c ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
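The virt/kvm/arm/mmu.c hits above trace the stage-2 fault path: build new_pte from the faulting pfn, add write/exec permission as needed, and publish it with a single WRITE_ONCE() store, skipping the store when the entry already matches. A standalone sketch follows; the types, permission bits, and helpers here are simplified stand-ins for illustration, not the kernel's definitions.

/*
 * Sketch of the stage-2 PTE install pattern at mmu.c:1834-1844
 * and the kvm_set_pte()/stage2_set_pte() pair above. All names
 * and bit values are illustrative stand-ins, not the kernel's.
 */
#include <stdio.h>

typedef unsigned long pte_t;

#define PAGE_SHIFT 12
#define S2_VALID   (1UL << 0)   /* illustrative permission bits */
#define S2_WRITE   (1UL << 1)
#define S2_EXEC    (1UL << 2)

static pte_t kvm_pfn_pte(unsigned long pfn) { return (pfn << PAGE_SHIFT) | S2_VALID; }
static pte_t s2pte_mkwrite(pte_t pte)       { return pte | S2_WRITE; }
static pte_t s2pte_mkexec(pte_t pte)        { return pte | S2_EXEC; }

/* Single store; the kernel uses WRITE_ONCE(), which a volatile store approximates. */
static void kvm_set_pte(pte_t *ptep, pte_t new_pte)
{
	*(volatile pte_t *)ptep = new_pte;
}

static void stage2_set_pte(pte_t *ptep, const pte_t *new_pte)
{
	pte_t old_pte = *ptep;

	/* mmu.c:1292: nothing to do if the entry already matches. */
	if (old_pte == *new_pte)
		return;
	kvm_set_pte(ptep, *new_pte);
}

int main(void)
{
	pte_t entry = 0;
	pte_t new_pte = kvm_pfn_pte(0x1234);   /* cf. mmu.c:1834 */

	new_pte = s2pte_mkwrite(new_pte);      /* writable fault, cf. mmu.c:1837 */
	new_pte = s2pte_mkexec(new_pte);       /* executable fault, cf. mmu.c:1842 */
	stage2_set_pte(&entry, &new_pte);
	printf("stage-2 entry = %#lx\n", entry);
	return 0;
}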