pvmw 215 include/linux/rmap.h static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
pvmw 217 include/linux/rmap.h if (pvmw->pte)
pvmw 218 include/linux/rmap.h pte_unmap(pvmw->pte);
pvmw 219 include/linux/rmap.h if (pvmw->ptl)
pvmw 220 include/linux/rmap.h spin_unlock(pvmw->ptl);
pvmw 223 include/linux/rmap.h bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
pvmw 245 include/linux/swapops.h extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
pvmw 248 include/linux/swapops.h extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
pvmw 276 include/linux/swapops.h static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
pvmw 282 include/linux/swapops.h static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
pvmw 158 kernel/events/uprobes.c struct page_vma_mapped_walk pvmw = {
pvmw 182 kernel/events/uprobes.c if (!page_vma_mapped_walk(&pvmw)) {
pvmw 187 kernel/events/uprobes.c VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
pvmw 203 kernel/events/uprobes.c flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
pvmw 204 kernel/events/uprobes.c ptep_clear_flush_notify(vma, addr, pvmw.pte);
pvmw 206 kernel/events/uprobes.c set_pte_at_notify(mm, addr, pvmw.pte,
pvmw 212 kernel/events/uprobes.c page_vma_mapped_walk_done(&pvmw);
pvmw 3020 mm/huge_memory.c void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
pvmw 3023 mm/huge_memory.c struct vm_area_struct *vma = pvmw->vma;
pvmw 3025 mm/huge_memory.c unsigned long address = pvmw->address;
pvmw 3030 mm/huge_memory.c if (!(pvmw->pmd && !pvmw->pte))
pvmw 3034 mm/huge_memory.c pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
pvmw 3041 mm/huge_memory.c set_pmd_at(mm, address, pvmw->pmd, pmdswp);
pvmw 3046 mm/huge_memory.c void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
pvmw 3048 mm/huge_memory.c struct vm_area_struct *vma = pvmw->vma;
pvmw 3050 mm/huge_memory.c unsigned long address = pvmw->address;
pvmw 3055 mm/huge_memory.c if (!(pvmw->pmd && !pvmw->pte))
pvmw 3058 mm/huge_memory.c entry = pmd_to_swp_entry(*pvmw->pmd);
pvmw 3061 mm/huge_memory.c if (pmd_swp_soft_dirty(*pvmw->pmd))
pvmw 3071 mm/huge_memory.c set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
pvmw 3074 mm/huge_memory.c update_mmu_cache_pmd(vma, address, pvmw->pmd);
pvmw 1036 mm/ksm.c struct page_vma_mapped_walk pvmw = {
pvmw 1044 mm/ksm.c pvmw.address = page_address_in_vma(page, vma);
pvmw 1045 mm/ksm.c if (pvmw.address == -EFAULT)
pvmw 1051 mm/ksm.c pvmw.address,
pvmw 1052 mm/ksm.c pvmw.address + PAGE_SIZE);
pvmw 1055 mm/ksm.c if (!page_vma_mapped_walk(&pvmw))
pvmw 1057 mm/ksm.c if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
pvmw 1060 mm/ksm.c if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
pvmw 1061 mm/ksm.c (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
pvmw 1066 mm/ksm.c flush_cache_page(vma, pvmw.address, page_to_pfn(page));
pvmw 1081 mm/ksm.c entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
pvmw 1087 mm/ksm.c set_pte_at(mm, pvmw.address, pvmw.pte, entry);
pvmw 1097 mm/ksm.c set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
pvmw 1099 mm/ksm.c *orig_pte = *pvmw.pte;
pvmw 1103 mm/ksm.c page_vma_mapped_walk_done(&pvmw);
pvmw 207 mm/migrate.c struct page_vma_mapped_walk pvmw = {
pvmw 218 mm/migrate.c while (page_vma_mapped_walk(&pvmw)) {
pvmw 222 mm/migrate.c new = page - pvmw.page->index +
pvmw 223 mm/migrate.c linear_page_index(vma, pvmw.address);
pvmw 227 mm/migrate.c if (!pvmw.pte) {
pvmw 229 mm/migrate.c remove_migration_pmd(&pvmw, new);
pvmw 236 mm/migrate.c if (pte_swp_soft_dirty(*pvmw.pte))
pvmw 242 mm/migrate.c entry = pte_to_swp_entry(*pvmw.pte);
pvmw 257 mm/migrate.c set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
pvmw 259 mm/migrate.c hugepage_add_anon_rmap(new, vma, pvmw.address);
pvmw 265 mm/migrate.c set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
pvmw 268 mm/migrate.c page_add_anon_rmap(new, vma, pvmw.address, false);
pvmw 279 mm/migrate.c update_mmu_cache(vma, pvmw.address, pvmw.pte);
pvmw 58 mm/page_idle.c struct page_vma_mapped_walk pvmw = {
pvmw 65 mm/page_idle.c while (page_vma_mapped_walk(&pvmw)) {
pvmw 66 mm/page_idle.c addr = pvmw.address;
pvmw 67 mm/page_idle.c if (pvmw.pte) {
pvmw 72 mm/page_idle.c if (ptep_clear_young_notify(vma, addr, pvmw.pte))
pvmw 75 mm/page_idle.c if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
pvmw 10 mm/page_vma_mapped.c static inline bool not_found(struct page_vma_mapped_walk *pvmw)
pvmw 12 mm/page_vma_mapped.c page_vma_mapped_walk_done(pvmw);
pvmw 16 mm/page_vma_mapped.c static bool map_pte(struct page_vma_mapped_walk *pvmw)
pvmw 18 mm/page_vma_mapped.c pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
pvmw 19 mm/page_vma_mapped.c if (!(pvmw->flags & PVMW_SYNC)) {
pvmw 20 mm/page_vma_mapped.c if (pvmw->flags & PVMW_MIGRATION) {
pvmw 21 mm/page_vma_mapped.c if (!is_swap_pte(*pvmw->pte))
pvmw 39 mm/page_vma_mapped.c if (is_swap_pte(*pvmw->pte)) {
pvmw 43 mm/page_vma_mapped.c entry = pte_to_swp_entry(*pvmw->pte);
pvmw 46 mm/page_vma_mapped.c } else if (!pte_present(*pvmw->pte))
pvmw 50 mm/page_vma_mapped.c pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
pvmw 51 mm/page_vma_mapped.c spin_lock(pvmw->ptl);
pvmw 81 mm/page_vma_mapped.c static bool check_pte(struct page_vma_mapped_walk *pvmw)
pvmw 85 mm/page_vma_mapped.c if (pvmw->flags & PVMW_MIGRATION) {
pvmw 87 mm/page_vma_mapped.c if (!is_swap_pte(*pvmw->pte))
pvmw 89 mm/page_vma_mapped.c entry = pte_to_swp_entry(*pvmw->pte);
pvmw 95 mm/page_vma_mapped.c } else if (is_swap_pte(*pvmw->pte)) {
pvmw 99 mm/page_vma_mapped.c entry = pte_to_swp_entry(*pvmw->pte);
pvmw 105 mm/page_vma_mapped.c if (!pte_present(*pvmw->pte))
pvmw 108 mm/page_vma_mapped.c pfn = pte_pfn(*pvmw->pte);
pvmw 111 mm/page_vma_mapped.c return pfn_in_hpage(pvmw->page, pfn);
pvmw 138 mm/page_vma_mapped.c bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
pvmw 140 mm/page_vma_mapped.c struct mm_struct *mm = pvmw->vma->vm_mm;
pvmw 141 mm/page_vma_mapped.c struct page *page = pvmw->page;
pvmw 148 mm/page_vma_mapped.c if (pvmw->pmd && !pvmw->pte)
pvmw 149 mm/page_vma_mapped.c return not_found(pvmw);
pvmw 151 mm/page_vma_mapped.c if (pvmw->pte)
pvmw 154 mm/page_vma_mapped.c if (unlikely(PageHuge(pvmw->page))) {
pvmw 156 mm/page_vma_mapped.c pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
pvmw 157 mm/page_vma_mapped.c if (!pvmw->pte)
pvmw 160 mm/page_vma_mapped.c pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
pvmw 161 mm/page_vma_mapped.c spin_lock(pvmw->ptl);
pvmw 162 mm/page_vma_mapped.c if (!check_pte(pvmw))
pvmw 163 mm/page_vma_mapped.c return not_found(pvmw);
pvmw 167 mm/page_vma_mapped.c pgd = pgd_offset(mm, pvmw->address);
pvmw 170 mm/page_vma_mapped.c p4d = p4d_offset(pgd, pvmw->address);
pvmw 173 mm/page_vma_mapped.c pud = pud_offset(p4d, pvmw->address);
pvmw 176 mm/page_vma_mapped.c pvmw->pmd = pmd_offset(pud, pvmw->address);
pvmw 182 mm/page_vma_mapped.c pmde = READ_ONCE(*pvmw->pmd);
pvmw 184 mm/page_vma_mapped.c pvmw->ptl = pmd_lock(mm, pvmw->pmd);
pvmw 185 mm/page_vma_mapped.c if (likely(pmd_trans_huge(*pvmw->pmd))) {
pvmw 186 mm/page_vma_mapped.c if (pvmw->flags & PVMW_MIGRATION)
pvmw 187 mm/page_vma_mapped.c return not_found(pvmw);
pvmw 188 mm/page_vma_mapped.c if (pmd_page(*pvmw->pmd) != page)
pvmw 189 mm/page_vma_mapped.c return not_found(pvmw);
pvmw 191 mm/page_vma_mapped.c } else if (!pmd_present(*pvmw->pmd)) {
pvmw 193 mm/page_vma_mapped.c if (!(pvmw->flags & PVMW_MIGRATION))
pvmw 194 mm/page_vma_mapped.c return not_found(pvmw);
pvmw 195 mm/page_vma_mapped.c if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
pvmw 196 mm/page_vma_mapped.c swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
pvmw 199 mm/page_vma_mapped.c return not_found(pvmw);
pvmw 203 mm/page_vma_mapped.c return not_found(pvmw);
pvmw 206 mm/page_vma_mapped.c spin_unlock(pvmw->ptl);
pvmw 207 mm/page_vma_mapped.c pvmw->ptl = NULL;
pvmw 212 mm/page_vma_mapped.c if (!map_pte(pvmw))
pvmw 215 mm/page_vma_mapped.c if (check_pte(pvmw))
pvmw 219 mm/page_vma_mapped.c if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
pvmw 220 mm/page_vma_mapped.c return not_found(pvmw);
pvmw 222 mm/page_vma_mapped.c pvmw->address += PAGE_SIZE;
pvmw 223 mm/page_vma_mapped.c if (pvmw->address >= pvmw->vma->vm_end ||
pvmw 224 mm/page_vma_mapped.c pvmw->address >=
pvmw 225 mm/page_vma_mapped.c __vma_address(pvmw->page, pvmw->vma) +
pvmw 226 mm/page_vma_mapped.c hpage_nr_pages(pvmw->page) * PAGE_SIZE)
pvmw 227 mm/page_vma_mapped.c return not_found(pvmw);
pvmw 229 mm/page_vma_mapped.c if (pvmw->address % PMD_SIZE == 0) {
pvmw 230 mm/page_vma_mapped.c pte_unmap(pvmw->pte);
pvmw 231 mm/page_vma_mapped.c if (pvmw->ptl) {
pvmw 232 mm/page_vma_mapped.c spin_unlock(pvmw->ptl);
pvmw 233 mm/page_vma_mapped.c pvmw->ptl = NULL;
pvmw 237 mm/page_vma_mapped.c pvmw->pte++;
pvmw 239 mm/page_vma_mapped.c } while (pte_none(*pvmw->pte));
pvmw 241 mm/page_vma_mapped.c if (!pvmw->ptl) {
pvmw 242 mm/page_vma_mapped.c pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
pvmw 243 mm/page_vma_mapped.c spin_lock(pvmw->ptl);
pvmw 259 mm/page_vma_mapped.c struct page_vma_mapped_walk pvmw = {
pvmw 271 mm/page_vma_mapped.c pvmw.address = max(start, vma->vm_start);
pvmw 272 mm/page_vma_mapped.c if (!page_vma_mapped_walk(&pvmw))
pvmw 274 mm/page_vma_mapped.c page_vma_mapped_walk_done(&pvmw);
pvmw 758 mm/rmap.c struct page_vma_mapped_walk pvmw = {
pvmw 765 mm/rmap.c while (page_vma_mapped_walk(&pvmw)) {
pvmw 766 mm/rmap.c address = pvmw.address;
pvmw 769 mm/rmap.c page_vma_mapped_walk_done(&pvmw);
pvmw 774 mm/rmap.c if (pvmw.pte) {
pvmw 776 mm/rmap.c pvmw.pte)) {
pvmw 790 mm/rmap.c pvmw.pmd))
pvmw 887 mm/rmap.c struct page_vma_mapped_walk pvmw = {
pvmw 905 mm/rmap.c while (page_vma_mapped_walk(&pvmw)) {
pvmw 908 mm/rmap.c address = pvmw.address;
pvmw 909 mm/rmap.c if (pvmw.pte) {
pvmw 911 mm/rmap.c pte_t *pte = pvmw.pte;
pvmw 924 mm/rmap.c pmd_t *pmd = pvmw.pmd;
pvmw 1345 mm/rmap.c struct page_vma_mapped_walk pvmw = {
pvmw 1390 mm/rmap.c while (page_vma_mapped_walk(&pvmw)) {
pvmw 1393 mm/rmap.c if (!pvmw.pte && (flags & TTU_MIGRATION)) {
pvmw 1396 mm/rmap.c set_pmd_migration_entry(&pvmw, page);
pvmw 1417 mm/rmap.c page_vma_mapped_walk_done(&pvmw);
pvmw 1425 mm/rmap.c VM_BUG_ON_PAGE(!pvmw.pte, page);
pvmw 1427 mm/rmap.c subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
pvmw 1428 mm/rmap.c address = pvmw.address;
pvmw 1431 mm/rmap.c if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
pvmw 1453 mm/rmap.c page_vma_mapped_walk_done(&pvmw);
pvmw 1464 mm/rmap.c pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);
pvmw 1475 mm/rmap.c set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
pvmw 1493 mm/rmap.c pvmw.pte)) {
pvmw 1495 mm/rmap.c page_vma_mapped_walk_done(&pvmw);
pvmw 1501 mm/rmap.c flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
pvmw 1511 mm/rmap.c pteval = ptep_get_and_clear(mm, address, pvmw.pte);
pvmw 1515 mm/rmap.c pteval = ptep_clear_flush(vma, address, pvmw.pte);
pvmw 1530 mm/rmap.c pvmw.pte, pteval,
pvmw 1534 mm/rmap.c set_pte_at(mm, address, pvmw.pte, pteval);
pvmw 1558 mm/rmap.c set_pte_at(mm, address, pvmw.pte, pteval);
pvmw 1560 mm/rmap.c page_vma_mapped_walk_done(&pvmw);
pvmw 1574 mm/rmap.c set_pte_at(mm, address, pvmw.pte, swp_pte);
pvmw 1592 mm/rmap.c page_vma_mapped_walk_done(&pvmw);
pvmw 1610 mm/rmap.c set_pte_at(mm, address, pvmw.pte, pteval);
pvmw 1613 mm/rmap.c page_vma_mapped_walk_done(&pvmw);
pvmw 1618 mm/rmap.c set_pte_at(mm, address, pvmw.pte, pteval);
pvmw 1620 mm/rmap.c page_vma_mapped_walk_done(&pvmw);
pvmw 1624 mm/rmap.c set_pte_at(mm, address, pvmw.pte, pteval);
pvmw 1626 mm/rmap.c page_vma_mapped_walk_done(&pvmw);
pvmw 1640 mm/rmap.c set_pte_at(mm, address, pvmw.pte, swp_pte);
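For orientation, the call sites indexed above all follow one pattern: fill in a struct page_vma_mapped_walk with the page, VMA, and start address, loop on page_vma_mapped_walk(), and call page_vma_mapped_walk_done() when bailing out of the loop early. Below is a minimal sketch of that pattern, modeled on page_mapped_in_vma() (mm/page_vma_mapped.c, entries 259-274) and the mm/page_idle.c walk; the helper name example_page_mapped_in_vma() is hypothetical, not a function from this tree.

```c
#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical caller: returns true if @page is mapped at @address in @vma. */
static bool example_page_mapped_in_vma(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = 0,	/* optionally PVMW_SYNC and/or PVMW_MIGRATION */
	};

	/*
	 * Each true return leaves pvmw.ptl held and either pvmw.pte pointing
	 * at a PTE that maps the page, or, for a PMD-mapped THP, pvmw.pte
	 * NULL with pvmw.pmd pointing at the mapping. A false return means
	 * the walk is finished and all locks have already been dropped.
	 */
	while (page_vma_mapped_walk(&pvmw)) {
		/* Breaking out early: drop the PTE mapping and ptl ourselves. */
		page_vma_mapped_walk_done(&pvmw);
		return true;
	}
	return false;
}
```

Note the asymmetry visible in the index: every early exit inside the loop at a call site (kernel/events/uprobes.c:212, mm/ksm.c:1103, the mm/rmap.c sites) pairs with a page_vma_mapped_walk_done() call, whereas loops that run to completion rely on not_found() in mm/page_vma_mapped.c doing that cleanup before page_vma_mapped_walk() returns false.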