Searched refs: page_mapcount (Results 1 – 25 of 25) sorted by relevance
/linux-4.1.27/mm/ |
D | huge_memory.c |
    1105  if (page_mapcount(page) == 1) {  in do_huge_pmd_wp_page()
    1414  VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);  in zap_huge_pmd()
    1655  BUG_ON(page_mapcount(page_tail) < 0);  in __split_huge_page_refcount()
    1656  tail_count += page_mapcount(page_tail);  in __split_huge_page_refcount()
    1673  atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,  in __split_huge_page_refcount()
    1767  BUG_ON(page_mapcount(page) != 1);  in __split_huge_page_map()
    1855  if (mapcount != page_mapcount(page)) {  in __split_huge_page()
    1857  mapcount, page_mapcount(page));  in __split_huge_page()
    1872  mapcount, mapcount2, page_mapcount(page));  in __split_huge_page()
    2235  VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);  in __collapse_huge_page_copy()
|
D | mlock.c |
    128  if (page_mapcount(page) > 1)  in __munlock_isolated_page()
    238  if (page_mapcount(page) <= 1 && page_evictable(page)) {  in __putback_lru_fast_prepare()
|
D | debug.c | 87 page, atomic_read(&page->_count), page_mapcount(page), in dump_page_badflags()
|
D | swap.c |
    134  VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);  in put_unrefcounted_compound_page()
    214  VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);  in put_refcounted_compound_page()
|
D | migrate.c |
    1252  if (page_mapcount(page) > 1 &&  in do_move_page_to_node_array()
    1672  if (page_mapcount(page) != 1 && page_is_file_cache(page) &&  in migrate_misplaced_page()
|
D | compaction.c | 756 page_count(page) > page_mapcount(page)) in isolate_migratepages_block()
|
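The compaction.c hit above relies on the gap between total references and mappings to spot pinned pages: page_count() counts every reference to the page (LRU isolation, page cache, get_user_pages() pins, in-flight I/O), while page_mapcount() counts only page-table mappings, so a surplus means someone other than the page tables still holds the page and migrating it now would be unsafe. A hedged restatement in kernel style; the helper name is invented for illustration and does not exist in 4.1:

    /*
     * Illustrative only: restates the check made at compaction.c:756
     * in isolate_migratepages_block().
     */
    static inline bool page_has_extra_refs(struct page *page)
    {
            /* refs beyond the page-table mappings => likely pinned */
            return page_count(page) > page_mapcount(page);
    }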
D | memory-failure.c | 1046 pfn, page_mapcount(ppage)); in hwpoison_user_mappings()
|
D | mempolicy.c |
    547  (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))  in queue_pages_hugetlb()
    933  if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {  in migrate_page_add()
|
D | rmap.c | 825 .mapcount = page_mapcount(page), in page_referenced()
|
D | shmem.c |
    1852  } else if (page_count(page) - page_mapcount(page) > 1) {  in shmem_tag_pins()
    1912  page_count(page) - page_mapcount(page) != 1) {  in shmem_wait_for_pins()
|
D | memory.c |
    1128  if (unlikely(page_mapcount(page) < 0))  in zap_pte_range()
    3191  if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))  in do_numa_page()
|
D | hugetlb.c |
    972  BUG_ON(page_mapcount(page));  in free_huge_page()
    2943  if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {  in hugetlb_cow()
|
D | ksm.c | 900 if (page_mapcount(page) + 1 + swapped != page_count(page)) { in write_protect_page()
|
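The ksm.c hit encodes a reference-accounting identity: before write-protecting a candidate page, write_protect_page() expects the total reference count to equal the page-table mappings, plus one for KSM's own reference on the page, plus one more if the page also sits in the swap cache. Anything else means a hidden pin (get_user_pages(), O_DIRECT, ...) and KSM must back off. A sketch of that arithmetic; only the condition itself is taken from the hit, the bail-out path is paraphrased:

    /*
     *   expected page_count = page_mapcount(page)  page-table mappings
     *                       + 1                    KSM's own reference
     *                       + swapped              swap-cache ref, if any
     */
    if (page_mapcount(page) + 1 + swapped != page_count(page))
            goto out_unlock;        /* illustrative label; real cleanup differs */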
D | page_alloc.c |
    679  if (unlikely(page_mapcount(page)))  in free_pages_check()
    959  if (unlikely(page_mapcount(page)))  in check_new_page()
|
D | swapfile.c | 890 count = page_mapcount(page); in reuse_swap_page()
|
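The reuse_swap_page() hit decides, on a copy-on-write fault, whether an anonymous page can simply be made writable again instead of copied. The swap.h entry further down this listing shows what appears to be the !CONFIG_SWAP stub, where that question collapses to a bare mapcount test; the real swapfile.c version starts from the same page_mapcount() value and, roughly speaking, also folds in swap-cache references before answering (paraphrase, not the 4.1 body):

    /* CONFIG_SWAP=n stub, verbatim from the swap.h:526 hit below */
    #define reuse_swap_page(page)   (page_mapcount(page) == 1)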
/linux-4.1.27/arch/xtensa/mm/ |
D | tlb.c | 245 page_mapcount(p)); in check_tlb_entry()
|
/linux-4.1.27/fs/proc/ |
D | page.c | 47 pcount = page_mapcount(ppage); in kpagecount_read()
|
D | task_mmu.c |
    463  mapcount = page_mapcount(page);  in smaps_account()
    1365  int count = page_mapcount(page);  in gather_stats()
|
/linux-4.1.27/include/linux/ |
D | swap.h | 526 #define reuse_swap_page(page) (page_mapcount(page) == 1)
|
D | mm.h |
    489  static inline int page_mapcount(struct page *page)  in page_mapcount() function
    525  VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);  in get_huge_page_tail()
|
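The mm.h entry is the definition every other hit in this listing relies on. In the 4.1-era tree, page_mapcount() is a thin wrapper over struct page's _mapcount field, which is biased so that -1 means "no page-table mappings"; adding one yields the natural count, which is why callers above treat 0 as unmapped, 1 as mapped exactly once, and a negative value as corruption worth a BUG. A minimal sketch reconstructed from memory rather than copied from the hit; verify against mm.h:489 before relying on details:

    static inline int page_mapcount(struct page *page)
    {
            /* _mapcount starts at -1, so +1 gives the number of mappings */
            return atomic_read(&page->_mapcount) + 1;
    }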
/linux-4.1.27/arch/tile/mm/ |
D | homecache.c | 369 BUG_ON(page_mapcount(page) != 0); in homecache_change_page_home()
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
D | vvp_page.c | 385 page_mapcount(vmpage), vmpage->private, in vvp_page_print()
|
/linux-4.1.27/fs/fuse/ |
D | dev.c |
    839  if (page_mapcount(page) ||  in fuse_check_page()
    850  …=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->m…  in fuse_check_page()
|
/linux-4.1.27/Documentation/vm/ |
D | unevictable-lru.txt | 579 notice this on return (page_mapcount(page) will be 0) and return
|
/linux-4.1.27/Documentation/cgroups/ |
D | memory.txt | 711 | page_mapcount(page) > 1). You must enable Swap Extension (see 2.4) to
|