page_to_nid: identifier references in linux-4.1.27

/linux-4.1.27/mm/
D | page_isolation.c | 303 nodemask_t src = nodemask_of_node(page_to_nid(page)); in alloc_migrate_target()
D | page_isolation.c | 307 next_node(page_to_nid(page), dst)); in alloc_migrate_target()
|
D | page_ext.c | 104 base = NODE_DATA(page_to_nid(page))->node_page_ext; in lookup_page_ext()
D | page_ext.c | 115 offset = pfn - round_down(node_start_pfn(page_to_nid(page)), in lookup_page_ext()
|
D | mm_init.c | 136 BUG_ON(page_to_nid(page) != nid); in mminit_verify_page_links()
|
D | list_lru.c | 97 int nid = page_to_nid(virt_to_page(item)); in list_lru_add()
D | list_lru.c | 116 int nid = page_to_nid(virt_to_page(item)); in list_lru_del()
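list_lru keeps objects on per-node lists, and the node is derived from the object's own address: virt_to_page() recovers the backing struct page, page_to_nid() the node it lives on. A condensed sketch of list_lru_add() under that assumption (memcg handling elided, field names simplified):

    bool list_lru_add(struct list_lru *lru, struct list_head *item)
    {
            /* The object's backing page tells us which node owns it. */
            int nid = page_to_nid(virt_to_page(item));
            struct list_lru_node *nlru = &lru->node[nid];

            spin_lock(&nlru->lock);
            if (list_empty(item)) {
                    list_add_tail(item, &nlru->lru.list);
                    nlru->lru.nr_items++;
                    spin_unlock(&nlru->lock);
                    return true;
            }
            spin_unlock(&nlru->lock);
            return false;
    }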
|
D | migrate.c | 978 *result = page_to_nid(newpage); in unmap_and_move()
D | migrate.c | 1076 *result = page_to_nid(new_hpage); in unmap_and_move_huge_page()
D | migrate.c | 1243 err = page_to_nid(page); in do_move_page_to_node_array()
D | migrate.c | 1409 err = page_to_nid(page); in do_pages_stat_array()
|
D | sparse.c | 45 int page_to_nid(const struct page *page) in page_to_nid() function
D | sparse.c | 49 EXPORT_SYMBOL(page_to_nid);
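Lines 45 and 49 are the out-of-line definition used when the node number does not fit into page->flags (NODE_NOT_IN_PAGE_FLAGS): the node is then recovered from a per-section lookup table. The definition in mm/sparse.c is essentially:

    #ifdef NODE_NOT_IN_PAGE_FLAGS
    /*
     * If the node number was not squeezed into page->flags, look it up
     * in a table indexed by the page's memory section.
     */
    #if MAX_NUMNODES <= 256
    static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
    #else
    static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
    #endif

    int page_to_nid(const struct page *page)
    {
            return section_to_node_table[page_to_section(page)];
    }
    EXPORT_SYMBOL(page_to_nid);
    #endif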
|
D | slub.c | 1070 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
D | slub.c | 1427 inc_slabs_node(s, page_to_nid(page), page->objects); in new_slab()
D | slub.c | 1526 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
D | slub.c | 1821 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
D | slub.c | 1979 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
D | slub.c | 2142 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) in node_match()
D | slub.c | 2619 n = get_node(s, page_to_nid(page)); in __slab_free()
D | slub.c | 2934 if (page_to_nid(page) != node) { in early_kmem_cache_node_alloc()
D | slub.c | 4049 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
D | slub.c | 4080 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
[all …]
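Most of the slub.c hits follow one pattern: map a slab page back to its per-node bookkeeping with get_node(s, page_to_nid(page)). The node_match() check at line 2142 is the allocation fast path's locality test; in this tree it reads roughly:

    static inline int node_match(struct page *page, int node)
    {
    #ifdef CONFIG_NUMA
            /* A NULL page or a page on the wrong node forces the slow path. */
            if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
                    return 0;
    #endif
            return 1;
    }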
|
D | hugetlb.c | 591 int nid = page_to_nid(page); in enqueue_huge_page()
D | hugetlb.c | 902 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
D | hugetlb.c | 964 int nid = page_to_nid(page); in free_huge_page()
D | hugetlb.c | 1180 int nid = page_to_nid(page); in dissolve_free_huge_page()
D | hugetlb.c | 1264 r_nid = page_to_nid(page); in alloc_buddy_huge_page()
D | hugetlb.c | 1598 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
D | hugetlb.c | 1683 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
D | hugetlb.c | 3918 int nid = page_to_nid(hpage); in dequeue_hwpoisoned_huge_page()
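hugetlb keeps its free lists and counters per node, all keyed by page_to_nid(). enqueue_huge_page() at line 591 is representative; a sketch matching this tree:

    static void enqueue_huge_page(struct hstate *h, struct page *page)
    {
            int nid = page_to_nid(page);

            /* Return the page to the free list of the node it lives on. */
            list_move(&page->lru, &h->hugepage_freelists[nid]);
            h->free_huge_pages++;
            h->free_huge_pages_node[nid]++;
    }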
|
D | memory-failure.c | 246 drop_slab_node(page_to_nid(p)); in shake_page()
D | memory-failure.c | 1501 int nid = page_to_nid(p); in new_page()
|
D | mempolicy.c | 513 nid = page_to_nid(page); in queue_pages_pte_range()
D | mempolicy.c | 542 nid = page_to_nid(page); in queue_pages_hugetlb()
D | mempolicy.c | 826 err = page_to_nid(p); in lookup_node()
D | mempolicy.c | 2254 int curnid = page_to_nid(page); in mpol_misplaced()
|
D | slob.c | 291 if (node != NUMA_NO_NODE && page_to_nid(sp) != node) in slob_alloc()
|
D | huge_memory.c | 999 vma, address, page_to_nid(page)); in do_huge_pmd_wp_page_fallback()
D | huge_memory.c | 1294 page_nid = page_to_nid(page); in do_huge_pmd_numa_page()
D | huge_memory.c | 2607 node = page_to_nid(page); in khugepaged_scan_pmd()
|
D | slab.c | 1020 int page_node = page_to_nid(virt_to_page(objp)); in cache_free_alien()
D | slab.c | 2557 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); in slab_get_obj()
D | slab.c | 2571 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); in slab_put_obj()
D | slab.c | 3068 nid = page_to_nid(page); in fallback_alloc()
|
D | page_alloc.c | 1110 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
D | page_alloc.c | 4143 if (page_to_nid(page) != zone_to_nid(zone)) in setup_zone_migrate_reserve()
D | page_alloc.c | 6344 if (!node_online(page_to_nid(page))) in is_pageblock_removable_nolock()
|
D | ksm.c | 1364 page_to_nid(tree_page) != nid) { in unstable_tree_search_insert()
|
D | vmalloc.c | 2607 counters[page_to_nid(v->pages[nr])]++; in show_numa_info()
|
D | memcontrol.c | 602 int nid = page_to_nid(page); in mem_cgroup_page_zoneinfo()
D | memcontrol.c | 617 int nid = page_to_nid(page); in soft_limit_tree_from_page()
|
D | memory.c | 3195 page_nid = page_to_nid(page); in do_numa_page()
|
/linux-4.1.27/include/asm-generic/ |
D | memory_model.h | 43 struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
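Under CONFIG_DISCONTIGMEM the page-to-PFN conversion itself goes through the node: the page's offset within its node's mem_map is added to the node's starting PFN. The surrounding macro in memory_model.h reads approximately:

    #define __page_to_pfn(pg)                                               \
    ({      const struct page *__pg = (pg);                                 \
            struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));     \
            (unsigned long)(__pg - __pgdat->node_mem_map) +                 \
             __pgdat->node_start_pfn;                                       \
    })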
|
/linux-4.1.27/arch/m68k/include/asm/ |
D | page_mm.h | 167 pgdat = &pg_data_map[page_to_nid(__p)]; \
|
/linux-4.1.27/include/linux/ |
D | mm.h | 757 extern int page_to_nid(const struct page *page);
D | mm.h | 759 static inline int page_to_nid(const struct page *page) in page_to_nid() function
D | mm.h | 835 return page_to_nid(page); /* XXX */ in page_cpupid_xchg_last()
D | mm.h | 840 return page_to_nid(page); /* XXX */ in page_cpupid_last()
D | mm.h | 880 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; in page_zone()
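mm.h holds both halves of the page_to_nid() story: the extern declaration (line 757) for the table-lookup variant defined in mm/sparse.c, and the common inline (line 759) that extracts the node bits stored in page->flags. page_zone() (line 880) then combines the node and zone indices. Approximately:

    #ifdef NODE_NOT_IN_PAGE_FLAGS
    extern int page_to_nid(const struct page *page);
    #else
    static inline int page_to_nid(const struct page *page)
    {
            /* The node id is encoded in a bitfield of page->flags. */
            return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
    }
    #endif

    static inline struct zone *page_zone(const struct page *page)
    {
            return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
    }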
|
D | mmzone.h | 1206 page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
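The mmzone.h hit is the generic pfn_to_nid(): convert the PFN to a struct page, then ask the page. Roughly:

    #define pfn_to_nid(pfn)                                         \
    ({                                                              \
            unsigned long __pfn_to_nid_pfn = (pfn);                 \
            page_to_nid(pfn_to_page(__pfn_to_nid_pfn));             \
    })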
|
/linux-4.1.27/fs/proc/ |
D | task_mmu.c | 1386 md->node[page_to_nid(page)] += nr_pages; in gather_stats()
D | task_mmu.c | 1405 nid = page_to_nid(page); in can_gather_numa_stats()
|
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c | 219 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in fm10k_page_is_reserved()
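The Intel Ethernet drivers below all apply the same rule when deciding whether an RX page may be recycled: reuse it only if it is local to the current node and did not come from the pfmemalloc emergency reserve. fm10k's helper, repeated nearly verbatim in ixgbevf, igb and ixgbe:

    static inline bool fm10k_page_is_reserved(struct page *page)
    {
            /* Remote pages and pfmemalloc pages are not worth recycling. */
            return (page_to_nid(page) != numa_mem_id()) ||
                   page_is_pfmemalloc(page);
    }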
|
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/ |
D | i40e_txrx.c | 1105 (page_to_nid(rx_bi->page) == current_node)) in i40e_clean_rx_irq_ps()
|
/linux-4.1.27/drivers/net/ethernet/intel/i40e/ |
D | i40e_txrx.c | 1630 (page_to_nid(rx_bi->page) == current_node)) in i40e_clean_rx_irq_ps()
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf_main.c | 768 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in ixgbevf_page_is_reserved()
|
/linux-4.1.27/drivers/net/ethernet/intel/igb/ |
D | igb_main.c | 6599 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in igb_page_is_reserved()
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_main.c | 1832 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in ixgbe_page_is_reserved()
|