/linux-4.1.27/include/asm-generic/

memory_model.h | 43 struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
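Under CONFIG_DISCONTIGMEM this hit is the core of the page-to-pfn conversion: page_to_nid() selects the owning node's pgdat, and the pfn is the page's offset within that node's mem_map plus the node's starting pfn. A rough reconstruction of the surrounding macro (a sketch from the 4.1 sources, not authoritative):

#define __page_to_pfn(pg)						\
({	const struct page *__pg = (pg);					\
	/* the page's node id picks the owning pglist_data */		\
	struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));	\
	/* offset into the node's mem_map, shifted by its first pfn */	\
	(unsigned long)(__pg - __pgdat->node_mem_map) +			\
	 __pgdat->node_start_pfn;					\
})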
/linux-4.1.27/arch/m68k/include/asm/

page_mm.h | 167 pgdat = &pg_data_map[page_to_nid(__p)]; \

/linux-4.1.27/mm/

page_isolation.c | 303 nodemask_t src = nodemask_of_node(page_to_nid(page)); alloc_migrate_target()
page_isolation.c | 307 next_node(page_to_nid(page), dst)); alloc_migrate_target()
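The two hits in alloc_migrate_target() implement "any node but the page's own" for hugepage migration: the source node's mask is complemented, and next_node() returns the first node after the source in that complement. A minimal standalone sketch of the pattern (pick_other_node() is a hypothetical name; the nodemask helpers are the real ones from linux/nodemask.h):

/* Hypothetical helper sketching the pattern above; not kernel code. */
static int pick_other_node(struct page *page)
{
	nodemask_t src = nodemask_of_node(page_to_nid(page));
	nodemask_t dst;

	nodes_complement(dst, src);	/* every node except the source */
	/* first set node after the source; MAX_NUMNODES if none remain */
	return next_node(page_to_nid(page), dst);
}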
mm_init.c | 136 BUG_ON(page_to_nid(page) != nid); mminit_verify_page_links()

list_lru.c | 97 int nid = page_to_nid(virt_to_page(item)); list_lru_add()
list_lru.c | 116 int nid = page_to_nid(virt_to_page(item)); list_lru_del()

page_ext.c | 104 base = NODE_DATA(page_to_nid(page))->node_page_ext; lookup_page_ext()
page_ext.c | 115 offset = pfn - round_down(node_start_pfn(page_to_nid(page)), lookup_page_ext()

sparse.c | 45 int page_to_nid(const struct page *page) page_to_nid() function
sparse.c | 49 EXPORT_SYMBOL(page_to_nid); variable
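These two hits are the out-of-line definition of page_to_nid(). It is compiled only when NODE_NOT_IN_PAGE_FLAGS is set, i.e. when the node id no longer fits in page->flags and must be looked up per memory section instead. The definition is essentially (paraphrasing the 4.1 sources):

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * The node number was not stored in page->flags, so map the page's
 * section number to the node that registered that section.
 */
int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);
#endif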
slub.c | 1070 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); free_debug_processing()
slub.c | 1427 inc_slabs_node(s, page_to_nid(page), page->objects); new_slab()
slub.c | 1526 dec_slabs_node(s, page_to_nid(page), page->objects); discard_slab()
slub.c | 1821 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); deactivate_slab()
slub.c | 1979 n2 = get_node(s, page_to_nid(page)); unfreeze_partials()
slub.c | 2142 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) node_match()
slub.c | 2619 n = get_node(s, page_to_nid(page)); __slab_free()
slub.c | 2934 if (page_to_nid(page) != node) { early_kmem_cache_node_alloc()
slub.c | 4049 node_set(page_to_nid(virt_to_page(track)), l->nodes); add_location()
slub.c | 4080 node_set(page_to_nid(virt_to_page(track)), l->nodes); add_location()
slub.c | 4284 node = page_to_nid(page); for_each_possible_cpu()
slub.c | 4297 node = page_to_nid(page); for_each_possible_cpu()

migrate.c | 978 *result = page_to_nid(newpage); unmap_and_move()
migrate.c | 1076 *result = page_to_nid(new_hpage); unmap_and_move_huge_page()
migrate.c | 1243 err = page_to_nid(page); do_move_page_to_node_array()
migrate.c | 1409 err = page_to_nid(page); do_pages_stat_array()

hugetlb.c | 591 int nid = page_to_nid(page); enqueue_huge_page()
hugetlb.c | 902 h->nr_huge_pages_node[page_to_nid(page)]--; update_and_free_page()
hugetlb.c | 964 int nid = page_to_nid(page); free_huge_page()
hugetlb.c | 1180 int nid = page_to_nid(page); dissolve_free_huge_page()
hugetlb.c | 1264 r_nid = page_to_nid(page); alloc_buddy_huge_page()
hugetlb.c | 1598 prep_new_huge_page(h, page, page_to_nid(page)); gather_bootmem_prealloc()
hugetlb.c | 1683 h->free_huge_pages_node[page_to_nid(page)]--; list_for_each_entry_safe()
hugetlb.c | 3918 int nid = page_to_nid(hpage); dequeue_hwpoisoned_huge_page()

mempolicy.c | 513 nid = page_to_nid(page); queue_pages_pte_range()
mempolicy.c | 542 nid = page_to_nid(page); queue_pages_hugetlb()
mempolicy.c | 826 err = page_to_nid(p); lookup_node()
mempolicy.c | 2254 int curnid = page_to_nid(page); mpol_misplaced()

slob.c | 291 if (node != NUMA_NO_NODE && page_to_nid(sp) != node) list_for_each_entry()

huge_memory.c | 999 vma, address, page_to_nid(page)); do_huge_pmd_wp_page_fallback()
huge_memory.c | 1294 page_nid = page_to_nid(page); do_huge_pmd_numa_page()
huge_memory.c | 2607 node = page_to_nid(page); khugepaged_scan_pmd()

memory-failure.c | 246 drop_slab_node(page_to_nid(p)); shake_page()
memory-failure.c | 1501 int nid = page_to_nid(p); new_page()

slab.c | 1020 int page_node = page_to_nid(virt_to_page(objp)); cache_free_alien()
slab.c | 2557 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); slab_get_obj()
slab.c | 2571 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); slab_put_obj()
slab.c | 3068 nid = page_to_nid(page);

page_alloc.c | 1110 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); move_freepages()
page_alloc.c | 4143 if (page_to_nid(page) != zone_to_nid(zone)) setup_zone_migrate_reserve()
page_alloc.c | 6344 if (!node_online(page_to_nid(page))) is_pageblock_removable_nolock()

ksm.c | 1364 page_to_nid(tree_page) != nid) { unstable_tree_search_insert()

vmalloc.c | 2607 counters[page_to_nid(v->pages[nr])]++; show_numa_info()

memcontrol.c | 602 int nid = page_to_nid(page); mem_cgroup_page_zoneinfo()
memcontrol.c | 617 int nid = page_to_nid(page); soft_limit_tree_from_page()

memory.c | 3195 page_nid = page_to_nid(page); do_numa_page()

/linux-4.1.27/include/linux/

mm.h | 757 extern int page_to_nid(const struct page *page);
mm.h | 759 static inline int page_to_nid(const struct page *page) page_to_nid() function
mm.h | 835 return page_to_nid(page); /* XXX */ page_cpupid_xchg_last()
mm.h | 840 return page_to_nid(page); /* XXX */ page_cpupid_last()
mm.h | 880 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; page_zone()
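Lines 757 and 759 are the two alternative forms that every caller in this listing resolves to: an extern prototype when the node id lives in the section table (NODE_NOT_IN_PAGE_FLAGS, see mm/sparse.c above), otherwise an inline that extracts the node id from the bitfield packed into page->flags. The inline variant is essentially (paraphrasing the 4.1 sources):

static inline int page_to_nid(const struct page *page)
{
	/* node id is a bitfield in page->flags, set when the page is
	 * initialized; recover it with a shift and a mask */
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}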
mmzone.h | 1206 page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \

/linux-4.1.27/arch/x86/include/asm/

pgtable.h | 524 * to linux/mm.h:page_to_nid())

/linux-4.1.27/fs/proc/

task_mmu.c | 1386 md->node[page_to_nid(page)] += nr_pages; gather_stats()
task_mmu.c | 1405 nid = page_to_nid(page); can_gather_numa_stats()

/linux-4.1.27/drivers/net/ethernet/intel/fm10k/

fm10k_main.c | 219 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); fm10k_page_is_reserved()
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/

i40e_txrx.c | 1105 (page_to_nid(rx_bi->page) == current_node)) i40e_clean_rx_irq_ps()

/linux-4.1.27/drivers/net/ethernet/intel/i40e/

i40e_txrx.c | 1630 (page_to_nid(rx_bi->page) == current_node)) i40e_clean_rx_irq_ps()

/linux-4.1.27/drivers/net/ethernet/intel/ixgbevf/

ixgbevf_main.c | 768 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); ixgbevf_page_is_reserved()

/linux-4.1.27/drivers/net/ethernet/intel/igb/

igb_main.c | 6599 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); igb_page_is_reserved()

/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/

ixgbe_main.c | 1832 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); ixgbe_page_is_reserved()
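Four of the Intel drivers above (fm10k, ixgbevf, igb, ixgbe) duplicate one recycling test: a received page may be reused for another DMA buffer only if it is local to the current CPU's memory node and did not come from pfmemalloc emergency reserves. The i40e/i40evf hits apply the same node-locality check inline in their RX cleanup loops. A hedged sketch of the shared shape (rx_page_is_reserved() is a stand-in name for the per-driver copies):

static inline bool rx_page_is_reserved(struct page *page)
{
	/* a remote page would make every later CPU access cross-node */
	if (page_to_nid(page) != numa_mem_id())
		return true;
	/* pfmemalloc pages must return to the emergency reserves */
	return page_is_pfmemalloc(page);
}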