/linux-4.4.14/include/asm-generic/
D | memory_model.h | 43 struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
|
/linux-4.4.14/mm/ |
D | page_ext.c | 108 base = NODE_DATA(page_to_nid(page))->node_page_ext; in lookup_page_ext()
               | 119 offset = pfn - round_down(node_start_pfn(page_to_nid(page)), in lookup_page_ext()
|
D | list_lru.c | 111 int nid = page_to_nid(virt_to_page(item)); in list_lru_add()
               | 130 int nid = page_to_nid(virt_to_page(item)); in list_lru_del()
|
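The list_lru.c entry above (like slab.c and slub.c further down) shows a recurring idiom: a directly-mapped kernel object is converted back to its struct page with virt_to_page(), and page_to_nid() then selects a per-node structure. A minimal sketch of that idiom, paraphrased from list_lru_add() rather than quoted verbatim, so field names may differ slightly from the 4.4.14 source:

    /* Map a kernel virtual address back to its page, then to its NUMA node,
     * and use the node id to index the per-node part of the list_lru.
     * (Paraphrased; "item" is any directly-mapped kernel object.)
     */
    int nid = page_to_nid(virt_to_page(item));
    struct list_lru_node *nlru = &lru->node[nid];
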
D | page_isolation.c | 286 int node = next_online_node(page_to_nid(page)); in alloc_migrate_target()
|
D | migrate.c | 997 *result = page_to_nid(newpage); in unmap_and_move()
              | 1102 *result = page_to_nid(new_hpage); in unmap_and_move_huge_page()
              | 1268 err = page_to_nid(page); in do_move_page_to_node_array()
              | 1430 err = page ? page_to_nid(page) : -ENOENT; in do_pages_stat_array()
|
D | sparse.c | 45 int page_to_nid(const struct page *page) in page_to_nid() function
             | 49 EXPORT_SYMBOL(page_to_nid);
|
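The two sparse.c hits (lines 45 and 49) are the out-of-line definition of page_to_nid() itself, used when CONFIG_SPARSEMEM is enabled but the node number does not fit into page->flags (NODE_NOT_IN_PAGE_FLAGS). A rough reconstruction of that path, paraphrased from memory and possibly differing in detail from the 4.4.14 source:

    /* With NODE_NOT_IN_PAGE_FLAGS the node id is kept in a per-section
     * table instead of in page->flags; in the real source the element
     * type depends on MAX_NUMNODES.
     */
    static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;

    int page_to_nid(const struct page *page)
    {
            return section_to_node_table[page_to_section(page)];
    }
    EXPORT_SYMBOL(page_to_nid);
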
D | slub.c | 1074 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
           | 1486 inc_slabs_node(s, page_to_nid(page), page->objects); in allocate_slab()
           | 1569 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
           | 1864 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
           | 2022 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
           | 2185 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) in node_match()
           | 2678 n = get_node(s, page_to_nid(page)); in __slab_free()
           | 3148 if (page_to_nid(page) != node) { in early_kmem_cache_node_alloc()
           | 4264 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
           | 4295 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
           | [all …]
|
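Among the slub.c hits, node_match() (line 2185) is a representative consumer: it decides whether the current per-CPU slab page can satisfy an allocation that asked for a specific node. An approximate reconstruction, with the surrounding structure recalled from memory rather than copied:

    static inline int node_match(struct page *page, int node)
    {
    #ifdef CONFIG_NUMA
            /* NUMA_NO_NODE means "any node is fine"; otherwise the slab
             * page must belong to the requested node.
             */
            if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
                    return 0;
    #endif
            return 1;
    }
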
D | hugetlb.c | 840 int nid = page_to_nid(page); in enqueue_huge_page()
              | 1150 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
              | 1211 int nid = page_to_nid(page); in free_huge_page()
              | 1420 int nid = page_to_nid(page); in dissolve_free_huge_page()
              | 1576 r_nid = page_to_nid(page); in __alloc_buddy_huge_page()
              | 2012 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
              | 2097 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
              | 4390 int nid = page_to_nid(hpage); in dequeue_hwpoisoned_huge_page()
|
D | memory-failure.c | 237 drop_slab_node(page_to_nid(p)); in shake_page()
                     | 1516 int nid = page_to_nid(p); in new_page()
|
D | mempolicy.c | 513 nid = page_to_nid(page); in queue_pages_pte_range()
                | 542 nid = page_to_nid(page); in queue_pages_hugetlb()
                | 823 err = page_to_nid(p); in lookup_node()
                | 2251 int curnid = page_to_nid(page); in mpol_misplaced()
|
D | slob.c | 291 if (node != NUMA_NO_NODE && page_to_nid(sp) != node) in slob_alloc()
|
D | huge_memory.c | 1068 vma, address, page_to_nid(page)); in do_huge_pmd_wp_page_fallback()
                  | 1363 page_nid = page_to_nid(page); in do_huge_pmd_numa_page()
                  | 2685 node = page_to_nid(page); in khugepaged_scan_pmd()
|
D | slab.c | 1021 int page_node = page_to_nid(virt_to_page(objp)); in cache_free_alien()
           | 2548 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); in slab_get_obj()
           | 2562 WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); in slab_put_obj()
           | 3059 nid = page_to_nid(page); in fallback_alloc()
|
D | ksm.c | 1383 page_to_nid(tree_page) != nid) { in unstable_tree_search_insert()
|
D | memcontrol.c | 477 int nid = page_to_nid(page); in mem_cgroup_page_zoneinfo()
                 | 492 int nid = page_to_nid(page); in soft_limit_tree_from_page()
|
D | vmalloc.c | 2608 counters[page_to_nid(v->pages[nr])]++; in show_numa_info()
|
D | page_alloc.c | 1510 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
                 | 6611 if (!node_online(page_to_nid(page))) in is_pageblock_removable_nolock()
|
D | memory.c | 3253 page_nid = page_to_nid(page); in do_numa_page()
|
/linux-4.4.14/arch/m68k/include/asm/ |
D | page_mm.h | 167 pgdat = &pg_data_map[page_to_nid(__p)]; \
|
/linux-4.4.14/include/linux/ |
D | mm.h | 719 extern int page_to_nid(const struct page *page);
         | 721 static inline int page_to_nid(const struct page *page) in page_to_nid() function
         | 797 return page_to_nid(page); /* XXX */ in page_cpupid_xchg_last()
         | 802 return page_to_nid(page); /* XXX */ in page_cpupid_last()
         | 842 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; in page_zone()
|
D | mmzone.h | 1151 page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
|
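The mm.h hits at lines 719 and 721 are the two alternative declarations of page_to_nid(): an extern prototype when the node id cannot be stored in page->flags (the mm/sparse.c definition above), and an inline bitfield extraction otherwise. A sketch of that pair, paraphrased and not guaranteed to match the 4.4.14 text exactly; the page_zone() hit at line 842 is shown as a typical caller:

    #ifdef NODE_NOT_IN_PAGE_FLAGS
    extern int page_to_nid(const struct page *page);
    #else
    static inline int page_to_nid(const struct page *page)
    {
            /* Node id is encoded in a bitfield of page->flags. */
            return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
    }
    #endif

    static inline struct zone *page_zone(const struct page *page)
    {
            return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
    }
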
/linux-4.4.14/fs/proc/ |
D | task_mmu.c | 1412 md->node[page_to_nid(page)] += nr_pages; in gather_stats()
               | 1431 nid = page_to_nid(page); in can_gather_numa_stats()
               | 1456 nid = page_to_nid(page); in can_gather_numa_stats_pmd()
|
/linux-4.4.14/drivers/net/ethernet/hisilicon/hns/ |
D | hns_enet.c | 320 if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) { in hns_nic_reuse_page()
               | 365 if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) in hns_nic_poll_rx_skb()
|
/linux-4.4.14/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_main.c | 219 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in fm10k_page_is_reserved()
|
/linux-4.4.14/drivers/net/ethernet/intel/i40evf/ |
D | i40e_txrx.c | 1045 (page_to_nid(rx_bi->page) == current_node)) in i40e_clean_rx_irq_ps()
|
/linux-4.4.14/drivers/net/ethernet/intel/i40e/ |
D | i40e_txrx.c | 1580 (page_to_nid(rx_bi->page) == current_node)) in i40e_clean_rx_irq_ps()
|
/linux-4.4.14/drivers/net/ethernet/intel/ixgbevf/ |
D | ixgbevf_main.c | 751 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in ixgbevf_page_is_reserved()
|
/linux-4.4.14/drivers/net/ethernet/intel/igb/ |
D | igb_main.c | 6596 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in igb_page_is_reserved()
|
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_main.c | 1872 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in ixgbe_page_is_reserved()
|
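The four Intel drivers above (fm10k, ixgbevf, igb, ixgbe) each carry an identically-shaped helper whose body appears verbatim in the hits: an RX page is treated as non-reusable if it no longer sits on the local memory node, or if it was allocated from the pfmemalloc emergency reserves. The hisilicon hns driver applies the same page_to_nid() == numa_node_id() test inline. A sketch of the shared pattern, using a generic name rather than any one driver's:

    /* Generic restatement of the drivers' *_page_is_reserved() helpers:
     * only recycle RX pages that are node-local and not pfmemalloc-backed.
     * (Name is illustrative; each driver prefixes it with its own name.)
     */
    static inline bool rx_page_is_reserved(struct page *page)
    {
            return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
    }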