Searched refs:numa_mem_id (Results 1 – 13 of 13) sorted by relevance
139 #ifndef numa_mem_id
141 static inline int numa_mem_id(void) in numa_mem_id() function
164 #ifndef numa_mem_id
166 static inline int numa_mem_id(void) in numa_mem_id() function
440 nid = numa_mem_id(); in alloc_pages_node()
699 struct kmem_cache_node *n = get_node(cachep, numa_mem_id()); in recheck_pfmemalloc_active()
754 n = get_node(cachep, numa_mem_id()); in __ac_get_obj()
1022 int node = numa_mem_id(); in cache_free_alien()
2039 cachep->node[numa_mem_id()]->next_reap = in setup_cpu_cache()
2300 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); in check_spinlock_acquired()
2327 int node = numa_mem_id(); in do_drain()
2768 node = numa_mem_id(); in cache_alloc_refill()
2844 node = numa_mem_id(); in cache_alloc_refill()
2984 nid_alloc = nid_here = numa_mem_id(); in alternate_node_alloc()
3052 page = kmem_getpages(cache, local_flags, numa_mem_id()); in fallback_alloc()
[all …]
1694 int node = numa_mem_id(); in mempolicy_slab_node()
1779 searchnode = numa_mem_id(); in get_partial()
190 nid = numa_mem_id(); in devm_memremap_pages()
146 architectures transparently, kernel subsystems can use the numa_mem_id()
219 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in fm10k_page_is_reserved()
945 const int current_node = numa_mem_id(); in i40e_clean_rx_irq_ps()
1472 const int current_node = numa_mem_id(); in i40e_clean_rx_irq_ps()
751 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in ixgbevf_page_is_reserved()
6596 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in igb_page_is_reserved()
1872 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); in ixgbe_page_is_reserved()