/linux-4.1.27/include/linux/ |
D | topology.h | 73 #ifndef numa_node_id
                 75 static inline int numa_node_id(void) in numa_node_id() function
                 105 #ifndef numa_node_id
                 106 static inline int numa_node_id(void) in numa_node_id() function
                 128 _node_numa_mem_[numa_node_id()] = node; in set_numa_mem()
                 168 return numa_node_id(); in numa_mem_id()
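The generic fallback that most call sites in this list resolve to lives in include/linux/topology.h. A minimal sketch of that fallback, assuming the non-per-CPU configuration (the 4.1 tree also has a CONFIG_USE_PERCPU_NUMA_NODE_ID variant that reads a per-CPU variable instead):

    /* Sketch of the generic fallback: map the executing CPU to its node.
     * Architectures may override this (see the x86 entry below). */
    #ifndef numa_node_id
    static inline int numa_node_id(void)
    {
            return cpu_to_node(raw_smp_processor_id());
    }
    #endif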
|
D | gfp.h | 308 nid = numa_node_id(); in alloc_pages_node()
            333 alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
            336 alloc_pages_node(numa_node_id(), gfp_mask, order)
            344 alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
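The gfp.h hits show the default-to-local-node pattern: when no node is given, the allocator falls back to numa_node_id(). A hedged sketch of how a caller might request a node-local page (hypothetical helper, mirroring line 336 above):

    #include <linux/gfp.h>
    #include <linux/topology.h>

    /* Hypothetical helper: allocate one page on the node the caller is
     * currently running on, as alloc_pages() does via alloc_pages_node(). */
    static struct page *alloc_local_page(gfp_t gfp_mask)
    {
            return alloc_pages_node(numa_node_id(), gfp_mask, 0);
    }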
|
/linux-4.1.27/arch/x86/include/asm/ |
D | topology.h | 100 static inline int numa_node_id(void) in numa_node_id() function
                 107 #define numa_node_id numa_node_id macro
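This x86 override appears to be the !CONFIG_NUMA stub: it supplies its own helper and defines a same-name macro so the generic fallback in include/linux/topology.h is skipped. A sketch of that pattern, assuming these lines are the non-NUMA branch:

    /* Sketch of the x86 non-NUMA override: everything is node 0, and the
     * macro signals "already defined" to the generic topology.h code. */
    static inline int numa_node_id(void)
    {
            return 0;
    }
    #define numa_node_id numa_node_id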
|
/linux-4.1.27/arch/sh/kernel/ |
D | topology.c | 76 if (i != numa_node_id()) in topology_init()
|
/linux-4.1.27/arch/metag/kernel/ |
D | topology.c | 72 if (i != numa_node_id()) in topology_init()
|
/linux-4.1.27/mm/ |
D | quicklist.c | 29 int node = numa_node_id(); in max_pages()
|
D | mempolicy.c | 134 node = numa_node_id(); in get_task_policy()
                  354 current->il_next = numa_node_id(); in mpol_rebind_nodemask()
                  1746 return numa_node_id(); in offset_il_node()
                  1822 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); in huge_zonelist()
                  1858 nid = numa_node_id(); in init_nodemask_of_mempolicy()
                  2059 policy_zonelist(gfp, pol, numa_node_id()), in alloc_pages_current()
                  2279 polnid = numa_node_id(); in mpol_misplaced()
                  2294 node_zonelist(numa_node_id(), GFP_HIGHUSER), in mpol_misplaced()
|
D | vmstat.c | 503 if (zone_to_nid(zone) == numa_node_id()) { in refresh_cpu_vm_stats()
               589 preferred_zone->node : numa_node_id())) in zone_statistics()
|
D | vmscan.c | 3490 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); in shrink_all_memory()
               3739 if (node_state(node_id, N_CPU) && node_id != numa_node_id()) in zone_reclaim()
|
D | vmalloc.c | 825 node = numa_node_id(); in new_vmap_block()
|
D | page_alloc.c | 2740 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); in __alloc_pages_slowpath()
                   3142 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); in nr_free_zone_pages()
|
D | huge_memory.c | 1266 int page_nid = -1, this_nid = numa_node_id(); in do_huge_pmd_numa_page()
|
D | memory.c | 3123 if (page_nid == numa_node_id()) { in numa_migrate_prep()
|
D | memcontrol.c | 1680 node = numa_node_id(); in mem_cgroup_select_victim_node()
|
/linux-4.1.27/arch/sh/kernel/cpu/irq/ |
D | ipr.c | 69 res = irq_alloc_desc_at(p->irq, numa_node_id()); in register_ipr_controller()
|
/linux-4.1.27/arch/sh/boards/mach-se/7724/ |
D | irq.c | 127 SE7724_FPGA_IRQ_NR, numa_node_id()); in init_se7724_IRQ()
|
/linux-4.1.27/arch/arm/mach-imx/ |
D | avic.c | 178 irq_base = irq_alloc_descs(-1, 0, AVIC_NUM_IRQS, numa_node_id()); in mxc_init_irq()
|
D | 3ds_debugboard.c | 189 irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id()); in mxc_expio_init()
|
D | tzic.c | 184 irq_base = irq_alloc_descs(-1, 0, TZIC_NUM_IRQS, numa_node_id()); in tzic_init_irq()
|
D | mach-mx31ads.c | 232 irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id()); in mx31ads_init_expio()
|
/linux-4.1.27/arch/ia64/sn/kernel/ |
D | setup.c | 682 cnodeid_to_nasid(numa_node_id() == in sn_cpu_init()
              683 num_online_nodes() - 1 ? 0 : numa_node_id() + 1); in sn_cpu_init()
|
/linux-4.1.27/arch/ia64/kernel/ |
D | uncached.c | 195 starting_nid = numa_node_id(); in uncached_alloc_page()
|
/linux-4.1.27/drivers/irqchip/ |
D | irq-clps711x.c | 185 err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id()); in _clps711x_intc_init()
|
D | irq-hip04.c | 401 irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id()); in hip04_of_init()
|
D | irq-gic.c | 976 numa_node_id()); in gic_init_bases()
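These irqchip drivers (like the mach-imx, gpio and sh entries elsewhere in this list) all allocate their interrupt descriptors on the node of the CPU running the init code. A hedged sketch with hypothetical names, mirroring the calls listed above:

    #include <linux/irq.h>
    #include <linux/kernel.h>
    #include <linux/topology.h>

    #define MY_INTC_NR_IRQS 32  /* hypothetical controller size */

    static int my_intc_alloc_descs(void)
    {
            /* irq = -1: let the core pick a free range, searching from 0;
             * descriptors are allocated on the local NUMA node. */
            int irq_base = irq_alloc_descs(-1, 0, MY_INTC_NR_IRQS,
                                           numa_node_id());

            if (irq_base < 0)
                    pr_err("my_intc: failed to allocate irq descriptors\n");
            return irq_base;
    }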
|
/linux-4.1.27/drivers/sh/intc/ |
D | virq.c | 222 irq = irq_alloc_desc(numa_node_id()); in intc_subgroup_map()
|
/linux-4.1.27/drivers/char/ |
D | mspec.c | 209 maddr = uncached_alloc_page(numa_node_id(), 1); in mspec_fault()
|
/linux-4.1.27/drivers/gpio/ |
D | gpio-mxs.c | 308 irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id()); in mxs_gpio_probe()
|
D | gpio-mxc.c | 465 irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id()); in mxc_gpio_probe()
|
/linux-4.1.27/drivers/dma/ipu/ |
D | ipu_irq.c | 358 numa_node_id()); in ipu_irq_attach_irq()
|
/linux-4.1.27/arch/sparc/mm/ |
D | tsb.c | 360 gfp_flags, numa_node_id()); in tsb_grow()
|
/linux-4.1.27/net/core/ |
D | dev.c | 2004 int cpu, numa_node_id = -2; in netif_set_xps_queue() local
            2048 if (numa_node_id == -2) in netif_set_xps_queue()
            2049 numa_node_id = cpu_to_node(cpu); in netif_set_xps_queue()
            2050 else if (numa_node_id != cpu_to_node(cpu)) in netif_set_xps_queue()
            2051 numa_node_id = -1; in netif_set_xps_queue()
            2081 (numa_node_id >= 0) ? numa_node_id : in netif_set_xps_queue()
|
D | pktgen.c | 2682 int node = numa_node_id(); in pktgen_finalize_skb()
               2732 int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id(); in pktgen_alloc_skb()
|
/linux-4.1.27/Documentation/vm/ |
D | numa | 135 numa_node_id() or CPU_to_node() functions and then request memory from only
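The documentation line above summarizes the idiom the rest of this list instantiates: look up the local node first, then constrain the allocation to it. A hedged sketch of such a strictly node-local allocation (hypothetical helper, using kmalloc_node() with __GFP_THISNODE):

    #include <linux/slab.h>
    #include <linux/topology.h>

    /* Hypothetical: allocate a buffer on the local node only, failing
     * instead of falling back to a remote node. */
    static void *alloc_strictly_local(size_t size)
    {
            return kmalloc_node(size, GFP_KERNEL | __GFP_THISNODE,
                                numa_node_id());
    }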
|
/linux-4.1.27/arch/sparc/kernel/ |
D | irq_64.c | 245 irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL); in irq_alloc()
|
/linux-4.1.27/net/openvswitch/ |
D | flow.c | 72 int node = numa_node_id(); in ovs_flow_stats_update()
|
/linux-4.1.27/drivers/pci/ |
D | pci-driver.c | 350 if (node >= 0 && node != numa_node_id()) { in pci_call_probe()
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
D | qib_init.c | 127 local_node_id = numa_node_id(); in qib_create_ctxts()
|
D | qib_file_ops.c | 1322 numa_node_id()) : dd->assigned_node_id; in setup_ctxt()
|
/linux-4.1.27/arch/ia64/hp/common/ |
D | sba_iommu.c | 1147 node = numa_node_id(); in sba_alloc_coherent()
|
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/ |
D | i40e_txrx.c | 1006 const int current_node = numa_node_id(); in i40e_clean_rx_irq_ps()
|
/linux-4.1.27/drivers/net/ethernet/intel/i40e/ |
D | i40e_txrx.c | 1523 const int current_node = numa_node_id(); in i40e_clean_rx_irq_ps()
|