next_cpu 347 arch/parisc/kernel/irq.c static int next_cpu = -1;
next_cpu 349 arch/parisc/kernel/irq.c next_cpu++; /* assign to "next" CPU we want this bugger on */
next_cpu 352 arch/parisc/kernel/irq.c while ((next_cpu < nr_cpu_ids) &&
next_cpu 353 arch/parisc/kernel/irq.c (!per_cpu(cpu_data, next_cpu).txn_addr ||
next_cpu 354 arch/parisc/kernel/irq.c !cpu_online(next_cpu)))
next_cpu 355 arch/parisc/kernel/irq.c next_cpu++;
next_cpu 357 arch/parisc/kernel/irq.c if (next_cpu >= nr_cpu_ids)
next_cpu 358 arch/parisc/kernel/irq.c next_cpu = 0; /* nothing else, assign monarch */
next_cpu 360 arch/parisc/kernel/irq.c return txn_affinity_addr(virt_irq, next_cpu);
next_cpu 49 arch/x86/platform/uv/uv_time.c int next_cpu;
next_cpu 169 arch/x86/platform/uv/uv_time.c head->next_cpu = -1;
next_cpu 186 arch/x86/platform/uv/uv_time.c head->next_cpu = -1;
next_cpu 195 arch/x86/platform/uv/uv_time.c head->next_cpu = bcpu;
next_cpu 219 arch/x86/platform/uv/uv_time.c int next_cpu;
next_cpu 223 arch/x86/platform/uv/uv_time.c next_cpu = head->next_cpu;
next_cpu 227 arch/x86/platform/uv/uv_time.c if (next_cpu < 0 || bcpu == next_cpu ||
next_cpu 228 arch/x86/platform/uv/uv_time.c expires < head->cpu[next_cpu].expires) {
next_cpu 229 arch/x86/platform/uv/uv_time.c head->next_cpu = bcpu;
next_cpu 259 arch/x86/platform/uv/uv_time.c if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
next_cpu 265 arch/x86/platform/uv/uv_time.c if (head->next_cpu == bcpu)
next_cpu 1389 block/blk-mq.c cpu_online(hctx->next_cpu)) {
next_cpu 1427 block/blk-mq.c int next_cpu = hctx->next_cpu;
next_cpu 1434 block/blk-mq.c next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
next_cpu 1436 block/blk-mq.c if (next_cpu >= nr_cpu_ids)
next_cpu 1437 block/blk-mq.c next_cpu = blk_mq_first_mapped_cpu(hctx);
next_cpu 1445 block/blk-mq.c if (!cpu_online(next_cpu)) {
next_cpu 1455 block/blk-mq.c hctx->next_cpu = next_cpu;
next_cpu 1460 block/blk-mq.c hctx->next_cpu = next_cpu;
next_cpu 1461 block/blk-mq.c return next_cpu;
next_cpu 2574 block/blk-mq.c hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
next_cpu 1052 drivers/irqchip/irq-gic-v3.c int next_cpu, cpu = *base_cpu;
next_cpu 1059 drivers/irqchip/irq-gic-v3.c next_cpu = cpumask_next(cpu, mask);
next_cpu 1060 drivers/irqchip/irq-gic-v3.c if (next_cpu >= nr_cpu_ids)
next_cpu 1062 drivers/irqchip/irq-gic-v3.c cpu = next_cpu;
next_cpu 1356 drivers/net/ethernet/mediatek/mtk_eth_soc.c u32 next_cpu = desc->txd2;
next_cpu 1381 drivers/net/ethernet/mediatek/mtk_eth_soc.c cpu = next_cpu;
next_cpu 24 include/linux/blk-mq.h int next_cpu;
next_cpu 416 kernel/smp.c int cpu, next_cpu, this_cpu = smp_processor_id();
next_cpu 445 kernel/smp.c next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
next_cpu 446 kernel/smp.c if (next_cpu == this_cpu)
next_cpu 447 kernel/smp.c next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
next_cpu 450 kernel/smp.c if (next_cpu >= nr_cpu_ids) {
next_cpu 192 kernel/time/clocksource.c int next_cpu, reset_pending;
next_cpu 293 kernel/time/clocksource.c next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
next_cpu 294 kernel/time/clocksource.c if (next_cpu >= nr_cpu_ids)
next_cpu 295 kernel/time/clocksource.c next_cpu = cpumask_first(cpu_online_mask);
next_cpu 303 kernel/time/clocksource.c add_timer_on(&watchdog_timer, next_cpu);
next_cpu 608 kernel/time/tick-broadcast.c int cpu, next_cpu = 0;
next_cpu 637 kernel/time/tick-broadcast.c next_cpu = cpu;
next_cpu 674 kernel/time/tick-broadcast.c tick_broadcast_set_event(dev, next_cpu, next_event);
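Most of the hits above (arch/parisc/kernel/irq.c, block/blk-mq.c, drivers/irqchip/irq-gic-v3.c, kernel/time/clocksource.c) share one idiom: scan upward from the previous choice for the next eligible CPU, and wrap around to the first eligible CPU when the scan falls off the end. A minimal userspace sketch of that idiom follows; NR_CPUS, cpu_eligible() and pick_next_cpu() are illustrative stand-ins for nr_cpu_ids and the per-site eligibility tests (cpu_online(), txn_addr, hctx->cpumask), not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8  /* stand-in for nr_cpu_ids */

/* Illustrative stand-in for the per-site eligibility test. */
static bool cpu_eligible(int cpu)
{
	return cpu != 3;  /* pretend CPU 3 is offline */
}

/* The shared idiom: advance past the previous choice, wrap to the
 * first eligible CPU when the scan runs off the end of the mask. */
static int pick_next_cpu(int prev)
{
	int cpu;

	for (cpu = prev + 1; cpu < NR_CPUS; cpu++)
		if (cpu_eligible(cpu))
			return cpu;
	for (cpu = 0; cpu < NR_CPUS; cpu++)   /* wrap around */
		if (cpu_eligible(cpu))
			return cpu;
	return -1;                            /* no eligible CPU left */
}

int main(void)
{
	int cpu = -1, i;

	for (i = 0; i < 10; i++) {
		cpu = pick_next_cpu(cpu);
		printf("round %d -> CPU %d\n", i, cpu);
	}
	return 0;
}

blk-mq caches the result in hctx->next_cpu so the next scan resumes where the last one stopped; the sketch models that by feeding the previous return value back in.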
next_cpu 3305 kernel/trace/trace.c int next_cpu = -1;
next_cpu 3335 kernel/trace/trace.c next_cpu = cpu;
next_cpu 3345 kernel/trace/trace.c *ent_cpu = next_cpu;
next_cpu 123 kernel/trace/trace_entries.h __field( unsigned int, next_cpu ) \
next_cpu 140 kernel/trace/trace_entries.h __entry->next_cpu),
next_cpu 160 kernel/trace/trace_entries.h __entry->next_cpu),
next_cpu 273 kernel/trace/trace_hwlat.c int next_cpu;
next_cpu 287 kernel/trace/trace_hwlat.c next_cpu = cpumask_next(smp_processor_id(), current_mask);
next_cpu 290 kernel/trace/trace_hwlat.c if (next_cpu >= nr_cpu_ids)
next_cpu 291 kernel/trace/trace_hwlat.c next_cpu = cpumask_first(current_mask);
next_cpu 293 kernel/trace/trace_hwlat.c if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
next_cpu 297 kernel/trace/trace_hwlat.c cpumask_set_cpu(next_cpu, current_mask);
next_cpu 355 kernel/trace/trace_hwlat.c int next_cpu;
next_cpu 365 kernel/trace/trace_hwlat.c next_cpu = cpumask_first(current_mask);
next_cpu 374 kernel/trace/trace_hwlat.c cpumask_set_cpu(next_cpu, current_mask);
next_cpu 912 kernel/trace/trace_output.c field->next_cpu,
next_cpu 946 kernel/trace/trace_output.c field->next_cpu,
next_cpu 982 kernel/trace/trace_output.c SEQ_PUT_HEX_FIELD(s, field->next_cpu);
next_cpu 1013 kernel/trace/trace_output.c SEQ_PUT_FIELD(s, field->next_cpu);
next_cpu 396 kernel/trace/trace_sched_wakeup.c entry->next_cpu = task_cpu(next);
next_cpu 424 kernel/trace/trace_sched_wakeup.c entry->next_cpu = task_cpu(wakee);
next_cpu 3899 net/core/dev.c struct rps_dev_flow *rflow, u16 next_cpu)
next_cpu 3901 net/core/dev.c if (next_cpu < nr_cpu_ids) {
next_cpu 3914 net/core/dev.c rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
next_cpu 3935 net/core/dev.c per_cpu(softnet_data, next_cpu).input_queue_head;
next_cpu 3938 net/core/dev.c rflow->cpu = next_cpu;
next_cpu 3986 net/core/dev.c u32 next_cpu;
next_cpu 3994 net/core/dev.c next_cpu = ident & rps_cpu_mask;
next_cpu 4013 net/core/dev.c if (unlikely(tcpu != next_cpu) &&
next_cpu 4017 net/core/dev.c tcpu = next_cpu;
next_cpu 4018 net/core/dev.c rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
next_cpu 163 tools/testing/selftests/bpf/test_lru_map.c int next_cpu = 0;
next_cpu 168 tools/testing/selftests/bpf/test_lru_map.c assert(sched_next_online(0, &next_cpu) != -1);
next_cpu 254 tools/testing/selftests/bpf/test_lru_map.c int next_cpu = 0;
next_cpu 263 tools/testing/selftests/bpf/test_lru_map.c assert(sched_next_online(0, &next_cpu) != -1);
next_cpu 331 tools/testing/selftests/bpf/test_lru_map.c int next_cpu = 0;
next_cpu 340 tools/testing/selftests/bpf/test_lru_map.c assert(sched_next_online(0, &next_cpu) != -1);
next_cpu 438 tools/testing/selftests/bpf/test_lru_map.c int next_cpu = 0;
next_cpu 447 tools/testing/selftests/bpf/test_lru_map.c assert(sched_next_online(0, &next_cpu) != -1);
next_cpu 501 tools/testing/selftests/bpf/test_lru_map.c int next_cpu = 0;
next_cpu 506 tools/testing/selftests/bpf/test_lru_map.c assert(sched_next_online(0, &next_cpu) != -1);
next_cpu 576 tools/testing/selftests/bpf/test_lru_map.c int next_cpu = 0;
next_cpu 592 tools/testing/selftests/bpf/test_lru_map.c while (sched_next_online(0, &next_cpu) != -1) {
next_cpu 625 tools/testing/selftests/bpf/test_lru_map.c int next_cpu = 0;
next_cpu 633 tools/testing/selftests/bpf/test_lru_map.c assert(sched_next_online(0, &next_cpu) != -1);
next_cpu 691 tools/testing/selftests/bpf/test_lru_map.c int next_cpu = 0;
next_cpu 696 tools/testing/selftests/bpf/test_lru_map.c assert(sched_next_online(0, &next_cpu) != -1);
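The net/core/dev.c hits are receive flow steering: next_cpu is the CPU the flow table wants, but the visible guard only moves a flow off its current CPU once that CPU's input_queue_head has caught up with the flow's last enqueue position, so packets cannot be delivered out of order. Below is a minimal userspace model of just that comparison; NR_CPUS, queue_head, struct flow and maybe_resteer() are illustrative stand-ins (the cpu_online() part of the kernel check is omitted), not kernel structures.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4                       /* stand-in for nr_cpu_ids */

static uint32_t queue_head[NR_CPUS];    /* stand-in for per-CPU input_queue_head */

struct flow {
	unsigned int cpu;               /* CPU currently handling this flow */
	uint32_t last_qtail;            /* queue position of the flow's last packet */
};

/* Move the flow to next_cpu only if the old CPU is invalid or its queue
 * head has advanced past the flow's last enqueued packet. */
static void maybe_resteer(struct flow *f, unsigned int next_cpu)
{
	unsigned int tcpu = f->cpu;

	if (tcpu != next_cpu &&
	    (tcpu >= NR_CPUS ||
	     (int32_t)(queue_head[tcpu] - f->last_qtail) >= 0))
		f->cpu = next_cpu;
}

int main(void)
{
	struct flow f = { .cpu = 0, .last_qtail = 10 };

	queue_head[0] = 5;              /* old CPU still has flow packets queued */
	maybe_resteer(&f, 2);
	printf("still on CPU %u\n", f.cpu);  /* 0: switch deferred */

	queue_head[0] = 10;             /* head caught up with the flow's tail */
	maybe_resteer(&f, 2);
	printf("now on CPU %u\n", f.cpu);    /* 2: safe to move */
	return 0;
}

The signed subtraction handles counter wrap-around the same way the kernel's head/tail comparisons do.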
next_cpu 787 tools/testing/selftests/bpf/test_lru_map.c int next_cpu = 0;
next_cpu 792 tools/testing/selftests/bpf/test_lru_map.c assert(sched_next_online(0, &next_cpu) != -1);
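Every test_lru_map.c hit follows one call pattern: int next_cpu = 0; then assert(sched_next_online(0, &next_cpu) != -1); to pin the test thread to successive online CPUs. The helper itself is not part of this index; the following is a hypothetical reconstruction based only on how these call sites use it, assuming it pins via sched_setaffinity() and advances the cursor.

#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>

/*
 * Hypothetical sched_next_online()-style helper. Assumed contract: pin
 * task `pid` (0 = calling thread) to the first usable CPU at or after
 * *next_to_try, advance the cursor past it, and return the pinned CPU,
 * or -1 once no CPU is left to try.
 */
static int sched_next_online(int pid, int *next_to_try)
{
	long nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	int next = *next_to_try;
	int ret = -1;
	cpu_set_t cpuset;

	while (next < nr_cpus) {
		CPU_ZERO(&cpuset);
		CPU_SET(next++, &cpuset);
		/* sched_setaffinity() fails for unusable CPUs; keep trying. */
		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
			ret = next - 1;
			break;
		}
	}

	*next_to_try = next;
	return ret;
}

int main(void)
{
	int next_cpu = 0;

	/* Matches the loop-shaped call site above: visit each CPU once. */
	while (sched_next_online(0, &next_cpu) != -1)
		;
	return 0;
}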