/linux-4.1.27/arch/x86/platform/uv/ |
D | uv_time.c |
     63  int next_cpu;                                            member
    183  head->next_cpu = -1;                                     in uv_rtc_allocate_timers()
    200  head->next_cpu = -1;                                     in uv_rtc_find_next_timer()
    209  head->next_cpu = bcpu;                                   in uv_rtc_find_next_timer()
    233  int next_cpu;                                            in uv_rtc_set_timer() local
    237  next_cpu = head->next_cpu;                               in uv_rtc_set_timer()
    241  if (next_cpu < 0 || bcpu == next_cpu ||                  in uv_rtc_set_timer()
    242      expires < head->cpu[next_cpu].expires) {             in uv_rtc_set_timer()
    243  head->next_cpu = bcpu;                                   in uv_rtc_set_timer()
    273  if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)  in uv_rtc_unset_timer()
    [all …]
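The uv_rtc_set_timer() lines above are a small front-runner election: each timer head records which CPU's timer expires first, so only that CPU needs to reprogram the shared RTC comparator. A minimal sketch of that bookkeeping; struct timer_head, struct timer_slot and note_new_expiry() are simplified stand-ins, not the real uv_rtc_timer_head code.

#include <linux/threads.h>
#include <linux/types.h>

struct timer_slot {
	u64 expires;			/* absolute expiry of this CPU's pending timer */
};

struct timer_head {
	int next_cpu;			/* CPU whose timer expires first, -1 if none pending */
	struct timer_slot cpu[NR_CPUS];	/* fixed-size layout, only for this sketch */
};

/* Record that @bcpu now has a timer expiring at @expires. */
void note_new_expiry(struct timer_head *head, int bcpu, u64 expires)
{
	int next_cpu = head->next_cpu;

	head->cpu[bcpu].expires = expires;

	/* @bcpu becomes the front-runner if nothing is pending, if it already
	 * was the front-runner, or if its new expiry is now the earliest one. */
	if (next_cpu < 0 || bcpu == next_cpu ||
	    expires < head->cpu[next_cpu].expires)
		head->next_cpu = bcpu;
}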
|
/linux-4.1.27/arch/parisc/kernel/ |
D | irq.c |
    351  static int next_cpu = -1;                                in txn_alloc_addr() local
    353  next_cpu++; /* assign to "next" CPU we want this bugger on */  in txn_alloc_addr()
    356  while ((next_cpu < nr_cpu_ids) &&                        in txn_alloc_addr()
    357         (!per_cpu(cpu_data, next_cpu).txn_addr ||         in txn_alloc_addr()
    358          !cpu_online(next_cpu)))                          in txn_alloc_addr()
    359      next_cpu++;                                          in txn_alloc_addr()
    361  if (next_cpu >= nr_cpu_ids)                              in txn_alloc_addr()
    362      next_cpu = 0;  /* nothing else, assign monarch */    in txn_alloc_addr()
    364  return txn_affinity_addr(virt_irq, next_cpu);            in txn_alloc_addr()
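txn_alloc_addr() hands interrupt transaction targets out round-robin: a static cursor advances on every allocation, skips CPUs that are offline or have no transaction address set up yet, and falls back to CPU 0 (the monarch) when it runs off the end. A condensed sketch of just that selection step; pick_next_txn_cpu() is an invented wrapper, the real function goes on to return txn_affinity_addr(virt_irq, next_cpu).

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <asm/processor.h>	/* parisc per-CPU cpu_data with its txn_addr field */

int pick_next_txn_cpu(void)
{
	static int next_cpu = -1;

	next_cpu++;	/* assign to the "next" CPU in line */

	/* skip CPUs that are offline or have no transaction address set up */
	while (next_cpu < nr_cpu_ids &&
	       (!per_cpu(cpu_data, next_cpu).txn_addr || !cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= nr_cpu_ids)
		next_cpu = 0;	/* nothing else usable: fall back to the monarch */

	return next_cpu;
}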
|
/linux-4.1.27/kernel/ |
D | smp.c |
    408  int cpu, next_cpu, this_cpu = smp_processor_id();        in smp_call_function_many() local
    429  next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask); in smp_call_function_many()
    430  if (next_cpu == this_cpu)                                in smp_call_function_many()
    431      next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);  in smp_call_function_many()
    434  if (next_cpu >= nr_cpu_ids) {                            in smp_call_function_many()
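These smp_call_function_many() hits are its fast-path probe: two cpumask_next_and() passes decide whether the mask contains zero, exactly one, or several online CPUs besides the caller, and the single-target case is bounced to the cheaper single-CPU IPI path. Below is that check pulled out into an illustrative helper; the helper name and its return convention are made up, only the cpumask calls mirror the real code.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/smp.h>

/*
 * Classify @mask for a caller that already has preemption disabled (as
 * smp_call_function_many() requires): return the lone target CPU, -ENXIO if
 * there is no other online CPU in the mask, or -EAGAIN if there are several.
 */
int single_ipi_target(const struct cpumask *mask)
{
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/* first online CPU in the mask that is not us */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		return -ENXIO;

	/* is there a second online CPU besides us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	if (next_cpu >= nr_cpu_ids)
		return cpu;	/* exactly one target: take the single-CPU path */

	return -EAGAIN;		/* several targets: send the IPI to the whole mask */
}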
|
/linux-4.1.27/kernel/trace/ |
D | trace_entries.h |
    121  __field( unsigned int, next_cpu ) \
    138  __entry->next_cpu),
    158  __entry->next_cpu),
|
D | trace_output.c |
    906  field->next_cpu,                                         in trace_ctxwake_print()
    940  field->next_cpu,                                         in trace_ctxwake_raw()
    976  SEQ_PUT_HEX_FIELD(s, field->next_cpu);                   in trace_ctxwake_hex()
   1007  SEQ_PUT_FIELD(s, field->next_cpu);                       in trace_ctxwake_bin()
|
D | trace_sched_wakeup.c |
    388  entry->next_cpu = task_cpu(next);                        in tracing_sched_switch_trace()
    416  entry->next_cpu = task_cpu(wakee);                       in tracing_sched_wakeup_trace()
|
D | trace.c |
   2317  int next_cpu = -1;                                       in __find_next_entry() local
   2347  next_cpu = cpu;                                          in __find_next_entry()
   2357  *ent_cpu = next_cpu;                                     in __find_next_entry()
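__find_next_entry() works out which per-CPU ring buffer currently holds the oldest event and reports that CPU through *ent_cpu. A simplified sketch of that min-scan; peek_ts() is a made-up stand-in for the real ring-buffer peek, and the extra accounting the real function does (CPU filtering, lost events) is left out.

#include <linux/cpumask.h>
#include <linux/types.h>

/*
 * Return the CPU whose buffer holds the oldest pending event, or -1 if every
 * buffer is empty. @peek_ts is an illustrative callback: it reports the
 * timestamp of @cpu's next event and sets *empty when there is none.
 */
int oldest_event_cpu(u64 (*peek_ts)(int cpu, bool *empty))
{
	int cpu, next_cpu = -1;
	u64 next_ts = 0;

	for_each_possible_cpu(cpu) {
		bool empty;
		u64 ts = peek_ts(cpu, &empty);

		if (empty)
			continue;

		/* remember the CPU with the smallest (earliest) timestamp */
		if (next_cpu < 0 || ts < next_ts) {
			next_cpu = cpu;
			next_ts = ts;
		}
	}

	return next_cpu;
}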
|
/linux-4.1.27/kernel/time/ |
D | clocksource.c |
    172  int next_cpu, reset_pending;                             in clocksource_watchdog() local
    269  next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);  in clocksource_watchdog()
    270  if (next_cpu >= nr_cpu_ids)                              in clocksource_watchdog()
    271      next_cpu = cpumask_first(cpu_online_mask);           in clocksource_watchdog()
    273  add_timer_on(&watchdog_timer, next_cpu);                 in clocksource_watchdog()
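clocksource_watchdog() never stays pinned to one CPU: before returning, it re-arms its timer on the online CPU after the current one, wrapping back to the first online CPU, so the watchdog checks cycle across the machine. A sketch of that re-arm step; wd_timer and wd_period are illustrative names rather than the real watchdog_timer state, and the timer is assumed to have been set up elsewhere.

#include <linux/cpumask.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/timer.h>

static struct timer_list wd_timer;	/* assumed initialized with setup_timer() elsewhere */

/* Re-arm the watchdog @wd_period jiffies from now, on the next online CPU. */
void rearm_on_next_cpu(unsigned long wd_period)
{
	int next_cpu;

	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);	/* wrap around */

	wd_timer.expires = jiffies + wd_period;
	add_timer_on(&wd_timer, next_cpu);	/* queue the next expiry on that CPU */
}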
|
D | tick-broadcast.c |
    582  int cpu, next_cpu = 0;                                   in tick_handle_oneshot_broadcast() local
    603  next_cpu = cpu;                                          in tick_handle_oneshot_broadcast()
    644  if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))  in tick_handle_oneshot_broadcast()
|
/linux-4.1.27/block/ |
D | blk-mq.c |
    886  int cpu = hctx->next_cpu, next_cpu;                      in blk_mq_hctx_next_cpu() local
    888  next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);  in blk_mq_hctx_next_cpu()
    889  if (next_cpu >= nr_cpu_ids)                              in blk_mq_hctx_next_cpu()
    890      next_cpu = cpumask_first(hctx->cpumask);             in blk_mq_hctx_next_cpu()
    892  hctx->next_cpu = next_cpu;                               in blk_mq_hctx_next_cpu()
    898  return hctx->next_cpu;                                   in blk_mq_hctx_next_cpu()
   1831  hctx->next_cpu = cpumask_first(hctx->cpumask);           in blk_mq_map_swqueue()
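blk_mq_hctx_next_cpu() keeps a per-hardware-context cursor that walks hctx->cpumask and wraps back to its first CPU, so queue runs rotate over the CPUs mapped to that context (blk_mq_map_swqueue() seeds the cursor with cpumask_first()). A stripped-down sketch of the rotation; struct hw_ctx_sketch stands in for blk_mq_hw_ctx, and the batching the full function applies before advancing the cursor is left out.

#include <linux/cpumask.h>

struct hw_ctx_sketch {
	int next_cpu;			/* cursor into @cpumask */
	cpumask_var_t cpumask;		/* CPUs mapped to this hardware context */
};

/* Advance the cursor to the next CPU of the context, wrapping inside its mask. */
int hw_ctx_next_cpu(struct hw_ctx_sketch *hctx)
{
	int next_cpu;

	next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(hctx->cpumask);	/* wrap within the mask */

	hctx->next_cpu = next_cpu;
	return hctx->next_cpu;
}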
|
/linux-4.1.27/include/linux/ |
D | blk-mq.h |
     31  int next_cpu;                                            member
|
/linux-4.1.27/net/core/ |
D | dev.c |
   3074  struct rps_dev_flow *rflow, u16 next_cpu)                in set_rps_cpu() argument
   3076  if (next_cpu < nr_cpu_ids) {                             in set_rps_cpu()
   3089  rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);  in set_rps_cpu()
   3110  per_cpu(softnet_data, next_cpu).input_queue_head;        in set_rps_cpu()
   3113  rflow->cpu = next_cpu;                                   in set_rps_cpu()
   3161  u32 next_cpu;                                            in get_rps_cpu() local
   3169  next_cpu = ident & rps_cpu_mask;                         in get_rps_cpu()
   3188  if (unlikely(tcpu != next_cpu) &&                        in get_rps_cpu()
   3192      tcpu = next_cpu;                                     in get_rps_cpu()
   3193  rflow = set_rps_cpu(dev, skb, rflow, next_cpu);          in get_rps_cpu()
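In get_rps_cpu(), the flow-table identifier carries (in its low bits, masked with rps_cpu_mask) the CPU on which the flow's consumer last ran; when that disagrees with the CPU the flow is currently steered to, set_rps_cpu() moves the flow and updates rflow->cpu. A simplified sketch of that decision; it leaves out the out-of-order guard on input_queue_head and the accelerated-RFS rxq remap, and the names here are illustrative, not the RPS API.

#include <linux/cpumask.h>
#include <linux/types.h>

struct flow_sketch {
	u16 cpu;			/* CPU the flow is currently steered to */
};

/*
 * Decide where to steer a packet of this flow: @ident is the flow-table entry,
 * @cpu_mask the equivalent of rps_cpu_mask. Returns the target CPU, or -1 if
 * no valid online CPU is recorded.
 */
int resteer_flow(struct flow_sketch *rflow, u32 ident, u32 cpu_mask)
{
	u32 next_cpu = ident & cpu_mask;	/* CPU recorded for the flow's consumer */
	u16 tcpu = rflow->cpu;

	/* the consumer moved (or the entry is stale): follow it */
	if (tcpu != next_cpu && next_cpu < nr_cpu_ids) {
		rflow->cpu = next_cpu;
		tcpu = next_cpu;
	}

	return (tcpu < nr_cpu_ids && cpu_online(tcpu)) ? tcpu : -1;
}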
|
/linux-4.1.27/kernel/sched/ |
D | sched.h |
   1187  void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
|
D | fair.c |
   4914  migrate_task_rq_fair(struct task_struct *p, int next_cpu)  in migrate_task_rq_fair() argument
|