Lines Matching refs:cpu (kernel/time/tick-sched.c)

Cross-reference of the identifier "cpu" in kernel/time/tick-sched.c. Each
entry gives the source line number, the matched line, the enclosing
function, and whether "cpu" is a function argument or a local variable at
that site. The line numbers correspond to a kernel of roughly the
v4.2-v4.4 era (hcpu-style hotplug notifiers, pr_warning() still in use).

44 struct tick_sched *tick_get_tick_sched(int cpu)  in tick_get_tick_sched()  argument
46 return &per_cpu(tick_cpu_sched, cpu); in tick_get_tick_sched()
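These first two hits are the whole accessor: tick_cpu_sched is the per-CPU
instance of struct tick_sched, and this helper resolves it for a given CPU.
The complete function, as confirmed by the matched lines:

struct tick_sched *tick_get_tick_sched(int cpu)
{
        return &per_cpu(tick_cpu_sched, cpu);
}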
114 int cpu = smp_processor_id(); in tick_sched_do_timer() local
125 && !tick_nohz_full_cpu(cpu)) in tick_sched_do_timer()
126 tick_do_timer_cpu = cpu; in tick_sched_do_timer()
130 if (tick_do_timer_cpu == cpu) in tick_sched_do_timer()
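The tick_sched_do_timer() hits are the jiffies-duty handoff: when no CPU
owns tick_do_timer_cpu (the owner dropped it before a long idle sleep) and
the current CPU is not nohz_full, it claims the duty; whoever owns it
updates jiffies. A sketch of the surrounding logic, reconstructed from
kernels of this era (comments paraphrased):

static void tick_sched_do_timer(ktime_t now)
{
        int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
        /*
         * Reclaim a dropped do_timer duty. nohz_full CPUs never take
         * it, so timekeeping stays on a housekeeping CPU.
         */
        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
            && !tick_nohz_full_cpu(cpu))
                tick_do_timer_cpu = cpu;
#endif

        /* Update jiffies if this CPU owns the duty. */
        if (tick_do_timer_cpu == cpu)
                tick_do_update_jiffies64(now);
}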
227 void tick_nohz_full_kick_cpu(int cpu) in tick_nohz_full_kick_cpu() argument
229 if (!tick_nohz_full_cpu(cpu)) in tick_nohz_full_kick_cpu()
232 irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); in tick_nohz_full_kick_cpu()
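tick_nohz_full_kick_cpu() forces a remote full dynticks CPU to re-evaluate
its tick: it queues that CPU's nohz_full_kick_work irq_work on it, and the
resulting IPI makes the target re-check whether it may keep its tick
stopped. The matched lines are the whole body apart from the early return:

void tick_nohz_full_kick_cpu(int cpu)
{
        if (!tick_nohz_full_cpu(cpu))
                return;

        irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}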
296 unsigned int cpu = (unsigned long)hcpu; in tick_nohz_cpu_down_callback() local
305 if (tick_nohz_full_running && tick_do_timer_cpu == cpu) in tick_nohz_cpu_down_callback()
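This is the pre-4.5 style CPU-hotplug notifier: the timekeeping CPU may not
be unplugged while nohz_full is running, since the full dynticks CPUs
depend on it for jiffies and timekeeping updates. A sketch, assuming the
hcpu notifier layout of those kernels:

static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
                                       unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                /*
                 * The timekeeping CPU works on behalf of all full
                 * dynticks CPUs; veto its removal.
                 */
                if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
                        return NOTIFY_BAD;
                break;
        }
        return NOTIFY_OK;
}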
330 int cpu; in tick_nohz_init() local
358 cpu = smp_processor_id(); in tick_nohz_init()
360 if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) { in tick_nohz_init()
361 pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu); in tick_nohz_init()
362 cpumask_clear_cpu(cpu, tick_nohz_full_mask); in tick_nohz_init()
368 for_each_cpu(cpu, tick_nohz_full_mask) in tick_nohz_init()
369 context_tracking_cpu_set(cpu); in tick_nohz_init()
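The tick_nohz_init() hits cover two setup steps: the boot CPU is cleared
from the nohz_full mask because some CPU must keep timekeeping alive, and
every remaining nohz_full CPU is handed to context tracking so user/kernel
transitions can be accounted without a tick. A fragment sketch around the
matched lines; mask allocation and notifier registration are elided:

void __init tick_nohz_init(void)
{
        int cpu;

        /* ... allocate masks, bail out if nohz_full is off ... */

        /* The boot CPU keeps the timekeeping duty. */
        cpu = smp_processor_id();
        if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
                pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
                           cpu);
                cpumask_clear_cpu(cpu, tick_nohz_full_mask);
        }

        /* Enable context tracking on all full dynticks CPUs. */
        for_each_cpu(cpu, tick_nohz_full_mask)
                context_tracking_cpu_set(cpu);

        /* ... register the hotplug notifier, print the final mask ... */
}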
440 update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) in update_ts_time_stats() argument
446 if (nr_iowait_cpu(cpu) > 0) in update_ts_time_stats()
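update_ts_time_stats() folds the time since idle entry into one of two
per-CPU accumulators; nr_iowait_cpu(cpu) decides whether the interval is
charged to iowait or to plain idle. A sketch, assuming the ktime_t fields
used in this era:

static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now,
                     u64 *last_update_time)
{
        ktime_t delta;

        if (ts->idle_active) {
                delta = ktime_sub(now, ts->idle_entrytime);
                /* Tasks blocked on I/O charge the interval to iowait. */
                if (nr_iowait_cpu(cpu) > 0)
                        ts->iowait_sleeptime =
                                ktime_add(ts->iowait_sleeptime, delta);
                else
                        ts->idle_sleeptime =
                                ktime_add(ts->idle_sleeptime, delta);
                ts->idle_entrytime = now;
        }

        if (last_update_time)
                *last_update_time = ktime_to_us(now);
}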
490 u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) in get_cpu_idle_time_us() argument
492 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in get_cpu_idle_time_us()
500 update_ts_time_stats(cpu, ts, now, last_update_time); in get_cpu_idle_time_us()
503 if (ts->idle_active && !nr_iowait_cpu(cpu)) { in get_cpu_idle_time_us()
531 u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) in get_cpu_iowait_time_us() argument
533 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in get_cpu_iowait_time_us()
541 update_ts_time_stats(cpu, ts, now, last_update_time); in get_cpu_iowait_time_us()
544 if (ts->idle_active && nr_iowait_cpu(cpu) > 0) { in get_cpu_iowait_time_us()
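get_cpu_idle_time_us() and get_cpu_iowait_time_us() are twins: with
last_update_time set they flush the open interval through
update_ts_time_stats() and return the accumulator; without it they extend
the accumulator on the fly by the still-running idle interval, using the
same nr_iowait_cpu() test with opposite polarity. A sketch of the idle
variant; the iowait variant differs only in the tested condition and the
field it reads:

u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t now, idle;

        if (!tick_nohz_active)
                return -1;

        now = ktime_get();
        if (last_update_time) {
                /* Fold the open interval into the counter first. */
                update_ts_time_stats(cpu, ts, now, last_update_time);
                idle = ts->idle_sleeptime;
        } else {
                if (ts->idle_active && !nr_iowait_cpu(cpu)) {
                        ktime_t delta = ktime_sub(now, ts->idle_entrytime);

                        idle = ktime_add(ts->idle_sleeptime, delta);
                } else {
                        idle = ts->idle_sleeptime;
                }
        }

        return ktime_to_us(idle);
}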
572 ktime_t now, int cpu) in tick_nohz_stop_sched_tick() argument
632 if (cpu == tick_do_timer_cpu) { in tick_nohz_stop_sched_tick()
669 nohz_balance_enter_idle(cpu); in tick_nohz_stop_sched_tick()
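Only two fragments of tick_nohz_stop_sched_tick() match: the timekeeping
handoff and the idle-balancer hook. When the duty owner stops its own tick
it parks the duty as TICK_DO_TIMER_NONE so a busy CPU can reclaim it in
tick_sched_do_timer(), and the first time the tick actually stops the CPU
enrolls in nohz idle load balancing. A heavily elided fragment sketch
around the matched lines; field names follow kernels of this era:

static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                                         ktime_t now, int cpu)
{
        /* ... compute the next required tick from timers and RCU ... */

        if (cpu == tick_do_timer_cpu) {
                /* Park the jiffies duty; a busy CPU will reclaim it. */
                tick_do_timer_cpu = TICK_DO_TIMER_NONE;
                ts->do_timer_last = 1;
        }

        if (!ts->tick_stopped) {
                /* Entering nohz idle: join the idle load balancer. */
                nohz_balance_enter_idle(cpu);
                calc_load_enter_idle();
                /* ... record last_tick, mark the tick stopped ... */
        }

        /* ... program the hrtimer/clockevent and return the expiry ... */
}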
717 int cpu = smp_processor_id(); in tick_nohz_full_update_tick() local
719 if (!tick_nohz_full_cpu(cpu)) in tick_nohz_full_update_tick()
726 tick_nohz_stop_sched_tick(ts, ktime_get(), cpu); in tick_nohz_full_update_tick()
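tick_nohz_full_update_tick() is the re-evaluation point for busy full
dynticks CPUs, run from the tick itself and after kicks: if nothing forbids
it, the tick is stopped even though the CPU is not idle; otherwise a
stopped tick gets restarted. A sketch, assuming kernels around v4.2-v4.4:

static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
#ifdef CONFIG_NO_HZ_FULL
        int cpu = smp_processor_id();

        if (!tick_nohz_full_cpu(cpu))
                return;

        if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
                return;

        if (can_stop_full_tick())
                tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
        else if (ts->tick_stopped)
                tick_nohz_restart_sched_tick(ts, ktime_get());
#endif
}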
732 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) in can_stop_idle_tick() argument
741 if (unlikely(!cpu_online(cpu))) { in can_stop_idle_tick()
742 if (cpu == tick_do_timer_cpu) in can_stop_idle_tick()
755 if (unlikely(local_softirq_pending() && cpu_online(cpu))) { in can_stop_idle_tick()
772 if (tick_do_timer_cpu == cpu) in can_stop_idle_tick()
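can_stop_idle_tick() gates the idle nohz path, and the matched lines are
its CPU-specific vetoes: an offline CPU must drop the do_timer duty instead
of stopping with it, pending softirqs keep the tick running, and under
nohz_full the timekeeping CPU must keep ticking on behalf of everyone else.
A sketch of the overall shape; the rate-limited softirq warning and the
boot-time ordering check are elided:

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
        if (unlikely(!cpu_online(cpu))) {
                /* Never park the tick while holding the do_timer duty. */
                if (cpu == tick_do_timer_cpu)
                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
                return false;
        }

        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
                return false;

        if (need_resched())
                return false;

        if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
                /* ... rate-limited warning about pending softirqs ... */
                return false;
        }

        if (tick_nohz_full_enabled()) {
                /* The timekeeping CPU ticks for all nohz_full CPUs. */
                if (tick_do_timer_cpu == cpu)
                        return false;
                /* ... early-boot TICK_DO_TIMER_NONE check ... */
        }

        return true;
}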
788 int cpu = smp_processor_id(); in __tick_nohz_idle_enter() local
792 if (can_stop_idle_tick(cpu, ts)) { in __tick_nohz_idle_enter()
797 expires = tick_nohz_stop_sched_tick(ts, now, cpu); in __tick_nohz_idle_enter()
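__tick_nohz_idle_enter() ties the idle pieces together: start idle time
accounting, consult can_stop_idle_tick(), and if permitted stop the tick
and record the programmed expiry and statistics. A sketch, assuming the
ktime_t/tv64 representation still used in this era:

static void __tick_nohz_idle_enter(struct tick_sched *ts)
{
        ktime_t now, expires;
        int cpu = smp_processor_id();

        now = tick_nohz_start_idle(ts);

        if (can_stop_idle_tick(cpu, ts)) {
                int was_stopped = ts->tick_stopped;

                ts->idle_calls++;

                expires = tick_nohz_stop_sched_tick(ts, now, cpu);
                if (expires.tv64 > 0LL) {
                        ts->idle_sleeps++;
                        ts->idle_expires = expires;
                }

                if (!was_stopped && ts->tick_stopped)
                        ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
        }
}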
1123 void tick_cancel_sched_timer(int cpu) in tick_cancel_sched_timer() argument
1125 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in tick_cancel_sched_timer()
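tick_cancel_sched_timer() is the hotplug teardown for the tick: cancel the
per-CPU sched_timer hrtimer when high-resolution timers are in use, then
wipe the tick_sched state so the CPU re-initializes cleanly if it comes
back. A sketch:

void tick_cancel_sched_timer(int cpu)
{
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

#ifdef CONFIG_HIGH_RES_TIMERS
        if (ts->sched_timer.base)
                hrtimer_cancel(&ts->sched_timer);
#endif

        memset(ts, 0, sizeof(*ts));
}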
1141 int cpu; in tick_clock_notify() local
1143 for_each_possible_cpu(cpu) in tick_clock_notify()
1144 set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); in tick_clock_notify()
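tick_clock_notify() fans a clocksource or clockevent change out to every
possible CPU by setting the check_clocks bit in each per-CPU tick_sched;
each CPU then re-evaluates its tick setup the next time it passes through
the tick code. The matched lines are the entire body:

void tick_clock_notify(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}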