Searched refs:nr_running (Results 1 - 17 of 17) sorted by relevance

/linux-4.1.27/fs/proc/
loadavg.c:23 nr_running(), nr_threads, loadavg_proc_show()
stat.c:173 nr_running(),
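Both hits feed the same exported counter into procfs: loadavg.c:23 prints nr_running()/nr_threads as the fourth field of /proc/loadavg, and stat.c:173 prints it as the procs_running line of /proc/stat. A minimal userspace sketch (hypothetical, not part of the tree above) that reads that field back:

    #include <stdio.h>

    int main(void)
    {
        double load1, load5, load15;
        unsigned int running, total;
        FILE *f = fopen("/proc/loadavg", "r");

        if (!f)
            return 1;
        /* /proc/loadavg looks like "0.20 0.18 0.12 1/80 11206":
         * the fourth field is nr_running()/nr_threads. */
        if (fscanf(f, "%lf %lf %lf %u/%u",
                   &load1, &load5, &load15, &running, &total) == 5)
            printf("runnable tasks: %u of %u threads\n", running, total);
        fclose(f);
        return 0;
    }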
/linux-4.1.27/arch/s390/appldata/
appldata_os.c:69 u32 nr_running; /* number of runnable threads */ member in struct:appldata_os_data
107 os_data->nr_running = nr_running(); appldata_get_os_data()
appldata_base.c:573 EXPORT_SYMBOL_GPL(nr_running); variable
/linux-4.1.27/include/uapi/linux/
cgroupstats.h:33 __u64 nr_running; /* Number of tasks running */ member in struct:cgroupstats
/linux-4.1.27/kernel/sched/
debug.c:212 SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running); print_cfs_rq()
303 P(nr_running); print_cpu()
fair.c:617 static u64 __sched_period(unsigned long nr_running) __sched_period() argument
622 if (unlikely(nr_running > nr_latency)) { __sched_period()
624 period *= nr_running; __sched_period()
638 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); sched_slice()
1119 unsigned long nr_running; member in struct:numa_stats
1142 ns->nr_running += rq->nr_running; for_each_cpu()
1166 ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
1338 if (env->src_stats.nr_running <= env->src_stats.task_capacity && task_numa_compare()
1346 if (imp > env->best_imp && src_rq->nr_running == 1 && task_numa_compare()
1347 dst_rq->nr_running == 1) task_numa_compare()
2303 cfs_rq->nr_running++; account_entity_enqueue()
2316 cfs_rq->nr_running--; account_entity_dequeue()
3093 if (cfs_rq->nr_running == 1) { enqueue_entity()
3380 if (cfs_rq->nr_running > 1) entity_tick()
3719 if (rq->curr == rq->idle && rq->cfs.nr_running)
3915 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) return_cfs_rq_runtime()
4176 if (cfs_rq->nr_running > 1) { hrtick_start_fair()
4192 * current task is from our class and nr_running is low enough
4202 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) hrtick_update()
4217 * The enqueue_task method is called before nr_running is
4258 update_rq_runnable_avg(rq, rq->nr_running);
4267 * The dequeue_task method is called before nr_running is
4378 unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running); cpu_avg_load_per_task() local
4381 if (nr_running) cpu_avg_load_per_task()
4382 return load_avg / nr_running; cpu_avg_load_per_task()
5018 int scale = cfs_rq->nr_running >= sched_nr_latency; check_preempt_wakeup()
5106 if (!cfs_rq->nr_running) pick_next_task_fair()
5138 * Therefore the 'simple' nr_running test will indeed pick_next_task_fair()
5185 if (!cfs_rq->nr_running) pick_next_task_fair()
5247 if (unlikely(rq->nr_running == 1)) yield_task_fair()
5457 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && task_hot()
5838 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running) __update_blocked_averages_cpu()
5842 update_rq_runnable_avg(rq, rq->nr_running); __update_blocked_averages_cpu()
6284 if (rq->nr_running > 1) for_each_cpu_and()
6370 if (rq->nr_running > rq->nr_numa_running) fbq_classify_rq()
6372 if (rq->nr_running > rq->nr_preferred_running) fbq_classify_rq()
6806 if (rq->nr_running == 1 && wl > env->imbalance && for_each_cpu_and()
6971 if (busiest->nr_running > 1) { load_balance()
6974 * an imbalance but busiest->nr_running <= 1, the group is load_balance()
6979 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); load_balance()
7271 if (pulled_task || this_rq->nr_running > 0) for_each_domain()
7295 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
7329 if (busiest_rq->nr_running <= 1) active_load_balance_cpu_stop()
7715 if (rq->nr_running >= 2) nohz_kick_needed()
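The fair.c hits around lines 617-638 are where nr_running sets the CFS scheduling period: __sched_period() keeps the target latency until more than nr_latency tasks are runnable, then grows the period linearly so each task still gets the minimum granularity, and sched_slice() divides that period among the entities by weight (hence the cfs_rq->nr_running + !se->on_rq at line 638). A standalone sketch of that scaling, assuming the 4.1 defaults of 6 ms latency, 0.75 ms minimum granularity and sched_nr_latency = 8:

    #include <stdio.h>
    #include <stdint.h>

    /* 4.1 defaults, taken here as assumptions: 6 ms target latency,
     * 0.75 ms minimum granularity, nr_latency = latency / min_gran = 8. */
    static const uint64_t sched_latency_ns  = 6000000ULL;
    static const uint64_t sched_min_gran_ns = 750000ULL;
    static const unsigned long sched_nr_latency = 8;

    /* Mirrors the shape of __sched_period(): stay at the target latency
     * until more than nr_latency tasks are runnable, then stretch the
     * period linearly with nr_running so each task still gets at least
     * the minimum granularity. */
    static uint64_t sched_period(unsigned long nr_running)
    {
        if (nr_running > sched_nr_latency)
            return sched_min_gran_ns * nr_running;
        return sched_latency_ns;
    }

    int main(void)
    {
        for (unsigned long n = 1; n <= 12; n++)
            printf("nr_running=%2lu  period=%8llu ns\n",
                   n, (unsigned long long)sched_period(n));
        return 0;
    }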
sched.h:339 unsigned int nr_running, h_nr_running; member in struct:cfs_rq
561 * nr_running and cpu_load should be in the same cacheline because
564 unsigned int nr_running; member in struct:rq
1308 unsigned prev_nr = rq->nr_running; add_nr_running()
1310 rq->nr_running = prev_nr + count; add_nr_running()
1312 if (prev_nr < 2 && rq->nr_running >= 2) { add_nr_running()
1325 * new value of rq->nr_running is visible on reception add_nr_running()
1336 rq->nr_running -= count; sub_nr_running()
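add_nr_running() at sched.h:1308 is the one place the per-rq counter grows, and the prev_nr < 2 && rq->nr_running >= 2 test at line 1312 is the 1 -> 2 transition on which a NO_HZ_FULL cpu has to turn its tick back on (the comment at line 1325 is about making the new value visible to that kick). A toy sketch of just that transition, with a stub standing in for the real IPI/kick:

    #include <stdio.h>

    struct rq {
        int cpu;
        unsigned int nr_running;
    };

    /* Stand-in for tick_nohz_full_kick_cpu(): once a second task shows
     * up, a tickless cpu needs its scheduler tick again. */
    static void kick_tick_back_on(int cpu)
    {
        printf("cpu %d: >1 runnable task, re-enable the tick\n", cpu);
    }

    static void add_nr_running(struct rq *rq, unsigned int count)
    {
        unsigned int prev_nr = rq->nr_running;

        rq->nr_running = prev_nr + count;
        if (prev_nr < 2 && rq->nr_running >= 2)
            kick_tick_back_on(rq->cpu);
    }

    int main(void)
    {
        struct rq rq = { .cpu = 0, .nr_running = 1 };

        add_nr_running(&rq, 1);   /* 1 -> 2: the interesting edge */
        return 0;
    }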
proc.c:17 * The global load average is an exponentially decaying average of nr_running +
24 * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
83 nr_active = this_rq->nr_running; calc_load_fold_active()
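The proc.c comment at lines 17-24 describes the classic load average: an exponentially decaying average of nr_running plus nr_uninterruptible, sampled roughly every 5 seconds and kept in 11-bit fixed point. A self-contained sketch of that decay, using the kernel's published constants (FSHIFT = 11, EXP_1/EXP_5/EXP_15 = 1884/2014/2037); the kernel's calc_load() rounds slightly differently, so treat this as an illustration rather than the exact code:

    #include <stdio.h>

    #define FSHIFT   11                 /* bits of fraction */
    #define FIXED_1  (1 << FSHIFT)      /* 1.0 in fixed point */
    #define EXP_1    1884               /* exp(-5s/1min)  in fixed point */
    #define EXP_5    2014               /* exp(-5s/5min)  */
    #define EXP_15   2037               /* exp(-5s/15min) */

    /* avg = avg*e + active*(1-e), everything in 11-bit fixed point */
    static unsigned long calc_load(unsigned long load, unsigned long exp,
                                   unsigned long active)
    {
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;
    }

    int main(void)
    {
        unsigned long avg1 = 0;
        unsigned long active = 3 * FIXED_1;  /* say nr_running + nr_unint = 3 */

        /* 60 samples at one every 5 s = 5 minutes of constant load 3 */
        for (int i = 0; i < 60; i++)
            avg1 = calc_load(avg1, EXP_1, active);

        printf("1-min load after 5 min at 3 runnable: %lu.%02lu\n",
               avg1 >> FSHIFT, ((avg1 & (FIXED_1 - 1)) * 100) >> FSHIFT);
        return 0;
    }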
core.c:714 * nr_running update is assumed to be visible sched_can_stop_tick()
717 if (this_rq()->nr_running > 1) sched_can_stop_tick()
2344 * nr_running and nr_context_switches:
2349 unsigned long nr_running(void) nr_running() function
2354 sum += cpu_rq(i)->nr_running; nr_running()
2374 return raw_rq()->nr_running == 1; single_task_running()
2674 rq->nr_running == rq->cfs.h_nr_running)) { pick_next_task()
3204 if (rq->nr_running) idle_cpu()
4345 if (rq->nr_running == 1 && p_rq->nr_running == 1) { yield_to()
5032 if (rq->nr_running == 1) migrate_tasks()
5299 BUG_ON(rq->nr_running != 1); /* the migration thread */ migration_call()
7171 rq->nr_running = 0; for_each_possible_cpu()
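core.c:2349 is the exported nr_running() that loadavg.c, stat.c and appldata call: an unlocked sum of rq->nr_running over the online CPUs, i.e. a snapshot rather than a synchronized count, while single_task_running() at line 2374 and sched_can_stop_tick() at line 714 only look at the local runqueue. A sketch of that sum with a mock per-cpu array in place of cpu_rq()/for_each_online_cpu():

    #include <stdio.h>

    #define NR_CPUS 4

    struct rq { unsigned int nr_running; };

    /* Mock per-cpu runqueues standing in for cpu_rq(i). */
    static struct rq runqueues[NR_CPUS] = {
        { .nr_running = 2 }, { .nr_running = 0 },
        { .nr_running = 1 }, { .nr_running = 3 },
    };

    /* Same shape as the kernel's nr_running(): sum the per-cpu counters
     * with no locking, so the result is only an approximate snapshot. */
    static unsigned long total_nr_running(void)
    {
        unsigned long sum = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            sum += runqueues[cpu].nr_running;
        return sum;
    }

    int main(void)
    {
        printf("nr_running = %lu\n", total_nr_running());
        return 0;
    }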
rt.c:986 BUG_ON(!rq->nr_running); dequeue_top_rt_rq()
/linux-4.1.27/kernel/
workqueue.c:183 atomic_t nr_running ____cacheline_aligned_in_smp;
778 return !atomic_read(&pool->nr_running); __need_more_worker()
785 * Note that, because unbound workers never contribute to nr_running, this
804 atomic_read(&pool->nr_running) <= 1; keep_working()
870 atomic_inc(&worker->pool->nr_running); wq_worker_waking_up()
919 if (atomic_dec_and_test(&pool->nr_running) && wq_worker_sleeping()
926 * worker_set_flags - set worker flags and adjust nr_running accordingly
930 * Set @flags in @worker->flags and adjust nr_running accordingly.
941 /* If transitioning into NOT_RUNNING, adjust nr_running. */ worker_set_flags()
944 atomic_dec(&pool->nr_running); worker_set_flags()
951 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
955 * Clear @flags in @worker->flags and adjust nr_running accordingly.
970 * If transitioning out of NOT_RUNNING, increment nr_running. Note worker_clr_flags()
976 atomic_inc(&pool->nr_running); worker_clr_flags()
1316 * list_add_tail() or we see zero nr_running to avoid workers lying insert_work()
1614 * Sanity check nr_running. Because wq_unbind_fn() releases worker_enter_idle()
1616 * nr_running, the warning may trigger spuriously. Check iff worker_enter_idle()
1621 atomic_read(&pool->nr_running)); worker_enter_idle()
2055 * false for normal per-cpu workers since nr_running would always
4436 * Sched callbacks are disabled now. Zap nr_running. for_each_cpu_worker_pool()
4437 * After this, nr_running stays zero and need_more_worker() for_each_cpu_worker_pool()
4443 atomic_set(&pool->nr_running, 0); for_each_cpu_worker_pool()
4509 * worker_clr_flags() or adjust nr_running. Atomically for_each_pool_worker()
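In workqueue.c the counter is a different animal: pool->nr_running tracks how many workers are actually executing (the scheduler hooks at lines 870 and 919 adjust it as workers wake and block), and __need_more_worker()/keep_working() at lines 778 and 804 use it to keep roughly one running worker per pool. A simplified sketch of that concurrency rule, with stand-in pool/work fields rather than the kernel structs:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for struct worker_pool: nr_running counts workers that are
     * actually executing (not sleeping); nr_pending stands in for the worklist. */
    struct pool {
        atomic_int nr_running;
        int nr_pending;
    };

    /* Work is queued and nobody is running it -> wake another worker. */
    static bool need_more_worker(struct pool *p)
    {
        return p->nr_pending && atomic_load(&p->nr_running) == 0;
    }

    /* Keep a worker busy only while it is the sole runner with work left;
     * extra concurrent runners go back to idle. */
    static bool keep_working(struct pool *p)
    {
        return p->nr_pending && atomic_load(&p->nr_running) <= 1;
    }

    int main(void)
    {
        struct pool p = { .nr_pending = 5 };

        atomic_init(&p.nr_running, 0);
        printf("idle pool: need_more_worker=%d\n", need_more_worker(&p));

        atomic_fetch_add(&p.nr_running, 1);   /* a worker picked up work */
        printf("one runner: need_more_worker=%d keep_working=%d\n",
               need_more_worker(&p), keep_working(&p));

        atomic_fetch_add(&p.nr_running, 1);   /* a second runner appears */
        printf("two runners: keep_working=%d\n", keep_working(&p));
        return 0;
    }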
cgroup.c:4075 stats->nr_running++; cgroupstats_build()
/linux-4.1.27/Documentation/accounting/
getdelays.c:240 (unsigned long long)c->nr_running, print_cgroupstats()
/linux-4.1.27/drivers/staging/lustre/lnet/lnet/
router.c:1218 * because kernel counts # active tasks as nr_running lnet_router_checker()
/linux-4.1.27/arch/x86/kernel/
apm_32.c:112 * Check nr_running to detect if we are idle (from
/linux-4.1.27/include/linux/
sched.h:169 extern unsigned long nr_running(void);
