nr_tasks  1291 block/blk-cgroup.c              if (ioc && atomic_read(&ioc->nr_tasks) > 1)
nr_tasks   208 block/blk-ioc.c                 atomic_dec(&ioc->nr_tasks);
nr_tasks   262 block/blk-ioc.c                 atomic_set(&ioc->nr_tasks, 1);
nr_tasks   213 include/linux/cgroup-defs.h     int nr_tasks;
nr_tasks   102 include/linux/iocontext.h       atomic_t nr_tasks;
nr_tasks   142 include/linux/iocontext.h       WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
nr_tasks   143 include/linux/iocontext.h       atomic_inc(&ioc->nr_tasks);
nr_tasks    95 kernel/cgroup/cgroup-internal.h int nr_tasks;
nr_tasks   610 kernel/cgroup/cgroup.c          count += link->cset->nr_tasks;
nr_tasks  1936 kernel/cgroup/cgroup.c          cset->nr_tasks++;
nr_tasks  2383 kernel/cgroup/cgroup.c          mgctx->tset.nr_tasks++;
nr_tasks  2474 kernel/cgroup/cgroup.c          if (tset->nr_tasks) {
nr_tasks  2499 kernel/cgroup/cgroup.c          to_cset->nr_tasks++;
nr_tasks  2501 kernel/cgroup/cgroup.c          from_cset->nr_tasks--;
nr_tasks  2521 kernel/cgroup/cgroup.c          if (tset->nr_tasks) {
nr_tasks  2534 kernel/cgroup/cgroup.c          if (tset->nr_tasks) {
nr_tasks  2558 kernel/cgroup/cgroup.c          tset->nr_tasks = 0;
nr_tasks  6049 kernel/cgroup/cgroup.c          cset->nr_tasks++;
nr_tasks  6120 kernel/cgroup/cgroup.c          cset->nr_tasks--;
nr_tasks    56 kernel/cgroup/debug.c           if (refcnt > cset->nr_tasks)
nr_tasks    57 kernel/cgroup/debug.c           seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
nr_tasks   149 kernel/cgroup/debug.c           if (refcnt - cset->nr_tasks > 0) {
nr_tasks   150 kernel/cgroup/debug.c           int extra = refcnt - cset->nr_tasks;
nr_tasks   185 kernel/cgroup/debug.c           WARN_ON(count != cset->nr_tasks);
nr_tasks  1072 kernel/sched/fair.c             int nr_tasks;
nr_tasks  2275 kernel/sched/fair.c             grp->nr_tasks++;
nr_tasks  2297 kernel/sched/fair.c             if (my_grp->nr_tasks > grp->nr_tasks)
nr_tasks  2303 kernel/sched/fair.c             if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
nr_tasks  2335 kernel/sched/fair.c             my_grp->nr_tasks--;
nr_tasks  2336 kernel/sched/fair.c             grp->nr_tasks++;
nr_tasks  2375 kernel/sched/fair.c             grp->nr_tasks--;
nr_tasks   311 samples/bpf/test_lru_dist.c     int nr_tasks, unsigned int lru_size)
nr_tasks   324 samples/bpf/test_lru_dist.c     nr_tasks * lru_size);
nr_tasks   330 samples/bpf/test_lru_dist.c     run_parallel(nr_tasks, do_test_lru_dist, child_data);
nr_tasks   468 samples/bpf/test_lru_dist.c     static void test_parallel_lru_loss(int map_type, int map_flags, int nr_tasks)
nr_tasks   481 samples/bpf/test_lru_dist.c     nr_tasks * (1000 + 200));
nr_tasks   485 samples/bpf/test_lru_dist.c     run_parallel(nr_tasks, do_test_parallel_lru_loss, &map_fd);
nr_tasks   495 samples/bpf/test_lru_dist.c     int nr_tasks = 1;
nr_tasks   507 samples/bpf/test_lru_dist.c     nr_tasks = atoi(argv[3]);
nr_tasks   519 samples/bpf/test_lru_dist.c     nr_tasks = min(nr_tasks, nr_cpus);
nr_tasks   531 samples/bpf/test_lru_dist.c     nr_tasks);
nr_tasks   533 samples/bpf/test_lru_dist.c     nr_tasks, lru_size);
nr_tasks   118 tools/perf/bench/numa.c         int nr_tasks;
nr_tasks   500 tools/perf/bench/numa.c         dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
nr_tasks   576 tools/perf/bench/numa.c         if (t >= g->p.nr_tasks) {
nr_tasks   603 tools/perf/bench/numa.c         if (t < g->p.nr_tasks)
nr_tasks   604 tools/perf/bench/numa.c         printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
nr_tasks   637 tools/perf/bench/numa.c         dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
nr_tasks   698 tools/perf/bench/numa.c         if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
nr_tasks   718 tools/perf/bench/numa.c         if (t < g->p.nr_tasks)
nr_tasks   719 tools/perf/bench/numa.c         printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
nr_tasks   989 tools/perf/bench/numa.c         for (t = 0; t < g->p.nr_tasks; t++) {
nr_tasks  1009 tools/perf/bench/numa.c         nr_min = g->p.nr_tasks;
nr_tasks  1022 tools/perf/bench/numa.c         BUG_ON(sum > g->p.nr_tasks);
nr_tasks  1024 tools/perf/bench/numa.c         if (0 && (sum < g->p.nr_tasks))
nr_tasks  1145 tools/perf/bench/numa.c         if (g->nr_tasks_working == g->p.nr_tasks)
nr_tasks  1346 tools/perf/bench/numa.c         g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
nr_tasks  1361 tools/perf/bench/numa.c         ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
nr_tasks  1366 tools/perf/bench/numa.c         for (t = 0; t < g->p.nr_tasks; t++) {
nr_tasks  1382 tools/perf/bench/numa.c         ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
nr_tasks  1432 tools/perf/bench/numa.c         g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads;
nr_tasks  1529 tools/perf/bench/numa.c         while (g->nr_tasks_started != g->p.nr_tasks)
nr_tasks  1532 tools/perf/bench/numa.c         BUG_ON(g->nr_tasks_started != g->p.nr_tasks);
nr_tasks  1575 tools/perf/bench/numa.c         for (t = 0; t < g->p.nr_tasks; t++) {
nr_tasks  1597 tools/perf/bench/numa.c         runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / NSEC_PER_SEC;
nr_tasks  1617 tools/perf/bench/numa.c         print_res(name, bytes / g->p.nr_tasks / 1e9,
nr_tasks  1623 tools/perf/bench/numa.c         print_res(name, runtime_sec_max * NSEC_PER_SEC / (bytes / g->p.nr_tasks),
nr_tasks  1626 tools/perf/bench/numa.c         print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max,
nr_tasks   179 tools/perf/builtin-sched.c      unsigned long nr_tasks;
nr_tasks   467 tools/perf/builtin-sched.c      task->nr = sched->nr_tasks;
nr_tasks   476 tools/perf/builtin-sched.c      sched->nr_tasks++;
nr_tasks   477 tools/perf/builtin-sched.c      sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
nr_tasks   482 tools/perf/builtin-sched.c      printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
nr_tasks   493 tools/perf/builtin-sched.c      for (i = 0; i < sched->nr_tasks; i++) {
nr_tasks   505 tools/perf/builtin-sched.c      for (i = 0; i < sched->nr_tasks; i++) {
nr_tasks   508 tools/perf/builtin-sched.c      if (j == sched->nr_tasks)
nr_tasks   577 tools/perf/builtin-sched.c      limit.rlim_cur += sched->nr_tasks - cur_task;
nr_tasks   675 tools/perf/builtin-sched.c      for (i = 0; i < sched->nr_tasks; i++) {
nr_tasks   700 tools/perf/builtin-sched.c      for (i = 0; i < sched->nr_tasks; i++) {
nr_tasks   713 tools/perf/builtin-sched.c      for (i = 0; i < sched->nr_tasks; i++) {
nr_tasks   736 tools/perf/builtin-sched.c      for (i = 0; i < sched->nr_tasks; i++) {
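
The blk-ioc.c and iocontext.h hits above show nr_tasks used as an atomic count of the tasks sharing an io_context: initialized to 1 for the creating task, incremented after a sanity check when another task attaches, and decremented when a task exits. The following is a minimal user-space sketch of that counting pattern, not kernel code; struct demo_ioc and the demo_* helpers are invented for illustration, and C11 <stdatomic.h> stands in for the kernel's atomic_t API.

/* Illustrative analogue of the nr_tasks pattern seen in blk-ioc.c and
 * iocontext.h above. All names here are hypothetical. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_ioc {
	atomic_int nr_tasks;	/* number of tasks sharing this context */
};

static void demo_ioc_init(struct demo_ioc *ioc)
{
	/* Analogous to atomic_set(&ioc->nr_tasks, 1) at blk-ioc.c:262:
	 * the creating task counts as the first user. */
	atomic_init(&ioc->nr_tasks, 1);
}

static void demo_ioc_task_attach(struct demo_ioc *ioc)
{
	/* Analogous to the WARN_ON_ONCE()/atomic_inc() pair at
	 * iocontext.h:142-143: the context must already have a user. */
	assert(atomic_load(&ioc->nr_tasks) > 0);
	atomic_fetch_add(&ioc->nr_tasks, 1);
}

static void demo_ioc_task_detach(struct demo_ioc *ioc)
{
	/* Analogous to atomic_dec(&ioc->nr_tasks) at blk-ioc.c:208. */
	atomic_fetch_sub(&ioc->nr_tasks, 1);
}

int main(void)
{
	struct demo_ioc ioc;

	demo_ioc_init(&ioc);
	demo_ioc_task_attach(&ioc);	/* a second task starts sharing it */
	printf("shared by %d tasks\n", atomic_load(&ioc.nr_tasks));
	demo_ioc_task_detach(&ioc);
	demo_ioc_task_detach(&ioc);
	printf("now %d tasks\n", atomic_load(&ioc.nr_tasks));
	return 0;
}

The cgroup, sched/fair, and perf/bpf hits use the same idea with a plain int (cset->nr_tasks, grp->nr_tasks, sched->nr_tasks) because those counters are updated under their subsystem's own locking rather than with atomics.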