tasks 1820 arch/ia64/kernel/mca.c INIT_LIST_HEAD(&p->tasks);
tasks 489 drivers/dma/pl330.c struct tasklet_struct tasks;
tasks 1723 drivers/dma/pl330.c tasklet_schedule(&pl330->tasks);
tasks 1974 drivers/dma/pl330.c tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);
tasks 2002 drivers/dma/pl330.c tasklet_kill(&pl330->tasks);
tasks 124 drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h SMU_Task tasks[1];
tasks 336 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
tasks 373 drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];
tasks 117 drivers/gpu/drm/drm_flip_work.c struct list_head tasks;
tasks 123 drivers/gpu/drm/drm_flip_work.c INIT_LIST_HEAD(&tasks);
tasks 125 drivers/gpu/drm/drm_flip_work.c list_splice_tail(&work->commited, &tasks);
tasks 129 drivers/gpu/drm/drm_flip_work.c if (list_empty(&tasks))
tasks 132 drivers/gpu/drm/drm_flip_work.c list_for_each_entry_safe(task, tmp, &tasks, node) {
tasks 1594 drivers/net/ethernet/freescale/fman/fman.c u8 tasks = *num_of_tasks;
tasks 1603 drivers/net/ethernet/freescale/fman/fman.c if ((fman->state->accumulated_num_of_tasks + tasks) >
tasks 1611 drivers/net/ethernet/freescale/fman/fman.c fman->state->accumulated_num_of_tasks += tasks;
tasks 1616 drivers/net/ethernet/freescale/fman/fman.c tmp |= ((u32)((tasks - 1) << BMI_NUM_OF_TASKS_SHIFT) |
tasks 470 drivers/net/ethernet/freescale/fman/fman_port.c struct fman_port_rsrc tasks;
tasks 1379 drivers/net/ethernet/freescale/fman/fman_port.c port->tasks.num =
tasks 1382 drivers/net/ethernet/freescale/fman/fman_port.c port->tasks.extra =
tasks 1487 drivers/net/ethernet/freescale/fman/fman_port.c params.num_of_tasks = (u8)port->tasks.num;
tasks 1488 drivers/net/ethernet/freescale/fman/fman_port.c params.num_of_extra_tasks = (u8)port->tasks.extra;
tasks 822 drivers/net/ethernet/qlogic/qed/qed_fcoe.c static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
tasks 841 drivers/net/ethernet/qlogic/qed/qed_fcoe.c if (tasks) {
tasks 861 drivers/net/ethernet/qlogic/qed/qed_fcoe.c tasks->size = tid_info->tid_size;
tasks 862 drivers/net/ethernet/qlogic/qed/qed_fcoe.c tasks->num_tids_per_block = tid_info->num_tids_per_block;
tasks 863 drivers/net/ethernet/qlogic/qed/qed_fcoe.c memcpy(tasks->blocks, tid_info->blocks,
tasks 1152 drivers/net/ethernet/qlogic/qed/qed_iscsi.c struct qed_iscsi_tid *tasks,
tasks 1174 drivers/net/ethernet/qlogic/qed/qed_iscsi.c if (!tasks)
tasks 1193 drivers/net/ethernet/qlogic/qed/qed_iscsi.c tasks->size = tid_info->tid_size;
tasks 1194 drivers/net/ethernet/qlogic/qed/qed_iscsi.c tasks->num_tids_per_block = tid_info->num_tids_per_block;
tasks 1195 drivers/net/ethernet/qlogic/qed/qed_iscsi.c memcpy(tasks->blocks, tid_info->blocks,
tasks 62 drivers/oprofile/buffer_sync.c list_add(&task->tasks, &dying_tasks);
tasks 449 drivers/oprofile/buffer_sync.c list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
tasks 450 drivers/oprofile/buffer_sync.c list_del(&task->tasks);
tasks 380 drivers/scsi/qedf/qedf.h struct qed_fcoe_tid tasks;
tasks 124 drivers/scsi/qedf/qedf_els.c task = qedf_get_task_mem(&qedf->tasks, xid);
tasks 917 drivers/scsi/qedf/qedf_io.c task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
tasks 1153 drivers/scsi/qedf/qedf_io.c task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
tasks 2199 drivers/scsi/qedf/qedf_io.c task = qedf_get_task_mem(&qedf->tasks, xid);
tasks 2330 drivers/scsi/qedf/qedf_io.c task = qedf_get_task_mem(&qedf->tasks, xid);
tasks 3326 drivers/scsi/qedf/qedf_main.c rc = qed_ops->start(qedf->cdev, &qedf->tasks);
tasks 3331 drivers/scsi/qedf/qedf_main.c task_start = qedf_get_task_mem(&qedf->tasks, 0);
tasks 3332 drivers/scsi/qedf/qedf_main.c task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
tasks 3335 drivers/scsi/qedf/qedf_main.c qedf->tasks.size);
tasks 314 drivers/scsi/qedi/qedi.h struct qed_iscsi_tid tasks;
tasks 94 drivers/scsi/qedi/qedi_fw.c task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
tasks 267 drivers/scsi/qedi/qedi_fw.c task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
tasks 1033 drivers/scsi/qedi/qedi_fw.c (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tasks 1134 drivers/scsi/qedi/qedi_fw.c (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tasks 1486 drivers/scsi/qedi/qedi_fw.c (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tasks 1624 drivers/scsi/qedi/qedi_fw.c (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tasks 1723 drivers/scsi/qedi/qedi_fw.c (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tasks 2068 drivers/scsi/qedi/qedi_fw.c (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tasks 2542 drivers/scsi/qedi/qedi_main.c rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
tasks 2550 drivers/scsi/qedi/qedi_main.c task_start = qedi_get_task_mem(&qedi->tasks, 0);
tasks 2551 drivers/scsi/qedi/qedi_main.c task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
tasks 2554 drivers/scsi/qedi/qedi_main.c task_start, task_end, qedi->tasks.size);
tasks 1157 fs/exec.c list_replace_rcu(&leader->tasks, &tsk->tasks);
tasks 1850 fs/proc/base.c return !proc_pid(inode)->tasks[PIDTYPE_PID].first;
tasks 222 include/linux/cgroup-defs.h struct list_head tasks;
tasks 64 include/linux/pid.h struct hlist_head tasks[PIDTYPE_MAX];
tasks 182 include/linux/pid.h &(pid)->tasks[type], pid_links[type]) {
tasks 63 include/linux/psi_types.h unsigned int tasks[NR_PSI_TASK_COUNTS];
tasks 130 include/linux/qed/qed_fcoe_if.h int (*start)(struct qed_dev *cdev, struct qed_fcoe_tid *tasks);
tasks 229 include/linux/qed/qed_iscsi_if.h struct qed_iscsi_tid *tasks,
tasks 721 include/linux/sched.h struct list_head tasks;
tasks 560 include/linux/sched/signal.h list_empty(&init_task.tasks)
tasks 563 include/linux/sched/signal.h list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
tasks 196 include/linux/sunrpc/sched.h struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
tasks 88 init/init_task.c .tasks = LIST_HEAD_INIT(init_task.tasks),
tasks 740 kernel/cgroup/cgroup.c .tasks = LIST_HEAD_INIT(init_css_set.tasks),
tasks 778 kernel/cgroup/cgroup.c return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
tasks 909 kernel/cgroup/cgroup.c &to_cset->tasks);
tasks 1215 kernel/cgroup/cgroup.c INIT_LIST_HEAD(&cset->tasks);
tasks 1934 kernel/cgroup/cgroup.c list_add_tail(&p->cg_list, &cset->tasks);
tasks 2548 kernel/cgroup/cgroup.c list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
tasks 2964 kernel/cgroup/cgroup.c list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
tasks 4464 kernel/cgroup/cgroup.c if (!list_empty(&cset->tasks)) {
tasks 4465 kernel/cgroup/cgroup.c it->task_pos = cset->tasks.next;
tasks 4466 kernel/cgroup/cgroup.c it->cur_tasks_head = &cset->tasks;
tasks 4475 kernel/cgroup/cgroup.c it->tasks_head = &cset->tasks;
tasks 5678 kernel/cgroup/cgroup.c BUG_ON(!list_empty(&init_task.tasks));
tasks 164 kernel/cgroup/debug.c list_for_each_entry(task, &cset->tasks, cg_list) {
tasks 81 kernel/exit.c list_del_rcu(&p->tasks);
tasks 1438 kernel/exit.c (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
tasks 2192 kernel/fork.c list_add_tail_rcu(&p->tasks, &init_task.tasks);
tasks 48 kernel/pid.c .tasks = {
tasks 216 kernel/pid.c INIT_HLIST_HEAD(&pid->tasks[type]);
tasks 286 kernel/pid.c hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
tasks 302 kernel/pid.c if (!hlist_empty(&pid->tasks[tmp]))
tasks 334 kernel/pid.c first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
tasks 867 kernel/sched/core.c if (!bucket[bucket_id].tasks)
tasks 957 kernel/sched/core.c bucket->tasks++;
tasks 966 kernel/sched/core.c if (bucket->tasks == 1 || uc_se->value > bucket->value)
tasks 994 kernel/sched/core.c SCHED_WARN_ON(!bucket->tasks);
tasks 995 kernel/sched/core.c if (likely(bucket->tasks))
tasks 996 kernel/sched/core.c bucket->tasks--;
tasks 1005 kernel/sched/core.c if (likely(bucket->tasks))
tasks 7146 kernel/sched/fair.c struct list_head tasks;
tasks 7375 kernel/sched/fair.c struct list_head *tasks = &env->src_rq->cfs_tasks;
tasks 7385 kernel/sched/fair.c while (!list_empty(tasks)) {
tasks 7393 kernel/sched/fair.c p = list_last_entry(tasks, struct task_struct, se.group_node);
tasks 7419 kernel/sched/fair.c list_add(&p->se.group_node, &env->tasks);
tasks 7443 kernel/sched/fair.c list_move(&p->se.group_node, tasks);
tasks 7488 kernel/sched/fair.c struct list_head *tasks = &env->tasks;
tasks 7495 kernel/sched/fair.c while (!list_empty(tasks)) {
tasks 7496 kernel/sched/fair.c p = list_first_entry(tasks, struct task_struct, se.group_node);
tasks 8849 kernel/sched/fair.c .tasks = LIST_HEAD_INIT(env.tasks),
tasks 216 kernel/sched/psi.c static bool test_state(unsigned int *tasks, enum psi_states state)
tasks 220 kernel/sched/psi.c return tasks[NR_IOWAIT];
tasks 222 kernel/sched/psi.c return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
tasks 224 kernel/sched/psi.c return tasks[NR_MEMSTALL];
tasks 226 kernel/sched/psi.c return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
tasks 228 kernel/sched/psi.c return tasks[NR_RUNNING] > 1;
tasks 230 kernel/sched/psi.c return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
tasks 231 kernel/sched/psi.c tasks[NR_RUNNING];
tasks 697 kernel/sched/psi.c if (groupc->tasks[t] == 0 && !psi_bug) {
tasks 699 kernel/sched/psi.c cpu, t, groupc->tasks[0],
tasks 700 kernel/sched/psi.c groupc->tasks[1], groupc->tasks[2],
tasks 704 kernel/sched/psi.c groupc->tasks[t]--;
tasks 709 kernel/sched/psi.c groupc->tasks[t]++;
tasks 713 kernel/sched/psi.c if (test_state(groupc->tasks, s))
tasks 815 kernel/sched/sched.h unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
tasks 192 net/sunrpc/sched.c __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
tasks 215 net/sunrpc/sched.c list_add(&task->u.tk_wait.list, &queue->tasks[0]);
tasks 217 net/sunrpc/sched.c list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
tasks 257 net/sunrpc/sched.c for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
tasks 258 net/sunrpc/sched.c INIT_LIST_HEAD(&queue->tasks[i]);
tasks 612 net/sunrpc/sched.c q = &queue->tasks[queue->priority];
tasks 622 net/sunrpc/sched.c if (q == &queue->tasks[0])
tasks 623 net/sunrpc/sched.c q = &queue->tasks[queue->maxpriority];
tasks 630 net/sunrpc/sched.c } while (q != &queue->tasks[queue->priority]);
tasks 636 net/sunrpc/sched.c rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
tasks 645 net/sunrpc/sched.c if (!list_empty(&queue->tasks[0]))
tasks 646 net/sunrpc/sched.c return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
tasks 706 net/sunrpc/sched.c head = &queue->tasks[queue->maxpriority];
tasks 715 net/sunrpc/sched.c if (head == &queue->tasks[0])
tasks 735 net/sunrpc/sched.c head = &queue->tasks[queue->maxpriority];
tasks 745 net/sunrpc/sched.c if (head == &queue->tasks[0])
tasks 90 samples/bpf/map_perf_test_user.c static int pre_test_lru_hash_lookup(int tasks)
tasks 288 samples/bpf/map_perf_test_user.c typedef int (*pre_test_func)(int tasks);
tasks 308 samples/bpf/map_perf_test_user.c static int pre_test(int tasks)
tasks 314 samples/bpf/map_perf_test_user.c int ret = pre_test_funcs[i](tasks);
tasks 339 samples/bpf/map_perf_test_user.c static void run_perf_test(int tasks)
tasks 341 samples/bpf/map_perf_test_user.c pid_t pid[tasks];
tasks 344 samples/bpf/map_perf_test_user.c assert(!pre_test(tasks));
tasks 346 samples/bpf/map_perf_test_user.c for (i = 0; i < tasks; i++) {
tasks 356 samples/bpf/map_perf_test_user.c for (i = 0; i < tasks; i++) {
tasks 238 samples/bpf/test_lru_dist.c static void run_parallel(unsigned int tasks, void (*fn)(int i, void *data),
tasks 242 samples/bpf/test_lru_dist.c pid_t pid[tasks];
tasks 245 samples/bpf/test_lru_dist.c for (i = 0; i < tasks; i++) {
tasks 261 samples/bpf/test_lru_dist.c for (i = 0; i < tasks; i++) {
tasks 95 samples/bpf/test_overhead_user.c static void run_perf_test(int tasks, int flags)
tasks 97 samples/bpf/test_overhead_user.c pid_t pid[tasks];
tasks 100 samples/bpf/test_overhead_user.c for (i = 0; i < tasks; i++) {
tasks 110 samples/bpf/test_overhead_user.c for (i = 0; i < tasks; i++) {
tasks 84 samples/bpf/tracex2_user.c static struct task tasks[1024];
tasks 92 samples/bpf/tracex2_user.c if (memcmp(&tasks[i], &next_key, SIZE) == 0)
tasks 95 samples/bpf/tracex2_user.c memcpy(&tasks[task_cnt++], &next_key, SIZE);
tasks 101 samples/bpf/tracex2_user.c (__u32) tasks[i].pid_tgid,
tasks 102 samples/bpf/tracex2_user.c tasks[i].comm,
tasks 103 samples/bpf/tracex2_user.c (__u32) tasks[i].uid_gid);
tasks 104 samples/bpf/tracex2_user.c print_hist_for_pid(fd, &tasks[i]);
tasks 174 sound/pci/cs46xx/cs46xx_dsp_spos.h struct dsp_task_descriptor tasks[DSP_MAX_TASK_DESC];
tasks 555 sound/pci/cs46xx/dsp_spos.c snd_iprintf(buffer,"\n%04x %s:\n",ins->tasks[i].address,ins->tasks[i].task_name);
tasks 557 sound/pci/cs46xx/dsp_spos.c for (col = 0,j = 0;j < ins->tasks[i].size; j++,col++) {
tasks 563 sound/pci/cs46xx/dsp_spos.c val = readl(dst + (ins->tasks[i].address + j) * sizeof(u32));
tasks 955 sound/pci/cs46xx/dsp_spos.c strcpy(ins->tasks[ins->ntask].task_name, name);
tasks 957 sound/pci/cs46xx/dsp_spos.c strcpy(ins->tasks[ins->ntask].task_name, "(NULL)");
tasks 958 sound/pci/cs46xx/dsp_spos.c ins->tasks[ins->ntask].address = dest;
tasks 959 sound/pci/cs46xx/dsp_spos.c ins->tasks[ins->ntask].size = size;
tasks 962 sound/pci/cs46xx/dsp_spos.c ins->tasks[ins->ntask].index = ins->ntask;
tasks 963 sound/pci/cs46xx/dsp_spos.c desc = (ins->tasks + ins->ntask);
tasks 1948 sound/pci/cs46xx/dsp_spos.c struct dsp_task_descriptor *t = &ins->tasks[i];
tasks 774 tools/perf/builtin-report.c struct task *tasks, *task;
tasks 788 tools/perf/builtin-report.c tasks = malloc(sizeof(*tasks) * nr);
tasks 789 tools/perf/builtin-report.c if (!tasks)
tasks 797 tools/perf/builtin-report.c task = tasks + itask++;
tasks 812 tools/perf/builtin-report.c task = tasks + itask;
tasks 820 tools/perf/builtin-report.c free(tasks);
tasks 833 tools/perf/builtin-report.c free(tasks);
tasks 181 tools/perf/builtin-sched.c struct task_desc **tasks;
tasks 477 tools/perf/builtin-sched.c sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
tasks 478 tools/perf/builtin-sched.c BUG_ON(!sched->tasks);
tasks 479 tools/perf/builtin-sched.c sched->tasks[task->nr] = task;
tasks 494 tools/perf/builtin-sched.c task = sched->tasks[i];
tasks 506 tools/perf/builtin-sched.c task1 = sched->tasks[i];
tasks 510 tools/perf/builtin-sched.c task2 = sched->tasks[j];
tasks 678 tools/perf/builtin-sched.c parms->task = task = sched->tasks[i];
tasks 701 tools/perf/builtin-sched.c task = sched->tasks[i];
tasks 714 tools/perf/builtin-sched.c task = sched->tasks[i];
tasks 737 tools/perf/builtin-sched.c task = sched->tasks[i];
tasks 477 tools/perf/util/synthetic-events.c DIR *tasks;
tasks 512 tools/perf/util/synthetic-events.c tasks = opendir(filename);
tasks 513 tools/perf/util/synthetic-events.c if (tasks == NULL) {
tasks 518 tools/perf/util/synthetic-events.c while ((dirent = readdir(tasks)) != NULL) {
tasks 555 tools/perf/util/synthetic-events.c closedir(tasks);
tasks 649 tools/testing/selftests/bpf/test_maps.c static void test_sockmap(unsigned int tasks, void *data)
tasks 662 tools/testing/selftests/bpf/test_maps.c pid_t pid[tasks];
tasks 1072 tools/testing/selftests/bpf/test_maps.c for (i = 0; i < tasks; i++) {
tasks 1090 tools/testing/selftests/bpf/test_maps.c for (i = 0; i < tasks; i++) {
tasks 1278 tools/testing/selftests/bpf/test_maps.c static void __run_parallel(unsigned int tasks,
tasks 1282 tools/testing/selftests/bpf/test_maps.c pid_t pid[tasks];
tasks 1285 tools/testing/selftests/bpf/test_maps.c for (i = 0; i < tasks; i++) {
tasks 1296 tools/testing/selftests/bpf/test_maps.c for (i = 0; i < tasks; i++) {