delta_exec        770 include/linux/cgroup.h void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
delta_exec        772 include/linux/cgroup.h 				    enum cpu_usage_stat index, u64 delta_exec);
delta_exec        775 include/linux/cgroup.h 					  u64 delta_exec)
delta_exec        779 include/linux/cgroup.h 	cpuacct_charge(task, delta_exec);
delta_exec        784 include/linux/cgroup.h 		__cgroup_account_cputime(cgrp, delta_exec);
delta_exec        790 include/linux/cgroup.h 						u64 delta_exec)
delta_exec        794 include/linux/cgroup.h 	cpuacct_account_field(task, index, delta_exec);
delta_exec        799 include/linux/cgroup.h 		__cgroup_account_cputime_field(cgrp, index, delta_exec);
delta_exec        806 include/linux/cgroup.h 					  u64 delta_exec) {}
delta_exec        809 include/linux/cgroup.h 						u64 delta_exec) {}
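
A note on the include/linux/cgroup.h hits: these are the static inline wrappers cgroup_account_cputime() and cgroup_account_cputime_field() that every scheduling class calls. They charge the cpuacct controller (lines 779 and 794), forward delta_exec into the cgroup hierarchy stats for non-root groups (lines 784 and 799), and collapse to the empty stubs at lines 806 and 809 when cgroups are configured out. Below is a minimal userspace sketch of that wrapper-plus-stub pattern; the types and names are stand-ins, not the kernel's.

#include <stdint.h>
#include <stdio.h>

struct fake_cgroup { uint64_t sum_exec_runtime; int is_root; };
struct fake_task   { struct fake_cgroup *cgrp; uint64_t task_runtime; };

#ifdef HAVE_CGROUPS			/* stand-in for CONFIG_CGROUPS */
static inline void account_cputime(struct fake_task *t, uint64_t delta_exec)
{
	t->task_runtime += delta_exec;		/* per-task charge (cpuacct-like) */
	if (!t->cgrp->is_root)			/* the root group is never charged */
		t->cgrp->sum_exec_runtime += delta_exec;
}
#else
static inline void account_cputime(struct fake_task *t, uint64_t delta_exec) {}
#endif

int main(void)
{
	struct fake_cgroup cg  = { 0, 0 };
	struct fake_task   tsk = { &cg, 0 };

	account_cputime(&tsk, 1000);		/* charge 1000 ns of runtime */
	printf("task=%llu cgroup=%llu\n",
	       (unsigned long long)tsk.task_runtime,
	       (unsigned long long)cg.sum_exec_runtime);
	return 0;
}
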
delta_exec        355 kernel/cgroup/rstat.c void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
delta_exec        360 kernel/cgroup/rstat.c 	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
delta_exec        365 kernel/cgroup/rstat.c 				    enum cpu_usage_stat index, u64 delta_exec)
delta_exec        374 kernel/cgroup/rstat.c 		rstatc->bstat.cputime.utime += delta_exec;
delta_exec        379 kernel/cgroup/rstat.c 		rstatc->bstat.cputime.stime += delta_exec;
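
The kernel/cgroup/rstat.c hits are the out-of-line halves of those wrappers: __cgroup_account_cputime() adds delta_exec to this CPU's basic-stat total (line 360), and __cgroup_account_cputime_field() switches on the cpu_usage_stat index to grow either utime or stime (lines 374 and 379); the per-CPU values are only folded up the hierarchy when the stats are flushed. A short sketch of per-CPU accumulation keyed by such an index, with illustrative enum values and no flush step:

#include <stdint.h>
#include <stdio.h>

enum usage_stat { STAT_USER, STAT_SYSTEM, NR_STATS };

struct cpu_stats {
	uint64_t sum_exec_runtime;	/* everything the group ran         */
	uint64_t field[NR_STATS];	/* split into user/system buckets   */
};

#define NR_CPUS 4
static struct cpu_stats percpu[NR_CPUS];

static void account_cputime(int cpu, uint64_t delta_exec)
{
	percpu[cpu].sum_exec_runtime += delta_exec;
}

static void account_cputime_field(int cpu, enum usage_stat idx,
				  uint64_t delta_exec)
{
	switch (idx) {
	case STAT_USER:			/* kernel: CPUTIME_USER/NICE -> utime   */
		percpu[cpu].field[STAT_USER] += delta_exec;
		break;
	case STAT_SYSTEM:		/* kernel: SYSTEM/IRQ/SOFTIRQ -> stime  */
		percpu[cpu].field[STAT_SYSTEM] += delta_exec;
		break;
	default:
		break;
	}
}

int main(void)
{
	account_cputime(0, 500);
	account_cputime_field(0, STAT_USER, 500);
	printf("cpu0: runtime=%llu user=%llu\n",
	       (unsigned long long)percpu[0].sum_exec_runtime,
	       (unsigned long long)percpu[0].field[STAT_USER]);
	return 0;
}
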
delta_exec       1183 kernel/sched/deadline.c 	u64 delta_exec, scaled_delta_exec;
delta_exec       1199 kernel/sched/deadline.c 	delta_exec = now - curr->se.exec_start;
delta_exec       1200 kernel/sched/deadline.c 	if (unlikely((s64)delta_exec <= 0)) {
delta_exec       1207 kernel/sched/deadline.c 		      max(curr->se.statistics.exec_max, delta_exec));
delta_exec       1209 kernel/sched/deadline.c 	curr->se.sum_exec_runtime += delta_exec;
delta_exec       1210 kernel/sched/deadline.c 	account_group_exec_runtime(curr, delta_exec);
delta_exec       1213 kernel/sched/deadline.c 	cgroup_account_cputime(curr, delta_exec);
delta_exec       1226 kernel/sched/deadline.c 		scaled_delta_exec = grub_reclaim(delta_exec,
delta_exec       1233 kernel/sched/deadline.c 		scaled_delta_exec = cap_scale(delta_exec, scale_freq);
delta_exec       1277 kernel/sched/deadline.c 			rt_rq->rt_time += delta_exec;
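
The kernel/sched/deadline.c hits (update_curr_dl()) show the skeleton that repeats in fair.c, rt.c and stop_task.c below: delta_exec is the task clock minus curr->se.exec_start, a non-positive delta is discarded, and the delta is folded into exec_max, sum_exec_runtime, thread-group and cgroup accounting. The deadline class then scales the delta before charging its reservation, via grub_reclaim() when bandwidth reclaiming is active (line 1226) or cap_scale() for CPU frequency/capacity (line 1233), and feeds rt_rq->rt_time (line 1277) when RT bandwidth control applies. A compilable userspace sketch of the shared skeleton; clock_ns(), struct entity and the numbers are stand-ins for the kernel's runqueue clock and sched_entity:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct entity {
	uint64_t exec_start;		/* clock stamp of the last update    */
	uint64_t sum_exec_runtime;	/* total CPU time consumed, in ns    */
	uint64_t exec_max;		/* longest single delta observed     */
};

static uint64_t clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void update_curr(struct entity *curr)
{
	uint64_t now = clock_ns();
	uint64_t delta_exec = now - curr->exec_start;

	if ((int64_t)delta_exec <= 0)	/* clock glitch or no time elapsed */
		return;

	curr->exec_start = now;		/* the next delta starts here */
	if (delta_exec > curr->exec_max)
		curr->exec_max = delta_exec;
	curr->sum_exec_runtime += delta_exec;
	/*
	 * The kernel also charges thread-group and cgroup accounting here;
	 * the deadline class scales delta_exec (grub_reclaim()/cap_scale())
	 * before subtracting it from the task's reserved runtime.
	 */
}

int main(void)
{
	struct entity e = { clock_ns(), 0, 0 };

	for (volatile long i = 0; i < 10000000; i++)
		;			/* burn a little CPU time */
	update_curr(&e);
	printf("sum_exec_runtime=%llu ns (exec_max=%llu)\n",
	       (unsigned long long)e.sum_exec_runtime,
	       (unsigned long long)e.exec_max);
	return 0;
}
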
delta_exec        218 kernel/sched/fair.c static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
delta_exec        240 kernel/sched/fair.c 	return mul_u64_u32_shr(delta_exec, fact, shift);
delta_exec        500 kernel/sched/fair.c void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
delta_exec        837 kernel/sched/fair.c 	u64 delta_exec;
delta_exec        842 kernel/sched/fair.c 	delta_exec = now - curr->exec_start;
delta_exec        843 kernel/sched/fair.c 	if (unlikely((s64)delta_exec <= 0))
delta_exec        849 kernel/sched/fair.c 		      max(delta_exec, curr->statistics.exec_max));
delta_exec        851 kernel/sched/fair.c 	curr->sum_exec_runtime += delta_exec;
delta_exec        852 kernel/sched/fair.c 	schedstat_add(cfs_rq->exec_clock, delta_exec);
delta_exec        854 kernel/sched/fair.c 	curr->vruntime += calc_delta_fair(delta_exec, curr);
delta_exec        860 kernel/sched/fair.c 		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
delta_exec        861 kernel/sched/fair.c 		cgroup_account_cputime(curtask, delta_exec);
delta_exec        862 kernel/sched/fair.c 		account_group_exec_runtime(curtask, delta_exec);
delta_exec        865 kernel/sched/fair.c 	account_cfs_rq_runtime(cfs_rq, delta_exec);
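
In kernel/sched/fair.c the same skeleton sits in update_curr() (lines 837-865), with one extra step at line 854: the wall-clock delta is weighted before it advances curr->vruntime. calc_delta_fair() lands in the __calc_delta() hit at line 218, which computes delta_exec * weight / lw->weight using a precomputed inverse weight and mul_u64_u32_shr(), so heavier (higher-priority) entities accumulate virtual time more slowly. The plain division below only illustrates that formula; the weights are taken from the nice-level table (nice 0 = 1024, nice -5 = 3121), not the kernel's scaled-up load values:

#include <stdint.h>
#include <stdio.h>

#define NICE_0_WEIGHT	1024UL

/* delta_exec * weight / lw->weight, without the kernel's fixed-point tricks */
static uint64_t calc_delta(uint64_t delta_exec, unsigned long weight,
			   unsigned long lw_weight)
{
	return delta_exec * weight / lw_weight;
}

int main(void)
{
	uint64_t delta_exec = 1000000;	/* 1 ms of real runtime */

	printf("nice  0: vruntime += %llu ns\n",
	       (unsigned long long)calc_delta(delta_exec, NICE_0_WEIGHT, 1024));
	printf("nice -5: vruntime += %llu ns\n",
	       (unsigned long long)calc_delta(delta_exec, NICE_0_WEIGHT, 3121));
	return 0;
}
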
delta_exec       4126 kernel/sched/fair.c 	unsigned long ideal_runtime, delta_exec;
delta_exec       4131 kernel/sched/fair.c 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
delta_exec       4132 kernel/sched/fair.c 	if (delta_exec > ideal_runtime) {
delta_exec       4147 kernel/sched/fair.c 	if (delta_exec < sysctl_sched_min_granularity)
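
The check_preempt_tick() hits (lines 4126-4147) use the name for something slightly different: here delta_exec is not a clock delta but sum_exec_runtime - prev_sum_exec_runtime, i.e. how long the current entity has run since it was last picked. It is compared against the ideal slice length to force a reschedule, and against sysctl_sched_min_granularity to avoid preempting an entity that has barely started. A small illustration with made-up numbers (the kernel follows this with a further vruntime-spread check):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sum_exec_runtime      = 7500000; /* total runtime so far, ns   */
	uint64_t prev_sum_exec_runtime = 3000000; /* snapshot at the last pick  */
	uint64_t ideal_runtime         = 4000000; /* slice from sched_slice()   */
	uint64_t min_granularity       =  750000; /* min granularity, ns        */

	uint64_t delta_exec = sum_exec_runtime - prev_sum_exec_runtime;

	if (delta_exec > ideal_runtime)
		printf("slice exhausted (%llu > %llu ns): reschedule\n",
		       (unsigned long long)delta_exec,
		       (unsigned long long)ideal_runtime);
	else if (delta_exec < min_granularity)
		printf("only %llu ns so far: too early to preempt\n",
		       (unsigned long long)delta_exec);
	else
		printf("keep running, check the vruntime spread next\n");
	return 0;
}
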
delta_exec       4410 kernel/sched/fair.c static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
delta_exec       4413 kernel/sched/fair.c 	cfs_rq->runtime_remaining -= delta_exec;
delta_exec       4429 kernel/sched/fair.c void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
delta_exec       4434 kernel/sched/fair.c 	__account_cfs_rq_runtime(cfs_rq, delta_exec);
delta_exec       5092 kernel/sched/fair.c static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
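
The __account_cfs_rq_runtime()/account_cfs_rq_runtime() hits (lines 4410-4434) belong to CFS bandwidth control: every delta_exec is subtracted from the runqueue's runtime_remaining, and once the local quota is gone the kernel tries to pull more from the task group's global pool or throttles the cfs_rq; with CONFIG_CFS_BANDWIDTH disabled the whole thing is the empty stub at line 5092. A sketch of that quota-draining pattern, with an invented refill helper and made-up amounts:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct fake_cfs_rq {
	int64_t runtime_remaining;	/* local slice of the period quota */
	bool throttled;
};

/* Pretend to pull more runtime from the global pool; 0 means none left. */
static int64_t refill_from_global_pool(void)
{
	static int64_t pool = 5000000;	/* 5 ms left in this period */
	int64_t slice = pool > 2000000 ? 2000000 : pool;

	pool -= slice;
	return slice;
}

static void account_cfs_rq_runtime(struct fake_cfs_rq *cfs_rq,
				   uint64_t delta_exec)
{
	cfs_rq->runtime_remaining -= (int64_t)delta_exec;
	if (cfs_rq->runtime_remaining > 0)
		return;

	cfs_rq->runtime_remaining += refill_from_global_pool();
	if (cfs_rq->runtime_remaining <= 0)
		cfs_rq->throttled = true;	/* the real kernel dequeues the cfs_rq */
}

int main(void)
{
	struct fake_cfs_rq rq = { 1000000, false };	/* 1 ms local runtime */

	for (int tick = 0; tick < 8 && !rq.throttled; tick++)
		account_cfs_rq_runtime(&rq, 1000000);	/* 1 ms per tick */
	printf("remaining=%lld throttled=%d\n",
	       (long long)rq.runtime_remaining, rq.throttled);
	return 0;
}
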
delta_exec        959 kernel/sched/rt.c 	u64 delta_exec;
delta_exec        966 kernel/sched/rt.c 	delta_exec = now - curr->se.exec_start;
delta_exec        967 kernel/sched/rt.c 	if (unlikely((s64)delta_exec <= 0))
delta_exec        971 kernel/sched/rt.c 		      max(curr->se.statistics.exec_max, delta_exec));
delta_exec        973 kernel/sched/rt.c 	curr->se.sum_exec_runtime += delta_exec;
delta_exec        974 kernel/sched/rt.c 	account_group_exec_runtime(curr, delta_exec);
delta_exec        977 kernel/sched/rt.c 	cgroup_account_cputime(curr, delta_exec);
delta_exec        987 kernel/sched/rt.c 			rt_rq->rt_time += delta_exec;
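
update_curr_rt() in kernel/sched/rt.c repeats the skeleton (lines 959-977) and then, when RT bandwidth enforcement is enabled, adds delta_exec to rt_rq->rt_time for the runqueues in the hierarchy (line 987); once rt_time exceeds the per-period budget the RT runqueue is throttled so lower classes can run. A sketch of that comparison, with field names mirroring the hits and made-up values (the default budget is 950 ms per 1 s period):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct fake_rt_rq {
	uint64_t rt_time;	/* RT runtime consumed in this period */
	uint64_t rt_runtime;	/* budget per period                  */
	bool rt_throttled;
};

static void account_rt_runtime(struct fake_rt_rq *rt_rq, uint64_t delta_exec)
{
	rt_rq->rt_time += delta_exec;
	if (rt_rq->rt_time > rt_rq->rt_runtime)
		rt_rq->rt_throttled = true;	/* the real kernel also reschedules */
}

int main(void)
{
	struct fake_rt_rq rq = { 0, 950000000ull, false };	/* 950 ms budget */

	account_rt_runtime(&rq, 900000000ull);	/* 900 ms of RT execution */
	account_rt_runtime(&rq, 100000000ull);	/* another 100 ms         */
	printf("rt_time=%llu throttled=%d\n",
	       (unsigned long long)rq.rt_time, rq.rt_throttled);
	return 0;
}
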
delta_exec         69 kernel/sched/stop_task.c 	u64 delta_exec;
delta_exec         71 kernel/sched/stop_task.c 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
delta_exec         72 kernel/sched/stop_task.c 	if (unlikely((s64)delta_exec < 0))
delta_exec         73 kernel/sched/stop_task.c 		delta_exec = 0;
delta_exec         76 kernel/sched/stop_task.c 			max(curr->se.statistics.exec_max, delta_exec));
delta_exec         78 kernel/sched/stop_task.c 	curr->se.sum_exec_runtime += delta_exec;
delta_exec         79 kernel/sched/stop_task.c 	account_group_exec_runtime(curr, delta_exec);
delta_exec         82 kernel/sched/stop_task.c 	cgroup_account_cputime(curr, delta_exec);
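
The kernel/sched/stop_task.c hits are the stop class doing the same bookkeeping one more time, with one visible difference at lines 72-73: instead of returning early on a non-positive delta as the other classes do, a negative delta_exec is clamped to zero and the accounting still runs. In sketch form:

#include <stdint.h>
#include <stdio.h>

/* Clamp instead of bailing out: a backwards clock yields a zero charge. */
static uint64_t sanitize_delta(uint64_t now, uint64_t exec_start)
{
	uint64_t delta_exec = now - exec_start;

	if ((int64_t)delta_exec < 0)
		delta_exec = 0;
	return delta_exec;
}

int main(void)
{
	printf("normal:    %llu\n", (unsigned long long)sanitize_delta(2000, 500));
	printf("backwards: %llu\n", (unsigned long long)sanitize_delta(500, 2000));
	return 0;
}
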