root/include/linux/sched/cputime.h

DEFINITIONS

This source file includes the following definitions:
  1. task_cputime
  2. task_gtime
  3. task_cputime_scaled
  4. task_cputime_scaled
  5. get_running_cputimer
  6. get_running_cputimer
  7. account_group_user_time
  8. account_group_system_time
  9. account_group_exec_runtime
  10. prev_cputime_init

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CPUTIME_H
#define _LINUX_SCHED_CPUTIME_H

#include <linux/sched/signal.h>

/*
 * cputime accounting APIs:
 */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>

#ifndef cputime_to_nsecs
# define cputime_to_nsecs(__ct) \
        (cputime_to_usecs(__ct) * NSEC_PER_USEC)
#endif
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
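
/*
 * Illustrative sketch, not part of the original header: the fallback
 * above converts an arch cputime value to nanoseconds by going through
 * microseconds first. The helper below is hypothetical and assumes the
 * arch representation fits in a u64.
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline u64 example_cputime_in_nsecs(u64 ct)
{
        return cputime_to_nsecs(ct);    /* cputime_to_usecs(ct) * NSEC_PER_USEC */
}
#endif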

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
                         u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
                                u64 *utime, u64 *stime)
{
        *utime = t->utime;
        *stime = t->stime;
}

static inline u64 task_gtime(struct task_struct *t)
{
        return t->gtime;
}
#endif
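
/*
 * Illustrative sketch, not part of the original header: whichever
 * branch above is compiled in, callers read a task's user, system and
 * guest time through the same accessors. The function name below is
 * hypothetical.
 */
static inline u64 example_task_total_ns(struct task_struct *t)
{
        u64 utime, stime;

        task_cputime(t, &utime, &stime);
        /* All values are in nanoseconds; task_gtime() is guest time. */
        return utime + stime + task_gtime(t);
}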

#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
                                       u64 *utimescaled,
                                       u64 *stimescaled)
{
        *utimescaled = t->utimescaled;
        *stimescaled = t->stimescaled;
}
#else
static inline void task_cputime_scaled(struct task_struct *t,
                                       u64 *utimescaled,
                                       u64 *stimescaled)
{
        task_cputime(t, utimescaled, stimescaled);
}
#endif

extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
                           u64 *ut, u64 *st);

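/*
 * Illustrative sketch, not part of the original header: the *_adjusted
 * variants pass raw times through cputime_adjust(), which uses the
 * prev_cputime state to keep reported utime/stime monotonic between
 * samples. A hypothetical reporting helper might look like this:
 */
static inline void example_report_times(struct task_struct *p,
                                        u64 *ut, u64 *st)
{
        /* ut/st come back in nanoseconds, never below a prior sample. */
        task_cputime_adjusted(p, ut, st);
}
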
/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples);

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * get_running_cputimer - return &tsk->signal->cputimer if cputimers are active
 *
 * @tsk:        Pointer to target task.
 */
#ifdef CONFIG_POSIX_TIMERS
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        /*
         * Check whether posix CPU timers are active. If not, the thread
         * group accounting is not active either. Lockless check.
         */
        if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active))
                return NULL;

        /*
         * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
         * in __exit_signal(), we no longer account further cputime consumed
         * by that task to the signal struct, even though the task can still
         * be ticking after __exit_signal().
         *
         * In order to keep a consistent behaviour between thread group cputime
         * and thread group cputimer accounting, let's also ignore the cputime
         * elapsing after __exit_signal() in any running thread group timer.
         *
         * This makes sure that POSIX CPU clocks and timers are synchronized, so
         * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
         * clock delta is behind the expiring timer value.
         */
        if (unlikely(!tsk->sighand))
                return NULL;

        return cputimer;
}
#else
static inline
struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
{
        return NULL;
}
#endif

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @cputime:    Time value by which to increment the utime field of the
 *              thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
                                           u64 cputime)
{
        struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

        if (!cputimer)
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @cputime:    Time value by which to increment the stime field of the
 *              thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
                                             u64 cputime)
{
        struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

        if (!cputimer)
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:        Pointer to task structure.
 * @ns:         Time value by which to increment the sum_exec_runtime field
 *              of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
{
        struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);

        if (!cputimer)
                return;

        atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
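
/*
 * Illustrative sketch, not part of the original header: a tick-time
 * caller (the real ones live in kernel/sched/cputime.c) would feed the
 * group counters like this; the function name is hypothetical.
 */
static inline void example_account_tick(struct task_struct *tsk,
                                        u64 cputime, bool user)
{
        if (user)
                account_group_user_time(tsk, cputime);
        else
                account_group_system_time(tsk, cputime);
}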

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        prev->utime = prev->stime = 0;
        raw_spin_lock_init(&prev->lock);
#endif
}
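
/*
 * Illustrative sketch, not part of the original header: prev_cputime
 * state is embedded in task_struct and signal_struct and must be
 * (re)initialized before cputime_adjust() uses it. The task_struct
 * field only exists without native vtime accounting, hence the guard;
 * the function name is hypothetical.
 */
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void example_init_prev(struct task_struct *p)
{
        prev_cputime_init(&p->prev_cputime);
}
#endif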

extern unsigned long long
task_sched_runtime(struct task_struct *task);

#endif /* _LINUX_SCHED_CPUTIME_H */
