kernel/sched/stats.h


DEFINITIONS

This source file includes the following definitions:
  1. rq_sched_info_arrive
  2. rq_sched_info_depart
  3. rq_sched_info_dequeued
  4. rq_sched_info_arrive
  5. rq_sched_info_dequeued
  6. rq_sched_info_depart
  7. psi_enqueue
  8. psi_dequeue
  9. psi_ttwu_dequeue
  10. psi_task_tick
  11. psi_enqueue
  12. psi_dequeue
  13. psi_ttwu_dequeue
  14. psi_task_tick
  15. sched_info_reset_dequeued
  16. sched_info_dequeued
  17. sched_info_arrive
  18. sched_info_queued
  19. sched_info_depart
  20. __sched_info_switch
  21. sched_info_switch

/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}
#define   schedstat_enabled()           static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)            do { var++; } while (0)
#define   schedstat_inc(var)            do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)       do { var += (amt); } while (0)
#define   schedstat_add(var, amt)       do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)       do { var = (val); } while (0)
#define   schedstat_set(var, val)       do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)            (var)
#define   schedstat_val_or_zero(var)    ((schedstat_enabled()) ? (var) : 0)
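
/*
 * Illustrative usage (a sketch, not part of this file): the schedstat_*()
 * variants test the sched_schedstats static branch on every invocation,
 * while the __schedstat_*() variants update unconditionally.  Callers that
 * touch several fields therefore typically test schedstat_enabled() once
 * and then use the double-underscore forms, roughly:
 *
 *	if (schedstat_enabled()) {
 *		__schedstat_set(se->statistics.wait_start, rq_clock(rq));
 *		__schedstat_inc(se->statistics.wait_count);
 *	}
 *
 * (The se->statistics field names above are for illustration; see the
 * callers in kernel/sched/fair.c for the exact usage.)
 */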

#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()          0
# define __schedstat_inc(var)           do { } while (0)
# define   schedstat_inc(var)           do { } while (0)
# define __schedstat_add(var, amt)      do { } while (0)
# define   schedstat_add(var, amt)      do { } while (0)
# define __schedstat_set(var, val)      do { } while (0)
# define   schedstat_set(var, val)      do { } while (0)
# define   schedstat_val(var)           0
# define   schedstat_val_or_zero(var)   0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
        int clear = 0, set = TSK_RUNNING;

        if (static_branch_likely(&psi_disabled))
                return;

        if (!wakeup || p->sched_psi_wake_requeue) {
                if (p->flags & PF_MEMSTALL)
                        set |= TSK_MEMSTALL;
                if (p->sched_psi_wake_requeue)
                        p->sched_psi_wake_requeue = 0;
        } else {
                if (p->in_iowait)
                        clear |= TSK_IOWAIT;
        }

        psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
        int clear = TSK_RUNNING, set = 0;

        if (static_branch_likely(&psi_disabled))
                return;

        if (!sleep) {
                if (p->flags & PF_MEMSTALL)
                        clear |= TSK_MEMSTALL;
        } else {
                if (p->in_iowait)
                        set |= TSK_IOWAIT;
        }

        psi_task_change(p, clear, set);
}
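
/*
 * Illustrative call sites (a sketch based on kernels of this vintage,
 * not part of this file): enqueue_task() and dequeue_task() in
 * kernel/sched/core.c pass the wakeup/sleep distinction down via the
 * enqueue/dequeue flags, roughly:
 *
 *	psi_enqueue(p, flags & ENQUEUE_WAKEUP);
 *	...
 *	psi_dequeue(p, flags & DEQUEUE_SLEEP);
 *
 * A plain requeue (e.g. a migration) thus reaches psi_enqueue() with
 * wakeup == false and re-registers the task's persistent states.
 */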

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
        if (static_branch_likely(&psi_disabled))
                return;
        /*
         * Is the task being migrated during a wakeup? Make sure to
         * deregister its sleep-persistent psi states from the old
         * queue, and let psi_enqueue() know it has to requeue.
         */
        if (unlikely(p->in_iowait || (p->flags & PF_MEMSTALL))) {
                struct rq_flags rf;
                struct rq *rq;
                int clear = 0;

                if (p->in_iowait)
                        clear |= TSK_IOWAIT;
                if (p->flags & PF_MEMSTALL)
                        clear |= TSK_MEMSTALL;

                rq = __task_rq_lock(p, &rf);
                psi_task_change(p, clear, 0);
                p->sched_psi_wake_requeue = 1;
                __task_rq_unlock(rq, &rf);
        }
}
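
/*
 * Illustrative call site (a sketch, not part of this file): the wakeup
 * path in try_to_wake_up() calls this only when the task is about to
 * change CPUs, roughly:
 *
 *	if (task_cpu(p) != cpu) {
 *		wake_flags |= WF_MIGRATED;
 *		psi_ttwu_dequeue(p);
 *		set_task_cpu(p, cpu);
 *	}
 */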

static inline void psi_task_tick(struct rq *rq)
{
        if (static_branch_likely(&psi_disabled))
                return;

        if (unlikely(rq->curr->flags & PF_MEMSTALL))
                psi_memstall_tick(rq->curr, cpu_of(rq));
}
#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_task_tick(struct rq *rq) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU.  We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs: since each delta is taken against the clock of the CPU
 * the task was queued on, the skew cancels out of the accumulated total.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (sched_info_on()) {
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        }
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(rq, delta);
}
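
/*
 * Worked example of the skew cancellation (illustrative numbers):
 * suppose a task is queued on CPU0 at rq_clock == 1000 and dequeued
 * there at rq_clock == 1300; this routine adds 300 to run_delay.  It is
 * then queued on CPU1, whose rq_clock reads 9000 due to skew, and hits
 * the CPU at 9200; sched_info_arrive() adds 200.  The total of 500 only
 * ever subtracts timestamps taken on the same CPU, so the absolute
 * offset between the two clocks never enters the accounting.
 */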

/*
 * Called when a task finally hits the CPU.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}
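
/*
 * Where these numbers surface (a sketch, not part of this file): the
 * per-task run_delay and pcount accumulated here are exported through
 * /proc/<pid>/schedstat, which on kernels of this vintage prints
 * roughly:
 *
 *	seq_printf(m, "%llu %llu %lu\n",
 *		   (unsigned long long)task->se.sum_exec_runtime,
 *		   (unsigned long long)task->sched_info.run_delay,
 *		   task->sched_info.pcount);
 *
 * i.e. CPU time used, time spent waiting on a runqueue, and number of
 * timeslices run; see fs/proc/base.c.
 */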

/*
 * Called from enqueue_task() (and from sched_info_depart() below, for a
 * task that remains runnable).  It only updates the timestamp if that
 * is not already set; it's assumed that sched_info_dequeued() will
 * clear the stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
        if (sched_info_on()) {
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(rq);
        }
}

/*
 * Called when a process ceases being the active running process,
 * typically involuntarily due to expiring its time slice (this may also
 * be called when switching to the idle task).  Now we can calculate how
 * long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched, typically involuntarily due to
 * expiring their time slice.  (This may also be called when switching
 * to or from the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
        /*
         * prev now departs the CPU.  It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
        if (sched_info_on())
                __sched_info_switch(rq, prev, next);
}
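
/*
 * Illustrative call site (a sketch, not part of this file): the context
 * switch path invokes this from prepare_task_switch() in
 * kernel/sched/core.c, roughly:
 *
 *	sched_info_switch(rq, prev, next);
 *
 * so every prev != next switch is accounted exactly once, before the
 * actual switch_to().
 */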

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_queued(rq, t)       do { } while (0)
# define sched_info_reset_dequeued(t)   do { } while (0)
# define sched_info_dequeued(rq, t)     do { } while (0)
# define sched_info_depart(rq, t)       do { } while (0)
# define sched_info_arrive(rq, next)    do { } while (0)
# define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHED_INFO */
