kernel/sched/cpupri.c


DEFINITIONS

This source file includes the following definitions:
  1. convert_prio
  2. cpupri_find
  3. cpupri_set
  4. cpupri_init
  5. cpupri_cleanup

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a 2-dimensional bitmap (the first dimension for priority class, the
 *  second for CPUs in that class).  Therefore a typical application without
 *  affinity restrictions can find a suitable CPU with O(1) complexity (e.g.
 *  two bit searches).  For tasks with affinity restrictions, the algorithm
 *  has a worst-case complexity of O(min(102, nr_domcpus)), though the
 *  scenario that yields the worst-case search is fairly contrived.
 */
#include "sched.h"

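/*
 * For reference (not part of the original file): the per-priority state
 * described above lives in two small structures declared in
 * kernel/sched/cpupri.h.  The sketch below is paraphrased from that header
 * for the same kernel era; exact definitions may differ slightly:
 *
 *	#define CPUPRI_NR_PRIORITIES	(MAX_RT_PRIO + 2)	// 102
 *	#define CPUPRI_INVALID		-1
 *	#define CPUPRI_IDLE		 0
 *	#define CPUPRI_NORMAL		 1
 *	// cpupri indices 2..101 correspond to the RT priorities
 *
 *	struct cpupri_vec {
 *		atomic_t	count;	// nr of CPUs currently in this class
 *		cpumask_var_t	mask;	// which CPUs those are
 *	};
 *
 *	struct cpupri {
 *		struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
 *		int		 *cpu_to_pri;
 *	};
 *
 * pri_to_cpu[] is the "2-dimensional bitmap" from the comment above: one
 * cpumask per priority class, so a search touches at most 102 classes and
 * one cpumask operation per class.
 */
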
/* Convert between a 140-based task->prio and our 102-based cpupri */
static int convert_prio(int prio)
{
	int cpupri;

	if (prio == CPUPRI_INVALID)
		cpupri = CPUPRI_INVALID;
	else if (prio == MAX_PRIO)
		cpupri = CPUPRI_IDLE;
	else if (prio >= MAX_RT_PRIO)
		cpupri = CPUPRI_NORMAL;
	else
		cpupri = MAX_RT_PRIO - prio + 1;

	return cpupri;
}
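
/*
 * A worked example of the mapping above, assuming MAX_PRIO == 140 and
 * MAX_RT_PRIO == 100 (the values behind the "140-based"/"102-based" wording):
 *
 *	convert_prio(CPUPRI_INVALID)	// -1  -> CPUPRI_INVALID (-1)
 *	convert_prio(MAX_PRIO)		// 140 -> CPUPRI_IDLE    (0)
 *	convert_prio(120)		// CFS nice 0 -> CPUPRI_NORMAL (1)
 *	convert_prio(99)		// lowest  RT priority -> 2
 *	convert_prio(0)			// highest RT priority -> 101
 *
 * That is 102 distinct cpupri values in total, indices 0..101, with higher
 * indices meaning higher priority.
 */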

/**
 * cpupri_find - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	int idx = 0;
	int task_pri = convert_prio(p->prio);

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
		int skip = 0;

		if (!atomic_read(&(vec)->count))
			skip = 1;
		/*
		 * When looking at the vector, we need to read the counter,
		 * do a memory barrier, then read the mask.
		 *
		 * Note: This is still all racy, but we can deal with it.
		 *  Ideally, we only want to look at masks that are set.
		 *
		 *  If a mask is not set, then the only thing wrong is that we
		 *  did a little more work than necessary.
		 *
		 *  If we read a zero count but the mask is set, because of the
		 *  memory barriers, that can only happen when the highest prio
		 *  task for a run queue has left the run queue, in which case,
		 *  it will be followed by a pull. If the task we are processing
		 *  fails to find a proper place to go, that pull request will
		 *  pull this task if the run queue is running at a lower
		 *  priority.
		 */
		smp_rmb();

		/* Need to do the rmb for every iteration */
		if (skip)
			continue;

		if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
			continue;

		if (lowest_mask) {
			cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);

			/*
			 * We have to ensure that we have at least one bit
			 * still set in the array, since the map could have
			 * been concurrently emptied between the first and
			 * second reads of vec->mask.  If we hit this
			 * condition, simply act as though we never hit this
			 * priority level and continue on.
			 */
			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
				continue;
		}

		return 1;
	}

	return 0;
}
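
/*
 * Usage sketch (not part of this file): the main caller is the RT
 * scheduler's push/pull logic in kernel/sched/rt.c, whose find_lowest_rq()
 * does roughly the following (paraphrased; details vary by kernel version):
 *
 *	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
 *
 *	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
 *		return -1;	// no CPU runs at a lower priority than the task
 *
 *	// lowest_mask now holds the candidate CPUs; the caller then prefers
 *	// task_cpu(task) and topologically close CPUs among them.
 *
 * Treat this as an illustration of the contract rather than the caller's
 * literal code.
 */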

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}
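
/*
 * Illustration (not part of this file): raising a CPU's priority, i.e. a
 * cpupri_set() call where both oldpri and newpri are valid, therefore
 * performs the updates in this order:
 *
 *	1. cpumask_set_cpu(cpu, pri_to_cpu[newpri].mask)
 *	2. smp_mb__before_atomic()
 *	3. atomic_inc(&pri_to_cpu[newpri].count)
 *	4. smp_mb__after_atomic()
 *	5. atomic_dec(&pri_to_cpu[oldpri].count)
 *	6. smp_mb__after_atomic()
 *	7. cpumask_clear_cpu(cpu, pri_to_cpu[oldpri].mask)
 *
 * A concurrent cpupri_find(), which reads count and then mask with its own
 * smp_rmb() in between, therefore sees the CPU in at least one of the two
 * vectors; seeing it in both merely costs a little redundant work.
 */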

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: 0 on success, -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
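
/*
 * Usage sketch (not part of this file): each root domain owns one cpupri
 * context, and the scheduler's root-domain setup code pairs these calls
 * roughly as follows (paraphrased; error-handling details are illustrative):
 *
 *	static int init_rootdomain(struct root_domain *rd)
 *	{
 *		...
 *		if (cpupri_init(&rd->cpupri) != 0)
 *			goto err;	// unwind earlier allocations
 *		return 0;
 *	err:
 *		...
 *		return -ENOMEM;
 *	}
 *
 *	static void free_rootdomain(...)
 *	{
 *		...
 *		cpupri_cleanup(&rd->cpupri);
 *		...
 *	}
 *
 * cpupri_cleanup() expects a fully initialized context; on failure
 * cpupri_init() already frees whatever it allocated, so callers skip the
 * cleanup call in that path.
 */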
