root/lib/percpu_counter.c

DEFINITIONS

This source file includes the following definitions:
  1. percpu_counter_fixup_free
  2. debug_percpu_counter_activate
  3. debug_percpu_counter_deactivate
  4. debug_percpu_counter_activate
  5. debug_percpu_counter_deactivate
  6. percpu_counter_set
  7. percpu_counter_add_batch
  8. __percpu_counter_sum
  9. __percpu_counter_init
  10. percpu_counter_destroy
  11. compute_batch_value
  12. percpu_counter_cpu_dead
  13. __percpu_counter_compare
  14. percpu_counter_startup

// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

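/*
 * With CPU hotplug enabled, every live percpu_counter is linked on this
 * global list so that percpu_counter_cpu_dead() below can fold a dead
 * CPU's local delta back into the global count.
 */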
#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

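/*
 * Set the counter to @amount and zero every CPU's local delta. The update
 * happens under fbc->lock, but the fast path of percpu_counter_add_batch()
 * is lockless, so a concurrent add on another CPU can still race with the
 * reset; callers that need an exact result must exclude updaters.
 */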
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock whereas the fast path uses
 * this_cpu_add() which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);
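
/*
 * Usage sketch (illustrative, not part of this file): most callers go
 * through percpu_counter_add(), which the header implements by passing
 * the global default batch:
 *
 *	static inline void percpu_counter_add(struct percpu_counter *fbc,
 *					      s64 amount)
 *	{
 *		percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
 *	}
 *
 * With a batch of 32, each CPU accumulates up to +/-31 locally before the
 * slow path takes fbc->lock and folds the delta into fbc->count.
 */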

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
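
/*
 * Usage sketch (illustrative): percpu_counter_read() simply returns
 * fbc->count and may be off by up to roughly batch * num_online_cpus(),
 * while percpu_counter_sum() wraps this function for an exact total:
 *
 *	s64 fast = percpu_counter_read(&counter);	// approximate, lockless
 *	s64 slow = percpu_counter_sum(&counter);	// exact, takes fbc->lock
 *
 * Here "counter" is a hypothetical struct percpu_counter.
 */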

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
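
/*
 * Lifecycle sketch (illustrative): percpu_counter_init() is a header macro
 * that supplies a static lock_class_key and lands here. A typical caller,
 * using a hypothetical counter "nr_things", pairs it with
 * percpu_counter_destroy():
 *
 *	struct percpu_counter nr_things;
 *
 *	if (percpu_counter_init(&nr_things, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_things);
 *	...
 *	percpu_counter_destroy(&nr_things);
 */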

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

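/*
 * compute_batch_value() below scales the batch with the number of online
 * CPUs so the worst-case drift grows with machine size: for example, with
 * 64 CPUs online the batch becomes max(32, 64 * 2) = 128, which bounds the
 * error of percpu_counter_read() by roughly 128 * 64 = 8192.
 */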
static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}

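/*
 * CPU hotplug callback: when a CPU dies, fold its local delta from every
 * registered counter back into the global count so nothing is lost, and
 * recompute the batch for the new number of online CPUs.
 */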
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
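
/*
 * Usage sketch (illustrative): percpu_counter_compare() in the header
 * passes the global percpu_counter_batch here. A typical pattern is a
 * cheap limit check that only pays for the exact sum when the count is
 * close to the limit; "used" and "limit" are hypothetical:
 *
 *	if (percpu_counter_compare(&used, limit) >= 0)
 *		return -ENOSPC;	// at or over the limit
 */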
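
/*
 * Boot-time registration of the hotplug callbacks above: recompute the
 * batch whenever a CPU comes online, and fold counters back when one dies.
 */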
static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);
