root/include/linux/percpu-refcount.h

DEFINITIONS

This source file includes the following definitions:
  1. percpu_ref_kill
  2. __ref_is_percpu
  3. percpu_ref_get_many
  4. percpu_ref_get
  5. percpu_ref_tryget
  6. percpu_ref_tryget_live
  7. percpu_ref_put_many
  8. percpu_ref_put
  9. percpu_ref_is_dying
  10. percpu_ref_is_zero

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0.  After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * removes the kioctx from the process's table of kioctxs and kills the
 * percpu_ref.  After that, there can't be any new users of the kioctx (from
 * lookup_ioctx()) and it's then safe to drop the initial ref with
 * percpu_ref_put().
 *
 * Note that the free path, free_ioctx(), needs to go through explicit
 * call_rcu() to synchronize with RCU protected lookup_ioctx().  percpu_ref
 * operations don't imply RCU grace periods of any kind and if a user wants to
 * combine percpu_ref with RCU protection, it must be done explicitly.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once.  percpu_ref_kill() does not provide that synchronization - it is a
 * void function that merely warns if called more than once - so the caller
 * must supply its own mechanism; the aio code does so by removing the kioctx
 * from the process's table before killing the ref.
 */

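/*
 * A minimal sketch of the two stage shutdown described above.  The names
 * my_obj and my_obj_release are hypothetical, not part of this API:
 *
 *      struct my_obj {
 *              struct percpu_ref       ref;
 *      };
 *
 *      static void my_obj_release(struct percpu_ref *ref)
 *      {
 *              struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *              percpu_ref_exit(&obj->ref);
 *              kfree(obj);
 *      }
 *
 *      Creation - the initial ref is taken by percpu_ref_init():
 *
 *      if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL))
 *              return -ENOMEM;
 *
 *      Teardown - kill exactly once, then drop the initial ref; the release
 *      callback runs once every other ref has been put:
 *
 *      percpu_ref_kill(&obj->ref);
 *      percpu_ref_put(&obj->ref);
 */
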
#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
        __PERCPU_REF_ATOMIC     = 1LU << 0,     /* operating in atomic mode */
        __PERCPU_REF_DEAD       = 1LU << 1,     /* (being) killed */
        __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

        __PERCPU_REF_FLAG_BITS  = 2,
};

/* @flags for percpu_ref_init() */
enum {
        /*
         * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
         * operation using percpu_ref_switch_to_percpu().  If initialized
         * with this flag, the ref will stay in atomic mode until
         * percpu_ref_switch_to_percpu() is invoked on it.
         * Implies ALLOW_REINIT.
         */
        PERCPU_REF_INIT_ATOMIC  = 1 << 0,

        /*
         * Start dead w/ ref == 0 in atomic mode.  Must be revived with
         * percpu_ref_reinit() before use.  Implies INIT_ATOMIC and
         * ALLOW_REINIT.
         */
        PERCPU_REF_INIT_DEAD    = 1 << 1,

        /*
         * Allow switching from atomic mode to percpu mode.
         */
        PERCPU_REF_ALLOW_REINIT = 1 << 2,
};

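/*
 * For instance, a ref whose hot path only starts later can be initialized
 * in atomic mode and switched to percpu mode once contention is expected.
 * A hedged sketch; obj and my_obj_release are hypothetical:
 *
 *      ret = percpu_ref_init(&obj->ref, my_obj_release,
 *                            PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
 *      ...
 *      percpu_ref_switch_to_percpu(&obj->ref);
 *
 * PERCPU_REF_INIT_ATOMIC implies ALLOW_REINIT, so the switch above is
 * permitted.
 */
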
struct percpu_ref {
        atomic_long_t           count;
        /*
         * The low bits of the pointer hold the mode flags; if
         * __PERCPU_REF_ATOMIC is set, get/put manipulate the atomic
         * count instead of the percpu counters.
         */
        unsigned long           percpu_count_ptr;
        percpu_ref_func_t       *release;
        percpu_ref_func_t       *confirm_switch;
        bool                    force_atomic:1;
        bool                    allow_reinit:1;
        struct rcu_head         rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
                                 percpu_ref_func_t *release, unsigned int flags,
                                 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);

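/*
 * Sketch of percpu_ref_kill_and_confirm() usage: the confirm_kill callback
 * fires once it is guaranteed that percpu_ref_tryget_live() can no longer
 * succeed.  my_obj and its kill_confirmed completion are hypothetical:
 *
 *      static void my_obj_confirm_kill(struct percpu_ref *ref)
 *      {
 *              struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *              complete(&obj->kill_confirmed);
 *      }
 *
 *      percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
 *      wait_for_completion(&obj->kill_confirmed);
 *      percpu_ref_put(&obj->ref);              (drops the initial ref)
 */
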
/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
        percpu_ref_kill_and_confirm(ref, NULL);
}

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer for the caller to test against NULL
 * because doing so would force the compiler to generate two conditional
 * branches, as it can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
                                          unsigned long __percpu **percpu_countp)
{
        unsigned long percpu_ptr;

        /*
         * The value of @ref->percpu_count_ptr is tested for
         * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
         * used as a pointer.  If the compiler generates a separate fetch
         * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
         * between, contaminating the pointer value, meaning that
         * READ_ONCE() is required when fetching it.
         *
         * The smp_read_barrier_depends() implied by READ_ONCE() pairs
         * with smp_store_release() in __percpu_ref_switch_to_percpu().
         */
        percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

        /*
         * Theoretically, the following could test just ATOMIC; however,
         * then we'd have to mask off DEAD separately as DEAD may be
         * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
         * implies ATOMIC anyway.  Test them together.
         */
        if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
                return false;

        *percpu_countp = (unsigned long __percpu *)percpu_ptr;
        return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
        unsigned long __percpu *percpu_count;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count))
                this_cpu_add(*percpu_count, nr);
        else
                atomic_long_add(nr, &ref->count);

        rcu_read_unlock_sched();
}

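/*
 * E.g. a batching submission path (hypothetical; queue_item() is not part
 * of this API) can take one ref per queued item in a single operation and
 * have each completion drop its ref individually:
 *
 *      percpu_ref_get_many(&obj->ref, nr_items);
 *      for (i = 0; i < nr_items; i++)
 *              queue_item(obj, items[i]);      each completion then calls
 *                                              percpu_ref_put(&obj->ref)
 */
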
/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
        percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count has already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        bool ret;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count)) {
                this_cpu_inc(*percpu_count);
                ret = true;
        } else {
                ret = atomic_long_inc_not_zero(&ref->count);
        }

        rcu_read_unlock_sched();

        return ret;
}

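/*
 * Note that percpu_ref_tryget() can still succeed after percpu_ref_kill()
 * as long as the count hasn't reached zero; use percpu_ref_tryget_live()
 * below to refuse refs on a killed ref.  A hedged sketch (obj is
 * hypothetical):
 *
 *      if (!percpu_ref_tryget(&obj->ref))
 *              return -EAGAIN;         already released, don't touch obj
 */
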
/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        bool ret = false;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count)) {
                this_cpu_inc(*percpu_count);
                ret = true;
        } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
                ret = atomic_long_inc_not_zero(&ref->count);
        }

        rcu_read_unlock_sched();

        return ret;
}

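/*
 * The typical caller is an RCU protected lookup, as in lookup_ioctx()
 * described at the top of this file.  A hedged sketch with a hypothetical
 * id table my_idr:
 *
 *      rcu_read_lock();
 *      obj = idr_find(&my_idr, id);
 *      if (obj && !percpu_ref_tryget_live(&obj->ref))
 *              obj = NULL;             dying or dead, treat as gone
 *      rcu_read_unlock();
 */
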
/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if it reaches 0, call the release function
 * (which was passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
        unsigned long __percpu *percpu_count;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count))
                this_cpu_sub(*percpu_count, nr);
        else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
                ref->release(ref);

        rcu_read_unlock_sched();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if it reaches 0, call the release function
 * (which was passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
        percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
        return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}

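/*
 * E.g. a path that already holds a ref may still want to refuse new work
 * once shutdown has started (hypothetical sketch):
 *
 *      if (percpu_ref_is_dying(&obj->ref))
 *              return -ENODEV;
 */
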
/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;

        if (__ref_is_percpu(ref, &percpu_count))
                return false;
        return !atomic_long_read(&ref->count);
}
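
/*
 * E.g. a caller managed teardown path might assert that all refs are gone
 * before tearing down backing resources (hypothetical sketch; only
 * meaningful after the ref has been killed, since a ref in percpu mode
 * never reads as zero):
 *
 *      WARN_ON_ONCE(!percpu_ref_is_zero(&obj->ref));
 *      percpu_ref_exit(&obj->ref);
 */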

#endif /* _LINUX_PERCPU_REFCOUNT_H */
