root/lib/refcount.c

DEFINITIONS

This source file includes the following definitions.
  1. refcount_add_not_zero_checked
  2. refcount_add_checked
  3. refcount_inc_not_zero_checked
  4. refcount_inc_checked
  5. refcount_sub_and_test_checked
  6. refcount_dec_and_test_checked
  7. refcount_dec_checked
  8. refcount_dec_if_one
  9. refcount_dec_not_one
  10. refcount_dec_and_mutex_lock
  11. refcount_dec_and_lock
  12. refcount_dec_and_lock_irqsave

// SPDX-License-Identifier: GPL-2.0
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads
 * and stores will be issued before; they also provide a control dependency,
 * which will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 */
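
/*
 * A minimal usage sketch (hypothetical, not part of this file) of the
 * canonical get/put pair these ordering rules are written for; 'struct obj',
 * obj_get() and obj_put() are assumed names for illustration:
 *
 *	struct obj {
 *		refcount_t ref;
 *	};
 *
 *	void obj_get(struct obj *o)
 *	{
 *		refcount_inc(&o->ref);	// relaxed: the lookup provided ordering
 *	}
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (refcount_dec_and_test(&o->ref))	// release + acquire on 1->0
 *			kfree(o);
 *	}
 */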

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

/**
 * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
{
        unsigned int new, val = atomic_read(&r->refs);

        do {
                if (!val)
                        return false;

                /* Saturated: the counter must not move. */
                if (unlikely(val == UINT_MAX))
                        return true;

                new = val + i;
                /* Clamp to UINT_MAX on overflow instead of wrapping. */
                if (new < val)
                        new = UINT_MAX;

        } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

        WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

        return true;
}
EXPORT_SYMBOL(refcount_add_not_zero_checked);
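
/*
 * A hedged usage sketch (hypothetical names): taking @i references in one go
 * when fanning an already-looked-up object out to several consumers:
 *
 *	// 'o' was found under RCU; take one reference per target queue.
 *	if (!refcount_add_not_zero_checked(nr_queues, &o->ref))
 *		return -ENOENT;	// object already on its way to being freed
 */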

/**
 * refcount_add_checked - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add_checked(unsigned int i, refcount_t *r)
{
        WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_add_checked);

/**
 * refcount_inc_not_zero_checked - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero_checked(refcount_t *r)
{
        unsigned int new, val = atomic_read(&r->refs);

        do {
                new = val + 1;

                if (!val)
                        return false;

                /*
                 * val was UINT_MAX and new wrapped to 0: the counter is
                 * saturated and must not move.
                 */
                if (unlikely(!new))
                        return true;

        } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

        WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

        return true;
}
EXPORT_SYMBOL(refcount_inc_not_zero_checked);
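
/*
 * A minimal sketch (hypothetical 'struct obj' and obj_lookup()) of the
 * lookup-then-get pattern this function exists for: RCU keeps the memory
 * stable, and a failed increment tells us the object is already dying:
 *
 *	struct obj *obj_find_get(unsigned long key)
 *	{
 *		struct obj *o;
 *
 *		rcu_read_lock();
 *		o = obj_lookup(key);
 *		if (o && !refcount_inc_not_zero_checked(&o->ref))
 *			o = NULL;	// lost the race with the last put
 *		rcu_read_unlock();
 *		return o;
 *	}
 */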

/**
 * refcount_inc_checked - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc_checked(refcount_t *r)
{
        WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_inc_checked);

/**
 * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow, and will fail to decrement when saturated
 * at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
{
        unsigned int new, val = atomic_read(&r->refs);

        do {
                if (unlikely(val == UINT_MAX))
                        return false;

                new = val - i;
                if (new > val) {
                        WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
                        return false;
                }

        } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

        if (!new) {
                /* Upgrade the control dependency to full acquire ordering. */
                smp_acquire__after_ctrl_dep();
                return true;
        }
        return false;
}
EXPORT_SYMBOL(refcount_sub_and_test_checked);
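
/*
 * A hedged usage sketch (hypothetical names): dropping a batch of previously
 * taken references in one operation, e.g. after completing 'nr' queued
 * requests against the same object:
 *
 *	if (refcount_sub_and_test_checked(nr, &o->ref))
 *		kfree(o);
 */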

/**
 * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test_checked(refcount_t *r)
{
        return refcount_sub_and_test_checked(1, r);
}
EXPORT_SYMBOL(refcount_dec_and_test_checked);
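
/*
 * A hedged sketch of the usual release path ('struct obj' is a hypothetical
 * name): release ordering makes all prior stores to the object visible before
 * the count drops, and the acquire on the 1->0 transition orders kfree()
 * after any concurrent last accesses:
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (refcount_dec_and_test_checked(&o->ref))
 *			kfree(o);
 *	}
 */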

/**
 * refcount_dec_checked - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec_checked(refcount_t *r)
{
        WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec_checked);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg, because the latter would allow implementing
 * unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
        int val = 1;

        return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
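
/*
 * A hedged sketch of the try-delete pattern (hypothetical cache; the caller
 * is assumed to hold whatever lock protects the list): tear the object down
 * only if we hold the sole reference, otherwise leave it for its other users:
 *
 *	bool obj_try_evict(struct obj *o)
 *	{
 *		if (!refcount_dec_if_one(&o->ref))
 *			return false;	// still in use elsewhere
 *		list_del(&o->node);
 *		kfree(o);
 *		return true;
 *	}
 */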

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
        unsigned int new, val = atomic_read(&r->refs);

        do {
                /* Saturated: the counter never reaches 1, so report success. */
                if (unlikely(val == UINT_MAX))
                        return true;

                if (val == 1)
                        return false;

                new = val - 1;
                if (new > val) {
                        WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
                        return true;
                }

        } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

        return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
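
/*
 * A hedged sketch of the lock-avoidance pattern this enables, and which the
 * refcount_dec_and_*lock() helpers below implement; 'obj_list_lock' is a
 * hypothetical name:
 *
 *	if (refcount_dec_not_one(&o->ref))
 *		return;		// fast path: definitely not the final put
 *	mutex_lock(&obj_list_lock);
 *	// slow path: resolve the possible 1->0 transition under the lock
 */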

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
        if (refcount_dec_not_one(r))
                return false;

        mutex_lock(lock);
        /* The count may have been re-raised meanwhile; recheck under the lock. */
        if (!refcount_dec_and_test(r)) {
                mutex_unlock(lock);
                return false;
        }

        return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
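
/*
 * A hedged usage sketch (hypothetical list and names): unlinking an object
 * from a mutex-protected list exactly when the last reference goes away:
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (!refcount_dec_and_mutex_lock(&o->ref, &obj_list_lock))
 *			return;
 *		list_del(&o->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(o);
 *	}
 */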

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
        if (refcount_dec_not_one(r))
                return false;

        spin_lock(lock);
        if (!refcount_dec_and_test(r)) {
                spin_unlock(lock);
                return false;
        }

        return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
                                   unsigned long *flags)
{
        if (refcount_dec_not_one(r))
                return false;

        spin_lock_irqsave(lock, *flags);
        if (!refcount_dec_and_test(r)) {
                spin_unlock_irqrestore(lock, *flags);
                return false;
        }

        return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
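
/*
 * A hedged sketch for IRQ-unsafe lock contexts (hypothetical names): the
 * object sits on a list that is also touched from interrupt context, so the
 * final unlink must disable interrupts while the lock is held:
 *
 *	void obj_put(struct obj *o)
 *	{
 *		unsigned long flags;
 *
 *		if (!refcount_dec_and_lock_irqsave(&o->ref, &obj_lock, &flags))
 *			return;
 *		list_del(&o->node);
 *		spin_unlock_irqrestore(&obj_lock, flags);
 *		kfree(o);
 *	}
 */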
