arch/sparc/lib/atomic32.c


DEFINITIONS

This source file includes the following definitions.
  1. ATOMIC_FETCH_OP
  2. ATOMIC_OP_RETURN
  3. atomic_xchg
  4. atomic_cmpxchg
  5. atomic_fetch_add_unless
  6. atomic_set
  7. ___set_bit
  8. ___clear_bit
  9. ___change_bit
  10. __cmpxchg_u32
  11. __cmpxchg_u64
  12. __xchg_u32

// SPDX-License-Identifier: GPL-2.0
/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>

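/*
 * 32-bit SPARC has no usable compare-and-swap instruction on most
 * implementations, so atomic_t and the cmpxchg()/xchg() helpers are
 * emulated with spinlocks.  On SMP, contention is spread across a
 * small hash of locks: ATOMIC_HASH() picks one of four locks from
 * bits 8-9 of the target address, so words in the same 256-byte
 * block share a lock while unrelated ones usually do not.  On UP a
 * single dummy lock suffices, since disabling interrupts already
 * serializes everything.
 */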
#ifdef CONFIG_SMP
#define ATOMIC_HASH_SIZE        4
#define ATOMIC_HASH(a)  (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
        [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE        1
#define ATOMIC_HASH(a)          (&dummy)

#endif /* SMP */

#define ATOMIC_FETCH_OP(op, c_op)                                       \
int atomic_fetch_##op(int i, atomic_t *v)                               \
{                                                                       \
        int ret;                                                        \
        unsigned long flags;                                            \
        spin_lock_irqsave(ATOMIC_HASH(v), flags);                       \
                                                                        \
        ret = v->counter;                                               \
        v->counter c_op i;                                              \
                                                                        \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);                  \
        return ret;                                                     \
}                                                                       \
EXPORT_SYMBOL(atomic_fetch_##op);

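/*
 * For instance, ATOMIC_FETCH_OP(add, +=) below expands to
 *
 *      int atomic_fetch_add(int i, atomic_t *v)
 *      {
 *              ...
 *              ret = v->counter;
 *              v->counter += i;
 *              ...
 *              return ret;
 *      }
 *
 * i.e. each generated op returns the value the counter held before
 * the update, with the whole read-modify-write done under the hash
 * lock for v.  ATOMIC_OP_RETURN below differs only in returning the
 * value after the update.
 */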
#define ATOMIC_OP_RETURN(op, c_op)                                      \
int atomic_##op##_return(int i, atomic_t *v)                            \
{                                                                       \
        int ret;                                                        \
        unsigned long flags;                                            \
        spin_lock_irqsave(ATOMIC_HASH(v), flags);                       \
                                                                        \
        ret = (v->counter c_op i);                                      \
                                                                        \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);                  \
        return ret;                                                     \
}                                                                       \
EXPORT_SYMBOL(atomic_##op##_return);

ATOMIC_OP_RETURN(add, +=)

ATOMIC_FETCH_OP(add, +=)
ATOMIC_FETCH_OP(and, &=)
ATOMIC_FETCH_OP(or, |=)
ATOMIC_FETCH_OP(xor, ^=)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

int atomic_xchg(atomic_t *v, int new)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        v->counter = new;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(atomic_xchg);

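/*
 * Hypothetical usage sketch (not from this file): atomic_xchg() is
 * enough to build a claim-once flag, since exactly one caller can
 * observe the old value 0:
 *
 *      static atomic_t claimed = ATOMIC_INIT(0);
 *
 *      if (atomic_xchg(&claimed, 1) == 0)
 *              do_one_time_init();     // hypothetical helper
 */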
int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;

        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(atomic_cmpxchg);

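/*
 * Typical caller pattern (a sketch, not from this file): retry a
 * cmpxchg loop until no other CPU updated the counter in between,
 * here implementing an increment that saturates at INT_MAX:
 *
 *      int old = atomic_read(v);
 *      while (old != INT_MAX) {
 *              int seen = atomic_cmpxchg(v, old, old + 1);
 *              if (seen == old)
 *                      break;          // our update won
 *              old = seen;             // lost the race, retry
 *      }
 */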
int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        ret = v->counter;
        if (ret != u)
                v->counter += a;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
}
EXPORT_SYMBOL(atomic_fetch_add_unless);

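/*
 * Typical use (hypothetical caller): take a reference only when the
 * object has not already dropped to zero and begun teardown:
 *
 *      if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *              return NULL;    // already hit zero, do not revive
 */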
/*
 * Atomic operations are already serializing: atomic_set() must take
 * the same hash lock as the read-modify-write ops above, or a plain
 * store could be overwritten by a concurrent locked update that had
 * already read the old value.
 */
void atomic_set(atomic_t *v, int i)
{
        unsigned long flags;

        spin_lock_irqsave(ATOMIC_HASH(v), flags);
        v->counter = i;
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_set);

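/*
 * ___set_bit(), ___clear_bit() and ___change_bit() below are the
 * spinlocked backends for the sparc32 bitops.  Each returns the old
 * value of the bits selected by mask, so a single helper can serve
 * both the plain and the test_and_*() variants of the operation.
 */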
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old | mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(___set_bit);

unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old & ~mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(___clear_bit);

unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
        unsigned long old, flags;

        spin_lock_irqsave(ATOMIC_HASH(addr), flags);
        old = *addr;
        *addr = old ^ mask;
        spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

        return old & mask;
}
EXPORT_SYMBOL(___change_bit);

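/*
 * The remaining helpers back the generic cmpxchg()/xchg() macros for
 * this architecture.  They hash into the same lock table as the
 * atomic_t ops, so mixing cmpxchg()/xchg() with atomic_*() accesses
 * on the same word stays consistent (same address, same lock).
 */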
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
        unsigned long flags;
        u32 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        if ((prev = *ptr) == old)
                *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

        return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);

u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
{
        unsigned long flags;
        u64 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        if ((prev = *ptr) == old)
                *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

        return prev;
}
EXPORT_SYMBOL(__cmpxchg_u64);

unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{
        unsigned long flags;
        u32 prev;

        spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
        prev = *ptr;
        *ptr = new;
        spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);

        return (unsigned long)prev;
}
EXPORT_SYMBOL(__xchg_u32);
