root/arch/parisc/include/asm/atomic.h

DEFINITIONS

This source file includes the following definitions.
  1. atomic_set
  2. atomic_read
  3. atomic64_set
  4. atomic64_read

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>          /* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
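/*
 * Illustrative sketch, not part of the original header: how ATOMIC_HASH()
 * picks a lock.  Assuming (for the arithmetic only) L1_CACHE_BYTES == 64,
 * an atomic_t at address 0x1040 hashes to (0x1040 / 64) & (4 - 1) == 1,
 * so it is serialized by __atomic_hash[1].  Atomics sharing a cacheline
 * always share a lock; unrelated atomics tend to spread over the four
 * slots.  "some_counter" below is a hypothetical variable.
 *
 *      arch_spinlock_t *s = ATOMIC_HASH(&some_counter);
 */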

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {     \
        arch_spinlock_t *s = ATOMIC_HASH(l);            \
        local_irq_save(f);                      \
        arch_spin_lock(s);                      \
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {        \
        arch_spinlock_t *s = ATOMIC_HASH(l);                    \
        arch_spin_unlock(s);                            \
        local_irq_restore(f);                           \
} while(0)
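
/*
 * Illustrative sketch, not part of the original header: the pattern every
 * locked operation below follows.  "example_add" and "delta" are
 * hypothetical names.
 *
 *      static __inline__ void example_add(int delta, atomic_t *v)
 *      {
 *              unsigned long flags;
 *
 *              _atomic_spin_lock_irqsave(v, flags);
 *              v->counter += delta;
 *              _atomic_spin_unlock_irqrestore(v, flags);
 *      }
 */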

#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        v->counter = i;

        _atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic_set_release(v, i)        atomic_set((v), (i))

static __inline__ int atomic_read(const atomic_t *v)
{
        return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
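
/*
 * Usage sketch, not part of the original header: a typical retry loop on
 * top of atomic_cmpxchg().  atomic_cmpxchg() returns the value it found,
 * so the update succeeded only if that equals the expected old value.
 * "example_inc_below" and "limit" are hypothetical names.
 *
 *      static __inline__ int example_inc_below(atomic_t *v, int limit)
 *      {
 *              int old = atomic_read(v);
 *
 *              while (old < limit) {
 *                      int seen = atomic_cmpxchg(v, old, old + 1);
 *                      if (seen == old)
 *                              return 1;       // won the race
 *                      old = seen;             // lost it, retry with fresh value
 *              }
 *              return 0;                       // already at the limit
 *      }
 */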

#define ATOMIC_OP(op, c_op)                                             \
static __inline__ void atomic_##op(int i, atomic_t *v)                  \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op)                                      \
static __inline__ int atomic_##op##_return(int i, atomic_t *v)          \
{                                                                       \
        unsigned long flags;                                            \
        int ret;                                                        \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        ret = (v->counter c_op i);                                      \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
                                                                        \
        return ret;                                                     \
}

#define ATOMIC_FETCH_OP(op, c_op)                                       \
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)             \
{                                                                       \
        unsigned long flags;                                            \
        int ret;                                                        \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        ret = v->counter;                                               \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
                                                                        \
        return ret;                                                     \
}

#define ATOMIC_OPS(op, c_op)                                            \
        ATOMIC_OP(op, c_op)                                             \
        ATOMIC_OP_RETURN(op, c_op)                                      \
        ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)                                            \
        ATOMIC_OP(op, c_op)                                             \
        ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
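
/*
 * The expansions above provide, for 32-bit atomics: atomic_add(),
 * atomic_sub(), atomic_add_return(), atomic_sub_return(),
 * atomic_fetch_add(), atomic_fetch_sub(), atomic_and(), atomic_or(),
 * atomic_xor(), atomic_fetch_and(), atomic_fetch_or() and
 * atomic_fetch_xor().  Note that the bitwise ops are generated without a
 * *_return variant.
 */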

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC_INIT(i)  { (i) }
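
/*
 * Usage sketch, not part of the original header: ATOMIC_INIT() is for
 * compile-time initialization; run-time (re)initialization goes through
 * atomic_set().  "example_refcount" is a hypothetical name.
 *
 *      static atomic_t example_refcount = ATOMIC_INIT(1);
 */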

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)                                           \
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)              \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
}                                                                       \

#define ATOMIC64_OP_RETURN(op, c_op)                                    \
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)      \
{                                                                       \
        unsigned long flags;                                            \
        s64 ret;                                                        \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        ret = (v->counter c_op i);                                      \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
                                                                        \
        return ret;                                                     \
}

#define ATOMIC64_FETCH_OP(op, c_op)                                     \
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)         \
{                                                                       \
        unsigned long flags;                                            \
        s64 ret;                                                        \
                                                                        \
        _atomic_spin_lock_irqsave(v, flags);                            \
        ret = v->counter;                                               \
        v->counter c_op i;                                              \
        _atomic_spin_unlock_irqrestore(v, flags);                       \
                                                                        \
        return ret;                                                     \
}

#define ATOMIC64_OPS(op, c_op)                                          \
        ATOMIC64_OP(op, c_op)                                           \
        ATOMIC64_OP_RETURN(op, c_op)                                    \
        ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)                                          \
        ATOMIC64_OP(op, c_op)                                           \
        ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
        unsigned long flags;
        _atomic_spin_lock_irqsave(v, flags);

        v->counter = i;

        _atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
        return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
        ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */
