root/arch/m68k/include/asm/atomic.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. ATOMIC_OPS
  2. atomic_dec
  3. atomic_dec_and_test
  4. atomic_dec_and_test_lt
  5. atomic_inc_and_test
  6. atomic_cmpxchg
  7. atomic_xchg
  8. atomic_sub_and_test
  9. atomic_add_negative

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef __ARCH_M68K_ATOMIC__
   3 #define __ARCH_M68K_ATOMIC__
   4 
   5 #include <linux/types.h>
   6 #include <linux/irqflags.h>
   7 #include <asm/cmpxchg.h>
   8 #include <asm/barrier.h>
   9 
  10 /*
  11  * Atomic operations that C can't guarantee us.  Useful for
  12  * resource counting etc..
  13  */
  14 
  15 /*
  16  * We do not have SMP m68k systems, so we don't have to deal with that.
  17  */
  18 
#define ATOMIC_INIT(i)	{ (i) }

/* Plain loads/stores of the counter; READ_ONCE/WRITE_ONCE keep the
 * compiler from tearing, caching, or reordering the access. */
#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI	"d"
#else
#define ASM_DI	"di"
#endif
  33 
  34 #define ATOMIC_OP(op, c_op, asm_op)                                     \
  35 static inline void atomic_##op(int i, atomic_t *v)                      \
  36 {                                                                       \
  37         __asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
  38 }                                                                       \
  39 
  40 #ifdef CONFIG_RMW_INSNS
  41 
/*
 * atomic_<op>_return(i, v): update v->counter with a casl retry loop
 * and return the NEW value.
 *
 * %1 (t) holds the working result, %2 (tmp) holds the value casl
 * expects to find in memory; the "2" (atomic_read(v)) input seeds tmp
 * with the current counter, and on a failed compare casl reloads tmp
 * from memory before the loop retries at 1:.
 */
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return t;							\
}
  56 
/*
 * atomic_fetch_<op>(i, v): same casl retry loop as ATOMIC_OP_RETURN,
 * but returns the OLD value — after a successful casl, tmp (%2) still
 * holds the value the counter had before the operation.
 */
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return tmp;							\
}
  71 
  72 #else
  73 
/*
 * Fallback for CPUs without cas: make the RMW atomic by masking
 * interrupts around it (UP only).  Returns the NEW value — with
 * c_op like "+=", the assignment expression yields the updated
 * counter.
 */
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t * v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}
  86 
/*
 * Fallback fetch variant: interrupts off, snapshot the counter, then
 * apply c_op.  Returns the OLD value.
 */
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t * v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = v->counter;							\
	v->counter c_op i;						\
	local_irq_restore(flags);					\
									\
	return t;							\
}
 100 
 101 #endif /* CONFIG_RMW_INSNS */
 102 
/* add/sub get all three forms: void, *_return (new value), fetch_* (old). */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS
/* The bitwise ops only get the void and fetch_* (old value) variants. */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)	/* the m68k mnemonic for xor is "eor" */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
 124 
/* v->counter++ as a single addq instruction (atomic vs. interrupts on UP). */
static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define atomic_inc atomic_inc
 130 
/* v->counter-- as a single subq instruction (atomic vs. interrupts on UP). */
static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define atomic_dec atomic_dec
 136 
/* Decrement v and return true iff the result is zero (seq tests the Z flag
 * left by the subql). */
static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define atomic_dec_and_test atomic_dec_and_test
 144 
 145 static inline int atomic_dec_and_test_lt(atomic_t *v)
 146 {
 147         char c;
 148         __asm__ __volatile__(
 149                 "subql #1,%1; slt %0"
 150                 : "=d" (c), "=m" (*v)
 151                 : "m" (*v));
 152         return c != 0;
 153 }
 154 
/* Increment v and return true iff the result is zero (seq tests the Z flag
 * left by the addql). */
static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define atomic_inc_and_test atomic_inc_and_test
 162 
 163 #ifdef CONFIG_RMW_INSNS
 164 
/* The CPU has cas, so the generic cmpxchg()/xchg() primitives are usable
 * directly on the counter. */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 167 
 168 #else /* !CONFIG_RMW_INSNS */
 169 
 170 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 171 {
 172         unsigned long flags;
 173         int prev;
 174 
 175         local_irq_save(flags);
 176         prev = atomic_read(v);
 177         if (prev == old)
 178                 atomic_set(v, new);
 179         local_irq_restore(flags);
 180         return prev;
 181 }
 182 
 183 static inline int atomic_xchg(atomic_t *v, int new)
 184 {
 185         unsigned long flags;
 186         int prev;
 187 
 188         local_irq_save(flags);
 189         prev = atomic_read(v);
 190         atomic_set(v, new);
 191         local_irq_restore(flags);
 192         return prev;
 193 }
 194 
 195 #endif /* !CONFIG_RMW_INSNS */
 196 
/* Subtract i from v and return true iff the result is zero (seq tests the
 * Z flag left by the subl). */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define atomic_sub_and_test atomic_sub_and_test
 205 #define atomic_sub_and_test atomic_sub_and_test
 206 
/* Add i to v and return true iff the result is negative (smi tests the
 * N flag left by the addl). */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define atomic_add_negative atomic_add_negative
 215 #define atomic_add_negative atomic_add_negative
 216 
#endif /* __ARCH_M68K_ATOMIC__ */

/* [<][>][^][v][top][bottom][index][help] */