root/arch/powerpc/include/asm/cmpxchg.h

DEFINITIONS

This source file includes the following definitions:
  1. __xchg_u32_local
  2. __xchg_u32_relaxed
  3. __xchg_u64_local
  4. __xchg_u64_relaxed
  5. __xchg_local
  6. __xchg_relaxed
  7. __cmpxchg_u32
  8. __cmpxchg_u32_local
  9. __cmpxchg_u32_relaxed
  10. __cmpxchg_u32_acquire
  11. __cmpxchg_u64
  12. __cmpxchg_u64_local
  13. __cmpxchg_u64_relaxed
  14. __cmpxchg_u64_acquire
  15. __cmpxchg
  16. __cmpxchg_local
  17. __cmpxchg_relaxed
  18. __cmpxchg_acquire

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CMPXCHG_H_
#define _ASM_POWERPC_CMPXCHG_H_

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <linux/bug.h>
#include <asm/asm-405.h>

#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off)   ((sizeof(u32) - size - off) * BITS_PER_BYTE)
#else
#define BITOFF_CAL(size, off)   (off * BITS_PER_BYTE)
#endif

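/*
 * Worked example (illustrative, not part of the original header): for a
 * u16 at byte offset 2 within its aligned u32, BITOFF_CAL(2, 2) yields
 * (4 - 2 - 2) * 8 = 0 on big-endian and 2 * 8 = 16 on little-endian:
 * the left-shift that places the subword at its byte position inside
 * the containing 32-bit word.
 */
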
#define XCHG_GEN(type, sfx, cl)                                 \
static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \
{                                                               \
        unsigned int prev, prev_mask, tmp, bitoff, off;         \
                                                                \
        off = (unsigned long)p % sizeof(u32);                   \
        bitoff = BITOFF_CAL(sizeof(type), off);                 \
        p -= off;                                               \
        val <<= bitoff;                                         \
        prev_mask = (u32)(type)-1 << bitoff;                    \
                                                                \
        __asm__ __volatile__(                                   \
"1:     lwarx   %0,0,%3\n"                                      \
"       andc    %1,%0,%5\n"                                     \
"       or      %1,%1,%4\n"                                     \
        PPC405_ERR77(0,%3)                                      \
"       stwcx.  %1,0,%3\n"                                      \
"       bne-    1b\n"                                           \
        : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)            \
        : "r" (p), "r" (val), "r" (prev_mask)                   \
        : "cc", cl);                                            \
                                                                \
        return prev >> bitoff;                                  \
}

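/*
 * Sketch (hypothetical helper, not in the kernel): the word update that
 * the XCHG_GEN asm loop performs, written in plain C for a single
 * thread. 'shifted_val' and 'prev_mask' are already shifted by bitoff,
 * exactly as the macro prepares them before entering the asm.
 */
static inline u32 __xchg_gen_merge_demo(u32 word, u32 shifted_val,
                                        u32 prev_mask)
{
        /* andc: clear the target subword; or: install the new value */
        return (word & ~prev_mask) | shifted_val;
}
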
#define CMPXCHG_GEN(type, sfx, br, br2, cl)                     \
static inline                                                   \
u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new)   \
{                                                               \
        unsigned int prev, prev_mask, tmp, bitoff, off;         \
                                                                \
        off = (unsigned long)p % sizeof(u32);                   \
        bitoff = BITOFF_CAL(sizeof(type), off);                 \
        p -= off;                                               \
        old <<= bitoff;                                         \
        new <<= bitoff;                                         \
        prev_mask = (u32)(type)-1 << bitoff;                    \
                                                                \
        __asm__ __volatile__(                                   \
        br                                                      \
"1:     lwarx   %0,0,%3\n"                                      \
"       and     %1,%0,%6\n"                                     \
"       cmpw    0,%1,%4\n"                                      \
"       bne-    2f\n"                                           \
"       andc    %1,%0,%6\n"                                     \
"       or      %1,%1,%5\n"                                     \
        PPC405_ERR77(0,%3)                                      \
"       stwcx.  %1,0,%3\n"                                      \
"       bne-    1b\n"                                           \
        br2                                                     \
        "\n"                                                    \
"2:"                                                            \
        : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)            \
        : "r" (p), "r" (old), "r" (new), "r" (prev_mask)        \
        : "cc", cl);                                            \
                                                                \
        return prev >> bitoff;                                  \
}

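/*
 * Sketch (hypothetical, not in the kernel): the compare step of the
 * CMPXCHG_GEN asm in plain C. Only the masked subword is compared, so
 * neighbouring bytes sharing the same u32 never cause a false mismatch.
 */
static inline int __cmpxchg_gen_match_demo(u32 word, u32 shifted_old,
                                           u32 prev_mask)
{
        /* and + cmpw: isolate the subword and compare against 'old' */
        return (word & prev_mask) == shifted_old;
}
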
/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */

XCHG_GEN(u8, _local, "memory");
XCHG_GEN(u8, _relaxed, "cc");
XCHG_GEN(u16, _local, "memory");
XCHG_GEN(u16, _relaxed, "cc");

static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stwcx.  %3,0,%2 \n\
        bne-    1b"
        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__xchg_u32_relaxed(u32 *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2\n"
        PPC405_ERR77(0, %2)
"       stwcx.  %3,0,%2\n"
"       bne-    1b"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (val)
        : "cc");

        return prev;
}

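/*
 * Roughly equivalent portable form (illustrative only): the
 * lwarx/stwcx. retry loop above implements what GCC exposes as
 * __atomic_exchange_n() with relaxed ordering.
 */
static inline u32 __xchg_u32_relaxed_demo(u32 *p, u32 val)
{
        return __atomic_exchange_n(p, val, __ATOMIC_RELAXED);
}
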
#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
"       stdcx.  %3,0,%2 \n\
        bne-    1b"
        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
        unsigned long prev;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2\n"
        PPC405_ERR77(0, %2)
"       stdcx.  %3,0,%2\n"
"       bne-    1b"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (val)
        : "cc");

        return prev;
}
#endif

static __always_inline unsigned long
__xchg_local(void *ptr, unsigned long x, unsigned int size)
{
        switch (size) {
        case 1:
                return __xchg_u8_local(ptr, x);
        case 2:
                return __xchg_u16_local(ptr, x);
        case 4:
                return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
        case 8:
                return __xchg_u64_local(ptr, x);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
        return x;
}

static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
        switch (size) {
        case 1:
                return __xchg_u8_relaxed(ptr, x);
        case 2:
                return __xchg_u16_relaxed(ptr, x);
        case 4:
                return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
        case 8:
                return __xchg_u64_relaxed(ptr, x);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
        return x;
}

#define xchg_local(ptr, x)                                                   \
  ({                                                                         \
     __typeof__(*(ptr)) _x_ = (x);                                           \
     (__typeof__(*(ptr))) __xchg_local((ptr),                                \
                (unsigned long)_x_, sizeof(*(ptr)));                         \
  })

#define xchg_relaxed(ptr, x)                                            \
({                                                                      \
        __typeof__(*(ptr)) _x_ = (x);                                   \
        (__typeof__(*(ptr))) __xchg_relaxed((ptr),                      \
                        (unsigned long)_x_, sizeof(*(ptr)));            \
})
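
/*
 * Usage sketch (hypothetical): xchg_relaxed() returns the previous
 * value, so it can atomically take over a published pointer without
 * imposing any ordering.
 */
static inline void *take_pending_demo(void **slot)
{
        /* grab whatever was published and leave NULL behind */
        return xchg_relaxed(slot, NULL);
}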

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */

CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u8, _local, , , "memory");
CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u8, _relaxed, , , "cc");
CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
CMPXCHG_GEN(u16, _local, , , "memory");
CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
CMPXCHG_GEN(u16, _relaxed, , , "cc");

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %4,0,%2\n\
        bne-    1b"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
                        unsigned long new)
{
        unsigned int prev;

        __asm__ __volatile__ (
 252 "1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
"       stwcx.  %4,0,%2\n\
        bne-    1b"
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     lwarx   %0,0,%2         # __cmpxchg_u32_relaxed\n"
"       cmpw    0,%0,%3\n"
"       bne-    2f\n"
        PPC405_ERR77(0, %2)
"       stwcx.  %4,0,%2\n"
"       bne-    1b\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc");

        return prev;
}

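/*
 * Roughly equivalent portable form (illustrative only): GCC's
 * __atomic_compare_exchange_n() with relaxed ordering. On failure the
 * builtin writes the observed value back into 'old', so returning
 * 'old' matches cmpxchg()'s return-the-previous-value convention.
 */
static inline u32 __cmpxchg_u32_relaxed_demo(u32 *p, u32 old, u32 new)
{
        __atomic_compare_exchange_n(p, &old, new, 0,
                                    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
        return old;
}
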
/*
 * The cmpxchg family provides no ordering guarantee when the compare
 * fails, so superfluous barriers can be avoided by implementing
 * cmpxchg() and cmpxchg_acquire() in assembly. We don't do the same
 * for cmpxchg_release(), because that would put a barrier in the
 * middle of the ll/sc loop, which is probably a bad idea: for example,
 * it might make the conditional store more likely to fail.
 */
static __always_inline unsigned long
__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     lwarx   %0,0,%2         # __cmpxchg_u32_acquire\n"
"       cmpw    0,%0,%3\n"
"       bne-    2f\n"
        PPC405_ERR77(0, %2)
"       stwcx.  %4,0,%2\n"
"       bne-    1b\n"
        PPC_ACQUIRE_BARRIER
        "\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

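/*
 * Usage sketch (hypothetical): acquire semantics matter for lock-like
 * hand-offs. On success, the PPC_ACQUIRE_BARRIER above orders the
 * caller's critical section after the access that won the lock word.
 */
static inline int trylock_word_demo(u32 *lock)
{
        /* 0 -> unlocked, 1 -> locked; success iff we saw it unlocked */
        return __cmpxchg_u32_acquire(lock, 0, 1) == 0;
}
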
#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
                        unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
 347 "1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
        "\n\
2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     ldarx   %0,0,%2         # __cmpxchg_u64_relaxed\n"
"       cmpd    0,%0,%3\n"
"       bne-    2f\n"
"       stdcx.  %4,0,%2\n"
"       bne-    1b\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc");

        return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
        unsigned long prev;

        __asm__ __volatile__ (
"1:     ldarx   %0,0,%2         # __cmpxchg_u64_acquire\n"
"       cmpd    0,%0,%3\n"
"       bne-    2f\n"
"       stdcx.  %4,0,%2\n"
"       bne-    1b\n"
        PPC_ACQUIRE_BARRIER
        "\n"
"2:"
        : "=&r" (prev), "+m" (*p)
        : "r" (p), "r" (old), "r" (new)
        : "cc", "memory");

        return prev;
}
#endif

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8(ptr, old, new);
        case 2:
                return __cmpxchg_u16(ptr, old, new);
        case 4:
                return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
        return old;
}

static __always_inline unsigned long
__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8_local(ptr, old, new);
        case 2:
                return __cmpxchg_u16_local(ptr, old, new);
        case 4:
                return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64_local(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
        return old;
}

static __always_inline unsigned long
__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
                  unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8_relaxed(ptr, old, new);
        case 2:
                return __cmpxchg_u16_relaxed(ptr, old, new);
        case 4:
                return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64_relaxed(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
        return old;
}

static __always_inline unsigned long
__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
                  unsigned int size)
{
        switch (size) {
        case 1:
                return __cmpxchg_u8_acquire(ptr, old, new);
        case 2:
                return __cmpxchg_u16_acquire(ptr, old, new);
        case 4:
                return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
        case 8:
                return __cmpxchg_u64_acquire(ptr, old, new);
#endif
        }
        BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
        return old;
}

#define cmpxchg(ptr, o, n)                                               \
  ({                                                                     \
     __typeof__(*(ptr)) _o_ = (o);                                       \
     __typeof__(*(ptr)) _n_ = (n);                                       \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,           \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#define cmpxchg_local(ptr, o, n)                                         \
  ({                                                                     \
     __typeof__(*(ptr)) _o_ = (o);                                       \
     __typeof__(*(ptr)) _n_ = (n);                                       \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,     \
                                    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#define cmpxchg_relaxed(ptr, o, n)                                      \
({                                                                      \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),                   \
                        (unsigned long)_o_, (unsigned long)_n_,         \
                        sizeof(*(ptr)));                                \
})

#define cmpxchg_acquire(ptr, o, n)                                      \
({                                                                      \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        (__typeof__(*(ptr))) __cmpxchg_acquire((ptr),                   \
                        (unsigned long)_o_, (unsigned long)_n_,         \
                        sizeof(*(ptr)));                                \
})
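
/*
 * Usage sketch (hypothetical): the canonical cmpxchg() retry loop,
 * here storing max(*p, v). The loop re-reads only through cmpxchg()'s
 * return value, so each iteration costs one ll/sc sequence.
 */
static inline void atomic_max_demo(unsigned int *p, unsigned int v)
{
        unsigned int cur = READ_ONCE(*p);
        unsigned int old;

        while (cur < v) {
                old = cmpxchg(p, cur, v);
                if (old == cur)
                        break;          /* we installed v */
                cur = old;              /* lost a race; re-evaluate */
        }
}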

#ifdef CONFIG_PPC64
#define cmpxchg64(ptr, o, n)                                            \
  ({                                                                    \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg((ptr), (o), (n));                                       \
  })
#define cmpxchg64_local(ptr, o, n)                                      \
  ({                                                                    \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_local((ptr), (o), (n));                                 \
  })
#define cmpxchg64_relaxed(ptr, o, n)                                    \
({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_relaxed((ptr), (o), (n));                               \
})
#define cmpxchg64_acquire(ptr, o, n)                                    \
({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_acquire((ptr), (o), (n));                               \
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CMPXCHG_H_ */
