root/arch/alpha/include/asm/barrier.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BARRIER_H
#define __BARRIER_H

#include <asm/compiler.h>

#define mb()    __asm__ __volatile__("mb": : :"memory")
#define rmb()   __asm__ __volatile__("mb": : :"memory")
#define wmb()   __asm__ __volatile__("wmb": : :"memory")

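/*
 * A minimal sketch, not part of this header, of how wmb() and rmb()
 * pair up across CPUs ("data" and "flag" are illustrative names,
 * both initially zero):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      data = 42;                      while (flag == 0)
 *      wmb();                                  ;
 *      flag = 1;                       rmb();
 *                                      r = data;       /" r == 42 "/
 * </programlisting>
 *
 * wmb() orders the store to "data" before the store to "flag", and
 * rmb() orders the read of "flag" before the read of "data".
 * Omitting either barrier allows CPU 1 to observe flag == 1 and
 * still read the stale data == 0.
 */
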
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight
 * than rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */
#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")

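/*
 * Generic code does not normally call read_barrier_depends()
 * directly: <asm-generic/barrier.h> (included below) wraps it as
 * smp_read_barrier_depends() on SMP builds, which is what
 * pointer-publication consumers such as rcu_dereference() have
 * historically relied on.  The consumer side of the first example
 * above, written with the wrapper:
 *
 * <programlisting>
 *      q = p;
 *      smp_read_barrier_depends();
 *      d = *q;
 * </programlisting>
 */
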
#ifdef CONFIG_SMP
#define __ASM_SMP_MB    "\tmb\n"
#else
#define __ASM_SMP_MB
#endif

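/*
 * __ASM_SMP_MB is meant to be spliced into inline-asm strings so that
 * the trailing "mb" is only emitted on CONFIG_SMP kernels.  A
 * simplified sketch of an LL/SC exchange sequence using it (the
 * operands, labels, and surrounding code are illustrative, not taken
 * from this tree):
 *
 * <programlisting>
 *      __asm__ __volatile__(
 *      "1:     ldl_l   %0,%4\n"        load-locked old value
 *      "       bis     $31,%3,%1\n"    tmp = val
 *      "       stl_c   %1,%2\n"        store-conditional to *m
 *      "       beq     %1,1b\n"        retry if reservation was lost
 *              __ASM_SMP_MB            trailing "mb", SMP builds only
 *      : "=&r" (old), "=&r" (tmp), "=m" (*m)
 *      : "rI" (val), "m" (*m) : "memory");
 * </programlisting>
 *
 * On UP kernels the macro expands to nothing, so the barrier costs
 * nothing when there is no other CPU to order against.
 */
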
#include <asm-generic/barrier.h>

#endif          /* __BARRIER_H */
