arch/x86/include/asm/barrier.h


DEFINITIONS

This source file includes the following definitions:
  1. array_index_mask_nospec

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
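
/*
 * Usage sketch (illustrative, not part of this header; the structure and
 * field names are hypothetical): wmb() orders two stores to DMA-visible
 * memory so an external observer such as a polling device cannot see the
 * flag before the payload.  For coherent memory dma_wmb() below would
 * also suffice; wmb() is the conservative, device-safe choice.
 *
 *	shared->payload = value;	// device reads this via DMA
 *	wmb();				// payload becomes visible first
 *	shared->ready = 1;		// device polls this flag
 */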

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *	0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			: "=r" (mask)
			: "g" (size), "r" (index)
			: "cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
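
/*
 * Usage sketch (illustrative only): the mask is ANDed into the index so
 * that even a mispredicted bounds check cannot feed an out-of-bounds
 * index to a speculative load.  This is the pattern that
 * array_index_nospec() in <linux/nospec.h> builds on:
 *
 *	if (index >= size)
 *		return -EINVAL;
 *	index &= array_index_mask_nospec(index, size);
 *	value = array[index];
 */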

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)
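
/*
 * Usage sketch (illustrative only; names are hypothetical): unlike the
 * masking approach above, barrier_nospec() stalls speculation outright,
 * so a load placed after it cannot execute with a not-yet-validated
 * index.
 *
 *	if (idx >= nr_entries)
 *		return -EINVAL;
 *	barrier_nospec();	// speculation cannot pass this point
 *	val = entries[idx];
 */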

#define dma_rmb()	barrier()
#define dma_wmb()	barrier()
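
/*
 * Usage sketch for a coherent-DMA descriptor ring (field names are
 * hypothetical), following the pattern in
 * Documentation/memory-barriers.txt.  On x86 these are compiler
 * barriers only, because the hardware already orders accesses to
 * coherent memory.
 *
 *	if (desc->status != DEVICE_OWN) {
 *		dma_rmb();		// read payload only after the ownership check
 *		read_data = desc->data;
 *		desc->data = write_data;
 *		dma_wmb();		// payload visible before ownership is handed back
 *		desc->status = DEVICE_OWN;
 *	}
 */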

#ifdef CONFIG_X86_32
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
#endif
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
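
/*
 * Usage sketch (illustrative only): callers use the non-__ wrappers
 * from <asm-generic/barrier.h>.  A release/acquire pair hands data from
 * one CPU to another; if the consumer observes flag == 1, it is also
 * guaranteed to observe data == 42.
 *
 *	// CPU 0 (producer)
 *	data = 42;
 *	smp_store_release(&flag, 1);
 *
 *	// CPU 1 (consumer)
 *	if (smp_load_acquire(&flag))
 *		BUG_ON(data != 42);	// cannot fire
 */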

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)
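
/*
 * Usage sketch (illustrative only; obj is hypothetical): generic code
 * still writes the barriers out; on x86 they compile to nothing because
 * the locked read-modify-write below is already a full barrier.
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic();	// order the store above with the RMW below
 *	atomic_dec(&obj->ref_count);
 */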

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */
