root/include/asm-generic/qspinlock_types.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 /*
   3  * Queued spinlock
   4  *
   5  * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
   6  *
   7  * Authors: Waiman Long <waiman.long@hp.com>
   8  */
   9 #ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
  10 #define __ASM_GENERIC_QSPINLOCK_TYPES_H
  11 
  12 /*
  13  * Including atomic.h with PARAVIRT on will cause compilation errors because
   14  * of recursive header file inclusion via paravirt_types.h. So don't include
  15  * it if PARAVIRT is on.
  16  */
  17 #ifndef CONFIG_PARAVIRT
  18 #include <linux/types.h>
  19 #include <linux/atomic.h>
  20 #endif
  21 
   22 typedef struct qspinlock {
   23         union {
                        /* Whole 32-bit lock word; atomic ops operate on this view. */
   24                 atomic_t val;
   25 
   26                 /*
   27                  * By using the whole 2nd least significant byte for the
   28                  * pending bit, we can allow better optimization of the lock
   29                  * acquisition for the pending bit holder.
   30                  */
   31 #ifdef __LITTLE_ENDIAN
                        /* Byte view: locked is byte 0, pending is byte 1 of val. */
   32                 struct {
   33                         u8      locked;
   34                         u8      pending;
   35                 };
                        /* Halfword view: locked+pending in the low 16 bits, tail in the high 16. */
   36                 struct {
   37                         u16     locked_pending;
   38                         u16     tail;
   39                 };
   40 #else
                        /* Big-endian: member order reversed so the bit positions in val match. */
   41                 struct {
   42                         u16     tail;
   43                         u16     locked_pending;
   44                 };
   45                 struct {
   46                         u8      reserved[2];
   47                         u8      pending;
   48                         u8      locked;
   49                 };
   50 #endif
   51         };
   52 } arch_spinlock_t;
  53 
  54 /*
   55  * Initializer
  56  */
  57 #define __ARCH_SPIN_LOCK_UNLOCKED       { { .val = ATOMIC_INIT(0) } }
  58 
  59 /*
  60  * Bitfields in the atomic value:
  61  *
  62  * When NR_CPUS < 16K
  63  *  0- 7: locked byte
  64  *     8: pending
  65  *  9-15: not used
  66  * 16-17: tail index
  67  * 18-31: tail cpu (+1)
  68  *
  69  * When NR_CPUS >= 16K
  70  *  0- 7: locked byte
  71  *     8: pending
  72  *  9-10: tail index
  73  * 11-31: tail cpu (+1)
  74  */
   /* Build the mask for a field from its _Q_<type>_BITS and _Q_<type>_OFFSET. */
   75 #define _Q_SET_MASK(type)       (((1U << _Q_ ## type ## _BITS) - 1)\
   76                                       << _Q_ ## type ## _OFFSET)
   /* Locked byte: bits 0-7, so _Q_LOCKED_MASK == 0xff. */
   77 #define _Q_LOCKED_OFFSET        0
   78 #define _Q_LOCKED_BITS          8
   79 #define _Q_LOCKED_MASK          _Q_SET_MASK(LOCKED)
   80 
   /*
    * Pending field: a whole byte (bits 8-15, only bit 8 used) when the
    * tail cpu still fits above it (NR_CPUS < 16K); otherwise shrunk to a
    * single bit (bit 8) to leave the tail encoding more room.
    */
   81 #define _Q_PENDING_OFFSET       (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
   82 #if CONFIG_NR_CPUS < (1U << 14)
   83 #define _Q_PENDING_BITS         8
   84 #else
   85 #define _Q_PENDING_BITS         1
   86 #endif
   87 #define _Q_PENDING_MASK         _Q_SET_MASK(PENDING)
   88 
   /* Tail index: the 2-bit index portion of the tail encoding. */
   89 #define _Q_TAIL_IDX_OFFSET      (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
   90 #define _Q_TAIL_IDX_BITS        2
   91 #define _Q_TAIL_IDX_MASK        _Q_SET_MASK(TAIL_IDX)
   92 
   /* Tail cpu (+1): all remaining high bits of the 32-bit word. */
   93 #define _Q_TAIL_CPU_OFFSET      (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
   94 #define _Q_TAIL_CPU_BITS        (32 - _Q_TAIL_CPU_OFFSET)
   95 #define _Q_TAIL_CPU_MASK        _Q_SET_MASK(TAIL_CPU)
   96 
   /* Combined tail field: index bits plus cpu bits, starting at the index. */
   97 #define _Q_TAIL_OFFSET          _Q_TAIL_IDX_OFFSET
   98 #define _Q_TAIL_MASK            (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
   99 
   /* Lock-word values: lock held (bit 0) and pending set (first pending bit). */
  100 #define _Q_LOCKED_VAL           (1U << _Q_LOCKED_OFFSET)
  101 #define _Q_PENDING_VAL          (1U << _Q_PENDING_OFFSET)
 102 
 103 #endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */

/* [<][>][^][v][top][bottom][index][help] */