root/kernel/locking/lockdep_internals.h


DEFINITIONS

This source file includes the following definitions:
  1. __LOCKF
  2. lockdep_count_forward_deps
  3. lockdep_count_backward_deps
  4. debug_class_ops_inc
  5. debug_class_ops_read

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)          \
        LOCK_USED_IN_##__STATE,         \
        LOCK_USED_IN_##__STATE##_READ,  \
        LOCK_ENABLED_##__STATE,         \
        LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        LOCK_USED,
        LOCK_USAGE_STATES
};
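
/*
 * Illustration (an aside, not part of the original header): assuming
 * lockdep_states.h lists LOCKDEP_STATE(HARDIRQ) and LOCKDEP_STATE(SOFTIRQ),
 * the x-macro above expands to roughly:
 *
 *      enum lock_usage_bit {
 *              LOCK_USED_IN_HARDIRQ,           // == 0
 *              LOCK_USED_IN_HARDIRQ_READ,      // == 1
 *              LOCK_ENABLED_HARDIRQ,           // == 2
 *              LOCK_ENABLED_HARDIRQ_READ,      // == 3
 *              LOCK_USED_IN_SOFTIRQ,           // == 4
 *              LOCK_USED_IN_SOFTIRQ_READ,      // == 5
 *              LOCK_ENABLED_SOFTIRQ,           // == 6
 *              LOCK_ENABLED_SOFTIRQ_READ,      // == 7
 *              LOCK_USED,                      // == 8
 *              LOCK_USAGE_STATES               // == 9
 *      };
 */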

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))

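/*
 * Sketch (an aside, assuming the HARDIRQ/SOFTIRQ expansion above): bit 0
 * of a usage bit selects the _READ variant and bit 1 selects ENABLED over
 * USED_IN, so for example:
 *
 *      LOCK_USED_IN_HARDIRQ_READ == (LOCK_USED_IN_HARDIRQ | LOCK_USAGE_READ_MASK)
 *      LOCK_ENABLED_HARDIRQ      == (LOCK_USED_IN_HARDIRQ | LOCK_USAGE_DIR_MASK)
 */
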
/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)        LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)                                          \
        __LOCKF(USED_IN_##__STATE)                                      \
        __LOCKF(USED_IN_##__STATE##_READ)                               \
        __LOCKF(ENABLED_##__STATE)                                      \
        __LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        __LOCKF(USED)
};
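
/*
 * Illustration (an aside, not part of the original header): each __LOCKF()
 * turns a usage bit number into a single-bit mask, e.g. with the numbering
 * above:
 *
 *      LOCKF_USED_IN_HARDIRQ == (1 << LOCK_USED_IN_HARDIRQ) == 0x001
 *      LOCKF_USED            == (1 << LOCK_USED)            == 0x100
 */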

#define LOCKDEP_STATE(__STATE)  LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)  LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)  LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)  LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
        0;
#undef LOCKDEP_STATE

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)
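
/*
 * Illustration (an aside, not part of the original header): with the
 * HARDIRQ and SOFTIRQ states, the first of the four x-macro blocks above
 * expands to
 *
 *      static const unsigned long LOCKF_ENABLED_IRQ =
 *              LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ | 0;
 *
 * The trailing "0" terminates the OR chain that the last LOCKDEP_STATE()
 * expansion leaves open.
 */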

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc, where the kernel's .text,
 * .data and .bss must fit within a 32MB limit. With CONFIG_LOCKDEP we
 * could exceed that limit and break system boot-up, so reduce the static
 * allocations for lockdep-related structures to keep everything within
 * the required size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES     16384UL
#define MAX_LOCKDEP_CHAINS_BITS 15
#define MAX_STACK_TRACE_ENTRIES 262144UL
#define STACK_TRACE_HASH_SIZE   8192
#else
#define MAX_LOCKDEP_ENTRIES     32768UL

#define MAX_LOCKDEP_CHAINS_BITS 16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES 524288UL
#define STACK_TRACE_HASH_SIZE   16384
#endif

#define MAX_LOCKDEP_CHAINS      (1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
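
/*
 * Worked numbers (an aside): with MAX_LOCKDEP_CHAINS_BITS == 16 this gives
 * MAX_LOCKDEP_CHAINS == 65536 and MAX_LOCKDEP_CHAIN_HLOCKS == 327680,
 * i.e. room for an average of five held locks per recorded chain; the
 * CONFIG_LOCKDEP_SMALL configuration halves the chain table to 32768.
 */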

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
                            char usage[LOCK_USAGE_CHARS]);
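
/*
 * Worked numbers (an aside, assuming the two IRQ states above):
 * LOCK_USAGE_STATES == 9, so LOCK_USAGE_CHARS == 1 + 9/2 == 5:
 * two usage characters per state (normal and _READ) plus a
 * terminating NUL.
 */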

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
                                  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
        return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
        return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * They are kept per-CPU because they are often updated in the fast
 * path, and per-CPU counters avoid excessive cache-line bouncing.
 */
struct lockdep_stats {
        unsigned long  chain_lookup_hits;
        unsigned int   chain_lookup_misses;
        unsigned long  hardirqs_on_events;
        unsigned long  hardirqs_off_events;
        unsigned long  redundant_hardirqs_on;
        unsigned long  redundant_hardirqs_off;
        unsigned long  softirqs_on_events;
        unsigned long  softirqs_off_events;
        unsigned long  redundant_softirqs_on;
        unsigned long  redundant_softirqs_off;
        int            nr_unused_locks;
        unsigned int   nr_redundant_checks;
        unsigned int   nr_redundant;
        unsigned int   nr_cyclic_checks;
        unsigned int   nr_find_usage_forwards_checks;
        unsigned int   nr_find_usage_backwards_checks;

        /*
         * Per lock class locking operation stat counts
         */
        unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)                                 \
        this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)                   {               \
        WARN_ON_ONCE(!irqs_disabled());                         \
        __this_cpu_inc(lockdep_stats.ptr);                      \
}

#define debug_atomic_dec(ptr)                   {               \
        WARN_ON_ONCE(!irqs_disabled());                         \
        __this_cpu_dec(lockdep_stats.ptr);                      \
}

#define debug_atomic_read(ptr)          ({                              \
        struct lockdep_stats *__cpu_lockdep_stats;                      \
        unsigned long long __total = 0;                                 \
        int __cpu;                                                      \
        for_each_possible_cpu(__cpu) {                                  \
                __cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);   \
                __total += __cpu_lockdep_stats->ptr;                    \
        }                                                               \
        __total;                                                        \
})
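
/*
 * Usage sketch (an aside, not part of the original header): a caller that
 * already runs with IRQs disabled bumps a counter with the cheap
 * non-atomic __this_cpu_inc(), and a stats reader sums all CPUs, e.g.:
 *
 *      debug_atomic_inc(chain_lookup_hits);
 *      ...
 *      unsigned long long hits = debug_atomic_read(chain_lookup_hits);
 *
 * The WARN_ON_ONCE() catches callers with interrupts still enabled, and
 * debug_atomic_read() is only approximate while updates are in flight.
 */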

/* Count one locking operation in @class' per-CPU stat slot. */
static inline void debug_class_ops_inc(struct lock_class *class)
{
        int idx;

        idx = class - lock_classes;
        __debug_atomic_inc(lock_class_ops[idx]);
}

/* Sum @class' operation counts across all possible CPUs. */
static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
        int idx, cpu;
        unsigned long ops = 0;

        idx = class - lock_classes;
        for_each_possible_cpu(cpu)
                ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
        return ops;
}
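
/*
 * Illustration (an aside, not part of the original header): the index is
 * plain pointer arithmetic into the static lock_classes[] array, e.g.
 *
 *      struct lock_class *class = &lock_classes[42];
 *      debug_class_ops_inc(class);     // bumps lock_class_ops[42]
 */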

#else
# define __debug_atomic_inc(ptr)        do { } while (0)
# define debug_atomic_inc(ptr)          do { } while (0)
# define debug_atomic_dec(ptr)          do { } while (0)
# define debug_atomic_read(ptr)         0
# define debug_class_ops_inc(ptr)       do { } while (0)
#endif
