root/include/linux/spinlock_api_smp.h


DEFINITIONS

This source file includes the following definitions:
  1. __raw_spin_trylock
  2. __raw_spin_lock_irqsave
  3. __raw_spin_lock_irq
  4. __raw_spin_lock_bh
  5. __raw_spin_lock
  6. __raw_spin_unlock
  7. __raw_spin_unlock_irqrestore
  8. __raw_spin_unlock_irq
  9. __raw_spin_unlock_bh
  10. __raw_spin_trylock_bh

#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

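/*
 * Returns nonzero if @addr lies in the kernel's lock-function text
 * section, so profilers and debuggers can credit time spent spinning
 * to the lock's caller rather than to the lock implementation itself.
 */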
int in_lock_functions(unsigned long addr);

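/* Sanity check: BUG() if the raw spinlock @x is not currently locked. */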
#define assert_raw_spin_locked(x)       BUG_ON(!raw_spin_is_locked(x))

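/*
 * Out-of-line lock/unlock entry points, normally built in
 * kernel/spinlock.c; the CONFIG_INLINE_* blocks further down can remap
 * individual entry points onto the inline __raw_* variants below.
 */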
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)            __acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                                __acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                                __acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)         __acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
                                                                __acquires(lock);

unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
                                                                __acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
                                                                __acquires(lock);
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)          __releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)       __releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)      __releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
                                                                __releases(lock);

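/*
 * When one of the CONFIG_INLINE_SPIN_* options below is set, the
 * matching _raw_* call resolves directly to its inline __raw_* body and
 * no out-of-line copy is built.  Otherwise the out-of-line copy is just
 * a thin exported wrapper; a minimal sketch of what kernel/spinlock.c
 * provides in that case (assuming the mainline layout):
 *
 *	#ifndef CONFIG_INLINE_SPIN_LOCK
 *	void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		__raw_spin_lock(lock);
 *	}
 *	EXPORT_SYMBOL(_raw_spin_lock);
 *	#endif
 */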
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif

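/*
 * Try to take @lock without spinning.  Preemption is disabled up front
 * and stays disabled on success (return 1); on failure it is re-enabled
 * again and 0 is returned.
 */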
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        preempt_disable();
        if (do_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        preempt_enable();
        return 0;
}

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

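/*
 * Save the local IRQ state, then disable IRQs and preemption before
 * acquiring @lock.  The returned flags must be passed to the matching
 * __raw_spin_unlock_irqrestore().
 */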
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        /*
         * On lockdep we don't want the hand-coded irq-enable of
         * do_raw_spin_lock_flags() code, because lockdep assumes
         * that interrupts are not re-enabled during lock-acquire:
         */
#ifdef CONFIG_LOCKDEP
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
        do_raw_spin_lock_flags(lock, &flags);
#endif
        return flags;
}

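/* Unconditionally disable local IRQs and preemption, then acquire @lock. */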
static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

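/*
 * Disable softirq processing on this CPU (which also counts as
 * disabling preemption) before acquiring @lock.
 */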
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

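/* The plain variant: disable preemption, then acquire @lock. */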
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

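/* Release @lock and re-enable preemption; the reverse of __raw_spin_lock(). */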
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        preempt_enable();
}

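/*
 * Release @lock, restore the IRQ state saved by __raw_spin_lock_irqsave()
 * and re-enable preemption.
 */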
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
                                            unsigned long flags)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}

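/* Release @lock, then unconditionally re-enable local IRQs and preemption. */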
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        local_irq_enable();
        preempt_enable();
}

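/*
 * Release @lock and re-enable softirq processing; the reverse of
 * __raw_spin_lock_bh().
 */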
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}

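/*
 * BH-disabling variant of __raw_spin_trylock(): softirqs stay disabled
 * only if @lock was taken (return 1), and are re-enabled on failure.
 */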
static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
        if (do_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
        return 0;
}

#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */
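
A minimal caller-level sketch of how these entry points are typically
reached (hypothetical demo_* names; assumes the usual <linux/spinlock.h>
wrappers, which map spin_lock_irqsave() and friends onto the _raw_*
functions declared above):

        /* hypothetical driver snippet, not part of this header */
        static DEFINE_SPINLOCK(demo_lock);
        static unsigned long demo_counter;

        static void demo_update(void)
        {
                unsigned long flags;

                /* disables local IRQs, then takes the lock */
                spin_lock_irqsave(&demo_lock, flags);
                demo_counter++;
                spin_unlock_irqrestore(&demo_lock, flags);
        }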
