This source file includes following definitions.
- lock_impl_lock
- lock_impl_unlock
- lock_impl_trylock
- lock_impl_init
- lock_impl_trylock
- lock_impl_lock
- lock_impl_unlock
- lock_impl_init
- spin_lock_init
- spin_lock
- spin_unlock
- spin_trylock
- init_completion
- wait_for_completion
- complete
- try_wait_for_completion
- completion_done
- complete_all
   1 
   2 #ifndef LOCKS_H
   3 #define LOCKS_H
   4 
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>

#include "assume.h"
#include "bug_on.h"
#include "preempt.h"
  12 
  13 int nondet_int(void);
  14 
  15 #define __acquire(x)
  16 #define __acquires(x)
  17 #define __release(x)
  18 #define __releases(x)
  19 
  20 
  21 #ifdef PTHREAD_LOCK
/*
 * PTHREAD_LOCK build: the low-level lock is a real pthread mutex,
 * used when the test code is executed natively rather than analyzed
 * by the model checker.
 */
struct lock_impl {
	pthread_mutex_t mutex;
};
  25 
  26 static inline void lock_impl_lock(struct lock_impl *lock)
  27 {
  28         BUG_ON(pthread_mutex_lock(&lock->mutex));
  29 }
  30 
  31 static inline void lock_impl_unlock(struct lock_impl *lock)
  32 {
  33         BUG_ON(pthread_mutex_unlock(&lock->mutex));
  34 }
  35 
  36 static inline bool lock_impl_trylock(struct lock_impl *lock)
  37 {
  38         int err = pthread_mutex_trylock(&lock->mutex);
  39 
  40         if (!err)
  41                 return true;
  42         else if (err == EBUSY)
  43                 return false;
  44         BUG();
  45 }
  46 
  47 static inline void lock_impl_init(struct lock_impl *lock)
  48 {
  49         pthread_mutex_init(&lock->mutex, NULL);
  50 }
  51 
  52 #define LOCK_IMPL_INITIALIZER {.mutex = PTHREAD_MUTEX_INITIALIZER}
  53 
  54 #else 
  55 
  56 
/*
 * Model-checking (CBMC) / fallback build: the lock is just a boolean
 * flag manipulated with atomic operations.
 */
struct lock_impl {
	bool locked;
};
  60 
/* Try to acquire @lock without blocking; returns true on success. */
static inline bool lock_impl_trylock(struct lock_impl *lock)
{
#ifdef RUN
	/* Native execution: a single atomic compare-and-swap. */
	return __sync_bool_compare_and_swap(&lock->locked, false, true);
#else
	/* Under CBMC: atomically test-and-set the flag... */
	__CPROVER_atomic_begin();
	bool old_locked = lock->locked;
	lock->locked = true;
	__CPROVER_atomic_end();

	/*
	 * ...then fence so later reads and writes cannot be reordered
	 * before the acquisition (acquire-side ordering; the fence kinds
	 * named here order reads against subsequent reads and writes).
	 */
	__CPROVER_fence("RRfence", "RWfence");

	return !old_locked;
#endif
}
  78 
static inline void lock_impl_lock(struct lock_impl *lock)
{
	/*
	 * Blocking is modeled without spinning: assume() (see assume.h)
	 * is expected to discard any execution in which the trylock
	 * fails, so every execution the checker explores acquires the
	 * lock here immediately.
	 */
	assume(lock_impl_trylock(lock));

	/*
	 * No additional barrier is needed here:
	 * lock_impl_trylock() already issues the acquire-side fence.
	 */
}
  92 
static inline void lock_impl_unlock(struct lock_impl *lock)
{
#ifdef RUN
	/* Native execution: must transition true -> false exactly once. */
	BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));
#else
	/*
	 * Release-side fence: earlier reads and writes may not be
	 * reordered past the store that drops the lock below.
	 */
	__CPROVER_fence("RWfence", "WWfence");

	__CPROVER_atomic_begin();
	bool old_locked = lock->locked;
	lock->locked = false;
	__CPROVER_atomic_end();

	/* Unlocking a lock that is not held is a bug. */
	BUG_ON(!old_locked);
#endif
}
 109 
 110 static inline void lock_impl_init(struct lock_impl *lock)
 111 {
 112         lock->locked = false;
 113 }
 114 
 115 #define LOCK_IMPL_INITIALIZER {.locked = false}
 116 
 117 #endif 
 118 
 119 
 120 
 121 
 122 
/*
 * Minimal spinlock_t modeled on the kernel's API, backed by the
 * low-level lock_impl above.
 */
typedef struct {
	struct lock_impl internal_lock;
} spinlock_t;

/* Static initializers matching the kernel's spinlock macros. */
#define SPIN_LOCK_UNLOCKED {.internal_lock = LOCK_IMPL_INITIALIZER}
#define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
 130 
 131 static inline void spin_lock_init(spinlock_t *lock)
 132 {
 133         lock_impl_init(&lock->internal_lock);
 134 }
 135 
static inline void spin_lock(spinlock_t *lock)
{
	/*
	 * Preemption is disabled before taking the underlying lock so
	 * the critical section is atomic with respect to this model's
	 * scheduler (see preempt.h).
	 *
	 * NOTE(review): when NO_SYNC_SMP_MB is defined, spin_lock()
	 * becomes a no-op and provides no mutual exclusion — presumably
	 * a deliberate model-reduction switch, but confirm against the
	 * build configurations that define it.
	 */
#ifndef NO_SYNC_SMP_MB
	preempt_disable();
	lock_impl_lock(&lock->internal_lock);
#endif
}
 147 
static inline void spin_unlock(spinlock_t *lock)
{
	/*
	 * Mirror image of spin_lock(): release the lock, then re-enable
	 * preemption.  As with spin_lock(), this compiles to a no-op
	 * when NO_SYNC_SMP_MB is defined.
	 */
#ifndef NO_SYNC_SMP_MB
	lock_impl_unlock(&lock->internal_lock);
	preempt_enable();
#endif
}
 155 
 156 
/*
 * Interrupts are not modeled here, so the irq-disabling variants
 * collapse to the plain lock/unlock operations (flags are ignored).
 */
#define spin_lock_irq(lock) spin_lock(lock)
#define spin_unlock_irq(lock) spin_unlock(lock)
#define spin_lock_irqsave(lock, flags) spin_lock(lock)
#define spin_unlock_irqrestore(lock, flags) spin_unlock(lock)
 161 
 162 
 163 
 164 
 165 
 166 static inline bool spin_trylock(spinlock_t *lock)
 167 {
 168 #ifndef NO_SYNC_SMP_MB
 169         preempt_disable();
 170         return lock_impl_trylock(&lock->internal_lock);
 171 #else
 172         return true;
 173 #endif
 174 }
 175 
/*
 * Completion modeled as a bare counter: complete() increments it and
 * wait_for_completion() decrements it, with assume() discarding any
 * execution that would have to wait on a zero count.
 */
struct completion {
	/* Number of complete() calls not yet consumed by a waiter. */
	unsigned int count;
};

#define COMPLETION_INITIALIZER(x) {.count = 0}
#define DECLARE_COMPLETION(x) struct completion x = COMPLETION_INITIALIZER(x)
#define DECLARE_COMPLETION_ONSTACK(x) DECLARE_COMPLETION(x)
 184 
 185 static inline void init_completion(struct completion *c)
 186 {
 187         c->count = 0;
 188 }
 189 
static inline void wait_for_completion(struct completion *c)
{
	unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1);

	/*
	 * Model blocking: discard any execution in which the counter was
	 * already zero, i.e. where a real wait_for_completion() would
	 * have had to sleep.
	 */
	assume(prev_count);
}
 196 
 197 static inline void complete(struct completion *c)
 198 {
 199         unsigned int prev_count = __sync_fetch_and_add(&c->count, 1);
 200 
 201         BUG_ON(prev_count == UINT_MAX);
 202 }
 203 
 204 
/* Not implemented in this model; any call is a bug. */
static inline bool try_wait_for_completion(struct completion *c)
{
	BUG();
}
 209 
 210 static inline bool completion_done(struct completion *c)
 211 {
 212         return c->count;
 213 }
 214 
 215 
/* Not implemented in this model; any call is a bug. */
static inline void complete_all(struct completion *c)
{
	BUG();
}
 220 
 221 #endif