This source file includes the following definitions:
- __init_ldsem
- __ldsem_wake_readers
- writer_trylock
- __ldsem_wake_writer
- __ldsem_wake
- ldsem_wake
- down_read_failed
- down_write_failed
- __ldsem_down_read_nested
- __ldsem_down_write_nested
- ldsem_down_read
- ldsem_down_read_trylock
- ldsem_down_write
- ldsem_down_write_trylock
- ldsem_up_read
- ldsem_up_write
- ldsem_down_read_nested
- ldsem_down_write_nested

// SPDX-License-Identifier: GPL-2.0
/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore but which enforces
 * an alternate policy, namely:
 *  1) Supports lock wait timeouts
 *  2) Write waiter has priority
 *  3) Downgrading is not supported
 *
 * Implementation notes:
 *  1) Upper half of semaphore count is a wait count (differs from rwsem
 *     in that rwsem normalizes the upper half to the wait bias)
 *  2) Lacks overflow checking
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>

/*
 * The low half of ->count is the active count (each reader, and a
 * lock-holding writer, contributes LDSEM_ACTIVE_BIAS); the upper half
 * goes negative whenever waiters are queued or a writer holds the lock.
 */
#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK      0xffffffffL
#else
# define LDSEM_ACTIVE_MASK      0x0000ffffL
#endif

#define LDSEM_UNLOCKED          0L
#define LDSEM_ACTIVE_BIAS       1L
#define LDSEM_WAIT_BIAS         (-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS         LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS        (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
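
/*
 * Worked example of the bias arithmetic above (illustrative only,
 * assuming the 64-bit layout): from LDSEM_UNLOCKED (0), a successful
 * reader adds LDSEM_READ_BIAS, giving count == 1 (active part 1, wait
 * part 0). A writer's fast path then adds LDSEM_WRITE_BIAS, making the
 * active part 2 != LDSEM_ACTIVE_BIAS, so it fails; down_write_failed()
 * backs out LDSEM_ACTIVE_BIAS, leaving count == LDSEM_WAIT_BIAS + 1:
 * active part 1 (the reader), upper half -1 (one waiter), count < 0.
 * When the reader releases, count == LDSEM_WAIT_BIAS: count < 0 with a
 * zero active part, which is exactly the condition ldsem_up_read()
 * tests before calling ldsem_wake().
 */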

struct ldsem_waiter {
        struct list_head list;
        struct task_struct *task;
};

/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        atomic_long_set(&sem->count, LDSEM_UNLOCKED);
        sem->wait_readers = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->read_wait);
        INIT_LIST_HEAD(&sem->write_wait);
}

static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
        struct ldsem_waiter *waiter, *next;
        struct task_struct *tsk;
        long adjust, count;

        /*
         * Try to grant read locks to all readers on the read wait list.
         * Note the 'active part' of the count is incremented by the
         * number of waiting readers before any process is woken up.
         */
        adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
        count = atomic_long_add_return(adjust, &sem->count);
        do {
                if (count > 0)
                        break;
                if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
                        return;
        } while (1);

        list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
                tsk = waiter->task;
                /* Pairs with the smp_load_acquire() in down_read_failed() */
                smp_store_release(&waiter->task, NULL);
                wake_up_process(tsk);
                put_task_struct(tsk);
        }
        INIT_LIST_HEAD(&sem->read_wait);
        sem->wait_readers = 0;
}

static inline int writer_trylock(struct ld_semaphore *sem)
{
        /*
         * Only wake this writer if the active part of the count can be
         * transitioned from 0 -> 1.
         */
        long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
        do {
                if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
                        return 1;
                if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
                        return 0;
        } while (1);
}

static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
        struct ldsem_waiter *waiter;

        waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
        wake_up_process(waiter->task);
}

/*
 * Handle the lock release when processes blocked on it can now run:
 * - if we come here, then:
 *   - the 'active part' of count reached 0 (but may have changed since)
 *   - the 'waiting part' of count is negative (and will still be so)
 * - the wait_lock must be held by the caller
 * - woken waiters are removed from the list after their task is zeroed
 * Writers are woken in preference to waiting readers.
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
        if (!list_empty(&sem->write_wait))
                __ldsem_wake_writer(sem);
        else if (!list_empty(&sem->read_wait))
                __ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);
        __ldsem_wake(sem);
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * Wait for the read lock to be granted.
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
        struct ldsem_waiter waiter;
        long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

        raw_spin_lock_irq(&sem->wait_lock);

        /*
         * Convert this reader's active bias into a wait bias, unless the
         * count meanwhile became positive: in that case the fast-path
         * bias already granted us the read lock, so return success.
         */
        do {
                if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
                        count += adjust;
                        break;
                }
                if (count > 0) {
                        raw_spin_unlock_irq(&sem->wait_lock);
                        return sem;
                }
        } while (1);

        list_add_tail(&waiter.list, &sem->read_wait);
        sem->wait_readers++;

        waiter.task = current;
        get_task_struct(current);

        /* If there are no active locks, wake the new lock owner(s). */
        if ((count & LDSEM_ACTIVE_MASK) == 0)
                __ldsem_wake(sem);

        raw_spin_unlock_irq(&sem->wait_lock);

        /* Wait to be given the lock. */
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (!smp_load_acquire(&waiter.task))
                        break;
                if (!timeout)
                        break;
                timeout = schedule_timeout(timeout);
        }

        __set_current_state(TASK_RUNNING);

        if (!timeout) {
                /*
                 * Lock timed out; if this waiter is still queued, back
                 * out the wait bias and remove it. If waiter.task is
                 * already NULL, a waker granted the lock concurrently
                 * with the timeout and no rollback is needed.
                 */
                raw_spin_lock_irq(&sem->wait_lock);
                if (waiter.task) {
                        atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
                        sem->wait_readers--;
                        list_del(&waiter.list);
                        raw_spin_unlock_irq(&sem->wait_lock);
                        put_task_struct(waiter.task);
                        return NULL;
                }
                raw_spin_unlock_irq(&sem->wait_lock);
        }

        return sem;
}

/*
 * Wait for the write lock to be granted.
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
        struct ldsem_waiter waiter;
        long adjust = -LDSEM_ACTIVE_BIAS;
        int locked = 0;

        raw_spin_lock_irq(&sem->wait_lock);

        /*
         * Back out this writer's active bias (the wait bias remains
         * while queued), unless the count meanwhile shows our active
         * bias is the only one: then the write lock was granted, so
         * return success.
         */
        do {
                if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
                        break;
                if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
                        raw_spin_unlock_irq(&sem->wait_lock);
                        return sem;
                }
        } while (1);

        list_add_tail(&waiter.list, &sem->write_wait);

        waiter.task = current;

        set_current_state(TASK_UNINTERRUPTIBLE);
        for (;;) {
                if (!timeout)
                        break;
                raw_spin_unlock_irq(&sem->wait_lock);
                timeout = schedule_timeout(timeout);
                raw_spin_lock_irq(&sem->wait_lock);
                set_current_state(TASK_UNINTERRUPTIBLE);
                locked = writer_trylock(sem);
                if (locked)
                        break;
        }

        if (!locked)
                atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
        list_del(&waiter.list);

        /*
         * If this writer timed out and no other writer is waiting, wake
         * any queued readers: they may have deferred to a writer that no
         * longer exists and would otherwise block indefinitely.
         */
        if (!locked && list_empty(&sem->write_wait))
                __ldsem_wake_readers(sem);

        raw_spin_unlock_irq(&sem->wait_lock);

        __set_current_state(TASK_RUNNING);

        /* The lock wait may have timed out. */
        if (!locked)
                return NULL;
        return sem;
}

static int __ldsem_down_read_nested(struct ld_semaphore *sem,
                                    int subclass, long timeout)
{
        long count;

        rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

        count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
        if (count <= 0) {
                lock_contended(&sem->dep_map, _RET_IP_);
                if (!down_read_failed(sem, count, timeout)) {
                        rwsem_release(&sem->dep_map, 1, _RET_IP_);
                        return 0;
                }
        }
        lock_acquired(&sem->dep_map, _RET_IP_);
        return 1;
}

static int __ldsem_down_write_nested(struct ld_semaphore *sem,
                                     int subclass, long timeout)
{
        long count;

        rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

        count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
        if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
                lock_contended(&sem->dep_map, _RET_IP_);
                if (!down_write_failed(sem, count, timeout)) {
                        rwsem_release(&sem->dep_map, 1, _RET_IP_);
                        return 0;
                }
        }
        lock_acquired(&sem->dep_map, _RET_IP_);
        return 1;
}

/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
        might_sleep();
        return __ldsem_down_read_nested(sem, 0, timeout);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
        long count = atomic_long_read(&sem->count);

        /* atomic_long_try_cmpxchg() reloads count on failure. */
        while (count >= 0) {
                if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
                        rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
                        lock_acquired(&sem->dep_map, _RET_IP_);
                        return 1;
                }
        }
        return 0;
}

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
        might_sleep();
        return __ldsem_down_write_nested(sem, 0, timeout);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
        long count = atomic_long_read(&sem->count);

        while ((count & LDSEM_ACTIVE_MASK) == 0) {
                if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
                        rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
                        lock_acquired(&sem->dep_map, _RET_IP_);
                        return 1;
                }
        }
        return 0;
}

/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
        long count;

        rwsem_release(&sem->dep_map, 1, _RET_IP_);

        /* Wake waiters only if this was the last active holder. */
        count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
        if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
                ldsem_wake(sem);
}

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
        long count;

        rwsem_release(&sem->dep_map, 1, _RET_IP_);

        /* A remaining negative count means other waiters are queued. */
        count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
        if (count < 0)
                ldsem_wake(sem);
}
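
/*
 * Illustrative usage sketch, not part of the original file; the function
 * below is hypothetical (the real callers live in the tty core, which
 * takes tty->ldisc_sem around line-discipline use). Both down functions
 * return 1 on success and 0 on timeout; the trylocks never sleep.
 */
#if 0   /* example only */
static int example_ldsem_usage(struct ld_semaphore *sem)
{
        /* Read side: wait up to five seconds for the lock. */
        if (!ldsem_down_read(sem, 5 * HZ))
                return -EBUSY;          /* timed out */
        /* ... read-side critical section ... */
        ldsem_up_read(sem);

        /* Write side: writers exclude readers and other writers. */
        if (!ldsem_down_write(sem, MAX_SCHEDULE_TIMEOUT))
                return -EBUSY;
        /* ... write-side critical section ... */
        ldsem_up_write(sem);

        /* Trylock: fails immediately on any contention. */
        if (ldsem_down_read_trylock(sem)) {
                /* ... */
                ldsem_up_read(sem);
        }
        return 0;
}
#endif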

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
        might_sleep();
        return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
                            long timeout)
{
        might_sleep();
        return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif
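
/*
 * Illustrative sketch of the _nested variants, not part of the original
 * file; the function and both semaphores are hypothetical. As with
 * rwsems, a distinct lockdep subclass lets one task hold two ldsems of
 * the same lock class without a false deadlock report. (Without
 * CONFIG_DEBUG_LOCK_ALLOC, the _nested calls are expected to fall back
 * to the plain ones.)
 */
#if 0   /* example only */
static void example_nested_locking(struct ld_semaphore *outer,
                                   struct ld_semaphore *inner)
{
        if (!ldsem_down_write(outer, MAX_SCHEDULE_TIMEOUT))
                return;
        /* 'inner' is in the same class; annotate it as subclass 1. */
        if (ldsem_down_write_nested(inner, 1, MAX_SCHEDULE_TIMEOUT)) {
                /* ... both locks held ... */
                ldsem_up_write(inner);
        }
        ldsem_up_write(outer);
}
#endif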