kernel/sched/wait.c


DEFINITIONS

This source file includes the following definitions.
  1. __init_waitqueue_head
  2. add_wait_queue
  3. add_wait_queue_exclusive
  4. remove_wait_queue
  5. __wake_up_common
  6. __wake_up_common_lock
  7. __wake_up
  8. __wake_up_locked
  9. __wake_up_locked_key
  10. __wake_up_locked_key_bookmark
  11. __wake_up_sync_key
  12. __wake_up_sync
  13. prepare_to_wait
  14. prepare_to_wait_exclusive
  15. init_wait_entry
  16. prepare_to_wait_event
  17. do_wait_intr
  18. do_wait_intr_irq
  19. finish_wait
  20. autoremove_wake_function
  21. is_kthread_should_stop
  22. wait_woken
  23. woken_wake_function

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include "sched.h"

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&wq_head->lock);
        lockdep_set_class_and_name(&wq_head->lock, key, name);
        INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        __add_wait_queue_entry_tail(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        spin_lock_irqsave(&wq_head->lock, flags);
        __remove_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
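
/*
 * A minimal usage sketch for the three helpers above, assuming a
 * hypothetical driver-private queue; init_waitqueue_entry() and
 * DECLARE_WAIT_QUEUE_HEAD() come from <linux/wait.h>:
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
 *
 *      struct wait_queue_entry wait;
 *
 *      init_waitqueue_entry(&wait, current);   // uses default_wake_function
 *      add_wait_queue(&demo_wq, &wait);        // non-exclusive, queued at the head
 *      ...                                     // sleep, e.g. via wait_woken()
 *      remove_wait_queue(&demo_wq, &wait);
 */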
  49 
/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64
/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and up to nr_exclusive
 * exclusive tasks.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key,
                        wait_queue_entry_t *bookmark)
{
        wait_queue_entry_t *curr, *next;
        int cnt = 0;

        lockdep_assert_held(&wq_head->lock);

        if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
                curr = list_next_entry(bookmark, entry);

                list_del(&bookmark->entry);
                bookmark->flags = 0;
        } else
                curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

        if (&curr->entry == &wq_head->head)
                return nr_exclusive;

        list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
                unsigned flags = curr->flags;
                int ret;

                if (flags & WQ_FLAG_BOOKMARK)
                        continue;

                ret = curr->func(curr, mode, wake_flags, key);
                if (ret < 0)
                        break;
                if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;

                if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
                                (&next->entry != &wq_head->head)) {
                        bookmark->flags = WQ_FLAG_BOOKMARK;
                        list_add_tail(&bookmark->entry, &next->entry);
                        break;
                }
        }

        return nr_exclusive;
}
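
/*
 * A worked example of the walk above: with the queue [A, B*, C*],
 * where '*' marks WQ_FLAG_EXCLUSIVE, a call with nr_exclusive == 1
 * wakes A (non-exclusive, keep scanning), then wakes B (exclusive,
 * --nr_exclusive reaches zero, stop); C stays queued for the next
 * wakeup. With nr_exclusive == 0 all three would be woken.
 */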
 109 
static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
{
        unsigned long flags;
        wait_queue_entry_t bookmark;

        bookmark.flags = 0;
        bookmark.private = NULL;
        bookmark.func = NULL;
        INIT_LIST_HEAD(&bookmark.entry);

        do {
                spin_lock_irqsave(&wq_head->lock, flags);
                nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
                                                wake_flags, key, &bookmark);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        } while (bookmark.flags & WQ_FLAG_BOOKMARK);
}
 128 
/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
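
/*
 * The familiar wrappers in <linux/wait.h> are thin macros over this
 * function; a sketch of the mapping:
 *
 *      wake_up(&wq);                   // __wake_up(&wq, TASK_NORMAL, 1, NULL)
 *      wake_up_nr(&wq, nr);            // __wake_up(&wq, TASK_NORMAL, nr, NULL)
 *      wake_up_all(&wq);               // __wake_up(&wq, TASK_NORMAL, 0, NULL)
 *      wake_up_interruptible(&wq);     // __wake_up(&wq, TASK_INTERRUPTIBLE, 1, NULL)
 */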
 145 
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
        __wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
        __wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
                unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
        __wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
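
/*
 * A sketch of the bookmark pattern for callers that manage
 * wq_head->lock themselves, modelled on the page-bit waitqueue code
 * in mm/filemap.c (key is whatever the wake functions expect):
 *
 *      wait_queue_entry_t bookmark;
 *
 *      bookmark.flags = 0;
 *      bookmark.private = NULL;
 *      bookmark.func = NULL;
 *      INIT_LIST_HEAD(&bookmark.entry);
 *
 *      spin_lock_irqsave(&wq_head->lock, flags);
 *      __wake_up_locked_key_bookmark(wq_head, TASK_NORMAL, key, &bookmark);
 *
 *      while (bookmark.flags & WQ_FLAG_BOOKMARK) {
 *              // drop the lock briefly so other CPUs can take it
 *              spin_unlock_irqrestore(&wq_head->lock, flags);
 *              cpu_relax();
 *              spin_lock_irqsave(&wq_head->lock, flags);
 *              __wake_up_locked_key_bookmark(wq_head, TASK_NORMAL, key, &bookmark);
 *      }
 *      spin_unlock_irqrestore(&wq_head->lock, flags);
 */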
 167 
/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
                        int nr_exclusive, void *key)
{
        int wake_flags = 1; /* XXX WF_SYNC */

        if (unlikely(!wq_head))
                return;

        if (unlikely(nr_exclusive != 1))
                wake_flags = 0;

        __wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
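
/*
 * Callers usually reach this through the wake_up_interruptible_sync()
 * macro; a sketch of a hypothetical producer that is about to block
 * itself and so hints the scheduler not to migrate the wakee:
 *
 *      enqueue_item(item);                     // hypothetical producer step
 *      wake_up_interruptible_sync(&wq);        // __wake_up_sync(&wq, TASK_INTERRUPTIBLE, 1)
 *      wait_event_interruptible(ack_wq, acked);        // waker schedules away soon
 */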
 199 
/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
        __wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
 208 
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * is guaranteed to see the waitqueue addition _or_ the
 * subsequent tests in this thread will see the wakeup having
 * taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops it from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
 234 
void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;

        wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&wq_head->lock, flags);
        if (list_empty(&wq_entry->entry))
                __add_wait_queue_entry_tail(wq_head, wq_entry);
        set_current_state(state);
        spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
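
/*
 * A minimal sketch of the open-coded wait loop these two helpers are
 * built for; demo_cond() stands in for the caller's condition, and
 * DEFINE_WAIT() pairs the entry with autoremove_wake_function():
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *              if (demo_cond())        // re-check after queueing: no lost wakeups
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&wq, &wait);
 */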
 248 
void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
        wq_entry->flags = flags;
        wq_entry->private = current;
        wq_entry->func = autoremove_wake_function;
        INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);
 257 
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
        unsigned long flags;
        long ret = 0;

        spin_lock_irqsave(&wq_head->lock, flags);
        if (signal_pending_state(state, current)) {
                /*
                 * An exclusive waiter must not fail if it was selected by a
                 * wakeup; it should "consume" the condition we were waiting for.
                 *
                 * The caller will recheck the condition and return success if
                 * we were already woken up; we cannot miss the event because
                 * wakeup locks/unlocks the same wq_head->lock.
                 *
                 * But we need to ensure that a set-condition plus wakeup after
                 * that can't see us; it should wake up another exclusive waiter
                 * if we fail.
                 */
                list_del_init(&wq_entry->entry);
                ret = -ERESTARTSYS;
        } else {
                if (list_empty(&wq_entry->entry)) {
                        if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
                                __add_wait_queue_entry_tail(wq_head, wq_entry);
                        else
                                __add_wait_queue(wq_head, wq_entry);
                }
                set_current_state(state);
        }
        spin_unlock_irqrestore(&wq_head->lock, flags);

        return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
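
/*
 * This is the workhorse behind the wait_event*() macros; the expansion
 * of wait_event_interruptible(wq, condition) boils down to roughly
 * this sketch (see ___wait_event() in <linux/wait.h>):
 *
 *      struct wait_queue_entry wait;
 *      long ret = 0;
 *
 *      init_wait_entry(&wait, 0);
 *      for (;;) {
 *              long intr = prepare_to_wait_event(&wq, &wait, TASK_INTERRUPTIBLE);
 *              if (condition)
 *                      break;
 *              if (intr) {             // -ERESTARTSYS: a signal is pending
 *                      ret = intr;
 *                      break;
 *              }
 *              schedule();
 *      }
 *      finish_wait(&wq, &wait);
 */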
 293 
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wait queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock(&wq->lock);
        schedule();
        spin_lock(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr);
 317 
int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
        if (likely(list_empty(&wait->entry)))
                __add_wait_queue_entry_tail(wq, wait);

        set_current_state(TASK_INTERRUPTIBLE);
        if (signal_pending(current))
                return -ERESTARTSYS;

        spin_unlock_irq(&wq->lock);
        schedule();
        spin_lock_irq(&wq->lock);

        return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
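
/*
 * A sketch of the calling convention, modelled on the
 * wait_event_interruptible_locked() macros in <linux/wait.h>: the
 * caller holds wq->lock across its condition checks, and only
 * do_wait_intr() drops the lock around schedule():
 *
 *      DEFINE_WAIT(wait);
 *      int ret;
 *
 *      spin_lock(&wq->lock);
 *      do {
 *              ret = do_wait_intr(wq, &wait);  // 0, or -ERESTARTSYS on signal
 *      } while (!ret && !demo_cond());         // demo_cond(): caller's condition
 *      __remove_wait_queue(wq, &wait);         // lock held: non-locking variant
 *      __set_current_state(TASK_RUNNING);
 *      spin_unlock(&wq->lock);
 */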
 334 
/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPUs that we haven't seen yet (and that might
         *    still change the stack area),
         * and
         *  - all other users take the lock (ie we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wq_entry->entry)) {
                spin_lock_irqsave(&wq_head->lock, flags);
                list_del_init(&wq_entry->entry);
                spin_unlock_irqrestore(&wq_head->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);
 369 
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wq_entry, mode, sync, key);

        /* On a successful wakeup, take the entry off the queue ourselves. */
        if (ret)
                list_del_init(&wq_entry->entry);

        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

/* kthread_should_stop() may only be called by kthreads, hence the PF_KTHREAD check. */
static inline bool is_kthread_should_stop(void)
{
        return (current->flags & PF_KTHREAD) && kthread_should_stop();
}
 385 
/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()                       // in woken_wake_function()
 *
 *     p->state = mode;                         wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A                           try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))     <full barrier>
 *         schedule()                              if (p->state & mode)
 *     p->state = TASK_RUNNING;                       p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;       ~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B                           condition = true;
 * }                                            smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);          wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
        /*
         * The below executes an smp_mb(), which matches with the full barrier
         * executed by the try_to_wake_up() in woken_wake_function() such that
         * either we see the store to wq_entry->flags in woken_wake_function()
         * or woken_wake_function() sees our store to current->state.
         */
        set_current_state(mode); /* A */
        if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
                timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);

        /*
         * The below executes an smp_mb(), which matches with the smp_mb() (C)
         * in woken_wake_function() such that either we see the wait condition
         * being true or the store to wq_entry->flags in woken_wake_function()
         * follows ours in the coherence order.
         */
        smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

        return timeout;
}
EXPORT_SYMBOL(wait_woken);
 430 
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
        /* Pairs with the smp_store_mb() in wait_woken(). */
        smp_mb(); /* C */
        wq_entry->flags |= WQ_FLAG_WOKEN;

        return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
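
/*
 * A compact sketch of the pattern diagrammed above wait_woken(), with
 * a hypothetical demo_cond() as the wait condition:
 *
 *      DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *      add_wait_queue(&wq, &wait);
 *      while (!demo_cond() && timeout)
 *              timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *      remove_wait_queue(&wq, &wait);
 */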
