drivers/md/bcache/closure.c


DEFINITIONS

This source file includes the following definitions:
  1. closure_put_after_sub
  2. closure_sub
  3. closure_put
  4. __closure_wake_up
  5. closure_wait
  6. closure_sync_fn
  7. __closure_sync
  8. closure_debug_create
  9. closure_debug_destroy
  10. debug_seq_show
  11. debug_seq_open
  12. closure_debug_init

// SPDX-License-Identifier: GPL-2.0
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>

#include "closure.h"

static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	BUG_ON(flags & CLOSURE_GUARD_MASK);
	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			/*
			 * Refcount hit zero with a continuation set: reset
			 * the count and run cl->fn, via its workqueue if
			 * one was given to continue_at().
			 */
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			/*
			 * No continuation, or a destructor was set: tear
			 * the closure down and drop the ref it held on its
			 * parent.
			 */
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
		}
	}
}

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);

/*
 * closure_put - decrement a closure's refcount
 */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);
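
/*
 * Editorial sketch (not part of the original file): the usual get/put
 * pairing around an asynchronous sub-operation.  my_io_start() and
 * my_io_done() are hypothetical names; closure_get() and closure_put()
 * are the real closure.h primitives.
 */
static void my_io_done(struct closure *cl)
{
	/*
	 * Drops the ref taken in my_io_start(); if it was the last one,
	 * closure_put_after_sub() runs cl->fn or the destructor.
	 */
	closure_put(cl);
}

static void my_io_start(struct closure *cl)
{
	closure_get(cl);	/* hold a ref for the duration of the I/O */
	/* ... submit work whose completion path calls my_io_done(cl) ... */
}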

/*
 * __closure_wake_up - wake up all closures on a wait list, without the
 * memory barrier that the closure_wake_up() wrapper in closure.h issues
 * first
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl, *t;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */
	reverse = llist_reverse_order(list);

	/* Then do the wakeups */
	llist_for_each_entry_safe(cl, t, reverse, list) {
		closure_set_waiting(cl, 0);
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
EXPORT_SYMBOL(__closure_wake_up);

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 * closure_wake_up() is called on @waitlist.
 * @cl: closure pointer.
 *
 * Return: true if @cl was queued, false if it was already on a waitlist.
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	closure_set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
EXPORT_SYMBOL(closure_wait);
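
/*
 * Editorial sketch: the wait/wake pattern these functions form.
 * my_stage(), my_wq, my_waitlist and resource_available() are all
 * hypothetical.  A consumer parks itself on the waitlist and is rerun
 * once a producer calls closure_wake_up(), the closure.h wrapper that
 * issues a memory barrier before __closure_wake_up().
 */
static struct closure_waitlist my_waitlist;
static struct workqueue_struct *my_wq;		/* hypothetical */
extern bool resource_available(void);		/* hypothetical condition */

static void my_stage(struct closure *cl)
{
	if (!resource_available()) {
		closure_wait(&my_waitlist, cl);	/* takes the CLOSURE_WAITING ref */
		/*
		 * Recheck to close the race with a producer that fired
		 * between the check above and closure_wait(); waking the
		 * list (including ourselves) guarantees we get rerun.
		 */
		if (resource_available())
			closure_wake_up(&my_waitlist);
		continue_at(cl, my_stage, my_wq);
		return;		/* continue_at() dropped our ref on cl */
	}
	/* ... resource available: carry on ... */
}

static void my_producer(void)
{
	/* ... make resource_available() true, then: */
	closure_wake_up(&my_waitlist);
}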

struct closure_syncer {
	struct task_struct	*task;
	int			done;
};

/*
 * Runs as cl->fn once the last ref is dropped.  The task pointer is
 * read before setting s->done, under the RCU read lock: as soon as the
 * waiter in __closure_sync() observes done it may return, freeing the
 * on-stack syncer, and RCU keeps the task_struct itself valid until
 * wake_up_process() has run.
 */
static void closure_sync_fn(struct closure *cl)
{
	struct closure_syncer *s = cl->s;
	struct task_struct *p;

	rcu_read_lock();
	p = READ_ONCE(s->task);
	s->done = 1;
	wake_up_process(p);
	rcu_read_unlock();
}

void __sched __closure_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	/* Sleep until closure_sync_fn() reports completion */
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		schedule();
	}

	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL(__closure_sync);
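
/*
 * Editorial sketch: waiting synchronously on an on-stack closure.
 * closure_init_stack() and closure_sync() come from closure.h;
 * my_submit_async() is hypothetical and its completion path must drop
 * the ref with closure_put().
 */
extern void my_submit_async(struct closure *cl);	/* hypothetical */

static void my_sync_op(void)
{
	struct closure cl;

	closure_init_stack(&cl);

	closure_get(&cl);		/* ref held by the async work */
	my_submit_async(&cl);		/* completion calls closure_put(&cl) */

	/* parks in __closure_sync() until the last ref is dropped */
	closure_sync(&cl);
}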

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_create);

void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_destroy);

static struct dentry *closure_debug;

static int debug_seq_show(struct seq_file *f, void *data)
{
	struct closure *cl;

	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pS -> %pS p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s\n",
			   test_bit(WORK_STRUCT_PENDING_BIT,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING ? "R" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pS\n",
				   (void *) cl->waiting_on);

		seq_puts(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

static int debug_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_seq_show, NULL);
}

static const struct file_operations debug_ops = {
	.owner		= THIS_MODULE,
	.open		= debug_seq_open,
	.read		= seq_read,
	.release	= single_release
};

void __init closure_debug_init(void)
{
	if (!IS_ERR_OR_NULL(bcache_debug))
		/*
		 * There is no need to check the return value of
		 * debugfs_create_file(): even if it fails, the rest
		 * of the code works fine without the debug file.
		 */
		closure_debug = debugfs_create_file(
			"closures", 0400, bcache_debug, NULL, &debug_ops);
}
#endif
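
/*
 * Editorial sketch: with CONFIG_BCACHE_CLOSURES_DEBUG enabled, the file
 * registered above can be read from userspace.  Assuming bcache_debug
 * is the "bcache" debugfs directory and debugfs is mounted in the usual
 * place, the listing appears at /sys/kernel/debug/bcache/closures.
 * A plain C reader:
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/bcache/closures", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one record per live closure */
	fclose(f);
	return 0;
}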

MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");
