root/net/sunrpc/sched.c


DEFINITIONS

This source file includes the following definitions:
  1. rpc_task_timeout
  2. __rpc_disable_timer
  3. rpc_set_queue_timer
  4. __rpc_add_timer
  5. rpc_set_waitqueue_priority
  6. rpc_reset_waitqueue_priority
  7. __rpc_list_enqueue_task
  8. __rpc_list_dequeue_task
  9. __rpc_add_wait_queue_priority
  10. __rpc_add_wait_queue
  11. __rpc_remove_wait_queue_priority
  12. __rpc_remove_wait_queue
  13. __rpc_init_priority_wait_queue
  14. rpc_init_priority_wait_queue
  15. rpc_init_wait_queue
  16. rpc_destroy_wait_queue
  17. rpc_wait_bit_killable
  18. rpc_task_set_debuginfo
  19. rpc_task_set_debuginfo
  20. rpc_set_active
  21. rpc_complete_task
  22. __rpc_wait_for_completion_task
  23. rpc_make_runnable
  24. __rpc_sleep_on_priority
  25. __rpc_sleep_on_priority_timeout
  26. rpc_set_tk_callback
  27. rpc_sleep_check_activated
  28. rpc_sleep_on_timeout
  29. rpc_sleep_on
  30. rpc_sleep_on_priority_timeout
  31. rpc_sleep_on_priority
  32. __rpc_do_wake_up_task_on_wq
  33. rpc_wake_up_task_on_wq_queue_action_locked
  34. rpc_wake_up_task_queue_locked
  35. rpc_wake_up_queued_task
  36. rpc_task_action_set_status
  37. rpc_wake_up_task_queue_set_status_locked
  38. rpc_wake_up_queued_task_set_status
  39. __rpc_find_next_queued_priority
  40. __rpc_find_next_queued
  41. rpc_wake_up_first_on_wq
  42. rpc_wake_up_first
  43. rpc_wake_up_next_func
  44. rpc_wake_up_next
  45. rpc_wake_up
  46. rpc_wake_up_status
  47. __rpc_queue_timer_fn
  48. __rpc_atrun
  49. rpc_delay
  50. rpc_prepare_task
  51. rpc_init_task_statistics
  52. rpc_reset_task_statistics
  53. rpc_exit_task
  54. rpc_signal_task
  55. rpc_exit
  56. rpc_release_calldata
  57. __rpc_execute
  58. rpc_execute
  59. rpc_async_schedule
  60. rpc_malloc
  61. rpc_free
  62. rpc_init_task
  63. rpc_alloc_task
  64. rpc_new_task
  65. rpc_free_task
  66. rpc_async_release
  67. rpc_release_resources_task
  68. rpc_final_put_task
  69. rpc_do_put_task
  70. rpc_put_task
  71. rpc_put_task_async
  72. rpc_release_task
  73. rpciod_up
  74. rpciod_down
  75. rpciod_start
  76. rpciod_stop
  77. rpc_destroy_mempool
  78. rpc_init_mempool

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * linux/net/sunrpc/sched.c
   4  *
   5  * Scheduling for synchronous and asynchronous RPC requests.
   6  *
   7  * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
   8  *
   9  * TCP NFS related read + write fixes
  10  * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  11  */
  12 
  13 #include <linux/module.h>
  14 
  15 #include <linux/sched.h>
  16 #include <linux/interrupt.h>
  17 #include <linux/slab.h>
  18 #include <linux/mempool.h>
  19 #include <linux/smp.h>
  20 #include <linux/spinlock.h>
  21 #include <linux/mutex.h>
  22 #include <linux/freezer.h>
  23 #include <linux/sched/mm.h>
  24 
  25 #include <linux/sunrpc/clnt.h>
  26 #include <linux/sunrpc/metrics.h>
  27 
  28 #include "sunrpc.h"
  29 
  30 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  31 #define RPCDBG_FACILITY         RPCDBG_SCHED
  32 #endif
  33 
  34 #define CREATE_TRACE_POINTS
  35 #include <trace/events/sunrpc.h>
  36 
  37 /*
  38  * RPC slabs and memory pools
  39  */
  40 #define RPC_BUFFER_MAXSIZE      (2048)
  41 #define RPC_BUFFER_POOLSIZE     (8)
  42 #define RPC_TASK_POOLSIZE       (8)
  43 static struct kmem_cache        *rpc_task_slabp __read_mostly;
  44 static struct kmem_cache        *rpc_buffer_slabp __read_mostly;
  45 static mempool_t        *rpc_task_mempool __read_mostly;
  46 static mempool_t        *rpc_buffer_mempool __read_mostly;
  47 
  48 static void                     rpc_async_schedule(struct work_struct *);
   49 static void                     rpc_release_task(struct rpc_task *task);
  50 static void __rpc_queue_timer_fn(struct work_struct *);
  51 
  52 /*
  53  * RPC tasks sit here while waiting for conditions to improve.
  54  */
  55 static struct rpc_wait_queue delay_queue;
  56 
  57 /*
  58  * rpciod-related stuff
  59  */
  60 struct workqueue_struct *rpciod_workqueue __read_mostly;
  61 struct workqueue_struct *xprtiod_workqueue __read_mostly;
  62 EXPORT_SYMBOL_GPL(xprtiod_workqueue);
  63 
  64 unsigned long
  65 rpc_task_timeout(const struct rpc_task *task)
  66 {
  67         unsigned long timeout = READ_ONCE(task->tk_timeout);
  68 
  69         if (timeout != 0) {
  70                 unsigned long now = jiffies;
  71                 if (time_before(now, timeout))
  72                         return timeout - now;
  73         }
  74         return 0;
  75 }
  76 EXPORT_SYMBOL_GPL(rpc_task_timeout);
  77 
  78 /*
  79  * Disable the timer for a given RPC task. Should be called with
   80  * queue->lock held in order to avoid races with
   81  * __rpc_queue_timer_fn().
  82  */
  83 static void
  84 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
  85 {
  86         if (list_empty(&task->u.tk_wait.timer_list))
  87                 return;
  88         dprintk("RPC: %5u disabling timer\n", task->tk_pid);
  89         task->tk_timeout = 0;
  90         list_del(&task->u.tk_wait.timer_list);
  91         if (list_empty(&queue->timer_list.list))
  92                 cancel_delayed_work(&queue->timer_list.dwork);
  93 }
  94 
  95 static void
  96 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
  97 {
  98         unsigned long now = jiffies;
  99         queue->timer_list.expires = expires;
 100         if (time_before_eq(expires, now))
 101                 expires = 0;
 102         else
 103                 expires -= now;
 104         mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
 105 }
 106 
 107 /*
 108  * Set up a timer for the current task.
 109  */
 110 static void
 111 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
 112                 unsigned long timeout)
 113 {
 114         dprintk("RPC: %5u setting alarm for %u ms\n",
 115                 task->tk_pid, jiffies_to_msecs(timeout - jiffies));
 116 
 117         task->tk_timeout = timeout;
 118         if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
 119                 rpc_set_queue_timer(queue, timeout);
 120         list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 121 }
 122 
 123 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
 124 {
 125         if (queue->priority != priority) {
 126                 queue->priority = priority;
 127                 queue->nr = 1U << priority;
 128         }
 129 }
 130 
 131 static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
 132 {
 133         rpc_set_waitqueue_priority(queue, queue->maxpriority);
 134 }
 135 
 136 /*
 137  * Add a request to a queue list
 138  */
 139 static void
 140 __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
 141 {
 142         struct rpc_task *t;
 143 
 144         list_for_each_entry(t, q, u.tk_wait.list) {
 145                 if (t->tk_owner == task->tk_owner) {
 146                         list_add_tail(&task->u.tk_wait.links,
 147                                         &t->u.tk_wait.links);
 148                         /* Cache the queue head in task->u.tk_wait.list */
 149                         task->u.tk_wait.list.next = q;
 150                         task->u.tk_wait.list.prev = NULL;
 151                         return;
 152                 }
 153         }
 154         INIT_LIST_HEAD(&task->u.tk_wait.links);
 155         list_add_tail(&task->u.tk_wait.list, q);
 156 }
 157 
 158 /*
 159  * Remove request from a queue list
 160  */
 161 static void
 162 __rpc_list_dequeue_task(struct rpc_task *task)
 163 {
 164         struct list_head *q;
 165         struct rpc_task *t;
 166 
 167         if (task->u.tk_wait.list.prev == NULL) {
 168                 list_del(&task->u.tk_wait.links);
 169                 return;
 170         }
 171         if (!list_empty(&task->u.tk_wait.links)) {
 172                 t = list_first_entry(&task->u.tk_wait.links,
 173                                 struct rpc_task,
 174                                 u.tk_wait.links);
 175                 /* Assume __rpc_list_enqueue_task() cached the queue head */
 176                 q = t->u.tk_wait.list.next;
 177                 list_add_tail(&t->u.tk_wait.list, q);
 178                 list_del(&task->u.tk_wait.links);
 179         }
 180         list_del(&task->u.tk_wait.list);
 181 }
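
/*
 * Illustrative note (added for exposition; not in the upstream file):
 * __rpc_list_enqueue_task() overloads task->u.tk_wait.list to mark a
 * task as "grouped behind another task of the same owner". For a queue
 * head q holding task A (owner 1) with task B (owner 1) grouped
 * behind it:
 *
 *	A->u.tk_wait.list	links A into q as usual
 *	B->u.tk_wait.links	links B into A's group
 *	B->u.tk_wait.list	= { .next = q, .prev = NULL }
 *
 * The NULL .prev is the marker __rpc_list_dequeue_task() tests to tell
 * a grouped task from a group head, and the cached .next is what lets
 * it promote the next grouped task onto q when the head is removed.
 */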
 182 
 183 /*
 184  * Add new request to a priority queue.
 185  */
 186 static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
 187                 struct rpc_task *task,
 188                 unsigned char queue_priority)
 189 {
 190         if (unlikely(queue_priority > queue->maxpriority))
 191                 queue_priority = queue->maxpriority;
 192         __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
 193 }
 194 
 195 /*
 196  * Add new request to wait queue.
 197  *
 198  * Swapper tasks always get inserted at the head of the queue.
 199  * This should avoid many nasty memory deadlocks and hopefully
 200  * improve overall performance.
 201  * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 202  */
 203 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 204                 struct rpc_task *task,
 205                 unsigned char queue_priority)
 206 {
 207         WARN_ON_ONCE(RPC_IS_QUEUED(task));
 208         if (RPC_IS_QUEUED(task))
 209                 return;
 210 
 211         INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
 212         if (RPC_IS_PRIORITY(queue))
 213                 __rpc_add_wait_queue_priority(queue, task, queue_priority);
 214         else if (RPC_IS_SWAPPER(task))
 215                 list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 216         else
 217                 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 218         task->tk_waitqueue = queue;
 219         queue->qlen++;
 220         /* barrier matches the read in rpc_wake_up_task_queue_locked() */
 221         smp_wmb();
 222         rpc_set_queued(task);
 223 
 224         dprintk("RPC: %5u added to queue %p \"%s\"\n",
 225                         task->tk_pid, queue, rpc_qname(queue));
 226 }
 227 
 228 /*
 229  * Remove request from a priority queue.
 230  */
 231 static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
 232 {
 233         __rpc_list_dequeue_task(task);
 234 }
 235 
 236 /*
 237  * Remove request from queue.
 238  * Note: must be called with spin lock held.
 239  */
 240 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
 241 {
 242         __rpc_disable_timer(queue, task);
 243         if (RPC_IS_PRIORITY(queue))
 244                 __rpc_remove_wait_queue_priority(task);
 245         else
 246                 list_del(&task->u.tk_wait.list);
 247         queue->qlen--;
 248         dprintk("RPC: %5u removed from queue %p \"%s\"\n",
 249                         task->tk_pid, queue, rpc_qname(queue));
 250 }
 251 
 252 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
 253 {
 254         int i;
 255 
 256         spin_lock_init(&queue->lock);
 257         for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
 258                 INIT_LIST_HEAD(&queue->tasks[i]);
 259         queue->maxpriority = nr_queues - 1;
 260         rpc_reset_waitqueue_priority(queue);
 261         queue->qlen = 0;
 262         queue->timer_list.expires = 0;
 263         INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
 264         INIT_LIST_HEAD(&queue->timer_list.list);
 265         rpc_assign_waitqueue_name(queue, qname);
 266 }
 267 
 268 void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 269 {
 270         __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
 271 }
 272 EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
 273 
 274 void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 275 {
 276         __rpc_init_priority_wait_queue(queue, qname, 1);
 277 }
 278 EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 279 
 280 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 281 {
 282         cancel_delayed_work_sync(&queue->timer_list.dwork);
 283 }
 284 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
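
/*
 * Usage sketch (hypothetical caller; not from the upstream file): a
 * wait queue is normally embedded in a longer-lived object and set up
 * once before any task can sleep on it.
 *
 *	struct rpc_wait_queue pending;		// hypothetical field
 *
 *	rpc_init_wait_queue(&pending, "pending");
 *	...
 *	rpc_destroy_wait_queue(&pending);	// once no tasks remain
 */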
 285 
 286 static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
 287 {
 288         freezable_schedule_unsafe();
 289         if (signal_pending_state(mode, current))
 290                 return -ERESTARTSYS;
 291         return 0;
 292 }
 293 
 294 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
 295 static void rpc_task_set_debuginfo(struct rpc_task *task)
 296 {
 297         static atomic_t rpc_pid;
 298 
 299         task->tk_pid = atomic_inc_return(&rpc_pid);
 300 }
 301 #else
 302 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
 303 {
 304 }
 305 #endif
 306 
 307 static void rpc_set_active(struct rpc_task *task)
 308 {
 309         rpc_task_set_debuginfo(task);
 310         set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 311         trace_rpc_task_begin(task, NULL);
 312 }
 313 
 314 /*
 315  * Mark an RPC call as having completed by clearing the 'active' bit
 316  * and then waking up all tasks that were sleeping.
 317  */
 318 static int rpc_complete_task(struct rpc_task *task)
 319 {
 320         void *m = &task->tk_runstate;
 321         wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
 322         struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
 323         unsigned long flags;
 324         int ret;
 325 
 326         trace_rpc_task_complete(task, NULL);
 327 
 328         spin_lock_irqsave(&wq->lock, flags);
 329         clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 330         ret = atomic_dec_and_test(&task->tk_count);
 331         if (waitqueue_active(wq))
 332                 __wake_up_locked_key(wq, TASK_NORMAL, &k);
 333         spin_unlock_irqrestore(&wq->lock, flags);
 334         return ret;
 335 }
 336 
 337 /*
 338  * Allow callers to wait for completion of an RPC call
 339  *
 340  * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 341  * to enforce taking of the wq->lock and hence avoid races with
 342  * rpc_complete_task().
 343  */
 344 int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
 345 {
 346         if (action == NULL)
 347                 action = rpc_wait_bit_killable;
 348         return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
 349                         action, TASK_KILLABLE);
 350 }
 351 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
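
/*
 * Usage sketch (hypothetical; not from the upstream file): a caller
 * that holds a reference on a started task can block until the
 * RPC_TASK_ACTIVE bit clears, typically via the
 * rpc_wait_for_completion_task() wrapper from sched.h, which passes a
 * NULL action so rpc_wait_bit_killable is used:
 *
 *	task = rpc_run_task(&task_setup_data);	// setup data hypothetical
 *	if (!IS_ERR(task)) {
 *		rpc_wait_for_completion_task(task);
 *		status = task->tk_status;
 *		rpc_put_task(task);
 *	}
 */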
 352 
 353 /*
 354  * Make an RPC task runnable.
 355  *
 356  * Note: If the task is ASYNC, and is being made runnable after sitting on an
 357  * rpc_wait_queue, this must be called with the queue spinlock held to protect
 358  * the wait queue operation.
 359  * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 360  * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 361  * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 362  * the RPC_TASK_RUNNING flag.
 363  */
 364 static void rpc_make_runnable(struct workqueue_struct *wq,
 365                 struct rpc_task *task)
 366 {
 367         bool need_wakeup = !rpc_test_and_set_running(task);
 368 
 369         rpc_clear_queued(task);
 370         if (!need_wakeup)
 371                 return;
 372         if (RPC_IS_ASYNC(task)) {
 373                 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
 374                 queue_work(wq, &task->u.tk_work);
 375         } else
 376                 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
 377 }
 378 
 379 /*
 380  * Prepare for sleeping on a wait queue.
 381  * By always appending tasks to the list we ensure FIFO behavior.
 382  * NB: An RPC task will only receive interrupt-driven events as long
 383  * as it's on a wait queue.
 384  */
 385 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 386                 struct rpc_task *task,
 387                 unsigned char queue_priority)
 388 {
 389         dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
 390                         task->tk_pid, rpc_qname(q), jiffies);
 391 
 392         trace_rpc_task_sleep(task, q);
 393 
 394         __rpc_add_wait_queue(q, task, queue_priority);
 395 
 396 }
 397 
 398 static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
 399                 struct rpc_task *task, unsigned long timeout,
 400                 unsigned char queue_priority)
 401 {
 402         if (time_is_after_jiffies(timeout)) {
 403                 __rpc_sleep_on_priority(q, task, queue_priority);
 404                 __rpc_add_timer(q, task, timeout);
 405         } else
 406                 task->tk_status = -ETIMEDOUT;
 407 }
 408 
 409 static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
 410 {
 411         if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
 412                 task->tk_callback = action;
 413 }
 414 
 415 static bool rpc_sleep_check_activated(struct rpc_task *task)
 416 {
 417         /* We shouldn't ever put an inactive task to sleep */
 418         if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
 419                 task->tk_status = -EIO;
 420                 rpc_put_task_async(task);
 421                 return false;
 422         }
 423         return true;
 424 }
 425 
 426 void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
 427                                 rpc_action action, unsigned long timeout)
 428 {
 429         if (!rpc_sleep_check_activated(task))
 430                 return;
 431 
 432         rpc_set_tk_callback(task, action);
 433 
 434         /*
 435          * Protect the queue operations.
 436          */
 437         spin_lock(&q->lock);
 438         __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
 439         spin_unlock(&q->lock);
 440 }
 441 EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);
 442 
 443 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 444                                 rpc_action action)
 445 {
 446         if (!rpc_sleep_check_activated(task))
 447                 return;
 448 
 449         rpc_set_tk_callback(task, action);
 450 
 451         WARN_ON_ONCE(task->tk_timeout != 0);
 452         /*
 453          * Protect the queue operations.
 454          */
 455         spin_lock(&q->lock);
 456         __rpc_sleep_on_priority(q, task, task->tk_priority);
 457         spin_unlock(&q->lock);
 458 }
 459 EXPORT_SYMBOL_GPL(rpc_sleep_on);
 460 
 461 void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
 462                 struct rpc_task *task, unsigned long timeout, int priority)
 463 {
 464         if (!rpc_sleep_check_activated(task))
 465                 return;
 466 
 467         priority -= RPC_PRIORITY_LOW;
 468         /*
 469          * Protect the queue operations.
 470          */
 471         spin_lock(&q->lock);
 472         __rpc_sleep_on_priority_timeout(q, task, timeout, priority);
 473         spin_unlock(&q->lock);
 474 }
 475 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
 476 
 477 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
 478                 int priority)
 479 {
 480         if (!rpc_sleep_check_activated(task))
 481                 return;
 482 
 483         WARN_ON_ONCE(task->tk_timeout != 0);
 484         priority -= RPC_PRIORITY_LOW;
 485         /*
 486          * Protect the queue operations.
 487          */
 488         spin_lock(&q->lock);
 489         __rpc_sleep_on_priority(q, task, priority);
 490         spin_unlock(&q->lock);
 491 }
 492 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
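
/*
 * Usage sketch (hypothetical; not from the upstream file): the sleep
 * and wake primitives always pair around a shared wait queue. An FSM
 * step parks the task, and whoever makes the condition true wakes it:
 *
 *	// inside the task's current tk_action step:
 *	rpc_sleep_on(&pending_queue, task, NULL);	// queue name hypothetical
 *
 *	// later, in the completion path (possibly another context):
 *	rpc_wake_up_queued_task(&pending_queue, task);
 *
 * On wake-up, __rpc_execute() runs tk_callback first (if one was set
 * via the action argument) and then the next tk_action step.
 */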
 493 
 494 /**
 495  * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 496  * @wq: workqueue on which to run task
 497  * @queue: wait queue
 498  * @task: task to be woken up
 499  *
 500  * Caller must hold queue->lock, and have cleared the task queued flag.
 501  */
 502 static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
 503                 struct rpc_wait_queue *queue,
 504                 struct rpc_task *task)
 505 {
 506         dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
 507                         task->tk_pid, jiffies);
 508 
 509         /* Has the task been executed yet? If not, we cannot wake it up! */
 510         if (!RPC_IS_ACTIVATED(task)) {
 511                 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
 512                 return;
 513         }
 514 
 515         trace_rpc_task_wakeup(task, queue);
 516 
 517         __rpc_remove_wait_queue(queue, task);
 518 
 519         rpc_make_runnable(wq, task);
 520 
 521         dprintk("RPC:       __rpc_wake_up_task done\n");
 522 }
 523 
 524 /*
 525  * Wake up a queued task while the queue lock is being held
 526  */
 527 static struct rpc_task *
 528 rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
 529                 struct rpc_wait_queue *queue, struct rpc_task *task,
 530                 bool (*action)(struct rpc_task *, void *), void *data)
 531 {
 532         if (RPC_IS_QUEUED(task)) {
 533                 smp_rmb();
 534                 if (task->tk_waitqueue == queue) {
 535                         if (action == NULL || action(task, data)) {
 536                                 __rpc_do_wake_up_task_on_wq(wq, queue, task);
 537                                 return task;
 538                         }
 539                 }
 540         }
 541         return NULL;
 542 }
 543 
 544 /*
 545  * Wake up a queued task while the queue lock is being held
 546  */
 547 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
 548                                           struct rpc_task *task)
 549 {
 550         rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
 551                                                    task, NULL, NULL);
 552 }
 553 
 554 /*
 555  * Wake up a task on a specific queue
 556  */
 557 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
 558 {
 559         if (!RPC_IS_QUEUED(task))
 560                 return;
 561         spin_lock(&queue->lock);
 562         rpc_wake_up_task_queue_locked(queue, task);
 563         spin_unlock(&queue->lock);
 564 }
 565 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 566 
 567 static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
 568 {
 569         task->tk_status = *(int *)status;
 570         return true;
 571 }
 572 
 573 static void
 574 rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
 575                 struct rpc_task *task, int status)
 576 {
 577         rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
 578                         task, rpc_task_action_set_status, &status);
 579 }
 580 
 581 /**
 582  * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 583  * @queue: pointer to rpc_wait_queue
 584  * @task: pointer to rpc_task
 585  * @status: integer error value
 586  *
 587  * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 588  * set to the value of @status.
 589  */
 590 void
 591 rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
 592                 struct rpc_task *task, int status)
 593 {
 594         if (!RPC_IS_QUEUED(task))
 595                 return;
 596         spin_lock(&queue->lock);
 597         rpc_wake_up_task_queue_set_status_locked(queue, task, status);
 598         spin_unlock(&queue->lock);
 599 }
 600 
 601 /*
 602  * Wake up the next task on a priority queue.
 603  */
 604 static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
 605 {
 606         struct list_head *q;
 607         struct rpc_task *task;
 608 
 609         /*
 610          * Service a batch of tasks from a single owner.
 611          */
 612         q = &queue->tasks[queue->priority];
 613         if (!list_empty(q) && --queue->nr) {
 614                 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
 615                 goto out;
 616         }
 617 
 618         /*
 619          * Service the next queue.
 620          */
 621         do {
 622                 if (q == &queue->tasks[0])
 623                         q = &queue->tasks[queue->maxpriority];
 624                 else
 625                         q = q - 1;
 626                 if (!list_empty(q)) {
 627                         task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
 628                         goto new_queue;
 629                 }
 630         } while (q != &queue->tasks[queue->priority]);
 631 
 632         rpc_reset_waitqueue_priority(queue);
 633         return NULL;
 634 
 635 new_queue:
 636         rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
 637 out:
 638         return task;
 639 }
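
/*
 * Worked example (added for exposition): rpc_set_waitqueue_priority()
 * sets queue->nr to 1 << priority, so once this function settles on
 * priority level 2 it will hand out up to four consecutive tasks from
 * queue->tasks[2] (the "--queue->nr" test above) before the do/while
 * loop rotates onward, scanning toward tasks[0] and wrapping back to
 * tasks[maxpriority]. Higher levels therefore get geometrically larger
 * batches rather than strict precedence over lower ones.
 */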
 640 
 641 static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
 642 {
 643         if (RPC_IS_PRIORITY(queue))
 644                 return __rpc_find_next_queued_priority(queue);
 645         if (!list_empty(&queue->tasks[0]))
 646                 return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
 647         return NULL;
 648 }
 649 
 650 /*
 651  * Wake up the first task on the wait queue.
 652  */
 653 struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
 654                 struct rpc_wait_queue *queue,
 655                 bool (*func)(struct rpc_task *, void *), void *data)
 656 {
 657         struct rpc_task *task = NULL;
 658 
 659         dprintk("RPC:       wake_up_first(%p \"%s\")\n",
 660                         queue, rpc_qname(queue));
 661         spin_lock(&queue->lock);
 662         task = __rpc_find_next_queued(queue);
 663         if (task != NULL)
 664                 task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
 665                                 task, func, data);
 666         spin_unlock(&queue->lock);
 667 
 668         return task;
 669 }
 670 
 671 /*
 672  * Wake up the first task on the wait queue.
 673  */
 674 struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
 675                 bool (*func)(struct rpc_task *, void *), void *data)
 676 {
 677         return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
 678 }
 679 EXPORT_SYMBOL_GPL(rpc_wake_up_first);
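
/*
 * Usage sketch (hypothetical; not from the upstream file): the @func
 * predicate lets a caller hand a limited resource to at most one
 * sleeper. It runs under queue->lock; returning true wakes the first
 * queued task, returning false leaves it queued and wakes nothing:
 *
 *	static bool my_grant_slot(struct rpc_task *task, void *data)
 *	{
 *		struct my_pool *pool = data;	// hypothetical type
 *
 *		return my_pool_assign(pool, task) == 0;	// hypothetical helper
 *	}
 *	...
 *	rpc_wake_up_first(&pool->waiters, my_grant_slot, pool);
 */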
 680 
 681 static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
 682 {
 683         return true;
 684 }
 685 
 686 /*
 687  * Wake up the next task on the wait queue.
  688  */
 689 struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
 690 {
 691         return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
 692 }
 693 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 694 
 695 /**
 696  * rpc_wake_up - wake up all rpc_tasks
 697  * @queue: rpc_wait_queue on which the tasks are sleeping
 698  *
 699  * Grabs queue->lock
 700  */
 701 void rpc_wake_up(struct rpc_wait_queue *queue)
 702 {
 703         struct list_head *head;
 704 
 705         spin_lock(&queue->lock);
 706         head = &queue->tasks[queue->maxpriority];
 707         for (;;) {
 708                 while (!list_empty(head)) {
 709                         struct rpc_task *task;
 710                         task = list_first_entry(head,
 711                                         struct rpc_task,
 712                                         u.tk_wait.list);
 713                         rpc_wake_up_task_queue_locked(queue, task);
 714                 }
 715                 if (head == &queue->tasks[0])
 716                         break;
 717                 head--;
 718         }
 719         spin_unlock(&queue->lock);
 720 }
 721 EXPORT_SYMBOL_GPL(rpc_wake_up);
 722 
 723 /**
 724  * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 725  * @queue: rpc_wait_queue on which the tasks are sleeping
 726  * @status: status value to set
 727  *
 728  * Grabs queue->lock
 729  */
 730 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 731 {
 732         struct list_head *head;
 733 
 734         spin_lock(&queue->lock);
 735         head = &queue->tasks[queue->maxpriority];
 736         for (;;) {
 737                 while (!list_empty(head)) {
 738                         struct rpc_task *task;
 739                         task = list_first_entry(head,
 740                                         struct rpc_task,
 741                                         u.tk_wait.list);
 742                         task->tk_status = status;
 743                         rpc_wake_up_task_queue_locked(queue, task);
 744                 }
 745                 if (head == &queue->tasks[0])
 746                         break;
 747                 head--;
 748         }
 749         spin_unlock(&queue->lock);
 750 }
 751 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
 752 
 753 static void __rpc_queue_timer_fn(struct work_struct *work)
 754 {
 755         struct rpc_wait_queue *queue = container_of(work,
 756                         struct rpc_wait_queue,
 757                         timer_list.dwork.work);
 758         struct rpc_task *task, *n;
 759         unsigned long expires, now, timeo;
 760 
 761         spin_lock(&queue->lock);
 762         expires = now = jiffies;
 763         list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
 764                 timeo = task->tk_timeout;
 765                 if (time_after_eq(now, timeo)) {
 766                         dprintk("RPC: %5u timeout\n", task->tk_pid);
 767                         task->tk_status = -ETIMEDOUT;
 768                         rpc_wake_up_task_queue_locked(queue, task);
 769                         continue;
 770                 }
 771                 if (expires == now || time_after(expires, timeo))
 772                         expires = timeo;
 773         }
 774         if (!list_empty(&queue->timer_list.list))
 775                 rpc_set_queue_timer(queue, expires);
 776         spin_unlock(&queue->lock);
 777 }
 778 
 779 static void __rpc_atrun(struct rpc_task *task)
 780 {
 781         if (task->tk_status == -ETIMEDOUT)
 782                 task->tk_status = 0;
 783 }
 784 
 785 /*
 786  * Run a task at a later time
 787  */
 788 void rpc_delay(struct rpc_task *task, unsigned long delay)
 789 {
 790         rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
 791 }
 792 EXPORT_SYMBOL_GPL(rpc_delay);
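
/*
 * Usage sketch (hypothetical; not from the upstream file): a common
 * pattern in FSM steps is to re-aim tk_action and then back off before
 * retrying. __rpc_atrun() clears the -ETIMEDOUT that the queue timer
 * set, so the task resumes the retry step with tk_status == 0:
 *
 *	task->tk_action = my_retry_step;	// hypothetical next step
 *	rpc_delay(task, 3 * HZ);		// run it again in ~3 seconds
 */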
 793 
 794 /*
 795  * Helper to call task->tk_ops->rpc_call_prepare
 796  */
 797 void rpc_prepare_task(struct rpc_task *task)
 798 {
 799         task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 800 }
 801 
 802 static void
 803 rpc_init_task_statistics(struct rpc_task *task)
 804 {
 805         /* Initialize retry counters */
 806         task->tk_garb_retry = 2;
 807         task->tk_cred_retry = 2;
 808         task->tk_rebind_retry = 2;
 809 
 810         /* starting timestamp */
 811         task->tk_start = ktime_get();
 812 }
 813 
 814 static void
 815 rpc_reset_task_statistics(struct rpc_task *task)
 816 {
 817         task->tk_timeouts = 0;
 818         task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
 819         rpc_init_task_statistics(task);
 820 }
 821 
 822 /*
 823  * Helper that calls task->tk_ops->rpc_call_done if it exists
 824  */
 825 void rpc_exit_task(struct rpc_task *task)
 826 {
 827         task->tk_action = NULL;
 828         if (task->tk_ops->rpc_count_stats)
 829                 task->tk_ops->rpc_count_stats(task, task->tk_calldata);
 830         else if (task->tk_client)
 831                 rpc_count_iostats(task, task->tk_client->cl_metrics);
 832         if (task->tk_ops->rpc_call_done != NULL) {
 833                 task->tk_ops->rpc_call_done(task, task->tk_calldata);
 834                 if (task->tk_action != NULL) {
 835                         /* Always release the RPC slot and buffer memory */
 836                         xprt_release(task);
 837                         rpc_reset_task_statistics(task);
 838                 }
 839         }
 840 }
 841 
 842 void rpc_signal_task(struct rpc_task *task)
 843 {
 844         struct rpc_wait_queue *queue;
 845 
 846         if (!RPC_IS_ACTIVATED(task))
 847                 return;
 848         set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
 849         smp_mb__after_atomic();
 850         queue = READ_ONCE(task->tk_waitqueue);
 851         if (queue)
 852                 rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
 853 }
 854 
 855 void rpc_exit(struct rpc_task *task, int status)
 856 {
 857         task->tk_status = status;
 858         task->tk_action = rpc_exit_task;
 859         rpc_wake_up_queued_task(task->tk_waitqueue, task);
 860 }
 861 EXPORT_SYMBOL_GPL(rpc_exit);
 862 
 863 void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 864 {
 865         if (ops->rpc_release != NULL)
 866                 ops->rpc_release(calldata);
 867 }
 868 
 869 /*
 870  * This is the RPC `scheduler' (or rather, the finite state machine).
 871  */
 872 static void __rpc_execute(struct rpc_task *task)
 873 {
 874         struct rpc_wait_queue *queue;
 875         int task_is_async = RPC_IS_ASYNC(task);
 876         int status = 0;
 877 
 878         dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
 879                         task->tk_pid, task->tk_flags);
 880 
 881         WARN_ON_ONCE(RPC_IS_QUEUED(task));
 882         if (RPC_IS_QUEUED(task))
 883                 return;
 884 
 885         for (;;) {
 886                 void (*do_action)(struct rpc_task *);
 887 
 888                 /*
 889                  * Perform the next FSM step or a pending callback.
 890                  *
 891                  * tk_action may be NULL if the task has been killed.
 892                  * In particular, note that rpc_killall_tasks may
 893                  * do this at any time, so beware when dereferencing.
 894                  */
 895                 do_action = task->tk_action;
 896                 if (task->tk_callback) {
 897                         do_action = task->tk_callback;
 898                         task->tk_callback = NULL;
 899                 }
 900                 if (!do_action)
 901                         break;
 902                 trace_rpc_task_run_action(task, do_action);
 903                 do_action(task);
 904 
 905                 /*
 906                  * Lockless check for whether task is sleeping or not.
 907                  */
 908                 if (!RPC_IS_QUEUED(task))
 909                         continue;
 910 
 911                 /*
 912                  * Signalled tasks should exit rather than sleep.
 913                  */
 914                 if (RPC_SIGNALLED(task)) {
 915                         task->tk_rpc_status = -ERESTARTSYS;
 916                         rpc_exit(task, -ERESTARTSYS);
 917                 }
 918 
 919                 /*
 920                  * The queue->lock protects against races with
 921                  * rpc_make_runnable().
 922                  *
 923                  * Note that once we clear RPC_TASK_RUNNING on an asynchronous
 924                  * rpc_task, rpc_make_runnable() can assign it to a
 925                  * different workqueue. We therefore cannot assume that the
  926                  * rpc_task pointer can still safely be dereferenced.
 927                  */
 928                 queue = task->tk_waitqueue;
 929                 spin_lock(&queue->lock);
 930                 if (!RPC_IS_QUEUED(task)) {
 931                         spin_unlock(&queue->lock);
 932                         continue;
 933                 }
 934                 rpc_clear_running(task);
 935                 spin_unlock(&queue->lock);
 936                 if (task_is_async)
 937                         return;
 938 
 939                 /* sync task: sleep here */
 940                 dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
 941                 status = out_of_line_wait_on_bit(&task->tk_runstate,
 942                                 RPC_TASK_QUEUED, rpc_wait_bit_killable,
 943                                 TASK_KILLABLE);
 944                 if (status < 0) {
 945                         /*
 946                          * When a sync task receives a signal, it exits with
 947                          * -ERESTARTSYS. In order to catch any callbacks that
 948                          * clean up after sleeping on some queue, we don't
 949                          * break the loop here, but go around once more.
 950                          */
 951                         dprintk("RPC: %5u got signal\n", task->tk_pid);
 952                         set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
 953                         task->tk_rpc_status = -ERESTARTSYS;
 954                         rpc_exit(task, -ERESTARTSYS);
 955                 }
 956                 dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
 957         }
 958 
 959         dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
 960                         task->tk_status);
 961         /* Release all resources associated with the task */
 962         rpc_release_task(task);
 963 }
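
/*
 * Illustrative sketch (hypothetical; not from the upstream file): the
 * loop above simply runs whatever tk_action points at, so a client FSM
 * is a set of steps that each re-aim tk_action and optionally sleep:
 *
 *	static void my_step_two(struct rpc_task *task);	// hypothetical
 *
 *	static void my_step_one(struct rpc_task *task)
 *	{
 *		task->tk_action = my_step_two;	// next FSM state
 *		if (!my_resource_ready())	// hypothetical condition
 *			rpc_sleep_on(&my_queue, task, NULL);
 *		// if the task queued itself, the loop parks it here;
 *		// a later wake-up re-enters the FSM at my_step_two
 *	}
 */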
 964 
 965 /*
 966  * User-visible entry point to the scheduler.
 967  *
 968  * This may be called recursively if e.g. an async NFS task updates
 969  * the attributes and finds that dirty pages must be flushed.
 970  * NOTE: Upon exit of this function the task is guaranteed to be
  971  *       released. In particular note that rpc_release_task() will
  972  *       have been called, so your task memory may have been freed.
 973  */
 974 void rpc_execute(struct rpc_task *task)
 975 {
 976         bool is_async = RPC_IS_ASYNC(task);
 977 
 978         rpc_set_active(task);
 979         rpc_make_runnable(rpciod_workqueue, task);
 980         if (!is_async)
 981                 __rpc_execute(task);
 982 }
 983 
 984 static void rpc_async_schedule(struct work_struct *work)
 985 {
 986         unsigned int pflags = memalloc_nofs_save();
 987 
 988         __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 989         memalloc_nofs_restore(pflags);
 990 }
 991 
 992 /**
 993  * rpc_malloc - allocate RPC buffer resources
 994  * @task: RPC task
 995  *
 996  * A single memory region is allocated, which is split between the
 997  * RPC call and RPC reply that this task is being used for. When
 998  * this RPC is retired, the memory is released by calling rpc_free.
 999  *
1000  * To prevent rpciod from hanging, this allocator never sleeps,
 1001  * returning -ENOMEM and suppressing the warning if the request cannot
1002  * be serviced immediately. The caller can arrange to sleep in a
1003  * way that is safe for rpciod.
1004  *
1005  * Most requests are 'small' (under 2KiB) and can be serviced from a
1006  * mempool, ensuring that NFS reads and writes can always proceed,
1007  * and that there is good locality of reference for these buffers.
1008  */
1009 int rpc_malloc(struct rpc_task *task)
1010 {
1011         struct rpc_rqst *rqst = task->tk_rqstp;
1012         size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
1013         struct rpc_buffer *buf;
1014         gfp_t gfp = GFP_NOFS;
1015 
1016         if (RPC_IS_SWAPPER(task))
1017                 gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
1018 
1019         size += sizeof(struct rpc_buffer);
1020         if (size <= RPC_BUFFER_MAXSIZE)
1021                 buf = mempool_alloc(rpc_buffer_mempool, gfp);
1022         else
1023                 buf = kmalloc(size, gfp);
1024 
1025         if (!buf)
1026                 return -ENOMEM;
1027 
1028         buf->len = size;
1029         dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
1030                         task->tk_pid, size, buf);
1031         rqst->rq_buffer = buf->data;
1032         rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
1033         return 0;
1034 }
1035 EXPORT_SYMBOL_GPL(rpc_malloc);
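
/*
 * Layout note (added for exposition): the single allocation made above
 * is carved up as
 *
 *	[ struct rpc_buffer | call buffer (rq_callsize) | reply buffer (rq_rcvsize) ]
 *	  ^buf                ^rqst->rq_buffer            ^rqst->rq_rbuffer
 *
 * which is why rpc_free() can recover the header, and hence the
 * original length, with container_of() from rq_buffer alone.
 */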
1036 
1037 /**
1038  * rpc_free - free RPC buffer resources allocated via rpc_malloc
1039  * @task: RPC task
1040  *
1041  */
1042 void rpc_free(struct rpc_task *task)
1043 {
1044         void *buffer = task->tk_rqstp->rq_buffer;
1045         size_t size;
1046         struct rpc_buffer *buf;
1047 
1048         buf = container_of(buffer, struct rpc_buffer, data);
1049         size = buf->len;
1050 
1051         dprintk("RPC:       freeing buffer of size %zu at %p\n",
1052                         size, buf);
1053 
1054         if (size <= RPC_BUFFER_MAXSIZE)
1055                 mempool_free(buf, rpc_buffer_mempool);
1056         else
1057                 kfree(buf);
1058 }
1059 EXPORT_SYMBOL_GPL(rpc_free);
1060 
1061 /*
1062  * Creation and deletion of RPC task structures
1063  */
1064 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
1065 {
1066         memset(task, 0, sizeof(*task));
1067         atomic_set(&task->tk_count, 1);
1068         task->tk_flags  = task_setup_data->flags;
1069         task->tk_ops = task_setup_data->callback_ops;
1070         task->tk_calldata = task_setup_data->callback_data;
1071         INIT_LIST_HEAD(&task->tk_task);
1072 
1073         task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
1074         task->tk_owner = current->tgid;
1075 
1076         /* Initialize workqueue for async tasks */
1077         task->tk_workqueue = task_setup_data->workqueue;
1078 
1079         task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
1080                         xprt_get(task_setup_data->rpc_xprt));
1081 
1082         task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
1083 
1084         if (task->tk_ops->rpc_call_prepare != NULL)
1085                 task->tk_action = rpc_prepare_task;
1086 
1087         rpc_init_task_statistics(task);
1088 
1089         dprintk("RPC:       new task initialized, procpid %u\n",
1090                                 task_pid_nr(current));
1091 }
1092 
1093 static struct rpc_task *
1094 rpc_alloc_task(void)
1095 {
1096         return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
1097 }
1098 
1099 /*
1100  * Create a new task for the specified client.
1101  */
1102 struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
1103 {
1104         struct rpc_task *task = setup_data->task;
1105         unsigned short flags = 0;
1106 
1107         if (task == NULL) {
1108                 task = rpc_alloc_task();
1109                 flags = RPC_TASK_DYNAMIC;
1110         }
1111 
1112         rpc_init_task(task, setup_data);
1113         task->tk_flags |= flags;
1114         dprintk("RPC:       allocated task %p\n", task);
1115         return task;
1116 }
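
/*
 * Usage sketch (hypothetical; not from the upstream file): callers
 * normally fill in a struct rpc_task_setup and let rpc_run_task() (in
 * clnt.c) invoke rpc_new_task() and rpc_execute() on their behalf:
 *
 *	struct rpc_task_setup task_setup_data = {
 *		.rpc_client   = clnt,		// hypothetical rpc_clnt
 *		.rpc_message  = &msg,		// hypothetical rpc_message
 *		.callback_ops = &my_call_ops,	// hypothetical rpc_call_ops
 *		.flags        = RPC_TASK_ASYNC,
 *	};
 *
 *	task = rpc_run_task(&task_setup_data);
 */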
1117 
1118 /*
1119  * rpc_free_task - release rpc task and perform cleanups
1120  *
1121  * Note that we free up the rpc_task _after_ rpc_release_calldata()
1122  * in order to work around a workqueue dependency issue.
1123  *
1124  * Tejun Heo states:
1125  * "Workqueue currently considers two work items to be the same if they're
1126  * on the same address and won't execute them concurrently - ie. it
1127  * makes a work item which is queued again while being executed wait
1128  * for the previous execution to complete.
1129  *
1130  * If a work function frees the work item, and then waits for an event
1131  * which should be performed by another work item and *that* work item
1132  * recycles the freed work item, it can create a false dependency loop.
1133  * There really is no reliable way to detect this short of verifying
1134  * every memory free."
1135  *
1136  */
1137 static void rpc_free_task(struct rpc_task *task)
1138 {
1139         unsigned short tk_flags = task->tk_flags;
1140 
1141         put_rpccred(task->tk_op_cred);
1142         rpc_release_calldata(task->tk_ops, task->tk_calldata);
1143 
1144         if (tk_flags & RPC_TASK_DYNAMIC) {
1145                 dprintk("RPC: %5u freeing task\n", task->tk_pid);
1146                 mempool_free(task, rpc_task_mempool);
1147         }
1148 }
1149 
1150 static void rpc_async_release(struct work_struct *work)
1151 {
1152         unsigned int pflags = memalloc_nofs_save();
1153 
1154         rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
1155         memalloc_nofs_restore(pflags);
1156 }
1157 
1158 static void rpc_release_resources_task(struct rpc_task *task)
1159 {
1160         xprt_release(task);
1161         if (task->tk_msg.rpc_cred) {
1162                 put_cred(task->tk_msg.rpc_cred);
1163                 task->tk_msg.rpc_cred = NULL;
1164         }
1165         rpc_task_release_client(task);
1166 }
1167 
1168 static void rpc_final_put_task(struct rpc_task *task,
1169                 struct workqueue_struct *q)
1170 {
1171         if (q != NULL) {
1172                 INIT_WORK(&task->u.tk_work, rpc_async_release);
1173                 queue_work(q, &task->u.tk_work);
1174         } else
1175                 rpc_free_task(task);
1176 }
1177 
1178 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1179 {
1180         if (atomic_dec_and_test(&task->tk_count)) {
1181                 rpc_release_resources_task(task);
1182                 rpc_final_put_task(task, q);
1183         }
1184 }
1185 
1186 void rpc_put_task(struct rpc_task *task)
1187 {
1188         rpc_do_put_task(task, NULL);
1189 }
1190 EXPORT_SYMBOL_GPL(rpc_put_task);
1191 
1192 void rpc_put_task_async(struct rpc_task *task)
1193 {
1194         rpc_do_put_task(task, task->tk_workqueue);
1195 }
1196 EXPORT_SYMBOL_GPL(rpc_put_task_async);
1197 
1198 static void rpc_release_task(struct rpc_task *task)
1199 {
1200         dprintk("RPC: %5u release task\n", task->tk_pid);
1201 
1202         WARN_ON_ONCE(RPC_IS_QUEUED(task));
1203 
1204         rpc_release_resources_task(task);
1205 
1206         /*
1207          * Note: at this point we have been removed from rpc_clnt->cl_tasks,
1208          * so it should be safe to use task->tk_count as a test for whether
1209          * or not any other processes still hold references to our rpc_task.
1210          */
1211         if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1212                 /* Wake up anyone who may be waiting for task completion */
1213                 if (!rpc_complete_task(task))
1214                         return;
1215         } else {
1216                 if (!atomic_dec_and_test(&task->tk_count))
1217                         return;
1218         }
1219         rpc_final_put_task(task, task->tk_workqueue);
1220 }
1221 
1222 int rpciod_up(void)
1223 {
1224         return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
1225 }
1226 
1227 void rpciod_down(void)
1228 {
1229         module_put(THIS_MODULE);
1230 }
1231 
1232 /*
1233  * Start up the rpciod workqueue.
1234  */
1235 static int rpciod_start(void)
1236 {
1237         struct workqueue_struct *wq;
1238 
1239         /*
1240          * Create the rpciod thread and wait for it to start.
1241          */
1242         dprintk("RPC:       creating workqueue rpciod\n");
1243         wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
1244         if (!wq)
1245                 goto out_failed;
1246         rpciod_workqueue = wq;
1247         /* Note: highpri because network receive is latency sensitive */
1248         wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
1249         if (!wq)
1250                 goto free_rpciod;
1251         xprtiod_workqueue = wq;
1252         return 1;
1253 free_rpciod:
1254         wq = rpciod_workqueue;
1255         rpciod_workqueue = NULL;
1256         destroy_workqueue(wq);
1257 out_failed:
1258         return 0;
1259 }
1260 
1261 static void rpciod_stop(void)
1262 {
1263         struct workqueue_struct *wq = NULL;
1264 
1265         if (rpciod_workqueue == NULL)
1266                 return;
1267         dprintk("RPC:       destroying workqueue rpciod\n");
1268 
1269         wq = rpciod_workqueue;
1270         rpciod_workqueue = NULL;
1271         destroy_workqueue(wq);
1272         wq = xprtiod_workqueue;
1273         xprtiod_workqueue = NULL;
1274         destroy_workqueue(wq);
1275 }
1276 
1277 void
1278 rpc_destroy_mempool(void)
1279 {
1280         rpciod_stop();
1281         mempool_destroy(rpc_buffer_mempool);
1282         mempool_destroy(rpc_task_mempool);
1283         kmem_cache_destroy(rpc_task_slabp);
1284         kmem_cache_destroy(rpc_buffer_slabp);
1285         rpc_destroy_wait_queue(&delay_queue);
1286 }
1287 
1288 int
1289 rpc_init_mempool(void)
1290 {
1291         /*
1292          * The following is not strictly a mempool initialisation,
1293          * but there is no harm in doing it here
1294          */
1295         rpc_init_wait_queue(&delay_queue, "delayq");
1296         if (!rpciod_start())
1297                 goto err_nomem;
1298 
1299         rpc_task_slabp = kmem_cache_create("rpc_tasks",
1300                                              sizeof(struct rpc_task),
1301                                              0, SLAB_HWCACHE_ALIGN,
1302                                              NULL);
1303         if (!rpc_task_slabp)
1304                 goto err_nomem;
1305         rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1306                                              RPC_BUFFER_MAXSIZE,
1307                                              0, SLAB_HWCACHE_ALIGN,
1308                                              NULL);
1309         if (!rpc_buffer_slabp)
1310                 goto err_nomem;
1311         rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1312                                                     rpc_task_slabp);
1313         if (!rpc_task_mempool)
1314                 goto err_nomem;
1315         rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1316                                                       rpc_buffer_slabp);
1317         if (!rpc_buffer_mempool)
1318                 goto err_nomem;
1319         return 0;
1320 err_nomem:
1321         rpc_destroy_mempool();
1322         return -ENOMEM;
1323 }
