/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;

static void			rpc_async_schedule(struct work_struct *);
static void			rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue;

/*
 * Disable the timer for a given RPC task. Should be called with the
 * queue->lock held and bottom halves disabled in order to avoid races
 * with __rpc_queue_timer_fn().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	list_del(&task->u.tk_wait.timer_list);
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %u ms\n",
		task->tk_pid, jiffies_to_msecs(task->tk_timeout));

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

/*
 * If the task at the head of the current priority list belongs to the
 * owner that was just being serviced, move it to the tail so that other
 * owners get a turn when this priority level is serviced next.
 */
static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
{
	struct list_head *q = &queue->tasks[queue->priority];
	struct rpc_task *task;

	if (!list_empty(q)) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		if (task->tk_owner == queue->owner)
			list_move_tail(&task->u.tk_wait.list, q);
	}
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	if (queue->priority != priority) {
		/* Fairness: rotate the list when changing priority */
		rpc_rotate_queue_owner(queue);
		queue->priority = priority;
	}
}

/* Start a fresh scheduling batch for the given task owner. */
static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
	queue->owner = pid;
	queue->nr = RPC_BATCH_COUNT;
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_owner(queue, 0);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	struct list_head *q;
	struct rpc_task *t;

	INIT_LIST_HEAD(&task->u.tk_wait.links);
	if (unlikely(queue_priority > queue->maxpriority))
		queue_priority = queue->maxpriority;
	if (queue_priority > queue->priority)
		rpc_set_waitqueue_priority(queue, queue_priority);
	q = &queue->tasks[queue_priority];
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
			return;
		}
	}
	list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}
/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
	}
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
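
/*
 * Illustrative sketch (not part of this file): a subsystem that needs its
 * own wait queue typically embeds one in its private state and pairs
 * rpc_init_wait_queue() with rpc_destroy_wait_queue(), so that the queue
 * timer is torn down before the memory disappears. The foo_* names below
 * are hypothetical.
 *
 *	struct foo_state {
 *		struct rpc_wait_queue	pending;
 *	};
 *
 *	static void foo_setup(struct foo_state *foo)
 *	{
 *		rpc_init_wait_queue(&foo->pending, "foo_pending");
 *	}
 *
 *	static void foo_teardown(struct foo_state *foo)
 *	{
 *		rpc_destroy_wait_queue(&foo->pending);
 *	}
 */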

static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
	trace_rpc_task_begin(task->tk_client, task, NULL);

	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task->tk_client, task, NULL);

	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	if (action == NULL)
		action = rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
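
/*
 * Illustrative sketch (not part of this file): a caller that starts an
 * asynchronous task and later needs its result typically holds an extra
 * task reference via rpc_run_task() (defined in clnt.c), waits on the
 * RPC_TASK_ACTIVE bit through the rpc_wait_for_completion_task() wrapper
 * declared in sunrpc/sched.h, and then drops the reference:
 *
 *	task = rpc_run_task(&task_setup_data);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	status = rpc_wait_for_completion_task(task);
 *	if (status == 0)
 *		status = task->tk_status;
 *	rpc_put_task(task);
 *	return status;
 */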

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct rpc_task *task)
{
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(rpciod_workqueue, &task->u.tk_work);
	} else
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		rpc_action action,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	trace_rpc_task_sleep(task->tk_client, task, q);

	__rpc_add_wait_queue(q, task, queue_priority);

	WARN_ON_ONCE(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, int priority)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
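
/*
 * Illustrative sketch (not part of this file): rpc_sleep_on() is normally
 * called from one of the task's own state-machine steps. Setting
 * tk_timeout first arms the queue timer; the action callback, if any, runs
 * first the next time the task executes after being woken up. The foo_*
 * names below are hypothetical.
 *
 *	static void foo_wait_for_reply(struct rpc_task *task)
 *	{
 *		task->tk_timeout = 5 * HZ;
 *		rpc_sleep_on(&foo_pending_queue, task, foo_timeout_callback);
 *	}
 */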

/**
 * __rpc_do_wake_up_task - wake up a single rpc_task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task->tk_client, task, queue);

	__rpc_remove_wait_queue(queue, task);

	rpc_make_runnable(task);

	dprintk("RPC:       __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (RPC_IS_QUEUED(task)) {
		smp_rmb();
		if (task->tk_waitqueue == queue)
			__rpc_do_wake_up_task(queue, task);
	}
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
		if (queue->owner == task->tk_owner) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->u.tk_wait.list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		goto new_owner;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
	rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
	return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
	return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	task = __rpc_find_next_queued(queue);
	if (task != NULL) {
		if (func(task, data))
			rpc_wake_up_task_queue_locked(queue, task);
		else
			task = NULL;
	}
	spin_unlock_bh(&queue->lock);

	return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);
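
/*
 * Illustrative sketch (not part of this file): the predicate passed to
 * rpc_wake_up_first() lets the caller veto the wakeup, for example when it
 * can hand a resource to only one task at a time. Returning false leaves
 * the task queued and makes rpc_wake_up_first() return NULL. The foo_*
 * names below are hypothetical.
 *
 *	static bool foo_may_wake(struct rpc_task *task, void *data)
 *	{
 *		struct foo_resource *res = data;
 *
 *		return foo_reserve(res, task) == 0;
 *	}
 *
 *	next = rpc_wake_up_first(&foo_waitq, foo_may_wake, res);
 */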

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(unsigned long ptr)
{
	struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
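
/*
 * Illustrative sketch (not part of this file): rpc_delay() is the usual way
 * for a state-machine step to back off and retry. The task sleeps on
 * delay_queue, and __rpc_atrun() clears the -ETIMEDOUT status so the
 * retried action starts with a clean slate. The foo_* names below are
 * hypothetical.
 *
 *	static void foo_reserve_slot(struct rpc_task *task)
 *	{
 *		if (foo_resource_busy()) {
 *			task->tk_action = foo_reserve_slot;
 *			rpc_delay(task, HZ >> 2);
 *			return;
 *		}
 *		...
 *	}
 */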

/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);

	rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}

void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	if (RPC_IS_QUEUED(task))
		rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);
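
/*
 * Illustrative sketch (not part of this file): rpc_exit() is how callers and
 * rpc_call_prepare/rpc_call_done callbacks abort a task early. It points
 * tk_action at rpc_exit_task() and, if the task is asleep on a queue, wakes
 * it so that the exit path runs promptly. The foo_* names and the choice of
 * error code below are hypothetical.
 *
 *	static void foo_prepare(struct rpc_task *task, void *calldata)
 *	{
 *		struct foo_args *args = calldata;
 *
 *		if (!foo_still_valid(args)) {
 *			rpc_exit(task, -EIO);
 *			return;
 *		}
 *		...
 *	}
 */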

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
	if (ops->rpc_release != NULL)
		ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Execute any pending callback first.
		 */
		do_action = task->tk_callback;
		task->tk_callback = NULL;
		if (do_action == NULL) {
			/*
			 * Perform the next FSM step.
			 * tk_action may be NULL if the task has been killed.
			 * In particular, note that rpc_killall_tasks may
			 * do this at any time, so beware when dereferencing.
			 */
			do_action = task->tk_action;
			if (do_action == NULL)
				break;
		}
		trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. After that point we cannot assume the
		 * rpc_task pointer is still safe to dereference.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
		}
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
	bool is_async = RPC_IS_ASYNC(task);

	rpc_set_active(task);
	rpc_make_runnable(task);
	if (!is_async)
		__rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}
/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL and suppressing the allocation-failure warning if the
 * request cannot be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
	struct rpc_buffer *buf;
	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;

	if (RPC_IS_SWAPPER(task))
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	size += sizeof(struct rpc_buffer);
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
	else
		buf = kmalloc(size, gfp);

	if (!buf)
		return NULL;

	buf->len = size;
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
			task->tk_pid, size, buf);
	return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
	size_t size;
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;

	dprintk("RPC:       freeing buffer of size %zu at %p\n",
			size, buf);

	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);
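
/*
 * Illustrative sketch (not part of this file): rpc_malloc() and rpc_free()
 * are typically reached through a transport's buf_alloc/buf_free methods
 * when the client sets up a request's send and receive buffers. The
 * returned pointer is the data area; the struct rpc_buffer header sits
 * immediately in front of it, so the two calls must always be paired:
 *
 *	buf = rpc_malloc(task, bufsiz);
 *	if (buf == NULL)
 *		return -ENOMEM;
 *	...
 *	rpc_free(buf);
 */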

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	atomic_set(&task->tk_count, 1);
	task->tk_flags  = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC:       new task initialized, procpid %u\n",
				task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		if (task == NULL) {
			rpc_release_calldata(setup_data->callback_ops,
					setup_data->callback_data);
			return ERR_PTR(-ENOMEM);
		}
		flags = RPC_TASK_DYNAMIC;
	}

	rpc_init_task(task, setup_data);
	task->tk_flags |= flags;
	dprintk("RPC:       allocated task %p\n", task);
	return task;
}
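
/*
 * Illustrative sketch (not part of this file): rpc_new_task() is normally
 * reached via rpc_run_task() in clnt.c, which fills in a struct
 * rpc_task_setup (declared in include/linux/sunrpc/sched.h), binds the
 * client and message, and then calls rpc_execute(). A typical asynchronous
 * caller might look roughly like this, with foo_call_ops hypothetical:
 *
 *	struct rpc_task_setup task_setup_data = {
 *		.rpc_client	= clnt,
 *		.rpc_message	= &msg,
 *		.callback_ops	= &foo_call_ops,
 *		.callback_data	= calldata,
 *		.flags		= RPC_TASK_ASYNC,
 *	};
 *	struct rpc_task *task;
 *
 *	task = rpc_run_task(&task_setup_data);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	rpc_put_task(task);
 *	return 0;
 */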

/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
static void rpc_free_task(struct rpc_task *task)
{
	unsigned short tk_flags = task->tk_flags;

	rpc_release_calldata(task->tk_ops, task->tk_calldata);

	if (tk_flags & RPC_TASK_DYNAMIC) {
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
}

static void rpc_async_release(struct work_struct *work)
{
	rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}

static void rpc_release_resources_task(struct rpc_task *task)
{
	xprt_release(task);
	if (task->tk_msg.rpc_cred) {
		put_rpccred(task->tk_msg.rpc_cred);
		task->tk_msg.rpc_cred = NULL;
	}
	rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
		INIT_WORK(&task->u.tk_work, rpc_async_release);
		queue_work(q, &task->u.tk_work);
	} else
		rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (atomic_dec_and_test(&task->tk_count)) {
		rpc_release_resources_task(task);
		rpc_final_put_task(task, q);
	}
}

void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
	return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
	struct workqueue_struct *wq;

	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	dprintk("RPC:       creating workqueue rpciod\n");
	/* Note: highpri because network receive is latency sensitive */
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	rpciod_workqueue = wq;
	return rpciod_workqueue != NULL;
}

static void rpciod_stop(void)
{
	struct workqueue_struct *wq = NULL;

	if (rpciod_workqueue == NULL)
		return;
	dprintk("RPC:       destroying workqueue rpciod\n");

	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}