/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */

/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own request set to which other users
 * may add requests when they do not want to wait for their completion.
 * ptlrpcd takes care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important never to block significantly (especially on RPCs!) within
 * such a completion handler, or a deadlock might occur: ptlrpcd would enter
 * a callback that sends another RPC and waits for it to return, and while it
 * waits ptlrpcd is completely blocked; so, for example, if an import fails,
 * recovery cannot progress because connection requests are also sent by
 * ptlrpcd.
 *
 * @{
 */
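
/*
 * Illustrative usage sketch (not a definitive recipe; "my_interpret" stands
 * for a caller-supplied completion callback and is not defined here): a
 * client code path builds the request as usual, optionally sets an interpret
 * callback, and then hands the request off to ptlrpcd instead of waiting for
 * it synchronously:
 *
 *	req->rq_interpret_reply = my_interpret;
 *	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 *
 * ptlrpcd_add_req() below selects a ptlrpcd thread via ptlrpcd_select_pc()
 * and queues the request on that thread's set; my_interpret later runs in
 * ptlrpcd context and is subject to the "never block" rule above.
 */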

#define DEBUG_SUBSYSTEM S_RPC

#include "../../include/linux/libcfs/libcfs.h"

#include "../include/lustre_net.h"
#include "../include/lustre_lib.h"
#include "../include/lustre_ha.h"
#include "../include/obd_class.h"	/* for obd_zombie */
#include "../include/obd_support.h"	/* for OBD_FAIL_CHECK */
#include "../include/cl_object.h"	/* cl_env_{get,put}() */
#include "../include/lprocfs_status.h"

#include "ptlrpc_internal.h"

struct ptlrpcd {
	int		pd_size;
	int		pd_index;
	int		pd_nthreads;
	struct ptlrpcd_ctl pd_thread_rcv;
	struct ptlrpcd_ctl pd_threads[0];
};
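
/*
 * Note (descriptive, based on ptlrpcd_init() below): a struct ptlrpcd is a
 * single runtime-sized allocation.  ptlrpcd_init() computes
 *
 *	size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
 *
 * and allocates that many bytes, so pd_threads[] holds one ptlrpcd_ctl per
 * worker thread directly behind the fixed members above, while pd_thread_rcv
 * is the dedicated recovery thread.
 */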

static int max_ptlrpcds;
module_param(max_ptlrpcds, int, 0644);
MODULE_PARM_DESC(max_ptlrpcds, "Max ptlrpcd thread count to be started.");

static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
module_param(ptlrpcd_bind_policy, int, 0644);
MODULE_PARM_DESC(ptlrpcd_bind_policy, "Ptlrpcd threads binding mode.");
static struct ptlrpcd *ptlrpcds;

struct mutex ptlrpcd_mutex;
static int ptlrpcd_users;

void ptlrpcd_wake(struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *rq_set = req->rq_set;

	LASSERT(rq_set != NULL);

	wake_up(&rq_set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);

static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
{
	int idx = 0;

	if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
		return &ptlrpcds->pd_thread_rcv;

	switch (policy) {
	case PDL_POLICY_SAME:
		idx = smp_processor_id() % ptlrpcds->pd_nthreads;
		break;
	case PDL_POLICY_LOCAL:
		/* Before the CPU partition patches are available, process it
		 * the same way as "PDL_POLICY_ROUND". */
# ifdef CFS_CPU_MODE_NUMA
# warning "fix this code to use new CPU partition APIs"
# endif
		/* Fall through to PDL_POLICY_ROUND until the CPU partition
		 * patches are available. */
		index = -1;
	case PDL_POLICY_PREFERRED:
		if (index >= 0 && index < num_online_cpus()) {
			idx = index % ptlrpcds->pd_nthreads;
			break;
		}
		/* Fall through to PDL_POLICY_ROUND for bad index. */
	default:
		/* Fall through to PDL_POLICY_ROUND for unknown policy. */
	case PDL_POLICY_ROUND:
		/* We do not care whether it is strict load balance. */
		idx = ptlrpcds->pd_index + 1;
		if (idx == smp_processor_id())
			idx++;
		idx %= ptlrpcds->pd_nthreads;
		ptlrpcds->pd_index = idx;
		break;
	}

	return &ptlrpcds->pd_threads[idx];
}

/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests from the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
	struct list_head *tmp, *pos;
	struct ptlrpcd_ctl *pc;
	struct ptlrpc_request_set *new;
	int count, i;

	pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
	new = pc->pc_set;

	list_for_each_safe(pos, tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(pos, struct ptlrpc_request,
				       rq_set_chain);

		LASSERT(req->rq_phase == RQ_PHASE_NEW);
		req->rq_set = new;
		req->rq_queued_time = cfs_time_current();
	}

	spin_lock(&new->set_new_req_lock);
	list_splice_init(&set->set_requests, &new->set_new_requests);
	i = atomic_read(&set->set_remaining);
	count = atomic_add_return(i, &new->set_new_count);
	atomic_set(&set->set_remaining, 0);
	spin_unlock(&new->set_new_req_lock);
	if (count == i) {
		wake_up(&new->set_waitq);

		/* XXX: It may be unnecessary to wake up all the partners.
		 *      But to guarantee that the async RPCs are processed
		 *      ASAP, we have no better choice.  It may be fixed in
		 *      the future. */
		for (i = 0; i < pc->pc_npartners; i++)
			wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
	}
}
EXPORT_SYMBOL(ptlrpcd_add_rqset);

/**
 * Return the number of transferred RPCs.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
			       struct ptlrpc_request_set *src)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	int rc = 0;

	spin_lock(&src->set_new_req_lock);
	if (likely(!list_empty(&src->set_new_requests))) {
		list_for_each_safe(pos, tmp, &src->set_new_requests) {
			req = list_entry(pos, struct ptlrpc_request,
					     rq_set_chain);
			req->rq_set = des;
		}
		list_splice_init(&src->set_new_requests,
				     &des->set_requests);
		rc = atomic_read(&src->set_new_count);
		atomic_add(rc, &des->set_remaining);
		atomic_set(&src->set_new_count, 0);
	}
	spin_unlock(&src->set_new_req_lock);
	return rc;
}

/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
	struct ptlrpcd_ctl *pc;

	if (req->rq_reqmsg)
		lustre_msg_set_jobid(req->rq_reqmsg, NULL);

	spin_lock(&req->rq_lock);
	if (req->rq_invalid_rqset) {
		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
						     back_to_sleep, NULL);

		req->rq_invalid_rqset = 0;
		spin_unlock(&req->rq_lock);
		l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
	} else if (req->rq_set) {
		/* If we have a valid "rq_set", just reuse it to avoid double
		 * linking. */
		LASSERT(req->rq_phase == RQ_PHASE_NEW);
		LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

		/* ptlrpc_check_set will decrease the count */
		atomic_inc(&req->rq_set->set_remaining);
		spin_unlock(&req->rq_lock);
		wake_up(&req->rq_set->set_waitq);
		return;
	} else {
		spin_unlock(&req->rq_lock);
	}

	pc = ptlrpcd_select_pc(req, policy, idx);

	DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
		  req, pc->pc_name, pc->pc_index);

	ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);

static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
	atomic_inc(&set->set_refcount);
}

/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	struct ptlrpc_request_set *set = pc->pc_set;
	int rc = 0;
	int rc2;

	if (atomic_read(&set->set_new_count)) {
		spin_lock(&set->set_new_req_lock);
		if (likely(!list_empty(&set->set_new_requests))) {
			list_splice_init(&set->set_new_requests,
					     &set->set_requests);
			atomic_add(atomic_read(&set->set_new_count),
				       &set->set_remaining);
			atomic_set(&set->set_new_count, 0);
			/*
			 * Need to calculate its timeout.
			 */
			rc = 1;
		}
		spin_unlock(&set->set_new_req_lock);
	}

	/* We should call lu_env_refill() before handling new requests to make
	 * sure that the env keys the requests depend on really exist.
	 */
	rc2 = lu_env_refill(env);
	if (rc2 != 0) {
		/*
		 * XXX This is a very awkward situation, because
		 * execution can neither continue (request
		 * interpreters assume that env is set up), nor repeat
		 * the loop (as this potentially results in a tight
		 * loop of -ENOMEM's).
		 *
		 * Fortunately, refill only ever does something when
		 * new modules are loaded, i.e., early during boot up.
		 */
		CERROR("Failure to refill session: %d\n", rc2);
		return rc;
	}

	if (atomic_read(&set->set_remaining))
		rc |= ptlrpc_check_set(env, set);

	/* NB: ptlrpc_check_set has already moved completed requests to the
	 * head of set::set_requests */
	list_for_each_safe(pos, tmp, &set->set_requests) {
		req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
		if (req->rq_phase != RQ_PHASE_COMPLETE)
			break;

		list_del_init(&req->rq_set_chain);
		req->rq_set = NULL;
		ptlrpc_req_finished(req);
	}

	if (rc == 0) {
		/*
		 * If new requests have been added, make sure to wake up.
		 */
		rc = atomic_read(&set->set_new_count);

		/* If we have nothing to do, check whether we can take some
		 * work from our partner threads. */
		if (rc == 0 && pc->pc_npartners > 0) {
			struct ptlrpcd_ctl *partner;
			struct ptlrpc_request_set *ps;
			int first = pc->pc_cursor;

			do {
				partner = pc->pc_partners[pc->pc_cursor++];
				if (pc->pc_cursor >= pc->pc_npartners)
					pc->pc_cursor = 0;
				if (partner == NULL)
					continue;

				spin_lock(&partner->pc_lock);
				ps = partner->pc_set;
				if (ps == NULL) {
					spin_unlock(&partner->pc_lock);
					continue;
				}

				ptlrpc_reqset_get(ps);
				spin_unlock(&partner->pc_lock);

				if (atomic_read(&ps->set_new_count)) {
					rc = ptlrpcd_steal_rqset(set, ps);
					if (rc > 0)
						CDEBUG(D_RPCTRACE, "transfer %d async RPCs [%d->%d]\n",
						       rc, partner->pc_index,
						       pc->pc_index);
				}
				ptlrpc_reqset_put(ps);
			} while (rc == 0 && pc->pc_cursor != first);
		}
	}

	return rc;
}

/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set containing the RPCs and sends them.
 */
static int ptlrpcd(void *arg)
{
	struct ptlrpcd_ctl *pc = arg;
	struct ptlrpc_request_set *set = pc->pc_set;
	struct lu_env env = { .le_ses = NULL };
	int rc, exit = 0;

	unshare_fs_struct();
#if defined(CONFIG_SMP)
	if (test_bit(LIOD_BIND, &pc->pc_flags)) {
		int index = pc->pc_index;

		if (index >= 0 && index < num_possible_cpus()) {
			while (!cpu_online(index)) {
				if (++index >= num_possible_cpus())
					index = 0;
			}
			set_cpus_allowed_ptr(current,
					cpumask_of_node(cpu_to_node(index)));
		}
	}
#endif
	/*
	 * XXX So far only the "client" ptlrpcd uses an environment. In
	 * the future, the ptlrpcd thread (or a thread-set) has to be given
	 * an argument describing its "scope".
	 */
	rc = lu_context_init(&env.le_ctx,
			     LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
	complete(&pc->pc_starting);

	if (rc != 0)
		return rc;

	/*
	 * This mainloop strongly resembles ptlrpc_set_wait() except that our
	 * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
	 * there are requests in the set. New requests come in on the set's
	 * new_req_list and ptlrpcd_check() moves them into the set.
	 */
	do {
		struct l_wait_info lwi;
		int timeout;

		timeout = ptlrpc_set_next_timeout(set);
		lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
				  ptlrpc_expired_set, set);

		lu_context_enter(&env.le_ctx);
		l_wait_event(set->set_waitq,
			     ptlrpcd_check(&env, pc), &lwi);
		lu_context_exit(&env.le_ctx);

		/*
		 * Abort inflight rpcs for forced stop case.
		 */
		if (test_bit(LIOD_STOP, &pc->pc_flags)) {
			if (test_bit(LIOD_FORCE, &pc->pc_flags))
				ptlrpc_abort_set(set);
			exit++;
		}

		/*
		 * Let's make one more loop to make sure that ptlrpcd_check()
		 * copied all raced new rpcs into the set so we can kill them.
		 */
	} while (exit < 2);

	/*
	 * Wait for inflight requests to drain.
	 */
	if (!list_empty(&set->set_requests))
		ptlrpc_set_wait(set);
	lu_context_fini(&env.le_ctx);

	complete(&pc->pc_finishing);

	return 0;
}

/* XXX: We want multiple CPU cores to share the async RPC load, so we start
 *      many ptlrpcd threads. We also want to reduce the ptlrpcd overhead
 *      caused by data transfers across CPU cores, so we bind each ptlrpcd
 *      thread to a specific CPU core. But binding all ptlrpcd threads may
 *      cause response delays when some CPU core(s) are busy with other load.
 *
 *      For example: for "ls -l", some async RPCs for statahead are assigned
 *      to ptlrpcd_0, and ptlrpcd_0 is bound to CPU_0, but CPU_0 may be quite
 *      busy with other, non-ptlrpcd work, like "ls -l" itself (we want the
 *      "ls -l" thread, the statahead thread, and the ptlrpcd thread to run
 *      in parallel). In such a case the statahead async RPCs cannot be
 *      processed in time, which is unexpected. If ptlrpcd_0 could be
 *      re-scheduled on another CPU core, it might be better, but that breaks
 *      the former data transfer policy.
 *
 *      So we should not blindly avoid the data transfer. We make a
 *      compromise: divide the ptlrpcd thread pool into two parts. One part
 *      is for bound mode; each ptlrpcd thread in this part is bound to some
 *      CPU core. The other part is for free mode; all the ptlrpcd threads in
 *      this part can be scheduled on any CPU core. We specify some
 *      partnership between bound mode ptlrpcd thread(s) and free mode
 *      ptlrpcd thread(s), and the async RPC load is shared among the
 *      partners.
 *
 *      This partly avoids data transfers across CPUs (as long as the bound
 *      mode ptlrpcd thread can be scheduled in time), and tries to guarantee
 *      that the async RPCs are processed ASAP (since the free mode ptlrpcd
 *      thread can be scheduled on any CPU core).
 *
 *      As for how to specify the partnership between bound mode ptlrpcd
 *      thread(s) and free mode ptlrpcd thread(s), the simplest way is to use
 *      a <free, bound> pair. In the future, we can specify a more complex
 *      partnership based on the CPU partition patches. But before such
 *      patches are available, we prefer to use the simplest one.
 */
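/*
 * Concrete example (descriptive, per the PDB_POLICY_PAIR case handled in
 * ptlrpcd_bind() below, not an additional mechanism): with nthreads == 4,
 * the odd-indexed threads ptlrpcd_1 and ptlrpcd_3 get LIOD_BIND set and are
 * partnered with the free threads ptlrpcd_0 and ptlrpcd_2 respectively, so
 * each <free, bound> pair shares its async RPC load through the partner
 * stealing done in ptlrpcd_check().
 */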
# ifdef CFS_CPU_MODE_NUMA
# warning "fix ptlrpcd_bind() to use new CPU partition APIs"
# endif
static int ptlrpcd_bind(int index, int max)
{
	struct ptlrpcd_ctl *pc;
	int rc = 0;
#if defined(CONFIG_NUMA)
	cpumask_t mask;
#endif

	LASSERT(index <= max - 1);
	pc = &ptlrpcds->pd_threads[index];
	switch (ptlrpcd_bind_policy) {
	case PDB_POLICY_NONE:
		pc->pc_npartners = -1;
		break;
	case PDB_POLICY_FULL:
		pc->pc_npartners = 0;
		set_bit(LIOD_BIND, &pc->pc_flags);
		break;
	case PDB_POLICY_PAIR:
		LASSERT(max % 2 == 0);
		pc->pc_npartners = 1;
		break;
	case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA)
	{
		int i;
		cpumask_copy(&mask, cpumask_of_node(cpu_to_node(index)));
		for (i = max; i < num_online_cpus(); i++)
			cpumask_clear_cpu(i, &mask);
		pc->pc_npartners = cpumask_weight(&mask) - 1;
		set_bit(LIOD_BIND, &pc->pc_flags);
	}
#else
		LASSERT(max >= 3);
		pc->pc_npartners = 2;
#endif
		break;
	default:
		CERROR("unknown ptlrpcd bind policy %d\n", ptlrpcd_bind_policy);
		rc = -EINVAL;
	}

	if (rc == 0 && pc->pc_npartners > 0) {
		OBD_ALLOC(pc->pc_partners,
			  sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
		if (pc->pc_partners == NULL) {
			pc->pc_npartners = 0;
			rc = -ENOMEM;
		} else {
			switch (ptlrpcd_bind_policy) {
			case PDB_POLICY_PAIR:
				if (index & 0x1) {
					set_bit(LIOD_BIND, &pc->pc_flags);
					pc->pc_partners[0] = &ptlrpcds->
						pd_threads[index - 1];
					ptlrpcds->pd_threads[index - 1].
						pc_partners[0] = pc;
				}
				break;
			case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA)
			{
				struct ptlrpcd_ctl *ppc;
				int i, pidx;
				/* Partners are cores in the same NUMA node.
				 * Set up partnership only with ptlrpcd
				 * threads that are already initialized.
				 */
				for (pidx = 0, i = 0; i < index; i++) {
					if (cpumask_test_cpu(i, &mask)) {
						ppc = &ptlrpcds->pd_threads[i];
						pc->pc_partners[pidx++] = ppc;
						ppc->pc_partners[ppc->
							  pc_npartners++] = pc;
					}
				}
				/* Adjust the number of partners to the number
				 * of partnerships actually set up. */
				pc->pc_npartners = pidx;
			}
#else
				if (index & 0x1)
					set_bit(LIOD_BIND, &pc->pc_flags);
				if (index > 0) {
					pc->pc_partners[0] = &ptlrpcds->
						pd_threads[index - 1];
					ptlrpcds->pd_threads[index - 1].
						pc_partners[1] = pc;
					if (index == max - 1) {
						pc->pc_partners[1] =
						&ptlrpcds->pd_threads[0];
						ptlrpcds->pd_threads[0].
						pc_partners[0] = pc;
					}
				}
#endif
				break;
			}
		}
	}

	return rc;
}

int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
	int rc;

	/*
	 * Do not allow starting a second thread for one pc.
	 */
	if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Starting second thread (%s) for same pc %p\n",
		      name, pc);
		return 0;
	}

	pc->pc_index = index;
	init_completion(&pc->pc_starting);
	init_completion(&pc->pc_finishing);
	spin_lock_init(&pc->pc_lock);
	strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
	pc->pc_set = ptlrpc_prep_set();
	if (pc->pc_set == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * So far only the "client" ptlrpcd uses an environment. In the future,
	 * the ptlrpcd thread (or a thread-set) has to be given an argument
	 * describing its "scope".
	 */
	rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
	if (rc != 0)
		goto out_set;

	{
		struct task_struct *task;

		if (index >= 0) {
			rc = ptlrpcd_bind(index, max);
			if (rc < 0)
				goto out_env;
		}

		task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
		if (IS_ERR(task)) {
			rc = PTR_ERR(task);
			goto out_env;
		}

		wait_for_completion(&pc->pc_starting);
	}
	return 0;

out_env:
	lu_context_fini(&pc->pc_env.le_ctx);

out_set:
	if (pc->pc_set != NULL) {
		struct ptlrpc_request_set *set = pc->pc_set;

		spin_lock(&pc->pc_lock);
		pc->pc_set = NULL;
		spin_unlock(&pc->pc_lock);
		ptlrpc_set_destroy(set);
	}
	clear_bit(LIOD_BIND, &pc->pc_flags);

out:
	clear_bit(LIOD_START, &pc->pc_flags);
	return rc;
}

void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		return;
	}

	set_bit(LIOD_STOP, &pc->pc_flags);
	if (force)
		set_bit(LIOD_FORCE, &pc->pc_flags);
	wake_up(&pc->pc_set->set_waitq);
}

void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
	struct ptlrpc_request_set *set = pc->pc_set;

	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		goto out;
	}

	wait_for_completion(&pc->pc_finishing);
	lu_context_fini(&pc->pc_env.le_ctx);

	spin_lock(&pc->pc_lock);
	pc->pc_set = NULL;
	spin_unlock(&pc->pc_lock);
	ptlrpc_set_destroy(set);

	clear_bit(LIOD_START, &pc->pc_flags);
	clear_bit(LIOD_STOP, &pc->pc_flags);
	clear_bit(LIOD_FORCE, &pc->pc_flags);
	clear_bit(LIOD_BIND, &pc->pc_flags);

out:
	if (pc->pc_npartners > 0) {
		LASSERT(pc->pc_partners != NULL);

		OBD_FREE(pc->pc_partners,
			 sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
		pc->pc_partners = NULL;
	}
	pc->pc_npartners = 0;
}

static void ptlrpcd_fini(void)
{
	int i;

	if (ptlrpcds != NULL) {
		for (i = 0; i < ptlrpcds->pd_nthreads; i++)
			ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
		for (i = 0; i < ptlrpcds->pd_nthreads; i++)
			ptlrpcd_free(&ptlrpcds->pd_threads[i]);
		ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
		ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
		OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
		ptlrpcds = NULL;
	}
}

static int ptlrpcd_init(void)
{
	int nthreads = num_online_cpus();
	char name[16];
	int size, i = -1, j, rc = 0;

	if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
		nthreads = max_ptlrpcds;
	if (nthreads < 2)
		nthreads = 2;
	if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
		ptlrpcd_bind_policy = PDB_POLICY_PAIR;
	else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
		nthreads &= ~1; /* make sure it is even */

	size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
	OBD_ALLOC(ptlrpcds, size);
	if (ptlrpcds == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	snprintf(name, sizeof(name), "ptlrpcd_rcv");
	set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
	rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
	if (rc < 0)
		goto out;

	/* XXX: We start nthreads ptlrpc daemons. Each of them can process any
	 *      non-recovery async RPC to improve overall async RPC efficiency.
	 *
	 *      But there are some issues with async I/O RPCs and async non-I/O
	 *      RPCs processed in the same set under some cases. The ptlrpcd
	 *      may be blocked by some async I/O RPC(s), and then other async
	 *      non-I/O RPC(s) cannot be processed in time.
	 *
	 *      Maybe we should distinguish blocking async RPCs from
	 *      non-blocking async RPCs, and process them in different ptlrpcd
	 *      sets to avoid unnecessary dependencies. But how to distribute
	 *      the async RPC load among all the ptlrpc daemons then becomes
	 *      another problem. */
	for (i = 0; i < nthreads; i++) {
		snprintf(name, sizeof(name), "ptlrpcd_%d", i);
		rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
		if (rc < 0)
			goto out;
	}

	ptlrpcds->pd_size = size;
	ptlrpcds->pd_index = 0;
	ptlrpcds->pd_nthreads = nthreads;

out:
	if (rc != 0 && ptlrpcds != NULL) {
		for (j = 0; j <= i; j++)
			ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
		for (j = 0; j <= i; j++)
			ptlrpcd_free(&ptlrpcds->pd_threads[j]);
		ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
		ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
		OBD_FREE(ptlrpcds, size);
		ptlrpcds = NULL;
	}

	return rc;
}

int ptlrpcd_addref(void)
{
	int rc = 0;

	mutex_lock(&ptlrpcd_mutex);
	if (++ptlrpcd_users == 1)
		rc = ptlrpcd_init();
	mutex_unlock(&ptlrpcd_mutex);
	return rc;
}
EXPORT_SYMBOL(ptlrpcd_addref);

void ptlrpcd_decref(void)
{
	mutex_lock(&ptlrpcd_mutex);
	if (--ptlrpcd_users == 0)
		ptlrpcd_fini();
	mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
/** @} ptlrpcd */