/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */

/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own request set, to which other
 * users may add requests when they do not want to wait for completion.
 * ptlrpcd takes care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important never to block significantly (especially on RPCs!)
 * within such a completion handler, or a deadlock can occur: ptlrpcd
 * enters a callback that attempts to send another RPC and waits for it
 * to return, during which time ptlrpcd is completely blocked; so, for
 * example, if an import fails, recovery cannot progress because
 * connection requests are also sent by ptlrpcd.
 *
 * @{
 */
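
/*
 * Usage sketch (illustrative, not taken from any specific caller): a
 * client fills in a request, attaches a reply interpreter, and hands
 * the request to ptlrpcd instead of waiting synchronously. The
 * interpreter name below is hypothetical; it runs in ptlrpcd context
 * and therefore must never send an RPC and wait for it (see the
 * deadlock note above).
 *
 *	static int foo_async_interpret(const struct lu_env *env,
 *				       struct ptlrpc_request *req,
 *				       void *args, int rc)
 *	{
 *		(examine the reply and update caller state here;
 *		 do not block on further RPCs from this context)
 *		return rc;
 *	}
 *
 *	req->rq_interpret_reply = foo_async_interpret;
 *	ptlrpcd_add_req(req);
 */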

#define DEBUG_SUBSYSTEM S_RPC

#include "../../include/linux/libcfs/libcfs.h"

#include "../include/lustre_net.h"
#include "../include/lustre_lib.h"
#include "../include/lustre_ha.h"
#include "../include/obd_class.h"	/* for obd_zombie */
#include "../include/obd_support.h"	/* for OBD_FAIL_CHECK */
#include "../include/cl_object.h"	/* cl_env_{get,put}() */
#include "../include/lprocfs_status.h"

#include "ptlrpc_internal.h"

/* One of these per CPT. */
struct ptlrpcd {
	int pd_size;
	int pd_index;
	int pd_cpt;
	int pd_cursor;
	int pd_nthreads;
	int pd_groupsize;
	struct ptlrpcd_ctl pd_threads[0];
};

/*
 * max_ptlrpcds is obsolete, but retained to ensure that the kernel
 * module will load on a system where it has been tuned.
 * A value other than 0 implies it was tuned, in which case the value
 * is used to derive a setting for ptlrpcd_per_cpt_max.
 */
static int max_ptlrpcds;
module_param(max_ptlrpcds, int, 0644);
MODULE_PARM_DESC(max_ptlrpcds, "Max ptlrpcd thread count to be started.");

/*
 * ptlrpcd_bind_policy is obsolete, but retained to ensure that
 * the kernel module will load on a system where it has been tuned.
 * A value other than 0 implies it was tuned, in which case the value
 * is used to derive a setting for ptlrpcd_partner_group_size.
 */
static int ptlrpcd_bind_policy;
module_param(ptlrpcd_bind_policy, int, 0644);
MODULE_PARM_DESC(ptlrpcd_bind_policy,
		 "Ptlrpcd threads binding mode (obsolete).");

/*
 * ptlrpcd_per_cpt_max: The maximum number of ptlrpcd threads to run
 * in a CPT.
 */
static int ptlrpcd_per_cpt_max;
module_param(ptlrpcd_per_cpt_max, int, 0644);
MODULE_PARM_DESC(ptlrpcd_per_cpt_max,
		 "Max ptlrpcd thread count to be started per cpt.");

/*
 * ptlrpcd_partner_group_size: The desired number of threads in each
 * ptlrpcd partner thread group. Default is 2, corresponding to the
 * old PDB_POLICY_PAIR. A negative value makes all ptlrpcd threads in
 * a CPT partners of each other.
 */
static int ptlrpcd_partner_group_size;
module_param(ptlrpcd_partner_group_size, int, 0644);
MODULE_PARM_DESC(ptlrpcd_partner_group_size,
		 "Number of ptlrpcd threads in a partner group.");
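
/*
 * Configuration sketch (illustrative values, assuming these options
 * belong to the ptlrpc module as built here): the two knobs above can
 * be set together from /etc/modprobe.d, e.g.
 *
 *	options ptlrpc ptlrpcd_per_cpt_max=4 ptlrpcd_partner_group_size=2
 *
 * which caps each CPT at four ptlrpcd threads and groups them into
 * partner pairs.
 */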

/*
 * ptlrpcd_cpts: A CPT string describing the CPU partitions that
 * ptlrpcd threads should run on. Used to make ptlrpcd threads run on
 * a subset of all CPTs.
 *
 * ptlrpcd_cpts=2
 * ptlrpcd_cpts=[2]
 *   run ptlrpcd threads only on CPT 2.
 *
 * ptlrpcd_cpts=0-3
 * ptlrpcd_cpts=[0-3]
 *   run ptlrpcd threads on CPTs 0, 1, 2, and 3.
 *
 * ptlrpcd_cpts=[0-3,5,7]
 *   run ptlrpcd threads on CPTs 0, 1, 2, 3, 5, and 7.
 */
static char *ptlrpcd_cpts;
module_param(ptlrpcd_cpts, charp, 0644);
MODULE_PARM_DESC(ptlrpcd_cpts,
		 "CPU partitions ptlrpcd threads should run in");

/* ptlrpcds_cpt_idx maps cpt numbers to an index in the ptlrpcds array. */
static int		*ptlrpcds_cpt_idx;

/* ptlrpcds_num is the number of entries in the ptlrpcds array. */
static int		ptlrpcds_num;
static struct ptlrpcd	**ptlrpcds;

/*
 * In addition to the regular thread pool above, there is a single
 * global recovery thread. Recovery isn't critical for performance,
 * and doesn't block, but must always be able to proceed, and it is
 * possible that all normal ptlrpcd threads are blocked. Hence the
 * need for a dedicated thread.
 */
static struct ptlrpcd_ctl ptlrpcd_rcv;

struct mutex ptlrpcd_mutex;
static int ptlrpcd_users;

void ptlrpcd_wake(struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *rq_set = req->rq_set;

	LASSERT(rq_set != NULL);

	wake_up(&rq_set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);

static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req)
{
	struct ptlrpcd	*pd;
	int		cpt;
	int		idx;

	if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
		return &ptlrpcd_rcv;

	cpt = cfs_cpt_current(cfs_cpt_table, 1);
	if (!ptlrpcds_cpt_idx)
		idx = cpt;
	else
		idx = ptlrpcds_cpt_idx[cpt];
	pd = ptlrpcds[idx];

	/* We do not care about strict load balancing. */
	idx = pd->pd_cursor;
	if (++idx == pd->pd_nthreads)
		idx = 0;
	pd->pd_cursor = idx;

	return &pd->pd_threads[idx];
}

/**
 * Return the number of transferred RPCs.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
			       struct ptlrpc_request_set *src)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	int rc = 0;

	spin_lock(&src->set_new_req_lock);
	if (likely(!list_empty(&src->set_new_requests))) {
		list_for_each_safe(pos, tmp, &src->set_new_requests) {
			req = list_entry(pos, struct ptlrpc_request,
					 rq_set_chain);
			req->rq_set = des;
		}
		list_splice_init(&src->set_new_requests,
				 &des->set_requests);
		rc = atomic_read(&src->set_new_count);
		atomic_add(rc, &des->set_remaining);
		atomic_set(&src->set_new_count, 0);
	}
	spin_unlock(&src->set_new_req_lock);
	return rc;
}

/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req)
{
	struct ptlrpcd_ctl *pc;

	if (req->rq_reqmsg)
		lustre_msg_set_jobid(req->rq_reqmsg, NULL);

	spin_lock(&req->rq_lock);
	if (req->rq_invalid_rqset) {
		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
						     back_to_sleep, NULL);

		req->rq_invalid_rqset = 0;
		spin_unlock(&req->rq_lock);
		l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
	} else if (req->rq_set) {
		/* If we have a valid "rq_set", just reuse it to avoid double
		 * linking. */
		LASSERT(req->rq_phase == RQ_PHASE_NEW);
		LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

		/* ptlrpc_check_set will decrease the count */
		atomic_inc(&req->rq_set->set_remaining);
		spin_unlock(&req->rq_lock);
		wake_up(&req->rq_set->set_waitq);
		return;
	} else {
		spin_unlock(&req->rq_lock);
	}

	pc = ptlrpcd_select_pc(req);

	DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
		  req, pc->pc_name, pc->pc_index);

	ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);

static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
	atomic_inc(&set->set_refcount);
}

/**
 * Check if there is more work to do on the ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	struct ptlrpc_request_set *set = pc->pc_set;
	int rc = 0;
	int rc2;

	if (atomic_read(&set->set_new_count)) {
		spin_lock(&set->set_new_req_lock);
		if (likely(!list_empty(&set->set_new_requests))) {
			list_splice_init(&set->set_new_requests,
					 &set->set_requests);
			atomic_add(atomic_read(&set->set_new_count),
				   &set->set_remaining);
			atomic_set(&set->set_new_count, 0);
			/*
			 * Need to calculate its timeout.
			 */
			rc = 1;
		}
		spin_unlock(&set->set_new_req_lock);
	}

	/* We should call lu_env_refill() before handling new requests to make
	 * sure that the env keys the requests depend on really exist.
	 */
	rc2 = lu_env_refill(env);
	if (rc2 != 0) {
		/*
		 * XXX This is a very awkward situation, because
		 * execution can neither continue (request
		 * interpreters assume that env is set up), nor repeat
		 * the loop (as this potentially results in a tight
		 * loop of -ENOMEM's).
		 *
		 * Fortunately, refill only ever does something when
		 * new modules are loaded, i.e., early during boot up.
		 */
		CERROR("Failure to refill session: %d\n", rc2);
		return rc;
	}

	if (atomic_read(&set->set_remaining))
		rc |= ptlrpc_check_set(env, set);

	/* NB: ptlrpc_check_set has already moved completed requests to the
	 * head of set::set_requests */
	list_for_each_safe(pos, tmp, &set->set_requests) {
		req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
		if (req->rq_phase != RQ_PHASE_COMPLETE)
			break;

		list_del_init(&req->rq_set_chain);
		req->rq_set = NULL;
		ptlrpc_req_finished(req);
	}

	if (rc == 0) {
		/*
		 * If new requests have been added, make sure to wake up.
		 */
		rc = atomic_read(&set->set_new_count);

		/* If we have nothing to do, check whether we can take some
		 * work from our partner threads. */
		if (rc == 0 && pc->pc_npartners > 0) {
			struct ptlrpcd_ctl *partner;
			struct ptlrpc_request_set *ps;
			int first = pc->pc_cursor;

			do {
				partner = pc->pc_partners[pc->pc_cursor++];
				if (pc->pc_cursor >= pc->pc_npartners)
					pc->pc_cursor = 0;
				if (partner == NULL)
					continue;

				spin_lock(&partner->pc_lock);
				ps = partner->pc_set;
				if (ps == NULL) {
					spin_unlock(&partner->pc_lock);
					continue;
				}

				ptlrpc_reqset_get(ps);
				spin_unlock(&partner->pc_lock);

				if (atomic_read(&ps->set_new_count)) {
					rc = ptlrpcd_steal_rqset(set, ps);
					if (rc > 0)
						CDEBUG(D_RPCTRACE, "transfer %d async RPCs [%d->%d]\n",
						       rc, partner->pc_index,
						       pc->pc_index);
				}
				ptlrpc_reqset_put(ps);
			} while (rc == 0 && pc->pc_cursor != first);
		}
	}

	return rc;
}

/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the rpcs and sends them.
 */
static int ptlrpcd(void *arg)
{
	struct ptlrpcd_ctl *pc = arg;
	struct ptlrpc_request_set *set;
	struct lu_env env = { .le_ses = NULL };
	int rc = 0;
	int exit = 0;

	unshare_fs_struct();
	if (cfs_cpt_bind(cfs_cpt_table, pc->pc_cpt) != 0)
		CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt);

	/*
	 * Allocate the request set after the thread has been bound
	 * above. This is safe because no requests will be queued
	 * until all ptlrpcd threads have confirmed that they have
	 * successfully started.
	 */
	set = ptlrpc_prep_set();
	if (!set) {
		rc = -ENOMEM;
		goto failed;
	}
	spin_lock(&pc->pc_lock);
	pc->pc_set = set;
	spin_unlock(&pc->pc_lock);
	/*
	 * XXX So far only "client" ptlrpcd uses an environment. In
	 * the future, a ptlrpcd thread (or a thread-set) has to be
	 * given an argument, describing its "scope".
	 */
	rc = lu_context_init(&env.le_ctx,
			     LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
	if (rc != 0)
		goto failed;

	complete(&pc->pc_starting);

	/*
	 * This mainloop strongly resembles ptlrpc_set_wait() except that our
	 * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
	 * there are requests in the set. New requests come in on the set's
	 * new_req_list and ptlrpcd_check() moves them into the set.
	 */
	do {
		struct l_wait_info lwi;
		int timeout;

		timeout = ptlrpc_set_next_timeout(set);
		lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
				  ptlrpc_expired_set, set);

		lu_context_enter(&env.le_ctx);
		l_wait_event(set->set_waitq,
			     ptlrpcd_check(&env, pc), &lwi);
		lu_context_exit(&env.le_ctx);

		/*
		 * Abort inflight rpcs for the forced stop case.
		 */
		if (test_bit(LIOD_STOP, &pc->pc_flags)) {
			if (test_bit(LIOD_FORCE, &pc->pc_flags))
				ptlrpc_abort_set(set);
			exit++;
		}

		/*
		 * Let's make one more loop to make sure that ptlrpcd_check()
		 * copied all raced new rpcs into the set so we can kill them.
		 */
	} while (exit < 2);

	/*
	 * Wait for inflight requests to drain.
	 */
	if (!list_empty(&set->set_requests))
		ptlrpc_set_wait(set);
	lu_context_fini(&env.le_ctx);

	complete(&pc->pc_finishing);

	return 0;
failed:
	pc->pc_error = rc;
	complete(&pc->pc_starting);
	return rc;
}

static void ptlrpcd_ctl_init(struct ptlrpcd_ctl *pc, int index, int cpt)
{
	pc->pc_index = index;
	pc->pc_cpt = cpt;
	init_completion(&pc->pc_starting);
	init_completion(&pc->pc_finishing);
	spin_lock_init(&pc->pc_lock);

	if (index < 0) {
		/* Recovery thread. */
		snprintf(pc->pc_name, sizeof(pc->pc_name), "ptlrpcd_rcv");
	} else {
		/* Regular thread. */
		snprintf(pc->pc_name, sizeof(pc->pc_name),
			 "ptlrpcd_%02d_%02d", cpt, index);
	}
}

/* XXX: We want multiple CPU cores to share the async RPC load. So we
 *	start many ptlrpcd threads. We also want to reduce the ptlrpcd
 *	overhead caused by data transfer across CPU cores. So we bind
 *	all ptlrpcd threads to a CPT, in the expectation that CPTs
 *	will be defined in a way that matches these boundaries. Within
 *	a CPT a ptlrpcd thread can be scheduled on any available core.
 *
 *	Each ptlrpcd thread has its own request queue. This can cause
 *	response delay if the thread is already busy. To help with
 *	this we define partner threads: these are other threads bound
 *	to the same CPT which will check for work in each other's
 *	request queues if they have no work to do.
 *
 *	The desired number of partner threads can be tuned by setting
 *	ptlrpcd_partner_group_size. The default is to create pairs of
 *	partner threads.
 */
static int ptlrpcd_partners(struct ptlrpcd *pd, int index)
{
	struct ptlrpcd_ctl *pc;
	struct ptlrpcd_ctl **ppc;
	int first;
	int i;
	int rc = 0;
	int size;

	LASSERT(index >= 0 && index < pd->pd_nthreads);
	pc = &pd->pd_threads[index];
	pc->pc_npartners = pd->pd_groupsize - 1;

	if (pc->pc_npartners <= 0)
		goto out;

	size = sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners;
	pc->pc_partners = kzalloc_node(size, GFP_NOFS,
				       cfs_cpt_spread_node(cfs_cpt_table,
							   pc->pc_cpt));
	if (!pc->pc_partners) {
		pc->pc_npartners = 0;
		rc = -ENOMEM;
		goto out;
	}

	first = index - index % pd->pd_groupsize;
	ppc = pc->pc_partners;
	for (i = first; i < first + pd->pd_groupsize; i++) {
		if (i != index)
			*ppc++ = &pd->pd_threads[i];
	}
out:
	return rc;
}
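
/*
 * A worked example of the grouping arithmetic above (values are
 * illustrative): with pd_groupsize = 2 and index = 5, first is
 * 5 - 5 % 2 = 4, so thread 5's only partner is thread 4 (a pair).
 * With pd_groupsize = 4 and index = 5, first is 4, and the partners
 * of thread 5 are threads 4, 6, and 7.
 */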

int ptlrpcd_start(struct ptlrpcd_ctl *pc)
{
	struct task_struct *task;
	int rc = 0;

	/*
	 * Do not allow starting a second thread for one pc.
	 */
	if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Starting second thread (%s) for same pc %p\n",
		      pc->pc_name, pc);
		return 0;
	}

	/*
	 * So far only "client" ptlrpcd uses an environment. In the future,
	 * a ptlrpcd thread (or a thread-set) has to be given an argument,
	 * describing its "scope".
	 */
	rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
	if (rc != 0)
		goto out;

	task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);
		goto out_set;
	}

	wait_for_completion(&pc->pc_starting);
	rc = pc->pc_error;
	if (rc != 0)
		goto out_set;

	return 0;

out_set:
	if (pc->pc_set != NULL) {
		struct ptlrpc_request_set *set = pc->pc_set;

		spin_lock(&pc->pc_lock);
		pc->pc_set = NULL;
		spin_unlock(&pc->pc_lock);
		ptlrpc_set_destroy(set);
	}
	lu_context_fini(&pc->pc_env.le_ctx);

out:
	clear_bit(LIOD_START, &pc->pc_flags);
	return rc;
}

void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		return;
	}

	set_bit(LIOD_STOP, &pc->pc_flags);
	if (force)
		set_bit(LIOD_FORCE, &pc->pc_flags);
	wake_up(&pc->pc_set->set_waitq);
}

void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
	struct ptlrpc_request_set *set = pc->pc_set;

	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		goto out;
	}

	wait_for_completion(&pc->pc_finishing);
	lu_context_fini(&pc->pc_env.le_ctx);

	spin_lock(&pc->pc_lock);
	pc->pc_set = NULL;
	spin_unlock(&pc->pc_lock);
	ptlrpc_set_destroy(set);

	clear_bit(LIOD_START, &pc->pc_flags);
	clear_bit(LIOD_STOP, &pc->pc_flags);
	clear_bit(LIOD_FORCE, &pc->pc_flags);

out:
	if (pc->pc_npartners > 0) {
		LASSERT(pc->pc_partners != NULL);

		kfree(pc->pc_partners);
		pc->pc_partners = NULL;
	}
	pc->pc_npartners = 0;
	pc->pc_error = 0;
}

static void ptlrpcd_fini(void)
{
	int i;
	int j;

	if (ptlrpcds != NULL) {
		for (i = 0; i < ptlrpcds_num; i++) {
			if (!ptlrpcds[i])
				break;
			for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
				ptlrpcd_stop(&ptlrpcds[i]->pd_threads[j], 0);
			for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
				ptlrpcd_free(&ptlrpcds[i]->pd_threads[j]);
			kfree(ptlrpcds[i]);
			ptlrpcds[i] = NULL;
		}
		kfree(ptlrpcds);
	}
	ptlrpcds_num = 0;

	ptlrpcd_stop(&ptlrpcd_rcv, 0);
	ptlrpcd_free(&ptlrpcd_rcv);

	kfree(ptlrpcds_cpt_idx);
	ptlrpcds_cpt_idx = NULL;
}

static int ptlrpcd_init(void)
{
	int nthreads;
	int groupsize;
	int size;
	int i;
	int j;
	int rc = 0;
	struct cfs_cpt_table *cptable;
	__u32 *cpts = NULL;
	int ncpts;
	int cpt;
	struct ptlrpcd *pd;

	/*
	 * Determine the CPTs that ptlrpcd threads will run on.
	 */
	cptable = cfs_cpt_table;
	ncpts = cfs_cpt_number(cptable);
	if (ptlrpcd_cpts) {
		struct cfs_expr_list *el;

		size = ncpts * sizeof(ptlrpcds_cpt_idx[0]);
		ptlrpcds_cpt_idx = kzalloc(size, GFP_KERNEL);
		if (!ptlrpcds_cpt_idx) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cfs_expr_list_parse(ptlrpcd_cpts,
					 strlen(ptlrpcd_cpts),
					 0, ncpts - 1, &el);

		if (rc != 0) {
			CERROR("ptlrpcd_cpts: invalid CPT pattern string: %s\n",
			       ptlrpcd_cpts);
			rc = -EINVAL;
			goto out;
		}

		rc = cfs_expr_list_values(el, ncpts, &cpts);
		cfs_expr_list_free(el);
		if (rc <= 0) {
			CERROR("ptlrpcd_cpts: failed to parse CPT array %s: %d\n",
			       ptlrpcd_cpts, rc);
			if (rc == 0)
				rc = -EINVAL;
			goto out;
		}

		/*
		 * Create the cpt-to-index map. When there is no match
		 * in the cpt table, pick a cpt at random. This could
		 * be changed to take the topology of the system into
		 * account.
		 */
		for (cpt = 0; cpt < ncpts; cpt++) {
			for (i = 0; i < rc; i++)
				if (cpts[i] == cpt)
					break;
			if (i >= rc)
				i = cpt % rc;
			ptlrpcds_cpt_idx[cpt] = i;
		}
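
		/*
		 * Worked example (illustrative values): with 4 CPTs
		 * and ptlrpcd_cpts=[2,3], cpts = {2, 3} and rc = 2,
		 * so the map becomes idx[0] = 0, idx[1] = 1,
		 * idx[2] = 0, idx[3] = 1: work arriving on CPTs 0
		 * and 1 is redirected to the ptlrpcd structures
		 * created for CPTs 2 and 3 respectively.
		 */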

		cfs_expr_list_values_free(cpts, rc);
		ncpts = rc;
	}
	ptlrpcds_num = ncpts;

	size = ncpts * sizeof(ptlrpcds[0]);
	ptlrpcds = kzalloc(size, GFP_KERNEL);
	if (!ptlrpcds) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * The max_ptlrpcds parameter is obsolete, but do something
	 * sane if it has been tuned, and complain if
	 * ptlrpcd_per_cpt_max has also been tuned.
	 */
	if (max_ptlrpcds != 0) {
		CWARN("max_ptlrpcds is obsolete.\n");
		if (ptlrpcd_per_cpt_max == 0) {
			ptlrpcd_per_cpt_max = max_ptlrpcds / ncpts;
			/* Round up if there is a remainder. */
			if (max_ptlrpcds % ncpts != 0)
				ptlrpcd_per_cpt_max++;
			CWARN("Setting ptlrpcd_per_cpt_max = %d\n",
			      ptlrpcd_per_cpt_max);
		} else {
			CWARN("ptlrpcd_per_cpt_max is also set!\n");
		}
	}
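
	/*
	 * For example (illustrative numbers): max_ptlrpcds = 9 on a
	 * system with 4 CPTs gives 9 / 4 = 2 with remainder 1, so the
	 * rounded-up result is ptlrpcd_per_cpt_max = 3.
	 */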

	/*
	 * The ptlrpcd_bind_policy parameter is obsolete, but do
	 * something sane if it has been tuned, and complain if
	 * ptlrpcd_partner_group_size is also tuned.
	 */
	if (ptlrpcd_bind_policy != 0) {
		CWARN("ptlrpcd_bind_policy is obsolete.\n");
		if (ptlrpcd_partner_group_size == 0) {
			switch (ptlrpcd_bind_policy) {
			case 1: /* PDB_POLICY_NONE */
			case 2: /* PDB_POLICY_FULL */
				ptlrpcd_partner_group_size = 1;
				break;
			case 3: /* PDB_POLICY_PAIR */
				ptlrpcd_partner_group_size = 2;
				break;
			case 4: /* PDB_POLICY_NEIGHBOR */
#ifdef CONFIG_NUMA
				ptlrpcd_partner_group_size = -1; /* CPT */
#else
				ptlrpcd_partner_group_size = 3; /* Triplets */
#endif
				break;
			default: /* Illegal value, use the default. */
				ptlrpcd_partner_group_size = 2;
				break;
			}
			CWARN("Setting ptlrpcd_partner_group_size = %d\n",
			      ptlrpcd_partner_group_size);
		} else {
			CWARN("ptlrpcd_partner_group_size is also set!\n");
		}
	}

	if (ptlrpcd_partner_group_size == 0)
		ptlrpcd_partner_group_size = 2;
	else if (ptlrpcd_partner_group_size < 0)
		ptlrpcd_partner_group_size = -1;
	else if (ptlrpcd_per_cpt_max > 0 &&
		 ptlrpcd_partner_group_size > ptlrpcd_per_cpt_max)
		ptlrpcd_partner_group_size = ptlrpcd_per_cpt_max;

	/*
	 * Start the recovery thread first.
	 */
	set_bit(LIOD_RECOVERY, &ptlrpcd_rcv.pc_flags);
	ptlrpcd_ctl_init(&ptlrpcd_rcv, -1, CFS_CPT_ANY);
	rc = ptlrpcd_start(&ptlrpcd_rcv);
	if (rc < 0)
		goto out;

	for (i = 0; i < ncpts; i++) {
		if (!cpts)
			cpt = i;
		else
			cpt = cpts[i];

		nthreads = cfs_cpt_weight(cptable, cpt);
		if (ptlrpcd_per_cpt_max > 0 && ptlrpcd_per_cpt_max < nthreads)
			nthreads = ptlrpcd_per_cpt_max;
		if (nthreads < 2)
			nthreads = 2;

		if (ptlrpcd_partner_group_size <= 0) {
			groupsize = nthreads;
		} else if (nthreads <= ptlrpcd_partner_group_size) {
			groupsize = nthreads;
		} else {
			groupsize = ptlrpcd_partner_group_size;
			if (nthreads % groupsize != 0)
				nthreads += groupsize - (nthreads % groupsize);
		}
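
		/*
		 * Worked example (illustrative values): a CPT with
		 * weight 7 and ptlrpcd_partner_group_size = 2 gives
		 * groupsize = 2 and rounds nthreads up from 7 to 8,
		 * so the threads split evenly into four partner pairs.
		 */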

		size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
		pd = kzalloc_node(size, GFP_NOFS,
				  cfs_cpt_spread_node(cfs_cpt_table, cpt));
		if (!pd) {
			rc = -ENOMEM;
			goto out;
		}
		pd->pd_size = size;
		pd->pd_index = i;
		pd->pd_cpt = cpt;
		pd->pd_cursor = 0;
		pd->pd_nthreads = nthreads;
		pd->pd_groupsize = groupsize;
		ptlrpcds[i] = pd;

		/*
		 * The ptlrpcd threads in a partner group can access
		 * each other's struct ptlrpcd_ctl, so these must be
		 * initialized before any thread is started.
		 */
		for (j = 0; j < nthreads; j++) {
			ptlrpcd_ctl_init(&pd->pd_threads[j], j, cpt);
			rc = ptlrpcd_partners(pd, j);
			if (rc < 0)
				goto out;
		}

		/* XXX: We start nthreads ptlrpc daemons.
		 *	Each of them can process any non-recovery
		 *	async RPC to improve overall async RPC
		 *	efficiency.
		 *
		 *	But there are some issues when async I/O RPCs
		 *	and async non-I/O RPCs are processed in the
		 *	same set. The ptlrpcd may be blocked by some
		 *	async I/O RPC(s), which then prevents other
		 *	async non-I/O RPC(s) from being processed in
		 *	time.
		 *
		 *	Maybe we should distinguish blocked async RPCs
		 *	from non-blocked async RPCs, and process them
		 *	in different ptlrpcd sets to avoid unnecessary
		 *	dependency. But how to distribute the async
		 *	RPC load among all the ptlrpc daemons then
		 *	becomes another problem.
		 */
		for (j = 0; j < nthreads; j++) {
			rc = ptlrpcd_start(&pd->pd_threads[j]);
			if (rc < 0)
				goto out;
		}
	}
out:
	if (rc != 0)
		ptlrpcd_fini();

	return rc;
}

int ptlrpcd_addref(void)
{
	int rc = 0;

	mutex_lock(&ptlrpcd_mutex);
	if (++ptlrpcd_users == 1)
		rc = ptlrpcd_init();
	mutex_unlock(&ptlrpcd_mutex);
	return rc;
}
EXPORT_SYMBOL(ptlrpcd_addref);

void ptlrpcd_decref(void)
{
	mutex_lock(&ptlrpcd_mutex);
	if (--ptlrpcd_users == 0)
		ptlrpcd_fini();
	mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
/** @} ptlrpcd */