/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_resource.c
 *
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Peter Braam <braam@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LDLM
#include "../include/lustre_dlm.h"
#include "../include/lustre_fid.h"
#include "../include/obd_class.h"
#include "ldlm_internal.h"

struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;

int ldlm_srv_namespace_nr = 0;
int ldlm_cli_namespace_nr = 0;

struct mutex ldlm_srv_namespace_lock;
LIST_HEAD(ldlm_srv_namespace_list);

struct mutex ldlm_cli_namespace_lock;
/* Client namespaces that have active resources in them.
 * Once all resources go away, ldlm_poold moves such namespaces to the
 * inactive list. */
LIST_HEAD(ldlm_cli_active_namespace_list);
/* Client namespaces that don't have any locks in them. */
LIST_HEAD(ldlm_cli_inactive_namespace_list);

struct proc_dir_entry *ldlm_type_proc_dir = NULL;
static struct proc_dir_entry *ldlm_ns_proc_dir = NULL;
struct proc_dir_entry *ldlm_svc_proc_dir = NULL;

extern unsigned int ldlm_cancel_unused_locks_before_replay;

/* Dump at most this many granted locks per resource during a debug dump,
 * to avoid flooding the debug log. */
unsigned int ldlm_dump_granted_max = 256;

#if defined(CONFIG_PROC_FS)
static ssize_t lprocfs_wr_dump_ns(struct file *file, const char __user *buffer,
				  size_t count, loff_t *off)
{
	ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
	ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
	return count;
}
LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);

LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
LPROC_SEQ_FOPS_RO_TYPE(ldlm, uint);

int ldlm_proc_setup(void)
{
	int rc;
	struct lprocfs_vars list[] = {
		{ "dump_namespaces", &ldlm_dump_ns_fops, NULL, 0222 },
		{ "dump_granted_max", &ldlm_rw_uint_fops,
		  &ldlm_dump_granted_max },
		{ "cancel_unused_locks_before_replay", &ldlm_rw_uint_fops,
		  &ldlm_cancel_unused_locks_before_replay },
		{ NULL } };
	LASSERT(ldlm_ns_proc_dir == NULL);

	ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
					      proc_lustre_root,
					      NULL, NULL);
	if (IS_ERR(ldlm_type_proc_dir)) {
		CERROR("LProcFS failed in ldlm-init\n");
		rc = PTR_ERR(ldlm_type_proc_dir);
		goto err;
	}

	ldlm_ns_proc_dir = lprocfs_register("namespaces",
					    ldlm_type_proc_dir,
					    NULL, NULL);
	if (IS_ERR(ldlm_ns_proc_dir)) {
		CERROR("LProcFS failed in ldlm-init\n");
		rc = PTR_ERR(ldlm_ns_proc_dir);
		goto err_type;
	}

	ldlm_svc_proc_dir = lprocfs_register("services",
					     ldlm_type_proc_dir,
					     NULL, NULL);
	if (IS_ERR(ldlm_svc_proc_dir)) {
		CERROR("LProcFS failed in ldlm-init\n");
		rc = PTR_ERR(ldlm_svc_proc_dir);
		goto err_ns;
	}

	/* Do not silently ignore a failure to add the control files. */
	rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
	if (rc != 0) {
		CERROR("LProcFS failed in ldlm-init\n");
		goto err_svc;
	}

	return 0;

err_svc:
	lprocfs_remove(&ldlm_svc_proc_dir);
err_ns:
	lprocfs_remove(&ldlm_ns_proc_dir);
err_type:
	lprocfs_remove(&ldlm_type_proc_dir);
err:
	ldlm_svc_proc_dir = NULL;
	ldlm_type_proc_dir = NULL;
	ldlm_ns_proc_dir = NULL;
	return rc;
}

void ldlm_proc_cleanup(void)
{
	if (ldlm_svc_proc_dir)
		lprocfs_remove(&ldlm_svc_proc_dir);

	if (ldlm_ns_proc_dir)
		lprocfs_remove(&ldlm_ns_proc_dir);

	if (ldlm_type_proc_dir)
		lprocfs_remove(&ldlm_type_proc_dir);

	ldlm_svc_proc_dir = NULL;
	ldlm_type_proc_dir = NULL;
	ldlm_ns_proc_dir = NULL;
}

static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
{
	struct ldlm_namespace *ns = m->private;
	__u64 res = 0;
	struct cfs_hash_bd bd;
	int i;

	/* result is not strictly consistent */
	cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
		res += cfs_hash_bd_count_get(&bd);
	return lprocfs_rd_u64(m, &res);
}
LPROC_SEQ_FOPS_RO(lprocfs_ns_resources);

static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v)
{
	struct ldlm_namespace *ns = m->private;
	__u64 locks;

	locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
					LPROCFS_FIELDS_FLAGS_SUM);
	return lprocfs_rd_u64(m, &locks);
}
LPROC_SEQ_FOPS_RO(lprocfs_ns_locks);

static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v)
{
	struct ldlm_namespace *ns = m->private;
	__u32 *nr = &ns->ns_max_unused;

	if (ns_connect_lru_resize(ns))
		nr = &ns->ns_nr_unused;
	return lprocfs_rd_uint(m, nr);
}

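/*
 * Writes to the lru_size proc file take one of three forms (see the
 * handler below):
 *   "clear" - cancel all unused (LRU) locks in this namespace;
 *   "0"     - re-enable LRU resizing, if the server originally supported it;
 *   "N"     - set the maximum number of unused locks to N and disable
 *             LRU resizing.
 */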
static ssize_t lprocfs_lru_size_seq_write(struct file *file,
					  const char __user *buffer,
					  size_t count, loff_t *off)
{
	struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
	char dummy[MAX_STRING_SIZE + 1];
	unsigned long tmp;
	int lru_resize;
	int err;

	/* Bound the copy by the write size so we never read past the end
	 * of the user buffer. */
	if (count >= sizeof(dummy))
		return -EINVAL;

	if (copy_from_user(dummy, buffer, count))
		return -EFAULT;

	dummy[count] = '\0';

	if (strncmp(dummy, "clear", 5) == 0) {
		CDEBUG(D_DLMTRACE,
		       "dropping all unused locks from namespace %s\n",
		       ldlm_ns_name(ns));
		if (ns_connect_lru_resize(ns)) {
			int canceled, unused = ns->ns_nr_unused;

			/* Try to cancel all @ns_nr_unused locks. */
			canceled = ldlm_cancel_lru(ns, unused, 0,
						   LDLM_CANCEL_PASSED);
			if (canceled < unused) {
				CDEBUG(D_DLMTRACE,
				       "not all requested locks are canceled, requested: %d, canceled: %d\n",
				       unused,
				       canceled);
				return -EINVAL;
			}
		} else {
			tmp = ns->ns_max_unused;
			ns->ns_max_unused = 0;
			ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
			ns->ns_max_unused = tmp;
		}
		return count;
	}

	err = kstrtoul(dummy, 10, &tmp);
	if (err != 0) {
		CERROR("invalid value written\n");
		return -EINVAL;
	}
	lru_resize = (tmp == 0);

	if (ns_connect_lru_resize(ns)) {
		if (!lru_resize)
			ns->ns_max_unused = (unsigned int)tmp;

		if (tmp > ns->ns_nr_unused)
			tmp = ns->ns_nr_unused;
		tmp = ns->ns_nr_unused - tmp;

		CDEBUG(D_DLMTRACE,
		       "changing namespace %s unused locks from %u to %u\n",
		       ldlm_ns_name(ns), ns->ns_nr_unused,
		       (unsigned int)tmp);
		ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);

		if (!lru_resize) {
			CDEBUG(D_DLMTRACE,
			       "disable lru_resize for namespace %s\n",
			       ldlm_ns_name(ns));
			ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
		}
	} else {
		CDEBUG(D_DLMTRACE,
		       "changing namespace %s max_unused from %u to %u\n",
		       ldlm_ns_name(ns), ns->ns_max_unused,
		       (unsigned int)tmp);
		ns->ns_max_unused = (unsigned int)tmp;
		ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);

		/* Make sure that LRU resize was originally supported before
		 * turning it on here. */
		if (lru_resize &&
		    (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
			CDEBUG(D_DLMTRACE,
			       "enable lru_resize for namespace %s\n",
			       ldlm_ns_name(ns));
			ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
		}
	}

	return count;
}
LPROC_SEQ_FOPS(lprocfs_lru_size);

static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
{
	struct ldlm_namespace *ns = m->private;
	unsigned int supp = ns_connect_cancelset(ns);

	return lprocfs_rd_uint(m, &supp);
}

static ssize_t lprocfs_elc_seq_write(struct file *file,
				     const char __user *buffer,
				     size_t count, loff_t *off)
{
	struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
	unsigned int supp = -1;
	int rc;

	rc = lprocfs_wr_uint(file, buffer, count, &supp);
	if (rc < 0)
		return rc;

	if (supp == 0)
		ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
	else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
		ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
	return count;
}
LPROC_SEQ_FOPS(lprocfs_elc);

void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
{
	if (ns->ns_proc_dir_entry == NULL)
		CERROR("dlm namespace %s has no procfs dir?\n",
		       ldlm_ns_name(ns));
	else
		lprocfs_remove(&ns->ns_proc_dir_entry);

	if (ns->ns_stats != NULL)
		lprocfs_free_stats(&ns->ns_stats);
}

#define LDLM_NS_ADD_VAR(name, var, ops)					\
	do {								\
		snprintf(lock_name, MAX_STRING_SIZE, "%s", name);	\
		lock_vars[0].data = var;				\
		lock_vars[0].fops = ops;				\
		lprocfs_add_vars(ns_pde, lock_vars, NULL);		\
	} while (0)

int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
{
	struct lprocfs_vars lock_vars[2];
	char lock_name[MAX_STRING_SIZE + 1];
	struct proc_dir_entry *ns_pde;

	LASSERT(ns != NULL);
	LASSERT(ns->ns_rs_hash != NULL);

	if (ns->ns_proc_dir_entry != NULL) {
		ns_pde = ns->ns_proc_dir_entry;
	} else {
		ns_pde = proc_mkdir(ldlm_ns_name(ns), ldlm_ns_proc_dir);
		if (ns_pde == NULL)
			return -ENOMEM;
		ns->ns_proc_dir_entry = ns_pde;
	}

	ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
	if (ns->ns_stats == NULL)
		return -ENOMEM;

	lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
			     LPROCFS_CNTR_AVGMINMAX, "locks", "locks");

	lock_name[MAX_STRING_SIZE] = '\0';

	memset(lock_vars, 0, sizeof(lock_vars));
	lock_vars[0].name = lock_name;

	LDLM_NS_ADD_VAR("resource_count", ns, &lprocfs_ns_resources_fops);
	LDLM_NS_ADD_VAR("lock_count", ns, &lprocfs_ns_locks_fops);

	if (ns_is_client(ns)) {
		LDLM_NS_ADD_VAR("lock_unused_count", &ns->ns_nr_unused,
				&ldlm_uint_fops);
		LDLM_NS_ADD_VAR("lru_size", ns, &lprocfs_lru_size_fops);
		LDLM_NS_ADD_VAR("lru_max_age", &ns->ns_max_age,
				&ldlm_rw_uint_fops);
		LDLM_NS_ADD_VAR("early_lock_cancel", ns, &lprocfs_elc_fops);
	} else {
		LDLM_NS_ADD_VAR("ctime_age_limit", &ns->ns_ctime_age_limit,
				&ldlm_rw_uint_fops);
		LDLM_NS_ADD_VAR("lock_timeouts", &ns->ns_timeouts,
				&ldlm_uint_fops);
		LDLM_NS_ADD_VAR("max_nolock_bytes", &ns->ns_max_nolock_size,
				&ldlm_rw_uint_fops);
		LDLM_NS_ADD_VAR("contention_seconds", &ns->ns_contention_time,
				&ldlm_rw_uint_fops);
		LDLM_NS_ADD_VAR("contended_locks", &ns->ns_contended_locks,
				&ldlm_rw_uint_fops);
		LDLM_NS_ADD_VAR("max_parallel_ast", &ns->ns_max_parallel_ast,
				&ldlm_rw_uint_fops);
	}
	return 0;
}
#undef MAX_STRING_SIZE
#else /* CONFIG_PROC_FS */

#define ldlm_namespace_proc_unregister(ns)      ({; })
#define ldlm_namespace_proc_register(ns)	({0; })

#endif /* CONFIG_PROC_FS */

static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
				  const void *key, unsigned mask)
{
	const struct ldlm_res_id *id = key;
	unsigned val = 0;
	unsigned i;

	for (i = 0; i < RES_NAME_SIZE; i++)
		val += id->name[i];
	return val & mask;
}

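/*
 * For metadata (MDC/MDT) namespaces the resource name encodes a FID, so
 * build the hash from the FID (sequence, oid, version) plus the optional
 * name hash stored in LUSTRE_RES_ID_HSH_OFF, then spread the result across
 * the hash buckets.
 */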
static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
				      const void *key, unsigned mask)
{
	const struct ldlm_res_id *id = key;
	struct lu_fid fid;
	__u32 hash;
	__u32 val;

	fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
	fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
	fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);

	hash = fid_flatten32(&fid);
	hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
	if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
		val = id->name[LUSTRE_RES_ID_HSH_OFF];
		hash += (val >> 5) + (val << 11);
	} else {
		val = fid_oid(&fid);
	}
	hash = hash_long(hash, hs->hs_bkt_bits);
	/* give me another random factor */
	hash -= hash_long((unsigned long)hs, val % 11 + 3);

	hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
	hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);

	return hash & mask;
}

static void *ldlm_res_hop_key(struct hlist_node *hnode)
{
	struct ldlm_resource *res;

	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
	return &res->lr_name;
}

static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
{
	struct ldlm_resource *res;

	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
	return ldlm_res_eq((const struct ldlm_res_id *)key,
			   (const struct ldlm_res_id *)&res->lr_name);
}

static void *ldlm_res_hop_object(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct ldlm_resource, lr_hash);
}

static void ldlm_res_hop_get_locked(struct cfs_hash *hs,
				    struct hlist_node *hnode)
{
	struct ldlm_resource *res;

	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
	ldlm_resource_getref(res);
}

static void ldlm_res_hop_put_locked(struct cfs_hash *hs,
				    struct hlist_node *hnode)
{
	struct ldlm_resource *res;

	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
	/* cfs_hash_for_each_nolock() is the only caller of this path. */
	ldlm_resource_putref_locked(res);
}

static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
	struct ldlm_resource *res;

	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
	ldlm_resource_putref(res);
}

cfs_hash_ops_t ldlm_ns_hash_ops = {
	.hs_hash	= ldlm_res_hop_hash,
	.hs_key		= ldlm_res_hop_key,
	.hs_keycmp	= ldlm_res_hop_keycmp,
	.hs_keycpy	= NULL,
	.hs_object	= ldlm_res_hop_object,
	.hs_get		= ldlm_res_hop_get_locked,
	.hs_put_locked	= ldlm_res_hop_put_locked,
	.hs_put		= ldlm_res_hop_put
};

cfs_hash_ops_t ldlm_ns_fid_hash_ops = {
	.hs_hash	= ldlm_res_hop_fid_hash,
	.hs_key		= ldlm_res_hop_key,
	.hs_keycmp	= ldlm_res_hop_keycmp,
	.hs_keycpy	= NULL,
	.hs_object	= ldlm_res_hop_object,
	.hs_get		= ldlm_res_hop_get_locked,
	.hs_put_locked	= ldlm_res_hop_put_locked,
	.hs_put		= ldlm_res_hop_put
};

struct ldlm_ns_hash_def {
	ldlm_ns_type_t	nsd_type;
	/** hash bucket bits */
	unsigned	nsd_bkt_bits;
	/** hash bits */
	unsigned	nsd_all_bits;
	/** hash operations */
	cfs_hash_ops_t *nsd_hops;
};

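/*
 * Per-namespace-type hash table geometry: FID-based hashing for the metadata
 * namespaces (MDC/MDT), simple name hashing for the rest. The table is
 * terminated by the LDLM_NS_TYPE_UNKNOWN entry.
 */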
struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = {
	{
		.nsd_type       = LDLM_NS_TYPE_MDC,
		.nsd_bkt_bits   = 11,
		.nsd_all_bits   = 16,
		.nsd_hops       = &ldlm_ns_fid_hash_ops,
	},
	{
		.nsd_type       = LDLM_NS_TYPE_MDT,
		.nsd_bkt_bits   = 14,
		.nsd_all_bits   = 21,
		.nsd_hops       = &ldlm_ns_fid_hash_ops,
	},
	{
		.nsd_type       = LDLM_NS_TYPE_OSC,
		.nsd_bkt_bits   = 8,
		.nsd_all_bits   = 12,
		.nsd_hops       = &ldlm_ns_hash_ops,
	},
	{
		.nsd_type       = LDLM_NS_TYPE_OST,
		.nsd_bkt_bits   = 11,
		.nsd_all_bits   = 17,
		.nsd_hops       = &ldlm_ns_hash_ops,
	},
	{
		.nsd_type       = LDLM_NS_TYPE_MGC,
		.nsd_bkt_bits   = 4,
		.nsd_all_bits   = 4,
		.nsd_hops       = &ldlm_ns_hash_ops,
	},
	{
		.nsd_type       = LDLM_NS_TYPE_MGT,
		.nsd_bkt_bits   = 4,
		.nsd_all_bits   = 4,
		.nsd_hops       = &ldlm_ns_hash_ops,
	},
	{
		.nsd_type       = LDLM_NS_TYPE_UNKNOWN,
	},
};

/**
 * Create and initialize new empty namespace.
 */
struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
					  ldlm_side_t client,
					  ldlm_appetite_t apt,
					  ldlm_ns_type_t ns_type)
{
	struct ldlm_namespace *ns = NULL;
	struct ldlm_ns_bucket *nsb;
	struct ldlm_ns_hash_def *nsd;
	struct cfs_hash_bd bd;
	int idx;
	int rc;

	LASSERT(obd != NULL);

	rc = ldlm_get_ref();
	if (rc) {
		CERROR("ldlm_get_ref failed: %d\n", rc);
		return NULL;
	}

	for (idx = 0;; idx++) {
		nsd = &ldlm_ns_hash_defs[idx];
		if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
			CERROR("Unknown type %d for ns %s\n", ns_type, name);
			goto out_ref;
		}

		if (nsd->nsd_type == ns_type)
			break;
	}

	OBD_ALLOC_PTR(ns);
	if (!ns)
		goto out_ref;

	ns->ns_rs_hash = cfs_hash_create(name,
					 nsd->nsd_all_bits, nsd->nsd_all_bits,
					 nsd->nsd_bkt_bits, sizeof(*nsb),
					 CFS_HASH_MIN_THETA,
					 CFS_HASH_MAX_THETA,
					 nsd->nsd_hops,
					 CFS_HASH_DEPTH |
					 CFS_HASH_BIGNAME |
					 CFS_HASH_SPIN_BKTLOCK |
					 CFS_HASH_NO_ITEMREF);
	if (ns->ns_rs_hash == NULL)
		goto out_ns;

	cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
		nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
		at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
		nsb->nsb_namespace = ns;
	}

	ns->ns_obd      = obd;
	ns->ns_appetite = apt;
	ns->ns_client   = client;

	INIT_LIST_HEAD(&ns->ns_list_chain);
	INIT_LIST_HEAD(&ns->ns_unused_list);
	spin_lock_init(&ns->ns_lock);
	atomic_set(&ns->ns_bref, 0);
	init_waitqueue_head(&ns->ns_waitq);

	ns->ns_max_nolock_size    = NS_DEFAULT_MAX_NOLOCK_BYTES;
	ns->ns_contention_time    = NS_DEFAULT_CONTENTION_SECONDS;
	ns->ns_contended_locks    = NS_DEFAULT_CONTENDED_LOCKS;

	ns->ns_max_parallel_ast   = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
	ns->ns_nr_unused          = 0;
	ns->ns_max_unused         = LDLM_DEFAULT_LRU_SIZE;
	ns->ns_max_age            = LDLM_DEFAULT_MAX_ALIVE;
	ns->ns_ctime_age_limit    = LDLM_CTIME_AGE_LIMIT;
	ns->ns_timeouts           = 0;
	ns->ns_orig_connect_flags = 0;
	ns->ns_connect_flags      = 0;
	ns->ns_stopping           = 0;
	rc = ldlm_namespace_proc_register(ns);
	if (rc != 0) {
		CERROR("Can't initialize ns proc, rc %d\n", rc);
		goto out_hash;
	}

	idx = ldlm_namespace_nr_read(client);
	rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
	if (rc) {
		CERROR("Can't initialize lock pool, rc %d\n", rc);
		goto out_proc;
	}

	ldlm_namespace_register(ns, client);
	return ns;
out_proc:
	ldlm_namespace_proc_unregister(ns);
	ldlm_namespace_cleanup(ns, 0);
out_hash:
	cfs_hash_putref(ns->ns_rs_hash);
out_ns:
	OBD_FREE_PTR(ns);
out_ref:
	ldlm_put_ref();
	return NULL;
}
EXPORT_SYMBOL(ldlm_namespace_new);

extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);

/**
 * Cancel and destroy all locks on a resource.
 *
 * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
 * clean up.  This is currently only used for recovery, and we make
 * certain assumptions as a result--notably, that we shouldn't cancel
 * locks with refs.
 */
static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
			     __u64 flags)
{
	struct list_head *tmp;
	int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
	bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);

	do {
		struct ldlm_lock *lock = NULL;

		/* First, look for a lock that has not been cleaned up yet;
		 * all cleaned locks are marked by the CLEANED flag. */
		lock_res(res);
		list_for_each(tmp, q) {
			lock = list_entry(tmp, struct ldlm_lock,
					  l_res_link);
			if (lock->l_flags & LDLM_FL_CLEANED) {
				lock = NULL;
				continue;
			}
			LDLM_LOCK_GET(lock);
			lock->l_flags |= LDLM_FL_CLEANED;
			break;
		}

		if (lock == NULL) {
			unlock_res(res);
			break;
		}

		/* Set CBPENDING so nothing in the cancellation path
		 * can match this lock. */
		lock->l_flags |= LDLM_FL_CBPENDING;
		lock->l_flags |= LDLM_FL_FAILED;
		lock->l_flags |= flags;

		/* ... without sending a CANCEL message for local_only. */
		if (local_only)
			lock->l_flags |= LDLM_FL_LOCAL_ONLY;

		if (local_only && (lock->l_readers || lock->l_writers)) {
			/* This is a little bit gross, but much better than the
			 * alternative: pretend that we got a blocking AST from
			 * the server, so that when the lock is decref'd, it
			 * will go away ... */
			unlock_res(res);
			LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
			if (lock->l_completion_ast)
				lock->l_completion_ast(lock, 0, NULL);
			LDLM_LOCK_RELEASE(lock);
			continue;
		}

		if (client) {
			struct lustre_handle lockh;

			unlock_res(res);
			ldlm_lock2handle(lock, &lockh);
			rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
			if (rc)
				CERROR("ldlm_cli_cancel: %d\n", rc);
		} else {
			ldlm_resource_unlink_lock(lock);
			unlock_res(res);
			LDLM_DEBUG(lock, "Freeing a lock still held by a client node");
			ldlm_lock_destroy(lock);
		}
		LDLM_LOCK_RELEASE(lock);
	} while (1);
}

static int ldlm_resource_clean(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			       struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);
	__u64 flags = *(__u64 *)arg;

	cleanup_resource(res, &res->lr_granted, flags);
	cleanup_resource(res, &res->lr_converting, flags);
	cleanup_resource(res, &res->lr_waiting, flags);

	return 0;
}

static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
				  struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);

	lock_res(res);
	CERROR("%s: namespace resource "DLDLMRES
	       " (%p) refcount nonzero (%d) after lock cleanup; forcing cleanup.\n",
	       ldlm_ns_name(ldlm_res_to_ns(res)), PLDLMRES(res), res,
	       atomic_read(&res->lr_refcount) - 1);

	ldlm_resource_dump(D_ERROR, res);
	unlock_res(res);
	return 0;
}

/**
 * Cancel and destroy all locks in the namespace.
 *
 * Typically used during evictions, when the server notifies the client that
 * it was evicted and all of its state needs to be destroyed.
 * Also used during shutdown.
 */
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
{
	if (ns == NULL) {
		CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
		return ELDLM_OK;
	}

	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
	return ELDLM_OK;
}
EXPORT_SYMBOL(ldlm_namespace_cleanup);

/**
 * Attempts to free namespace.
 *
 * Only used when namespace goes away, like during an unmount.
 */
static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
{
	/* At shutdown time, don't call the cancellation callback */
	ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);

	if (atomic_read(&ns->ns_bref) > 0) {
		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
		int rc;

		CDEBUG(D_DLMTRACE,
		       "dlm namespace %s free waiting on refcount %d\n",
		       ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
force_wait:
		if (force)
			lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);

		rc = l_wait_event(ns->ns_waitq,
				  atomic_read(&ns->ns_bref) == 0, &lwi);

		/* Forced cleanups should be able to reclaim all references,
		 * so it's safe to wait forever... we can't leak locks... */
		if (force && rc == -ETIMEDOUT) {
			LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
				       ldlm_ns_name(ns),
				       atomic_read(&ns->ns_bref), rc);
			goto force_wait;
		}

		if (atomic_read(&ns->ns_bref)) {
			LCONSOLE_ERROR("Cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
				       ldlm_ns_name(ns),
				       atomic_read(&ns->ns_bref), rc);
			return ELDLM_NAMESPACE_EXISTS;
		}
		CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
		       ldlm_ns_name(ns));
	}

	return ELDLM_OK;
}

/**
 * Perform various cleanups on the passed \a ns so that it drops its
 * references and becomes ready for freeing. Waits until refcount == 0.
 *
 * The following is done:
 * (0) Unregister \a ns from its list to make it inaccessible to potential
 *     users like the pools thread and others;
 * (1) Clear all locks in \a ns.
 */
void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
			       struct obd_import *imp,
			       int force)
{
	int rc;

	if (!ns)
		return;

	spin_lock(&ns->ns_lock);
	ns->ns_stopping = 1;
	spin_unlock(&ns->ns_lock);

	/*
	 * Can fail with -EINTR when force == 0 in which case try harder.
	 */
	rc = __ldlm_namespace_free(ns, force);
	if (rc != ELDLM_OK) {
		if (imp) {
			ptlrpc_disconnect_import(imp, 0);
			ptlrpc_invalidate_import(imp);
		}

		/*
		 * With all requests dropped and the import inactive
		 * we are guaranteed all references will be dropped.
		 */
		rc = __ldlm_namespace_free(ns, 1);
		LASSERT(rc == 0);
	}
}

/**
 * Frees the memory structures related to \a ns. This is only done when
 * ldlm_namespace_free_prior() has successfully removed all resources
 * referencing \a ns and its refcount has reached 0.
 */
void ldlm_namespace_free_post(struct ldlm_namespace *ns)
{
	if (!ns)
		return;

	/* Make sure that nobody can find this ns in its list. */
	ldlm_namespace_unregister(ns, ns->ns_client);
	/* Fini the pool _before_ the parent proc dir is removed. This is
	 * important because ldlm_pool_fini() removes its own proc dir, which
	 * is a child of @dir. Removing it after @dir may cause an oops. */
	ldlm_pool_fini(&ns->ns_pool);

	ldlm_namespace_proc_unregister(ns);
	cfs_hash_putref(ns->ns_rs_hash);
	/* Namespace \a ns should not be on any list at this point, otherwise
	 * the poold thread could end up using a freed \a ns. */
	LASSERT(list_empty(&ns->ns_list_chain));
	OBD_FREE_PTR(ns);
	ldlm_put_ref();
}

/**
 * Cleanup the resource and free the namespace.
 * bug 12864:
 * Deadlock issue:
 * proc1: destroy import
 *	class_disconnect_export(grab cl_sem) ->
 *	      -> ldlm_namespace_free ->
 *	      -> lprocfs_remove(grab _lprocfs_lock).
 * proc2: read proc info
 *	lprocfs_fops_read(grab _lprocfs_lock) ->
 *	      -> osc_rd_active, etc(grab cl_sem).
 *
 * To avoid this, ldlm_namespace_free is split into two parts: the first
 * part, ldlm_namespace_free_prior, cleans up the resources which are
 * being used; the second part, ldlm_namespace_free_post, unregisters the
 * lprocfs entries and then frees the memory. It is called without
 * cli->cl_sem held.
 */
void ldlm_namespace_free(struct ldlm_namespace *ns,
			 struct obd_import *imp,
			 int force)
{
	ldlm_namespace_free_prior(ns, imp, force);
	ldlm_namespace_free_post(ns);
}
EXPORT_SYMBOL(ldlm_namespace_free);

void ldlm_namespace_get(struct ldlm_namespace *ns)
{
	atomic_inc(&ns->ns_bref);
}
EXPORT_SYMBOL(ldlm_namespace_get);

/* This is only for callers that care about refcount */
int ldlm_namespace_get_return(struct ldlm_namespace *ns)
{
	return atomic_inc_return(&ns->ns_bref);
}

void ldlm_namespace_put(struct ldlm_namespace *ns)
{
	if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
		wake_up(&ns->ns_waitq);
		spin_unlock(&ns->ns_lock);
	}
}
EXPORT_SYMBOL(ldlm_namespace_put);

/** Register \a ns in the list of namespaces */
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
{
	mutex_lock(ldlm_namespace_lock(client));
	LASSERT(list_empty(&ns->ns_list_chain));
	list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
	ldlm_namespace_nr_inc(client);
	mutex_unlock(ldlm_namespace_lock(client));
}

/** Unregister \a ns from the list of namespaces. */
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
{
	mutex_lock(ldlm_namespace_lock(client));
	LASSERT(!list_empty(&ns->ns_list_chain));
	/* Some asserts and possibly other parts of the code are still
	 * using list_empty(&ns->ns_list_chain). This is why it is
	 * important to use list_del_init() here. */
	list_del_init(&ns->ns_list_chain);
	ldlm_namespace_nr_dec(client);
	mutex_unlock(ldlm_namespace_lock(client));
}

/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
					  ldlm_side_t client)
{
	LASSERT(!list_empty(&ns->ns_list_chain));
	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
	list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
}

/** Should be called with ldlm_namespace_lock(client) taken. */
void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
					    ldlm_side_t client)
{
	LASSERT(!list_empty(&ns->ns_list_chain));
	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
	list_move_tail(&ns->ns_list_chain,
		       ldlm_namespace_inactive_list(client));
}

/** Should be called with ldlm_namespace_lock(client) taken. */
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
{
	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
	LASSERT(!list_empty(ldlm_namespace_list(client)));
	return container_of(ldlm_namespace_list(client)->next,
			    struct ldlm_namespace, ns_list_chain);
}

/** Create and initialize new resource. */
static struct ldlm_resource *ldlm_resource_new(void)
{
	struct ldlm_resource *res;
	int idx;

	OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, GFP_NOFS);
	if (res == NULL)
		return NULL;

	INIT_LIST_HEAD(&res->lr_granted);
	INIT_LIST_HEAD(&res->lr_converting);
	INIT_LIST_HEAD(&res->lr_waiting);

	/* Initialize interval trees for each lock mode. */
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		res->lr_itree[idx].lit_size = 0;
		res->lr_itree[idx].lit_mode = 1 << idx;
		res->lr_itree[idx].lit_root = NULL;
	}

	atomic_set(&res->lr_refcount, 1);
	spin_lock_init(&res->lr_lock);
	lu_ref_init(&res->lr_reference);

	/* The creator of the resource must unlock the mutex after LVB
	 * initialization. */
	mutex_init(&res->lr_lvb_mutex);
	mutex_lock(&res->lr_lvb_mutex);

	return res;
}

/**
 * Return a reference to resource with given name, creating it if necessary.
 * Args: namespace with ns_lock unlocked
 * Locks: takes and releases NS hash-lock and res->lr_lock
 * Returns: referenced, unlocked ldlm_resource or NULL
 */
struct ldlm_resource *
ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
		  const struct ldlm_res_id *name, ldlm_type_t type, int create)
{
	struct hlist_node *hnode;
	struct ldlm_resource *res;
	struct cfs_hash_bd bd;
	__u64 version;
	int ns_refcount = 0;

	LASSERT(ns != NULL);
	LASSERT(parent == NULL);
	LASSERT(ns->ns_rs_hash != NULL);
	LASSERT(name->name[0] != 0);

	cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
	hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
	if (hnode != NULL) {
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
		res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
		/* Synchronize with regard to resource creation. */
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
			mutex_lock(&res->lr_lvb_mutex);
			mutex_unlock(&res->lr_lvb_mutex);
		}

		if (unlikely(res->lr_lvb_len < 0)) {
			ldlm_resource_putref(res);
			res = NULL;
		}
		return res;
	}

	version = cfs_hash_bd_version_get(&bd);
	cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);

	if (create == 0)
		return NULL;

	LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
		 "type: %d\n", type);
	res = ldlm_resource_new();
	if (!res)
		return NULL;

	res->lr_ns_bucket  = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
	res->lr_name       = *name;
	res->lr_type       = type;
	res->lr_most_restr = LCK_NL;

	cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
	hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
		cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);

	if (hnode != NULL) {
		/* Someone won the race and already added the resource. */
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
		/* Clean lu_ref for failed resource. */
		lu_ref_fini(&res->lr_reference);
		/* We have taken lr_lvb_mutex. Drop it. */
		mutex_unlock(&res->lr_lvb_mutex);
		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));

		res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
		/* Synchronize with regard to resource creation. */
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
			mutex_lock(&res->lr_lvb_mutex);
			mutex_unlock(&res->lr_lvb_mutex);
		}

		if (unlikely(res->lr_lvb_len < 0)) {
			ldlm_resource_putref(res);
			res = NULL;
		}
		return res;
	}
	/* We won! Let's add the resource. */
	cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
	if (cfs_hash_bd_count_get(&bd) == 1)
		ns_refcount = ldlm_namespace_get_return(ns);

	cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
	if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
		int rc;

		OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
		rc = ns->ns_lvbo->lvbo_init(res);
		if (rc < 0) {
			CERROR("%s: lvbo_init failed for resource %#llx:%#llx: rc = %d\n",
			       ns->ns_obd->obd_name, name->name[0],
			       name->name[1], rc);
			if (res->lr_lvb_data) {
				OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
				res->lr_lvb_data = NULL;
			}
			res->lr_lvb_len = rc;
			mutex_unlock(&res->lr_lvb_mutex);
			ldlm_resource_putref(res);
			return NULL;
		}
	}

	/* We create resource with locked lr_lvb_mutex. */
	mutex_unlock(&res->lr_lvb_mutex);

	/* Let's see if we happened to be the very first resource in this
	 * namespace. If so, and this is a client namespace, we need to move
	 * the namespace into the active namespaces list to be patrolled by
	 * the ldlm_poold. */
	if (ns_is_client(ns) && ns_refcount == 1) {
		mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
		ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
		mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
	}

	return res;
}
EXPORT_SYMBOL(ldlm_resource_get);

struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
{
	LASSERT(res != NULL);
	LASSERT(res != LP_POISON);
	atomic_inc(&res->lr_refcount);
	CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
	       atomic_read(&res->lr_refcount));
	return res;
}

static void __ldlm_resource_putref_final(struct cfs_hash_bd *bd,
					 struct ldlm_resource *res)
{
	struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;

	if (!list_empty(&res->lr_granted)) {
		ldlm_resource_dump(D_ERROR, res);
		LBUG();
	}

	if (!list_empty(&res->lr_converting)) {
		ldlm_resource_dump(D_ERROR, res);
		LBUG();
	}

	if (!list_empty(&res->lr_waiting)) {
		ldlm_resource_dump(D_ERROR, res);
		LBUG();
	}

	cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
			       bd, &res->lr_hash);
	lu_ref_fini(&res->lr_reference);
	if (cfs_hash_bd_count_get(bd) == 0)
		ldlm_namespace_put(nsb->nsb_namespace);
}

/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_putref(struct ldlm_resource *res)
{
	struct ldlm_namespace *ns = ldlm_res_to_ns(res);
	struct cfs_hash_bd bd;

	LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
	CDEBUG(D_INFO, "putref res: %p count: %d\n",
	       res, atomic_read(&res->lr_refcount) - 1);

	cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
	if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
		__ldlm_resource_putref_final(&bd, res);
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
			ns->ns_lvbo->lvbo_free(res);
		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(ldlm_resource_putref);

/* Returns 1 if the resource was freed, 0 if it remains. */
int ldlm_resource_putref_locked(struct ldlm_resource *res)
{
	struct ldlm_namespace *ns = ldlm_res_to_ns(res);

	LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
	CDEBUG(D_INFO, "putref res: %p count: %d\n",
	       res, atomic_read(&res->lr_refcount) - 1);

	if (atomic_dec_and_test(&res->lr_refcount)) {
		struct cfs_hash_bd bd;

		cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
				&res->lr_name, &bd);
		__ldlm_resource_putref_final(&bd, res);
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
		/* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF, so the
		 * hash itself never drops the last reference via cfs_hash_del.
		 * The only way to get here is via cfs_hash_for_each_nolock(),
		 * which makes it safe to release and retake cfs_hash_bd_lock
		 * around the free.
		 */
		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
			ns->ns_lvbo->lvbo_free(res);
		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof(*res));

		cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
		return 1;
	}
	return 0;
}

/**
 * Add a lock to a given resource, onto the specified lock list.
 */
void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
			    struct ldlm_lock *lock)
{
	check_res_locked(res);

	LDLM_DEBUG(lock, "About to add this lock:\n");

	if (lock->l_flags & LDLM_FL_DESTROYED) {
		CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
		return;
	}

	LASSERT(list_empty(&lock->l_res_link));

	list_add_tail(&lock->l_res_link, head);
}

/**
 * Insert a lock into a resource after the specified lock.
 *
 * The resource is taken from the lock we are inserting after.
 */
void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
				     struct ldlm_lock *new)
{
	struct ldlm_resource *res = original->l_resource;

	check_res_locked(res);

	ldlm_resource_dump(D_INFO, res);
	LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);

	if (new->l_flags & LDLM_FL_DESTROYED) {
		CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
		goto out;
	}

	LASSERT(list_empty(&new->l_res_link));

	list_add(&new->l_res_link, &original->l_res_link);
 out:;
}

void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
{
	int type = lock->l_resource->lr_type;

	check_res_locked(lock->l_resource);
	if (type == LDLM_IBITS || type == LDLM_PLAIN)
		ldlm_unlink_lock_skiplist(lock);
	else if (type == LDLM_EXTENT)
		ldlm_extent_unlink_lock(lock);
	list_del_init(&lock->l_res_link);
}
EXPORT_SYMBOL(ldlm_resource_unlink_lock);

void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
{
	desc->lr_type = res->lr_type;
	desc->lr_name = res->lr_name;
}

/**
 * Print information about all locks in all namespaces on this node to debug
 * log.
 */
void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
{
	struct list_head *tmp;

	if (!((libcfs_debug | D_ERROR) & level))
		return;

	mutex_lock(ldlm_namespace_lock(client));

	list_for_each(tmp, ldlm_namespace_list(client)) {
		struct ldlm_namespace *ns;

		ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
		ldlm_namespace_dump(level, ns);
	}

	mutex_unlock(ldlm_namespace_lock(client));
}
EXPORT_SYMBOL(ldlm_dump_all_namespaces);

static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			      struct hlist_node *hnode, void *arg)
{
	struct ldlm_resource *res = cfs_hash_object(hs, hnode);
	int level = (int)(unsigned long)arg;

	lock_res(res);
	ldlm_resource_dump(level, res);
	unlock_res(res);

	return 0;
}

/**
 * Print information about all locks in this namespace on this node to debug
 * log.
 */
void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
{
	if (!((libcfs_debug | D_ERROR) & level))
		return;

	CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
	       ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
	       ns_is_client(ns) ? "client" : "server");

	if (time_before(cfs_time_current(), ns->ns_next_dump))
		return;

	cfs_hash_for_each_nolock(ns->ns_rs_hash,
				 ldlm_res_hash_dump,
				 (void *)(unsigned long)level);
	spin_lock(&ns->ns_lock);
	ns->ns_next_dump = cfs_time_shift(10);
	spin_unlock(&ns->ns_lock);
}
EXPORT_SYMBOL(ldlm_namespace_dump);

/**
 * Print information about all locks in this resource to debug log.
 */
void ldlm_resource_dump(int level, struct ldlm_resource *res)
{
	struct ldlm_lock *lock;
	unsigned int granted = 0;

	CLASSERT(RES_NAME_SIZE == 4);

	if (!((libcfs_debug | D_ERROR) & level))
		return;

	CDEBUG(level, "--- Resource: "DLDLMRES" (%p) refcount = %d\n",
	       PLDLMRES(res), res, atomic_read(&res->lr_refcount));

	if (!list_empty(&res->lr_granted)) {
		CDEBUG(level, "Granted locks (in reverse order):\n");
		list_for_each_entry_reverse(lock, &res->lr_granted,
					    l_res_link) {
			LDLM_DEBUG_LIMIT(level, lock, "###");
			if (!(level & D_CANTMASK) &&
			    ++granted > ldlm_dump_granted_max) {
				CDEBUG(level, "only dump %d granted locks to avoid DDOS.\n",
				       granted);
				break;
			}
		}
	}
	if (!list_empty(&res->lr_converting)) {
		CDEBUG(level, "Converting locks:\n");
		list_for_each_entry(lock, &res->lr_converting, l_res_link)
			LDLM_DEBUG_LIMIT(level, lock, "###");
	}
	if (!list_empty(&res->lr_waiting)) {
		CDEBUG(level, "Waiting locks:\n");
		list_for_each_entry(lock, &res->lr_waiting, l_res_link)
			LDLM_DEBUG_LIMIT(level, lock, "###");
	}
}