1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Client Extent Lock.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40 
41 #define DEBUG_SUBSYSTEM S_CLASS
42 
43 #include "../include/obd_class.h"
44 #include "../include/obd_support.h"
45 #include "../include/lustre_fid.h"
46 #include <linux/list.h>
47 #include "../include/cl_object.h"
48 #include "cl_internal.h"
49 
50 /** Lock class of cl_lock::cll_guard */
51 static struct lock_class_key cl_lock_guard_class;
52 static struct kmem_cache *cl_lock_kmem;
53 
54 static struct lu_kmem_descr cl_lock_caches[] = {
55 	{
56 		.ckd_cache = &cl_lock_kmem,
57 		.ckd_name  = "cl_lock_kmem",
58 		.ckd_size  = sizeof (struct cl_lock)
59 	},
60 	{
61 		.ckd_cache = NULL
62 	}
63 };
64 
65 #define CS_LOCK_INC(o, item)
66 #define CS_LOCK_DEC(o, item)
67 #define CS_LOCKSTATE_INC(o, state)
68 #define CS_LOCKSTATE_DEC(o, state)
69 
70 /**
71  * Basic lock invariant that is maintained at all times. Caller either has a
72  * reference to \a lock, or somehow assures that \a lock cannot be freed.
73  *
74  * \see cl_lock_invariant()
75  */
76 static int cl_lock_invariant_trusted(const struct lu_env *env,
77 				     const struct cl_lock *lock)
78 {
79 	return  ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
80 		atomic_read(&lock->cll_ref) >= lock->cll_holds &&
81 		lock->cll_holds >= lock->cll_users &&
82 		lock->cll_holds >= 0 &&
83 		lock->cll_users >= 0 &&
84 		lock->cll_depth >= 0;
85 }
86 
87 /**
88  * Stronger lock invariant, checking that caller has a reference on a lock.
89  *
90  * \see cl_lock_invariant_trusted()
91  */
92 static int cl_lock_invariant(const struct lu_env *env,
93 			     const struct cl_lock *lock)
94 {
95 	int result;
96 
97 	result = atomic_read(&lock->cll_ref) > 0 &&
98 		cl_lock_invariant_trusted(env, lock);
99 	if (!result && env != NULL)
100 		CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
101 	return result;
102 }
103 
104 /**
105  * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
106  */
107 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
108 {
109 	return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
110 }
111 
112 /**
113  * Returns a set of counters for this lock, depending on a lock nesting.
114  */
115 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
116 						   const struct cl_lock *lock)
117 {
118 	struct cl_thread_info *info;
119 	enum clt_nesting_level nesting;
120 
121 	info = cl_env_info(env);
122 	nesting = cl_lock_nesting(lock);
123 	LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
124 	return &info->clt_counters[nesting];
125 }
126 
127 static void cl_lock_trace0(int level, const struct lu_env *env,
128 			   const char *prefix, const struct cl_lock *lock,
129 			   const char *func, const int line)
130 {
131 	struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
132 	CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)(%p/%d/%d) at %s():%d\n",
133 	       prefix, lock, atomic_read(&lock->cll_ref),
134 	       lock->cll_guarder, lock->cll_depth,
135 	       lock->cll_state, lock->cll_error, lock->cll_holds,
136 	       lock->cll_users, lock->cll_flags,
137 	       env, h->coh_nesting, cl_lock_nr_mutexed(env),
138 	       func, line);
139 }
140 #define cl_lock_trace(level, env, prefix, lock)			 \
141 	cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)
142 
143 #define RETIP ((unsigned long)__builtin_return_address(0))
144 
145 #ifdef CONFIG_LOCKDEP
146 static struct lock_class_key cl_lock_key;
147 
148 static void cl_lock_lockdep_init(struct cl_lock *lock)
149 {
150 	lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
151 }
152 
153 static void cl_lock_lockdep_acquire(const struct lu_env *env,
154 				    struct cl_lock *lock, __u32 enqflags)
155 {
156 	cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
157 	lock_map_acquire(&lock->dep_map);
158 }
159 
160 static void cl_lock_lockdep_release(const struct lu_env *env,
161 				    struct cl_lock *lock)
162 {
163 	cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
164 	lock_release(&lock->dep_map, 0, RETIP);
165 }
166 
167 #else /* !CONFIG_LOCKDEP */
168 
169 static void cl_lock_lockdep_init(struct cl_lock *lock)
170 {}
171 static void cl_lock_lockdep_acquire(const struct lu_env *env,
172 				    struct cl_lock *lock, __u32 enqflags)
173 {}
174 static void cl_lock_lockdep_release(const struct lu_env *env,
175 				    struct cl_lock *lock)
176 {}
177 
178 #endif /* !CONFIG_LOCKDEP */
179 
180 /**
181  * Adds lock slice to the compound lock.
182  *
183  * This is called by cl_object_operations::coo_lock_init() methods to add a
184  * per-layer state to the lock. New state is added at the end of
185  * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
186  *
187  * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
188  */
189 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
190 		       struct cl_object *obj,
191 		       const struct cl_lock_operations *ops)
192 {
193 	slice->cls_lock = lock;
194 	list_add_tail(&slice->cls_linkage, &lock->cll_layers);
195 	slice->cls_obj = obj;
196 	slice->cls_ops = ops;
197 }
198 EXPORT_SYMBOL(cl_lock_slice_add);
199 
200 /**
201  * Returns true iff a lock with the mode \a has provides at least the same
202  * guarantees as a lock with the mode \a need.
203  */
204 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
205 {
206 	LINVRNT(need == CLM_READ || need == CLM_WRITE ||
207 		need == CLM_PHANTOM || need == CLM_GROUP);
208 	LINVRNT(has == CLM_READ || has == CLM_WRITE ||
209 		has == CLM_PHANTOM || has == CLM_GROUP);
210 	CLASSERT(CLM_PHANTOM < CLM_READ);
211 	CLASSERT(CLM_READ < CLM_WRITE);
212 	CLASSERT(CLM_WRITE < CLM_GROUP);
213 
214 	if (has != CLM_GROUP)
215 		return need <= has;
216 	else
217 		return need == has;
218 }
219 EXPORT_SYMBOL(cl_lock_mode_match);
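
/*
 * Illustrative sketch (not part of the original source): how a caller might
 * use cl_lock_mode_match() to decide whether an existing lock mode satisfies
 * a request.  The "cached" and "wanted" variables are hypothetical.
 *
 *	enum cl_lock_mode cached = CLM_WRITE;
 *	enum cl_lock_mode wanted = CLM_READ;
 *
 *	if (cl_lock_mode_match(cached, wanted)) {
 *		// CLM_WRITE covers CLM_READ and CLM_PHANTOM requests,
 *		// so the cached lock can be reused.
 *	}
 *	// CLM_GROUP is only matched by CLM_GROUP, regardless of ordering.
 */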
220 
221 /**
222  * Returns true iff extent portions of lock descriptions match.
223  */
224 int cl_lock_ext_match(const struct cl_lock_descr *has,
225 		      const struct cl_lock_descr *need)
226 {
227 	return
228 		has->cld_start <= need->cld_start &&
229 		has->cld_end >= need->cld_end &&
230 		cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
231 		(has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
232 }
233 EXPORT_SYMBOL(cl_lock_ext_match);
234 
235 /**
236  * Returns true iff a lock with the description \a has provides at least the
237  * same guarantees as a lock with the description \a need.
238  */
239 int cl_lock_descr_match(const struct cl_lock_descr *has,
240 			const struct cl_lock_descr *need)
241 {
242 	return
243 		cl_object_same(has->cld_obj, need->cld_obj) &&
244 		cl_lock_ext_match(has, need);
245 }
246 EXPORT_SYMBOL(cl_lock_descr_match);
247 
248 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
249 {
250 	struct cl_object *obj = lock->cll_descr.cld_obj;
251 
252 	LINVRNT(!cl_lock_is_mutexed(lock));
253 
254 	cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
255 	might_sleep();
256 	while (!list_empty(&lock->cll_layers)) {
257 		struct cl_lock_slice *slice;
258 
259 		slice = list_entry(lock->cll_layers.next,
260 				       struct cl_lock_slice, cls_linkage);
261 		list_del_init(lock->cll_layers.next);
262 		slice->cls_ops->clo_fini(env, slice);
263 	}
264 	CS_LOCK_DEC(obj, total);
265 	CS_LOCKSTATE_DEC(obj, lock->cll_state);
266 	lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
267 	cl_object_put(env, obj);
268 	lu_ref_fini(&lock->cll_reference);
269 	lu_ref_fini(&lock->cll_holders);
270 	mutex_destroy(&lock->cll_guard);
271 	OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
272 }
273 
274 /**
275  * Releases a reference on a lock.
276  *
277  * When last reference is released, lock is returned to the cache, unless it
278  * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
279  * immediately.
280  *
281  * \see cl_object_put(), cl_page_put()
282  */
283 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
284 {
285 	struct cl_object	*obj;
286 
287 	LINVRNT(cl_lock_invariant(env, lock));
288 	obj = lock->cll_descr.cld_obj;
289 	LINVRNT(obj != NULL);
290 
291 	CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
292 	       atomic_read(&lock->cll_ref), lock, RETIP);
293 
294 	if (atomic_dec_and_test(&lock->cll_ref)) {
295 		if (lock->cll_state == CLS_FREEING) {
296 			LASSERT(list_empty(&lock->cll_linkage));
297 			cl_lock_free(env, lock);
298 		}
299 		CS_LOCK_DEC(obj, busy);
300 	}
301 }
302 EXPORT_SYMBOL(cl_lock_put);
303 
304 /**
305  * Acquires an additional reference to a lock.
306  *
307  * This can be called only by caller already possessing a reference to \a
308  * lock.
309  *
310  * \see cl_object_get(), cl_page_get()
311  */
312 void cl_lock_get(struct cl_lock *lock)
313 {
314 	LINVRNT(cl_lock_invariant(NULL, lock));
315 	CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
316 	       atomic_read(&lock->cll_ref), lock, RETIP);
317 	atomic_inc(&lock->cll_ref);
318 }
319 EXPORT_SYMBOL(cl_lock_get);
320 
321 /**
322  * Acquires a reference to a lock.
323  *
324  * This is much like cl_lock_get(), except that this function can be used to
325  * acquire initial reference to the cached lock. Caller has to deal with all
326  * possible races. Use with care!
327  *
328  * \see cl_page_get_trust()
329  */
330 void cl_lock_get_trust(struct cl_lock *lock)
331 {
332 	CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
333 	       atomic_read(&lock->cll_ref), lock, RETIP);
334 	if (atomic_inc_return(&lock->cll_ref) == 1)
335 		CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
336 }
337 EXPORT_SYMBOL(cl_lock_get_trust);
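
/*
 * Illustrative sketch (not part of the original source) of the reference
 * discipline around cl_lock_get()/cl_lock_put().  The function below is
 * hypothetical; "env" and "lck" are assumed to come from a caller that
 * already owns a reference.
 *
 *	void hypothetical_user(const struct lu_env *env, struct cl_lock *lck)
 *	{
 *		cl_lock_get(lck);	// take an extra reference
 *		// ... use the lock ...
 *		cl_lock_put(env, lck);	// drop it; the lock is freed only
 *					// when it is in CLS_FREEING and the
 *					// last reference goes away
 *	}
 *
 * cl_lock_get_trust(), by contrast, may create the very first reference to a
 * cached lock, so the caller has to rule out concurrent freeing itself (e.g.,
 * by holding coh_lock_guard, as cl_lock_lookup() does).
 */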
338 
339 /**
340  * Helper function destroying the lock that wasn't completely initialized.
341  *
342  * Other threads can acquire references to the top-lock through its
343  * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
344  */
345 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
346 {
347 	cl_lock_mutex_get(env, lock);
348 	cl_lock_cancel(env, lock);
349 	cl_lock_delete(env, lock);
350 	cl_lock_mutex_put(env, lock);
351 	cl_lock_put(env, lock);
352 }
353 
354 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
355 				     struct cl_object *obj,
356 				     const struct cl_io *io,
357 				     const struct cl_lock_descr *descr)
358 {
359 	struct cl_lock	  *lock;
360 	struct lu_object_header *head;
361 
362 	OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, GFP_NOFS);
363 	if (lock != NULL) {
364 		atomic_set(&lock->cll_ref, 1);
365 		lock->cll_descr = *descr;
366 		lock->cll_state = CLS_NEW;
367 		cl_object_get(obj);
368 		lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
369 				     lock);
370 		INIT_LIST_HEAD(&lock->cll_layers);
371 		INIT_LIST_HEAD(&lock->cll_linkage);
372 		INIT_LIST_HEAD(&lock->cll_inclosure);
373 		lu_ref_init(&lock->cll_reference);
374 		lu_ref_init(&lock->cll_holders);
375 		mutex_init(&lock->cll_guard);
376 		lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
377 		init_waitqueue_head(&lock->cll_wq);
378 		head = obj->co_lu.lo_header;
379 		CS_LOCKSTATE_INC(obj, CLS_NEW);
380 		CS_LOCK_INC(obj, total);
381 		CS_LOCK_INC(obj, create);
382 		cl_lock_lockdep_init(lock);
383 		list_for_each_entry(obj, &head->loh_layers,
384 					co_lu.lo_linkage) {
385 			int err;
386 
387 			err = obj->co_ops->coo_lock_init(env, obj, lock, io);
388 			if (err != 0) {
389 				cl_lock_finish(env, lock);
390 				lock = ERR_PTR(err);
391 				break;
392 			}
393 		}
394 	} else
395 		lock = ERR_PTR(-ENOMEM);
396 	return lock;
397 }
398 
399 /**
400  * Transfer the lock into INTRANSIT state and return the original state.
401  *
402  * \pre  state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
403  * \post state: CLS_INTRANSIT
404  * \see CLS_INTRANSIT
405  */
406 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
407 				     struct cl_lock *lock)
408 {
409 	enum cl_lock_state state = lock->cll_state;
410 
411 	LASSERT(cl_lock_is_mutexed(lock));
412 	LASSERT(state != CLS_INTRANSIT);
413 	LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
414 		 "Malformed lock state %d.\n", state);
415 
416 	cl_lock_state_set(env, lock, CLS_INTRANSIT);
417 	lock->cll_intransit_owner = current;
418 	cl_lock_hold_add(env, lock, "intransit", current);
419 	return state;
420 }
421 EXPORT_SYMBOL(cl_lock_intransit);
422 
423 /**
424  *  Exit the intransit state and restore the lock state to the original state
425  */
426 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
427 		       enum cl_lock_state state)
428 {
429 	LASSERT(cl_lock_is_mutexed(lock));
430 	LASSERT(lock->cll_state == CLS_INTRANSIT);
431 	LASSERT(state != CLS_INTRANSIT);
432 	LASSERT(lock->cll_intransit_owner == current);
433 
434 	lock->cll_intransit_owner = NULL;
435 	cl_lock_state_set(env, lock, state);
436 	cl_lock_unhold(env, lock, "intransit", current);
437 }
438 EXPORT_SYMBOL(cl_lock_extransit);
439 
440 /**
441  * Checks whether the lock is in the INTRANSIT state.
442  */
443 int cl_lock_is_intransit(struct cl_lock *lock)
444 {
445 	LASSERT(cl_lock_is_mutexed(lock));
446 	return lock->cll_state == CLS_INTRANSIT &&
447 	       lock->cll_intransit_owner != current;
448 }
449 EXPORT_SYMBOL(cl_lock_is_intransit);
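
/*
 * Illustrative sketch (not part of the original source) of the INTRANSIT
 * protocol, as used by cl_use_try() and cl_unuse_try() below.  "env" and
 * "lock" are assumed to be a mutexed lock owned by the caller.
 *
 *	enum cl_lock_state saved;
 *
 *	saved = cl_lock_intransit(env, lock);	// add a hold; CLS_CACHED/
 *						// ENQUEUED/HELD -> CLS_INTRANSIT
 *	// ... run per-layer methods; other threads that find the lock in
 *	// CLS_INTRANSIT wait in cl_lock_state_wait() for the transition ...
 *	cl_lock_extransit(env, lock, saved);	// drop the hold and install
 *						// the saved (or a new) state
 */
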
450 /**
451  * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
452  * truncate and O_APPEND cannot be reused for read/non-append-write, as they
453  * cover multiple stripes and can trigger cascading timeouts.
454  */
455 static int cl_lock_fits_into(const struct lu_env *env,
456 			     const struct cl_lock *lock,
457 			     const struct cl_lock_descr *need,
458 			     const struct cl_io *io)
459 {
460 	const struct cl_lock_slice *slice;
461 
462 	LINVRNT(cl_lock_invariant_trusted(env, lock));
463 	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
464 		if (slice->cls_ops->clo_fits_into != NULL &&
465 		    !slice->cls_ops->clo_fits_into(env, slice, need, io))
466 			return 0;
467 	}
468 	return 1;
469 }
470 
471 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
472 				      struct cl_object *obj,
473 				      const struct cl_io *io,
474 				      const struct cl_lock_descr *need)
475 {
476 	struct cl_lock	  *lock;
477 	struct cl_object_header *head;
478 
479 	head = cl_object_header(obj);
480 	assert_spin_locked(&head->coh_lock_guard);
481 	CS_LOCK_INC(obj, lookup);
482 	list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
483 		int matched;
484 
485 		matched = cl_lock_ext_match(&lock->cll_descr, need) &&
486 			  lock->cll_state < CLS_FREEING &&
487 			  lock->cll_error == 0 &&
488 			  !(lock->cll_flags & CLF_CANCELLED) &&
489 			  cl_lock_fits_into(env, lock, need, io);
490 		CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
491 		       PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
492 		       matched);
493 		if (matched) {
494 			cl_lock_get_trust(lock);
495 			CS_LOCK_INC(obj, hit);
496 			return lock;
497 		}
498 	}
499 	return NULL;
500 }
501 
502 /**
503  * Returns a lock matching description \a need.
504  *
505  * This is the main entry point into the cl_lock caching interface. First, a
506  * cache (implemented as a per-object linked list) is consulted. If lock is
507  * found there, it is returned immediately. Otherwise new lock is allocated
508  * and returned. In any case, additional reference to lock is acquired.
509  *
510  * \see cl_object_find(), cl_page_find()
511  */
512 static struct cl_lock *cl_lock_find(const struct lu_env *env,
513 				    const struct cl_io *io,
514 				    const struct cl_lock_descr *need)
515 {
516 	struct cl_object_header *head;
517 	struct cl_object	*obj;
518 	struct cl_lock	  *lock;
519 
520 	obj  = need->cld_obj;
521 	head = cl_object_header(obj);
522 
523 	spin_lock(&head->coh_lock_guard);
524 	lock = cl_lock_lookup(env, obj, io, need);
525 	spin_unlock(&head->coh_lock_guard);
526 
527 	if (lock == NULL) {
528 		lock = cl_lock_alloc(env, obj, io, need);
529 		if (!IS_ERR(lock)) {
530 			struct cl_lock *ghost;
531 
532 			spin_lock(&head->coh_lock_guard);
533 			ghost = cl_lock_lookup(env, obj, io, need);
534 			if (ghost == NULL) {
535 				cl_lock_get_trust(lock);
536 				list_add_tail(&lock->cll_linkage,
537 						  &head->coh_locks);
538 				spin_unlock(&head->coh_lock_guard);
539 				CS_LOCK_INC(obj, busy);
540 			} else {
541 				spin_unlock(&head->coh_lock_guard);
542 				/*
543 				 * Other threads can acquire references to the
544 				 * top-lock through its sub-locks. Hence, it
545 				 * cannot be cl_lock_free()-ed immediately.
546 				 */
547 				cl_lock_finish(env, lock);
548 				lock = ghost;
549 			}
550 		}
551 	}
552 	return lock;
553 }
554 
555 /**
556  * Returns existing lock matching given description. This is similar to
557  * cl_lock_find() except that no new lock is created, and returned lock is
558  * guaranteed to be in enum cl_lock_state::CLS_HELD state.
559  */
560 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
561 			     const struct cl_lock_descr *need,
562 			     const char *scope, const void *source)
563 {
564 	struct cl_object_header *head;
565 	struct cl_object	*obj;
566 	struct cl_lock	  *lock;
567 
568 	obj  = need->cld_obj;
569 	head = cl_object_header(obj);
570 
571 	do {
572 		spin_lock(&head->coh_lock_guard);
573 		lock = cl_lock_lookup(env, obj, io, need);
574 		spin_unlock(&head->coh_lock_guard);
575 		if (lock == NULL)
576 			return NULL;
577 
578 		cl_lock_mutex_get(env, lock);
579 		if (lock->cll_state == CLS_INTRANSIT)
580 			/* The return value is ignored. */
581 			cl_lock_state_wait(env, lock);
582 		if (lock->cll_state == CLS_FREEING) {
583 			cl_lock_mutex_put(env, lock);
584 			cl_lock_put(env, lock);
585 			lock = NULL;
586 		}
587 	} while (lock == NULL);
588 
589 	cl_lock_hold_add(env, lock, scope, source);
590 	cl_lock_user_add(env, lock);
591 	if (lock->cll_state == CLS_CACHED)
592 		cl_use_try(env, lock, 1);
593 	if (lock->cll_state == CLS_HELD) {
594 		cl_lock_mutex_put(env, lock);
595 		cl_lock_lockdep_acquire(env, lock, 0);
596 		cl_lock_put(env, lock);
597 	} else {
598 		cl_unuse_try(env, lock);
599 		cl_lock_unhold(env, lock, scope, source);
600 		cl_lock_mutex_put(env, lock);
601 		cl_lock_put(env, lock);
602 		lock = NULL;
603 	}
604 
605 	return lock;
606 }
607 EXPORT_SYMBOL(cl_lock_peek);
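
/*
 * Illustrative sketch (not part of the original source): probing the cache
 * with cl_lock_peek().  The io, descriptor, scope string and source pointer
 * are hypothetical caller-provided values.
 *
 *	struct cl_lock *lck;
 *
 *	lck = cl_lock_peek(env, io, need, "glimpse", current);
 *	if (lck != NULL) {
 *		// lck is returned in CLS_HELD state with a hold and a user
 *		// added; release them when done, e.g.:
 *		cl_unuse(env, lck);
 *		cl_lock_unhold(env, lck, "glimpse", current);
 *	}
 */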
608 
609 /**
610  * Returns a slice within a lock, corresponding to the given layer in the
611  * device stack.
612  *
613  * \see cl_page_at()
614  */
615 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
616 				       const struct lu_device_type *dtype)
617 {
618 	const struct cl_lock_slice *slice;
619 
620 	LINVRNT(cl_lock_invariant_trusted(NULL, lock));
621 
622 	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
623 		if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
624 			return slice;
625 	}
626 	return NULL;
627 }
628 EXPORT_SYMBOL(cl_lock_at);
629 
630 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
631 {
632 	struct cl_thread_counters *counters;
633 
634 	counters = cl_lock_counters(env, lock);
635 	lock->cll_depth++;
636 	counters->ctc_nr_locks_locked++;
637 	lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
638 	cl_lock_trace(D_TRACE, env, "got mutex", lock);
639 }
640 
641 /**
642  * Locks cl_lock object.
643  *
644  * This is used to manipulate cl_lock fields, and to serialize state
645  * transitions in the lock state machine.
646  *
647  * \post cl_lock_is_mutexed(lock)
648  *
649  * \see cl_lock_mutex_put()
650  */
651 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
652 {
653 	LINVRNT(cl_lock_invariant(env, lock));
654 
655 	if (lock->cll_guarder == current) {
656 		LINVRNT(cl_lock_is_mutexed(lock));
657 		LINVRNT(lock->cll_depth > 0);
658 	} else {
659 		struct cl_object_header *hdr;
660 		struct cl_thread_info   *info;
661 		int i;
662 
663 		LINVRNT(lock->cll_guarder != current);
664 		hdr = cl_object_header(lock->cll_descr.cld_obj);
665 		/*
666 		 * Check that mutices are taken in the bottom-to-top order.
667 		 */
668 		info = cl_env_info(env);
669 		for (i = 0; i < hdr->coh_nesting; ++i)
670 			LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
671 		mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
672 		lock->cll_guarder = current;
673 		LINVRNT(lock->cll_depth == 0);
674 	}
675 	cl_lock_mutex_tail(env, lock);
676 }
677 EXPORT_SYMBOL(cl_lock_mutex_get);
678 
679 /**
680  * Try-locks cl_lock object.
681  *
682  * \retval 0 \a lock was successfully locked
683  *
684  * \retval -EBUSY \a lock cannot be locked right now
685  *
686  * \post ergo(result == 0, cl_lock_is_mutexed(lock))
687  *
688  * \see cl_lock_mutex_get()
689  */
690 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
691 {
692 	int result;
693 
694 	LINVRNT(cl_lock_invariant_trusted(env, lock));
695 
696 	result = 0;
697 	if (lock->cll_guarder == current) {
698 		LINVRNT(lock->cll_depth > 0);
699 		cl_lock_mutex_tail(env, lock);
700 	} else if (mutex_trylock(&lock->cll_guard)) {
701 		LINVRNT(lock->cll_depth == 0);
702 		lock->cll_guarder = current;
703 		cl_lock_mutex_tail(env, lock);
704 	} else
705 		result = -EBUSY;
706 	return result;
707 }
708 EXPORT_SYMBOL(cl_lock_mutex_try);
709 
710 /**
711  * Unlocks cl_lock object.
712  *
713  * \pre cl_lock_is_mutexed(lock)
714  *
715  * \see cl_lock_mutex_get()
716  */
717 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
718 {
719 	struct cl_thread_counters *counters;
720 
721 	LINVRNT(cl_lock_invariant(env, lock));
722 	LINVRNT(cl_lock_is_mutexed(lock));
723 	LINVRNT(lock->cll_guarder == current);
724 	LINVRNT(lock->cll_depth > 0);
725 
726 	counters = cl_lock_counters(env, lock);
727 	LINVRNT(counters->ctc_nr_locks_locked > 0);
728 
729 	cl_lock_trace(D_TRACE, env, "put mutex", lock);
730 	lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
731 	counters->ctc_nr_locks_locked--;
732 	if (--lock->cll_depth == 0) {
733 		lock->cll_guarder = NULL;
734 		mutex_unlock(&lock->cll_guard);
735 	}
736 }
737 EXPORT_SYMBOL(cl_lock_mutex_put);
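
/*
 * Illustrative sketch (not part of the original source): the cl_lock mutex is
 * recursive for its owner, so nested get/put pairs are legal as long as they
 * balance.  "env" and "lock" are assumed caller-provided.
 *
 *	cl_lock_mutex_get(env, lock);	// depth 1, cll_guard taken
 *	cl_lock_mutex_get(env, lock);	// depth 2, no deadlock: the owning
 *					// thread only bumps cll_depth
 *	LASSERT(cl_lock_is_mutexed(lock));
 *	cl_lock_mutex_put(env, lock);	// depth 1
 *	cl_lock_mutex_put(env, lock);	// depth 0, cll_guard released
 */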
738 
739 /**
740  * Returns true iff lock's mutex is owned by the current thread.
741  */
742 int cl_lock_is_mutexed(struct cl_lock *lock)
743 {
744 	return lock->cll_guarder == current;
745 }
746 EXPORT_SYMBOL(cl_lock_is_mutexed);
747 
748 /**
749  * Returns number of cl_lock mutices held by the current thread (environment).
750  */
751 int cl_lock_nr_mutexed(const struct lu_env *env)
752 {
753 	struct cl_thread_info *info;
754 	int i;
755 	int locked;
756 
757 	/*
758 	 * NOTE: if summation across all nesting levels (currently 2) proves
759 	 *       too expensive, a summary counter can be added to
760 	 *       struct cl_thread_info.
761 	 */
762 	info = cl_env_info(env);
763 	for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
764 		locked += info->clt_counters[i].ctc_nr_locks_locked;
765 	return locked;
766 }
767 EXPORT_SYMBOL(cl_lock_nr_mutexed);
768 
769 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
770 {
771 	LINVRNT(cl_lock_is_mutexed(lock));
772 	LINVRNT(cl_lock_invariant(env, lock));
773 	if (!(lock->cll_flags & CLF_CANCELLED)) {
774 		const struct cl_lock_slice *slice;
775 
776 		lock->cll_flags |= CLF_CANCELLED;
777 		list_for_each_entry_reverse(slice, &lock->cll_layers,
778 						cls_linkage) {
779 			if (slice->cls_ops->clo_cancel != NULL)
780 				slice->cls_ops->clo_cancel(env, slice);
781 		}
782 	}
783 }
784 
785 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
786 {
787 	struct cl_object_header    *head;
788 	const struct cl_lock_slice *slice;
789 
790 	LINVRNT(cl_lock_is_mutexed(lock));
791 	LINVRNT(cl_lock_invariant(env, lock));
792 
793 	if (lock->cll_state < CLS_FREEING) {
794 		bool in_cache;
795 
796 		LASSERT(lock->cll_state != CLS_INTRANSIT);
797 		cl_lock_state_set(env, lock, CLS_FREEING);
798 
799 		head = cl_object_header(lock->cll_descr.cld_obj);
800 
801 		spin_lock(&head->coh_lock_guard);
802 		in_cache = !list_empty(&lock->cll_linkage);
803 		if (in_cache)
804 			list_del_init(&lock->cll_linkage);
805 		spin_unlock(&head->coh_lock_guard);
806 
807 		if (in_cache) /* coh_locks cache holds a refcount. */
808 			cl_lock_put(env, lock);
809 
810 		/*
811 		 * From now on, no new references to this lock can be acquired
812 		 * by cl_lock_lookup().
813 		 */
814 		list_for_each_entry_reverse(slice, &lock->cll_layers,
815 						cls_linkage) {
816 			if (slice->cls_ops->clo_delete != NULL)
817 				slice->cls_ops->clo_delete(env, slice);
818 		}
819 		/*
820 		 * From now on, no new references to this lock can be acquired
821 		 * by layer-specific means (like a pointer from struct
822 		 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
823 		 * lov).
824 		 *
825 		 * Lock will be finally freed in cl_lock_put() when last of
826 		 * existing references goes away.
827 		 */
828 	}
829 }
830 
831 /**
832  * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
833  * top-lock (nesting == 0) accounts for this modification in the per-thread
834  * debugging counters. Sub-lock holds can be released by a thread different
835  * from one that acquired it.
836  */
837 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
838 			     int delta)
839 {
840 	struct cl_thread_counters *counters;
841 	enum clt_nesting_level     nesting;
842 
843 	lock->cll_holds += delta;
844 	nesting = cl_lock_nesting(lock);
845 	if (nesting == CNL_TOP) {
846 		counters = &cl_env_info(env)->clt_counters[CNL_TOP];
847 		counters->ctc_nr_held += delta;
848 		LASSERT(counters->ctc_nr_held >= 0);
849 	}
850 }
851 
852 /**
853  * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
854  * cl_lock_hold_mod() for the explanation of the debugging code.
855  */
856 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
857 			     int delta)
858 {
859 	struct cl_thread_counters *counters;
860 	enum clt_nesting_level     nesting;
861 
862 	lock->cll_users += delta;
863 	nesting = cl_lock_nesting(lock);
864 	if (nesting == CNL_TOP) {
865 		counters = &cl_env_info(env)->clt_counters[CNL_TOP];
866 		counters->ctc_nr_used += delta;
867 		LASSERT(counters->ctc_nr_used >= 0);
868 	}
869 }
870 
871 void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
872 			  const char *scope, const void *source)
873 {
874 	LINVRNT(cl_lock_is_mutexed(lock));
875 	LINVRNT(cl_lock_invariant(env, lock));
876 	LASSERT(lock->cll_holds > 0);
877 
878 	cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
879 	lu_ref_del(&lock->cll_holders, scope, source);
880 	cl_lock_hold_mod(env, lock, -1);
881 	if (lock->cll_holds == 0) {
882 		CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
883 		if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
884 		    lock->cll_descr.cld_mode == CLM_GROUP ||
885 		    lock->cll_state != CLS_CACHED)
886 			/*
887 			 * If lock is still phantom or grouplock when user is
888 			 * done with it---destroy the lock.
889 			 */
890 			lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
891 		if (lock->cll_flags & CLF_CANCELPEND) {
892 			lock->cll_flags &= ~CLF_CANCELPEND;
893 			cl_lock_cancel0(env, lock);
894 		}
895 		if (lock->cll_flags & CLF_DOOMED) {
896 			/* no longer doomed: it's dead... Jim. */
897 			lock->cll_flags &= ~CLF_DOOMED;
898 			cl_lock_delete0(env, lock);
899 		}
900 	}
901 }
902 EXPORT_SYMBOL(cl_lock_hold_release);
903 
904 /**
905  * Waits until lock state is changed.
906  *
907  * This function is called with cl_lock mutex locked, atomically releases
908  * mutex and goes to sleep, waiting for a lock state change (signaled by
909  * cl_lock_signal()), and re-acquires the mutex before return.
910  *
911  * This function is used to wait until lock state machine makes some progress
912  * and to emulate synchronous operations on top of asynchronous lock
913  * interface.
914  *
915  * \retval -EINTR wait was interrupted
916  *
917  * \retval 0 wait wasn't interrupted
918  *
919  * \pre cl_lock_is_mutexed(lock)
920  *
921  * \see cl_lock_signal()
922  */
923 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
924 {
925 	wait_queue_t waiter;
926 	sigset_t blocked;
927 	int result;
928 
929 	LINVRNT(cl_lock_is_mutexed(lock));
930 	LINVRNT(cl_lock_invariant(env, lock));
931 	LASSERT(lock->cll_depth == 1);
932 	LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
933 
934 	cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
935 	result = lock->cll_error;
936 	if (result == 0) {
937 		/* To avoid being interrupted by the 'non-fatal' signals
938 		 * (SIGCHLD, for instance), we'd block them temporarily.
939 		 * LU-305 */
940 		blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
941 
942 		init_waitqueue_entry(&waiter, current);
943 		add_wait_queue(&lock->cll_wq, &waiter);
944 		set_current_state(TASK_INTERRUPTIBLE);
945 		cl_lock_mutex_put(env, lock);
946 
947 		LASSERT(cl_lock_nr_mutexed(env) == 0);
948 
949 		/* Returning ERESTARTSYS instead of EINTR so syscalls
950 		 * can be restarted if signals are pending here */
951 		result = -ERESTARTSYS;
952 		if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
953 			schedule();
954 			if (!cfs_signal_pending())
955 				result = 0;
956 		}
957 
958 		cl_lock_mutex_get(env, lock);
959 		set_current_state(TASK_RUNNING);
960 		remove_wait_queue(&lock->cll_wq, &waiter);
961 
962 		/* Restore old blocked signals */
963 		cfs_restore_sigs(blocked);
964 	}
965 	return result;
966 }
967 EXPORT_SYMBOL(cl_lock_state_wait);
968 
969 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
970 				 enum cl_lock_state state)
971 {
972 	const struct cl_lock_slice *slice;
973 
974 	LINVRNT(cl_lock_is_mutexed(lock));
975 	LINVRNT(cl_lock_invariant(env, lock));
976 
977 	list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
978 		if (slice->cls_ops->clo_state != NULL)
979 			slice->cls_ops->clo_state(env, slice, state);
980 	wake_up_all(&lock->cll_wq);
981 }
982 
983 /**
984  * Notifies waiters that lock state changed.
985  *
986  * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
987  * layers about state change by calling cl_lock_operations::clo_state()
988  * top-to-bottom.
989  */
990 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
991 {
992 	cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
993 	cl_lock_state_signal(env, lock, lock->cll_state);
994 }
995 EXPORT_SYMBOL(cl_lock_signal);
996 
997 /**
998  * Changes lock state.
999  *
1000  * This function is invoked to notify layers that lock state changed, possibly
1001  * as a result of an asynchronous event such as call-back reception.
1002  *
1003  * \post lock->cll_state == state
1004  *
1005  * \see cl_lock_operations::clo_state()
1006  */
1007 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1008 		       enum cl_lock_state state)
1009 {
1010 	LASSERT(lock->cll_state <= state ||
1011 		(lock->cll_state == CLS_CACHED &&
1012 		 (state == CLS_HELD || /* lock found in cache */
1013 		  state == CLS_NEW  ||   /* sub-lock canceled */
1014 		  state == CLS_INTRANSIT)) ||
1015 		/* lock is in transit state */
1016 		lock->cll_state == CLS_INTRANSIT);
1017 
1018 	if (lock->cll_state != state) {
1019 		CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
1020 		CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
1021 
1022 		cl_lock_state_signal(env, lock, state);
1023 		lock->cll_state = state;
1024 	}
1025 }
1026 EXPORT_SYMBOL(cl_lock_state_set);
1027 
1028 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1029 {
1030 	const struct cl_lock_slice *slice;
1031 	int result;
1032 
1033 	do {
1034 		result = 0;
1035 
1036 		LINVRNT(cl_lock_is_mutexed(lock));
1037 		LINVRNT(cl_lock_invariant(env, lock));
1038 		LASSERT(lock->cll_state == CLS_INTRANSIT);
1039 
1040 		result = -ENOSYS;
1041 		list_for_each_entry_reverse(slice, &lock->cll_layers,
1042 						cls_linkage) {
1043 			if (slice->cls_ops->clo_unuse != NULL) {
1044 				result = slice->cls_ops->clo_unuse(env, slice);
1045 				if (result != 0)
1046 					break;
1047 			}
1048 		}
1049 		LASSERT(result != -ENOSYS);
1050 	} while (result == CLO_REPEAT);
1051 
1052 	return result;
1053 }
1054 
1055 /**
1056  * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1057  * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1058  * If @atomic is 1 and any layer fails, the lock is unused again so that
1059  * the whole "use" operation stays atomic.
1060  */
1061 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1062 {
1063 	const struct cl_lock_slice *slice;
1064 	int result;
1065 	enum cl_lock_state state;
1066 
1067 	cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1068 
1069 	LASSERT(lock->cll_state == CLS_CACHED);
1070 	if (lock->cll_error)
1071 		return lock->cll_error;
1072 
1073 	result = -ENOSYS;
1074 	state = cl_lock_intransit(env, lock);
1075 	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1076 		if (slice->cls_ops->clo_use != NULL) {
1077 			result = slice->cls_ops->clo_use(env, slice);
1078 			if (result != 0)
1079 				break;
1080 		}
1081 	}
1082 	LASSERT(result != -ENOSYS);
1083 
1084 	LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1085 		 lock->cll_state);
1086 
1087 	if (result == 0) {
1088 		state = CLS_HELD;
1089 	} else {
1090 		if (result == -ESTALE) {
1091 			/*
1092 			 * ESTALE means a sublock is being cancelled
1093 			 * at this time. Set the lock state to
1094 			 * NEW and ask the caller to repeat.
1095 			 */
1096 			state = CLS_NEW;
1097 			result = CLO_REPEAT;
1098 		}
1099 
1100 		/* @atomic means back-off-on-failure. */
1101 		if (atomic) {
1102 			int rc;
1103 			rc = cl_unuse_try_internal(env, lock);
1104 			/* Vet the results. */
1105 			if (rc < 0 && result > 0)
1106 				result = rc;
1107 		}
1108 
1109 	}
1110 	cl_lock_extransit(env, lock, state);
1111 	return result;
1112 }
1113 EXPORT_SYMBOL(cl_use_try);
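
/*
 * Illustrative sketch (not part of the original source): yanking a cached
 * lock back into use under its mutex.  With atomic == 1 a partial failure
 * leaves the lock unused again instead of half-initialized, which is what
 * cl_lock_peek() above relies on.
 *
 *	if (lock->cll_state == CLS_CACHED) {
 *		int rc = cl_use_try(env, lock, 1);
 *
 *		if (rc == CLO_REPEAT) {
 *			// a sublock was cancelled (-ESTALE); the lock is back
 *			// in CLS_NEW and has to be enqueued again
 *		}
 *	}
 */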
1114 
1115 /**
1116  * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1117  * top-to-bottom.
1118  */
1119 static int cl_enqueue_kick(const struct lu_env *env,
1120 			   struct cl_lock *lock,
1121 			   struct cl_io *io, __u32 flags)
1122 {
1123 	int result;
1124 	const struct cl_lock_slice *slice;
1125 
1126 	result = -ENOSYS;
1127 	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1128 		if (slice->cls_ops->clo_enqueue != NULL) {
1129 			result = slice->cls_ops->clo_enqueue(env,
1130 							     slice, io, flags);
1131 			if (result != 0)
1132 				break;
1133 		}
1134 	}
1135 	LASSERT(result != -ENOSYS);
1136 	return result;
1137 }
1138 
1139 /**
1140  * Tries to enqueue a lock.
1141  *
1142  * This function is called repeatedly by cl_enqueue() until either lock is
1143  * enqueued, or error occurs. This function does not block waiting for
1144  * networking communication to complete.
1145  *
1146  * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1147  *			 lock->cll_state == CLS_HELD)
1148  *
1149  * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1150  * \see cl_lock_state::CLS_ENQUEUED
1151  */
1152 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1153 		   struct cl_io *io, __u32 flags)
1154 {
1155 	int result;
1156 
1157 	cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1158 	do {
1159 		LINVRNT(cl_lock_is_mutexed(lock));
1160 
1161 		result = lock->cll_error;
1162 		if (result != 0)
1163 			break;
1164 
1165 		switch (lock->cll_state) {
1166 		case CLS_NEW:
1167 			cl_lock_state_set(env, lock, CLS_QUEUING);
1168 			/* fall-through */
1169 		case CLS_QUEUING:
1170 			/* kick layers. */
1171 			result = cl_enqueue_kick(env, lock, io, flags);
1172 			/* In the AGL case, cl_lock::cll_state may
1173 			 * already have become CLS_HELD. */
1174 			if (result == 0 && lock->cll_state == CLS_QUEUING)
1175 				cl_lock_state_set(env, lock, CLS_ENQUEUED);
1176 			break;
1177 		case CLS_INTRANSIT:
1178 			LASSERT(cl_lock_is_intransit(lock));
1179 			result = CLO_WAIT;
1180 			break;
1181 		case CLS_CACHED:
1182 			/* yank lock from the cache. */
1183 			result = cl_use_try(env, lock, 0);
1184 			break;
1185 		case CLS_ENQUEUED:
1186 		case CLS_HELD:
1187 			result = 0;
1188 			break;
1189 		default:
1190 		case CLS_FREEING:
1191 			/*
1192 			 * impossible, only held locks with increased
1193 			 * ->cll_holds can be enqueued, and they cannot be
1194 			 * freed.
1195 			 */
1196 			LBUG();
1197 		}
1198 	} while (result == CLO_REPEAT);
1199 	return result;
1200 }
1201 EXPORT_SYMBOL(cl_enqueue_try);
1202 
1203 /**
1204  * Cancel the conflicting lock found during previous enqueue.
1205  *
1206  * \retval 0 conflicting lock has been canceled.
1207  * \retval -ve error code.
1208  */
1209 int cl_lock_enqueue_wait(const struct lu_env *env,
1210 			 struct cl_lock *lock,
1211 			 int keep_mutex)
1212 {
1213 	struct cl_lock  *conflict;
1214 	int	      rc = 0;
1215 
1216 	LASSERT(cl_lock_is_mutexed(lock));
1217 	LASSERT(lock->cll_state == CLS_QUEUING);
1218 	LASSERT(lock->cll_conflict != NULL);
1219 
1220 	conflict = lock->cll_conflict;
1221 	lock->cll_conflict = NULL;
1222 
1223 	cl_lock_mutex_put(env, lock);
1224 	LASSERT(cl_lock_nr_mutexed(env) == 0);
1225 
1226 	cl_lock_mutex_get(env, conflict);
1227 	cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
1228 	cl_lock_cancel(env, conflict);
1229 	cl_lock_delete(env, conflict);
1230 
1231 	while (conflict->cll_state != CLS_FREEING) {
1232 		rc = cl_lock_state_wait(env, conflict);
1233 		if (rc != 0)
1234 			break;
1235 	}
1236 	cl_lock_mutex_put(env, conflict);
1237 	lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1238 	cl_lock_put(env, conflict);
1239 
1240 	if (keep_mutex)
1241 		cl_lock_mutex_get(env, lock);
1242 
1243 	LASSERT(rc <= 0);
1244 	return rc;
1245 }
1246 EXPORT_SYMBOL(cl_lock_enqueue_wait);
1247 
1248 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1249 			     struct cl_io *io, __u32 enqflags)
1250 {
1251 	int result;
1252 
1253 	LINVRNT(cl_lock_is_mutexed(lock));
1254 	LINVRNT(cl_lock_invariant(env, lock));
1255 	LASSERT(lock->cll_holds > 0);
1256 
1257 	cl_lock_user_add(env, lock);
1258 	do {
1259 		result = cl_enqueue_try(env, lock, io, enqflags);
1260 		if (result == CLO_WAIT) {
1261 			if (lock->cll_conflict != NULL)
1262 				result = cl_lock_enqueue_wait(env, lock, 1);
1263 			else
1264 				result = cl_lock_state_wait(env, lock);
1265 			if (result == 0)
1266 				continue;
1267 		}
1268 		break;
1269 	} while (1);
1270 	if (result != 0)
1271 		cl_unuse_try(env, lock);
1272 	LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
1273 		     lock->cll_state == CLS_ENQUEUED ||
1274 		     lock->cll_state == CLS_HELD));
1275 	return result;
1276 }
1277 
1278 /**
1279  * Enqueues a lock.
1280  *
1281  * \pre current thread or io owns a hold on lock.
1282  *
1283  * \post ergo(result == 0, lock->users increased)
1284  * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1285  *			 lock->cll_state == CLS_HELD)
1286  */
1287 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1288 	       struct cl_io *io, __u32 enqflags)
1289 {
1290 	int result;
1291 
1292 	cl_lock_lockdep_acquire(env, lock, enqflags);
1293 	cl_lock_mutex_get(env, lock);
1294 	result = cl_enqueue_locked(env, lock, io, enqflags);
1295 	cl_lock_mutex_put(env, lock);
1296 	if (result != 0)
1297 		cl_lock_lockdep_release(env, lock);
1298 	LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1299 		     lock->cll_state == CLS_HELD));
1300 	return result;
1301 }
1302 EXPORT_SYMBOL(cl_enqueue);
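
/*
 * Illustrative sketch (not part of the original source) of the synchronous
 * locking sequence built from the primitives in this file.  "lck" is assumed
 * to be a lock on which the caller already has a hold; the io, flags and
 * scope/source values are hypothetical.
 *
 *	int rc;
 *
 *	rc = cl_enqueue(env, lck, io, 0);	// CLS_NEW -> ENQUEUED/HELD
 *	if (rc == 0) {
 *		rc = cl_wait(env, lck);		// wait until CLS_HELD
 *		if (rc == 0) {
 *			// ... lock is granted, do the work it protects ...
 *			cl_unuse(env, lck);	// back to CLS_CACHED (or NEW)
 *		}
 *	}
 *	cl_lock_unhold(env, lck, "sketch", current);	// drop the hold
 */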
1303 
1304 /**
1305  * Tries to unlock a lock.
1306  *
1307  * This function is called to release the underlying resource:
1308  * 1. for a top lock, the resources are the sublocks it holds;
1309  * 2. for a sublock, the resource is its reference to the dlmlock.
1310  *
1311  * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1312  *
1313  * \see cl_unuse() cl_lock_operations::clo_unuse()
1314  * \see cl_lock_state::CLS_CACHED
1315  */
1316 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1317 {
1318 	int			 result;
1319 	enum cl_lock_state	  state = CLS_NEW;
1320 
1321 	cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1322 
1323 	if (lock->cll_users > 1) {
1324 		cl_lock_user_del(env, lock);
1325 		return 0;
1326 	}
1327 
1328 	/* Only a lock in CLS_HELD or CLS_ENQUEUED state can hold
1329 	 * underlying resources. */
1330 	if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
1331 		cl_lock_user_del(env, lock);
1332 		return 0;
1333 	}
1334 
1335 	/*
1336 	 * New lock users (->cll_users) no longer prevent unlocking from
1337 	 * proceeding. From this point, the lock eventually reaches
1338 	 * CLS_CACHED, is reinitialized to CLS_NEW, or falls into
1339 	 * CLS_FREEING.
1340 	 */
1341 	state = cl_lock_intransit(env, lock);
1342 
1343 	result = cl_unuse_try_internal(env, lock);
1344 	LASSERT(lock->cll_state == CLS_INTRANSIT);
1345 	LASSERT(result != CLO_WAIT);
1346 	cl_lock_user_del(env, lock);
1347 	if (result == 0 || result == -ESTALE) {
1348 		/*
1349 		 * Return lock back to the cache. This is the only
1350 		 * place where lock is moved into CLS_CACHED state.
1351 		 *
1352 		 * If one of ->clo_unuse() methods returned -ESTALE, lock
1353 		 * cannot be placed into cache and has to be
1354 		 * re-initialized. This happens e.g., when a sub-lock was
1355 		 * canceled while unlocking was in progress.
1356 		 */
1357 		if (state == CLS_HELD && result == 0)
1358 			state = CLS_CACHED;
1359 		else
1360 			state = CLS_NEW;
1361 		cl_lock_extransit(env, lock, state);
1362 
1363 		/*
1364 		 * Hide -ESTALE error.
1365 		 * Suppose the lock is a glimpse lock with multiple stripes,
1366 		 * one of its sublocks returned -ENAVAIL, and the other
1367 		 * sublocks matched existing write locks. In this case we
1368 		 * cannot set this lock to error, because otherwise some of
1369 		 * its sublocks may not be cancelled, and some dirty pages
1370 		 * would never be written to the OSTs. -jay
1371 		 */
1372 		result = 0;
1373 	} else {
1374 		CERROR("result = %d, this is unlikely!\n", result);
1375 		state = CLS_NEW;
1376 		cl_lock_extransit(env, lock, state);
1377 	}
1378 	return result ?: lock->cll_error;
1379 }
1380 EXPORT_SYMBOL(cl_unuse_try);
1381 
1382 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1383 {
1384 	int result;
1385 
1386 	result = cl_unuse_try(env, lock);
1387 	if (result)
1388 		CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1389 }
1390 
1391 /**
1392  * Unlocks a lock.
1393  */
1394 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1395 {
1396 	cl_lock_mutex_get(env, lock);
1397 	cl_unuse_locked(env, lock);
1398 	cl_lock_mutex_put(env, lock);
1399 	cl_lock_lockdep_release(env, lock);
1400 }
1401 EXPORT_SYMBOL(cl_unuse);
1402 
1403 /**
1404  * Tries to wait for a lock.
1405  *
1406  * This function is called repeatedly by cl_wait() until either lock is
1407  * granted, or error occurs. This function does not block waiting for network
1408  * communication to complete.
1409  *
1410  * \see cl_wait() cl_lock_operations::clo_wait()
1411  * \see cl_lock_state::CLS_HELD
1412  */
1413 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1414 {
1415 	const struct cl_lock_slice *slice;
1416 	int			 result;
1417 
1418 	cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1419 	do {
1420 		LINVRNT(cl_lock_is_mutexed(lock));
1421 		LINVRNT(cl_lock_invariant(env, lock));
1422 		LASSERTF(lock->cll_state == CLS_QUEUING ||
1423 			 lock->cll_state == CLS_ENQUEUED ||
1424 			 lock->cll_state == CLS_HELD ||
1425 			 lock->cll_state == CLS_INTRANSIT,
1426 			 "lock state: %d\n", lock->cll_state);
1427 		LASSERT(lock->cll_users > 0);
1428 		LASSERT(lock->cll_holds > 0);
1429 
1430 		result = lock->cll_error;
1431 		if (result != 0)
1432 			break;
1433 
1434 		if (cl_lock_is_intransit(lock)) {
1435 			result = CLO_WAIT;
1436 			break;
1437 		}
1438 
1439 		if (lock->cll_state == CLS_HELD)
1440 			/* nothing to do */
1441 			break;
1442 
1443 		result = -ENOSYS;
1444 		list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1445 			if (slice->cls_ops->clo_wait != NULL) {
1446 				result = slice->cls_ops->clo_wait(env, slice);
1447 				if (result != 0)
1448 					break;
1449 			}
1450 		}
1451 		LASSERT(result != -ENOSYS);
1452 		if (result == 0) {
1453 			LASSERT(lock->cll_state != CLS_INTRANSIT);
1454 			cl_lock_state_set(env, lock, CLS_HELD);
1455 		}
1456 	} while (result == CLO_REPEAT);
1457 	return result;
1458 }
1459 EXPORT_SYMBOL(cl_wait_try);
1460 
1461 /**
1462  * Waits until enqueued lock is granted.
1463  *
1464  * \pre current thread or io owns a hold on the lock
1465  * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1466  *			lock->cll_state == CLS_HELD)
1467  *
1468  * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1469  */
1470 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1471 {
1472 	int result;
1473 
1474 	cl_lock_mutex_get(env, lock);
1475 
1476 	LINVRNT(cl_lock_invariant(env, lock));
1477 	LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1478 		 "Wrong state %d \n", lock->cll_state);
1479 	LASSERT(lock->cll_holds > 0);
1480 
1481 	do {
1482 		result = cl_wait_try(env, lock);
1483 		if (result == CLO_WAIT) {
1484 			result = cl_lock_state_wait(env, lock);
1485 			if (result == 0)
1486 				continue;
1487 		}
1488 		break;
1489 	} while (1);
1490 	if (result < 0) {
1491 		cl_unuse_try(env, lock);
1492 		cl_lock_lockdep_release(env, lock);
1493 	}
1494 	cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1495 	cl_lock_mutex_put(env, lock);
1496 	LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1497 	return result;
1498 }
1499 EXPORT_SYMBOL(cl_wait);
1500 
1501 /**
1502  * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
1503  * value.
1504  */
1505 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1506 {
1507 	const struct cl_lock_slice *slice;
1508 	unsigned long pound;
1509 	unsigned long ounce;
1510 
1511 	LINVRNT(cl_lock_is_mutexed(lock));
1512 	LINVRNT(cl_lock_invariant(env, lock));
1513 
1514 	pound = 0;
1515 	list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1516 		if (slice->cls_ops->clo_weigh != NULL) {
1517 			ounce = slice->cls_ops->clo_weigh(env, slice);
1518 			pound += ounce;
1519 			if (pound < ounce) /* over-weight^Wflow */
1520 				pound = ~0UL;
1521 		}
1522 	}
1523 	return pound;
1524 }
1525 EXPORT_SYMBOL(cl_lock_weigh);
1526 
1527 /**
1528  * Notifies layers that lock description changed.
1529  *
1530  * The server can grant the client a lock different from the one that was
1531  * requested (e.g., larger in extent). This method is called when the
1532  * actually granted lock description becomes known, to let layers accommodate
1533  * the changed lock description.
1534  *
1535  * \see cl_lock_operations::clo_modify()
1536  */
1537 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1538 		   const struct cl_lock_descr *desc)
1539 {
1540 	const struct cl_lock_slice *slice;
1541 	struct cl_object	   *obj = lock->cll_descr.cld_obj;
1542 	struct cl_object_header    *hdr = cl_object_header(obj);
1543 	int result;
1544 
1545 	cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1546 	/* don't allow object to change */
1547 	LASSERT(obj == desc->cld_obj);
1548 	LINVRNT(cl_lock_is_mutexed(lock));
1549 	LINVRNT(cl_lock_invariant(env, lock));
1550 
1551 	list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1552 		if (slice->cls_ops->clo_modify != NULL) {
1553 			result = slice->cls_ops->clo_modify(env, slice, desc);
1554 			if (result != 0)
1555 				return result;
1556 		}
1557 	}
1558 	CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1559 		      PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1560 	/*
1561 	 * Just replace description in place. Nothing more is needed for
1562 	 * now. If locks were indexed according to their extent and/or mode,
1563 	 * that index would have to be updated here.
1564 	 */
1565 	spin_lock(&hdr->coh_lock_guard);
1566 	lock->cll_descr = *desc;
1567 	spin_unlock(&hdr->coh_lock_guard);
1568 	return 0;
1569 }
1570 EXPORT_SYMBOL(cl_lock_modify);
1571 
1572 /**
1573  * Initializes lock closure with a given origin.
1574  *
1575  * \see cl_lock_closure
1576  */
1577 void cl_lock_closure_init(const struct lu_env *env,
1578 			  struct cl_lock_closure *closure,
1579 			  struct cl_lock *origin, int wait)
1580 {
1581 	LINVRNT(cl_lock_is_mutexed(origin));
1582 	LINVRNT(cl_lock_invariant(env, origin));
1583 
1584 	INIT_LIST_HEAD(&closure->clc_list);
1585 	closure->clc_origin = origin;
1586 	closure->clc_wait   = wait;
1587 	closure->clc_nr     = 0;
1588 }
1589 EXPORT_SYMBOL(cl_lock_closure_init);
1590 
1591 /**
1592  * Builds a closure of \a lock.
1593  *
1594  * Building of a closure consists of adding initial lock (\a lock) into it,
1595  * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1596  * methods might call cl_lock_closure_build() recursively again, adding more
1597  * locks to the closure, etc.
1598  *
1599  * \see cl_lock_closure
1600  */
1601 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1602 			  struct cl_lock_closure *closure)
1603 {
1604 	const struct cl_lock_slice *slice;
1605 	int result;
1606 
1607 	LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1608 	LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1609 
1610 	result = cl_lock_enclosure(env, lock, closure);
1611 	if (result == 0) {
1612 		list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1613 			if (slice->cls_ops->clo_closure != NULL) {
1614 				result = slice->cls_ops->clo_closure(env, slice,
1615 								     closure);
1616 				if (result != 0)
1617 					break;
1618 			}
1619 		}
1620 	}
1621 	if (result != 0)
1622 		cl_lock_disclosure(env, closure);
1623 	return result;
1624 }
1625 EXPORT_SYMBOL(cl_lock_closure_build);
1626 
1627 /**
1628  * Adds new lock to a closure.
1629  *
1630  * Try-locks \a lock and if succeeded, adds it to the closure (never more than
1631  * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
1632  * until next try-lock is likely to succeed.
1633  */
1634 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1635 		      struct cl_lock_closure *closure)
1636 {
1637 	int result = 0;
1638 
1639 	cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1640 	if (!cl_lock_mutex_try(env, lock)) {
1641 		/*
1642 		 * If lock->cll_inclosure is not empty, lock is already in
1643 		 * this closure.
1644 		 */
1645 		if (list_empty(&lock->cll_inclosure)) {
1646 			cl_lock_get_trust(lock);
1647 			lu_ref_add(&lock->cll_reference, "closure", closure);
1648 			list_add(&lock->cll_inclosure, &closure->clc_list);
1649 			closure->clc_nr++;
1650 		} else
1651 			cl_lock_mutex_put(env, lock);
1652 		result = 0;
1653 	} else {
1654 		cl_lock_disclosure(env, closure);
1655 		if (closure->clc_wait) {
1656 			cl_lock_get_trust(lock);
1657 			lu_ref_add(&lock->cll_reference, "closure-w", closure);
1658 			cl_lock_mutex_put(env, closure->clc_origin);
1659 
1660 			LASSERT(cl_lock_nr_mutexed(env) == 0);
1661 			cl_lock_mutex_get(env, lock);
1662 			cl_lock_mutex_put(env, lock);
1663 
1664 			cl_lock_mutex_get(env, closure->clc_origin);
1665 			lu_ref_del(&lock->cll_reference, "closure-w", closure);
1666 			cl_lock_put(env, lock);
1667 		}
1668 		result = CLO_REPEAT;
1669 	}
1670 	return result;
1671 }
1672 EXPORT_SYMBOL(cl_lock_enclosure);
1673 
1674 /** Releases mutexes of enclosed locks. */
1675 void cl_lock_disclosure(const struct lu_env *env,
1676 			struct cl_lock_closure *closure)
1677 {
1678 	struct cl_lock *scan;
1679 	struct cl_lock *temp;
1680 
1681 	cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1682 	list_for_each_entry_safe(scan, temp, &closure->clc_list,
1683 				 cll_inclosure) {
1684 		list_del_init(&scan->cll_inclosure);
1685 		cl_lock_mutex_put(env, scan);
1686 		lu_ref_del(&scan->cll_reference, "closure", closure);
1687 		cl_lock_put(env, scan);
1688 		closure->clc_nr--;
1689 	}
1690 	LASSERT(closure->clc_nr == 0);
1691 }
1692 EXPORT_SYMBOL(cl_lock_disclosure);
1693 
1694 /** Finalizes a closure. */
1695 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1696 {
1697 	LASSERT(closure->clc_nr == 0);
1698 	LASSERT(list_empty(&closure->clc_list));
1699 }
1700 EXPORT_SYMBOL(cl_lock_closure_fini);
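
/*
 * Example (illustrative sketch, not compiled as part of this file): the usual
 * closure life-cycle, assuming \a origin is mutexed by the caller and \a lock
 * is the lock whose closure is needed. With clc_wait set, a CLO_REPEAT result
 * means the origin mutex was temporarily dropped, so the state may have
 * changed and the caller retries:
 *
 *	struct cl_lock_closure closure;
 *	int rc;
 *
 *	cl_lock_closure_init(env, &closure, origin, 1);
 *	do {
 *		rc = cl_lock_closure_build(env, lock, &closure);
 *		if (rc == 0) {
 *			... every lock on closure.clc_list is mutexed here ...
 *			cl_lock_disclosure(env, &closure);
 *		}
 *	} while (rc == CLO_REPEAT);
 *	cl_lock_closure_fini(&closure);
 */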
1701 
1702 /**
1703  * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
1704  * destroyed, then destroys the lock. If there are holds on the lock,
1705  * destruction is postponed until all holds are released. This is called when
1706  * a decision is made to destroy the lock in the future, e.g., when a blocking
1707  * AST is received on it, or a fatal communication error happens.
1708  *
1709  * Caller must have a reference on this lock to prevent a situation where the
1710  * deleted lock lingers in memory indefinitely because nobody calls
1711  * cl_lock_put() to finish it.
1712  *
1713  * \pre atomic_read(&lock->cll_ref) > 0
1714  * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1715  *	   cl_lock_nr_mutexed(env) == 1)
1716  *      [i.e., if a top-lock is deleted, mutexes of no other locks can be
1717  *      held, as deletion of sub-locks might require releasing a top-lock
1718  *      mutex]
1719  *
1720  * \see cl_lock_operations::clo_delete()
1721  * \see cl_lock::cll_holds
1722  */
1723 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1724 {
1725 	LINVRNT(cl_lock_is_mutexed(lock));
1726 	LINVRNT(cl_lock_invariant(env, lock));
1727 	LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1728 		     cl_lock_nr_mutexed(env) == 1));
1729 
1730 	cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1731 	if (lock->cll_holds == 0)
1732 		cl_lock_delete0(env, lock);
1733 	else
1734 		lock->cll_flags |= CLF_DOOMED;
1735 }
1736 EXPORT_SYMBOL(cl_lock_delete);
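
/*
 * Example (illustrative sketch, not compiled as part of this file): a typical
 * disposal path, e.g. after a blocking AST, assuming the caller already owns
 * a reference on \a lock (as the precondition above requires). If other code
 * still holds the lock, cl_lock_delete() merely sets CLF_DOOMED and the
 * actual destruction happens once the last hold is released:
 *
 *	cl_lock_mutex_get(env, lock);
 *	cl_lock_cancel(env, lock);
 *	cl_lock_delete(env, lock);
 *	cl_lock_mutex_put(env, lock);
 *	cl_lock_put(env, lock);
 */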
1737 
1738 /**
1739  * Marks the lock as irrecoverably failed and marks it for destruction. This
1740  * happens when, e.g., the server fails to grant a lock to us, or a network
1741  * timeout occurs.
1742  *
1743  * \pre atomic_read(&lock->cll_ref) > 0
1744  *
1745  * \see cl_lock_delete()
1746  * \see cl_lock::cll_holds
1747  */
1748 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1749 {
1750 	LINVRNT(cl_lock_is_mutexed(lock));
1751 	LINVRNT(cl_lock_invariant(env, lock));
1752 
1753 	if (lock->cll_error == 0 && error != 0) {
1754 		cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1755 		lock->cll_error = error;
1756 		cl_lock_signal(env, lock);
1757 		cl_lock_cancel(env, lock);
1758 		cl_lock_delete(env, lock);
1759 	}
1760 }
1761 EXPORT_SYMBOL(cl_lock_error);
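
/*
 * Example (illustrative sketch, not compiled as part of this file): a layer
 * that gets a failed enqueue reply would typically record the error under the
 * lock mutex; cl_lock_error() then wakes waiters and schedules cancellation
 * and destruction by itself. "reply_rc" is a hypothetical local variable:
 *
 *	cl_lock_mutex_get(env, lock);
 *	if (reply_rc < 0)
 *		cl_lock_error(env, lock, reply_rc);
 *	cl_lock_mutex_put(env, lock);
 */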
1762 
1763 /**
1764  * Cancels this lock. Notifies layers
1765  * (bottom-to-top) that the lock is being cancelled, then cancels the lock.
1766  * If there are holds on the lock, cancellation is postponed until
1767  * all holds are released.
1768  *
1769  * Cancellation notification is delivered to layers at most once.
1770  *
1771  * \see cl_lock_operations::clo_cancel()
1772  * \see cl_lock::cll_holds
1773  */
1774 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1775 {
1776 	LINVRNT(cl_lock_is_mutexed(lock));
1777 	LINVRNT(cl_lock_invariant(env, lock));
1778 
1779 	cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1780 	if (lock->cll_holds == 0)
1781 		cl_lock_cancel0(env, lock);
1782 	else
1783 		lock->cll_flags |= CLF_CANCELPEND;
1784 }
1785 EXPORT_SYMBOL(cl_lock_cancel);
1786 
1787 /**
1788  * Finds an existing lock covering the given index, optionally different from
1789  * a given \a except lock.
1790  */
1791 struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
1792 				 struct cl_object *obj, pgoff_t index,
1793 				 struct cl_lock *except,
1794 				 int pending, int canceld)
1795 {
1796 	struct cl_object_header *head;
1797 	struct cl_lock	  *scan;
1798 	struct cl_lock	  *lock;
1799 	struct cl_lock_descr    *need;
1800 
1801 	head = cl_object_header(obj);
1802 	need = &cl_env_info(env)->clt_descr;
1803 	lock = NULL;
1804 
1805 	need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1806 				    * not PHANTOM */
1807 	need->cld_start = need->cld_end = index;
1808 	need->cld_enq_flags = 0;
1809 
1810 	spin_lock(&head->coh_lock_guard);
1811 	/* It is fine to match any group lock since there could be only one
1812 	 * with a unique gid and it conflicts with all other lock modes too */
1813 	list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1814 		if (scan != except &&
1815 		    (scan->cll_descr.cld_mode == CLM_GROUP ||
1816 		    cl_lock_ext_match(&scan->cll_descr, need)) &&
1817 		    scan->cll_state >= CLS_HELD &&
1818 		    scan->cll_state < CLS_FREEING &&
1819 		    /*
1820 		     * This check is racy as the lock can be canceled right
1821 		     * after it is done, but this is fine because the page
1822 		     * already exists.
1823 		     */
1824 		    (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1825 		    (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1826 			/* Don't increase cs_hit here since this
1827 			 * is just a helper function. */
1828 			cl_lock_get_trust(scan);
1829 			lock = scan;
1830 			break;
1831 		}
1832 	}
1833 	spin_unlock(&head->coh_lock_guard);
1834 	return lock;
1835 }
1836 EXPORT_SYMBOL(cl_lock_at_pgoff);
1837 
1838 /**
1839  * Calculate the page offset at the layer of @lock.
1840  * At the time of this writing, @page is the top page and @lock is a sub-lock.
1841  */
1842 static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
1843 {
1844 	struct lu_device_type *dtype;
1845 	const struct cl_page_slice *slice;
1846 
1847 	dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
1848 	slice = cl_page_at(page, dtype);
1849 	LASSERT(slice != NULL);
1850 	return slice->cpl_page->cp_index;
1851 }
1852 
1853 /**
1854  * Check whether page @page is covered by another lock; if not, discard it.
1855  */
1856 static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
1857 				struct cl_page *page, void *cbdata)
1858 {
1859 	struct cl_thread_info *info = cl_env_info(env);
1860 	struct cl_lock *lock = cbdata;
1861 	pgoff_t index = pgoff_at_lock(page, lock);
1862 
1863 	if (index >= info->clt_fn_index) {
1864 		struct cl_lock *tmp;
1865 
1866 		/* refresh non-overlapped index */
1867 		tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
1868 					lock, 1, 0);
1869 		if (tmp != NULL) {
1870 			/* Cache the first-non-overlapped index so as to skip
1871 			 * all pages within [index, clt_fn_index). This
1872 			 * is safe because if tmp lock is canceled, it will
1873 			 * discard these pages. */
1874 			info->clt_fn_index = tmp->cll_descr.cld_end + 1;
1875 			if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
1876 				info->clt_fn_index = CL_PAGE_EOF;
1877 			cl_lock_put(env, tmp);
1878 		} else if (cl_page_own(env, io, page) == 0) {
1879 			/* discard the page */
1880 			cl_page_unmap(env, io, page);
1881 			cl_page_discard(env, io, page);
1882 			cl_page_disown(env, io, page);
1883 		} else {
1884 			LASSERT(page->cp_state == CPS_FREEING);
1885 		}
1886 	}
1887 
1888 	info->clt_next_index = index + 1;
1889 	return CLP_GANG_OKAY;
1890 }
1891 
1892 static int discard_cb(const struct lu_env *env, struct cl_io *io,
1893 		      struct cl_page *page, void *cbdata)
1894 {
1895 	struct cl_thread_info *info = cl_env_info(env);
1896 	struct cl_lock *lock   = cbdata;
1897 
1898 	LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
1899 	KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
1900 		      !PageWriteback(cl_page_vmpage(env, page))));
1901 	KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
1902 		      !PageDirty(cl_page_vmpage(env, page))));
1903 
1904 	info->clt_next_index = pgoff_at_lock(page, lock) + 1;
1905 	if (cl_page_own(env, io, page) == 0) {
1906 		/* discard the page */
1907 		cl_page_unmap(env, io, page);
1908 		cl_page_discard(env, io, page);
1909 		cl_page_disown(env, io, page);
1910 	} else {
1911 		LASSERT(page->cp_state == CPS_FREEING);
1912 	}
1913 
1914 	return CLP_GANG_OKAY;
1915 }
1916 
1917 /**
1918  * Discard pages protected by the given lock. This function traverses the
1919  * radix tree to find all covered pages and discards them. If a page is
1920  * covered by another lock, it remains in the cache.
1921  *
1922  * If an error happens at any step, the process continues anyway (the
1923  * reasoning being that lock cancellation cannot be delayed indefinitely).
1924  */
1925 int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
1926 {
1927 	struct cl_thread_info *info  = cl_env_info(env);
1928 	struct cl_io	  *io    = &info->clt_io;
1929 	struct cl_lock_descr  *descr = &lock->cll_descr;
1930 	cl_page_gang_cb_t      cb;
1931 	int res;
1932 	int result;
1933 
1934 	LINVRNT(cl_lock_invariant(env, lock));
1935 
1936 	io->ci_obj = cl_object_top(descr->cld_obj);
1937 	io->ci_ignore_layout = 1;
1938 	result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
1939 	if (result != 0)
1940 		goto out;
1941 
1942 	cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
1943 	info->clt_fn_index = info->clt_next_index = descr->cld_start;
1944 	do {
1945 		res = cl_page_gang_lookup(env, descr->cld_obj, io,
1946 					  info->clt_next_index, descr->cld_end,
1947 					  cb, (void *)lock);
1948 		if (info->clt_next_index > descr->cld_end)
1949 			break;
1950 
1951 		if (res == CLP_GANG_RESCHED)
1952 			cond_resched();
1953 	} while (res != CLP_GANG_OKAY);
1954 out:
1955 	cl_io_fini(env, io);
1956 	return result;
1957 }
1958 EXPORT_SYMBOL(cl_lock_discard_pages);
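
/*
 * Example (illustrative sketch, not compiled as part of this file): a
 * cancellation path would typically discard the pages covered by a write
 * extent before the lock goes away; as noted above, a failure is reported
 * but does not abort the cancellation. "rc" is a hypothetical local:
 *
 *	if (lock->cll_descr.cld_mode >= CLM_WRITE) {
 *		rc = cl_lock_discard_pages(env, lock);
 *		if (rc != 0)
 *			CL_LOCK_DEBUG(D_ERROR, env, lock,
 *				      "page discard failed: %d\n", rc);
 *	}
 */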
1959 
1960 /**
1961  * Eliminate all locks for a given object.
1962  *
1963  * Caller has to guarantee that no lock is in active use.
1964  *
1965  * \param cancel when this is set, cl_locks_prune() cancels locks before
1966  *	       destroying them.
1967  */
1968 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
1969 {
1970 	struct cl_object_header *head;
1971 	struct cl_lock	  *lock;
1972 
1973 	head = cl_object_header(obj);
1974 	/*
1975 	 * If locks are destroyed without cancellation, all pages must be
1976 	 * already destroyed (as otherwise they will be left unprotected).
1977 	 */
1978 	LASSERT(ergo(!cancel,
1979 		     head->coh_tree.rnode == NULL && head->coh_pages == 0));
1980 
1981 	spin_lock(&head->coh_lock_guard);
1982 	while (!list_empty(&head->coh_locks)) {
1983 		lock = container_of(head->coh_locks.next,
1984 				    struct cl_lock, cll_linkage);
1985 		cl_lock_get_trust(lock);
1986 		spin_unlock(&head->coh_lock_guard);
1987 		lu_ref_add(&lock->cll_reference, "prune", current);
1988 
1989 again:
1990 		cl_lock_mutex_get(env, lock);
1991 		if (lock->cll_state < CLS_FREEING) {
1992 			LASSERT(lock->cll_users <= 1);
1993 			if (unlikely(lock->cll_users == 1)) {
1994 				struct l_wait_info lwi = { 0 };
1995 
1996 				cl_lock_mutex_put(env, lock);
1997 				l_wait_event(lock->cll_wq,
1998 					     lock->cll_users == 0,
1999 					     &lwi);
2000 				goto again;
2001 			}
2002 
2003 			if (cancel)
2004 				cl_lock_cancel(env, lock);
2005 			cl_lock_delete(env, lock);
2006 		}
2007 		cl_lock_mutex_put(env, lock);
2008 		lu_ref_del(&lock->cll_reference, "prune", current);
2009 		cl_lock_put(env, lock);
2010 		spin_lock(&head->coh_lock_guard);
2011 	}
2012 	spin_unlock(&head->coh_lock_guard);
2013 }
2014 EXPORT_SYMBOL(cl_locks_prune);
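
/*
 * Example (illustrative sketch, not compiled as part of this file): the
 * object tear-down path normally prunes with cancellation, so that pages
 * still protected by the locks are dealt with first:
 *
 *	cl_locks_prune(env, obj, 1);
 *
 * Passing cancel == 0 is only legal once every page of the object has already
 * been destroyed, as the assertion at the top of the function verifies.
 */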
2015 
2016 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2017 					  const struct cl_io *io,
2018 					  const struct cl_lock_descr *need,
2019 					  const char *scope, const void *source)
2020 {
2021 	struct cl_lock *lock;
2022 
2023 	while (1) {
2024 		lock = cl_lock_find(env, io, need);
2025 		if (IS_ERR(lock))
2026 			break;
2027 		cl_lock_mutex_get(env, lock);
2028 		if (lock->cll_state < CLS_FREEING &&
2029 		    !(lock->cll_flags & CLF_CANCELLED)) {
2030 			cl_lock_hold_mod(env, lock, +1);
2031 			lu_ref_add(&lock->cll_holders, scope, source);
2032 			lu_ref_add(&lock->cll_reference, scope, source);
2033 			break;
2034 		}
2035 		cl_lock_mutex_put(env, lock);
2036 		cl_lock_put(env, lock);
2037 	}
2038 	return lock;
2039 }
2040 
2041 /**
2042  * Returns a lock matching \a need description with a reference and a hold on
2043  * it.
2044  *
2045  * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2046  * guarantees that the lock is not in the CLS_FREEING state on return.
2047  */
2048 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2049 			     const struct cl_lock_descr *need,
2050 			     const char *scope, const void *source)
2051 {
2052 	struct cl_lock *lock;
2053 
2054 	lock = cl_lock_hold_mutex(env, io, need, scope, source);
2055 	if (!IS_ERR(lock))
2056 		cl_lock_mutex_put(env, lock);
2057 	return lock;
2058 }
2059 EXPORT_SYMBOL(cl_lock_hold);
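
/*
 * Example (illustrative sketch, not compiled as part of this file): unlike
 * cl_lock_request(), cl_lock_hold() does not enqueue the lock; it only pins
 * it so that it cannot enter CLS_FREEING while the hold is owned. Assuming
 * \a need has already been filled in, the hold is paired with
 * cl_lock_release():
 *
 *	lock = cl_lock_hold(env, io, need, "scan", current);
 *	if (!IS_ERR(lock)) {
 *		... inspect or wait on the lock ...
 *		cl_lock_release(env, lock, "scan", current);
 *	}
 */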
2060 
2061 /**
2062  * Main high-level entry point of the cl_lock interface that finds an existing
2063  * lock or enqueues a new lock matching the given description.
2064  */
2065 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2066 				const struct cl_lock_descr *need,
2067 				const char *scope, const void *source)
2068 {
2069 	struct cl_lock       *lock;
2070 	int		   rc;
2071 	__u32		 enqflags = need->cld_enq_flags;
2072 
2073 	do {
2074 		lock = cl_lock_hold_mutex(env, io, need, scope, source);
2075 		if (IS_ERR(lock))
2076 			break;
2077 
2078 		rc = cl_enqueue_locked(env, lock, io, enqflags);
2079 		if (rc == 0) {
2080 			if (cl_lock_fits_into(env, lock, need, io)) {
2081 				if (!(enqflags & CEF_AGL)) {
2082 					cl_lock_mutex_put(env, lock);
2083 					cl_lock_lockdep_acquire(env, lock,
2084 								enqflags);
2085 					break;
2086 				}
2087 				rc = 1;
2088 			}
2089 			cl_unuse_locked(env, lock);
2090 		}
2091 		cl_lock_trace(D_DLMTRACE, env,
2092 			      rc <= 0 ? "enqueue failed" : "agl succeed", lock);
2093 		cl_lock_hold_release(env, lock, scope, source);
2094 		cl_lock_mutex_put(env, lock);
2095 		lu_ref_del(&lock->cll_reference, scope, source);
2096 		cl_lock_put(env, lock);
2097 		if (rc > 0) {
2098 			LASSERT(enqflags & CEF_AGL);
2099 			lock = NULL;
2100 		} else if (rc != 0) {
2101 			lock = ERR_PTR(rc);
2102 		}
2103 	} while (rc == 0);
2104 	return lock;
2105 }
2106 EXPORT_SYMBOL(cl_lock_request);
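
/*
 * Example (illustrative sketch, not compiled as part of this file): a common
 * calling pattern for an I/O that needs a write extent lock. The object
 * "obj", the page range [start_index, end_index] and the locals are
 * hypothetical; the unuse/release pairing below follows the pattern used by
 * the cl_io code and is an assumption of this sketch, not a definitive
 * recipe. With CEF_AGL set, a NULL result is legitimate:
 *
 *	struct cl_lock_descr *need = &cl_env_info(env)->clt_descr;
 *	struct cl_lock *lock;
 *
 *	need->cld_obj       = obj;
 *	need->cld_mode      = CLM_WRITE;
 *	need->cld_start     = start_index;
 *	need->cld_end       = end_index;
 *	need->cld_enq_flags = 0;
 *
 *	lock = cl_lock_request(env, io, need, "my-io", io);
 *	if (IS_ERR(lock))
 *		return PTR_ERR(lock);
 *	if (lock != NULL) {
 *		... do the I/O under the lock ...
 *		cl_unuse(env, lock);
 *		cl_lock_release(env, lock, "my-io", io);
 *	}
 */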
2107 
2108 /**
2109  * Adds a hold to a known lock.
2110  */
2111 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2112 		      const char *scope, const void *source)
2113 {
2114 	LINVRNT(cl_lock_is_mutexed(lock));
2115 	LINVRNT(cl_lock_invariant(env, lock));
2116 	LASSERT(lock->cll_state != CLS_FREEING);
2117 
2118 	cl_lock_hold_mod(env, lock, +1);
2119 	cl_lock_get(lock);
2120 	lu_ref_add(&lock->cll_holders, scope, source);
2121 	lu_ref_add(&lock->cll_reference, scope, source);
2122 }
2123 EXPORT_SYMBOL(cl_lock_hold_add);
2124 
2125 /**
2126  * Releases a hold and a reference on a lock on which the caller has acquired
2127  * a mutex.
2128  */
2129 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2130 		    const char *scope, const void *source)
2131 {
2132 	LINVRNT(cl_lock_invariant(env, lock));
2133 	cl_lock_hold_release(env, lock, scope, source);
2134 	lu_ref_del(&lock->cll_reference, scope, source);
2135 	cl_lock_put(env, lock);
2136 }
2137 EXPORT_SYMBOL(cl_lock_unhold);
2138 
2139 /**
2140  * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2141  */
2142 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2143 		     const char *scope, const void *source)
2144 {
2145 	LINVRNT(cl_lock_invariant(env, lock));
2146 	cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2147 	cl_lock_mutex_get(env, lock);
2148 	cl_lock_hold_release(env, lock, scope, source);
2149 	cl_lock_mutex_put(env, lock);
2150 	lu_ref_del(&lock->cll_reference, scope, source);
2151 	cl_lock_put(env, lock);
2152 }
2153 EXPORT_SYMBOL(cl_lock_release);
2154 
2155 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2156 {
2157 	LINVRNT(cl_lock_is_mutexed(lock));
2158 	LINVRNT(cl_lock_invariant(env, lock));
2159 
2160 	cl_lock_used_mod(env, lock, +1);
2161 }
2162 EXPORT_SYMBOL(cl_lock_user_add);
2163 
2164 void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2165 {
2166 	LINVRNT(cl_lock_is_mutexed(lock));
2167 	LINVRNT(cl_lock_invariant(env, lock));
2168 	LASSERT(lock->cll_users > 0);
2169 
2170 	cl_lock_used_mod(env, lock, -1);
2171 	if (lock->cll_users == 0)
2172 		wake_up_all(&lock->cll_wq);
2173 }
2174 EXPORT_SYMBOL(cl_lock_user_del);
2175 
2176 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2177 {
2178 	static const char *names[] = {
2179 		[CLM_PHANTOM] = "P",
2180 		[CLM_READ]    = "R",
2181 		[CLM_WRITE]   = "W",
2182 		[CLM_GROUP]   = "G"
2183 	};
2184 	if (0 <= mode && mode < ARRAY_SIZE(names))
2185 		return names[mode];
2186 	else
2187 		return "U";
2188 }
2189 EXPORT_SYMBOL(cl_lock_mode_name);
2190 
2191 /**
2192  * Prints a human-readable representation of a lock description.
2193  */
2194 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2195 		       lu_printer_t printer,
2196 		       const struct cl_lock_descr *descr)
2197 {
2198 	const struct lu_fid  *fid;
2199 
2200 	fid = lu_object_fid(&descr->cld_obj->co_lu);
2201 	(*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2202 }
2203 EXPORT_SYMBOL(cl_lock_descr_print);
2204 
2205 /**
2206  * Prints a human-readable representation of \a lock through \a printer.
2207  */
2208 void cl_lock_print(const struct lu_env *env, void *cookie,
2209 		   lu_printer_t printer, const struct cl_lock *lock)
2210 {
2211 	const struct cl_lock_slice *slice;
2212 	(*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2213 		   lock, atomic_read(&lock->cll_ref),
2214 		   lock->cll_state, lock->cll_error, lock->cll_holds,
2215 		   lock->cll_users, lock->cll_flags);
2216 	cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2217 	(*printer)(env, cookie, " {\n");
2218 
2219 	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2220 		(*printer)(env, cookie, "    %s@%p: ",
2221 			   slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2222 			   slice);
2223 		if (slice->cls_ops->clo_print != NULL)
2224 			slice->cls_ops->clo_print(env, cookie, printer, slice);
2225 		(*printer)(env, cookie, "\n");
2226 	}
2227 	(*printer)(env, cookie, "} lock@%p\n", lock);
2228 }
2229 EXPORT_SYMBOL(cl_lock_print);
2230 
2231 int cl_lock_init(void)
2232 {
2233 	return lu_kmem_init(cl_lock_caches);
2234 }
2235 
2236 void cl_lock_fini(void)
2237 {
2238 	lu_kmem_fini(cl_lock_caches);
2239 }
2240