/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_extent.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

/**
 * This file contains the implementation of the EXTENT lock type.
 *
 * EXTENT locks protect a contiguous range of values, represented by 64-bit
 * starting and ending offsets (both inclusive). There are several extent
 * lock modes, some of which are mutually incompatible. Two extent locks
 * conflict only if their modes are incompatible and their extents
 * intersect.  See the lock mode compatibility matrix in lustre_dlm.h.
 */
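
/*
 * Illustrative example: two PR (read) locks are always compatible with each
 * other regardless of their extents, while a PW (write) lock on [0, 4095]
 * conflicts with any PR or PW lock whose extent intersects [0, 4095].
 */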

#define DEBUG_SUBSYSTEM S_LDLM
#include "../../include/linux/libcfs/libcfs.h"
#include "../include/lustre_dlm.h"
#include "../include/obd_support.h"
#include "../include/obd.h"
#include "../include/obd_class.h"
#include "../include/lustre_lib.h"
#include "ldlm_internal.h"

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value.
 * Caller must hold lr_lock already.
 *
 * NB: A lock on [x, y] protects a KMS of up to y + 1 bytes!
 */
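/*
 * Illustrative example: if the cancelled lock covered [0, 4095] and the
 * highest remaining granted extent (not marked LDLM_FL_KMS_IGNORE) ends at
 * offset 2047, the new KMS is 2048.
 */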
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
	struct ldlm_resource *res = lock->l_resource;
	struct list_head *tmp;
	struct ldlm_lock *lck;
	__u64 kms = 0;

	/* don't let another thread in ldlm_extent_shift_kms race in
	 * just after we finish and take our lock into account in its
	 * calculation of the kms
	 */
	lock->l_flags |= LDLM_FL_KMS_IGNORE;

	list_for_each(tmp, &res->lr_granted) {
		lck = list_entry(tmp, struct ldlm_lock, l_res_link);

		if (lck->l_flags & LDLM_FL_KMS_IGNORE)
			continue;

		if (lck->l_policy_data.l_extent.end >= old_kms)
			return old_kms;

		/* This extent _has_ to be smaller than old_kms (checked above)
		 * so kms can only ever be smaller or the same as old_kms.
		 */
		if (lck->l_policy_data.l_extent.end + 1 > kms)
			kms = lck->l_policy_data.l_extent.end + 1;
	}
	LASSERTF(kms <= old_kms, "kms %llu old_kms %llu\n", kms, old_kms);

	return kms;
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);

struct kmem_cache *ldlm_interval_slab;
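
/* Allocate an interval node and attach it to @lock.  GFP_NOFS is used so
 * the allocation cannot recurse back into filesystem code under memory
 * pressure.
 */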
struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
	struct ldlm_interval *node;

	LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
	OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
	if (node == NULL)
		return NULL;

	INIT_LIST_HEAD(&node->li_group);
	ldlm_interval_attach(node, lock);
	return node;
}

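/* Free an interval node that is no longer attached to any lock and is not
 * in an interval tree.
 */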
void ldlm_interval_free(struct ldlm_interval *node)
{
	if (node) {
		LASSERT(list_empty(&node->li_group));
		LASSERT(!interval_is_intree(&node->li_node));
		OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
	}
}

/* Attach @l to interval node @n so that @l becomes a member of the node's
 * policy group; only valid for LDLM_EXTENT resources.
 */
void ldlm_interval_attach(struct ldlm_interval *n,
			  struct ldlm_lock *l)
{
	LASSERT(l->l_tree_node == NULL);
	LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

	list_add_tail(&l->l_sl_policy, &n->li_group);
	l->l_tree_node = n;
}

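/* Detach @l from its interval node.  Returns the node if @l was the last
 * member of the node's policy group (so the caller can free it), or NULL
 * if other locks still share the node.
 */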
struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
	struct ldlm_interval *n = l->l_tree_node;

	if (n == NULL)
		return NULL;

	LASSERT(!list_empty(&n->li_group));
	l->l_tree_node = NULL;
	list_del_init(&l->l_sl_policy);

	return list_empty(&n->li_group) ? n : NULL;
}

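/* Map a lock mode (a single power-of-two bit) to its bit index, used to
 * select the per-mode interval tree in res->lr_itree[].
 */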
static inline int lock_mode_to_index(ldlm_mode_t mode)
{
	int index;

	LASSERT(mode != 0);
	LASSERT(IS_PO2(mode));
	for (index = -1; mode; index++)
		mode >>= 1;
	LASSERT(index < LCK_MODE_NUM);
	return index;
}

/** Add a newly granted lock into the interval tree for the resource. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
			  struct ldlm_lock *lock)
{
	struct interval_node *found, **root;
	struct ldlm_interval *node;
	struct ldlm_extent *extent;
	int idx;

	LASSERT(lock->l_granted_mode == lock->l_req_mode);

	node = lock->l_tree_node;
	LASSERT(node != NULL);
	LASSERT(!interval_is_intree(&node->li_node));

	idx = lock_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

	/* initialize the interval node with the lock's extent */
	extent = &lock->l_policy_data.l_extent;
	interval_set(&node->li_node, extent->start, extent->end);

	root = &res->lr_itree[idx].lit_root;
	found = interval_insert(&node->li_node, root);
	if (found) { /* an interval with the same extent already exists */
		struct ldlm_interval *tmp;

		tmp = ldlm_interval_detach(lock);
		LASSERT(tmp != NULL);
		ldlm_interval_free(tmp);
		ldlm_interval_attach(to_ldlm_interval(found), lock);
	}
	res->lr_itree[idx].lit_size++;

	/* even though the interval tree manages the extent locks, we still
	 * add them to the granted list, mainly for debugging
	 */
	ldlm_resource_add_lock(res, &res->lr_granted, lock);
}

/** Remove a cancelled lock from the resource's interval tree. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
	struct ldlm_resource *res = lock->l_resource;
	struct ldlm_interval *node = lock->l_tree_node;
	struct ldlm_interval_tree *tree;
	int idx;

	if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
		return;

	idx = lock_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	tree = &res->lr_itree[idx];

	LASSERT(tree->lit_root != NULL); /* the tree must not be empty */

	tree->lit_size--;
	node = ldlm_interval_detach(lock);
	if (node) {
		interval_erase(&node->li_node, &tree->lit_root);
		ldlm_interval_free(node);
	}
}

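/* Convert the extent policy data between its wire (network) representation
 * and the local in-memory representation used by the DLM.
 */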
void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
				     ldlm_policy_data_t *lpolicy)
{
	memset(lpolicy, 0, sizeof(*lpolicy));
	lpolicy->l_extent.start = wpolicy->l_extent.start;
	lpolicy->l_extent.end = wpolicy->l_extent.end;
	lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}

void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
235 				     ldlm_wire_policy_data_t *wpolicy)
236 {
237 	memset(wpolicy, 0, sizeof(*wpolicy));
238 	wpolicy->l_extent.start = lpolicy->l_extent.start;
239 	wpolicy->l_extent.end = lpolicy->l_extent.end;
240 	wpolicy->l_extent.gid = lpolicy->l_extent.gid;
241 }
242