/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lustre_handles.c
 *
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include "../include/obd_support.h"
#include "../include/lustre_handles.h"
#include "../include/lustre_lib.h"


static __u64 handle_base;
#define HANDLE_INCR 7
static spinlock_t handle_base_lock;

static struct handle_bucket {
        spinlock_t              lock;
        struct list_head        head;
} *handle_hash;

#define HANDLE_HASH_SIZE (1 << 16)
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)

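/*
 * All handles live in one global table of HANDLE_HASH_SIZE buckets; the low
 * 16 bits of a handle's cookie select its bucket.  Each bucket has its own
 * spinlock for insertion and removal, while lookups walk the bucket list
 * under RCU.
 */
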
/*
 * Generate a unique 64-bit cookie (hash) for a handle and insert it into
 * the global (per-node) hash table.
 */
void class_handle_hash(struct portals_handle *h,
                       struct portals_handle_ops *ops)
{
        struct handle_bucket *bucket;

        LASSERT(h != NULL);
        LASSERT(list_empty(&h->h_link));

        /*
         * This is a fast but simplistic cookie generation algorithm; it will
         * need a re-do at some point in the future for security.
         */
        spin_lock(&handle_base_lock);
        handle_base += HANDLE_INCR;

        if (unlikely(handle_base == 0)) {
                /*
                 * A cookie of zero is "dangerous", because in many places it
                 * is assumed that 0 means an "unassigned" handle, not bound
                 * to any object.
                 */
                CWARN("The universe has been exhausted: cookie wrap-around.\n");
                handle_base += HANDLE_INCR;
        }
        h->h_cookie = handle_base;
        spin_unlock(&handle_base_lock);

        h->h_ops = ops;
        spin_lock_init(&h->h_lock);

        bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
        spin_lock(&bucket->lock);
        list_add_rcu(&h->h_link, &bucket->head);
        h->h_in = 1;
        spin_unlock(&bucket->lock);

        CDEBUG(D_INFO, "added object %p with handle %#llx to hash\n",
               h, h->h_cookie);
}
EXPORT_SYMBOL(class_handle_hash);
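
/*
 * Illustrative usage sketch ("struct my_obj" and its helpers are hypothetical
 * names): a caller embeds a struct portals_handle in its own object, supplies
 * portals_handle_ops, initializes h_link, and hashes the handle to obtain a
 * cookie that can later be resolved with class_handle2object().
 *
 *	struct my_obj {
 *		struct portals_handle	mo_handle;
 *	};
 *
 *	static void my_obj_addref(struct portals_handle *h)
 *	{
 *		// take a reference on the object embedding @h
 *	}
 *
 *	static struct portals_handle_ops my_obj_handle_ops = {
 *		.hop_addref	= my_obj_addref,
 *		.hop_free	= NULL,
 *	};
 *
 *	INIT_LIST_HEAD(&obj->mo_handle.h_link);
 *	class_handle_hash(&obj->mo_handle, &my_obj_handle_ops);
 *	cookie = obj->mo_handle.h_cookie;
 */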

static void class_handle_unhash_nolock(struct portals_handle *h)
{
        if (list_empty(&h->h_link)) {
                CERROR("removing an already-removed handle (%#llx)\n",
                       h->h_cookie);
                return;
        }

        CDEBUG(D_INFO, "removing object %p with handle %#llx from hash\n",
               h, h->h_cookie);

        spin_lock(&h->h_lock);
        if (h->h_in == 0) {
                spin_unlock(&h->h_lock);
                return;
        }
        h->h_in = 0;
        spin_unlock(&h->h_lock);
        list_del_rcu(&h->h_link);
}

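/*
 * Remove a handle from the hash.  Takes the bucket lock and delegates the
 * actual unlinking (and clearing of h_in) to class_handle_unhash_nolock().
 */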
void class_handle_unhash(struct portals_handle *h)
{
        struct handle_bucket *bucket;
        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);

        spin_lock(&bucket->lock);
        class_handle_unhash_nolock(h);
        spin_unlock(&bucket->lock);
}
EXPORT_SYMBOL(class_handle_unhash);

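/*
 * Re-insert a previously unhashed handle into the hash, reusing the cookie
 * it was originally assigned, and mark it live again.
 */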
void class_handle_hash_back(struct portals_handle *h)
{
        struct handle_bucket *bucket;

        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);

        spin_lock(&bucket->lock);
        list_add_rcu(&h->h_link, &bucket->head);
        h->h_in = 1;
        spin_unlock(&bucket->lock);
}
EXPORT_SYMBOL(class_handle_hash_back);

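/*
 * Look up the handle matching @cookie.  The bucket list is walked under
 * rcu_read_lock(); if a live handle (h_in != 0) is found, a reference is
 * taken through hop_addref() and the handle pointer is returned, otherwise
 * NULL.
 */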
void *class_handle2object(__u64 cookie)
{
        struct handle_bucket *bucket;
        struct portals_handle *h;
        void *retval = NULL;

        LASSERT(handle_hash != NULL);

        /* Be careful when you want to change this code.  See the
         * rcu_read_lock() definition at the top of this file. - jxiong */
        bucket = handle_hash + (cookie & HANDLE_HASH_MASK);

        rcu_read_lock();
        list_for_each_entry_rcu(h, &bucket->head, h_link) {
                if (h->h_cookie != cookie)
                        continue;

                spin_lock(&h->h_lock);
                if (likely(h->h_in != 0)) {
                        h->h_ops->hop_addref(h);
                        retval = h;
                }
                spin_unlock(&h->h_lock);
                break;
        }
        rcu_read_unlock();

        return retval;
}
EXPORT_SYMBOL(class_handle2object);
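
/*
 * Illustrative lookup sketch (assumes the hypothetical struct my_obj layout
 * above, with the portals_handle as its first member, and a hypothetical
 * my_obj_put() that drops the reference taken by hop_addref()):
 *
 *	struct my_obj *obj = class_handle2object(cookie);
 *	if (obj != NULL) {
 *		// ... use obj ...
 *		my_obj_put(obj);
 *	}
 */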

void class_handle_free_cb(struct rcu_head *rcu)
{
        struct portals_handle *h = RCU2HANDLE(rcu);
        void *ptr = (void *)(unsigned long)h->h_cookie;

        if (h->h_ops->hop_free != NULL)
                h->h_ops->hop_free(ptr, h->h_size);
        else
                OBD_FREE(ptr, h->h_size);
}
EXPORT_SYMBOL(class_handle_free_cb);

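/*
 * Allocate the global handle hash table, initialize every bucket's list and
 * lock, and seed both the PRNG and handle_base with random bytes.  Returns 0
 * on success or -ENOMEM if the table cannot be allocated.
 */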
int class_handle_init(void)
{
        struct handle_bucket *bucket;
        struct timeval tv;
        int seed[2];

        LASSERT(handle_hash == NULL);

        OBD_ALLOC_LARGE(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
        if (handle_hash == NULL)
                return -ENOMEM;

        spin_lock_init(&handle_base_lock);
        for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
             bucket--) {
                INIT_LIST_HEAD(&bucket->head);
                spin_lock_init(&bucket->lock);
        }

        /* bug 21430: add randomness to the initial base */
        cfs_get_random_bytes(seed, sizeof(seed));
        do_gettimeofday(&tv);
        cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);

        cfs_get_random_bytes(&handle_base, sizeof(handle_base));
        LASSERT(handle_base != 0ULL);

        return 0;
}

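/*
 * Forcibly unhash any handles still present in the table, logging each one,
 * and return the number of handles that had leaked.
 */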
static int cleanup_all_handles(void)
{
        int rc;
        int i;

        for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
                struct portals_handle *h;

                spin_lock(&handle_hash[i].lock);
                list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
                        CERROR("force clean handle %#llx addr %p ops %p\n",
                               h->h_cookie, h, h->h_ops);

                        class_handle_unhash_nolock(h);
                        rc++;
                }
                spin_unlock(&handle_hash[i].lock);
        }

        return rc;
}

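/*
 * Tear down the handle hash table: force-clean any remaining handles, free
 * the table, and complain if anything was still hashed.
 */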
void class_handle_cleanup(void)
{
        int count;
        LASSERT(handle_hash != NULL);

        count = cleanup_all_handles();

        OBD_FREE_LARGE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
        handle_hash = NULL;

        if (count != 0)
                CERROR("handle_count at cleanup: %d\n", count);
}