1 /*
2  * Copyright 2012 Xyratex Technology Limited
3  *
4  * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
5  *
6  */
7 
8 #define DEBUG_SUBSYSTEM S_LLITE
9 
10 #include <linux/fs.h>
11 #include <linux/sched.h>
12 #include <linux/mm.h>
13 #include "../include/obd_support.h"
14 #include "../include/lustre_lite.h"
15 #include "../include/lustre_dlm.h"
16 #include "../include/lustre_ver.h"
17 #include "llite_internal.h"
18 
/* If we ever have hundreds of extended attributes, we might want to consider
 * using a hash or a tree structure instead of list for faster lookups.
 */

/* One cached extended attribute (name/value pair) of an inode. */
struct ll_xattr_entry {
	struct list_head	xe_list;    /* protected with
					     * lli_xattrs_list_rwsem */
	char			*xe_name;   /* xattr name, \0-terminated */
	char			*xe_value;  /* xattr value */
	unsigned		xe_namelen; /* strlen(xe_name) + 1 */
	unsigned		xe_vallen;  /* xattr value length */
};
30 
/* Slab cache backing struct ll_xattr_entry allocations; created by
 * ll_xattr_init() from the lu_kmem descriptor table below. */
static struct kmem_cache *xattr_kmem;
static struct lu_kmem_descr xattr_caches[] = {
	{
		.ckd_cache = &xattr_kmem,
		.ckd_name  = "xattr_kmem",
		.ckd_size  = sizeof(struct ll_xattr_entry)
	},
	{
		.ckd_cache = NULL /* table terminator */
	}
};
42 
/**
 * Set up the slab cache(s) used for cached xattr entries.
 *
 * \retval 0 on success, negative error from lu_kmem_init() otherwise
 */
int ll_xattr_init(void)
{
	return lu_kmem_init(xattr_caches);
}
47 
/**
 * Release the slab cache(s) set up by ll_xattr_init().
 */
void ll_xattr_fini(void)
{
	lu_kmem_fini(xattr_caches);
}
52 
53 /**
54  * Initializes xattr cache for an inode.
55  *
56  * This initializes the xattr list and marks cache presence.
57  */
ll_xattr_cache_init(struct ll_inode_info * lli)58 static void ll_xattr_cache_init(struct ll_inode_info *lli)
59 {
60 
61 
62 	LASSERT(lli != NULL);
63 
64 	INIT_LIST_HEAD(&lli->lli_xattrs);
65 	lli->lli_flags |= LLIF_XATTR_CACHE;
66 }
67 
68 /**
69  *  This looks for a specific extended attribute.
70  *
71  *  Find in @cache and return @xattr_name attribute in @xattr,
72  *  for the NULL @xattr_name return the first cached @xattr.
73  *
74  *  \retval 0        success
75  *  \retval -ENODATA if not found
76  */
ll_xattr_cache_find(struct list_head * cache,const char * xattr_name,struct ll_xattr_entry ** xattr)77 static int ll_xattr_cache_find(struct list_head *cache,
78 			       const char *xattr_name,
79 			       struct ll_xattr_entry **xattr)
80 {
81 	struct ll_xattr_entry *entry;
82 
83 
84 
85 	list_for_each_entry(entry, cache, xe_list) {
86 		/* xattr_name == NULL means look for any entry */
87 		if (xattr_name == NULL ||
88 		    strcmp(xattr_name, entry->xe_name) == 0) {
89 			*xattr = entry;
90 			CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
91 			       entry->xe_name, entry->xe_vallen,
92 			       entry->xe_value);
93 			return 0;
94 		}
95 	}
96 
97 	return -ENODATA;
98 }
99 
100 /**
101  * This adds an xattr.
102  *
103  * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
104  *
105  * \retval 0       success
106  * \retval -ENOMEM if no memory could be allocated for the cached attr
107  * \retval -EPROTO if duplicate xattr is being added
108  */
ll_xattr_cache_add(struct list_head * cache,const char * xattr_name,const char * xattr_val,unsigned xattr_val_len)109 static int ll_xattr_cache_add(struct list_head *cache,
110 			      const char *xattr_name,
111 			      const char *xattr_val,
112 			      unsigned xattr_val_len)
113 {
114 	struct ll_xattr_entry *xattr;
115 
116 
117 
118 	if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
119 		CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
120 		return -EPROTO;
121 	}
122 
123 	OBD_SLAB_ALLOC_PTR_GFP(xattr, xattr_kmem, GFP_NOFS);
124 	if (xattr == NULL) {
125 		CDEBUG(D_CACHE, "failed to allocate xattr\n");
126 		return -ENOMEM;
127 	}
128 
129 	xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
130 	if (!xattr->xe_name) {
131 		CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
132 		       xattr->xe_namelen);
133 		goto err_name;
134 	}
135 	xattr->xe_value = kmemdup(xattr_val, xattr_val_len, GFP_NOFS);
136 	if (!xattr->xe_value)
137 		goto err_value;
138 
139 	xattr->xe_vallen = xattr_val_len;
140 	list_add(&xattr->xe_list, cache);
141 
142 	CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name,
143 		xattr_val_len, xattr_val);
144 
145 	return 0;
146 err_value:
147 	OBD_FREE(xattr->xe_name, xattr->xe_namelen);
148 err_name:
149 	OBD_SLAB_FREE_PTR(xattr, xattr_kmem);
150 
151 	return -ENOMEM;
152 }
153 
/**
 * This removes an extended attribute from cache.
 *
 * Remove @xattr_name attribute from @cache.
 * A NULL @xattr_name removes the first cached entry (used by
 * ll_xattr_cache_destroy_locked() to drain the whole list).
 *
 * \retval 0        success
 * \retval -ENODATA if @xattr_name is not cached
 */
static int ll_xattr_cache_del(struct list_head *cache,
			      const char *xattr_name)
{
	struct ll_xattr_entry *xattr;



	CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);

	if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
		list_del(&xattr->xe_list);
		/* NOTE(review): these frees rely on xe_namelen/xe_vallen
		 * matching the sizes allocated in ll_xattr_cache_add(). */
		OBD_FREE(xattr->xe_name, xattr->xe_namelen);
		OBD_FREE(xattr->xe_value, xattr->xe_vallen);
		OBD_SLAB_FREE_PTR(xattr, xattr_kmem);

		return 0;
	}

	return -ENODATA;
}
182 
183 /**
184  * This iterates cached extended attributes.
185  *
186  * Walk over cached attributes in @cache and
187  * fill in @xld_buffer or only calculate buffer
188  * size if @xld_buffer is NULL.
189  *
190  * \retval >= 0     buffer list size
191  * \retval -ENODATA if the list cannot fit @xld_size buffer
192  */
ll_xattr_cache_list(struct list_head * cache,char * xld_buffer,int xld_size)193 static int ll_xattr_cache_list(struct list_head *cache,
194 			       char *xld_buffer,
195 			       int xld_size)
196 {
197 	struct ll_xattr_entry *xattr, *tmp;
198 	int xld_tail = 0;
199 
200 
201 
202 	list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
203 		CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
204 			xld_buffer, xld_tail, xattr->xe_name);
205 
206 		if (xld_buffer) {
207 			xld_size -= xattr->xe_namelen;
208 			if (xld_size < 0)
209 				break;
210 			memcpy(&xld_buffer[xld_tail],
211 			       xattr->xe_name, xattr->xe_namelen);
212 		}
213 		xld_tail += xattr->xe_namelen;
214 	}
215 
216 	if (xld_size < 0)
217 		return -ERANGE;
218 
219 	return xld_tail;
220 }
221 
222 /**
223  * Check if the xattr cache is initialized (filled).
224  *
225  * \retval 0 @cache is not initialized
226  * \retval 1 @cache is initialized
227  */
ll_xattr_cache_valid(struct ll_inode_info * lli)228 static int ll_xattr_cache_valid(struct ll_inode_info *lli)
229 {
230 	return !!(lli->lli_flags & LLIF_XATTR_CACHE);
231 }
232 
233 /**
234  * This finalizes the xattr cache.
235  *
236  * Free all xattr memory. @lli is the inode info pointer.
237  *
238  * \retval 0 no error occurred
239  */
ll_xattr_cache_destroy_locked(struct ll_inode_info * lli)240 static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
241 {
242 
243 
244 	if (!ll_xattr_cache_valid(lli))
245 		return 0;
246 
247 	while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
248 		; /* empty loop */
249 	lli->lli_flags &= ~LLIF_XATTR_CACHE;
250 
251 	return 0;
252 }
253 
/**
 * Drop the whole xattr cache of @inode.
 *
 * Takes lli_xattrs_list_rwsem for write around the destruction.
 *
 * \retval 0 no error occurred
 */
int ll_xattr_cache_destroy(struct inode *inode)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	int rc;



	down_write(&lli->lli_xattrs_list_rwsem);
	rc = ll_xattr_cache_destroy_locked(lli);
	up_write(&lli->lli_xattrs_list_rwsem);

	return rc;
}
267 
/**
 * Match or enqueue a PR lock.
 *
 * Find or request an LDLM lock with xattr data.
 * Since LDLM does not provide API for atomic match_or_enqueue,
 * the function handles it with a separate enq lock.
 * If successful, the function exits with the list lock held.
 *
 * On success the caller is left holding lli_xattrs_list_rwsem for write;
 * *req is set only on the enqueue path and stays NULL for a matched lock.
 *
 * \retval 0       no error occurred
 * \retval -ENOMEM not enough memory
 */
static int ll_xattr_find_get_lock(struct inode *inode,
				  struct lookup_intent *oit,
				  struct ptlrpc_request **req)
{
	ldlm_mode_t mode;
	struct lustre_handle lockh = { 0 };
	struct md_op_data *op_data;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
					   .ei_mode = it_to_lock_mode(oit),
					   .ei_cb_bl = ll_md_blocking_ast,
					   .ei_cb_cp = ldlm_completion_ast };
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct obd_export *exp = sbi->ll_md_exp;
	int rc;



	/* Serialize match-or-enqueue so only one thread enqueues at a time. */
	mutex_lock(&lli->lli_xattrs_enq_lock);
	/* inode may have been shrunk and recreated, so data is gone, match lock
	 * only when data exists. */
	if (ll_xattr_cache_valid(lli)) {
		/* Try matching first. */
		mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
				       LCK_PR);
		if (mode != 0) {
			/* fake oit in mdc_revalidate_lock() manner */
			oit->d.lustre.it_lock_handle = lockh.cookie;
			oit->d.lustre.it_lock_mode = mode;
			goto out;
		}
	}

	/* Enqueue if the lock isn't cached locally. */
	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
				     LUSTRE_OPC_ANY, NULL);
	if (IS_ERR(op_data)) {
		mutex_unlock(&lli->lli_xattrs_enq_lock);
		return PTR_ERR(op_data);
	}

	op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;

	rc = md_enqueue(exp, &einfo, oit, op_data, &lockh, NULL, 0, NULL, 0);
	ll_finish_md_op_data(op_data);

	if (rc < 0) {
		CDEBUG(D_CACHE,
		       "md_intent_lock failed with %d for fid "DFID"\n",
		       rc, PFID(ll_inode2fid(inode)));
		mutex_unlock(&lli->lli_xattrs_enq_lock);
		return rc;
	}

	*req = (struct ptlrpc_request *)oit->d.lustre.it_data;
out:
	/* Hand the list write lock to the caller before dropping enq_lock. */
	down_write(&lli->lli_xattrs_list_rwsem);
	mutex_unlock(&lli->lli_xattrs_enq_lock);

	return 0;
}
340 
/**
 * Refill the xattr cache.
 *
 * Fetch and cache the whole of xattrs for @inode, acquiring
 * a read or a write xattr lock depending on operation in @oit.
 * Intent is dropped on exit unless the operation is setxattr.
 *
 * On success, returns holding lli_xattrs_list_rwsem for write (taken in
 * ll_xattr_find_get_lock()); on failure the semaphore is released.
 *
 * \retval 0       no error occurred
 * \retval -EPROTO network protocol error
 * \retval -ENOMEM not enough memory for the cache
 */
static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
{
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	const char *xdata, *xval, *xtail, *xvtail;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct mdt_body *body;
	__u32 *xsizes;
	int rc = 0, i;



	rc = ll_xattr_find_get_lock(inode, oit, &req);
	if (rc)
		goto out_no_unlock;

	/* Do we have the data at this point?  A parallel refill may have
	 * populated the cache while we waited for the lock. */
	if (ll_xattr_cache_valid(lli)) {
		ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
		rc = 0;
		goto out_maybe_drop;
	}

	/* Matched but no cache? Cancelled on error by a parallel refill. */
	if (unlikely(req == NULL)) {
		CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
		rc = -EIO;
		goto out_maybe_drop;
	}

	if (oit->d.lustre.it_status < 0) {
		CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
		       oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
		rc = oit->d.lustre.it_status;
		/* xattr data is so large that we don't want to cache it */
		if (rc == -ERANGE)
			rc = -EAGAIN;
		goto out_destroy;
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (body == NULL) {
		CERROR("no MDT BODY in the refill xattr reply\n");
		rc = -EPROTO;
		goto out_destroy;
	}
	/* do not need swab xattr data */
	/* Three parallel arrays in the reply: \0-separated names, packed
	 * values, and a value-length table. */
	xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
						body->eadatasize);
	xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
						body->aclsize);
	xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
					      body->max_mdsize * sizeof(__u32));
	if (xdata == NULL || xval == NULL || xsizes == NULL) {
		CERROR("wrong setxattr reply\n");
		rc = -EPROTO;
		goto out_destroy;
	}

	xtail = xdata + body->eadatasize;
	xvtail = xval + body->aclsize;

	CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);

	ll_xattr_cache_init(lli);

	/* NOTE(review): max_mdsize appears to carry the attribute count in
	 * this reply — confirm against the server-side packing code. */
	for (i = 0; i < body->max_mdsize; i++) {
		CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
		/* Perform consistency checks: attr names and vals in pill */
		if (memchr(xdata, 0, xtail - xdata) == NULL) {
			CERROR("xattr protocol violation (names are broken)\n");
			rc = -EPROTO;
		} else if (xval + *xsizes > xvtail) {
			CERROR("xattr protocol violation (vals are broken)\n");
			rc = -EPROTO;
		} else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
			rc = -ENOMEM;
		} else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
			/* Filter out ACL ACCESS since it's cached separately */
			CDEBUG(D_CACHE, "not caching %s\n",
			       XATTR_NAME_ACL_ACCESS);
			rc = 0;
		} else {
			rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
						*xsizes);
		}
		if (rc < 0) {
			ll_xattr_cache_destroy_locked(lli);
			goto out_destroy;
		}
		xdata += strlen(xdata) + 1;
		xval  += *xsizes;
		xsizes++;
	}

	if (xdata != xtail || xval != xvtail)
		CERROR("a hole in xattr data\n");

	ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);

	goto out_maybe_drop;
out_maybe_drop:

		ll_intent_drop_lock(oit);

	/* Keep the write lock held on success for the caller to downgrade. */
	if (rc != 0)
		up_write(&lli->lli_xattrs_list_rwsem);
out_no_unlock:
	ptlrpc_req_finished(req);

	return rc;

out_destroy:
	/* Fatal reply error: release the list lock and cancel the lock so a
	 * later getxattr retries from scratch. */
	up_write(&lli->lli_xattrs_list_rwsem);

	ldlm_lock_decref_and_cancel((struct lustre_handle *)
					&oit->d.lustre.it_lock_handle,
					oit->d.lustre.it_lock_mode);

	goto out_no_unlock;
}
473 
/**
 * Get an xattr value or list xattrs using the write-through cache.
 *
 * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
 * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
 * The resulting value/list is stored in @buffer if the former
 * is not larger than @size.
 *
 * \retval 0        no error occurred
 * \retval -EPROTO  network protocol error
 * \retval -ENOMEM  not enough memory for the cache
 * \retval -ERANGE  the buffer is not large enough
 * \retval -ENODATA no such attr or the list is empty
 */
int ll_xattr_cache_get(struct inode *inode,
			const char *name,
			char *buffer,
			size_t size,
			__u64 valid)
{
	struct lookup_intent oit = { .it_op = IT_GETXATTR };
	struct ll_inode_info *lli = ll_i2info(inode);
	int rc = 0;



	/* Exactly one of get (FLXATTR) and list (FLXATTRLS) must be set. */
	LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));

	down_read(&lli->lli_xattrs_list_rwsem);
	if (!ll_xattr_cache_valid(lli)) {
		up_read(&lli->lli_xattrs_list_rwsem);
		/* On success the refill returns holding the rwsem for write;
		 * downgrade it so the lookups below run under the read lock. */
		rc = ll_xattr_cache_refill(inode, &oit);
		if (rc)
			return rc;
		downgrade_write(&lli->lli_xattrs_list_rwsem);
	} else {
		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
	}

	if (valid & OBD_MD_FLXATTR) {
		struct ll_xattr_entry *xattr;

		rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
		if (rc == 0) {
			rc = xattr->xe_vallen;
			/* zero size means we are only requested size in rc */
			if (size != 0) {
				if (size >= xattr->xe_vallen)
					memcpy(buffer, xattr->xe_value,
						xattr->xe_vallen);
				else
					rc = -ERANGE;
			}
		}
	} else if (valid & OBD_MD_FLXATTRLS) {
		rc = ll_xattr_cache_list(&lli->lli_xattrs,
					 size ? buffer : NULL, size);
	}

	goto out;
out:
	up_read(&lli->lli_xattrs_list_rwsem);

	return rc;
}
539