/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * cl code shared between vvp and liblustre (and other Lustre clients in the
 * future).
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include "../../include/linux/libcfs/libcfs.h"
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rbtree.h>

#include "../include/obd.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_ver.h"
#include "../include/lustre_mdc.h"
#include "../include/cl_object.h"

#include "../include/lclient.h"

#include "../llite/llite_internal.h"

static const struct cl_req_operations ccc_req_ops;

/*
 * ccc_ prefix stands for "Common Client Code".
 */

static struct kmem_cache *ccc_lock_kmem;
static struct kmem_cache *ccc_object_kmem;
static struct kmem_cache *ccc_thread_kmem;
static struct kmem_cache *ccc_session_kmem;
static struct kmem_cache *ccc_req_kmem;

static struct lu_kmem_descr ccc_caches[] = {
	{
		.ckd_cache = &ccc_lock_kmem,
		.ckd_name  = "ccc_lock_kmem",
		.ckd_size  = sizeof(struct ccc_lock)
	},
	{
		.ckd_cache = &ccc_object_kmem,
		.ckd_name  = "ccc_object_kmem",
		.ckd_size  = sizeof(struct ccc_object)
	},
	{
		.ckd_cache = &ccc_thread_kmem,
		.ckd_name  = "ccc_thread_kmem",
		.ckd_size  = sizeof(struct ccc_thread_info),
	},
	{
		.ckd_cache = &ccc_session_kmem,
		.ckd_name  = "ccc_session_kmem",
		.ckd_size  = sizeof(struct ccc_session)
	},
	{
		.ckd_cache = &ccc_req_kmem,
		.ckd_name  = "ccc_req_kmem",
		.ckd_size  = sizeof(struct ccc_req)
	},
	{
		.ckd_cache = NULL
	}
};
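
/*
 * The NULL ckd_cache entry above terminates the table. lu_kmem_init() and
 * lu_kmem_fini() (called from ccc_global_init()/ccc_global_fini() below)
 * walk the descriptors up to that sentinel; roughly (a sketch, not the
 * authoritative implementation):
 *
 *	struct lu_kmem_descr *d;
 *
 *	for (d = ccc_caches; d->ckd_cache != NULL; d++)
 *		*d->ckd_cache = kmem_cache_create(d->ckd_name, d->ckd_size,
 *						  0, 0, NULL);
 */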

/*****************************************************************************
 *
 * Vvp device and device type functions.
 *
 */

void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
{
	struct ccc_thread_info *info;

	OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, GFP_NOFS);
	if (info == NULL)
		info = ERR_PTR(-ENOMEM);
	return info;
}

void ccc_key_fini(const struct lu_context *ctx,
		  struct lu_context_key *key, void *data)
{
	struct ccc_thread_info *info = data;

	OBD_SLAB_FREE_PTR(info, ccc_thread_kmem);
}

void *ccc_session_key_init(const struct lu_context *ctx,
			   struct lu_context_key *key)
{
	struct ccc_session *session;

	OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, GFP_NOFS);
	if (session == NULL)
		session = ERR_PTR(-ENOMEM);
	return session;
}

void ccc_session_key_fini(const struct lu_context *ctx,
			  struct lu_context_key *key, void *data)
{
	struct ccc_session *session = data;

	OBD_SLAB_FREE_PTR(session, ccc_session_kmem);
}

struct lu_context_key ccc_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = ccc_key_init,
	.lct_fini = ccc_key_fini
};

struct lu_context_key ccc_session_key = {
	.lct_tags = LCT_SESSION,
	.lct_init = ccc_session_key_init,
	.lct_fini = ccc_session_key_fini
};
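
/*
 * The values attached through these keys are the per-thread ccc_thread_info
 * and per-session ccc_session allocated by the *_key_init() callbacks above.
 * A minimal sketch of how a caller retrieves them (helpers such as
 * ccc_env_info() are assumed to wrap this lookup):
 *
 *	struct ccc_thread_info *info;
 *
 *	info = lu_context_key_get(&env->le_ctx, &ccc_key);
 */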

/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */

int ccc_device_init(const struct lu_env *env, struct lu_device *d,
		    const char *name, struct lu_device *next)
{
	struct ccc_device *vdv;
	int rc;

	vdv = lu2ccc_dev(d);
	vdv->cdv_next = lu2cl_dev(next);

	LASSERT(d->ld_site != NULL && next->ld_type != NULL);
	next->ld_site = d->ld_site;
	rc = next->ld_type->ldt_ops->ldto_device_init(
			env, next, next->ld_type->ldt_name, NULL);
	if (rc == 0) {
		lu_device_get(next);
		lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
	}
	return rc;
}

struct lu_device *ccc_device_fini(const struct lu_env *env,
				  struct lu_device *d)
{
	return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
}

struct lu_device *ccc_device_alloc(const struct lu_env *env,
				   struct lu_device_type *t,
				   struct lustre_cfg *cfg,
				   const struct lu_device_operations *luops,
				   const struct cl_device_operations *clops)
{
	struct ccc_device *vdv;
	struct lu_device *lud;
	struct cl_site *site;
	int rc;

	OBD_ALLOC_PTR(vdv);
	if (vdv == NULL)
		return ERR_PTR(-ENOMEM);

	lud = &vdv->cdv_cl.cd_lu_dev;
	cl_device_init(&vdv->cdv_cl, t);
	ccc2lu_dev(vdv)->ld_ops = luops;
	vdv->cdv_cl.cd_ops = clops;

	OBD_ALLOC_PTR(site);
	if (site != NULL) {
		rc = cl_site_init(site, &vdv->cdv_cl);
		if (rc == 0)
			rc = lu_site_init_finish(&site->cs_lu);
		else {
			LASSERT(lud->ld_site == NULL);
			CERROR("Cannot init lu_site, rc %d.\n", rc);
			OBD_FREE_PTR(site);
		}
	} else
		rc = -ENOMEM;
	if (rc != 0) {
		ccc_device_free(env, lud);
		lud = ERR_PTR(rc);
	}
	return lud;
}

struct lu_device *ccc_device_free(const struct lu_env *env,
				  struct lu_device *d)
{
	struct ccc_device *vdv = lu2ccc_dev(d);
	struct cl_site *site = lu2cl_site(d->ld_site);
	struct lu_device *next = cl2lu_dev(vdv->cdv_next);

	if (d->ld_site != NULL) {
		cl_site_fini(site);
		OBD_FREE_PTR(site);
	}
	cl_device_fini(lu2cl_dev(d));
	OBD_FREE_PTR(vdv);
	return next;
}

int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
		 struct cl_req *req)
{
	struct ccc_req *vrq;
	int result;

	OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, GFP_NOFS);
	if (vrq != NULL) {
		cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
		result = 0;
	} else
		result = -ENOMEM;
	return result;
}

/**
 * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
 * fails. Access to this environment is serialized by the
 * ccc_inode_fini_guard mutex.
 */
static struct lu_env *ccc_inode_fini_env;

/**
 * A mutex serializing calls to cl_inode_fini() under extreme memory
 * pressure, when environments cannot be allocated.
 */
static DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;

int ccc_global_init(struct lu_device_type *device_type)
{
	int result;

	result = lu_kmem_init(ccc_caches);
	if (result)
		return result;

	result = lu_device_type_init(device_type);
	if (result)
		goto out_kmem;

	ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
					  LCT_REMEMBER | LCT_NOREF);
	if (IS_ERR(ccc_inode_fini_env)) {
		result = PTR_ERR(ccc_inode_fini_env);
		goto out_device;
	}

	ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
	return 0;
out_device:
	lu_device_type_fini(device_type);
out_kmem:
	lu_kmem_fini(ccc_caches);
	return result;
}

void ccc_global_fini(struct lu_device_type *device_type)
{
	if (ccc_inode_fini_env != NULL) {
		cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
		ccc_inode_fini_env = NULL;
	}
	lu_device_type_fini(device_type);
	lu_kmem_fini(ccc_caches);
}
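
/*
 * Expected pairing (a sketch; "client_device_type" is a hypothetical
 * lu_device_type, not one defined in this file): a client module calls
 * ccc_global_init() once at load time and ccc_global_fini() with the same
 * type on unload:
 *
 *	rc = ccc_global_init(&client_device_type);
 *	if (rc != 0)
 *		return rc;
 *	...
 *	ccc_global_fini(&client_device_type);
 */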

/*****************************************************************************
 *
 * Object operations.
 *
 */

struct lu_object *ccc_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *unused,
				   struct lu_device *dev,
				   const struct cl_object_operations *clops,
				   const struct lu_object_operations *luops)
{
	struct ccc_object *vob;
	struct lu_object *obj;

	OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, GFP_NOFS);
	if (vob != NULL) {
		struct cl_object_header *hdr;

		obj = ccc2lu(vob);
		hdr = &vob->cob_header;
		cl_object_header_init(hdr);
		lu_object_init(obj, &hdr->coh_lu, dev);
		lu_object_add_top(&hdr->coh_lu, obj);

		vob->cob_cl.co_ops = clops;
		obj->lo_ops = luops;
	} else
		obj = NULL;
	return obj;
}

int ccc_object_init0(const struct lu_env *env,
		     struct ccc_object *vob,
		     const struct cl_object_conf *conf)
{
	vob->cob_inode = conf->coc_inode;
	vob->cob_transient_pages = 0;
	cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
	return 0;
}

int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
		    const struct lu_object_conf *conf)
{
	struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
	struct ccc_object *vob = lu2ccc(obj);
	struct lu_object *below;
	struct lu_device *under;
	int result;

	under = &dev->cdv_next->cd_lu_dev;
	below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
	if (below != NULL) {
		const struct cl_object_conf *cconf;

		cconf = lu2cl_conf(conf);
		INIT_LIST_HEAD(&vob->cob_pending_list);
		lu_object_add(obj, below);
		result = ccc_object_init0(env, vob, cconf);
	} else
		result = -ENOMEM;
	return result;
}

void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
{
	struct ccc_object *vob = lu2ccc(obj);

	lu_object_fini(obj);
	lu_object_header_fini(obj->lo_header);
	OBD_SLAB_FREE_PTR(vob, ccc_object_kmem);
}

int ccc_lock_init(const struct lu_env *env,
		  struct cl_object *obj, struct cl_lock *lock,
		  const struct cl_io *unused,
		  const struct cl_lock_operations *lkops)
{
	struct ccc_lock *clk;
	int result;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, GFP_NOFS);
	if (clk != NULL) {
		cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
		result = 0;
	} else
		result = -ENOMEM;
	return result;
}

int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_attr *attr, unsigned valid)
{
	return 0;
}

int ccc_object_glimpse(const struct lu_env *env,
		       const struct cl_object *obj, struct ost_lvb *lvb)
{
	struct inode *inode = ccc_object_inode(obj);

	lvb->lvb_mtime = cl_inode_mtime(inode);
	lvb->lvb_atime = cl_inode_atime(inode);
	lvb->lvb_ctime = cl_inode_ctime(inode);
	/*
	 * LU-417: add the dirty-page block count lest i_blocks reports 0;
	 * otherwise "cp" or "tar" on a remote node may think the file is
	 * completely sparse and skip it.
	 */
	if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
		lvb->lvb_blocks = dirty_cnt(inode);
	return 0;
}

int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
		 const struct cl_object_conf *conf)
{
	/* TODO: destroy all pages attached to this object. */
	return 0;
}

static void ccc_object_size_lock(struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);

	cl_isize_lock(inode);
	cl_object_attr_lock(obj);
}

static void ccc_object_size_unlock(struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);

	cl_object_attr_unlock(obj);
	cl_isize_unlock(inode);
}

/*****************************************************************************
 *
 * Page operations.
 *
 */

struct page *ccc_page_vmpage(const struct lu_env *env,
			     const struct cl_page_slice *slice)
{
	return cl2vm_page(slice);
}

int ccc_page_is_under_lock(const struct lu_env *env,
			   const struct cl_page_slice *slice,
			   struct cl_io *io)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
	struct cl_page *page = slice->cpl_page;
	int result;

	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
	    io->ci_type == CIT_FAULT) {
		if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
			result = -EBUSY;
		else {
			desc->cld_start = page->cp_index;
			desc->cld_end   = page->cp_index;
			desc->cld_obj   = page->cp_obj;
			desc->cld_mode  = CLM_READ;
			result = cl_queue_match(&io->ci_lockset.cls_done,
						desc) ? -EBUSY : 0;
		}
	} else
		result = 0;
	return result;
}

int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
{
	/*
	 * Cached read?
	 */
	LBUG();
	return 0;
}

void ccc_transient_page_verify(const struct cl_page *page)
{
}

int ccc_transient_page_own(const struct lu_env *env,
			   const struct cl_page_slice *slice,
			   struct cl_io *unused,
			   int nonblock)
{
	ccc_transient_page_verify(slice->cpl_page);
	return 0;
}

void ccc_transient_page_assume(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_unassume(const struct lu_env *env,
				 const struct cl_page_slice *slice,
				 struct cl_io *unused)
{
	ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_disown(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	ccc_transient_page_verify(slice->cpl_page);
}

void ccc_transient_page_discard(const struct lu_env *env,
				const struct cl_page_slice *slice,
				struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	ccc_transient_page_verify(slice->cpl_page);

	/*
	 * This is a transient page: remove it from the radix tree.
	 */
	cl_page_delete(env, page);
}

int ccc_transient_page_prep(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	/* transient page should always be sent. */
	return 0;
}

/*****************************************************************************
 *
 * Lock operations.
 *
 */

void ccc_lock_delete(const struct lu_env *env,
		     const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
}

void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
{
	struct ccc_lock *clk = cl2ccc_lock(slice);

	OBD_SLAB_FREE_PTR(clk, ccc_lock_kmem);
}

int ccc_lock_enqueue(const struct lu_env *env,
		     const struct cl_lock_slice *slice,
		     struct cl_io *unused, __u32 enqflags)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
{
	CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
	return 0;
}

/**
 * Implementation of cl_lock_operations::clo_fits_into() methods for the ccc
 * layer. This function is executed every time an I/O finds an existing lock
 * in the lock cache while creating a new lock, and has to decide whether the
 * cached lock "fits" the I/O.
 *
 * \param slice lock to be checked
 * \param io    I/O that wants a lock.
 *
 * \see lov_lock_fits_into().
 */
int ccc_lock_fits_into(const struct lu_env *env,
		       const struct cl_lock_slice *slice,
		       const struct cl_lock_descr *need,
		       const struct cl_io *io)
{
	const struct cl_lock *lock = slice->cls_lock;
	const struct cl_lock_descr *descr = &lock->cll_descr;
	const struct ccc_io *cio = ccc_env_io(env);
	int result;

	/*
	 * Work around a DLM peculiarity: it assumes that a glimpse
	 * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns a read
	 * lock when asked for an LCK_PW lock with LDLM_FL_HAS_INTENT set.
	 * Make sure that glimpse doesn't get a CLM_WRITE top-lock, so that
	 * it doesn't enqueue CLM_WRITE sub-locks.
	 */
	if (cio->cui_glimpse)
		result = descr->cld_mode != CLM_WRITE;

	/*
	 * Also, don't match incomplete write locks for read, otherwise read
	 * would enqueue missing sub-locks in the write mode.
	 */
	else if (need->cld_mode != descr->cld_mode)
		result = lock->cll_state >= CLS_ENQUEUED;
	else
		result = 1;
	return result;
}
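
/*
 * To summarize the checks above: a glimpse I/O fits a cached lock only if
 * the cached mode is not CLM_WRITE; a mode-mismatched lock fits only once
 * it has reached at least CLS_ENQUEUED; an exact mode match always fits.
 */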

/**
 * Implements the cl_lock_operations::clo_state() method for the ccc layer,
 * invoked whenever the lock state changes. Transfers object attributes that
 * might have been updated as a result of acquiring the lock into the inode.
 */
void ccc_lock_state(const struct lu_env *env,
		    const struct cl_lock_slice *slice,
		    enum cl_lock_state state)
{
	struct cl_lock *lock = slice->cls_lock;

	/*
	 * Refresh inode attributes when the lock is moving into the CLS_HELD
	 * state, and only when this is a result of a real enqueue, rather
	 * than of finding the lock in the cache.
	 */
	if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
		struct cl_object *obj;
		struct inode *inode;

		obj = slice->cls_obj;
		inode = ccc_object_inode(obj);

		/*
		 * vmtruncate() sets the i_size under both a DLM lock and the
		 * ll_inode_size_lock(). If we don't get the
		 * ll_inode_size_lock() here we can match the DLM lock and
		 * reset i_size. generic_file_write() can then trust the
		 * stale i_size when doing appending writes and effectively
		 * cancel the result of the truncate. Getting the
		 * ll_inode_size_lock() after the enqueue maintains the DLM
		 * -> ll_inode_size_lock() acquiring order.
		 */
		if (lock->cll_descr.cld_start == 0 &&
		    lock->cll_descr.cld_end == CL_PAGE_EOF)
			cl_merge_lvb(env, inode);
	}
}

/*****************************************************************************
 *
 * io operations.
 *
 */

void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;

	CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
}

int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
			  __u32 enqflags, enum cl_lock_mode mode,
			  pgoff_t start, pgoff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
	struct cl_object *obj = io->ci_obj;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);

	memset(&cio->cui_link, 0, sizeof(cio->cui_link));

	if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
		descr->cld_mode = CLM_GROUP;
		descr->cld_gid  = cio->cui_fd->fd_grouplock.cg_gid;
	} else {
		descr->cld_mode = mode;
	}
	descr->cld_obj = obj;
	descr->cld_start = start;
	descr->cld_end = end;
	descr->cld_enq_flags = enqflags;

	cl_io_lock_add(env, io, &cio->cui_link);
	return 0;
}

void ccc_io_update_iov(const struct lu_env *env,
		       struct ccc_io *cio, struct cl_io *io)
{
	size_t size = io->u.ci_rw.crw_count;

	if (!cl_is_normalio(env, io) || cio->cui_iter == NULL)
		return;

	iov_iter_truncate(cio->cui_iter, size);
}

int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
		    __u32 enqflags, enum cl_lock_mode mode,
		    loff_t start, loff_t end)
{
	struct cl_object *obj = io->ci_obj;

	return ccc_io_one_lock_index(env, io, enqflags, mode,
				     cl_index(obj, start), cl_index(obj, end));
}

void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
	CLOBINVRNT(env, ios->cis_io->ci_obj,
		   ccc_object_invariant(ios->cis_io->ci_obj));
}

void ccc_io_advance(const struct lu_env *env,
		    const struct cl_io_slice *ios,
		    size_t nob)
{
	struct ccc_io *cio = cl2ccc_io(env, ios);
	struct cl_io *io = ios->cis_io;
	struct cl_object *obj = ios->cis_io->ci_obj;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	if (!cl_is_normalio(env, io))
		return;

	iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count -= nob);
}

/**
 * Helper function that, if necessary, adjusts the file size (inode->i_size)
 * when the position at offset \a pos is accessed. The file size can be
 * arbitrarily stale on a Lustre client, but the client at least knows the
 * KMS. If the accessed area is inside [0, KMS], set the file size to KMS;
 * otherwise glimpse the file size.
 *
 * Locking: cl_isize_lock is used to serialize changes to the inode size and
 * to protect consistency between the inode size and the cl_object
 * attributes. cl_object_size_lock() protects consistency between the
 * cl_attrs of the top-object and sub-objects.
 */
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
		  struct cl_io *io, loff_t start, size_t count, int *exceed)
{
	struct cl_attr *attr = ccc_env_thread_attr(env);
	struct inode *inode = ccc_object_inode(obj);
	loff_t pos = start + count - 1;
	loff_t kms;
	int result;

	/*
	 * Consistency guarantees: the following possibilities exist for the
	 * relation between the region being accessed and the real file size
	 * at this moment:
	 *
	 * (A): the region is completely inside of the file;
	 *
	 * (B-x): x bytes of the region are inside of the file, the rest is
	 * outside;
	 *
	 * (C): the region is completely outside of the file.
	 *
	 * This classification is stable under the DLM lock already acquired
	 * by the caller, because to change the class, another client has to
	 * take a DLM lock conflicting with our lock. Also, any updates to
	 * ->i_size by other threads on this client are serialized by
	 * ll_inode_size_lock(). This guarantees that short reads are handled
	 * correctly in the face of concurrent writes and truncates.
	 */
	ccc_object_size_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	if (result == 0) {
		kms = attr->cat_kms;
		if (pos > kms) {
			/*
			 * A glimpse is necessary to determine whether we
			 * return a short read (B) or some zeroes at the end
			 * of the buffer (C).
			 */
			ccc_object_size_unlock(obj);
			result = cl_glimpse_lock(env, io, inode, obj, 0);
			if (result == 0 && exceed != NULL) {
				/*
				 * If the objective page index exceeds the
				 * end-of-file page index, return directly.
				 * Do not expect the kernel to check such a
				 * case correctly; linux-2.6.18-128.1.1
				 * misses it. --bug 17336
				 */
				loff_t size = cl_isize_read(inode);
				loff_t cur_index = start >> PAGE_CACHE_SHIFT;
				loff_t size_index = (size - 1) >>
						    PAGE_CACHE_SHIFT;

				if ((size == 0 && cur_index != 0) ||
				    size_index < cur_index)
					*exceed = 1;
			}
			return result;
		} else {
			/*
			 * region is within kms and, hence, within real file
			 * size (A). We need to increase i_size to cover the
			 * read region so that generic_file_read() will do its
			 * job, but that doesn't mean the kms size is
			 * _correct_, it is only the _minimum_ size. If
			 * someone does a stat they will get the correct size
			 * which will always be >= the kms value here.
			 * b=11081
			 */
			if (cl_isize_read(inode) < kms) {
				cl_isize_write_nolock(inode, kms);
				CDEBUG(D_VFSTRACE,
				       DFID " updating i_size %llu\n",
				       PFID(lu_object_fid(&obj->co_lu)),
				       (__u64)cl_isize_read(inode));
			}
		}
	}
	ccc_object_size_unlock(obj);
	return result;
}
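
/*
 * Worked example of the (A)/(B)/(C) cases above, with illustrative numbers:
 * suppose kms == 100.  A read of count == 50 at start == 30 gives
 * pos == 79 < kms, case (A): i_size is raised to kms if it is smaller, and
 * no glimpse is needed.  The same read at start == 80 gives pos == 129 >
 * kms, so a glimpse is required to distinguish a short read (B) from
 * zeroes past end of file (C).
 */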

/*****************************************************************************
 *
 * Transfer operations.
 *
 */

void ccc_req_completion(const struct lu_env *env,
			const struct cl_req_slice *slice, int ioret)
{
	struct ccc_req *vrq;

	if (ioret > 0)
		cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);

	vrq = cl2ccc_req(slice);
	OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem);
}

/**
 * Implementation of struct cl_req_operations::cro_attr_set() for the ccc
 * layer. ccc is responsible for
 *
 *    - o_[mac]time
 *
 *    - o_mode
 *
 *    - o_parent_seq
 *
 *    - o_[ug]id
 *
 *    - o_parent_oid
 *
 *    - o_parent_ver
 *
 *    - o_ioepoch,
 *
 * and capability.
 */
void ccc_req_attr_set(const struct lu_env *env,
		      const struct cl_req_slice *slice,
		      const struct cl_object *obj,
		      struct cl_req_attr *attr, u64 flags)
{
	struct inode *inode;
	struct obdo *oa;
	u32 valid_flags;

	oa = attr->cra_oa;
	inode = ccc_object_inode(obj);
	valid_flags = OBD_MD_FLTYPE;

	if ((flags & OBD_MD_FLOSSCAPA) != 0) {
		LASSERT(attr->cra_capa == NULL);
		attr->cra_capa = cl_capa_lookup(inode,
						slice->crs_req->crq_type);
	}

	if (slice->crs_req->crq_type == CRT_WRITE) {
		if (flags & OBD_MD_FLEPOCH) {
			oa->o_valid |= OBD_MD_FLEPOCH;
			oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
			valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
				       OBD_MD_FLUID | OBD_MD_FLGID;
		}
	}
	obdo_from_inode(oa, inode, valid_flags & flags);
	obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
	memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
	       JOBSTATS_JOBID_SIZE);
}

static const struct cl_req_operations ccc_req_ops = {
	.cro_attr_set   = ccc_req_attr_set,
	.cro_completion = ccc_req_completion
};

int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
		   struct obd_capa *capa)
{
	struct lu_env *env;
	struct cl_io *io;
	int result;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	io = ccc_env_thread_io(env);
	io->ci_obj = cl_i2info(inode)->lli_clob;

	io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
	io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
	io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
	io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
	io->u.ci_setattr.sa_valid = attr->ia_valid;
	io->u.ci_setattr.sa_capa = capa;

again:
	if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
		struct ccc_io *cio = ccc_env_io(env);

		if (attr->ia_valid & ATTR_FILE)
			/*
			 * Populate the file descriptor so that ftruncate
			 * honors the group lock - see LU-787.
			 */
			cio->cui_fd = cl_iattr2fd(inode, attr);

		result = cl_io_loop(env, io);
	} else {
		result = io->ci_result;
	}
	cl_io_fini(env, io);
	if (unlikely(io->ci_need_restart))
		goto again;
	/*
	 * HSM import case: the file is released and cannot be restored, so
	 * there is no need to fail, except if restore registration failed
	 * with -ENODATA.
	 */
	if (result == -ENODATA && io->ci_restore_needed &&
	    io->ci_result != -ENODATA)
		result = 0;
	cl_env_put(env, &refcheck);
	return result;
}

/*****************************************************************************
 *
 * Type conversions.
 *
 */

struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
{
	return &vdv->cdv_cl.cd_lu_dev;
}

struct ccc_device *lu2ccc_dev(const struct lu_device *d)
{
	return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
}

struct ccc_device *cl2ccc_dev(const struct cl_device *d)
{
	return container_of0(d, struct ccc_device, cdv_cl);
}

struct lu_object *ccc2lu(struct ccc_object *vob)
{
	return &vob->cob_cl.co_lu;
}

struct ccc_object *lu2ccc(const struct lu_object *obj)
{
	return container_of0(obj, struct ccc_object, cob_cl.co_lu);
}

struct ccc_object *cl2ccc(const struct cl_object *obj)
{
	return container_of0(obj, struct ccc_object, cob_cl);
}

struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
{
	return container_of(slice, struct ccc_lock, clk_cl);
}

struct ccc_io *cl2ccc_io(const struct lu_env *env,
			 const struct cl_io_slice *slice)
{
	struct ccc_io *cio;

	cio = container_of(slice, struct ccc_io, cui_cl);
	LASSERT(cio == ccc_env_io(env));
	return cio;
}

struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
{
	return container_of0(slice, struct ccc_req, crq_cl);
}

struct page *cl2vm_page(const struct cl_page_slice *slice)
{
	return cl2ccc_page(slice)->cpg_page;
}

/*****************************************************************************
 *
 * Accessors.
 *
 */

int ccc_object_invariant(const struct cl_object *obj)
{
	struct inode *inode = ccc_object_inode(obj);
	struct cl_inode_info *lli = cl_i2info(inode);

	return (S_ISREG(cl_inode_mode(inode)) ||
		/* i_mode of unlinked inode is zeroed. */
		cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
}

struct inode *ccc_object_inode(const struct cl_object *obj)
{
	return cl2ccc(obj)->cob_inode;
}

/**
 * Returns a pointer to the cl_page associated with \a vmpage, without
 * acquiring an additional reference to the resulting page. This is an
 * unsafe version of cl_vmpage_page() that can only be used under the
 * vmpage lock.
 */
struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)
{
	KLASSERT(PageLocked(vmpage));
	return (struct cl_page *)vmpage->private;
}

/**
 * Initialize or update CLIO structures for regular files when new
 * meta-data arrives from the server.
 *
 * \param inode regular file inode
 * \param md    new file metadata from MDS
 * - allocates cl_object if necessary,
 * - updates the layout if the object was already there.
 */
int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
{
	struct lu_env *env;
	struct cl_inode_info *lli;
	struct cl_object *clob;
	struct lu_site *site;
	struct lu_fid *fid;
	struct cl_object_conf conf = {
		.coc_inode = inode,
		.u = {
			.coc_md = md
		}
	};
	int result = 0;
	int refcheck;

	LASSERT(md->body->valid & OBD_MD_FLID);
	LASSERT(S_ISREG(cl_inode_mode(inode)));

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	site = cl_i2sbi(inode)->ll_site;
	lli = cl_i2info(inode);
	fid = &lli->lli_fid;
	LASSERT(fid_is_sane(fid));

	if (lli->lli_clob == NULL) {
		/*
		 * The clob is a slave of the inode: an empty lli_clob means
		 * that for this new inode there is no clob in the cache with
		 * the given fid, so it is unnecessary to perform
		 * lookup-alloc-lookup-insert; just alloc and insert directly.
		 */
		LASSERT(inode->i_state & I_NEW);
		conf.coc_lu.loc_flags = LOC_F_NEW;
		clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
				      fid, &conf);
		if (!IS_ERR(clob)) {
			/*
			 * No locking is necessary, as the new inode is
			 * locked by the I_NEW bit.
			 */
			lli->lli_clob = clob;
			lli->lli_has_smd = lsm_has_objects(md->lsm);
			lu_object_ref_add(&clob->co_lu, "inode", inode);
		} else
			result = PTR_ERR(clob);
	} else {
		result = cl_conf_set(env, lli->lli_clob, &conf);
	}

	cl_env_put(env, &refcheck);

	if (result != 0)
		CERROR("Failure to initialize cl object "DFID": %d\n",
		       PFID(fid), result);
	return result;
}

/**
 * Wait for others to drop their references to the object first, then drop
 * the last one ourselves, which causes the object to be destroyed
 * immediately. Must be called after cl_object_kill() against this object.
 *
 * The reason we want to do this is: destroying the top object will wait for
 * sub-objects to be destroyed first, so we can't let the bottom layer (e.g.
 * from ASTs) initiate destruction of the top object, which may deadlock.
 * See bz22520.
 */
static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
{
	struct lu_object_header *header = obj->co_lu.lo_header;
	wait_queue_t waiter;

	if (unlikely(atomic_read(&header->loh_ref) != 1)) {
		struct lu_site *site = obj->co_lu.lo_dev->ld_site;
		struct lu_site_bkt_data *bkt;

		bkt = lu_site_bkt_from_fid(site, &header->loh_fid);

		init_waitqueue_entry(&waiter, current);
		add_wait_queue(&bkt->lsb_marche_funebre, &waiter);

		while (1) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&header->loh_ref) == 1)
				break;
			schedule();
		}

		set_current_state(TASK_RUNNING);
		remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
	}

	cl_object_put(env, obj);
}

void cl_inode_fini(struct inode *inode)
{
	struct lu_env *env;
	struct cl_inode_info *lli = cl_i2info(inode);
	struct cl_object *clob = lli->lli_clob;
	int refcheck;
	int emergency;

	if (clob != NULL) {
		void *cookie;

		cookie = cl_env_reenter();
		env = cl_env_get(&refcheck);
		emergency = IS_ERR(env);
		if (emergency) {
			mutex_lock(&ccc_inode_fini_guard);
			LASSERT(ccc_inode_fini_env != NULL);
			cl_env_implant(ccc_inode_fini_env, &refcheck);
			env = ccc_inode_fini_env;
		}
		/*
		 * cl_object cache is a slave to inode cache (which, in turn
		 * is a slave to dentry cache), don't keep cl_object in memory
		 * when its master is evicted.
		 */
		cl_object_kill(env, clob);
		lu_object_ref_del(&clob->co_lu, "inode", inode);
		cl_object_put_last(env, clob);
		lli->lli_clob = NULL;
		if (emergency) {
			cl_env_unplant(ccc_inode_fini_env, &refcheck);
			mutex_unlock(&ccc_inode_fini_guard);
		} else
			cl_env_put(env, &refcheck);
		cl_env_reexit(cookie);
	}
}

/**
 * Return the IF_* type for a given lu_dirent entry.
 * The IF_* flag should be converted to the particular OS file type by the
 * platform llite module.
 */
__u16 ll_dirent_type_get(struct lu_dirent *ent)
{
	__u16 type = 0;
	struct luda_type *lt;
	int len = 0;

	if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
		const unsigned align = sizeof(struct luda_type) - 1;

		len = le16_to_cpu(ent->lde_namelen);
		len = (len + align) & ~align;
		lt = (void *)ent->lde_name + len;
		type = IFTODT(le16_to_cpu(lt->lt_type));
	}
	return type;
}
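
/*
 * Worked example of the alignment above, assuming
 * sizeof(struct luda_type) == 2: a 5-byte name gives align == 1 and
 * len == (5 + 1) & ~1 == 6, so the luda_type record is read from
 * lde_name + 6, i.e. the name padded to a 2-byte boundary.
 */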

/**
 * Build an inode number from the passed \a fid.
 */
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
{
	if (BITS_PER_LONG == 32 || api32)
		return fid_flatten32(fid);
	else
		return fid_flatten(fid);
}
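
/*
 * For example (a sketch of intent, not of the exact bit layout): a client
 * serving 32-bit applications passes api32 != 0 and receives the compact
 * fid_flatten32() mapping so that the inode number fits in 32 bits;
 * otherwise the full 64-bit fid_flatten() value is used.
 */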

/**
 * Build the inode generation from the passed \a fid. If our FID overflows
 * the 32-bit inode number, return a non-zero generation to distinguish
 * such inodes.
 */
__u32 cl_fid_build_gen(const struct lu_fid *fid)
{
	__u32 gen;

	if (fid_is_igif(fid)) {
		gen = lu_igif_gen(fid);
		return gen;
	}

	gen = fid_flatten(fid) >> 32;
	return gen;
}

/*
 * lsm is unreliable after the hsm implementation as the layout can be
 * changed at any time. This is only to support old, non-clio-ized
 * interfaces. It will cause a deadlock if clio operations are called with
 * this extra layout refcount, because in case the layout changed during the
 * IO, ll_layout_refresh() will have to wait for the refcount to become zero
 * to destroy the older layout.
 *
 * Notice that the lsm returned by this function may not be valid unless
 * called inside the layout lock - MDS_INODELOCK_LAYOUT.
 */
struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
{
	return lov_lsm_get(cl_i2info(inode)->lli_clob);
}

inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
{
	lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
}