/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_io for VVP layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE


#include "../include/obd.h"
#include "../include/lustre_lite.h"

#include "vvp_internal.h"

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice);

/**
 * Returns true if \a io is a normal io, false for splice_{read,write}.
 */
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
	struct vvp_io *vio = vvp_env_io(env);

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	return vio->cui_io_subtype == IO_NORMAL;
}

/**
 * For layout swapping: the file's layout may have changed since this io
 * was built. To avoid populating pages on a wrong stripe, verify that
 * the layout generation is still current. This works because processes
 * swapping layouts have to take the group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
			       struct inode *inode)
{
	struct ll_inode_info	*lli = ll_i2info(inode);
	struct ccc_io		*cio = ccc_env_io(env);
	bool rc = true;

	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		/* No lock is needed to check lli_layout_gen here: we hold
		 * the extent lock, and the GROUP lock has to be held to
		 * swap the layout. */
		if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
			io->ci_need_restart = 1;
			/* this will return a short read/write to the
			 * application */
			io->ci_continue = 0;
			rc = false;
		}
		/* fall through */
	case CIT_FAULT:
		/* fault is okay because we've already had a page. */
		/* fall through */
	default:
		break;
	}

	return rc;
}

/*****************************************************************************
 *
 * io operations.
 *
 */

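/*
 * Remember the file's mtime when the fault io starts, so that
 * vvp_io_fault_start() can warn if an executable's backing file changed
 * while we waited for the page fault lock.
 */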
static int vvp_io_fault_iter_init(const struct lu_env *env,
				  const struct cl_io_slice *ios)
{
	struct vvp_io *vio   = cl2vvp_io(env, ios);
	struct inode  *inode = ccc_object_inode(ios->cis_obj);

	LASSERT(inode ==
		file_inode(cl2ccc_io(env, ios)->cui_fd->fd_file));
	vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
	return 0;
}

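/*
 * Generic io teardown for the VVP layer. This is also where restore of
 * a released file is triggered (ll_layout_restore()) when
 * ci_restore_needed is set, and where the layout generation is
 * re-checked so that the whole io is restarted if the layout changed
 * underneath it.
 */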
static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
	struct cl_io     *io  = ios->cis_io;
	struct cl_object *obj = io->ci_obj;
	struct ccc_io    *cio = cl2ccc_io(env, ios);

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	if (io->ci_restore_needed == 1) {
		int	rc;

		/* the file was detected as released; we need to restore it
		 * before finishing this io
		 */
		rc = ll_layout_restore(ccc_object_inode(obj));
		/* if restore registration failed, no restart,
		 * we will return -ENODATA */
		/* The layout will change after restore, so we need to
		 * block on the layout lock held by the MDT. As the MDT
		 * will not send the new layout in the lvb (see LU-3124),
		 * we have to fetch it explicitly; all of this is done
		 * by ll_layout_refresh()
		 */
		if (rc == 0) {
			io->ci_restore_needed = 0;
			io->ci_need_restart = 1;
			io->ci_verify_layout = 1;
		} else {
			io->ci_restore_needed = 1;
			io->ci_need_restart = 0;
			io->ci_verify_layout = 0;
			io->ci_result = rc;
		}
	}

	if (!io->ci_ignore_layout && io->ci_verify_layout) {
		__u32 gen = 0;

		/* check layout version */
		ll_layout_refresh(ccc_object_inode(obj), &gen);
		io->ci_need_restart = cio->cui_layout_gen != gen;
		if (io->ci_need_restart) {
			CDEBUG(D_VFSTRACE,
			       DFID" layout changed from %d to %d.\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       cio->cui_layout_gen, gen);
			/* today a successful restore is the only possible
			 * case */
			/* restore was done, clear restoring state */
			ll_i2info(ccc_object_inode(obj))->lli_flags &=
				~LLIF_FILE_RESTORING;
		}
	}
}

static void vvp_io_fault_fini(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct cl_io   *io   = ios->cis_io;
	struct cl_page *page = io->u.ci_fault.ft_page;

	CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));

	if (page != NULL) {
		lu_ref_del(&page->cp_reference, "fault", io);
		cl_page_put(env, page);
		io->u.ci_fault.ft_page = NULL;
	}
	vvp_io_fini(env, ios);
}

static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
{
	/*
	 * we only want to hold PW locks if the mmap() can generate
	 * writes back to the file and that only happens in shared
	 * writable vmas
	 */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
		return CLM_WRITE;
	return CLM_READ;
}

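/*
 * Enqueue cl locks covering any part of the user buffer that is mmapped
 * from a Lustre file. Faulting on such a region in the middle of this
 * read/write would recurse into the cl_io machinery while an extent
 * lock is already held, which could deadlock, so the needed locks are
 * taken up front, one vma at a time, under mmap_sem.
 */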
static int vvp_mmap_locks(const struct lu_env *env,
			  struct ccc_io *vio, struct cl_io *io)
{
	struct ccc_thread_info *cti = ccc_env_info(env);
	struct mm_struct       *mm = current->mm;
	struct vm_area_struct  *vma;
	struct cl_lock_descr   *descr = &cti->cti_descr;
	ldlm_policy_data_t      policy;
	unsigned long           addr;
	ssize_t                 count;
	int                     result;
	struct iov_iter i;
	struct iovec iov;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	if (!cl_is_normalio(env, io))
		return 0;

	if (vio->cui_iter == NULL) /* nfs or loop back device write */
		return 0;

	/* No mm (e.g. NFS)? Then there are no vmas either. */
	if (mm == NULL)
		return 0;

	iov_for_each(iov, i, *(vio->cui_iter)) {
		addr = (unsigned long)iov.iov_base;
		count = iov.iov_len;
		if (count == 0)
			continue;

		count += addr & (~CFS_PAGE_MASK);
		addr &= CFS_PAGE_MASK;

		down_read(&mm->mmap_sem);
		while ((vma = our_vma(mm, addr, count)) != NULL) {
			struct inode *inode = file_inode(vma->vm_file);
			int flags = CEF_MUST;

			if (ll_file_nolock(vma->vm_file)) {
				/*
				 * For the no-lock case, a lockless lock
				 * will be generated.
				 */
				flags = CEF_NEVER;
			}

			/*
			 * XXX: Required lock mode can be weakened: CIT_WRITE
			 * io only ever reads user level buffer, and CIT_READ
			 * only writes on it.
			 */
			policy_from_vma(&policy, vma, addr, count);
			descr->cld_mode = vvp_mode_from_vma(vma);
			descr->cld_obj = ll_i2info(inode)->lli_clob;
			descr->cld_start = cl_index(descr->cld_obj,
						    policy.l_extent.start);
			descr->cld_end = cl_index(descr->cld_obj,
						  policy.l_extent.end);
			descr->cld_enq_flags = flags;
			result = cl_io_lock_alloc_add(env, io, descr);

			CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
			       descr->cld_mode, descr->cld_start,
			       descr->cld_end);

			if (result < 0) {
				up_read(&mm->mmap_sem);
				return result;
			}

			if (vma->vm_end - addr >= count)
				break;

			count -= vma->vm_end - addr;
			addr = vma->vm_end;
		}
		up_read(&mm->mmap_sem);
	}
	return 0;
}

static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
			  enum cl_lock_mode mode, loff_t start, loff_t end)
{
	struct ccc_io *cio = ccc_env_io(env);
	int result;
	int ast_flags = 0;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	ccc_io_update_iov(env, cio, io);

	if (io->u.ci_rw.crw_nonblock)
		ast_flags |= CEF_NONBLOCK;
	result = vvp_mmap_locks(env, cio, io);
	if (result == 0)
		result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
	return result;
}

static int vvp_io_read_lock(const struct lu_env *env,
			    const struct cl_io_slice *ios)
{
	struct cl_io           *io = ios->cis_io;
	struct cl_io_rw_common *rd = &io->u.ci_rd.rd;
	int result;

	result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
				rd->crw_pos + rd->crw_count - 1);

	return result;
}

static int vvp_io_fault_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io  *io  = ios->cis_io;
	struct vvp_io *vio = cl2vvp_io(env, ios);
	/*
	 * XXX LDLM_FL_CBPENDING
	 */
	return ccc_io_one_lock_index
		(env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
		 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
}

static int vvp_io_write_lock(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct cl_io *io = ios->cis_io;
	loff_t start;
	loff_t end;

	if (io->u.ci_wr.wr_append) {
		start = 0;
		end   = OBD_OBJECT_EOF;
	} else {
		start = io->u.ci_wr.wr.crw_pos;
		end   = start + io->u.ci_wr.wr.crw_count - 1;
	}
	return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
}

static int vvp_io_setattr_iter_init(const struct lu_env *env,
				    const struct cl_io_slice *ios)
{
	return 0;
}

/**
 * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
 *
 * Handles the "lockless io" mode when extent locking is done by the server.
 */
static int vvp_io_setattr_lock(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct ccc_io *cio = ccc_env_io(env);
	struct cl_io  *io  = ios->cis_io;
	__u64 new_size;
	__u32 enqflags = 0;

	if (cl_io_is_trunc(io)) {
		new_size = io->u.ci_setattr.sa_attr.lvb_size;
		if (new_size == 0)
			enqflags = CEF_DISCARD_DATA;
	} else {
		if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime) ||
		    (io->u.ci_setattr.sa_attr.lvb_atime >=
		     io->u.ci_setattr.sa_attr.lvb_ctime))
			return 0;
		new_size = 0;
	}
	cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
	return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
			       new_size, OBD_OBJECT_EOF);
}

static int vvp_do_vmtruncate(struct inode *inode, size_t size)
{
	int result;

	/*
	 * Only ll_inode_size_lock is taken at this level.
	 */
	ll_inode_size_lock(inode);
	result = inode_newsize_ok(inode, size);
	if (result < 0) {
		ll_inode_size_unlock(inode);
		return result;
	}
	truncate_setsize(inode, size);
	ll_inode_size_unlock(inode);
	return result;
}

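/*
 * Wait for any outstanding direct io against the inode to drain before
 * the truncate proceeds; see the matching inode_dio_write_done() call
 * in vvp_io_setattr_end().
 */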
static int vvp_io_setattr_trunc(const struct lu_env *env,
				const struct cl_io_slice *ios,
				struct inode *inode, loff_t size)
{
	inode_dio_wait(inode);
	return 0;
}

static int vvp_io_setattr_time(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io     *io   = ios->cis_io;
	struct cl_object *obj  = io->ci_obj;
	struct cl_attr   *attr = ccc_env_thread_attr(env);
	int result;
	unsigned valid = CAT_CTIME;

	cl_object_attr_lock(obj);
	attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
	if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
		attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
		valid |= CAT_ATIME;
	}
	if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
		attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
		valid |= CAT_MTIME;
	}
	result = cl_object_attr_set(env, obj, attr, valid);
	cl_object_attr_unlock(obj);

	return result;
}

static int vvp_io_setattr_start(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	struct cl_io *io    = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);
	int result = 0;

	mutex_lock(&inode->i_mutex);
	if (cl_io_is_trunc(io))
		result = vvp_io_setattr_trunc(env, ios, inode,
					io->u.ci_setattr.sa_attr.lvb_size);
	if (result == 0)
		result = vvp_io_setattr_time(env, ios);
	return result;
}

static void vvp_io_setattr_end(const struct lu_env *env,
			       const struct cl_io_slice *ios)
{
	struct cl_io *io    = ios->cis_io;
	struct inode *inode = ccc_object_inode(io->ci_obj);

	if (cl_io_is_trunc(io)) {
		/* Truncate the in-memory pages; they must be clean, because
		 * osc has already been notified to destroy the osc_extents. */
		vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
		inode_dio_write_done(inode);
	}
	mutex_unlock(&inode->i_mutex);
}

static void vvp_io_setattr_fini(const struct lu_env *env,
				const struct cl_io_slice *ios)
{
	vvp_io_fini(env, ios);
}

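/*
 * Start a read: give up early if the layout changed (short read plus
 * restart), clip the io against KMS/i_size in ccc_prep_size(), disable
 * the kernel's own readahead in favour of Lustre's window, then hand
 * off to generic_file_read_iter() or generic_file_splice_read().
 */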
static int vvp_io_read_start(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct vvp_io     *vio   = cl2vvp_io(env, ios);
	struct ccc_io     *cio   = cl2ccc_io(env, ios);
	struct cl_io      *io    = ios->cis_io;
	struct cl_object  *obj   = io->ci_obj;
	struct inode      *inode = ccc_object_inode(obj);
	struct ll_ra_read *bead  = &vio->cui_bead;
	struct file       *file  = cio->cui_fd->fd_file;

	int     result;
	loff_t  pos = io->u.ci_rd.rd.crw_pos;
	long    cnt = io->u.ci_rd.rd.crw_count;
	long    tot = cio->cui_tot_count;
	int     exceed = 0;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);

	if (!can_populate_pages(env, io, inode))
		return 0;

	result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
	if (result != 0)
		return result;
	else if (exceed != 0)
		goto out;

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
			"Read ino %lu, %lu bytes, offset %lld, size %llu\n",
			inode->i_ino, cnt, pos, i_size_read(inode));

	/* turn off the kernel's read-ahead */
	cio->cui_fd->fd_file->f_ra.ra_pages = 0;

	/* initialize read-ahead window once per syscall */
	if (!vio->cui_ra_window_set) {
		vio->cui_ra_window_set = 1;
		bead->lrr_start = cl_index(obj, pos);
		/*
		 * XXX: explicit PAGE_CACHE_SIZE
		 */
		bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
		ll_ra_read_in(file, bead);
	}

	/* BUG: 5972 */
	file_accessed(file);
	switch (vio->cui_io_subtype) {
	case IO_NORMAL:
		LASSERT(cio->cui_iocb->ki_pos == pos);
		result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
		break;
	case IO_SPLICE:
		result = generic_file_splice_read(file, &pos,
				vio->u.splice.cui_pipe, cnt,
				vio->u.splice.cui_flags);
		/* LU-1109: do splice read stripe by stripe, otherwise it
		 * may make nfsd stuck if this read occupies all internal
		 * pipe buffers. */
		io->ci_continue = 0;
		break;
	default:
		CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
		LBUG();
	}

out:
	if (result >= 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, READ);
		result = 0;
	}
	return result;
}

static void vvp_io_read_fini(const struct lu_env *env,
			     const struct cl_io_slice *ios)
{
	struct vvp_io *vio = cl2vvp_io(env, ios);
	struct ccc_io *cio = cl2ccc_io(env, ios);

	if (vio->cui_ra_window_set)
		ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);

	vvp_io_fini(env, ios);
}

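/*
 * Start a write. For O_APPEND the position is (re)sampled from i_size
 * here; this is safe because vvp_io_write_lock() took the extent lock
 * over [0, EOF] in the append case. A short write from
 * generic_file_write_iter() clears ci_continue to stop the io loop.
 */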
static int vvp_io_write_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct ccc_io    *cio   = cl2ccc_io(env, ios);
	struct cl_io     *io    = ios->cis_io;
	struct cl_object *obj   = io->ci_obj;
	struct inode     *inode = ccc_object_inode(obj);
	ssize_t result = 0;
	loff_t pos = io->u.ci_wr.wr.crw_pos;
	size_t cnt = io->u.ci_wr.wr.crw_count;

	if (!can_populate_pages(env, io, inode))
		return 0;

	if (cl_io_is_append(io)) {
		/*
		 * PARALLEL IO: this has to be changed for parallel IO doing
		 * out-of-order writes.
		 */
		pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
		cio->cui_iocb->ki_pos = pos;
	} else {
		LASSERT(cio->cui_iocb->ki_pos == pos);
	}

	CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);

	if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init() */
		result = 0;
	else
		result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);

	if (result > 0) {
		if (result < cnt)
			io->ci_continue = 0;
		io->ci_nob += result;
		ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
				  cio->cui_fd, pos, result, WRITE);
		result = 0;
	}
	return result;
}

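/*
 * Let the kernel fault the page in via filemap_fault() and translate
 * the VM_FAULT_* result into an errno: SIGBUS/SIGSEGV -> -EFAULT,
 * OOM -> -ENOMEM, RETRY -> -EAGAIN. On success the page is stored
 * locked in cfio->ft_vmpage.
 */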
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
	struct vm_fault *vmf = cfio->fault.ft_vmf;

	cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
	cfio->fault.ft_flags_valid = 1;

	if (vmf->page) {
		CDEBUG(D_PAGE,
		       "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
		       vmf->page, vmf->page->mapping, vmf->page->index,
		       (long)vmf->page->flags, page_count(vmf->page),
		       page_private(vmf->page), vmf->virtual_address);
		if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
			lock_page(vmf->page);
			cfio->fault.ft_flags |= VM_FAULT_LOCKED;
		}

		cfio->ft_vmpage = vmf->page;
		return 0;
	}

	if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
		return -EFAULT;
	}

	if (cfio->fault.ft_flags & VM_FAULT_OOM) {
		CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
		return -ENOMEM;
	}

	if (cfio->fault.ft_flags & VM_FAULT_RETRY)
		return -EAGAIN;

	CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
	return -EINVAL;
}

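/*
 * Fault handler proper: obtain a locked vmpage (from the mkwrite caller
 * or via vvp_io_kernel_fault()), re-check for races with a local
 * truncate, and wrap the vmpage into a cl_page. For mkwrite the page is
 * also dirtied and queued for write-out here.
 */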
static int vvp_io_fault_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	struct vvp_io       *vio    = cl2vvp_io(env, ios);
	struct cl_io        *io     = ios->cis_io;
	struct cl_object    *obj    = io->ci_obj;
	struct inode        *inode  = ccc_object_inode(obj);
	struct cl_fault_io  *fio    = &io->u.ci_fault;
	struct vvp_fault_io *cfio   = &vio->u.fault;
	loff_t               offset;
	int                  result = 0;
	struct page         *vmpage = NULL;
	struct cl_page      *page;
	loff_t               size;
	pgoff_t              last;  /* last page in a file data region */

	if (fio->ft_executable &&
	    LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
		CWARN("binary "DFID
		      " changed while waiting for the page fault lock\n",
		      PFID(lu_object_fid(&obj->co_lu)));

	/* offset of the last byte on the page */
	offset = cl_offset(obj, fio->ft_index + 1) - 1;
	LASSERT(cl_index(obj, offset) == fio->ft_index);
	result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
	if (result != 0)
		return result;

	/* must return locked page */
	if (fio->ft_mkwrite) {
		LASSERT(cfio->ft_vmpage != NULL);
		lock_page(cfio->ft_vmpage);
	} else {
		result = vvp_io_kernel_fault(cfio);
		if (result != 0)
			return result;
	}

	vmpage = cfio->ft_vmpage;
	LASSERT(PageLocked(vmpage));

	if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
		ll_invalidate_page(vmpage);

	size = i_size_read(inode);
	/* Though we already hold a cl_lock on this page, it can still be
	 * truncated locally. */
	if (unlikely((vmpage->mapping != inode->i_mapping) ||
		     (page_offset(vmpage) > size))) {
		CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");

		/* return +1 to stop cl_io_loop(); ll_fault() will catch
		 * this and retry. */
		result = +1;
		goto out;
	}

	if (fio->ft_mkwrite) {
		pgoff_t last_index;
		/*
		 * Capture the size while holding the lli_trunc_sem from
		 * above; we want to make sure that the mkwrite action
		 * completes while holding this lock, and that we are not
		 * past the end of the file.
		 */
		last_index = cl_index(obj, size - 1);
		if (last_index < fio->ft_index) {
			CDEBUG(D_PAGE,
			       "llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
			       vmpage->mapping, fio->ft_index, last_index);
			/*
			 * We need to return if we are past the end of the
			 * file. This will propagate up the call stack to
			 * ll_page_mkwrite, where we will return
			 * VM_FAULT_NOPAGE: any non-negative value returned
			 * here is silently converted to 0. If
			 * vmpage->mapping is NULL, the error code is
			 * converted back to ENODATA in ll_page_mkwrite0.
			 * Thus we return -ENODATA to handle both cases.
			 */
			result = -ENODATA;
			goto out;
		}
	}

	page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
	if (IS_ERR(page)) {
		result = PTR_ERR(page);
		goto out;
	}

	/* if the page is going to be written, we should add it into the
	 * cache earlier. */
	if (fio->ft_mkwrite) {
		wait_on_page_writeback(vmpage);
		if (set_page_dirty(vmpage)) {
			struct ccc_page *cp;

			/* vvp_page_assume() calls wait_on_page_writeback(). */
			cl_page_assume(env, io, page);

			cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
			vvp_write_pending(cl2ccc(obj), cp);

			/* Do not set the Dirty bit here, so that in case IO
			 * is started before the page is really made dirty,
			 * we still have a chance to detect it. */
			result = cl_page_cache_add(env, io, page, CRT_WRITE);
			LASSERT(cl_page_is_owned(page, io));

			vmpage = NULL;
			if (result < 0) {
				cl_page_unmap(env, io, page);
				cl_page_discard(env, io, page);
				cl_page_disown(env, io, page);

				cl_page_put(env, page);

				/* we're in big trouble, what can we do now? */
				if (result == -EDQUOT)
					result = -ENOSPC;
				goto out;
			} else
				cl_page_disown(env, io, page);
		}
	}

	last = cl_index(obj, size - 1);
	/*
	 * The ft_index is only used in the case of a mkwrite action. We
	 * need to check that our assertions are correct, since we should
	 * have caught this above.
	 */
	LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
	if (fio->ft_index == last)
		/*
		 * Last page is mapped partially.
		 */
		fio->ft_nob = size - cl_offset(obj, fio->ft_index);
	else
		fio->ft_nob = cl_page_size(obj);

	lu_ref_add(&page->cp_reference, "fault", io);
	fio->ft_page = page;

out:
	/* return an unlocked vmpage to avoid deadlocking */
	if (vmpage != NULL)
		unlock_page(vmpage);
	cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
	return result;
}

static int vvp_io_fsync_start(const struct lu_env *env,
			      const struct cl_io_slice *ios)
{
	/* we should mark the TOWRITE bit on each dirty page in the radix
	 * tree to verify that pages have been written out, but this is
	 * difficult to do because of races. */
	return 0;
}

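/*
 * Per-page hook of the read path: update the readahead state machine,
 * sanity-check that the page is covered by a lock, queue the page and
 * let ll_readahead() append readahead pages to the same queue.
 */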
static int vvp_io_read_page(const struct lu_env *env,
			    const struct cl_io_slice *ios,
			    const struct cl_page_slice *slice)
{
	struct cl_io              *io     = ios->cis_io;
	struct cl_object          *obj    = slice->cpl_obj;
	struct ccc_page           *cp     = cl2ccc_page(slice);
	struct cl_page            *page   = slice->cpl_page;
	struct inode              *inode  = ccc_object_inode(obj);
	struct ll_sb_info         *sbi    = ll_i2sbi(inode);
	struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
	struct ll_readahead_state *ras    = &fd->fd_ras;
	struct page               *vmpage = cp->cpg_page;
	struct cl_2queue          *queue  = &io->ci_queue;
	int rc;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));
	LASSERT(slice->cpl_obj == obj);

	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ras_update(sbi, inode, ras, page->cp_index,
			   cp->cpg_defer_uptodate);

	/* Sanity check whether the page is protected by a lock. */
	rc = cl_page_is_under_lock(env, io, page);
	if (rc != -EBUSY) {
		CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
			       rc == -ENODATA ? "without a lock" :
			       "match failed", rc);
		if (rc != -ENODATA)
			return rc;
	}

	if (cp->cpg_defer_uptodate) {
		cp->cpg_ra_used = 1;
		cl_page_export(env, page, 1);
	}
	/*
	 * Add the page into the queue even when it is marked uptodate
	 * above; this will unlock it automatically as part of
	 * cl_page_list_disown().
	 */
	cl_2queue_add(queue, page);
	if (sbi->ll_ra_info.ra_max_pages_per_file &&
	    sbi->ll_ra_info.ra_max_pages)
		ll_readahead(env, io, ras,
			     vmpage->mapping, &queue->c2_qin, fd->fd_flags);

	return 0;
}

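/*
 * Submit a single page for synchronous io. For CRT_READ the page is
 * disowned (and thus unlocked) on completion; for CRT_WRITE it is
 * deliberately left locked even on error, as the write path expects.
 */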
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
			    struct cl_page *page, struct ccc_page *cp,
			    enum cl_req_type crt)
{
	struct cl_2queue *queue;
	int result;

	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);

	queue = &io->ci_queue;
	cl_2queue_init_page(queue, page);

	result = cl_io_submit_sync(env, io, crt, queue, 0);
	LASSERT(cl_page_is_owned(page, io));

	if (crt == CRT_READ)
		/*
		 * in the CRT_WRITE case the page is left locked even in
		 * case of error.
		 */
		cl_page_list_disown(env, io, &queue->c2_qin);
	cl_2queue_fini(env, queue);

	return result;
}

/**
 * Prepare a partially written-to page for a write.
 */
static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
				  struct cl_object *obj, struct cl_page *pg,
				  struct ccc_page *cp,
				  unsigned from, unsigned to)
{
	struct cl_attr *attr   = ccc_env_thread_attr(env);
	loff_t          offset = cl_offset(obj, pg->cp_index);
	int             result;

	cl_object_attr_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);
	if (result == 0) {
		/*
		 * If we are writing to a new page, no need to read old data.
		 * The extent locking will have updated the KMS, and for our
		 * purposes here we can treat it like i_size.
		 */
		if (attr->cat_kms <= offset) {
			char *kaddr = kmap_atomic(cp->cpg_page);

			memset(kaddr, 0, cl_page_size(obj));
			kunmap_atomic(kaddr);
		} else if (cp->cpg_defer_uptodate)
			cp->cpg_ra_used = 1;
		else
			result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
		/*
		 * In older implementations, obdo_refresh_inode was called
		 * here to update the inode because the write might modify
		 * the object info at the OST. However, this has been proven
		 * useless, since LVB functions will be called when user
		 * space programs try to retrieve inode attributes. Also see
		 * bug 15909 for details. -jay
		 */
		if (result == 0)
			cl_page_export(env, pg, 1);
	}
	return result;
}

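/*
 * ->cio_prepare_write() hook. A full-page overwrite needs no read;
 * writing part of a not-uptodate page goes through
 * vvp_io_prepare_partial(), which either zeroes a page beyond KMS or
 * synchronously reads the old data in first.
 */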
static int vvp_io_prepare_write(const struct lu_env *env,
				const struct cl_io_slice *ios,
				const struct cl_page_slice *slice,
				unsigned from, unsigned to)
{
	struct cl_object *obj    = slice->cpl_obj;
	struct ccc_page  *cp     = cl2ccc_page(slice);
	struct cl_page   *pg     = slice->cpl_page;
	struct page      *vmpage = cp->cpg_page;

	int result;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == ccc_object_inode(obj));

	result = 0;

	CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
	if (!PageUptodate(vmpage)) {
		/*
		 * We're completely overwriting an existing page, so _don't_
		 * set it up to date until commit_write
		 */
		if (from == 0 && to == PAGE_CACHE_SIZE) {
			CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
			POISON_PAGE(page, 0x11);
		} else
			result = vvp_io_prepare_partial(env, ios->cis_io, obj,
							pg, cp, from, to);
	} else
		CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
	return result;
}

static int vvp_io_commit_write(const struct lu_env *env,
			       const struct cl_io_slice *ios,
			       const struct cl_page_slice *slice,
			       unsigned from, unsigned to)
{
	struct cl_object     *obj    = slice->cpl_obj;
	struct cl_io         *io     = ios->cis_io;
	struct ccc_page      *cp     = cl2ccc_page(slice);
	struct cl_page       *pg     = slice->cpl_page;
	struct inode         *inode  = ccc_object_inode(obj);
	struct ll_sb_info    *sbi    = ll_i2sbi(inode);
	struct ll_inode_info *lli    = ll_i2info(inode);
	struct page          *vmpage = cp->cpg_page;

	int    result;
	int    tallyop;
	loff_t size;

	LINVRNT(cl_page_is_vmlocked(env, pg));
	LASSERT(vmpage->mapping->host == inode);

	LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
	CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);

	/*
	 * Queue a write for some time in the future the first time we
	 * dirty the page.
	 *
	 * This is different from what other file systems do: they usually
	 * just mark the page (and some of its buffers) dirty and rely on
	 * balance_dirty_pages() to start a write-back. Lustre wants
	 * write-back to be started earlier for the following reasons:
	 *
	 *     (1) with a large number of clients we need to limit the
	 *     amount of cached data on the clients a lot;
	 *
	 *     (2) large compute jobs generally want compute-only then
	 *     io-only and the IO should complete as quickly as possible;
	 *
	 *     (3) IO is batched up to the RPC size and is async until the
	 *     client max cache is hit
	 *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
	 */
	if (!PageDirty(vmpage)) {
		tallyop = LPROC_LL_DIRTY_MISSES;
		result = cl_page_cache_add(env, io, pg, CRT_WRITE);
		if (result == 0) {
			/* page was added into cache successfully. */
			set_page_dirty(vmpage);
			vvp_write_pending(cl2ccc(obj), cp);
		} else if (result == -EDQUOT) {
			pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
			bool need_clip = true;

			/*
			 * Client ran out of disk space grant. Possible
			 * strategies are:
			 *
			 *     (a) do a sync write, renewing grant;
			 *
			 *     (b) stop writing on this stripe, switch to the
			 *     next one.
			 *
			 * (b) is a part of the "parallel io" design that is
			 * the ultimate goal. (a) is what the "old" client
			 * did, and what the new code continues to do for
			 * the time being.
			 */
			if (last_index > pg->cp_index) {
				to = PAGE_CACHE_SIZE;
				need_clip = false;
			} else if (last_index == pg->cp_index) {
				int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;

				if (to < size_to)
					to = size_to;
			}
			if (need_clip)
				cl_page_clip(env, pg, 0, to);
			result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
			if (result)
				CERROR("Write page %lu of inode %p failed %d\n",
				       pg->cp_index, inode, result);
		}
	} else {
		tallyop = LPROC_LL_DIRTY_HITS;
		result = 0;
	}
	ll_stats_ops_tally(sbi, tallyop, 1);

	/* The inode should be marked DIRTY even if no new page was marked
	 * DIRTY, because the page could have gone unflushed between two
	 * modifications. It is important that the file is marked DIRTY as
	 * soon as the I/O is done: when the cache is eventually flushed,
	 * the file could already be closed and it would be too late to
	 * warn the MDT. It is acceptable for the file to be marked DIRTY
	 * even if the I/O is dropped for some reason before being flushed
	 * to the OST.
	 */
	if (result == 0) {
		spin_lock(&lli->lli_lock);
		lli->lli_flags |= LLIF_DATA_MODIFIED;
		spin_unlock(&lli->lli_lock);
	}

	size = cl_offset(obj, pg->cp_index) + to;

	ll_inode_size_lock(inode);
	if (result == 0) {
		if (size > i_size_read(inode)) {
			cl_isize_write_nolock(inode, size);
			CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
			       PFID(lu_object_fid(&obj->co_lu)),
			       (unsigned long)size);
		}
		cl_page_export(env, pg, 1);
	} else {
		if (size > i_size_read(inode))
			cl_page_discard(env, io, pg);
	}
	ll_inode_size_unlock(inode);
	return result;
}

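/*
 * Per-io-type operation vectors of the VVP layer, plus the page-level
 * read/prepare/commit hooks shared by all io types.
 */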
static const struct cl_io_operations vvp_io_ops = {
	.op = {
		[CIT_READ] = {
			.cio_fini      = vvp_io_read_fini,
			.cio_lock      = vvp_io_read_lock,
			.cio_start     = vvp_io_read_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_WRITE] = {
			.cio_fini      = vvp_io_fini,
			.cio_lock      = vvp_io_write_lock,
			.cio_start     = vvp_io_write_start,
			.cio_advance   = ccc_io_advance
		},
		[CIT_SETATTR] = {
			.cio_fini       = vvp_io_setattr_fini,
			.cio_iter_init  = vvp_io_setattr_iter_init,
			.cio_lock       = vvp_io_setattr_lock,
			.cio_start      = vvp_io_setattr_start,
			.cio_end        = vvp_io_setattr_end
		},
		[CIT_FAULT] = {
			.cio_fini      = vvp_io_fault_fini,
			.cio_iter_init = vvp_io_fault_iter_init,
			.cio_lock      = vvp_io_fault_lock,
			.cio_start     = vvp_io_fault_start,
			.cio_end       = ccc_io_end
		},
		[CIT_FSYNC] = {
			.cio_start  = vvp_io_fsync_start,
			.cio_fini   = vvp_io_fini
		},
		[CIT_MISC] = {
			.cio_fini   = vvp_io_fini
		}
	},
	.cio_read_page     = vvp_io_read_page,
	.cio_prepare_write = vvp_io_prepare_write,
	.cio_commit_write  = vvp_io_commit_write
};

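/*
 * Initialize the VVP part of a cl_io; this is the layer's io
 * initializer, normally reached through cl_io_init(). It attaches the
 * ccc_io slice, takes the layout lock (unless ci_ignore_layout) and
 * records the layout generation that can_populate_pages() checks later.
 */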
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
		struct cl_io *io)
{
	struct vvp_io *vio   = vvp_env_io(env);
	struct ccc_io *cio   = ccc_env_io(env);
	struct inode  *inode = ccc_object_inode(obj);
	int result;

	CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	CDEBUG(D_VFSTRACE, DFID
	       " ignore/verify layout %d/%d, layout version %d restore needed %d\n",
	       PFID(lu_object_fid(&obj->co_lu)),
	       io->ci_ignore_layout, io->ci_verify_layout,
	       cio->cui_layout_gen, io->ci_restore_needed);

	CL_IO_SLICE_CLEAN(cio, cui_cl);
	cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
	vio->cui_ra_window_set = 0;
	result = 0;
	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
		size_t count;
		struct ll_inode_info *lli = ll_i2info(inode);

		count = io->u.ci_rw.crw_count;
		/* "If nbyte is 0, read() will return 0 and have no other
		 *  results."  -- Single Unix Spec */
		if (count == 0)
			result = 1;
		else
			cio->cui_tot_count = count;

		/* for read/write, we store the jobid in the inode, and
		 * it'll be fetched by osc when building the RPC.
		 *
		 * it's not accurate if the file is shared by different
		 * jobs.
		 */
		lustre_get_jobid(lli->lli_jobid);
	} else if (io->ci_type == CIT_SETATTR) {
		if (!cl_io_is_trunc(io))
			io->ci_lockreq = CILR_MANDATORY;
	}

	/* Ignore layout changes for generic CIT_MISC io, but not for
	 * glimpse: the io context for glimpse must set ci_verify_layout
	 * to true, see cl_glimpse_size0() for details. */
	if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
		io->ci_ignore_layout = 1;

	/* Enqueue the layout lock and get the layout version. We need to
	 * do this even for operations that require an open file, such as
	 * read and write, because IT_OPEN might not grant a layout lock. */
	if (result == 0 && !io->ci_ignore_layout) {
		result = ll_layout_refresh(inode, &cio->cui_layout_gen);
		if (result == -ENOENT)
			/* If the inode on the MDS has been removed, but the
			 * objects on the OSTs haven't been destroyed (async
			 * unlink), layout fetch will return -ENOENT; ignore
			 * this error and continue with the dirty flush.
			 * LU-3230. */
			result = 0;
		if (result < 0)
			CERROR("%s: refresh file layout " DFID " error %d.\n",
				ll_get_fsname(inode->i_sb, NULL, 0),
				PFID(lu_object_fid(&obj->co_lu)), result);
	}

	return result;
}

static struct vvp_io *cl2vvp_io(const struct lu_env *env,
				const struct cl_io_slice *slice)
{
	/* Calling just for assertion */
	cl2ccc_io(env, slice);
	return vvp_env_io(env);
}