/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "../include/linux/lustre_compat25.h"

static const struct vm_operations_struct ll_file_vm_ops;

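/*
 * Build the ldlm extent lock policy (a byte range of the file) covering
 * @count bytes of the mapping @vma starting at user address @addr.  The
 * start of the extent is rounded down and the end rounded up to page
 * boundaries.
 */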
void policy_from_vma(ldlm_policy_data_t *policy,
		     struct vm_area_struct *vma, unsigned long addr,
		     size_t count)
{
	policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
				 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
	policy->l_extent.end = (policy->l_extent.start + count - 1) |
			       ~CFS_PAGE_MASK;
}

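/*
 * Find the first VMA in @mm that maps any part of [addr, addr + count)
 * through a shared Lustre file mapping (i.e. one using ll_file_vm_ops),
 * or return NULL if there is none.  The caller must hold mmap_sem.
 */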
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
			       size_t count)
{
	struct vm_area_struct *vma, *ret = NULL;

	/* mmap_sem must be held by the caller. */
	LASSERT(!down_write_trylock(&mm->mmap_sem));

	for (vma = find_vma(mm, addr);
	    vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
		    vma->vm_flags & VM_SHARED) {
			ret = vma;
			break;
		}
	}
	return ret;
}

/**
 * API-independent part of page fault initialization.
 * \param vma - virtual memory area addressed by the page fault
 * \param env - corresponding lu_env for processing
 * \param nest - nesting level
 * \param index - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return allocated and initialized env for the fault operation
 * \retval EINVAL if the env can't be allocated
 * \return other error codes from cl_io_init
 */
static struct cl_io *
ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
		 struct cl_env_nest *nest, pgoff_t index,
		 unsigned long *ra_flags)
{
	struct file	       *file = vma->vm_file;
	struct inode	       *inode = file_inode(file);
	struct cl_io	       *io;
	struct cl_fault_io     *fio;
	struct lu_env	       *env;
	int			rc;

	*env_ret = NULL;
	if (ll_file_nolock(file))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * A page fault can occur while Lustre I/O is already active for the
	 * current thread, e.g. when doing a read/write against a user-level
	 * buffer that is itself mapped from a Lustre file.  To avoid stomping
	 * on the existing context, optionally force the allocation of a new
	 * one.
	 */
	env = cl_env_nested_get(nest);
	if (IS_ERR(env))
		return ERR_PTR(-EINVAL);

	*env_ret = env;

	io = ccc_env_thread_io(env);
	io->ci_obj = ll_i2info(inode)->lli_clob;
	LASSERT(io->ci_obj != NULL);

	fio = &io->u.ci_fault;
	fio->ft_index      = index;
	fio->ft_executable = vma->vm_flags & VM_EXEC;

	/*
	 * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
	 * the kernel will not read other pages not covered by ldlm in
	 * filemap_nopage. we do our readahead in ll_readpage.
	 */
	if (ra_flags != NULL)
		*ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
	vma->vm_flags &= ~VM_SEQ_READ;
	vma->vm_flags |= VM_RAND_READ;

	CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
	       fio->ft_index, fio->ft_executable);

	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
	if (rc == 0) {
		struct ccc_io *cio = ccc_env_io(env);
		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

		LASSERT(cio->cui_cl.cis_io == io);

		/* The mmap lock must be MANDATORY since it has to cache
		 * pages. */
		io->ci_lockreq = CILR_MANDATORY;
		cio->cui_fd = fd;
	} else {
		LASSERT(rc < 0);
		cl_io_fini(env, io);
		cl_env_nested_put(nest, env);
		io = ERR_PTR(rc);
	}

	return io;
}

/* Shared code for the page_mkwrite method on RHEL5 and RHEL6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
			    bool *retry)
{
	struct lu_env	   *env;
	struct cl_io	    *io;
	struct vvp_io	   *vio;
	struct cl_env_nest       nest;
	int		      result;
	sigset_t	     set;
	struct inode	     *inode;
	struct ll_inode_info     *lli;

	LASSERT(vmpage != NULL);

	io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
	if (IS_ERR(io)) {
		result = PTR_ERR(io);
		goto out;
	}

	result = io->ci_result;
	if (result < 0)
		goto out_io;

	io->u.ci_fault.ft_mkwrite = 1;
	io->u.ci_fault.ft_writable = 1;

	vio = vvp_env_io(env);
	vio->u.fault.ft_vma    = vma;
	vio->u.fault.ft_vmpage = vmpage;

	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

	/* We grab lli_trunc_sem to exclude a concurrent truncate.
	 * Otherwise, we could add dirty pages into the osc cache
	 * while a truncate is in progress. */
	inode = ccc_object_inode(io->ci_obj);
	lli = ll_i2info(inode);
	down_read(&lli->lli_trunc_sem);

	result = cl_io_loop(env, io);

	up_read(&lli->lli_trunc_sem);

	cfs_restore_sigs(set);

	if (result == 0) {
		struct inode *inode = file_inode(vma->vm_file);
		struct ll_inode_info *lli = ll_i2info(inode);

		lock_page(vmpage);
		if (vmpage->mapping == NULL) {
			unlock_page(vmpage);

			/* The page was truncated and the lock was cancelled;
			 * return ENODATA so that VM_FAULT_NOPAGE will be
			 * returned to handle_mm_fault(). */
			if (result == 0)
				result = -ENODATA;
		} else if (!PageDirty(vmpage)) {
			/* Race: the page was cleaned by ptlrpcd after it was
			 * unlocked.  It has to be added to the dirty cache
			 * again, otherwise this soon-to-be-dirty page won't
			 * consume any grant; even worse, if the page is being
			 * transferred, redirtying it would break the RPC
			 * checksum.
			 */
			unlock_page(vmpage);

			CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
			       vmpage, vmpage->index);

			*retry = true;
			result = -EAGAIN;
		}

		if (result == 0) {
			spin_lock(&lli->lli_lock);
			lli->lli_flags |= LLIF_DATA_MODIFIED;
			spin_unlock(&lli->lli_lock);
		}
	}

out_io:
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);
out:
	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
	LASSERT(ergo(result == 0, PageLocked(vmpage)));

	return result;
}
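/*
 * Translate a cl_io result code into the VM_FAULT_* value expected by the
 * kernel's fault handling path.
 */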
static inline int to_fault_error(int result)
{
	switch (result) {
	case 0:
		result = VM_FAULT_LOCKED;
		break;
	case -EFAULT:
		result = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}
	return result;
}

/**
 * Lustre implementation of the vm_operations_struct::fault() method, called
 * by the VM to serve a page fault (both in kernel and user space).
 *
 * \param vma - virtual memory area related to the page fault
 * \param vmf - structure describing the fault type and address
 *
 * \return allocated and filled _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM if there is no memory to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct lu_env	   *env;
	struct cl_io	    *io;
	struct vvp_io	   *vio = NULL;
	struct page	     *vmpage;
	unsigned long	    ra_flags;
	struct cl_env_nest       nest;
	int		      result;
	int		      fault_ret = 0;

	io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
	if (IS_ERR(io))
		return to_fault_error(PTR_ERR(io));

	result = io->ci_result;
	if (result == 0) {
		vio = vvp_env_io(env);
		vio->u.fault.ft_vma       = vma;
		vio->u.fault.ft_vmpage    = NULL;
		vio->u.fault.fault.ft_vmf = vmf;
		vio->u.fault.fault.ft_flags = 0;
		vio->u.fault.fault.ft_flags_valid = false;

		result = cl_io_loop(env, io);

		/* ft_flags are only valid if we reached
		 * the call to filemap_fault */
		if (vio->u.fault.fault.ft_flags_valid)
			fault_ret = vio->u.fault.fault.ft_flags;

		vmpage = vio->u.fault.ft_vmpage;
		if (result != 0 && vmpage != NULL) {
			page_cache_release(vmpage);
			vmf->page = NULL;
		}
	}
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);

	vma->vm_flags |= ra_flags;
	if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
		fault_ret |= to_fault_error(result);

	CDEBUG(D_MMAP, "%s fault %d/%d\n",
	       current->comm, fault_ret, result);
	return fault_ret;
}

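/*
 * The fault() entry point: block all signals except SIGKILL and SIGTERM,
 * delegate to ll_fault0() and retry if the returned page was truncated
 * before it could be locked.
 */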
static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int count = 0;
	bool printed = false;
	int result;
	sigset_t set;

	/* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
	 * so that the process can be killed by an admin, but other signals
	 * do not cause a segfault. */
	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

restart:
	result = ll_fault0(vma, vmf);
	LASSERT(!(result & VM_FAULT_LOCKED));
	if (result == 0) {
		struct page *vmpage = vmf->page;

		/* check if this page has been truncated */
		lock_page(vmpage);
		if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
			unlock_page(vmpage);
			page_cache_release(vmpage);
			vmf->page = NULL;

			if (!printed && ++count > 16) {
				CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
				      current->comm);
				printed = true;
			}

			goto restart;
		}

		result = VM_FAULT_LOCKED;
	}
	cfs_restore_sigs(set);
	return result;
}

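/*
 * The page_mkwrite() entry point: keep calling ll_page_mkwrite0() while it
 * asks for a retry (it lost a race with writeback), then translate the
 * result into a VM_FAULT_* code.
 */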
static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int count = 0;
	bool printed = false;
	bool retry;
	int result;

	do {
		retry = false;
		result = ll_page_mkwrite0(vma, vmf->page, &retry);

		if (!printed && ++count > 16) {
			CWARN("app(%s): the page %lu of file %lu is under heavy contention.\n",
			      current->comm, vmf->pgoff,
			      file_inode(vma->vm_file)->i_ino);
			printed = true;
		}
	} while (retry);

	switch (result) {
	case 0:
		LASSERT(PageLocked(vmf->page));
		result = VM_FAULT_LOCKED;
		break;
	case -ENODATA:
	case -EFAULT:
		result = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	case -EAGAIN:
		result = VM_FAULT_RETRY;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}

	return result;
}

/**
 * To avoid cancelling the locks that cover a mmapped region under lock
 * cache pressure, we track the number of mapped VMAs in
 * ccc_object::cob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
	struct inode *inode    = file_inode(vma->vm_file);
	struct ccc_object *vob = cl_inode2ccc(inode);

	LASSERT(vma->vm_file);
	LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
	atomic_inc(&vob->cob_mmap_cnt);
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
	struct inode      *inode = file_inode(vma->vm_file);
	struct ccc_object *vob   = cl_inode2ccc(inode);

	LASSERT(vma->vm_file);
	atomic_dec(&vob->cob_mmap_cnt);
	LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
}

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte */
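/*
 * Unmap the byte range [first, last] of @mapping from every address space
 * that has it mmapped.  Returns 0 if the mapping was mapped at all,
 * -ENOENT otherwise.
 */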
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
	int rc = -ENOENT;

	LASSERTF(last > first, "last %llu first %llu\n", last, first);
	if (mapping_mapped(mapping)) {
		rc = 0;
		unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
				    last - first + 1, 0);
	}

	return rc;
}

static const struct vm_operations_struct ll_file_vm_ops = {
	.fault			= ll_fault,
	.page_mkwrite		= ll_page_mkwrite,
	.open			= ll_vm_open,
	.close			= ll_vm_close,
};

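/*
 * The mmap() file operation: reject mappings when file locking is disabled,
 * otherwise fall back on generic_file_mmap() and install ll_file_vm_ops on
 * the new VMA.
 */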
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int rc;

	if (ll_file_nolock(file))
		return -EOPNOTSUPP;

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
	rc = generic_file_mmap(file, vma);
	if (rc == 0) {
		vma->vm_ops = &ll_file_vm_ops;
		vma->vm_ops->open(vma);
		/* update the inode's size and mtime */
		rc = ll_glimpse_size(inode);
	}

	return rc;
}