/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>

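/*
 * dax_clear_blocks() - zero a range of blocks through the direct-access path
 *
 * Zeroes @size bytes of the block device backing @inode, starting at
 * @block, by writing through the kernel's mapping of the storage rather
 * than submitting bios.  Filesystems can use this to make sure freshly
 * allocated blocks read as zeroes before exposing them to userspace.
 * May sleep, so it cannot be called from atomic context.
 */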
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			if (pgsz < PAGE_SIZE)
				memset(addr, 0, pgsz);
			else
				clear_page(addr);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
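
/*
 * Illustrative sketch (modelled on ext2's block allocator; foo_* names
 * are hypothetical): a filesystem's get_block path might clear a newly
 * allocated block before inserting it into the inode's block tree, so
 * that a racing reader can never see stale media contents:
 *
 *	if (IS_DAX(inode)) {
 *		err = dax_clear_blocks(inode, new_block,
 *					1 << inode->i_blkbits);
 *		if (err)
 *			goto cleanup;
 *	}
 */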

static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

static void dax_new_buf(void *addr, unsigned size, unsigned first, loff_t pos,
			loff_t end)
{
	/* The offset of the first byte after the I/O within this buffer */
	loff_t final = end - pos + first;

	if (first > 0)
		memset(addr, 0, first);
	if (final < size)
		memset(addr + final, 0, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

/*
 * dax_io() - perform I/O directly against the block device's memory.
 * For each block in [@start, @end), translate the file offset with
 * @get_block, obtain the kernel address of the backing memory with
 * bdev_direct_access(), and copy to or from @iter.  Reads from holes
 * are zero-filled; newly allocated or unwritten blocks are zeroed
 * around the I/O before being copied into.
 */
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void *addr;
	bool hole = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		unsigned len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			sector_t block = pos >> blkbits;
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh))
					dax_new_buf(addr, retval, first, pos,
									end);
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE)
			len = copy_from_iter(addr, max - pos, iter);
		else if (!hole)
			len = copy_to_iter(addr, max - pos, iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len)
			break;

		pos += len;
		addr += len;
	}

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
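
/*
 * Illustrative sketch (based on the way ext2 wires this up; the foo_*
 * names are hypothetical): a filesystem's ->direct_IO method can
 * dispatch to dax_do_io() for DAX inodes and fall back to the normal
 * direct I/O path otherwise:
 *
 *	static ssize_t foo_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 *				     loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter, offset,
 *					 foo_get_block, NULL, DIO_LOCKING);
 *		return blockdev_direct_IO(iocb, inode, iter, offset,
 *					  foo_get_block);
 *	}
 */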

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

/*
 * Copy a page's worth of data from the block(s) mapped by @bh into @to.
 * Used to populate the copy-on-write page on a write fault of a private
 * mapping.
 */
static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void *vfrom, *vto;
	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh))
		clear_page(addr);

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

static int do_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate.
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, (loff_t)vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !error);

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
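
/*
 * Illustrative sketch of a dax_iodone_t callback (hypothetical foo_*
 * names; a real filesystem would keep its extent-conversion state in
 * bh->b_private): on success the filesystem converts the now-written
 * unwritten extent, and on failure it only releases whatever it
 * attached to the buffer_head:
 *
 *	static void foo_end_io_unwritten(struct buffer_head *bh, int uptodate)
 *	{
 *		struct foo_extent *ext = bh->b_private;
 *
 *		if (uptodate)
 *			foo_convert_unwritten_extent(ext);
 *		foo_put_extent(ext);
 *		bh->b_private = NULL;
 *	}
 */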

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten extents
 *	to written so we don't leak stale data in them
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = do_dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 *
 * The PFN is already inserted into the page tables, so all we need to do
 * here is take the filesystem freeze protection and update the file's
 * modification time before the fault handler marks the PTE writable.
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
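
/*
 * Illustrative sketch of how a filesystem wires these helpers into its
 * mmap path (modelled on ext2; foo_* names are hypothetical).  The VMA
 * must be marked VM_MIXEDMAP because DAX inserts raw PFNs that have no
 * struct page behind them:
 *
 *	static int foo_dax_fault(struct vm_area_struct *vma,
 *				 struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, foo_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct foo_dax_vm_ops = {
 *		.fault		= foo_dax_fault,
 *		.page_mkwrite	= foo_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 *
 *	static int foo_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		file_accessed(file);
 *		vma->vm_ops = &foo_dax_vm_ops;
 *		vma->vm_flags |= VM_MIXEDMAP;
 *		return 0;
 *	}
 */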

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range is being zeroed
 * @from: The file offset at which zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		memset(addr + offset, 0, length);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
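
/*
 * Illustrative sketch (hypothetical names, start-of-hole side only): a
 * hole-punch implementation would zero the partial page at each end of
 * the hole with dax_zero_page_range() before freeing the whole blocks
 * in between:
 *
 *	unsigned partial = offset & (PAGE_CACHE_SIZE - 1);
 *
 *	if (partial) {
 *		err = dax_zero_page_range(inode, offset,
 *					  PAGE_CACHE_SIZE - partial,
 *					  foo_get_block);
 *		if (err)
 *			return err;
 *	}
 */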

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);

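/*
 * Illustrative sketch (modelled on ext2's setsize path; foo_* names are
 * hypothetical): on truncate, a filesystem zeroes the tail of the last
 * page through the DAX helper instead of block_truncate_page():
 *
 *	if (IS_DAX(inode))
 *		error = dax_truncate_page(inode, newsize, foo_get_block);
 *	else
 *		error = block_truncate_page(inode->i_mapping, newsize,
 *					    foo_get_block);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, newsize);
 */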