/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"
/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer to NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: whether to allocate the block if it has not been allocated yet
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not error (e.g. hole); must return without
		   the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

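/*
 * Illustrative sketch (not part of the original source): a caller that
 * only wants the disk location of file block blkoff, without allocating
 * anything, could do roughly the following.  The actual callers in this
 * file are the mpage/block helpers, which set up bh_result themselves
 * and may ask for several blocks at once through b_size.
 *
 *	struct buffer_head bh = { .b_size = 1 << inode->i_blkbits };
 *	int err = nilfs_get_block(inode, blkoff, &bh, 0);
 *
 *	if (!err && buffer_mapped(&bh))
 *		use(bh.b_blocknr);	// disk block number (use() is
 *					// a hypothetical consumer)
 *	// a hole returns err == 0 with the buffer left unmapped
 */
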
/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

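/*
 * NILFS never writes dirty pages back one by one; they go to disk as
 * part of logs built by the segment constructor.  So writepages() only
 * has real work to do for data-integrity sync (WB_SYNC_ALL), where it
 * forces construction of a data-sync segment covering the requested
 * range; background writeback is left to the segment constructor.
 */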
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		/*
		 * This means the filesystem was remounted read-only
		 * because of an error or metadata corruption, but dirty
		 * pages are still being flushed in the background.  So
		 * we simply discard this dirty page here.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

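/*
 * Propagate page dirtiness down to the buffers so that the segment
 * constructor knows exactly which blocks to write.  Buffers backing
 * holes (unmapped buffers) are skipped on purpose, and the number of
 * newly dirtied blocks is accounted through nilfs_set_file_dirty().
 */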
static int nilfs_set_page_dirty(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		unsigned nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by the caller, and no other thread
		 * concurrently marks its buffers dirty: buffers are only
		 * dirtied through routines in fs/buffer.c, where the call
		 * sites of mark_buffer_dirty() are protected by the page
		 * lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	} else if (ret) {
		unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

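/*
 * Each buffered write is bracketed by a NILFS transaction:
 * nilfs_write_begin() opens it and nilfs_write_end() commits it after
 * accounting the buffers the copy newly dirtied.  If write_begin fails
 * part way, the transaction is aborted and blocks instantiated beyond
 * the old EOF are trimmed via nilfs_write_failed().
 */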
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

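/*
 * Direct writes are not supported; returning zero makes the caller
 * fall back to buffered I/O.  Only direct reads are passed down to
 * blockdev_direct_IO().
 */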
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};

static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

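/*
 * Allocate a new inode in the ifile of the checkpoint that @dir belongs
 * to and initialize the in-core inode.  The inode starts out in the
 * NILFS_I_NEW state and is inserted into the inode hash with
 * nilfs_insert_inode_locked() so that concurrent lookups wait until
 * initialization completes.
 */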
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_after_creation; /* never occurs.  When
				    nilfs_init_acl() is supported, proper
				    cancellation of the above jobs should
				    be considered. */

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);  /* raw_inode will be deleted through
			 nilfs_evict_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

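/*
 * Look up or read the inode @ino of the checkpoint tree @root.  Note
 * that the hash lookup keys on the (ino, root) pair rather than the
 * inode number alone (see nilfs_iget_test()): the same inode number
 * may be in use in several mounted checkpoints at once.
 */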
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When the on-disk inode is extended, nilfs->ns_inode_size should
	   be checked before filling in any appended fields. */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: calling with has_bmap = 0 is a workaround to avoid
		   a bmap deadlock.  This delays the update of i_bmap to
		   just before writing. */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

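/*
 * Truncating a large file may dirty a huge number of bmap (B-tree)
 * blocks at once, so nilfs_truncate_bmap() below shrinks the bmap in
 * steps of at most NILFS_MAX_TRUNCATE_BLOCKS, calling
 * nilfs_relax_pressure_in_lock() between steps to give the segment
 * constructor a chance to flush the intermediate state.
 */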
#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

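/*
 * Account @nr_dirty newly dirtied blocks in the per-filesystem counter
 * and, the first time an inode becomes dirty, put it on the
 * ns_dirty_files list so the segment constructor picks it up for the
 * next log.  The igrab() reference pins the inode while it is queued.
 */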
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

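/*
 * fiemap walks the file as a sequence of two kinds of runs: uncommitted
 * (delayed-allocation) runs found with nilfs_find_uncommitted_extent()
 * and reported as FIEMAP_EXTENT_DELALLOC, and committed runs looked up
 * contiguously in the bmap.  Physically adjacent committed runs are
 * merged into a single extent before being handed to
 * fiemap_fill_next_extent().
 */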
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}