/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>
#include <linux/uio.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"
#include "acl.h"

static int hfsplus_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfsplus_get_block);
}

static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfsplus_get_block, wbc);
}

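/*
 * An extending write that fails part-way may leave blocks instantiated
 * beyond i_size.  Drop the page cache past i_size and truncate the
 * on-disk extents so the allocation matches the file size again.
 */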
static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		hfsplus_file_truncate(inode);
	}
}

static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				hfsplus_get_block,
				&HFSPLUS_I(mapping->host)->phys_size);
	if (unlikely(ret))
		hfsplus_write_failed(mapping, pos + len);

	return ret;
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfsplus_get_block);
}

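/*
 * A page of one of the special B-tree inodes (extents, catalog or
 * attributes tree) may still back cached b-tree nodes.  Only let the
 * page go once every node that maps onto it is unreferenced; such
 * nodes are unhashed and freed here before the buffers are dropped.
 */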
static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb)->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb)->cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb)->attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (!tree)
		return 0;
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >>
			(tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (node && atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index <<
			(PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}

static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
				 loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file_inode(file)->i_mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, offset, hfsplus_get_block);

	/*
	 * In case of error, an extending write may have instantiated a few
	 * blocks outside i_size.  Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + count;

		if (end > isize)
			hfsplus_write_failed(mapping, end);
	}

	return ret;
}

static int hfsplus_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

const struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};

const struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};

const struct dentry_operations hfsplus_dentry_operations = {
	.d_hash       = hfsplus_hash_dentry,
	.d_compare    = hfsplus_compare_dentry,
};

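/*
 * Decode the on-disk permission record into the in-core inode.  Records
 * that carry no Unix permissions fall back to the uid/gid/umask mount
 * options, and the HFS+ immutable and append-only flags are mirrored
 * into the corresponding VFS inode flags.
 */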
static void hfsplus_get_perms(struct inode *inode,
		struct hfsplus_perm *perms, int dir)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	u16 mode;

	mode = be16_to_cpu(perms->mode);

	i_uid_write(inode, be32_to_cpu(perms->owner));
	if (!i_uid_read(inode) && !mode)
		inode->i_uid = sbi->uid;

	i_gid_write(inode, be32_to_cpu(perms->group));
	if (!i_gid_read(inode) && !mode)
		inode->i_gid = sbi->gid;

	if (dir) {
		mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
		mode |= S_IFDIR;
	} else if (!mode)
		mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
	inode->i_mode = mode;

	HFSPLUS_I(inode)->userflags = perms->userflags;
	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (perms->rootflags & HFSPLUS_FLG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}

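/*
 * Opens are counted per file; an open of the resource fork is redirected
 * through the cross-linked rsrc_inode so both forks share a single
 * count.  Files larger than MAX_NON_LFS are refused unless the caller
 * passed O_LARGEFILE.
 */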
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EOVERFLOW;
	atomic_inc(&HFSPLUS_I(inode)->opencnt);
	return 0;
}

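/*
 * On the last release, truncate away any allocation beyond i_size and,
 * if the file was unlinked while still open (marked S_DEAD and kept in
 * the hidden directory), remove its catalog entry and free its blocks.
 */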
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			hfsplus_delete_cat(inode->i_ino,
					   HFSPLUS_SB(sb)->hidden_dir, NULL);
			hfsplus_delete_inode(inode);
		}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

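/*
 * Size changes are handled here directly: growing the file goes through
 * generic_cont_expand_simple() so the new tail is zero-filled, shrinking
 * it truncates the page cache and the on-disk extents.  A mode change
 * also updates any POSIX ACLs.
 */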
static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		if (attr->ia_size > inode->i_size) {
			error = generic_cont_expand_simple(inode,
							   attr->ia_size);
			if (error)
				return error;
		}
		truncate_setsize(inode, attr->ia_size);
		hfsplus_file_truncate(inode);
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);

	if (attr->ia_valid & ATTR_MODE) {
		error = posix_acl_chmod(inode, inode->i_mode);
		if (unlikely(error))
			return error;
	}

	return 0;
}

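/*
 * Besides the file's own pages, fsync has to push the metadata dirtied
 * on the inode's behalf: the catalog, extents and attributes trees and
 * the allocation file, as tracked by the HFSPLUS_I_*_DIRTY flags, and
 * finally flush the block device unless barriers are disabled.
 */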
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
		       int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	int error = 0, error2;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	mutex_lock(&inode->i_mutex);

	/*
	 * Sync inode metadata into the catalog and extent trees.
	 */
	sync_inode_metadata(inode, 1);

	/*
	 * And explicitly write out the btrees.
	 */
	if (test_and_clear_bit(HFSPLUS_I_CAT_DIRTY, &hip->flags))
		error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);

	if (test_and_clear_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags)) {
		error2 =
			filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}

	if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
		if (sbi->attr_tree) {
			error2 =
				filemap_write_and_wait(
					    sbi->attr_tree->inode->i_mapping);
			if (!error)
				error = error2;
		} else {
			pr_err("sync non-existent attributes tree\n");
		}
	}

	if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
		error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
		if (!error)
			error = error2;
	}

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);

	mutex_unlock(&inode->i_mutex);

	return error;
}

static const struct inode_operations hfsplus_file_inode_operations = {
	.setattr	= hfsplus_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= hfsplus_listxattr,
	.removexattr	= generic_removexattr,
#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
	.get_acl	= hfsplus_get_posix_acl,
	.set_acl	= hfsplus_set_posix_acl,
#endif
};

static const struct file_operations hfsplus_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= hfsplus_file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.unlocked_ioctl = hfsplus_ioctl,
};

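/*
 * Allocate and initialize an in-core inode for a new catalog object.
 * The CNID comes from the superblock's next_cnid counter and the fork
 * state starts out empty; the caller is responsible for creating the
 * matching catalog record.
 */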
struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct inode *inode = new_inode(sb);
	struct hfsplus_inode_info *hip;

	if (!inode)
		return NULL;

	inode->i_ino = sbi->next_cnid++;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	set_nlink(inode, 1);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;

	hip = HFSPLUS_I(inode);
	INIT_LIST_HEAD(&hip->open_dir_list);
	mutex_init(&hip->extents_lock);
	atomic_set(&hip->opencnt, 0);
	hip->extent_state = 0;
	hip->flags = 0;
	hip->userflags = 0;
	hip->subfolders = 0;
	memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->alloc_blocks = 0;
	hip->first_blocks = 0;
	hip->cached_start = 0;
	hip->cached_blocks = 0;
	hip->phys_size = 0;
	hip->fs_blocks = 0;
	hip->rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		inode->i_size = 2;
		sbi->folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = sbi->data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = 1;
	} else
		sbi->file_count++;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	hfsplus_mark_mdb_dirty(sb);

	return inode;
}

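/*
 * Release the on-disk space of a deleted object and adjust the folder
 * or file count that is written back to the volume header; removing
 * the catalog entry itself is up to the caller.
 */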
void hfsplus_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (S_ISDIR(inode->i_mode)) {
		HFSPLUS_SB(sb)->folder_count--;
		hfsplus_mark_mdb_dirty(sb);
		return;
	}
	HFSPLUS_SB(sb)->file_count--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfsplus_file_truncate(inode);
		}
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_size = 0;
		hfsplus_file_truncate(inode);
	}
	hfsplus_mark_mdb_dirty(sb);
}

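/*
 * Copy an on-disk fork descriptor into the in-core inode: the first
 * eight extents, the block and byte sizes, and the clump size used for
 * preallocation (falling back to the per-volume defaults when the fork
 * records none).  hfsplus_inode_write_fork() below does the reverse for
 * the fields that can change.
 */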
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int i;

	memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	hip->first_blocks = count;
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_start = 0;
	hip->cached_blocks = 0;

	hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
	hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
	hip->fs_blocks =
		(inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hip->clump_blocks =
		be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
	if (!hip->clump_blocks) {
		hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
			sbi->rsrc_clump_blocks :
			sbi->data_clump_blocks;
	}
}

void hfsplus_inode_write_fork(struct inode *inode,
		struct hfsplus_fork_raw *fork)
{
	memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
	       sizeof(hfsplus_extent_rec));
	fork->total_size = cpu_to_be64(inode->i_size);
	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
}

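/*
 * Initialize an inode from its catalog record (folder or file entry).
 * Note that for regular files the permissions dev field doubles as the
 * link count of hard-linked files, which is why it is fed to set_nlink()
 * here.
 */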
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode)->linkid = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) {
			pr_err("bad catalog folder entry\n");
			return -EIO;
		}
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		set_nlink(inode, 1);
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = hfsp_mt2ut(folder->access_date);
		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
		HFSPLUS_I(inode)->create_date = folder->create_date;
		HFSPLUS_I(inode)->fs_blocks = 0;
		if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
			HFSPLUS_I(inode)->subfolders =
				be32_to_cpu(folder->subfolders);
		}
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file)) {
			pr_err("bad catalog file entry\n");
			return -EIO;
		}
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_file));

		hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
					&file->rsrc_fork : &file->data_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		set_nlink(inode, 1);
		if (S_ISREG(inode->i_mode)) {
			if (file->permissions.dev)
				set_nlink(inode,
					  be32_to_cpu(file->permissions.dev));
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = hfsp_mt2ut(file->access_date);
		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
		HFSPLUS_I(inode)->create_date = file->create_date;
	} else {
		pr_err("bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}

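/*
 * Write the in-core inode back into its catalog record.  A resource-fork
 * inode only updates its fork data; the main inode also refreshes the
 * permissions, timestamps, the locked flag and, for folders, the valence
 * and folder count.
 */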
int hfsplus_cat_write_inode(struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode)->rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) {
			pr_err("bad catalog folder entry\n");
			goto out;
		}
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_cat_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		folder->valence = cpu_to_be32(inode->i_size - 2);
		if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
			folder->subfolders =
				cpu_to_be32(HFSPLUS_I(inode)->subfolders);
		}
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file)) {
			pr_err("bad catalog file entry\n");
			goto out;
		}
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		hfsplus_cat_set_perms(inode, &file->permissions);
		if (HFSPLUS_FLG_IMMUTABLE &
				(file->permissions.rootflags |
					file->permissions.userflags))
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					 sizeof(struct hfsplus_cat_file));
	}

	set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
	hfs_find_exit(&fd);
	return 0;
}