Lines matching refs: inode (identifier cross-reference over fs/ufs/inode.c)

44 static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])  in ufs_block_to_path()  argument
46 struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; in ufs_block_to_path()
71 ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big"); in ufs_block_to_path()
122 static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth) in ufs_frag_map() argument
124 struct ufs_inode_info *ufsi = UFS_I(inode); in ufs_frag_map()
125 struct super_block *sb = inode->i_sb; in ufs_frag_map()
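
The lines above show ufs_block_to_path() splitting a logical block number into a chain of array offsets (direct, then single/double/triple indirect) and ufs_frag_map() walking that chain to a physical fragment. A minimal standalone sketch of the index-splitting step, with hypothetical geometry constants standing in for the values ufs reads from uspi:

#include <stdio.h>

/* Hypothetical geometry: 12 direct pointers and 256 pointers per indirect
 * block; ufs takes the real values from the superblock (uspi), so these
 * constants are illustrative only. */
#define NDIR 12
#define PTRS 256ULL

/* Same shape as ufs_block_to_path(): fill offsets[] and return the depth
 * of the chain, or 0 when the block is beyond the triple-indirect range. */
static int block_to_path(unsigned long long b, unsigned offsets[4])
{
        int n = 0;

        if (b < NDIR) {
                offsets[n++] = b;
        } else if ((b -= NDIR) < PTRS) {
                offsets[n++] = NDIR;            /* single-indirect slot */
                offsets[n++] = b;
        } else if ((b -= PTRS) < PTRS * PTRS) {
                offsets[n++] = NDIR + 1;        /* double-indirect slot */
                offsets[n++] = b / PTRS;
                offsets[n++] = b % PTRS;
        } else if ((b -= PTRS * PTRS) < PTRS * PTRS * PTRS) {
                offsets[n++] = NDIR + 2;        /* triple-indirect slot */
                offsets[n++] = b / (PTRS * PTRS);
                offsets[n++] = (b / PTRS) % PTRS;
                offsets[n++] = b % PTRS;
        }                                       /* else: "block > big", depth 0 */
        return n;
}

int main(void)
{
        unsigned off[4] = { 0 };
        int depth = block_to_path(312, off);    /* lands in the double-indirect tree */

        printf("depth %d: %u %u %u\n", depth, off[0], off[1], off[2]);
        return 0;
}
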
219 ufs_extend_tail(struct inode *inode, u64 writes_to, in ufs_extend_tail() argument
222 struct ufs_inode_info *ufsi = UFS_I(inode); in ufs_extend_tail()
223 struct super_block *sb = inode->i_sb; in ufs_extend_tail()
237 tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p), in ufs_extend_tail()
252 ufs_inode_getfrag(struct inode *inode, unsigned index, in ufs_inode_getfrag() argument
256 struct ufs_inode_info *ufsi = UFS_I(inode); in ufs_inode_getfrag()
257 struct super_block *sb = inode->i_sb; in ufs_inode_getfrag()
286 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), in ufs_inode_getfrag()
296 inode->i_ctime = CURRENT_TIME_SEC; in ufs_inode_getfrag()
297 if (IS_SYNC(inode)) in ufs_inode_getfrag()
298 ufs_sync_inode (inode); in ufs_inode_getfrag()
299 mark_inode_dirty(inode); in ufs_inode_getfrag()
331 ufs_inode_getblock(struct inode *inode, u64 ind_block, in ufs_inode_getblock() argument
335 struct super_block *sb = inode->i_sb; in ufs_inode_getblock()
367 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal, in ufs_inode_getblock()
376 if (IS_SYNC(inode)) in ufs_inode_getblock()
378 inode->i_ctime = CURRENT_TIME_SEC; in ufs_inode_getblock()
379 mark_inode_dirty(inode); in ufs_inode_getblock()
393 static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result,… in ufs_getfrag_block() argument
395 struct super_block *sb = inode->i_sb; in ufs_getfrag_block()
399 int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets); in ufs_getfrag_block()
404 phys64 = ufs_frag_map(inode, offsets, depth); in ufs_getfrag_block()
410 mutex_lock(&UFS_I(inode)->truncate_mutex); in ufs_getfrag_block()
412 UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment); in ufs_getfrag_block()
419 if (UFS_I(inode)->i_lastfrag < UFS_NDIR_FRAGMENT) { in ufs_getfrag_block()
420 unsigned lastfrag = UFS_I(inode)->i_lastfrag; in ufs_getfrag_block()
423 if (!ufs_extend_tail(inode, fragment, in ufs_getfrag_block()
430 phys64 = ufs_inode_getfrag(inode, offsets[0], fragment, in ufs_getfrag_block()
434 phys64 = ufs_inode_getfrag(inode, offsets[0], fragment, in ufs_getfrag_block()
437 phys64 = ufs_inode_getblock(inode, phys64, offsets[i], in ufs_getfrag_block()
439 phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1], in ufs_getfrag_block()
449 mutex_unlock(&UFS_I(inode)->truncate_mutex); in ufs_getfrag_block()
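
ufs_getfrag_block() is the filesystem's get_block_t callback: for reads it only translates the fragment (ufs_frag_map() under the meta_lock), and for writes it allocates missing fragments under truncate_mutex via ufs_extend_tail()/ufs_inode_getfrag()/ufs_inode_getblock(). A hedged skeleton of that contract follows; the my_* helpers are placeholders for the ufs internals, not real kernel functions.

/* Hedged sketch of the get_block contract that ufs_getfrag_block()
 * implements; the my_* helpers are stand-ins for ufs internals
 * (ufs_frag_map, ufs_inode_getfrag, ...), not real kernel APIs. */
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(my_truncate_mutex); /* ufs keeps this per inode */

static u64 my_frag_map(struct inode *inode, sector_t frag)
{
        return 0;                       /* stub: pretend the fragment is a hole */
}

static u64 my_alloc_frag(struct inode *inode, sector_t frag)
{
        return 0;                       /* stub: pretend allocation failed */
}

static int my_getfrag_block(struct inode *inode, sector_t fragment,
                            struct buffer_head *bh_result, int create)
{
        u64 phys = my_frag_map(inode, fragment);

        if (phys) {                     /* already mapped: just report it */
                map_bh(bh_result, inode->i_sb, phys);
                return 0;
        }
        if (!create)                    /* reading a hole: leave bh unmapped */
                return 0;

        mutex_lock(&my_truncate_mutex); /* serialize against truncation */
        phys = my_alloc_frag(inode, fragment);
        if (phys) {
                set_buffer_new(bh_result);      /* caller must zero the new block */
                map_bh(bh_result, inode->i_sb, phys);
        }
        mutex_unlock(&my_truncate_mutex);
        return phys ? 0 : -ENOSPC;
}
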
468 static void ufs_truncate_blocks(struct inode *);
472 struct inode *inode = mapping->host; in ufs_write_failed() local
474 if (to > inode->i_size) { in ufs_write_failed()
475 truncate_pagecache(inode, inode->i_size); in ufs_write_failed()
476 ufs_truncate_blocks(inode); in ufs_write_failed()
519 static void ufs_set_inode_ops(struct inode *inode) in ufs_set_inode_ops() argument
521 if (S_ISREG(inode->i_mode)) { in ufs_set_inode_ops()
522 inode->i_op = &ufs_file_inode_operations; in ufs_set_inode_ops()
523 inode->i_fop = &ufs_file_operations; in ufs_set_inode_ops()
524 inode->i_mapping->a_ops = &ufs_aops; in ufs_set_inode_ops()
525 } else if (S_ISDIR(inode->i_mode)) { in ufs_set_inode_ops()
526 inode->i_op = &ufs_dir_inode_operations; in ufs_set_inode_ops()
527 inode->i_fop = &ufs_dir_operations; in ufs_set_inode_ops()
528 inode->i_mapping->a_ops = &ufs_aops; in ufs_set_inode_ops()
529 } else if (S_ISLNK(inode->i_mode)) { in ufs_set_inode_ops()
530 if (!inode->i_blocks) { in ufs_set_inode_ops()
531 inode->i_op = &ufs_fast_symlink_inode_operations; in ufs_set_inode_ops()
532 inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink; in ufs_set_inode_ops()
534 inode->i_op = &ufs_symlink_inode_operations; in ufs_set_inode_ops()
535 inode->i_mapping->a_ops = &ufs_aops; in ufs_set_inode_ops()
538 init_special_inode(inode, inode->i_mode, in ufs_set_inode_ops()
539 ufs_get_inode_dev(inode->i_sb, UFS_I(inode))); in ufs_set_inode_ops()
542 static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode) in ufs1_read_inode() argument
544 struct ufs_inode_info *ufsi = UFS_I(inode); in ufs1_read_inode()
545 struct super_block *sb = inode->i_sb; in ufs1_read_inode()
551 inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); in ufs1_read_inode()
552 set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink)); in ufs1_read_inode()
553 if (inode->i_nlink == 0) { in ufs1_read_inode()
554 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); in ufs1_read_inode()
561 i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode)); in ufs1_read_inode()
562 i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); in ufs1_read_inode()
564 inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); in ufs1_read_inode()
565 inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); in ufs1_read_inode()
566 inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); in ufs1_read_inode()
567 inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); in ufs1_read_inode()
568 inode->i_mtime.tv_nsec = 0; in ufs1_read_inode()
569 inode->i_atime.tv_nsec = 0; in ufs1_read_inode()
570 inode->i_ctime.tv_nsec = 0; in ufs1_read_inode()
571 inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks); in ufs1_read_inode()
572 inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen); in ufs1_read_inode()
578 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { in ufs1_read_inode()
589 static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) in ufs2_read_inode() argument
591 struct ufs_inode_info *ufsi = UFS_I(inode); in ufs2_read_inode()
592 struct super_block *sb = inode->i_sb; in ufs2_read_inode()
595 UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); in ufs2_read_inode()
599 inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); in ufs2_read_inode()
600 set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); in ufs2_read_inode()
601 if (inode->i_nlink == 0) { in ufs2_read_inode()
602 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); in ufs2_read_inode()
609 i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid)); in ufs2_read_inode()
610 i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid)); in ufs2_read_inode()
612 inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size); in ufs2_read_inode()
613 inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime); in ufs2_read_inode()
614 inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime); in ufs2_read_inode()
615 inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime); in ufs2_read_inode()
616 inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec); in ufs2_read_inode()
617 inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec); in ufs2_read_inode()
618 inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec); in ufs2_read_inode()
619 inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks); in ufs2_read_inode()
620 inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen); in ufs2_read_inode()
627 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { in ufs2_read_inode()
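
Both readers copy on-disk fields into the VFS inode through fs16/32/64_to_cpu(), which swap byte order only when the on-disk endianness (fixed at mkfs time, since UFS exists in both big- and little-endian flavours) differs from the CPU's. A standalone toy model of that per-field conversion pattern, with hypothetical my_* types:

#include <stdint.h>

/* Hypothetical stand-ins for the ufs types; the point is only the
 * swap-on-demand conversion that ufs1/ufs2_read_inode() apply per field. */
struct my_sb     { int needs_swap; };
struct my_dinode { uint16_t mode; uint16_t nlink; uint64_t size; uint32_t mtime; };
struct my_inode  { unsigned mode, nlink; unsigned long long size; long mtime; };

static uint16_t fs16_to_cpu(const struct my_sb *sb, uint16_t v)
{
        return sb->needs_swap ? (uint16_t)((v << 8) | (v >> 8)) : v;
}

static uint32_t fs32_to_cpu(const struct my_sb *sb, uint32_t v)
{
        if (!sb->needs_swap)
                return v;
        return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
               ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

static uint64_t fs64_to_cpu(const struct my_sb *sb, uint64_t v)
{
        if (!sb->needs_swap)
                return v;
        return ((uint64_t)fs32_to_cpu(sb, (uint32_t)v) << 32) |
               fs32_to_cpu(sb, (uint32_t)(v >> 32));
}

/* Mirrors what the readers do for a few representative fields. */
static void my_read_inode(const struct my_sb *sb, const struct my_dinode *d,
                          struct my_inode *i)
{
        i->mode  = fs16_to_cpu(sb, d->mode);
        i->nlink = fs16_to_cpu(sb, d->nlink);
        i->size  = fs64_to_cpu(sb, d->size);
        i->mtime = fs32_to_cpu(sb, d->mtime);
}
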
638 struct inode *ufs_iget(struct super_block *sb, unsigned long ino) in ufs_iget()
643 struct inode *inode; in ufs_iget() local
654 inode = iget_locked(sb, ino); in ufs_iget()
655 if (!inode) in ufs_iget()
657 if (!(inode->i_state & I_NEW)) in ufs_iget()
658 return inode; in ufs_iget()
660 ufsi = UFS_I(inode); in ufs_iget()
662 bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); in ufs_iget()
665 inode->i_ino); in ufs_iget()
671 err = ufs2_read_inode(inode, in ufs_iget()
672 ufs2_inode + ufs_inotofsbo(inode->i_ino)); in ufs_iget()
676 err = ufs1_read_inode(inode, in ufs_iget()
677 ufs_inode + ufs_inotofsbo(inode->i_ino)); in ufs_iget()
682 inode->i_version++; in ufs_iget()
684 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; in ufs_iget()
688 ufs_set_inode_ops(inode); in ufs_iget()
693 unlock_new_inode(inode); in ufs_iget()
694 return inode; in ufs_iget()
697 iget_failed(inode); in ufs_iget()
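
ufs_iget() follows the stock iget_locked() pattern: look the inode up in the cache, return it if it is not I_NEW, otherwise read the on-disk copy, install the ops, and publish it with unlock_new_inode(); on a read failure the half-built inode is discarded with iget_failed(). A hedged skeleton, where my_read_raw_inode() stands in for the sb_bread() + ufs1/ufs2_read_inode() step:

#include <linux/fs.h>
#include <linux/errno.h>

static int my_read_raw_inode(struct inode *inode)
{
        return -EIO;    /* placeholder: read and decode the on-disk inode */
}

struct inode *my_iget(struct super_block *sb, unsigned long ino)
{
        struct inode *inode;
        int err;

        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))  /* already cached and initialized */
                return inode;

        err = my_read_raw_inode(inode); /* fill i_mode, i_size, times, ... */
        if (err) {
                iget_failed(inode);     /* unhash and free the half-built inode */
                return ERR_PTR(err);
        }
        /* ufs would call ufs_set_inode_ops(inode) here to pick i_op/i_fop. */
        unlock_new_inode(inode);        /* clear I_NEW, wake any waiters */
        return inode;
}
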
701 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) in ufs1_update_inode() argument
703 struct super_block *sb = inode->i_sb; in ufs1_update_inode()
704 struct ufs_inode_info *ufsi = UFS_I(inode); in ufs1_update_inode()
706 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); in ufs1_update_inode()
707 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); in ufs1_update_inode()
709 ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode)); in ufs1_update_inode()
710 ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode)); in ufs1_update_inode()
712 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); in ufs1_update_inode()
713 ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec); in ufs1_update_inode()
715 ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec); in ufs1_update_inode()
717 ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec); in ufs1_update_inode()
719 ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks); in ufs1_update_inode()
721 ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation); in ufs1_update_inode()
728 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { in ufs1_update_inode()
731 } else if (inode->i_blocks) { in ufs1_update_inode()
740 if (!inode->i_nlink) in ufs1_update_inode()
744 static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode) in ufs2_update_inode() argument
746 struct super_block *sb = inode->i_sb; in ufs2_update_inode()
747 struct ufs_inode_info *ufsi = UFS_I(inode); in ufs2_update_inode()
750 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); in ufs2_update_inode()
751 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); in ufs2_update_inode()
753 ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode)); in ufs2_update_inode()
754 ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode)); in ufs2_update_inode()
756 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); in ufs2_update_inode()
757 ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec); in ufs2_update_inode()
758 ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec); in ufs2_update_inode()
759 ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec); in ufs2_update_inode()
760 ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec); in ufs2_update_inode()
761 ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec); in ufs2_update_inode()
762 ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec); in ufs2_update_inode()
764 ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks); in ufs2_update_inode()
766 ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation); in ufs2_update_inode()
768 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { in ufs2_update_inode()
771 } else if (inode->i_blocks) { in ufs2_update_inode()
779 if (!inode->i_nlink) in ufs2_update_inode()
784 static int ufs_update_inode(struct inode * inode, int do_sync) in ufs_update_inode() argument
786 struct super_block *sb = inode->i_sb; in ufs_update_inode()
790 UFSD("ENTER, ino %lu\n", inode->i_ino); in ufs_update_inode()
792 if (inode->i_ino < UFS_ROOTINO || in ufs_update_inode()
793 inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { in ufs_update_inode()
794 ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino); in ufs_update_inode()
798 bh = sb_bread(sb, ufs_inotofsba(inode->i_ino)); in ufs_update_inode()
800 ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino); in ufs_update_inode()
806 ufs2_update_inode(inode, in ufs_update_inode()
807 ufs2_inode + ufs_inotofsbo(inode->i_ino)); in ufs_update_inode()
811 ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); in ufs_update_inode()
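
ufs_update_inode() is the write-back mirror of the readers: re-read the block that holds the on-disk inode, convert each in-core field back with cpu_to_fs{16,32,64}(), then mark the buffer dirty, waiting for the write only in the synchronous case. A hedged outline using generic buffer-head calls; my_ino_to_block() and my_copy_fields() are placeholders for ufs_inotofsba() and ufs1/ufs2_update_inode():

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>

static sector_t my_ino_to_block(struct super_block *sb, unsigned long ino)
{
        return ino;                     /* placeholder for ufs_inotofsba() */
}

static void my_copy_fields(struct inode *inode, void *raw)
{
        /* placeholder: cpu_to_fs*() each field into the raw on-disk inode */
}

static int my_update_inode(struct inode *inode, int do_sync)
{
        struct buffer_head *bh;

        bh = sb_bread(inode->i_sb, my_ino_to_block(inode->i_sb, inode->i_ino));
        if (!bh)
                return -EIO;

        my_copy_fields(inode, bh->b_data);

        mark_buffer_dirty(bh);          /* queue the block for write-out */
        if (do_sync)
                sync_dirty_buffer(bh);  /* WB_SYNC_ALL: wait for the disk */
        brelse(bh);
        return 0;
}
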
823 int ufs_write_inode(struct inode *inode, struct writeback_control *wbc) in ufs_write_inode() argument
825 return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL); in ufs_write_inode()
828 int ufs_sync_inode (struct inode *inode) in ufs_sync_inode() argument
830 return ufs_update_inode (inode, 1); in ufs_sync_inode()
833 void ufs_evict_inode(struct inode * inode) in ufs_evict_inode() argument
837 if (!inode->i_nlink && !is_bad_inode(inode)) in ufs_evict_inode()
840 truncate_inode_pages_final(&inode->i_data); in ufs_evict_inode()
842 inode->i_size = 0; in ufs_evict_inode()
843 if (inode->i_blocks) in ufs_evict_inode()
844 ufs_truncate_blocks(inode); in ufs_evict_inode()
847 invalidate_inode_buffers(inode); in ufs_evict_inode()
848 clear_inode(inode); in ufs_evict_inode()
851 ufs_free_inode(inode); in ufs_evict_inode()
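
ufs_evict_inode() has the standard evict shape: when the inode is going away with no links left (and is not a bad inode), zero its size, free its blocks, and release the on-disk inode; in every case drop the page cache, invalidate attached buffers, and clear_inode(). A hedged skeleton, with my_* placeholders for ufs_truncate_blocks()/ufs_free_inode():

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>

static void my_truncate_blocks(struct inode *inode) { }  /* placeholder */
static void my_free_on_disk(struct inode *inode)    { }  /* placeholder */

static void my_evict_inode(struct inode *inode)
{
        bool want_delete = !inode->i_nlink && !is_bad_inode(inode);

        truncate_inode_pages_final(&inode->i_data);     /* drop the page cache */
        if (want_delete) {
                inode->i_size = 0;
                if (inode->i_blocks)
                        my_truncate_blocks(inode);      /* free the data blocks */
        }
        invalidate_inode_buffers(inode);
        clear_inode(inode);                             /* I_FREEING/I_CLEAR bookkeeping */

        if (want_delete)
                my_free_on_disk(inode);                 /* return the inode to the bitmap */
}
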
855 struct inode *inode; member
863 ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count); in free_data()
870 #define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
871 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
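
DIRECT_BLOCK and DIRECT_FRAGMENT are the usual round-up-then-shift idiom: (i_size + unit - 1) >> unit_shift is the number of whole units needed to cover i_size bytes, i.e. the first block/fragment index past the data being kept. A worked example, assuming an 8192-byte block (bshift 13) and a 1024-byte fragment (fshift 10), values chosen purely for illustration:

/* i_size = 10000:
 *   DIRECT_BLOCK    = (10000 + 8191) >> 13 = 18191 >> 13 = 2
 *   DIRECT_FRAGMENT = (10000 + 1023) >> 10 = 11023 >> 10 = 10
 * so truncation keeps blocks 0..1 and fragments 0..9. */
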
873 static void ufs_trunc_direct(struct inode *inode) in ufs_trunc_direct() argument
875 struct ufs_inode_info *ufsi = UFS_I(inode); in ufs_trunc_direct()
880 struct to_free ctx = {.inode = inode}; in ufs_trunc_direct()
883 UFSD("ENTER: ino %lu\n", inode->i_ino); in ufs_trunc_direct()
885 sb = inode->i_sb; in ufs_trunc_direct()
902 " frag3 %llu, frag4 %llu\n", inode->i_ino, in ufs_trunc_direct()
920 ufs_free_fragments(inode, tmp + frag1, frag2); in ufs_trunc_direct()
955 ufs_free_fragments (inode, tmp, frag4); in ufs_trunc_direct()
958 UFSD("EXIT: ino %lu\n", inode->i_ino); in ufs_trunc_direct()
961 static void free_full_branch(struct inode *inode, u64 ind_block, int depth) in free_full_branch() argument
963 struct super_block *sb = inode->i_sb; in free_full_branch()
976 free_full_branch(inode, block, depth); in free_full_branch()
979 struct to_free ctx = {.inode = inode}; in free_full_branch()
991 ufs_free_blocks(inode, ind_block, uspi->s_fpb); in free_full_branch()
994 static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int d… in free_branch_tail() argument
996 struct super_block *sb = inode->i_sb; in free_branch_tail()
1005 write_seqlock(&UFS_I(inode)->meta_lock); in free_branch_tail()
1007 write_sequnlock(&UFS_I(inode)->meta_lock); in free_branch_tail()
1009 free_full_branch(inode, block, depth); in free_branch_tail()
1013 struct to_free ctx = {.inode = inode}; in free_branch_tail()
1019 write_seqlock(&UFS_I(inode)->meta_lock); in free_branch_tail()
1021 write_sequnlock(&UFS_I(inode)->meta_lock); in free_branch_tail()
1028 if (IS_SYNC(inode) && ubh_buffer_dirty(ubh)) in free_branch_tail()
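
free_full_branch() releases a whole indirect subtree depth-first: at depth > 1 it recurses into every non-hole child pointer, at depth 1 the children are data blocks, and only afterwards is the index block itself handed back. A standalone toy model of that recursion (in-memory arrays instead of ufs_buffer_heads; the block-run batching done through the to_free context is omitted):

#include <stdio.h>

#define PTRS 4                  /* pointers per index block (tiny, illustrative) */

/* Toy "disk": an index node is an array of child ids, 0 meaning a hole. */
struct node { unsigned long child[PTRS]; };
static struct node pool[16];    /* node id indexes this array */

static struct node *read_node(unsigned long id)       /* stand-in for ubh_bread() */
{
        return &pool[id];
}

static void free_blocks(unsigned long id, int count)  /* stand-in for ufs_free_blocks() */
{
        printf("free %lu (%d)\n", id, count);
}

/* Same shape as free_full_branch(): children first, then the index block. */
static void free_branch(unsigned long ind_block, int depth)
{
        struct node *n = read_node(ind_block);
        int i;

        for (i = 0; i < PTRS; i++) {
                if (!n->child[i])
                        continue;
                if (depth > 1)
                        free_branch(n->child[i], depth - 1);
                else
                        free_blocks(n->child[i], 1);   /* leaf level: data block */
        }
        free_blocks(ind_block, 1);                     /* finally the index block */
}

int main(void)
{
        pool[1].child[0] = 2;   /* depth-2 index node 1 -> leaf index node 2 */
        pool[2].child[0] = 7;   /* leaf index node 2 -> data blocks 7 and 8 */
        pool[2].child[1] = 8;
        free_branch(1, 2);      /* frees 7, 8, 2, then 1 */
        return 0;
}
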
1033 static int ufs_alloc_lastblock(struct inode *inode, loff_t size) in ufs_alloc_lastblock() argument
1036 struct super_block *sb = inode->i_sb; in ufs_alloc_lastblock()
1037 struct address_space *mapping = inode->i_mapping; in ufs_alloc_lastblock()
1053 (PAGE_CACHE_SHIFT - inode->i_blkbits)); in ufs_alloc_lastblock()
1059 end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1); in ufs_alloc_lastblock()
1065 err = ufs_getfrag_block(inode, lastfrag, bh, 1); in ufs_alloc_lastblock()
1103 static void __ufs_truncate_blocks(struct inode *inode) in __ufs_truncate_blocks() argument
1105 struct ufs_inode_info *ufsi = UFS_I(inode); in __ufs_truncate_blocks()
1106 struct super_block *sb = inode->i_sb; in __ufs_truncate_blocks()
1109 int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets); in __ufs_truncate_blocks()
1126 ufs_trunc_direct(inode); in __ufs_truncate_blocks()
1146 free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1); in __ufs_truncate_blocks()
1155 free_full_branch(inode, block, i - UFS_IND_BLOCK + 1); in __ufs_truncate_blocks()
1159 mark_inode_dirty(inode); in __ufs_truncate_blocks()
1163 static int ufs_truncate(struct inode *inode, loff_t size) in ufs_truncate() argument
1168 inode->i_ino, (unsigned long long)size, in ufs_truncate()
1169 (unsigned long long)i_size_read(inode)); in ufs_truncate()
1171 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || in ufs_truncate()
1172 S_ISLNK(inode->i_mode))) in ufs_truncate()
1174 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) in ufs_truncate()
1177 err = ufs_alloc_lastblock(inode, size); in ufs_truncate()
1182 block_truncate_page(inode->i_mapping, size, ufs_getfrag_block); in ufs_truncate()
1184 truncate_setsize(inode, size); in ufs_truncate()
1186 __ufs_truncate_blocks(inode); in ufs_truncate()
1187 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; in ufs_truncate()
1188 mark_inode_dirty(inode); in ufs_truncate()
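
ufs_truncate() shows the usual shrink ordering: make sure the block that will hold the new EOF exists (ufs_alloc_lastblock()), zero its tail with block_truncate_page(), change i_size with truncate_setsize() (which also trims the page cache), and only then free the blocks past the new size and touch the timestamps. A hedged outline; my_get_block() and my_free_tail_blocks() are placeholders for ufs_getfrag_block() and __ufs_truncate_blocks():

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/buffer_head.h>

static int my_get_block(struct inode *inode, sector_t block,
                        struct buffer_head *bh, int create)
{
        return -EIO;                    /* placeholder for ufs_getfrag_block() */
}

static void my_free_tail_blocks(struct inode *inode) { } /* placeholder */

static int my_truncate(struct inode *inode, loff_t size)
{
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return -EPERM;

        /* Zero the part of the final block that lies past the new EOF. */
        block_truncate_page(inode->i_mapping, size, my_get_block);

        truncate_setsize(inode, size);  /* set i_size and drop stale pages */
        my_free_tail_blocks(inode);     /* release blocks beyond the new size */

        inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; /* pre-4.9 API, as in the listing */
        mark_inode_dirty(inode);
        return 0;
}
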
1194 void ufs_truncate_blocks(struct inode *inode) in ufs_truncate_blocks() argument
1196 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || in ufs_truncate_blocks()
1197 S_ISLNK(inode->i_mode))) in ufs_truncate_blocks()
1199 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) in ufs_truncate_blocks()
1201 __ufs_truncate_blocks(inode); in ufs_truncate_blocks()
1206 struct inode *inode = d_inode(dentry); in ufs_setattr() local
1210 error = inode_change_ok(inode, attr); in ufs_setattr()
1214 if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) { in ufs_setattr()
1215 error = ufs_truncate(inode, attr->ia_size); in ufs_setattr()
1220 setattr_copy(inode, attr); in ufs_setattr()
1221 mark_inode_dirty(inode); in ufs_setattr()
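
ufs_setattr() is the standard setattr shape of that kernel generation: validate the request with inode_change_ok(), call the filesystem's truncate for ATTR_SIZE changes, then setattr_copy() the remaining attributes and mark the inode dirty. A hedged skeleton; my_truncate() is a placeholder for ufs_truncate(), and inode_change_ok()/setattr_copy() are the pre-4.9 names the listing itself uses (later kernels switched to setattr_prepare()):

#include <linux/fs.h>

static int my_truncate(struct inode *inode, loff_t size)
{
        return 0;       /* placeholder for ufs_truncate() */
}

static int my_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int error;

        error = inode_change_ok(inode, attr);   /* permission and limit checks */
        if (error)
                return error;

        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != inode->i_size) {
                error = my_truncate(inode, attr->ia_size);
                if (error)
                        return error;
        }

        setattr_copy(inode, attr);      /* copy mode/uid/gid/times into the inode */
        mark_inode_dirty(inode);
        return 0;
}
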