
Searched refs:sbi (Results 1 – 164 of 164) sorted by relevance

/linux-4.4.14/fs/f2fs/
Dsegment.h27 #define IS_CURSEG(sbi, seg) \ argument
28 ((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \
29 (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \
30 (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \
31 (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \
32 (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \
33 (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))
35 #define IS_CURSEC(sbi, secno) \ argument
36 ((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
37 sbi->segs_per_sec) || \
[all …]
Dsegment.c216 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in commit_inmem_pages() local
221 .sbi = sbi, in commit_inmem_pages()
236 f2fs_balance_fs(sbi); in commit_inmem_pages()
237 f2fs_lock_op(sbi); in commit_inmem_pages()
273 f2fs_unlock_op(sbi); in commit_inmem_pages()
275 f2fs_submit_merged_bio(sbi, DATA, WRITE); in commit_inmem_pages()
284 void f2fs_balance_fs(struct f2fs_sb_info *sbi) in f2fs_balance_fs() argument
290 if (has_not_enough_free_secs(sbi, 0)) { in f2fs_balance_fs()
291 mutex_lock(&sbi->gc_mutex); in f2fs_balance_fs()
292 f2fs_gc(sbi, false); in f2fs_balance_fs()
[all …]
Dcheckpoint.c32 struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in grab_meta_page() argument
34 struct address_space *mapping = META_MAPPING(sbi); in grab_meta_page()
50 static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, in __get_meta_page() argument
53 struct address_space *mapping = META_MAPPING(sbi); in __get_meta_page()
56 .sbi = sbi, in __get_meta_page()
93 f2fs_stop_checkpoint(sbi); in __get_meta_page()
98 struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in get_meta_page() argument
100 return __get_meta_page(sbi, index, true); in get_meta_page()
104 struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index) in get_tmp_page() argument
106 return __get_meta_page(sbi, index, false); in get_tmp_page()
[all …]
Dsuper.c114 static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type) in __struct_ptr() argument
117 return (unsigned char *)sbi->gc_thread; in __struct_ptr()
119 return (unsigned char *)SM_I(sbi); in __struct_ptr()
121 return (unsigned char *)NM_I(sbi); in __struct_ptr()
123 return (unsigned char *)sbi; in __struct_ptr()
128 struct f2fs_sb_info *sbi, char *buf) in f2fs_sbi_show() argument
133 ptr = __struct_ptr(sbi, a->struct_type); in f2fs_sbi_show()
143 struct f2fs_sb_info *sbi, in f2fs_sbi_store() argument
151 ptr = __struct_ptr(sbi, a->struct_type); in f2fs_sbi_store()
167 struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info, in f2fs_attr_show() local
[all …]
Ddebug.c30 static void update_general_status(struct f2fs_sb_info *sbi) in update_general_status() argument
32 struct f2fs_stat_info *si = F2FS_STAT(sbi); in update_general_status()
36 si->hit_largest = atomic64_read(&sbi->read_hit_largest); in update_general_status()
37 si->hit_cached = atomic64_read(&sbi->read_hit_cached); in update_general_status()
38 si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree); in update_general_status()
40 si->total_ext = atomic64_read(&sbi->total_hit_ext); in update_general_status()
41 si->ext_tree = sbi->total_ext_tree; in update_general_status()
42 si->ext_node = atomic_read(&sbi->total_ext_node); in update_general_status()
43 si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES); in update_general_status()
44 si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS); in update_general_status()
[all …]
Dgc.c29 struct f2fs_sb_info *sbi = data; in gc_thread_func() local
30 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in gc_thread_func()
31 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; in gc_thread_func()
46 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { in gc_thread_func()
64 if (!mutex_trylock(&sbi->gc_mutex)) in gc_thread_func()
67 if (!is_idle(sbi)) { in gc_thread_func()
69 mutex_unlock(&sbi->gc_mutex); in gc_thread_func()
73 if (has_enough_invalid_blocks(sbi)) in gc_thread_func()
78 stat_inc_bggc_count(sbi); in gc_thread_func()
81 if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC))) in gc_thread_func()
[all …]
Dshrinker.c21 static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi) in __count_nat_entries() argument
23 return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt; in __count_nat_entries()
26 static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) in __count_free_nids() argument
28 if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK) in __count_free_nids()
29 return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK; in __count_free_nids()
33 static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi) in __count_extent_cache() argument
35 return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node); in __count_extent_cache()
41 struct f2fs_sb_info *sbi; in f2fs_shrink_count() local
48 sbi = list_entry(p, struct f2fs_sb_info, s_list); in f2fs_shrink_count()
51 if (!mutex_trylock(&sbi->umount_mutex)) { in f2fs_shrink_count()
[all …]
Dgc.h46 static inline block_t free_user_blocks(struct f2fs_sb_info *sbi) in free_user_blocks() argument
48 if (free_segments(sbi) < overprovision_segments(sbi)) in free_user_blocks()
51 return (free_segments(sbi) - overprovision_segments(sbi)) in free_user_blocks()
52 << sbi->log_blocks_per_seg; in free_user_blocks()
55 static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi) in limit_invalid_user_blocks() argument
57 return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100; in limit_invalid_user_blocks()
60 static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi) in limit_free_user_blocks() argument
62 block_t reclaimable_user_blocks = sbi->user_block_count - in limit_free_user_blocks()
63 written_block_count(sbi); in limit_free_user_blocks()
89 static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi) in has_enough_invalid_blocks() argument
[all …]
Dextent_cache.c24 static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi, in __attach_extent_node() argument
40 atomic_inc(&sbi->total_ext_node); in __attach_extent_node()
44 static void __detach_extent_node(struct f2fs_sb_info *sbi, in __detach_extent_node() argument
49 atomic_dec(&sbi->total_ext_node); in __detach_extent_node()
57 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in __grab_extent_tree() local
61 down_write(&sbi->extent_tree_lock); in __grab_extent_tree()
62 et = radix_tree_lookup(&sbi->extent_tree_root, ino); in __grab_extent_tree()
65 f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et); in __grab_extent_tree()
73 sbi->total_ext_tree++; in __grab_extent_tree()
76 up_write(&sbi->extent_tree_lock); in __grab_extent_tree()
[all …]
Df2fs.h26 #define f2fs_bug_on(sbi, condition) BUG_ON(condition) argument
29 #define f2fs_bug_on(sbi, condition) \ argument
33 set_sbi_flag(sbi, SBI_NEED_FSCK); \
58 #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option) argument
59 #define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option) argument
60 #define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option) argument
123 #define BATCHED_TRIM_SEGMENTS(sbi) \ argument
124 (SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
125 #define BATCHED_TRIM_BLOCKS(sbi) \ argument
126 (BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
[all …]
Drecovery.c50 bool space_for_roll_forward(struct f2fs_sb_info *sbi) in space_for_roll_forward() argument
52 if (sbi->last_valid_block_count + sbi->alloc_valid_block_count in space_for_roll_forward()
53 > sbi->user_block_count) in space_for_roll_forward()
171 static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) in find_fsync_dnodes() argument
173 unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi)); in find_fsync_dnodes()
180 curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); in find_fsync_dnodes()
181 blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); in find_fsync_dnodes()
183 ra_meta_pages(sbi, blkaddr, 1, META_POR, true); in find_fsync_dnodes()
188 if (!is_valid_blkaddr(sbi, blkaddr, META_POR)) in find_fsync_dnodes()
191 page = get_tmp_page(sbi, blkaddr); in find_fsync_dnodes()
[all …]
Dnode.c31 bool available_free_memory(struct f2fs_sb_info *sbi, int type) in available_free_memory() argument
33 struct f2fs_nm_info *nm_i = NM_I(sbi); in available_free_memory()
56 if (sbi->sb->s_bdi->wb.dirty_exceeded) in available_free_memory()
58 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); in available_free_memory()
64 mem_size += (sbi->im[i].ino_num * in available_free_memory()
68 mem_size = (sbi->total_ext_tree * sizeof(struct extent_tree) + in available_free_memory()
69 atomic_read(&sbi->total_ext_node) * in available_free_memory()
73 if (sbi->sb->s_bdi->wb.dirty_exceeded) in available_free_memory()
97 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_current_nat_page() argument
99 pgoff_t index = current_nat_addr(sbi, nid); in get_current_nat_page()
[all …]
Dnamei.c27 struct f2fs_sb_info *sbi = F2FS_I_SB(dir); in f2fs_new_inode() local
37 f2fs_lock_op(sbi); in f2fs_new_inode()
38 if (!alloc_nid(sbi, &ino)) { in f2fs_new_inode()
39 f2fs_unlock_op(sbi); in f2fs_new_inode()
43 f2fs_unlock_op(sbi); in f2fs_new_inode()
50 inode->i_generation = sbi->s_next_generation++; in f2fs_new_inode()
108 static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode, in set_cold_files() argument
112 __u8 (*extlist)[8] = sbi->raw_super->extension_list; in set_cold_files()
114 int count = le32_to_cpu(sbi->raw_super->extension_count); in set_cold_files()
126 struct f2fs_sb_info *sbi = F2FS_I_SB(dir); in f2fs_create() local
[all …]
Dinode.c99 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in do_read_inode() local
105 if (check_nid_range(sbi, inode->i_ino)) { in do_read_inode()
112 node_page = get_node_page(sbi, inode->i_ino); in do_read_inode()
166 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_iget() local
178 if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi)) in f2fs_iget()
185 if (ino == F2FS_NODE_INO(sbi)) { in f2fs_iget()
188 } else if (ino == F2FS_META_INO(sbi)) { in f2fs_iget()
269 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in update_inode_page() local
272 node_page = get_node_page(sbi, inode->i_ino); in update_inode_page()
279 f2fs_stop_checkpoint(sbi); in update_inode_page()
[all …]
Dfile.c39 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_vm_page_mkwrite() local
43 f2fs_balance_fs(sbi); in f2fs_vm_page_mkwrite()
47 f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); in f2fs_vm_page_mkwrite()
50 f2fs_lock_op(sbi); in f2fs_vm_page_mkwrite()
54 f2fs_unlock_op(sbi); in f2fs_vm_page_mkwrite()
58 f2fs_unlock_op(sbi); in f2fs_vm_page_mkwrite()
93 f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr); in f2fs_vm_page_mkwrite()
130 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in need_do_checkpoint() local
135 else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino)) in need_do_checkpoint()
139 else if (!space_for_roll_forward(sbi)) in need_do_checkpoint()
[all …]
Ddata.c60 struct f2fs_sb_info *sbi = bio->bi_private; in f2fs_write_end_io() local
72 f2fs_stop_checkpoint(sbi); in f2fs_write_end_io()
75 dec_page_count(sbi, F2FS_WRITEBACK); in f2fs_write_end_io()
78 if (!get_pages(sbi, F2FS_WRITEBACK) && in f2fs_write_end_io()
79 !list_empty(&sbi->cp_wait.task_list)) in f2fs_write_end_io()
80 wake_up(&sbi->cp_wait); in f2fs_write_end_io()
88 static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, in __bio_alloc() argument
95 bio->bi_bdev = sbi->sb->s_bdev; in __bio_alloc()
98 bio->bi_private = is_read ? NULL : sbi; in __bio_alloc()
111 trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); in __submit_merged_bio()
[all …]
Dxattr.c32 struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); in f2fs_xattr_generic_list() local
37 if (!test_opt(sbi, XATTR_USER)) in f2fs_xattr_generic_list()
64 struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); in f2fs_xattr_generic_get() local
68 if (!test_opt(sbi, XATTR_USER)) in f2fs_xattr_generic_get()
90 struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); in f2fs_xattr_generic_set() local
94 if (!test_opt(sbi, XATTR_USER)) in f2fs_xattr_generic_set()
269 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in read_all_xattrs() local
288 page = get_node_page(sbi, inode->i_ino); in read_all_xattrs()
303 xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid); in read_all_xattrs()
328 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in write_all_xattrs() local
[all …]
Dnode.h150 static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid) in next_free_nid() argument
152 struct f2fs_nm_info *nm_i = NM_I(sbi); in next_free_nid()
168 static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr) in get_nat_bitmap() argument
170 struct f2fs_nm_info *nm_i = NM_I(sbi); in get_nat_bitmap()
174 static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start) in current_nat_addr() argument
176 struct f2fs_nm_info *nm_i = NM_I(sbi); in current_nat_addr()
182 seg_off = block_off >> sbi->log_blocks_per_seg; in current_nat_addr()
185 (seg_off << sbi->log_blocks_per_seg << 1) + in current_nat_addr()
186 (block_off & ((1 << sbi->log_blocks_per_seg) - 1))); in current_nat_addr()
189 block_addr += sbi->blocks_per_seg; in current_nat_addr()
[all …]
Dinline.c113 .sbi = F2FS_I_SB(dn->inode), in f2fs_convert_inline_page()
175 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_convert_inline_inode() local
184 f2fs_lock_op(sbi); in f2fs_convert_inline_inode()
186 ipage = get_node_page(sbi, inode->i_ino); in f2fs_convert_inline_inode()
199 f2fs_unlock_op(sbi); in f2fs_convert_inline_inode()
239 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in recover_inline_data() local
258 ipage = get_node_page(sbi, inode->i_ino); in recover_inline_data()
259 f2fs_bug_on(sbi, IS_ERR(ipage)); in recover_inline_data()
276 ipage = get_node_page(sbi, inode->i_ino); in recover_inline_data()
277 f2fs_bug_on(sbi, IS_ERR(ipage)); in recover_inline_data()
[all …]
Ddir.c659 struct f2fs_sb_info *sbi = F2FS_I_SB(dir); in f2fs_drop_nlink() local
681 add_orphan_inode(sbi, inode->i_ino); in f2fs_drop_nlink()
683 release_orphan_inode(sbi); in f2fs_drop_nlink()
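Nearly every f2fs hit above either takes a struct f2fs_sb_info * directly or derives one from an inode (F2FS_I_SB) or a plain super_block (F2FS_SB). As a reference point for reading the excerpts, here is a minimal sketch of those accessors, assuming f2fs follows the standard VFS convention of parking its private state in sb->s_fs_info (this mirrors the helpers declared in f2fs.h rather than quoting them):

        /* sbi lives in the VFS super_block's private pointer */
        static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
        {
                return sb->s_fs_info;
        }

        /* convenience wrapper: inode -> super_block -> sbi */
        static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
        {
                return F2FS_SB(inode->i_sb);
        }

With that in mind, a call chain like the one in file.c above reads naturally: f2fs_vm_page_mkwrite() resolves sbi via F2FS_I_SB(inode), then hands it to f2fs_balance_fs(), f2fs_lock_op() and friends.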
/linux-4.4.14/fs/sysv/
Dsuper.c47 static void detected_xenix(struct sysv_sb_info *sbi, unsigned *max_links) in detected_xenix() argument
49 struct buffer_head *bh1 = sbi->s_bh1; in detected_xenix()
50 struct buffer_head *bh2 = sbi->s_bh2; in detected_xenix()
63 sbi->s_fic_size = XENIX_NICINOD; in detected_xenix()
64 sbi->s_flc_size = XENIX_NICFREE; in detected_xenix()
65 sbi->s_sbd1 = (char *)sbd1; in detected_xenix()
66 sbi->s_sbd2 = (char *)sbd2; in detected_xenix()
67 sbi->s_sb_fic_count = &sbd1->s_ninode; in detected_xenix()
68 sbi->s_sb_fic_inodes = &sbd1->s_inode[0]; in detected_xenix()
69 sbi->s_sb_total_free_inodes = &sbd2->s_tinode; in detected_xenix()
[all …]
Dballoc.c44 struct sysv_sb_info * sbi = SYSV_SB(sb); in sysv_free_block() local
46 sysv_zone_t *blocks = sbi->s_bcache; in sysv_free_block()
48 unsigned block = fs32_to_cpu(sbi, nr); in sysv_free_block()
55 if (sbi->s_type == FSTYPE_AFS) in sysv_free_block()
58 if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) { in sysv_free_block()
63 mutex_lock(&sbi->s_lock); in sysv_free_block()
64 count = fs16_to_cpu(sbi, *sbi->s_bcache_count); in sysv_free_block()
66 if (count > sbi->s_flc_size) { in sysv_free_block()
68 mutex_unlock(&sbi->s_lock); in sysv_free_block()
75 if (count == sbi->s_flc_size || count == 0) { in sysv_free_block()
[all …]
Dialloc.c41 struct sysv_sb_info *sbi = SYSV_SB(sb); in sv_sb_fic_inode() local
43 if (sbi->s_bh1 == sbi->s_bh2) in sv_sb_fic_inode()
44 return &sbi->s_sb_fic_inodes[i]; in sv_sb_fic_inode()
49 return (sysv_ino_t*)(sbi->s_sbd1 + offset); in sv_sb_fic_inode()
51 return (sysv_ino_t*)(sbi->s_sbd2 + offset); in sv_sb_fic_inode()
58 struct sysv_sb_info *sbi = SYSV_SB(sb); in sysv_raw_inode() local
60 int block = sbi->s_firstinodezone + sbi->s_block_base; in sysv_raw_inode()
62 block += (ino-1) >> sbi->s_inodes_per_block_bits; in sysv_raw_inode()
67 return res + ((ino-1) & sbi->s_inodes_per_block_1); in sysv_raw_inode()
72 struct sysv_sb_info *sbi = SYSV_SB(sb); in refill_free_cache() local
[all …]
Dinode.c36 struct sysv_sb_info *sbi = SYSV_SB(sb); in sysv_sync_fs() local
39 mutex_lock(&sbi->s_lock); in sysv_sync_fs()
46 old_time = fs32_to_cpu(sbi, *sbi->s_sb_time); in sysv_sync_fs()
47 if (sbi->s_type == FSTYPE_SYSV4) { in sysv_sync_fs()
48 if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time)) in sysv_sync_fs()
49 *sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38 - time); in sysv_sync_fs()
50 *sbi->s_sb_time = cpu_to_fs32(sbi, time); in sysv_sync_fs()
51 mark_buffer_dirty(sbi->s_bh2); in sysv_sync_fs()
54 mutex_unlock(&sbi->s_lock); in sysv_sync_fs()
61 struct sysv_sb_info *sbi = SYSV_SB(sb); in sysv_remount() local
[all …]
Dsysv.h116 struct sysv_sb_info *sbi = SYSV_SB(sb); in dirty_sb() local
118 mark_buffer_dirty(sbi->s_bh1); in dirty_sb()
119 if (sbi->s_bh1 != sbi->s_bh2) in dirty_sb()
120 mark_buffer_dirty(sbi->s_bh2); in dirty_sb()
190 static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n) in fs32_to_cpu() argument
192 if (sbi->s_bytesex == BYTESEX_PDP) in fs32_to_cpu()
194 else if (sbi->s_bytesex == BYTESEX_LE) in fs32_to_cpu()
200 static inline __fs32 cpu_to_fs32(struct sysv_sb_info *sbi, __u32 n) in cpu_to_fs32() argument
202 if (sbi->s_bytesex == BYTESEX_PDP) in cpu_to_fs32()
204 else if (sbi->s_bytesex == BYTESEX_LE) in cpu_to_fs32()
[all …]
Ditree.c25 struct sysv_sb_info *sbi = SYSV_SB(sb); in block_to_path() local
26 int ptrs_bits = sbi->s_ind_per_block_bits; in block_to_path()
27 unsigned long indirect_blocks = sbi->s_ind_per_block, in block_to_path()
28 double_blocks = sbi->s_ind_per_block_2; in block_to_path()
53 static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr) in block_to_cpu() argument
55 return sbi->s_block_base + fs32_to_cpu(sbi, nr); in block_to_cpu()
430 struct sysv_sb_info *sbi = SYSV_SB(s); in sysv_nblocks() local
431 int ptrs_bits = sbi->s_ind_per_block_bits; in sysv_nblocks()
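The sysv.h hits above are the byte-order helpers whose bodies the listing truncates. Reconstructed from the visible branches, fs32_to_cpu() dispatches on the superblock's recorded byte sex; the sketch below assumes the BYTESEX_* constants and the PDP_swab() helper that sysv.h provides for PDP-11 middle-endian images:

        static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
        {
                if (sbi->s_bytesex == BYTESEX_PDP)          /* PDP-11 middle-endian */
                        return PDP_swab((__force __u32)n);
                else if (sbi->s_bytesex == BYTESEX_LE)      /* little-endian variants */
                        return le32_to_cpu((__force __le32)n);
                else                                        /* big-endian variants */
                        return be32_to_cpu((__force __be32)n);
        }

cpu_to_fs32() is the same three-way split in the opposite direction, which is why both appear in the hit list with an identical line structure.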
/linux-4.4.14/fs/hfsplus/
Dsuper.c100 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); in hfsplus_system_write_inode() local
101 struct hfsplus_vh *vhdr = sbi->s_vhdr; in hfsplus_system_write_inode()
108 tree = sbi->ext_tree; in hfsplus_system_write_inode()
112 tree = sbi->cat_tree; in hfsplus_system_write_inode()
122 tree = sbi->attr_tree; in hfsplus_system_write_inode()
129 set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags); in hfsplus_system_write_inode()
176 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_sync_fs() local
177 struct hfsplus_vh *vhdr = sbi->s_vhdr; in hfsplus_sync_fs()
194 error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping); in hfsplus_sync_fs()
195 error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping); in hfsplus_sync_fs()
[all …]
Doptions.c99 int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi) in hfsplus_parse_options() argument
115 if (match_fourchar(&args[0], &sbi->creator)) { in hfsplus_parse_options()
121 if (match_fourchar(&args[0], &sbi->type)) { in hfsplus_parse_options()
131 sbi->umask = (umode_t)tmp; in hfsplus_parse_options()
138 sbi->uid = make_kuid(current_user_ns(), (uid_t)tmp); in hfsplus_parse_options()
139 if (!uid_valid(sbi->uid)) { in hfsplus_parse_options()
149 sbi->gid = make_kgid(current_user_ns(), (gid_t)tmp); in hfsplus_parse_options()
150 if (!gid_valid(sbi->gid)) { in hfsplus_parse_options()
156 if (match_int(&args[0], &sbi->part)) { in hfsplus_parse_options()
162 if (match_int(&args[0], &sbi->session)) { in hfsplus_parse_options()
[all …]
Dwrapper.c159 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_read_wrapper() local
174 sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL); in hfsplus_read_wrapper()
175 if (!sbi->s_vhdr_buf) in hfsplus_read_wrapper()
177 sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL); in hfsplus_read_wrapper()
178 if (!sbi->s_backup_vhdr_buf) in hfsplus_read_wrapper()
183 sbi->s_vhdr_buf, (void **)&sbi->s_vhdr, in hfsplus_read_wrapper()
189 switch (sbi->s_vhdr->signature) { in hfsplus_read_wrapper()
191 set_bit(HFSPLUS_SB_HFSX, &sbi->flags); in hfsplus_read_wrapper()
196 if (!hfsplus_read_mdb(sbi->s_vhdr, &wd)) in hfsplus_read_wrapper()
215 sbi->s_backup_vhdr_buf, in hfsplus_read_wrapper()
[all …]
Dbitmap.c21 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_block_allocate() local
34 mutex_lock(&sbi->alloc_mutex); in hfsplus_block_allocate()
35 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_allocate()
155 sbi->free_blocks -= *max; in hfsplus_block_allocate()
159 mutex_unlock(&sbi->alloc_mutex); in hfsplus_block_allocate()
165 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_block_free() local
178 if ((offset + count) > sbi->total_blocks) in hfsplus_block_free()
181 mutex_lock(&sbi->alloc_mutex); in hfsplus_block_free()
182 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_free()
234 sbi->free_blocks += len; in hfsplus_block_free()
[all …]
Ddir.c298 struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb); in hfsplus_link() local
311 mutex_lock(&sbi->vh_mutex); in hfsplus_link()
320 sbi->hidden_dir, &str); in hfsplus_link()
327 cnid = sbi->next_cnid++; in hfsplus_link()
334 sbi->file_count++; in hfsplus_link()
336 cnid = sbi->next_cnid++; in hfsplus_link()
346 sbi->file_count++; in hfsplus_link()
349 mutex_unlock(&sbi->vh_mutex); in hfsplus_link()
355 struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); in hfsplus_unlink() local
365 mutex_lock(&sbi->vh_mutex); in hfsplus_unlink()
[all …]
Dinode.c184 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); in hfsplus_get_perms() local
191 inode->i_uid = sbi->uid; in hfsplus_get_perms()
195 inode->i_gid = sbi->gid; in hfsplus_get_perms()
198 mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask)); in hfsplus_get_perms()
201 mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask)); in hfsplus_get_perms()
283 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); in hfsplus_file_fsync() local
300 error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping); in hfsplus_file_fsync()
304 filemap_write_and_wait(sbi->ext_tree->inode->i_mapping); in hfsplus_file_fsync()
310 if (sbi->attr_tree) { in hfsplus_file_fsync()
313 sbi->attr_tree->inode->i_mapping); in hfsplus_file_fsync()
[all …]
Dpart_tbl.c73 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfs_parse_old_pmap() local
81 (sbi->part < 0 || sbi->part == i)) { in hfs_parse_old_pmap()
94 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfs_parse_new_pmap() local
102 (sbi->part < 0 || sbi->part == i)) { in hfs_parse_new_pmap()
Dioctl.c30 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); in hfsplus_ioctl_bless() local
31 struct hfsplus_vh *vh = sbi->s_vhdr; in hfsplus_ioctl_bless()
32 struct hfsplus_vh *bvh = sbi->s_backup_vhdr; in hfsplus_ioctl_bless()
38 mutex_lock(&sbi->vh_mutex); in hfsplus_ioctl_bless()
55 mutex_unlock(&sbi->vh_mutex); in hfsplus_ioctl_bless()
Dextents.c224 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_get_block() local
232 ablock = iblock >> sbi->fs_shift; in hfsplus_get_block()
274 mask = (1 << sbi->fs_shift) - 1; in hfsplus_get_block()
275 sector = ((sector_t)dblock << sbi->fs_shift) + in hfsplus_get_block()
276 sbi->blockoffset + (iblock & mask); in hfsplus_get_block()
431 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_file_extend() local
436 if (sbi->alloc_file->i_size * 8 < in hfsplus_file_extend()
437 sbi->total_blocks - sbi->free_blocks + 8) { in hfsplus_file_extend()
440 sbi->alloc_file->i_size * 8, in hfsplus_file_extend()
441 sbi->total_blocks, sbi->free_blocks); in hfsplus_file_extend()
[all …]
Dcatalog.c107 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); in hfsplus_cat_build_record() local
115 if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) in hfsplus_cat_build_record()
124 if (inode == sbi->hidden_dir) in hfsplus_cat_build_record()
150 cpu_to_be32(sbi->type); in hfsplus_cat_build_record()
152 cpu_to_be32(sbi->creator); in hfsplus_cat_build_record()
167 HFSPLUS_I(sbi->hidden_dir)->create_date; in hfsplus_cat_build_record()
224 struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); in hfsplus_subfolders_inc() local
226 if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) { in hfsplus_subfolders_inc()
237 struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); in hfsplus_subfolders_dec() local
239 if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) { in hfsplus_subfolders_dec()
Dxattr.c129 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_create_attributes_file() local
143 switch (atomic_read(&sbi->attr_tree_state)) { in hfsplus_create_attributes_file()
145 if (old_state != atomic_cmpxchg(&sbi->attr_tree_state, in hfsplus_create_attributes_file()
186 sbi->sect_count, in hfsplus_create_attributes_file()
190 hip->clump_blocks = clump_size >> sbi->alloc_blksz_shift; in hfsplus_create_attributes_file()
193 if (sbi->free_blocks <= (hip->clump_blocks << 1)) { in hfsplus_create_attributes_file()
205 (loff_t)hip->alloc_blocks << sbi->alloc_blksz_shift; in hfsplus_create_attributes_file()
206 hip->fs_blocks = hip->alloc_blocks << sbi->fs_shift; in hfsplus_create_attributes_file()
243 sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID); in hfsplus_create_attributes_file()
244 if (!sbi->attr_tree) in hfsplus_create_attributes_file()
[all …]
Dbnode.c663 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfs_bnode_need_zeroout() local
664 const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes); in hfs_bnode_need_zeroout()
Dhfsplus_fs.h496 int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi);
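In the dir.c excerpt above, hfsplus_link() bumps sbi->next_cnid and sbi->file_count only while sbi->vh_mutex is held, since both values are mirrored back into the volume header. A minimal sketch of just that allocation step; alloc_cnid() is a made-up name for illustration, and in the real code the mutex stays held across the whole catalog update:

        /* Hypothetical helper showing the vh_mutex-protected CNID handout
         * seen in hfsplus_link()/hfsplus_unlink() above. */
        static u32 alloc_cnid(struct hfsplus_sb_info *sbi)
        {
                u32 cnid;

                mutex_lock(&sbi->vh_mutex);     /* guards volume-header fields */
                cnid = sbi->next_cnid++;
                sbi->file_count++;
                mutex_unlock(&sbi->vh_mutex);
                return cnid;
        }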
/linux-4.4.14/fs/autofs4/
Dinode.c25 struct autofs_info *autofs4_new_ino(struct autofs_sb_info *sbi) in autofs4_new_ino() argument
32 ino->sbi = sbi; in autofs4_new_ino()
51 struct autofs_sb_info *sbi = autofs4_sbi(sb); in autofs4_kill_sb() local
59 if (sbi) { in autofs4_kill_sb()
61 autofs4_catatonic_mode(sbi); in autofs4_kill_sb()
62 put_pid(sbi->oz_pgrp); in autofs4_kill_sb()
67 if (sbi) in autofs4_kill_sb()
68 kfree_rcu(sbi, rcu); in autofs4_kill_sb()
73 struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb); in autofs4_show_options() local
76 if (!sbi) in autofs4_show_options()
[all …]
Droot.c76 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); in autofs4_add_active() local
79 spin_lock(&sbi->lookup_lock); in autofs4_add_active()
82 list_add(&ino->active, &sbi->active_list); in autofs4_add_active()
85 spin_unlock(&sbi->lookup_lock); in autofs4_add_active()
92 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); in autofs4_del_active() local
95 spin_lock(&sbi->lookup_lock); in autofs4_del_active()
101 spin_unlock(&sbi->lookup_lock); in autofs4_del_active()
109 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); in autofs4_dir_open() local
113 if (autofs4_oz_mode(sbi)) in autofs4_dir_open()
125 spin_lock(&sbi->lookup_lock); in autofs4_dir_open()
[all …]
Dwaitq.c27 void autofs4_catatonic_mode(struct autofs_sb_info *sbi) in autofs4_catatonic_mode() argument
31 mutex_lock(&sbi->wq_mutex); in autofs4_catatonic_mode()
32 if (sbi->catatonic) { in autofs4_catatonic_mode()
33 mutex_unlock(&sbi->wq_mutex); in autofs4_catatonic_mode()
39 sbi->catatonic = 1; in autofs4_catatonic_mode()
40 wq = sbi->queues; in autofs4_catatonic_mode()
41 sbi->queues = NULL; /* Erase all wait queues */ in autofs4_catatonic_mode()
51 fput(sbi->pipe); /* Close the pipe */ in autofs4_catatonic_mode()
52 sbi->pipe = NULL; in autofs4_catatonic_mode()
53 sbi->pipefd = -1; in autofs4_catatonic_mode()
[all …]
Dexpire.c52 struct autofs_sb_info *sbi = autofs4_sbi(path.dentry->d_sb); in autofs4_mount_busy() local
55 if (autofs_type_indirect(sbi->type)) in autofs4_mount_busy()
79 struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb); in get_next_positive_subdir() local
83 spin_lock(&sbi->lookup_lock); in get_next_positive_subdir()
96 spin_unlock(&sbi->lookup_lock); in get_next_positive_subdir()
113 spin_unlock(&sbi->lookup_lock); in get_next_positive_subdir()
126 struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb); in get_next_positive_dentry() local
133 spin_lock(&sbi->lookup_lock); in get_next_positive_dentry()
145 spin_unlock(&sbi->lookup_lock); in get_next_positive_dentry()
176 spin_unlock(&sbi->lookup_lock); in get_next_positive_dentry()
[all …]
Ddev-ioctl.c165 struct autofs_sb_info *sbi = NULL; in autofs_dev_ioctl_sbi() local
170 sbi = autofs4_sbi(inode->i_sb); in autofs_dev_ioctl_sbi()
172 return sbi; in autofs_dev_ioctl_sbi()
177 struct autofs_sb_info *sbi, in autofs_dev_ioctl_protover() argument
180 param->protover.version = sbi->version; in autofs_dev_ioctl_protover()
186 struct autofs_sb_info *sbi, in autofs_dev_ioctl_protosubver() argument
189 param->protosubver.sub_version = sbi->sub_version; in autofs_dev_ioctl_protosubver()
228 return ino && ino->sbi->type & *(unsigned *)p; in test_by_type()
272 struct autofs_sb_info *sbi, in autofs_dev_ioctl_openmount() argument
302 struct autofs_sb_info *sbi, in autofs_dev_ioctl_closemount() argument
[all …]
Dautofs_i.h73 struct autofs_sb_info *sbi; member
146 static inline int autofs4_oz_mode(struct autofs_sb_info *sbi) { in autofs4_oz_mode() argument
147 return sbi->catatonic || task_pgrp(current) == sbi->oz_pgrp; in autofs4_oz_mode()
160 struct autofs_sb_info *sbi, int when);
165 struct autofs_sb_info *sbi, int how);
168 struct autofs_sb_info *sbi, int how);
231 static inline u32 autofs4_get_dev(struct autofs_sb_info *sbi) in autofs4_get_dev() argument
233 return new_encode_dev(sbi->sb->s_dev); in autofs4_get_dev()
236 static inline u64 autofs4_get_ino(struct autofs_sb_info *sbi) in autofs4_get_ino() argument
238 return d_inode(sbi->sb->s_root)->i_ino; in autofs4_get_ino()
[all …]
Dsymlink.c17 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); in autofs4_follow_link() local
19 if (ino && !autofs4_oz_mode(sbi)) in autofs4_follow_link()
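autofs_i.h above quotes autofs4_oz_mode() in full; the sbi it inspects comes from the usual s_fs_info cast, whose body is not among the hits. A sketch of the pair (the cast helper is assumed, the oz-mode check is as shown):

        /* Assumed form of the accessor used throughout the excerpts. */
        static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
        {
                return (struct autofs_sb_info *)(sb->s_fs_info);
        }

        /* As quoted above: "oz" (daemon) context is either the catatonic
         * state or a task in the automount daemon's process group. */
        static inline int autofs4_oz_mode(struct autofs_sb_info *sbi)
        {
                return sbi->catatonic || task_pgrp(current) == sbi->oz_pgrp;
        }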
/linux-4.4.14/fs/affs/
Dbitmap.c40 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_free_block() local
48 if (block > sbi->s_partition_size) in affs_free_block()
51 blk = block - sbi->s_reserved; in affs_free_block()
52 bmap = blk / sbi->s_bmap_bits; in affs_free_block()
53 bit = blk % sbi->s_bmap_bits; in affs_free_block()
54 bm = &sbi->s_bitmap[bmap]; in affs_free_block()
56 mutex_lock(&sbi->s_bmlock); in affs_free_block()
58 bh = sbi->s_bmap_bh; in affs_free_block()
59 if (sbi->s_last_bmap != bmap) { in affs_free_block()
64 sbi->s_bmap_bh = bh; in affs_free_block()
[all …]
Dsuper.c30 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_commit_super() local
31 struct buffer_head *bh = sbi->s_root_bh; in affs_commit_super()
47 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_put_super() local
50 cancel_delayed_work_sync(&sbi->sb_work); in affs_put_super()
62 struct affs_sb_info *sbi; in flush_superblock() local
65 sbi = container_of(work, struct affs_sb_info, sb_work.work); in flush_superblock()
66 sb = sbi->sb; in flush_superblock()
68 spin_lock(&sbi->work_lock); in flush_superblock()
69 sbi->work_queued = 0; in flush_superblock()
70 spin_unlock(&sbi->work_lock); in flush_superblock()
[all …]
Dsymlink.c34 struct affs_sb_info *sbi = AFFS_SB(inode->i_sb); in affs_symlink_readpage() local
36 spin_lock(&sbi->symlink_lock); in affs_symlink_readpage()
37 pf = sbi->s_prefix ? sbi->s_prefix : "/"; in affs_symlink_readpage()
40 spin_unlock(&sbi->symlink_lock); in affs_symlink_readpage()
Dinode.c18 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_iget() local
69 if (affs_test_opt(sbi->s_flags, SF_SETMODE)) in affs_iget()
70 inode->i_mode = sbi->s_mode; in affs_iget()
75 if (id == 0 || affs_test_opt(sbi->s_flags, SF_SETUID)) in affs_iget()
76 inode->i_uid = sbi->s_uid; in affs_iget()
77 else if (id == 0xFFFF && affs_test_opt(sbi->s_flags, SF_MUFS)) in affs_iget()
83 if (id == 0 || affs_test_opt(sbi->s_flags, SF_SETGID)) in affs_iget()
84 inode->i_gid = sbi->s_gid; in affs_iget()
85 else if (id == 0xFFFF && affs_test_opt(sbi->s_flags, SF_MUFS)) in affs_iget()
92 inode->i_uid = sbi->s_uid; in affs_iget()
[all …]
Dnamei.c359 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_symlink() local
362 spin_lock(&sbi->symlink_lock); in affs_symlink()
363 while (sbi->s_volume[i]) /* Cannot overflow */ in affs_symlink()
364 *p++ = sbi->s_volume[i++]; in affs_symlink()
365 spin_unlock(&sbi->symlink_lock); in affs_symlink()
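The affs symlink.c and namei.c hits read sbi->s_prefix and sbi->s_volume under sbi->symlink_lock, presumably because a remount can swap those strings out. A minimal sketch of the read side; copy_prefix() is a hypothetical helper, and strlcpy stands in for whatever bounded copy the caller needs:

        /* Hypothetical helper mirroring affs_symlink_readpage(): snapshot
         * the mount prefix under symlink_lock so it cannot change (or be
         * freed) while we copy it. */
        static void copy_prefix(struct affs_sb_info *sbi, char *buf, size_t len)
        {
                const char *pf;

                spin_lock(&sbi->symlink_lock);
                pf = sbi->s_prefix ? sbi->s_prefix : "/";
                strlcpy(buf, pf, len);
                spin_unlock(&sbi->symlink_lock);
        }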
/linux-4.4.14/fs/ext2/
Dsuper.c52 struct ext2_sb_info *sbi = EXT2_SB(sb); in ext2_error() local
53 struct ext2_super_block *es = sbi->s_es; in ext2_error()
56 spin_lock(&sbi->s_lock); in ext2_error()
57 sbi->s_mount_state |= EXT2_ERROR_FS; in ext2_error()
59 spin_unlock(&sbi->s_lock); in ext2_error()
130 struct ext2_sb_info *sbi = EXT2_SB(sb); in ext2_put_super() local
136 struct ext2_super_block *es = sbi->s_es; in ext2_put_super()
138 spin_lock(&sbi->s_lock); in ext2_put_super()
139 es->s_state = cpu_to_le16(sbi->s_mount_state); in ext2_put_super()
140 spin_unlock(&sbi->s_lock); in ext2_put_super()
[all …]
Dialloc.c261 struct ext2_sb_info *sbi = EXT2_SB(sb); in find_group_orlov() local
262 struct ext2_super_block *es = sbi->s_es; in find_group_orlov()
263 int ngroups = sbi->s_groups_count; in find_group_orlov()
275 freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter); in find_group_orlov()
277 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); in find_group_orlov()
279 ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); in find_group_orlov()
334 if (sbi->s_debts[group] >= max_debt) in find_group_orlov()
442 struct ext2_sb_info *sbi; in ext2_new_inode() local
451 sbi = EXT2_SB(sb); in ext2_new_inode()
452 es = sbi->s_es; in ext2_new_inode()
[all …]
Dballoc.c46 struct ext2_sb_info *sbi = EXT2_SB(sb); in ext2_get_group_desc() local
48 if (block_group >= sbi->s_groups_count) { in ext2_get_group_desc()
52 block_group, sbi->s_groups_count); in ext2_get_group_desc()
59 if (!sbi->s_group_desc[group_desc]) { in ext2_get_group_desc()
67 desc = (struct ext2_group_desc *) sbi->s_group_desc[group_desc]->b_data; in ext2_get_group_desc()
69 *bh = sbi->s_group_desc[group_desc]; in ext2_get_group_desc()
166 struct ext2_sb_info *sbi = EXT2_SB(sb); in group_adjust_blocks() local
169 spin_lock(sb_bgl_lock(sbi, group_no)); in group_adjust_blocks()
172 spin_unlock(sb_bgl_lock(sbi, group_no)); in group_adjust_blocks()
486 struct ext2_sb_info * sbi = EXT2_SB(sb); in ext2_free_blocks() local
[all …]
Dext2.h117 sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group) in sb_bgl_lock() argument
119 return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group); in sb_bgl_lock()
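ext2.h above shows sb_bgl_lock() handing back a per-block-group spinlock from the shared blockgroup lock table, and balloc.c's group_adjust_blocks() takes it around a group-descriptor update. A sketch of that usage, assuming the descriptor and its buffer_head were already looked up via ext2_get_group_desc(); the helper name is illustrative:

        /* The free-block count in the group descriptor is a 16-bit on-disk
         * field, so it is updated under the group's spinlock rather than
         * with atomics, then the descriptor block is marked dirty. */
        static void adjust_group_free_blocks(struct super_block *sb,
                                             unsigned int group_no,
                                             struct ext2_group_desc *desc,
                                             struct buffer_head *bh, int count)
        {
                struct ext2_sb_info *sbi = EXT2_SB(sb);
                unsigned int free_blocks;

                spin_lock(sb_bgl_lock(sbi, group_no));
                free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
                desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count);
                spin_unlock(sb_bgl_lock(sbi, group_no));
                mark_buffer_dirty(bh);
        }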
/linux-4.4.14/fs/ufs/
Dcylinder.c31 struct ufs_sb_info * sbi = UFS_SB(sb); in ufs_read_cylinder() local
38 uspi = sbi->s_uspi; in ufs_read_cylinder()
39 ucpi = sbi->s_ucpi[bitmap_nr]; in ufs_read_cylinder()
40 ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data; in ufs_read_cylinder()
47 UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno]; in ufs_read_cylinder()
51 sbi->s_cgno[bitmap_nr] = cgno; in ufs_read_cylinder()
73 brelse (sbi->s_ucg[j]); in ufs_read_cylinder()
74 sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; in ufs_read_cylinder()
84 struct ufs_sb_info * sbi = UFS_SB(sb); in ufs_put_cylinder() local
92 uspi = sbi->s_uspi; in ufs_put_cylinder()
[all …]
Dsuper.c470 struct ufs_sb_info *sbi = UFS_SB(sb); in ufs_setup_cstotal() local
471 struct ufs_sb_private_info *uspi = sbi->s_uspi; in ufs_setup_cstotal()
475 unsigned mtype = sbi->s_mount_opt & UFS_MOUNT_UFSTYPE; in ufs_setup_cstotal()
504 struct ufs_sb_info *sbi = UFS_SB(sb); in ufs_read_cylinder_structures() local
505 struct ufs_sb_private_info *uspi = sbi->s_uspi; in ufs_read_cylinder_structures()
521 sbi->s_csp = (struct ufs_csum *)space; in ufs_read_cylinder_structures()
543 if (!(sbi->s_ucg = kmalloc (sizeof(struct buffer_head *) * uspi->s_ncg, GFP_NOFS))) in ufs_read_cylinder_structures()
546 sbi->s_ucg[i] = NULL; in ufs_read_cylinder_structures()
548 sbi->s_ucpi[i] = NULL; in ufs_read_cylinder_structures()
549 sbi->s_cgno[i] = UFS_CGNO_EMPTY; in ufs_read_cylinder_structures()
[all …]
Dialloc.c174 struct ufs_sb_info * sbi; in ufs_new_inode() local
193 sbi = UFS_SB(sb); in ufs_new_inode()
194 uspi = sbi->s_uspi; in ufs_new_inode()
196 mutex_lock(&sbi->s_lock); in ufs_new_inode()
202 if (sbi->fs_cs(i).cs_nifree) { in ufs_new_inode()
214 if (sbi->fs_cs(i).cs_nifree) { in ufs_new_inode()
228 if (sbi->fs_cs(i).cs_nifree) { in ufs_new_inode()
276 fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1); in ufs_new_inode()
281 fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1); in ufs_new_inode()
334 mutex_unlock(&sbi->s_lock); in ufs_new_inode()
[all …]
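The ufs ialloc.c excerpt scans the in-core cylinder-group summaries (sbi->fs_cs(i).cs_nifree) for a group with free inodes, with sbi->s_lock held by ufs_new_inode(). A sketch of that scan; the function name is made up, and the assumption is that fs_cs() indexes the s_csp summary array loaded by ufs_read_cylinder_structures() above:

        /* Hypothetical helper: find a cylinder group with free inodes.
         * Caller is expected to hold sbi->s_lock, as ufs_new_inode() does. */
        static int find_cg_with_free_inodes(struct super_block *sb, unsigned int start)
        {
                struct ufs_sb_info *sbi = UFS_SB(sb);
                struct ufs_sb_private_info *uspi = sbi->s_uspi;
                unsigned int i;

                for (i = start; i < uspi->s_ncg; i++)
                        if (sbi->fs_cs(i).cs_nifree)
                                return i;
                return -1;
        }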
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
Dlproc_llite.c54 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, in blocksize_show() local
59 rc = ll_statfs_internal(sbi->ll_sb, &osfs, in blocksize_show()
72 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, in kbytestotal_show() local
77 rc = ll_statfs_internal(sbi->ll_sb, &osfs, in kbytestotal_show()
97 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, in kbytesfree_show() local
102 rc = ll_statfs_internal(sbi->ll_sb, &osfs, in kbytesfree_show()
122 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, in kbytesavail_show() local
127 rc = ll_statfs_internal(sbi->ll_sb, &osfs, in kbytesavail_show()
147 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info, in filestotal_show() local
152 rc = ll_statfs_internal(sbi->ll_sb, &osfs, in filestotal_show()
[all …]
Dllite_lib.c69 struct ll_sb_info *sbi = NULL; in ll_init_sbi() local
76 sbi = kzalloc(sizeof(*sbi), GFP_NOFS); in ll_init_sbi()
77 if (!sbi) in ll_init_sbi()
80 spin_lock_init(&sbi->ll_lock); in ll_init_sbi()
81 mutex_init(&sbi->ll_lco.lco_lock); in ll_init_sbi()
82 spin_lock_init(&sbi->ll_pp_extent_lock); in ll_init_sbi()
83 spin_lock_init(&sbi->ll_process_lock); in ll_init_sbi()
84 sbi->ll_rw_stats_on = 0; in ll_init_sbi()
94 atomic_set(&sbi->ll_cache.ccc_users, 0); in ll_init_sbi()
95 sbi->ll_cache.ccc_lru_max = lru_page_max; in ll_init_sbi()
[all …]
Dvvp_dev.c196 struct ll_sb_info *sbi; in cl_sb_init() local
202 sbi = ll_s2sbi(sb); in cl_sb_init()
206 sbi->ll_dt_exp->exp_obd->obd_lu_dev); in cl_sb_init()
209 sbi->ll_cl = cl; in cl_sb_init()
210 sbi->ll_site = cl2lu_dev(cl)->ld_site; in cl_sb_init()
220 struct ll_sb_info *sbi; in cl_sb_fini() local
226 sbi = ll_s2sbi(sb); in cl_sb_fini()
229 cld = sbi->ll_cl; in cl_sb_fini()
233 sbi->ll_cl = NULL; in cl_sb_fini()
234 sbi->ll_site = NULL; in cl_sb_fini()
[all …]
Dxattr.c89 int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type) in xattr_type_filter() argument
93 !(sbi->ll_flags & LL_SBI_ACL)) in xattr_type_filter()
96 if (xattr_type == XATTR_USER_T && !(sbi->ll_flags & LL_SBI_USER_XATTR)) in xattr_type_filter()
111 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_setxattr_common() local
122 rc = xattr_type_filter(sbi, xattr_type); in ll_setxattr_common()
147 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && in ll_setxattr_common()
150 rce = rct_search(&sbi->ll_rct, current_pid()); in ll_setxattr_common()
159 ee = et_search_del(&sbi->ll_et, current_pid(), in ll_setxattr_common()
191 rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), in ll_setxattr_common()
203 sbi->ll_flags &= ~LL_SBI_USER_XATTR; in ll_setxattr_common()
[all …]
Ddir.c480 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_dir_read() local
482 int api32 = ll_need_32bit_api(sbi); in ll_dir_read()
483 int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH; in ll_dir_read()
597 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_readdir() local
598 int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH; in ll_readdir()
599 int api32 = ll_need_32bit_api(sbi); in ll_readdir()
630 ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1); in ll_readdir()
659 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_dir_setdirstripe() local
673 err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode, in ll_dir_setdirstripe()
688 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_dir_setstripe() local
[all …]
Dllite_nfs.c88 struct ll_sb_info *sbi = ll_s2sbi(sb); in search_inode_for_lustre() local
93 ll_need_32bit_api(sbi)); in search_inode_for_lustre()
103 rc = ll_get_default_mdsize(sbi, &eadatalen); in search_inode_for_lustre()
118 rc = md_getattr(sbi->ll_md_exp, op_data, &req); in search_inode_for_lustre()
283 struct ll_sb_info *sbi; in ll_get_parent() local
293 sbi = ll_s2sbi(dir->i_sb); in ll_get_parent()
298 rc = ll_get_default_mdsize(sbi, &lmmsize); in ll_get_parent()
308 rc = md_getattr_name(sbi->ll_md_exp, op_data, &req); in ll_get_parent()
Ddcache.c123 struct ll_sb_info *sbi = ll_i2sbi(inode); in find_cbdata() local
128 rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode), in find_cbdata()
137 rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL); in find_cbdata()
292 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_lookup_finish_locks() local
296 ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); in ll_lookup_finish_locks()
Drw.c280 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
301 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, in ll_ra_count_get() argument
305 struct ll_ra_info *ra = &sbi->ll_ra_info; in ll_ra_count_get()
344 void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len) in ll_ra_count_put() argument
346 struct ll_ra_info *ra = &sbi->ll_ra_info; in ll_ra_count_put()
351 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which) in ll_ra_stats_inc_sbi() argument
354 lprocfs_counter_incr(sbi->ll_ra_stats, which); in ll_ra_stats_inc_sbi()
359 struct ll_sb_info *sbi = ll_i2sbi(mapping->host); in ll_ra_stats_inc() local
361 ll_ra_stats_inc_sbi(sbi, which); in ll_ra_stats_inc()
946 void ras_update(struct ll_sb_info *sbi, struct inode *inode, in ras_update() argument
[all …]
Dfile.c335 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_file_release() local
343 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) { in ll_file_release()
349 rct_del(&sbi->ll_rct, current_pid()); in ll_file_release()
350 et_search_free(&sbi->ll_et, current_pid()); in ll_file_release()
356 ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1); in ll_file_release()
378 rc = ll_md_close(sbi->ll_md_exp, inode, file); in ll_file_release()
390 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_intent_file_open() local
421 rc = md_intent_lock(sbi->ll_md_exp, op_data, lmm, lmmsize, itp, in ll_intent_file_open()
448 ll_set_lock_data(sbi->ll_md_exp, inode, itp, NULL); in ll_intent_file_open()
744 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_lease_open() local
[all …]
Dxattr_cache.c279 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_xattr_find_get_lock() local
280 struct obd_export *exp = sbi->ll_md_exp; in ll_xattr_find_get_lock()
340 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_xattr_cache_refill() local
354 ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1); in ll_xattr_cache_refill()
434 ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL); in ll_xattr_cache_refill()
Dllite_internal.h647 static inline int ll_need_32bit_api(struct ll_sb_info *sbi) in ll_need_32bit_api() argument
652 return unlikely(is_compat_task() || (sbi->ll_flags & LL_SBI_32BIT_API)); in ll_need_32bit_api()
654 return unlikely(sbi->ll_flags & LL_SBI_32BIT_API); in ll_need_32bit_api()
664 void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi);
665 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count);
667 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
716 int ll_glimpse_ioctl(struct ll_sb_info *sbi,
788 int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
789 int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
994 static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi) in sbi2mdc() argument
[all …]
Dsymlink.c49 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_readlink_internal() local
75 rc = md_getattr(sbi->ll_md_exp, op_data, request); in ll_readlink_internal()
Dnamei.c681 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_create_node() local
702 ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); in ll_create_node()
769 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_new_node() local
785 err = md_create(sbi->ll_md_exp, op_data, tgt, tgt_len, mode, in ll_new_node()
1046 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_link() local
1062 err = md_link(sbi->ll_md_exp, op_data, &request); in ll_link()
1068 ll_stats_ops_tally(sbi, LPROC_LL_LINK, 1); in ll_link()
1078 struct ll_sb_info *sbi = ll_i2sbi(old_dir); in ll_rename() local
1094 err = md_rename(sbi->ll_md_exp, op_data, in ll_rename()
1103 ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1); in ll_rename()
Dvvp_io.c819 struct ll_sb_info *sbi = ll_i2sbi(inode); in vvp_io_read_page() local
829 if (sbi->ll_ra_info.ra_max_pages_per_file && in vvp_io_read_page()
830 sbi->ll_ra_info.ra_max_pages) in vvp_io_read_page()
831 ras_update(sbi, inode, ras, page->cp_index, in vvp_io_read_page()
853 if (sbi->ll_ra_info.ra_max_pages_per_file && in vvp_io_read_page()
854 sbi->ll_ra_info.ra_max_pages) in vvp_io_read_page()
975 struct ll_sb_info *sbi = ll_i2sbi(inode); in vvp_io_commit_write() local
1054 ll_stats_ops_tally(sbi, tallyop, 1); in vvp_io_commit_write()
Dremote_perm.c246 struct ll_sb_info *sbi = ll_i2sbi(inode); in lustre_check_remote_perm() local
275 rc = md_get_remote_perm(sbi->ll_md_exp, ll_inode2fid(inode), in lustre_check_remote_perm()
Dstatahead.c954 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_agl_thread() local
963 atomic_inc(&sbi->ll_agl_total); in ll_agl_thread()
1045 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_statahead_thread() local
1060 if (sbi->ll_flags & LL_SBI_AGL_ENABLED) in ll_statahead_thread()
1063 atomic_inc(&sbi->ll_sa_total); in ll_statahead_thread()
1462 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode); in ll_sai_unplug() local
1474 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max); in ll_sai_unplug()
1481 atomic_inc(&sbi->ll_sa_wrong); in ll_sai_unplug()
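Unlike the disk filesystems in this listing, the llite sysfs handlers in lproc_llite.c do not start from a super_block at all: each *_show() recovers the ll_sb_info with container_of(), because the kobject handed to the callback is embedded in the sbi itself (the member name is cut off in the excerpt). A generic sketch of that pattern with demo names, not lustre's actual definitions:

        /* Embed the kobject in the private info, then walk back to the
         * container inside the sysfs callback.  All names are illustrative. */
        struct demo_sb_info {
                unsigned int blocksize;
                struct kobject demo_kobj;       /* registered at mount time */
        };

        static ssize_t demo_show(struct kobject *kobj, struct attribute *attr,
                                 char *buf)
        {
                struct demo_sb_info *sbi = container_of(kobj, struct demo_sb_info,
                                                        demo_kobj);

                return sprintf(buf, "%u\n", sbi->blocksize);
        }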
/linux-4.4.14/fs/omfs/
Dinode.c24 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_bread() local
25 if (block >= sbi->s_num_blocks) in omfs_bread()
28 return sb_bread(sb, clus_to_blk(sbi, block)); in omfs_bread()
37 struct omfs_sb_info *sbi = OMFS_SB(dir->i_sb); in omfs_new_inode() local
43 err = omfs_allocate_range(dir->i_sb, sbi->s_mirrors, sbi->s_mirrors, in omfs_new_inode()
57 inode->i_size = sbi->s_sys_blocksize; in omfs_new_inode()
103 struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); in __omfs_write_inode() local
128 oi->i_head.h_body_size = cpu_to_be32(sbi->s_sys_blocksize - in __omfs_write_inode()
149 for (i = 1; i < sbi->s_mirrors; i++) { in __omfs_write_inode()
202 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_iget() local
[all …]
Dbitmap.c11 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_count_free() local
14 for (i = 0; i < sbi->s_imap_size; i++) in omfs_count_free()
15 sum += nbits - bitmap_weight(sbi->s_imap[i], nbits); in omfs_count_free()
53 struct omfs_sb_info *sbi = OMFS_SB(sb); in set_run() local
56 bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); in set_run()
68 clus_to_blk(sbi, sbi->s_bitmap_ino) + map); in set_run()
73 set_bit(bit, sbi->s_imap[map]); in set_run()
76 clear_bit(bit, sbi->s_imap[map]); in set_run()
93 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_allocate_block() local
103 mutex_lock(&sbi->s_bitmap_lock); in omfs_allocate_block()
[all …]
Dfile.c13 static u32 omfs_max_extents(struct omfs_sb_info *sbi, int offset) in omfs_max_extents() argument
15 return (sbi->s_sys_blocksize - offset - in omfs_max_extents()
33 struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); in omfs_shrink_inode() local
57 max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START); in omfs_shrink_inode()
61 if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next)) in omfs_shrink_inode()
87 omfs_clear_range(inode->i_sb, last, sbi->s_mirrors); in omfs_shrink_inode()
96 max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT); in omfs_shrink_inode()
121 struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); in omfs_grow_extent() local
155 max_count = omfs_max_extents(sbi, OMFS_EXTENT_START); in omfs_grow_extent()
162 ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize, in omfs_grow_extent()
[all …]
Domfs.h29 static inline sector_t clus_to_blk(struct omfs_sb_info *sbi, sector_t block) in clus_to_blk() argument
31 return block << sbi->s_block_shift; in clus_to_blk()
50 extern int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header,
Ddir.c86 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_make_empty() local
98 sbi->s_sys_blocksize - OMFS_DIR_START); in omfs_make_empty()
316 int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header, in omfs_is_bad() argument
321 is_bad = ((ino != fsblock) || (ino < sbi->s_root_ino) || in omfs_is_bad()
322 (ino > sbi->s_num_blocks)); in omfs_is_bad()
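omfs.h above gives clus_to_blk() in full, and inode.c's omfs_bread() wraps it with a bounds check against sbi->s_num_blocks. Assuming OMFS_SB() is the standard sb->s_fs_info cast (it is not among the hits), a block read reduces to:

        /* Sketch of the read path shown in omfs_bread(): validate the
         * cluster number, shift it into a device block, and read it. */
        static struct buffer_head *read_cluster(struct super_block *sb, u64 block)
        {
                struct omfs_sb_info *sbi = OMFS_SB(sb);

                if (block >= sbi->s_num_blocks)
                        return NULL;
                return sb_bread(sb, block << sbi->s_block_shift);
        }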
/linux-4.4.14/fs/hpfs/
Dsuper.c112 static void free_sbi(struct hpfs_sb_info *sbi) in free_sbi() argument
114 kfree(sbi->sb_cp_table); in free_sbi()
115 kfree(sbi->sb_bmp_dir); in free_sbi()
116 kfree(sbi); in free_sbi()
166 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_get_free_dnodes() local
167 if (sbi->sb_n_free_dnodes == (unsigned)-1) { in hpfs_get_free_dnodes()
168 unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap); in hpfs_get_free_dnodes()
171 sbi->sb_n_free_dnodes = c; in hpfs_get_free_dnodes()
173 return sbi->sb_n_free_dnodes; in hpfs_get_free_dnodes()
179 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_statfs() local
[all …]
Dalloc.c13 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_claim_alloc() local
14 if (sbi->sb_n_free != (unsigned)-1) { in hpfs_claim_alloc()
15 if (unlikely(!sbi->sb_n_free)) { in hpfs_claim_alloc()
17 sbi->sb_n_free = -1; in hpfs_claim_alloc()
20 sbi->sb_n_free--; in hpfs_claim_alloc()
26 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_claim_free() local
27 if (sbi->sb_n_free != (unsigned)-1) { in hpfs_claim_free()
28 if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) { in hpfs_claim_free()
30 sbi->sb_n_free = -1; in hpfs_claim_free()
33 sbi->sb_n_free++; in hpfs_claim_free()
[all …]
Dbuffer.c16 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_search_hotfix_map() local
17 for (i = 0; unlikely(i < sbi->n_hotfixes); i++) { in hpfs_search_hotfix_map()
18 if (sbi->hotfix_from[i] == sec) { in hpfs_search_hotfix_map()
19 return sbi->hotfix_to[i]; in hpfs_search_hotfix_map()
28 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_search_hotfix_map_for_range() local
29 for (i = 0; unlikely(i < sbi->n_hotfixes); i++) { in hpfs_search_hotfix_map_for_range()
30 if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) { in hpfs_search_hotfix_map_for_range()
31 n = sbi->hotfix_from[i] - sec; in hpfs_search_hotfix_map_for_range()
Dhpfs_fn.h360 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_lock() local
361 mutex_lock(&sbi->hpfs_mutex); in hpfs_lock()
366 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_unlock() local
367 mutex_unlock(&sbi->hpfs_mutex); in hpfs_unlock()
372 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_lock_assert() local
373 WARN_ON(!mutex_is_locked(&sbi->hpfs_mutex)); in hpfs_lock_assert()
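hpfs_fn.h above shows the whole-filesystem mutex wrappers in full, but not where their sbi comes from. The assumption, consistent with every other filesystem in this listing, is that hpfs_sb() is the plain private-pointer cast:

        /* Assumed definition; hpfs_lock()/hpfs_unlock() above then reduce
         * to taking and releasing hpfs_sb(s)->hpfs_mutex. */
        static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb)
        {
                return sb->s_fs_info;
        }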
/linux-4.4.14/fs/minix/
Dinode.c43 struct minix_sb_info *sbi = minix_sb(sb); in minix_put_super() local
46 if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ in minix_put_super()
47 sbi->s_ms->s_state = sbi->s_mount_state; in minix_put_super()
48 mark_buffer_dirty(sbi->s_sbh); in minix_put_super()
50 for (i = 0; i < sbi->s_imap_blocks; i++) in minix_put_super()
51 brelse(sbi->s_imap[i]); in minix_put_super()
52 for (i = 0; i < sbi->s_zmap_blocks; i++) in minix_put_super()
53 brelse(sbi->s_zmap[i]); in minix_put_super()
54 brelse (sbi->s_sbh); in minix_put_super()
55 kfree(sbi->s_imap); in minix_put_super()
[all …]
Dbitmap.c44 struct minix_sb_info *sbi = minix_sb(sb); in minix_free_block() local
49 if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) { in minix_free_block()
53 zone = block - sbi->s_firstdatazone + 1; in minix_free_block()
56 if (zone >= sbi->s_zmap_blocks) { in minix_free_block()
60 bh = sbi->s_zmap[zone]; in minix_free_block()
72 struct minix_sb_info *sbi = minix_sb(inode->i_sb); in minix_new_block() local
76 for (i = 0; i < sbi->s_zmap_blocks; i++) { in minix_new_block()
77 struct buffer_head *bh = sbi->s_zmap[i]; in minix_new_block()
86 j += i * bits_per_zone + sbi->s_firstdatazone-1; in minix_new_block()
87 if (j < sbi->s_firstdatazone || j >= sbi->s_nzones) in minix_new_block()
[all …]
Ddir.c75 static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi) in minix_next_entry() argument
77 return (void*)((char*)de + sbi->s_dirsize); in minix_next_entry()
84 struct minix_sb_info *sbi = minix_sb(sb); in minix_readdir() local
85 unsigned chunk_size = sbi->s_dirsize; in minix_readdir()
107 for ( ; p <= limit; p = minix_next_entry(p, sbi)) { in minix_readdir()
110 if (sbi->s_version == MINIX_V3) { in minix_readdir()
120 unsigned l = strnlen(name, sbi->s_namelen); in minix_readdir()
156 struct minix_sb_info * sbi = minix_sb(sb); in minix_find_entry() local
174 limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize; in minix_find_entry()
175 for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { in minix_find_entry()
[all …]
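The minix dir.c hits walk directory pages with minix_next_entry(), whose stride is sbi->s_dirsize because the V1/V2/V3 on-disk formats use different entry sizes and name lengths. A compact sketch of that walk over one already-mapped block; scan_dir_block() and namecmp() are stand-in names, and the version-dependent offset of the name field is an assumption drawn from the MINIX_V3 branch visible in minix_readdir():

        /* Hypothetical helper: scan one directory block for a name, using
         * the per-mount geometry kept in the sbi. */
        static char *scan_dir_block(struct minix_sb_info *sbi, char *kaddr,
                                    char *limit, const char *target)
        {
                char *p;

                for (p = kaddr; p <= limit; p += sbi->s_dirsize) {
                        /* name follows the on-disk inode number: 32-bit for
                         * V3 entries, 16-bit for V1/V2 (assumed layout) */
                        const char *name = p + (sbi->s_version == MINIX_V3 ?
                                                sizeof(__u32) : sizeof(__u16));

                        if (namecmp(name, target, sbi->s_namelen))
                                return p;
                }
                return NULL;
        }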
/linux-4.4.14/fs/jfs/
Djfs_mount.c84 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_mount() local
104 sbi->ipaimap = ipaimap; in jfs_mount()
127 sbi->ipbmap = ipbmap; in jfs_mount()
148 if ((sbi->mntflag & JFS_BAD_SAIT) == 0) { in jfs_mount()
155 sbi->ipaimap2 = ipaimap2; in jfs_mount()
169 sbi->ipaimap2 = NULL; in jfs_mount()
187 sbi->ipimap = ipimap; in jfs_mount()
241 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_mount_rw() local
250 if (chkSuper(sb) || (sbi->state != FM_CLEAN)) in jfs_mount_rw()
253 truncate_inode_pages(sbi->ipimap->i_mapping, 0); in jfs_mount_rw()
[all …]
Djfs_umount.c52 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_umount() local
53 struct inode *ipbmap = sbi->ipbmap; in jfs_umount()
54 struct inode *ipimap = sbi->ipimap; in jfs_umount()
55 struct inode *ipaimap = sbi->ipaimap; in jfs_umount()
56 struct inode *ipaimap2 = sbi->ipaimap2; in jfs_umount()
67 if ((log = sbi->log)) in jfs_umount()
79 sbi->ipimap = NULL; in jfs_umount()
84 ipaimap2 = sbi->ipaimap2; in jfs_umount()
88 sbi->ipaimap2 = NULL; in jfs_umount()
94 ipaimap = sbi->ipaimap; in jfs_umount()
[all …]
Dsuper.c76 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_handle_error() local
83 if (sbi->flag & JFS_ERR_PANIC) in jfs_handle_error()
86 else if (sbi->flag & JFS_ERR_REMOUNT_RO) { in jfs_handle_error()
151 struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb); in jfs_statfs() local
153 struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap; in jfs_statfs()
157 buf->f_bsize = sbi->bsize; in jfs_statfs()
158 buf->f_blocks = sbi->bmap->db_mapsize; in jfs_statfs()
159 buf->f_bfree = sbi->bmap->db_nfree; in jfs_statfs()
160 buf->f_bavail = sbi->bmap->db_nfree; in jfs_statfs()
171 ((sbi->bmap->db_nfree >> imap->im_l2nbperiext) in jfs_statfs()
[all …]
Dresize.c65 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_extendfs() local
66 struct inode *ipbmap = sbi->ipbmap; in jfs_extendfs()
68 struct inode *ipimap = sbi->ipimap; in jfs_extendfs()
69 struct jfs_log *log = sbi->log; in jfs_extendfs()
70 struct bmap *bmp = sbi->bmap; in jfs_extendfs()
89 if (sbi->mntflag & JFS_INLINELOG) in jfs_extendfs()
90 oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd); in jfs_extendfs()
92 oldLVSize = addressPXD(&sbi->fsckpxd) + in jfs_extendfs()
93 lengthPXD(&sbi->fsckpxd); in jfs_extendfs()
138 if ((sbi->mntflag & JFS_INLINELOG)) { in jfs_extendfs()
[all …]
Djfs_imap.c306 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); in diRead() local
321 ipimap = sbi->ipimap; in diRead()
352 blkno = INOPBLK(&iagp->inoext[extno], ino, sbi->l2nbperpage); in diRead()
360 pageno = blkno >> sbi->l2nbperpage; in diRead()
362 if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) { in diRead()
367 (sbi->nbperpage - block_offset) << sbi->l2niperblk; in diRead()
370 rel_inode += block_offset << sbi->l2niperblk; in diRead()
378 mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1); in diRead()
431 struct jfs_sb_info *sbi = JFS_SBI(sb); in diReadSpecial() local
444 address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage; in diReadSpecial()
[all …]
Djfs_logmgr.c180 static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi,
217 struct jfs_sb_info *sbi; in write_special_inodes() local
219 list_for_each_entry(sbi, &log->sb_list, log_list) { in write_special_inodes()
220 writer(sbi->ipbmap->i_mapping); in write_special_inodes()
221 writer(sbi->ipimap->i_mapping); in write_special_inodes()
222 writer(sbi->direct_inode->i_mapping); in write_special_inodes()
1084 struct jfs_sb_info *sbi = JFS_SBI(sb); in lmLogOpen() local
1086 if (sbi->flag & JFS_NOINTEGRITY) in lmLogOpen()
1089 if (sbi->mntflag & JFS_INLINELOG) in lmLogOpen()
1094 if (log->bdev->bd_dev == sbi->logdev) { in lmLogOpen()
[all …]
Djfs_extent.c88 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); in extAlloc() local
104 xoff = pno << sbi->l2nbperpage; in extAlloc()
512 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); in extBalloc() local
514 int rc, nbperpage = sbi->nbperpage; in extBalloc()
515 struct bmap *bmp = sbi->bmap; in extBalloc()
550 ag = BLKTOAG(daddr, sbi); in extBalloc()
Dxattr.c221 struct jfs_sb_info *sbi = JFS_SBI(sb); in ea_write() local
261 for (i = 0; i < nblocks; i += sbi->nbperpage) { in ea_write()
370 struct jfs_sb_info *sbi = JFS_SBI(sb); in ea_read() local
393 nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage; in ea_read()
394 blkno = addressDXD(&ji->ea) << sbi->l2nbperpage; in ea_read()
401 for (i = 0; i < nblocks; i += sbi->nbperpage) { in ea_read()
Djfs_dmap.h134 #define BLKTOAG(b,sbi) ((b) >> ((sbi)->bmap->db_agl2size)) argument
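The BLKTOAG() macro above maps a disk block number to its allocation group with a single right shift by the per-map log2 AG size. A small standalone illustration of that arithmetic; the log2 value of 13 is an arbitrary example, not a jfs default:

```c
#include <stdio.h>
#include <stdint.h>

/* Mirror of the shift in BLKTOAG(): block >> log2(blocks per AG). */
static uint64_t blk_to_ag(uint64_t block, int agl2size)
{
	return block >> agl2size;
}

int main(void)
{
	int agl2size = 13;	/* hypothetical: 2^13 blocks per allocation group */
	uint64_t blocks[] = { 0, 8191, 8192, 100000 };

	for (unsigned i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
		printf("block %llu -> AG %llu\n",
		       (unsigned long long)blocks[i],
		       (unsigned long long)blk_to_ag(blocks[i], agl2size));
	return 0;
}
```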
Djfs_dtree.c337 struct jfs_sb_info *sbi = JFS_SBI(sb); in add_index() local
385 if (dquot_alloc_block(ip, sbi->nbperpage)) in add_index()
387 if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) { in add_index()
388 dquot_free_block(ip, sbi->nbperpage); in add_index()
406 if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) { in add_index()
411 dbFree(ip, xaddr, sbi->nbperpage); in add_index()
412 dquot_free_block(ip, sbi->nbperpage); in add_index()
447 blkno = ((offset + 1) >> L2PSIZE) << sbi->l2nbperpage; in add_index()
453 if (xtInsert(tid, ip, 0, blkno, sbi->nbperpage, &xaddr, 0)) { in add_index()
933 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); in dtSplitUp() local
[all …]
Djfs_dmap.c1028 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); in dbExtend() local
1034 struct inode *ipbmap = sbi->ipbmap; in dbExtend()
1040 if (((rel_block = blkno & (sbi->nbperpage - 1))) && in dbExtend()
1041 (rel_block + nblocks + addnblocks > sbi->nbperpage)) in dbExtend()
1055 bmp = sbi->bmap; in dbExtend()
3386 struct jfs_sb_info *sbi = JFS_SBI(ipbmap->i_sb); in dbExtendFS() local
3387 int nbperpage = sbi->nbperpage; in dbExtendFS()
3395 struct bmap *bmp = sbi->bmap; in dbExtendFS()
3478 p = BLKTOL1(blkno, sbi->l2nbperpage); /* L1 page */ in dbExtendFS()
3495 p = BLKTOL0(blkno, sbi->l2nbperpage); in dbExtendFS()
[all …]
Djfs_txnmgr.c2752 struct jfs_sb_info *sbi; in jfs_lazycommit() local
2762 sbi = JFS_SBI(tblk->sb); in jfs_lazycommit()
2769 if (sbi->commit_state & IN_LAZYCOMMIT) in jfs_lazycommit()
2772 sbi->commit_state |= IN_LAZYCOMMIT; in jfs_lazycommit()
2784 sbi->commit_state &= ~IN_LAZYCOMMIT; in jfs_lazycommit()
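Across the jfs hits above, and in every other filesystem in these results, `sbi` is reached through a per-filesystem helper (JFS_SBI(), MSDOS_SB(), EXT4_SB(), QNX6_SB(), and so on) that resolves to the superblock's private pointer; the `sb->s_fs_info = sbi` side of the convention is visible in the exofs, cramfs, and hypfs hits below. A minimal userspace sketch of that pattern, using invented names (`demo_sb_info`, `DEMO_SB()`) rather than any kernel definition:

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for struct super_block and a filesystem's sb-info. */
struct super_block {
	void *s_fs_info;		/* filesystem-private mount state */
};

struct demo_sb_info {
	unsigned int block_size;
	unsigned long free_blocks;
};

/* The accessor pattern: cast the private pointer back to the fs type. */
static inline struct demo_sb_info *DEMO_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

int main(void)
{
	struct super_block sb;
	struct demo_sb_info *sbi = calloc(1, sizeof(*sbi));

	if (!sbi)
		return 1;
	sbi->block_size = 4096;
	sbi->free_blocks = 1234;
	sb.s_fs_info = sbi;		/* done once at mount time */

	printf("bsize=%u free=%lu\n",
	       DEMO_SB(&sb)->block_size, DEMO_SB(&sb)->free_blocks);
	free(sbi);
	return 0;
}
```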
/linux-4.4.14/fs/exofs/
Dsuper.c266 static int __sbi_read_stats(struct exofs_sb_info *sbi) in __sbi_read_stats() argument
274 ret = ore_get_io_state(&sbi->layout, &sbi->oc, &ios); in __sbi_read_stats()
305 sbi->s_nextid = le64_to_cpu(ess->s_nextid); in __sbi_read_stats()
306 sbi->s_numfiles = le32_to_cpu(ess->s_numfiles); in __sbi_read_stats()
321 int exofs_sbi_write_stats(struct exofs_sb_info *sbi) in exofs_sbi_write_stats() argument
329 ret = ore_get_io_state(&sbi->layout, &sbi->oc, &ios); in exofs_sbi_write_stats()
335 sbi->s_ess.s_nextid = cpu_to_le64(sbi->s_nextid); in exofs_sbi_write_stats()
336 sbi->s_ess.s_numfiles = cpu_to_le64(sbi->s_numfiles); in exofs_sbi_write_stats()
337 attrs[0].val_ptr = &sbi->s_ess; in exofs_sbi_write_stats()
341 ios->private = sbi; in exofs_sbi_write_stats()
[all …]
Dexofs.h181 int exofs_sbi_write_stats(struct exofs_sb_info *sbi);
186 int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
188 void exofs_sysfs_sb_del(struct exofs_sb_info *sbi);
190 struct exofs_sb_info *sbi);
222 struct exofs_sb_info *sbi, osd_id oid) in exofs_init_comps() argument
226 one_comp->obj.partition = sbi->one_comp.obj.partition; in exofs_init_comps()
231 oc->numdevs = sbi->layout.group_width * sbi->layout.mirrors_p1 * in exofs_init_comps()
232 sbi->layout.group_count; in exofs_init_comps()
237 first_dev = (dev_mod * sbi->layout.mirrors_p1) % sbi->oc.numdevs; in exofs_init_comps()
238 oc->ods = &sbi->oc.ods[first_dev]; in exofs_init_comps()
Dinode.c50 struct exofs_sb_info *sbi; member
69 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; in _pcol_init() local
71 pcol->sbi = sbi; in _pcol_init()
102 exofs_max_io_pages(&pcol->sbi->layout, ~0); in _pcol_reset()
110 pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages); in pcol_try_alloc()
248 atomic_dec(&pcol->sbi->s_curr_pending); in readpages_done()
319 int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true, in read_exec()
359 atomic_inc(&pcol->sbi->s_curr_pending); in read_exec()
528 atomic_dec(&pcol->sbi->s_curr_pending); in writepages_done()
635 ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false, in write_exec()
[all …]
Dsys.c134 void exofs_sysfs_sb_del(struct exofs_sb_info *sbi) in exofs_sysfs_sb_del() argument
137 struct kobject *s_kobj = &sbi->s_kobj; in exofs_sysfs_sb_del()
152 int exofs_sysfs_sb_add(struct exofs_sb_info *sbi, in exofs_sysfs_sb_add() argument
157 uint64_t pid = sbi->one_comp.obj.partition; in exofs_sysfs_sb_add()
160 s_kobj = &sbi->s_kobj; in exofs_sysfs_sb_add()
172 int exofs_sysfs_odev_add(struct exofs_dev *edev, struct exofs_sb_info *sbi) in exofs_sysfs_odev_add() argument
183 &sbi->s_kobj, "dev%u", edev->did); in exofs_sysfs_odev_add()
Ddir.c433 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; in exofs_add_link() local
508 sbi->s_numfiles++; in exofs_add_link()
523 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; in exofs_delete_entry() local
559 sbi->s_numfiles--; in exofs_delete_entry()
/linux-4.4.14/fs/fat/
Dinode.c116 struct msdos_sb_info *sbi = MSDOS_SB(sb); in __fat_get_block() local
138 offset = (unsigned long)iblock & (sbi->sec_per_clus - 1); in __fat_get_block()
146 mapped_blocks = sbi->sec_per_clus - offset; in __fat_get_block()
337 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_hash_init() local
340 spin_lock_init(&sbi->inode_hash_lock); in fat_hash_init()
342 INIT_HLIST_HEAD(&sbi->inode_hashtable[i]); in fat_hash_init()
352 struct msdos_sb_info *sbi = MSDOS_SB(sb); in dir_hash_init() local
355 spin_lock_init(&sbi->dir_hash_lock); in dir_hash_init()
357 INIT_HLIST_HEAD(&sbi->dir_hashtable[i]); in dir_hash_init()
362 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_attach() local
[all …]
Dfatent.c24 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat12_ent_blocknr() local
26 WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); in fat12_ent_blocknr()
28 *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); in fat12_ent_blocknr()
34 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_ent_blocknr() local
35 int bytes = (entry << sbi->fatent_shift); in fat_ent_blocknr()
36 WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); in fat_ent_blocknr()
38 *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); in fat_ent_blocknr()
276 static inline void lock_fat(struct msdos_sb_info *sbi) in lock_fat() argument
278 mutex_lock(&sbi->fat_lock); in lock_fat()
281 static inline void unlock_fat(struct msdos_sb_info *sbi) in unlock_fat() argument
[all …]
Dmisc.c62 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_clusters_flush() local
66 if (sbi->fat_bits != 32) in fat_clusters_flush()
69 bh = sb_bread(sb, sbi->fsinfo_sector); in fat_clusters_flush()
82 sbi->fsinfo_sector); in fat_clusters_flush()
84 if (sbi->free_clusters != -1) in fat_clusters_flush()
85 fsinfo->free_clusters = cpu_to_le32(sbi->free_clusters); in fat_clusters_flush()
86 if (sbi->prev_free != -1) in fat_clusters_flush()
87 fsinfo->next_cluster = cpu_to_le32(sbi->prev_free); in fat_clusters_flush()
102 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_chain_add() local
152 if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) { in fat_chain_add()
[all …]
Dfile.c33 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_ioctl_set_attributes() local
64 ia.ia_mode = fat_make_mode(sbi, attr, S_IRWXUGO); in fat_ioctl_set_attributes()
66 ia.ia_mode = fat_make_mode(sbi, attr, in fat_ioctl_set_attributes()
76 if (sbi->options.sys_immutable && in fat_ioctl_set_attributes()
98 if (sbi->options.sys_immutable) { in fat_ioctl_set_attributes()
116 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_ioctl_get_volume_id() local
117 return put_user(sbi->vol_id, user_attr); in fat_ioctl_get_volume_id()
290 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_truncate_blocks() local
291 const unsigned int cluster_size = sbi->cluster_size; in fat_truncate_blocks()
301 nr_clusters = (offset + (cluster_size - 1)) >> sbi->cluster_bits; in fat_truncate_blocks()
[all …]
Dfat.h158 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_mode_can_hold_ro() local
162 if (!sbi->options.rodir) in fat_mode_can_hold_ro()
164 mask = ~sbi->options.fs_dmask; in fat_mode_can_hold_ro()
166 mask = ~sbi->options.fs_fmask; in fat_mode_can_hold_ro()
174 static inline umode_t fat_make_mode(struct msdos_sb_info *sbi, in fat_make_mode() argument
177 if (attrs & ATTR_RO && !((attrs & ATTR_DIR) && !sbi->options.rodir)) in fat_make_mode()
181 return (mode & ~sbi->options.fs_dmask) | S_IFDIR; in fat_make_mode()
183 return (mode & ~sbi->options.fs_fmask) | S_IFREG; in fat_make_mode()
216 static inline sector_t fat_clus_to_blknr(struct msdos_sb_info *sbi, int clus) in fat_clus_to_blknr() argument
218 return ((sector_t)clus - FAT_START_ENT) * sbi->sec_per_clus in fat_clus_to_blknr()
[all …]
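fat_make_mode() in the Dfat.h hits above builds an inode mode by clearing the bits named in the mount's dmask or fmask and OR-ing in the file type. A standalone sketch of just that mask arithmetic; the mask values are arbitrary examples, not FAT defaults:

```c
#include <stdio.h>
#include <sys/stat.h>

/* Apply a umask-style mount option to a base permission set,
 * following the shape of fat_make_mode(). */
static mode_t make_mode(mode_t perms, mode_t mask, int is_dir)
{
	if (is_dir)
		return (perms & ~mask) | S_IFDIR;
	return (perms & ~mask) | S_IFREG;
}

int main(void)
{
	mode_t fs_fmask = 022;	/* hypothetical fmask= mount option */
	mode_t fs_dmask = 002;	/* hypothetical dmask= mount option */

	printf("file: %o\n", (unsigned)make_mode(0777, fs_fmask, 0)); /* prints 100755 */
	printf("dir:  %o\n", (unsigned)make_mode(0777, fs_dmask, 1)); /* prints 40775 */
	return 0;
}
```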
Dnfs.c34 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_dget() local
39 head = sbi->dir_hashtable + fat_dir_hash(i_logstart); in fat_dget()
40 spin_lock(&sbi->dir_hash_lock); in fat_dget()
49 spin_unlock(&sbi->dir_hash_lock); in fat_dget()
113 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_encode_fh_nostale() local
130 i_pos = fat_i_pos_read(sbi, inode); in fat_encode_fh_nostale()
136 i_pos = fat_i_pos_read(sbi, parent); in fat_encode_fh_nostale()
230 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_rebuild_parent() local
231 sector_t blknr = fat_clus_to_blknr(sbi, parent_logstart); in fat_rebuild_parent()
240 clus_to_match = fat_get_start(sbi, &de[0]); in fat_rebuild_parent()
[all …]
Ddir.c51 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_dir_readahead() local
56 if ((iblock & (sbi->sec_per_clus - 1)) || sbi->sec_per_clus == 1) in fat_dir_readahead()
59 if ((sbi->fat_bits != 32) && (dir->i_ino == MSDOS_ROOT_INO)) in fat_dir_readahead()
64 for (sec = 0; sec < sbi->sec_per_clus; sec++) in fat_dir_readahead()
184 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_uni_to_x8() local
185 if (sbi->options.utf8) in fat_uni_to_x8()
189 return uni16_to_x8(sb, buf, uni, size, sbi->nls_io); in fat_uni_to_x8()
254 static inline int fat_name_match(struct msdos_sb_info *sbi, in fat_name_match() argument
261 if (sbi->options.name_check != 's') in fat_name_match()
262 return !nls_strnicmp(sbi->nls_io, a, b, a_len); in fat_name_match()
[all …]
Dcache.c308 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_bmap() local
316 if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) { in fat_bmap()
317 if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) { in fat_bmap()
318 *phys = sector + sbi->dir_start; in fat_bmap()
339 cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); in fat_bmap()
340 offset = sector & (sbi->sec_per_clus - 1); in fat_bmap()
345 *phys = fat_clus_to_blknr(sbi, cluster) + offset; in fat_bmap()
346 *mapped_blocks = sbi->sec_per_clus - offset; in fat_bmap()
Dnamei_msdos.c120 struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb); in msdos_find() local
124 err = msdos_format_name(name, len, msdos_name, &sbi->options); in msdos_find()
129 if (!err && sbi->options.dotsOK) { in msdos_find()
229 struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb); in msdos_add_entry() local
239 fat_time_unix2fat(sbi, ts, &time, &date, NULL); in msdos_add_entry()
Dnamei_vfat.c583 struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb); in vfat_build_slots() local
584 struct fat_mount_options *opts = &sbi->options; in vfat_build_slots()
602 opts->unicode_xlate, opts->utf8, sbi->nls_io); in vfat_build_slots()
610 err = vfat_create_shortname(dir, sbi->nls_disk, uname, ulen, in vfat_build_slots()
644 fat_time_unix2fat(sbi, ts, &time, &date, &time_cs); in vfat_build_slots()
/linux-4.4.14/fs/ext4/
Dsuper.c122 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_superblock_csum() local
126 csum = ext4_chksum(sbi, ~0, (char *)es, offset); in ext4_superblock_csum()
338 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_journal_commit_callback() local
343 spin_lock(&sbi->s_md_lock); in ext4_journal_commit_callback()
348 spin_unlock(&sbi->s_md_lock); in ext4_journal_commit_callback()
350 spin_lock(&sbi->s_md_lock); in ext4_journal_commit_callback()
352 spin_unlock(&sbi->s_md_lock); in ext4_journal_commit_callback()
759 static void ext4_blkdev_remove(struct ext4_sb_info *sbi) in ext4_blkdev_remove() argument
762 bdev = sbi->journal_bdev; in ext4_blkdev_remove()
765 sbi->journal_bdev = NULL; in ext4_blkdev_remove()
[all …]
Dsysfs.c51 struct ext4_sb_info *sbi, char *buf) in session_write_kbytes_show() argument
53 struct super_block *sb = sbi->s_buddy_cache->i_sb; in session_write_kbytes_show()
59 sbi->s_sectors_written_start) >> 1); in session_write_kbytes_show()
63 struct ext4_sb_info *sbi, char *buf) in lifetime_write_kbytes_show() argument
65 struct super_block *sb = sbi->s_buddy_cache->i_sb; in lifetime_write_kbytes_show()
70 (unsigned long long)(sbi->s_kbytes_written + in lifetime_write_kbytes_show()
76 struct ext4_sb_info *sbi, in inode_readahead_blks_store() argument
89 sbi->s_inode_readahead_blks = t; in inode_readahead_blks_store()
94 struct ext4_sb_info *sbi, in reserved_clusters_store() argument
98 ext4_fsblk_t clusters = (ext4_blocks_count(sbi->s_es) >> in reserved_clusters_store()
[all …]
Dbitmap.c24 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_inode_bitmap_csum_verify() local
30 calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); in ext4_inode_bitmap_csum_verify()
31 if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) { in ext4_inode_bitmap_csum_verify()
45 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_inode_bitmap_csum_set() local
50 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); in ext4_inode_bitmap_csum_set()
52 if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) in ext4_inode_bitmap_csum_set()
62 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_block_bitmap_csum_verify() local
69 calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); in ext4_block_bitmap_csum_verify()
70 if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) { in ext4_block_bitmap_csum_verify()
88 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_block_bitmap_csum_set() local
[all …]
Dblock_validity.c56 static int add_system_zone(struct ext4_sb_info *sbi, in add_system_zone() argument
61 struct rb_node **n = &sbi->system_blks.rb_node, *node; in add_system_zone()
93 rb_insert_color(new_node, &sbi->system_blks); in add_system_zone()
103 rb_erase(node, &sbi->system_blks); in add_system_zone()
114 rb_erase(node, &sbi->system_blks); in add_system_zone()
121 static void debug_print_tree(struct ext4_sb_info *sbi) in debug_print_tree() argument
128 node = rb_first(&sbi->system_blks); in debug_print_tree()
142 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_setup_system_zone() local
145 int flex_size = ext4_flex_bg_size(sbi); in ext4_setup_system_zone()
159 add_system_zone(sbi, ext4_group_first_block_no(sb, i), in ext4_setup_system_zone()
[all …]
Dballoc.c93 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_num_overhead_clusters() local
113 block_cluster = EXT4_B2C(sbi, in ext4_num_overhead_clusters()
124 inode_cluster = EXT4_B2C(sbi, in ext4_num_overhead_clusters()
135 for (i = 0; i < sbi->s_itb_per_group; i++) { in ext4_num_overhead_clusters()
137 c = EXT4_B2C(sbi, itbl_blk + i - start); in ext4_num_overhead_clusters()
184 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_init_block_bitmap() local
196 percpu_counter_sub(&sbi->s_freeclusters_counter, in ext4_init_block_bitmap()
202 percpu_counter_sub(&sbi->s_freeinodes_counter, in ext4_init_block_bitmap()
222 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); in ext4_init_block_bitmap()
226 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); in ext4_init_block_bitmap()
[all …]
Dextents_status.c149 static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
301 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_es_list_add() local
306 spin_lock(&sbi->s_es_lock); in ext4_es_list_add()
308 list_add_tail(&ei->i_es_list, &sbi->s_es_list); in ext4_es_list_add()
309 sbi->s_es_nr_inode++; in ext4_es_list_add()
311 spin_unlock(&sbi->s_es_lock); in ext4_es_list_add()
317 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_es_list_del() local
319 spin_lock(&sbi->s_es_lock); in ext4_es_list_del()
322 sbi->s_es_nr_inode--; in ext4_es_list_del()
323 WARN_ON_ONCE(sbi->s_es_nr_inode < 0); in ext4_es_list_del()
[all …]
Dmballoc.c668 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_mb_mark_free_simple() local
693 buddy + sbi->s_mb_offsets[min]); in ext4_mb_mark_free_simple()
726 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_mb_generate_buddy() local
765 percpu_counter_sub(&sbi->s_freeclusters_counter, in ext4_mb_generate_buddy()
1122 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_mb_load_buddy() local
1123 struct inode *inode = sbi->s_buddy_cache; in ext4_mb_load_buddy()
1453 struct ext4_sb_info *sbi = EXT4_SB(sb); in mb_free_blocks() local
1465 percpu_counter_sub(&sbi->s_freeclusters_counter, in mb_free_blocks()
1637 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_use_best_found() local
1668 spin_lock(&sbi->s_md_lock); in ext4_mb_use_best_found()
[all …]
Dialloc.c73 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_init_inode_bitmap() local
81 percpu_counter_sub(&sbi->s_freeclusters_counter, in ext4_init_inode_bitmap()
87 percpu_counter_sub(&sbi->s_freeinodes_counter, in ext4_init_inode_bitmap()
121 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_validate_inode_bitmap() local
139 percpu_counter_sub(&sbi->s_freeinodes_counter, in ext4_validate_inode_bitmap()
264 struct ext4_sb_info *sbi; in ext4_free_inode() local
284 sbi = EXT4_SB(sb); in ext4_free_inode()
347 percpu_counter_dec(&sbi->s_dirs_counter); in ext4_free_inode()
354 percpu_counter_inc(&sbi->s_freeinodes_counter); in ext4_free_inode()
355 if (sbi->s_log_groups_per_flex) { in ext4_free_inode()
[all …]
Dresize.c88 struct ext4_sb_info *sbi = EXT4_SB(sb); in verify_group_input() local
89 struct ext4_super_block *es = sbi->s_es; in verify_group_input()
93 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; in verify_group_input()
100 if (group != sbi->s_groups_count) { in verify_group_input()
102 input->group, sbi->s_groups_count); in verify_group_input()
109 input->blocks_count - 2 - overhead - sbi->s_itb_per_group; in verify_group_input()
479 struct ext4_sb_info *sbi = EXT4_SB(sb); in setup_new_flex_group_blocks() local
480 struct ext4_super_block *es = sbi->s_es; in setup_new_flex_group_blocks()
490 group_data[0].group != sbi->s_groups_count); in setup_new_flex_group_blocks()
541 memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data, in setup_new_flex_group_blocks()
[all …]
Dioctl.c100 struct ext4_sb_info *sbi = EXT4_SB(sb); in swap_inode_boot_loader() local
159 spin_lock(&sbi->s_next_gen_lock); in swap_inode_boot_loader()
160 inode->i_generation = sbi->s_next_generation++; in swap_inode_boot_loader()
161 inode_bl->i_generation = sbi->s_next_generation++; in swap_inode_boot_loader()
162 spin_unlock(&sbi->s_next_gen_lock); in swap_inode_boot_loader()
641 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_ioctl() local
646 if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) { in ext4_ioctl()
655 err = ext4_journal_get_write_access(handle, sbi->s_sbh); in ext4_ioctl()
658 generate_random_uuid(sbi->s_es->s_encrypt_pw_salt); in ext4_ioctl()
660 sbi->s_sbh); in ext4_ioctl()
[all …]
Dextents.c64 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_extent_block_csum() local
67 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh, in ext4_extent_block_csum()
1876 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, in ext4_ext_check_overlap() argument
1890 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); in ext4_ext_check_overlap()
1900 b2 = EXT4_LBLK_CMASK(sbi, b2); in ext4_ext_check_overlap()
2481 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_remove_blocks() local
2503 *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) { in ext4_remove_blocks()
2505 EXT4_C2B(sbi, *partial_cluster), in ext4_remove_blocks()
2506 sbi->s_cluster_ratio, flags); in ext4_remove_blocks()
2512 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_remove_blocks() local
[all …]
Dext4_jbd2.h184 struct ext4_sb_info *sbi = in ext4_journal_callback_add() local
189 spin_lock(&sbi->s_md_lock); in ext4_journal_callback_add()
191 spin_unlock(&sbi->s_md_lock); in ext4_journal_callback_add()
204 struct ext4_sb_info *sbi = in ext4_journal_callback_try_del() local
207 spin_lock(&sbi->s_md_lock); in ext4_journal_callback_try_del()
210 spin_unlock(&sbi->s_md_lock); in ext4_journal_callback_try_del()
Dfile.c126 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_file_write_iter() local
128 if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) { in ext4_file_write_iter()
132 iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos); in ext4_file_write_iter()
363 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_file_open() local
369 if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) && in ext4_file_open()
371 sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED; in ext4_file_open()
389 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); in ext4_file_open()
390 err = ext4_journal_get_write_access(handle, sbi->s_sbh); in ext4_file_open()
395 strlcpy(sbi->s_es->s_last_mounted, cp, in ext4_file_open()
396 sizeof(sbi->s_es->s_last_mounted)); in ext4_file_open()
Dinode.c53 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_inode_csum() local
66 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, in ext4_inode_csum()
329 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_da_update_reserve_space() local
345 percpu_counter_sub(&sbi->s_dirtyclusters_counter, used); in ext4_da_update_reserve_space()
351 dquot_claim_block(inode, EXT4_C2B(sbi, used)); in ext4_da_update_reserve_space()
358 dquot_release_reservation_block(inode, EXT4_C2B(sbi, used)); in ext4_da_update_reserve_space()
1322 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_da_reserve_space() local
1331 ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1)); in ext4_da_reserve_space()
1336 if (ext4_claim_free_clusters(sbi, 1, 0)) { in ext4_da_reserve_space()
1338 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); in ext4_da_reserve_space()
[all …]
Dcrypto_key.c124 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in _ext4_get_encryption_info() local
151 if (!DUMMY_ENCRYPTION_ENABLED(sbi)) in _ext4_get_encryption_info()
192 if (DUMMY_ENCRYPTION_ENABLED(sbi)) { in _ext4_get_encryption_info()
Dextents_status.h172 extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi);
173 extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
Dnamei.c325 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_dirent_csum() local
329 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); in ext4_dirent_csum()
420 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_dx_csum() local
429 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); in ext4_dx_csum()
430 csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail)); in ext4_dx_csum()
2750 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_orphan_add() local
2755 if (!sbi->s_journal || is_bad_inode(inode)) in ext4_orphan_add()
2776 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); in ext4_orphan_add()
2777 err = ext4_journal_get_write_access(handle, sbi->s_sbh); in ext4_orphan_add()
2785 mutex_lock(&sbi->s_orphan_lock); in ext4_orphan_add()
[all …]
Dext4.h253 #define EXT4_B2C(sbi, blk) ((blk) >> (sbi)->s_cluster_bits) argument
255 #define EXT4_C2B(sbi, cluster) ((cluster) << (sbi)->s_cluster_bits) argument
257 #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ argument
258 (sbi)->s_cluster_bits)
1276 #define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \ argument
1279 #define DUMMY_ENCRYPTION_ENABLED(sbi) (0) argument
1989 static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc, in ext4_chksum() argument
1998 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx)); in ext4_chksum()
2000 desc.shash.tfm = sbi->s_chksum_driver; in ext4_chksum()
2227 extern int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
[all …]
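The Dext4.h hits spell out the block/cluster conversions: EXT4_B2C() shifts a block number down by s_cluster_bits, EXT4_C2B() shifts a cluster number up, and EXT4_NUM_B2C() rounds a block count up to whole clusters. A small standalone check of that arithmetic; a cluster ratio of 16 (4 bits) is an arbitrary example, and the struct is a stand-in holding only the two fields the macros use:

```c
#include <stdio.h>

struct demo_sbi {
	unsigned int s_cluster_bits;	/* log2(blocks per cluster) */
	unsigned int s_cluster_ratio;	/* blocks per cluster */
};

#define B2C(sbi, blk)      ((blk) >> (sbi)->s_cluster_bits)
#define C2B(sbi, cluster)  ((cluster) << (sbi)->s_cluster_bits)
#define NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
			    (sbi)->s_cluster_bits)

int main(void)
{
	struct demo_sbi sbi = { .s_cluster_bits = 4, .s_cluster_ratio = 16 };

	printf("block 100 is in cluster %u\n", B2C(&sbi, 100u));	/* 6 */
	printf("cluster 6 starts at block %u\n", C2B(&sbi, 6u));	/* 96 */
	printf("17 blocks need %u clusters\n", NUM_B2C(&sbi, 17u));	/* 2 */
	return 0;
}
```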
Dmmp.c12 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_mmp_csum() local
16 csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset); in ext4_mmp_csum()
Dpage-io.c214 struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb); in ext4_add_complete_io() local
220 WARN_ON(!io_end->handle && sbi->s_journal); in ext4_add_complete_io()
222 wq = sbi->rsv_conversion_wq; in ext4_add_complete_io()
Dxattr.c124 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_xattr_block_csum() local
131 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr, in ext4_xattr_block_csum()
133 csum = ext4_chksum(sbi, csum, (__u8 *)hdr, in ext4_xattr_block_csum()
/linux-4.4.14/fs/qnx6/
Dinode.c49 struct qnx6_sb_info *sbi = QNX6_SB(sb); in qnx6_show_options() local
51 if (sbi->s_mount_opt & QNX6_MOUNT_MMI_FS) in qnx6_show_options()
65 struct qnx6_sb_info *sbi = QNX6_SB(sb); in qnx6_get_devblock() local
66 return fs32_to_cpu(sbi, block) + sbi->s_blks_off; in qnx6_get_devblock()
114 struct qnx6_sb_info *sbi = QNX6_SB(s); in qnx6_block_map() local
120 int ptrbits = sbi->s_ptrbits; in qnx6_block_map()
158 struct qnx6_sb_info *sbi = QNX6_SB(sb); in qnx6_statfs() local
163 buf->f_blocks = fs32_to_cpu(sbi, sbi->sb->sb_num_blocks); in qnx6_statfs()
164 buf->f_bfree = fs32_to_cpu(sbi, sbi->sb->sb_free_blocks); in qnx6_statfs()
165 buf->f_files = fs32_to_cpu(sbi, sbi->sb->sb_num_inodes); in qnx6_statfs()
[all …]
Dsuper_mmi.c40 struct qnx6_sb_info *sbi; in qnx6_mmi_fill_super() local
51 sbi = QNX6_SB(s); in qnx6_mmi_fill_super()
52 if (fs32_to_cpu(sbi, sb1->sb_magic) != QNX6_SUPER_MAGIC) { in qnx6_mmi_fill_super()
60 if (fs32_to_cpu(sbi, sb1->sb_checksum) != in qnx6_mmi_fill_super()
67 offset = fs32_to_cpu(sbi, sb1->sb_num_blocks) + QNX6_SUPERBLOCK_AREA / in qnx6_mmi_fill_super()
68 fs32_to_cpu(sbi, sb1->sb_blocksize); in qnx6_mmi_fill_super()
71 if (!sb_set_blocksize(s, fs32_to_cpu(sbi, sb1->sb_blocksize))) { in qnx6_mmi_fill_super()
89 if (fs32_to_cpu(sbi, sb2->sb_magic) != QNX6_SUPER_MAGIC) { in qnx6_mmi_fill_super()
96 if (fs32_to_cpu(sbi, sb2->sb_checksum) in qnx6_mmi_fill_super()
108 if (fs64_to_cpu(sbi, sb1->sb_serial) > in qnx6_mmi_fill_super()
[all …]
Dqnx6.h77 static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n) in fs64_to_cpu() argument
79 if (sbi->s_bytesex == BYTESEX_LE) in fs64_to_cpu()
85 static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n) in cpu_to_fs64() argument
87 if (sbi->s_bytesex == BYTESEX_LE) in cpu_to_fs64()
93 static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n) in fs32_to_cpu() argument
95 if (sbi->s_bytesex == BYTESEX_LE) in fs32_to_cpu()
101 static inline __fs32 cpu_to_fs32(struct qnx6_sb_info *sbi, __u32 n) in cpu_to_fs32() argument
103 if (sbi->s_bytesex == BYTESEX_LE) in cpu_to_fs32()
109 static inline __u16 fs16_to_cpu(struct qnx6_sb_info *sbi, __fs16 n) in fs16_to_cpu() argument
111 if (sbi->s_bytesex == BYTESEX_LE) in fs16_to_cpu()
[all …]
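The Dqnx6.h helpers above branch on a byte-sex flag kept in `sbi` so the same code can read little- or big-endian filesystem images; their return statements are elided in the listing. A self-contained sketch of that general idea with invented names (`demo_`), assuming only that the elided bodies byte-swap when the on-disk order differs from the host (a little-endian host is assumed for brevity):

```c
#include <stdio.h>
#include <stdint.h>

enum demo_bytesex { DEMO_LE, DEMO_BE };

struct demo_sbi {
	enum demo_bytesex s_bytesex;	/* detected once at mount time */
};

static uint32_t swap32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) | (x << 24);
}

/* Convert an on-disk 32-bit value to host order, per the sbi flag. */
static uint32_t demo_fs32_to_cpu(const struct demo_sbi *sbi, uint32_t raw)
{
	return sbi->s_bytesex == DEMO_LE ? raw : swap32(raw);
}

int main(void)
{
	struct demo_sbi le = { DEMO_LE }, be = { DEMO_BE };
	uint32_t on_disk = 0x11223344u;

	printf("LE image: %#x\n", demo_fs32_to_cpu(&le, on_disk));	/* 0x11223344 */
	printf("BE image: %#x\n", demo_fs32_to_cpu(&be, on_disk));	/* 0x44332211 */
	return 0;
}
```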
Ddir.c48 struct qnx6_sb_info *sbi = QNX6_SB(sb); in qnx6_longname() local
49 u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */ in qnx6_longname()
53 struct address_space *mapping = sbi->longfile->i_mapping; in qnx6_longname()
68 struct qnx6_sb_info *sbi = QNX6_SB(s); in qnx6_dir_longfilename() local
84 lf_size = fs16_to_cpu(sbi, lf->lf_size); in qnx6_dir_longfilename()
95 if (!test_opt(s, MMI_FS) && fs32_to_cpu(sbi, de->de_checksum) != in qnx6_dir_longfilename()
115 struct qnx6_sb_info *sbi = QNX6_SB(s); in qnx6_readdir() local
140 u32 no_inode = fs32_to_cpu(sbi, de->de_inode); in qnx6_readdir()
178 struct qnx6_sb_info *sbi = QNX6_SB(s); in qnx6_long_match() local
186 thislen = fs16_to_cpu(sbi, lf->lf_size); in qnx6_long_match()
[all …]
/linux-4.4.14/fs/hfs/
Dsuper.c56 struct hfs_sb_info *sbi; in flush_mdb() local
59 sbi = container_of(work, struct hfs_sb_info, mdb_work.work); in flush_mdb()
60 sb = sbi->sb; in flush_mdb()
62 spin_lock(&sbi->work_lock); in flush_mdb()
63 sbi->work_queued = 0; in flush_mdb()
64 spin_unlock(&sbi->work_lock); in flush_mdb()
71 struct hfs_sb_info *sbi = HFS_SB(sb); in hfs_mark_mdb_dirty() local
77 spin_lock(&sbi->work_lock); in hfs_mark_mdb_dirty()
78 if (!sbi->work_queued) { in hfs_mark_mdb_dirty()
80 queue_delayed_work(system_long_wq, &sbi->mdb_work, delay); in hfs_mark_mdb_dirty()
[all …]
/linux-4.4.14/fs/udf/
Dsuper.c262 struct udf_sb_info *sbi = UDF_SB(sb); in module_exit() local
264 sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map), in module_exit()
266 if (!sbi->s_partmaps) { in module_exit()
269 sbi->s_partitions = 0; in module_exit()
273 sbi->s_partitions = count; in module_exit()
325 struct udf_sb_info *sbi = UDF_SB(sb); in udf_sb_free_partitions() local
327 if (sbi->s_partmaps == NULL) in udf_sb_free_partitions()
329 for (i = 0; i < sbi->s_partitions; i++) in udf_sb_free_partitions()
330 udf_free_partition(&sbi->s_partmaps[i]); in udf_sb_free_partitions()
331 kfree(sbi->s_partmaps); in udf_sb_free_partitions()
[all …]
Dpartition.c32 struct udf_sb_info *sbi = UDF_SB(sb); in udf_get_pblock() local
34 if (partition >= sbi->s_partitions) { in udf_get_pblock()
39 map = &sbi->s_partmaps[partition]; in udf_get_pblock()
53 struct udf_sb_info *sbi = UDF_SB(sb); in udf_get_pblock_virt15() local
56 struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode); in udf_get_pblock_virt15()
58 map = &sbi->s_partmaps[partition]; in udf_get_pblock_virt15()
82 loc = udf_block_map(sbi->s_vat_inode, newblock); in udf_get_pblock_virt15()
117 struct udf_sb_info *sbi = UDF_SB(sb); in udf_get_pblock_spar15() local
122 map = &sbi->s_partmaps[partition]; in udf_get_pblock_spar15()
159 struct udf_sb_info *sbi = UDF_SB(sb); in udf_relocate_blocks() local
[all …]
Dballoc.c94 struct udf_sb_info *sbi = UDF_SB(sb); in udf_add_free_space() local
97 if (!sbi->s_lvid_bh) in udf_add_free_space()
100 lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; in udf_add_free_space()
111 struct udf_sb_info *sbi = UDF_SB(sb); in udf_bitmap_free_blocks() local
121 mutex_lock(&sbi->s_alloc_mutex); in udf_bitmap_free_blocks()
122 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; in udf_bitmap_free_blocks()
159 udf_add_free_space(sb, sbi->s_partition, count); in udf_bitmap_free_blocks()
168 mutex_unlock(&sbi->s_alloc_mutex); in udf_bitmap_free_blocks()
176 struct udf_sb_info *sbi = UDF_SB(sb); in udf_bitmap_prealloc_blocks() local
183 mutex_lock(&sbi->s_alloc_mutex); in udf_bitmap_prealloc_blocks()
[all …]
Dialloc.c32 struct udf_sb_info *sbi = UDF_SB(sb); in udf_free_inode() local
36 mutex_lock(&sbi->s_alloc_mutex); in udf_free_inode()
42 mutex_unlock(&sbi->s_alloc_mutex); in udf_free_inode()
51 struct udf_sb_info *sbi = UDF_SB(sb); in udf_new_inode() local
68 if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev) in udf_new_inode()
69 sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE; in udf_new_inode()
97 mutex_lock(&sbi->s_alloc_mutex); in udf_new_inode()
103 mutex_unlock(&sbi->s_alloc_mutex); in udf_new_inode()
Dmisc.c83 struct udf_sb_info *sbi = UDF_SB(inode->i_sb); in udf_add_extendedattr() local
89 if (sbi->s_udfrev >= 0x0200) in udf_add_extendedattr()
94 cpu_to_le16(sbi->s_serial_number); in udf_add_extendedattr()
Dinode.c1285 struct udf_sb_info *sbi = UDF_SB(inode->i_sb); in udf_read_inode() local
1294 sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) { in udf_read_inode()
1409 read_lock(&sbi->s_cred_lock); in udf_read_inode()
1423 sbi->s_fmode != UDF_INVALID_MODE) in udf_read_inode()
1424 inode->i_mode = sbi->s_fmode; in udf_read_inode()
1426 sbi->s_dmode != UDF_INVALID_MODE) in udf_read_inode()
1427 inode->i_mode = sbi->s_dmode; in udf_read_inode()
1430 inode->i_mode &= ~sbi->s_umask; in udf_read_inode()
1431 read_unlock(&sbi->s_cred_lock); in udf_read_inode()
1451 inode->i_atime = sbi->s_record_time; in udf_read_inode()
[all …]
Dtruncate.c184 struct udf_sb_info *sbi = UDF_SB(sb); in udf_update_alloc_ext_desc() local
190 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || sbi->s_udfrev >= 0x0201) in udf_update_alloc_ext_desc()
/linux-4.4.14/fs/isofs/
Dinode.c49 struct isofs_sb_info *sbi = ISOFS_SB(sb); in isofs_put_super() local
52 unload_nls(sbi->s_nls_iocharset); in isofs_put_super()
55 kfree(sbi); in isofs_put_super()
580 struct isofs_sb_info *sbi; in isofs_fill_super() local
590 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); in isofs_fill_super()
591 if (!sbi) in isofs_fill_super()
593 s->s_fs_info = sbi; in isofs_fill_super()
609 sbi->s_high_sierra = 0; /* default is iso9660 */ in isofs_fill_super()
670 sbi->s_high_sierra = 1; in isofs_fill_super()
701 if(sbi->s_high_sierra){ in isofs_fill_super()
[all …]
Dnamei.c43 struct isofs_sb_info *sbi = ISOFS_SB(dir->i_sb); in isofs_find_entry() local
107 if (sbi->s_rock && in isofs_find_entry()
112 } else if (sbi->s_joliet_level) { in isofs_find_entry()
116 } else if (sbi->s_mapping == 'a') { in isofs_find_entry()
119 } else if (sbi->s_mapping == 'n') { in isofs_find_entry()
130 (!sbi->s_hide || in isofs_find_entry()
131 (!(de->flags[-sbi->s_high_sierra] & 1))) && in isofs_find_entry()
132 (sbi->s_showassoc || in isofs_find_entry()
133 (!(de->flags[-sbi->s_high_sierra] & 4)))) { in isofs_find_entry()
Ddir.c95 struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb); in do_isofs_readdir() local
165 if (de->flags[-sbi->s_high_sierra] & 0x80) { in do_isofs_readdir()
197 if ((sbi->s_hide && (de->flags[-sbi->s_high_sierra] & 1)) || in do_isofs_readdir()
198 (!sbi->s_showassoc && in do_isofs_readdir()
199 (de->flags[-sbi->s_high_sierra] & 4))) { in do_isofs_readdir()
205 if (sbi->s_rock) { in do_isofs_readdir()
214 if (sbi->s_joliet_level) { in do_isofs_readdir()
219 if (sbi->s_mapping == 'a') { in do_isofs_readdir()
223 if (sbi->s_mapping == 'n') { in do_isofs_readdir()
Drock.c696 struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb); in rock_ridge_symlink_readpage() local
709 if (!sbi->s_rock) in rock_ridge_symlink_readpage()
/linux-4.4.14/sound/drivers/opl3/
Dopl3_oss.c207 struct sbi_instrument sbi; in snd_opl3_load_patch_seq_oss() local
222 if (count < (int)sizeof(sbi)) { in snd_opl3_load_patch_seq_oss()
226 if (copy_from_user(&sbi, buf, sizeof(sbi))) in snd_opl3_load_patch_seq_oss()
229 if (sbi.channel < 0 || sbi.channel >= SBFM_MAXINSTR) { in snd_opl3_load_patch_seq_oss()
231 sbi.channel); in snd_opl3_load_patch_seq_oss()
236 sprintf(name, "Chan%d", sbi.channel); in snd_opl3_load_patch_seq_oss()
238 err = snd_opl3_load_patch(opl3, sbi.channel, 127, type, name, NULL, in snd_opl3_load_patch_seq_oss()
239 sbi.operators); in snd_opl3_load_patch_seq_oss()
243 return sizeof(sbi); in snd_opl3_load_patch_seq_oss()
/linux-4.4.14/fs/efs/
Dsuper.c31 struct efs_sb_info *sbi = SUPER_INFO(s); in efs_kill_sb() local
33 kfree(sbi); in efs_kill_sb()
334 struct efs_sb_info *sbi = SUPER_INFO(sb); in efs_statfs() local
339 buf->f_blocks = sbi->total_groups * /* total data blocks */ in efs_statfs()
340 (sbi->group_size - sbi->inode_blocks); in efs_statfs()
341 buf->f_bfree = sbi->data_free; /* free data blocks */ in efs_statfs()
342 buf->f_bavail = sbi->data_free; /* free blocks for non-root */ in efs_statfs()
343 buf->f_files = sbi->total_groups * /* total inodes */ in efs_statfs()
344 sbi->inode_blocks * in efs_statfs()
346 buf->f_ffree = sbi->inode_free; /* free inodes */ in efs_statfs()
/linux-4.4.14/fs/reiserfs/
Dsuper.c86 struct reiserfs_sb_info *sbi; in flush_old_commits() local
89 sbi = container_of(work, struct reiserfs_sb_info, old_work.work); in flush_old_commits()
90 s = sbi->s_journal->j_work_sb; in flush_old_commits()
92 spin_lock(&sbi->old_work_lock); in flush_old_commits()
93 sbi->work_queued = 0; in flush_old_commits()
94 spin_unlock(&sbi->old_work_lock); in flush_old_commits()
101 struct reiserfs_sb_info *sbi = REISERFS_SB(s); in reiserfs_schedule_old_flush() local
111 spin_lock(&sbi->old_work_lock); in reiserfs_schedule_old_flush()
112 if (!sbi->work_queued) { in reiserfs_schedule_old_flush()
114 queue_delayed_work(system_long_wq, &sbi->old_work, delay); in reiserfs_schedule_old_flush()
[all …]
/linux-4.4.14/arch/microblaze/kernel/
Dhw_exception_handler.S486 sbi r6, r0, TOPHYS(ex_reg_op);
496 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
498 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
500 sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
502 sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
510 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
512 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
545 sbi r4, r3, 0;
547 sbi r4, r3, 1;
549 sbi r4, r3, 2;
[all …]
/linux-4.4.14/fs/cramfs/
Dinode.c243 struct cramfs_sb_info *sbi = CRAMFS_SB(sb); in cramfs_kill_sb() local
246 kfree(sbi); in cramfs_kill_sb()
261 struct cramfs_sb_info *sbi; in cramfs_fill_super() local
266 sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL); in cramfs_fill_super()
267 if (!sbi) in cramfs_fill_super()
269 sb->s_fs_info = sbi; in cramfs_fill_super()
318 sbi->size = super.size; in cramfs_fill_super()
319 sbi->blocks = super.fsid.blocks; in cramfs_fill_super()
320 sbi->files = super.fsid.files; in cramfs_fill_super()
322 sbi->size = 1<<28; in cramfs_fill_super()
[all …]
/linux-4.4.14/fs/ecryptfs/
Dmain.c257 static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, in ecryptfs_parse_options() argument
270 &sbi->mount_crypt_stat; in ecryptfs_parse_options()
495 struct ecryptfs_sb_info *sbi; in ecryptfs_mount() local
504 sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); in ecryptfs_mount()
505 if (!sbi) { in ecryptfs_mount()
510 rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid); in ecryptfs_mount()
515 mount_crypt_stat = &sbi->mount_crypt_stat; in ecryptfs_mount()
523 rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs"); in ecryptfs_mount()
527 ecryptfs_set_superblock_private(s, sbi); in ecryptfs_mount()
528 s->s_bdi = &sbi->bdi; in ecryptfs_mount()
[all …]
/linux-4.4.14/fs/squashfs/
Dsuper.c385 struct squashfs_sb_info *sbi = sb->s_fs_info; in squashfs_put_super() local
386 squashfs_cache_delete(sbi->block_cache); in squashfs_put_super()
387 squashfs_cache_delete(sbi->fragment_cache); in squashfs_put_super()
388 squashfs_cache_delete(sbi->read_page); in squashfs_put_super()
389 squashfs_decompressor_destroy(sbi); in squashfs_put_super()
390 kfree(sbi->id_table); in squashfs_put_super()
391 kfree(sbi->fragment_index); in squashfs_put_super()
392 kfree(sbi->meta_index); in squashfs_put_super()
393 kfree(sbi->inode_lookup_table); in squashfs_put_super()
394 kfree(sbi->xattr_id_table); in squashfs_put_super()
/linux-4.4.14/arch/s390/hypfs/
Dinode.c272 struct hypfs_sb_info *sbi; in hypfs_fill_super() local
274 sbi = kzalloc(sizeof(struct hypfs_sb_info), GFP_KERNEL); in hypfs_fill_super()
275 if (!sbi) in hypfs_fill_super()
277 mutex_init(&sbi->lock); in hypfs_fill_super()
278 sbi->uid = current_uid(); in hypfs_fill_super()
279 sbi->gid = current_gid(); in hypfs_fill_super()
280 sb->s_fs_info = sbi; in hypfs_fill_super()
301 sbi->update_file = hypfs_create_update_file(root_dentry); in hypfs_fill_super()
302 if (IS_ERR(sbi->update_file)) in hypfs_fill_super()
303 return PTR_ERR(sbi->update_file); in hypfs_fill_super()
/linux-4.4.14/include/linux/
Df2fs_fs.h35 #define F2FS_ROOT_INO(sbi) (sbi->root_ino_num) argument
36 #define F2FS_NODE_INO(sbi) (sbi->node_ino_num) argument
37 #define F2FS_META_INO(sbi) (sbi->meta_ino_num) argument
/linux-4.4.14/fs/adfs/
Dsuper.c223 struct adfs_sb_info *sbi = ADFS_SB(sb); in adfs_statfs() local
227 buf->f_namelen = sbi->s_namelen; in adfs_statfs()
229 buf->f_blocks = sbi->s_size; in adfs_statfs()
230 buf->f_files = sbi->s_ids_per_zone * sbi->s_map_size; in adfs_statfs()
/linux-4.4.14/arch/microblaze/lib/
Dfastcopy.S63 sbi r11, r5, 0 /* *d = h */
344 sbi r9, r5, 0 /* *d = t1 */
387 sbi r11, r5, 0 /* *d = h */
661 sbi r9, r5, 0 /* *d = t1 */
/linux-4.4.14/fs/hugetlbfs/
Dinode.c884 struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb); in hugetlbfs_put_super() local
886 if (sbi) { in hugetlbfs_put_super()
889 if (sbi->spool) in hugetlbfs_put_super()
890 hugepage_put_subpool(sbi->spool); in hugetlbfs_put_super()
892 kfree(sbi); in hugetlbfs_put_super()
/linux-4.4.14/include/trace/events/
Df2fs.h1222 TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt,
1225 TP_ARGS(sbi, node_cnt, tree_cnt),
1234 __entry->dev = sbi->sb->s_dev;
/linux-4.4.14/fs/cifs/
Dconnect.c3895 struct cifs_sb_info *sbi = container_of(p, struct cifs_sb_info, rcu); in delayed_free() local
3896 unload_nls(sbi->local_nls); in delayed_free()
3897 kfree(sbi); in delayed_free()
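The cifs delayed_free() above recovers the enclosing cifs_sb_info from the embedded rcu callback head via container_of() before freeing it. A standalone demonstration of the same pointer arithmetic, using an invented struct and a local container_of-style macro rather than the kernel's definitions:

```c
#include <stdio.h>
#include <stddef.h>

/* Local copy of the container_of idea: back up from a member to its struct. */
#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_stub { void *next; };

struct demo_sb_info {
	int flags;
	struct rcu_head_stub rcu;	/* embedded callback head */
};

static void demo_delayed_free(struct rcu_head_stub *p)
{
	struct demo_sb_info *sbi = demo_container_of(p, struct demo_sb_info, rcu);

	printf("freeing sbi with flags=%d\n", sbi->flags);
}

int main(void)
{
	struct demo_sb_info sbi = { .flags = 42 };

	/* A real RCU core would invoke the callback later; call it directly here. */
	demo_delayed_free(&sbi.rcu);
	return 0;
}
```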