/linux-4.1.27/fs/f2fs/ |
D | segment.h | 26 #define IS_CURSEG(sbi, seg) \ argument 27 ((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \ 28 (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \ 29 (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \ 30 (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \ 31 (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \ 32 (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno)) 34 #define IS_CURSEC(sbi, secno) \ argument 35 ((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \ 36 sbi->segs_per_sec) || \ [all …]
|
D | segment.c | 214 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in commit_inmem_pages() local 231 f2fs_balance_fs(sbi); in commit_inmem_pages() 232 f2fs_lock_op(sbi); in commit_inmem_pages() 260 f2fs_unlock_op(sbi); in commit_inmem_pages() 262 f2fs_submit_merged_bio(sbi, DATA, WRITE); in commit_inmem_pages() 270 void f2fs_balance_fs(struct f2fs_sb_info *sbi) in f2fs_balance_fs() argument 276 if (has_not_enough_free_secs(sbi, 0)) { in f2fs_balance_fs() 277 mutex_lock(&sbi->gc_mutex); in f2fs_balance_fs() 278 f2fs_gc(sbi); in f2fs_balance_fs() 282 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) in f2fs_balance_fs_bg() argument [all …]
|
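The segment.c excerpt above shows the f2fs_balance_fs() shape: when there are not enough free sections, take gc_mutex and run garbage collection. Below is a minimal user-space sketch of that "check watermark, then collect under a lock" pattern; every name and number is invented for illustration and the real f2fs_gc() manages the mutex itself.

/* Toy model of the f2fs_balance_fs() pattern: if free space drops below a
 * watermark, reclaim while holding a lock.  Illustrative only. */
#include <pthread.h>
#include <stdio.h>

struct toy_sb_info {
        pthread_mutex_t gc_mutex;
        unsigned int    free_secs;
        unsigned int    reserved_secs;
};

static int has_not_enough_free_secs(struct toy_sb_info *sbi)
{
        return sbi->free_secs <= sbi->reserved_secs;
}

static void toy_gc(struct toy_sb_info *sbi)
{
        sbi->free_secs++;               /* pretend one section was reclaimed */
}

static void toy_balance_fs(struct toy_sb_info *sbi)
{
        if (has_not_enough_free_secs(sbi)) {
                pthread_mutex_lock(&sbi->gc_mutex);
                toy_gc(sbi);
                pthread_mutex_unlock(&sbi->gc_mutex);
        }
}

int main(void)
{
        struct toy_sb_info sbi = { .free_secs = 2, .reserved_secs = 4 };

        pthread_mutex_init(&sbi.gc_mutex, NULL);
        toy_balance_fs(&sbi);
        printf("free sections after balance: %u\n", sbi.free_secs);
        return 0;
}
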
D | checkpoint.c | 32 struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in grab_meta_page() argument 34 struct address_space *mapping = META_MAPPING(sbi); in grab_meta_page() 50 struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) in get_meta_page() argument 52 struct address_space *mapping = META_MAPPING(sbi); in get_meta_page() 68 if (f2fs_submit_page_bio(sbi, page, &fio)) in get_meta_page() 80 static inline bool is_valid_blkaddr(struct f2fs_sb_info *sbi, in is_valid_blkaddr() argument 87 if (unlikely(blkaddr >= SIT_BLK_CNT(sbi))) in is_valid_blkaddr() 91 if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) || in is_valid_blkaddr() 92 blkaddr < SM_I(sbi)->ssa_blkaddr)) in is_valid_blkaddr() 96 if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr || in is_valid_blkaddr() [all …]
|
D | super.c | 105 static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type) in __struct_ptr() argument 108 return (unsigned char *)sbi->gc_thread; in __struct_ptr() 110 return (unsigned char *)SM_I(sbi); in __struct_ptr() 112 return (unsigned char *)NM_I(sbi); in __struct_ptr() 114 return (unsigned char *)sbi; in __struct_ptr() 119 struct f2fs_sb_info *sbi, char *buf) in f2fs_sbi_show() argument 124 ptr = __struct_ptr(sbi, a->struct_type); in f2fs_sbi_show() 134 struct f2fs_sb_info *sbi, in f2fs_sbi_store() argument 142 ptr = __struct_ptr(sbi, a->struct_type); in f2fs_sbi_store() 158 struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info, in f2fs_attr_show() local [all …]
|
D | gc.c | 29 struct f2fs_sb_info *sbi = data; in gc_thread_func() local 30 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in gc_thread_func() 31 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; in gc_thread_func() 46 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { in gc_thread_func() 64 if (!mutex_trylock(&sbi->gc_mutex)) in gc_thread_func() 67 if (!is_idle(sbi)) { in gc_thread_func() 69 mutex_unlock(&sbi->gc_mutex); in gc_thread_func() 73 if (has_enough_invalid_blocks(sbi)) in gc_thread_func() 78 stat_inc_bggc_count(sbi); in gc_thread_func() 81 if (f2fs_gc(sbi)) in gc_thread_func() [all …]
|
D | debug.c | 30 static void update_general_status(struct f2fs_sb_info *sbi) in update_general_status() argument 32 struct f2fs_stat_info *si = F2FS_STAT(sbi); in update_general_status() 36 si->hit_ext = sbi->read_hit_ext; in update_general_status() 37 si->total_ext = sbi->total_hit_ext; in update_general_status() 38 si->ext_tree = sbi->total_ext_tree; in update_general_status() 39 si->ext_node = atomic_read(&sbi->total_ext_node); in update_general_status() 40 si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES); in update_general_status() 41 si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS); in update_general_status() 42 si->ndirty_dirs = sbi->n_dirty_dirs; in update_general_status() 43 si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META); in update_general_status() [all …]
|
D | f2fs.h | 24 #define f2fs_bug_on(sbi, condition) BUG_ON(condition) argument 27 #define f2fs_bug_on(sbi, condition) \ argument 31 set_sbi_flag(sbi, SBI_NEED_FSCK); \ 55 #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option) argument 56 #define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option) argument 57 #define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option) argument 111 #define BATCHED_TRIM_SEGMENTS(sbi) \ argument 112 (SM_I(sbi)->trim_sections * (sbi)->segs_per_sec) 581 struct f2fs_sb_info *sbi; /* f2fs superblock */ member 734 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) in F2FS_RAW_SUPER() argument [all …]
|
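The f2fs.h excerpt above defines set_opt()/clear_opt()/test_opt() as bitmask macros that paste the option name onto a prefix. Here is a standalone model of that idiom; the option names and struct layout are invented, only the macro shape mirrors the excerpt.

/* Minimal model of the set_opt()/clear_opt()/test_opt() idiom: mount options
 * live in one bitmask and the macros build the flag name by token pasting. */
#include <stdio.h>

#define TOY_MOUNT_DISCARD      0x0001
#define TOY_MOUNT_INLINE_DATA  0x0002

struct toy_mount_opt { unsigned int opt; };
struct toy_sb_info   { struct toy_mount_opt mount_opt; };

#define set_opt(sbi, option)   ((sbi)->mount_opt.opt |=  TOY_MOUNT_##option)
#define clear_opt(sbi, option) ((sbi)->mount_opt.opt &= ~TOY_MOUNT_##option)
#define test_opt(sbi, option)  ((sbi)->mount_opt.opt &   TOY_MOUNT_##option)

int main(void)
{
        struct toy_sb_info sbi = { { 0 } };

        set_opt(&sbi, DISCARD);
        printf("discard=%d inline_data=%d\n",
               !!test_opt(&sbi, DISCARD), !!test_opt(&sbi, INLINE_DATA));
        clear_opt(&sbi, DISCARD);
        return 0;
}
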
D | gc.h | 46 static inline block_t free_user_blocks(struct f2fs_sb_info *sbi) in free_user_blocks() argument 48 if (free_segments(sbi) < overprovision_segments(sbi)) in free_user_blocks() 51 return (free_segments(sbi) - overprovision_segments(sbi)) in free_user_blocks() 52 << sbi->log_blocks_per_seg; in free_user_blocks() 55 static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi) in limit_invalid_user_blocks() argument 57 return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100; in limit_invalid_user_blocks() 60 static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi) in limit_free_user_blocks() argument 62 block_t reclaimable_user_blocks = sbi->user_block_count - in limit_free_user_blocks() 63 written_block_count(sbi); in limit_free_user_blocks() 89 static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi) in has_enough_invalid_blocks() argument [all …]
|
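The gc.h excerpt above turns spare segments into blocks with a shift and caps invalid blocks at a percentage of user blocks. A short worked model of that watermark arithmetic follows; LIMIT_INVALID_BLOCK and the sample sizes are made up.

/* Worked model of the gc.h watermark arithmetic: (free - overprovision)
 * segments shifted to blocks, and a percent-of-user-blocks invalid limit. */
#include <stdio.h>

#define LIMIT_INVALID_BLOCK 40          /* percent, illustrative */

int main(void)
{
        unsigned int free_segments = 120;
        unsigned int overprovision_segments = 20;
        unsigned int log_blocks_per_seg = 9;      /* 512 blocks per segment */
        unsigned long user_block_count = 1000000;
        unsigned long free_user_blocks = 0;

        if (free_segments >= overprovision_segments)
                free_user_blocks = (unsigned long)
                        (free_segments - overprovision_segments)
                        << log_blocks_per_seg;

        unsigned long limit_invalid = user_block_count * LIMIT_INVALID_BLOCK / 100;

        printf("free user blocks:    %lu\n", free_user_blocks);   /* 100 * 512 = 51200 */
        printf("invalid block limit: %lu\n", limit_invalid);      /* 400000 */
        return 0;
}
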
D | recovery.c | 50 bool space_for_roll_forward(struct f2fs_sb_info *sbi) in space_for_roll_forward() argument 52 if (sbi->last_valid_block_count + sbi->alloc_valid_block_count in space_for_roll_forward() 53 > sbi->user_block_count) in space_for_roll_forward() 160 static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) in find_fsync_dnodes() argument 162 unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi)); in find_fsync_dnodes() 169 curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); in find_fsync_dnodes() 170 blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); in find_fsync_dnodes() 172 ra_meta_pages(sbi, blkaddr, 1, META_POR); in find_fsync_dnodes() 177 if (blkaddr < MAIN_BLKADDR(sbi) || blkaddr >= MAX_BLKADDR(sbi)) in find_fsync_dnodes() 180 page = get_meta_page(sbi, blkaddr); in find_fsync_dnodes() [all …]
|
D | node.c | 31 bool available_free_memory(struct f2fs_sb_info *sbi, int type) in available_free_memory() argument 33 struct f2fs_nm_info *nm_i = NM_I(sbi); in available_free_memory() 56 if (sbi->sb->s_bdi->dirty_exceeded) in available_free_memory() 58 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); in available_free_memory() 64 mem_size += (sbi->im[i].ino_num * in available_free_memory() 68 mem_size = (sbi->total_ext_tree * sizeof(struct extent_tree) + in available_free_memory() 69 atomic_read(&sbi->total_ext_node) * in available_free_memory() 73 if (sbi->sb->s_bdi->dirty_exceeded) in available_free_memory() 97 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_current_nat_page() argument 99 pgoff_t index = current_nat_addr(sbi, nid); in get_current_nat_page() [all …]
|
D | namei.c | 27 struct f2fs_sb_info *sbi = F2FS_I_SB(dir); in f2fs_new_inode() local 37 f2fs_lock_op(sbi); in f2fs_new_inode() 38 if (!alloc_nid(sbi, &ino)) { in f2fs_new_inode() 39 f2fs_unlock_op(sbi); in f2fs_new_inode() 43 f2fs_unlock_op(sbi); in f2fs_new_inode() 50 inode->i_generation = sbi->s_next_generation++; in f2fs_new_inode() 61 if (test_opt(sbi, INLINE_DENTRY) && S_ISDIR(inode->i_mode)) in f2fs_new_inode() 76 alloc_nid_failed(sbi, ino); in f2fs_new_inode() 94 static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode, in set_cold_files() argument 98 __u8 (*extlist)[8] = sbi->raw_super->extension_list; in set_cold_files() [all …]
|
D | data.c | 52 struct f2fs_sb_info *sbi = bio->bi_private; in f2fs_write_end_io() local 62 f2fs_stop_checkpoint(sbi); in f2fs_write_end_io() 65 dec_page_count(sbi, F2FS_WRITEBACK); in f2fs_write_end_io() 68 if (!get_pages(sbi, F2FS_WRITEBACK) && in f2fs_write_end_io() 69 !list_empty(&sbi->cp_wait.task_list)) in f2fs_write_end_io() 70 wake_up(&sbi->cp_wait); in f2fs_write_end_io() 78 static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, in __bio_alloc() argument 86 bio->bi_bdev = sbi->sb->s_bdev; in __bio_alloc() 89 bio->bi_private = sbi; in __bio_alloc() 102 trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); in __submit_merged_bio() [all …]
|
D | inode.c | 100 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in do_read_inode() local 106 if (check_nid_range(sbi, inode->i_ino)) { in do_read_inode() 113 node_page = get_node_page(sbi, inode->i_ino); in do_read_inode() 166 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_iget() local 178 if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi)) in f2fs_iget() 185 if (ino == F2FS_NODE_INO(sbi)) { in f2fs_iget() 188 } else if (ino == F2FS_META_INO(sbi)) { in f2fs_iget() 265 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in update_inode_page() local 268 node_page = get_node_page(sbi, inode->i_ino); in update_inode_page() 275 f2fs_stop_checkpoint(sbi); in update_inode_page() [all …]
|
D | file.c | 37 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_vm_page_mkwrite() local 41 f2fs_balance_fs(sbi); in f2fs_vm_page_mkwrite() 45 f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); in f2fs_vm_page_mkwrite() 48 f2fs_lock_op(sbi); in f2fs_vm_page_mkwrite() 52 f2fs_unlock_op(sbi); in f2fs_vm_page_mkwrite() 56 f2fs_unlock_op(sbi); in f2fs_vm_page_mkwrite() 120 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in need_do_checkpoint() local 127 else if (!space_for_roll_forward(sbi)) in need_do_checkpoint() 129 else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) in need_do_checkpoint() 131 else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi))) in need_do_checkpoint() [all …]
|
D | xattr.c | 31 struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); in f2fs_xattr_generic_list() local 37 if (!test_opt(sbi, XATTR_USER)) in f2fs_xattr_generic_list() 68 struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); in f2fs_xattr_generic_get() local 72 if (!test_opt(sbi, XATTR_USER)) in f2fs_xattr_generic_get() 92 struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); in f2fs_xattr_generic_set() local 96 if (!test_opt(sbi, XATTR_USER)) in f2fs_xattr_generic_set() 271 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in read_all_xattrs() local 290 page = get_node_page(sbi, inode->i_ino); in read_all_xattrs() 305 xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid); in read_all_xattrs() 330 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in write_all_xattrs() local [all …]
|
D | node.h | 148 static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid) in next_free_nid() argument 150 struct f2fs_nm_info *nm_i = NM_I(sbi); in next_free_nid() 166 static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr) in get_nat_bitmap() argument 168 struct f2fs_nm_info *nm_i = NM_I(sbi); in get_nat_bitmap() 172 static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start) in current_nat_addr() argument 174 struct f2fs_nm_info *nm_i = NM_I(sbi); in current_nat_addr() 180 seg_off = block_off >> sbi->log_blocks_per_seg; in current_nat_addr() 183 (seg_off << sbi->log_blocks_per_seg << 1) + in current_nat_addr() 184 (block_off & ((1 << sbi->log_blocks_per_seg) - 1))); in current_nat_addr() 187 block_addr += sbi->blocks_per_seg; in current_nat_addr() [all …]
|
D | inline.c | 155 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_convert_inline_inode() local 164 f2fs_lock_op(sbi); in f2fs_convert_inline_inode() 166 ipage = get_node_page(sbi, inode->i_ino); in f2fs_convert_inline_inode() 179 f2fs_unlock_op(sbi); in f2fs_convert_inline_inode() 219 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in recover_inline_data() local 238 ipage = get_node_page(sbi, inode->i_ino); in recover_inline_data() 239 f2fs_bug_on(sbi, IS_ERR(ipage)); in recover_inline_data() 256 ipage = get_node_page(sbi, inode->i_ino); in recover_inline_data() 257 f2fs_bug_on(sbi, IS_ERR(ipage)); in recover_inline_data() 272 struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); in find_in_inline_dir() local [all …]
|
D | dir.c | 621 struct f2fs_sb_info *sbi = F2FS_I_SB(dir); in f2fs_drop_nlink() local 643 add_orphan_inode(sbi, inode->i_ino); in f2fs_drop_nlink() 645 release_orphan_inode(sbi); in f2fs_drop_nlink()
|
/linux-4.1.27/fs/sysv/ |
D | super.c | 47 static void detected_xenix(struct sysv_sb_info *sbi, unsigned *max_links) in detected_xenix() argument 49 struct buffer_head *bh1 = sbi->s_bh1; in detected_xenix() 50 struct buffer_head *bh2 = sbi->s_bh2; in detected_xenix() 63 sbi->s_fic_size = XENIX_NICINOD; in detected_xenix() 64 sbi->s_flc_size = XENIX_NICFREE; in detected_xenix() 65 sbi->s_sbd1 = (char *)sbd1; in detected_xenix() 66 sbi->s_sbd2 = (char *)sbd2; in detected_xenix() 67 sbi->s_sb_fic_count = &sbd1->s_ninode; in detected_xenix() 68 sbi->s_sb_fic_inodes = &sbd1->s_inode[0]; in detected_xenix() 69 sbi->s_sb_total_free_inodes = &sbd2->s_tinode; in detected_xenix() [all …]
|
D | balloc.c | 44 struct sysv_sb_info * sbi = SYSV_SB(sb); in sysv_free_block() local 46 sysv_zone_t *blocks = sbi->s_bcache; in sysv_free_block() 48 unsigned block = fs32_to_cpu(sbi, nr); in sysv_free_block() 55 if (sbi->s_type == FSTYPE_AFS) in sysv_free_block() 58 if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) { in sysv_free_block() 63 mutex_lock(&sbi->s_lock); in sysv_free_block() 64 count = fs16_to_cpu(sbi, *sbi->s_bcache_count); in sysv_free_block() 66 if (count > sbi->s_flc_size) { in sysv_free_block() 68 mutex_unlock(&sbi->s_lock); in sysv_free_block() 75 if (count == sbi->s_flc_size || count == 0) { in sysv_free_block() [all …]
|
D | ialloc.c | 41 struct sysv_sb_info *sbi = SYSV_SB(sb); in sv_sb_fic_inode() local 43 if (sbi->s_bh1 == sbi->s_bh2) in sv_sb_fic_inode() 44 return &sbi->s_sb_fic_inodes[i]; in sv_sb_fic_inode() 49 return (sysv_ino_t*)(sbi->s_sbd1 + offset); in sv_sb_fic_inode() 51 return (sysv_ino_t*)(sbi->s_sbd2 + offset); in sv_sb_fic_inode() 58 struct sysv_sb_info *sbi = SYSV_SB(sb); in sysv_raw_inode() local 60 int block = sbi->s_firstinodezone + sbi->s_block_base; in sysv_raw_inode() 62 block += (ino-1) >> sbi->s_inodes_per_block_bits; in sysv_raw_inode() 67 return res + ((ino-1) & sbi->s_inodes_per_block_1); in sysv_raw_inode() 72 struct sysv_sb_info *sbi = SYSV_SB(sb); in refill_free_cache() local [all …]
|
D | inode.c | 36 struct sysv_sb_info *sbi = SYSV_SB(sb); in sysv_sync_fs() local 39 mutex_lock(&sbi->s_lock); in sysv_sync_fs() 46 old_time = fs32_to_cpu(sbi, *sbi->s_sb_time); in sysv_sync_fs() 47 if (sbi->s_type == FSTYPE_SYSV4) { in sysv_sync_fs() 48 if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time)) in sysv_sync_fs() 49 *sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38 - time); in sysv_sync_fs() 50 *sbi->s_sb_time = cpu_to_fs32(sbi, time); in sysv_sync_fs() 51 mark_buffer_dirty(sbi->s_bh2); in sysv_sync_fs() 54 mutex_unlock(&sbi->s_lock); in sysv_sync_fs() 61 struct sysv_sb_info *sbi = SYSV_SB(sb); in sysv_remount() local [all …]
|
D | sysv.h | 116 struct sysv_sb_info *sbi = SYSV_SB(sb); in dirty_sb() local 118 mark_buffer_dirty(sbi->s_bh1); in dirty_sb() 119 if (sbi->s_bh1 != sbi->s_bh2) in dirty_sb() 120 mark_buffer_dirty(sbi->s_bh2); in dirty_sb() 191 static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n) in fs32_to_cpu() argument 193 if (sbi->s_bytesex == BYTESEX_PDP) in fs32_to_cpu() 195 else if (sbi->s_bytesex == BYTESEX_LE) in fs32_to_cpu() 201 static inline __fs32 cpu_to_fs32(struct sysv_sb_info *sbi, __u32 n) in cpu_to_fs32() argument 203 if (sbi->s_bytesex == BYTESEX_PDP) in cpu_to_fs32() 205 else if (sbi->s_bytesex == BYTESEX_LE) in cpu_to_fs32() [all …]
|
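The sysv.h excerpt above dispatches on sbi->s_bytesex to decode on-disk 32-bit fields, but the search output truncates before the conversion bodies. Below is a hedged, standalone sketch of the same three-way dispatch, decoding raw bytes directly rather than using kernel byte-swap helpers; the PDP-11 layout (high 16-bit half stored first, each half little-endian) is my reading, not a quote of the kernel code.

/* Standalone sketch of the fs32_to_cpu() dispatch: decode a 4-byte on-disk
 * field according to the filesystem's byte sex. */
#include <stdint.h>
#include <stdio.h>

enum bytesex { BYTESEX_PDP, BYTESEX_LE, BYTESEX_BE };

struct toy_sysv_sb_info { enum bytesex s_bytesex; };

static uint32_t toy_fs32_to_cpu(const struct toy_sysv_sb_info *sbi,
                                const uint8_t b[4])
{
        switch (sbi->s_bytesex) {
        case BYTESEX_LE:
                return b[0] | b[1] << 8 | b[2] << 16 | (uint32_t)b[3] << 24;
        case BYTESEX_PDP:       /* high half first, halves little-endian */
                return b[2] | b[3] << 8 | b[0] << 16 | (uint32_t)b[1] << 24;
        default:                /* BYTESEX_BE */
                return (uint32_t)b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3];
        }
}

int main(void)
{
        const uint8_t raw[4] = { 0x78, 0x56, 0x34, 0x12 };
        struct toy_sysv_sb_info le = { BYTESEX_LE }, pdp = { BYTESEX_PDP };

        printf("LE:  0x%08x\n", toy_fs32_to_cpu(&le, raw));   /* 0x12345678 */
        printf("PDP: 0x%08x\n", toy_fs32_to_cpu(&pdp, raw));  /* 0x56781234 */
        return 0;
}
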
D | itree.c | 25 struct sysv_sb_info *sbi = SYSV_SB(sb); in block_to_path() local 26 int ptrs_bits = sbi->s_ind_per_block_bits; in block_to_path() 27 unsigned long indirect_blocks = sbi->s_ind_per_block, in block_to_path() 28 double_blocks = sbi->s_ind_per_block_2; in block_to_path() 53 static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr) in block_to_cpu() argument 55 return sbi->s_block_base + fs32_to_cpu(sbi, nr); in block_to_cpu() 430 struct sysv_sb_info *sbi = SYSV_SB(s); in sysv_nblocks() local 431 int ptrs_bits = sbi->s_ind_per_block_bits; in sysv_nblocks()
|
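The itree.c excerpt above (block_to_path) maps a file-relative block number onto direct, indirect, and double-indirect slots using the pointers-per-block geometry. Here is a compact standalone sketch of that calculation; DIRECT and PTRS_PER_BLOCK are assumed example values, and the real sysv code derives them from the superblock and also handles a deeper level.

/* Sketch of the block_to_path() idea: return the chain of slot offsets that
 * leads to a given file block. */
#include <stdio.h>

#define DIRECT          10
#define PTRS_PER_BLOCK  256     /* e.g. 1024-byte block / 4-byte pointer */

static int block_to_path(unsigned long block, unsigned long offsets[4])
{
        int n = 0;

        if (block < DIRECT) {
                offsets[n++] = block;
        } else if ((block -= DIRECT) < PTRS_PER_BLOCK) {
                offsets[n++] = DIRECT;          /* slot of the indirect block */
                offsets[n++] = block;
        } else if ((block -= PTRS_PER_BLOCK) < PTRS_PER_BLOCK * PTRS_PER_BLOCK) {
                offsets[n++] = DIRECT + 1;      /* slot of the double-indirect block */
                offsets[n++] = block / PTRS_PER_BLOCK;
                offsets[n++] = block % PTRS_PER_BLOCK;
        }
        return n;                               /* 0: too large for this sketch */
}

int main(void)
{
        unsigned long offsets[4];
        int depth = block_to_path(300, offsets);

        /* 300 -> 300-10=290 -> 290-256=34 -> double indirect: 11, 0, 34 */
        printf("depth=%d: %lu %lu %lu\n", depth, offsets[0], offsets[1], offsets[2]);
        return 0;
}
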
/linux-4.1.27/fs/hfsplus/ |
D | super.c | 99 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); in hfsplus_system_write_inode() local 100 struct hfsplus_vh *vhdr = sbi->s_vhdr; in hfsplus_system_write_inode() 107 tree = sbi->ext_tree; in hfsplus_system_write_inode() 111 tree = sbi->cat_tree; in hfsplus_system_write_inode() 121 tree = sbi->attr_tree; in hfsplus_system_write_inode() 128 set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags); in hfsplus_system_write_inode() 175 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_sync_fs() local 176 struct hfsplus_vh *vhdr = sbi->s_vhdr; in hfsplus_sync_fs() 193 error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping); in hfsplus_sync_fs() 194 error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping); in hfsplus_sync_fs() [all …]
|
D | options.c | 99 int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi) in hfsplus_parse_options() argument 115 if (match_fourchar(&args[0], &sbi->creator)) { in hfsplus_parse_options() 121 if (match_fourchar(&args[0], &sbi->type)) { in hfsplus_parse_options() 131 sbi->umask = (umode_t)tmp; in hfsplus_parse_options() 138 sbi->uid = make_kuid(current_user_ns(), (uid_t)tmp); in hfsplus_parse_options() 139 if (!uid_valid(sbi->uid)) { in hfsplus_parse_options() 149 sbi->gid = make_kgid(current_user_ns(), (gid_t)tmp); in hfsplus_parse_options() 150 if (!gid_valid(sbi->gid)) { in hfsplus_parse_options() 156 if (match_int(&args[0], &sbi->part)) { in hfsplus_parse_options() 162 if (match_int(&args[0], &sbi->session)) { in hfsplus_parse_options() [all …]
|
D | wrapper.c | 159 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_read_wrapper() local 174 sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL); in hfsplus_read_wrapper() 175 if (!sbi->s_vhdr_buf) in hfsplus_read_wrapper() 177 sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL); in hfsplus_read_wrapper() 178 if (!sbi->s_backup_vhdr_buf) in hfsplus_read_wrapper() 183 sbi->s_vhdr_buf, (void **)&sbi->s_vhdr, in hfsplus_read_wrapper() 189 switch (sbi->s_vhdr->signature) { in hfsplus_read_wrapper() 191 set_bit(HFSPLUS_SB_HFSX, &sbi->flags); in hfsplus_read_wrapper() 196 if (!hfsplus_read_mdb(sbi->s_vhdr, &wd)) in hfsplus_read_wrapper() 215 sbi->s_backup_vhdr_buf, in hfsplus_read_wrapper() [all …]
|
D | bitmap.c | 21 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_block_allocate() local 34 mutex_lock(&sbi->alloc_mutex); in hfsplus_block_allocate() 35 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_allocate() 155 sbi->free_blocks -= *max; in hfsplus_block_allocate() 159 mutex_unlock(&sbi->alloc_mutex); in hfsplus_block_allocate() 165 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_block_free() local 178 if ((offset + count) > sbi->total_blocks) in hfsplus_block_free() 181 mutex_lock(&sbi->alloc_mutex); in hfsplus_block_free() 182 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_free() 234 sbi->free_blocks += len; in hfsplus_block_free() [all …]
|
D | dir.c | 298 struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb); in hfsplus_link() local 311 mutex_lock(&sbi->vh_mutex); in hfsplus_link() 320 sbi->hidden_dir, &str); in hfsplus_link() 327 cnid = sbi->next_cnid++; in hfsplus_link() 334 sbi->file_count++; in hfsplus_link() 336 cnid = sbi->next_cnid++; in hfsplus_link() 346 sbi->file_count++; in hfsplus_link() 349 mutex_unlock(&sbi->vh_mutex); in hfsplus_link() 355 struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); in hfsplus_unlink() local 365 mutex_lock(&sbi->vh_mutex); in hfsplus_unlink() [all …]
|
D | inode.c | 184 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); in hfsplus_get_perms() local 191 inode->i_uid = sbi->uid; in hfsplus_get_perms() 195 inode->i_gid = sbi->gid; in hfsplus_get_perms() 198 mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask)); in hfsplus_get_perms() 201 mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask)); in hfsplus_get_perms() 283 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); in hfsplus_file_fsync() local 300 error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping); in hfsplus_file_fsync() 304 filemap_write_and_wait(sbi->ext_tree->inode->i_mapping); in hfsplus_file_fsync() 310 if (sbi->attr_tree) { in hfsplus_file_fsync() 313 sbi->attr_tree->inode->i_mapping); in hfsplus_file_fsync() [all …]
|
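The inode.c excerpt above (hfsplus_get_perms) falls back to the uid, gid, and umask recorded in the superblock info when the catalog record carries no permissions. A tiny worked model of that umask arithmetic follows; the octal constants mirror S_IRWXUGO/S_IALLUGO and the sample umask is arbitrary.

/* Worked model of the mode fallback: keep an on-disk mode (permission bits
 * only), or build a default from the mount umask. */
#include <stdio.h>

#define TOY_IRWXUGO 0777        /* rwx for user/group/other */
#define TOY_IALLUGO 07777       /* rwx plus setuid/setgid/sticky */

static unsigned int toy_mode(unsigned int disk_mode, unsigned int umask)
{
        return disk_mode ? (disk_mode & TOY_IALLUGO)
                         : (TOY_IRWXUGO & ~umask);
}

int main(void)
{
        printf("default mode:   %o\n", toy_mode(0, 022));       /* 0755 */
        printf("disk mode kept: %o\n", toy_mode(040755, 022));  /* 0755 */
        return 0;
}
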
D | part_tbl.c | 73 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfs_parse_old_pmap() local 81 (sbi->part < 0 || sbi->part == i)) { in hfs_parse_old_pmap() 94 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfs_parse_new_pmap() local 102 (sbi->part < 0 || sbi->part == i)) { in hfs_parse_new_pmap()
|
D | ioctl.c | 30 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); in hfsplus_ioctl_bless() local 31 struct hfsplus_vh *vh = sbi->s_vhdr; in hfsplus_ioctl_bless() 32 struct hfsplus_vh *bvh = sbi->s_backup_vhdr; in hfsplus_ioctl_bless() 38 mutex_lock(&sbi->vh_mutex); in hfsplus_ioctl_bless() 55 mutex_unlock(&sbi->vh_mutex); in hfsplus_ioctl_bless()
|
D | extents.c | 224 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_get_block() local 232 ablock = iblock >> sbi->fs_shift; in hfsplus_get_block() 274 mask = (1 << sbi->fs_shift) - 1; in hfsplus_get_block() 275 sector = ((sector_t)dblock << sbi->fs_shift) + in hfsplus_get_block() 276 sbi->blockoffset + (iblock & mask); in hfsplus_get_block() 431 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_file_extend() local 436 if (sbi->alloc_file->i_size * 8 < in hfsplus_file_extend() 437 sbi->total_blocks - sbi->free_blocks + 8) { in hfsplus_file_extend() 440 sbi->alloc_file->i_size * 8, in hfsplus_file_extend() 441 sbi->total_blocks, sbi->free_blocks); in hfsplus_file_extend() [all …]
|
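The extents.c excerpt above (hfsplus_get_block) converts a file block to an allocation block with fs_shift, then back to an on-device block by shifting the mapped allocation block and adding the volume's block offset plus the remainder inside the allocation block. A short standalone model of that mapping follows; all geometry values are invented.

/* Model of the hfsplus_get_block() mapping arithmetic. */
#include <stdio.h>

int main(void)
{
        unsigned int fs_shift = 3;          /* 8 fs blocks per allocation block */
        unsigned long blockoffset = 2;      /* volume start, in fs blocks */

        unsigned long iblock = 21;                           /* file-relative fs block */
        unsigned long ablock = iblock >> fs_shift;           /* 21 / 8 = 2 */
        unsigned long dblock = 100 + ablock;                 /* pretend extent maps it to 102 */
        unsigned long mask = (1UL << fs_shift) - 1;          /* 7 */
        unsigned long sector = (dblock << fs_shift) + blockoffset + (iblock & mask);

        printf("ablock=%lu dblock=%lu sector=%lu\n", ablock, dblock, sector);
        /* sector = 102*8 + 2 + 5 = 823 */
        return 0;
}
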
D | catalog.c | 107 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); in hfsplus_cat_build_record() local 115 if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) in hfsplus_cat_build_record() 124 if (inode == sbi->hidden_dir) in hfsplus_cat_build_record() 150 cpu_to_be32(sbi->type); in hfsplus_cat_build_record() 152 cpu_to_be32(sbi->creator); in hfsplus_cat_build_record() 167 HFSPLUS_I(sbi->hidden_dir)->create_date; in hfsplus_cat_build_record() 224 struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); in hfsplus_subfolders_inc() local 226 if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) { in hfsplus_subfolders_inc() 237 struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); in hfsplus_subfolders_dec() local 239 if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) { in hfsplus_subfolders_dec()
|
D | xattr.c | 129 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfsplus_create_attributes_file() local 143 switch (atomic_read(&sbi->attr_tree_state)) { in hfsplus_create_attributes_file() 145 if (old_state != atomic_cmpxchg(&sbi->attr_tree_state, in hfsplus_create_attributes_file() 186 sbi->sect_count, in hfsplus_create_attributes_file() 190 hip->clump_blocks = clump_size >> sbi->alloc_blksz_shift; in hfsplus_create_attributes_file() 193 if (sbi->free_blocks <= (hip->clump_blocks << 1)) { in hfsplus_create_attributes_file() 205 (loff_t)hip->alloc_blocks << sbi->alloc_blksz_shift; in hfsplus_create_attributes_file() 206 hip->fs_blocks = hip->alloc_blocks << sbi->fs_shift; in hfsplus_create_attributes_file() 243 sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID); in hfsplus_create_attributes_file() 244 if (!sbi->attr_tree) in hfsplus_create_attributes_file() [all …]
|
D | bnode.c | 663 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); in hfs_bnode_need_zeroout() local 664 const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes); in hfs_bnode_need_zeroout()
|
D | hfsplus_fs.h | 496 int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi);
|
/linux-4.1.27/fs/autofs4/ |
D | inode.c | 25 struct autofs_info *autofs4_new_ino(struct autofs_sb_info *sbi) in autofs4_new_ino() argument 32 ino->sbi = sbi; in autofs4_new_ino() 51 struct autofs_sb_info *sbi = autofs4_sbi(sb); in autofs4_kill_sb() local 59 if (sbi) { in autofs4_kill_sb() 61 autofs4_catatonic_mode(sbi); in autofs4_kill_sb() 62 put_pid(sbi->oz_pgrp); in autofs4_kill_sb() 67 if (sbi) in autofs4_kill_sb() 68 kfree_rcu(sbi, rcu); in autofs4_kill_sb() 73 struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb); in autofs4_show_options() local 76 if (!sbi) in autofs4_show_options() [all …]
|
D | root.c | 76 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); in autofs4_add_active() local 79 spin_lock(&sbi->lookup_lock); in autofs4_add_active() 82 list_add(&ino->active, &sbi->active_list); in autofs4_add_active() 85 spin_unlock(&sbi->lookup_lock); in autofs4_add_active() 92 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); in autofs4_del_active() local 95 spin_lock(&sbi->lookup_lock); in autofs4_del_active() 101 spin_unlock(&sbi->lookup_lock); in autofs4_del_active() 109 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); in autofs4_dir_open() local 113 if (autofs4_oz_mode(sbi)) in autofs4_dir_open() 125 spin_lock(&sbi->lookup_lock); in autofs4_dir_open() [all …]
|
D | waitq.c | 27 void autofs4_catatonic_mode(struct autofs_sb_info *sbi) in autofs4_catatonic_mode() argument 31 mutex_lock(&sbi->wq_mutex); in autofs4_catatonic_mode() 32 if (sbi->catatonic) { in autofs4_catatonic_mode() 33 mutex_unlock(&sbi->wq_mutex); in autofs4_catatonic_mode() 39 sbi->catatonic = 1; in autofs4_catatonic_mode() 40 wq = sbi->queues; in autofs4_catatonic_mode() 41 sbi->queues = NULL; /* Erase all wait queues */ in autofs4_catatonic_mode() 51 fput(sbi->pipe); /* Close the pipe */ in autofs4_catatonic_mode() 52 sbi->pipe = NULL; in autofs4_catatonic_mode() 53 sbi->pipefd = -1; in autofs4_catatonic_mode() [all …]
|
D | expire.c | 52 struct autofs_sb_info *sbi = autofs4_sbi(path.dentry->d_sb); in autofs4_mount_busy() local 55 if (autofs_type_indirect(sbi->type)) in autofs4_mount_busy() 79 struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb); in get_next_positive_subdir() local 83 spin_lock(&sbi->lookup_lock); in get_next_positive_subdir() 96 spin_unlock(&sbi->lookup_lock); in get_next_positive_subdir() 113 spin_unlock(&sbi->lookup_lock); in get_next_positive_subdir() 126 struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb); in get_next_positive_dentry() local 133 spin_lock(&sbi->lookup_lock); in get_next_positive_dentry() 145 spin_unlock(&sbi->lookup_lock); in get_next_positive_dentry() 176 spin_unlock(&sbi->lookup_lock); in get_next_positive_dentry() [all …]
|
D | dev-ioctl.c | 165 struct autofs_sb_info *sbi = NULL; in autofs_dev_ioctl_sbi() local 170 sbi = autofs4_sbi(inode->i_sb); in autofs_dev_ioctl_sbi() 172 return sbi; in autofs_dev_ioctl_sbi() 177 struct autofs_sb_info *sbi, in autofs_dev_ioctl_protover() argument 180 param->protover.version = sbi->version; in autofs_dev_ioctl_protover() 186 struct autofs_sb_info *sbi, in autofs_dev_ioctl_protosubver() argument 189 param->protosubver.sub_version = sbi->sub_version; in autofs_dev_ioctl_protosubver() 228 return ino && ino->sbi->type & *(unsigned *)p; in test_by_type() 272 struct autofs_sb_info *sbi, in autofs_dev_ioctl_openmount() argument 302 struct autofs_sb_info *sbi, in autofs_dev_ioctl_closemount() argument [all …]
|
D | autofs_i.h | 73 struct autofs_sb_info *sbi; member 146 static inline int autofs4_oz_mode(struct autofs_sb_info *sbi) { in autofs4_oz_mode() argument 147 return sbi->catatonic || task_pgrp(current) == sbi->oz_pgrp; in autofs4_oz_mode() 160 struct autofs_sb_info *sbi, int when); 165 struct autofs_sb_info *sbi, int how); 168 struct autofs_sb_info *sbi, int how); 231 static inline u32 autofs4_get_dev(struct autofs_sb_info *sbi) in autofs4_get_dev() argument 233 return new_encode_dev(sbi->sb->s_dev); in autofs4_get_dev() 236 static inline u64 autofs4_get_ino(struct autofs_sb_info *sbi) in autofs4_get_ino() argument 238 return d_inode(sbi->sb->s_root)->i_ino; in autofs4_get_ino() [all …]
|
D | symlink.c | 17 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); in autofs4_follow_link() local 19 if (ino && !autofs4_oz_mode(sbi)) in autofs4_follow_link()
|
/linux-4.1.27/fs/affs/ |
D | bitmap.c | 40 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_free_block() local 48 if (block > sbi->s_partition_size) in affs_free_block() 51 blk = block - sbi->s_reserved; in affs_free_block() 52 bmap = blk / sbi->s_bmap_bits; in affs_free_block() 53 bit = blk % sbi->s_bmap_bits; in affs_free_block() 54 bm = &sbi->s_bitmap[bmap]; in affs_free_block() 56 mutex_lock(&sbi->s_bmlock); in affs_free_block() 58 bh = sbi->s_bmap_bh; in affs_free_block() 59 if (sbi->s_last_bmap != bmap) { in affs_free_block() 64 sbi->s_bmap_bh = bh; in affs_free_block() [all …]
|
D | super.c | 29 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_commit_super() local 30 struct buffer_head *bh = sbi->s_root_bh; in affs_commit_super() 46 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_put_super() local 49 cancel_delayed_work_sync(&sbi->sb_work); in affs_put_super() 61 struct affs_sb_info *sbi; in flush_superblock() local 64 sbi = container_of(work, struct affs_sb_info, sb_work.work); in flush_superblock() 65 sb = sbi->sb; in flush_superblock() 67 spin_lock(&sbi->work_lock); in flush_superblock() 68 sbi->work_queued = 0; in flush_superblock() 69 spin_unlock(&sbi->work_lock); in flush_superblock() [all …]
|
D | symlink.c | 36 struct affs_sb_info *sbi = AFFS_SB(inode->i_sb); in affs_symlink_readpage() local 38 spin_lock(&sbi->symlink_lock); in affs_symlink_readpage() 39 pf = sbi->s_prefix ? sbi->s_prefix : "/"; in affs_symlink_readpage() 42 spin_unlock(&sbi->symlink_lock); in affs_symlink_readpage()
|
D | inode.c | 18 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_iget() local 69 if (affs_test_opt(sbi->s_flags, SF_SETMODE)) in affs_iget() 70 inode->i_mode = sbi->s_mode; in affs_iget() 75 if (id == 0 || affs_test_opt(sbi->s_flags, SF_SETUID)) in affs_iget() 76 inode->i_uid = sbi->s_uid; in affs_iget() 77 else if (id == 0xFFFF && affs_test_opt(sbi->s_flags, SF_MUFS)) in affs_iget() 83 if (id == 0 || affs_test_opt(sbi->s_flags, SF_SETGID)) in affs_iget() 84 inode->i_gid = sbi->s_gid; in affs_iget() 85 else if (id == 0xFFFF && affs_test_opt(sbi->s_flags, SF_MUFS)) in affs_iget() 92 inode->i_uid = sbi->s_uid; in affs_iget() [all …]
|
D | namei.c | 359 struct affs_sb_info *sbi = AFFS_SB(sb); in affs_symlink() local 362 spin_lock(&sbi->symlink_lock); in affs_symlink() 363 while (sbi->s_volume[i]) /* Cannot overflow */ in affs_symlink() 364 *p++ = sbi->s_volume[i++]; in affs_symlink() 365 spin_unlock(&sbi->symlink_lock); in affs_symlink()
|
/linux-4.1.27/fs/ext2/ |
D | super.c | 52 struct ext2_sb_info *sbi = EXT2_SB(sb); in ext2_error() local 53 struct ext2_super_block *es = sbi->s_es; in ext2_error() 56 spin_lock(&sbi->s_lock); in ext2_error() 57 sbi->s_mount_state |= EXT2_ERROR_FS; in ext2_error() 59 spin_unlock(&sbi->s_lock); in ext2_error() 130 struct ext2_sb_info *sbi = EXT2_SB(sb); in ext2_put_super() local 136 struct ext2_super_block *es = sbi->s_es; in ext2_put_super() 138 spin_lock(&sbi->s_lock); in ext2_put_super() 139 es->s_state = cpu_to_le16(sbi->s_mount_state); in ext2_put_super() 140 spin_unlock(&sbi->s_lock); in ext2_put_super() [all …]
|
D | ialloc.c | 261 struct ext2_sb_info *sbi = EXT2_SB(sb); in find_group_orlov() local 262 struct ext2_super_block *es = sbi->s_es; in find_group_orlov() 263 int ngroups = sbi->s_groups_count; in find_group_orlov() 275 freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter); in find_group_orlov() 277 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); in find_group_orlov() 279 ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter); in find_group_orlov() 334 if (sbi->s_debts[group] >= max_debt) in find_group_orlov() 442 struct ext2_sb_info *sbi; in ext2_new_inode() local 451 sbi = EXT2_SB(sb); in ext2_new_inode() 452 es = sbi->s_es; in ext2_new_inode() [all …]
|
D | balloc.c | 46 struct ext2_sb_info *sbi = EXT2_SB(sb); in ext2_get_group_desc() local 48 if (block_group >= sbi->s_groups_count) { in ext2_get_group_desc() 52 block_group, sbi->s_groups_count); in ext2_get_group_desc() 59 if (!sbi->s_group_desc[group_desc]) { in ext2_get_group_desc() 67 desc = (struct ext2_group_desc *) sbi->s_group_desc[group_desc]->b_data; in ext2_get_group_desc() 69 *bh = sbi->s_group_desc[group_desc]; in ext2_get_group_desc() 166 struct ext2_sb_info *sbi = EXT2_SB(sb); in group_adjust_blocks() local 169 spin_lock(sb_bgl_lock(sbi, group_no)); in group_adjust_blocks() 172 spin_unlock(sb_bgl_lock(sbi, group_no)); in group_adjust_blocks() 486 struct ext2_sb_info * sbi = EXT2_SB(sb); in ext2_free_blocks() local [all …]
|
D | ext2.h | 117 sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group) in sb_bgl_lock() argument 119 return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group); in sb_bgl_lock()
|
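The ext2.h excerpt above shows sb_bgl_lock() handing back a per-block-group lock from a shared blockgroup_lock table, which balloc.c/ialloc.c take around group-descriptor updates. Below is a sketch of the idea: a small table of locks indexed by group number modulo the table size, so different groups rarely contend. The table size is arbitrary and pthread mutexes stand in for the kernel's spinlocks.

/* Sketch of per-block-group locking via a shared lock table. */
#include <pthread.h>
#include <stdio.h>

#define NR_BG_LOCKS 16                  /* illustrative, not the kernel's sizing */

struct toy_blockgroup_lock {
        pthread_mutex_t locks[NR_BG_LOCKS];
};

struct toy_sb_info {
        struct toy_blockgroup_lock *s_blockgroup_lock;
        unsigned long free_blocks[64];  /* one counter per group, for the demo */
};

static pthread_mutex_t *sb_bgl_lock(struct toy_sb_info *sbi, unsigned int group)
{
        return &sbi->s_blockgroup_lock->locks[group % NR_BG_LOCKS];
}

static void group_adjust_blocks(struct toy_sb_info *sbi, unsigned int group, long count)
{
        pthread_mutex_lock(sb_bgl_lock(sbi, group));
        sbi->free_blocks[group] += count;
        pthread_mutex_unlock(sb_bgl_lock(sbi, group));
}

int main(void)
{
        static struct toy_blockgroup_lock bgl;
        struct toy_sb_info sbi = { .s_blockgroup_lock = &bgl };

        for (int i = 0; i < NR_BG_LOCKS; i++)
                pthread_mutex_init(&bgl.locks[i], NULL);

        group_adjust_blocks(&sbi, 3, 8);
        printf("group 3 free blocks: %lu\n", sbi.free_blocks[3]);
        return 0;
}
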
/linux-4.1.27/fs/ufs/ |
D | cylinder.c | 31 struct ufs_sb_info * sbi = UFS_SB(sb); in ufs_read_cylinder() local 38 uspi = sbi->s_uspi; in ufs_read_cylinder() 39 ucpi = sbi->s_ucpi[bitmap_nr]; in ufs_read_cylinder() 40 ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data; in ufs_read_cylinder() 47 UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno]; in ufs_read_cylinder() 51 sbi->s_cgno[bitmap_nr] = cgno; in ufs_read_cylinder() 73 brelse (sbi->s_ucg[j]); in ufs_read_cylinder() 74 sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; in ufs_read_cylinder() 84 struct ufs_sb_info * sbi = UFS_SB(sb); in ufs_put_cylinder() local 92 uspi = sbi->s_uspi; in ufs_put_cylinder() [all …]
|
D | super.c | 98 struct ufs_sb_info *sbi = UFS_SB(sb); in lock_ufs() local 100 mutex_lock(&sbi->mutex); in lock_ufs() 101 sbi->mutex_owner = current; in lock_ufs() 106 struct ufs_sb_info *sbi = UFS_SB(sb); in unlock_ufs() local 108 sbi->mutex_owner = NULL; in unlock_ufs() 109 mutex_unlock(&sbi->mutex); in unlock_ufs() 485 struct ufs_sb_info *sbi = UFS_SB(sb); in ufs_setup_cstotal() local 486 struct ufs_sb_private_info *uspi = sbi->s_uspi; in ufs_setup_cstotal() 490 unsigned mtype = sbi->s_mount_opt & UFS_MOUNT_UFSTYPE; in ufs_setup_cstotal() 519 struct ufs_sb_info *sbi = UFS_SB(sb); in ufs_read_cylinder_structures() local [all …]
|
D | ialloc.c | 174 struct ufs_sb_info * sbi; in ufs_new_inode() local 193 sbi = UFS_SB(sb); in ufs_new_inode() 194 uspi = sbi->s_uspi; in ufs_new_inode() 196 mutex_lock(&sbi->s_lock); in ufs_new_inode() 202 if (sbi->fs_cs(i).cs_nifree) { in ufs_new_inode() 214 if (sbi->fs_cs(i).cs_nifree) { in ufs_new_inode() 228 if (sbi->fs_cs(i).cs_nifree) { in ufs_new_inode() 276 fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1); in ufs_new_inode() 281 fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1); in ufs_new_inode() 334 mutex_unlock(&sbi->s_lock); in ufs_new_inode() [all …]
|
D | inode.c | 418 struct ufs_sb_info * sbi = UFS_SB(sb); in ufs_getfrag_block() local 419 struct ufs_sb_private_info * uspi = sbi->s_uspi; in ufs_getfrag_block() 424 bool needs_lock = (sbi->mutex_owner != current); in ufs_getfrag_block()
|
/linux-4.1.27/fs/omfs/ |
D | inode.c | 24 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_bread() local 25 if (block >= sbi->s_num_blocks) in omfs_bread() 28 return sb_bread(sb, clus_to_blk(sbi, block)); in omfs_bread() 37 struct omfs_sb_info *sbi = OMFS_SB(dir->i_sb); in omfs_new_inode() local 43 err = omfs_allocate_range(dir->i_sb, sbi->s_mirrors, sbi->s_mirrors, in omfs_new_inode() 57 inode->i_size = sbi->s_sys_blocksize; in omfs_new_inode() 103 struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); in __omfs_write_inode() local 128 oi->i_head.h_body_size = cpu_to_be32(sbi->s_sys_blocksize - in __omfs_write_inode() 149 for (i = 1; i < sbi->s_mirrors; i++) { in __omfs_write_inode() 202 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_iget() local [all …]
|
D | bitmap.c | 11 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_count_free() local 14 for (i = 0; i < sbi->s_imap_size; i++) in omfs_count_free() 15 sum += nbits - bitmap_weight(sbi->s_imap[i], nbits); in omfs_count_free() 53 struct omfs_sb_info *sbi = OMFS_SB(sb); in set_run() local 56 bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); in set_run() 68 clus_to_blk(sbi, sbi->s_bitmap_ino) + map); in set_run() 73 set_bit(bit, sbi->s_imap[map]); in set_run() 76 clear_bit(bit, sbi->s_imap[map]); in set_run() 93 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_allocate_block() local 103 mutex_lock(&sbi->s_bitmap_lock); in omfs_allocate_block() [all …]
|
D | file.c | 13 static u32 omfs_max_extents(struct omfs_sb_info *sbi, int offset) in omfs_max_extents() argument 15 return (sbi->s_sys_blocksize - offset - in omfs_max_extents() 33 struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); in omfs_shrink_inode() local 57 max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START); in omfs_shrink_inode() 61 if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next)) in omfs_shrink_inode() 87 omfs_clear_range(inode->i_sb, last, sbi->s_mirrors); in omfs_shrink_inode() 96 max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT); in omfs_shrink_inode() 121 struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb); in omfs_grow_extent() local 155 max_count = omfs_max_extents(sbi, OMFS_EXTENT_START); in omfs_grow_extent() 162 ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize, in omfs_grow_extent() [all …]
|
D | omfs.h | 29 static inline sector_t clus_to_blk(struct omfs_sb_info *sbi, sector_t block) in clus_to_blk() argument 31 return block << sbi->s_block_shift; in clus_to_blk() 50 extern int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header,
|
D | dir.c | 86 struct omfs_sb_info *sbi = OMFS_SB(sb); in omfs_make_empty() local 98 sbi->s_sys_blocksize - OMFS_DIR_START); in omfs_make_empty() 316 int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header, in omfs_is_bad() argument 321 is_bad = ((ino != fsblock) || (ino < sbi->s_root_ino) || in omfs_is_bad() 322 (ino > sbi->s_num_blocks)); in omfs_is_bad()
|
/linux-4.1.27/fs/hpfs/ |
D | super.c | 112 static void free_sbi(struct hpfs_sb_info *sbi) in free_sbi() argument 114 kfree(sbi->sb_cp_table); in free_sbi() 115 kfree(sbi->sb_bmp_dir); in free_sbi() 116 kfree(sbi); in free_sbi() 166 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_get_free_dnodes() local 167 if (sbi->sb_n_free_dnodes == (unsigned)-1) { in hpfs_get_free_dnodes() 168 unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap); in hpfs_get_free_dnodes() 171 sbi->sb_n_free_dnodes = c; in hpfs_get_free_dnodes() 173 return sbi->sb_n_free_dnodes; in hpfs_get_free_dnodes() 179 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_statfs() local [all …]
|
D | alloc.c | 13 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_claim_alloc() local 14 if (sbi->sb_n_free != (unsigned)-1) { in hpfs_claim_alloc() 15 if (unlikely(!sbi->sb_n_free)) { in hpfs_claim_alloc() 17 sbi->sb_n_free = -1; in hpfs_claim_alloc() 20 sbi->sb_n_free--; in hpfs_claim_alloc() 26 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_claim_free() local 27 if (sbi->sb_n_free != (unsigned)-1) { in hpfs_claim_free() 28 if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) { in hpfs_claim_free() 30 sbi->sb_n_free = -1; in hpfs_claim_free() 33 sbi->sb_n_free++; in hpfs_claim_free() [all …]
|
D | hpfs_fn.h | 349 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_lock() local 350 mutex_lock(&sbi->hpfs_mutex); in hpfs_lock() 355 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_unlock() local 356 mutex_unlock(&sbi->hpfs_mutex); in hpfs_unlock() 361 struct hpfs_sb_info *sbi = hpfs_sb(s); in hpfs_lock_assert() local 362 WARN_ON(!mutex_is_locked(&sbi->hpfs_mutex)); in hpfs_lock_assert()
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
D | lproc_llite.c | 176 struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)m->private); in ll_client_type_seq_show() local 178 LASSERT(sbi != NULL); in ll_client_type_seq_show() 180 if (sbi->ll_flags & LL_SBI_RMT_CLIENT) in ll_client_type_seq_show() 224 struct ll_sb_info *sbi = ll_s2sbi(sb); in ll_max_readahead_mb_seq_show() local 228 spin_lock(&sbi->ll_lock); in ll_max_readahead_mb_seq_show() 229 pages_number = sbi->ll_ra_info.ra_max_pages; in ll_max_readahead_mb_seq_show() 230 spin_unlock(&sbi->ll_lock); in ll_max_readahead_mb_seq_show() 241 struct ll_sb_info *sbi = ll_s2sbi(sb); in ll_max_readahead_mb_seq_write() local 255 spin_lock(&sbi->ll_lock); in ll_max_readahead_mb_seq_write() 256 sbi->ll_ra_info.ra_max_pages = pages_number; in ll_max_readahead_mb_seq_write() [all …]
|
D | llite_lib.c | 71 struct ll_sb_info *sbi = NULL; in ll_init_sbi() local 78 sbi = kzalloc(sizeof(*sbi), GFP_NOFS); in ll_init_sbi() 79 if (!sbi) in ll_init_sbi() 82 spin_lock_init(&sbi->ll_lock); in ll_init_sbi() 83 mutex_init(&sbi->ll_lco.lco_lock); in ll_init_sbi() 84 spin_lock_init(&sbi->ll_pp_extent_lock); in ll_init_sbi() 85 spin_lock_init(&sbi->ll_process_lock); in ll_init_sbi() 86 sbi->ll_rw_stats_on = 0; in ll_init_sbi() 96 atomic_set(&sbi->ll_cache.ccc_users, 0); in ll_init_sbi() 97 sbi->ll_cache.ccc_lru_max = lru_page_max; in ll_init_sbi() [all …]
|
D | vvp_dev.c | 199 struct ll_sb_info *sbi; in cl_sb_init() local 205 sbi = ll_s2sbi(sb); in cl_sb_init() 209 sbi->ll_dt_exp->exp_obd->obd_lu_dev); in cl_sb_init() 212 sbi->ll_cl = cl; in cl_sb_init() 213 sbi->ll_site = cl2lu_dev(cl)->ld_site; in cl_sb_init() 223 struct ll_sb_info *sbi; in cl_sb_fini() local 229 sbi = ll_s2sbi(sb); in cl_sb_fini() 232 cld = sbi->ll_cl; in cl_sb_fini() 236 sbi->ll_cl = NULL; in cl_sb_fini() 237 sbi->ll_site = NULL; in cl_sb_fini() [all …]
|
D | dir.c | 484 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_dir_read() local 486 int api32 = ll_need_32bit_api(sbi); in ll_dir_read() 487 int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH; in ll_dir_read() 601 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_readdir() local 602 int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH; in ll_readdir() 603 int api32 = ll_need_32bit_api(sbi); in ll_readdir() 634 ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1); in ll_readdir() 663 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_dir_setdirstripe() local 677 err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode, in ll_dir_setdirstripe() 692 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_dir_setstripe() local [all …]
|
D | xattr.c | 89 int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type) in xattr_type_filter() argument 93 !(sbi->ll_flags & LL_SBI_ACL)) in xattr_type_filter() 96 if (xattr_type == XATTR_USER_T && !(sbi->ll_flags & LL_SBI_USER_XATTR)) in xattr_type_filter() 111 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_setxattr_common() local 123 rc = xattr_type_filter(sbi, xattr_type); in ll_setxattr_common() 148 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && in ll_setxattr_common() 151 rce = rct_search(&sbi->ll_rct, current_pid()); in ll_setxattr_common() 160 ee = et_search_del(&sbi->ll_et, current_pid(), in ll_setxattr_common() 192 rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc, in ll_setxattr_common() 205 sbi->ll_flags &= ~LL_SBI_USER_XATTR; in ll_setxattr_common() [all …]
|
D | llite_nfs.c | 88 struct ll_sb_info *sbi = ll_s2sbi(sb); in search_inode_for_lustre() local 93 ll_need_32bit_api(sbi)); in search_inode_for_lustre() 103 rc = ll_get_default_mdsize(sbi, &eadatalen); in search_inode_for_lustre() 118 rc = md_getattr(sbi->ll_md_exp, op_data, &req); in search_inode_for_lustre() 286 struct ll_sb_info *sbi; in ll_get_parent() local 296 sbi = ll_s2sbi(dir->i_sb); in ll_get_parent() 301 rc = ll_get_default_mdsize(sbi, &lmmsize); in ll_get_parent() 311 rc = md_getattr_name(sbi->ll_md_exp, op_data, &req); in ll_get_parent()
|
D | llite_internal.h | 657 static inline int ll_need_32bit_api(struct ll_sb_info *sbi) in ll_need_32bit_api() argument 662 return unlikely(is_compat_task() || (sbi->ll_flags & LL_SBI_32BIT_API)); in ll_need_32bit_api() 664 return unlikely(sbi->ll_flags & LL_SBI_32BIT_API); in ll_need_32bit_api() 676 void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi); 677 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count); 679 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid, 685 static inline void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi) {} in lprocfs_unregister_mountpoint() argument 687 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count) {} in ll_stats_ops_tally() argument 692 static inline void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid, in ll_rw_stats_tally() argument 748 int ll_glimpse_ioctl(struct ll_sb_info *sbi, [all …]
|
D | dcache.c | 123 struct ll_sb_info *sbi = ll_i2sbi(inode); in find_cbdata() local 128 rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode), in find_cbdata() 137 rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL); in find_cbdata() 293 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_lookup_finish_locks() local 297 ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); in ll_lookup_finish_locks()
|
D | rw.c | 288 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which); 309 static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, in ll_ra_count_get() argument 313 struct ll_ra_info *ra = &sbi->ll_ra_info; in ll_ra_count_get() 351 void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len) in ll_ra_count_put() argument 353 struct ll_ra_info *ra = &sbi->ll_ra_info; in ll_ra_count_put() 357 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which) in ll_ra_stats_inc_sbi() argument 360 lprocfs_counter_incr(sbi->ll_ra_stats, which); in ll_ra_stats_inc_sbi() 365 struct ll_sb_info *sbi = ll_i2sbi(mapping->host); in ll_ra_stats_inc() local 366 ll_ra_stats_inc_sbi(sbi, which); in ll_ra_stats_inc() 974 void ras_update(struct ll_sb_info *sbi, struct inode *inode, in ras_update() argument [all …]
|
D | file.c | 337 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_file_release() local 345 if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) { in ll_file_release() 351 rct_del(&sbi->ll_rct, current_pid()); in ll_file_release() 352 et_search_free(&sbi->ll_et, current_pid()); in ll_file_release() 358 ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1); in ll_file_release() 380 rc = ll_md_close(sbi->ll_md_exp, inode, file); in ll_file_release() 392 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_intent_file_open() local 423 rc = md_intent_lock(sbi->ll_md_exp, op_data, lmm, lmmsize, itp, in ll_intent_file_open() 450 ll_set_lock_data(sbi->ll_md_exp, inode, itp, NULL); in ll_intent_file_open() 749 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_lease_open() local [all …]
|
D | xattr_cache.c | 291 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_xattr_find_get_lock() local 292 struct obd_export *exp = sbi->ll_md_exp; in ll_xattr_find_get_lock() 354 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_xattr_cache_refill() local 370 ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1); in ll_xattr_cache_refill() 450 ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL); in ll_xattr_cache_refill()
|
D | symlink.c | 49 struct ll_sb_info *sbi = ll_i2sbi(inode); in ll_readlink_internal() local 75 rc = md_getattr(sbi->ll_md_exp, op_data, request); in ll_readlink_internal()
|
D | namei.c | 679 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_create_node() local 700 ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); in ll_create_node() 767 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_new_node() local 783 err = md_create(sbi->ll_md_exp, op_data, tgt, tgt_len, mode, in ll_new_node() 1080 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_link() local 1096 err = md_link(sbi->ll_md_exp, op_data, &request); in ll_link() 1102 ll_stats_ops_tally(sbi, LPROC_LL_LINK, 1); in ll_link() 1112 struct ll_sb_info *sbi = ll_i2sbi(old_dir); in ll_rename() local 1128 err = md_rename(sbi->ll_md_exp, op_data, in ll_rename() 1137 ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1); in ll_rename()
|
D | vvp_io.c | 823 struct ll_sb_info *sbi = ll_i2sbi(inode); in vvp_io_read_page() local 833 if (sbi->ll_ra_info.ra_max_pages_per_file && in vvp_io_read_page() 834 sbi->ll_ra_info.ra_max_pages) in vvp_io_read_page() 835 ras_update(sbi, inode, ras, page->cp_index, in vvp_io_read_page() 857 if (sbi->ll_ra_info.ra_max_pages_per_file && in vvp_io_read_page() 858 sbi->ll_ra_info.ra_max_pages) in vvp_io_read_page() 979 struct ll_sb_info *sbi = ll_i2sbi(inode); in vvp_io_commit_write() local 1057 ll_stats_ops_tally(sbi, tallyop, 1); in vvp_io_commit_write()
|
D | remote_perm.c | 249 struct ll_sb_info *sbi = ll_i2sbi(inode); in lustre_check_remote_perm() local 280 rc = md_get_remote_perm(sbi->ll_md_exp, ll_inode2fid(inode), oc, in lustre_check_remote_perm()
|
D | statahead.c | 974 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_agl_thread() local 983 atomic_inc(&sbi->ll_agl_total); in ll_agl_thread() 1065 struct ll_sb_info *sbi = ll_i2sbi(dir); in ll_statahead_thread() local 1080 if (sbi->ll_flags & LL_SBI_AGL_ENABLED) in ll_statahead_thread() 1083 atomic_inc(&sbi->ll_sa_total); in ll_statahead_thread() 1482 struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode); in ll_sai_unplug() local 1494 sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max); in ll_sai_unplug() 1501 atomic_inc(&sbi->ll_sa_wrong); in ll_sai_unplug()
|
D | llite_capa.c | 645 void ll_print_capa_stat(struct ll_sb_info *sbi) in ll_print_capa_stat() argument 647 if (sbi->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA)) in ll_print_capa_stat()
|
/linux-4.1.27/fs/minix/ |
D | inode.c | 43 struct minix_sb_info *sbi = minix_sb(sb); in minix_put_super() local 46 if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */ in minix_put_super() 47 sbi->s_ms->s_state = sbi->s_mount_state; in minix_put_super() 48 mark_buffer_dirty(sbi->s_sbh); in minix_put_super() 50 for (i = 0; i < sbi->s_imap_blocks; i++) in minix_put_super() 51 brelse(sbi->s_imap[i]); in minix_put_super() 52 for (i = 0; i < sbi->s_zmap_blocks; i++) in minix_put_super() 53 brelse(sbi->s_zmap[i]); in minix_put_super() 54 brelse (sbi->s_sbh); in minix_put_super() 55 kfree(sbi->s_imap); in minix_put_super() [all …]
|
D | bitmap.c | 44 struct minix_sb_info *sbi = minix_sb(sb); in minix_free_block() local 49 if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) { in minix_free_block() 53 zone = block - sbi->s_firstdatazone + 1; in minix_free_block() 56 if (zone >= sbi->s_zmap_blocks) { in minix_free_block() 60 bh = sbi->s_zmap[zone]; in minix_free_block() 72 struct minix_sb_info *sbi = minix_sb(inode->i_sb); in minix_new_block() local 76 for (i = 0; i < sbi->s_zmap_blocks; i++) { in minix_new_block() 77 struct buffer_head *bh = sbi->s_zmap[i]; in minix_new_block() 86 j += i * bits_per_zone + sbi->s_firstdatazone-1; in minix_new_block() 87 if (j < sbi->s_firstdatazone || j >= sbi->s_nzones) in minix_new_block() [all …]
|
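The bitmap.c excerpt above (minix_free_block) turns a block number into a zone-bitmap position: subtract s_firstdatazone, add one, then split the result into a zmap buffer index and a bit within that buffer. A worked standalone model of that indexing follows; the geometry is made up.

/* Worked model of the zone-bitmap indexing: zone / bits_per_zone picks the
 * zmap block, zone % bits_per_zone the bit inside it. */
#include <stdio.h>

int main(void)
{
        unsigned long firstdatazone = 100;
        unsigned long bits_per_zone = 1024 * 8;   /* one 1024-byte bitmap block */

        unsigned long block = 9000;
        unsigned long zone = block - firstdatazone + 1;   /* 8901 */
        unsigned long zmap_index = zone / bits_per_zone;  /* 1 */
        unsigned long bit = zone % bits_per_zone;         /* 709 */

        printf("zone=%lu -> zmap block %lu, bit %lu\n", zone, zmap_index, bit);
        return 0;
}
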
D | dir.c | 80 static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi) in minix_next_entry() argument 82 return (void*)((char*)de + sbi->s_dirsize); in minix_next_entry() 89 struct minix_sb_info *sbi = minix_sb(sb); in minix_readdir() local 90 unsigned chunk_size = sbi->s_dirsize; in minix_readdir() 112 for ( ; p <= limit; p = minix_next_entry(p, sbi)) { in minix_readdir() 115 if (sbi->s_version == MINIX_V3) { in minix_readdir() 125 unsigned l = strnlen(name, sbi->s_namelen); in minix_readdir() 161 struct minix_sb_info * sbi = minix_sb(sb); in minix_find_entry() local 179 limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize; in minix_find_entry() 180 for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { in minix_find_entry() [all …]
|
/linux-4.1.27/fs/jfs/ |
D | jfs_mount.c | 84 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_mount() local 104 sbi->ipaimap = ipaimap; in jfs_mount() 127 sbi->ipbmap = ipbmap; in jfs_mount() 148 if ((sbi->mntflag & JFS_BAD_SAIT) == 0) { in jfs_mount() 155 sbi->ipaimap2 = ipaimap2; in jfs_mount() 169 sbi->ipaimap2 = NULL; in jfs_mount() 187 sbi->ipimap = ipimap; in jfs_mount() 241 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_mount_rw() local 250 if (chkSuper(sb) || (sbi->state != FM_CLEAN)) in jfs_mount_rw() 253 truncate_inode_pages(sbi->ipimap->i_mapping, 0); in jfs_mount_rw() [all …]
|
D | jfs_umount.c | 52 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_umount() local 53 struct inode *ipbmap = sbi->ipbmap; in jfs_umount() 54 struct inode *ipimap = sbi->ipimap; in jfs_umount() 55 struct inode *ipaimap = sbi->ipaimap; in jfs_umount() 56 struct inode *ipaimap2 = sbi->ipaimap2; in jfs_umount() 67 if ((log = sbi->log)) in jfs_umount() 79 sbi->ipimap = NULL; in jfs_umount() 84 ipaimap2 = sbi->ipaimap2; in jfs_umount() 88 sbi->ipaimap2 = NULL; in jfs_umount() 94 ipaimap = sbi->ipaimap; in jfs_umount() [all …]
|
D | super.c | 76 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_handle_error() local 83 if (sbi->flag & JFS_ERR_PANIC) in jfs_handle_error() 86 else if (sbi->flag & JFS_ERR_REMOUNT_RO) { in jfs_handle_error() 151 struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb); in jfs_statfs() local 153 struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap; in jfs_statfs() 157 buf->f_bsize = sbi->bsize; in jfs_statfs() 158 buf->f_blocks = sbi->bmap->db_mapsize; in jfs_statfs() 159 buf->f_bfree = sbi->bmap->db_nfree; in jfs_statfs() 160 buf->f_bavail = sbi->bmap->db_nfree; in jfs_statfs() 171 ((sbi->bmap->db_nfree >> imap->im_l2nbperiext) in jfs_statfs() [all …]
|
D | resize.c | 65 struct jfs_sb_info *sbi = JFS_SBI(sb); in jfs_extendfs() local 66 struct inode *ipbmap = sbi->ipbmap; in jfs_extendfs() 68 struct inode *ipimap = sbi->ipimap; in jfs_extendfs() 69 struct jfs_log *log = sbi->log; in jfs_extendfs() 70 struct bmap *bmp = sbi->bmap; in jfs_extendfs() 89 if (sbi->mntflag & JFS_INLINELOG) in jfs_extendfs() 90 oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd); in jfs_extendfs() 92 oldLVSize = addressPXD(&sbi->fsckpxd) + in jfs_extendfs() 93 lengthPXD(&sbi->fsckpxd); in jfs_extendfs() 138 if ((sbi->mntflag & JFS_INLINELOG)) { in jfs_extendfs() [all …]
|
D | jfs_imap.c | 306 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); in diRead() local 321 ipimap = sbi->ipimap; in diRead() 352 blkno = INOPBLK(&iagp->inoext[extno], ino, sbi->l2nbperpage); in diRead() 360 pageno = blkno >> sbi->l2nbperpage; in diRead() 362 if ((block_offset = ((u32) blkno & (sbi->nbperpage - 1)))) { in diRead() 367 (sbi->nbperpage - block_offset) << sbi->l2niperblk; in diRead() 370 rel_inode += block_offset << sbi->l2niperblk; in diRead() 378 mp = read_metapage(ipimap, pageno << sbi->l2nbperpage, PSIZE, 1); in diRead() 431 struct jfs_sb_info *sbi = JFS_SBI(sb); in diReadSpecial() local 444 address = addressPXD(&sbi->ait2) >> sbi->l2nbperpage; in diReadSpecial() [all …]
|
D | jfs_logmgr.c | 180 static int lmLogFileSystem(struct jfs_log * log, struct jfs_sb_info *sbi, 217 struct jfs_sb_info *sbi; in write_special_inodes() local 219 list_for_each_entry(sbi, &log->sb_list, log_list) { in write_special_inodes() 220 writer(sbi->ipbmap->i_mapping); in write_special_inodes() 221 writer(sbi->ipimap->i_mapping); in write_special_inodes() 222 writer(sbi->direct_inode->i_mapping); in write_special_inodes() 1084 struct jfs_sb_info *sbi = JFS_SBI(sb); in lmLogOpen() local 1086 if (sbi->flag & JFS_NOINTEGRITY) in lmLogOpen() 1089 if (sbi->mntflag & JFS_INLINELOG) in lmLogOpen() 1094 if (log->bdev->bd_dev == sbi->logdev) { in lmLogOpen() [all …]
|
D | jfs_extent.c | 88 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); in extAlloc() local 104 xoff = pno << sbi->l2nbperpage; in extAlloc() 512 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); in extBalloc() local 514 int rc, nbperpage = sbi->nbperpage; in extBalloc() 515 struct bmap *bmp = sbi->bmap; in extBalloc() 550 ag = BLKTOAG(daddr, sbi); in extBalloc()
|
D | xattr.c | 221 struct jfs_sb_info *sbi = JFS_SBI(sb); in ea_write() local 261 for (i = 0; i < nblocks; i += sbi->nbperpage) { in ea_write() 370 struct jfs_sb_info *sbi = JFS_SBI(sb); in ea_read() local 393 nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage; in ea_read() 394 blkno = addressDXD(&ji->ea) << sbi->l2nbperpage; in ea_read() 401 for (i = 0; i < nblocks; i += sbi->nbperpage) { in ea_read()
|
D | jfs_dmap.h | 134 #define BLKTOAG(b,sbi) ((b) >> ((sbi)->bmap->db_agl2size)) argument
|
D | jfs_dtree.c | 337 struct jfs_sb_info *sbi = JFS_SBI(sb); in add_index() local 385 if (dquot_alloc_block(ip, sbi->nbperpage)) in add_index() 387 if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) { in add_index() 388 dquot_free_block(ip, sbi->nbperpage); in add_index() 406 if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) { in add_index() 411 dbFree(ip, xaddr, sbi->nbperpage); in add_index() 412 dquot_free_block(ip, sbi->nbperpage); in add_index() 447 blkno = ((offset + 1) >> L2PSIZE) << sbi->l2nbperpage; in add_index() 453 if (xtInsert(tid, ip, 0, blkno, sbi->nbperpage, &xaddr, 0)) { in add_index() 933 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); in dtSplitUp() local [all …]
|
D | jfs_dmap.c | 1028 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); in dbExtend() local 1034 struct inode *ipbmap = sbi->ipbmap; in dbExtend() 1040 if (((rel_block = blkno & (sbi->nbperpage - 1))) && in dbExtend() 1041 (rel_block + nblocks + addnblocks > sbi->nbperpage)) in dbExtend() 1055 bmp = sbi->bmap; in dbExtend() 3386 struct jfs_sb_info *sbi = JFS_SBI(ipbmap->i_sb); in dbExtendFS() local 3387 int nbperpage = sbi->nbperpage; in dbExtendFS() 3395 struct bmap *bmp = sbi->bmap; in dbExtendFS() 3478 p = BLKTOL1(blkno, sbi->l2nbperpage); /* L1 page */ in dbExtendFS() 3495 p = BLKTOL0(blkno, sbi->l2nbperpage); in dbExtendFS() [all …]
|
D | jfs_txnmgr.c | 2752 struct jfs_sb_info *sbi; in jfs_lazycommit() local 2762 sbi = JFS_SBI(tblk->sb); in jfs_lazycommit() 2769 if (sbi->commit_state & IN_LAZYCOMMIT) in jfs_lazycommit() 2772 sbi->commit_state |= IN_LAZYCOMMIT; in jfs_lazycommit() 2784 sbi->commit_state &= ~IN_LAZYCOMMIT; in jfs_lazycommit()
|
/linux-4.1.27/fs/exofs/ |
D | super.c | 266 static int __sbi_read_stats(struct exofs_sb_info *sbi) in __sbi_read_stats() argument 274 ret = ore_get_io_state(&sbi->layout, &sbi->oc, &ios); in __sbi_read_stats() 305 sbi->s_nextid = le64_to_cpu(ess->s_nextid); in __sbi_read_stats() 306 sbi->s_numfiles = le32_to_cpu(ess->s_numfiles); in __sbi_read_stats() 321 int exofs_sbi_write_stats(struct exofs_sb_info *sbi) in exofs_sbi_write_stats() argument 329 ret = ore_get_io_state(&sbi->layout, &sbi->oc, &ios); in exofs_sbi_write_stats() 335 sbi->s_ess.s_nextid = cpu_to_le64(sbi->s_nextid); in exofs_sbi_write_stats() 336 sbi->s_ess.s_numfiles = cpu_to_le64(sbi->s_numfiles); in exofs_sbi_write_stats() 337 attrs[0].val_ptr = &sbi->s_ess; in exofs_sbi_write_stats() 341 ios->private = sbi; in exofs_sbi_write_stats() [all …]
|
D | exofs.h | 181 int exofs_sbi_write_stats(struct exofs_sb_info *sbi); 186 int exofs_sysfs_sb_add(struct exofs_sb_info *sbi, 188 void exofs_sysfs_sb_del(struct exofs_sb_info *sbi); 190 struct exofs_sb_info *sbi); 226 struct exofs_sb_info *sbi, osd_id oid) in exofs_init_comps() argument 230 one_comp->obj.partition = sbi->one_comp.obj.partition; in exofs_init_comps() 235 oc->numdevs = sbi->layout.group_width * sbi->layout.mirrors_p1 * in exofs_init_comps() 236 sbi->layout.group_count; in exofs_init_comps() 241 first_dev = (dev_mod * sbi->layout.mirrors_p1) % sbi->oc.numdevs; in exofs_init_comps() 242 oc->ods = &sbi->oc.ods[first_dev]; in exofs_init_comps()
|
D | inode.c | 50 struct exofs_sb_info *sbi; member 69 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; in _pcol_init() local 71 pcol->sbi = sbi; in _pcol_init() 102 exofs_max_io_pages(&pcol->sbi->layout, ~0); in _pcol_reset() 110 pages = exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages); in pcol_try_alloc() 248 atomic_dec(&pcol->sbi->s_curr_pending); in readpages_done() 319 int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true, in read_exec() 359 atomic_inc(&pcol->sbi->s_curr_pending); in read_exec() 528 atomic_dec(&pcol->sbi->s_curr_pending); in writepages_done() 638 ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false, in write_exec() [all …]
|
D | sys.c | 134 void exofs_sysfs_sb_del(struct exofs_sb_info *sbi) in exofs_sysfs_sb_del() argument 137 struct kobject *s_kobj = &sbi->s_kobj; in exofs_sysfs_sb_del() 152 int exofs_sysfs_sb_add(struct exofs_sb_info *sbi, in exofs_sysfs_sb_add() argument 157 uint64_t pid = sbi->one_comp.obj.partition; in exofs_sysfs_sb_add() 160 s_kobj = &sbi->s_kobj; in exofs_sysfs_sb_add() 172 int exofs_sysfs_odev_add(struct exofs_dev *edev, struct exofs_sb_info *sbi) in exofs_sysfs_odev_add() argument 183 &sbi->s_kobj, "dev%u", edev->did); in exofs_sysfs_odev_add()
|
D | dir.c | 439 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; in exofs_add_link() local 514 sbi->s_numfiles++; in exofs_add_link() 529 struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; in exofs_delete_entry() local 565 sbi->s_numfiles--; in exofs_delete_entry()
|
/linux-4.1.27/fs/fat/ |
D | inode.c | 115 struct msdos_sb_info *sbi = MSDOS_SB(sb); in __fat_get_block() local 137 offset = (unsigned long)iblock & (sbi->sec_per_clus - 1); in __fat_get_block() 145 mapped_blocks = sbi->sec_per_clus - offset; in __fat_get_block() 336 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_hash_init() local 339 spin_lock_init(&sbi->inode_hash_lock); in fat_hash_init() 341 INIT_HLIST_HEAD(&sbi->inode_hashtable[i]); in fat_hash_init() 351 struct msdos_sb_info *sbi = MSDOS_SB(sb); in dir_hash_init() local 354 spin_lock_init(&sbi->dir_hash_lock); in dir_hash_init() 356 INIT_HLIST_HEAD(&sbi->dir_hashtable[i]); in dir_hash_init() 361 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_attach() local [all …]
|
D | fatent.c | 24 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat12_ent_blocknr() local 26 WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); in fat12_ent_blocknr() 28 *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); in fat12_ent_blocknr() 34 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_ent_blocknr() local 35 int bytes = (entry << sbi->fatent_shift); in fat_ent_blocknr() 36 WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); in fat_ent_blocknr() 38 *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); in fat_ent_blocknr() 276 static inline void lock_fat(struct msdos_sb_info *sbi) in lock_fat() argument 278 mutex_lock(&sbi->fat_lock); in lock_fat() 281 static inline void unlock_fat(struct msdos_sb_info *sbi) in unlock_fat() argument [all …]
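fat_ent_blocknr() above locates the block that holds a given FAT entry: the entry index shifted by fatent_shift gives a byte offset into the FAT, and that offset divided by the block size is added to fat_start. A hedged user-space model of that calculation follows; the geometry values are invented, and the in-block offset handling of the full function is left out.

/* Sketch of the entry number -> FAT block calculation; all geometry is assumed. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int fatent_shift     = 2;    /* FAT32: 4 bytes per entry */
	unsigned int s_blocksize_bits = 9;    /* 512-byte blocks */
	uint64_t     fat_start        = 32;   /* first block of the FAT (assumed) */
	unsigned int entry            = 1000; /* cluster / FAT entry number */

	uint64_t bytes   = (uint64_t)entry << fatent_shift;
	uint64_t blocknr = fat_start + (bytes >> s_blocksize_bits);

	printf("FAT entry %u lives in block %llu\n",
	       entry, (unsigned long long)blocknr);
	return 0;
}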
|
D | misc.c | 62 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_clusters_flush() local 66 if (sbi->fat_bits != 32) in fat_clusters_flush() 69 bh = sb_bread(sb, sbi->fsinfo_sector); in fat_clusters_flush() 82 sbi->fsinfo_sector); in fat_clusters_flush() 84 if (sbi->free_clusters != -1) in fat_clusters_flush() 85 fsinfo->free_clusters = cpu_to_le32(sbi->free_clusters); in fat_clusters_flush() 86 if (sbi->prev_free != -1) in fat_clusters_flush() 87 fsinfo->next_cluster = cpu_to_le32(sbi->prev_free); in fat_clusters_flush() 102 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_chain_add() local 152 if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) { in fat_chain_add() [all …]
|
D | file.c | 32 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_ioctl_set_attributes() local 63 ia.ia_mode = fat_make_mode(sbi, attr, S_IRWXUGO); in fat_ioctl_set_attributes() 65 ia.ia_mode = fat_make_mode(sbi, attr, in fat_ioctl_set_attributes() 75 if (sbi->options.sys_immutable && in fat_ioctl_set_attributes() 97 if (sbi->options.sys_immutable) { in fat_ioctl_set_attributes() 115 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_ioctl_get_volume_id() local 116 return put_user(sbi->vol_id, user_attr); in fat_ioctl_get_volume_id() 289 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_truncate_blocks() local 290 const unsigned int cluster_size = sbi->cluster_size; in fat_truncate_blocks() 300 nr_clusters = (offset + (cluster_size - 1)) >> sbi->cluster_bits; in fat_truncate_blocks() [all …]
|
D | fat.h | 158 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_mode_can_hold_ro() local 162 if (!sbi->options.rodir) in fat_mode_can_hold_ro() 164 mask = ~sbi->options.fs_dmask; in fat_mode_can_hold_ro() 166 mask = ~sbi->options.fs_fmask; in fat_mode_can_hold_ro() 174 static inline umode_t fat_make_mode(struct msdos_sb_info *sbi, in fat_make_mode() argument 177 if (attrs & ATTR_RO && !((attrs & ATTR_DIR) && !sbi->options.rodir)) in fat_make_mode() 181 return (mode & ~sbi->options.fs_dmask) | S_IFDIR; in fat_make_mode() 183 return (mode & ~sbi->options.fs_fmask) | S_IFREG; in fat_make_mode() 216 static inline sector_t fat_clus_to_blknr(struct msdos_sb_info *sbi, int clus) in fat_clus_to_blknr() argument 218 return ((sector_t)clus - FAT_START_ENT) * sbi->sec_per_clus in fat_clus_to_blknr() [all …]
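fat_make_mode() above derives a POSIX mode from a FAT attribute byte: the read-only attribute strips the write bits (for directories only when the rodir option allows it), and then the per-mount fs_dmask or fs_fmask is applied. The sketch below is a user-space model of that masking only; the attribute bit values, the make_mode() helper name, and the sample umasks are assumptions for illustration.

/* User-space model of the attribute -> mode masking; option values are assumed. */
#include <stdio.h>
#include <sys/stat.h>

#define ATTR_RO_BIT  0x01   /* FAT read-only attribute */
#define ATTR_DIR_BIT 0x10   /* FAT directory attribute */

static mode_t make_mode(unsigned attrs, mode_t mode,
			mode_t fs_fmask, mode_t fs_dmask, int rodir)
{
	if ((attrs & ATTR_RO_BIT) && !((attrs & ATTR_DIR_BIT) && !rodir))
		mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);

	if (attrs & ATTR_DIR_BIT)
		return (mode & ~fs_dmask) | S_IFDIR;
	return (mode & ~fs_fmask) | S_IFREG;
}

int main(void)
{
	/* read-only regular file, fmask 022, dmask 022, rodir off */
	mode_t m = make_mode(ATTR_RO_BIT, 0777, 0022, 0022, 0);
	printf("resulting mode: %o\n", (unsigned)m);   /* expect 100555 */
	return 0;
}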
|
D | nfs.c | 34 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_dget() local 39 head = sbi->dir_hashtable + fat_dir_hash(i_logstart); in fat_dget() 40 spin_lock(&sbi->dir_hash_lock); in fat_dget() 49 spin_unlock(&sbi->dir_hash_lock); in fat_dget() 113 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); in fat_encode_fh_nostale() local 130 i_pos = fat_i_pos_read(sbi, inode); in fat_encode_fh_nostale() 136 i_pos = fat_i_pos_read(sbi, parent); in fat_encode_fh_nostale() 230 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_rebuild_parent() local 231 sector_t blknr = fat_clus_to_blknr(sbi, parent_logstart); in fat_rebuild_parent() 240 clus_to_match = fat_get_start(sbi, &de[0]); in fat_rebuild_parent() [all …]
|
D | dir.c | 51 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_dir_readahead() local 56 if ((iblock & (sbi->sec_per_clus - 1)) || sbi->sec_per_clus == 1) in fat_dir_readahead() 59 if ((sbi->fat_bits != 32) && (dir->i_ino == MSDOS_ROOT_INO)) in fat_dir_readahead() 64 for (sec = 0; sec < sbi->sec_per_clus; sec++) in fat_dir_readahead() 184 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_uni_to_x8() local 185 if (sbi->options.utf8) in fat_uni_to_x8() 189 return uni16_to_x8(sb, buf, uni, size, sbi->nls_io); in fat_uni_to_x8() 254 static inline int fat_name_match(struct msdos_sb_info *sbi, in fat_name_match() argument 261 if (sbi->options.name_check != 's') in fat_name_match() 262 return !nls_strnicmp(sbi->nls_io, a, b, a_len); in fat_name_match() [all …]
|
D | cache.c | 308 struct msdos_sb_info *sbi = MSDOS_SB(sb); in fat_bmap() local 316 if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) { in fat_bmap() 317 if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) { in fat_bmap() 318 *phys = sector + sbi->dir_start; in fat_bmap() 339 cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); in fat_bmap() 340 offset = sector & (sbi->sec_per_clus - 1); in fat_bmap() 345 *phys = fat_clus_to_blknr(sbi, cluster) + offset; in fat_bmap() 346 *mapped_blocks = sbi->sec_per_clus - offset; in fat_bmap()
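fat_bmap() above splits a file-relative sector into a cluster index and an offset inside that cluster before translating it through fat_clus_to_blknr(). The small model below reproduces only the index arithmetic visible in the listing; the final translation to an absolute disk block depends on fields elided above and is left out, and the block/cluster sizes are invented.

/* Sketch of the sector -> (cluster, in-cluster offset) split; geometry is assumed. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int s_blocksize_bits = 9;   /* 512-byte sectors */
	unsigned int cluster_bits     = 12;  /* 4 KiB clusters (assumed) */
	unsigned int sec_per_clus     = 1u << (cluster_bits - s_blocksize_bits);

	uint64_t sector = 21;                /* file-relative sector */

	uint64_t cluster    = sector >> (cluster_bits - s_blocksize_bits);
	unsigned int offset = sector & (sec_per_clus - 1);
	unsigned int mapped = sec_per_clus - offset;

	printf("sector %llu -> cluster %llu, offset %u, %u sectors mappable\n",
	       (unsigned long long)sector, (unsigned long long)cluster,
	       offset, mapped);
	return 0;
}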
|
D | namei_msdos.c | 120 struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb); in msdos_find() local 124 err = msdos_format_name(name, len, msdos_name, &sbi->options); in msdos_find() 129 if (!err && sbi->options.dotsOK) { in msdos_find() 229 struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb); in msdos_add_entry() local 239 fat_time_unix2fat(sbi, ts, &time, &date, NULL); in msdos_add_entry()
|
D | namei_vfat.c | 583 struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb); in vfat_build_slots() local 584 struct fat_mount_options *opts = &sbi->options; in vfat_build_slots() 602 opts->unicode_xlate, opts->utf8, sbi->nls_io); in vfat_build_slots() 610 err = vfat_create_shortname(dir, sbi->nls_disk, uname, ulen, in vfat_build_slots() 644 fat_time_unix2fat(sbi, ts, &time, &date, &time_cs); in vfat_build_slots()
|
/linux-4.1.27/fs/ext3/ |
D | super.c | 382 static void ext3_blkdev_remove(struct ext3_sb_info *sbi) in ext3_blkdev_remove() argument 385 bdev = sbi->journal_bdev; in ext3_blkdev_remove() 388 sbi->journal_bdev = NULL; in ext3_blkdev_remove() 397 static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi) in dump_orphan_list() argument 402 le32_to_cpu(sbi->s_es->s_last_orphan)); in dump_orphan_list() 405 list_for_each(l, &sbi->s_orphan) { in dump_orphan_list() 417 struct ext3_sb_info *sbi = EXT3_SB(sb); in ext3_put_super() local 418 struct ext3_super_block *es = sbi->s_es; in ext3_put_super() 423 err = journal_destroy(sbi->s_journal); in ext3_put_super() 424 sbi->s_journal = NULL; in ext3_put_super() [all …]
|
D | ialloc.c | 90 struct ext3_sb_info *sbi; in ext3_free_inode() local 107 sbi = EXT3_SB(sb); in ext3_free_inode() 133 if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group), in ext3_free_inode() 145 spin_lock(sb_bgl_lock(sbi, block_group)); in ext3_free_inode() 149 spin_unlock(sb_bgl_lock(sbi, block_group)); in ext3_free_inode() 150 percpu_counter_inc(&sbi->s_freeinodes_counter); in ext3_free_inode() 152 percpu_counter_dec(&sbi->s_dirs_counter); in ext3_free_inode() 196 struct ext3_sb_info *sbi = EXT3_SB(sb); in find_group_orlov() local 197 int ngroups = sbi->s_groups_count; in find_group_orlov() 207 freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter); in find_group_orlov() [all …]
|
D | balloc.c | 65 struct ext3_sb_info *sbi = EXT3_SB(sb); in ext3_get_group_desc() local 67 if (block_group >= sbi->s_groups_count) { in ext3_get_group_desc() 71 block_group, sbi->s_groups_count); in ext3_get_group_desc() 79 if (!sbi->s_group_desc[group_desc]) { in ext3_get_group_desc() 87 desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data; in ext3_get_group_desc() 89 *bh = sbi->s_group_desc[group_desc]; in ext3_get_group_desc() 502 struct ext3_sb_info *sbi; in ext3_free_blocks_sb() local 507 sbi = EXT3_SB(sb); in ext3_free_blocks_sb() 508 es = sbi->s_es; in ext3_free_blocks_sb() 545 sbi->s_itb_per_group) || in ext3_free_blocks_sb() [all …]
|
D | resize.c | 23 struct ext3_sb_info *sbi = EXT3_SB(sb); in verify_group_input() local 24 struct ext3_super_block *es = sbi->s_es; in verify_group_input() 28 ext3_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; in verify_group_input() 38 input->blocks_count - 2 - overhead - sbi->s_itb_per_group; in verify_group_input() 47 if (group != sbi->s_groups_count) in verify_group_input() 50 input->group, sbi->s_groups_count); in verify_group_input() 191 struct ext3_sb_info *sbi = EXT3_SB(sb); in setup_new_group_blocks() local 194 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0; in setup_new_group_blocks() 209 mutex_lock(&sbi->s_resize_lock); in setup_new_group_blocks() 210 if (input->group != sbi->s_groups_count) { in setup_new_group_blocks() [all …]
|
D | namei.c | 2038 struct ext3_sb_info *sbi; in ext3_orphan_del() local 2049 sbi = EXT3_SB(inode->i_sb); in ext3_orphan_del() 2066 if (prev == &sbi->s_orphan) { in ext3_orphan_del() 2068 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); in ext3_orphan_del() 2069 err = ext3_journal_get_write_access(handle, sbi->s_sbh); in ext3_orphan_del() 2072 sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); in ext3_orphan_del() 2073 err = ext3_journal_dirty_metadata(handle, sbi->s_sbh); in ext3_orphan_del()
|
D | ext3.h | 684 sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group) in sb_bgl_lock() argument 686 return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group); in sb_bgl_lock()
|
/linux-4.1.27/fs/ext4/ |
D | super.c | 130 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_superblock_csum() local 134 csum = ext4_chksum(sbi, ~0, (char *)es, offset); in ext4_superblock_csum() 346 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_journal_commit_callback() local 351 spin_lock(&sbi->s_md_lock); in ext4_journal_commit_callback() 356 spin_unlock(&sbi->s_md_lock); in ext4_journal_commit_callback() 358 spin_lock(&sbi->s_md_lock); in ext4_journal_commit_callback() 360 spin_unlock(&sbi->s_md_lock); in ext4_journal_commit_callback() 740 static void ext4_blkdev_remove(struct ext4_sb_info *sbi) in ext4_blkdev_remove() argument 743 bdev = sbi->journal_bdev; in ext4_blkdev_remove() 746 sbi->journal_bdev = NULL; in ext4_blkdev_remove() [all …]
|
D | bitmap.c | 24 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_inode_bitmap_csum_verify() local 30 calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); in ext4_inode_bitmap_csum_verify() 31 if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) { in ext4_inode_bitmap_csum_verify() 45 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_inode_bitmap_csum_set() local 50 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); in ext4_inode_bitmap_csum_set() 52 if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) in ext4_inode_bitmap_csum_set() 62 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_block_bitmap_csum_verify() local 69 calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); in ext4_block_bitmap_csum_verify() 70 if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) { in ext4_block_bitmap_csum_verify() 88 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_block_bitmap_csum_set() local [all …]
|
D | block_validity.c | 56 static int add_system_zone(struct ext4_sb_info *sbi, in add_system_zone() argument 61 struct rb_node **n = &sbi->system_blks.rb_node, *node; in add_system_zone() 93 rb_insert_color(new_node, &sbi->system_blks); in add_system_zone() 103 rb_erase(node, &sbi->system_blks); in add_system_zone() 114 rb_erase(node, &sbi->system_blks); in add_system_zone() 121 static void debug_print_tree(struct ext4_sb_info *sbi) in debug_print_tree() argument 128 node = rb_first(&sbi->system_blks); in debug_print_tree() 142 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_setup_system_zone() local 145 int flex_size = ext4_flex_bg_size(sbi); in ext4_setup_system_zone() 159 add_system_zone(sbi, ext4_group_first_block_no(sb, i), in ext4_setup_system_zone() [all …]
|
D | balloc.c | 93 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_num_overhead_clusters() local 113 block_cluster = EXT4_B2C(sbi, in ext4_num_overhead_clusters() 124 inode_cluster = EXT4_B2C(sbi, in ext4_num_overhead_clusters() 135 for (i = 0; i < sbi->s_itb_per_group; i++) { in ext4_num_overhead_clusters() 137 c = EXT4_B2C(sbi, itbl_blk + i - start); in ext4_num_overhead_clusters() 184 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_init_block_bitmap() local 196 percpu_counter_sub(&sbi->s_freeclusters_counter, in ext4_init_block_bitmap() 202 percpu_counter_sub(&sbi->s_freeinodes_counter, in ext4_init_block_bitmap() 222 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); in ext4_init_block_bitmap() 226 ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); in ext4_init_block_bitmap() [all …]
|
D | extents_status.c | 149 static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, 301 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_es_list_add() local 306 spin_lock(&sbi->s_es_lock); in ext4_es_list_add() 308 list_add_tail(&ei->i_es_list, &sbi->s_es_list); in ext4_es_list_add() 309 sbi->s_es_nr_inode++; in ext4_es_list_add() 311 spin_unlock(&sbi->s_es_lock); in ext4_es_list_add() 317 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_es_list_del() local 319 spin_lock(&sbi->s_es_lock); in ext4_es_list_del() 322 sbi->s_es_nr_inode--; in ext4_es_list_del() 323 WARN_ON_ONCE(sbi->s_es_nr_inode < 0); in ext4_es_list_del() [all …]
|
D | mballoc.c | 667 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_mb_mark_free_simple() local 692 buddy + sbi->s_mb_offsets[min]); in ext4_mb_mark_free_simple() 725 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_mb_generate_buddy() local 764 percpu_counter_sub(&sbi->s_freeclusters_counter, in ext4_mb_generate_buddy() 1111 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_mb_load_buddy() local 1112 struct inode *inode = sbi->s_buddy_cache; in ext4_mb_load_buddy() 1442 struct ext4_sb_info *sbi = EXT4_SB(sb); in mb_free_blocks() local 1454 percpu_counter_sub(&sbi->s_freeclusters_counter, in mb_free_blocks() 1626 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_use_best_found() local 1657 spin_lock(&sbi->s_md_lock); in ext4_mb_use_best_found() [all …]
|
D | ialloc.c | 73 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_init_inode_bitmap() local 82 percpu_counter_sub(&sbi->s_freeclusters_counter, in ext4_init_inode_bitmap() 88 percpu_counter_sub(&sbi->s_freeinodes_counter, in ext4_init_inode_bitmap() 128 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_read_inode_bitmap() local 201 percpu_counter_sub(&sbi->s_freeinodes_counter, in ext4_read_inode_bitmap() 239 struct ext4_sb_info *sbi; in ext4_free_inode() local 259 sbi = EXT4_SB(sb); in ext4_free_inode() 315 percpu_counter_dec(&sbi->s_dirs_counter); in ext4_free_inode() 322 percpu_counter_inc(&sbi->s_freeinodes_counter); in ext4_free_inode() 323 if (sbi->s_log_groups_per_flex) { in ext4_free_inode() [all …]
|
D | resize.c | 88 struct ext4_sb_info *sbi = EXT4_SB(sb); in verify_group_input() local 89 struct ext4_super_block *es = sbi->s_es; in verify_group_input() 93 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; in verify_group_input() 100 if (group != sbi->s_groups_count) { in verify_group_input() 102 input->group, sbi->s_groups_count); in verify_group_input() 109 input->blocks_count - 2 - overhead - sbi->s_itb_per_group; in verify_group_input() 479 struct ext4_sb_info *sbi = EXT4_SB(sb); in setup_new_flex_group_blocks() local 480 struct ext4_super_block *es = sbi->s_es; in setup_new_flex_group_blocks() 490 group_data[0].group != sbi->s_groups_count); in setup_new_flex_group_blocks() 541 memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data, in setup_new_flex_group_blocks() [all …]
|
D | file.c | 125 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_file_write_iter() local 127 if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) { in ext4_file_write_iter() 131 iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos); in ext4_file_write_iter() 255 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_file_open() local 261 if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) && in ext4_file_open() 263 sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED; in ext4_file_open() 281 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); in ext4_file_open() 282 err = ext4_journal_get_write_access(handle, sbi->s_sbh); in ext4_file_open() 287 strlcpy(sbi->s_es->s_last_mounted, cp, in ext4_file_open() 288 sizeof(sbi->s_es->s_last_mounted)); in ext4_file_open()
|
D | extents.c | 63 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_extent_block_csum() local 66 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh, in ext4_extent_block_csum() 1874 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, in ext4_ext_check_overlap() argument 1888 b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); in ext4_ext_check_overlap() 1898 b2 = EXT4_LBLK_CMASK(sbi, b2); in ext4_ext_check_overlap() 2479 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_remove_blocks() local 2501 *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) { in ext4_remove_blocks() 2503 EXT4_C2B(sbi, *partial_cluster), in ext4_remove_blocks() 2504 sbi->s_cluster_ratio, flags); in ext4_remove_blocks() 2510 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_remove_blocks() local [all …]
|
D | ext4_jbd2.h | 186 struct ext4_sb_info *sbi = in ext4_journal_callback_add() local 191 spin_lock(&sbi->s_md_lock); in ext4_journal_callback_add() 193 spin_unlock(&sbi->s_md_lock); in ext4_journal_callback_add() 206 struct ext4_sb_info *sbi = in ext4_journal_callback_try_del() local 209 spin_lock(&sbi->s_md_lock); in ext4_journal_callback_try_del() 212 spin_unlock(&sbi->s_md_lock); in ext4_journal_callback_try_del()
|
D | ioctl.c | 103 struct ext4_sb_info *sbi = EXT4_SB(sb); in swap_inode_boot_loader() local 163 spin_lock(&sbi->s_next_gen_lock); in swap_inode_boot_loader() 164 inode->i_generation = sbi->s_next_generation++; in swap_inode_boot_loader() 165 inode_bl->i_generation = sbi->s_next_generation++; in swap_inode_boot_loader() 166 spin_unlock(&sbi->s_next_gen_lock); in swap_inode_boot_loader() 649 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_ioctl() local 654 if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) { in ext4_ioctl() 663 err = ext4_journal_get_write_access(handle, sbi->s_sbh); in ext4_ioctl() 666 generate_random_uuid(sbi->s_es->s_encrypt_pw_salt); in ext4_ioctl() 668 sbi->s_sbh); in ext4_ioctl() [all …]
|
D | ext4.h | 256 #define EXT4_B2C(sbi, blk) ((blk) >> (sbi)->s_cluster_bits) argument 258 #define EXT4_C2B(sbi, cluster) ((cluster) << (sbi)->s_cluster_bits) argument 260 #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ argument 261 (sbi)->s_cluster_bits) 1235 #define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \ argument 1238 #define DUMMY_ENCRYPTION_ENABLED(sbi) (0) argument 1830 static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc, in ext4_chksum() argument 1839 BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx)); in ext4_chksum() 1841 desc.shash.tfm = sbi->s_chksum_driver; in ext4_chksum() 2057 extern int ext4_claim_free_clusters(struct ext4_sb_info *sbi, [all …]
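The EXT4_B2C()/EXT4_C2B()/EXT4_NUM_B2C() helpers above are plain shift arithmetic between blocks and bigalloc clusters: s_cluster_bits is the log2 ratio and s_cluster_ratio the matching power of two. The stand-alone model below works through all three conversions; the cluster ratio and sample block counts are arbitrary examples, not values taken from any superblock.

/* Sketch of block <-> cluster conversions; the cluster ratio is an assumed example. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int s_cluster_bits  = 4;                  /* 16 blocks per cluster */
	unsigned int s_cluster_ratio = 1u << s_cluster_bits;

	uint64_t blk  = 1000;
	uint64_t blks = 37;

	uint64_t b2c     = blk >> s_cluster_bits;                           /* EXT4_B2C     */
	uint64_t c2b     = b2c << s_cluster_bits;                           /* EXT4_C2B     */
	uint64_t num_b2c = (blks + s_cluster_ratio - 1) >> s_cluster_bits;  /* EXT4_NUM_B2C */

	printf("block %llu -> cluster %llu (cluster starts at block %llu)\n",
	       (unsigned long long)blk, (unsigned long long)b2c,
	       (unsigned long long)c2b);
	printf("%llu blocks need %llu clusters (rounded up)\n",
	       (unsigned long long)blks, (unsigned long long)num_b2c);
	return 0;
}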
|
D | inode.c | 52 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_inode_csum() local 65 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, in ext4_inode_csum() 328 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_da_update_reserve_space() local 344 percpu_counter_sub(&sbi->s_dirtyclusters_counter, used); in ext4_da_update_reserve_space() 350 dquot_claim_block(inode, EXT4_C2B(sbi, used)); in ext4_da_update_reserve_space() 357 dquot_release_reservation_block(inode, EXT4_C2B(sbi, used)); in ext4_da_update_reserve_space() 1289 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_da_reserve_space() local 1299 ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1)); in ext4_da_reserve_space() 1316 if (ext4_claim_free_clusters(sbi, 1, 0)) { in ext4_da_reserve_space() 1318 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); in ext4_da_reserve_space() [all …]
|
D | extents_status.h | 172 extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi); 173 extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
|
D | namei.c | 322 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_dirent_csum() local 326 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); in ext4_dirent_csum() 413 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_dx_csum() local 422 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size); in ext4_dx_csum() 423 csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail)); in ext4_dx_csum() 2838 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_orphan_add() local 2843 if (!sbi->s_journal || is_bad_inode(inode)) in ext4_orphan_add() 2864 BUFFER_TRACE(sbi->s_sbh, "get_write_access"); in ext4_orphan_add() 2865 err = ext4_journal_get_write_access(handle, sbi->s_sbh); in ext4_orphan_add() 2873 mutex_lock(&sbi->s_orphan_lock); in ext4_orphan_add() [all …]
|
D | crypto_key.c | 101 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_generate_encryption_key() local 124 if (DUMMY_ENCRYPTION_ENABLED(sbi)) { in ext4_generate_encryption_key()
|
D | mmp.c | 12 struct ext4_sb_info *sbi = EXT4_SB(sb); in ext4_mmp_csum() local 16 csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset); in ext4_mmp_csum()
|
D | page-io.c | 215 struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb); in ext4_add_complete_io() local 221 WARN_ON(!io_end->handle && sbi->s_journal); in ext4_add_complete_io() 223 wq = sbi->rsv_conversion_wq; in ext4_add_complete_io()
|
D | xattr.c | 124 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); in ext4_xattr_block_csum() local 131 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr, in ext4_xattr_block_csum() 133 csum = ext4_chksum(sbi, csum, (__u8 *)hdr, in ext4_xattr_block_csum()
|
/linux-4.1.27/fs/qnx6/ |
D | inode.c | 49 struct qnx6_sb_info *sbi = QNX6_SB(sb); in qnx6_show_options() local 51 if (sbi->s_mount_opt & QNX6_MOUNT_MMI_FS) in qnx6_show_options() 65 struct qnx6_sb_info *sbi = QNX6_SB(sb); in qnx6_get_devblock() local 66 return fs32_to_cpu(sbi, block) + sbi->s_blks_off; in qnx6_get_devblock() 114 struct qnx6_sb_info *sbi = QNX6_SB(s); in qnx6_block_map() local 120 int ptrbits = sbi->s_ptrbits; in qnx6_block_map() 158 struct qnx6_sb_info *sbi = QNX6_SB(sb); in qnx6_statfs() local 163 buf->f_blocks = fs32_to_cpu(sbi, sbi->sb->sb_num_blocks); in qnx6_statfs() 164 buf->f_bfree = fs32_to_cpu(sbi, sbi->sb->sb_free_blocks); in qnx6_statfs() 165 buf->f_files = fs32_to_cpu(sbi, sbi->sb->sb_num_inodes); in qnx6_statfs() [all …]
|
D | super_mmi.c | 40 struct qnx6_sb_info *sbi; in qnx6_mmi_fill_super() local 51 sbi = QNX6_SB(s); in qnx6_mmi_fill_super() 52 if (fs32_to_cpu(sbi, sb1->sb_magic) != QNX6_SUPER_MAGIC) { in qnx6_mmi_fill_super() 60 if (fs32_to_cpu(sbi, sb1->sb_checksum) != in qnx6_mmi_fill_super() 67 offset = fs32_to_cpu(sbi, sb1->sb_num_blocks) + QNX6_SUPERBLOCK_AREA / in qnx6_mmi_fill_super() 68 fs32_to_cpu(sbi, sb1->sb_blocksize); in qnx6_mmi_fill_super() 71 if (!sb_set_blocksize(s, fs32_to_cpu(sbi, sb1->sb_blocksize))) { in qnx6_mmi_fill_super() 89 if (fs32_to_cpu(sbi, sb2->sb_magic) != QNX6_SUPER_MAGIC) { in qnx6_mmi_fill_super() 96 if (fs32_to_cpu(sbi, sb2->sb_checksum) in qnx6_mmi_fill_super() 108 if (fs64_to_cpu(sbi, sb1->sb_serial) > in qnx6_mmi_fill_super() [all …]
|
D | qnx6.h | 77 static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n) in fs64_to_cpu() argument 79 if (sbi->s_bytesex == BYTESEX_LE) in fs64_to_cpu() 85 static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n) in cpu_to_fs64() argument 87 if (sbi->s_bytesex == BYTESEX_LE) in cpu_to_fs64() 93 static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n) in fs32_to_cpu() argument 95 if (sbi->s_bytesex == BYTESEX_LE) in fs32_to_cpu() 101 static inline __fs32 cpu_to_fs32(struct qnx6_sb_info *sbi, __u32 n) in cpu_to_fs32() argument 103 if (sbi->s_bytesex == BYTESEX_LE) in cpu_to_fs32() 109 static inline __u16 fs16_to_cpu(struct qnx6_sb_info *sbi, __fs16 n) in fs16_to_cpu() argument 111 if (sbi->s_bytesex == BYTESEX_LE) in fs16_to_cpu() [all …]
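The fs64_to_cpu()/fs32_to_cpu()/fs16_to_cpu() helpers above all follow one pattern: the superblock records whether the volume was written little- or big-endian (s_bytesex), and every on-disk integer is converted through that flag. The conversion bodies themselves are elided in the listing, so the sketch below only models the idea in user space with explicit byte assembly rather than the kernel's endianness helpers; all names here are illustrative.

/* User-space model of byte-sex aware integer loads; conversion details are assumed. */
#include <stdio.h>
#include <stdint.h>

enum bytesex { BYTESEX_LE, BYTESEX_BE };

struct sbi_model { enum bytesex s_bytesex; };

static uint32_t fs32_to_cpu_model(const struct sbi_model *sbi, const uint8_t raw[4])
{
	if (sbi->s_bytesex == BYTESEX_LE)
		return (uint32_t)raw[0] | ((uint32_t)raw[1] << 8) |
		       ((uint32_t)raw[2] << 16) | ((uint32_t)raw[3] << 24);
	return ((uint32_t)raw[0] << 24) | ((uint32_t)raw[1] << 16) |
	       ((uint32_t)raw[2] << 8) | (uint32_t)raw[3];
}

int main(void)
{
	const uint8_t disk[4] = { 0x78, 0x56, 0x34, 0x12 };
	struct sbi_model le = { BYTESEX_LE }, be = { BYTESEX_BE };

	printf("as LE volume: 0x%08x\n", fs32_to_cpu_model(&le, disk));
	printf("as BE volume: 0x%08x\n", fs32_to_cpu_model(&be, disk));
	return 0;
}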
|
D | dir.c | 53 struct qnx6_sb_info *sbi = QNX6_SB(sb); in qnx6_longname() local 54 u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */ in qnx6_longname() 58 struct address_space *mapping = sbi->longfile->i_mapping; in qnx6_longname() 73 struct qnx6_sb_info *sbi = QNX6_SB(s); in qnx6_dir_longfilename() local 89 lf_size = fs16_to_cpu(sbi, lf->lf_size); in qnx6_dir_longfilename() 100 if (!test_opt(s, MMI_FS) && fs32_to_cpu(sbi, de->de_checksum) != in qnx6_dir_longfilename() 120 struct qnx6_sb_info *sbi = QNX6_SB(s); in qnx6_readdir() local 145 u32 no_inode = fs32_to_cpu(sbi, de->de_inode); in qnx6_readdir() 183 struct qnx6_sb_info *sbi = QNX6_SB(s); in qnx6_long_match() local 191 thislen = fs16_to_cpu(sbi, lf->lf_size); in qnx6_long_match() [all …]
|
/linux-4.1.27/fs/hfs/ |
D | super.c | 55 struct hfs_sb_info *sbi; in flush_mdb() local 58 sbi = container_of(work, struct hfs_sb_info, mdb_work.work); in flush_mdb() 59 sb = sbi->sb; in flush_mdb() 61 spin_lock(&sbi->work_lock); in flush_mdb() 62 sbi->work_queued = 0; in flush_mdb() 63 spin_unlock(&sbi->work_lock); in flush_mdb() 70 struct hfs_sb_info *sbi = HFS_SB(sb); in hfs_mark_mdb_dirty() local 76 spin_lock(&sbi->work_lock); in hfs_mark_mdb_dirty() 77 if (!sbi->work_queued) { in hfs_mark_mdb_dirty() 79 queue_delayed_work(system_long_wq, &sbi->mdb_work, delay); in hfs_mark_mdb_dirty() [all …]
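hfs_mark_mdb_dirty() above coalesces repeated dirtying of the MDB: a work_queued flag taken under work_lock ensures only one delayed flush is pending at a time, and flush_mdb() clears the flag before writing; the reiserfs entry further down shows the same shape. The single-threaded sketch below models only that flag protocol; the real code uses a spinlock and queue_delayed_work() on system_long_wq, which are not reproduced here, and the helper names are made up.

/* Sketch of the "queue the flush only once" flag protocol; no real workqueue is used. */
#include <stdio.h>
#include <stdbool.h>

static bool work_queued;
static int  queued_flushes;

static void mark_dirty(void)
{
	if (!work_queued) {          /* done under a spinlock in the kernel code */
		work_queued = true;
		queued_flushes++;    /* stands in for queue_delayed_work() */
	}
}

static void flush_work_fn(void)
{
	work_queued = false;         /* allow the next dirtying to queue again */
	/* ... metadata would be written back here ... */
}

int main(void)
{
	mark_dirty();
	mark_dirty();                /* coalesced: no second queue */
	flush_work_fn();
	mark_dirty();                /* queues again after the flush ran */
	printf("flushes queued: %d\n", queued_flushes);   /* expect 2 */
	return 0;
}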
|
/linux-4.1.27/fs/udf/ |
D | super.c | 262 struct udf_sb_info *sbi = UDF_SB(sb); in module_exit() local 264 sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map), in module_exit() 266 if (!sbi->s_partmaps) { in module_exit() 269 sbi->s_partitions = 0; in module_exit() 273 sbi->s_partitions = count; in module_exit() 325 struct udf_sb_info *sbi = UDF_SB(sb); in udf_sb_free_partitions() local 327 if (sbi->s_partmaps == NULL) in udf_sb_free_partitions() 329 for (i = 0; i < sbi->s_partitions; i++) in udf_sb_free_partitions() 330 udf_free_partition(&sbi->s_partmaps[i]); in udf_sb_free_partitions() 331 kfree(sbi->s_partmaps); in udf_sb_free_partitions() [all …]
|
D | partition.c | 32 struct udf_sb_info *sbi = UDF_SB(sb); in udf_get_pblock() local 34 if (partition >= sbi->s_partitions) { in udf_get_pblock() 39 map = &sbi->s_partmaps[partition]; in udf_get_pblock() 53 struct udf_sb_info *sbi = UDF_SB(sb); in udf_get_pblock_virt15() local 56 struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode); in udf_get_pblock_virt15() 58 map = &sbi->s_partmaps[partition]; in udf_get_pblock_virt15() 82 loc = udf_block_map(sbi->s_vat_inode, newblock); in udf_get_pblock_virt15() 117 struct udf_sb_info *sbi = UDF_SB(sb); in udf_get_pblock_spar15() local 122 map = &sbi->s_partmaps[partition]; in udf_get_pblock_spar15() 159 struct udf_sb_info *sbi = UDF_SB(sb); in udf_relocate_blocks() local [all …]
|
D | balloc.c | 94 struct udf_sb_info *sbi = UDF_SB(sb); in udf_add_free_space() local 97 if (!sbi->s_lvid_bh) in udf_add_free_space() 100 lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; in udf_add_free_space() 111 struct udf_sb_info *sbi = UDF_SB(sb); in udf_bitmap_free_blocks() local 121 mutex_lock(&sbi->s_alloc_mutex); in udf_bitmap_free_blocks() 122 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum]; in udf_bitmap_free_blocks() 159 udf_add_free_space(sb, sbi->s_partition, count); in udf_bitmap_free_blocks() 168 mutex_unlock(&sbi->s_alloc_mutex); in udf_bitmap_free_blocks() 176 struct udf_sb_info *sbi = UDF_SB(sb); in udf_bitmap_prealloc_blocks() local 183 mutex_lock(&sbi->s_alloc_mutex); in udf_bitmap_prealloc_blocks() [all …]
|
D | ialloc.c | 32 struct udf_sb_info *sbi = UDF_SB(sb); in udf_free_inode() local 36 mutex_lock(&sbi->s_alloc_mutex); in udf_free_inode() 42 mutex_unlock(&sbi->s_alloc_mutex); in udf_free_inode() 51 struct udf_sb_info *sbi = UDF_SB(sb); in udf_new_inode() local 68 if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev) in udf_new_inode() 69 sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE; in udf_new_inode() 97 mutex_lock(&sbi->s_alloc_mutex); in udf_new_inode() 103 mutex_unlock(&sbi->s_alloc_mutex); in udf_new_inode()
|
D | misc.c | 83 struct udf_sb_info *sbi = UDF_SB(inode->i_sb); in udf_add_extendedattr() local 89 if (sbi->s_udfrev >= 0x0200) in udf_add_extendedattr() 94 cpu_to_le16(sbi->s_serial_number); in udf_add_extendedattr()
|
D | inode.c | 1285 struct udf_sb_info *sbi = UDF_SB(inode->i_sb); in udf_read_inode() local 1294 sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) { in udf_read_inode() 1409 read_lock(&sbi->s_cred_lock); in udf_read_inode() 1423 sbi->s_fmode != UDF_INVALID_MODE) in udf_read_inode() 1424 inode->i_mode = sbi->s_fmode; in udf_read_inode() 1426 sbi->s_dmode != UDF_INVALID_MODE) in udf_read_inode() 1427 inode->i_mode = sbi->s_dmode; in udf_read_inode() 1430 inode->i_mode &= ~sbi->s_umask; in udf_read_inode() 1431 read_unlock(&sbi->s_cred_lock); in udf_read_inode() 1451 inode->i_atime = sbi->s_record_time; in udf_read_inode() [all …]
|
D | truncate.c | 184 struct udf_sb_info *sbi = UDF_SB(sb); in udf_update_alloc_ext_desc() local 190 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || sbi->s_udfrev >= 0x0201) in udf_update_alloc_ext_desc()
|
/linux-4.1.27/fs/isofs/ |
D | inode.c | 49 struct isofs_sb_info *sbi = ISOFS_SB(sb); in isofs_put_super() local 52 unload_nls(sbi->s_nls_iocharset); in isofs_put_super() 55 kfree(sbi); in isofs_put_super() 580 struct isofs_sb_info *sbi; in isofs_fill_super() local 590 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); in isofs_fill_super() 591 if (!sbi) in isofs_fill_super() 593 s->s_fs_info = sbi; in isofs_fill_super() 609 sbi->s_high_sierra = 0; /* default is iso9660 */ in isofs_fill_super() 670 sbi->s_high_sierra = 1; in isofs_fill_super() 701 if(sbi->s_high_sierra){ in isofs_fill_super() [all …]
|
D | namei.c | 43 struct isofs_sb_info *sbi = ISOFS_SB(dir->i_sb); in isofs_find_entry() local 107 if (sbi->s_rock && in isofs_find_entry() 112 } else if (sbi->s_joliet_level) { in isofs_find_entry() 116 } else if (sbi->s_mapping == 'a') { in isofs_find_entry() 119 } else if (sbi->s_mapping == 'n') { in isofs_find_entry() 130 (!sbi->s_hide || in isofs_find_entry() 131 (!(de->flags[-sbi->s_high_sierra] & 1))) && in isofs_find_entry() 132 (sbi->s_showassoc || in isofs_find_entry() 133 (!(de->flags[-sbi->s_high_sierra] & 4)))) { in isofs_find_entry()
|
D | dir.c | 95 struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb); in do_isofs_readdir() local 165 if (de->flags[-sbi->s_high_sierra] & 0x80) { in do_isofs_readdir() 197 if ((sbi->s_hide && (de->flags[-sbi->s_high_sierra] & 1)) || in do_isofs_readdir() 198 (!sbi->s_showassoc && in do_isofs_readdir() 199 (de->flags[-sbi->s_high_sierra] & 4))) { in do_isofs_readdir() 205 if (sbi->s_rock) { in do_isofs_readdir() 214 if (sbi->s_joliet_level) { in do_isofs_readdir() 219 if (sbi->s_mapping == 'a') { in do_isofs_readdir() 223 if (sbi->s_mapping == 'n') { in do_isofs_readdir()
|
D | rock.c | 696 struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb); in rock_ridge_symlink_readpage() local 709 if (!sbi->s_rock) in rock_ridge_symlink_readpage()
|
/linux-4.1.27/sound/drivers/opl3/ |
D | opl3_oss.c | 207 struct sbi_instrument sbi; in snd_opl3_load_patch_seq_oss() local 222 if (count < (int)sizeof(sbi)) { in snd_opl3_load_patch_seq_oss() 226 if (copy_from_user(&sbi, buf, sizeof(sbi))) in snd_opl3_load_patch_seq_oss() 229 if (sbi.channel < 0 || sbi.channel >= SBFM_MAXINSTR) { in snd_opl3_load_patch_seq_oss() 231 sbi.channel); in snd_opl3_load_patch_seq_oss() 236 sprintf(name, "Chan%d", sbi.channel); in snd_opl3_load_patch_seq_oss() 238 err = snd_opl3_load_patch(opl3, sbi.channel, 127, type, name, NULL, in snd_opl3_load_patch_seq_oss() 239 sbi.operators); in snd_opl3_load_patch_seq_oss() 243 return sizeof(sbi); in snd_opl3_load_patch_seq_oss()
|
/linux-4.1.27/fs/efs/ |
D | super.c | 31 struct efs_sb_info *sbi = SUPER_INFO(s); in efs_kill_sb() local 33 kfree(sbi); in efs_kill_sb() 334 struct efs_sb_info *sbi = SUPER_INFO(sb); in efs_statfs() local 339 buf->f_blocks = sbi->total_groups * /* total data blocks */ in efs_statfs() 340 (sbi->group_size - sbi->inode_blocks); in efs_statfs() 341 buf->f_bfree = sbi->data_free; /* free data blocks */ in efs_statfs() 342 buf->f_bavail = sbi->data_free; /* free blocks for non-root */ in efs_statfs() 343 buf->f_files = sbi->total_groups * /* total inodes */ in efs_statfs() 344 sbi->inode_blocks * in efs_statfs() 346 buf->f_ffree = sbi->inode_free; /* free inodes */ in efs_statfs()
|
/linux-4.1.27/fs/reiserfs/ |
D | super.c | 85 struct reiserfs_sb_info *sbi; in flush_old_commits() local 88 sbi = container_of(work, struct reiserfs_sb_info, old_work.work); in flush_old_commits() 89 s = sbi->s_journal->j_work_sb; in flush_old_commits() 91 spin_lock(&sbi->old_work_lock); in flush_old_commits() 92 sbi->work_queued = 0; in flush_old_commits() 93 spin_unlock(&sbi->old_work_lock); in flush_old_commits() 100 struct reiserfs_sb_info *sbi = REISERFS_SB(s); in reiserfs_schedule_old_flush() local 110 spin_lock(&sbi->old_work_lock); in reiserfs_schedule_old_flush() 111 if (!sbi->work_queued) { in reiserfs_schedule_old_flush() 113 queue_delayed_work(system_long_wq, &sbi->old_work, delay); in reiserfs_schedule_old_flush() [all …]
|
/linux-4.1.27/arch/microblaze/kernel/ |
D | hw_exception_handler.S | 486 sbi r6, r0, TOPHYS(ex_reg_op); 496 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 498 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 500 sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); 502 sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); 510 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 512 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 545 sbi r4, r3, 0; 547 sbi r4, r3, 1; 549 sbi r4, r3, 2; [all …]
|
/linux-4.1.27/fs/cramfs/ |
D | inode.c | 243 struct cramfs_sb_info *sbi = CRAMFS_SB(sb); in cramfs_kill_sb() local 246 kfree(sbi); in cramfs_kill_sb() 261 struct cramfs_sb_info *sbi; in cramfs_fill_super() local 266 sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL); in cramfs_fill_super() 267 if (!sbi) in cramfs_fill_super() 269 sb->s_fs_info = sbi; in cramfs_fill_super() 318 sbi->size = super.size; in cramfs_fill_super() 319 sbi->blocks = super.fsid.blocks; in cramfs_fill_super() 320 sbi->files = super.fsid.files; in cramfs_fill_super() 322 sbi->size = 1<<28; in cramfs_fill_super() [all …]
|
/linux-4.1.27/fs/ecryptfs/ |
D | main.c | 257 static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, in ecryptfs_parse_options() argument 270 &sbi->mount_crypt_stat; in ecryptfs_parse_options() 495 struct ecryptfs_sb_info *sbi; in ecryptfs_mount() local 504 sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); in ecryptfs_mount() 505 if (!sbi) { in ecryptfs_mount() 510 rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid); in ecryptfs_mount() 515 mount_crypt_stat = &sbi->mount_crypt_stat; in ecryptfs_mount() 523 rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs"); in ecryptfs_mount() 527 ecryptfs_set_superblock_private(s, sbi); in ecryptfs_mount() 528 s->s_bdi = &sbi->bdi; in ecryptfs_mount() [all …]
|
/linux-4.1.27/fs/squashfs/ |
D | super.c | 385 struct squashfs_sb_info *sbi = sb->s_fs_info; in squashfs_put_super() local 386 squashfs_cache_delete(sbi->block_cache); in squashfs_put_super() 387 squashfs_cache_delete(sbi->fragment_cache); in squashfs_put_super() 388 squashfs_cache_delete(sbi->read_page); in squashfs_put_super() 389 squashfs_decompressor_destroy(sbi); in squashfs_put_super() 390 kfree(sbi->id_table); in squashfs_put_super() 391 kfree(sbi->fragment_index); in squashfs_put_super() 392 kfree(sbi->meta_index); in squashfs_put_super() 393 kfree(sbi->inode_lookup_table); in squashfs_put_super() 394 kfree(sbi->xattr_id_table); in squashfs_put_super()
|
/linux-4.1.27/arch/s390/hypfs/ |
D | inode.c | 277 struct hypfs_sb_info *sbi; in hypfs_fill_super() local 279 sbi = kzalloc(sizeof(struct hypfs_sb_info), GFP_KERNEL); in hypfs_fill_super() 280 if (!sbi) in hypfs_fill_super() 282 mutex_init(&sbi->lock); in hypfs_fill_super() 283 sbi->uid = current_uid(); in hypfs_fill_super() 284 sbi->gid = current_gid(); in hypfs_fill_super() 285 sb->s_fs_info = sbi; in hypfs_fill_super() 306 sbi->update_file = hypfs_create_update_file(root_dentry); in hypfs_fill_super() 307 if (IS_ERR(sbi->update_file)) in hypfs_fill_super() 308 return PTR_ERR(sbi->update_file); in hypfs_fill_super()
|
/linux-4.1.27/include/linux/ |
D | f2fs_fs.h | 35 #define F2FS_ROOT_INO(sbi) (sbi->root_ino_num) argument 36 #define F2FS_NODE_INO(sbi) (sbi->node_ino_num) argument 37 #define F2FS_META_INO(sbi) (sbi->meta_ino_num) argument
|
/linux-4.1.27/fs/adfs/ |
D | super.c | 223 struct adfs_sb_info *sbi = ADFS_SB(sb); in adfs_statfs() local 227 buf->f_namelen = sbi->s_namelen; in adfs_statfs() 229 buf->f_blocks = sbi->s_size; in adfs_statfs() 230 buf->f_files = sbi->s_ids_per_zone * sbi->s_map_size; in adfs_statfs()
|
/linux-4.1.27/fs/hugetlbfs/ |
D | inode.c | 618 struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb); in hugetlbfs_put_super() local 620 if (sbi) { in hugetlbfs_put_super() 623 if (sbi->spool) in hugetlbfs_put_super() 624 hugepage_put_subpool(sbi->spool); in hugetlbfs_put_super() 626 kfree(sbi); in hugetlbfs_put_super()
|
/linux-4.1.27/arch/microblaze/lib/ |
D | fastcopy.S | 63 sbi r11, r5, 0 /* *d = h */ 344 sbi r9, r5, 0 /* *d = t1 */ 387 sbi r11, r5, 0 /* *d = h */ 661 sbi r9, r5, 0 /* *d = t1 */
|
/linux-4.1.27/include/trace/events/ |
D | f2fs.h | 1158 TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt, 1161 TP_ARGS(sbi, node_cnt, tree_cnt), 1170 __entry->dev = sbi->sb->s_dev;
|
/linux-4.1.27/fs/cifs/ |
D | connect.c | 3803 struct cifs_sb_info *sbi = container_of(p, struct cifs_sb_info, rcu); in delayed_free() local 3804 unload_nls(sbi->local_nls); in delayed_free() 3805 kfree(sbi); in delayed_free()
|