Lines matching refs: sbi (identifier cross-reference; the matched functions are those of fs/f2fs/gc.c)

29 struct f2fs_sb_info *sbi = data; in gc_thread_func() local
30 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in gc_thread_func()
31 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; in gc_thread_func()
46 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { in gc_thread_func()
64 if (!mutex_trylock(&sbi->gc_mutex)) in gc_thread_func()
67 if (!is_idle(sbi)) { in gc_thread_func()
69 mutex_unlock(&sbi->gc_mutex); in gc_thread_func()
73 if (has_enough_invalid_blocks(sbi)) in gc_thread_func()
78 stat_inc_bggc_count(sbi); in gc_thread_func()
81 if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC))) in gc_thread_func()
84 trace_f2fs_background_gc(sbi->sb, wait_ms, in gc_thread_func()
85 prefree_segments(sbi), free_segments(sbi)); in gc_thread_func()
88 f2fs_balance_fs_bg(sbi); in gc_thread_func()
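The lines above are the background GC thread's main loop: sleep on gc_wait_queue_head, skip a round while the superblock is frozen for writes, try-lock gc_mutex so cleaning never blocks foreground work, bail out unless the device is idle, then run f2fs_gc() and rebalance. A condensed sketch reconstructed from the matched lines (the sleep-time fields and the wait_ms tuning via has_enough_invalid_blocks() follow the usual f2fs layout and are assumptions here):

	static int gc_thread_func(void *data)
	{
		struct f2fs_sb_info *sbi = data;
		struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
		wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
		long wait_ms = gc_th->min_sleep_time;

		do {
			wait_event_interruptible_timeout(*wq,
					kthread_should_stop(),
					msecs_to_jiffies(wait_ms));
			if (kthread_should_stop())
				break;

			/* Skip a round while the fs is frozen for writes. */
			if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE)
				continue;

			/* Never block foreground traffic on gc_mutex. */
			if (!mutex_trylock(&sbi->gc_mutex))
				continue;

			if (!is_idle(sbi)) {
				mutex_unlock(&sbi->gc_mutex);
				continue;
			}

			/* Sleep less while there is plenty of garbage. */
			if (has_enough_invalid_blocks(sbi))
				wait_ms = gc_th->min_sleep_time;
			else
				wait_ms = gc_th->max_sleep_time;

			stat_inc_bggc_count(sbi);

			/* f2fs_gc() releases gc_mutex itself. */
			if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
				wait_ms = gc_th->no_gc_sleep_time;

			trace_f2fs_background_gc(sbi->sb, wait_ms,
					prefree_segments(sbi),
					free_segments(sbi));

			f2fs_balance_fs_bg(sbi);
		} while (!kthread_should_stop());
		return 0;
	}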
94 int start_gc_thread(struct f2fs_sb_info *sbi) in start_gc_thread() argument
97 dev_t dev = sbi->sb->s_bdev->bd_dev; in start_gc_thread()
112 sbi->gc_thread = gc_th; in start_gc_thread()
113 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); in start_gc_thread()
114 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, in start_gc_thread()
119 sbi->gc_thread = NULL; in start_gc_thread()
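start_gc_thread() allocates the control block, publishes it in sbi->gc_thread, and spawns the kthread named after the backing device. A sketch under the same assumptions (the "f2fs_gc-%u:%u" name format and the error path labels are the customary f2fs convention, not shown in the matched lines):

	int start_gc_thread(struct f2fs_sb_info *sbi)
	{
		struct f2fs_gc_kthread *gc_th;
		dev_t dev = sbi->sb->s_bdev->bd_dev;
		int err = 0;

		gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
		if (!gc_th)
			return -ENOMEM;

		sbi->gc_thread = gc_th;
		init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
		sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
				"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
		if (IS_ERR(gc_th->f2fs_gc_task)) {
			err = PTR_ERR(gc_th->f2fs_gc_task);
			kfree(gc_th);
			sbi->gc_thread = NULL;
		}
		return err;
	}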
125 void stop_gc_thread(struct f2fs_sb_info *sbi) in stop_gc_thread() argument
127 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in stop_gc_thread()
132 sbi->gc_thread = NULL; in stop_gc_thread()
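stop_gc_thread() is the mirror image: stop the task, free the control block, and clear sbi->gc_thread so the rest of the code sees background GC as disabled. A minimal sketch:

	void stop_gc_thread(struct f2fs_sb_info *sbi)
	{
		struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

		if (!gc_th)
			return;
		kthread_stop(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}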
148 static void select_policy(struct f2fs_sb_info *sbi, int gc_type, in select_policy() argument
151 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in select_policy()
159 p->gc_mode = select_gc_type(sbi->gc_thread, gc_type); in select_policy()
162 p->ofs_unit = sbi->segs_per_sec; in select_policy()
165 if (p->max_search > sbi->max_victim_search) in select_policy()
166 p->max_search = sbi->max_victim_search; in select_policy()
168 p->offset = sbi->last_victim[p->gc_mode]; in select_policy()
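select_policy() fills a victim_sel_policy: SSR allocation greedily scans single dirty segments of the requested type, while regular LFS cleaning asks select_gc_type() for greedy vs. cost-benefit and works in section units (segs_per_sec); the search is capped by max_victim_search and resumes from last_victim. A sketch assuming the usual policy fields (dirty_segmap, max_search, ofs_unit):

	static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
				int type, struct victim_sel_policy *p)
	{
		struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

		if (p->alloc_mode == SSR) {
			p->gc_mode = GC_GREEDY;
			p->dirty_segmap = dirty_i->dirty_segmap[type];
			p->max_search = dirty_i->nr_dirty[type];
			p->ofs_unit = 1;
		} else {
			p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
			p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
			p->ofs_unit = sbi->segs_per_sec;
		}

		if (p->max_search > sbi->max_victim_search)
			p->max_search = sbi->max_victim_search;

		p->offset = sbi->last_victim[p->gc_mode];
	}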
171 static unsigned int get_max_cost(struct f2fs_sb_info *sbi, in get_max_cost() argument
176 return 1 << sbi->log_blocks_per_seg; in get_max_cost()
178 return (1 << sbi->log_blocks_per_seg) * p->ofs_unit; in get_max_cost()
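get_max_cost() seeds min_cost with the worst possible value for the mode: one segment's block count for SSR, one section's for greedy GC, and UINT_MAX for cost-benefit (whose score is already mapped below UINT_MAX, see get_cb_cost()). The branch structure implied by lines 176 and 178:

	static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
					struct victim_sel_policy *p)
	{
		/* SSR allocates in a segment unit. */
		if (p->alloc_mode == SSR)
			return 1 << sbi->log_blocks_per_seg;
		if (p->gc_mode == GC_GREEDY)
			return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
		else if (p->gc_mode == GC_CB)
			return UINT_MAX;
		else	/* no other gc_mode */
			return 0;
	}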
185 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) in check_bg_victims() argument
187 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in check_bg_victims()
195 for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) { in check_bg_victims()
196 if (sec_usage_check(sbi, secno)) in check_bg_victims()
199 return secno * sbi->segs_per_sec; in check_bg_victims()
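check_bg_victims() lets foreground GC reuse sections that background GC already marked in victim_secmap, which are guaranteed to hold few valid blocks; busy sections are filtered by sec_usage_check(). Sketch (the clear_bit() on a hit is the expected bookkeeping, assumed here):

	static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
	{
		struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
		unsigned int secno;

		for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
			if (sec_usage_check(sbi, secno))
				continue;
			clear_bit(secno, dirty_i->victim_secmap);
			return secno * sbi->segs_per_sec;
		}
		return NULL_SEGNO;
	}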
204 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) in get_cb_cost() argument
206 struct sit_info *sit_i = SIT_I(sbi); in get_cb_cost()
207 unsigned int secno = GET_SECNO(sbi, segno); in get_cb_cost()
208 unsigned int start = secno * sbi->segs_per_sec; in get_cb_cost()
215 for (i = 0; i < sbi->segs_per_sec; i++) in get_cb_cost()
216 mtime += get_seg_entry(sbi, start + i)->mtime; in get_cb_cost()
217 vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec); in get_cb_cost()
219 mtime = div_u64(mtime, sbi->segs_per_sec); in get_cb_cost()
220 vblocks = div_u64(vblocks, sbi->segs_per_sec); in get_cb_cost()
222 u = (vblocks * 100) >> sbi->log_blocks_per_seg; in get_cb_cost()
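Lines 207-222 compute the classic cost-benefit score: averaging mtime and valid blocks over the section gives a utilization u in percent and an age normalized against the SIT-wide min/max mtime, and the benefit is age * (100 - u) / (100 + u). Because the caller minimizes cost, the result is inverted by subtracting from UINT_MAX. A sketch of the tail of the function under those assumptions:

	unsigned char age = 0;
	unsigned char u;	/* section utilization, in percent */

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Clamp against clock changes, then normalize age to 0..100. */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	/* Bigger benefit must come out as a smaller cost. */
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));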
236 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, in get_gc_cost() argument
240 return get_seg_entry(sbi, segno)->ckpt_valid_blocks; in get_gc_cost()
244 return get_valid_blocks(sbi, segno, sbi->segs_per_sec); in get_gc_cost()
246 return get_cb_cost(sbi, segno); in get_gc_cost()
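get_gc_cost() dispatches on the mode: SSR minimizes the blocks that survived the last checkpoint (ckpt_valid_blocks), greedy minimizes current valid blocks per section, and cost-benefit defers to get_cb_cost(). Sketch:

	static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
				unsigned int segno, struct victim_sel_policy *p)
	{
		if (p->alloc_mode == SSR)
			return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

		/* alloc_mode == LFS */
		if (p->gc_mode == GC_GREEDY)
			return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
		else
			return get_cb_cost(sbi, segno);
	}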
257 static int get_victim_by_default(struct f2fs_sb_info *sbi, in get_victim_by_default() argument
260 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in get_victim_by_default()
263 unsigned int last_segment = MAIN_SEGS(sbi); in get_victim_by_default()
269 select_policy(sbi, gc_type, type, &p); in get_victim_by_default()
272 p.min_cost = max_cost = get_max_cost(sbi, &p); in get_victim_by_default()
278 p.min_segno = check_bg_victims(sbi); in get_victim_by_default()
289 if (sbi->last_victim[p.gc_mode]) { in get_victim_by_default()
290 last_segment = sbi->last_victim[p.gc_mode]; in get_victim_by_default()
291 sbi->last_victim[p.gc_mode] = 0; in get_victim_by_default()
302 secno = GET_SECNO(sbi, segno); in get_victim_by_default()
304 if (sec_usage_check(sbi, secno)) in get_victim_by_default()
309 cost = get_gc_cost(sbi, segno, &p); in get_victim_by_default()
319 sbi->last_victim[p.gc_mode] = segno; in get_victim_by_default()
326 secno = GET_SECNO(sbi, p.min_segno); in get_victim_by_default()
328 sbi->cur_victim_sec = secno; in get_victim_by_default()
334 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, in get_victim_by_default()
335 sbi->cur_victim_sec, in get_victim_by_default()
336 prefree_segments(sbi), free_segments(sbi)); in get_victim_by_default()
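Putting lines 257-336 together, the default victim selector is a bounded linear scan of the dirty-segment bitmap: score each candidate, keep the minimum, and park the scan position in last_victim once max_search candidates have been inspected so the next call resumes there, with one wrap-around pass (lines 289-291). A condensed sketch of the scan loop (nsearched is a local counter assumed here; alignment of p.offset for multi-segment sections is omitted):

	while (1) {
		unsigned int segno = find_next_bit(p.dirty_segmap,
						last_segment, p.offset);
		if (segno >= last_segment) {
			/* Wrap once if the scan started mid-device. */
			if (sbi->last_victim[p.gc_mode]) {
				last_segment = sbi->last_victim[p.gc_mode];
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}
		p.offset = segno + p.ofs_unit;

		secno = GET_SECNO(sbi, segno);
		if (sec_usage_check(sbi, secno))
			continue;

		cost = get_gc_cost(sbi, segno, &p);
		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}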
384 static int check_valid_map(struct f2fs_sb_info *sbi, in check_valid_map() argument
387 struct sit_info *sit_i = SIT_I(sbi); in check_valid_map()
392 sentry = get_seg_entry(sbi, segno); in check_valid_map()
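check_valid_map() is a locked bitmap probe: under sentry_lock it asks the segment's SIT entry whether the block at this offset is still valid. Sketch:

	static int check_valid_map(struct f2fs_sb_info *sbi,
					unsigned int segno, int offset)
	{
		struct sit_info *sit_i = SIT_I(sbi);
		struct seg_entry *sentry;
		int ret;

		mutex_lock(&sit_i->sentry_lock);
		sentry = get_seg_entry(sbi, segno);
		ret = f2fs_test_bit(offset, sentry->cur_valid_map);
		mutex_unlock(&sit_i->sentry_lock);
		return ret;
	}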
403 static int gc_node_segment(struct f2fs_sb_info *sbi, in gc_node_segment() argument
411 start_addr = START_BLOCK(sbi, segno); in gc_node_segment()
416 for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { in gc_node_segment()
422 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) in gc_node_segment()
425 if (check_valid_map(sbi, segno, off) == 0) in gc_node_segment()
429 ra_node_page(sbi, nid); in gc_node_segment()
432 node_page = get_node_page(sbi, nid); in gc_node_segment()
437 if (check_valid_map(sbi, segno, off) == 0) { in gc_node_segment()
442 get_node_info(sbi, nid, &ni); in gc_node_segment()
457 stat_inc_node_blk_count(sbi, 1, gc_type); in gc_node_segment()
471 sync_node_pages(sbi, 0, &wbc); in gc_node_segment()
474 if (get_valid_blocks(sbi, segno, 1) == 0) in gc_node_segment()
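Node GC walks the segment's summary entries twice: a read-ahead pass (ra_node_page(), line 429) and a work pass that re-validates each block, confirms the node still lives at start_addr + off, and redirties its page; foreground GC then flushes everything in bulk via sync_node_pages() and reports success only when the segment ends up empty (line 474). A condensed sketch of the work pass:

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* Background GC yields when free sections run low. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return 0;
		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* The block may have been freed while we slept. */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		set_page_dirty(node_page);	/* flushed later in bulk */
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}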
507 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in is_alive() argument
518 node_page = get_node_page(sbi, nid); in is_alive()
522 get_node_info(sbi, nid, dni); in is_alive()
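is_alive() decides whether a summary entry still describes live data: fetch the node that owns the block, compare versions, and check that the node's data pointer at ofs_in_node still equals blkaddr. Sketch (ofs_of_node() and datablock_addr() are the customary f2fs accessors, assumed here):

	static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			struct node_info *dni, block_t blkaddr, unsigned int *nofs)
	{
		nid_t nid = le32_to_cpu(sum->nid);
		unsigned int ofs_in_node = le16_to_cpu(sum->ofs_in_node);
		block_t source_blkaddr;
		struct page *node_page;

		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			return false;

		get_node_info(sbi, nid, dni);

		if (sum->version != dni->version) {
			f2fs_put_page(node_page, 1);
			return false;
		}

		*nofs = ofs_of_node(node_page);
		source_blkaddr = datablock_addr(node_page, ofs_in_node);
		f2fs_put_page(node_page, 1);

		return source_blkaddr == blkaddr;
	}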
541 .sbi = F2FS_I_SB(inode), in move_encrypted_block()
573 get_node_info(fio.sbi, dn.nid, &ni); in move_encrypted_block()
580 fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), in move_encrypted_block()
596 if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) in move_encrypted_block()
602 dec_page_count(fio.sbi, F2FS_DIRTY_META); in move_encrypted_block()
608 allocate_data_block(fio.sbi, NULL, fio.blk_addr, in move_encrypted_block()
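move_encrypted_block() migrates a block without ever decrypting it: the ciphertext page is staged in the meta inode's address space keyed by the old block address (line 580), a replacement block is allocated (line 608), and the staged page is written there. A compressed sketch of that middle section (locking, the read submission, and error paths are omitted; the goto labels are assumptions):

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* Stage the ciphertext under META_MAPPING, keyed by blk_addr. */
	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
					fio.blk_addr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page)
		goto put_out;

	/* ... read the old block into the staged page ... */

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
		goto put_page_out;	/* truncated/remapped meanwhile */

	/* The page moves to a data address, so it no longer counts
	 * as dirty meta. */
	dec_page_count(fio.sbi, F2FS_DIRTY_META);

	allocate_data_block(fio.sbi, NULL, fio.blk_addr,
				&fio.blk_addr, &sum, CURSEG_COLD_DATA);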
642 .sbi = F2FS_I_SB(inode), in move_data_page()
667 static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in gc_data_segment() argument
670 struct super_block *sb = sbi->sb; in gc_data_segment()
676 start_addr = START_BLOCK(sbi, segno); in gc_data_segment()
681 for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { in gc_data_segment()
689 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0)) in gc_data_segment()
692 if (check_valid_map(sbi, segno, off) == 0) in gc_data_segment()
696 ra_node_page(sbi, le32_to_cpu(entry->nid)); in gc_data_segment()
701 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs)) in gc_data_segment()
705 ra_node_page(sbi, dni.ino); in gc_data_segment()
745 stat_inc_data_blk_count(sbi, 1, gc_type); in gc_data_segment()
753 f2fs_submit_merged_bio(sbi, DATA, WRITE); in gc_data_segment()
756 if (get_valid_blocks(sbi, segno, 1) == 0) in gc_data_segment()
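Data GC adds a liveness check on top of the node path: each valid block's summary entry must still be referenced by its node (is_alive(), line 701), then the owning inode is brought in and the page is migrated, with move_encrypted_block() or move_data_page() doing the copy; foreground GC flushes the merged WRITE bios at the end (line 753) and, as with nodes, succeeds only if the segment is empty afterwards (line 756). A condensed sketch of the final phase (the real function makes several read-ahead passes first, and inode references are cached on gc_list rather than dropped inline):

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct node_info dni;	/* node that maps this block */
		unsigned int nofs;
		struct inode *inode;

		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			break;
		if (check_valid_map(sbi, segno, off) == 0)
			continue;
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		inode = f2fs_iget(sb, dni.ino);
		if (IS_ERR(inode))
			continue;

		/* move_encrypted_block() for ciphertext,
		 * move_data_page() otherwise. */
		stat_inc_data_blk_count(sbi, 1, gc_type);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);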
762 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, in __get_victim() argument
765 struct sit_info *sit_i = SIT_I(sbi); in __get_victim()
769 ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, in __get_victim()
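__get_victim() is just the locked indirection through the victim-selection ops table installed by build_gc_manager() below; sentry_lock keeps the SIT stable while the policy scans it. Sketch:

	static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
							int gc_type)
	{
		struct sit_info *sit_i = SIT_I(sbi);
		int ret;

		mutex_lock(&sit_i->sentry_lock);
		ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
							NO_CHECK_TYPE, LFS);
		mutex_unlock(&sit_i->sentry_lock);
		return ret;
	}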
775 static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno, in do_garbage_collect() argument
784 sum_page = get_sum_page(sbi, segno); in do_garbage_collect()
801 nfree = gc_node_segment(sbi, sum->entries, segno, gc_type); in do_garbage_collect()
804 nfree = gc_data_segment(sbi, sum->entries, gc_list, in do_garbage_collect()
810 stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type); in do_garbage_collect()
811 stat_inc_call_count(sbi->stat_info); in do_garbage_collect()
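do_garbage_collect() reads the victim segment's on-disk summary block and dispatches on its footer type: node segments go to gc_node_segment(), data segments to gc_data_segment(). Sketch (the freed-or-not return convention follows the nfree uses at lines 801 and 804):

	static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct gc_inode_list *gc_list, int gc_type)
	{
		struct page *sum_page;
		struct f2fs_summary_block *sum;
		int nfree = 0;

		/* One summary block describes every block in the segment. */
		sum_page = get_sum_page(sbi, segno);
		sum = page_address(sum_page);

		/* Drop the page lock early to avoid deadlocking against
		 * block replacement paths that take sentry_lock. */
		unlock_page(sum_page);

		if (GET_SUM_TYPE((&sum->footer)) == SUM_TYPE_NODE)
			nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			nfree = gc_data_segment(sbi, sum->entries, gc_list,
								segno, gc_type);

		stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
		stat_inc_call_count(sbi->stat_info);

		f2fs_put_page(sum_page, 0);
		return nfree;
	}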
817 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync) in f2fs_gc() argument
829 cpc.reason = __get_cp_reason(sbi); in f2fs_gc()
833 if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) in f2fs_gc()
835 if (unlikely(f2fs_cp_error(sbi))) in f2fs_gc()
838 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) { in f2fs_gc()
840 if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi)) in f2fs_gc()
841 write_checkpoint(sbi, &cpc); in f2fs_gc()
844 if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type)) in f2fs_gc()
849 if (sbi->segs_per_sec > 1) in f2fs_gc()
850 ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec, in f2fs_gc()
853 for (i = 0; i < sbi->segs_per_sec; i++) { in f2fs_gc()
858 if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) && in f2fs_gc()
863 if (i == sbi->segs_per_sec && gc_type == FG_GC) in f2fs_gc()
867 sbi->cur_victim_sec = NULL_SEGNO; in f2fs_gc()
870 if (has_not_enough_free_secs(sbi, sec_freed)) in f2fs_gc()
874 write_checkpoint(sbi, &cpc); in f2fs_gc()
877 mutex_unlock(&sbi->gc_mutex); in f2fs_gc()
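f2fs_gc() ties it together: background GC escalates to foreground when free sections run short, a checkpoint is written first so prefree segments become free without any copying (lines 838-841), then every segment of the victim section is collected and the loop repeats until enough sections are freed, finishing with another checkpoint. A condensed sketch of the core flow (gc_list teardown, read-ahead of summary pages, and the sync retry path omitted):

	int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
	{
		unsigned int segno = NULL_SEGNO;
		int gc_type = sync ? FG_GC : BG_GC;
		int sec_freed = 0, i, ret = -EINVAL;
		struct cp_control cpc;
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
		};

		cpc.reason = __get_cp_reason(sbi);
	gc_more:
		if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
			goto stop;
		if (unlikely(f2fs_cp_error(sbi)))
			goto stop;

		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
			gc_type = FG_GC;
			/* A checkpoint reclaims prefree segments for free. */
			if (__get_victim(sbi, &segno, gc_type) ||
							prefree_segments(sbi))
				write_checkpoint(sbi, &cpc);
		}

		if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
			goto stop;
		ret = 0;

		/* Collect every segment of the victim section. */
		for (i = 0; i < sbi->segs_per_sec; i++)
			if (!do_garbage_collect(sbi, segno + i, &gc_list,
					gc_type) && gc_type == FG_GC)
				break;

		/* The section counts as freed only if fully cleaned. */
		if (i == sbi->segs_per_sec && gc_type == FG_GC)
			sec_freed++;

		if (gc_type == FG_GC)
			sbi->cur_victim_sec = NULL_SEGNO;

		if (!sync) {
			if (has_not_enough_free_secs(sbi, sec_freed)) {
				segno = NULL_SEGNO;
				goto gc_more;
			}
			if (gc_type == FG_GC)
				write_checkpoint(sbi, &cpc);
		}
	stop:
		mutex_unlock(&sbi->gc_mutex);
		return ret;
	}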
886 void build_gc_manager(struct f2fs_sb_info *sbi) in build_gc_manager() argument
888 DIRTY_I(sbi)->v_ops = &default_v_ops; in build_gc_manager()
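build_gc_manager() is how get_victim_by_default() becomes reachable through the v_ops indirection used by __get_victim() above; the ops table is the only policy hook. Sketch:

	static const struct victim_selection default_v_ops = {
		.get_victim = get_victim_by_default,
	};

	void build_gc_manager(struct f2fs_sb_info *sbi)
	{
		DIRTY_I(sbi)->v_ops = &default_v_ops;
	}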