Lines matching refs:work in fs/fs-writeback.c

Each entry below gives the source line number, the matching code, and the enclosing function; "argument" marks a match in a parameter list, "local" a local variable declaration. Note that two different things are named "work" here: the wb_writeback_work request items that flow through bdi->work_list, and (in bdi_writeback_workfn) the generic workqueue callback argument.
121 struct wb_writeback_work *work) in bdi_queue_work() argument
123 trace_writeback_queue(bdi, work); in bdi_queue_work()
127 if (work->done) in bdi_queue_work()
128 complete(work->done); in bdi_queue_work()
131 list_add_tail(&work->list, &bdi->work_list); in bdi_queue_work()
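
The cluster above is bdi_queue_work(), the producer-side entry point: a work item is appended to bdi->work_list under bdi->wb_lock, and if the bdi is already unregistered the item's optional completion is fired instead, so a waiting caller is never left blocked. A minimal single-threaded userspace sketch of that pattern (all types are simplified stand-ins, not the kernel's):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct completion { bool done; };       /* kernel's is wait-queue backed */

    struct wb_work {
        long               nr_pages;
        struct completion *done;            /* optional: caller wants to wait */
        struct wb_work    *next;
    };

    struct bdi {
        bool            registered;
        struct wb_work *head, *tail;        /* models bdi->work_list */
    };

    /* Models bdi_queue_work(): append, or complete immediately if the
     * device is gone.  The real code holds bdi->wb_lock around this and
     * then kicks the flusher worker. */
    static void queue_work_item(struct bdi *bdi, struct wb_work *work)
    {
        if (!bdi->registered) {
            if (work->done)
                work->done->done = true;    /* complete(work->done) */
            return;
        }
        work->next = NULL;                  /* list_add_tail() equivalent */
        if (bdi->tail)
            bdi->tail->next = work;
        else
            bdi->head = work;
        bdi->tail = work;
    }

    int main(void)
    {
        struct bdi bdi = { .registered = true };
        struct wb_work w = { .nr_pages = 1024 };
        queue_work_item(&bdi, &w);
        printf("queued: %ld pages\n", bdi.head->nr_pages);
        return 0;
    }
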
141 struct wb_writeback_work *work; in __bdi_start_writeback() local
147 work = kzalloc(sizeof(*work), GFP_ATOMIC); in __bdi_start_writeback()
148 if (!work) { in __bdi_start_writeback()
154 work->sync_mode = WB_SYNC_NONE; in __bdi_start_writeback()
155 work->nr_pages = nr_pages; in __bdi_start_writeback()
156 work->range_cyclic = range_cyclic; in __bdi_start_writeback()
157 work->reason = reason; in __bdi_start_writeback()
159 bdi_queue_work(bdi, work); in __bdi_start_writeback()
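
__bdi_start_writeback() (lines 141-159) packages a fire-and-forget request: the work item is heap-allocated with GFP_ATOMIC because callers may hold locks and never wait on the result, filled in with the sync mode, page budget, range_cyclic flag and reason, then queued. On allocation failure the kernel simply wakes the flusher thread rather than failing the request. A sketch of the allocate-and-populate half, again with simplified stand-in types:

    #include <stdio.h>
    #include <stdlib.h>

    enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

    struct wb_work {                        /* simplified stand-in */
        enum sync_mode sync_mode;
        long           nr_pages;
        int            range_cyclic;
        int            reason;
    };

    /* Models __bdi_start_writeback(): nobody waits on this work, so it
     * lives on the heap and the consumer frees it after execution. */
    static struct wb_work *start_writeback(long nr_pages, int range_cyclic,
                                           int reason)
    {
        struct wb_work *work = calloc(1, sizeof(*work)); /* ~kzalloc(GFP_ATOMIC) */
        if (!work) {
            /* the kernel falls back to just waking the flusher thread */
            return NULL;
        }
        work->sync_mode    = WB_SYNC_NONE;  /* background: don't wait on I/O */
        work->nr_pages     = nr_pages;
        work->range_cyclic = range_cyclic;
        work->reason       = reason;
        return work;                        /* then bdi_queue_work(bdi, work) */
    }

    int main(void)
    {
        struct wb_work *w = start_writeback(4096, 1, 0);
        if (w)
            printf("work: %ld pages, cyclic=%d\n", w->nr_pages, w->range_cyclic);
        free(w);
        return 0;
    }
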
277 struct wb_writeback_work *work) in move_expired_inodes() argument
289 older_than_this = work->older_than_this; in move_expired_inodes()
290 else if (!work->for_sync) { in move_expired_inodes()
340 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) in queue_io() argument
346 moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); in queue_io()
348 EXPIRE_DIRTY_ATIME, work); in queue_io()
349 trace_writeback_queue_io(wb, work, moved); in queue_io()
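
queue_io() refills the per-bdi I/O list before each writeback pass: move_expired_inodes() walks the time-ordered b_dirty list and splices every inode whose dirtied timestamp is not newer than the work's older_than_this cutoff onto b_io, stopping at the first inode that is still too fresh. A single-threaded sketch of that expiry walk over a linked list, with stand-in types:

    #include <stdio.h>
    #include <time.h>

    struct inode_item {
        time_t             dirtied_when;    /* models inode->dirtied_when */
        struct inode_item *next;
    };

    /* Models move_expired_inodes(): pop expired entries off the dirty
     * list and move them to the io list; returns how many moved.
     * A NULL cutoff means "move everything". */
    static int move_expired(struct inode_item **dirty, struct inode_item **io,
                            const time_t *older_than_this)
    {
        int moved = 0;
        while (*dirty) {
            struct inode_item *in = *dirty;
            if (older_than_this && in->dirtied_when > *older_than_this)
                break;                      /* list is time-ordered: stop */
            *dirty = in->next;
            in->next = *io;                 /* (the real code keeps b_io ordered) */
            *io = in;
            moved++;
        }
        return moved;
    }

    int main(void)
    {
        struct inode_item b = { 200, NULL }, a = { 100, &b };
        struct inode_item *dirty = &a, *io = NULL;
        time_t cutoff = 150;                /* expire anything dirtied <= 150 */
        printf("moved %d inode(s)\n", move_expired(&dirty, &io, &cutoff));
        return 0;
    }
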
628 struct wb_writeback_work *work) in writeback_chunk_size() argument
645 if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages) in writeback_chunk_size()
650 pages = min(pages, work->nr_pages); in writeback_chunk_size()
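
writeback_chunk_size() decides how many pages one inode may write before the loop moves on: data-integrity work (WB_SYNC_ALL) and tagged_writepages get LONG_MAX so each inode is written out in a single pass (splitting integrity writeback could livelock against concurrent dirtiers), while best-effort work gets a slice derived from the device's measured write bandwidth, clamped to the work's remaining page budget. The decision as a small pure function; the constant and the bandwidth parameter are assumptions for the sketch, and the kernel's additional cap by the global dirty limit is omitted:

    #include <limits.h>
    #include <stdio.h>

    enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

    #define MIN_WRITEBACK_PAGES 1024        /* assumed value for the sketch */

    static long min_l(long a, long b) { return a < b ? a : b; }

    /* Models writeback_chunk_size(): integrity writeback is unbounded,
     * best-effort writeback is bandwidth-paced and budget-clamped. */
    static long chunk_size(enum sync_mode mode, int tagged_writepages,
                           long nr_pages, long avg_write_bandwidth)
    {
        long pages;

        if (mode == WB_SYNC_ALL || tagged_writepages)
            return LONG_MAX;                /* write the inode out fully */

        pages = avg_write_bandwidth / 2;    /* ~half a second of throughput */
        /* (the kernel also caps this by the global dirty limit) */
        pages = min_l(pages, nr_pages);     /* never exceed the work budget */
        /* round to a sane granularity, as the kernel does */
        pages = (pages + MIN_WRITEBACK_PAGES) / MIN_WRITEBACK_PAGES
                * MIN_WRITEBACK_PAGES;
        return pages;
    }

    int main(void)
    {
        printf("%ld\n", chunk_size(WB_SYNC_NONE, 0, 8192, 25600));
        printf("%ld\n", chunk_size(WB_SYNC_ALL, 0, 8192, 25600));
        return 0;
    }
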
665 struct wb_writeback_work *work) in writeback_sb_inodes() argument
668 .sync_mode = work->sync_mode, in writeback_sb_inodes()
669 .tagged_writepages = work->tagged_writepages, in writeback_sb_inodes()
670 .for_kupdate = work->for_kupdate, in writeback_sb_inodes()
671 .for_background = work->for_background, in writeback_sb_inodes()
672 .for_sync = work->for_sync, in writeback_sb_inodes()
673 .range_cyclic = work->range_cyclic, in writeback_sb_inodes()
685 if (work->sb) { in writeback_sb_inodes()
746 write_chunk = writeback_chunk_size(wb->bdi, work); in writeback_sb_inodes()
756 work->nr_pages -= write_chunk - wbc.nr_to_write; in writeback_sb_inodes()
773 if (work->nr_pages <= 0) in writeback_sb_inodes()
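
writeback_sb_inodes() is where a work item turns into per-inode writeback_control: the wbc inherits the work's sync mode and flags (lines 668-673), each inode gets a writeback_chunk_size() allotment in wbc.nr_to_write, and whatever the inode actually consumed (write_chunk - wbc.nr_to_write) is charged against work->nr_pages until the budget runs out. The accounting in isolation, with stand-in types and a hypothetical per-inode writer:

    #include <stdio.h>

    struct wb_work    { long nr_pages; };   /* simplified stand-ins */
    struct wb_control { long nr_to_write; };/* ~struct writeback_control */

    /* Hypothetical per-inode writer: the inode has 'dirty' pages and
     * writes at most wbc->nr_to_write of them. */
    static void write_one_inode(struct wb_control *wbc, long dirty)
    {
        long written = dirty < wbc->nr_to_write ? dirty : wbc->nr_to_write;
        wbc->nr_to_write -= written;
    }

    /* Models the charging loop in writeback_sb_inodes(). */
    static void writeback_inodes(struct wb_work *work,
                                 const long *dirty_pages, int n_inodes)
    {
        for (int i = 0; i < n_inodes && work->nr_pages > 0; i++) {
            long write_chunk = 2048;        /* ~writeback_chunk_size() */
            struct wb_control wbc = { .nr_to_write = write_chunk };

            write_one_inode(&wbc, dirty_pages[i]);
            /* pages the inode consumed come out of the work budget */
            work->nr_pages -= write_chunk - wbc.nr_to_write;
        }
    }

    int main(void)
    {
        long dirty[] = { 1500, 3000, 700 };
        struct wb_work work = { .nr_pages = 4096 };
        writeback_inodes(&work, dirty, 3);
        /* 4096 - 1500 - 2048 - 700 = -152: the budget may go just past
         * zero, which is why the kernel checks work->nr_pages <= 0. */
        printf("budget left: %ld\n", work.nr_pages);
        return 0;
    }
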
781 struct wb_writeback_work *work) in __writeback_inodes_wb() argument
799 wrote += writeback_sb_inodes(sb, wb, work); in __writeback_inodes_wb()
806 if (work->nr_pages <= 0) in __writeback_inodes_wb()
817 struct wb_writeback_work work = { in writeback_inodes_wb() local
826 queue_io(wb, &work); in writeback_inodes_wb()
827 __writeback_inodes_wb(wb, &work); in writeback_inodes_wb()
830 return nr_pages - work.nr_pages; in writeback_inodes_wb()
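
writeback_inodes_wb() shows the other way work items are born: not queued, but built on the caller's stack for a synchronous best-effort pass, with the return value computed as nr_pages - work.nr_pages, i.e. how much of the budget was consumed. A short sketch of that calling convention, with a hypothetical stand-in for the queue_io() + __writeback_inodes_wb() pair:

    #include <stdio.h>

    struct wb_work { long nr_pages; };      /* simplified stand-in */

    /* Hypothetical pass: pretend 300 pages were cleaned. */
    static void do_pass(struct wb_work *work)
    {
        work->nr_pages -= 300;
    }

    /* Models writeback_inodes_wb(): on-stack work, no queueing, and the
     * caller learns how many pages were actually written. */
    static long writeback_inodes_wb(long nr_pages)
    {
        struct wb_work work = { .nr_pages = nr_pages };

        do_pass(&work);
        return nr_pages - work.nr_pages;    /* pages consumed == written */
    }

    int main(void)
    {
        printf("wrote %ld pages\n", writeback_inodes_wb(1024));
        return 0;
    }
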
876 struct wb_writeback_work *work) in wb_writeback() argument
879 long nr_pages = work->nr_pages; in wb_writeback()
885 work->older_than_this = &oldest_jif; in wb_writeback()
892 if (work->nr_pages <= 0) in wb_writeback()
901 if ((work->for_background || work->for_kupdate) && in wb_writeback()
909 if (work->for_background && !over_bground_thresh(wb->bdi)) in wb_writeback()
918 if (work->for_kupdate) { in wb_writeback()
921 } else if (work->for_background) in wb_writeback()
924 trace_writeback_start(wb->bdi, work); in wb_writeback()
926 queue_io(wb, work); in wb_writeback()
927 if (work->sb) in wb_writeback()
928 progress = writeback_sb_inodes(work->sb, wb, work); in wb_writeback()
930 progress = __writeback_inodes_wb(wb, work); in wb_writeback()
931 trace_writeback_written(wb->bdi, work); in wb_writeback()
956 trace_writeback_wait(wb->bdi, work); in wb_writeback()
967 return nr_pages - work->nr_pages; in wb_writeback()
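
wb_writeback() is the main loop all of the above feeds. It snapshots nr_pages so it can report pages written, points work->older_than_this at a local cutoff (for kupdate work: inodes dirtied longer ago than the expire interval; for background work: "now", so freshly dirtied inodes are still picked up), and keeps calling writeback_sb_inodes()/__writeback_inodes_wb() until the budget is spent, explicitly queued work preempts a background/kupdate pass, or a background pass drops below the dirty threshold. The control-flow skeleton with assumed stand-in probes:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct wb_work {                        /* simplified stand-in */
        long    nr_pages;
        bool    for_background, for_kupdate;
        time_t *older_than_this;
    };

    /* Hypothetical environment probes for the sketch. */
    static bool queued_work_pending(void) { return false; }
    static bool over_bground_thresh(void) { return false; }
    static long write_some(struct wb_work *w) { w->nr_pages -= 512; return 512; }

    #define DIRTY_EXPIRE_SECS 30            /* ~dirty_expire_interval */

    /* Models wb_writeback(): returns the number of pages written. */
    static long wb_writeback(struct wb_work *work)
    {
        long nr_pages = work->nr_pages;     /* snapshot for the return value */
        time_t oldest;

        work->older_than_this = &oldest;
        for (;;) {
            if (work->nr_pages <= 0)
                break;                      /* budget exhausted */
            /* explicitly queued work outranks background/kupdate passes */
            if ((work->for_background || work->for_kupdate) &&
                queued_work_pending())
                break;
            /* background writeback stops once below the dirty threshold */
            if (work->for_background && !over_bground_thresh())
                break;

            if (work->for_kupdate)
                oldest = time(NULL) - DIRTY_EXPIRE_SECS;  /* expired only */
            else if (work->for_background)
                oldest = time(NULL);                      /* take everything */

            if (!write_some(work))
                break;                      /* no progress: nothing to do */
        }
        return nr_pages - work->nr_pages;
    }

    int main(void)
    {
        struct wb_work w = { .nr_pages = 1024 };
        printf("wrote %ld pages\n", wb_writeback(&w));
        return 0;
    }
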
976 struct wb_writeback_work *work = NULL; in get_next_work_item() local
980 work = list_entry(bdi->work_list.next, in get_next_work_item()
982 list_del_init(&work->list); in get_next_work_item()
985 return work; in get_next_work_item()
1003 struct wb_writeback_work work = { in wb_check_background_flush() local
1011 return wb_writeback(wb, &work); in wb_check_background_flush()
1037 struct wb_writeback_work work = { in wb_check_old_data_flush() local
1045 return wb_writeback(wb, &work); in wb_check_old_data_flush()
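
wb_check_background_flush() and wb_check_old_data_flush() are the two work items the flusher invents for itself when its queue is empty. Both live on the stack and differ mainly in flags and budget: background work gets an effectively unlimited nr_pages and runs only while over the dirty threshold; kupdate work is budgeted to what is currently dirty and gated by the periodic writeback timer (elided below). A sketch of the two templates, with stub probes standing in for the kernel's accounting:

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

    struct wb_work {                        /* simplified stand-in */
        long           nr_pages;
        enum sync_mode sync_mode;
        bool           for_background, for_kupdate;
        bool           range_cyclic;
    };

    /* Hypothetical stubs for the sketch. */
    static bool over_bground_thresh(void) { return true; }
    static long get_nr_dirty_pages(void)  { return 2048; }
    static long wb_writeback(struct wb_work *w)
    {
        return w->nr_pages < 100 ? w->nr_pages : 100;   /* pretend-write */
    }

    /* Models wb_check_background_flush(): run only while over the
     * dirty threshold, with an effectively unlimited budget. */
    static long check_background_flush(void)
    {
        if (over_bground_thresh()) {
            struct wb_work work = {
                .nr_pages       = LONG_MAX,
                .sync_mode      = WB_SYNC_NONE,
                .for_background = true,
                .range_cyclic   = true,
            };
            return wb_writeback(&work);
        }
        return 0;
    }

    /* Models wb_check_old_data_flush(): kupdate-style flush of old
     * dirty data, budgeted to what is currently dirty.  The real code
     * first checks the dirty_writeback_interval timer. */
    static long check_old_data_flush(void)
    {
        struct wb_work work = {
            .nr_pages     = get_nr_dirty_pages(),
            .sync_mode    = WB_SYNC_NONE,
            .for_kupdate  = true,
            .range_cyclic = true,
        };
        return wb_writeback(&work);
    }

    int main(void)
    {
        printf("background: %ld, kupdate: %ld\n",
               check_background_flush(), check_old_data_flush());
        return 0;
    }
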
1057 struct wb_writeback_work *work; in wb_do_writeback() local
1061 while ((work = get_next_work_item(bdi)) != NULL) { in wb_do_writeback()
1063 trace_writeback_exec(bdi, work); in wb_do_writeback()
1065 wrote += wb_writeback(wb, work); in wb_do_writeback()
1071 if (work->done) in wb_do_writeback()
1072 complete(work->done); in wb_do_writeback()
1074 kfree(work); in wb_do_writeback()
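
wb_do_writeback() is the consumer side of bdi_queue_work(): get_next_work_item() pops the head of bdi->work_list under the lock, each item is executed via wb_writeback(), and then ownership is resolved. If the item has a completion, the waiter owns it (it is on the waiter's stack) and is merely signalled; otherwise the heap-allocated fire-and-forget item is freed, which is why line 1074's kfree sits in the else branch of the work->done test. The drain loop in a single-threaded sketch with stand-in types:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct completion { bool done; };       /* kernel's is wait-queue backed */

    struct wb_work {
        long               nr_pages;
        struct completion *done;            /* optional waiter */
        struct wb_work    *next;
    };

    struct bdi { struct wb_work *head; };   /* models bdi->work_list */

    /* Models get_next_work_item(): pop the list head (or NULL). */
    static struct wb_work *get_next_work_item(struct bdi *bdi)
    {
        struct wb_work *work = bdi->head;
        if (work)
            bdi->head = work->next;         /* list_del_init() equivalent */
        return work;
    }

    static long wb_writeback(struct wb_work *w) { return w->nr_pages; }

    /* Models wb_do_writeback(): drain the queue, signal or free. */
    static long wb_do_writeback(struct bdi *bdi)
    {
        struct wb_work *work;
        long wrote = 0;

        while ((work = get_next_work_item(bdi)) != NULL) {
            wrote += wb_writeback(work);
            if (work->done)
                work->done->done = true;    /* complete(): waiter owns item */
            else
                free(work);                 /* fire-and-forget: kfree(work) */
        }
        return wrote;
    }

    int main(void)
    {
        struct completion c = { false };
        struct wb_work sync_work = { .nr_pages = 10, .done = &c };
        struct wb_work *bg_work = calloc(1, sizeof(*bg_work));
        if (!bg_work)
            return 1;
        bg_work->nr_pages = 5;
        bg_work->next = &sync_work;

        struct bdi bdi = { .head = bg_work };
        long wrote = wb_do_writeback(&bdi);
        printf("wrote %ld, waiter signalled: %d\n", wrote, c.done);
        return 0;
    }
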
1091 void bdi_writeback_workfn(struct work_struct *work) in bdi_writeback_workfn() argument
1093 struct bdi_writeback *wb = container_of(to_delayed_work(work), in bdi_writeback_workfn()
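
bdi_writeback_workfn() is the workqueue entry point, and its container_of(to_delayed_work(work), ...) line resolves the generic struct work_struct pointer back to the bdi_writeback that embeds it. Note that this "work" is the workqueue callback argument, not a wb_writeback_work. The container_of idiom in userspace (the macro below matches the kernel's arithmetic, the surrounding types are stand-ins):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };    /* simplified stand-in */

    struct bdi_writeback {                  /* embeds its work item */
        long               nr_dirty;
        struct work_struct dwork;
    };

    /* Models bdi_writeback_workfn(): the workqueue hands us only the
     * embedded work_struct; recover the containing bdi_writeback. */
    static void writeback_workfn(struct work_struct *work)
    {
        struct bdi_writeback *wb =
            container_of(work, struct bdi_writeback, dwork);
        printf("flusher woke, %ld dirty pages\n", wb->nr_dirty);
    }

    int main(void)
    {
        struct bdi_writeback wb = { .nr_dirty = 42 };
        writeback_workfn(&wb.dwork);        /* as the workqueue core would */
        return 0;
    }
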
1429 struct wb_writeback_work work = { in writeback_inodes_sb_nr() local
1441 bdi_queue_work(sb->s_bdi, &work); in writeback_inodes_sb_nr()
1510 struct wb_writeback_work work = { in sync_inodes_sb() local
1525 bdi_queue_work(sb->s_bdi, &work); in sync_inodes_sb()
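
writeback_inodes_sb_nr() and sync_inodes_sb() close the loop: both build the work item on the caller's stack with a completion, queue it with bdi_queue_work(), and then wait. That is safe precisely because wb_do_writeback() signals the completion instead of freeing items that have one. The producer side of the handshake, threaded this time so the wait is real (pthreads stand in for the kernel's completion; the flusher thread is a hypothetical consumer):

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
    };

    struct wb_work {
        long               nr_pages;
        int                sync;            /* ~WB_SYNC_ALL for sync_inodes_sb */
        struct completion *done;
    };

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    /* Hypothetical flusher thread: executes the work, then signals. */
    static void *flusher(void *arg)
    {
        struct wb_work *work = arg;
        work->nr_pages = 0;                 /* pretend everything was written */
        complete(work->done);               /* wake the waiting caller */
        return NULL;
    }

    /* Models sync_inodes_sb(): on-stack work + completion, queue, wait. */
    int main(void)
    {
        struct completion done = { PTHREAD_MUTEX_INITIALIZER,
                                   PTHREAD_COND_INITIALIZER, 0 };
        struct wb_work work = { .nr_pages = 4096, .sync = 1, .done = &done };
        pthread_t t;

        pthread_create(&t, NULL, flusher, &work);  /* ~bdi_queue_work() */
        wait_for_completion(&done);         /* safe: flusher never frees us */
        printf("sync complete, %ld pages left\n", work.nr_pages);
        pthread_join(t, NULL);
        return 0;
    }
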