This source file includes the following definitions:
- wb_inode
- wb_io_lists_populated
- wb_io_lists_depopulated
- inode_io_list_move_locked
- inode_io_list_del_locked
- wb_wakeup
- finish_writeback_work
- wb_queue_work
- wb_wait_for_completion
- __inode_attach_wb
- locked_inode_to_wb_and_lock_list
- inode_to_wb_and_lock_list
- bdi_down_write_wb_switch_rwsem
- bdi_up_write_wb_switch_rwsem
- inode_switch_wbs_work_fn
- inode_switch_wbs_rcu_fn
- inode_switch_wbs
- wbc_attach_and_unlock_inode
- wbc_detach_inode
- wbc_account_cgroup_owner
- inode_congested
- wb_split_bdi_pages
- bdi_split_work_to_wbs
- cgroup_writeback_by_id
- cgroup_writeback_umount
- cgroup_writeback_init
- bdi_down_write_wb_switch_rwsem
- bdi_up_write_wb_switch_rwsem
- locked_inode_to_wb_and_lock_list
- inode_to_wb_and_lock_list
- wb_split_bdi_pages
- bdi_split_work_to_wbs
- get_nr_dirty_pages
- wb_start_writeback
- wb_start_background_writeback
- inode_io_list_del
- sb_mark_inode_writeback
- sb_clear_inode_writeback
- redirty_tail
- requeue_io
- inode_sync_complete
- inode_dirtied_after
- move_expired_inodes
- queue_io
- write_inode
- __inode_wait_for_writeback
- inode_wait_for_writeback
- inode_sleep_on_writeback
- requeue_inode
- __writeback_single_inode
- writeback_single_inode
- writeback_chunk_size
- writeback_sb_inodes
- __writeback_inodes_wb
- writeback_inodes_wb
- wb_writeback
- get_next_work_item
- wb_check_background_flush
- wb_check_old_data_flush
- wb_check_start_all
- wb_do_writeback
- wb_workfn
- __wakeup_flusher_threads_bdi
- wakeup_flusher_threads_bdi
- wakeup_flusher_threads
- wakeup_dirtytime_writeback
- start_dirtytime_writeback
- dirtytime_interval_handler
- block_dump___mark_inode_dirty
- __mark_inode_dirty
- wait_sb_inodes
- __writeback_inodes_sb_nr
- writeback_inodes_sb_nr
- writeback_inodes_sb
- try_to_writeback_inodes_sb
- sync_inodes_sb
- write_inode_now
- sync_inode
- sync_inode_metadata
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
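/*
 * fs/fs-writeback.c
 *
 * Contains the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes: data writeback.  Writeout of the inode
 * itself is not handled here.
 */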
17 #include <linux/kernel.h>
18 #include <linux/export.h>
19 #include <linux/spinlock.h>
20 #include <linux/slab.h>
21 #include <linux/sched.h>
22 #include <linux/fs.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/kthread.h>
26 #include <linux/writeback.h>
27 #include <linux/blkdev.h>
28 #include <linux/backing-dev.h>
29 #include <linux/tracepoint.h>
30 #include <linux/device.h>
31 #include <linux/memcontrol.h>
32 #include "internal.h"
33
34
35
36
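/*
 * 4MB minimal write chunk size
 */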
37 #define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
38
39
40
41
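/*
 * Passed into wb_writeback(), essentially a subset of writeback_control.
 */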
42 struct wb_writeback_work {
43 long nr_pages;
44 struct super_block *sb;
45 unsigned long *older_than_this;
46 enum writeback_sync_modes sync_mode;
47 unsigned int tagged_writepages:1;
48 unsigned int for_kupdate:1;
49 unsigned int range_cyclic:1;
50 unsigned int for_background:1;
51 unsigned int for_sync:1;
52 unsigned int auto_free:1;
53 enum wb_reason reason;
54
55 struct list_head list;
56 struct wb_completion *done;
57 };
58
59
60
61
62
63
64
65
66
67
68
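/*
 * Interval (in seconds) after which inodes that are dirty only because of
 * lazytime timestamp updates (b_dirty_time) get queued for writeback.
 * Defaults to 12 hours.
 */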
69 unsigned int dirtytime_expire_interval = 12 * 60 * 60;
70
71 static inline struct inode *wb_inode(struct list_head *head)
72 {
73 return list_entry(head, struct inode, i_io_list);
74 }
75
76
77
78
79
80
81 #define CREATE_TRACE_POINTS
82 #include <trace/events/writeback.h>
83
84 EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);
85
86 static bool wb_io_lists_populated(struct bdi_writeback *wb)
87 {
88 if (wb_has_dirty_io(wb)) {
89 return false;
90 } else {
91 set_bit(WB_has_dirty_io, &wb->state);
92 WARN_ON_ONCE(!wb->avg_write_bandwidth);
93 atomic_long_add(wb->avg_write_bandwidth,
94 &wb->bdi->tot_write_bandwidth);
95 return true;
96 }
97 }
98
99 static void wb_io_lists_depopulated(struct bdi_writeback *wb)
100 {
101 if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
102 list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
103 clear_bit(WB_has_dirty_io, &wb->state);
104 WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
105 &wb->bdi->tot_write_bandwidth) < 0);
106 }
107 }
108
109
110
111
112
113
114
115
116
117
118
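/**
 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
 * @inode: inode to be moved
 * @wb: target bdi_writeback
 * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
 *
 * Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
 * Returns %true if @inode is the first occupant of the !dirty_time IO
 * lists; otherwise, %false.  Caller must hold @wb->list_lock.
 */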
119 static bool inode_io_list_move_locked(struct inode *inode,
120 struct bdi_writeback *wb,
121 struct list_head *head)
122 {
123 assert_spin_locked(&wb->list_lock);
124
125 list_move(&inode->i_io_list, head);
126
127
128 if (head != &wb->b_dirty_time)
129 return wb_io_lists_populated(wb);
130
131 wb_io_lists_depopulated(wb);
132 return false;
133 }
134
135
136
137
138
139
140
141
142
143 static void inode_io_list_del_locked(struct inode *inode,
144 struct bdi_writeback *wb)
145 {
146 assert_spin_locked(&wb->list_lock);
147
148 list_del_init(&inode->i_io_list);
149 wb_io_lists_depopulated(wb);
150 }
151
152 static void wb_wakeup(struct bdi_writeback *wb)
153 {
154 spin_lock_bh(&wb->work_lock);
155 if (test_bit(WB_registered, &wb->state))
156 mod_delayed_work(bdi_wq, &wb->dwork, 0);
157 spin_unlock_bh(&wb->work_lock);
158 }
159
160 static void finish_writeback_work(struct bdi_writeback *wb,
161 struct wb_writeback_work *work)
162 {
163 struct wb_completion *done = work->done;
164
165 if (work->auto_free)
166 kfree(work);
167 if (done) {
168 wait_queue_head_t *waitq = done->waitq;
169
170
171 if (atomic_dec_and_test(&done->cnt))
172 wake_up_all(waitq);
173 }
174 }
175
176 static void wb_queue_work(struct bdi_writeback *wb,
177 struct wb_writeback_work *work)
178 {
179 trace_writeback_queue(wb, work);
180
181 if (work->done)
182 atomic_inc(&work->done->cnt);
183
184 spin_lock_bh(&wb->work_lock);
185
186 if (test_bit(WB_registered, &wb->state)) {
187 list_add_tail(&work->list, &wb->work_list);
188 mod_delayed_work(bdi_wq, &wb->dwork, 0);
189 } else
190 finish_writeback_work(wb, work);
191
192 spin_unlock_bh(&wb->work_lock);
193 }
194
195
196
197
198
199
200
201
202
203
204
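/**
 * wb_wait_for_completion - wait for completion of bdi_writeback_works
 * @done: target wb_completion
 *
 * Wait for one or more work items issued with their ->done field set to
 * @done, which should have been initialized with DEFINE_WB_COMPLETION().
 * Returns after all such work items have completed.  Work items which are
 * waited upon aren't freed automatically on completion.
 */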
205 void wb_wait_for_completion(struct wb_completion *done)
206 {
207 atomic_dec(&done->cnt);
208 wait_event(*done->waitq, !atomic_read(&done->cnt));
209 }
210
211 #ifdef CONFIG_CGROUP_WRITEBACK
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
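/*
 * Parameters for foreign inode detection, see wbc_detach_inode() for how
 * they are used.  Writeback time is tracked in a coarse unit
 * (WB_FRN_TIME_SHIFT) and folded into an exponentially decaying average
 * and a slot-based history (WB_FRN_HIST_*); an inode is switched to a
 * foreign wb once that wb dominates enough of the recent history.
 * WB_FRN_MAX_IN_FLIGHT bounds the number of concurrent inode->wb switches.
 */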
232 #define WB_FRN_TIME_SHIFT 13
233 #define WB_FRN_TIME_AVG_SHIFT 3
234 #define WB_FRN_TIME_CUT_DIV 8
235 #define WB_FRN_TIME_PERIOD (2 * (1 << WB_FRN_TIME_SHIFT))
236
237 #define WB_FRN_HIST_SLOTS 16
238 #define WB_FRN_HIST_UNIT (WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
239
240 #define WB_FRN_HIST_THR_SLOTS (WB_FRN_HIST_SLOTS / 2)
241
242 #define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1)
243
244 #define WB_FRN_MAX_IN_FLIGHT 1024
245
246 static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
247 static struct workqueue_struct *isw_wq;
248
249 void __inode_attach_wb(struct inode *inode, struct page *page)
250 {
251 struct backing_dev_info *bdi = inode_to_bdi(inode);
252 struct bdi_writeback *wb = NULL;
253
254 if (inode_cgwb_enabled(inode)) {
255 struct cgroup_subsys_state *memcg_css;
256
257 if (page) {
258 memcg_css = mem_cgroup_css_from_page(page);
259 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
260 } else {
261
262 memcg_css = task_get_css(current, memory_cgrp_id);
263 wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
264 css_put(memcg_css);
265 }
266 }
267
268 if (!wb)
269 wb = &bdi->wb;
270
271
272
273
274
275 if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
276 wb_put(wb);
277 }
278 EXPORT_SYMBOL_GPL(__inode_attach_wb);
279
280
281
282
283
284
285
286
287
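/**
 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
 * @inode: inode of interest with i_lock held
 *
 * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
 * held on entry and is released on return.  The returned wb is guaranteed
 * to stay @inode's associated wb until its list_lock is released.
 */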
288 static struct bdi_writeback *
289 locked_inode_to_wb_and_lock_list(struct inode *inode)
290 __releases(&inode->i_lock)
291 __acquires(&wb->list_lock)
292 {
293 while (true) {
294 struct bdi_writeback *wb = inode_to_wb(inode);
295
296
297
298
299
300
301
302 wb_get(wb);
303 spin_unlock(&inode->i_lock);
304 spin_lock(&wb->list_lock);
305
306
307 if (likely(wb == inode->i_wb)) {
308 wb_put(wb);
309 return wb;
310 }
311
312 spin_unlock(&wb->list_lock);
313 wb_put(wb);
314 cpu_relax();
315 spin_lock(&inode->i_lock);
316 }
317 }
318
319
320
321
322
323
324
325
326 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
327 __acquires(&wb->list_lock)
328 {
329 spin_lock(&inode->i_lock);
330 return locked_inode_to_wb_and_lock_list(inode);
331 }
332
333 struct inode_switch_wbs_context {
334 struct inode *inode;
335 struct bdi_writeback *new_wb;
336
337 struct rcu_head rcu_head;
338 struct work_struct work;
339 };
340
341 static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
342 {
343 down_write(&bdi->wb_switch_rwsem);
344 }
345
346 static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
347 {
348 up_write(&bdi->wb_switch_rwsem);
349 }
350
351 static void inode_switch_wbs_work_fn(struct work_struct *work)
352 {
353 struct inode_switch_wbs_context *isw =
354 container_of(work, struct inode_switch_wbs_context, work);
355 struct inode *inode = isw->inode;
356 struct backing_dev_info *bdi = inode_to_bdi(inode);
357 struct address_space *mapping = inode->i_mapping;
358 struct bdi_writeback *old_wb = inode->i_wb;
359 struct bdi_writeback *new_wb = isw->new_wb;
360 XA_STATE(xas, &mapping->i_pages, 0);
361 struct page *page;
362 bool switched = false;
363
364
365
366
367
368 down_read(&bdi->wb_switch_rwsem);
369
370
371
372
373
374
375
376
377
378
379
380 if (old_wb < new_wb) {
381 spin_lock(&old_wb->list_lock);
382 spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
383 } else {
384 spin_lock(&new_wb->list_lock);
385 spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
386 }
387 spin_lock(&inode->i_lock);
388 xa_lock_irq(&mapping->i_pages);
389
390
391
392
393
394 if (unlikely(inode->i_state & I_FREEING))
395 goto skip_switch;
396
397 trace_inode_switch_wbs(inode, old_wb, new_wb);
398
399
400
401
402
403
404 xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
405 if (PageDirty(page)) {
406 dec_wb_stat(old_wb, WB_RECLAIMABLE);
407 inc_wb_stat(new_wb, WB_RECLAIMABLE);
408 }
409 }
410
411 xas_set(&xas, 0);
412 xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
413 WARN_ON_ONCE(!PageWriteback(page));
414 dec_wb_stat(old_wb, WB_WRITEBACK);
415 inc_wb_stat(new_wb, WB_WRITEBACK);
416 }
417
418 wb_get(new_wb);
419
420
421
422
423
424
425
426 if (!list_empty(&inode->i_io_list)) {
427 struct inode *pos;
428
429 inode_io_list_del_locked(inode, old_wb);
430 inode->i_wb = new_wb;
431 list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
432 if (time_after_eq(inode->dirtied_when,
433 pos->dirtied_when))
434 break;
435 inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
436 } else {
437 inode->i_wb = new_wb;
438 }
439
440
441 inode->i_wb_frn_winner = 0;
442 inode->i_wb_frn_avg_time = 0;
443 inode->i_wb_frn_history = 0;
444 switched = true;
445 skip_switch:
446
447
448
449
450 smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
451
452 xa_unlock_irq(&mapping->i_pages);
453 spin_unlock(&inode->i_lock);
454 spin_unlock(&new_wb->list_lock);
455 spin_unlock(&old_wb->list_lock);
456
457 up_read(&bdi->wb_switch_rwsem);
458
459 if (switched) {
460 wb_wakeup(new_wb);
461 wb_put(old_wb);
462 }
463 wb_put(new_wb);
464
465 iput(inode);
466 kfree(isw);
467
468 atomic_dec(&isw_nr_in_flight);
469 }
470
471 static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
472 {
473 struct inode_switch_wbs_context *isw = container_of(rcu_head,
474 struct inode_switch_wbs_context, rcu_head);
475
476
477 INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
478 queue_work(isw_wq, &isw->work);
479 }
480
481
482
483
484
485
486
487
488
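/**
 * inode_switch_wbs - change the wb association of an inode
 * @inode: target inode
 * @new_wb_id: ID of the new wb
 *
 * Switch @inode's wb association to the wb identified by @new_wb_id.  The
 * switching is performed asynchronously and may fail silently: an RCU
 * grace period is waited so that stat update paths see I_WB_SWITCH, then
 * the actual transfer runs from a work item on isw_wq.
 */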
489 static void inode_switch_wbs(struct inode *inode, int new_wb_id)
490 {
491 struct backing_dev_info *bdi = inode_to_bdi(inode);
492 struct cgroup_subsys_state *memcg_css;
493 struct inode_switch_wbs_context *isw;
494
495
496 if (inode->i_state & I_WB_SWITCH)
497 return;
498
499
500 if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
501 return;
502
503 isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
504 if (!isw)
505 return;
506
507
508 rcu_read_lock();
509 memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
510 if (memcg_css)
511 isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
512 rcu_read_unlock();
513 if (!isw->new_wb)
514 goto out_free;
515
516
517 spin_lock(&inode->i_lock);
518 if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
519 inode->i_state & (I_WB_SWITCH | I_FREEING) ||
520 inode_to_wb(inode) == isw->new_wb) {
521 spin_unlock(&inode->i_lock);
522 goto out_free;
523 }
524 inode->i_state |= I_WB_SWITCH;
525 __iget(inode);
526 spin_unlock(&inode->i_lock);
527
528 isw->inode = inode;
529
530
531
532
533
534
535
536 call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
537
538 atomic_inc(&isw_nr_in_flight);
539 return;
540
541 out_free:
542 if (isw->new_wb)
543 wb_put(isw->new_wb);
544 kfree(isw);
545 }
546
547
548
549
550
551
552
553
554
555
556
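/**
 * wbc_attach_and_unlock_inode - associate wbc with its target inode and unlock it
 * @wbc: writeback_control of interest
 * @inode: target inode
 *
 * @inode is locked and about to be written back under the control of @wbc.
 * Record @inode's writeback context into @wbc and unlock the i_lock.  On
 * return, @wbc is ready for use for writeback of @inode and disassociation
 * should be performed with wbc_detach_inode().
 */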
557 void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
558 struct inode *inode)
559 {
560 if (!inode_cgwb_enabled(inode)) {
561 spin_unlock(&inode->i_lock);
562 return;
563 }
564
565 wbc->wb = inode_to_wb(inode);
566 wbc->inode = inode;
567
568 wbc->wb_id = wbc->wb->memcg_css->id;
569 wbc->wb_lcand_id = inode->i_wb_frn_winner;
570 wbc->wb_tcand_id = 0;
571 wbc->wb_bytes = 0;
572 wbc->wb_lcand_bytes = 0;
573 wbc->wb_tcand_bytes = 0;
574
575 wb_get(wbc->wb);
576 spin_unlock(&inode->i_lock);
577
578
579
580
581
582
583
584
585 if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
586 inode_switch_wbs(inode, wbc->wb_id);
587 }
588 EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
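/**
 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
 * @wbc: writeback_control of the just finished writeback
 *
 * To be called after a writeback attempt of an inode finishes and undoes
 * wbc_attach_and_unlock_inode().
 *
 * A single inode is owned by one wb at a time.  Compare how much the
 * current wb and the most active foreign candidate wrote during this pass,
 * record in a bit history whether a foreign wb dominated, and, if foreign
 * owners keep dominating (more than WB_FRN_HIST_THR_SLOTS of the recent
 * history), switch the inode to the dominating wb via inode_switch_wbs().
 */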
627 void wbc_detach_inode(struct writeback_control *wbc)
628 {
629 struct bdi_writeback *wb = wbc->wb;
630 struct inode *inode = wbc->inode;
631 unsigned long avg_time, max_bytes, max_time;
632 u16 history;
633 int max_id;
634
635 if (!wb)
636 return;
637
638 history = inode->i_wb_frn_history;
639 avg_time = inode->i_wb_frn_avg_time;
640
641
642 if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
643 wbc->wb_bytes >= wbc->wb_tcand_bytes) {
644 max_id = wbc->wb_id;
645 max_bytes = wbc->wb_bytes;
646 } else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
647 max_id = wbc->wb_lcand_id;
648 max_bytes = wbc->wb_lcand_bytes;
649 } else {
650 max_id = wbc->wb_tcand_id;
651 max_bytes = wbc->wb_tcand_bytes;
652 }
653
654
655
656
657
658
659
660
661 max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
662 wb->avg_write_bandwidth);
663 if (avg_time)
664 avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
665 (avg_time >> WB_FRN_TIME_AVG_SHIFT);
666 else
667 avg_time = max_time;
668
669 if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
670 int slots;
671
672
673
674
675
676
677
678
679
680 slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
681 (unsigned long)WB_FRN_HIST_MAX_SLOTS);
682 history <<= slots;
683 if (wbc->wb_id != max_id)
684 history |= (1U << slots) - 1;
685
686 if (history)
687 trace_inode_foreign_history(inode, wbc, history);
688
689
690
691
692
693
694
695
696 if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
697 inode_switch_wbs(inode, max_id);
698 }
699
700
701
702
703
704 inode->i_wb_frn_winner = max_id;
705 inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
706 inode->i_wb_frn_history = history;
707
708 wb_put(wbc->wb);
709 wbc->wb = NULL;
710 }
711 EXPORT_SYMBOL_GPL(wbc_detach_inode);
712
713
714
715
716
717
718
719
720
721
722
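/**
 * wbc_account_cgroup_owner - account writeback to the memcg owning a page
 * @wbc: writeback_control of the writeback in progress
 * @page: page being written out
 * @bytes: number of bytes written out for @page
 *
 * @bytes from @page are about to be written out during the writeback
 * controlled by @wbc.  Keep the per-cgroup statistics which
 * wbc_detach_inode() later uses for foreign inode detection: the current
 * wb's bytes, the last winner's bytes, and a Boyer-Moore style running
 * candidate among the remaining cgroups.
 */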
723 void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
724 size_t bytes)
725 {
726 struct cgroup_subsys_state *css;
727 int id;
728
729
730
731
732
733
734
735 if (!wbc->wb || wbc->no_cgroup_owner)
736 return;
737
738 css = mem_cgroup_css_from_page(page);
739
740 if (!(css->flags & CSS_ONLINE))
741 return;
742
743 id = css->id;
744
745 if (id == wbc->wb_id) {
746 wbc->wb_bytes += bytes;
747 return;
748 }
749
750 if (id == wbc->wb_lcand_id)
751 wbc->wb_lcand_bytes += bytes;
752
753
754 if (!wbc->wb_tcand_bytes)
755 wbc->wb_tcand_id = id;
756 if (id == wbc->wb_tcand_id)
757 wbc->wb_tcand_bytes += bytes;
758 else
759 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
760 }
761 EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779 int inode_congested(struct inode *inode, int cong_bits)
780 {
781
782
783
784
785 if (inode && inode_to_wb_is_valid(inode)) {
786 struct bdi_writeback *wb;
787 struct wb_lock_cookie lock_cookie = {};
788 bool congested;
789
790 wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
791 congested = wb_congested(wb, cong_bits);
792 unlocked_inode_to_wb_end(inode, &lock_cookie);
793 return congested;
794 }
795
796 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
797 }
798 EXPORT_SYMBOL_GPL(inode_congested);
799
800
801
802
803
804
805
806
807
808
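/**
 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 * @wb: target bdi_writeback to split @nr_pages to
 * @nr_pages: number of pages to write for the whole bdi
 *
 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 * relation to the total write bandwidth of all wb's with dirty inodes on
 * @wb->bdi.
 */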
809 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
810 {
811 unsigned long this_bw = wb->avg_write_bandwidth;
812 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
813
814 if (nr_pages == LONG_MAX)
815 return LONG_MAX;
816
817
818
819
820
821
822 if (!tot_bw || this_bw >= tot_bw)
823 return nr_pages;
824 else
825 return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
826 }
827
828
829
830
831
832
833
834
835
836
837
838
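/**
 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
 * @bdi: target backing_dev_info
 * @base_work: wb_writeback_work to issue
 * @skip_if_busy: skip wb's which already have writeback in progress
 *
 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
 * have dirty inodes.  If @base_work->nr_pages isn't %LONG_MAX, it's
 * distributed to the busy wbs according to each wb's proportion of the
 * total active write bandwidth of @bdi.  Falls back to an on-stack work
 * item when allocation fails.
 */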
839 static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
840 struct wb_writeback_work *base_work,
841 bool skip_if_busy)
842 {
843 struct bdi_writeback *last_wb = NULL;
844 struct bdi_writeback *wb = list_entry(&bdi->wb_list,
845 struct bdi_writeback, bdi_node);
846
847 might_sleep();
848 restart:
849 rcu_read_lock();
850 list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
851 DEFINE_WB_COMPLETION(fallback_work_done, bdi);
852 struct wb_writeback_work fallback_work;
853 struct wb_writeback_work *work;
854 long nr_pages;
855
856 if (last_wb) {
857 wb_put(last_wb);
858 last_wb = NULL;
859 }
860
861
862 if (!wb_has_dirty_io(wb) &&
863 (base_work->sync_mode == WB_SYNC_NONE ||
864 list_empty(&wb->b_dirty_time)))
865 continue;
866 if (skip_if_busy && writeback_in_progress(wb))
867 continue;
868
869 nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
870
871 work = kmalloc(sizeof(*work), GFP_ATOMIC);
872 if (work) {
873 *work = *base_work;
874 work->nr_pages = nr_pages;
875 work->auto_free = 1;
876 wb_queue_work(wb, work);
877 continue;
878 }
879
880
881 work = &fallback_work;
882 *work = *base_work;
883 work->nr_pages = nr_pages;
884 work->auto_free = 0;
885 work->done = &fallback_work_done;
886
887 wb_queue_work(wb, work);
888
889
890
891
892
893
894 wb_get(wb);
895 last_wb = wb;
896
897 rcu_read_unlock();
898 wb_wait_for_completion(&fallback_work_done);
899 goto restart;
900 }
901 rcu_read_unlock();
902
903 if (last_wb)
904 wb_put(last_wb);
905 }
906
907
908
909
910
911
912
913
914
915
916
917
918 int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr,
919 enum wb_reason reason, struct wb_completion *done)
920 {
921 struct backing_dev_info *bdi;
922 struct cgroup_subsys_state *memcg_css;
923 struct bdi_writeback *wb;
924 struct wb_writeback_work *work;
925 int ret;
926
927
928 bdi = bdi_get_by_id(bdi_id);
929 if (!bdi)
930 return -ENOENT;
931
932 rcu_read_lock();
933 memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
934 if (memcg_css && !css_tryget(memcg_css))
935 memcg_css = NULL;
936 rcu_read_unlock();
937 if (!memcg_css) {
938 ret = -ENOENT;
939 goto out_bdi_put;
940 }
941
942
943
944
945
946 wb = wb_get_lookup(bdi, memcg_css);
947 if (!wb) {
948 ret = -ENOENT;
949 goto out_css_put;
950 }
951
952
953
954
955
956
957
958
959 if (!nr) {
960 unsigned long filepages, headroom, dirty, writeback;
961
962 mem_cgroup_wb_stats(wb, &filepages, &headroom, &dirty,
963 &writeback);
964 nr = dirty * 10 / 8;
965 }
966
967
968 work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
969 if (work) {
970 work->nr_pages = nr;
971 work->sync_mode = WB_SYNC_NONE;
972 work->range_cyclic = 1;
973 work->reason = reason;
974 work->done = done;
975 work->auto_free = 1;
976 wb_queue_work(wb, work);
977 ret = 0;
978 } else {
979 ret = -ENOMEM;
980 }
981
982 wb_put(wb);
983 out_css_put:
984 css_put(memcg_css);
985 out_bdi_put:
986 bdi_put(bdi);
987 return ret;
988 }
989
990
991
992
993
994
995
996
997
998
999
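/**
 * cgroup_writeback_umount - flush inode wb switches for umount
 *
 * Called when a super_block is about to be destroyed.  An inode wb switch
 * goes through an RCU grace period and then a work item on isw_wq, so
 * waiting for both ensures that all in-flight switches have finished and
 * the inodes can be released safely.
 */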
1000 void cgroup_writeback_umount(void)
1001 {
1002 if (atomic_read(&isw_nr_in_flight)) {
1003
1004
1005
1006
1007 rcu_barrier();
1008 flush_workqueue(isw_wq);
1009 }
1010 }
1011
1012 static int __init cgroup_writeback_init(void)
1013 {
1014 isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
1015 if (!isw_wq)
1016 return -ENOMEM;
1017 return 0;
1018 }
1019 fs_initcall(cgroup_writeback_init);
1020
1021 #else
1022
1023 static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1024 static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1025
1026 static struct bdi_writeback *
1027 locked_inode_to_wb_and_lock_list(struct inode *inode)
1028 __releases(&inode->i_lock)
1029 __acquires(&wb->list_lock)
1030 {
1031 struct bdi_writeback *wb = inode_to_wb(inode);
1032
1033 spin_unlock(&inode->i_lock);
1034 spin_lock(&wb->list_lock);
1035 return wb;
1036 }
1037
1038 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
1039 __acquires(&wb->list_lock)
1040 {
1041 struct bdi_writeback *wb = inode_to_wb(inode);
1042
1043 spin_lock(&wb->list_lock);
1044 return wb;
1045 }
1046
1047 static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
1048 {
1049 return nr_pages;
1050 }
1051
1052 static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
1053 struct wb_writeback_work *base_work,
1054 bool skip_if_busy)
1055 {
1056 might_sleep();
1057
1058 if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
1059 base_work->auto_free = 0;
1060 wb_queue_work(&bdi->wb, base_work);
1061 }
1062 }
1063
1064 #endif
1065
1066
1067
1068
1069
1070 static unsigned long get_nr_dirty_pages(void)
1071 {
1072 return global_node_page_state(NR_FILE_DIRTY) +
1073 global_node_page_state(NR_UNSTABLE_NFS) +
1074 get_nr_dirty_inodes();
1075 }
1076
1077 static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
1078 {
1079 if (!wb_has_dirty_io(wb))
1080 return;
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090 if (test_bit(WB_start_all, &wb->state) ||
1091 test_and_set_bit(WB_start_all, &wb->state))
1092 return;
1093
1094 wb->start_all_reason = reason;
1095 wb_wakeup(wb);
1096 }
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108 void wb_start_background_writeback(struct bdi_writeback *wb)
1109 {
1110
1111
1112
1113
1114 trace_writeback_wake_background(wb);
1115 wb_wakeup(wb);
1116 }
1117
1118
1119
1120
1121 void inode_io_list_del(struct inode *inode)
1122 {
1123 struct bdi_writeback *wb;
1124
1125 wb = inode_to_wb_and_lock_list(inode);
1126 inode_io_list_del_locked(inode, wb);
1127 spin_unlock(&wb->list_lock);
1128 }
1129
1130
1131
1132
1133 void sb_mark_inode_writeback(struct inode *inode)
1134 {
1135 struct super_block *sb = inode->i_sb;
1136 unsigned long flags;
1137
1138 if (list_empty(&inode->i_wb_list)) {
1139 spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1140 if (list_empty(&inode->i_wb_list)) {
1141 list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
1142 trace_sb_mark_inode_writeback(inode);
1143 }
1144 spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1145 }
1146 }
1147
1148
1149
1150
1151 void sb_clear_inode_writeback(struct inode *inode)
1152 {
1153 struct super_block *sb = inode->i_sb;
1154 unsigned long flags;
1155
1156 if (!list_empty(&inode->i_wb_list)) {
1157 spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1158 if (!list_empty(&inode->i_wb_list)) {
1159 list_del_init(&inode->i_wb_list);
1160 trace_sb_clear_inode_writeback(inode);
1161 }
1162 spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1163 }
1164 }
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175 static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
1176 {
1177 if (!list_empty(&wb->b_dirty)) {
1178 struct inode *tail;
1179
1180 tail = wb_inode(wb->b_dirty.next);
1181 if (time_before(inode->dirtied_when, tail->dirtied_when))
1182 inode->dirtied_when = jiffies;
1183 }
1184 inode_io_list_move_locked(inode, wb, &wb->b_dirty);
1185 }
1186
1187
1188
1189
1190 static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1191 {
1192 inode_io_list_move_locked(inode, wb, &wb->b_more_io);
1193 }
1194
1195 static void inode_sync_complete(struct inode *inode)
1196 {
1197 inode->i_state &= ~I_SYNC;
1198
1199 inode_add_lru(inode);
1200
1201 smp_mb();
1202 wake_up_bit(&inode->i_state, __I_SYNC);
1203 }
1204
1205 static bool inode_dirtied_after(struct inode *inode, unsigned long t)
1206 {
1207 bool ret = time_after(inode->dirtied_when, t);
1208 #ifndef CONFIG_64BIT
1209
1210
1211
1212
1213
1214
1215 ret = ret && time_before_eq(inode->dirtied_when, jiffies);
1216 #endif
1217 return ret;
1218 }
1219
1220 #define EXPIRE_DIRTY_ATIME 0x0001
1221
1222
1223
1224
1225
1226 static int move_expired_inodes(struct list_head *delaying_queue,
1227 struct list_head *dispatch_queue,
1228 int flags,
1229 struct wb_writeback_work *work)
1230 {
1231 unsigned long *older_than_this = NULL;
1232 unsigned long expire_time;
1233 LIST_HEAD(tmp);
1234 struct list_head *pos, *node;
1235 struct super_block *sb = NULL;
1236 struct inode *inode;
1237 int do_sb_sort = 0;
1238 int moved = 0;
1239
1240 if ((flags & EXPIRE_DIRTY_ATIME) == 0)
1241 older_than_this = work->older_than_this;
1242 else if (!work->for_sync) {
1243 expire_time = jiffies - (dirtytime_expire_interval * HZ);
1244 older_than_this = &expire_time;
1245 }
1246 while (!list_empty(delaying_queue)) {
1247 inode = wb_inode(delaying_queue->prev);
1248 if (older_than_this &&
1249 inode_dirtied_after(inode, *older_than_this))
1250 break;
1251 list_move(&inode->i_io_list, &tmp);
1252 moved++;
1253 if (flags & EXPIRE_DIRTY_ATIME)
1254 set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
1255 if (sb_is_blkdev_sb(inode->i_sb))
1256 continue;
1257 if (sb && sb != inode->i_sb)
1258 do_sb_sort = 1;
1259 sb = inode->i_sb;
1260 }
1261
1262
1263 if (!do_sb_sort) {
1264 list_splice(&tmp, dispatch_queue);
1265 goto out;
1266 }
1267
1268
1269 while (!list_empty(&tmp)) {
1270 sb = wb_inode(tmp.prev)->i_sb;
1271 list_for_each_prev_safe(pos, node, &tmp) {
1272 inode = wb_inode(pos);
1273 if (inode->i_sb == sb)
1274 list_move(&inode->i_io_list, dispatch_queue);
1275 }
1276 }
1277 out:
1278 return moved;
1279 }
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
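/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */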
1292 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
1293 {
1294 int moved;
1295
1296 assert_spin_locked(&wb->list_lock);
1297 list_splice_init(&wb->b_more_io, &wb->b_io);
1298 moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
1299 moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
1300 EXPIRE_DIRTY_ATIME, work);
1301 if (moved)
1302 wb_io_lists_populated(wb);
1303 trace_writeback_queue_io(wb, work, moved);
1304 }
1305
1306 static int write_inode(struct inode *inode, struct writeback_control *wbc)
1307 {
1308 int ret;
1309
1310 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
1311 trace_writeback_write_inode_start(inode, wbc);
1312 ret = inode->i_sb->s_op->write_inode(inode, wbc);
1313 trace_writeback_write_inode(inode, wbc);
1314 return ret;
1315 }
1316 return 0;
1317 }
1318
1319
1320
1321
1322
1323 static void __inode_wait_for_writeback(struct inode *inode)
1324 __releases(inode->i_lock)
1325 __acquires(inode->i_lock)
1326 {
1327 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
1328 wait_queue_head_t *wqh;
1329
1330 wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1331 while (inode->i_state & I_SYNC) {
1332 spin_unlock(&inode->i_lock);
1333 __wait_on_bit(wqh, &wq, bit_wait,
1334 TASK_UNINTERRUPTIBLE);
1335 spin_lock(&inode->i_lock);
1336 }
1337 }
1338
1339
1340
1341
1342 void inode_wait_for_writeback(struct inode *inode)
1343 {
1344 spin_lock(&inode->i_lock);
1345 __inode_wait_for_writeback(inode);
1346 spin_unlock(&inode->i_lock);
1347 }
1348
1349
1350
1351
1352
1353
1354 static void inode_sleep_on_writeback(struct inode *inode)
1355 __releases(inode->i_lock)
1356 {
1357 DEFINE_WAIT(wait);
1358 wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1359 int sleep;
1360
1361 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1362 sleep = inode->i_state & I_SYNC;
1363 spin_unlock(&inode->i_lock);
1364 if (sleep)
1365 schedule();
1366 finish_wait(wqh, &wait);
1367 }
1368
1369
1370
1371
1372
1373
1374
1375
1376
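/*
 * Find the proper writeback list for the inode depending on its current state
 * and possibly also a change of its state while we were doing writeback.
 * Here we handle things such as livelock prevention and fairness of writeback
 * among inodes.  This function can be called only by the flusher thread -
 * no one else processes all inodes on the writeback lists, and requeueing
 * inodes behind the flusher thread's back can have unexpected consequences.
 */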
1377 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
1378 struct writeback_control *wbc)
1379 {
1380 if (inode->i_state & I_FREEING)
1381 return;
1382
1383
1384
1385
1386
1387
1388 if ((inode->i_state & I_DIRTY) &&
1389 (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
1390 inode->dirtied_when = jiffies;
1391
1392 if (wbc->pages_skipped) {
1393
1394
1395
1396
1397 redirty_tail(inode, wb);
1398 return;
1399 }
1400
1401 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
1402
1403
1404
1405
1406 if (wbc->nr_to_write <= 0) {
1407
1408 requeue_io(inode, wb);
1409 } else {
1410
1411
1412
1413
1414
1415
1416
1417 redirty_tail(inode, wb);
1418 }
1419 } else if (inode->i_state & I_DIRTY) {
1420
1421
1422
1423
1424
1425 redirty_tail(inode, wb);
1426 } else if (inode->i_state & I_DIRTY_TIME) {
1427 inode->dirtied_when = jiffies;
1428 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
1429 } else {
1430
1431 inode_io_list_del_locked(inode, wb);
1432 }
1433 }
1434
1435
1436
1437
1438
1439
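/*
 * Write out an inode and its dirty pages.  Do not update the writeback list
 * linkage.  That is left to the caller.  The caller is also responsible for
 * setting the I_SYNC flag and calling inode_sync_complete() to clear it.
 */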
1440 static int
1441 __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
1442 {
1443 struct address_space *mapping = inode->i_mapping;
1444 long nr_to_write = wbc->nr_to_write;
1445 unsigned dirty;
1446 int ret;
1447
1448 WARN_ON(!(inode->i_state & I_SYNC));
1449
1450 trace_writeback_single_inode_start(inode, wbc, nr_to_write);
1451
1452 ret = do_writepages(mapping, wbc);
1453
1454
1455
1456
1457
1458
1459
1460
1461 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
1462 int err = filemap_fdatawait(mapping);
1463 if (ret == 0)
1464 ret = err;
1465 }
1466
1467
1468
1469
1470
1471
1472 spin_lock(&inode->i_lock);
1473
1474 dirty = inode->i_state & I_DIRTY;
1475 if (inode->i_state & I_DIRTY_TIME) {
1476 if ((dirty & I_DIRTY_INODE) ||
1477 wbc->sync_mode == WB_SYNC_ALL ||
1478 unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
1479 unlikely(time_after(jiffies,
1480 (inode->dirtied_time_when +
1481 dirtytime_expire_interval * HZ)))) {
1482 dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED;
1483 trace_writeback_lazytime(inode);
1484 }
1485 } else
1486 inode->i_state &= ~I_DIRTY_TIME_EXPIRED;
1487 inode->i_state &= ~dirty;
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500 smp_mb();
1501
1502 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
1503 inode->i_state |= I_DIRTY_PAGES;
1504
1505 spin_unlock(&inode->i_lock);
1506
1507 if (dirty & I_DIRTY_TIME)
1508 mark_inode_dirty_sync(inode);
1509
1510 if (dirty & ~I_DIRTY_PAGES) {
1511 int err = write_inode(inode, wbc);
1512 if (ret == 0)
1513 ret = err;
1514 }
1515 trace_writeback_single_inode(inode, wbc, nr_to_write);
1516 return ret;
1517 }
1518
1519
1520
1521
1522
1523
1524
1525
1526
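/*
 * Write out an inode's dirty pages.  Either the caller has an active
 * reference on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed for writing back one inode which we get handed
 * e.g. from a filesystem.  The flusher thread uses __writeback_single_inode()
 * and does more thorough writeback list handling in writeback_sb_inodes().
 */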
1527 static int writeback_single_inode(struct inode *inode,
1528 struct writeback_control *wbc)
1529 {
1530 struct bdi_writeback *wb;
1531 int ret = 0;
1532
1533 spin_lock(&inode->i_lock);
1534 if (!atomic_read(&inode->i_count))
1535 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
1536 else
1537 WARN_ON(inode->i_state & I_WILL_FREE);
1538
1539 if (inode->i_state & I_SYNC) {
1540 if (wbc->sync_mode != WB_SYNC_ALL)
1541 goto out;
1542
1543
1544
1545
1546
1547 __inode_wait_for_writeback(inode);
1548 }
1549 WARN_ON(inode->i_state & I_SYNC);
1550
1551
1552
1553
1554
1555
1556
1557
1558 if (!(inode->i_state & I_DIRTY_ALL) &&
1559 (wbc->sync_mode != WB_SYNC_ALL ||
1560 !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
1561 goto out;
1562 inode->i_state |= I_SYNC;
1563 wbc_attach_and_unlock_inode(wbc, inode);
1564
1565 ret = __writeback_single_inode(inode, wbc);
1566
1567 wbc_detach_inode(wbc);
1568
1569 wb = inode_to_wb_and_lock_list(inode);
1570 spin_lock(&inode->i_lock);
1571
1572
1573
1574
1575 if (!(inode->i_state & I_DIRTY_ALL))
1576 inode_io_list_del_locked(inode, wb);
1577 spin_unlock(&wb->list_lock);
1578 inode_sync_complete(inode);
1579 out:
1580 spin_unlock(&inode->i_lock);
1581 return ret;
1582 }
1583
1584 static long writeback_chunk_size(struct bdi_writeback *wb,
1585 struct wb_writeback_work *work)
1586 {
1587 long pages;
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602 if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
1603 pages = LONG_MAX;
1604 else {
1605 pages = min(wb->avg_write_bandwidth / 2,
1606 global_wb_domain.dirty_limit / DIRTY_SCOPE);
1607 pages = min(pages, work->nr_pages);
1608 pages = round_down(pages + MIN_WRITEBACK_PAGES,
1609 MIN_WRITEBACK_PAGES);
1610 }
1611
1612 return pages;
1613 }
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
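/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 *
 * NOTE! This is called with wb->list_lock held, and will
 * unlock and relock that for each inode it ends up doing
 * IO for.
 */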
1624 static long writeback_sb_inodes(struct super_block *sb,
1625 struct bdi_writeback *wb,
1626 struct wb_writeback_work *work)
1627 {
1628 struct writeback_control wbc = {
1629 .sync_mode = work->sync_mode,
1630 .tagged_writepages = work->tagged_writepages,
1631 .for_kupdate = work->for_kupdate,
1632 .for_background = work->for_background,
1633 .for_sync = work->for_sync,
1634 .range_cyclic = work->range_cyclic,
1635 .range_start = 0,
1636 .range_end = LLONG_MAX,
1637 };
1638 unsigned long start_time = jiffies;
1639 long write_chunk;
1640 long wrote = 0;
1641
1642 while (!list_empty(&wb->b_io)) {
1643 struct inode *inode = wb_inode(wb->b_io.prev);
1644 struct bdi_writeback *tmp_wb;
1645
1646 if (inode->i_sb != sb) {
1647 if (work->sb) {
1648
1649
1650
1651
1652
1653 redirty_tail(inode, wb);
1654 continue;
1655 }
1656
1657
1658
1659
1660
1661
1662 break;
1663 }
1664
1665
1666
1667
1668
1669
1670 spin_lock(&inode->i_lock);
1671 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
1672 spin_unlock(&inode->i_lock);
1673 redirty_tail(inode, wb);
1674 continue;
1675 }
1676 if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686 spin_unlock(&inode->i_lock);
1687 requeue_io(inode, wb);
1688 trace_writeback_sb_inodes_requeue(inode);
1689 continue;
1690 }
1691 spin_unlock(&wb->list_lock);
1692
1693
1694
1695
1696
1697
1698 if (inode->i_state & I_SYNC) {
1699
1700 inode_sleep_on_writeback(inode);
1701
1702 spin_lock(&wb->list_lock);
1703 continue;
1704 }
1705 inode->i_state |= I_SYNC;
1706 wbc_attach_and_unlock_inode(&wbc, inode);
1707
1708 write_chunk = writeback_chunk_size(wb, work);
1709 wbc.nr_to_write = write_chunk;
1710 wbc.pages_skipped = 0;
1711
1712
1713
1714
1715
1716 __writeback_single_inode(inode, &wbc);
1717
1718 wbc_detach_inode(&wbc);
1719 work->nr_pages -= write_chunk - wbc.nr_to_write;
1720 wrote += write_chunk - wbc.nr_to_write;
1721
1722 if (need_resched()) {
1723
1724
1725
1726
1727
1728
1729
1730
1731 blk_flush_plug(current);
1732 cond_resched();
1733 }
1734
1735
1736
1737
1738
1739 tmp_wb = inode_to_wb_and_lock_list(inode);
1740 spin_lock(&inode->i_lock);
1741 if (!(inode->i_state & I_DIRTY_ALL))
1742 wrote++;
1743 requeue_inode(inode, tmp_wb, &wbc);
1744 inode_sync_complete(inode);
1745 spin_unlock(&inode->i_lock);
1746
1747 if (unlikely(tmp_wb != wb)) {
1748 spin_unlock(&tmp_wb->list_lock);
1749 spin_lock(&wb->list_lock);
1750 }
1751
1752
1753
1754
1755
1756 if (wrote) {
1757 if (time_is_before_jiffies(start_time + HZ / 10UL))
1758 break;
1759 if (work->nr_pages <= 0)
1760 break;
1761 }
1762 }
1763 return wrote;
1764 }
1765
1766 static long __writeback_inodes_wb(struct bdi_writeback *wb,
1767 struct wb_writeback_work *work)
1768 {
1769 unsigned long start_time = jiffies;
1770 long wrote = 0;
1771
1772 while (!list_empty(&wb->b_io)) {
1773 struct inode *inode = wb_inode(wb->b_io.prev);
1774 struct super_block *sb = inode->i_sb;
1775
1776 if (!trylock_super(sb)) {
1777
1778
1779
1780
1781
1782 redirty_tail(inode, wb);
1783 continue;
1784 }
1785 wrote += writeback_sb_inodes(sb, wb, work);
1786 up_read(&sb->s_umount);
1787
1788
1789 if (wrote) {
1790 if (time_is_before_jiffies(start_time + HZ / 10UL))
1791 break;
1792 if (work->nr_pages <= 0)
1793 break;
1794 }
1795 }
1796
1797 return wrote;
1798 }
1799
1800 static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
1801 enum wb_reason reason)
1802 {
1803 struct wb_writeback_work work = {
1804 .nr_pages = nr_pages,
1805 .sync_mode = WB_SYNC_NONE,
1806 .range_cyclic = 1,
1807 .reason = reason,
1808 };
1809 struct blk_plug plug;
1810
1811 blk_start_plug(&plug);
1812 spin_lock(&wb->list_lock);
1813 if (list_empty(&wb->b_io))
1814 queue_io(wb, &work);
1815 __writeback_inodes_wb(wb, &work);
1816 spin_unlock(&wb->list_lock);
1817 blk_finish_plug(&plug);
1818
1819 return nr_pages - work.nr_pages;
1820 }
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
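/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark
 * the dirtying-time in the inode's address_space.  So this periodic writeback
 * code just walks the superblock inode list, writing back any inodes which
 * are older than a specific point in time.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write
 * back all dirty pages if they are all attached to "old" mappings.
 */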
1837 static long wb_writeback(struct bdi_writeback *wb,
1838 struct wb_writeback_work *work)
1839 {
1840 unsigned long wb_start = jiffies;
1841 long nr_pages = work->nr_pages;
1842 unsigned long oldest_jif;
1843 struct inode *inode;
1844 long progress;
1845 struct blk_plug plug;
1846
1847 oldest_jif = jiffies;
1848 work->older_than_this = &oldest_jif;
1849
1850 blk_start_plug(&plug);
1851 spin_lock(&wb->list_lock);
1852 for (;;) {
1853
1854
1855
1856 if (work->nr_pages <= 0)
1857 break;
1858
1859
1860
1861
1862
1863
1864
1865 if ((work->for_background || work->for_kupdate) &&
1866 !list_empty(&wb->work_list))
1867 break;
1868
1869
1870
1871
1872
1873 if (work->for_background && !wb_over_bg_thresh(wb))
1874 break;
1875
1876
1877
1878
1879
1880
1881
1882 if (work->for_kupdate) {
1883 oldest_jif = jiffies -
1884 msecs_to_jiffies(dirty_expire_interval * 10);
1885 } else if (work->for_background)
1886 oldest_jif = jiffies;
1887
1888 trace_writeback_start(wb, work);
1889 if (list_empty(&wb->b_io))
1890 queue_io(wb, work);
1891 if (work->sb)
1892 progress = writeback_sb_inodes(work->sb, wb, work);
1893 else
1894 progress = __writeback_inodes_wb(wb, work);
1895 trace_writeback_written(wb, work);
1896
1897 wb_update_bandwidth(wb, wb_start);
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907 if (progress)
1908 continue;
1909
1910
1911
1912 if (list_empty(&wb->b_more_io))
1913 break;
1914
1915
1916
1917
1918
1919 trace_writeback_wait(wb, work);
1920 inode = wb_inode(wb->b_more_io.prev);
1921 spin_lock(&inode->i_lock);
1922 spin_unlock(&wb->list_lock);
1923
1924 inode_sleep_on_writeback(inode);
1925 spin_lock(&wb->list_lock);
1926 }
1927 spin_unlock(&wb->list_lock);
1928 blk_finish_plug(&plug);
1929
1930 return nr_pages - work->nr_pages;
1931 }
1932
1933
1934
1935
1936 static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
1937 {
1938 struct wb_writeback_work *work = NULL;
1939
1940 spin_lock_bh(&wb->work_lock);
1941 if (!list_empty(&wb->work_list)) {
1942 work = list_entry(wb->work_list.next,
1943 struct wb_writeback_work, list);
1944 list_del_init(&work->list);
1945 }
1946 spin_unlock_bh(&wb->work_lock);
1947 return work;
1948 }
1949
1950 static long wb_check_background_flush(struct bdi_writeback *wb)
1951 {
1952 if (wb_over_bg_thresh(wb)) {
1953
1954 struct wb_writeback_work work = {
1955 .nr_pages = LONG_MAX,
1956 .sync_mode = WB_SYNC_NONE,
1957 .for_background = 1,
1958 .range_cyclic = 1,
1959 .reason = WB_REASON_BACKGROUND,
1960 };
1961
1962 return wb_writeback(wb, &work);
1963 }
1964
1965 return 0;
1966 }
1967
1968 static long wb_check_old_data_flush(struct bdi_writeback *wb)
1969 {
1970 unsigned long expired;
1971 long nr_pages;
1972
1973
1974
1975
1976 if (!dirty_writeback_interval)
1977 return 0;
1978
1979 expired = wb->last_old_flush +
1980 msecs_to_jiffies(dirty_writeback_interval * 10);
1981 if (time_before(jiffies, expired))
1982 return 0;
1983
1984 wb->last_old_flush = jiffies;
1985 nr_pages = get_nr_dirty_pages();
1986
1987 if (nr_pages) {
1988 struct wb_writeback_work work = {
1989 .nr_pages = nr_pages,
1990 .sync_mode = WB_SYNC_NONE,
1991 .for_kupdate = 1,
1992 .range_cyclic = 1,
1993 .reason = WB_REASON_PERIODIC,
1994 };
1995
1996 return wb_writeback(wb, &work);
1997 }
1998
1999 return 0;
2000 }
2001
2002 static long wb_check_start_all(struct bdi_writeback *wb)
2003 {
2004 long nr_pages;
2005
2006 if (!test_bit(WB_start_all, &wb->state))
2007 return 0;
2008
2009 nr_pages = get_nr_dirty_pages();
2010 if (nr_pages) {
2011 struct wb_writeback_work work = {
2012 .nr_pages = wb_split_bdi_pages(wb, nr_pages),
2013 .sync_mode = WB_SYNC_NONE,
2014 .range_cyclic = 1,
2015 .reason = wb->start_all_reason,
2016 };
2017
2018 nr_pages = wb_writeback(wb, &work);
2019 }
2020
2021 clear_bit(WB_start_all, &wb->state);
2022 return nr_pages;
2023 }
2024
2025
2026
2027
2028
2029 static long wb_do_writeback(struct bdi_writeback *wb)
2030 {
2031 struct wb_writeback_work *work;
2032 long wrote = 0;
2033
2034 set_bit(WB_writeback_running, &wb->state);
2035 while ((work = get_next_work_item(wb)) != NULL) {
2036 trace_writeback_exec(wb, work);
2037 wrote += wb_writeback(wb, work);
2038 finish_writeback_work(wb, work);
2039 }
2040
2041
2042
2043
2044 wrote += wb_check_start_all(wb);
2045
2046
2047
2048
2049 wrote += wb_check_old_data_flush(wb);
2050 wrote += wb_check_background_flush(wb);
2051 clear_bit(WB_writeback_running, &wb->state);
2052
2053 return wrote;
2054 }
2055
2056
2057
2058
2059
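/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * reschedules periodically and does kupdated-style flushing.
 */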
2060 void wb_workfn(struct work_struct *work)
2061 {
2062 struct bdi_writeback *wb = container_of(to_delayed_work(work),
2063 struct bdi_writeback, dwork);
2064 long pages_written;
2065
2066 set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
2067 current->flags |= PF_SWAPWRITE;
2068
2069 if (likely(!current_is_workqueue_rescuer() ||
2070 !test_bit(WB_registered, &wb->state))) {
2071
2072
2073
2074
2075
2076
2077 do {
2078 pages_written = wb_do_writeback(wb);
2079 trace_writeback_pages_written(pages_written);
2080 } while (!list_empty(&wb->work_list));
2081 } else {
2082
2083
2084
2085
2086
2087 pages_written = writeback_inodes_wb(wb, 1024,
2088 WB_REASON_FORKER_THREAD);
2089 trace_writeback_pages_written(pages_written);
2090 }
2091
2092 if (!list_empty(&wb->work_list))
2093 wb_wakeup(wb);
2094 else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2095 wb_wakeup_delayed(wb);
2096
2097 current->flags &= ~PF_SWAPWRITE;
2098 }
2099
2100
2101
2102
2103
2104 static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2105 enum wb_reason reason)
2106 {
2107 struct bdi_writeback *wb;
2108
2109 if (!bdi_has_dirty_io(bdi))
2110 return;
2111
2112 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2113 wb_start_writeback(wb, reason);
2114 }
2115
2116 void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2117 enum wb_reason reason)
2118 {
2119 rcu_read_lock();
2120 __wakeup_flusher_threads_bdi(bdi, reason);
2121 rcu_read_unlock();
2122 }
2123
2124
2125
2126
2127 void wakeup_flusher_threads(enum wb_reason reason)
2128 {
2129 struct backing_dev_info *bdi;
2130
2131
2132
2133
2134 if (blk_needs_flush_plug(current))
2135 blk_schedule_flush_plug(current);
2136
2137 rcu_read_lock();
2138 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2139 __wakeup_flusher_threads_bdi(bdi, reason);
2140 rcu_read_unlock();
2141 }
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158 static void wakeup_dirtytime_writeback(struct work_struct *w);
2159 static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
2160
2161 static void wakeup_dirtytime_writeback(struct work_struct *w)
2162 {
2163 struct backing_dev_info *bdi;
2164
2165 rcu_read_lock();
2166 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
2167 struct bdi_writeback *wb;
2168
2169 list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2170 if (!list_empty(&wb->b_dirty_time))
2171 wb_wakeup(wb);
2172 }
2173 rcu_read_unlock();
2174 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2175 }
2176
2177 static int __init start_dirtytime_writeback(void)
2178 {
2179 schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2180 return 0;
2181 }
2182 __initcall(start_dirtytime_writeback);
2183
2184 int dirtytime_interval_handler(struct ctl_table *table, int write,
2185 void __user *buffer, size_t *lenp, loff_t *ppos)
2186 {
2187 int ret;
2188
2189 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2190 if (ret == 0 && write)
2191 mod_delayed_work(system_wq, &dirtytime_work, 0);
2192 return ret;
2193 }
2194
2195 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
2196 {
2197 if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
2198 struct dentry *dentry;
2199 const char *name = "?";
2200
2201 dentry = d_find_alias(inode);
2202 if (dentry) {
2203 spin_lock(&dentry->d_lock);
2204 name = (const char *) dentry->d_name.name;
2205 }
2206 printk(KERN_DEBUG
2207 "%s(%d): dirtied inode %lu (%s) on %s\n",
2208 current->comm, task_pid_nr(current), inode->i_ino,
2209 name, inode->i_sb->s_id);
2210 if (dentry) {
2211 spin_unlock(&dentry->d_lock);
2212 dput(dentry);
2213 }
2214 }
2215 }
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
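/**
 * __mark_inode_dirty - internal function to mark an inode dirty
 * @inode: inode to mark
 * @flags: what kind of dirty, e.g. I_DIRTY_SYNC or I_DIRTY_TIME
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().  If the inode wasn't already dirty, put it on
 * its wb's dirty list and, if needed, kick the flusher.
 *
 * CAREFUL!  The inode is marked dirty unconditionally, but it is moved onto
 * a dirty list only if it is hashed or if it refers to a blockdev.  If it
 * was not hashed, it will never be added to the dirty lists even if it is
 * later hashed, as it will have been marked dirty already.  In short, make
 * sure you hash any inodes _before_ you start marking them dirty.
 */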
2243 void __mark_inode_dirty(struct inode *inode, int flags)
2244 {
2245 struct super_block *sb = inode->i_sb;
2246 int dirtytime;
2247
2248 trace_writeback_mark_inode_dirty(inode, flags);
2249
2250
2251
2252
2253
2254 if (flags & (I_DIRTY_INODE | I_DIRTY_TIME)) {
2255 trace_writeback_dirty_inode_start(inode, flags);
2256
2257 if (sb->s_op->dirty_inode)
2258 sb->s_op->dirty_inode(inode, flags);
2259
2260 trace_writeback_dirty_inode(inode, flags);
2261 }
2262 if (flags & I_DIRTY_INODE)
2263 flags &= ~I_DIRTY_TIME;
2264 dirtytime = flags & I_DIRTY_TIME;
2265
2266
2267
2268
2269
2270 smp_mb();
2271
2272 if (((inode->i_state & flags) == flags) ||
2273 (dirtytime && (inode->i_state & I_DIRTY_INODE)))
2274 return;
2275
2276 if (unlikely(block_dump))
2277 block_dump___mark_inode_dirty(inode);
2278
2279 spin_lock(&inode->i_lock);
2280 if (dirtytime && (inode->i_state & I_DIRTY_INODE))
2281 goto out_unlock_inode;
2282 if ((inode->i_state & flags) != flags) {
2283 const int was_dirty = inode->i_state & I_DIRTY;
2284
2285 inode_attach_wb(inode, NULL);
2286
2287 if (flags & I_DIRTY_INODE)
2288 inode->i_state &= ~I_DIRTY_TIME;
2289 inode->i_state |= flags;
2290
2291
2292
2293
2294
2295
2296 if (inode->i_state & I_SYNC)
2297 goto out_unlock_inode;
2298
2299
2300
2301
2302
2303 if (!S_ISBLK(inode->i_mode)) {
2304 if (inode_unhashed(inode))
2305 goto out_unlock_inode;
2306 }
2307 if (inode->i_state & I_FREEING)
2308 goto out_unlock_inode;
2309
2310
2311
2312
2313
2314 if (!was_dirty) {
2315 struct bdi_writeback *wb;
2316 struct list_head *dirty_list;
2317 bool wakeup_bdi = false;
2318
2319 wb = locked_inode_to_wb_and_lock_list(inode);
2320
2321 WARN(bdi_cap_writeback_dirty(wb->bdi) &&
2322 !test_bit(WB_registered, &wb->state),
2323 "bdi-%s not registered\n", wb->bdi->name);
2324
2325 inode->dirtied_when = jiffies;
2326 if (dirtytime)
2327 inode->dirtied_time_when = jiffies;
2328
2329 if (inode->i_state & I_DIRTY)
2330 dirty_list = &wb->b_dirty;
2331 else
2332 dirty_list = &wb->b_dirty_time;
2333
2334 wakeup_bdi = inode_io_list_move_locked(inode, wb,
2335 dirty_list);
2336
2337 spin_unlock(&wb->list_lock);
2338 trace_writeback_dirty_inode_enqueue(inode);
2339
2340
2341
2342
2343
2344
2345
2346 if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi)
2347 wb_wakeup_delayed(wb);
2348 return;
2349 }
2350 }
2351 out_unlock_inode:
2352 spin_unlock(&inode->i_lock);
2353 }
2354 EXPORT_SYMBOL(__mark_inode_dirty);
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
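/*
 * Wait for the writeback of all inodes of @sb that were under writeback when
 * this function was entered.  s_sync_lock serialises concurrent sync
 * operations to avoid lock contention between concurrent wait_sb_inodes()
 * callers; this preserves the behaviour required by sync(2), as all IO
 * issued before entry is guaranteed to have completed by the time each inode
 * is locked and checked for PAGECACHE_TAG_WRITEBACK.
 */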
2365 static void wait_sb_inodes(struct super_block *sb)
2366 {
2367 LIST_HEAD(sync_list);
2368
2369
2370
2371
2372
2373 WARN_ON(!rwsem_is_locked(&sb->s_umount));
2374
2375 mutex_lock(&sb->s_sync_lock);
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386 rcu_read_lock();
2387 spin_lock_irq(&sb->s_inode_wblist_lock);
2388 list_splice_init(&sb->s_inodes_wb, &sync_list);
2389
2390
2391
2392
2393
2394
2395
2396
2397 while (!list_empty(&sync_list)) {
2398 struct inode *inode = list_first_entry(&sync_list, struct inode,
2399 i_wb_list);
2400 struct address_space *mapping = inode->i_mapping;
2401
2402
2403
2404
2405
2406
2407
2408 list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
2409
2410
2411
2412
2413
2414
2415 if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
2416 continue;
2417
2418 spin_unlock_irq(&sb->s_inode_wblist_lock);
2419
2420 spin_lock(&inode->i_lock);
2421 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
2422 spin_unlock(&inode->i_lock);
2423
2424 spin_lock_irq(&sb->s_inode_wblist_lock);
2425 continue;
2426 }
2427 __iget(inode);
2428 spin_unlock(&inode->i_lock);
2429 rcu_read_unlock();
2430
2431
2432
2433
2434
2435
2436 filemap_fdatawait_keep_errors(mapping);
2437
2438 cond_resched();
2439
2440 iput(inode);
2441
2442 rcu_read_lock();
2443 spin_lock_irq(&sb->s_inode_wblist_lock);
2444 }
2445 spin_unlock_irq(&sb->s_inode_wblist_lock);
2446 rcu_read_unlock();
2447 mutex_unlock(&sb->s_sync_lock);
2448 }
2449
2450 static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
2451 enum wb_reason reason, bool skip_if_busy)
2452 {
2453 struct backing_dev_info *bdi = sb->s_bdi;
2454 DEFINE_WB_COMPLETION(done, bdi);
2455 struct wb_writeback_work work = {
2456 .sb = sb,
2457 .sync_mode = WB_SYNC_NONE,
2458 .tagged_writepages = 1,
2459 .done = &done,
2460 .nr_pages = nr,
2461 .reason = reason,
2462 };
2463
2464 if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
2465 return;
2466 WARN_ON(!rwsem_is_locked(&sb->s_umount));
2467
2468 bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
2469 wb_wait_for_completion(&done);
2470 }
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482 void writeback_inodes_sb_nr(struct super_block *sb,
2483 unsigned long nr,
2484 enum wb_reason reason)
2485 {
2486 __writeback_inodes_sb_nr(sb, nr, reason, false);
2487 }
2488 EXPORT_SYMBOL(writeback_inodes_sb_nr);
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499 void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2500 {
2501 return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
2502 }
2503 EXPORT_SYMBOL(writeback_inodes_sb);
2504
2505
2506
2507
2508
2509
2510
2511
2512 void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2513 {
2514 if (!down_read_trylock(&sb->s_umount))
2515 return;
2516
2517 __writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
2518 up_read(&sb->s_umount);
2519 }
2520 EXPORT_SYMBOL(try_to_writeback_inodes_sb);
2521
2522
2523
2524
2525
2526
2527
2528
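/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */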
2529 void sync_inodes_sb(struct super_block *sb)
2530 {
2531 struct backing_dev_info *bdi = sb->s_bdi;
2532 DEFINE_WB_COMPLETION(done, bdi);
2533 struct wb_writeback_work work = {
2534 .sb = sb,
2535 .sync_mode = WB_SYNC_ALL,
2536 .nr_pages = LONG_MAX,
2537 .range_cyclic = 0,
2538 .done = &done,
2539 .reason = WB_REASON_SYNC,
2540 .for_sync = 1,
2541 };
2542
2543
2544
2545
2546
2547
2548 if (bdi == &noop_backing_dev_info)
2549 return;
2550 WARN_ON(!rwsem_is_locked(&sb->s_umount));
2551
2552
2553 bdi_down_write_wb_switch_rwsem(bdi);
2554 bdi_split_work_to_wbs(bdi, &work, false);
2555 wb_wait_for_completion(&done);
2556 bdi_up_write_wb_switch_rwsem(bdi);
2557
2558 wait_sb_inodes(sb);
2559 }
2560 EXPORT_SYMBOL(sync_inodes_sb);
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572 int write_inode_now(struct inode *inode, int sync)
2573 {
2574 struct writeback_control wbc = {
2575 .nr_to_write = LONG_MAX,
2576 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
2577 .range_start = 0,
2578 .range_end = LLONG_MAX,
2579 };
2580
2581 if (!mapping_cap_writeback_dirty(inode->i_mapping))
2582 wbc.nr_to_write = 0;
2583
2584 might_sleep();
2585 return writeback_single_inode(inode, &wbc);
2586 }
2587 EXPORT_SYMBOL(write_inode_now);
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600 int sync_inode(struct inode *inode, struct writeback_control *wbc)
2601 {
2602 return writeback_single_inode(inode, wbc);
2603 }
2604 EXPORT_SYMBOL(sync_inode);
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615 int sync_inode_metadata(struct inode *inode, int wait)
2616 {
2617 struct writeback_control wbc = {
2618 .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
2619 .nr_to_write = 0,
2620 };
2621
2622 return sync_inode(inode, &wbc);
2623 }
2624 EXPORT_SYMBOL(sync_inode_metadata);