This source file includes the following definitions:
- nilfs_sc_cstage_inc
- nilfs_sc_cstage_set
- nilfs_sc_cstage_get
- nilfs_prepare_segment_lock
- nilfs_transaction_begin
- nilfs_transaction_commit
- nilfs_transaction_abort
- nilfs_relax_pressure_in_lock
- nilfs_transaction_lock
- nilfs_transaction_unlock
- nilfs_segctor_map_segsum_entry
- nilfs_segctor_reset_segment_buffer
- nilfs_segctor_feed_segment
- nilfs_segctor_add_super_root
- nilfs_segctor_segsum_block_required
- nilfs_segctor_begin_finfo
- nilfs_segctor_end_finfo
- nilfs_segctor_add_file_block
- nilfs_collect_file_data
- nilfs_collect_file_node
- nilfs_collect_file_bmap
- nilfs_write_file_data_binfo
- nilfs_write_file_node_binfo
- nilfs_collect_dat_data
- nilfs_collect_dat_bmap
- nilfs_write_dat_data_binfo
- nilfs_write_dat_node_binfo
- nilfs_lookup_dirty_data_buffers
- nilfs_lookup_dirty_node_buffers
- nilfs_dispose_list
- nilfs_iput_work_func
- nilfs_test_metadata_dirty
- nilfs_segctor_clean
- nilfs_segctor_confirm
- nilfs_segctor_clear_metadata_dirty
- nilfs_segctor_create_checkpoint
- nilfs_segctor_fill_in_checkpoint
- nilfs_fill_in_file_bmap
- nilfs_segctor_fill_in_file_bmap
- nilfs_segctor_fill_in_super_root
- nilfs_redirty_inodes
- nilfs_drop_collected_inodes
- nilfs_segctor_apply_buffers
- nilfs_segctor_buffer_rest
- nilfs_segctor_scan_file
- nilfs_segctor_scan_file_dsync
- nilfs_segctor_collect_blocks
- nilfs_segctor_begin_construction
- nilfs_segctor_extend_segments
- nilfs_free_incomplete_logs
- nilfs_segctor_update_segusage
- nilfs_cancel_segusage
- nilfs_segctor_truncate_segments
- nilfs_segctor_collect
- nilfs_list_replace_buffer
- nilfs_segctor_update_payload_blocknr
- nilfs_segctor_assign
- nilfs_begin_page_io
- nilfs_segctor_prepare_write
- nilfs_segctor_write
- nilfs_end_page_io
- nilfs_abort_logs
- nilfs_segctor_abort_construction
- nilfs_set_next_segment
- nilfs_segctor_complete_write
- nilfs_segctor_wait
- nilfs_segctor_collect_dirty_files
- nilfs_segctor_drop_written_files
- nilfs_segctor_do_construct
- nilfs_segctor_start_timer
- nilfs_segctor_do_flush
- nilfs_flush_segment
- nilfs_segctor_sync
- nilfs_segctor_wakeup
- nilfs_construct_segment
- nilfs_construct_dsync_segment
- nilfs_segctor_accept
- nilfs_segctor_notify
- nilfs_segctor_construct
- nilfs_construction_timeout
- nilfs_remove_written_gcinodes
- nilfs_clean_segments
- nilfs_segctor_thread_construct
- nilfs_segctor_do_immediate_flush
- nilfs_segctor_flush_mode
- nilfs_segctor_thread
- nilfs_segctor_start_thread
- nilfs_segctor_kill_thread
- nilfs_segctor_new
- nilfs_segctor_write_out
- nilfs_segctor_destroy
- nilfs_attach_log_writer
- nilfs_detach_log_writer
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * segment.c - NILFS segment constructor.
4  *
5  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6  *
7  * Written by Ryusuke Konishi.
8  *
9  */
10
11 #include <linux/pagemap.h>
12 #include <linux/buffer_head.h>
13 #include <linux/writeback.h>
14 #include <linux/bitops.h>
15 #include <linux/bio.h>
16 #include <linux/completion.h>
17 #include <linux/blkdev.h>
18 #include <linux/backing-dev.h>
19 #include <linux/freezer.h>
20 #include <linux/kthread.h>
21 #include <linux/crc32.h>
22 #include <linux/pagevec.h>
23 #include <linux/slab.h>
24 #include <linux/sched/signal.h>
25
26 #include "nilfs.h"
27 #include "btnode.h"
28 #include "page.h"
29 #include "segment.h"
30 #include "sufile.h"
31 #include "cpfile.h"
32 #include "ifile.h"
33 #include "segbuf.h"
34
35
36 /*
37  * Segment constructor
38  */
39 #define SC_N_INODEVEC 16	/* Size of locally allocated inode vector */
40
41 #define SC_MAX_SEGDELTA 64	/*
42 				 * Upper limit of the number of segments
43 				 * appended in collection retry loop
44 				 */
45
46 /* Construction mode */
47 enum {
48 SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
49 SC_LSEG_DSYNC,	/*
50 			 * Flush data blocks of a given file and make
51 			 * a logical segment without a super root.
52 			 */
53 SC_FLUSH_FILE,	/*
54 			 * Flush data files, leads to segment writes without
55 			 * creating a checkpoint.
56 			 */
57 SC_FLUSH_DAT,	/*
58 			 * Flush DAT file.  This also creates segments
59 			 * without a checkpoint.
60 			 */
61 };
62
63 /* Stage numbers of dirty block collection */
64 enum {
65 NILFS_ST_INIT = 0,
66 NILFS_ST_GC,		/* Collecting dirty blocks for GC */
67 NILFS_ST_FILE,
68 NILFS_ST_IFILE,
69 NILFS_ST_CPFILE,
70 NILFS_ST_SUFILE,
71 NILFS_ST_DAT,
72 NILFS_ST_SR,		/* Super root */
73 NILFS_ST_DSYNC,	/* Data sync blocks */
74 NILFS_ST_DONE,
75 };
76
77 #define CREATE_TRACE_POINTS
78 #include <trace/events/nilfs2.h>
79
80 /*
81  * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), nilfs_sc_cstage_get() -
82  *  wrapper functions of the collection stage count (sci->sc_stage.scnt).
83  *
84  * Users of the stage count should go through these wrappers so that the
85  * collection stage transition tracepoint
86  * (trace_nilfs2_collection_stage_transition) fires on every change.
87  */
88
89
90 static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
91 {
92 sci->sc_stage.scnt++;
93 trace_nilfs2_collection_stage_transition(sci);
94 }
95
96 static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
97 {
98 sci->sc_stage.scnt = next_scnt;
99 trace_nilfs2_collection_stage_transition(sci);
100 }
101
102 static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
103 {
104 return sci->sc_stage.scnt;
105 }
106
107 /* State flags of collection */
108 #define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
109 #define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
110 #define NILFS_CF_SUFREED	0x0004	/* Segment usages have been freed */
111 #define NILFS_CF_HISTORY_MASK (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
112
113 /* Operations depending on the types of files */
114 struct nilfs_sc_operations {
115 int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
116 struct inode *);
117 int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
118 struct inode *);
119 int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
120 struct inode *);
121 void (*write_data_binfo)(struct nilfs_sc_info *,
122 struct nilfs_segsum_pointer *,
123 union nilfs_binfo *);
124 void (*write_node_binfo)(struct nilfs_sc_info *,
125 struct nilfs_segsum_pointer *,
126 union nilfs_binfo *);
127 };
128
129 /*
130  * Other definitions
131  */
132 static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
133 static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
134 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
135 static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
136
137 #define nilfs_cnt32_gt(a, b) \
138 (typecheck(__u32, a) && typecheck(__u32, b) && \
139 ((__s32)(b) - (__s32)(a) < 0))
140 #define nilfs_cnt32_ge(a, b) \
141 (typecheck(__u32, a) && typecheck(__u32, b) && \
142 ((__s32)(a) - (__s32)(b) >= 0))
143 #define nilfs_cnt32_lt(a, b) nilfs_cnt32_gt(b, a)
144 #define nilfs_cnt32_le(a, b) nilfs_cnt32_ge(b, a)
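/*
 * Editor's note (not part of the original file): the macros above compare
 * 32-bit sequence counters modulo 2^32, in the style of the kernel's
 * time_after() family, so comparisons stay correct across counter
 * wraparound.  For example, with a = 0x00000001 and b = 0xfffffffe,
 * nilfs_cnt32_gt(a, b) computes (__s32)(b) - (__s32)(a) = -2 - 1 = -3 < 0
 * and thus treats a as "later" than b, even though a < b as plain
 * unsigned values.
 */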
145
146 static int nilfs_prepare_segment_lock(struct super_block *sb,
147 struct nilfs_transaction_info *ti)
148 {
149 struct nilfs_transaction_info *cur_ti = current->journal_info;
150 void *save = NULL;
151
152 if (cur_ti) {
153 if (cur_ti->ti_magic == NILFS_TI_MAGIC)
154 return ++cur_ti->ti_count;
155
156 /*
157  * If the journal_info field is occupied by another FS,
158  * it is saved and will be restored on
159  * nilfs_transaction_commit().
160  */
161 nilfs_msg(sb, KERN_WARNING, "journal info from a different FS");
162 save = current->journal_info;
163 }
164 if (!ti) {
165 ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
166 if (!ti)
167 return -ENOMEM;
168 ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
169 } else {
170 ti->ti_flags = 0;
171 }
172 ti->ti_count = 0;
173 ti->ti_save = save;
174 ti->ti_magic = NILFS_TI_MAGIC;
175 current->journal_info = ti;
176 return 0;
177 }
178
179 /**
180  * nilfs_transaction_begin - start indivisible file operations.
181  * @sb: super block
182  * @ti: nilfs_transaction_info
183  * @vacancy_check: flags for vacancy rate checks
184  *
185  * nilfs_transaction_begin() acquires a reader/writer semaphore, called
186  * the segment semaphore, to make a segment construction and write tasks
187  * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
188  * The region enclosed by these two functions can be nested.  To avoid a
189  * deadlock, the semaphore is only acquired or released in the outermost call.
190  *
191  * This function allocates a nilfs_transaction_info struct to keep context
192  * information on it.  It is initialized and hooked onto the current task in
193  * the outermost call.  If a pre-allocated struct is given to @ti, it is used
194  * instead of the allocation.
195  *
196  * When the @vacancy_check flag is set, this function checks the amount of
197  * free space and fails with -ENOSPC when the disk is nearly full.
198  *
199  * Return Value: On success, 0 is returned.  On error, one of the following
200  * negative error codes is returned.
201  *
202  * %-ENOMEM - Insufficient memory available.
203  *
204  * %-ENOSPC - No space left on device
205  */
206 int nilfs_transaction_begin(struct super_block *sb,
207 struct nilfs_transaction_info *ti,
208 int vacancy_check)
209 {
210 struct the_nilfs *nilfs;
211 int ret = nilfs_prepare_segment_lock(sb, ti);
212 struct nilfs_transaction_info *trace_ti;
213
214 if (unlikely(ret < 0))
215 return ret;
216 if (ret > 0) {
217 trace_ti = current->journal_info;
218
219 trace_nilfs2_transaction_transition(sb, trace_ti,
220 trace_ti->ti_count, trace_ti->ti_flags,
221 TRACE_NILFS2_TRANSACTION_BEGIN);
222 return 0;
223 }
224
225 sb_start_intwrite(sb);
226
227 nilfs = sb->s_fs_info;
228 down_read(&nilfs->ns_segctor_sem);
229 if (vacancy_check && nilfs_near_disk_full(nilfs)) {
230 up_read(&nilfs->ns_segctor_sem);
231 ret = -ENOSPC;
232 goto failed;
233 }
234
235 trace_ti = current->journal_info;
236 trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
237 trace_ti->ti_flags,
238 TRACE_NILFS2_TRANSACTION_BEGIN);
239 return 0;
240
241 failed:
242 ti = current->journal_info;
243 current->journal_info = ti->ti_save;
244 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
245 kmem_cache_free(nilfs_transaction_cachep, ti);
246 sb_end_intwrite(sb);
247 return ret;
248 }
249
250 /**
251  * nilfs_transaction_commit - commit indivisible file operations.
252  * @sb: super block
253  *
254  * nilfs_transaction_commit() releases the read semaphore which is
255  * acquired by nilfs_transaction_begin().  This is only performed
256  * in the outermost call of this function.  If a commit flag is set,
257  * nilfs_transaction_commit() sets a timer to start the segment
258  * constructor.  If a sync flag is set, it starts construction
259  * directly.
260  */
261 int nilfs_transaction_commit(struct super_block *sb)
262 {
263 struct nilfs_transaction_info *ti = current->journal_info;
264 struct the_nilfs *nilfs = sb->s_fs_info;
265 int err = 0;
266
267 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
268 ti->ti_flags |= NILFS_TI_COMMIT;
269 if (ti->ti_count > 0) {
270 ti->ti_count--;
271 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
272 ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
273 return 0;
274 }
275 if (nilfs->ns_writer) {
276 struct nilfs_sc_info *sci = nilfs->ns_writer;
277
278 if (ti->ti_flags & NILFS_TI_COMMIT)
279 nilfs_segctor_start_timer(sci);
280 if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
281 nilfs_segctor_do_flush(sci, 0);
282 }
283 up_read(&nilfs->ns_segctor_sem);
284 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
285 ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
286
287 current->journal_info = ti->ti_save;
288
289 if (ti->ti_flags & NILFS_TI_SYNC)
290 err = nilfs_construct_segment(sb);
291 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
292 kmem_cache_free(nilfs_transaction_cachep, ti);
293 sb_end_intwrite(sb);
294 return err;
295 }
296
297 void nilfs_transaction_abort(struct super_block *sb)
298 {
299 struct nilfs_transaction_info *ti = current->journal_info;
300 struct the_nilfs *nilfs = sb->s_fs_info;
301
302 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
303 if (ti->ti_count > 0) {
304 ti->ti_count--;
305 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
306 ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
307 return;
308 }
309 up_read(&nilfs->ns_segctor_sem);
310
311 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
312 ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
313
314 current->journal_info = ti->ti_save;
315 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
316 kmem_cache_free(nilfs_transaction_cachep, ti);
317 sb_end_intwrite(sb);
318 }
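/*
 * Editor's note -- an illustrative usage sketch, not part of the original
 * file.  A typical caller pairs the three functions above as follows
 * (do_some_update() is a hypothetical helper standing in for the actual
 * filesystem modification):
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);  (1 = with vacancy check)
 *	if (err)
 *		return err;  (-ENOMEM or -ENOSPC)
 *	err = do_some_update(sb);
 *	if (err) {
 *		nilfs_transaction_abort(sb);
 *		return err;
 *	}
 *	return nilfs_transaction_commit(sb);
 *
 * Because ti_count counts nesting, such regions may be nested; only the
 * outermost pair actually takes and releases the segment semaphore.
 */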
319
320 void nilfs_relax_pressure_in_lock(struct super_block *sb)
321 {
322 struct the_nilfs *nilfs = sb->s_fs_info;
323 struct nilfs_sc_info *sci = nilfs->ns_writer;
324
325 if (!sci || !sci->sc_flush_request)
326 return;
327
328 set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
329 up_read(&nilfs->ns_segctor_sem);
330
331 down_write(&nilfs->ns_segctor_sem);
332 if (sci->sc_flush_request &&
333 test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
334 struct nilfs_transaction_info *ti = current->journal_info;
335
336 ti->ti_flags |= NILFS_TI_WRITER;
337 nilfs_segctor_do_immediate_flush(sci);
338 ti->ti_flags &= ~NILFS_TI_WRITER;
339 }
340 downgrade_write(&nilfs->ns_segctor_sem);
341 }
342
343 static void nilfs_transaction_lock(struct super_block *sb,
344 struct nilfs_transaction_info *ti,
345 int gcflag)
346 {
347 struct nilfs_transaction_info *cur_ti = current->journal_info;
348 struct the_nilfs *nilfs = sb->s_fs_info;
349 struct nilfs_sc_info *sci = nilfs->ns_writer;
350
351 WARN_ON(cur_ti);
352 ti->ti_flags = NILFS_TI_WRITER;
353 ti->ti_count = 0;
354 ti->ti_save = cur_ti;
355 ti->ti_magic = NILFS_TI_MAGIC;
356 current->journal_info = ti;
357
358 for (;;) {
359 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
360 ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);
361
362 down_write(&nilfs->ns_segctor_sem);
363 if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
364 break;
365
366 nilfs_segctor_do_immediate_flush(sci);
367
368 up_write(&nilfs->ns_segctor_sem);
369 cond_resched();
370 }
371 if (gcflag)
372 ti->ti_flags |= NILFS_TI_GC;
373
374 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
375 ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
376 }
377
378 static void nilfs_transaction_unlock(struct super_block *sb)
379 {
380 struct nilfs_transaction_info *ti = current->journal_info;
381 struct the_nilfs *nilfs = sb->s_fs_info;
382
383 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
384 BUG_ON(ti->ti_count > 0);
385
386 up_write(&nilfs->ns_segctor_sem);
387 current->journal_info = ti->ti_save;
388
389 trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
390 ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
391 }
392
393 static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
394 struct nilfs_segsum_pointer *ssp,
395 unsigned int bytes)
396 {
397 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
398 unsigned int blocksize = sci->sc_super->s_blocksize;
399 void *p;
400
401 if (unlikely(ssp->offset + bytes > blocksize)) {
402 ssp->offset = 0;
403 BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
404 &segbuf->sb_segsum_buffers));
405 ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
406 }
407 p = ssp->bh->b_data + ssp->offset;
408 ssp->offset += bytes;
409 return p;
410 }
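/*
 * Editor's note (not part of the original file): the helper above walks
 * the segment summary area as a simple bump allocator.  Each call returns
 * a pointer to the next @bytes of summary space; when the current summary
 * block cannot hold the entry, it resets the offset and advances ssp->bh
 * to the next summary buffer.  Callers therefore lay out a finfo followed
 * by its binfo entries contiguously, e.g.:
 *
 *	finfo = nilfs_segctor_map_segsum_entry(sci, &ssp, sizeof(*finfo));
 *	binfo = nilfs_segctor_map_segsum_entry(sci, &ssp, sizeof(*binfo));
 */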
411
412 /**
413  * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
414  * @sci: nilfs_sc_info
415  */
416 static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
417 {
418 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
419 struct buffer_head *sumbh;
420 unsigned int sumbytes;
421 unsigned int flags = 0;
422 int err;
423
424 if (nilfs_doing_gc())
425 flags = NILFS_SS_GC;
426 err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
427 if (unlikely(err))
428 return err;
429
430 sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
431 sumbytes = segbuf->sb_sum.sumbytes;
432 sci->sc_finfo_ptr.bh = sumbh; sci->sc_finfo_ptr.offset = sumbytes;
433 sci->sc_binfo_ptr.bh = sumbh; sci->sc_binfo_ptr.offset = sumbytes;
434 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
435 return 0;
436 }
437
438 static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
439 {
440 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
441 if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
442 return -E2BIG; /*
443 		    * The current segment is filled up
444 		    * (internal code)
445 		    */
446 sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
447 return nilfs_segctor_reset_segment_buffer(sci);
448 }
449
450 static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
451 {
452 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
453 int err;
454
455 if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
456 err = nilfs_segctor_feed_segment(sci);
457 if (err)
458 return err;
459 segbuf = sci->sc_curseg;
460 }
461 err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
462 if (likely(!err))
463 segbuf->sb_sum.flags |= NILFS_SS_SR;
464 return err;
465 }
466
467 /*
468  * Functions for making segment summary and payloads
469  */
470 static int nilfs_segctor_segsum_block_required(
471 struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
472 unsigned int binfo_size)
473 {
474 unsigned int blocksize = sci->sc_super->s_blocksize;
475
476 /* The sizes of finfo and binfo are small enough relative to blocksize */
477 return ssp->offset + binfo_size +
478 (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
479 blocksize;
480 }
481
482 static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
483 struct inode *inode)
484 {
485 sci->sc_curseg->sb_sum.nfinfo++;
486 sci->sc_binfo_ptr = sci->sc_finfo_ptr;
487 nilfs_segctor_map_segsum_entry(
488 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
489
490 if (NILFS_I(inode)->i_root &&
491 !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
492 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
493 /* skip finfo */
494 }
495
496 static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
497 struct inode *inode)
498 {
499 struct nilfs_finfo *finfo;
500 struct nilfs_inode_info *ii;
501 struct nilfs_segment_buffer *segbuf;
502 __u64 cno;
503
504 if (sci->sc_blk_cnt == 0)
505 return;
506
507 ii = NILFS_I(inode);
508
509 if (test_bit(NILFS_I_GCINODE, &ii->i_state))
510 cno = ii->i_cno;
511 else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
512 cno = 0;
513 else
514 cno = sci->sc_cno;
515
516 finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
517 sizeof(*finfo));
518 finfo->fi_ino = cpu_to_le64(inode->i_ino);
519 finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
520 finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
521 finfo->fi_cno = cpu_to_le64(cno);
522
523 segbuf = sci->sc_curseg;
524 segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
525 sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
526 sci->sc_finfo_ptr = sci->sc_binfo_ptr;
527 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
528 }
529
530 static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
531 struct buffer_head *bh,
532 struct inode *inode,
533 unsigned int binfo_size)
534 {
535 struct nilfs_segment_buffer *segbuf;
536 int required, err = 0;
537
538 retry:
539 segbuf = sci->sc_curseg;
540 required = nilfs_segctor_segsum_block_required(
541 sci, &sci->sc_binfo_ptr, binfo_size);
542 if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
543 nilfs_segctor_end_finfo(sci, inode);
544 err = nilfs_segctor_feed_segment(sci);
545 if (err)
546 return err;
547 goto retry;
548 }
549 if (unlikely(required)) {
550 err = nilfs_segbuf_extend_segsum(segbuf);
551 if (unlikely(err))
552 goto failed;
553 }
554 if (sci->sc_blk_cnt == 0)
555 nilfs_segctor_begin_finfo(sci, inode);
556
557 nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
558
559 nilfs_segbuf_add_file_buffer(segbuf, bh);
560 sci->sc_blk_cnt++;
561 failed:
562 return err;
563 }
564
565 /*
566  * Per-file-type callbacks registered in the nilfs_sc_operations tables
567  */
568 static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
569 struct buffer_head *bh, struct inode *inode)
570 {
571 int err;
572
573 err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
574 if (err < 0)
575 return err;
576
577 err = nilfs_segctor_add_file_block(sci, bh, inode,
578 sizeof(struct nilfs_binfo_v));
579 if (!err)
580 sci->sc_datablk_cnt++;
581 return err;
582 }
583
584 static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
585 struct buffer_head *bh,
586 struct inode *inode)
587 {
588 return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
589 }
590
591 static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
592 struct buffer_head *bh,
593 struct inode *inode)
594 {
595 WARN_ON(!buffer_dirty(bh));
596 return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
597 }
598
599 static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
600 struct nilfs_segsum_pointer *ssp,
601 union nilfs_binfo *binfo)
602 {
603 struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
604 sci, ssp, sizeof(*binfo_v));
605 *binfo_v = binfo->bi_v;
606 }
607
608 static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
609 struct nilfs_segsum_pointer *ssp,
610 union nilfs_binfo *binfo)
611 {
612 __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
613 sci, ssp, sizeof(*vblocknr));
614 *vblocknr = binfo->bi_v.bi_vblocknr;
615 }
616
617 static const struct nilfs_sc_operations nilfs_sc_file_ops = {
618 .collect_data = nilfs_collect_file_data,
619 .collect_node = nilfs_collect_file_node,
620 .collect_bmap = nilfs_collect_file_bmap,
621 .write_data_binfo = nilfs_write_file_data_binfo,
622 .write_node_binfo = nilfs_write_file_node_binfo,
623 };
624
625 static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
626 struct buffer_head *bh, struct inode *inode)
627 {
628 int err;
629
630 err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
631 if (err < 0)
632 return err;
633
634 err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
635 if (!err)
636 sci->sc_datablk_cnt++;
637 return err;
638 }
639
640 static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
641 struct buffer_head *bh, struct inode *inode)
642 {
643 WARN_ON(!buffer_dirty(bh));
644 return nilfs_segctor_add_file_block(sci, bh, inode,
645 sizeof(struct nilfs_binfo_dat));
646 }
647
648 static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
649 struct nilfs_segsum_pointer *ssp,
650 union nilfs_binfo *binfo)
651 {
652 __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
653 sizeof(*blkoff));
654 *blkoff = binfo->bi_dat.bi_blkoff;
655 }
656
657 static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
658 struct nilfs_segsum_pointer *ssp,
659 union nilfs_binfo *binfo)
660 {
661 struct nilfs_binfo_dat *binfo_dat =
662 nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
663 *binfo_dat = binfo->bi_dat;
664 }
665
666 static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
667 .collect_data = nilfs_collect_dat_data,
668 .collect_node = nilfs_collect_file_node,
669 .collect_bmap = nilfs_collect_dat_bmap,
670 .write_data_binfo = nilfs_write_dat_data_binfo,
671 .write_node_binfo = nilfs_write_dat_node_binfo,
672 };
673
674 static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
675 .collect_data = nilfs_collect_file_data,
676 .collect_node = NULL,
677 .collect_bmap = NULL,
678 .write_data_binfo = nilfs_write_file_data_binfo,
679 .write_node_binfo = NULL,
680 };
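/*
 * Editor's note (not part of the original file): the three operation
 * tables above select how dirty blocks are collected and how their block
 * information is recorded in the segment summary:
 *
 *   nilfs_sc_file_ops  - regular files and metadata files; data blocks
 *                        are described by nilfs_binfo_v and node blocks
 *                        by a virtual block number (__le64).
 *   nilfs_sc_dat_ops   - the DAT file, which is not virtually mapped;
 *                        data blocks are described by a block offset
 *                        (__le64) and node blocks by nilfs_binfo_dat.
 *   nilfs_sc_dsync_ops - data-sync writes; only file data blocks are
 *                        collected, so the node/bmap callbacks are NULL.
 */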
681
682 static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
683 struct list_head *listp,
684 size_t nlimit,
685 loff_t start, loff_t end)
686 {
687 struct address_space *mapping = inode->i_mapping;
688 struct pagevec pvec;
689 pgoff_t index = 0, last = ULONG_MAX;
690 size_t ndirties = 0;
691 int i;
692
693 if (unlikely(start != 0 || end != LLONG_MAX)) {
694 /*
695  * A valid range is given for sync-ing data pages.  The
696  * range is rounded to per-page; extra dirty buffers
697  * may be included if blocksize < pagesize.
698  */
699 index = start >> PAGE_SHIFT;
700 last = end >> PAGE_SHIFT;
701 }
702 pagevec_init(&pvec);
703 repeat:
704 if (unlikely(index > last) ||
705 !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
706 PAGECACHE_TAG_DIRTY))
707 return ndirties;
708
709 for (i = 0; i < pagevec_count(&pvec); i++) {
710 struct buffer_head *bh, *head;
711 struct page *page = pvec.pages[i];
712
713 lock_page(page);
714 if (!page_has_buffers(page))
715 create_empty_buffers(page, i_blocksize(inode), 0);
716 unlock_page(page);
717
718 bh = head = page_buffers(page);
719 do {
720 if (!buffer_dirty(bh) || buffer_async_write(bh))
721 continue;
722 get_bh(bh);
723 list_add_tail(&bh->b_assoc_buffers, listp);
724 ndirties++;
725 if (unlikely(ndirties >= nlimit)) {
726 pagevec_release(&pvec);
727 cond_resched();
728 return ndirties;
729 }
730 } while (bh = bh->b_this_page, bh != head);
731 }
732 pagevec_release(&pvec);
733 cond_resched();
734 goto repeat;
735 }
736
737 static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
738 struct list_head *listp)
739 {
740 struct nilfs_inode_info *ii = NILFS_I(inode);
741 struct address_space *mapping = &ii->i_btnode_cache;
742 struct pagevec pvec;
743 struct buffer_head *bh, *head;
744 unsigned int i;
745 pgoff_t index = 0;
746
747 pagevec_init(&pvec);
748
749 while (pagevec_lookup_tag(&pvec, mapping, &index,
750 PAGECACHE_TAG_DIRTY)) {
751 for (i = 0; i < pagevec_count(&pvec); i++) {
752 bh = head = page_buffers(pvec.pages[i]);
753 do {
754 if (buffer_dirty(bh) &&
755 !buffer_async_write(bh)) {
756 get_bh(bh);
757 list_add_tail(&bh->b_assoc_buffers,
758 listp);
759 }
760 bh = bh->b_this_page;
761 } while (bh != head);
762 }
763 pagevec_release(&pvec);
764 cond_resched();
765 }
766 }
767
768 static void nilfs_dispose_list(struct the_nilfs *nilfs,
769 struct list_head *head, int force)
770 {
771 struct nilfs_inode_info *ii, *n;
772 struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
773 unsigned int nv = 0;
774
775 while (!list_empty(head)) {
776 spin_lock(&nilfs->ns_inode_lock);
777 list_for_each_entry_safe(ii, n, head, i_dirty) {
778 list_del_init(&ii->i_dirty);
779 if (force) {
780 if (unlikely(ii->i_bh)) {
781 brelse(ii->i_bh);
782 ii->i_bh = NULL;
783 }
784 } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
785 set_bit(NILFS_I_QUEUED, &ii->i_state);
786 list_add_tail(&ii->i_dirty,
787 &nilfs->ns_dirty_files);
788 continue;
789 }
790 ivec[nv++] = ii;
791 if (nv == SC_N_INODEVEC)
792 break;
793 }
794 spin_unlock(&nilfs->ns_inode_lock);
795
796 for (pii = ivec; nv > 0; pii++, nv--)
797 iput(&(*pii)->vfs_inode);
798 }
799 }
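/*
 * Editor's note (not part of the original file): the list is drained in
 * batches of SC_N_INODEVEC so that iput(), which may sleep, is always
 * called only after ns_inode_lock has been dropped.
 */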
800
801 static void nilfs_iput_work_func(struct work_struct *work)
802 {
803 struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
804 sc_iput_work);
805 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
806
807 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
808 }
809
810 static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
811 struct nilfs_root *root)
812 {
813 int ret = 0;
814
815 if (nilfs_mdt_fetch_dirty(root->ifile))
816 ret++;
817 if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
818 ret++;
819 if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
820 ret++;
821 if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
822 ret++;
823 return ret;
824 }
825
826 static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
827 {
828 return list_empty(&sci->sc_dirty_files) &&
829 !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
830 sci->sc_nfreesegs == 0 &&
831 (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
832 }
833
834 static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
835 {
836 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
837 int ret = 0;
838
839 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
840 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
841
842 spin_lock(&nilfs->ns_inode_lock);
843 if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
844 ret++;
845
846 spin_unlock(&nilfs->ns_inode_lock);
847 return ret;
848 }
849
850 static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
851 {
852 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
853
854 nilfs_mdt_clear_dirty(sci->sc_root->ifile);
855 nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
856 nilfs_mdt_clear_dirty(nilfs->ns_sufile);
857 nilfs_mdt_clear_dirty(nilfs->ns_dat);
858 }
859
860 static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
861 {
862 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
863 struct buffer_head *bh_cp;
864 struct nilfs_checkpoint *raw_cp;
865 int err;
866
867 /* XXX: this interface will be changed */
868 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
869 &raw_cp, &bh_cp);
870 if (likely(!err)) {
871 /*
872  * The following code is duplicated with cpfile.  But, it is
873  * needed to collect the checkpoint even if it was not newly
874  * created.
875  */
876 mark_buffer_dirty(bh_cp);
877 nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
878 nilfs_cpfile_put_checkpoint(
879 nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
880 } else
881 WARN_ON(err == -EINVAL || err == -ENOENT);
882
883 return err;
884 }
885
886 static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
887 {
888 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
889 struct buffer_head *bh_cp;
890 struct nilfs_checkpoint *raw_cp;
891 int err;
892
893 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
894 &raw_cp, &bh_cp);
895 if (unlikely(err)) {
896 WARN_ON(err == -EINVAL || err == -ENOENT);
897 goto failed_ibh;
898 }
899 raw_cp->cp_snapshot_list.ssl_next = 0;
900 raw_cp->cp_snapshot_list.ssl_prev = 0;
901 raw_cp->cp_inodes_count =
902 cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
903 raw_cp->cp_blocks_count =
904 cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
905 raw_cp->cp_nblk_inc =
906 cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
907 raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
908 raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
909
910 if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
911 nilfs_checkpoint_clear_minor(raw_cp);
912 else
913 nilfs_checkpoint_set_minor(raw_cp);
914
915 nilfs_write_inode_common(sci->sc_root->ifile,
916 &raw_cp->cp_ifile_inode, 1);
917 nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
918 return 0;
919
920 failed_ibh:
921 return err;
922 }
923
924 static void nilfs_fill_in_file_bmap(struct inode *ifile,
925 struct nilfs_inode_info *ii)
926
927 {
928 struct buffer_head *ibh;
929 struct nilfs_inode *raw_inode;
930
931 if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
932 ibh = ii->i_bh;
933 BUG_ON(!ibh);
934 raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
935 ibh);
936 nilfs_bmap_write(ii->i_bmap, raw_inode);
937 nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
938 }
939 }
940
941 static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
942 {
943 struct nilfs_inode_info *ii;
944
945 list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
946 nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
947 set_bit(NILFS_I_COLLECTED, &ii->i_state);
948 }
949 }
950
951 static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
952 struct the_nilfs *nilfs)
953 {
954 struct buffer_head *bh_sr;
955 struct nilfs_super_root *raw_sr;
956 unsigned int isz, srsz;
957
958 bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
959 raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
960 isz = nilfs->ns_inode_size;
961 srsz = NILFS_SR_BYTES(isz);
962
963 raw_sr->sr_bytes = cpu_to_le16(srsz);
964 raw_sr->sr_nongc_ctime
965 = cpu_to_le64(nilfs_doing_gc() ?
966 nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
967 raw_sr->sr_flags = 0;
968
969 nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
970 NILFS_SR_DAT_OFFSET(isz), 1);
971 nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
972 NILFS_SR_CPFILE_OFFSET(isz), 1);
973 nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
974 NILFS_SR_SUFILE_OFFSET(isz), 1);
975 memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
976 }
977
978 static void nilfs_redirty_inodes(struct list_head *head)
979 {
980 struct nilfs_inode_info *ii;
981
982 list_for_each_entry(ii, head, i_dirty) {
983 if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
984 clear_bit(NILFS_I_COLLECTED, &ii->i_state);
985 }
986 }
987
988 static void nilfs_drop_collected_inodes(struct list_head *head)
989 {
990 struct nilfs_inode_info *ii;
991
992 list_for_each_entry(ii, head, i_dirty) {
993 if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
994 continue;
995
996 clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
997 set_bit(NILFS_I_UPDATED, &ii->i_state);
998 }
999 }
1000
1001 static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
1002 struct inode *inode,
1003 struct list_head *listp,
1004 int (*collect)(struct nilfs_sc_info *,
1005 struct buffer_head *,
1006 struct inode *))
1007 {
1008 struct buffer_head *bh, *n;
1009 int err = 0;
1010
1011 if (collect) {
1012 list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
1013 list_del_init(&bh->b_assoc_buffers);
1014 err = collect(sci, bh, inode);
1015 brelse(bh);
1016 if (unlikely(err))
1017 goto dispose_buffers;
1018 }
1019 return 0;
1020 }
1021
1022 dispose_buffers:
1023 while (!list_empty(listp)) {
1024 bh = list_first_entry(listp, struct buffer_head,
1025 b_assoc_buffers);
1026 list_del_init(&bh->b_assoc_buffers);
1027 brelse(bh);
1028 }
1029 return err;
1030 }
1031
1032 static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
1033 {
1034 /* Remaining number of blocks within segment buffer */
1035 return sci->sc_segbuf_nblocks -
1036 (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
1037 }
1038
1039 static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
1040 struct inode *inode,
1041 const struct nilfs_sc_operations *sc_ops)
1042 {
1043 LIST_HEAD(data_buffers);
1044 LIST_HEAD(node_buffers);
1045 int err;
1046
1047 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1048 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1049
1050 n = nilfs_lookup_dirty_data_buffers(
1051 inode, &data_buffers, rest + 1, 0, LLONG_MAX);
1052 if (n > rest) {
1053 err = nilfs_segctor_apply_buffers(
1054 sci, inode, &data_buffers,
1055 sc_ops->collect_data);
1056 BUG_ON(!err); /* always receive -E2BIG or true error */
1057 goto break_or_fail;
1058 }
1059 }
1060 nilfs_lookup_dirty_node_buffers(inode, &node_buffers);
1061
1062 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1063 err = nilfs_segctor_apply_buffers(
1064 sci, inode, &data_buffers, sc_ops->collect_data);
1065 if (unlikely(err)) {
1066 /* dispose node list */
1067 nilfs_segctor_apply_buffers(
1068 sci, inode, &node_buffers, NULL);
1069 goto break_or_fail;
1070 }
1071 sci->sc_stage.flags |= NILFS_CF_NODE;
1072 }
1073
1074 err = nilfs_segctor_apply_buffers(
1075 sci, inode, &node_buffers, sc_ops->collect_node);
1076 if (unlikely(err))
1077 goto break_or_fail;
1078
1079 nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
1080 err = nilfs_segctor_apply_buffers(
1081 sci, inode, &node_buffers, sc_ops->collect_bmap);
1082 if (unlikely(err))
1083 goto break_or_fail;
1084
1085 nilfs_segctor_end_finfo(sci, inode);
1086 sci->sc_stage.flags &= ~NILFS_CF_NODE;
1087
1088 break_or_fail:
1089 return err;
1090 }
1091
1092 static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1093 struct inode *inode)
1094 {
1095 LIST_HEAD(data_buffers);
1096 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1097 int err;
1098
1099 n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
1100 sci->sc_dsync_start,
1101 sci->sc_dsync_end);
1102
1103 err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1104 nilfs_collect_file_data);
1105 if (!err) {
1106 nilfs_segctor_end_finfo(sci, inode);
1107 BUG_ON(n > rest);
1108 /* always receive -E2BIG or true error if n > rest */
1109 }
1110 return err;
1111 }
1112
1113 static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1114 {
1115 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1116 struct list_head *head;
1117 struct nilfs_inode_info *ii;
1118 size_t ndone;
1119 int err = 0;
1120
1121 switch (nilfs_sc_cstage_get(sci)) {
1122 case NILFS_ST_INIT:
1123 /* Pre-processes */
1124 sci->sc_stage.flags = 0;
1125
1126 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
1127 sci->sc_nblk_inc = 0;
1128 sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
1129 if (mode == SC_LSEG_DSYNC) {
1130 nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
1131 goto dsync_mode;
1132 }
1133 }
1134
1135 sci->sc_stage.dirty_file_ptr = NULL;
1136 sci->sc_stage.gc_inode_ptr = NULL;
1137 if (mode == SC_FLUSH_DAT) {
1138 nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
1139 goto dat_stage;
1140 }
1141 nilfs_sc_cstage_inc(sci);	/* Fall through */
1142 case NILFS_ST_GC:
1143 if (nilfs_doing_gc()) {
1144 head = &sci->sc_gc_inodes;
1145 ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
1146 head, i_dirty);
1147 list_for_each_entry_continue(ii, head, i_dirty) {
1148 err = nilfs_segctor_scan_file(
1149 sci, &ii->vfs_inode,
1150 &nilfs_sc_file_ops);
1151 if (unlikely(err)) {
1152 sci->sc_stage.gc_inode_ptr = list_entry(
1153 ii->i_dirty.prev,
1154 struct nilfs_inode_info,
1155 i_dirty);
1156 goto break_or_fail;
1157 }
1158 set_bit(NILFS_I_COLLECTED, &ii->i_state);
1159 }
1160 sci->sc_stage.gc_inode_ptr = NULL;
1161 }
1162 nilfs_sc_cstage_inc(sci);	/* Fall through */
1163 case NILFS_ST_FILE:
1164 head = &sci->sc_dirty_files;
1165 ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
1166 i_dirty);
1167 list_for_each_entry_continue(ii, head, i_dirty) {
1168 clear_bit(NILFS_I_DIRTY, &ii->i_state);
1169
1170 err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
1171 &nilfs_sc_file_ops);
1172 if (unlikely(err)) {
1173 sci->sc_stage.dirty_file_ptr =
1174 list_entry(ii->i_dirty.prev,
1175 struct nilfs_inode_info,
1176 i_dirty);
1177 goto break_or_fail;
1178 }
1179 /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
1180 /* XXX: required ? */
1181 }
1182 sci->sc_stage.dirty_file_ptr = NULL;
1183 if (mode == SC_FLUSH_FILE) {
1184 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1185 return 0;
1186 }
1187 nilfs_sc_cstage_inc(sci);
1188 sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
1189 /* Fall through */
1190 case NILFS_ST_IFILE:
1191 err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
1192 &nilfs_sc_file_ops);
1193 if (unlikely(err))
1194 break;
1195 nilfs_sc_cstage_inc(sci);
1196 /* Creating a checkpoint */
1197 err = nilfs_segctor_create_checkpoint(sci);
1198 if (unlikely(err))
1199 break;
1200 /* Fall through */
1201 case NILFS_ST_CPFILE:
1202 err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
1203 &nilfs_sc_file_ops);
1204 if (unlikely(err))
1205 break;
1206 nilfs_sc_cstage_inc(sci);	/* Fall through */
1207 case NILFS_ST_SUFILE:
1208 err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
1209 sci->sc_nfreesegs, &ndone);
1210 if (unlikely(err)) {
1211 nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1212 sci->sc_freesegs, ndone,
1213 NULL);
1214 break;
1215 }
1216 sci->sc_stage.flags |= NILFS_CF_SUFREED;
1217
1218 err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
1219 &nilfs_sc_file_ops);
1220 if (unlikely(err))
1221 break;
1222 nilfs_sc_cstage_inc(sci);	/* Fall through */
1223 case NILFS_ST_DAT:
1224 dat_stage:
1225 err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
1226 &nilfs_sc_dat_ops);
1227 if (unlikely(err))
1228 break;
1229 if (mode == SC_FLUSH_DAT) {
1230 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1231 return 0;
1232 }
1233 nilfs_sc_cstage_inc(sci);	/* Fall through */
1234 case NILFS_ST_SR:
1235 if (mode == SC_LSEG_SR) {
1236 /* Appending a super root */
1237 err = nilfs_segctor_add_super_root(sci);
1238 if (unlikely(err))
1239 break;
1240 }
1241 /* End of a logical segment */
1242 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1243 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1244 return 0;
1245 case NILFS_ST_DSYNC:
1246 dsync_mode:
1247 sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
1248 ii = sci->sc_dsync_inode;
1249 if (!test_bit(NILFS_I_BUSY, &ii->i_state))
1250 break;
1251
1252 err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1253 if (unlikely(err))
1254 break;
1255 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1256 nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
1257 return 0;
1258 case NILFS_ST_DONE:
1259 return 0;
1260 default:
1261 BUG();
1262 }
1263
1264 break_or_fail:
1265 return err;
1266 }
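/*
 * Editor's note (not part of the original file): the switch above is a
 * resumable state machine.  In a full construction (SC_LSEG_SR) the
 * stage count advances through
 *
 *	INIT -> GC -> FILE -> IFILE -> CPFILE -> SUFILE -> DAT -> SR -> DONE
 *
 * with each case falling through to the next, while the flush modes take
 * shortcuts: SC_FLUSH_FILE stops after NILFS_ST_FILE, SC_FLUSH_DAT jumps
 * directly to NILFS_ST_DAT, and SC_LSEG_DSYNC jumps to NILFS_ST_DSYNC.
 * On -E2BIG the caller extends the segment list and re-enters the
 * machine at the saved stage.
 */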
1267
1268 /**
1269  * nilfs_segctor_begin_construction - setup segment buffer to make a new log
1270  * @sci: nilfs_sc_info
1271  * @nilfs: nilfs object
1272  */
1273 static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
1274 struct the_nilfs *nilfs)
1275 {
1276 struct nilfs_segment_buffer *segbuf, *prev;
1277 __u64 nextnum;
1278 int err, alloc = 0;
1279
1280 segbuf = nilfs_segbuf_new(sci->sc_super);
1281 if (unlikely(!segbuf))
1282 return -ENOMEM;
1283
1284 if (list_empty(&sci->sc_write_logs)) {
1285 nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
1286 nilfs->ns_pseg_offset, nilfs);
1287 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1288 nilfs_shift_to_next_segment(nilfs);
1289 nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
1290 }
1291
1292 segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
1293 nextnum = nilfs->ns_nextnum;
1294
1295 if (nilfs->ns_segnum == nilfs->ns_nextnum)
1296 /* Start from the head of a new full segment */
1297 alloc++;
1298 } else {
1299 /* Continue logs */
1300 prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1301 nilfs_segbuf_map_cont(segbuf, prev);
1302 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
1303 nextnum = prev->sb_nextnum;
1304
1305 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1306 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1307 segbuf->sb_sum.seg_seq++;
1308 alloc++;
1309 }
1310 }
1311
1312 err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
1313 if (err)
1314 goto failed;
1315
1316 if (alloc) {
1317 err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
1318 if (err)
1319 goto failed;
1320 }
1321 nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
1322
1323 BUG_ON(!list_empty(&sci->sc_segbufs));
1324 list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
1325 sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
1326 return 0;
1327
1328 failed:
1329 nilfs_segbuf_free(segbuf);
1330 return err;
1331 }
1332
1333 static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1334 struct the_nilfs *nilfs, int nadd)
1335 {
1336 struct nilfs_segment_buffer *segbuf, *prev;
1337 struct inode *sufile = nilfs->ns_sufile;
1338 __u64 nextnextnum;
1339 LIST_HEAD(list);
1340 int err, ret, i;
1341
1342 prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1343
1344 /*
1345  * The segment picked as the next one may have been allocated during the
1346  * previous construction, so its segment usage buffer may not be dirty.
1347  * Mark it dirty here to pin it in memory until the sufile is written.
1348  */
1349 err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
1350 if (unlikely(err))
1351 return err;
1352
1353 for (i = 0; i < nadd; i++) {
1354 /* extend segment skeleton */
1355 err = -ENOMEM;
1356 segbuf = nilfs_segbuf_new(sci->sc_super);
1357 if (unlikely(!segbuf))
1358 goto failed;
1359
1360 /* map this buffer to region of segment on-disk */
1361 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1362 sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1363
1364 /* allocate the next next full segment */
1365 err = nilfs_sufile_alloc(sufile, &nextnextnum);
1366 if (unlikely(err))
1367 goto failed_segbuf;
1368
1369 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1370 nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1371
1372 list_add_tail(&segbuf->sb_list, &list);
1373 prev = segbuf;
1374 }
1375 list_splice_tail(&list, &sci->sc_segbufs);
1376 return 0;
1377
1378 failed_segbuf:
1379 nilfs_segbuf_free(segbuf);
1380 failed:
1381 list_for_each_entry(segbuf, &list, sb_list) {
1382 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1383 WARN_ON(ret);
1384 }
1385 nilfs_destroy_logs(&list);
1386 return err;
1387 }
1388
1389 static void nilfs_free_incomplete_logs(struct list_head *logs,
1390 struct the_nilfs *nilfs)
1391 {
1392 struct nilfs_segment_buffer *segbuf, *prev;
1393 struct inode *sufile = nilfs->ns_sufile;
1394 int ret;
1395
1396 segbuf = NILFS_FIRST_SEGBUF(logs);
1397 if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
1398 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1399 WARN_ON(ret);
1400 }
1401 if (atomic_read(&segbuf->sb_err)) {
1402 /* Case 1: The first segment failed */
1403 if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1404 /*
1405  * Case 1a:  Partial segment appended into an existing
1406  * segment
1407  */
1408 nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1409 segbuf->sb_fseg_end);
1410 else /* Case 1b:  New full segment */
1411 set_nilfs_discontinued(nilfs);
1412 }
1413
1414 prev = segbuf;
1415 list_for_each_entry_continue(segbuf, logs, sb_list) {
1416 if (prev->sb_nextnum != segbuf->sb_nextnum) {
1417 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1418 WARN_ON(ret);
1419 }
1420 if (atomic_read(&segbuf->sb_err) &&
1421 segbuf->sb_segnum != nilfs->ns_nextnum)
1422 /* Case 2: extended segment (!= next) failed */
1423 nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1424 prev = segbuf;
1425 }
1426 }
1427
1428 static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1429 struct inode *sufile)
1430 {
1431 struct nilfs_segment_buffer *segbuf;
1432 unsigned long live_blocks;
1433 int ret;
1434
1435 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1436 live_blocks = segbuf->sb_sum.nblocks +
1437 (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1438 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1439 live_blocks,
1440 sci->sc_seg_ctime);
1441 WARN_ON(ret);
1442 }
1443 }
1444
1445 static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
1446 {
1447 struct nilfs_segment_buffer *segbuf;
1448 int ret;
1449
1450 segbuf = NILFS_FIRST_SEGBUF(logs);
1451 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1452 segbuf->sb_pseg_start -
1453 segbuf->sb_fseg_start, 0);
1454 WARN_ON(ret);
1455
1456 list_for_each_entry_continue(segbuf, logs, sb_list) {
1457 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1458 0, 0);
1459 WARN_ON(ret);
1460 }
1461 }
1462
1463 static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1464 struct nilfs_segment_buffer *last,
1465 struct inode *sufile)
1466 {
1467 struct nilfs_segment_buffer *segbuf = last;
1468 int ret;
1469
1470 list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1471 sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1472 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1473 WARN_ON(ret);
1474 }
1475 nilfs_truncate_logs(&sci->sc_segbufs, last);
1476 }
1477
1478
1479 static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1480 struct the_nilfs *nilfs, int mode)
1481 {
1482 struct nilfs_cstage prev_stage = sci->sc_stage;
1483 int err, nadd = 1;
1484
1485 /* Collection retry loop */
1486 for (;;) {
1487 sci->sc_nblk_this_inc = 0;
1488 sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1489
1490 err = nilfs_segctor_reset_segment_buffer(sci);
1491 if (unlikely(err))
1492 goto failed;
1493
1494 err = nilfs_segctor_collect_blocks(sci, mode);
1495 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1496 if (!err)
1497 break;
1498
1499 if (unlikely(err != -E2BIG))
1500 goto failed;
1501
1502 /* The current segment is filled up */
1503 if (mode != SC_LSEG_SR ||
1504 nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
1505 break;
1506
1507 nilfs_clear_logs(&sci->sc_segbufs);
1508
1509 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1510 err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1511 sci->sc_freesegs,
1512 sci->sc_nfreesegs,
1513 NULL);
1514 WARN_ON(err);
1515 sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
1516 }
1517
1518 err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1519 if (unlikely(err))
1520 return err;
1521
1522 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1523 sci->sc_stage = prev_stage;
1524 }
1525 nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1526 return 0;
1527
1528 failed:
1529 return err;
1530 }
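/*
 * Editor's note (not part of the original file): on -E2BIG the retry
 * loop above doubles the number of segments appended per attempt (nadd),
 * capped at SC_MAX_SEGDELTA, restores the saved collection stage, and
 * re-runs the collection until everything fits.
 */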
1531
1532 static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1533 struct buffer_head *new_bh)
1534 {
1535 BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1536
1537 list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1538 /* The caller must release old_bh */
1539 }
1540
1541 static int
1542 nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1543 struct nilfs_segment_buffer *segbuf,
1544 int mode)
1545 {
1546 struct inode *inode = NULL;
1547 sector_t blocknr;
1548 unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1549 unsigned long nblocks = 0, ndatablk = 0;
1550 const struct nilfs_sc_operations *sc_op = NULL;
1551 struct nilfs_segsum_pointer ssp;
1552 struct nilfs_finfo *finfo = NULL;
1553 union nilfs_binfo binfo;
1554 struct buffer_head *bh, *bh_org;
1555 ino_t ino = 0;
1556 int err = 0;
1557
1558 if (!nfinfo)
1559 goto out;
1560
1561 blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1562 ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1563 ssp.offset = sizeof(struct nilfs_segment_summary);
1564
1565 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1566 if (bh == segbuf->sb_super_root)
1567 break;
1568 if (!finfo) {
1569 finfo = nilfs_segctor_map_segsum_entry(
1570 sci, &ssp, sizeof(*finfo));
1571 ino = le64_to_cpu(finfo->fi_ino);
1572 nblocks = le32_to_cpu(finfo->fi_nblocks);
1573 ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1574
1575 inode = bh->b_page->mapping->host;
1576
1577 if (mode == SC_LSEG_DSYNC)
1578 sc_op = &nilfs_sc_dsync_ops;
1579 else if (ino == NILFS_DAT_INO)
1580 sc_op = &nilfs_sc_dat_ops;
1581 else
1582 sc_op = &nilfs_sc_file_ops;
1583 }
1584 bh_org = bh;
1585 get_bh(bh_org);
1586 err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1587 &binfo);
1588 if (bh != bh_org)
1589 nilfs_list_replace_buffer(bh_org, bh);
1590 brelse(bh_org);
1591 if (unlikely(err))
1592 goto failed_bmap;
1593
1594 if (ndatablk > 0)
1595 sc_op->write_data_binfo(sci, &ssp, &binfo);
1596 else
1597 sc_op->write_node_binfo(sci, &ssp, &binfo);
1598
1599 blocknr++;
1600 if (--nblocks == 0) {
1601 finfo = NULL;
1602 if (--nfinfo == 0)
1603 break;
1604 } else if (ndatablk > 0)
1605 ndatablk--;
1606 }
1607 out:
1608 return 0;
1609
1610 failed_bmap:
1611 return err;
1612 }
1613
1614 static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1615 {
1616 struct nilfs_segment_buffer *segbuf;
1617 int err;
1618
1619 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1620 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1621 if (unlikely(err))
1622 return err;
1623 nilfs_segbuf_fill_in_segsum(segbuf);
1624 }
1625 return 0;
1626 }
1627
1628 static void nilfs_begin_page_io(struct page *page)
1629 {
1630 if (!page || PageWriteback(page))
1631 /*
1632  * For split b-tree node pages, this function may be called
1633  * twice.  We ignore the 2nd or later calls by this check.
1634  */
1635 return;
1636
1637 lock_page(page);
1638 clear_page_dirty_for_io(page);
1639 set_page_writeback(page);
1640 unlock_page(page);
1641 }
1642
1643 static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1644 {
1645 struct nilfs_segment_buffer *segbuf;
1646 struct page *bd_page = NULL, *fs_page = NULL;
1647
1648 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1649 struct buffer_head *bh;
1650
1651 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1652 b_assoc_buffers) {
1653 if (bh->b_page != bd_page) {
1654 if (bd_page) {
1655 lock_page(bd_page);
1656 clear_page_dirty_for_io(bd_page);
1657 set_page_writeback(bd_page);
1658 unlock_page(bd_page);
1659 }
1660 bd_page = bh->b_page;
1661 }
1662 }
1663
1664 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1665 b_assoc_buffers) {
1666 set_buffer_async_write(bh);
1667 if (bh == segbuf->sb_super_root) {
1668 if (bh->b_page != bd_page) {
1669 lock_page(bd_page);
1670 clear_page_dirty_for_io(bd_page);
1671 set_page_writeback(bd_page);
1672 unlock_page(bd_page);
1673 bd_page = bh->b_page;
1674 }
1675 break;
1676 }
1677 if (bh->b_page != fs_page) {
1678 nilfs_begin_page_io(fs_page);
1679 fs_page = bh->b_page;
1680 }
1681 }
1682 }
1683 if (bd_page) {
1684 lock_page(bd_page);
1685 clear_page_dirty_for_io(bd_page);
1686 set_page_writeback(bd_page);
1687 unlock_page(bd_page);
1688 }
1689 nilfs_begin_page_io(fs_page);
1690 }
1691
1692 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1693 struct the_nilfs *nilfs)
1694 {
1695 int ret;
1696
1697 ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1698 list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1699 return ret;
1700 }
1701
1702 static void nilfs_end_page_io(struct page *page, int err)
1703 {
1704 if (!page)
1705 return;
1706
1707 if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1708 /*
1709  * For b-tree node pages, this function may be called twice
1710  * or more because they might be split in a segment.
1711  */
1712 if (PageDirty(page)) {
1713 /*
1714  * For pages holding split b-tree node buffers, the dirty
1715  * flag on the buffers may be cleared discretely.
1716  * In that case, the page is once redirtied for
1717  * remaining buffers, and it must be cancelled if
1718  * all the buffers get cleaned later.
1719  */
1720 lock_page(page);
1721 if (nilfs_page_buffers_clean(page))
1722 __nilfs_clear_page_dirty(page);
1723 unlock_page(page);
1724 }
1725 return;
1726 }
1727
1728 if (!err) {
1729 if (!nilfs_page_buffers_clean(page))
1730 __set_page_dirty_nobuffers(page);
1731 ClearPageError(page);
1732 } else {
1733 __set_page_dirty_nobuffers(page);
1734 SetPageError(page);
1735 }
1736
1737 end_page_writeback(page);
1738 }
1739
1740 static void nilfs_abort_logs(struct list_head *logs, int err)
1741 {
1742 struct nilfs_segment_buffer *segbuf;
1743 struct page *bd_page = NULL, *fs_page = NULL;
1744 struct buffer_head *bh;
1745
1746 if (list_empty(logs))
1747 return;
1748
1749 list_for_each_entry(segbuf, logs, sb_list) {
1750 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1751 b_assoc_buffers) {
1752 if (bh->b_page != bd_page) {
1753 if (bd_page)
1754 end_page_writeback(bd_page);
1755 bd_page = bh->b_page;
1756 }
1757 }
1758
1759 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1760 b_assoc_buffers) {
1761 clear_buffer_async_write(bh);
1762 if (bh == segbuf->sb_super_root) {
1763 if (bh->b_page != bd_page) {
1764 end_page_writeback(bd_page);
1765 bd_page = bh->b_page;
1766 }
1767 break;
1768 }
1769 if (bh->b_page != fs_page) {
1770 nilfs_end_page_io(fs_page, err);
1771 fs_page = bh->b_page;
1772 }
1773 }
1774 }
1775 if (bd_page)
1776 end_page_writeback(bd_page);
1777
1778 nilfs_end_page_io(fs_page, err);
1779 }
1780
1781 static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1782 struct the_nilfs *nilfs, int err)
1783 {
1784 LIST_HEAD(logs);
1785 int ret;
1786
1787 list_splice_tail_init(&sci->sc_write_logs, &logs);
1788 ret = nilfs_wait_on_logs(&logs);
1789 nilfs_abort_logs(&logs, ret ? : err);
1790
1791 list_splice_tail_init(&sci->sc_segbufs, &logs);
1792 nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1793 nilfs_free_incomplete_logs(&logs, nilfs);
1794
1795 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1796 ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1797 sci->sc_freesegs,
1798 sci->sc_nfreesegs,
1799 NULL);
1800 WARN_ON(ret);
1801 }
1802
1803 nilfs_destroy_logs(&logs);
1804 }
1805
1806 static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1807 struct nilfs_segment_buffer *segbuf)
1808 {
1809 nilfs->ns_segnum = segbuf->sb_segnum;
1810 nilfs->ns_nextnum = segbuf->sb_nextnum;
1811 nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1812 + segbuf->sb_sum.nblocks;
1813 nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1814 nilfs->ns_ctime = segbuf->sb_sum.ctime;
1815 }
1816
1817 static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1818 {
1819 struct nilfs_segment_buffer *segbuf;
1820 struct page *bd_page = NULL, *fs_page = NULL;
1821 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1822 int update_sr = false;
1823
1824 list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1825 struct buffer_head *bh;
1826
1827 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1828 b_assoc_buffers) {
1829 set_buffer_uptodate(bh);
1830 clear_buffer_dirty(bh);
1831 if (bh->b_page != bd_page) {
1832 if (bd_page)
1833 end_page_writeback(bd_page);
1834 bd_page = bh->b_page;
1835 }
1836 }
1837
1838 /*
1839  * We assume that buffers belonging to the same page continue over
1840  * the buffer list.  Under this assumption, the last buffer head of
1841  * a page is identifiable by the discontinuity of bh->b_page
1842  * (page != fs_page).
1843  *
1844  * For B-tree node blocks, however, this assumption is not
1845  * guaranteed.  The cleanup code of B-tree node pages needs
1846  * special care.
1847  */
1848 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1849 b_assoc_buffers) {
1850 const unsigned long set_bits = BIT(BH_Uptodate);
1851 const unsigned long clear_bits =
1852 (BIT(BH_Dirty) | BIT(BH_Async_Write) |
1853 BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
1854 BIT(BH_NILFS_Redirected));
1855
1856 set_mask_bits(&bh->b_state, clear_bits, set_bits);
1857 if (bh == segbuf->sb_super_root) {
1858 if (bh->b_page != bd_page) {
1859 end_page_writeback(bd_page);
1860 bd_page = bh->b_page;
1861 }
1862 update_sr = true;
1863 break;
1864 }
1865 if (bh->b_page != fs_page) {
1866 nilfs_end_page_io(fs_page, 0);
1867 fs_page = bh->b_page;
1868 }
1869 }
1870
1871 if (!nilfs_segbuf_simplex(segbuf)) {
1872 if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1873 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1874 sci->sc_lseg_stime = jiffies;
1875 }
1876 if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1877 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1878 }
1879 }
1880
1881 /* Since pages may continue over multiple segment buffers,
1882  * the end of the last page must be checked outside of the loop.
1883  */
1884 if (bd_page)
1885 end_page_writeback(bd_page);
1886
1887 nilfs_end_page_io(fs_page, 0);
1888
1889 nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1890
1891 if (nilfs_doing_gc())
1892 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1893 else
1894 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1895
1896 sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1897
1898 segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1899 nilfs_set_next_segment(nilfs, segbuf);
1900
1901 if (update_sr) {
1902 nilfs->ns_flushed_device = 0;
1903 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1904 segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1905
1906 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1907 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1908 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1909 nilfs_segctor_clear_metadata_dirty(sci);
1910 } else
1911 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1912 }
1913
1914 static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1915 {
1916 int ret;
1917
1918 ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1919 if (!ret) {
1920 nilfs_segctor_complete_write(sci);
1921 nilfs_destroy_logs(&sci->sc_write_logs);
1922 }
1923 return ret;
1924 }
1925
1926 static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1927 struct the_nilfs *nilfs)
1928 {
1929 struct nilfs_inode_info *ii, *n;
1930 struct inode *ifile = sci->sc_root->ifile;
1931
1932 spin_lock(&nilfs->ns_inode_lock);
1933 retry:
1934 list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
1935 if (!ii->i_bh) {
1936 struct buffer_head *ibh;
1937 int err;
1938
1939 spin_unlock(&nilfs->ns_inode_lock);
1940 err = nilfs_ifile_get_inode_block(
1941 ifile, ii->vfs_inode.i_ino, &ibh);
1942 if (unlikely(err)) {
1943 nilfs_msg(sci->sc_super, KERN_WARNING,
1944 "log writer: error %d getting inode block (ino=%lu)",
1945 err, ii->vfs_inode.i_ino);
1946 return err;
1947 }
1948 spin_lock(&nilfs->ns_inode_lock);
1949 if (likely(!ii->i_bh))
1950 ii->i_bh = ibh;
1951 else
1952 brelse(ibh);
1953 goto retry;
1954 }
1955
1956 /* Always redirty the buffer to avoid race condition */
1957 mark_buffer_dirty(ii->i_bh);
1958 nilfs_mdt_mark_dirty(ifile);
1959
1960 clear_bit(NILFS_I_QUEUED, &ii->i_state);
1961 set_bit(NILFS_I_BUSY, &ii->i_state);
1962 list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
1963 }
1964 spin_unlock(&nilfs->ns_inode_lock);
1965
1966 return 0;
1967 }
1968
1969 static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1970 struct the_nilfs *nilfs)
1971 {
1972 struct nilfs_inode_info *ii, *n;
1973 int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
1974 int defer_iput = false;
1975
1976 spin_lock(&nilfs->ns_inode_lock);
1977 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
1978 if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
1979 test_bit(NILFS_I_DIRTY, &ii->i_state))
1980 continue;
1981
1982 clear_bit(NILFS_I_BUSY, &ii->i_state);
1983 brelse(ii->i_bh);
1984 ii->i_bh = NULL;
1985 list_del_init(&ii->i_dirty);
1986 if (!ii->vfs_inode.i_nlink || during_mount) {
1987 /*
1988  * Defer calling iput() to avoid deadlocks if
1989  * i_nlink == 0 or mount is not yet finished.
1990  */
1991 list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
1992 defer_iput = true;
1993 } else {
1994 spin_unlock(&nilfs->ns_inode_lock);
1995 iput(&ii->vfs_inode);
1996 spin_lock(&nilfs->ns_inode_lock);
1997 }
1998 }
1999 spin_unlock(&nilfs->ns_inode_lock);
2000
2001 if (defer_iput)
2002 schedule_work(&sci->sc_iput_work);
2003 }
2004
2005 /*
2006  * Main procedure of segment constructor
2007  */
2008 static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2009 {
2010 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2011 int err;
2012
2013 nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
2014 sci->sc_cno = nilfs->ns_cno;
2015
2016 err = nilfs_segctor_collect_dirty_files(sci, nilfs);
2017 if (unlikely(err))
2018 goto out;
2019
2020 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
2021 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2022
2023 if (nilfs_segctor_clean(sci))
2024 goto out;
2025
2026 do {
2027 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2028
2029 err = nilfs_segctor_begin_construction(sci, nilfs);
2030 if (unlikely(err))
2031 goto out;
2032
2033 /* Update time stamp */
2034 sci->sc_seg_ctime = ktime_get_real_seconds();
2035
2036 err = nilfs_segctor_collect(sci, nilfs, mode);
2037 if (unlikely(err))
2038 goto failed;
2039
2040 /* Avoid empty segment */
2041 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
2042 nilfs_segbuf_empty(sci->sc_curseg)) {
2043 nilfs_segctor_abort_construction(sci, nilfs, 1);
2044 goto out;
2045 }
2046
2047 err = nilfs_segctor_assign(sci, mode);
2048 if (unlikely(err))
2049 goto failed;
2050
2051 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2052 nilfs_segctor_fill_in_file_bmap(sci);
2053
2054 if (mode == SC_LSEG_SR &&
2055 nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
2056 err = nilfs_segctor_fill_in_checkpoint(sci);
2057 if (unlikely(err))
2058 goto failed_to_write;
2059
2060 nilfs_segctor_fill_in_super_root(sci, nilfs);
2061 }
2062 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2063
2064
2065 nilfs_segctor_prepare_write(sci);
2066
2067 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2068 nilfs->ns_crc_seed);
2069
2070 err = nilfs_segctor_write(sci, nilfs);
2071 if (unlikely(err))
2072 goto failed_to_write;
2073
2074 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
2075 nilfs->ns_blocksize_bits != PAGE_SHIFT) {
2076
2077
2078
2079
2080
2081
2082
2083 err = nilfs_segctor_wait(sci);
2084 if (err)
2085 goto failed_to_write;
2086 }
2087 } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
2088
2089 out:
2090 nilfs_segctor_drop_written_files(sci, nilfs);
2091 return err;
2092
2093 failed_to_write:
2094 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2095 nilfs_redirty_inodes(&sci->sc_dirty_files);
2096
2097 failed:
2098 if (nilfs_doing_gc())
2099 nilfs_redirty_inodes(&sci->sc_gc_inodes);
2100 nilfs_segctor_abort_construction(sci, nilfs, err);
2101 goto out;
2102 }
2103
/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, it ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		sci->sc_timer.expires = jiffies + sci->sc_interval;
		add_timer(&sci->sc_timer);
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

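/*
 * nilfs_segctor_do_flush - record a flush request and kick the daemon.
 *
 * Bit @bn of sc_flush_request identifies what triggered the request:
 * bit 0 stands for data files and the bit of a metadata inode number
 * (such as the DAT) stands for that metadata file.  The daemon is only
 * woken when the request mask goes from empty to non-empty; subsequent
 * requests just accumulate in the pending mask.
 */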
static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & BIT(bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= BIT(bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}

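/*
 * A waiter for a synchronous segment construction request.  Each caller
 * of nilfs_segctor_sync() stamps its entry with a sequence number taken
 * from sc_seq_request; the daemon completes every entry whose sequence
 * number is covered by sc_seq_done.
 */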
struct nilfs_segctor_wait_request {
	wait_queue_entry_t wq;
	__u32 seq;
	int err;
	atomic_t done;
};

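/*
 * nilfs_segctor_sync - wait for the log writer to finish the requested
 * construction.
 *
 * Queues a wait request tagged with a fresh sequence number, wakes the
 * daemon, and sleeps interruptibly until the daemon marks the request
 * done; a pending signal aborts the wait with -ERESTARTSYS.
 */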
static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	spin_lock(&sci->sc_state_lock);
	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	wait_req.seq = ++sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	init_waitqueue_entry(&wait_req.wq, current);
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	set_current_state(TASK_INTERRUPTIBLE);
	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}

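/*
 * nilfs_segctor_wakeup - complete queued wait requests.
 *
 * Walks the sc_wait_request queue and, for every entry whose sequence
 * number is covered by sc_seq_done, records @err and wakes the waiter by
 * invoking its wait-queue callback directly under the queue lock.
 */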
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}

/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error code is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info *ti;
	int err;

	if (!sci)
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}

/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned. On errors, one of the following
 * negative error code is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (!sci)
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
	    nilfs_test_opt(nilfs, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(nilfs)) {
		nilfs_transaction_unlock(sb);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&nilfs->ns_inode_lock);
		nilfs_transaction_unlock(sb);
		return 0;
	}
	spin_unlock(&nilfs->ns_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
	if (!err)
		nilfs->ns_flushed_device = 0;

	nilfs_transaction_unlock(sb);
	return err;
}

#define FLUSH_FILE_BIT	(0x1) /* data file only */
#define FLUSH_DAT_BIT	BIT(NILFS_DAT_INO) /* DAT only */

/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);
	del_timer_sync(&sci->sc_timer);
}

/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}

/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sci->sc_super,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sci->sc_super,
							 NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}

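/*
 * nilfs_construction_timeout - timer callback that wakes up the segment
 * constructor thread so it carries out the deferred construction.
 */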
static void nilfs_construction_timeout(struct timer_list *t)
{
	struct nilfs_sc_info *sci = from_timer(sci, t, sc_timer);

	wake_up_process(sci->sc_timer_task);
}

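/*
 * nilfs_remove_written_gcinodes - drop GC inodes whose blocks were
 * written out.
 *
 * Inodes marked NILFS_I_UPDATED are unlinked from @head, their page and
 * b-tree node caches are truncated, and the references held for garbage
 * collection are released with iput().
 */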
static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(&ii->i_btnode_cache);
		iput(&ii->vfs_inode);
	}
}

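/*
 * nilfs_clean_segments - checkpoint creation for cleaning segments.
 *
 * Entry point of the garbage-collection path: the DAT is saved to its
 * shadow map, the segments described by @argv/@kbufs are prepared for
 * cleaning, and a full construction (SC_LSEG_SR) is retried until it
 * succeeds.  On a preparation error the DAT state is restored from the
 * shadow map; with the DISCARD option set, the freed segments are also
 * discarded on the device.
 */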
int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci = nilfs->ns_writer;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sb, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(nilfs, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);
		if (ret) {
			nilfs_msg(sb, KERN_WARNING,
				  "error %d on discard request, turning discards off for the device",
				  ret);
			nilfs_clear_opt(nilfs, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sb);
	return err;
}

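/*
 * nilfs_segctor_thread_construct - run one construction inside a writer
 * transaction on behalf of the daemon thread.
 */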
static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sci->sc_super, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * Unclosed segment should be retried.  We do this using sc_timer.
	 * Timeout of sc_timer will invoke complete construction which leads
	 * to close the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sci->sc_super);
}

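/*
 * nilfs_segctor_do_immediate_flush - serve a prioritized flush request.
 *
 * Picks SC_FLUSH_DAT if the DAT bit is pending and SC_FLUSH_FILE
 * otherwise, runs the construction, clears the corresponding request
 * bit, and finally drops the NILFS_SC_PRIOR_FLUSH flag.
 */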
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}

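/*
 * nilfs_segctor_flush_mode - choose the construction mode for pending
 * flush requests.
 *
 * A partial flush (SC_FLUSH_FILE or SC_FLUSH_DAT) is chosen only while
 * the current logical segment is closed, or is still younger than the
 * major checkpoint frequency, and only one class of request is pending;
 * otherwise a full construction with a super root (SC_LSEG_SR) is used.
 */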
static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}

/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int timeout = 0;

	sci->sc_timer_task = current;

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	nilfs_msg(sci->sc_super, KERN_INFO,
		  "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
		  sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (sci->sc_flush_request)
			mode = nilfs_segctor_flush_mode(sci);
		else
			break;

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		try_to_freeze();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
					sci->sc_timer.expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer.expires));

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);
	}
	goto loop;

 end_thread:
	spin_unlock(&sci->sc_state_lock);

	/* end sync. */
	sci->sc_task = NULL;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	return 0;
}

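/*
 * nilfs_segctor_start_thread - create the segctord kernel thread and wait
 * on sc_wait_task until the new thread has published itself in sc_task.
 */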
static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		nilfs_msg(sci->sc_super, KERN_ERR,
			  "error %d creating segctord thread", err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}

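/*
 * nilfs_segctor_kill_thread - request segctord to quit and wait for it.
 *
 * Sets NILFS_SEGCTOR_QUIT, then repeatedly wakes the daemon and waits on
 * sc_wait_task (dropping sc_state_lock across the wait) until the exiting
 * thread clears sc_task.
 */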
static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
	__acquires(&sci->sc_state_lock)
	__releases(&sci->sc_state_lock)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}

/*
 * Setup & clean-up functions
 */
static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
					       struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_super = sb;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_iput_queue);
	INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
	timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (nilfs->ns_interval)
		sci->sc_interval = HZ * nilfs->ns_interval;
	if (nilfs->ns_watermark)
		sci->sc_watermark = nilfs->ns_watermark;
	return sci;
}

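/*
 * nilfs_segctor_write_out - flush remaining dirty state during teardown.
 *
 * Retries a full construction up to NILFS_SC_CLEANUP_RETRY times,
 * flushing the iput work queue between attempts.
 */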
static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/*
	 * The segctord thread was stopped and its timer was removed.
	 * But some tasks remain.
	 */
	do {
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sci->sc_super, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sci->sc_super);

		flush_work(&sci->sc_iput_work);

	} while (ret && retrycount-- > 0);
}

/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
	int flag;

	up_write(&nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	if (flush_work(&sci->sc_iput_work))
		flag = true;

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_msg(sci->sc_super, KERN_WARNING,
			  "disposed unprocessed dirty file(s) when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
	}

	if (!list_empty(&sci->sc_iput_queue)) {
		nilfs_msg(sci->sc_super, KERN_WARNING,
			  "disposed unprocessed inode(s) in iput queue when stopping log writer");
		nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&nilfs->ns_segctor_sem);

	del_timer_sync(&sci->sc_timer);
	kfree(sci);
}

/**
 * nilfs_attach_log_writer - attach log writer
 * @sb: super block instance
 * @root: root object of the current filesystem tree
 *
 * This allocates a log writer object, initializes it, and starts the
 * log writer.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error code is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

	if (nilfs->ns_writer) {
		/*
		 * This happens if the filesystem was remounted
		 * read/write after nilfs_error degenerated it into a
		 * read-only mount.
		 */
		nilfs_detach_log_writer(sb);
	}

	nilfs->ns_writer = nilfs_segctor_new(sb, root);
	if (!nilfs->ns_writer)
		return -ENOMEM;

	inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);

	err = nilfs_segctor_start_thread(nilfs->ns_writer);
	if (err) {
		kfree(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}
	return err;
}

/**
 * nilfs_detach_log_writer - destroy log writer
 * @sb: super block instance
 *
 * This kills log writer daemon, frees the log writer object, and
 * destroys list of dirty files.
 */
void nilfs_detach_log_writer(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (nilfs->ns_writer) {
		nilfs_segctor_destroy(nilfs->ns_writer);
		nilfs->ns_writer = NULL;
	}

	/* Force to free the list of dirty files */
	spin_lock(&nilfs->ns_inode_lock);
	if (!list_empty(&nilfs->ns_dirty_files)) {
		list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
		nilfs_msg(sb, KERN_WARNING,
			  "disposed unprocessed dirty file(s) when detaching log writer");
	}
	spin_unlock(&nilfs->ns_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(nilfs, &garbage_list, 1);
}