Lines matching refs: log (drivers/md/raid5-cache.c, the md/RAID5 write-ahead journal)
99 struct r5l_log *log; member
126 static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc) in r5l_ring_add() argument
129 if (start >= log->device_size) in r5l_ring_add()
130 start = start - log->device_size; in r5l_ring_add()
134 static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start, in r5l_ring_distance() argument
140 return end + log->device_size - start; in r5l_ring_distance()
143 static bool r5l_has_free_space(struct r5l_log *log, sector_t size) in r5l_has_free_space() argument
147 used_size = r5l_ring_distance(log, log->last_checkpoint, in r5l_has_free_space()
148 log->log_start); in r5l_has_free_space()
150 return log->device_size > used_size + size; in r5l_has_free_space()
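
The three helpers above treat the journal device as a circular buffer of sectors: positions advance modulo device_size, and the live region runs from last_checkpoint to log_start. A minimal userspace model of that arithmetic (a sketch; uint64_t stands in for the kernel's sector_t):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	static uint64_t ring_add(uint64_t device_size, uint64_t start, uint64_t inc)
	{
		start += inc;
		if (start >= device_size)	/* wrap past the end of the device */
			start -= device_size;
		return start;
	}

	static uint64_t ring_distance(uint64_t device_size, uint64_t start, uint64_t end)
	{
		return end >= start ? end - start : end + device_size - start;
	}

	static bool has_free_space(uint64_t device_size, uint64_t last_checkpoint,
				   uint64_t log_start, uint64_t size)
	{
		uint64_t used = ring_distance(device_size, last_checkpoint, log_start);

		/* strict '>': the log is never allowed to fill completely */
		return device_size > used + size;
	}

	int main(void)
	{
		assert(ring_add(1000, 990, 20) == 10);		/* 1010 wraps to 10 */
		assert(ring_distance(1000, 990, 10) == 20);	/* distance across the wrap */
		assert(has_free_space(1000, 990, 10, 900));
		assert(!has_free_space(1000, 990, 10, 985));	/* 20 + 985 >= 1000 */
		return 0;
	}
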
153 static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io) in r5l_free_io_unit() argument
156 kmem_cache_free(log->io_kc, io); in r5l_free_io_unit()
193 static void r5l_log_run_stripes(struct r5l_log *log) in r5l_log_run_stripes() argument
197 assert_spin_locked(&log->io_list_lock); in r5l_log_run_stripes()
199 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) { in r5l_log_run_stripes()
204 list_move_tail(&io->log_sibling, &log->finished_ios); in r5l_log_run_stripes()
212 struct r5l_log *log = io->log; in r5l_log_endio() local
216 md_error(log->rdev->mddev, log->rdev); in r5l_log_endio()
220 spin_lock_irqsave(&log->io_list_lock, flags); in r5l_log_endio()
222 if (log->need_cache_flush) in r5l_log_endio()
223 r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios, in r5l_log_endio()
226 r5l_log_run_stripes(log); in r5l_log_endio()
227 spin_unlock_irqrestore(&log->io_list_lock, flags); in r5l_log_endio()
229 if (log->need_cache_flush) in r5l_log_endio()
230 md_wakeup_thread(log->rdev->mddev->thread); in r5l_log_endio()
233 static void r5l_submit_current_io(struct r5l_log *log) in r5l_submit_current_io() argument
235 struct r5l_io_unit *io = log->current_io; in r5l_submit_current_io()
245 crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE); in r5l_submit_current_io()
248 log->current_io = NULL; in r5l_submit_current_io()
249 spin_lock_irqsave(&log->io_list_lock, flags); in r5l_submit_current_io()
251 spin_unlock_irqrestore(&log->io_list_lock, flags); in r5l_submit_current_io()
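
Before submission, r5l_submit_current_io() seals the meta page with a crc32c over the full 4 KiB, seeded with log->uuid_checksum (itself a crc32c of the array UUID, computed in r5l_init_log() below). A bit-at-a-time sketch with the same chaining behaviour; the kernel's crc32c_le() is table- or instruction-accelerated, and (an assumption worth checking against lib/crc32.c) applies no final XOR, so seeds chain exactly as shown:

	#include <stddef.h>
	#include <stdint.h>

	/* CRC-32C (Castagnoli), reflected polynomial 0x82F63B78.
	 * No pre/post inversion: the caller supplies the seed, as in
	 * crc32c_le(log->uuid_checksum, block, PAGE_SIZE) above. */
	static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
	{
		const uint8_t *p = buf;

		while (len--) {
			crc ^= *p++;
			for (int i = 0; i < 8; i++)
				crc = (crc >> 1) ^ (crc & 1 ? 0x82F63B78u : 0);
		}
		return crc;
	}

Seeding every block's checksum with the UUID checksum ties the journal to this particular array: blocks left over from some other array fail verification even if they are internally consistent.
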
256 static struct bio *r5l_bio_alloc(struct r5l_log *log) in r5l_bio_alloc() argument
261 bio->bi_bdev = log->rdev->bdev; in r5l_bio_alloc()
262 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; in r5l_bio_alloc()
267 static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io) in r5_reserve_log_entry() argument
269 log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS); in r5_reserve_log_entry()
278 if (log->log_start == 0) in r5_reserve_log_entry()
281 io->log_end = log->log_start; in r5_reserve_log_entry()
284 static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log) in r5l_new_meta() argument
290 io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL); in r5l_new_meta()
291 io->log = log; in r5l_new_meta()
300 block->seq = cpu_to_le64(log->seq); in r5l_new_meta()
301 block->position = cpu_to_le64(log->log_start); in r5l_new_meta()
303 io->log_start = log->log_start; in r5l_new_meta()
305 io->seq = log->seq++; in r5l_new_meta()
307 io->current_bio = r5l_bio_alloc(log); in r5l_new_meta()
312 r5_reserve_log_entry(log, io); in r5l_new_meta()
314 spin_lock_irq(&log->io_list_lock); in r5l_new_meta()
315 list_add_tail(&io->log_sibling, &log->running_ios); in r5l_new_meta()
316 spin_unlock_irq(&log->io_list_lock); in r5l_new_meta()
321 static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size) in r5l_get_meta() argument
323 if (log->current_io && in r5l_get_meta()
324 log->current_io->meta_offset + payload_size > PAGE_SIZE) in r5l_get_meta()
325 r5l_submit_current_io(log); in r5l_get_meta()
327 if (!log->current_io) in r5l_get_meta()
328 log->current_io = r5l_new_meta(log); in r5l_get_meta()
332 static void r5l_append_payload_meta(struct r5l_log *log, u16 type, in r5l_append_payload_meta() argument
337 struct r5l_io_unit *io = log->current_io; in r5l_append_payload_meta()
354 static void r5l_append_payload_page(struct r5l_log *log, struct page *page) in r5l_append_payload_page() argument
356 struct r5l_io_unit *io = log->current_io; in r5l_append_payload_page()
361 io->current_bio = r5l_bio_alloc(log); in r5l_append_payload_page()
370 r5_reserve_log_entry(log, io); in r5l_append_payload_page()
373 static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, in r5l_log_stripe() argument
386 r5l_get_meta(log, meta_size); in r5l_log_stripe()
387 io = log->current_io; in r5l_log_stripe()
394 r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA, in r5l_log_stripe()
397 r5l_append_payload_page(log, sh->dev[i].page); in r5l_log_stripe()
401 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, in r5l_log_stripe()
404 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); in r5l_log_stripe()
405 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page); in r5l_log_stripe()
407 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, in r5l_log_stripe()
410 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); in r5l_log_stripe()
418 static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
423 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) in r5l_write_stripe() argument
431 if (!log) in r5l_write_stripe()
451 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, in r5l_write_stripe()
475 mutex_lock(&log->io_mutex); in r5l_write_stripe()
478 if (r5l_has_free_space(log, reserve)) in r5l_write_stripe()
479 r5l_log_stripe(log, sh, data_pages, parity_pages); in r5l_write_stripe()
481 spin_lock(&log->no_space_stripes_lock); in r5l_write_stripe()
482 list_add_tail(&sh->log_list, &log->no_space_stripes); in r5l_write_stripe()
483 spin_unlock(&log->no_space_stripes_lock); in r5l_write_stripe()
485 r5l_wake_reclaim(log, reserve); in r5l_write_stripe()
487 mutex_unlock(&log->io_mutex); in r5l_write_stripe()
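
In this era of the code, the space reserved per stripe is one meta block plus one page for every device being written, converted to 512-byte sectors: reserve = (1 + write_disks) << (PAGE_SHIFT - 9). That formula comes from the surrounding kernel source rather than the lines above, so treat it as an assumption. A worked example for a RAID6 full-stripe write, assuming 4 KiB pages:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define SECTOR_SHIFT	9

	int main(void)
	{
		int write_disks = 8;	/* 8-device RAID6: 6 data + P + Q */
		int parity_pages = 2;	/* qd_idx >= 0, so both P and Q are logged */
		int data_pages = write_disks - parity_pages;
		int reserve = (1 + write_disks) << (PAGE_SHIFT - SECTOR_SHIFT);

		/* (1 meta + 8 pages) * 8 sectors per page = 72 sectors */
		printf("data_pages=%d parity_pages=%d reserve=%d\n",
		       data_pages, parity_pages, reserve);
		return 0;
	}

When the reservation cannot be satisfied, the stripe parks on no_space_stripes and r5l_wake_reclaim() asks for at least that many sectors; r5l_run_no_space_stripes() retries the parked stripes once reclaim frees space.
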
492 void r5l_write_stripe_run(struct r5l_log *log) in r5l_write_stripe_run() argument
494 if (!log) in r5l_write_stripe_run()
496 mutex_lock(&log->io_mutex); in r5l_write_stripe_run()
497 r5l_submit_current_io(log); in r5l_write_stripe_run()
498 mutex_unlock(&log->io_mutex); in r5l_write_stripe_run()
501 int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) in r5l_handle_flush_request() argument
503 if (!log) in r5l_handle_flush_request()
520 static void r5l_run_no_space_stripes(struct r5l_log *log) in r5l_run_no_space_stripes() argument
524 spin_lock(&log->no_space_stripes_lock); in r5l_run_no_space_stripes()
525 while (!list_empty(&log->no_space_stripes)) { in r5l_run_no_space_stripes()
526 sh = list_first_entry(&log->no_space_stripes, in r5l_run_no_space_stripes()
532 spin_unlock(&log->no_space_stripes_lock); in r5l_run_no_space_stripes()
535 static sector_t r5l_reclaimable_space(struct r5l_log *log) in r5l_reclaimable_space() argument
537 return r5l_ring_distance(log, log->last_checkpoint, in r5l_reclaimable_space()
538 log->next_checkpoint); in r5l_reclaimable_space()
541 static bool r5l_complete_finished_ios(struct r5l_log *log) in r5l_complete_finished_ios() argument
546 assert_spin_locked(&log->io_list_lock); in r5l_complete_finished_ios()
548 list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) { in r5l_complete_finished_ios()
553 log->next_checkpoint = io->log_start; in r5l_complete_finished_ios()
554 log->next_cp_seq = io->seq; in r5l_complete_finished_ios()
557 r5l_free_io_unit(log, io); in r5l_complete_finished_ios()
567 struct r5l_log *log = io->log; in __r5l_stripe_write_finished() local
570 spin_lock_irqsave(&log->io_list_lock, flags); in __r5l_stripe_write_finished()
573 if (!r5l_complete_finished_ios(log)) { in __r5l_stripe_write_finished()
574 spin_unlock_irqrestore(&log->io_list_lock, flags); in __r5l_stripe_write_finished()
578 if (r5l_reclaimable_space(log) > log->max_free_space) in __r5l_stripe_write_finished()
579 r5l_wake_reclaim(log, 0); in __r5l_stripe_write_finished()
581 spin_unlock_irqrestore(&log->io_list_lock, flags); in __r5l_stripe_write_finished()
582 wake_up(&log->iounit_wait); in __r5l_stripe_write_finished()
598 struct r5l_log *log = container_of(bio, struct r5l_log, in r5l_log_flush_endio() local
604 md_error(log->rdev->mddev, log->rdev); in r5l_log_flush_endio()
606 spin_lock_irqsave(&log->io_list_lock, flags); in r5l_log_flush_endio()
607 list_for_each_entry(io, &log->flushing_ios, log_sibling) in r5l_log_flush_endio()
609 list_splice_tail_init(&log->flushing_ios, &log->finished_ios); in r5l_log_flush_endio()
610 spin_unlock_irqrestore(&log->io_list_lock, flags); in r5l_log_flush_endio()
627 void r5l_flush_stripe_to_raid(struct r5l_log *log) in r5l_flush_stripe_to_raid() argument
631 if (!log || !log->need_cache_flush) in r5l_flush_stripe_to_raid()
634 spin_lock_irq(&log->io_list_lock); in r5l_flush_stripe_to_raid()
636 if (!list_empty(&log->flushing_ios)) { in r5l_flush_stripe_to_raid()
637 spin_unlock_irq(&log->io_list_lock); in r5l_flush_stripe_to_raid()
640 list_splice_tail_init(&log->io_end_ios, &log->flushing_ios); in r5l_flush_stripe_to_raid()
641 do_flush = !list_empty(&log->flushing_ios); in r5l_flush_stripe_to_raid()
642 spin_unlock_irq(&log->io_list_lock); in r5l_flush_stripe_to_raid()
646 bio_reset(&log->flush_bio); in r5l_flush_stripe_to_raid()
647 log->flush_bio.bi_bdev = log->rdev->bdev; in r5l_flush_stripe_to_raid()
648 log->flush_bio.bi_end_io = r5l_log_flush_endio; in r5l_flush_stripe_to_raid()
649 submit_bio(WRITE_FLUSH, &log->flush_bio); in r5l_flush_stripe_to_raid()
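
Read together with r5l_log_endio() above, the list operations trace an io_unit's life cycle when the log device has a volatile write cache (need_cache_flush set):

	running_ios   -> log write completes (r5l_log_endio)
	io_end_ios    -> raid5d calls r5l_flush_stripe_to_raid(), which batches
	                 everything pending behind one FLUSH to the log device
	flushing_ios  -> r5l_log_flush_endio() splices the batch onward
	finished_ios  -> stripes are written back to the raid disks, then
	                 r5l_complete_finished_ios() frees each unit and
	                 advances next_checkpoint/next_cp_seq

Without a volatile cache there is nothing to flush, so r5l_log_endio() skips the middle stages and calls r5l_log_run_stripes() directly.
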
652 static void r5l_write_super(struct r5l_log *log, sector_t cp);
653 static void r5l_write_super_and_discard_space(struct r5l_log *log, in r5l_write_super_and_discard_space() argument
656 struct block_device *bdev = log->rdev->bdev; in r5l_write_super_and_discard_space()
659 r5l_write_super(log, end); in r5l_write_super_and_discard_space()
664 mddev = log->rdev->mddev; in r5l_write_super_and_discard_space()
674 if (!log->in_teardown) { in r5l_write_super_and_discard_space()
680 log->in_teardown); in r5l_write_super_and_discard_space()
685 if (log->in_teardown) in r5l_write_super_and_discard_space()
693 if (log->last_checkpoint < end) { in r5l_write_super_and_discard_space()
695 log->last_checkpoint + log->rdev->data_offset, in r5l_write_super_and_discard_space()
696 end - log->last_checkpoint, GFP_NOIO, 0); in r5l_write_super_and_discard_space()
699 log->last_checkpoint + log->rdev->data_offset, in r5l_write_super_and_discard_space()
700 log->device_size - log->last_checkpoint, in r5l_write_super_and_discard_space()
702 blkdev_issue_discard(bdev, log->rdev->data_offset, end, in r5l_write_super_and_discard_space()
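
The discard covers the region between the old checkpoint and the new one, which may wrap around the end of the device and so need two ranges (in the kernel both are shifted by rdev->data_offset). A userspace model of just the range computation:

	#include <stdint.h>
	#include <stdio.h>

	static void discard(uint64_t start, uint64_t len)
	{
		printf("discard [%llu, +%llu)\n",
		       (unsigned long long)start, (unsigned long long)len);
	}

	static void discard_space(uint64_t device_size, uint64_t last_checkpoint,
				  uint64_t end)
	{
		if (last_checkpoint < end) {
			discard(last_checkpoint, end - last_checkpoint);
		} else {
			/* wrapped: tail of the device, then the head up to end */
			discard(last_checkpoint, device_size - last_checkpoint);
			discard(0, end);
		}
	}

	int main(void)
	{
		discard_space(1000, 200, 700);	/* one range */
		discard_space(1000, 700, 200);	/* two ranges across the wrap */
		return 0;
	}

Note the ordering above: the superblock is rewritten to point at the new checkpoint, and that write is waited for, before any sector is discarded, so a crash in between can only mean some already-applied log gets replayed again, never that needed data was trimmed.
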
708 static void r5l_do_reclaim(struct r5l_log *log) in r5l_do_reclaim() argument
710 sector_t reclaim_target = xchg(&log->reclaim_target, 0); in r5l_do_reclaim()
715 spin_lock_irq(&log->io_list_lock); in r5l_do_reclaim()
722 reclaimable = r5l_reclaimable_space(log); in r5l_do_reclaim()
724 (list_empty(&log->running_ios) && in r5l_do_reclaim()
725 list_empty(&log->io_end_ios) && in r5l_do_reclaim()
726 list_empty(&log->flushing_ios) && in r5l_do_reclaim()
727 list_empty(&log->finished_ios))) in r5l_do_reclaim()
730 md_wakeup_thread(log->rdev->mddev->thread); in r5l_do_reclaim()
731 wait_event_lock_irq(log->iounit_wait, in r5l_do_reclaim()
732 r5l_reclaimable_space(log) > reclaimable, in r5l_do_reclaim()
733 log->io_list_lock); in r5l_do_reclaim()
736 next_checkpoint = log->next_checkpoint; in r5l_do_reclaim()
737 next_cp_seq = log->next_cp_seq; in r5l_do_reclaim()
738 spin_unlock_irq(&log->io_list_lock); in r5l_do_reclaim()
749 r5l_write_super_and_discard_space(log, next_checkpoint); in r5l_do_reclaim()
751 mutex_lock(&log->io_mutex); in r5l_do_reclaim()
752 log->last_checkpoint = next_checkpoint; in r5l_do_reclaim()
753 log->last_cp_seq = next_cp_seq; in r5l_do_reclaim()
754 mutex_unlock(&log->io_mutex); in r5l_do_reclaim()
756 r5l_run_no_space_stripes(log); in r5l_do_reclaim()
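
In outline, r5l_do_reclaim(): take the pending reclaim target; under io_list_lock, wait on iounit_wait until either enough space has become reclaimable (the checkpoint has moved far enough) or all four io lists are empty; snapshot next_checkpoint/next_cp_seq and drop the lock; write the superblock and discard the freed region; only then publish the new last_checkpoint/last_cp_seq under io_mutex and retry the stripes parked on no_space_stripes.
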
763 struct r5l_log *log = conf->log; in r5l_reclaim_thread() local
765 if (!log) in r5l_reclaim_thread()
767 r5l_do_reclaim(log); in r5l_reclaim_thread()
770 static void r5l_wake_reclaim(struct r5l_log *log, sector_t space) in r5l_wake_reclaim() argument
776 target = log->reclaim_target; in r5l_wake_reclaim()
779 } while (cmpxchg(&log->reclaim_target, target, new) != target); in r5l_wake_reclaim()
780 md_wakeup_thread(log->reclaim_thread); in r5l_wake_reclaim()
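
The cmpxchg loop implements a lock-free "sticky maximum": concurrent wakers race to raise reclaim_target, and a smaller request never overwrites a larger pending one (r5l_quiesce() passes -1L, the largest sector_t, to force a full reclaim). A C11-atomics model of the pattern:

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uint64_t reclaim_target;

	/* Returns 1 if the caller should wake the reclaim thread. */
	static int wake_reclaim(uint64_t space)
	{
		uint64_t target = atomic_load(&reclaim_target);

		do {
			if (space < target)
				return 0;	/* pending request already covers us */
			/* on failure the CAS refreshes 'target' and we re-check */
		} while (!atomic_compare_exchange_weak(&reclaim_target,
						       &target, space));
		return 1;
	}
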
783 void r5l_quiesce(struct r5l_log *log, int state) in r5l_quiesce() argument
786 if (!log || state == 2) in r5l_quiesce()
789 log->in_teardown = 0; in r5l_quiesce()
790 log->reclaim_thread = md_register_thread(r5l_reclaim_thread, in r5l_quiesce()
791 log->rdev->mddev, "reclaim"); in r5l_quiesce()
797 log->in_teardown = 1; in r5l_quiesce()
799 mddev = log->rdev->mddev; in r5l_quiesce()
801 r5l_wake_reclaim(log, -1L); in r5l_quiesce()
802 md_unregister_thread(&log->reclaim_thread); in r5l_quiesce()
803 r5l_do_reclaim(log); in r5l_quiesce()
810 if (!conf->log) in r5l_log_disk_error()
812 return test_bit(Faulty, &conf->log->rdev->flags); in r5l_log_disk_error()
822 static int r5l_read_meta_block(struct r5l_log *log, in r5l_read_meta_block() argument
829 if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false)) in r5l_read_meta_block()
842 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); in r5l_read_meta_block()
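
Recovery trusts a meta block only after re-deriving its checksum: the stored value is read out, the checksum field zeroed, and crc32c (seeded with the UUID checksum, as in the sketch after r5l_submit_current_io()) recomputed over the whole 4 KiB page. A sketch of that check; cksum_off stands in for offsetof(struct r5l_meta_block, checksum) so the exact on-disk layout is not reproduced here:

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	uint32_t crc32c(uint32_t crc, const void *buf, size_t len);	/* as sketched earlier */

	/* Assumes a little-endian host; the kernel goes through le32_to_cpu(). */
	static bool meta_block_valid(void *block, size_t cksum_off,
				     uint32_t uuid_checksum)
	{
		uint32_t stored, zero = 0;

		memcpy(&stored, (char *)block + cksum_off, sizeof(stored));
		memcpy((char *)block + cksum_off, &zero, sizeof(zero));
		return stored == crc32c(uuid_checksum, block, 4096);
	}
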
854 static int r5l_recovery_flush_one_stripe(struct r5l_log *log, in r5l_recovery_flush_one_stripe() argument
859 struct r5conf *conf = log->rdev->mddev->private; in r5l_recovery_flush_one_stripe()
873 sync_page_io(log->rdev, *log_offset, PAGE_SIZE, in r5l_recovery_flush_one_stripe()
881 sync_page_io(log->rdev, *log_offset, PAGE_SIZE, in r5l_recovery_flush_one_stripe()
889 sync_page_io(log->rdev, in r5l_recovery_flush_one_stripe()
890 r5l_ring_add(log, *log_offset, BLOCK_SECTORS), in r5l_recovery_flush_one_stripe()
901 *log_offset = r5l_ring_add(log, *log_offset, in r5l_recovery_flush_one_stripe()
917 checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE); in r5l_recovery_flush_one_stripe()
950 static int r5l_recovery_flush_one_meta(struct r5l_log *log, in r5l_recovery_flush_one_meta() argument
953 struct r5conf *conf = log->rdev->mddev->private; in r5l_recovery_flush_one_meta()
962 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); in r5l_recovery_flush_one_meta()
970 if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector, in r5l_recovery_flush_one_meta()
978 static void r5l_recovery_flush_log(struct r5l_log *log, in r5l_recovery_flush_log() argument
982 if (r5l_read_meta_block(log, ctx)) in r5l_recovery_flush_log()
984 if (r5l_recovery_flush_one_meta(log, ctx)) in r5l_recovery_flush_log()
987 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks); in r5l_recovery_flush_log()
991 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, in r5l_log_write_empty_meta_block() argument
1007 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); in r5l_log_write_empty_meta_block()
1010 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) { in r5l_log_write_empty_meta_block()
1018 static int r5l_recovery_log(struct r5l_log *log) in r5l_recovery_log() argument
1022 ctx.pos = log->last_checkpoint; in r5l_recovery_log()
1023 ctx.seq = log->last_cp_seq; in r5l_recovery_log()
1028 r5l_recovery_flush_log(log, &ctx); in r5l_recovery_log()
1044 if (ctx.seq > log->last_cp_seq + 1) { in r5l_recovery_log()
1047 ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10); in r5l_recovery_log()
1050 log->seq = ctx.seq + 11; in r5l_recovery_log()
1051 log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS); in r5l_recovery_log()
1052 r5l_write_super(log, ctx.pos); in r5l_recovery_log()
1054 log->log_start = ctx.pos; in r5l_recovery_log()
1055 log->seq = ctx.seq; in r5l_recovery_log()
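
The seq arithmetic at the end of r5l_recovery_log() is deliberate: if ctx.seq advanced past last_cp_seq + 1, data was replayed, and the tail of the log must be fenced off. Writing the empty meta block with seq = ctx.seq + 10 (rather than the next consecutive number) guards against a stale but intact block sitting just beyond ctx.pos: after another crash, recovery re-walks the log, reaches the seal, and stops there, since any pre-crash block after it carries a far smaller sequence number and fails the continuity check. The runtime counter then resumes at ctx.seq + 11, with log_start one BLOCK_SECTORS past the seal.
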
1060 static void r5l_write_super(struct r5l_log *log, sector_t cp) in r5l_write_super() argument
1062 struct mddev *mddev = log->rdev->mddev; in r5l_write_super()
1064 log->rdev->journal_tail = cp; in r5l_write_super()
1068 static int r5l_load_log(struct r5l_log *log) in r5l_load_log() argument
1070 struct md_rdev *rdev = log->rdev; in r5l_load_log()
1073 sector_t cp = log->rdev->journal_tail; in r5l_load_log()
1098 expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); in r5l_load_log()
1109 log->last_cp_seq = prandom_u32(); in r5l_load_log()
1116 r5l_write_super(log, cp); in r5l_load_log()
1118 log->last_cp_seq = le64_to_cpu(mb->seq); in r5l_load_log()
1120 log->device_size = round_down(rdev->sectors, BLOCK_SECTORS); in r5l_load_log()
1121 log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT; in r5l_load_log()
1122 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) in r5l_load_log()
1123 log->max_free_space = RECLAIM_MAX_FREE_SPACE; in r5l_load_log()
1124 log->last_checkpoint = cp; in r5l_load_log()
1128 return r5l_recovery_log(log); in r5l_load_log()
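
The last lines of r5l_load_log() size the reclaim watermark: a quarter of the block-aligned journal, capped at a fixed ceiling. A worked example, assuming this era's constants RECLAIM_MAX_FREE_SPACE_SHIFT == 2 and RECLAIM_MAX_FREE_SPACE == 10 GiB expressed in sectors (check the defines near the top of the file):

	#include <stdint.h>
	#include <stdio.h>

	#define BLOCK_SECTORS			8	/* 4 KiB blocks, 512 B sectors */
	#define RECLAIM_MAX_FREE_SPACE		(10 * 1024 * 1024 * 2)	/* sectors */
	#define RECLAIM_MAX_FREE_SPACE_SHIFT	2

	int main(void)
	{
		uint64_t rdev_sectors = 8ULL * 1024 * 1024 * 2;	/* 8 GiB journal */
		uint64_t device_size = rdev_sectors / BLOCK_SECTORS * BLOCK_SECTORS;
		uint64_t max_free_space = device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;

		if (max_free_space > RECLAIM_MAX_FREE_SPACE)
			max_free_space = RECLAIM_MAX_FREE_SPACE;

		/* 8 GiB journal: reclaim is woken once 2 GiB is reclaimable */
		printf("device_size=%llu max_free_space=%llu (sectors)\n",
		       (unsigned long long)device_size,
		       (unsigned long long)max_free_space);
		return 0;
	}

__r5l_stripe_write_finished() uses this watermark: once the reclaimable span exceeds max_free_space, the reclaim thread is woken proactively, keeping the checkpoint from lagging too far behind the log head.
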
1136 struct r5l_log *log; in r5l_init_log() local
1140 log = kzalloc(sizeof(*log), GFP_KERNEL); in r5l_init_log()
1141 if (!log) in r5l_init_log()
1143 log->rdev = rdev; in r5l_init_log()
1145 log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0); in r5l_init_log()
1147 log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid, in r5l_init_log()
1150 mutex_init(&log->io_mutex); in r5l_init_log()
1152 spin_lock_init(&log->io_list_lock); in r5l_init_log()
1153 INIT_LIST_HEAD(&log->running_ios); in r5l_init_log()
1154 INIT_LIST_HEAD(&log->io_end_ios); in r5l_init_log()
1155 INIT_LIST_HEAD(&log->flushing_ios); in r5l_init_log()
1156 INIT_LIST_HEAD(&log->finished_ios); in r5l_init_log()
1157 bio_init(&log->flush_bio); in r5l_init_log()
1159 log->io_kc = KMEM_CACHE(r5l_io_unit, 0); in r5l_init_log()
1160 if (!log->io_kc) in r5l_init_log()
1163 log->reclaim_thread = md_register_thread(r5l_reclaim_thread, in r5l_init_log()
1164 log->rdev->mddev, "reclaim"); in r5l_init_log()
1165 if (!log->reclaim_thread) in r5l_init_log()
1167 init_waitqueue_head(&log->iounit_wait); in r5l_init_log()
1169 INIT_LIST_HEAD(&log->no_space_stripes); in r5l_init_log()
1170 spin_lock_init(&log->no_space_stripes_lock); in r5l_init_log()
1172 if (r5l_load_log(log)) in r5l_init_log()
1175 conf->log = log; in r5l_init_log()
1178 md_unregister_thread(&log->reclaim_thread); in r5l_init_log()
1180 kmem_cache_destroy(log->io_kc); in r5l_init_log()
1182 kfree(log); in r5l_init_log()
1186 void r5l_exit_log(struct r5l_log *log) in r5l_exit_log() argument
1188 md_unregister_thread(&log->reclaim_thread); in r5l_exit_log()
1189 kmem_cache_destroy(log->io_kc); in r5l_exit_log()
1190 kfree(log); in r5l_exit_log()