Lines Matching refs:cache
219 struct cache { struct
349 struct cache *cache; member
356 struct cache *cache; member
386 static enum cache_metadata_mode get_cache_mode(struct cache *cache);
388 static void wake_worker(struct cache *cache) in wake_worker() argument
390 queue_work(cache->wq, &cache->worker); in wake_worker()
395 static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache) in alloc_prison_cell() argument
398 return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT); in alloc_prison_cell()
401 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell) in free_prison_cell() argument
403 dm_bio_prison_free_cell(cache->prison, cell); in free_prison_cell()
406 static struct dm_cache_migration *alloc_migration(struct cache *cache) in alloc_migration() argument
410 mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); in alloc_migration()
412 mg->cache = cache; in alloc_migration()
413 atomic_inc(&mg->cache->nr_allocated_migrations); in alloc_migration()
421 struct cache *cache = mg->cache; in free_migration() local
423 if (atomic_dec_and_test(&cache->nr_allocated_migrations)) in free_migration()
424 wake_up(&cache->migration_wait); in free_migration()
426 mempool_free(mg, cache->migration_pool); in free_migration()
429 static int prealloc_data_structs(struct cache *cache, struct prealloc *p) in prealloc_data_structs() argument
432 p->mg = alloc_migration(cache); in prealloc_data_structs()
438 p->cell1 = alloc_prison_cell(cache); in prealloc_data_structs()
444 p->cell2 = alloc_prison_cell(cache); in prealloc_data_structs()
452 static void prealloc_free_structs(struct cache *cache, struct prealloc *p) in prealloc_free_structs() argument
455 free_prison_cell(cache, p->cell2); in prealloc_free_structs()
458 free_prison_cell(cache, p->cell1); in prealloc_free_structs()
528 static int bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end, in bio_detain_range() argument
537 r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result); in bio_detain_range()
544 static int bio_detain(struct cache *cache, dm_oblock_t oblock, in bio_detain() argument
550 return bio_detain_range(cache, oblock, end, bio, in bio_detain()
554 static int get_cell(struct cache *cache, in get_cell() argument
566 r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result); in get_cell()
575 static bool is_dirty(struct cache *cache, dm_cblock_t b) in is_dirty() argument
577 return test_bit(from_cblock(b), cache->dirty_bitset); in is_dirty()
580 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) in set_dirty() argument
582 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { in set_dirty()
583 atomic_inc(&cache->nr_dirty); in set_dirty()
584 policy_set_dirty(cache->policy, oblock); in set_dirty()
588 static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) in clear_dirty() argument
590 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { in clear_dirty()
591 policy_clear_dirty(cache->policy, oblock); in clear_dirty()
592 if (atomic_dec_return(&cache->nr_dirty) == 0) in clear_dirty()
593 dm_table_event(cache->ti->table); in clear_dirty()
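
The set_dirty()/clear_dirty() entries above show the dirty-block bookkeeping: one bit per cache block plus a counter that only moves on real 0 <-> 1 transitions, so the target can signal (via a table event) the moment the whole cache becomes clean. A minimal single-threaded userspace sketch of that pattern follows; the kernel uses atomic bitops and an atomic counter, and the type and helper names here are simplified assumptions, not the kernel's.

/* Sketch: dirty bitset plus a counter that counts only genuine transitions. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

struct dirty_state {
        unsigned long bits[4];          /* one bit per cache block (demo-sized) */
        unsigned long nr_dirty;
};

static bool test_and_set(unsigned long *bits, unsigned b)
{
        unsigned long mask = 1UL << (b % BITS_PER_LONG);
        bool old = (bits[b / BITS_PER_LONG] & mask) != 0;

        bits[b / BITS_PER_LONG] |= mask;
        return old;
}

static bool test_and_clear(unsigned long *bits, unsigned b)
{
        unsigned long mask = 1UL << (b % BITS_PER_LONG);
        bool old = (bits[b / BITS_PER_LONG] & mask) != 0;

        bits[b / BITS_PER_LONG] &= ~mask;
        return old;
}

static void set_dirty(struct dirty_state *s, unsigned cblock)
{
        if (!test_and_set(s->bits, cblock))
                s->nr_dirty++;                   /* count only 0 -> 1 */
}

static void clear_dirty(struct dirty_state *s, unsigned cblock)
{
        if (test_and_clear(s->bits, cblock) && --s->nr_dirty == 0)
                printf("cache is clean\n");      /* kernel: dm_table_event() */
}

int main(void)
{
        struct dirty_state s = { {0}, 0 };

        set_dirty(&s, 3);
        set_dirty(&s, 3);        /* repeated set does not bump the counter */
        clear_dirty(&s, 3);      /* back to zero -> "clean" notification */
        return 0;
}
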
599 static bool block_size_is_power_of_two(struct cache *cache) in block_size_is_power_of_two() argument
601 return cache->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
615 static dm_block_t oblocks_per_dblock(struct cache *cache) in oblocks_per_dblock() argument
617 dm_block_t oblocks = cache->discard_block_size; in oblocks_per_dblock()
619 if (block_size_is_power_of_two(cache)) in oblocks_per_dblock()
620 oblocks >>= cache->sectors_per_block_shift; in oblocks_per_dblock()
622 oblocks = block_div(oblocks, cache->sectors_per_block); in oblocks_per_dblock()
627 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) in oblock_to_dblock() argument
630 oblocks_per_dblock(cache))); in oblock_to_dblock()
633 static dm_oblock_t dblock_to_oblock(struct cache *cache, dm_dblock_t dblock) in dblock_to_oblock() argument
635 return to_oblock(from_dblock(dblock) * oblocks_per_dblock(cache)); in dblock_to_oblock()
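
The conversion helpers above map origin blocks onto coarser discard blocks; the divide collapses to a shift when the origin block size is a power of two. A small sketch of that arithmetic under those assumptions, with fixed-width integers standing in for the dm_* block types and illustrative field names:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct geometry {
        uint64_t sectors_per_block;       /* origin/cache block size, in sectors */
        int sectors_per_block_shift;      /* >= 0 only when the above is 2^shift */
        uint64_t discard_block_size;      /* discard block size, in sectors */
};

static bool block_size_is_power_of_two(const struct geometry *g)
{
        return g->sectors_per_block_shift >= 0;
}

static uint64_t oblocks_per_dblock(const struct geometry *g)
{
        uint64_t oblocks = g->discard_block_size;

        if (block_size_is_power_of_two(g))
                oblocks >>= g->sectors_per_block_shift;
        else
                oblocks /= g->sectors_per_block;  /* kernel uses block_div() */

        return oblocks;
}

static uint64_t oblock_to_dblock(const struct geometry *g, uint64_t oblock)
{
        return oblock / oblocks_per_dblock(g);
}

static uint64_t dblock_to_oblock(const struct geometry *g, uint64_t dblock)
{
        return dblock * oblocks_per_dblock(g);
}

int main(void)
{
        /* 64KiB blocks (128 sectors, shift 7), 512KiB discard blocks. */
        struct geometry g = { 128, 7, 1024 };

        assert(oblocks_per_dblock(&g) == 8);
        assert(oblock_to_dblock(&g, 19) == 2);
        assert(dblock_to_oblock(&g, 2) == 16);
        printf("8 origin blocks per discard block\n");
        return 0;
}
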
638 static void set_discard(struct cache *cache, dm_dblock_t b) in set_discard() argument
642 BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks)); in set_discard()
643 atomic_inc(&cache->stats.discard_count); in set_discard()
645 spin_lock_irqsave(&cache->lock, flags); in set_discard()
646 set_bit(from_dblock(b), cache->discard_bitset); in set_discard()
647 spin_unlock_irqrestore(&cache->lock, flags); in set_discard()
650 static void clear_discard(struct cache *cache, dm_dblock_t b) in clear_discard() argument
654 spin_lock_irqsave(&cache->lock, flags); in clear_discard()
655 clear_bit(from_dblock(b), cache->discard_bitset); in clear_discard()
656 spin_unlock_irqrestore(&cache->lock, flags); in clear_discard()
659 static bool is_discarded(struct cache *cache, dm_dblock_t b) in is_discarded() argument
664 spin_lock_irqsave(&cache->lock, flags); in is_discarded()
665 r = test_bit(from_dblock(b), cache->discard_bitset); in is_discarded()
666 spin_unlock_irqrestore(&cache->lock, flags); in is_discarded()
671 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) in is_discarded_oblock() argument
676 spin_lock_irqsave(&cache->lock, flags); in is_discarded_oblock()
677 r = test_bit(from_dblock(oblock_to_dblock(cache, b)), in is_discarded_oblock()
678 cache->discard_bitset); in is_discarded_oblock()
679 spin_unlock_irqrestore(&cache->lock, flags); in is_discarded_oblock()
686 static void load_stats(struct cache *cache) in load_stats() argument
690 dm_cache_metadata_get_stats(cache->cmd, &stats); in load_stats()
691 atomic_set(&cache->stats.read_hit, stats.read_hits); in load_stats()
692 atomic_set(&cache->stats.read_miss, stats.read_misses); in load_stats()
693 atomic_set(&cache->stats.write_hit, stats.write_hits); in load_stats()
694 atomic_set(&cache->stats.write_miss, stats.write_misses); in load_stats()
697 static void save_stats(struct cache *cache) in save_stats() argument
701 if (get_cache_mode(cache) >= CM_READ_ONLY) in save_stats()
704 stats.read_hits = atomic_read(&cache->stats.read_hit); in save_stats()
705 stats.read_misses = atomic_read(&cache->stats.read_miss); in save_stats()
706 stats.write_hits = atomic_read(&cache->stats.write_hit); in save_stats()
707 stats.write_misses = atomic_read(&cache->stats.write_miss); in save_stats()
709 dm_cache_metadata_set_stats(cache->cmd, &stats); in save_stats()
719 #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
737 static size_t get_per_bio_data_size(struct cache *cache) in get_per_bio_data_size() argument
739 return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; in get_per_bio_data_size()
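
get_per_bio_data_size() above relies on the writethrough-only fields sitting at the end of struct per_bio_data, so offsetof() on the first of them (PB_DATA_SIZE_WB) yields the smaller footprint needed by the other modes. A sketch of the trick with an assumed, simplified struct layout (not the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct per_bio_data {
        bool tick;
        unsigned req_nr;
        /* --- fields below are only needed in writethrough mode --- */
        void *cache;
        unsigned cblock;
};

#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))

static size_t get_per_bio_data_size(bool writethrough)
{
        return writethrough ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}

int main(void)
{
        printf("writeback/passthrough: %zu bytes, writethrough: %zu bytes\n",
               get_per_bio_data_size(false), get_per_bio_data_size(true));
        return 0;
}
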
764 static void remap_to_origin(struct cache *cache, struct bio *bio) in remap_to_origin() argument
766 bio->bi_bdev = cache->origin_dev->bdev; in remap_to_origin()
769 static void remap_to_cache(struct cache *cache, struct bio *bio, in remap_to_cache() argument
775 bio->bi_bdev = cache->cache_dev->bdev; in remap_to_cache()
776 if (!block_size_is_power_of_two(cache)) in remap_to_cache()
778 (block * cache->sectors_per_block) + in remap_to_cache()
779 sector_div(bi_sector, cache->sectors_per_block); in remap_to_cache()
782 (block << cache->sectors_per_block_shift) | in remap_to_cache()
783 (bi_sector & (cache->sectors_per_block - 1)); in remap_to_cache()
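
remap_to_cache() above rewrites the bio's sector so that the offset within the block is preserved while the block itself moves to its slot on the cache device: shift-and-mask when the block size is a power of two, a division otherwise. A hedged userspace sketch of just that arithmetic, with illustrative parameter names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t remap_sector(uint64_t bi_sector, uint64_t cblock,
                             uint64_t sectors_per_block,
                             int sectors_per_block_shift)
{
        if (sectors_per_block_shift < 0)          /* not a power of two */
                return cblock * sectors_per_block +
                       bi_sector % sectors_per_block;

        return (cblock << sectors_per_block_shift) |
               (bi_sector & (sectors_per_block - 1));
}

int main(void)
{
        /* 64KiB blocks (128 sectors): sector 300 sits 44 sectors into its block. */
        assert(remap_sector(300, 5, 128, 7) == 5 * 128 + 44);
        /* The non-power-of-two path gives the same answer by division. */
        assert(remap_sector(300, 5, 128, -1) == 5 * 128 + 44);
        printf("remapped ok\n");
        return 0;
}
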
786 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) in check_if_tick_bio_needed() argument
789 size_t pb_data_size = get_per_bio_data_size(cache); in check_if_tick_bio_needed()
792 spin_lock_irqsave(&cache->lock, flags); in check_if_tick_bio_needed()
793 if (cache->need_tick_bio && in check_if_tick_bio_needed()
796 cache->need_tick_bio = false; in check_if_tick_bio_needed()
798 spin_unlock_irqrestore(&cache->lock, flags); in check_if_tick_bio_needed()
801 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, in remap_to_origin_clear_discard() argument
804 check_if_tick_bio_needed(cache, bio); in remap_to_origin_clear_discard()
805 remap_to_origin(cache, bio); in remap_to_origin_clear_discard()
807 clear_discard(cache, oblock_to_dblock(cache, oblock)); in remap_to_origin_clear_discard()
810 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, in remap_to_cache_dirty() argument
813 check_if_tick_bio_needed(cache, bio); in remap_to_cache_dirty()
814 remap_to_cache(cache, bio, cblock); in remap_to_cache_dirty()
816 set_dirty(cache, oblock, cblock); in remap_to_cache_dirty()
817 clear_discard(cache, oblock_to_dblock(cache, oblock)); in remap_to_cache_dirty()
821 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) in get_bio_block() argument
825 if (!block_size_is_power_of_two(cache)) in get_bio_block()
826 (void) sector_div(block_nr, cache->sectors_per_block); in get_bio_block()
828 block_nr >>= cache->sectors_per_block_shift; in get_bio_block()
833 static int bio_triggers_commit(struct cache *cache, struct bio *bio) in bio_triggers_commit() argument
842 static void inc_ds(struct cache *cache, struct bio *bio, in inc_ds() argument
845 size_t pb_data_size = get_per_bio_data_size(cache); in inc_ds()
851 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); in inc_ds()
854 static bool accountable_bio(struct cache *cache, struct bio *bio) in accountable_bio() argument
856 return ((bio->bi_bdev == cache->origin_dev->bdev) && in accountable_bio()
860 static void accounted_begin(struct cache *cache, struct bio *bio) in accounted_begin() argument
862 size_t pb_data_size = get_per_bio_data_size(cache); in accounted_begin()
865 if (accountable_bio(cache, bio)) { in accounted_begin()
867 iot_io_begin(&cache->origin_tracker, pb->len); in accounted_begin()
871 static void accounted_complete(struct cache *cache, struct bio *bio) in accounted_complete() argument
873 size_t pb_data_size = get_per_bio_data_size(cache); in accounted_complete()
876 iot_io_end(&cache->origin_tracker, pb->len); in accounted_complete()
879 static void accounted_request(struct cache *cache, struct bio *bio) in accounted_request() argument
881 accounted_begin(cache, bio); in accounted_request()
885 static void issue(struct cache *cache, struct bio *bio) in issue() argument
889 if (!bio_triggers_commit(cache, bio)) { in issue()
890 accounted_request(cache, bio); in issue()
898 spin_lock_irqsave(&cache->lock, flags); in issue()
899 cache->commit_requested = true; in issue()
900 bio_list_add(&cache->deferred_flush_bios, bio); in issue()
901 spin_unlock_irqrestore(&cache->lock, flags); in issue()
904 static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell) in inc_and_issue() argument
906 inc_ds(cache, bio, cell); in inc_and_issue()
907 issue(cache, bio); in inc_and_issue()
910 static void defer_writethrough_bio(struct cache *cache, struct bio *bio) in defer_writethrough_bio() argument
914 spin_lock_irqsave(&cache->lock, flags); in defer_writethrough_bio()
915 bio_list_add(&cache->deferred_writethrough_bios, bio); in defer_writethrough_bio()
916 spin_unlock_irqrestore(&cache->lock, flags); in defer_writethrough_bio()
918 wake_worker(cache); in defer_writethrough_bio()
933 remap_to_cache(pb->cache, bio, pb->cblock); in writethrough_endio()
940 defer_writethrough_bio(pb->cache, bio); in writethrough_endio()
949 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, in remap_to_origin_then_cache() argument
954 pb->cache = cache; in remap_to_origin_then_cache()
959 remap_to_origin_clear_discard(pb->cache, bio, oblock); in remap_to_origin_then_cache()
965 static enum cache_metadata_mode get_cache_mode(struct cache *cache) in get_cache_mode() argument
967 return cache->features.mode; in get_cache_mode()
970 static const char *cache_device_name(struct cache *cache) in cache_device_name() argument
972 return dm_device_name(dm_table_get_md(cache->ti->table)); in cache_device_name()
975 static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode) in notify_mode_switch() argument
983 dm_table_event(cache->ti->table); in notify_mode_switch()
985 cache_device_name(cache), descs[(int)mode]); in notify_mode_switch()
988 static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode) in set_cache_mode() argument
991 enum cache_metadata_mode old_mode = get_cache_mode(cache); in set_cache_mode()
993 if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) { in set_cache_mode()
1000 cache_device_name(cache)); in set_cache_mode()
1014 dm_cache_metadata_set_read_only(cache->cmd); in set_cache_mode()
1018 dm_cache_metadata_set_read_write(cache->cmd); in set_cache_mode()
1022 cache->features.mode = new_mode; in set_cache_mode()
1025 notify_mode_switch(cache, new_mode); in set_cache_mode()
1028 static void abort_transaction(struct cache *cache) in abort_transaction() argument
1030 const char *dev_name = cache_device_name(cache); in abort_transaction()
1032 if (get_cache_mode(cache) >= CM_READ_ONLY) in abort_transaction()
1035 if (dm_cache_metadata_set_needs_check(cache->cmd)) { in abort_transaction()
1037 set_cache_mode(cache, CM_FAIL); in abort_transaction()
1041 if (dm_cache_metadata_abort(cache->cmd)) { in abort_transaction()
1043 set_cache_mode(cache, CM_FAIL); in abort_transaction()
1047 static void metadata_operation_failed(struct cache *cache, const char *op, int r) in metadata_operation_failed() argument
1050 cache_device_name(cache), op, r); in metadata_operation_failed()
1051 abort_transaction(cache); in metadata_operation_failed()
1052 set_cache_mode(cache, CM_READ_ONLY); in metadata_operation_failed()
1061 static void inc_io_migrations(struct cache *cache) in inc_io_migrations() argument
1063 atomic_inc(&cache->nr_io_migrations); in inc_io_migrations()
1066 static void dec_io_migrations(struct cache *cache) in dec_io_migrations() argument
1068 atomic_dec(&cache->nr_io_migrations); in dec_io_migrations()
1076 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell) in __cell_defer() argument
1082 dm_cell_release(cache->prison, cell, &cache->deferred_bios); in __cell_defer()
1083 free_prison_cell(cache, cell); in __cell_defer()
1085 list_add_tail(&cell->user_list, &cache->deferred_cells); in __cell_defer()
1088 static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, bool holder) in cell_defer() argument
1092 if (!holder && dm_cell_promote_or_release(cache->prison, cell)) { in cell_defer()
1097 free_prison_cell(cache, cell); in cell_defer()
1101 spin_lock_irqsave(&cache->lock, flags); in cell_defer()
1102 __cell_defer(cache, cell); in cell_defer()
1103 spin_unlock_irqrestore(&cache->lock, flags); in cell_defer()
1105 wake_worker(cache); in cell_defer()
1108 static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err) in cell_error_with_code() argument
1110 dm_cell_error(cache->prison, cell, err); in cell_error_with_code()
1111 free_prison_cell(cache, cell); in cell_error_with_code()
1114 static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell) in cell_requeue() argument
1116 cell_error_with_code(cache, cell, DM_ENDIO_REQUEUE); in cell_requeue()
1121 struct cache *cache = mg->cache; in free_io_migration() local
1123 dec_io_migrations(cache); in free_io_migration()
1125 wake_worker(cache); in free_io_migration()
1130 struct cache *cache = mg->cache; in migration_failure() local
1131 const char *dev_name = cache_device_name(cache); in migration_failure()
1135 set_dirty(cache, mg->old_oblock, mg->cblock); in migration_failure()
1136 cell_defer(cache, mg->old_ocell, false); in migration_failure()
1140 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); in migration_failure()
1142 cell_defer(cache, mg->old_ocell, mg->promote ? false : true); in migration_failure()
1144 cell_defer(cache, mg->new_ocell, true); in migration_failure()
1147 policy_remove_mapping(cache->policy, mg->new_oblock); in migration_failure()
1148 cell_defer(cache, mg->new_ocell, true); in migration_failure()
1158 struct cache *cache = mg->cache; in migration_success_pre_commit() local
1161 clear_dirty(cache, mg->old_oblock, mg->cblock); in migration_success_pre_commit()
1162 cell_defer(cache, mg->old_ocell, false); in migration_success_pre_commit()
1167 r = dm_cache_remove_mapping(cache->cmd, mg->cblock); in migration_success_pre_commit()
1170 cache_device_name(cache)); in migration_success_pre_commit()
1171 metadata_operation_failed(cache, "dm_cache_remove_mapping", r); in migration_success_pre_commit()
1172 policy_force_mapping(cache->policy, mg->new_oblock, in migration_success_pre_commit()
1175 cell_defer(cache, mg->new_ocell, true); in migration_success_pre_commit()
1180 r = dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock); in migration_success_pre_commit()
1183 cache_device_name(cache)); in migration_success_pre_commit()
1184 metadata_operation_failed(cache, "dm_cache_insert_mapping", r); in migration_success_pre_commit()
1185 policy_remove_mapping(cache->policy, mg->new_oblock); in migration_success_pre_commit()
1191 spin_lock_irqsave(&cache->lock, flags); in migration_success_pre_commit()
1192 list_add_tail(&mg->list, &cache->need_commit_migrations); in migration_success_pre_commit()
1193 cache->commit_requested = true; in migration_success_pre_commit()
1194 spin_unlock_irqrestore(&cache->lock, flags); in migration_success_pre_commit()
1200 struct cache *cache = mg->cache; in migration_success_post_commit() local
1204 cache_device_name(cache)); in migration_success_post_commit()
1208 cell_defer(cache, mg->old_ocell, mg->promote ? false : true); in migration_success_post_commit()
1213 spin_lock_irqsave(&cache->lock, flags); in migration_success_post_commit()
1214 list_add_tail(&mg->list, &cache->quiesced_migrations); in migration_success_post_commit()
1215 spin_unlock_irqrestore(&cache->lock, flags); in migration_success_post_commit()
1219 policy_remove_mapping(cache->policy, mg->old_oblock); in migration_success_post_commit()
1225 clear_dirty(cache, mg->new_oblock, mg->cblock); in migration_success_post_commit()
1226 cell_defer(cache, mg->new_ocell, true); in migration_success_post_commit()
1231 set_dirty(cache, mg->new_oblock, mg->cblock); in migration_success_post_commit()
1233 cell_defer(cache, mg->new_ocell, false); in migration_success_post_commit()
1243 struct cache *cache = mg->cache; in copy_complete() local
1248 spin_lock_irqsave(&cache->lock, flags); in copy_complete()
1249 list_add_tail(&mg->list, &cache->completed_migrations); in copy_complete()
1250 spin_unlock_irqrestore(&cache->lock, flags); in copy_complete()
1252 wake_worker(cache); in copy_complete()
1259 struct cache *cache = mg->cache; in issue_copy() local
1262 o_region.bdev = cache->origin_dev->bdev; in issue_copy()
1263 o_region.count = cache->sectors_per_block; in issue_copy()
1265 c_region.bdev = cache->cache_dev->bdev; in issue_copy()
1266 c_region.sector = cblock * cache->sectors_per_block; in issue_copy()
1267 c_region.count = cache->sectors_per_block; in issue_copy()
1271 o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block; in issue_copy()
1272 r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg); in issue_copy()
1275 o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block; in issue_copy()
1276 r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg); in issue_copy()
1280 DMERR_LIMIT("%s: issuing migration failed", cache_device_name(cache)); in issue_copy()
1288 struct cache *cache = mg->cache; in overwrite_endio() local
1289 size_t pb_data_size = get_per_bio_data_size(cache); in overwrite_endio()
1300 spin_lock_irqsave(&cache->lock, flags); in overwrite_endio()
1301 list_add_tail(&mg->list, &cache->completed_migrations); in overwrite_endio()
1302 spin_unlock_irqrestore(&cache->lock, flags); in overwrite_endio()
1304 wake_worker(cache); in overwrite_endio()
1309 size_t pb_data_size = get_per_bio_data_size(mg->cache); in issue_overwrite()
1313 remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); in issue_overwrite()
1319 accounted_request(mg->cache, bio); in issue_overwrite()
1322 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) in bio_writes_complete_block() argument
1325 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); in bio_writes_complete_block()
1330 atomic_inc(&mg->cache->stats.copies_avoided); in avoid_copy()
1334 static void calc_discard_block_range(struct cache *cache, struct bio *bio, in calc_discard_block_range() argument
1340 *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size)); in calc_discard_block_range()
1342 if (se - sb < cache->discard_block_size) in calc_discard_block_range()
1345 *e = to_dblock(block_div(se, cache->discard_block_size)); in calc_discard_block_range()
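
calc_discard_block_range() above rounds the start of the bio up and its end down, so only discard blocks completely covered by the bio get marked, and a bio smaller than one discard block marks nothing. An illustrative sketch using plain integers for the sector and block types, returning a half-open block range:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void calc_discard_block_range(uint64_t discard_block_size,
                                     uint64_t bio_sector, uint64_t bio_sectors,
                                     uint64_t *b, uint64_t *e)
{
        uint64_t sb = bio_sector;
        uint64_t se = bio_sector + bio_sectors;

        *b = (sb + discard_block_size - 1) / discard_block_size;  /* round up */

        if (se - sb < discard_block_size)
                *e = *b;                        /* too small: empty range */
        else
                *e = se / discard_block_size;   /* round down */
}

int main(void)
{
        uint64_t b, e;

        /* 1MiB discard blocks (2048 sectors); bio covers sectors [1000, 9000). */
        calc_discard_block_range(2048, 1000, 8000, &b, &e);
        assert(b == 1 && e == 4);    /* blocks 1, 2, 3 are fully covered */

        calc_discard_block_range(2048, 1000, 500, &b, &e);
        assert(b == e);              /* nothing to mark */
        printf("range ok\n");
        return 0;
}
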
1352 struct cache *cache = mg->cache; in issue_discard() local
1354 calc_discard_block_range(cache, bio, &b, &e); in issue_discard()
1356 set_discard(cache, b); in issue_discard()
1361 cell_defer(cache, mg->new_ocell, false); in issue_discard()
1363 wake_worker(cache); in issue_discard()
1369 struct cache *cache = mg->cache; in issue_copy_or_discard() local
1377 avoid = !is_dirty(cache, mg->cblock) || in issue_copy_or_discard()
1378 is_discarded_oblock(cache, mg->old_oblock); in issue_copy_or_discard()
1382 avoid = is_discarded_oblock(cache, mg->new_oblock); in issue_copy_or_discard()
1384 if (writeback_mode(&cache->features) && in issue_copy_or_discard()
1385 !avoid && bio_writes_complete_block(cache, bio)) { in issue_copy_or_discard()
1402 static void process_migrations(struct cache *cache, struct list_head *head, in process_migrations() argument
1410 spin_lock_irqsave(&cache->lock, flags); in process_migrations()
1412 spin_unlock_irqrestore(&cache->lock, flags); in process_migrations()
1420 list_add_tail(&mg->list, &mg->cache->quiesced_migrations); in __queue_quiesced_migration()
1426 struct cache *cache = mg->cache; in queue_quiesced_migration() local
1428 spin_lock_irqsave(&cache->lock, flags); in queue_quiesced_migration()
1430 spin_unlock_irqrestore(&cache->lock, flags); in queue_quiesced_migration()
1432 wake_worker(cache); in queue_quiesced_migration()
1435 static void queue_quiesced_migrations(struct cache *cache, struct list_head *work) in queue_quiesced_migrations() argument
1440 spin_lock_irqsave(&cache->lock, flags); in queue_quiesced_migrations()
1443 spin_unlock_irqrestore(&cache->lock, flags); in queue_quiesced_migrations()
1445 wake_worker(cache); in queue_quiesced_migrations()
1448 static void check_for_quiesced_migrations(struct cache *cache, in check_for_quiesced_migrations() argument
1460 queue_quiesced_migrations(cache, &work); in check_for_quiesced_migrations()
1465 if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list)) in quiesce_migration()
1469 static void promote(struct cache *cache, struct prealloc *structs, in promote() argument
1482 mg->cache = cache; in promote()
1489 inc_io_migrations(cache); in promote()
1493 static void writeback(struct cache *cache, struct prealloc *structs, in writeback() argument
1506 mg->cache = cache; in writeback()
1513 inc_io_migrations(cache); in writeback()
1517 static void demote_then_promote(struct cache *cache, struct prealloc *structs, in demote_then_promote() argument
1532 mg->cache = cache; in demote_then_promote()
1540 inc_io_migrations(cache); in demote_then_promote()
1548 static void invalidate(struct cache *cache, struct prealloc *structs, in invalidate() argument
1561 mg->cache = cache; in invalidate()
1568 inc_io_migrations(cache); in invalidate()
1572 static void discard(struct cache *cache, struct prealloc *structs, in discard() argument
1584 mg->cache = cache; in discard()
1595 static void defer_bio(struct cache *cache, struct bio *bio) in defer_bio() argument
1599 spin_lock_irqsave(&cache->lock, flags); in defer_bio()
1600 bio_list_add(&cache->deferred_bios, bio); in defer_bio()
1601 spin_unlock_irqrestore(&cache->lock, flags); in defer_bio()
1603 wake_worker(cache); in defer_bio()
1606 static void process_flush_bio(struct cache *cache, struct bio *bio) in process_flush_bio() argument
1608 size_t pb_data_size = get_per_bio_data_size(cache); in process_flush_bio()
1613 remap_to_origin(cache, bio); in process_flush_bio()
1615 remap_to_cache(cache, bio, 0); in process_flush_bio()
1622 issue(cache, bio); in process_flush_bio()
1625 static void process_discard_bio(struct cache *cache, struct prealloc *structs, in process_discard_bio() argument
1632 calc_discard_block_range(cache, bio, &b, &e); in process_discard_bio()
1639 …r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prea… in process_discard_bio()
1645 discard(cache, structs, new_ocell); in process_discard_bio()
1648 static bool spare_migration_bandwidth(struct cache *cache) in spare_migration_bandwidth() argument
1650 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * in spare_migration_bandwidth()
1651 cache->sectors_per_block; in spare_migration_bandwidth()
1652 return current_volume < cache->migration_threshold; in spare_migration_bandwidth()
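
spare_migration_bandwidth() above throttles background copies: a new migration may start only while the data volume already in flight, plus the block it would add, stays under the configured threshold in sectors. A minimal sketch of that check (the 2048-sector threshold below is an example value, not necessarily the default):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool spare_migration_bandwidth(unsigned nr_io_migrations,
                                      uint64_t sectors_per_block,
                                      uint64_t migration_threshold)
{
        uint64_t current_volume =
                (uint64_t)(nr_io_migrations + 1) * sectors_per_block;

        return current_volume < migration_threshold;
}

int main(void)
{
        /* 64KiB blocks, 2048-sector threshold: a 15th migration still fits,
         * a 16th would not. */
        printf("%d\n", spare_migration_bandwidth(14, 128, 2048));  /* 1 */
        printf("%d\n", spare_migration_bandwidth(15, 128, 2048));  /* 0 */
        return 0;
}
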
1655 static void inc_hit_counter(struct cache *cache, struct bio *bio) in inc_hit_counter() argument
1658 &cache->stats.read_hit : &cache->stats.write_hit); in inc_hit_counter()
1661 static void inc_miss_counter(struct cache *cache, struct bio *bio) in inc_miss_counter() argument
1664 &cache->stats.read_miss : &cache->stats.write_miss); in inc_miss_counter()
1670 struct cache *cache; member
1680 struct cache *cache = detail->cache; in inc_fn() local
1682 inc_ds(cache, cell->holder, cell); in inc_fn()
1696 inc_ds(cache, bio, cell); in inc_fn()
1701 static void remap_cell_to_origin_clear_discard(struct cache *cache, in remap_cell_to_origin_clear_discard() argument
1709 detail.cache = cache; in remap_cell_to_origin_clear_discard()
1714 spin_lock_irqsave(&cache->lock, flags); in remap_cell_to_origin_clear_discard()
1715 dm_cell_visit_release(cache->prison, inc_fn, &detail, cell); in remap_cell_to_origin_clear_discard()
1716 bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios); in remap_cell_to_origin_clear_discard()
1717 spin_unlock_irqrestore(&cache->lock, flags); in remap_cell_to_origin_clear_discard()
1719 remap_to_origin(cache, cell->holder); in remap_cell_to_origin_clear_discard()
1721 issue(cache, cell->holder); in remap_cell_to_origin_clear_discard()
1723 accounted_begin(cache, cell->holder); in remap_cell_to_origin_clear_discard()
1726 clear_discard(cache, oblock_to_dblock(cache, oblock)); in remap_cell_to_origin_clear_discard()
1729 remap_to_origin(cache, bio); in remap_cell_to_origin_clear_discard()
1730 issue(cache, bio); in remap_cell_to_origin_clear_discard()
1733 free_prison_cell(cache, cell); in remap_cell_to_origin_clear_discard()
1736 static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell, in remap_cell_to_cache_dirty() argument
1743 detail.cache = cache; in remap_cell_to_cache_dirty()
1748 spin_lock_irqsave(&cache->lock, flags); in remap_cell_to_cache_dirty()
1749 dm_cell_visit_release(cache->prison, inc_fn, &detail, cell); in remap_cell_to_cache_dirty()
1750 bio_list_merge(&cache->deferred_bios, &detail.unhandled_bios); in remap_cell_to_cache_dirty()
1751 spin_unlock_irqrestore(&cache->lock, flags); in remap_cell_to_cache_dirty()
1753 remap_to_cache(cache, cell->holder, cblock); in remap_cell_to_cache_dirty()
1755 issue(cache, cell->holder); in remap_cell_to_cache_dirty()
1757 accounted_begin(cache, cell->holder); in remap_cell_to_cache_dirty()
1760 set_dirty(cache, oblock, cblock); in remap_cell_to_cache_dirty()
1761 clear_discard(cache, oblock_to_dblock(cache, oblock)); in remap_cell_to_cache_dirty()
1765 remap_to_cache(cache, bio, cblock); in remap_cell_to_cache_dirty()
1766 issue(cache, bio); in remap_cell_to_cache_dirty()
1769 free_prison_cell(cache, cell); in remap_cell_to_cache_dirty()
1776 struct cache *cache; member
1793 return bio_detain(l->cache, b, NULL, cell_prealloc, in cell_locker()
1798 static void process_cell(struct cache *cache, struct prealloc *structs, in process_cell() argument
1804 dm_oblock_t block = get_bio_block(cache, bio); in process_cell()
1806 bool passthrough = passthrough_mode(&cache->features); in process_cell()
1810 fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio); in process_cell()
1811 can_migrate = !passthrough && (fast_promotion || spare_migration_bandwidth(cache)); in process_cell()
1814 ool.cache = cache; in process_cell()
1817 r = policy_map(cache->policy, block, true, can_migrate, fast_promotion, in process_cell()
1827 inc_miss_counter(cache, bio); in process_cell()
1836 atomic_inc(&cache->stats.demotion); in process_cell()
1837 invalidate(cache, structs, block, lookup_result.cblock, new_ocell); in process_cell()
1842 remap_to_origin_clear_discard(cache, bio, block); in process_cell()
1843 inc_and_issue(cache, bio, new_ocell); in process_cell()
1846 inc_hit_counter(cache, bio); in process_cell()
1849 writethrough_mode(&cache->features) && in process_cell()
1850 !is_dirty(cache, lookup_result.cblock)) { in process_cell()
1851 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); in process_cell()
1852 inc_and_issue(cache, bio, new_ocell); in process_cell()
1855 remap_cell_to_cache_dirty(cache, new_ocell, block, lookup_result.cblock, true); in process_cell()
1863 inc_miss_counter(cache, bio); in process_cell()
1864 remap_cell_to_origin_clear_discard(cache, new_ocell, block, true); in process_cell()
1869 atomic_inc(&cache->stats.promotion); in process_cell()
1870 promote(cache, structs, block, lookup_result.cblock, new_ocell); in process_cell()
1875 atomic_inc(&cache->stats.demotion); in process_cell()
1876 atomic_inc(&cache->stats.promotion); in process_cell()
1877 demote_then_promote(cache, structs, lookup_result.old_oblock, in process_cell()
1885 cache_device_name(cache), __func__, in process_cell()
1891 cell_defer(cache, new_ocell, false); in process_cell()
1894 static void process_bio(struct cache *cache, struct prealloc *structs, in process_bio() argument
1898 dm_oblock_t block = get_bio_block(cache, bio); in process_bio()
1905 r = bio_detain(cache, block, bio, cell_prealloc, in process_bio()
1911 process_cell(cache, structs, new_ocell); in process_bio()
1914 static int need_commit_due_to_time(struct cache *cache) in need_commit_due_to_time() argument
1916 return jiffies < cache->last_commit_jiffies || in need_commit_due_to_time()
1917 jiffies > cache->last_commit_jiffies + COMMIT_PERIOD; in need_commit_due_to_time()
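
need_commit_due_to_time() above forces a periodic metadata commit: one is due once COMMIT_PERIOD jiffies have elapsed since the last commit, and the "now < last" clause crudely catches the tick counter wrapping around. A small sketch, with a made-up tick value standing in for the HZ-based period:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define COMMIT_PERIOD 100   /* ticks; stand-in for the kernel's HZ-based value */

static bool need_commit_due_to_time(uint64_t now, uint64_t last_commit)
{
        return now < last_commit || now > last_commit + COMMIT_PERIOD;
}

int main(void)
{
        printf("%d\n", need_commit_due_to_time(150, 100));  /* 0: within period */
        printf("%d\n", need_commit_due_to_time(250, 100));  /* 1: period elapsed */
        printf("%d\n", need_commit_due_to_time(5, 100));    /* 1: counter wrapped */
        return 0;
}
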
1923 static int commit(struct cache *cache, bool clean_shutdown) in commit() argument
1927 if (get_cache_mode(cache) >= CM_READ_ONLY) in commit()
1930 atomic_inc(&cache->stats.commit_count); in commit()
1931 r = dm_cache_commit(cache->cmd, clean_shutdown); in commit()
1933 metadata_operation_failed(cache, "dm_cache_commit", r); in commit()
1938 static int commit_if_needed(struct cache *cache) in commit_if_needed() argument
1942 if ((cache->commit_requested || need_commit_due_to_time(cache)) && in commit_if_needed()
1943 dm_cache_changed_this_transaction(cache->cmd)) { in commit_if_needed()
1944 r = commit(cache, false); in commit_if_needed()
1945 cache->commit_requested = false; in commit_if_needed()
1946 cache->last_commit_jiffies = jiffies; in commit_if_needed()
1952 static void process_deferred_bios(struct cache *cache) in process_deferred_bios() argument
1963 spin_lock_irqsave(&cache->lock, flags); in process_deferred_bios()
1964 bio_list_merge(&bios, &cache->deferred_bios); in process_deferred_bios()
1965 bio_list_init(&cache->deferred_bios); in process_deferred_bios()
1966 spin_unlock_irqrestore(&cache->lock, flags); in process_deferred_bios()
1975 if (prealloc_data_structs(cache, &structs)) { in process_deferred_bios()
1976 spin_lock_irqsave(&cache->lock, flags); in process_deferred_bios()
1977 bio_list_merge(&cache->deferred_bios, &bios); in process_deferred_bios()
1978 spin_unlock_irqrestore(&cache->lock, flags); in process_deferred_bios()
1985 process_flush_bio(cache, bio); in process_deferred_bios()
1987 process_discard_bio(cache, &structs, bio); in process_deferred_bios()
1989 process_bio(cache, &structs, bio); in process_deferred_bios()
1993 prealloc_free_structs(cache, &structs); in process_deferred_bios()
1996 static void process_deferred_cells(struct cache *cache) in process_deferred_cells() argument
2008 spin_lock_irqsave(&cache->lock, flags); in process_deferred_cells()
2009 list_splice_init(&cache->deferred_cells, &cells); in process_deferred_cells()
2010 spin_unlock_irqrestore(&cache->lock, flags); in process_deferred_cells()
2019 if (prealloc_data_structs(cache, &structs)) { in process_deferred_cells()
2020 spin_lock_irqsave(&cache->lock, flags); in process_deferred_cells()
2021 list_splice(&cells, &cache->deferred_cells); in process_deferred_cells()
2022 spin_unlock_irqrestore(&cache->lock, flags); in process_deferred_cells()
2026 process_cell(cache, &structs, cell); in process_deferred_cells()
2030 prealloc_free_structs(cache, &structs); in process_deferred_cells()
2033 static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) in process_deferred_flush_bios() argument
2041 spin_lock_irqsave(&cache->lock, flags); in process_deferred_flush_bios()
2042 bio_list_merge(&bios, &cache->deferred_flush_bios); in process_deferred_flush_bios()
2043 bio_list_init(&cache->deferred_flush_bios); in process_deferred_flush_bios()
2044 spin_unlock_irqrestore(&cache->lock, flags); in process_deferred_flush_bios()
2050 submit_bios ? accounted_request(cache, bio) : bio_io_error(bio); in process_deferred_flush_bios()
2053 static void process_deferred_writethrough_bios(struct cache *cache) in process_deferred_writethrough_bios() argument
2061 spin_lock_irqsave(&cache->lock, flags); in process_deferred_writethrough_bios()
2062 bio_list_merge(&bios, &cache->deferred_writethrough_bios); in process_deferred_writethrough_bios()
2063 bio_list_init(&cache->deferred_writethrough_bios); in process_deferred_writethrough_bios()
2064 spin_unlock_irqrestore(&cache->lock, flags); in process_deferred_writethrough_bios()
2070 accounted_request(cache, bio); in process_deferred_writethrough_bios()
2073 static void writeback_some_dirty_blocks(struct cache *cache) in writeback_some_dirty_blocks() argument
2080 bool busy = !iot_idle_for(&cache->origin_tracker, HZ); in writeback_some_dirty_blocks()
2084 while (spare_migration_bandwidth(cache)) { in writeback_some_dirty_blocks()
2085 if (policy_writeback_work(cache->policy, &oblock, &cblock, busy)) in writeback_some_dirty_blocks()
2089 if (prealloc_data_structs(cache, &structs) || in writeback_some_dirty_blocks()
2090 get_cell(cache, oblock, &structs, &old_ocell)) { in writeback_some_dirty_blocks()
2091 policy_set_dirty(cache->policy, oblock); in writeback_some_dirty_blocks()
2095 writeback(cache, &structs, oblock, cblock, old_ocell); in writeback_some_dirty_blocks()
2099 prealloc_free_structs(cache, &structs); in writeback_some_dirty_blocks()
2107 static void process_invalidation_request(struct cache *cache, struct invalidation_request *req) in process_invalidation_request() argument
2114 r = policy_remove_cblock(cache->policy, to_cblock(begin)); in process_invalidation_request()
2116 r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin)); in process_invalidation_request()
2118 metadata_operation_failed(cache, "dm_cache_remove_mapping", r); in process_invalidation_request()
2127 DMERR("%s: policy_remove_cblock failed", cache_device_name(cache)); in process_invalidation_request()
2134 cache->commit_requested = true; in process_invalidation_request()
2142 static void process_invalidation_requests(struct cache *cache) in process_invalidation_requests() argument
2148 spin_lock(&cache->invalidation_lock); in process_invalidation_requests()
2149 list_splice_init(&cache->invalidation_requests, &list); in process_invalidation_requests()
2150 spin_unlock(&cache->invalidation_lock); in process_invalidation_requests()
2153 process_invalidation_request(cache, req); in process_invalidation_requests()
2159 static bool is_quiescing(struct cache *cache) in is_quiescing() argument
2161 return atomic_read(&cache->quiescing); in is_quiescing()
2164 static void ack_quiescing(struct cache *cache) in ack_quiescing() argument
2166 if (is_quiescing(cache)) { in ack_quiescing()
2167 atomic_inc(&cache->quiescing_ack); in ack_quiescing()
2168 wake_up(&cache->quiescing_wait); in ack_quiescing()
2172 static void wait_for_quiescing_ack(struct cache *cache) in wait_for_quiescing_ack() argument
2174 wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack)); in wait_for_quiescing_ack()
2177 static void start_quiescing(struct cache *cache) in start_quiescing() argument
2179 atomic_inc(&cache->quiescing); in start_quiescing()
2180 wait_for_quiescing_ack(cache); in start_quiescing()
2183 static void stop_quiescing(struct cache *cache) in stop_quiescing() argument
2185 atomic_set(&cache->quiescing, 0); in stop_quiescing()
2186 atomic_set(&cache->quiescing_ack, 0); in stop_quiescing()
2189 static void wait_for_migrations(struct cache *cache) in wait_for_migrations() argument
2191 wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations)); in wait_for_migrations()
2194 static void stop_worker(struct cache *cache) in stop_worker() argument
2196 cancel_delayed_work(&cache->waker); in stop_worker()
2197 flush_workqueue(cache->wq); in stop_worker()
2200 static void requeue_deferred_cells(struct cache *cache) in requeue_deferred_cells() argument
2207 spin_lock_irqsave(&cache->lock, flags); in requeue_deferred_cells()
2208 list_splice_init(&cache->deferred_cells, &cells); in requeue_deferred_cells()
2209 spin_unlock_irqrestore(&cache->lock, flags); in requeue_deferred_cells()
2212 cell_requeue(cache, cell); in requeue_deferred_cells()
2215 static void requeue_deferred_bios(struct cache *cache) in requeue_deferred_bios() argument
2221 bio_list_merge(&bios, &cache->deferred_bios); in requeue_deferred_bios()
2222 bio_list_init(&cache->deferred_bios); in requeue_deferred_bios()
2230 static int more_work(struct cache *cache) in more_work() argument
2232 if (is_quiescing(cache)) in more_work()
2233 return !list_empty(&cache->quiesced_migrations) || in more_work()
2234 !list_empty(&cache->completed_migrations) || in more_work()
2235 !list_empty(&cache->need_commit_migrations); in more_work()
2237 return !bio_list_empty(&cache->deferred_bios) || in more_work()
2238 !list_empty(&cache->deferred_cells) || in more_work()
2239 !bio_list_empty(&cache->deferred_flush_bios) || in more_work()
2240 !bio_list_empty(&cache->deferred_writethrough_bios) || in more_work()
2241 !list_empty(&cache->quiesced_migrations) || in more_work()
2242 !list_empty(&cache->completed_migrations) || in more_work()
2243 !list_empty(&cache->need_commit_migrations) || in more_work()
2244 cache->invalidate; in more_work()
2249 struct cache *cache = container_of(ws, struct cache, worker); in do_worker() local
2252 if (!is_quiescing(cache)) { in do_worker()
2253 writeback_some_dirty_blocks(cache); in do_worker()
2254 process_deferred_writethrough_bios(cache); in do_worker()
2255 process_deferred_bios(cache); in do_worker()
2256 process_deferred_cells(cache); in do_worker()
2257 process_invalidation_requests(cache); in do_worker()
2260 process_migrations(cache, &cache->quiesced_migrations, issue_copy_or_discard); in do_worker()
2261 process_migrations(cache, &cache->completed_migrations, complete_migration); in do_worker()
2263 if (commit_if_needed(cache)) { in do_worker()
2264 process_deferred_flush_bios(cache, false); in do_worker()
2265 process_migrations(cache, &cache->need_commit_migrations, migration_failure); in do_worker()
2267 process_deferred_flush_bios(cache, true); in do_worker()
2268 process_migrations(cache, &cache->need_commit_migrations, in do_worker()
2272 ack_quiescing(cache); in do_worker()
2274 } while (more_work(cache)); in do_worker()
2283 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); in do_waker() local
2284 policy_tick(cache->policy, true); in do_waker()
2285 wake_worker(cache); in do_waker()
2286 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); in do_waker()
2299 struct cache *cache = container_of(cb, struct cache, callbacks); in cache_is_congested() local
2301 return is_congested(cache->origin_dev, bdi_bits) || in cache_is_congested()
2302 is_congested(cache->cache_dev, bdi_bits); in cache_is_congested()
2313 static void destroy(struct cache *cache) in destroy() argument
2317 mempool_destroy(cache->migration_pool); in destroy()
2319 if (cache->all_io_ds) in destroy()
2320 dm_deferred_set_destroy(cache->all_io_ds); in destroy()
2322 if (cache->prison) in destroy()
2323 dm_bio_prison_destroy(cache->prison); in destroy()
2325 if (cache->wq) in destroy()
2326 destroy_workqueue(cache->wq); in destroy()
2328 if (cache->dirty_bitset) in destroy()
2329 free_bitset(cache->dirty_bitset); in destroy()
2331 if (cache->discard_bitset) in destroy()
2332 free_bitset(cache->discard_bitset); in destroy()
2334 if (cache->copier) in destroy()
2335 dm_kcopyd_client_destroy(cache->copier); in destroy()
2337 if (cache->cmd) in destroy()
2338 dm_cache_metadata_close(cache->cmd); in destroy()
2340 if (cache->metadata_dev) in destroy()
2341 dm_put_device(cache->ti, cache->metadata_dev); in destroy()
2343 if (cache->origin_dev) in destroy()
2344 dm_put_device(cache->ti, cache->origin_dev); in destroy()
2346 if (cache->cache_dev) in destroy()
2347 dm_put_device(cache->ti, cache->cache_dev); in destroy()
2349 if (cache->policy) in destroy()
2350 dm_cache_policy_destroy(cache->policy); in destroy()
2352 for (i = 0; i < cache->nr_ctr_args ; i++) in destroy()
2353 kfree(cache->ctr_args[i]); in destroy()
2354 kfree(cache->ctr_args); in destroy()
2356 kfree(cache); in destroy()
2361 struct cache *cache = ti->private; in cache_dtr() local
2363 destroy(cache); in cache_dtr()
2651 static int process_config_option(struct cache *cache, const char *key, const char *value) in process_config_option() argument
2659 cache->migration_threshold = tmp; in process_config_option()
2666 static int set_config_value(struct cache *cache, const char *key, const char *value) in set_config_value() argument
2668 int r = process_config_option(cache, key, value); in set_config_value()
2671 r = policy_set_config_value(cache->policy, key, value); in set_config_value()
2679 static int set_config_values(struct cache *cache, int argc, const char **argv) in set_config_values() argument
2689 r = set_config_value(cache, argv[0], argv[1]); in set_config_values()
2700 static int create_cache_policy(struct cache *cache, struct cache_args *ca, in create_cache_policy() argument
2704 cache->cache_size, in create_cache_policy()
2705 cache->origin_sectors, in create_cache_policy()
2706 cache->sectors_per_block); in create_cache_policy()
2711 cache->policy = p; in create_cache_policy()
2742 static void set_cache_size(struct cache *cache, dm_cblock_t size) in set_cache_size() argument
2746 if (nr_blocks > (1 << 20) && cache->cache_size != size) in set_cache_size()
2752 cache->cache_size = size; in set_cache_size()
2757 static int cache_create(struct cache_args *ca, struct cache **result) in cache_create()
2761 struct cache *cache; in cache_create() local
2767 cache = kzalloc(sizeof(*cache), GFP_KERNEL); in cache_create()
2768 if (!cache) in cache_create()
2771 cache->ti = ca->ti; in cache_create()
2772 ti->private = cache; in cache_create()
2781 cache->features = ca->features; in cache_create()
2782 ti->per_bio_data_size = get_per_bio_data_size(cache); in cache_create()
2784 cache->callbacks.congested_fn = cache_is_congested; in cache_create()
2785 dm_table_add_target_callbacks(ti->table, &cache->callbacks); in cache_create()
2787 cache->metadata_dev = ca->metadata_dev; in cache_create()
2788 cache->origin_dev = ca->origin_dev; in cache_create()
2789 cache->cache_dev = ca->cache_dev; in cache_create()
2794 origin_blocks = cache->origin_sectors = ca->origin_sectors; in cache_create()
2796 cache->origin_blocks = to_oblock(origin_blocks); in cache_create()
2798 cache->sectors_per_block = ca->block_size; in cache_create()
2799 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) { in cache_create()
2807 cache->sectors_per_block_shift = -1; in cache_create()
2809 set_cache_size(cache, to_cblock(cache_size)); in cache_create()
2811 cache->sectors_per_block_shift = __ffs(ca->block_size); in cache_create()
2812 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift)); in cache_create()
2815 r = create_cache_policy(cache, ca, error); in cache_create()
2819 cache->policy_nr_args = ca->policy_argc; in cache_create()
2820 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; in cache_create()
2822 r = set_config_values(cache, ca->policy_argc, ca->policy_argv); in cache_create()
2828 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, in cache_create()
2830 dm_cache_policy_get_hint_size(cache->policy)); in cache_create()
2836 cache->cmd = cmd; in cache_create()
2837 set_cache_mode(cache, CM_WRITE); in cache_create()
2838 if (get_cache_mode(cache) != CM_WRITE) { in cache_create()
2844 if (passthrough_mode(&cache->features)) { in cache_create()
2847 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean); in cache_create()
2860 spin_lock_init(&cache->lock); in cache_create()
2861 INIT_LIST_HEAD(&cache->deferred_cells); in cache_create()
2862 bio_list_init(&cache->deferred_bios); in cache_create()
2863 bio_list_init(&cache->deferred_flush_bios); in cache_create()
2864 bio_list_init(&cache->deferred_writethrough_bios); in cache_create()
2865 INIT_LIST_HEAD(&cache->quiesced_migrations); in cache_create()
2866 INIT_LIST_HEAD(&cache->completed_migrations); in cache_create()
2867 INIT_LIST_HEAD(&cache->need_commit_migrations); in cache_create()
2868 atomic_set(&cache->nr_allocated_migrations, 0); in cache_create()
2869 atomic_set(&cache->nr_io_migrations, 0); in cache_create()
2870 init_waitqueue_head(&cache->migration_wait); in cache_create()
2872 init_waitqueue_head(&cache->quiescing_wait); in cache_create()
2873 atomic_set(&cache->quiescing, 0); in cache_create()
2874 atomic_set(&cache->quiescing_ack, 0); in cache_create()
2877 atomic_set(&cache->nr_dirty, 0); in cache_create()
2878 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); in cache_create()
2879 if (!cache->dirty_bitset) { in cache_create()
2883 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); in cache_create()
2885 cache->discard_block_size = in cache_create()
2886 calculate_discard_block_size(cache->sectors_per_block, in cache_create()
2887 cache->origin_sectors); in cache_create()
2888 cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors, in cache_create()
2889 cache->discard_block_size)); in cache_create()
2890 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); in cache_create()
2891 if (!cache->discard_bitset) { in cache_create()
2895 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); in cache_create()
2897 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); in cache_create()
2898 if (IS_ERR(cache->copier)) { in cache_create()
2900 r = PTR_ERR(cache->copier); in cache_create()
2904 cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in cache_create()
2905 if (!cache->wq) { in cache_create()
2909 INIT_WORK(&cache->worker, do_worker); in cache_create()
2910 INIT_DELAYED_WORK(&cache->waker, do_waker); in cache_create()
2911 cache->last_commit_jiffies = jiffies; in cache_create()
2913 cache->prison = dm_bio_prison_create(); in cache_create()
2914 if (!cache->prison) { in cache_create()
2919 cache->all_io_ds = dm_deferred_set_create(); in cache_create()
2920 if (!cache->all_io_ds) { in cache_create()
2925 cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE, in cache_create()
2927 if (!cache->migration_pool) { in cache_create()
2932 cache->need_tick_bio = true; in cache_create()
2933 cache->sized = false; in cache_create()
2934 cache->invalidate = false; in cache_create()
2935 cache->commit_requested = false; in cache_create()
2936 cache->loaded_mappings = false; in cache_create()
2937 cache->loaded_discards = false; in cache_create()
2939 load_stats(cache); in cache_create()
2941 atomic_set(&cache->stats.demotion, 0); in cache_create()
2942 atomic_set(&cache->stats.promotion, 0); in cache_create()
2943 atomic_set(&cache->stats.copies_avoided, 0); in cache_create()
2944 atomic_set(&cache->stats.cache_cell_clash, 0); in cache_create()
2945 atomic_set(&cache->stats.commit_count, 0); in cache_create()
2946 atomic_set(&cache->stats.discard_count, 0); in cache_create()
2948 spin_lock_init(&cache->invalidation_lock); in cache_create()
2949 INIT_LIST_HEAD(&cache->invalidation_requests); in cache_create()
2951 iot_init(&cache->origin_tracker); in cache_create()
2953 *result = cache; in cache_create()
2957 destroy(cache); in cache_create()
2961 static int copy_ctr_args(struct cache *cache, int argc, const char **argv) in copy_ctr_args() argument
2979 cache->nr_ctr_args = argc; in copy_ctr_args()
2980 cache->ctr_args = copy; in copy_ctr_args()
2989 struct cache *cache = NULL; in cache_ctr() local
3002 r = cache_create(ca, &cache); in cache_ctr()
3006 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); in cache_ctr()
3008 destroy(cache); in cache_ctr()
3012 ti->private = cache; in cache_ctr()
3023 struct cache *cache = ti->private; in cache_map() local
3027 dm_oblock_t block = get_bio_block(cache, bio); in cache_map()
3028 size_t pb_data_size = get_per_bio_data_size(cache); in cache_map()
3037 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { in cache_map()
3043 remap_to_origin(cache, bio); in cache_map()
3044 accounted_begin(cache, bio); in cache_map()
3049 defer_bio(cache, bio); in cache_map()
3056 cell = alloc_prison_cell(cache); in cache_map()
3058 defer_bio(cache, bio); in cache_map()
3062 r = bio_detain(cache, block, bio, cell, in cache_map()
3064 cache, &cell); in cache_map()
3067 defer_bio(cache, bio); in cache_map()
3072 fast_promotion = is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio); in cache_map()
3074 r = policy_map(cache->policy, block, false, can_migrate, fast_promotion, in cache_map()
3077 cell_defer(cache, cell, true); in cache_map()
3082 cache_device_name(cache), r); in cache_map()
3083 cell_defer(cache, cell, false); in cache_map()
3091 if (passthrough_mode(&cache->features)) { in cache_map()
3097 cell_defer(cache, cell, true); in cache_map()
3101 inc_miss_counter(cache, bio); in cache_map()
3102 remap_to_origin_clear_discard(cache, bio, block); in cache_map()
3103 accounted_begin(cache, bio); in cache_map()
3104 inc_ds(cache, bio, cell); in cache_map()
3107 cell_defer(cache, cell, false); in cache_map()
3111 inc_hit_counter(cache, bio); in cache_map()
3112 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && in cache_map()
3113 !is_dirty(cache, lookup_result.cblock)) { in cache_map()
3114 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); in cache_map()
3115 accounted_begin(cache, bio); in cache_map()
3116 inc_ds(cache, bio, cell); in cache_map()
3117 cell_defer(cache, cell, false); in cache_map()
3120 remap_cell_to_cache_dirty(cache, cell, block, lookup_result.cblock, false); in cache_map()
3125 inc_miss_counter(cache, bio); in cache_map()
3133 cell_defer(cache, cell, false); in cache_map()
3137 remap_cell_to_origin_clear_discard(cache, cell, block, false); in cache_map()
3142 cache_device_name(cache), __func__, in cache_map()
3144 cell_defer(cache, cell, false); in cache_map()
3154 struct cache *cache = ti->private; in cache_end_io() local
3156 size_t pb_data_size = get_per_bio_data_size(cache); in cache_end_io()
3160 policy_tick(cache->policy, false); in cache_end_io()
3162 spin_lock_irqsave(&cache->lock, flags); in cache_end_io()
3163 cache->need_tick_bio = true; in cache_end_io()
3164 spin_unlock_irqrestore(&cache->lock, flags); in cache_end_io()
3167 check_for_quiesced_migrations(cache, pb); in cache_end_io()
3168 accounted_complete(cache, bio); in cache_end_io()
3173 static int write_dirty_bitset(struct cache *cache) in write_dirty_bitset() argument
3177 if (get_cache_mode(cache) >= CM_READ_ONLY) in write_dirty_bitset()
3180 for (i = 0; i < from_cblock(cache->cache_size); i++) { in write_dirty_bitset()
3181 r = dm_cache_set_dirty(cache->cmd, to_cblock(i), in write_dirty_bitset()
3182 is_dirty(cache, to_cblock(i))); in write_dirty_bitset()
3184 metadata_operation_failed(cache, "dm_cache_set_dirty", r); in write_dirty_bitset()
3192 static int write_discard_bitset(struct cache *cache) in write_discard_bitset() argument
3196 if (get_cache_mode(cache) >= CM_READ_ONLY) in write_discard_bitset()
3199 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, in write_discard_bitset()
3200 cache->discard_nr_blocks); in write_discard_bitset()
3202 DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache)); in write_discard_bitset()
3203 metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r); in write_discard_bitset()
3207 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { in write_discard_bitset()
3208 r = dm_cache_set_discard(cache->cmd, to_dblock(i), in write_discard_bitset()
3209 is_discarded(cache, to_dblock(i))); in write_discard_bitset()
3211 metadata_operation_failed(cache, "dm_cache_set_discard", r); in write_discard_bitset()
3219 static int write_hints(struct cache *cache) in write_hints() argument
3223 if (get_cache_mode(cache) >= CM_READ_ONLY) in write_hints()
3226 r = dm_cache_write_hints(cache->cmd, cache->policy); in write_hints()
3228 metadata_operation_failed(cache, "dm_cache_write_hints", r); in write_hints()
3238 static bool sync_metadata(struct cache *cache) in sync_metadata() argument
3242 r1 = write_dirty_bitset(cache); in sync_metadata()
3244 DMERR("%s: could not write dirty bitset", cache_device_name(cache)); in sync_metadata()
3246 r2 = write_discard_bitset(cache); in sync_metadata()
3248 DMERR("%s: could not write discard bitset", cache_device_name(cache)); in sync_metadata()
3250 save_stats(cache); in sync_metadata()
3252 r3 = write_hints(cache); in sync_metadata()
3254 DMERR("%s: could not write hints", cache_device_name(cache)); in sync_metadata()
3261 r4 = commit(cache, !r1 && !r2 && !r3); in sync_metadata()
3263 DMERR("%s: could not write cache metadata", cache_device_name(cache)); in sync_metadata()
3270 struct cache *cache = ti->private; in cache_postsuspend() local
3272 start_quiescing(cache); in cache_postsuspend()
3273 wait_for_migrations(cache); in cache_postsuspend()
3274 stop_worker(cache); in cache_postsuspend()
3275 requeue_deferred_bios(cache); in cache_postsuspend()
3276 requeue_deferred_cells(cache); in cache_postsuspend()
3277 stop_quiescing(cache); in cache_postsuspend()
3279 if (get_cache_mode(cache) == CM_WRITE) in cache_postsuspend()
3280 (void) sync_metadata(cache); in cache_postsuspend()
3287 struct cache *cache = context; in load_mapping() local
3289 r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid); in load_mapping()
3294 set_dirty(cache, oblock, cblock); in load_mapping()
3296 clear_dirty(cache, oblock, cblock); in load_mapping()
3308 struct cache *cache; member
3318 static void discard_load_info_init(struct cache *cache, in discard_load_info_init() argument
3321 li->cache = cache; in discard_load_info_init()
3341 b = dm_sector_div_up(b, li->cache->discard_block_size); in set_discard_range()
3342 sector_div(e, li->cache->discard_block_size); in set_discard_range()
3348 if (e > from_dblock(li->cache->discard_nr_blocks)) in set_discard_range()
3349 e = from_dblock(li->cache->discard_nr_blocks); in set_discard_range()
3352 set_discard(li->cache, to_dblock(b)); in set_discard_range()
3385 static dm_cblock_t get_cache_dev_size(struct cache *cache) in get_cache_dev_size() argument
3387 sector_t size = get_dev_size(cache->cache_dev); in get_cache_dev_size()
3388 (void) sector_div(size, cache->sectors_per_block); in get_cache_dev_size()
3392 static bool can_resize(struct cache *cache, dm_cblock_t new_size) in can_resize() argument
3394 if (from_cblock(new_size) > from_cblock(cache->cache_size)) in can_resize()
3400 while (from_cblock(new_size) < from_cblock(cache->cache_size)) { in can_resize()
3402 if (is_dirty(cache, new_size)) { in can_resize()
3404 cache_device_name(cache), in can_resize()
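
can_resize() above lets the cache device grow freely but refuses to shrink past a cache block that is still dirty, since that data has not yet reached the origin. A hedged sketch of the policy, with a stand-in callback replacing the dirty-bitset lookup:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool can_resize(uint64_t cache_size, uint64_t new_size,
                       bool (*is_dirty)(uint64_t cblock))
{
        uint64_t b;

        if (new_size >= cache_size)
                return true;                 /* growing (or unchanged) is fine */

        for (b = new_size; b < cache_size; b++)
                if (is_dirty(b))
                        return false;        /* would drop unwritten data */

        return true;
}

static bool dirty_above_100(uint64_t cblock)
{
        return cblock >= 100;
}

int main(void)
{
        printf("%d\n", can_resize(200, 150, dirty_above_100));  /* 0: 150..199 dirty */
        printf("%d\n", can_resize(200, 250, dirty_above_100));  /* 1: growing */
        return 0;
}
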
3413 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) in resize_cache_dev() argument
3417 r = dm_cache_resize(cache->cmd, new_size); in resize_cache_dev()
3419 DMERR("%s: could not resize cache metadata", cache_device_name(cache)); in resize_cache_dev()
3420 metadata_operation_failed(cache, "dm_cache_resize", r); in resize_cache_dev()
3424 set_cache_size(cache, new_size); in resize_cache_dev()
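A sketch of the three resize helpers as a group; the shrink-time dirty check and the metadata error handling follow the fragments, while the grow branch of can_resize() and the exact message text are assumptions:

static dm_cblock_t get_cache_dev_size(struct cache *cache)
{
	sector_t size = get_dev_size(cache->cache_dev);
	(void) sector_div(size, cache->sectors_per_block);
	return to_cblock(size);
}

static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
	if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
		/* Growing: handling not visible in the fragments; allowed here. */
		return true;
	}

	/* Shrinking: refuse if any block that would be dropped is still dirty. */
	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
		if (is_dirty(cache, new_size)) {
			DMERR("%s: unable to shrink cache; cache block %llu is dirty",
			      cache_device_name(cache),
			      (unsigned long long) from_cblock(new_size));
			return false;
		}
		new_size = to_cblock(from_cblock(new_size) + 1);
	}

	return true;
}

static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
	int r;

	r = dm_cache_resize(cache->cmd, new_size);
	if (r) {
		DMERR("%s: could not resize cache metadata", cache_device_name(cache));
		metadata_operation_failed(cache, "dm_cache_resize", r);
		return r;
	}

	set_cache_size(cache, new_size);
	return 0;
}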
3432 struct cache *cache = ti->private; in cache_preresume() local
3433 dm_cblock_t csize = get_cache_dev_size(cache); in cache_preresume()
3438 if (!cache->sized) { in cache_preresume()
3439 r = resize_cache_dev(cache, csize); in cache_preresume()
3443 cache->sized = true; in cache_preresume()
3445 } else if (csize != cache->cache_size) { in cache_preresume()
3446 if (!can_resize(cache, csize)) in cache_preresume()
3449 r = resize_cache_dev(cache, csize); in cache_preresume()
3454 if (!cache->loaded_mappings) { in cache_preresume()
3455 r = dm_cache_load_mappings(cache->cmd, cache->policy, in cache_preresume()
3456 load_mapping, cache); in cache_preresume()
3458 DMERR("%s: could not load cache mappings", cache_device_name(cache)); in cache_preresume()
3459 metadata_operation_failed(cache, "dm_cache_load_mappings", r); in cache_preresume()
3463 cache->loaded_mappings = true; in cache_preresume()
3466 if (!cache->loaded_discards) { in cache_preresume()
3474 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); in cache_preresume()
3476 discard_load_info_init(cache, &li); in cache_preresume()
3477 r = dm_cache_load_discards(cache->cmd, load_discard, &li); in cache_preresume()
3479 DMERR("%s: could not load origin discards", cache_device_name(cache)); in cache_preresume()
3480 metadata_operation_failed(cache, "dm_cache_load_discards", r); in cache_preresume()
3485 cache->loaded_discards = true; in cache_preresume()
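cache_preresume() strings the pieces above together: size check and resize first, then the mapping load, then a rebuild of the discard bitset. A sketch, assuming early return on every failure and an -EINVAL when a shrink is refused:

static int cache_preresume(struct dm_target *ti)
{
	int r = 0;
	struct cache *cache = ti->private;
	dm_cblock_t csize = get_cache_dev_size(cache);

	/* Has the cache device been resized since the table was loaded? */
	if (!cache->sized) {
		r = resize_cache_dev(cache, csize);
		if (r)
			return r;
		cache->sized = true;

	} else if (csize != cache->cache_size) {
		if (!can_resize(cache, csize))
			return -EINVAL;	/* errno assumed */

		r = resize_cache_dev(cache, csize);
		if (r)
			return r;
	}

	if (!cache->loaded_mappings) {
		r = dm_cache_load_mappings(cache->cmd, cache->policy,
					   load_mapping, cache);
		if (r) {
			DMERR("%s: could not load cache mappings", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_mappings", r);
			return r;
		}
		cache->loaded_mappings = true;
	}

	if (!cache->loaded_discards) {
		struct discard_load_info li;

		/*
		 * The discard block size may have changed, so start from an
		 * all-clear in-core bitset and replay the on-disk discards.
		 */
		clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));

		discard_load_info_init(cache, &li);
		r = dm_cache_load_discards(cache->cmd, load_discard, &li);
		if (r) {
			DMERR("%s: could not load origin discards", cache_device_name(cache));
			metadata_operation_failed(cache, "dm_cache_load_discards", r);
			return r;
		}
		set_discard_range(&li);	/* flush the final accumulated run */

		cache->loaded_discards = true;
	}

	return r;
}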
3493 struct cache *cache = ti->private; in cache_resume() local
3495 cache->need_tick_bio = true; in cache_resume()
3496 do_waker(&cache->waker.work); in cache_resume()
3519 struct cache *cache = ti->private; in cache_status() local
3525 if (get_cache_mode(cache) == CM_FAIL) { in cache_status()
3532 (void) commit(cache, false); in cache_status()
3534 r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata); in cache_status()
3537 cache_device_name(cache), r); in cache_status()
3541 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata); in cache_status()
3544 cache_device_name(cache), r); in cache_status()
3548 residency = policy_residency(cache->policy); in cache_status()
3554 cache->sectors_per_block, in cache_status()
3556 (unsigned long long) from_cblock(cache->cache_size), in cache_status()
3557 (unsigned) atomic_read(&cache->stats.read_hit), in cache_status()
3558 (unsigned) atomic_read(&cache->stats.read_miss), in cache_status()
3559 (unsigned) atomic_read(&cache->stats.write_hit), in cache_status()
3560 (unsigned) atomic_read(&cache->stats.write_miss), in cache_status()
3561 (unsigned) atomic_read(&cache->stats.demotion), in cache_status()
3562 (unsigned) atomic_read(&cache->stats.promotion), in cache_status()
3563 (unsigned long) atomic_read(&cache->nr_dirty)); in cache_status()
3565 if (writethrough_mode(&cache->features)) in cache_status()
3568 else if (passthrough_mode(&cache->features)) in cache_status()
3571 else if (writeback_mode(&cache->features)) in cache_status()
3576 cache_device_name(cache), (int) cache->features.io_mode); in cache_status()
3580 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); in cache_status()
3582 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy)); in cache_status()
3584 r = policy_emit_config_values(cache->policy, result, maxlen, &sz); in cache_status()
3587 cache_device_name(cache), r); in cache_status()
3590 if (get_cache_mode(cache) == CM_READ_ONLY) in cache_status()
3595 r = dm_cache_metadata_needs_check(cache->cmd, &needs_check); in cache_status()
3605 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev); in cache_status()
3607 format_dev_t(buf, cache->cache_dev->bdev->bd_dev); in cache_status()
3609 format_dev_t(buf, cache->origin_dev->bdev->bd_dev); in cache_status()
3612 for (i = 0; i < cache->nr_ctr_args - 1; i++) in cache_status()
3613 DMEMIT(" %s", cache->ctr_args[i]); in cache_status()
3614 if (cache->nr_ctr_args) in cache_status()
3615 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]); in cache_status()
3630 static int parse_cblock_range(struct cache *cache, const char *str, in parse_cblock_range() argument
3663 DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str); in parse_cblock_range()
3667 static int validate_cblock_range(struct cache *cache, struct cblock_range *range) in validate_cblock_range() argument
3671 uint64_t n = from_cblock(cache->cache_size); in validate_cblock_range()
3675 cache_device_name(cache), b, n); in validate_cblock_range()
3681 cache_device_name(cache), e, n); in validate_cblock_range()
3687 cache_device_name(cache), b, e); in validate_cblock_range()
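validate_cblock_range() checks a parsed range against the live cache size; a sketch, with the cblock_range fields (begin, end) and the message wording assumed:

static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
{
	uint64_t b = from_cblock(range->begin);	/* field names assumed */
	uint64_t e = from_cblock(range->end);
	uint64_t n = from_cblock(cache->cache_size);

	if (b >= n) {
		DMERR("%s: begin cblock out of range: %llu >= %llu",
		      cache_device_name(cache), b, n);
		return -EINVAL;
	}

	if (e > n) {
		DMERR("%s: end cblock out of range: %llu > %llu",
		      cache_device_name(cache), e, n);
		return -EINVAL;
	}

	if (b >= e) {
		DMERR("%s: invalid cblock range: %llu >= %llu",
		      cache_device_name(cache), b, e);
		return -EINVAL;
	}

	return 0;
}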
3694 static int request_invalidation(struct cache *cache, struct cblock_range *range) in request_invalidation() argument
3704 spin_lock(&cache->invalidation_lock); in request_invalidation()
3705 list_add(&req.list, &cache->invalidation_requests); in request_invalidation()
3706 spin_unlock(&cache->invalidation_lock); in request_invalidation()
3707 wake_worker(cache); in request_invalidation()
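request_invalidation() queues the range for the worker and waits for the result; only the locking around invalidation_requests and the wake_worker() call are visible above, so the request structure and the completion wait are assumptions:

static int request_invalidation(struct cache *cache, struct cblock_range *range)
{
	struct invalidation_request req;	/* layout assumed */

	INIT_LIST_HEAD(&req.list);
	req.cblocks = range;
	atomic_set(&req.complete, 0);
	req.err = 0;
	init_waitqueue_head(&req.result_wait);

	spin_lock(&cache->invalidation_lock);
	list_add(&req.list, &cache->invalidation_requests);
	spin_unlock(&cache->invalidation_lock);
	wake_worker(cache);

	/* The worker completes the request and wakes the waiter. */
	wait_event(req.result_wait, atomic_read(&req.complete));
	return req.err;
}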
3713 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count, in process_invalidate_cblocks_message() argument
3720 if (!passthrough_mode(&cache->features)) { in process_invalidate_cblocks_message()
3722 cache_device_name(cache)); in process_invalidate_cblocks_message()
3727 r = parse_cblock_range(cache, cblock_ranges[i], &range); in process_invalidate_cblocks_message()
3731 r = validate_cblock_range(cache, &range); in process_invalidate_cblocks_message()
3738 r = request_invalidation(cache, &range); in process_invalidate_cblocks_message()
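process_invalidate_cblocks_message() loops over the supplied ranges; a sketch, assuming it stops at the first failure and that -EPERM is returned outside passthrough mode:

static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
					      const char **cblock_ranges)
{
	int r = 0;
	unsigned i;
	struct cblock_range range;

	if (!passthrough_mode(&cache->features)) {
		DMERR("%s: cache has to be in passthrough mode for invalidation",
		      cache_device_name(cache));	/* wording assumed */
		return -EPERM;
	}

	for (i = 0; i < count; i++) {
		r = parse_cblock_range(cache, cblock_ranges[i], &range);
		if (r)
			break;

		r = validate_cblock_range(cache, &range);
		if (r)
			break;

		r = request_invalidation(cache, &range);
		if (r)
			break;
	}

	return r;
}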
3756 struct cache *cache = ti->private; in cache_message() local
3761 if (get_cache_mode(cache) >= CM_READ_ONLY) { in cache_message()
3763 cache_device_name(cache)); in cache_message()
3768 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1); in cache_message()
3773 return set_config_value(cache, argv[0], argv[1]); in cache_message()
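cache_message() dispatches on the first word; a sketch, assuming "invalidate_cblocks" is matched by name and anything else is treated as a two-word policy configuration update (argument list as in kernels of this vintage, before the result buffer was added):

static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct cache *cache = ti->private;

	if (!argc)
		return -EINVAL;

	if (get_cache_mode(cache) >= CM_READ_ONLY) {
		DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
		      cache_device_name(cache));	/* wording assumed */
		return -EOPNOTSUPP;
	}

	if (!strcasecmp(argv[0], "invalidate_cblocks"))
		return process_invalidate_cblocks_message(cache, argc - 1,
							  (const char **) argv + 1);

	if (argc != 2)
		return -EINVAL;

	return set_config_value(cache, argv[0], argv[1]);
}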
3780 struct cache *cache = ti->private; in cache_iterate_devices() local
3782 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data); in cache_iterate_devices()
3784 r = fn(ti, cache->origin_dev, 0, ti->len, data); in cache_iterate_devices()
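cache_iterate_devices() reports both underlying devices to the callout; a sketch (the callout type is the standard iterate_devices_callout_fn):

static int cache_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int r;
	struct cache *cache = ti->private;

	/* The fast device is reported in full; the origin only up to ti->len. */
	r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
	if (!r)
		r = fn(ti, cache->origin_dev, 0, ti->len, data);

	return r;
}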
3789 static void set_discard_limits(struct cache *cache, struct queue_limits *limits) in set_discard_limits() argument
3794 limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024, in set_discard_limits()
3795 cache->origin_sectors); in set_discard_limits()
3796 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; in set_discard_limits()
3801 struct cache *cache = ti->private; in cache_io_hints() local
3808 if (io_opt_sectors < cache->sectors_per_block || in cache_io_hints()
3809 do_div(io_opt_sectors, cache->sectors_per_block)) { in cache_io_hints()
3810 blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT); in cache_io_hints()
3811 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); in cache_io_hints()
3813 set_discard_limits(cache, limits); in cache_io_hints()
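The final fragments are the queue-limit hooks. A sketch of both, assuming io_opt is read from the stacked limits and only overridden when it is smaller than, or not a multiple of, the cache block size:

static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
{
	/* Cap discards to a run of discard blocks, and never past the origin. */
	limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
					    cache->origin_sectors);
	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
}

static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct cache *cache = ti->private;
	uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * Keep the stacked hints if io_opt is already a multiple of the
	 * cache block size; otherwise advertise the cache block size.
	 */
	if (io_opt_sectors < cache->sectors_per_block ||
	    do_div(io_opt_sectors, cache->sectors_per_block)) {
		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
	}

	set_discard_limits(cache, limits);
}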