Lines Matching refs:cache
178 struct cache { struct
304 struct cache *cache; member
311 struct cache *cache; member
341 static void wake_worker(struct cache *cache) in wake_worker() argument
343 queue_work(cache->wq, &cache->worker); in wake_worker()
348 static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache) in alloc_prison_cell() argument
351 return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT); in alloc_prison_cell()
354 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell) in free_prison_cell() argument
356 dm_bio_prison_free_cell(cache->prison, cell); in free_prison_cell()
359 static struct dm_cache_migration *alloc_migration(struct cache *cache) in alloc_migration() argument
363 mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); in alloc_migration()
365 mg->cache = cache; in alloc_migration()
366 atomic_inc(&mg->cache->nr_allocated_migrations); in alloc_migration()
374 if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations)) in free_migration()
375 wake_up(&mg->cache->migration_wait); in free_migration()
377 mempool_free(mg, mg->cache->migration_pool); in free_migration()
380 static int prealloc_data_structs(struct cache *cache, struct prealloc *p) in prealloc_data_structs() argument
383 p->mg = alloc_migration(cache); in prealloc_data_structs()
389 p->cell1 = alloc_prison_cell(cache); in prealloc_data_structs()
395 p->cell2 = alloc_prison_cell(cache); in prealloc_data_structs()
403 static void prealloc_free_structs(struct cache *cache, struct prealloc *p) in prealloc_free_structs() argument
406 free_prison_cell(cache, p->cell2); in prealloc_free_structs()
409 free_prison_cell(cache, p->cell1); in prealloc_free_structs()
479 static int bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end, in bio_detain_range() argument
488 r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result); in bio_detain_range()
495 static int bio_detain(struct cache *cache, dm_oblock_t oblock, in bio_detain() argument
501 return bio_detain_range(cache, oblock, end, bio, in bio_detain()
505 static int get_cell(struct cache *cache, in get_cell() argument
517 r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result); in get_cell()
526 static bool is_dirty(struct cache *cache, dm_cblock_t b) in is_dirty() argument
528 return test_bit(from_cblock(b), cache->dirty_bitset); in is_dirty()
531 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) in set_dirty() argument
533 if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { in set_dirty()
534 atomic_inc(&cache->nr_dirty); in set_dirty()
535 policy_set_dirty(cache->policy, oblock); in set_dirty()
539 static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) in clear_dirty() argument
541 if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { in clear_dirty()
542 policy_clear_dirty(cache->policy, oblock); in clear_dirty()
543 if (atomic_dec_return(&cache->nr_dirty) == 0) in clear_dirty()
544 dm_table_event(cache->ti->table); in clear_dirty()
550 static bool block_size_is_power_of_two(struct cache *cache) in block_size_is_power_of_two() argument
552 return cache->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
566 static dm_block_t oblocks_per_dblock(struct cache *cache) in oblocks_per_dblock() argument
568 dm_block_t oblocks = cache->discard_block_size; in oblocks_per_dblock()
570 if (block_size_is_power_of_two(cache)) in oblocks_per_dblock()
571 oblocks >>= cache->sectors_per_block_shift; in oblocks_per_dblock()
573 oblocks = block_div(oblocks, cache->sectors_per_block); in oblocks_per_dblock()
578 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) in oblock_to_dblock() argument
581 oblocks_per_dblock(cache))); in oblock_to_dblock()
584 static dm_oblock_t dblock_to_oblock(struct cache *cache, dm_dblock_t dblock) in dblock_to_oblock() argument
586 return to_oblock(from_dblock(dblock) * oblocks_per_dblock(cache)); in dblock_to_oblock()
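
The helpers above (block_size_is_power_of_two() through dblock_to_oblock()) define how origin blocks map onto the coarser discard blocks: one discard block spans discard_block_size / sectors_per_block origin blocks, so converting an oblock to a dblock is a division and the reverse is a multiplication. A minimal standalone restatement of that arithmetic with plain integers instead of the dm_*block_t wrappers (the function signatures below are illustrative, not the kernel's):

#include <stdint.h>

/* One discard block covers this many consecutive origin blocks. */
static uint64_t oblocks_per_dblock(uint64_t discard_block_size, /* in sectors */
                                   uint64_t sectors_per_block)
{
        return discard_block_size / sectors_per_block;
}

/* Origin block -> discard block: rounds down, as block_div() does. */
static uint64_t oblock_to_dblock(uint64_t oblock, uint64_t per_dblock)
{
        return oblock / per_dblock;
}

/* Discard block -> first origin block it covers. */
static uint64_t dblock_to_oblock(uint64_t dblock, uint64_t per_dblock)
{
        return dblock * per_dblock;
}
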
589 static void set_discard(struct cache *cache, dm_dblock_t b) in set_discard() argument
593 BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks)); in set_discard()
594 atomic_inc(&cache->stats.discard_count); in set_discard()
596 spin_lock_irqsave(&cache->lock, flags); in set_discard()
597 set_bit(from_dblock(b), cache->discard_bitset); in set_discard()
598 spin_unlock_irqrestore(&cache->lock, flags); in set_discard()
601 static void clear_discard(struct cache *cache, dm_dblock_t b) in clear_discard() argument
605 spin_lock_irqsave(&cache->lock, flags); in clear_discard()
606 clear_bit(from_dblock(b), cache->discard_bitset); in clear_discard()
607 spin_unlock_irqrestore(&cache->lock, flags); in clear_discard()
610 static bool is_discarded(struct cache *cache, dm_dblock_t b) in is_discarded() argument
615 spin_lock_irqsave(&cache->lock, flags); in is_discarded()
616 r = test_bit(from_dblock(b), cache->discard_bitset); in is_discarded()
617 spin_unlock_irqrestore(&cache->lock, flags); in is_discarded()
622 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) in is_discarded_oblock() argument
627 spin_lock_irqsave(&cache->lock, flags); in is_discarded_oblock()
628 r = test_bit(from_dblock(oblock_to_dblock(cache, b)), in is_discarded_oblock()
629 cache->discard_bitset); in is_discarded_oblock()
630 spin_unlock_irqrestore(&cache->lock, flags); in is_discarded_oblock()
637 static void load_stats(struct cache *cache) in load_stats() argument
641 dm_cache_metadata_get_stats(cache->cmd, &stats); in load_stats()
642 atomic_set(&cache->stats.read_hit, stats.read_hits); in load_stats()
643 atomic_set(&cache->stats.read_miss, stats.read_misses); in load_stats()
644 atomic_set(&cache->stats.write_hit, stats.write_hits); in load_stats()
645 atomic_set(&cache->stats.write_miss, stats.write_misses); in load_stats()
648 static void save_stats(struct cache *cache) in save_stats() argument
652 stats.read_hits = atomic_read(&cache->stats.read_hit); in save_stats()
653 stats.read_misses = atomic_read(&cache->stats.read_miss); in save_stats()
654 stats.write_hits = atomic_read(&cache->stats.write_hit); in save_stats()
655 stats.write_misses = atomic_read(&cache->stats.write_miss); in save_stats()
657 dm_cache_metadata_set_stats(cache->cmd, &stats); in save_stats()
667 #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
685 static size_t get_per_bio_data_size(struct cache *cache) in get_per_bio_data_size() argument
687 return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; in get_per_bio_data_size()
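
PB_DATA_SIZE_WB above is a size, not a field: it is the offset of the per_bio_data cache pointer, so per-bio data in writeback/passthrough mode simply stops before the writethrough-only members (the cache/cblock pair that remap_to_origin_then_cache() and writethrough_endio() use further down this listing), while writethrough mode presumably gets the full struct via PB_DATA_SIZE_WT. A generic sketch of that offsetof() trick, using an invented struct since per_bio_data's full layout is not shown here:

#include <stddef.h>
#include <stdio.h>

/* Invented example struct; only the idea (mode-specific fields placed at
 * the end) mirrors the real per_bio_data. */
struct pb_example {
        int always_needed;
        void *also_always_needed;
        void *cache;            /* first field used only in writethrough mode */
        long more_wt_state;
};

#define PB_SIZE_WT (sizeof(struct pb_example))          /* full struct      */
#define PB_SIZE_WB (offsetof(struct pb_example, cache)) /* truncated struct */

int main(void)
{
        printf("writethrough per-bio size: %zu, writeback per-bio size: %zu\n",
               PB_SIZE_WT, PB_SIZE_WB);
        return 0;
}
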
711 static void remap_to_origin(struct cache *cache, struct bio *bio) in remap_to_origin() argument
713 bio->bi_bdev = cache->origin_dev->bdev; in remap_to_origin()
716 static void remap_to_cache(struct cache *cache, struct bio *bio, in remap_to_cache() argument
722 bio->bi_bdev = cache->cache_dev->bdev; in remap_to_cache()
723 if (!block_size_is_power_of_two(cache)) in remap_to_cache()
725 (block * cache->sectors_per_block) + in remap_to_cache()
726 sector_div(bi_sector, cache->sectors_per_block); in remap_to_cache()
729 (block << cache->sectors_per_block_shift) | in remap_to_cache()
730 (bi_sector & (cache->sectors_per_block - 1)); in remap_to_cache()
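
Both remap_to_cache() branches above compute the same thing: the bio's new sector on the cache device is the first sector of the chosen cache block plus the bio's offset within that block, taking the shift-and-mask path when the block size is a power of two and the multiply/sector_div() path otherwise. A standalone sketch of that arithmetic (plain C, not the kernel function; it assumes the cache_create() convention, visible later in this listing, that sectors_per_block_shift is -1 for non-power-of-two block sizes):

#include <stdint.h>

static uint64_t cache_dev_sector(uint64_t cblock, uint64_t bi_sector,
                                 uint64_t sectors_per_block,
                                 int sectors_per_block_shift)
{
        if (sectors_per_block_shift < 0)
                /* Non-power-of-two block size: multiply for the block start,
                 * remainder for the offset (what sector_div() returns). */
                return cblock * sectors_per_block +
                       bi_sector % sectors_per_block;

        /* Power-of-two block size: shift for the block start, mask for the
         * offset within the block. */
        return (cblock << sectors_per_block_shift) |
               (bi_sector & (sectors_per_block - 1));
}

get_bio_block() a few lines below goes the other direction: it divides (or right-shifts) the bio's sector by the block size to find which origin block a bio belongs to.
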
733 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) in check_if_tick_bio_needed() argument
736 size_t pb_data_size = get_per_bio_data_size(cache); in check_if_tick_bio_needed()
739 spin_lock_irqsave(&cache->lock, flags); in check_if_tick_bio_needed()
740 if (cache->need_tick_bio && in check_if_tick_bio_needed()
743 cache->need_tick_bio = false; in check_if_tick_bio_needed()
745 spin_unlock_irqrestore(&cache->lock, flags); in check_if_tick_bio_needed()
748 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, in remap_to_origin_clear_discard() argument
751 check_if_tick_bio_needed(cache, bio); in remap_to_origin_clear_discard()
752 remap_to_origin(cache, bio); in remap_to_origin_clear_discard()
754 clear_discard(cache, oblock_to_dblock(cache, oblock)); in remap_to_origin_clear_discard()
757 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, in remap_to_cache_dirty() argument
760 check_if_tick_bio_needed(cache, bio); in remap_to_cache_dirty()
761 remap_to_cache(cache, bio, cblock); in remap_to_cache_dirty()
763 set_dirty(cache, oblock, cblock); in remap_to_cache_dirty()
764 clear_discard(cache, oblock_to_dblock(cache, oblock)); in remap_to_cache_dirty()
768 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) in get_bio_block() argument
772 if (!block_size_is_power_of_two(cache)) in get_bio_block()
773 (void) sector_div(block_nr, cache->sectors_per_block); in get_bio_block()
775 block_nr >>= cache->sectors_per_block_shift; in get_bio_block()
780 static int bio_triggers_commit(struct cache *cache, struct bio *bio) in bio_triggers_commit() argument
789 static void inc_ds(struct cache *cache, struct bio *bio, in inc_ds() argument
792 size_t pb_data_size = get_per_bio_data_size(cache); in inc_ds()
798 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); in inc_ds()
801 static void issue(struct cache *cache, struct bio *bio) in issue() argument
805 if (!bio_triggers_commit(cache, bio)) { in issue()
814 spin_lock_irqsave(&cache->lock, flags); in issue()
815 cache->commit_requested = true; in issue()
816 bio_list_add(&cache->deferred_flush_bios, bio); in issue()
817 spin_unlock_irqrestore(&cache->lock, flags); in issue()
820 static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell) in inc_and_issue() argument
822 inc_ds(cache, bio, cell); in inc_and_issue()
823 issue(cache, bio); in inc_and_issue()
826 static void defer_writethrough_bio(struct cache *cache, struct bio *bio) in defer_writethrough_bio() argument
830 spin_lock_irqsave(&cache->lock, flags); in defer_writethrough_bio()
831 bio_list_add(&cache->deferred_writethrough_bios, bio); in defer_writethrough_bio()
832 spin_unlock_irqrestore(&cache->lock, flags); in defer_writethrough_bio()
834 wake_worker(cache); in defer_writethrough_bio()
849 remap_to_cache(pb->cache, bio, pb->cblock); in writethrough_endio()
856 defer_writethrough_bio(pb->cache, bio); in writethrough_endio()
865 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, in remap_to_origin_then_cache() argument
870 pb->cache = cache; in remap_to_origin_then_cache()
875 remap_to_origin_clear_discard(pb->cache, bio, oblock); in remap_to_origin_then_cache()
884 static void inc_io_migrations(struct cache *cache) in inc_io_migrations() argument
886 atomic_inc(&cache->nr_io_migrations); in inc_io_migrations()
889 static void dec_io_migrations(struct cache *cache) in dec_io_migrations() argument
891 atomic_dec(&cache->nr_io_migrations); in dec_io_migrations()
894 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, in __cell_defer() argument
898 (cache->prison, cell, &cache->deferred_bios); in __cell_defer()
899 free_prison_cell(cache, cell); in __cell_defer()
902 static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, in cell_defer() argument
907 spin_lock_irqsave(&cache->lock, flags); in cell_defer()
908 __cell_defer(cache, cell, holder); in cell_defer()
909 spin_unlock_irqrestore(&cache->lock, flags); in cell_defer()
911 wake_worker(cache); in cell_defer()
916 dec_io_migrations(mg->cache); in free_io_migration()
922 struct cache *cache = mg->cache; in migration_failure() local
926 set_dirty(cache, mg->old_oblock, mg->cblock); in migration_failure()
927 cell_defer(cache, mg->old_ocell, false); in migration_failure()
931 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); in migration_failure()
933 cell_defer(cache, mg->old_ocell, mg->promote ? false : true); in migration_failure()
935 cell_defer(cache, mg->new_ocell, true); in migration_failure()
938 policy_remove_mapping(cache->policy, mg->new_oblock); in migration_failure()
939 cell_defer(cache, mg->new_ocell, true); in migration_failure()
948 struct cache *cache = mg->cache; in migration_success_pre_commit() local
951 clear_dirty(cache, mg->old_oblock, mg->cblock); in migration_success_pre_commit()
952 cell_defer(cache, mg->old_ocell, false); in migration_success_pre_commit()
957 if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) { in migration_success_pre_commit()
959 policy_force_mapping(cache->policy, mg->new_oblock, in migration_success_pre_commit()
962 cell_defer(cache, mg->new_ocell, true); in migration_success_pre_commit()
967 if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { in migration_success_pre_commit()
969 policy_remove_mapping(cache->policy, mg->new_oblock); in migration_success_pre_commit()
975 spin_lock_irqsave(&cache->lock, flags); in migration_success_pre_commit()
976 list_add_tail(&mg->list, &cache->need_commit_migrations); in migration_success_pre_commit()
977 cache->commit_requested = true; in migration_success_pre_commit()
978 spin_unlock_irqrestore(&cache->lock, flags); in migration_success_pre_commit()
984 struct cache *cache = mg->cache; in migration_success_post_commit() local
991 cell_defer(cache, mg->old_ocell, mg->promote ? false : true); in migration_success_post_commit()
996 spin_lock_irqsave(&cache->lock, flags); in migration_success_post_commit()
997 list_add_tail(&mg->list, &cache->quiesced_migrations); in migration_success_post_commit()
998 spin_unlock_irqrestore(&cache->lock, flags); in migration_success_post_commit()
1002 policy_remove_mapping(cache->policy, mg->old_oblock); in migration_success_post_commit()
1008 clear_dirty(cache, mg->new_oblock, mg->cblock); in migration_success_post_commit()
1009 cell_defer(cache, mg->new_ocell, true); in migration_success_post_commit()
1014 set_dirty(cache, mg->new_oblock, mg->cblock); in migration_success_post_commit()
1016 cell_defer(cache, mg->new_ocell, false); in migration_success_post_commit()
1026 struct cache *cache = mg->cache; in copy_complete() local
1031 spin_lock_irqsave(&cache->lock, flags); in copy_complete()
1032 list_add_tail(&mg->list, &cache->completed_migrations); in copy_complete()
1033 spin_unlock_irqrestore(&cache->lock, flags); in copy_complete()
1035 wake_worker(cache); in copy_complete()
1042 struct cache *cache = mg->cache; in issue_copy() local
1045 o_region.bdev = cache->origin_dev->bdev; in issue_copy()
1046 o_region.count = cache->sectors_per_block; in issue_copy()
1048 c_region.bdev = cache->cache_dev->bdev; in issue_copy()
1049 c_region.sector = cblock * cache->sectors_per_block; in issue_copy()
1050 c_region.count = cache->sectors_per_block; in issue_copy()
1054 o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block; in issue_copy()
1055 r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg); in issue_copy()
1058 o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block; in issue_copy()
1059 r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg); in issue_copy()
1071 struct cache *cache = mg->cache; in overwrite_endio() local
1072 size_t pb_data_size = get_per_bio_data_size(cache); in overwrite_endio()
1083 spin_lock_irqsave(&cache->lock, flags); in overwrite_endio()
1084 list_add_tail(&mg->list, &cache->completed_migrations); in overwrite_endio()
1085 spin_unlock_irqrestore(&cache->lock, flags); in overwrite_endio()
1087 wake_worker(cache); in overwrite_endio()
1092 size_t pb_data_size = get_per_bio_data_size(mg->cache); in issue_overwrite()
1096 remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock); in issue_overwrite()
1105 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) in bio_writes_complete_block() argument
1108 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); in bio_writes_complete_block()
1113 atomic_inc(&mg->cache->stats.copies_avoided); in avoid_copy()
1117 static void calc_discard_block_range(struct cache *cache, struct bio *bio, in calc_discard_block_range() argument
1123 *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size)); in calc_discard_block_range()
1125 if (se - sb < cache->discard_block_size) in calc_discard_block_range()
1128 *e = to_dblock(block_div(se, cache->discard_block_size)); in calc_discard_block_range()
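
calc_discard_block_range() above converts a discard bio's sector range into a half-open range of discard blocks, rounding the start sector up and the end sector down so that only blocks the bio covers completely get marked (issue_discard() then walks that range calling set_discard()). A hedged standalone sketch of the rounding; the collapse to an empty range for bios shorter than one discard block follows from the comparison shown at line 1125:

#include <stdint.h>

static void discard_block_range(uint64_t sb, uint64_t se,   /* bio start/end, in sectors */
                                uint64_t discard_block_size,
                                uint64_t *b, uint64_t *e)   /* resulting dblocks, [*b, *e) */
{
        /* Round the start sector up to the next discard-block boundary. */
        *b = (sb + discard_block_size - 1) / discard_block_size;

        if (se - sb < discard_block_size)
                *e = *b;                        /* too short to cover a whole block */
        else
                *e = se / discard_block_size;   /* round the end sector down */
}
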
1136 calc_discard_block_range(mg->cache, bio, &b, &e); in issue_discard()
1138 set_discard(mg->cache, b); in issue_discard()
1143 cell_defer(mg->cache, mg->new_ocell, false); in issue_discard()
1150 struct cache *cache = mg->cache; in issue_copy_or_discard() local
1158 avoid = !is_dirty(cache, mg->cblock) || in issue_copy_or_discard()
1159 is_discarded_oblock(cache, mg->old_oblock); in issue_copy_or_discard()
1163 avoid = is_discarded_oblock(cache, mg->new_oblock); in issue_copy_or_discard()
1165 if (writeback_mode(&cache->features) && in issue_copy_or_discard()
1166 !avoid && bio_writes_complete_block(cache, bio)) { in issue_copy_or_discard()
1183 static void process_migrations(struct cache *cache, struct list_head *head, in process_migrations() argument
1191 spin_lock_irqsave(&cache->lock, flags); in process_migrations()
1193 spin_unlock_irqrestore(&cache->lock, flags); in process_migrations()
1201 list_add_tail(&mg->list, &mg->cache->quiesced_migrations); in __queue_quiesced_migration()
1207 struct cache *cache = mg->cache; in queue_quiesced_migration() local
1209 spin_lock_irqsave(&cache->lock, flags); in queue_quiesced_migration()
1211 spin_unlock_irqrestore(&cache->lock, flags); in queue_quiesced_migration()
1213 wake_worker(cache); in queue_quiesced_migration()
1216 static void queue_quiesced_migrations(struct cache *cache, struct list_head *work) in queue_quiesced_migrations() argument
1221 spin_lock_irqsave(&cache->lock, flags); in queue_quiesced_migrations()
1224 spin_unlock_irqrestore(&cache->lock, flags); in queue_quiesced_migrations()
1226 wake_worker(cache); in queue_quiesced_migrations()
1229 static void check_for_quiesced_migrations(struct cache *cache, in check_for_quiesced_migrations() argument
1241 queue_quiesced_migrations(cache, &work); in check_for_quiesced_migrations()
1246 if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list)) in quiesce_migration()
1250 static void promote(struct cache *cache, struct prealloc *structs, in promote() argument
1263 mg->cache = cache; in promote()
1270 inc_io_migrations(cache); in promote()
1274 static void writeback(struct cache *cache, struct prealloc *structs, in writeback() argument
1287 mg->cache = cache; in writeback()
1294 inc_io_migrations(cache); in writeback()
1298 static void demote_then_promote(struct cache *cache, struct prealloc *structs, in demote_then_promote() argument
1313 mg->cache = cache; in demote_then_promote()
1321 inc_io_migrations(cache); in demote_then_promote()
1329 static void invalidate(struct cache *cache, struct prealloc *structs, in invalidate() argument
1342 mg->cache = cache; in invalidate()
1349 inc_io_migrations(cache); in invalidate()
1353 static void discard(struct cache *cache, struct prealloc *structs, in discard() argument
1365 mg->cache = cache; in discard()
1376 static void defer_bio(struct cache *cache, struct bio *bio) in defer_bio() argument
1380 spin_lock_irqsave(&cache->lock, flags); in defer_bio()
1381 bio_list_add(&cache->deferred_bios, bio); in defer_bio()
1382 spin_unlock_irqrestore(&cache->lock, flags); in defer_bio()
1384 wake_worker(cache); in defer_bio()
1387 static void process_flush_bio(struct cache *cache, struct bio *bio) in process_flush_bio() argument
1389 size_t pb_data_size = get_per_bio_data_size(cache); in process_flush_bio()
1394 remap_to_origin(cache, bio); in process_flush_bio()
1396 remap_to_cache(cache, bio, 0); in process_flush_bio()
1403 issue(cache, bio); in process_flush_bio()
1406 static void process_discard_bio(struct cache *cache, struct prealloc *structs, in process_discard_bio() argument
1413 calc_discard_block_range(cache, bio, &b, &e); in process_discard_bio()
1420 …r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prea… in process_discard_bio()
1426 discard(cache, structs, new_ocell); in process_discard_bio()
1429 static bool spare_migration_bandwidth(struct cache *cache) in spare_migration_bandwidth() argument
1431 sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * in spare_migration_bandwidth()
1432 cache->sectors_per_block; in spare_migration_bandwidth()
1433 return current_volume < cache->migration_threshold; in spare_migration_bandwidth()
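
spare_migration_bandwidth() above is the entire throttle on background copies: a new migration may start only while the volume of in-flight migrations, measured in sectors, is still below migration_threshold (the same tunable that process_config_option() assigns and cache_status() reports later in the listing). Restated with illustrative names:

#include <stdbool.h>
#include <stdint.h>

static bool spare_bandwidth(unsigned int nr_io_migrations,
                            uint64_t sectors_per_block,
                            uint64_t migration_threshold)
{
        /* +1 accounts for the migration about to be started. */
        uint64_t current_volume = (nr_io_migrations + 1) * sectors_per_block;

        return current_volume < migration_threshold;
}
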
1436 static void inc_hit_counter(struct cache *cache, struct bio *bio) in inc_hit_counter() argument
1439 &cache->stats.read_hit : &cache->stats.write_hit); in inc_hit_counter()
1442 static void inc_miss_counter(struct cache *cache, struct bio *bio) in inc_miss_counter() argument
1445 &cache->stats.read_miss : &cache->stats.write_miss); in inc_miss_counter()
1452 struct cache *cache; member
1469 return bio_detain(l->cache, b, NULL, cell_prealloc, in cell_locker()
1474 static void process_bio(struct cache *cache, struct prealloc *structs, in process_bio() argument
1479 dm_oblock_t block = get_bio_block(cache, bio); in process_bio()
1482 bool passthrough = passthrough_mode(&cache->features); in process_bio()
1490 r = bio_detain(cache, block, bio, cell_prealloc, in process_bio()
1496 discarded_block = is_discarded_oblock(cache, block); in process_bio()
1497 can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache)); in process_bio()
1500 ool.cache = cache; in process_bio()
1503 r = policy_map(cache->policy, block, true, can_migrate, discarded_block, in process_bio()
1513 inc_miss_counter(cache, bio); in process_bio()
1522 atomic_inc(&cache->stats.demotion); in process_bio()
1523 invalidate(cache, structs, block, lookup_result.cblock, new_ocell); in process_bio()
1528 remap_to_origin_clear_discard(cache, bio, block); in process_bio()
1529 inc_and_issue(cache, bio, new_ocell); in process_bio()
1532 inc_hit_counter(cache, bio); in process_bio()
1535 writethrough_mode(&cache->features) && in process_bio()
1536 !is_dirty(cache, lookup_result.cblock)) { in process_bio()
1537 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); in process_bio()
1538 inc_and_issue(cache, bio, new_ocell); in process_bio()
1541 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); in process_bio()
1542 inc_and_issue(cache, bio, new_ocell); in process_bio()
1549 inc_miss_counter(cache, bio); in process_bio()
1550 remap_to_origin_clear_discard(cache, bio, block); in process_bio()
1551 inc_and_issue(cache, bio, new_ocell); in process_bio()
1555 atomic_inc(&cache->stats.promotion); in process_bio()
1556 promote(cache, structs, block, lookup_result.cblock, new_ocell); in process_bio()
1561 atomic_inc(&cache->stats.demotion); in process_bio()
1562 atomic_inc(&cache->stats.promotion); in process_bio()
1563 demote_then_promote(cache, structs, lookup_result.old_oblock, in process_bio()
1576 cell_defer(cache, new_ocell, false); in process_bio()
1579 static int need_commit_due_to_time(struct cache *cache) in need_commit_due_to_time() argument
1581 return !time_in_range(jiffies, cache->last_commit_jiffies, in need_commit_due_to_time()
1582 cache->last_commit_jiffies + COMMIT_PERIOD); in need_commit_due_to_time()
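
need_commit_due_to_time() above is what lets commit_if_needed() commit metadata periodically even without an explicit request: it returns true as soon as jiffies falls outside [last_commit_jiffies, last_commit_jiffies + COMMIT_PERIOD]. An equivalent sketch using a plain monotonic millisecond clock instead of jiffies; COMMIT_PERIOD's value is not visible in this listing, so the constant below is only an assumed placeholder:

#include <stdbool.h>
#include <stdint.h>

#define COMMIT_PERIOD_MS 1000   /* assumed value, for illustration only */

/* Equivalent of !time_in_range(now, last, last + COMMIT_PERIOD). */
static bool need_commit_due_to_time_ms(uint64_t now_ms, uint64_t last_commit_ms)
{
        return now_ms < last_commit_ms ||
               now_ms > last_commit_ms + COMMIT_PERIOD_MS;
}

commit_if_needed() just below additionally requires dm_cache_changed_this_transaction() to be true, so an idle cache does not commit on every tick.
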
1585 static int commit_if_needed(struct cache *cache) in commit_if_needed() argument
1589 if ((cache->commit_requested || need_commit_due_to_time(cache)) && in commit_if_needed()
1590 dm_cache_changed_this_transaction(cache->cmd)) { in commit_if_needed()
1591 atomic_inc(&cache->stats.commit_count); in commit_if_needed()
1592 cache->commit_requested = false; in commit_if_needed()
1593 r = dm_cache_commit(cache->cmd, false); in commit_if_needed()
1594 cache->last_commit_jiffies = jiffies; in commit_if_needed()
1600 static void process_deferred_bios(struct cache *cache) in process_deferred_bios() argument
1610 spin_lock_irqsave(&cache->lock, flags); in process_deferred_bios()
1611 bio_list_merge(&bios, &cache->deferred_bios); in process_deferred_bios()
1612 bio_list_init(&cache->deferred_bios); in process_deferred_bios()
1613 spin_unlock_irqrestore(&cache->lock, flags); in process_deferred_bios()
1621 if (prealloc_data_structs(cache, &structs)) { in process_deferred_bios()
1622 spin_lock_irqsave(&cache->lock, flags); in process_deferred_bios()
1623 bio_list_merge(&cache->deferred_bios, &bios); in process_deferred_bios()
1624 spin_unlock_irqrestore(&cache->lock, flags); in process_deferred_bios()
1631 process_flush_bio(cache, bio); in process_deferred_bios()
1633 process_discard_bio(cache, &structs, bio); in process_deferred_bios()
1635 process_bio(cache, &structs, bio); in process_deferred_bios()
1638 prealloc_free_structs(cache, &structs); in process_deferred_bios()
1641 static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) in process_deferred_flush_bios() argument
1649 spin_lock_irqsave(&cache->lock, flags); in process_deferred_flush_bios()
1650 bio_list_merge(&bios, &cache->deferred_flush_bios); in process_deferred_flush_bios()
1651 bio_list_init(&cache->deferred_flush_bios); in process_deferred_flush_bios()
1652 spin_unlock_irqrestore(&cache->lock, flags); in process_deferred_flush_bios()
1661 static void process_deferred_writethrough_bios(struct cache *cache) in process_deferred_writethrough_bios() argument
1669 spin_lock_irqsave(&cache->lock, flags); in process_deferred_writethrough_bios()
1670 bio_list_merge(&bios, &cache->deferred_writethrough_bios); in process_deferred_writethrough_bios()
1671 bio_list_init(&cache->deferred_writethrough_bios); in process_deferred_writethrough_bios()
1672 spin_unlock_irqrestore(&cache->lock, flags); in process_deferred_writethrough_bios()
1681 static void writeback_some_dirty_blocks(struct cache *cache) in writeback_some_dirty_blocks() argument
1691 while (spare_migration_bandwidth(cache)) { in writeback_some_dirty_blocks()
1692 if (prealloc_data_structs(cache, &structs)) in writeback_some_dirty_blocks()
1695 r = policy_writeback_work(cache->policy, &oblock, &cblock); in writeback_some_dirty_blocks()
1699 r = get_cell(cache, oblock, &structs, &old_ocell); in writeback_some_dirty_blocks()
1701 policy_set_dirty(cache->policy, oblock); in writeback_some_dirty_blocks()
1705 writeback(cache, &structs, oblock, cblock, old_ocell); in writeback_some_dirty_blocks()
1708 prealloc_free_structs(cache, &structs); in writeback_some_dirty_blocks()
1716 static void process_invalidation_request(struct cache *cache, struct invalidation_request *req) in process_invalidation_request() argument
1723 r = policy_remove_cblock(cache->policy, to_cblock(begin)); in process_invalidation_request()
1725 r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin)); in process_invalidation_request()
1741 cache->commit_requested = true; in process_invalidation_request()
1749 static void process_invalidation_requests(struct cache *cache) in process_invalidation_requests() argument
1755 spin_lock(&cache->invalidation_lock); in process_invalidation_requests()
1756 list_splice_init(&cache->invalidation_requests, &list); in process_invalidation_requests()
1757 spin_unlock(&cache->invalidation_lock); in process_invalidation_requests()
1760 process_invalidation_request(cache, req); in process_invalidation_requests()
1766 static bool is_quiescing(struct cache *cache) in is_quiescing() argument
1768 return atomic_read(&cache->quiescing); in is_quiescing()
1771 static void ack_quiescing(struct cache *cache) in ack_quiescing() argument
1773 if (is_quiescing(cache)) { in ack_quiescing()
1774 atomic_inc(&cache->quiescing_ack); in ack_quiescing()
1775 wake_up(&cache->quiescing_wait); in ack_quiescing()
1779 static void wait_for_quiescing_ack(struct cache *cache) in wait_for_quiescing_ack() argument
1781 wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack)); in wait_for_quiescing_ack()
1784 static void start_quiescing(struct cache *cache) in start_quiescing() argument
1786 atomic_inc(&cache->quiescing); in start_quiescing()
1787 wait_for_quiescing_ack(cache); in start_quiescing()
1790 static void stop_quiescing(struct cache *cache) in stop_quiescing() argument
1792 atomic_set(&cache->quiescing, 0); in stop_quiescing()
1793 atomic_set(&cache->quiescing_ack, 0); in stop_quiescing()
1796 static void wait_for_migrations(struct cache *cache) in wait_for_migrations() argument
1798 wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations)); in wait_for_migrations()
1801 static void stop_worker(struct cache *cache) in stop_worker() argument
1803 cancel_delayed_work(&cache->waker); in stop_worker()
1804 flush_workqueue(cache->wq); in stop_worker()
1807 static void requeue_deferred_io(struct cache *cache) in requeue_deferred_io() argument
1813 bio_list_merge(&bios, &cache->deferred_bios); in requeue_deferred_io()
1814 bio_list_init(&cache->deferred_bios); in requeue_deferred_io()
1820 static int more_work(struct cache *cache) in more_work() argument
1822 if (is_quiescing(cache)) in more_work()
1823 return !list_empty(&cache->quiesced_migrations) || in more_work()
1824 !list_empty(&cache->completed_migrations) || in more_work()
1825 !list_empty(&cache->need_commit_migrations); in more_work()
1827 return !bio_list_empty(&cache->deferred_bios) || in more_work()
1828 !bio_list_empty(&cache->deferred_flush_bios) || in more_work()
1829 !bio_list_empty(&cache->deferred_writethrough_bios) || in more_work()
1830 !list_empty(&cache->quiesced_migrations) || in more_work()
1831 !list_empty(&cache->completed_migrations) || in more_work()
1832 !list_empty(&cache->need_commit_migrations) || in more_work()
1833 cache->invalidate; in more_work()
1838 struct cache *cache = container_of(ws, struct cache, worker); in do_worker() local
1841 if (!is_quiescing(cache)) { in do_worker()
1842 writeback_some_dirty_blocks(cache); in do_worker()
1843 process_deferred_writethrough_bios(cache); in do_worker()
1844 process_deferred_bios(cache); in do_worker()
1845 process_invalidation_requests(cache); in do_worker()
1848 process_migrations(cache, &cache->quiesced_migrations, issue_copy_or_discard); in do_worker()
1849 process_migrations(cache, &cache->completed_migrations, complete_migration); in do_worker()
1851 if (commit_if_needed(cache)) { in do_worker()
1852 process_deferred_flush_bios(cache, false); in do_worker()
1853 process_migrations(cache, &cache->need_commit_migrations, migration_failure); in do_worker()
1860 process_deferred_flush_bios(cache, true); in do_worker()
1861 process_migrations(cache, &cache->need_commit_migrations, in do_worker()
1865 ack_quiescing(cache); in do_worker()
1867 } while (more_work(cache)); in do_worker()
1876 struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); in do_waker() local
1877 policy_tick(cache->policy); in do_waker()
1878 wake_worker(cache); in do_waker()
1879 queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); in do_waker()
1892 struct cache *cache = container_of(cb, struct cache, callbacks); in cache_is_congested() local
1894 return is_congested(cache->origin_dev, bdi_bits) || in cache_is_congested()
1895 is_congested(cache->cache_dev, bdi_bits); in cache_is_congested()
1906 static void destroy(struct cache *cache) in destroy() argument
1910 if (cache->migration_pool) in destroy()
1911 mempool_destroy(cache->migration_pool); in destroy()
1913 if (cache->all_io_ds) in destroy()
1914 dm_deferred_set_destroy(cache->all_io_ds); in destroy()
1916 if (cache->prison) in destroy()
1917 dm_bio_prison_destroy(cache->prison); in destroy()
1919 if (cache->wq) in destroy()
1920 destroy_workqueue(cache->wq); in destroy()
1922 if (cache->dirty_bitset) in destroy()
1923 free_bitset(cache->dirty_bitset); in destroy()
1925 if (cache->discard_bitset) in destroy()
1926 free_bitset(cache->discard_bitset); in destroy()
1928 if (cache->copier) in destroy()
1929 dm_kcopyd_client_destroy(cache->copier); in destroy()
1931 if (cache->cmd) in destroy()
1932 dm_cache_metadata_close(cache->cmd); in destroy()
1934 if (cache->metadata_dev) in destroy()
1935 dm_put_device(cache->ti, cache->metadata_dev); in destroy()
1937 if (cache->origin_dev) in destroy()
1938 dm_put_device(cache->ti, cache->origin_dev); in destroy()
1940 if (cache->cache_dev) in destroy()
1941 dm_put_device(cache->ti, cache->cache_dev); in destroy()
1943 if (cache->policy) in destroy()
1944 dm_cache_policy_destroy(cache->policy); in destroy()
1946 for (i = 0; i < cache->nr_ctr_args ; i++) in destroy()
1947 kfree(cache->ctr_args[i]); in destroy()
1948 kfree(cache->ctr_args); in destroy()
1950 kfree(cache); in destroy()
1955 struct cache *cache = ti->private; in cache_dtr() local
1957 destroy(cache); in cache_dtr()
2245 static int process_config_option(struct cache *cache, const char *key, const char *value) in process_config_option() argument
2253 cache->migration_threshold = tmp; in process_config_option()
2260 static int set_config_value(struct cache *cache, const char *key, const char *value) in set_config_value() argument
2262 int r = process_config_option(cache, key, value); in set_config_value()
2265 r = policy_set_config_value(cache->policy, key, value); in set_config_value()
2273 static int set_config_values(struct cache *cache, int argc, const char **argv) in set_config_values() argument
2283 r = set_config_value(cache, argv[0], argv[1]); in set_config_values()
2294 static int create_cache_policy(struct cache *cache, struct cache_args *ca, in create_cache_policy() argument
2298 cache->cache_size, in create_cache_policy()
2299 cache->origin_sectors, in create_cache_policy()
2300 cache->sectors_per_block); in create_cache_policy()
2305 cache->policy = p; in create_cache_policy()
2336 static void set_cache_size(struct cache *cache, dm_cblock_t size) in set_cache_size() argument
2340 if (nr_blocks > (1 << 20) && cache->cache_size != size) in set_cache_size()
2346 cache->cache_size = size; in set_cache_size()
2351 static int cache_create(struct cache_args *ca, struct cache **result) in cache_create()
2355 struct cache *cache; in cache_create() local
2361 cache = kzalloc(sizeof(*cache), GFP_KERNEL); in cache_create()
2362 if (!cache) in cache_create()
2365 cache->ti = ca->ti; in cache_create()
2366 ti->private = cache; in cache_create()
2375 cache->features = ca->features; in cache_create()
2376 ti->per_bio_data_size = get_per_bio_data_size(cache); in cache_create()
2378 cache->callbacks.congested_fn = cache_is_congested; in cache_create()
2379 dm_table_add_target_callbacks(ti->table, &cache->callbacks); in cache_create()
2381 cache->metadata_dev = ca->metadata_dev; in cache_create()
2382 cache->origin_dev = ca->origin_dev; in cache_create()
2383 cache->cache_dev = ca->cache_dev; in cache_create()
2388 origin_blocks = cache->origin_sectors = ca->origin_sectors; in cache_create()
2390 cache->origin_blocks = to_oblock(origin_blocks); in cache_create()
2392 cache->sectors_per_block = ca->block_size; in cache_create()
2393 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) { in cache_create()
2401 cache->sectors_per_block_shift = -1; in cache_create()
2403 set_cache_size(cache, to_cblock(cache_size)); in cache_create()
2405 cache->sectors_per_block_shift = __ffs(ca->block_size); in cache_create()
2406 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift)); in cache_create()
2409 r = create_cache_policy(cache, ca, error); in cache_create()
2413 cache->policy_nr_args = ca->policy_argc; in cache_create()
2414 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; in cache_create()
2416 r = set_config_values(cache, ca->policy_argc, ca->policy_argv); in cache_create()
2422 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, in cache_create()
2424 dm_cache_policy_get_hint_size(cache->policy)); in cache_create()
2430 cache->cmd = cmd; in cache_create()
2432 if (passthrough_mode(&cache->features)) { in cache_create()
2435 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean); in cache_create()
2448 spin_lock_init(&cache->lock); in cache_create()
2449 bio_list_init(&cache->deferred_bios); in cache_create()
2450 bio_list_init(&cache->deferred_flush_bios); in cache_create()
2451 bio_list_init(&cache->deferred_writethrough_bios); in cache_create()
2452 INIT_LIST_HEAD(&cache->quiesced_migrations); in cache_create()
2453 INIT_LIST_HEAD(&cache->completed_migrations); in cache_create()
2454 INIT_LIST_HEAD(&cache->need_commit_migrations); in cache_create()
2455 atomic_set(&cache->nr_allocated_migrations, 0); in cache_create()
2456 atomic_set(&cache->nr_io_migrations, 0); in cache_create()
2457 init_waitqueue_head(&cache->migration_wait); in cache_create()
2459 init_waitqueue_head(&cache->quiescing_wait); in cache_create()
2460 atomic_set(&cache->quiescing, 0); in cache_create()
2461 atomic_set(&cache->quiescing_ack, 0); in cache_create()
2464 atomic_set(&cache->nr_dirty, 0); in cache_create()
2465 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); in cache_create()
2466 if (!cache->dirty_bitset) { in cache_create()
2470 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); in cache_create()
2472 cache->discard_block_size = in cache_create()
2473 calculate_discard_block_size(cache->sectors_per_block, in cache_create()
2474 cache->origin_sectors); in cache_create()
2475 cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors, in cache_create()
2476 cache->discard_block_size)); in cache_create()
2477 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); in cache_create()
2478 if (!cache->discard_bitset) { in cache_create()
2482 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); in cache_create()
2484 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); in cache_create()
2485 if (IS_ERR(cache->copier)) { in cache_create()
2487 r = PTR_ERR(cache->copier); in cache_create()
2491 cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in cache_create()
2492 if (!cache->wq) { in cache_create()
2496 INIT_WORK(&cache->worker, do_worker); in cache_create()
2497 INIT_DELAYED_WORK(&cache->waker, do_waker); in cache_create()
2498 cache->last_commit_jiffies = jiffies; in cache_create()
2500 cache->prison = dm_bio_prison_create(); in cache_create()
2501 if (!cache->prison) { in cache_create()
2506 cache->all_io_ds = dm_deferred_set_create(); in cache_create()
2507 if (!cache->all_io_ds) { in cache_create()
2512 cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE, in cache_create()
2514 if (!cache->migration_pool) { in cache_create()
2519 cache->need_tick_bio = true; in cache_create()
2520 cache->sized = false; in cache_create()
2521 cache->invalidate = false; in cache_create()
2522 cache->commit_requested = false; in cache_create()
2523 cache->loaded_mappings = false; in cache_create()
2524 cache->loaded_discards = false; in cache_create()
2526 load_stats(cache); in cache_create()
2528 atomic_set(&cache->stats.demotion, 0); in cache_create()
2529 atomic_set(&cache->stats.promotion, 0); in cache_create()
2530 atomic_set(&cache->stats.copies_avoided, 0); in cache_create()
2531 atomic_set(&cache->stats.cache_cell_clash, 0); in cache_create()
2532 atomic_set(&cache->stats.commit_count, 0); in cache_create()
2533 atomic_set(&cache->stats.discard_count, 0); in cache_create()
2535 spin_lock_init(&cache->invalidation_lock); in cache_create()
2536 INIT_LIST_HEAD(&cache->invalidation_requests); in cache_create()
2538 *result = cache; in cache_create()
2542 destroy(cache); in cache_create()
2546 static int copy_ctr_args(struct cache *cache, int argc, const char **argv) in copy_ctr_args() argument
2564 cache->nr_ctr_args = argc; in copy_ctr_args()
2565 cache->ctr_args = copy; in copy_ctr_args()
2574 struct cache *cache = NULL; in cache_ctr() local
2587 r = cache_create(ca, &cache); in cache_ctr()
2591 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); in cache_ctr()
2593 destroy(cache); in cache_ctr()
2597 ti->private = cache; in cache_ctr()
2604 static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell) in __cache_map() argument
2607 dm_oblock_t block = get_bio_block(cache, bio); in __cache_map()
2608 size_t pb_data_size = get_per_bio_data_size(cache); in __cache_map()
2617 if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { in __cache_map()
2623 remap_to_origin(cache, bio); in __cache_map()
2628 defer_bio(cache, bio); in __cache_map()
2635 *cell = alloc_prison_cell(cache); in __cache_map()
2637 defer_bio(cache, bio); in __cache_map()
2641 r = bio_detain(cache, block, bio, *cell, in __cache_map()
2643 cache, cell); in __cache_map()
2646 defer_bio(cache, bio); in __cache_map()
2651 discarded_block = is_discarded_oblock(cache, block); in __cache_map()
2653 r = policy_map(cache->policy, block, false, can_migrate, discarded_block, in __cache_map()
2656 cell_defer(cache, *cell, true); in __cache_map()
2661 cell_defer(cache, *cell, false); in __cache_map()
2669 if (passthrough_mode(&cache->features)) { in __cache_map()
2675 cell_defer(cache, *cell, true); in __cache_map()
2679 inc_miss_counter(cache, bio); in __cache_map()
2680 remap_to_origin_clear_discard(cache, bio, block); in __cache_map()
2684 inc_hit_counter(cache, bio); in __cache_map()
2685 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && in __cache_map()
2686 !is_dirty(cache, lookup_result.cblock)) in __cache_map()
2687 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); in __cache_map()
2689 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); in __cache_map()
2694 inc_miss_counter(cache, bio); in __cache_map()
2701 cell_defer(cache, *cell, false); in __cache_map()
2705 remap_to_origin_clear_discard(cache, bio, block); in __cache_map()
2712 cell_defer(cache, *cell, false); in __cache_map()
2724 struct cache *cache = ti->private; in cache_map() local
2726 r = __cache_map(cache, bio, &cell); in cache_map()
2728 inc_ds(cache, bio, cell); in cache_map()
2729 cell_defer(cache, cell, false); in cache_map()
2737 struct cache *cache = ti->private; in cache_end_io() local
2739 size_t pb_data_size = get_per_bio_data_size(cache); in cache_end_io()
2743 policy_tick(cache->policy); in cache_end_io()
2745 spin_lock_irqsave(&cache->lock, flags); in cache_end_io()
2746 cache->need_tick_bio = true; in cache_end_io()
2747 spin_unlock_irqrestore(&cache->lock, flags); in cache_end_io()
2750 check_for_quiesced_migrations(cache, pb); in cache_end_io()
2755 static int write_dirty_bitset(struct cache *cache) in write_dirty_bitset() argument
2759 for (i = 0; i < from_cblock(cache->cache_size); i++) { in write_dirty_bitset()
2760 r = dm_cache_set_dirty(cache->cmd, to_cblock(i), in write_dirty_bitset()
2761 is_dirty(cache, to_cblock(i))); in write_dirty_bitset()
2769 static int write_discard_bitset(struct cache *cache) in write_discard_bitset() argument
2773 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, in write_discard_bitset()
2774 cache->discard_nr_blocks); in write_discard_bitset()
2780 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { in write_discard_bitset()
2781 r = dm_cache_set_discard(cache->cmd, to_dblock(i), in write_discard_bitset()
2782 is_discarded(cache, to_dblock(i))); in write_discard_bitset()
2793 static bool sync_metadata(struct cache *cache) in sync_metadata() argument
2797 r1 = write_dirty_bitset(cache); in sync_metadata()
2801 r2 = write_discard_bitset(cache); in sync_metadata()
2805 save_stats(cache); in sync_metadata()
2807 r3 = dm_cache_write_hints(cache->cmd, cache->policy); in sync_metadata()
2816 r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3); in sync_metadata()
2825 struct cache *cache = ti->private; in cache_postsuspend() local
2827 start_quiescing(cache); in cache_postsuspend()
2828 wait_for_migrations(cache); in cache_postsuspend()
2829 stop_worker(cache); in cache_postsuspend()
2830 requeue_deferred_io(cache); in cache_postsuspend()
2831 stop_quiescing(cache); in cache_postsuspend()
2833 (void) sync_metadata(cache); in cache_postsuspend()
2840 struct cache *cache = context; in load_mapping() local
2842 r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid); in load_mapping()
2847 set_dirty(cache, oblock, cblock); in load_mapping()
2849 clear_dirty(cache, oblock, cblock); in load_mapping()
2861 struct cache *cache; member
2871 static void discard_load_info_init(struct cache *cache, in discard_load_info_init() argument
2874 li->cache = cache; in discard_load_info_init()
2894 b = dm_sector_div_up(b, li->cache->discard_block_size); in set_discard_range()
2895 sector_div(e, li->cache->discard_block_size); in set_discard_range()
2901 if (e > from_dblock(li->cache->discard_nr_blocks)) in set_discard_range()
2902 e = from_dblock(li->cache->discard_nr_blocks); in set_discard_range()
2905 set_discard(li->cache, to_dblock(b)); in set_discard_range()
2938 static dm_cblock_t get_cache_dev_size(struct cache *cache) in get_cache_dev_size() argument
2940 sector_t size = get_dev_size(cache->cache_dev); in get_cache_dev_size()
2941 (void) sector_div(size, cache->sectors_per_block); in get_cache_dev_size()
2945 static bool can_resize(struct cache *cache, dm_cblock_t new_size) in can_resize() argument
2947 if (from_cblock(new_size) > from_cblock(cache->cache_size)) in can_resize()
2953 while (from_cblock(new_size) < from_cblock(cache->cache_size)) { in can_resize()
2955 if (is_dirty(cache, new_size)) { in can_resize()
2965 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) in resize_cache_dev() argument
2969 r = dm_cache_resize(cache->cmd, new_size); in resize_cache_dev()
2975 set_cache_size(cache, new_size); in resize_cache_dev()
2983 struct cache *cache = ti->private; in cache_preresume() local
2984 dm_cblock_t csize = get_cache_dev_size(cache); in cache_preresume()
2989 if (!cache->sized) { in cache_preresume()
2990 r = resize_cache_dev(cache, csize); in cache_preresume()
2994 cache->sized = true; in cache_preresume()
2996 } else if (csize != cache->cache_size) { in cache_preresume()
2997 if (!can_resize(cache, csize)) in cache_preresume()
3000 r = resize_cache_dev(cache, csize); in cache_preresume()
3005 if (!cache->loaded_mappings) { in cache_preresume()
3006 r = dm_cache_load_mappings(cache->cmd, cache->policy, in cache_preresume()
3007 load_mapping, cache); in cache_preresume()
3013 cache->loaded_mappings = true; in cache_preresume()
3016 if (!cache->loaded_discards) { in cache_preresume()
3024 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); in cache_preresume()
3026 discard_load_info_init(cache, &li); in cache_preresume()
3027 r = dm_cache_load_discards(cache->cmd, load_discard, &li); in cache_preresume()
3034 cache->loaded_discards = true; in cache_preresume()
3042 struct cache *cache = ti->private; in cache_resume() local
3044 cache->need_tick_bio = true; in cache_resume()
3045 do_waker(&cache->waker.work); in cache_resume()
3068 struct cache *cache = ti->private; in cache_status() local
3075 r = dm_cache_commit(cache->cmd, false); in cache_status()
3080 r = dm_cache_get_free_metadata_block_count(cache->cmd, in cache_status()
3087 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata); in cache_status()
3093 residency = policy_residency(cache->policy); in cache_status()
3099 cache->sectors_per_block, in cache_status()
3101 (unsigned long long) from_cblock(cache->cache_size), in cache_status()
3102 (unsigned) atomic_read(&cache->stats.read_hit), in cache_status()
3103 (unsigned) atomic_read(&cache->stats.read_miss), in cache_status()
3104 (unsigned) atomic_read(&cache->stats.write_hit), in cache_status()
3105 (unsigned) atomic_read(&cache->stats.write_miss), in cache_status()
3106 (unsigned) atomic_read(&cache->stats.demotion), in cache_status()
3107 (unsigned) atomic_read(&cache->stats.promotion), in cache_status()
3108 (unsigned long) atomic_read(&cache->nr_dirty)); in cache_status()
3110 if (writethrough_mode(&cache->features)) in cache_status()
3113 else if (passthrough_mode(&cache->features)) in cache_status()
3116 else if (writeback_mode(&cache->features)) in cache_status()
3120 DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode); in cache_status()
3124 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); in cache_status()
3126 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy)); in cache_status()
3128 r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz); in cache_status()
3136 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev); in cache_status()
3138 format_dev_t(buf, cache->cache_dev->bdev->bd_dev); in cache_status()
3140 format_dev_t(buf, cache->origin_dev->bdev->bd_dev); in cache_status()
3143 for (i = 0; i < cache->nr_ctr_args - 1; i++) in cache_status()
3144 DMEMIT(" %s", cache->ctr_args[i]); in cache_status()
3145 if (cache->nr_ctr_args) in cache_status()
3146 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]); in cache_status()
3161 static int parse_cblock_range(struct cache *cache, const char *str, in parse_cblock_range() argument
3198 static int validate_cblock_range(struct cache *cache, struct cblock_range *range) in validate_cblock_range() argument
3202 uint64_t n = from_cblock(cache->cache_size); in validate_cblock_range()
3222 static int request_invalidation(struct cache *cache, struct cblock_range *range) in request_invalidation() argument
3232 spin_lock(&cache->invalidation_lock); in request_invalidation()
3233 list_add(&req.list, &cache->invalidation_requests); in request_invalidation()
3234 spin_unlock(&cache->invalidation_lock); in request_invalidation()
3235 wake_worker(cache); in request_invalidation()
3241 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count, in process_invalidate_cblocks_message() argument
3248 if (!passthrough_mode(&cache->features)) { in process_invalidate_cblocks_message()
3254 r = parse_cblock_range(cache, cblock_ranges[i], &range); in process_invalidate_cblocks_message()
3258 r = validate_cblock_range(cache, &range); in process_invalidate_cblocks_message()
3265 r = request_invalidation(cache, &range); in process_invalidate_cblocks_message()
3283 struct cache *cache = ti->private; in cache_message() local
3289 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1); in cache_message()
3294 return set_config_value(cache, argv[0], argv[1]); in cache_message()
3301 struct cache *cache = ti->private; in cache_iterate_devices() local
3303 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data); in cache_iterate_devices()
3305 r = fn(ti, cache->origin_dev, 0, ti->len, data); in cache_iterate_devices()
3320 struct cache *cache = ti->private; in cache_bvec_merge() local
3321 struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev); in cache_bvec_merge()
3326 bvm->bi_bdev = cache->origin_dev->bdev; in cache_bvec_merge()
3330 static void set_discard_limits(struct cache *cache, struct queue_limits *limits) in set_discard_limits() argument
3335 limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024, in set_discard_limits()
3336 cache->origin_sectors); in set_discard_limits()
3337 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; in set_discard_limits()
3342 struct cache *cache = ti->private; in cache_io_hints() local
3349 if (io_opt_sectors < cache->sectors_per_block || in cache_io_hints()
3350 do_div(io_opt_sectors, cache->sectors_per_block)) { in cache_io_hints()
3351 blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT); in cache_io_hints()
3352 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); in cache_io_hints()
3354 set_discard_limits(cache, limits); in cache_io_hints()