Lines matching refs: mq

840 static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)  in writeback_sentinel()  argument
842 return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels); in writeback_sentinel()
845 static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level) in demote_sentinel() argument
847 return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels); in demote_sentinel()
850 static void __update_writeback_sentinels(struct smq_policy *mq) in __update_writeback_sentinels() argument
853 struct queue *q = &mq->dirty; in __update_writeback_sentinels()
857 sentinel = writeback_sentinel(mq, level); in __update_writeback_sentinels()
863 static void __update_demote_sentinels(struct smq_policy *mq) in __update_demote_sentinels() argument
866 struct queue *q = &mq->clean; in __update_demote_sentinels()
870 sentinel = demote_sentinel(mq, level); in __update_demote_sentinels()
876 static void update_sentinels(struct smq_policy *mq) in update_sentinels() argument
878 if (time_after(jiffies, mq->next_writeback_period)) { in update_sentinels()
879 __update_writeback_sentinels(mq); in update_sentinels()
880 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; in update_sentinels()
881 mq->current_writeback_sentinels = !mq->current_writeback_sentinels; in update_sentinels()
884 if (time_after(jiffies, mq->next_demote_period)) { in update_sentinels()
885 __update_demote_sentinels(mq); in update_sentinels()
886 mq->next_demote_period = jiffies + DEMOTE_PERIOD; in update_sentinels()
887 mq->current_demote_sentinels = !mq->current_demote_sentinels; in update_sentinels()
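
The helpers above (writeback_sentinel(), demote_sentinel(), update_sentinels()) keep two alternating sets of per-level sentinel entries and flip which set is current once per WRITEBACK_PERIOD / DEMOTE_PERIOD, so anything still queued behind the older set has gone a full period without being touched. A minimal standalone sketch of that double-buffered flip, with invented names rather than the kernel's types:

#include <stdbool.h>
#include <stdio.h>

#define NR_LEVELS 4u

/* two sets of per-level sentinels; 'current_set' selects the active one */
static int sentinels[2][NR_LEVELS];
static bool current_set;

static int *sentinel(unsigned level)
{
        return &sentinels[current_set ? 1 : 0][level];
}

/*
 * Called once per period: the spare set would be pushed to the back of
 * each level's queue, then made current.  Entries still sitting behind
 * the previously current set are at least one full period old.
 */
static void rotate_sentinels(void)
{
        current_set = !current_set;
}

int main(void)
{
        printf("level 0 sentinel before flip: %p\n", (void *)sentinel(0));
        rotate_sentinels();
        printf("level 0 sentinel after flip:  %p\n", (void *)sentinel(0));
        return 0;
}
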
891 static void __sentinels_init(struct smq_policy *mq) in __sentinels_init() argument
897 sentinel = writeback_sentinel(mq, level); in __sentinels_init()
899 q_push(&mq->dirty, sentinel); in __sentinels_init()
901 sentinel = demote_sentinel(mq, level); in __sentinels_init()
903 q_push(&mq->clean, sentinel); in __sentinels_init()
907 static void sentinels_init(struct smq_policy *mq) in sentinels_init() argument
909 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD; in sentinels_init()
910 mq->next_demote_period = jiffies + DEMOTE_PERIOD; in sentinels_init()
912 mq->current_writeback_sentinels = false; in sentinels_init()
913 mq->current_demote_sentinels = false; in sentinels_init()
914 __sentinels_init(mq); in sentinels_init()
916 mq->current_writeback_sentinels = !mq->current_writeback_sentinels; in sentinels_init()
917 mq->current_demote_sentinels = !mq->current_demote_sentinels; in sentinels_init()
918 __sentinels_init(mq); in sentinels_init()
926 static void push_new(struct smq_policy *mq, struct entry *e) in push_new() argument
928 struct queue *q = e->dirty ? &mq->dirty : &mq->clean; in push_new()
929 h_insert(&mq->table, e); in push_new()
933 static void push(struct smq_policy *mq, struct entry *e) in push() argument
937 h_insert(&mq->table, e); in push()
944 sentinel = writeback_sentinel(mq, e->level); in push()
945 q_push_before(&mq->dirty, sentinel, e); in push()
947 sentinel = demote_sentinel(mq, e->level); in push()
948 q_push_before(&mq->clean, sentinel, e); in push()
955 static void __del(struct smq_policy *mq, struct queue *q, struct entry *e) in __del() argument
958 h_remove(&mq->table, e); in __del()
961 static void del(struct smq_policy *mq, struct entry *e) in del() argument
963 __del(mq, e->dirty ? &mq->dirty : &mq->clean, e); in del()
966 static struct entry *pop_old(struct smq_policy *mq, struct queue *q, unsigned max_level) in pop_old() argument
970 h_remove(&mq->table, e); in pop_old()
974 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e) in infer_cblock() argument
976 return to_cblock(get_index(&mq->cache_alloc, e)); in infer_cblock()
979 static void requeue(struct smq_policy *mq, struct entry *e) in requeue() argument
983 if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) { in requeue()
985 sentinel = writeback_sentinel(mq, e->level); in requeue()
986 q_requeue_before(&mq->dirty, sentinel, e, 1u); in requeue()
988 sentinel = demote_sentinel(mq, e->level); in requeue()
989 q_requeue_before(&mq->clean, sentinel, e, 1u); in requeue()
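
requeue() only promotes an entry by a single level, and only when test_and_set_bit() reports that its bit in cache_hit_bits was still clear, so each cache block earns at most one boost per cache period (end_cache_period() clears the bitset again). A small userspace sketch of that first-hit-per-period gating with an ordinary bitset; the names are illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_BLOCKS 1024u

static unsigned long hit_bits[NR_BLOCKS / (8 * sizeof(unsigned long))];

/* set the bit for 'block' and report whether it was already set */
static bool test_and_set_hit(unsigned block)
{
        unsigned long *word = &hit_bits[block / (8 * sizeof(unsigned long))];
        unsigned long mask = 1ul << (block % (8 * sizeof(unsigned long)));
        bool was_set = *word & mask;

        *word |= mask;
        return was_set;
}

/* end of period: forget which blocks were hit */
static void clear_hits(void)
{
        memset(hit_bits, 0, sizeof(hit_bits));
}

int main(void)
{
        unsigned block = 42;

        /* first hit in a period gets the level boost, later hits do not */
        printf("boost on first hit:  %s\n", test_and_set_hit(block) ? "no" : "yes");
        printf("boost on second hit: %s\n", test_and_set_hit(block) ? "no" : "yes");
        clear_hits();
        printf("boost after reset:   %s\n", test_and_set_hit(block) ? "no" : "yes");
        return 0;
}
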
994 static unsigned default_promote_level(struct smq_policy *mq) in default_promote_level() argument
1014 unsigned hits = mq->cache_stats.hits; in default_promote_level()
1015 unsigned misses = mq->cache_stats.misses; in default_promote_level()
1020 static void update_promote_levels(struct smq_policy *mq) in update_promote_levels() argument
1026 unsigned threshold_level = allocator_empty(&mq->cache_alloc) ? in update_promote_levels()
1027 default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u); in update_promote_levels()
1034 switch (stats_assess(&mq->hotspot_stats)) { in update_promote_levels()
1047 mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level; in update_promote_levels()
1048 mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level) + 2u; in update_promote_levels()
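
default_promote_level() turns the recent hit/miss ratio of the cache into a promotion threshold: a badly performing cache cannot afford aggressive promotion, and a cache that is already hitting well does not need it. A hedged sketch of that idea, mapping the ratio onto a small lookup table; the table values here are illustrative and not the kernel's:

#include <stdio.h>

/*
 * Map a hit ratio in [0, 1] to a promotion threshold.  Low ratios get a
 * conservative threshold, middling ratios promote more aggressively, and
 * very high ratios back off again because the cache is already working.
 */
static unsigned promote_level(unsigned hits, unsigned misses)
{
        static const unsigned table[] = {
                1, 1, 2, 3, 4, 5, 6, 7, 7, 6, 5, 4, 3, 3, 2, 2, 1
        };
        unsigned total = hits + misses;
        unsigned index = total ? (hits * 16u) / total : 0u;

        return table[index];
}

int main(void)
{
        printf("cold cache (10%% hits): level %u\n", promote_level(10, 90));
        printf("warming    (50%% hits): level %u\n", promote_level(50, 50));
        printf("hot cache  (95%% hits): level %u\n", promote_level(95, 5));
        return 0;
}
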
1055 static void update_level_jump(struct smq_policy *mq) in update_level_jump() argument
1057 switch (stats_assess(&mq->hotspot_stats)) { in update_level_jump()
1059 mq->hotspot_level_jump = 4u; in update_level_jump()
1063 mq->hotspot_level_jump = 2u; in update_level_jump()
1067 mq->hotspot_level_jump = 1u; in update_level_jump()
1072 static void end_hotspot_period(struct smq_policy *mq) in end_hotspot_period() argument
1074 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); in end_hotspot_period()
1075 update_promote_levels(mq); in end_hotspot_period()
1077 if (time_after(jiffies, mq->next_hotspot_period)) { in end_hotspot_period()
1078 update_level_jump(mq); in end_hotspot_period()
1079 q_redistribute(&mq->hotspot); in end_hotspot_period()
1080 stats_reset(&mq->hotspot_stats); in end_hotspot_period()
1081 mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD; in end_hotspot_period()
1085 static void end_cache_period(struct smq_policy *mq) in end_cache_period() argument
1087 if (time_after(jiffies, mq->next_cache_period)) { in end_cache_period()
1088 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in end_cache_period()
1090 q_redistribute(&mq->dirty); in end_cache_period()
1091 q_redistribute(&mq->clean); in end_cache_period()
1092 stats_reset(&mq->cache_stats); in end_cache_period()
1094 mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD; in end_cache_period()
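
end_hotspot_period() and end_cache_period() share a pattern: every tick does a cheap deadline check against jiffies, and only when the deadline has passed does the heavier bookkeeping run (clear hit bits, redistribute queue levels, reset stats) before the deadline is pushed forward. A standalone sketch of that guard using wall-clock seconds in place of jiffies; note that the kernel's time_after() also copes with jiffies wrap-around, which this plain comparison does not:

#include <stdio.h>
#include <time.h>

#define PERIOD_SECONDS 2

static time_t next_period;

/* heavier bookkeeping that should only run once per period */
static void end_period_work(void)
{
        printf("redistribute levels, clear hit bits, reset stats\n");
}

/* cheap check done on every tick; the work itself is rate limited */
static void maybe_end_period(void)
{
        time_t now = time(NULL);

        if (now >= next_period) {
                end_period_work();
                next_period = now + PERIOD_SECONDS;
        }
}

int main(void)
{
        next_period = time(NULL); /* first call fires immediately */

        for (int tick = 0; tick < 5; tick++) {
                maybe_end_period();
                /* many ticks per period; most of them do nothing */
        }
        return 0;
}
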
1098 static int demote_cblock(struct smq_policy *mq, in demote_cblock() argument
1102 struct entry *demoted = q_peek(&mq->clean, mq->clean.nr_levels, false); in demote_cblock()
1119 del(mq, demoted); in demote_cblock()
1121 free_entry(&mq->cache_alloc, demoted); in demote_cblock()
1140 static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e, struct bio *bi… in should_promote() argument
1144 if (!allocator_empty(&mq->cache_alloc) && fast_promote) in should_promote()
1148 return maybe_promote(hs_e->level >= mq->write_promote_level); in should_promote()
1150 return maybe_promote(hs_e->level >= mq->read_promote_level); in should_promote()
1153 static void insert_in_cache(struct smq_policy *mq, dm_oblock_t oblock, in insert_in_cache() argument
1160 if (allocator_empty(&mq->cache_alloc)) { in insert_in_cache()
1162 r = demote_cblock(mq, locker, &result->old_oblock); in insert_in_cache()
1171 e = alloc_entry(&mq->cache_alloc); in insert_in_cache()
1176 push(mq, e); in insert_in_cache()
1178 push_new(mq, e); in insert_in_cache()
1180 result->cblock = infer_cblock(mq, e); in insert_in_cache()
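
insert_in_cache() is the miss path: when the cache allocator is empty, demote_cblock() first evicts a victim peeked from the clean queue to free a slot, and only then is a fresh entry allocated, pushed, and its cblock reported back. A compact sketch of that allocate-or-evict flow over a toy fixed-size pool (hypothetical structures, not the dm-cache types):

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 2u

struct slot {
        bool in_use;
        unsigned origin_block;
};

static struct slot pool[POOL_SIZE];

static struct slot *alloc_slot(void)
{
        for (unsigned i = 0; i < POOL_SIZE; i++)
                if (!pool[i].in_use) {
                        pool[i].in_use = true;
                        return &pool[i];
                }
        return NULL;
}

/* evict the first in-use slot; the real policy picks a cold, clean victim */
static struct slot *demote_one(void)
{
        for (unsigned i = 0; i < POOL_SIZE; i++)
                if (pool[i].in_use) {
                        printf("demoting origin block %u\n", pool[i].origin_block);
                        pool[i].in_use = false;
                        return &pool[i];
                }
        return NULL;
}

static void insert(unsigned origin_block)
{
        struct slot *s = alloc_slot();

        if (!s) {                       /* allocator empty: make room first */
                demote_one();
                s = alloc_slot();
        }
        s->origin_block = origin_block;
        printf("cached origin block %u\n", origin_block);
}

int main(void)
{
        insert(10);
        insert(20);
        insert(30);     /* pool full: forces a demotion before the insert */
        return 0;
}
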
1183 static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b) in to_hblock() argument
1186 (void) sector_div(r, mq->cache_blocks_per_hotspot_block); in to_hblock()
1190 static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b, struct bio *bio) in update_hotspot_queue() argument
1193 dm_oblock_t hb = to_hblock(mq, b); in update_hotspot_queue()
1194 struct entry *e = h_lookup(&mq->hotspot_table, hb); in update_hotspot_queue()
1197 stats_level_accessed(&mq->hotspot_stats, e->level); in update_hotspot_queue()
1199 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1200 q_requeue(&mq->hotspot, e, in update_hotspot_queue()
1201 test_and_set_bit(hi, mq->hotspot_hit_bits) ? in update_hotspot_queue()
1202 0u : mq->hotspot_level_jump); in update_hotspot_queue()
1205 stats_miss(&mq->hotspot_stats); in update_hotspot_queue()
1207 e = alloc_entry(&mq->hotspot_alloc); in update_hotspot_queue()
1209 e = q_pop(&mq->hotspot); in update_hotspot_queue()
1211 h_remove(&mq->hotspot_table, e); in update_hotspot_queue()
1212 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1213 clear_bit(hi, mq->hotspot_hit_bits); in update_hotspot_queue()
1220 q_push(&mq->hotspot, e); in update_hotspot_queue()
1221 h_insert(&mq->hotspot_table, e); in update_hotspot_queue()
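
Hotspot tracking works on coarser hotspot blocks that each cover several cache blocks: to_hblock() divides the origin block by cache_blocks_per_hotspot_block (via sector_div() in the kernel), and a hit moves the hotspot entry up by hotspot_level_jump levels only on the first hit per period, using the same test_and_set_bit() gating shown earlier. A trivial sketch of the block-size mapping with plain division; the ratio is an example value:

#include <stdio.h>

/* each hotspot block covers this many cache-sized blocks (example value) */
#define CACHE_BLOCKS_PER_HOTSPOT_BLOCK 16ull

/* origin block -> hotspot block, a userspace analogue of to_hblock() */
static unsigned long long to_hblock(unsigned long long origin_block)
{
        return origin_block / CACHE_BLOCKS_PER_HOTSPOT_BLOCK;
}

int main(void)
{
        unsigned long long blocks[] = { 0, 15, 16, 250 };

        for (unsigned i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
                printf("origin block %llu -> hotspot block %llu\n",
                       blocks[i], to_hblock(blocks[i]));
        return 0;
}
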
1232 static int map(struct smq_policy *mq, struct bio *bio, dm_oblock_t oblock, in map() argument
1239 hs_e = update_hotspot_queue(mq, oblock, bio); in map()
1241 e = h_lookup(&mq->table, oblock); in map()
1243 stats_level_accessed(&mq->cache_stats, e->level); in map()
1245 requeue(mq, e); in map()
1247 result->cblock = infer_cblock(mq, e); in map()
1250 stats_miss(&mq->cache_stats); in map()
1252 pr = should_promote(mq, hs_e, bio, fast_promote); in map()
1262 insert_in_cache(mq, oblock, locker, result, pr); in map()
1283 struct smq_policy *mq = to_smq_policy(p); in smq_destroy() local
1285 h_exit(&mq->hotspot_table); in smq_destroy()
1286 h_exit(&mq->table); in smq_destroy()
1287 free_bitset(mq->hotspot_hit_bits); in smq_destroy()
1288 free_bitset(mq->cache_hit_bits); in smq_destroy()
1289 space_exit(&mq->es); in smq_destroy()
1290 kfree(mq); in smq_destroy()
1300 struct smq_policy *mq = to_smq_policy(p); in smq_map() local
1304 spin_lock_irqsave(&mq->lock, flags); in smq_map()
1305 r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result); in smq_map()
1306 spin_unlock_irqrestore(&mq->lock, flags); in smq_map()
1315 struct smq_policy *mq = to_smq_policy(p); in smq_lookup() local
1318 spin_lock_irqsave(&mq->lock, flags); in smq_lookup()
1319 e = h_lookup(&mq->table, oblock); in smq_lookup()
1321 *cblock = infer_cblock(mq, e); in smq_lookup()
1325 spin_unlock_irqrestore(&mq->lock, flags); in smq_lookup()
1330 static void __smq_set_clear_dirty(struct smq_policy *mq, dm_oblock_t oblock, bool set) in __smq_set_clear_dirty() argument
1334 e = h_lookup(&mq->table, oblock); in __smq_set_clear_dirty()
1337 del(mq, e); in __smq_set_clear_dirty()
1339 push(mq, e); in __smq_set_clear_dirty()
1345 struct smq_policy *mq = to_smq_policy(p); in smq_set_dirty() local
1347 spin_lock_irqsave(&mq->lock, flags); in smq_set_dirty()
1348 __smq_set_clear_dirty(mq, oblock, true); in smq_set_dirty()
1349 spin_unlock_irqrestore(&mq->lock, flags); in smq_set_dirty()
1354 struct smq_policy *mq = to_smq_policy(p); in smq_clear_dirty() local
1357 spin_lock_irqsave(&mq->lock, flags); in smq_clear_dirty()
1358 __smq_set_clear_dirty(mq, oblock, false); in smq_clear_dirty()
1359 spin_unlock_irqrestore(&mq->lock, flags); in smq_clear_dirty()
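
All the exported smq_* hooks follow the same locking shape: a thin wrapper takes mq->lock with spin_lock_irqsave(), calls a double-underscore helper that assumes the lock is already held, and releases it. A userspace sketch of that split, with a pthread mutex standing in for the spinlock and made-up state:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned nr_dirty;

/* unlocked helper: callers must already hold the lock */
static void __set_clear_dirty(int set)
{
        if (set)
                nr_dirty++;
        else
                nr_dirty--;
}

/* public entry points: take the lock, delegate, release */
static void set_dirty(void)
{
        pthread_mutex_lock(&lock);
        __set_clear_dirty(1);
        pthread_mutex_unlock(&lock);
}

static void clear_dirty(void)
{
        pthread_mutex_lock(&lock);
        __set_clear_dirty(0);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        set_dirty();
        set_dirty();
        clear_dirty();
        printf("nr_dirty = %u\n", nr_dirty);
        return 0;
}
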
1366 struct smq_policy *mq = to_smq_policy(p); in smq_load_mapping() local
1369 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_load_mapping()
1373 push(mq, e); in smq_load_mapping()
1378 static int smq_save_hints(struct smq_policy *mq, struct queue *q, in smq_save_hints() argument
1388 r = fn(context, infer_cblock(mq, e), in smq_save_hints()
1401 struct smq_policy *mq = to_smq_policy(p); in smq_walk_mappings() local
1408 r = smq_save_hints(mq, &mq->clean, fn, context); in smq_walk_mappings()
1410 r = smq_save_hints(mq, &mq->dirty, fn, context); in smq_walk_mappings()
1415 static void __remove_mapping(struct smq_policy *mq, dm_oblock_t oblock) in __remove_mapping() argument
1419 e = h_lookup(&mq->table, oblock); in __remove_mapping()
1422 del(mq, e); in __remove_mapping()
1423 free_entry(&mq->cache_alloc, e); in __remove_mapping()
1428 struct smq_policy *mq = to_smq_policy(p); in smq_remove_mapping() local
1431 spin_lock_irqsave(&mq->lock, flags); in smq_remove_mapping()
1432 __remove_mapping(mq, oblock); in smq_remove_mapping()
1433 spin_unlock_irqrestore(&mq->lock, flags); in smq_remove_mapping()
1436 static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock) in __remove_cblock() argument
1438 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in __remove_cblock()
1443 del(mq, e); in __remove_cblock()
1444 free_entry(&mq->cache_alloc, e); in __remove_cblock()
1453 struct smq_policy *mq = to_smq_policy(p); in smq_remove_cblock() local
1455 spin_lock_irqsave(&mq->lock, flags); in smq_remove_cblock()
1456 r = __remove_cblock(mq, cblock); in smq_remove_cblock()
1457 spin_unlock_irqrestore(&mq->lock, flags); in smq_remove_cblock()
1465 static bool clean_target_met(struct smq_policy *mq, bool critical) in clean_target_met() argument
1472 unsigned nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty); in clean_target_met()
1473 unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_CRITICAL / 100u; in clean_target_met()
1477 return !q_size(&mq->dirty); in clean_target_met()
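
clean_target_met() decides how urgent write-back is: in the critical case the number of clean blocks (cache size minus dirty queue size) must reach a percentage of the cache, otherwise the target is simply an empty dirty queue. A worked sketch of that check; the CLEAN_TARGET_CRITICAL value below is only an illustrative percentage:

#include <stdbool.h>
#include <stdio.h>

#define CLEAN_TARGET_CRITICAL 5u        /* percent; illustrative value */

static bool clean_target_met(unsigned cache_size, unsigned nr_dirty, bool critical)
{
        if (critical) {
                unsigned nr_clean = cache_size - nr_dirty;
                unsigned target = cache_size * CLEAN_TARGET_CRITICAL / 100u;

                return nr_clean >= target;
        }
        return nr_dirty == 0;
}

int main(void)
{
        /* 1000-block cache: the critical target is 50 clean blocks */
        printf("940 dirty, critical: %d\n", clean_target_met(1000, 940, true));
        printf("990 dirty, critical: %d\n", clean_target_met(1000, 990, true));
        printf("  1 dirty, normal:   %d\n", clean_target_met(1000, 1, false));
        return 0;
}
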
1480 static int __smq_writeback_work(struct smq_policy *mq, dm_oblock_t *oblock, in __smq_writeback_work() argument
1484 bool target_met = clean_target_met(mq, critical_only); in __smq_writeback_work()
1490 e = pop_old(mq, &mq->dirty, target_met ? 1u : mq->dirty.nr_levels); in __smq_writeback_work()
1493 e = pop_old(mq, &mq->dirty, mq->dirty.nr_levels); in __smq_writeback_work()
1499 *cblock = infer_cblock(mq, e); in __smq_writeback_work()
1501 push_new(mq, e); in __smq_writeback_work()
1511 struct smq_policy *mq = to_smq_policy(p); in smq_writeback_work() local
1513 spin_lock_irqsave(&mq->lock, flags); in smq_writeback_work()
1514 r = __smq_writeback_work(mq, oblock, cblock, critical_only); in smq_writeback_work()
1515 spin_unlock_irqrestore(&mq->lock, flags); in smq_writeback_work()
1520 static void __force_mapping(struct smq_policy *mq, in __force_mapping() argument
1523 struct entry *e = h_lookup(&mq->table, current_oblock); in __force_mapping()
1526 del(mq, e); in __force_mapping()
1529 push(mq, e); in __force_mapping()
1537 struct smq_policy *mq = to_smq_policy(p); in smq_force_mapping() local
1539 spin_lock_irqsave(&mq->lock, flags); in smq_force_mapping()
1540 __force_mapping(mq, current_oblock, new_oblock); in smq_force_mapping()
1541 spin_unlock_irqrestore(&mq->lock, flags); in smq_force_mapping()
1548 struct smq_policy *mq = to_smq_policy(p); in smq_residency() local
1550 spin_lock_irqsave(&mq->lock, flags); in smq_residency()
1551 r = to_cblock(mq->cache_alloc.nr_allocated); in smq_residency()
1552 spin_unlock_irqrestore(&mq->lock, flags); in smq_residency()
1559 struct smq_policy *mq = to_smq_policy(p); in smq_tick() local
1562 spin_lock_irqsave(&mq->lock, flags); in smq_tick()
1563 mq->tick++; in smq_tick()
1564 update_sentinels(mq); in smq_tick()
1565 end_hotspot_period(mq); in smq_tick()
1566 end_cache_period(mq); in smq_tick()
1567 spin_unlock_irqrestore(&mq->lock, flags); in smq_tick()
1571 static void init_policy_functions(struct smq_policy *mq) in init_policy_functions() argument
1573 mq->policy.destroy = smq_destroy; in init_policy_functions()
1574 mq->policy.map = smq_map; in init_policy_functions()
1575 mq->policy.lookup = smq_lookup; in init_policy_functions()
1576 mq->policy.set_dirty = smq_set_dirty; in init_policy_functions()
1577 mq->policy.clear_dirty = smq_clear_dirty; in init_policy_functions()
1578 mq->policy.load_mapping = smq_load_mapping; in init_policy_functions()
1579 mq->policy.walk_mappings = smq_walk_mappings; in init_policy_functions()
1580 mq->policy.remove_mapping = smq_remove_mapping; in init_policy_functions()
1581 mq->policy.remove_cblock = smq_remove_cblock; in init_policy_functions()
1582 mq->policy.writeback_work = smq_writeback_work; in init_policy_functions()
1583 mq->policy.force_mapping = smq_force_mapping; in init_policy_functions()
1584 mq->policy.residency = smq_residency; in init_policy_functions()
1585 mq->policy.tick = smq_tick; in init_policy_functions()
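
init_policy_functions() fills a table of function pointers on the embedded struct dm_cache_policy, and each smq_* hook recovers the containing struct smq_policy from it (to_smq_policy() is a container_of()-style wrapper). A self-contained sketch of that embed-and-recover pattern with invented types:

#include <stddef.h>
#include <stdio.h>

/* generic interface: a struct of function pointers */
struct policy {
        unsigned (*residency)(struct policy *p);
};

/* concrete policy embeds the generic one */
struct my_policy {
        struct policy policy;
        unsigned nr_allocated;
};

/* recover the containing object from the embedded interface pointer */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static unsigned my_residency(struct policy *p)
{
        struct my_policy *mp = container_of(p, struct my_policy, policy);

        return mp->nr_allocated;
}

static void init_policy_functions(struct my_policy *mp)
{
        mp->policy.residency = my_residency;
}

int main(void)
{
        struct my_policy mp = { .nr_allocated = 128 };
        struct policy *p = &mp.policy;

        init_policy_functions(&mp);
        printf("residency: %u\n", p->residency(p));
        return 0;
}
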
1616 struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL); in smq_create() local
1618 if (!mq) in smq_create()
1621 init_policy_functions(mq); in smq_create()
1622 mq->cache_size = cache_size; in smq_create()
1623 mq->cache_block_size = cache_block_size; in smq_create()
1626 &mq->hotspot_block_size, &mq->nr_hotspot_blocks); in smq_create()
1628 mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size); in smq_create()
1629 mq->hotspot_level_jump = 1u; in smq_create()
1630 if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) { in smq_create()
1635 init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue); in smq_create()
1637 get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true; in smq_create()
1639 init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels); in smq_create()
1641 get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true; in smq_create()
1643 init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels, in smq_create()
1644 total_sentinels + mq->nr_hotspot_blocks); in smq_create()
1646 init_allocator(&mq->cache_alloc, &mq->es, in smq_create()
1647 total_sentinels + mq->nr_hotspot_blocks, in smq_create()
1648 total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size)); in smq_create()
1650 mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks); in smq_create()
1651 if (!mq->hotspot_hit_bits) { in smq_create()
1655 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks); in smq_create()
1658 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); in smq_create()
1659 if (!mq->cache_hit_bits) { in smq_create()
1663 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size)); in smq_create()
1665 mq->cache_hit_bits = NULL; in smq_create()
1667 mq->tick = 0; in smq_create()
1668 spin_lock_init(&mq->lock); in smq_create()
1670 q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS); in smq_create()
1671 mq->hotspot.nr_top_levels = 8; in smq_create()
1672 mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS, in smq_create()
1673 from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block); in smq_create()
1675 q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS); in smq_create()
1676 q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS); in smq_create()
1678 stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS); in smq_create()
1679 stats_init(&mq->cache_stats, NR_CACHE_LEVELS); in smq_create()
1681 if (h_init(&mq->table, &mq->es, from_cblock(cache_size))) in smq_create()
1684 if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks)) in smq_create()
1687 sentinels_init(mq); in smq_create()
1688 mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS; in smq_create()
1690 mq->next_hotspot_period = jiffies; in smq_create()
1691 mq->next_cache_period = jiffies; in smq_create()
1693 return &mq->policy; in smq_create()
1696 h_exit(&mq->table); in smq_create()
1698 free_bitset(mq->cache_hit_bits); in smq_create()
1700 free_bitset(mq->hotspot_hit_bits); in smq_create()
1702 space_exit(&mq->es); in smq_create()
1704 kfree(mq); in smq_create()
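
smq_create() sizes a single entry space for everything (both sentinel sets, the hotspot entries, the cache entries) and then carves it into sub-allocators by index range with init_allocator(); infer_cblock() later recovers a cblock from an entry's index within the cache allocator's window. A sketch of that one-arena, several-index-range-allocators layout, with invented sizes:

#include <stdio.h>

struct entry { int used; };

/* a sub-allocator is just a [begin, end) window into the shared arena */
struct allocator {
        struct entry *arena;
        unsigned begin, end, next;
};

static void init_allocator(struct allocator *a, struct entry *arena,
                           unsigned begin, unsigned end)
{
        a->arena = arena;
        a->begin = begin;
        a->end = end;
        a->next = begin;
}

static struct entry *alloc_entry(struct allocator *a)
{
        if (a->next >= a->end)
                return NULL;
        return &a->arena[a->next++];
}

int main(void)
{
        enum { NR_SENTINELS = 8, NR_HOTSPOT = 32, NR_CACHE = 64 };
        static struct entry arena[NR_SENTINELS + NR_HOTSPOT + NR_CACHE];
        struct allocator sentinels, hotspot, cache;

        init_allocator(&sentinels, arena, 0, NR_SENTINELS);
        init_allocator(&hotspot, arena, NR_SENTINELS, NR_SENTINELS + NR_HOTSPOT);
        init_allocator(&cache, arena, NR_SENTINELS + NR_HOTSPOT,
                       NR_SENTINELS + NR_HOTSPOT + NR_CACHE);

        /* an entry's index in the arena identifies which region it came from */
        printf("first hotspot entry index: %ld\n",
               (long)(alloc_entry(&hotspot) - arena));
        printf("first cache entry index:   %ld\n",
               (long)(alloc_entry(&cache) - arena));
        return 0;
}
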