Lines matching refs: mq
495 static void hash_insert(struct mq_policy *mq, struct entry *e) in hash_insert() argument
497 unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits); in hash_insert()
499 hlist_add_head(&e->hlist, mq->table + h); in hash_insert()
502 static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock) in hash_lookup() argument
504 unsigned h = hash_64(from_oblock(oblock), mq->hash_bits); in hash_lookup()
505 struct hlist_head *bucket = mq->table + h; in hash_lookup()
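The two helpers above implement the policy's oblock-to-entry hash table: hash_insert() buckets an entry by hashing its origin block number, and hash_lookup() walks the matching bucket. Below is a standalone sketch of the bucket selection only; the single multiply with a Fibonacci-hashing constant is an illustrative simplification of the kernel's hash_64(), and the demo values are made up.

#include <stdint.h>
#include <stdio.h>

/* Multiplicative (Fibonacci) hash: keep the top 'hash_bits' bits of the product. */
static unsigned bucket_for(uint64_t oblock, unsigned hash_bits)
{
	const uint64_t FIB_CONST = 0x61c8864680b583ebULL;	/* illustrative constant */

	return (unsigned)((oblock * FIB_CONST) >> (64 - hash_bits));
}

int main(void)
{
	unsigned hash_bits = 10;	/* i.e. nr_buckets == 1024 */

	for (uint64_t oblock = 0; oblock < 4; oblock++)
		printf("oblock %llu -> bucket %u\n",
		       (unsigned long long)oblock, bucket_for(oblock, hash_bits));
	return 0;
}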
525 static bool any_free_cblocks(struct mq_policy *mq) in any_free_cblocks() argument
527 return !epool_empty(&mq->cache_pool); in any_free_cblocks()
530 static bool any_clean_cblocks(struct mq_policy *mq) in any_clean_cblocks() argument
532 return !queue_empty(&mq->cache_clean); in any_clean_cblocks()
551 static bool in_cache(struct mq_policy *mq, struct entry *e) in in_cache() argument
553 return in_pool(&mq->cache_pool, e); in in_cache()
561 static void push(struct mq_policy *mq, struct entry *e) in push() argument
563 hash_insert(mq, e); in push()
565 if (in_cache(mq, e)) in push()
566 queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean, in push()
569 queue_push(&mq->pre_cache, queue_level(e), &e->list); in push()
575 static void del(struct mq_policy *mq, struct entry *e) in del() argument
577 if (in_cache(mq, e)) in del()
578 queue_remove(e->dirty ? &mq->cache_dirty : &mq->cache_clean, &e->list); in del()
580 queue_remove(&mq->pre_cache, &e->list); in del()
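push() and del() route an entry to one of three queues: cached entries go on cache_dirty or cache_clean depending on their dirty flag, and everything else stays in the pre_cache. The sketch below isolates just that routing decision; struct entry and struct queue here are cut-down stand-ins, not the driver's definitions.

#include <stdbool.h>
#include <stdio.h>

struct queue { const char *name; };

struct entry {
	bool in_cache;	/* allocated from the cache pool? */
	bool dirty;
};

/* Mirrors the choice made in push()/del(). */
static struct queue *queue_for(const struct entry *e, struct queue *pre_cache,
			       struct queue *cache_clean, struct queue *cache_dirty)
{
	if (!e->in_cache)
		return pre_cache;
	return e->dirty ? cache_dirty : cache_clean;
}

int main(void)
{
	struct queue pre = {"pre_cache"}, clean = {"cache_clean"}, dirty = {"cache_dirty"};
	struct entry uncached = {false, false}, clean_hit = {true, false}, dirty_hit = {true, true};

	printf("%s / %s / %s\n",
	       queue_for(&uncached, &pre, &clean, &dirty)->name,
	       queue_for(&clean_hit, &pre, &clean, &dirty)->name,
	       queue_for(&dirty_hit, &pre, &clean, &dirty)->name);
	return 0;
}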
589 static struct entry *pop(struct mq_policy *mq, struct queue *q) in pop() argument
603 static struct entry *pop_old(struct mq_policy *mq, struct queue *q) in pop_old() argument
638 static void check_generation(struct mq_policy *mq) in check_generation() argument
644 if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) { in check_generation()
645 mq->hit_count = 0; in check_generation()
646 mq->generation++; in check_generation()
649 head = mq->cache_clean.qs + level; in check_generation()
658 head = mq->cache_dirty.qs + level; in check_generation()
674 static void requeue(struct mq_policy *mq, struct entry *e) in requeue() argument
676 check_generation(mq); in requeue()
677 del(mq, e); in requeue()
678 push(mq, e); in requeue()
696 static int demote_cblock(struct mq_policy *mq, in demote_cblock() argument
699 struct entry *demoted = peek(&mq->cache_clean); in demote_cblock()
717 del(mq, demoted); in demote_cblock()
719 free_entry(&mq->cache_pool, demoted); in demote_cblock()
735 static unsigned promote_threshold(struct mq_policy *mq) in promote_threshold() argument
739 if (any_free_cblocks(mq)) in promote_threshold()
742 e = peek(&mq->cache_clean); in promote_threshold()
746 e = peek(&mq->cache_dirty); in promote_threshold()
763 static unsigned adjusted_promote_threshold(struct mq_policy *mq, in adjusted_promote_threshold() argument
767 return promote_threshold(mq) + mq->read_promote_adjustment; in adjusted_promote_threshold()
769 if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) { in adjusted_promote_threshold()
774 return mq->discard_promote_adjustment; in adjusted_promote_threshold()
777 return promote_threshold(mq) + mq->write_promote_adjustment; in adjusted_promote_threshold()
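adjusted_promote_threshold() biases the base promote threshold by I/O direction, and short-circuits to the low discard adjustment when the block was discarded and a cblock can be claimed without copying data in. A standalone sketch of that decision follows; the numeric adjustments are assumptions standing in for the read/write/discard_promote_adjustment tunables, not the driver's authoritative defaults.

#include <stdbool.h>
#include <stdio.h>

enum { DIR_READ, DIR_WRITE };

#define READ_ADJ	4	/* assumed values for this sketch */
#define WRITE_ADJ	8
#define DISCARD_ADJ	1

static unsigned adjusted_threshold(unsigned base_threshold, bool discarded,
				   bool free_or_clean_cblocks, int data_dir)
{
	if (data_dir == DIR_READ)
		return base_threshold + READ_ADJ;

	/* A discarded block needs no copy-in, so promotion is made cheap. */
	if (discarded && free_or_clean_cblocks)
		return DISCARD_ADJ;

	return base_threshold + WRITE_ADJ;
}

int main(void)
{
	printf("read %u, write %u, discarded write %u\n",
	       adjusted_threshold(16, false, true, DIR_READ),
	       adjusted_threshold(16, false, true, DIR_WRITE),
	       adjusted_threshold(16, true, true, DIR_WRITE));
	return 0;
}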
780 static bool should_promote(struct mq_policy *mq, struct entry *e, in should_promote() argument
784 adjusted_promote_threshold(mq, discarded_oblock, data_dir); in should_promote()
787 static int cache_entry_found(struct mq_policy *mq, in cache_entry_found() argument
791 requeue(mq, e); in cache_entry_found()
793 if (in_cache(mq, e)) { in cache_entry_found()
795 result->cblock = infer_cblock(&mq->cache_pool, e); in cache_entry_found()
805 static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e, in pre_cache_to_cache() argument
813 if (epool_empty(&mq->cache_pool)) { in pre_cache_to_cache()
815 r = demote_cblock(mq, locker, &result->old_oblock); in pre_cache_to_cache()
824 new_e = alloc_entry(&mq->cache_pool); in pre_cache_to_cache()
831 del(mq, e); in pre_cache_to_cache()
832 free_entry(&mq->pre_cache_pool, e); in pre_cache_to_cache()
833 push(mq, new_e); in pre_cache_to_cache()
835 result->cblock = infer_cblock(&mq->cache_pool, new_e); in pre_cache_to_cache()
840 static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e, in pre_cache_entry_found() argument
847 if (!should_promote(mq, e, discarded_oblock, data_dir)) { in pre_cache_entry_found()
848 requeue(mq, e); in pre_cache_entry_found()
855 requeue(mq, e); in pre_cache_entry_found()
856 r = pre_cache_to_cache(mq, e, locker, result); in pre_cache_entry_found()
862 static void insert_in_pre_cache(struct mq_policy *mq, in insert_in_pre_cache() argument
865 struct entry *e = alloc_entry(&mq->pre_cache_pool); in insert_in_pre_cache()
872 e = pop(mq, &mq->pre_cache); in insert_in_pre_cache()
882 push(mq, e); in insert_in_pre_cache()
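insert_in_pre_cache() prefers a fresh entry from the pre-cache pool and, when the pool is exhausted, recycles an entry popped from the pre_cache queue instead of failing. The sketch below keeps only that allocate-or-recycle shape; the fixed two-slot pool and the pop_oldest() stand-in (which simply reuses slot 0 rather than tracking true LRU order) are invented for brevity.

#include <stdio.h>

#define POOL_SIZE 2

struct entry {
	unsigned long oblock;
	int used;
};

static struct entry pool[POOL_SIZE];

static struct entry *alloc_entry(void)
{
	for (int i = 0; i < POOL_SIZE; i++)
		if (!pool[i].used) {
			pool[i].used = 1;
			return &pool[i];
		}
	return NULL;	/* pool exhausted */
}

/* Stand-in for pop(mq, &mq->pre_cache). */
static struct entry *pop_oldest(void)
{
	return &pool[0];
}

static void insert_in_pre_cache(unsigned long oblock)
{
	struct entry *e = alloc_entry();

	if (!e)
		e = pop_oldest();	/* recycle rather than fail */
	e->oblock = oblock;
	printf("oblock %lu -> slot %ld\n", oblock, (long)(e - pool));
}

int main(void)
{
	insert_in_pre_cache(10);
	insert_in_pre_cache(11);
	insert_in_pre_cache(12);	/* pool full: slot 0 is recycled */
	return 0;
}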
885 static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock, in insert_in_cache() argument
892 if (epool_empty(&mq->cache_pool)) { in insert_in_cache()
894 r = demote_cblock(mq, locker, &result->old_oblock); in insert_in_cache()
897 insert_in_pre_cache(mq, oblock); in insert_in_cache()
904 e = alloc_entry(&mq->cache_pool); in insert_in_cache()
908 e = alloc_entry(&mq->cache_pool); in insert_in_cache()
915 push(mq, e); in insert_in_cache()
917 result->cblock = infer_cblock(&mq->cache_pool, e); in insert_in_cache()
920 static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock, in no_entry_found() argument
925 if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) { in no_entry_found()
927 insert_in_cache(mq, oblock, locker, result); in no_entry_found()
931 insert_in_pre_cache(mq, oblock); in no_entry_found()
942 static int map(struct mq_policy *mq, dm_oblock_t oblock, in map() argument
948 struct entry *e = hash_lookup(mq, oblock); in map()
950 if (e && in_cache(mq, e)) in map()
951 r = cache_entry_found(mq, e, result); in map()
953 else if (mq->tracker.thresholds[PATTERN_SEQUENTIAL] && in map()
954 iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL) in map()
958 r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock, in map()
962 r = no_entry_found(mq, oblock, can_migrate, discarded_oblock, in map()
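map() strings the above helpers together in a fixed branch order: a cache hit is requeued and its cblock returned, a recognised sequential stream is left on the origin device, a pre-cache hit may be promoted, and an unknown block falls through to no_entry_found(). The model below reduces that order to its boolean inputs; the returned strings are descriptions, not the driver's policy_result values.

#include <stdbool.h>
#include <stdio.h>

static const char *map_decision(bool in_cache, bool sequential_io, bool in_pre_cache)
{
	if (in_cache)
		return "cache_entry_found: hit, requeue, return cblock";
	if (sequential_io)
		return "sequential pattern: miss, stay on origin";
	if (in_pre_cache)
		return "pre_cache_entry_found: consider promotion";
	return "no_entry_found: add to pre-cache (or straight to cache)";
}

int main(void)
{
	printf("%s\n", map_decision(false, false, true));
	return 0;
}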
985 struct mq_policy *mq = to_mq_policy(p); in mq_destroy() local
987 vfree(mq->table); in mq_destroy()
988 epool_exit(&mq->cache_pool); in mq_destroy()
989 epool_exit(&mq->pre_cache_pool); in mq_destroy()
990 kfree(mq); in mq_destroy()
1001 struct mq_policy *mq = context; in update_cache_hits() local
1004 mq->hit_count++; in update_cache_hits()
1007 static void copy_tick(struct mq_policy *mq) in copy_tick() argument
1011 spin_lock_irqsave(&mq->tick_lock, flags); in copy_tick()
1012 tick = mq->tick_protected; in copy_tick()
1013 if (tick != mq->tick) { in copy_tick()
1014 queue_iterate_tick(&mq->pre_cache, update_pre_cache_hits, mq); in copy_tick()
1015 queue_iterate_tick(&mq->cache_dirty, update_cache_hits, mq); in copy_tick()
1016 queue_iterate_tick(&mq->cache_clean, update_cache_hits, mq); in copy_tick()
1017 mq->tick = tick; in copy_tick()
1020 queue_tick(&mq->pre_cache); in copy_tick()
1021 queue_tick(&mq->cache_dirty); in copy_tick()
1022 queue_tick(&mq->cache_clean); in copy_tick()
1023 queue_update_writeback_sentinels(&mq->cache_dirty); in copy_tick()
1024 spin_unlock_irqrestore(&mq->tick_lock, flags); in copy_tick()
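mq_tick() (further down in this listing) bumps tick_protected under the tick spinlock from interrupt-safe context, and copy_tick() folds the pending ticks into mq->tick under the same lock before a mapping is serviced. The userspace sketch below models that producer/consumer split with a pthread mutex standing in for the spinlock; the per-queue hit aging the real copy_tick() performs is only marked by a comment. Build with -pthread.

#include <pthread.h>
#include <stdio.h>

struct policy {
	pthread_mutex_t tick_lock;
	unsigned tick_protected;	/* advanced by the periodic tick */
	unsigned tick;			/* value the mapping path uses */
};

/* Producer side, analogous to mq_tick(). */
static void record_tick(struct policy *p)
{
	pthread_mutex_lock(&p->tick_lock);
	p->tick_protected++;
	pthread_mutex_unlock(&p->tick_lock);
}

/* Consumer side, analogous to copy_tick(). */
static void copy_tick(struct policy *p)
{
	pthread_mutex_lock(&p->tick_lock);
	if (p->tick != p->tick_protected) {
		/* ...the driver ages per-queue hit counts here... */
		p->tick = p->tick_protected;
	}
	pthread_mutex_unlock(&p->tick_lock);
}

int main(void)
{
	struct policy p = { .tick_lock = PTHREAD_MUTEX_INITIALIZER };

	record_tick(&p);
	record_tick(&p);
	copy_tick(&p);
	printf("tick now %u\n", p.tick);	/* prints 2 */
	return 0;
}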
1033 struct mq_policy *mq = to_mq_policy(p); in mq_map() local
1038 mutex_lock(&mq->lock); in mq_map()
1039 else if (!mutex_trylock(&mq->lock)) in mq_map()
1042 copy_tick(mq); in mq_map()
1044 iot_examine_bio(&mq->tracker, bio); in mq_map()
1045 r = map(mq, oblock, can_migrate, discarded_oblock, in mq_map()
1048 mutex_unlock(&mq->lock); in mq_map()
1056 struct mq_policy *mq = to_mq_policy(p); in mq_lookup() local
1059 if (!mutex_trylock(&mq->lock)) in mq_lookup()
1062 e = hash_lookup(mq, oblock); in mq_lookup()
1063 if (e && in_cache(mq, e)) { in mq_lookup()
1064 *cblock = infer_cblock(&mq->cache_pool, e); in mq_lookup()
1069 mutex_unlock(&mq->lock); in mq_lookup()
1074 static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set) in __mq_set_clear_dirty() argument
1078 e = hash_lookup(mq, oblock); in __mq_set_clear_dirty()
1079 BUG_ON(!e || !in_cache(mq, e)); in __mq_set_clear_dirty()
1081 del(mq, e); in __mq_set_clear_dirty()
1083 push(mq, e); in __mq_set_clear_dirty()
1088 struct mq_policy *mq = to_mq_policy(p); in mq_set_dirty() local
1090 mutex_lock(&mq->lock); in mq_set_dirty()
1091 __mq_set_clear_dirty(mq, oblock, true); in mq_set_dirty()
1092 mutex_unlock(&mq->lock); in mq_set_dirty()
1097 struct mq_policy *mq = to_mq_policy(p); in mq_clear_dirty() local
1099 mutex_lock(&mq->lock); in mq_clear_dirty()
1100 __mq_set_clear_dirty(mq, oblock, false); in mq_clear_dirty()
1101 mutex_unlock(&mq->lock); in mq_clear_dirty()
1108 struct mq_policy *mq = to_mq_policy(p); in mq_load_mapping() local
1111 e = alloc_particular_entry(&mq->cache_pool, cblock); in mq_load_mapping()
1115 push(mq, e); in mq_load_mapping()
1120 static int mq_save_hints(struct mq_policy *mq, struct queue *q, in mq_save_hints() argument
1134 r = fn(context, infer_cblock(&mq->cache_pool, e), in mq_save_hints()
1146 struct mq_policy *mq = to_mq_policy(p); in mq_walk_mappings() local
1149 mutex_lock(&mq->lock); in mq_walk_mappings()
1151 r = mq_save_hints(mq, &mq->cache_clean, fn, context); in mq_walk_mappings()
1153 r = mq_save_hints(mq, &mq->cache_dirty, fn, context); in mq_walk_mappings()
1155 mutex_unlock(&mq->lock); in mq_walk_mappings()
1160 static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) in __remove_mapping() argument
1164 e = hash_lookup(mq, oblock); in __remove_mapping()
1165 BUG_ON(!e || !in_cache(mq, e)); in __remove_mapping()
1167 del(mq, e); in __remove_mapping()
1168 free_entry(&mq->cache_pool, e); in __remove_mapping()
1173 struct mq_policy *mq = to_mq_policy(p); in mq_remove_mapping() local
1175 mutex_lock(&mq->lock); in mq_remove_mapping()
1176 __remove_mapping(mq, oblock); in mq_remove_mapping()
1177 mutex_unlock(&mq->lock); in mq_remove_mapping()
1180 static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock) in __remove_cblock() argument
1182 struct entry *e = epool_find(&mq->cache_pool, cblock); in __remove_cblock()
1187 del(mq, e); in __remove_cblock()
1188 free_entry(&mq->cache_pool, e); in __remove_cblock()
1196 struct mq_policy *mq = to_mq_policy(p); in mq_remove_cblock() local
1198 mutex_lock(&mq->lock); in mq_remove_cblock()
1199 r = __remove_cblock(mq, cblock); in mq_remove_cblock()
1200 mutex_unlock(&mq->lock); in mq_remove_cblock()
1207 static bool clean_target_met(struct mq_policy *mq) in clean_target_met() argument
1213 unsigned nr_clean = from_cblock(mq->cache_size) - queue_size(&mq->cache_dirty); in clean_target_met()
1214 unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_PERCENTAGE / 100; in clean_target_met()
1219 static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock, in __mq_writeback_work() argument
1222 struct entry *e = pop_old(mq, &mq->cache_dirty); in __mq_writeback_work()
1224 if (!e && !clean_target_met(mq)) in __mq_writeback_work()
1225 e = pop(mq, &mq->cache_dirty); in __mq_writeback_work()
1231 *cblock = infer_cblock(&mq->cache_pool, e); in __mq_writeback_work()
1233 push(mq, e); in __mq_writeback_work()
1242 struct mq_policy *mq = to_mq_policy(p); in mq_writeback_work() local
1244 mutex_lock(&mq->lock); in mq_writeback_work()
1245 r = __mq_writeback_work(mq, oblock, cblock); in mq_writeback_work()
1246 mutex_unlock(&mq->lock); in mq_writeback_work()
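__mq_writeback_work() first takes a dirty entry that has aged past the writeback sentinels; if none is available, it only forces cleaning while the clean population sits below a target fraction of the cache. The arithmetic of that target check is sketched below; the 25% figure is an assumption standing in for CLEAN_TARGET_PERCENTAGE.

#include <stdbool.h>
#include <stdio.h>

#define CLEAN_TARGET_PERCENTAGE 25	/* assumed value for this sketch */

static bool clean_target_met(unsigned cache_size, unsigned nr_dirty)
{
	unsigned nr_clean = cache_size - nr_dirty;
	unsigned target = cache_size * CLEAN_TARGET_PERCENTAGE / 100;

	return nr_clean >= target;
}

int main(void)
{
	/* 1024-block cache, 900 dirty: 124 clean < 256 target, so
	 * background writeback would keep being offered work. */
	printf("clean target %s\n", clean_target_met(1024, 900) ? "met" : "not met");
	return 0;
}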
1251 static void __force_mapping(struct mq_policy *mq, in __force_mapping() argument
1254 struct entry *e = hash_lookup(mq, current_oblock); in __force_mapping()
1256 if (e && in_cache(mq, e)) { in __force_mapping()
1257 del(mq, e); in __force_mapping()
1260 push(mq, e); in __force_mapping()
1267 struct mq_policy *mq = to_mq_policy(p); in mq_force_mapping() local
1269 mutex_lock(&mq->lock); in mq_force_mapping()
1270 __force_mapping(mq, current_oblock, new_oblock); in mq_force_mapping()
1271 mutex_unlock(&mq->lock); in mq_force_mapping()
1277 struct mq_policy *mq = to_mq_policy(p); in mq_residency() local
1279 mutex_lock(&mq->lock); in mq_residency()
1280 r = to_cblock(mq->cache_pool.nr_allocated); in mq_residency()
1281 mutex_unlock(&mq->lock); in mq_residency()
1288 struct mq_policy *mq = to_mq_policy(p); in mq_tick() local
1291 spin_lock_irqsave(&mq->tick_lock, flags); in mq_tick()
1292 mq->tick_protected++; in mq_tick()
1293 spin_unlock_irqrestore(&mq->tick_lock, flags); in mq_tick()
1299 struct mq_policy *mq = to_mq_policy(p); in mq_set_config_value() local
1306 mq->tracker.thresholds[PATTERN_RANDOM] = tmp; in mq_set_config_value()
1309 mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp; in mq_set_config_value()
1312 mq->discard_promote_adjustment = tmp; in mq_set_config_value()
1315 mq->read_promote_adjustment = tmp; in mq_set_config_value()
1318 mq->write_promote_adjustment = tmp; in mq_set_config_value()
1329 struct mq_policy *mq = to_mq_policy(p); in mq_emit_config_values() local
1336 mq->tracker.thresholds[PATTERN_RANDOM], in mq_emit_config_values()
1337 mq->tracker.thresholds[PATTERN_SEQUENTIAL], in mq_emit_config_values()
1338 mq->discard_promote_adjustment, in mq_emit_config_values()
1339 mq->read_promote_adjustment, in mq_emit_config_values()
1340 mq->write_promote_adjustment); in mq_emit_config_values()
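mq_set_config_value() and mq_emit_config_values() expose the policy's five tunables as key/value message strings. The sketch below shows the same match-then-parse shape in userspace; the key strings and the default values in main() are assumptions based on the field names visible in this listing, and strtoul() replaces the kernel's string parsing helpers.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tunables {
	unsigned random_threshold;
	unsigned sequential_threshold;
	unsigned discard_promote_adjustment;
	unsigned read_promote_adjustment;
	unsigned write_promote_adjustment;
};

static int set_config_value(struct tunables *t, const char *key, const char *value)
{
	char *end;
	unsigned long tmp = strtoul(value, &end, 10);

	if (*end)
		return -EINVAL;	/* not a plain decimal number */

	if (!strcmp(key, "random_threshold"))
		t->random_threshold = tmp;
	else if (!strcmp(key, "sequential_threshold"))
		t->sequential_threshold = tmp;
	else if (!strcmp(key, "discard_promote_adjustment"))
		t->discard_promote_adjustment = tmp;
	else if (!strcmp(key, "read_promote_adjustment"))
		t->read_promote_adjustment = tmp;
	else if (!strcmp(key, "write_promote_adjustment"))
		t->write_promote_adjustment = tmp;
	else
		return -EINVAL;	/* unknown key */

	return 0;
}

int main(void)
{
	struct tunables t = { 4, 512, 1, 4, 8 };	/* illustrative defaults */

	set_config_value(&t, "read_promote_adjustment", "2");
	printf("read_promote_adjustment = %u\n", t.read_promote_adjustment);
	return 0;
}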
1346 static void init_policy_functions(struct mq_policy *mq) in init_policy_functions() argument
1348 mq->policy.destroy = mq_destroy; in init_policy_functions()
1349 mq->policy.map = mq_map; in init_policy_functions()
1350 mq->policy.lookup = mq_lookup; in init_policy_functions()
1351 mq->policy.set_dirty = mq_set_dirty; in init_policy_functions()
1352 mq->policy.clear_dirty = mq_clear_dirty; in init_policy_functions()
1353 mq->policy.load_mapping = mq_load_mapping; in init_policy_functions()
1354 mq->policy.walk_mappings = mq_walk_mappings; in init_policy_functions()
1355 mq->policy.remove_mapping = mq_remove_mapping; in init_policy_functions()
1356 mq->policy.remove_cblock = mq_remove_cblock; in init_policy_functions()
1357 mq->policy.writeback_work = mq_writeback_work; in init_policy_functions()
1358 mq->policy.force_mapping = mq_force_mapping; in init_policy_functions()
1359 mq->policy.residency = mq_residency; in init_policy_functions()
1360 mq->policy.tick = mq_tick; in init_policy_functions()
1361 mq->policy.emit_config_values = mq_emit_config_values; in init_policy_functions()
1362 mq->policy.set_config_value = mq_set_config_value; in init_policy_functions()
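init_policy_functions() is ordinary ops-table wiring: mq fills in a struct of function pointers that the cache core then calls through. The sketch below shows the same pattern with a deliberately tiny, invented ops struct of two members.

#include <stdio.h>

struct policy_ops {
	void (*destroy)(void *ctx);
	unsigned (*residency)(void *ctx);
};

struct toy_policy {
	struct policy_ops ops;
	unsigned nr_allocated;
};

static void toy_destroy(void *ctx)
{
	(void)ctx;
	puts("destroyed");
}

static unsigned toy_residency(void *ctx)
{
	return ((struct toy_policy *)ctx)->nr_allocated;
}

/* Same shape as init_policy_functions(): point each op at this policy. */
static void init_ops(struct toy_policy *p)
{
	p->ops.destroy = toy_destroy;
	p->ops.residency = toy_residency;
}

int main(void)
{
	struct toy_policy p = { .nr_allocated = 42 };

	init_ops(&p);
	printf("residency = %u\n", p.ops.residency(&p));
	p.ops.destroy(&p);
	return 0;
}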
1369 struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL); in mq_create() local
1371 if (!mq) in mq_create()
1374 init_policy_functions(mq); in mq_create()
1375 iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT); in mq_create()
1376 mq->cache_size = cache_size; in mq_create()
1378 if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) { in mq_create()
1383 if (epool_init(&mq->cache_pool, from_cblock(cache_size))) { in mq_create()
1388 mq->tick_protected = 0; in mq_create()
1389 mq->tick = 0; in mq_create()
1390 mq->hit_count = 0; in mq_create()
1391 mq->generation = 0; in mq_create()
1392 mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT; in mq_create()
1393 mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT; in mq_create()
1394 mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT; in mq_create()
1395 mutex_init(&mq->lock); in mq_create()
1396 spin_lock_init(&mq->tick_lock); in mq_create()
1398 queue_init(&mq->pre_cache); in mq_create()
1399 queue_init(&mq->cache_clean); in mq_create()
1400 queue_init(&mq->cache_dirty); in mq_create()
1402 mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U); in mq_create()
1404 mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16); in mq_create()
1405 mq->hash_bits = ffs(mq->nr_buckets) - 1; in mq_create()
1406 mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets); in mq_create()
1407 if (!mq->table) in mq_create()
1410 return &mq->policy; in mq_create()
1413 epool_exit(&mq->cache_pool); in mq_create()
1415 epool_exit(&mq->pre_cache_pool); in mq_create()
1417 kfree(mq); in mq_create()
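mq_create() sizes the hash table at about half a bucket per cache block, rounded up to a power of two with a floor of 16, then derives hash_bits from the bucket count with ffs(). The standalone sketch below reproduces that arithmetic; next_power() here is a local loop approximating the driver's helper rather than the kernel's roundup_pow_of_two().

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned next_power(unsigned n, unsigned min)
{
	unsigned p = 1;

	if (n < min)
		n = min;
	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned cache_blocks = 10000;	/* example cache size, in blocks */
	unsigned nr_buckets = next_power(cache_blocks / 2, 16);
	unsigned hash_bits = ffs(nr_buckets) - 1;

	/* 10000 / 2 = 5000 -> 8192 buckets -> 13 hash bits */
	printf("nr_buckets = %u, hash_bits = %u\n", nr_buckets, hash_bits);
	return 0;
}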