References to reg in drivers/md/dm-region-hash.c, grouped by containing function (reg is a struct dm_region * argument or local in each):

In dm_rh_region_context() (reg: argument):
  134  void *dm_rh_region_context(struct dm_region *reg)
  136          return reg->rh->context;

In dm_rh_get_region_key() (reg: argument):
  140  region_t dm_rh_get_region_key(struct dm_region *reg)
  142          return reg->key;

In dm_region_hash_destroy() (reg, nreg: locals):
  238          struct dm_region *reg, *nreg;
  242          list_for_each_entry_safe(reg, nreg, rh->buckets + h,
  244                  BUG_ON(atomic_read(&reg->pending));
  245                  mempool_free(reg, rh->region_pool);

In __rh_lookup() (reg: local):
  271          struct dm_region *reg;
  274          list_for_each_entry(reg, bucket, hash_list)
  275                  if (reg->key == region)
  276                          return reg;

In __rh_insert() (reg: argument):
  281  static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
  283          list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
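
The two fragments above are the core hash-table primitives: __rh_lookup() walks the bucket selected by rh_hash() comparing keys, and __rh_insert() adds the region at the head of its bucket via list_add(). A minimal userspace sketch of the same bucket-walk pattern; the singly linked chain, the modulo hash, and all names here are stand-ins, not the kernel's struct list_head machinery:

#include <stddef.h>

typedef unsigned long region_t;

struct region {
        region_t key;
        struct region *next;            /* bucket chain link */
};

#define NR_BUCKETS 64U

static struct region *buckets[NR_BUCKETS];

/* Placeholder hash; the kernel version mixes a prime and a shift. */
static unsigned int rh_hash(region_t key)
{
        return key % NR_BUCKETS;
}

static struct region *rh_lookup(region_t key)
{
        struct region *reg;

        for (reg = buckets[rh_hash(key)]; reg; reg = reg->next)
                if (reg->key == key)
                        return reg;
        return NULL;                    /* region not hashed yet */
}

static void rh_insert(struct region *reg)
{
        unsigned int h = rh_hash(reg->key);

        reg->next = buckets[h];         /* head insertion, like list_add() */
        buckets[h] = reg;
}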

In __rh_alloc() (reg, nreg: locals):
  288          struct dm_region *reg, *nreg;
  303          reg = __rh_lookup(rh, region);
  304          if (reg)
  315                  reg = nreg;
  319          return reg;

In __rh_find() (reg: local):
  324          struct dm_region *reg;
  326          reg = __rh_lookup(rh, region);
  327          if (!reg) {
  329                  reg = __rh_alloc(rh, region);
  333          return reg;
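
__rh_find() layers find-or-create on top of the lookup: a miss falls through to __rh_alloc(), and the re-lookup at line 303 guards against another writer inserting the region while the hash lock was dropped for the allocation. Continuing the sketch above, with calloc() standing in for mempool_alloc() and the race re-check omitted:

#include <stdlib.h>

/* Find-or-create: only allocate and hash a region on a lookup miss. */
static struct region *rh_find(region_t key)
{
        struct region *reg = rh_lookup(key);

        if (!reg) {
                reg = calloc(1, sizeof(*reg));  /* kernel: mempool_alloc() */
                if (!reg)
                        return NULL;
                reg->key = key;
                rh_insert(reg);
        }
        return reg;
}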

In dm_rh_get_state() (reg: local):
  339          struct dm_region *reg;
  342          reg = __rh_lookup(rh, region);
  345          if (reg)
  346                  return reg->state;

In complete_resync_work() (reg: argument):
  362  static void complete_resync_work(struct dm_region *reg, int success)
  364          struct dm_region_hash *rh = reg->rh;
  366          rh->log->type->set_region_sync(rh->log, reg->key, success);
  377          rh->dispatch_bios(rh->context, &reg->delayed_bios);

In dm_rh_mark_nosync() (reg: local):
  397          struct dm_region *reg;
  413          reg = __rh_find(rh, region);
  417          BUG_ON(!reg);
  418          BUG_ON(!list_empty(&reg->list));
  428          recovering = (reg->state == DM_RH_RECOVERING);
  429          reg->state = DM_RH_NOSYNC;
  430          BUG_ON(!list_empty(&reg->list));
  434          complete_resync_work(reg, 0);

In dm_rh_update_states() (reg, next: locals):
  440          struct dm_region *reg, *next;
  454          list_for_each_entry(reg, &clean, list)
  455                  list_del(&reg->hash_list);
  461          list_for_each_entry(reg, &recovered, list)
  462                  list_del(&reg->hash_list);
  469          list_for_each_entry(reg, &failed_recovered, list)
  470                  list_del(&reg->hash_list);
  481          list_for_each_entry_safe(reg, next, &recovered, list) {
  482                  rh->log->type->clear_region(rh->log, reg->key);
  483                  complete_resync_work(reg, 1);
  484                  mempool_free(reg, rh->region_pool);
  487          list_for_each_entry_safe(reg, next, &failed_recovered, list) {
  488                  complete_resync_work(reg, errors_handled ? 0 : 1);
  489                  mempool_free(reg, rh->region_pool);
  492          list_for_each_entry_safe(reg, next, &clean, list) {
  493                  rh->log->type->clear_region(rh->log, reg->key);
  494                  mempool_free(reg, rh->region_pool);
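
dm_rh_update_states() first unhashes every region on the clean, recovered, and failed-recovered lists, then walks each list again and frees the regions back to the mempool. The _safe iterator variants matter in the second pass because mempool_free() destroys the node being visited; a sketch of that sample-next-before-free pattern on a plain singly linked list:

#include <stdlib.h>

struct node {
        struct node *next;
};

/*
 * Equivalent of list_for_each_entry_safe(): remember the successor
 * before freeing the current node, so the traversal survives the free.
 */
static void reap(struct node *head)
{
        struct node *reg, *next;

        for (reg = head; reg; reg = next) {
                next = reg->next;       /* sample before free */
                free(reg);              /* 'next' stays valid */
        }
}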

In rh_inc() (reg: local):
  503          struct dm_region *reg;
  506          reg = __rh_find(rh, region);
  509          atomic_inc(&reg->pending);
  511          if (reg->state == DM_RH_CLEAN) {
  512                  reg->state = DM_RH_DIRTY;
  513                  list_del_init(&reg->list);      /* take off the clean list */
  516                  rh->log->type->mark_region(rh->log, reg->key);
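
rh_inc() is the write-side hook: each incoming write bumps the region's pending count, and the first write into a CLEAN region flips it to DIRTY, takes it off the clean list, and marks it in the dirty log. A reduced sketch of that transition, with a plain int in place of atomic_t and the locking, list, and log calls reduced to comments:

enum rh_state { RH_CLEAN, RH_DIRTY, RH_NOSYNC, RH_RECOVERING };

struct rgn {
        enum rh_state state;
        int pending;                    /* atomic_t in the kernel */
};

/* Called for every write aimed at the region; lock elided. */
static void rgn_inc(struct rgn *reg)
{
        reg->pending++;
        if (reg->state == RH_CLEAN) {
                reg->state = RH_DIRTY;
                /* kernel: list_del_init(&reg->list); then mark_region() */
        }
}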

In dm_rh_dec() (reg: local):
  539          struct dm_region *reg;
  543          reg = __rh_lookup(rh, region);
  547          if (atomic_dec_and_test(&reg->pending)) {
  566                          reg->state = DM_RH_NOSYNC;
  567                  } else if (reg->state == DM_RH_RECOVERING) {
  568                          list_add_tail(&reg->list, &rh->quiesced_regions);
  569                  } else if (reg->state == DM_RH_DIRTY) {
  570                          reg->state = DM_RH_CLEAN;
  571                          list_add(&reg->list, &rh->clean_regions);
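
dm_rh_dec() is the matching completion hook: when atomic_dec_and_test() reports the last pending write done, the region either becomes NOSYNC (the error branch ending at line 566), joins the quiesced list so recovery can proceed, or returns to CLEAN. Continuing the sketch above; the condition guarding the NOSYNC branch is simplified to a flag here:

/* Called as each write to the region completes; lock again elided. */
static void rgn_dec(struct rgn *reg, int failed)
{
        if (--reg->pending)
                return;                 /* writes still in flight */

        if (failed) {
                reg->state = RH_NOSYNC; /* simplified error condition */
        } else if (reg->state == RH_RECOVERING) {
                /* kernel: list_add_tail onto rh->quiesced_regions */
        } else if (reg->state == RH_DIRTY) {
                reg->state = RH_CLEAN;
                /* kernel: list_add onto rh->clean_regions */
        }
}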

In __rh_recovery_prepare() (reg: local):
  589          struct dm_region *reg;
  603          reg = __rh_find(rh, region);
  607          reg->state = DM_RH_RECOVERING;
  610          if (atomic_read(&reg->pending))
  611                  list_del_init(&reg->list);
  613                  list_move(&reg->list, &rh->quiesced_regions);

In dm_rh_recovery_start() (reg: local):
  645          struct dm_region *reg = NULL;
  649                  reg = list_entry(rh->quiesced_regions.next,
  651                  list_del_init(&reg->list);      /* remove from the quiesced list */
  655          return reg;

In dm_rh_recovery_end() (reg: argument):
  659  void dm_rh_recovery_end(struct dm_region *reg, int success)
  661          struct dm_region_hash *rh = reg->rh;
  665                  list_add(&reg->list, &reg->rh->recovered_regions);
  667                  list_add(&reg->list, &reg->rh->failed_recovered_regions);
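
Together, __rh_recovery_prepare(), dm_rh_recovery_start(), and dm_rh_recovery_end() form the recovery lifecycle: a region is marked RECOVERING and parked on the quiesced list once no writes are pending, recovery pops it from that list, and completion files it on either recovered_regions or failed_recovered_regions for dm_rh_update_states() to reap. A sketch of that three-queue hand-off; the head-linked queues and names are hypothetical:

#include <stddef.h>

struct rnode {
        struct rnode *next;
};

/* Three stand-in queues; the kernel uses struct list_head under a lock. */
static struct rnode *quiesced, *recovered, *failed_recovered;

static void push(struct rnode **q, struct rnode *reg)
{
        reg->next = *q;
        *q = reg;
}

/* dm_rh_recovery_start() analogue: take one quiesced region, if any. */
static struct rnode *recovery_start(void)
{
        struct rnode *reg = quiesced;

        if (reg)
                quiesced = reg->next;
        return reg;
}

/* dm_rh_recovery_end() analogue: file the outcome for later reaping. */
static void recovery_end(struct rnode *reg, int success)
{
        push(success ? &recovered : &failed_recovered, reg);
}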

In dm_rh_delay() (reg: local):
  690          struct dm_region *reg;
  693          reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
  694          bio_list_add(&reg->delayed_bios, bio);
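
Finally, dm_rh_delay() shows how writes are held back: a bio that maps to a region not yet ready is appended to that region's delayed_bios list, and complete_resync_work() (line 377) later hands the whole list to rh->dispatch_bios once the region's fate is decided. A sketch of that park-and-flush queue, with a hypothetical struct io in place of struct bio:

#include <stddef.h>

struct io {
        struct io *next;
};

/* Per-region FIFO of held-back writes, in the spirit of delayed_bios. */
struct io_list {
        struct io *head, **tail;
};

static void io_list_init(struct io_list *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

/* dm_rh_delay() analogue: park the write on the region's list. */
static void io_list_add(struct io_list *l, struct io *io)
{
        io->next = NULL;
        *l->tail = io;
        l->tail = &io->next;
}

/* complete_resync_work() analogue: drain everything to a dispatcher. */
static void io_list_dispatch(struct io_list *l, void (*dispatch)(struct io *))
{
        struct io *io, *next;

        for (io = l->head; io; io = next) {
                next = io->next;
                dispatch(io);           /* re-issue the parked write */
        }
        io_list_init(l);                /* list is now empty */
}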