Lines matching refs: pool
215 struct pool { struct
267 static enum pool_mode get_pool_mode(struct pool *pool); argument
268 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
275 struct pool *pool; member
295 struct pool *pool; member
320 static void wake_worker(struct pool *pool) in wake_worker() argument
322 queue_work(pool->wq, &pool->worker); in wake_worker()
327 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, in bio_detain() argument
337 cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO); in bio_detain()
339 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); in bio_detain()
345 dm_bio_prison_free_cell(pool->prison, cell_prealloc); in bio_detain()
350 static void cell_release(struct pool *pool, in cell_release() argument
354 dm_cell_release(pool->prison, cell, bios); in cell_release()
355 dm_bio_prison_free_cell(pool->prison, cell); in cell_release()
358 static void cell_visit_release(struct pool *pool, in cell_visit_release() argument
363 dm_cell_visit_release(pool->prison, fn, context, cell); in cell_visit_release()
364 dm_bio_prison_free_cell(pool->prison, cell); in cell_visit_release()
367 static void cell_release_no_holder(struct pool *pool, in cell_release_no_holder() argument
371 dm_cell_release_no_holder(pool->prison, cell, bios); in cell_release_no_holder()
372 dm_bio_prison_free_cell(pool->prison, cell); in cell_release_no_holder()
375 static void cell_error_with_code(struct pool *pool, in cell_error_with_code() argument
378 dm_cell_error(pool->prison, cell, error_code); in cell_error_with_code()
379 dm_bio_prison_free_cell(pool->prison, cell); in cell_error_with_code()
382 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_error() argument
384 cell_error_with_code(pool, cell, -EIO); in cell_error()
387 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_success() argument
389 cell_error_with_code(pool, cell, 0); in cell_success()
392 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_requeue() argument
394 cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE); in cell_requeue()
413 static void __pool_table_insert(struct pool *pool) in __pool_table_insert() argument
416 list_add(&pool->list, &dm_thin_pool_table.pools); in __pool_table_insert()
419 static void __pool_table_remove(struct pool *pool) in __pool_table_remove() argument
422 list_del(&pool->list); in __pool_table_remove()
425 static struct pool *__pool_table_lookup(struct mapped_device *md) in __pool_table_lookup()
427 struct pool *pool = NULL, *tmp; in __pool_table_lookup() local
433 pool = tmp; in __pool_table_lookup()
438 return pool; in __pool_table_lookup()
441 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev) in __pool_table_lookup_metadata_dev()
443 struct pool *pool = NULL, *tmp; in __pool_table_lookup_metadata_dev() local
449 pool = tmp; in __pool_table_lookup_metadata_dev()
454 return pool; in __pool_table_lookup_metadata_dev()
497 struct pool *pool = tc->pool; in requeue_deferred_cells() local
509 cell_requeue(pool, cell); in requeue_deferred_cells()
528 static void error_retry_list(struct pool *pool) in error_retry_list() argument
533 list_for_each_entry_rcu(tc, &pool->active_thins, list) in error_retry_list()
545 static bool block_size_is_power_of_two(struct pool *pool) in block_size_is_power_of_two() argument
547 return pool->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
552 struct pool *pool = tc->pool; in get_bio_block() local
555 if (block_size_is_power_of_two(pool)) in get_bio_block()
556 block_nr >>= pool->sectors_per_block_shift; in get_bio_block()
558 (void) sector_div(block_nr, pool->sectors_per_block); in get_bio_block()
565 struct pool *pool = tc->pool; in remap() local
569 if (block_size_is_power_of_two(pool)) in remap()
571 (block << pool->sectors_per_block_shift) | in remap()
572 (bi_sector & (pool->sectors_per_block - 1)); in remap()
574 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
575 sector_div(bi_sector, pool->sectors_per_block); in remap()
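
The get_bio_block() and remap() fragments above capture the two arithmetic paths used for block mapping: a shift/mask when the pool's block size is a power of two (sectors_per_block_shift >= 0), and 64-bit division via sector_div() otherwise. Below is a minimal userspace sketch of the same math; the function names and the main() harness are illustrative only, not part of dm-thin.c.

#include <stdint.h>
#include <stdio.h>

/* Virtual block a bio sector falls into (cf. get_bio_block()). */
static uint64_t bio_to_block(uint64_t bi_sector, uint64_t sectors_per_block,
                             int sectors_per_block_shift)
{
	if (sectors_per_block_shift >= 0)		/* power-of-two block size */
		return bi_sector >> sectors_per_block_shift;
	return bi_sector / sectors_per_block;		/* sector_div() in the kernel */
}

/* Sector once the bio is remapped into data block "block" (cf. remap()). */
static uint64_t remap_sector(uint64_t bi_sector, uint64_t block,
                             uint64_t sectors_per_block,
                             int sectors_per_block_shift)
{
	if (sectors_per_block_shift >= 0)
		return (block << sectors_per_block_shift) |
		       (bi_sector & (sectors_per_block - 1));
	return block * sectors_per_block + bi_sector % sectors_per_block;
}

int main(void)
{
	/* 128 sectors (64 KiB) per block, so the shift is 7. */
	printf("block %llu, remapped sector %llu\n",
	       (unsigned long long)bio_to_block(1000, 128, 7),
	       (unsigned long long)remap_sector(1000, 42, 128, 7));
	return 0;
}
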
589 static void inc_all_io_entry(struct pool *pool, struct bio *bio) in inc_all_io_entry() argument
597 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds); in inc_all_io_entry()
602 struct pool *pool = tc->pool; in issue() local
624 spin_lock_irqsave(&pool->lock, flags); in issue()
625 bio_list_add(&pool->deferred_flush_bios, bio); in issue()
626 spin_unlock_irqrestore(&pool->lock, flags); in issue()
678 struct pool *pool = m->tc->pool; in __complete_mapping_preparation() local
681 list_add_tail(&m->list, &pool->prepared_mappings); in __complete_mapping_preparation()
682 wake_worker(pool); in __complete_mapping_preparation()
689 struct pool *pool = m->tc->pool; in complete_mapping_preparation() local
691 spin_lock_irqsave(&pool->lock, flags); in complete_mapping_preparation()
693 spin_unlock_irqrestore(&pool->lock, flags); in complete_mapping_preparation()
729 struct pool *pool = tc->pool; in cell_defer_no_holder() local
733 cell_release_no_holder(pool, cell, &tc->deferred_bio_list); in cell_defer_no_holder()
736 wake_worker(pool); in cell_defer_no_holder()
757 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
785 cell_visit_release(tc->pool, __inc_remap_and_issue_cell, in inc_remap_and_issue_cell()
801 cell_error(m->tc->pool, m->cell); in process_prepared_mapping_fail()
803 mempool_free(m, m->tc->pool->mapping_pool); in process_prepared_mapping_fail()
809 struct pool *pool = tc->pool; in process_prepared_mapping() local
820 cell_error(pool, m->cell); in process_prepared_mapping()
831 metadata_operation_failed(pool, "dm_thin_insert_block", r); in process_prepared_mapping()
832 cell_error(pool, m->cell); in process_prepared_mapping()
846 inc_all_io_entry(tc->pool, m->cell->holder); in process_prepared_mapping()
853 mempool_free(m, pool->mapping_pool); in process_prepared_mapping()
863 mempool_free(m, tc->pool->mapping_pool); in process_prepared_discard_fail()
870 inc_all_io_entry(tc->pool, m->bio); in process_prepared_discard_passdown()
879 if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used) in process_prepared_discard_passdown()
887 mempool_free(m, tc->pool->mapping_pool); in process_prepared_discard_passdown()
902 static void process_prepared(struct pool *pool, struct list_head *head, in process_prepared() argument
910 spin_lock_irqsave(&pool->lock, flags); in process_prepared()
912 spin_unlock_irqrestore(&pool->lock, flags); in process_prepared()
921 static int io_overlaps_block(struct pool *pool, struct bio *bio) in io_overlaps_block() argument
924 (pool->sectors_per_block << SECTOR_SHIFT); in io_overlaps_block()
927 static int io_overwrites_block(struct pool *pool, struct bio *bio) in io_overwrites_block() argument
930 io_overlaps_block(pool, bio); in io_overwrites_block()
940 static int ensure_next_mapping(struct pool *pool) in ensure_next_mapping() argument
942 if (pool->next_mapping) in ensure_next_mapping()
945 pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC); in ensure_next_mapping()
947 return pool->next_mapping ? 0 : -ENOMEM; in ensure_next_mapping()
950 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool) in get_next_mapping() argument
952 struct dm_thin_new_mapping *m = pool->next_mapping; in get_next_mapping()
954 BUG_ON(!pool->next_mapping); in get_next_mapping()
960 pool->next_mapping = NULL; in get_next_mapping()
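
ensure_next_mapping() and get_next_mapping() above implement a small pre-allocation handshake: the worker keeps one dm_thin_new_mapping ready (allocated GFP_ATOMIC from the mempool), backs off with -ENOMEM if it cannot, and get_next_mapping() then consumes the reserved object. A hedged userspace sketch of that pattern follows, with stand-in types and malloc() in place of the kernel mempool API.

#include <assert.h>
#include <errno.h>
#include <stdlib.h>

struct mapping { int dummy; };

struct fake_pool {
	struct mapping *next_mapping;	/* one mapping kept ready for the worker */
};

/* Reserve a mapping before any cells or locks are taken. */
static int ensure_next_mapping(struct fake_pool *pool)
{
	if (pool->next_mapping)
		return 0;
	pool->next_mapping = malloc(sizeof(*pool->next_mapping));	/* mempool_alloc(GFP_ATOMIC) in the kernel */
	return pool->next_mapping ? 0 : -ENOMEM;	/* caller re-queues the work and retries later */
}

/* Hand out the reserved mapping; ensure_next_mapping() must have succeeded. */
static struct mapping *get_next_mapping(struct fake_pool *pool)
{
	struct mapping *m = pool->next_mapping;

	assert(m);				/* BUG_ON(!pool->next_mapping) in the kernel */
	pool->next_mapping = NULL;
	return m;
}

int main(void)
{
	struct fake_pool p = { .next_mapping = NULL };

	if (!ensure_next_mapping(&p))
		free(get_next_mapping(&p));
	return 0;
}
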
975 r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); in ll_zero()
986 struct pool *pool = tc->pool; in remap_and_issue_overwrite() local
992 inc_all_io_entry(pool, bio); in remap_and_issue_overwrite()
1006 struct pool *pool = tc->pool; in schedule_copy() local
1007 struct dm_thin_new_mapping *m = get_next_mapping(pool); in schedule_copy()
1021 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) in schedule_copy()
1030 if (io_overwrites_block(pool, bio)) in schedule_copy()
1036 from.sector = data_origin * pool->sectors_per_block; in schedule_copy()
1040 to.sector = data_dest * pool->sectors_per_block; in schedule_copy()
1043 r = dm_kcopyd_copy(pool->copier, &from, 1, &to, in schedule_copy()
1060 if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) { in schedule_copy()
1063 data_dest * pool->sectors_per_block + len, in schedule_copy()
1064 (data_dest + 1) * pool->sectors_per_block); in schedule_copy()
1077 tc->pool->sectors_per_block); in schedule_internal_copy()
1084 struct pool *pool = tc->pool; in schedule_zero() local
1085 struct dm_thin_new_mapping *m = get_next_mapping(pool); in schedule_zero()
1098 if (!pool->pf.zero_new_blocks) in schedule_zero()
1101 else if (io_overwrites_block(pool, bio)) in schedule_zero()
1106 data_block * pool->sectors_per_block, in schedule_zero()
1107 (data_block + 1) * pool->sectors_per_block); in schedule_zero()
1114 struct pool *pool = tc->pool; in schedule_external_copy() local
1115 sector_t virt_block_begin = virt_block * pool->sectors_per_block; in schedule_external_copy()
1116 sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block; in schedule_external_copy()
1121 pool->sectors_per_block); in schedule_external_copy()
1132 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1134 static void check_for_space(struct pool *pool) in check_for_space() argument
1139 if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE) in check_for_space()
1142 r = dm_pool_get_free_block_count(pool->pmd, &nr_free); in check_for_space()
1147 set_pool_mode(pool, PM_WRITE); in check_for_space()
1154 static int commit(struct pool *pool) in commit() argument
1158 if (get_pool_mode(pool) >= PM_READ_ONLY) in commit()
1161 r = dm_pool_commit_metadata(pool->pmd); in commit()
1163 metadata_operation_failed(pool, "dm_pool_commit_metadata", r); in commit()
1165 check_for_space(pool); in commit()
1170 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks) in check_low_water_mark() argument
1174 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { in check_low_water_mark()
1176 dm_device_name(pool->pool_md)); in check_low_water_mark()
1177 spin_lock_irqsave(&pool->lock, flags); in check_low_water_mark()
1178 pool->low_water_triggered = true; in check_low_water_mark()
1179 spin_unlock_irqrestore(&pool->lock, flags); in check_low_water_mark()
1180 dm_table_event(pool->ti->table); in check_low_water_mark()
1188 struct pool *pool = tc->pool; in alloc_data_block() local
1190 if (WARN_ON(get_pool_mode(pool) != PM_WRITE)) in alloc_data_block()
1193 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1195 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r); in alloc_data_block()
1199 check_low_water_mark(pool, free_blocks); in alloc_data_block()
1206 r = commit(pool); in alloc_data_block()
1210 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1212 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r); in alloc_data_block()
1217 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); in alloc_data_block()
1222 r = dm_pool_alloc_data_block(pool->pmd, result); in alloc_data_block()
1224 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); in alloc_data_block()
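
The alloc_data_block() fragments above also show the out-of-space handling: when no free data blocks remain, the pool first commits its metadata (a commit can return blocks freed earlier in the open transaction) and re-reads the free count; only if it is still zero does the pool switch to PM_OUT_OF_DATA_SPACE. Here is a toy sketch of that retry-after-commit flow, using stand-in helpers rather than the dm_pool_* API.

#include <errno.h>
#include <stdio.h>

struct toy_pool {
	unsigned long free_blocks;	/* stand-in for dm_pool_get_free_block_count() */
};

/* Pretend to commit metadata; a real commit may release freed blocks. */
static int toy_commit(struct toy_pool *p)
{
	(void)p;
	return 0;
}

static int toy_alloc_data_block(struct toy_pool *p, unsigned long *result)
{
	if (!p->free_blocks) {
		if (toy_commit(p))
			return -EIO;		/* metadata failure: pool would go read-only */
		if (!p->free_blocks)		/* still nothing after the commit */
			return -ENOSPC;		/* pool switches to PM_OUT_OF_DATA_SPACE */
	}
	*result = --p->free_blocks;		/* dm_pool_alloc_data_block() in the kernel */
	return 0;
}

int main(void)
{
	struct toy_pool p = { .free_blocks = 1 };
	unsigned long b;

	printf("%d %d\n", toy_alloc_data_block(&p, &b), toy_alloc_data_block(&p, &b));
	return 0;
}
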
1246 static int should_error_unserviceable_bio(struct pool *pool) in should_error_unserviceable_bio() argument
1248 enum pool_mode m = get_pool_mode(pool); in should_error_unserviceable_bio()
1257 return pool->pf.error_if_no_space ? -ENOSPC : 0; in should_error_unserviceable_bio()
1269 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) in handle_unserviceable_bio() argument
1271 int error = should_error_unserviceable_bio(pool); in handle_unserviceable_bio()
1279 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell) in retry_bios_on_resume() argument
1285 error = should_error_unserviceable_bio(pool); in retry_bios_on_resume()
1287 cell_error_with_code(pool, cell, error); in retry_bios_on_resume()
1292 cell_release(pool, cell, &bios); in retry_bios_on_resume()
1302 struct pool *pool = tc->pool; in process_discard_cell() local
1310 cell_requeue(pool, cell); in process_discard_cell()
1323 if (bio_detain(tc->pool, &key2, bio, &cell2)) { in process_discard_cell()
1328 if (io_overlaps_block(pool, bio)) { in process_discard_cell()
1333 m = get_next_mapping(pool); in process_discard_cell()
1335 m->pass_discard = pool->pf.discard_passdown; in process_discard_cell()
1343 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) in process_discard_cell()
1344 pool->process_prepared_discard(m); in process_discard_cell()
1347 inc_all_io_entry(pool, bio); in process_discard_cell()
1356 if ((!lookup_result.shared) && pool->pf.discard_passdown) in process_discard_cell()
1387 if (bio_detain(tc->pool, &key, bio, &cell)) in process_discard_bio()
1400 struct pool *pool = tc->pool; in break_sharing() local
1410 retry_bios_on_resume(pool, cell); in break_sharing()
1416 cell_error(pool, cell); in break_sharing()
1434 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds); in __remap_and_issue_shared_cell()
1435 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1452 cell_visit_release(tc->pool, __remap_and_issue_shared_cell, in remap_and_issue_shared_cell()
1468 struct pool *pool = tc->pool; in process_shared_bio() local
1476 if (bio_detain(pool, &key, bio, &data_cell)) { in process_shared_bio()
1487 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); in process_shared_bio()
1488 inc_all_io_entry(pool, bio); in process_shared_bio()
1501 struct pool *pool = tc->pool; in provision_block() local
1507 inc_all_io_entry(pool, bio); in provision_block()
1534 retry_bios_on_resume(pool, cell); in provision_block()
1540 cell_error(pool, cell); in provision_block()
1548 struct pool *pool = tc->pool; in process_cell() local
1554 cell_requeue(pool, cell); in process_cell()
1564 inc_all_io_entry(pool, bio); in process_cell()
1572 inc_all_io_entry(pool, bio); in process_cell()
1602 struct pool *pool = tc->pool; in process_bio() local
1612 if (bio_detain(pool, &key, bio, &cell)) in process_bio()
1630 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
1634 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
1645 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
1650 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
1691 cell_success(tc->pool, cell); in process_cell_success()
1696 cell_error(tc->pool, cell); in process_cell_fail()
1703 static int need_commit_due_to_time(struct pool *pool) in need_commit_due_to_time() argument
1705 return !time_in_range(jiffies, pool->last_commit_jiffies, in need_commit_due_to_time()
1706 pool->last_commit_jiffies + COMMIT_PERIOD); in need_commit_due_to_time()
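
need_commit_due_to_time() above forces a metadata commit once COMMIT_PERIOD jiffies have passed since the last one, and do_waker() re-queues itself on the same period so idle pools still commit. A toy version of the check follows, using plain integers for jiffies and ignoring the wrap-around that time_in_range() handles in the kernel.

#include <stdbool.h>
#include <stdio.h>

#define COMMIT_PERIOD 100	/* illustrative value; the kernel's is in jiffies */

static bool need_commit_due_to_time(unsigned long now, unsigned long last_commit)
{
	/* Mirrors !time_in_range(now, last_commit, last_commit + COMMIT_PERIOD). */
	return now < last_commit || now > last_commit + COMMIT_PERIOD;
}

int main(void)
{
	printf("%d\n", need_commit_due_to_time(150, 100));	/* 0: still inside the period */
	printf("%d\n", need_commit_due_to_time(250, 100));	/* 1: a commit is due */
	return 0;
}
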
1775 struct pool *pool = tc->pool; in process_thin_deferred_bios() local
1810 if (ensure_next_mapping(pool)) { in process_thin_deferred_bios()
1819 pool->process_discard(tc, bio); in process_thin_deferred_bios()
1821 pool->process_bio(tc, bio); in process_thin_deferred_bios()
1824 throttle_work_update(&pool->throttle); in process_thin_deferred_bios()
1825 dm_pool_issue_prefetches(pool->pmd); in process_thin_deferred_bios()
1848 static unsigned sort_cells(struct pool *pool, struct list_head *cells) in sort_cells() argument
1857 pool->cell_sort_array[count++] = cell; in sort_cells()
1861 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL); in sort_cells()
1868 struct pool *pool = tc->pool; in process_thin_deferred_cells() local
1884 count = sort_cells(tc->pool, &cells); in process_thin_deferred_cells()
1887 cell = pool->cell_sort_array[i]; in process_thin_deferred_cells()
1895 if (ensure_next_mapping(pool)) { in process_thin_deferred_cells()
1897 list_add(&pool->cell_sort_array[j]->user_list, &cells); in process_thin_deferred_cells()
1906 pool->process_discard_cell(tc, cell); in process_thin_deferred_cells()
1908 pool->process_cell(tc, cell); in process_thin_deferred_cells()
1921 static struct thin_c *get_first_thin(struct pool *pool) in get_first_thin() argument
1926 if (!list_empty(&pool->active_thins)) { in get_first_thin()
1927 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); in get_first_thin()
1935 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) in get_next_thin() argument
1940 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { in get_next_thin()
1952 static void process_deferred_bios(struct pool *pool) in process_deferred_bios() argument
1959 tc = get_first_thin(pool); in process_deferred_bios()
1963 tc = get_next_thin(pool, tc); in process_deferred_bios()
1971 spin_lock_irqsave(&pool->lock, flags); in process_deferred_bios()
1972 bio_list_merge(&bios, &pool->deferred_flush_bios); in process_deferred_bios()
1973 bio_list_init(&pool->deferred_flush_bios); in process_deferred_bios()
1974 spin_unlock_irqrestore(&pool->lock, flags); in process_deferred_bios()
1977 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) in process_deferred_bios()
1980 if (commit(pool)) { in process_deferred_bios()
1985 pool->last_commit_jiffies = jiffies; in process_deferred_bios()
1993 struct pool *pool = container_of(ws, struct pool, worker); in do_worker() local
1995 throttle_work_start(&pool->throttle); in do_worker()
1996 dm_pool_issue_prefetches(pool->pmd); in do_worker()
1997 throttle_work_update(&pool->throttle); in do_worker()
1998 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping); in do_worker()
1999 throttle_work_update(&pool->throttle); in do_worker()
2000 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard); in do_worker()
2001 throttle_work_update(&pool->throttle); in do_worker()
2002 process_deferred_bios(pool); in do_worker()
2003 throttle_work_complete(&pool->throttle); in do_worker()
2012 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker); in do_waker() local
2013 wake_worker(pool); in do_waker()
2014 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); in do_waker()
2024 struct pool *pool = container_of(to_delayed_work(ws), struct pool, in do_no_space_timeout() local
2027 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) in do_no_space_timeout()
2028 set_pool_mode(pool, PM_READ_ONLY); in do_no_space_timeout()
2048 static void pool_work_wait(struct pool_work *pw, struct pool *pool, in pool_work_wait() argument
2053 queue_work(pool->wq, &pw->worker); in pool_work_wait()
2089 pool_work_wait(&w.pw, tc->pool, fn); in noflush_work()
2094 static enum pool_mode get_pool_mode(struct pool *pool) in get_pool_mode() argument
2096 return pool->pf.mode; in get_pool_mode()
2099 static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) in notify_of_pool_mode_change() argument
2101 dm_table_event(pool->ti->table); in notify_of_pool_mode_change()
2103 dm_device_name(pool->pool_md), new_mode); in notify_of_pool_mode_change()
2106 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) in set_pool_mode() argument
2108 struct pool_c *pt = pool->ti->private; in set_pool_mode()
2109 bool needs_check = dm_pool_metadata_needs_check(pool->pmd); in set_pool_mode()
2110 enum pool_mode old_mode = get_pool_mode(pool); in set_pool_mode()
2119 dm_device_name(pool->pool_md)); in set_pool_mode()
2136 notify_of_pool_mode_change(pool, "failure"); in set_pool_mode()
2137 dm_pool_metadata_read_only(pool->pmd); in set_pool_mode()
2138 pool->process_bio = process_bio_fail; in set_pool_mode()
2139 pool->process_discard = process_bio_fail; in set_pool_mode()
2140 pool->process_cell = process_cell_fail; in set_pool_mode()
2141 pool->process_discard_cell = process_cell_fail; in set_pool_mode()
2142 pool->process_prepared_mapping = process_prepared_mapping_fail; in set_pool_mode()
2143 pool->process_prepared_discard = process_prepared_discard_fail; in set_pool_mode()
2145 error_retry_list(pool); in set_pool_mode()
2150 notify_of_pool_mode_change(pool, "read-only"); in set_pool_mode()
2151 dm_pool_metadata_read_only(pool->pmd); in set_pool_mode()
2152 pool->process_bio = process_bio_read_only; in set_pool_mode()
2153 pool->process_discard = process_bio_success; in set_pool_mode()
2154 pool->process_cell = process_cell_read_only; in set_pool_mode()
2155 pool->process_discard_cell = process_cell_success; in set_pool_mode()
2156 pool->process_prepared_mapping = process_prepared_mapping_fail; in set_pool_mode()
2157 pool->process_prepared_discard = process_prepared_discard_passdown; in set_pool_mode()
2159 error_retry_list(pool); in set_pool_mode()
2172 notify_of_pool_mode_change(pool, "out-of-data-space"); in set_pool_mode()
2173 pool->process_bio = process_bio_read_only; in set_pool_mode()
2174 pool->process_discard = process_discard_bio; in set_pool_mode()
2175 pool->process_cell = process_cell_read_only; in set_pool_mode()
2176 pool->process_discard_cell = process_discard_cell; in set_pool_mode()
2177 pool->process_prepared_mapping = process_prepared_mapping; in set_pool_mode()
2178 pool->process_prepared_discard = process_prepared_discard; in set_pool_mode()
2180 if (!pool->pf.error_if_no_space && no_space_timeout) in set_pool_mode()
2181 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); in set_pool_mode()
2186 notify_of_pool_mode_change(pool, "write"); in set_pool_mode()
2187 dm_pool_metadata_read_write(pool->pmd); in set_pool_mode()
2188 pool->process_bio = process_bio; in set_pool_mode()
2189 pool->process_discard = process_discard_bio; in set_pool_mode()
2190 pool->process_cell = process_cell; in set_pool_mode()
2191 pool->process_discard_cell = process_discard_cell; in set_pool_mode()
2192 pool->process_prepared_mapping = process_prepared_mapping; in set_pool_mode()
2193 pool->process_prepared_discard = process_prepared_discard; in set_pool_mode()
2197 pool->pf.mode = new_mode; in set_pool_mode()
2205 static void abort_transaction(struct pool *pool) in abort_transaction() argument
2207 const char *dev_name = dm_device_name(pool->pool_md); in abort_transaction()
2210 if (dm_pool_abort_metadata(pool->pmd)) { in abort_transaction()
2212 set_pool_mode(pool, PM_FAIL); in abort_transaction()
2215 if (dm_pool_metadata_set_needs_check(pool->pmd)) { in abort_transaction()
2217 set_pool_mode(pool, PM_FAIL); in abort_transaction()
2221 static void metadata_operation_failed(struct pool *pool, const char *op, int r) in metadata_operation_failed() argument
2224 dm_device_name(pool->pool_md), op, r); in metadata_operation_failed()
2226 abort_transaction(pool); in metadata_operation_failed()
2227 set_pool_mode(pool, PM_READ_ONLY); in metadata_operation_failed()
2242 struct pool *pool = tc->pool; in thin_defer_bio() local
2248 wake_worker(pool); in thin_defer_bio()
2253 struct pool *pool = tc->pool; in thin_defer_bio_with_throttle() local
2255 throttle_lock(&pool->throttle); in thin_defer_bio_with_throttle()
2257 throttle_unlock(&pool->throttle); in thin_defer_bio_with_throttle()
2263 struct pool *pool = tc->pool; in thin_defer_cell() local
2265 throttle_lock(&pool->throttle); in thin_defer_cell()
2269 throttle_unlock(&pool->throttle); in thin_defer_cell()
2271 wake_worker(pool); in thin_defer_cell()
2304 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_bio_map()
2319 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2349 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2354 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2383 if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE) in pool_is_congested()
2390 static void requeue_bios(struct pool *pool) in requeue_bios() argument
2396 list_for_each_entry_rcu(tc, &pool->active_thins, list) { in requeue_bios()
2426 struct pool *pool = pt->pool; in disable_passdown_if_not_supported() local
2429 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT; in disable_passdown_if_not_supported()
2439 else if (data_limits->max_discard_sectors < pool->sectors_per_block) in disable_passdown_if_not_supported()
2454 static int bind_control_target(struct pool *pool, struct dm_target *ti) in bind_control_target() argument
2461 enum pool_mode old_mode = get_pool_mode(pool); in bind_control_target()
2471 pool->ti = ti; in bind_control_target()
2472 pool->pf = pt->adjusted_pf; in bind_control_target()
2473 pool->low_water_blocks = pt->low_water_blocks; in bind_control_target()
2475 set_pool_mode(pool, new_mode); in bind_control_target()
2480 static void unbind_control_target(struct pool *pool, struct dm_target *ti) in unbind_control_target() argument
2482 if (pool->ti == ti) in unbind_control_target()
2483 pool->ti = NULL; in unbind_control_target()
2499 static void __pool_destroy(struct pool *pool) in __pool_destroy() argument
2501 __pool_table_remove(pool); in __pool_destroy()
2503 vfree(pool->cell_sort_array); in __pool_destroy()
2504 if (dm_pool_metadata_close(pool->pmd) < 0) in __pool_destroy()
2507 dm_bio_prison_destroy(pool->prison); in __pool_destroy()
2508 dm_kcopyd_client_destroy(pool->copier); in __pool_destroy()
2510 if (pool->wq) in __pool_destroy()
2511 destroy_workqueue(pool->wq); in __pool_destroy()
2513 if (pool->next_mapping) in __pool_destroy()
2514 mempool_free(pool->next_mapping, pool->mapping_pool); in __pool_destroy()
2515 mempool_destroy(pool->mapping_pool); in __pool_destroy()
2516 dm_deferred_set_destroy(pool->shared_read_ds); in __pool_destroy()
2517 dm_deferred_set_destroy(pool->all_io_ds); in __pool_destroy()
2518 kfree(pool); in __pool_destroy()
2523 static struct pool *pool_create(struct mapped_device *pool_md, in pool_create()
2530 struct pool *pool; in pool_create() local
2537 return (struct pool *)pmd; in pool_create()
2540 pool = kmalloc(sizeof(*pool), GFP_KERNEL); in pool_create()
2541 if (!pool) { in pool_create()
2547 pool->pmd = pmd; in pool_create()
2548 pool->sectors_per_block = block_size; in pool_create()
2550 pool->sectors_per_block_shift = -1; in pool_create()
2552 pool->sectors_per_block_shift = __ffs(block_size); in pool_create()
2553 pool->low_water_blocks = 0; in pool_create()
2554 pool_features_init(&pool->pf); in pool_create()
2555 pool->prison = dm_bio_prison_create(); in pool_create()
2556 if (!pool->prison) { in pool_create()
2562 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); in pool_create()
2563 if (IS_ERR(pool->copier)) { in pool_create()
2564 r = PTR_ERR(pool->copier); in pool_create()
2574 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in pool_create()
2575 if (!pool->wq) { in pool_create()
2581 throttle_init(&pool->throttle); in pool_create()
2582 INIT_WORK(&pool->worker, do_worker); in pool_create()
2583 INIT_DELAYED_WORK(&pool->waker, do_waker); in pool_create()
2584 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); in pool_create()
2585 spin_lock_init(&pool->lock); in pool_create()
2586 bio_list_init(&pool->deferred_flush_bios); in pool_create()
2587 INIT_LIST_HEAD(&pool->prepared_mappings); in pool_create()
2588 INIT_LIST_HEAD(&pool->prepared_discards); in pool_create()
2589 INIT_LIST_HEAD(&pool->active_thins); in pool_create()
2590 pool->low_water_triggered = false; in pool_create()
2591 pool->suspended = true; in pool_create()
2593 pool->shared_read_ds = dm_deferred_set_create(); in pool_create()
2594 if (!pool->shared_read_ds) { in pool_create()
2600 pool->all_io_ds = dm_deferred_set_create(); in pool_create()
2601 if (!pool->all_io_ds) { in pool_create()
2607 pool->next_mapping = NULL; in pool_create()
2608 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE, in pool_create()
2610 if (!pool->mapping_pool) { in pool_create()
2616 pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE); in pool_create()
2617 if (!pool->cell_sort_array) { in pool_create()
2623 pool->ref_count = 1; in pool_create()
2624 pool->last_commit_jiffies = jiffies; in pool_create()
2625 pool->pool_md = pool_md; in pool_create()
2626 pool->md_dev = metadata_dev; in pool_create()
2627 __pool_table_insert(pool); in pool_create()
2629 return pool; in pool_create()
2632 mempool_destroy(pool->mapping_pool); in pool_create()
2634 dm_deferred_set_destroy(pool->all_io_ds); in pool_create()
2636 dm_deferred_set_destroy(pool->shared_read_ds); in pool_create()
2638 destroy_workqueue(pool->wq); in pool_create()
2640 dm_kcopyd_client_destroy(pool->copier); in pool_create()
2642 dm_bio_prison_destroy(pool->prison); in pool_create()
2644 kfree(pool); in pool_create()
2652 static void __pool_inc(struct pool *pool) in __pool_inc() argument
2655 pool->ref_count++; in __pool_inc()
2658 static void __pool_dec(struct pool *pool) in __pool_dec() argument
2661 BUG_ON(!pool->ref_count); in __pool_dec()
2662 if (!--pool->ref_count) in __pool_dec()
2663 __pool_destroy(pool); in __pool_dec()
2666 static struct pool *__pool_find(struct mapped_device *pool_md, in __pool_find()
2671 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev); in __pool_find() local
2673 if (pool) { in __pool_find()
2674 if (pool->pool_md != pool_md) { in __pool_find()
2678 __pool_inc(pool); in __pool_find()
2681 pool = __pool_table_lookup(pool_md); in __pool_find()
2682 if (pool) { in __pool_find()
2683 if (pool->md_dev != metadata_dev) { in __pool_find()
2687 __pool_inc(pool); in __pool_find()
2690 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error); in __pool_find()
2695 return pool; in __pool_find()
2707 unbind_control_target(pt->pool, ti); in pool_dtr()
2708 __pool_dec(pt->pool); in pool_dtr()
2768 struct pool *pool = context; in metadata_low_callback() local
2771 dm_device_name(pool->pool_md)); in metadata_low_callback()
2773 dm_table_event(pool->ti->table); in metadata_low_callback()
2844 struct pool *pool; in pool_ctr() local
2912 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, in pool_ctr()
2914 if (IS_ERR(pool)) { in pool_ctr()
2915 r = PTR_ERR(pool); in pool_ctr()
2925 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) { in pool_ctr()
2931 pt->pool = pool; in pool_ctr()
2957 r = dm_pool_register_metadata_threshold(pt->pool->pmd, in pool_ctr()
2960 pool); in pool_ctr()
2972 __pool_dec(pool); in pool_ctr()
2989 struct pool *pool = pt->pool; in pool_map() local
2995 spin_lock_irqsave(&pool->lock, flags); in pool_map()
2998 spin_unlock_irqrestore(&pool->lock, flags); in pool_map()
3007 struct pool *pool = pt->pool; in maybe_resize_data_dev() local
3013 (void) sector_div(data_size, pool->sectors_per_block); in maybe_resize_data_dev()
3015 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size); in maybe_resize_data_dev()
3018 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3024 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3029 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_data_dev()
3031 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3037 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3039 r = dm_pool_resize_data_dev(pool->pmd, data_size); in maybe_resize_data_dev()
3041 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r); in maybe_resize_data_dev()
3055 struct pool *pool = pt->pool; in maybe_resize_metadata_dev() local
3060 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); in maybe_resize_metadata_dev()
3062 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); in maybe_resize_metadata_dev()
3065 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3071 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3076 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_metadata_dev()
3078 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3082 warn_if_metadata_device_too_big(pool->md_dev); in maybe_resize_metadata_dev()
3084 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3086 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); in maybe_resize_metadata_dev()
3088 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); in maybe_resize_metadata_dev()
3114 struct pool *pool = pt->pool; in pool_preresume() local
3119 r = bind_control_target(pool, ti); in pool_preresume()
3132 (void) commit(pool); in pool_preresume()
3137 static void pool_suspend_active_thins(struct pool *pool) in pool_suspend_active_thins() argument
3142 tc = get_first_thin(pool); in pool_suspend_active_thins()
3145 tc = get_next_thin(pool, tc); in pool_suspend_active_thins()
3149 static void pool_resume_active_thins(struct pool *pool) in pool_resume_active_thins() argument
3154 tc = get_first_thin(pool); in pool_resume_active_thins()
3157 tc = get_next_thin(pool, tc); in pool_resume_active_thins()
3164 struct pool *pool = pt->pool; in pool_resume() local
3171 requeue_bios(pool); in pool_resume()
3172 pool_resume_active_thins(pool); in pool_resume()
3174 spin_lock_irqsave(&pool->lock, flags); in pool_resume()
3175 pool->low_water_triggered = false; in pool_resume()
3176 pool->suspended = false; in pool_resume()
3177 spin_unlock_irqrestore(&pool->lock, flags); in pool_resume()
3179 do_waker(&pool->waker.work); in pool_resume()
3185 struct pool *pool = pt->pool; in pool_presuspend() local
3188 spin_lock_irqsave(&pool->lock, flags); in pool_presuspend()
3189 pool->suspended = true; in pool_presuspend()
3190 spin_unlock_irqrestore(&pool->lock, flags); in pool_presuspend()
3192 pool_suspend_active_thins(pool); in pool_presuspend()
3198 struct pool *pool = pt->pool; in pool_presuspend_undo() local
3201 pool_resume_active_thins(pool); in pool_presuspend_undo()
3203 spin_lock_irqsave(&pool->lock, flags); in pool_presuspend_undo()
3204 pool->suspended = false; in pool_presuspend_undo()
3205 spin_unlock_irqrestore(&pool->lock, flags); in pool_presuspend_undo()
3211 struct pool *pool = pt->pool; in pool_postsuspend() local
3213 cancel_delayed_work_sync(&pool->waker); in pool_postsuspend()
3214 cancel_delayed_work_sync(&pool->no_space_timeout); in pool_postsuspend()
3215 flush_workqueue(pool->wq); in pool_postsuspend()
3216 (void) commit(pool); in pool_postsuspend()
3242 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool) in process_create_thin_mesg() argument
3255 r = dm_pool_create_thin(pool->pmd, dev_id); in process_create_thin_mesg()
3265 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool) in process_create_snap_mesg() argument
3283 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id); in process_create_snap_mesg()
3293 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool) in process_delete_mesg() argument
3306 r = dm_pool_delete_thin_device(pool->pmd, dev_id); in process_delete_mesg()
3313 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool) in process_set_transaction_id_mesg() argument
3332 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id); in process_set_transaction_id_mesg()
3342 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) in process_reserve_metadata_snap_mesg() argument
3350 (void) commit(pool); in process_reserve_metadata_snap_mesg()
3352 r = dm_pool_reserve_metadata_snap(pool->pmd); in process_reserve_metadata_snap_mesg()
3359 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) in process_release_metadata_snap_mesg() argument
3367 r = dm_pool_release_metadata_snap(pool->pmd); in process_release_metadata_snap_mesg()
3387 struct pool *pool = pt->pool; in pool_message() local
3389 if (get_pool_mode(pool) >= PM_READ_ONLY) { in pool_message()
3391 dm_device_name(pool->pool_md)); in pool_message()
3396 r = process_create_thin_mesg(argc, argv, pool); in pool_message()
3399 r = process_create_snap_mesg(argc, argv, pool); in pool_message()
3402 r = process_delete_mesg(argc, argv, pool); in pool_message()
3405 r = process_set_transaction_id_mesg(argc, argv, pool); in pool_message()
3408 r = process_reserve_metadata_snap_mesg(argc, argv, pool); in pool_message()
3411 r = process_release_metadata_snap_mesg(argc, argv, pool); in pool_message()
3417 (void) commit(pool); in pool_message()
3465 struct pool *pool = pt->pool; in pool_status() local
3469 if (get_pool_mode(pool) == PM_FAIL) { in pool_status()
3476 (void) commit(pool); in pool_status()
3478 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); in pool_status()
3481 dm_device_name(pool->pool_md), r); in pool_status()
3485 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata); in pool_status()
3488 dm_device_name(pool->pool_md), r); in pool_status()
3492 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata); in pool_status()
3495 dm_device_name(pool->pool_md), r); in pool_status()
3499 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data); in pool_status()
3502 dm_device_name(pool->pool_md), r); in pool_status()
3506 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data); in pool_status()
3509 dm_device_name(pool->pool_md), r); in pool_status()
3513 r = dm_pool_get_metadata_snap(pool->pmd, &held_root); in pool_status()
3516 dm_device_name(pool->pool_md), r); in pool_status()
3532 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) in pool_status()
3534 else if (pool->pf.mode == PM_READ_ONLY) in pool_status()
3539 if (!pool->pf.discard_enabled) in pool_status()
3541 else if (pool->pf.discard_passdown) in pool_status()
3546 if (pool->pf.error_if_no_space) in pool_status()
3557 (unsigned long)pool->sectors_per_block, in pool_status()
3592 struct pool *pool = pt->pool; in set_discard_limits() local
3595 limits->max_discard_sectors = pool->sectors_per_block; in set_discard_limits()
3603 pool->sectors_per_block << SECTOR_SHIFT); in set_discard_limits()
3605 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; in set_discard_limits()
3611 struct pool *pool = pt->pool; in pool_io_hints() local
3623 if (limits->max_sectors < pool->sectors_per_block) { in pool_io_hints()
3624 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) { in pool_io_hints()
3635 if (io_opt_sectors < pool->sectors_per_block || in pool_io_hints()
3636 !is_factor(io_opt_sectors, pool->sectors_per_block)) { in pool_io_hints()
3637 if (is_factor(pool->sectors_per_block, limits->max_sectors)) in pool_io_hints()
3640 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT); in pool_io_hints()
3641 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); in pool_io_hints()
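
set_discard_limits() and pool_io_hints() above convert the pool block size from 512-byte sectors into the byte units the queue limits expect, and use is_factor() to check that max_sectors and io_opt divide the block size evenly. The sketch below shows that unit conversion; is_factor() here is reconstructed from how it is used in the listing, not copied from the driver.

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

/* Does n evenly divide the pool block size (in sectors)? */
static int is_factor(unsigned long sectors_per_block, unsigned long n)
{
	return n && sectors_per_block % n == 0;
}

int main(void)
{
	unsigned long sectors_per_block = 128;	/* 64 KiB thin-pool block, example value */

	printf("discard_granularity = %lu bytes\n", sectors_per_block << SECTOR_SHIFT);
	printf("io_min = io_opt = %lu bytes\n", sectors_per_block << SECTOR_SHIFT);
	printf("max_sectors=64 is a factor: %d\n", is_factor(sectors_per_block, 64));
	return 0;
}
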
3705 spin_lock_irqsave(&tc->pool->lock, flags); in thin_dtr()
3707 spin_unlock_irqrestore(&tc->pool->lock, flags); in thin_dtr()
3715 __pool_dec(tc->pool); in thin_dtr()
3795 tc->pool = __pool_table_lookup(pool_md); in thin_ctr()
3796 if (!tc->pool) { in thin_ctr()
3801 __pool_inc(tc->pool); in thin_ctr()
3803 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_ctr()
3809 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); in thin_ctr()
3815 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); in thin_ctr()
3825 if (tc->pool->pf.discard_enabled) { in thin_ctr()
3834 spin_lock_irqsave(&tc->pool->lock, flags); in thin_ctr()
3835 if (tc->pool->suspended) { in thin_ctr()
3836 spin_unlock_irqrestore(&tc->pool->lock, flags); in thin_ctr()
3844 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); in thin_ctr()
3845 spin_unlock_irqrestore(&tc->pool->lock, flags); in thin_ctr()
3861 __pool_dec(tc->pool); in thin_ctr()
3890 struct pool *pool = h->tc->pool; in thin_endio() local
3896 spin_lock_irqsave(&pool->lock, flags); in thin_endio()
3901 spin_unlock_irqrestore(&pool->lock, flags); in thin_endio()
3908 spin_lock_irqsave(&pool->lock, flags); in thin_endio()
3910 list_add_tail(&m->list, &pool->prepared_discards); in thin_endio()
3911 spin_unlock_irqrestore(&pool->lock, flags); in thin_endio()
3912 wake_worker(pool); in thin_endio()
3960 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_status()
3982 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block); in thin_status()
3985 tc->pool->sectors_per_block) - 1); in thin_status()
4026 struct pool *pool = tc->pool; in thin_iterate_devices() local
4032 if (!pool->ti) in thin_iterate_devices()
4035 blocks = pool->ti->len; in thin_iterate_devices()
4036 (void) sector_div(blocks, pool->sectors_per_block); in thin_iterate_devices()
4038 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); in thin_iterate_devices()