Lines matching references to t (the struct dm_table pointer) in drivers/md/dm-table.c
94 static inline sector_t *get_node(struct dm_table *t, in get_node() argument
97 return t->index[l] + (n * KEYS_PER_NODE); in get_node()
104 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) in high() argument
106 for (; l < t->depth - 1; l++) in high()
109 if (n >= t->counts[l]) in high()
112 return get_node(t, l, n)[KEYS_PER_NODE - 1]; in high()
119 static int setup_btree_index(unsigned int l, struct dm_table *t) in setup_btree_index() argument
124 for (n = 0U; n < t->counts[l]; n++) { in setup_btree_index()
125 node = get_node(t, l, n); in setup_btree_index()
128 node[k] = high(t, l + 1, get_child(n, k)); in setup_btree_index()
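
The three helpers above implement dm-table's flattened B-tree index: get_node() addresses node n within level l's packed key array, and high() follows rightmost children down to the leaf level to find the largest sector a subtree covers, which setup_btree_index() then copies into the parent's keys. A minimal userspace sketch of the same arithmetic, with an illustrative KEYS_PER_NODE of 2 (the kernel derives it from the cache-line size):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    #define KEYS_PER_NODE 2                       /* illustrative; kernel: NODE_SIZE / sizeof(sector_t) */
    #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

    static sector_t *get_node(sector_t **index, unsigned l, unsigned n)
    {
        return index[l] + (n * KEYS_PER_NODE);    /* nodes are packed per level */
    }

    static unsigned get_child(unsigned n, unsigned k)
    {
        return (n * CHILDREN_PER_NODE) + k;
    }

    /* largest sector covered by node n at level l: follow rightmost
     * children down to the leaves, then read the node's last key */
    static sector_t high(sector_t **index, unsigned *counts, unsigned depth,
                         unsigned l, unsigned n)
    {
        for (; l < depth - 1; l++)
            n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= counts[l])
            return (sector_t)-1;                  /* subtree past the last leaf */

        return get_node(index, l, n)[KEYS_PER_NODE - 1];
    }

    int main(void)
    {
        /* depth 2: one root over three leaves of two keys each */
        sector_t leaves[] = { 9, 19, 29, 39, 49, 59 };
        sector_t root[KEYS_PER_NODE];
        sector_t *index[2] = { root, leaves };
        unsigned counts[2] = { 1, 3 };

        root[0] = high(index, counts, 2, 1, get_child(0, 0));  /* 19 */
        root[1] = high(index, counts, 2, 1, get_child(0, 1));  /* 39 */
        printf("root = { %llu, %llu }\n", root[0], root[1]);
        return 0;
    }

Note that a node with KEYS_PER_NODE keys has KEYS_PER_NODE + 1 children; the last child gets no key in the parent and is reached by falling through the scan.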
156 static int alloc_targets(struct dm_table *t, unsigned int num) in alloc_targets() argument
174 vfree(t->highs); in alloc_targets()
176 t->num_allocated = num; in alloc_targets()
177 t->highs = n_highs; in alloc_targets()
178 t->targets = n_targets; in alloc_targets()
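
alloc_targets() sizes highs[] and targets[] together: a single allocation carries both arrays, with targets[] starting immediately after the highs, which is why the teardown paths above free only t->highs. A userspace analogue with calloc in place of the kernel's vmalloc (the sentinel padding the kernel adds is omitted, and the struct name is a stand-in):

    #include <stdlib.h>
    #include <string.h>

    typedef unsigned long long sector_t;
    struct dm_target_sketch { sector_t begin, len; };

    struct table {
        unsigned num_allocated;
        sector_t *highs;
        struct dm_target_sketch *targets;
    };

    /* one block carries both arrays; targets[] starts right after highs[] */
    static int alloc_targets(struct table *t, unsigned num)
    {
        sector_t *n_highs =
            calloc(num, sizeof(sector_t) + sizeof(struct dm_target_sketch));
        if (!n_highs)
            return -1;

        struct dm_target_sketch *n_targets =
            (struct dm_target_sketch *)(n_highs + num);
        memset(n_highs, -1, num * sizeof(*n_highs)); /* "no target here yet" */

        free(t->highs);          /* the old block also carried the old targets[] */
        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;
        return 0;
    }

    int main(void)
    {
        struct table t = { 0, NULL, NULL };
        int r = alloc_targets(&t, 16);
        free(t.highs);
        return r;
    }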
186 struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); in dm_table_create() local
188 if (!t) in dm_table_create()
191 INIT_LIST_HEAD(&t->devices); in dm_table_create()
192 INIT_LIST_HEAD(&t->target_callbacks); in dm_table_create()
200 kfree(t); in dm_table_create()
204 if (alloc_targets(t, num_targets)) { in dm_table_create()
205 kfree(t); in dm_table_create()
209 t->mode = mode; in dm_table_create()
210 t->md = md; in dm_table_create()
211 *result = t; in dm_table_create()
229 void dm_table_destroy(struct dm_table *t) in dm_table_destroy() argument
233 if (!t) in dm_table_destroy()
237 if (t->depth >= 2) in dm_table_destroy()
238 vfree(t->index[t->depth - 2]); in dm_table_destroy()
241 for (i = 0; i < t->num_targets; i++) { in dm_table_destroy()
242 struct dm_target *tgt = t->targets + i; in dm_table_destroy()
250 vfree(t->highs); in dm_table_destroy()
253 free_devices(&t->devices, t->md); in dm_table_destroy()
255 dm_free_md_mempools(t->mempools); in dm_table_destroy()
257 kfree(t); in dm_table_destroy()
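
dm_table_create() and dm_table_destroy() bracket the table lifetime: a zeroed allocation, list initialization, and target-array allocation on the way in; index, per-target, device-list, and mempool teardown on the way out. Each error path in create unwinds only what was already built. A compressed userspace sketch of that construct/unwind/teardown shape:

    #include <stdlib.h>

    typedef unsigned long long sector_t;

    struct table {
        unsigned num_allocated;
        sector_t *highs;
    };

    /* construct-or-unwind shape of dm_table_create() */
    static int table_create(unsigned num_targets, struct table **result)
    {
        struct table *t = calloc(1, sizeof(*t));   /* kzalloc analogue */
        if (!t)
            return -1;

        t->highs = malloc(num_targets * sizeof(*t->highs));
        if (!t->highs) {
            free(t);                /* unwind the partial construction */
            return -1;
        }
        t->num_allocated = num_targets;

        *result = t;
        return 0;
    }

    /* teardown mirrors construction in reverse, as dm_table_destroy() does */
    static void table_destroy(struct table *t)
    {
        if (!t)
            return;
        free(t->highs);
        free(t);
    }

    int main(void)
    {
        struct table *t = NULL;
        if (table_create(16, &t))
            return 1;
        table_destroy(t);
        return 0;
    }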
377 struct dm_table *t = ti->table; in dm_get_device() local
380 BUG_ON(!t); in dm_get_device()
393 dd = find_device(&t->devices, dev); in dm_get_device()
399 if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) { in dm_get_device()
405 list_add(&dd->list, &t->devices); in dm_get_device()
408 r = upgrade_mode(dd, mode, t->md); in dm_get_device()
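
dm_get_device() reuses an already-opened underlying device where it can: it searches the table's device list first, opens and list-adds on a miss, and otherwise only takes a reference and upgrades the open mode if needed. A sketch of that find-or-add-with-refcount shape, using a fixed array and hypothetical names in place of the kernel's list and dm_dev machinery:

    #include <stdio.h>
    #include <string.h>

    #define MAX_DEVS 8

    struct dev_entry { char name[32]; int refcount; };

    static struct dev_entry devices[MAX_DEVS];
    static unsigned num_devices;

    /* find-or-add with refcounting, the shape of dm_get_device() */
    static struct dev_entry *get_device(const char *name)
    {
        for (unsigned i = 0; i < num_devices; i++) {
            if (strcmp(devices[i].name, name) == 0) {
                devices[i].refcount++;     /* already open: take another ref */
                return &devices[i];
            }
        }
        if (num_devices == MAX_DEVS)
            return NULL;

        struct dev_entry *dd = &devices[num_devices++];
        snprintf(dd->name, sizeof(dd->name), "%s", name);
        dd->refcount = 1;                  /* first opener */
        return dd;
    }

    int main(void)
    {
        get_device("/dev/sda");
        struct dev_entry *dd = get_device("/dev/sda");
        if (dd)
            printf("%s refcount=%d\n", dd->name, dd->refcount);  /* prints 2 */
        return 0;
    }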
666 int dm_table_add_target(struct dm_table *t, const char *type, in dm_table_add_target() argument
673 if (t->singleton) { in dm_table_add_target()
675 dm_device_name(t->md), t->targets->type->name); in dm_table_add_target()
679 BUG_ON(t->num_targets >= t->num_allocated); in dm_table_add_target()
681 tgt = t->targets + t->num_targets; in dm_table_add_target()
685 DMERR("%s: zero-length target", dm_device_name(t->md)); in dm_table_add_target()
691 DMERR("%s: %s: unknown target type", dm_device_name(t->md), in dm_table_add_target()
697 if (t->num_targets) { in dm_table_add_target()
699 dm_device_name(t->md), type); in dm_table_add_target()
702 t->singleton = 1; in dm_table_add_target()
705 if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) { in dm_table_add_target()
707 dm_device_name(t->md), type); in dm_table_add_target()
711 if (t->immutable_target_type) { in dm_table_add_target()
712 if (t->immutable_target_type != tgt->type) { in dm_table_add_target()
714 dm_device_name(t->md), t->immutable_target_type->name); in dm_table_add_target()
718 if (t->num_targets) { in dm_table_add_target()
720 dm_device_name(t->md), tgt->type->name); in dm_table_add_target()
723 t->immutable_target_type = tgt->type; in dm_table_add_target()
726 tgt->table = t; in dm_table_add_target()
734 if (!adjoin(t, tgt)) { in dm_table_add_target()
751 t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; in dm_table_add_target()
755 dm_device_name(t->md), type); in dm_table_add_target()
760 DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error); in dm_table_add_target()
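
Much of dm_table_add_target() is validation: singleton and immutable target types, write access for always-writeable targets, non-zero length, and, via adjoin(), the rule that consecutive targets must tile the device with no gaps or overlaps before highs[] records the new target's last sector (begin + len - 1). A sketch of the adjoin() invariant, including the rule that the first target starts at sector 0:

    #include <stdio.h>

    typedef unsigned long long sector_t;
    struct target { sector_t begin, len; };

    /* a new target must start exactly where the previous one ends
     * (or at sector 0 if it is the first), so targets tile the device */
    static int adjoin(const struct target *prev, const struct target *next)
    {
        if (!prev)
            return next->begin == 0;
        return next->begin == prev->begin + prev->len;
    }

    int main(void)
    {
        struct target a = { 0, 100 }, b = { 100, 50 }, c = { 160, 10 };

        printf("a after nothing: %d\n", adjoin(NULL, &a));  /* 1 */
        printf("b after a:       %d\n", adjoin(&a, &b));    /* 1 */
        printf("c after b:       %d\n", adjoin(&b, &c));    /* 0: gap 150..159 */
        return 0;
    }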
829 static int dm_table_set_type(struct dm_table *t) in dm_table_set_type() argument
837 unsigned live_md_type = dm_get_md_type(t->md); in dm_table_set_type()
839 for (i = 0; i < t->num_targets; i++) { in dm_table_set_type()
840 tgt = t->targets + i; in dm_table_set_type()
869 t->type = DM_TYPE_BIO_BASED; in dm_table_set_type()
881 if (t->num_targets > 1) { in dm_table_set_type()
887 devices = dm_table_get_devices(t); in dm_table_set_type()
909 t->type = DM_TYPE_MQ_REQUEST_BASED; in dm_table_set_type()
913 t->type = live_md_type; in dm_table_set_type()
916 t->type = DM_TYPE_REQUEST_BASED; in dm_table_set_type()
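
dm_table_set_type() classifies the whole table as bio-based or request-based: mixing the two target kinds is rejected, request-based tables are limited to a single target, and the multiqueue variant additionally requires every underlying device to support it. A reduced sketch of the all-or-nothing decision, with hypothetical names:

    #include <stdio.h>
    #include <stdbool.h>

    enum table_type { BIO_BASED, REQUEST_BASED };

    /* reduction of dm_table_set_type(): every target must agree on a kind */
    static int set_type(const bool *target_is_rq, unsigned n,
                        enum table_type *out)
    {
        unsigned rq = 0;

        for (unsigned i = 0; i < n; i++)
            rq += target_is_rq[i];

        if (rq == 0) {
            *out = BIO_BASED;          /* no request-based targets */
            return 0;
        }
        if (rq != n)
            return -1;                 /* mixed table: rejected */
        if (n > 1)
            return -1;                 /* request-based allows one target only */
        *out = REQUEST_BASED;
        return 0;
    }

    int main(void)
    {
        bool targets[] = { true };
        enum table_type type;

        if (set_type(targets, 1, &type) == 0)
            printf("type = %s\n",
                   type == REQUEST_BASED ? "request-based" : "bio-based");
        return 0;
    }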
921 unsigned dm_table_get_type(struct dm_table *t) in dm_table_get_type() argument
923 return t->type; in dm_table_get_type()
926 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) in dm_table_get_immutable_target_type() argument
928 return t->immutable_target_type; in dm_table_get_immutable_target_type()
931 bool dm_table_request_based(struct dm_table *t) in dm_table_request_based() argument
933 return __table_type_request_based(dm_table_get_type(t)); in dm_table_request_based()
936 bool dm_table_mq_request_based(struct dm_table *t) in dm_table_mq_request_based() argument
938 return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED; in dm_table_mq_request_based()
941 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) in dm_table_alloc_md_mempools() argument
943 unsigned type = dm_table_get_type(t); in dm_table_alloc_md_mempools()
954 for (i = 0; i < t->num_targets; i++) { in dm_table_alloc_md_mempools()
955 tgt = t->targets + i; in dm_table_alloc_md_mempools()
959 t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size); in dm_table_alloc_md_mempools()
960 if (!t->mempools) in dm_table_alloc_md_mempools()
966 void dm_table_free_md_mempools(struct dm_table *t) in dm_table_free_md_mempools() argument
968 dm_free_md_mempools(t->mempools); in dm_table_free_md_mempools()
969 t->mempools = NULL; in dm_table_free_md_mempools()
972 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) in dm_table_get_md_mempools() argument
974 return t->mempools; in dm_table_get_md_mempools()
977 static int setup_indexes(struct dm_table *t) in setup_indexes() argument
984 for (i = t->depth - 2; i >= 0; i--) { in setup_indexes()
985 t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE); in setup_indexes()
986 total += t->counts[i]; in setup_indexes()
994 for (i = t->depth - 2; i >= 0; i--) { in setup_indexes()
995 t->index[i] = indexes; in setup_indexes()
996 indexes += (KEYS_PER_NODE * t->counts[i]); in setup_indexes()
997 setup_btree_index(i, t); in setup_indexes()
1006 static int dm_table_build_index(struct dm_table *t) in dm_table_build_index() argument
1012 leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); in dm_table_build_index()
1013 t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); in dm_table_build_index()
1016 t->counts[t->depth - 1] = leaf_nodes; in dm_table_build_index()
1017 t->index[t->depth - 1] = t->highs; in dm_table_build_index()
1019 if (t->depth >= 2) in dm_table_build_index()
1020 r = setup_indexes(t); in dm_table_build_index()
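
dm_table_build_index() computes the tree shape before setup_indexes() fills it in: the leaf count is num_targets rounded up to whole nodes, the depth is one plus the ceiling log of that in base CHILDREN_PER_NODE, and each interior level holds ceil(children / CHILDREN_PER_NODE) nodes, summed to size one vmalloc for all interior keys. A self-contained sketch of the same arithmetic with an illustrative node size:

    #include <stdio.h>

    #define KEYS_PER_NODE 4                        /* illustrative */
    #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

    static unsigned div_up(unsigned n, unsigned sz)
    {
        return (n + sz - 1) / sz;                  /* dm_div_up() */
    }

    /* ceil(log_base(n)), matching dm-table's int_log() */
    static unsigned int_log(unsigned n, unsigned base)
    {
        unsigned result = 0;
        while (n > 1) {
            n = div_up(n, base);
            result++;
        }
        return result;
    }

    int main(void)
    {
        unsigned num_targets = 100;
        unsigned counts[16];

        unsigned leaf_nodes = div_up(num_targets, KEYS_PER_NODE);
        unsigned depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

        counts[depth - 1] = leaf_nodes;
        unsigned total = 0;
        for (int i = (int)depth - 2; i >= 0; i--) {
            counts[i] = div_up(counts[i + 1], CHILDREN_PER_NODE);
            total += counts[i];                    /* interior nodes to allocate */
        }
        printf("depth=%u leaves=%u interior=%u\n", depth, leaf_nodes, total);
        return 0;                                  /* 100 targets: depth=3, 25, 6 */
    }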
1032 static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t, in dm_table_get_integrity_disk() argument
1035 struct list_head *devices = dm_table_get_devices(t); in dm_table_get_integrity_disk()
1056 dm_device_name(t->md), in dm_table_get_integrity_disk()
1072 static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md) in dm_table_prealloc_integrity() argument
1076 template_disk = dm_table_get_integrity_disk(t, false); in dm_table_prealloc_integrity()
1081 t->integrity_supported = 1; in dm_table_prealloc_integrity()
1093 dm_device_name(t->md), in dm_table_prealloc_integrity()
1099 t->integrity_supported = 1; in dm_table_prealloc_integrity()
1107 int dm_table_complete(struct dm_table *t) in dm_table_complete() argument
1111 r = dm_table_set_type(t); in dm_table_complete()
1117 r = dm_table_build_index(t); in dm_table_complete()
1123 r = dm_table_prealloc_integrity(t, t->md); in dm_table_complete()
1129 r = dm_table_alloc_md_mempools(t, t->md); in dm_table_complete()
1137 void dm_table_event_callback(struct dm_table *t, in dm_table_event_callback() argument
1141 t->event_fn = fn; in dm_table_event_callback()
1142 t->event_context = context; in dm_table_event_callback()
1146 void dm_table_event(struct dm_table *t) in dm_table_event() argument
1155 if (t->event_fn) in dm_table_event()
1156 t->event_fn(t->event_context); in dm_table_event()
1161 sector_t dm_table_get_size(struct dm_table *t) in dm_table_get_size() argument
1163 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; in dm_table_get_size()
1167 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) in dm_table_get_target() argument
1169 if (index >= t->num_targets) in dm_table_get_target()
1172 return t->targets + index; in dm_table_get_target()
1181 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) in dm_table_find_target() argument
1186 for (l = 0; l < t->depth; l++) { in dm_table_find_target()
1188 node = get_node(t, l, n); in dm_table_find_target()
1195 return &t->targets[(KEYS_PER_NODE * n) + k]; in dm_table_find_target()
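
dm_table_find_target() descends one node per level, scanning each node for the first key >= sector, so the leaf slot it lands on indexes straight into targets[]. The single-level core of that scan, assuming each target i covers sectors up to and including highs[i]:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* leaf-level core of the lookup: the owner of `sector` is the first
     * entry whose recorded high sector is >= sector */
    static unsigned find_target(const sector_t *highs, unsigned n,
                                sector_t sector)
    {
        unsigned i;

        for (i = 0; i < n; i++)
            if (highs[i] >= sector)
                break;
        return i;
    }

    int main(void)
    {
        sector_t highs[] = { 99, 199, 299 };   /* three 100-sector targets */

        printf("sector 150 -> target %u\n", find_target(highs, 3, 150)); /* 1 */
        printf("sector 0   -> target %u\n", find_target(highs, 3, 0));   /* 0 */
        return 0;
    }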
1295 static void dm_table_set_integrity(struct dm_table *t) in dm_table_set_integrity() argument
1299 if (!blk_get_integrity(dm_disk(t->md))) in dm_table_set_integrity()
1302 template_disk = dm_table_get_integrity_disk(t, true); in dm_table_set_integrity()
1304 blk_integrity_register(dm_disk(t->md), in dm_table_set_integrity()
1306 else if (blk_integrity_is_initialized(dm_disk(t->md))) in dm_table_set_integrity()
1308 dm_device_name(t->md)); in dm_table_set_integrity()
1311 dm_device_name(t->md)); in dm_table_set_integrity()
1323 static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) in dm_table_supports_flush() argument
1334 while (i < dm_table_get_num_targets(t)) { in dm_table_supports_flush()
1335 ti = dm_table_get_target(t, i++); in dm_table_supports_flush()
1351 static bool dm_table_discard_zeroes_data(struct dm_table *t) in dm_table_discard_zeroes_data() argument
1357 while (i < dm_table_get_num_targets(t)) { in dm_table_discard_zeroes_data()
1358 ti = dm_table_get_target(t, i++); in dm_table_discard_zeroes_data()
1399 static bool dm_table_all_devices_attribute(struct dm_table *t, in dm_table_all_devices_attribute() argument
1405 while (i < dm_table_get_num_targets(t)) { in dm_table_all_devices_attribute()
1406 ti = dm_table_get_target(t, i++); in dm_table_all_devices_attribute()
1424 static bool dm_table_supports_write_same(struct dm_table *t) in dm_table_supports_write_same() argument
1429 while (i < dm_table_get_num_targets(t)) { in dm_table_supports_write_same()
1430 ti = dm_table_get_target(t, i++); in dm_table_supports_write_same()
1451 static bool dm_table_supports_discards(struct dm_table *t) in dm_table_supports_discards() argument
1463 while (i < dm_table_get_num_targets(t)) { in dm_table_supports_discards()
1464 ti = dm_table_get_target(t, i++); in dm_table_supports_discards()
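
The dm_table_supports_*() predicates above come in two shapes: flush and discard support count if any target provides them, while write-same and the device attributes checked through dm_table_all_devices_attribute() must hold for every target and device. Both shapes, reduced to a userspace sketch with hypothetical field names standing in for the per-target flags and iterate_devices callbacks:

    #include <stdbool.h>
    #include <stdio.h>

    struct target { bool flush; bool write_same; };

    typedef bool (*pred_t)(const struct target *);

    static bool has_flush(const struct target *ti)      { return ti->flush; }
    static bool has_write_same(const struct target *ti) { return ti->write_same; }

    /* "any target provides it" shape (flush, discard) */
    static bool any_target(const struct target *t, unsigned n, pred_t p)
    {
        for (unsigned i = 0; i < n; i++)
            if (p(&t[i]))
                return true;
        return false;
    }

    /* "every target provides it" shape (write-same, device attributes) */
    static bool all_targets(const struct target *t, unsigned n, pred_t p)
    {
        for (unsigned i = 0; i < n; i++)
            if (!p(&t[i]))
                return false;
        return true;
    }

    int main(void)
    {
        struct target tgts[] = { { true, true }, { false, true } };

        printf("flush: %d, write_same: %d\n",
               any_target(tgts, 2, has_flush),        /* 1 */
               all_targets(tgts, 2, has_write_same)); /* 1 */
        return 0;
    }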
1480 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, in dm_table_set_restrictions() argument
1490 if (!dm_table_supports_discards(t)) in dm_table_set_restrictions()
1495 if (dm_table_supports_flush(t, REQ_FLUSH)) { in dm_table_set_restrictions()
1497 if (dm_table_supports_flush(t, REQ_FUA)) in dm_table_set_restrictions()
1502 if (!dm_table_discard_zeroes_data(t)) in dm_table_set_restrictions()
1506 if (dm_table_all_devices_attribute(t, device_is_nonrot)) in dm_table_set_restrictions()
1511 if (!dm_table_supports_write_same(t)) in dm_table_set_restrictions()
1514 if (dm_table_all_devices_attribute(t, queue_supports_sg_merge)) in dm_table_set_restrictions()
1519 if (dm_table_all_devices_attribute(t, queue_supports_sg_gaps)) in dm_table_set_restrictions()
1524 dm_table_set_integrity(t); in dm_table_set_restrictions()
1532 if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) in dm_table_set_restrictions()
1545 if (dm_table_request_based(t)) in dm_table_set_restrictions()
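
dm_table_set_restrictions() then translates those table-wide answers into request-queue flags and limits: a capability the table cannot honor gets its queue flag cleared or its limit zeroed. A toy sketch of that capability-to-flags mapping, with invented flag names rather than the block layer's real queue flags:

    #include <stdio.h>
    #include <stdbool.h>

    /* invented flag bits standing in for the block layer's queue flags */
    #define QF_DISCARD    (1u << 0)
    #define QF_NONROT     (1u << 1)
    #define QF_WRITE_SAME (1u << 2)

    static unsigned set_restrictions(bool discard, bool nonrot, bool write_same)
    {
        unsigned flags = 0;

        if (discard)
            flags |= QF_DISCARD;   /* unsupported capability: flag stays clear */
        if (nonrot)
            flags |= QF_NONROT;
        if (write_same)
            flags |= QF_WRITE_SAME;
        return flags;
    }

    int main(void)
    {
        printf("flags=%#x\n", set_restrictions(true, false, true));  /* 0x5 */
        return 0;
    }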
1549 unsigned int dm_table_get_num_targets(struct dm_table *t) in dm_table_get_num_targets() argument
1551 return t->num_targets; in dm_table_get_num_targets()
1554 struct list_head *dm_table_get_devices(struct dm_table *t) in dm_table_get_devices() argument
1556 return &t->devices; in dm_table_get_devices()
1559 fmode_t dm_table_get_mode(struct dm_table *t) in dm_table_get_mode() argument
1561 return t->mode; in dm_table_get_mode()
1571 static void suspend_targets(struct dm_table *t, enum suspend_mode mode) in suspend_targets() argument
1573 int i = t->num_targets; in suspend_targets()
1574 struct dm_target *ti = t->targets; in suspend_targets()
1595 void dm_table_presuspend_targets(struct dm_table *t) in dm_table_presuspend_targets() argument
1597 if (!t) in dm_table_presuspend_targets()
1600 suspend_targets(t, PRESUSPEND); in dm_table_presuspend_targets()
1603 void dm_table_presuspend_undo_targets(struct dm_table *t) in dm_table_presuspend_undo_targets() argument
1605 if (!t) in dm_table_presuspend_undo_targets()
1608 suspend_targets(t, PRESUSPEND_UNDO); in dm_table_presuspend_undo_targets()
1611 void dm_table_postsuspend_targets(struct dm_table *t) in dm_table_postsuspend_targets() argument
1613 if (!t) in dm_table_postsuspend_targets()
1616 suspend_targets(t, POSTSUSPEND); in dm_table_postsuspend_targets()
1619 int dm_table_resume_targets(struct dm_table *t) in dm_table_resume_targets() argument
1623 for (i = 0; i < t->num_targets; i++) { in dm_table_resume_targets()
1624 struct dm_target *ti = t->targets + i; in dm_table_resume_targets()
1632 dm_device_name(t->md), ti->type->name, r); in dm_table_resume_targets()
1637 for (i = 0; i < t->num_targets; i++) { in dm_table_resume_targets()
1638 struct dm_target *ti = t->targets + i; in dm_table_resume_targets()
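
dm_table_resume_targets() is deliberately two-pass: every target's preresume hook must succeed before any resume hook runs, so a failing target aborts the resume without half the table already live. A sketch of that shape:

    #include <stdio.h>

    struct target {
        int  (*preresume)(struct target *);
        void (*resume)(struct target *);
    };

    /* two passes: every preresume must succeed before any resume runs */
    static int resume_targets(struct target *tgts, unsigned n)
    {
        for (unsigned i = 0; i < n; i++) {
            if (!tgts[i].preresume)
                continue;
            int r = tgts[i].preresume(&tgts[i]);
            if (r)
                return r;          /* abort with no target resumed yet */
        }
        for (unsigned i = 0; i < n; i++)
            if (tgts[i].resume)
                tgts[i].resume(&tgts[i]);
        return 0;
    }

    static int  pre_ok(struct target *t)    { (void)t; return 0; }
    static void do_resume(struct target *t) { (void)t; puts("resumed"); }

    int main(void)
    {
        struct target tgts[2] = { { pre_ok, do_resume }, { pre_ok, do_resume } };
        return resume_targets(tgts, 2);
    }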
1647 void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb) in dm_table_add_target_callbacks() argument
1649 list_add(&cb->list, &t->target_callbacks); in dm_table_add_target_callbacks()
1653 int dm_table_any_congested(struct dm_table *t, int bdi_bits) in dm_table_any_congested() argument
1656 struct list_head *devices = dm_table_get_devices(t); in dm_table_any_congested()
1668 dm_device_name(t->md), in dm_table_any_congested()
1672 list_for_each_entry(cb, &t->target_callbacks, list) in dm_table_any_congested()
1679 struct mapped_device *dm_table_get_md(struct dm_table *t) in dm_table_get_md() argument
1681 return t->md; in dm_table_get_md()
1685 void dm_table_run_md_queue_async(struct dm_table *t) in dm_table_run_md_queue_async() argument
1691 if (!dm_table_request_based(t)) in dm_table_run_md_queue_async()
1694 md = dm_table_get_md(t); in dm_table_run_md_queue_async()