Lines Matching refs:mapped_device
69 struct mapped_device *md;
83 struct mapped_device *md;
139 struct mapped_device { struct
240 bool dm_use_blk_mq(struct mapped_device *md) in dm_use_blk_mq() argument
437 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md()
444 struct mapped_device *md; in dm_blk_open()
468 struct mapped_device *md; in dm_blk_close()
485 int dm_open_count(struct mapped_device *md) in dm_open_count()
493 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) in dm_lock_for_deletion()
513 int dm_cancel_deferred_remove(struct mapped_device *md) in dm_cancel_deferred_remove()
534 sector_t dm_get_size(struct mapped_device *md) in dm_get_size()
539 struct request_queue *dm_get_md_queue(struct mapped_device *md) in dm_get_md_queue()
544 struct dm_stats *dm_get_stats(struct mapped_device *md) in dm_get_stats()
551 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo()
559 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl()
597 static struct dm_io *alloc_io(struct mapped_device *md) in alloc_io()
602 static void free_io(struct mapped_device *md, struct dm_io *io) in free_io()
607 static void free_tio(struct mapped_device *md, struct dm_target_io *tio) in free_tio()
612 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, in alloc_rq_tio()
623 static struct request *alloc_clone_request(struct mapped_device *md, in alloc_clone_request()
629 static void free_clone_request(struct mapped_device *md, struct request *rq) in free_clone_request()
634 static int md_in_flight(struct mapped_device *md) in md_in_flight()
642 struct mapped_device *md = io->md; in start_io_acct()
662 struct mapped_device *md = io->md; in end_io_acct()
690 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io()
705 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table()
712 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) in dm_put_live_table()
717 void dm_sync_table(struct mapped_device *md) in dm_sync_table()
727 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) in dm_get_live_table_fast()
733 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) in dm_put_live_table_fast()
742 struct mapped_device *md) in open_table_device()
768 static void close_table_device(struct table_device *td, struct mapped_device *md) in close_table_device()
789 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, in dm_get_table_device()
825 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) in dm_put_table_device()
855 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_get_geometry()
865 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_set_geometry()
888 static int __noflush_suspending(struct mapped_device *md) in __noflush_suspending()
902 struct mapped_device *md = io->md; in dec_pending()
949 static void disable_write_same(struct mapped_device *md) in disable_write_same()
962 struct mapped_device *md = tio->io->md; in clone_endio()
1054 static void rq_completed(struct mapped_device *md, int rw, bool run_queue) in rq_completed()
1080 struct mapped_device *md = tio->md; in free_rq_clone()
1109 struct mapped_device *md = tio->md; in dm_end_request()
1163 static void dm_requeue_unmapped_original_request(struct mapped_device *md, in dm_requeue_unmapped_original_request()
1434 struct mapped_device *md; in __map_bio()
1467 struct mapped_device *md;
1674 static void __split_and_process_bio(struct mapped_device *md, in __split_and_process_bio()
1720 struct mapped_device *md = q->queuedata; in dm_merge_bvec()
1777 struct mapped_device *md = q->queuedata; in dm_make_request()
1801 int dm_request_based(struct mapped_device *md) in dm_request_based()
1855 static struct request *clone_rq(struct request *rq, struct mapped_device *md, in clone_rq()
1886 struct mapped_device *md) in init_tio()
1899 struct mapped_device *md, gfp_t gfp_mask) in prep_tio()
1929 struct mapped_device *md = q->queuedata; in dm_prep_fn()
1954 struct mapped_device *md) in map_request()
2011 struct mapped_device *md = tio->md; in map_tio_request()
2017 static void dm_start_request(struct mapped_device *md, struct request *orig) in dm_start_request()
2043 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) in dm_attr_rq_based_seq_io_merge_deadline_show()
2048 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, in dm_attr_rq_based_seq_io_merge_deadline_store()
2067 static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md) in dm_request_peeked_before_merge_deadline()
2086 struct mapped_device *md = q->queuedata; in dm_request_fn()
2150 struct mapped_device *md = congested_data; in dm_any_congested()
2225 static void dm_init_md_queue(struct mapped_device *md) in dm_init_md_queue()
2239 static void dm_init_old_md_queue(struct mapped_device *md) in dm_init_old_md_queue()
2257 static struct mapped_device *alloc_dev(int minor) in alloc_dev()
2260 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); in alloc_dev()
2365 static void unlock_fs(struct mapped_device *md);
2367 static void free_dev(struct mapped_device *md) in free_dev()
2404 static void __bind_mempools(struct mapped_device *md, struct dm_table *t) in __bind_mempools()
2451 struct mapped_device *md = (struct mapped_device *) context; in event_callback()
2466 static void __set_size(struct mapped_device *md, sector_t size) in __set_size()
2482 struct mapped_device *dev_md; in dm_queue_merge_is_compulsory()
2529 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, in __bind()
2581 static struct dm_table *__unbind(struct mapped_device *md) in __unbind()
2598 int dm_create(int minor, struct mapped_device **result) in dm_create()
2600 struct mapped_device *md; in dm_create()
2616 void dm_lock_md_type(struct mapped_device *md) in dm_lock_md_type()
2621 void dm_unlock_md_type(struct mapped_device *md) in dm_unlock_md_type()
2626 void dm_set_md_type(struct mapped_device *md, unsigned type) in dm_set_md_type()
2632 unsigned dm_get_md_type(struct mapped_device *md) in dm_get_md_type()
2638 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) in dm_get_immutable_target_type()
2647 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) in dm_get_queue_limits()
2654 static void init_rq_based_worker_thread(struct mapped_device *md) in init_rq_based_worker_thread()
2665 static int dm_init_request_based_queue(struct mapped_device *md) in dm_init_request_based_queue()
2693 struct mapped_device *md = data; in dm_mq_init_request()
2710 struct mapped_device *md = tio->md; in dm_mq_queue_rq()
2773 static int dm_init_request_based_blk_mq_queue(struct mapped_device *md) in dm_init_request_based_blk_mq_queue()
2817 static unsigned filter_md_type(unsigned type, struct mapped_device *md) in filter_md_type()
2828 int dm_setup_md_queue(struct mapped_device *md) in dm_setup_md_queue()
2858 struct mapped_device *dm_get_md(dev_t dev) in dm_get_md()
2860 struct mapped_device *md; in dm_get_md()
2887 void *dm_get_mdptr(struct mapped_device *md) in dm_get_mdptr()
2892 void dm_set_mdptr(struct mapped_device *md, void *ptr) in dm_set_mdptr()
2897 void dm_get(struct mapped_device *md) in dm_get()
2903 int dm_hold(struct mapped_device *md) in dm_hold()
2916 const char *dm_device_name(struct mapped_device *md) in dm_device_name()
2922 static void __dm_destroy(struct mapped_device *md, bool wait) in __dm_destroy()
2969 void dm_destroy(struct mapped_device *md) in dm_destroy()
2974 void dm_destroy_immediate(struct mapped_device *md) in dm_destroy_immediate()
2979 void dm_put(struct mapped_device *md) in dm_put()
2985 static int dm_wait_for_completion(struct mapped_device *md, int interruptible) in dm_wait_for_completion()
3018 struct mapped_device *md = container_of(work, struct mapped_device, in dm_wq_work()
3043 static void dm_queue_flush(struct mapped_device *md) in dm_queue_flush()
3053 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) in dm_swap_table()
3097 static int lock_fs(struct mapped_device *md) in lock_fs()
3115 static void unlock_fs(struct mapped_device *md) in unlock_fs()
3132 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, in __dm_suspend()
3237 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) in dm_suspend()
3274 static int __dm_resume(struct mapped_device *md, struct dm_table *map) in __dm_resume()
3297 int dm_resume(struct mapped_device *md) in dm_resume()
3340 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) in __dm_internal_suspend()
3367 static void __dm_internal_resume(struct mapped_device *md) in __dm_internal_resume()
3389 void dm_internal_suspend_noflush(struct mapped_device *md) in dm_internal_suspend_noflush()
3397 void dm_internal_resume(struct mapped_device *md) in dm_internal_resume()
3410 void dm_internal_suspend_fast(struct mapped_device *md) in dm_internal_suspend_fast()
3423 void dm_internal_resume_fast(struct mapped_device *md) in dm_internal_resume_fast()
3438 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, in dm_kobject_uevent()
3454 uint32_t dm_next_uevent_seq(struct mapped_device *md) in dm_next_uevent_seq()
3459 uint32_t dm_get_event_nr(struct mapped_device *md) in dm_get_event_nr()
3464 int dm_wait_event(struct mapped_device *md, int event_nr) in dm_wait_event()
3470 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) in dm_uevent_add()
3483 struct gendisk *dm_disk(struct mapped_device *md) in dm_disk()
3489 struct kobject *dm_kobject(struct mapped_device *md) in dm_kobject()
3494 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) in dm_get_from_kobject()
3496 struct mapped_device *md; in dm_get_from_kobject()
3498 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); in dm_get_from_kobject()
3508 int dm_suspended_md(struct mapped_device *md) in dm_suspended_md()
3513 int dm_suspended_internally_md(struct mapped_device *md) in dm_suspended_internally_md()
3518 int dm_test_deferred_remove_flag(struct mapped_device *md) in dm_test_deferred_remove_flag()
3535 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, in dm_alloc_md_mempools()
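
The listing above is raw cross-reference output; the sketches below are not from dm.c and only illustrate how a caller typically uses the interfaces it points at.

The dm_get_live_table()/dm_put_live_table() pair (lines 705 and 712 above) is how readers pin the active table under the md->io_barrier SRCU protection. A minimal caller sketch, assuming only those two signatures plus dm_table_get_size() from <linux/device-mapper.h>; example_live_table_size() itself is hypothetical:

#include <linux/device-mapper.h>

/*
 * Hypothetical helper: report the size of the currently bound table.
 * dm_get_live_table() pins the live table under SRCU; the same srcu_idx
 * must be handed back to dm_put_live_table() when the caller is done.
 */
static sector_t example_live_table_size(struct mapped_device *md)
{
	struct dm_table *map;
	int srcu_idx;
	sector_t size = 0;

	map = dm_get_live_table(md, &srcu_idx);
	if (map)		/* NULL while no table is bound */
		size = dm_table_get_size(map);
	dm_put_live_table(md, srcu_idx);

	return size;
}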
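
dm_get_md() (line 2858), together with dm_get(), dm_hold() and dm_put() (lines 2897, 2903 and 2979), forms the mapped_device reference-counting interface: a successful dm_get_md() lookup already holds a reference that the caller must drop. A hedged sketch of that pairing; example_report_name() is made up for illustration and assumes the declarations in <linux/device-mapper.h>:

#include <linux/device-mapper.h>

/* Hypothetical helper: look up a mapped_device by dev_t and log its name. */
static void example_report_name(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return;		/* no such device, or it is being deleted */

	pr_info("dm device %s is present\n", dm_device_name(md));

	dm_put(md);		/* drop the reference dm_get_md() took */
}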
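
dm_suspend() (line 3237), dm_swap_table() (line 3053), dm_resume() (line 3297) and dm_sync_table() (line 717) are the pieces dm-ioctl strings together when a new table is bound and the device is resumed against it. A simplified sketch of that ordering, with error handling reduced; example_replace_table() is hypothetical, and DM_SUSPEND_LOCKFS_FLAG is assumed to come from the driver-private drivers/md/dm.h:

#include <linux/err.h>
#include <linux/device-mapper.h>
#include "dm.h"		/* driver-private: DM_SUSPEND_LOCKFS_FLAG */

/*
 * Hypothetical helper: suspend the device, bind a pre-built table and
 * resume against it.  dm_swap_table() returns the previously bound table
 * (or an ERR_PTR); the caller owns that old table and must destroy it.
 */
static int example_replace_table(struct mapped_device *md, struct dm_table *t)
{
	struct dm_table *old_map;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	old_map = dm_swap_table(md, t);
	if (IS_ERR(old_map)) {
		dm_resume(md);
		return PTR_ERR(old_map);
	}

	r = dm_resume(md);

	if (old_map) {
		dm_sync_table(md);	/* wait out SRCU readers of the old table */
		dm_table_destroy(old_map);
	}

	return r;
}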
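
dm_get_event_nr() and dm_wait_event() (lines 3459 and 3464) expose the event counter that event_callback() (line 2451) bumps, which is how waiters notice table events. A small sketch of the sample-then-wait idiom, with example_wait_for_next_event() as a hypothetical caller:

#include <linux/device-mapper.h>

/*
 * Hypothetical helper: block (interruptibly) until the device raises an
 * event newer than the one observed at entry.
 */
static int example_wait_for_next_event(struct mapped_device *md)
{
	uint32_t seen = dm_get_event_nr(md);

	/* 0 on a new event, -ERESTARTSYS if interrupted by a signal */
	return dm_wait_event(md, seen);
}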