Lines Matching defs:md
70 struct mapped_device *md; member
84 struct mapped_device *md; member
243 bool dm_use_blk_mq(struct mapped_device *md) in dm_use_blk_mq()
440 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md()
447 struct mapped_device *md; in dm_blk_open() local
471 struct mapped_device *md; in dm_blk_close() local
488 int dm_open_count(struct mapped_device *md) in dm_open_count()
496 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) in dm_lock_for_deletion()
516 int dm_cancel_deferred_remove(struct mapped_device *md) in dm_cancel_deferred_remove()
537 sector_t dm_get_size(struct mapped_device *md) in dm_get_size()
542 struct request_queue *dm_get_md_queue(struct mapped_device *md) in dm_get_md_queue()
547 struct dm_stats *dm_get_stats(struct mapped_device *md) in dm_get_stats()
554 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo() local
559 static int dm_get_live_table_for_ioctl(struct mapped_device *md, in dm_get_live_table_for_ioctl()
604 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl() local
630 static struct dm_io *alloc_io(struct mapped_device *md) in alloc_io()
635 static void free_io(struct mapped_device *md, struct dm_io *io) in free_io()
640 static void free_tio(struct mapped_device *md, struct dm_target_io *tio) in free_tio()
645 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md, in alloc_rq_tio()
656 static struct request *alloc_clone_request(struct mapped_device *md, in alloc_clone_request()
662 static void free_clone_request(struct mapped_device *md, struct request *rq) in free_clone_request()
667 static int md_in_flight(struct mapped_device *md) in md_in_flight()
675 struct mapped_device *md = io->md; in start_io_acct() local
695 struct mapped_device *md = io->md; in end_io_acct() local
723 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io()
738 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table()
745 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) in dm_put_live_table()
750 void dm_sync_table(struct mapped_device *md) in dm_sync_table()
760 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) in dm_get_live_table_fast()
766 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) in dm_put_live_table_fast()
775 struct mapped_device *md) in open_table_device()
801 static void close_table_device(struct table_device *td, struct mapped_device *md) in close_table_device()
822 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, in dm_get_table_device()
858 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) in dm_put_table_device()
888 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_get_geometry()
898 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_set_geometry()
921 static int __noflush_suspending(struct mapped_device *md) in __noflush_suspending()
935 struct mapped_device *md = io->md; in dec_pending() local
983 static void disable_write_same(struct mapped_device *md) in disable_write_same()
997 struct mapped_device *md = tio->io->md; in clone_endio() local
1082 static void rq_end_stats(struct mapped_device *md, struct request *orig) in rq_end_stats()
1098 static void rq_completed(struct mapped_device *md, int rw, bool run_queue) in rq_completed()
1124 struct mapped_device *md = tio->md; in free_rq_clone() local
1153 struct mapped_device *md = tio->md; in dm_end_request() local
1208 static void dm_requeue_original_request(struct mapped_device *md, in dm_requeue_original_request()
1474 struct mapped_device *md; in __map_bio() local
1507 struct mapped_device *md; member
1714 static void __split_and_process_bio(struct mapped_device *md, in __split_and_process_bio()
1763 struct mapped_device *md = q->queuedata; in dm_make_request() local
1787 int dm_request_based(struct mapped_device *md) in dm_request_based()
1841 static struct request *clone_rq(struct request *rq, struct mapped_device *md, in clone_rq()
1872 struct mapped_device *md) in init_tio()
1885 struct mapped_device *md, gfp_t gfp_mask) in prep_tio()
1915 struct mapped_device *md = q->queuedata; in dm_prep_fn() local
1940 struct mapped_device *md) in map_request()
1997 struct mapped_device *md = tio->md; in map_tio_request() local
2003 static void dm_start_request(struct mapped_device *md, struct request *orig) in dm_start_request()
2037 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) in dm_attr_rq_based_seq_io_merge_deadline_show()
2042 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, in dm_attr_rq_based_seq_io_merge_deadline_store()
2061 static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md) in dm_request_peeked_before_merge_deadline()
2080 struct mapped_device *md = q->queuedata; in dm_request_fn() local
2144 struct mapped_device *md = congested_data; in dm_any_congested() local
2219 static void dm_init_md_queue(struct mapped_device *md) in dm_init_md_queue()
2240 static void dm_init_old_md_queue(struct mapped_device *md) in dm_init_old_md_queue()
2252 static void cleanup_mapped_device(struct mapped_device *md) in cleanup_mapped_device()
2288 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); in alloc_dev() local
2388 static void free_dev(struct mapped_device *md) in free_dev()
2406 static void __bind_mempools(struct mapped_device *md, struct dm_table *t) in __bind_mempools()
2453 struct mapped_device *md = (struct mapped_device *) context; in event_callback() local
2468 static void __set_size(struct mapped_device *md, sector_t size) in __set_size()
2478 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, in __bind()
2523 static struct dm_table *__unbind(struct mapped_device *md) in __unbind()
2542 struct mapped_device *md; in dm_create() local
2558 void dm_lock_md_type(struct mapped_device *md) in dm_lock_md_type()
2563 void dm_unlock_md_type(struct mapped_device *md) in dm_unlock_md_type()
2568 void dm_set_md_type(struct mapped_device *md, unsigned type) in dm_set_md_type()
2574 unsigned dm_get_md_type(struct mapped_device *md) in dm_get_md_type()
2580 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) in dm_get_immutable_target_type()
2589 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) in dm_get_queue_limits()
2596 static void init_rq_based_worker_thread(struct mapped_device *md) in init_rq_based_worker_thread()
2607 static int dm_init_request_based_queue(struct mapped_device *md) in dm_init_request_based_queue()
2635 struct mapped_device *md = data; in dm_mq_init_request() local
2652 struct mapped_device *md = tio->md; in dm_mq_queue_rq() local
2716 static int dm_init_request_based_blk_mq_queue(struct mapped_device *md) in dm_init_request_based_blk_mq_queue()
2760 static unsigned filter_md_type(unsigned type, struct mapped_device *md) in filter_md_type()
2771 int dm_setup_md_queue(struct mapped_device *md) in dm_setup_md_queue()
2808 struct mapped_device *md; in dm_get_md() local
2835 void *dm_get_mdptr(struct mapped_device *md) in dm_get_mdptr()
2840 void dm_set_mdptr(struct mapped_device *md, void *ptr) in dm_set_mdptr()
2845 void dm_get(struct mapped_device *md) in dm_get()
2851 int dm_hold(struct mapped_device *md) in dm_hold()
2864 const char *dm_device_name(struct mapped_device *md) in dm_device_name()
2870 static void __dm_destroy(struct mapped_device *md, bool wait) in __dm_destroy()
2917 void dm_destroy(struct mapped_device *md) in dm_destroy()
2922 void dm_destroy_immediate(struct mapped_device *md) in dm_destroy_immediate()
2927 void dm_put(struct mapped_device *md) in dm_put()
2933 static int dm_wait_for_completion(struct mapped_device *md, int interruptible) in dm_wait_for_completion()
2966 struct mapped_device *md = container_of(work, struct mapped_device, in dm_wq_work() local
2991 static void dm_queue_flush(struct mapped_device *md) in dm_queue_flush()
3001 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) in dm_swap_table()
3045 static int lock_fs(struct mapped_device *md) in lock_fs()
3063 static void unlock_fs(struct mapped_device *md) in unlock_fs()
3080 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, in __dm_suspend()
3185 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) in dm_suspend()
3222 static int __dm_resume(struct mapped_device *md, struct dm_table *map) in __dm_resume()
3245 int dm_resume(struct mapped_device *md) in dm_resume()
3288 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) in __dm_internal_suspend()
3315 static void __dm_internal_resume(struct mapped_device *md) in __dm_internal_resume()
3337 void dm_internal_suspend_noflush(struct mapped_device *md) in dm_internal_suspend_noflush()
3345 void dm_internal_resume(struct mapped_device *md) in dm_internal_resume()
3358 void dm_internal_suspend_fast(struct mapped_device *md) in dm_internal_suspend_fast()
3371 void dm_internal_resume_fast(struct mapped_device *md) in dm_internal_resume_fast()
3386 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, in dm_kobject_uevent()
3402 uint32_t dm_next_uevent_seq(struct mapped_device *md) in dm_next_uevent_seq()
3407 uint32_t dm_get_event_nr(struct mapped_device *md) in dm_get_event_nr()
3412 int dm_wait_event(struct mapped_device *md, int event_nr) in dm_wait_event()
3418 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) in dm_uevent_add()
3431 struct gendisk *dm_disk(struct mapped_device *md) in dm_disk()
3437 struct kobject *dm_kobject(struct mapped_device *md) in dm_kobject()
3444 struct mapped_device *md; in dm_get_from_kobject() local
3456 int dm_suspended_md(struct mapped_device *md) in dm_suspended_md()
3461 int dm_suspended_internally_md(struct mapped_device *md) in dm_suspended_internally_md()
3466 int dm_test_deferred_remove_flag(struct mapped_device *md) in dm_test_deferred_remove_flag()
3483 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, in dm_alloc_md_mempools()
3558 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_register() local
3581 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_reserve() local
3603 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_release() local
3626 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_preempt() local
3648 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear() local