Searched refs:mapped_device (Results 1 - 18 of 18) sorted by relevance

/linux-4.4.14/drivers/md/
dm.h
36 * Type of table and mapped_device's mempool
81 void dm_lock_md_type(struct mapped_device *md);
82 void dm_unlock_md_type(struct mapped_device *md);
83 void dm_set_md_type(struct mapped_device *md, unsigned type);
84 unsigned dm_get_md_type(struct mapped_device *md);
85 struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
87 int dm_setup_md_queue(struct mapped_device *md);
124 * Is this mapped_device being deleted?
126 int dm_deleting_md(struct mapped_device *md);
129 * Is this mapped_device suspended?
131 int dm_suspended_md(struct mapped_device *md);
136 int dm_suspended_internally_md(struct mapped_device *md);
137 void dm_internal_suspend_fast(struct mapped_device *md);
138 void dm_internal_resume_fast(struct mapped_device *md);
139 void dm_internal_suspend_noflush(struct mapped_device *md);
140 void dm_internal_resume(struct mapped_device *md);
145 int dm_test_deferred_remove_flag(struct mapped_device *md);
172 int dm_sysfs_init(struct mapped_device *md);
173 void dm_sysfs_exit(struct mapped_device *md);
174 struct kobject *dm_kobject(struct mapped_device *md);
175 struct mapped_device *dm_get_from_kobject(struct kobject *kobj);
192 * mapped_device operations
194 void dm_destroy(struct mapped_device *md);
195 void dm_destroy_immediate(struct mapped_device *md);
196 int dm_open_count(struct mapped_device *md);
197 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
198 int dm_cancel_deferred_remove(struct mapped_device *md);
199 int dm_request_based(struct mapped_device *md);
200 sector_t dm_get_size(struct mapped_device *md);
201 struct request_queue *dm_get_md_queue(struct mapped_device *md);
202 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
204 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
205 struct dm_stats *dm_get_stats(struct mapped_device *md);
207 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
210 void dm_internal_suspend(struct mapped_device *md);
211 void dm_internal_resume(struct mapped_device *md);
213 bool dm_use_blk_mq(struct mapped_device *md);
224 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
239 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
240 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
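The md-type helpers above come as a lock/query/set/unlock cluster, which suggests the intended call pattern: serialize the first queue setup against concurrent table loads. A minimal sketch of that pattern using only the signatures listed above; the DM_TYPE_NONE check and the error path are assumptions for illustration, not lines from this file.

#include <linux/device-mapper.h>
#include "dm.h"

/* Hypothetical caller: bind a table's type to the device exactly once. */
static int example_bind_md_type(struct mapped_device *md, unsigned table_type)
{
	int r = 0;

	dm_lock_md_type(md);
	if (dm_get_md_type(md) == DM_TYPE_NONE) {
		/* First table load: record the type and build the queue. */
		dm_set_md_type(md, table_type);
		r = dm_setup_md_queue(md);
	} else if (dm_get_md_type(md) != table_type) {
		/* A later table must not change the device's type (assumed policy). */
		r = -EINVAL;
	}
	dm_unlock_md_type(md);

	return r;
}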
dm-sysfs.c
13 ssize_t (*show)(struct mapped_device *, char *);
14 ssize_t (*store)(struct mapped_device *, const char *, size_t count);
25 struct mapped_device *md; (in dm_attr_show)
50 struct mapped_device *md; (in dm_attr_store)
67 static ssize_t dm_attr_name_show(struct mapped_device *md, char *buf)
76 static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf)
85 static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
92 static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
129 int dm_sysfs_init(struct mapped_device *md)
139 void dm_sysfs_exit(struct mapped_device *md)
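Lines 13-14 show the shape of dm's sysfs glue: an attribute type whose show/store hooks take a mapped_device rather than a raw kobject. A sketch of the dispatch the dm_attr_show() match implies; the container_of()/refcount plumbing is reconstructed as an assumption, while dm_get_from_kobject() and dm_put() are the real helpers declared in dm.h and device-mapper.h above.

#include <linux/sysfs.h>
#include <linux/device-mapper.h>
#include "dm.h"

struct dm_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(struct mapped_device *, char *);
	ssize_t (*store)(struct mapped_device *, const char *, size_t count);
};

static ssize_t dm_attr_show(struct kobject *kobj, struct attribute *attr,
			    char *page)
{
	struct dm_sysfs_attr *dm_attr;
	struct mapped_device *md;
	ssize_t ret;

	dm_attr = container_of(attr, struct dm_sysfs_attr, attr);
	if (!dm_attr->show)
		return -EIO;

	/* Map the kobject back to its mapped_device; the reference it
	 * takes is balanced by dm_put() below. */
	md = dm_get_from_kobject(kobj);
	if (!md)
		return -EINVAL;

	ret = dm_attr->show(md, page);
	dm_put(md);

	return ret;
}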
dm-stats.h
27 struct mapped_device;
29 int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
dm.c
70 struct mapped_device *md;
84 struct mapped_device *md;
142 struct mapped_device {
243 bool dm_use_blk_mq(struct mapped_device *md)
440 int dm_deleting_md(struct mapped_device *md)
447 struct mapped_device *md; (in dm_blk_open)
471 struct mapped_device *md; (in dm_blk_close)
488 int dm_open_count(struct mapped_device *md)
496 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
516 int dm_cancel_deferred_remove(struct mapped_device *md)
537 sector_t dm_get_size(struct mapped_device *md)
542 struct request_queue *dm_get_md_queue(struct mapped_device *md)
547 struct dm_stats *dm_get_stats(struct mapped_device *md)
554 struct mapped_device *md = bdev->bd_disk->private_data; (in dm_blk_getgeo)
559 static int dm_get_live_table_for_ioctl(struct mapped_device *md,
604 struct mapped_device *md = bdev->bd_disk->private_data; (in dm_blk_ioctl)
630 static struct dm_io *alloc_io(struct mapped_device *md)
635 static void free_io(struct mapped_device *md, struct dm_io *io)
640 static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
645 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
656 static struct request *alloc_clone_request(struct mapped_device *md,
662 static void free_clone_request(struct mapped_device *md, struct request *rq)
667 static int md_in_flight(struct mapped_device *md)
675 struct mapped_device *md = io->md; (in start_io_acct)
695 struct mapped_device *md = io->md; (in end_io_acct)
723 static void queue_io(struct mapped_device *md, struct bio *bio)
738 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
745 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
750 void dm_sync_table(struct mapped_device *md)
760 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
766 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
775 struct mapped_device *md) (in open_table_device)
801 static void close_table_device(struct table_device *td, struct mapped_device *md)
822 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
858 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
888 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
898 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
921 static int __noflush_suspending(struct mapped_device *md)
935 struct mapped_device *md = io->md; (in dec_pending)
983 static void disable_write_same(struct mapped_device *md)
997 struct mapped_device *md = tio->io->md; (in clone_endio)
1082 static void rq_end_stats(struct mapped_device *md, struct request *orig)
1098 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1124 struct mapped_device *md = tio->md; (in free_rq_clone)
1153 struct mapped_device *md = tio->md; (in dm_end_request)
1208 static void dm_requeue_original_request(struct mapped_device *md,
1474 struct mapped_device *md; (in __map_bio)
1507 struct mapped_device *md;
1714 static void __split_and_process_bio(struct mapped_device *md,
1763 struct mapped_device *md = q->queuedata; (in dm_make_request)
1787 int dm_request_based(struct mapped_device *md)
1841 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1872 struct mapped_device *md) (in init_tio)
1885 struct mapped_device *md, gfp_t gfp_mask) (in prep_tio)
1915 struct mapped_device *md = q->queuedata; (in dm_prep_fn)
1940 struct mapped_device *md) (in map_request)
1997 struct mapped_device *md = tio->md; (in map_tio_request)
2003 static void dm_start_request(struct mapped_device *md, struct request *orig)
2037 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
2042 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
2061 static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
2080 struct mapped_device *md = q->queuedata; (in dm_request_fn)
2144 struct mapped_device *md = congested_data; (in dm_any_congested)
2219 static void dm_init_md_queue(struct mapped_device *md)
2240 static void dm_init_old_md_queue(struct mapped_device *md)
2252 static void cleanup_mapped_device(struct mapped_device *md)
2285 static struct mapped_device *alloc_dev(int minor)
2288 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL); (in alloc_dev)
2386 static void unlock_fs(struct mapped_device *md);
2388 static void free_dev(struct mapped_device *md)
2406 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2453 struct mapped_device *md = (struct mapped_device *) context; (in event_callback)
2468 static void __set_size(struct mapped_device *md, sector_t size)
2478 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2523 static struct dm_table *__unbind(struct mapped_device *md)
2540 int dm_create(int minor, struct mapped_device **result)
2542 struct mapped_device *md; (in dm_create)
2558 void dm_lock_md_type(struct mapped_device *md)
2563 void dm_unlock_md_type(struct mapped_device *md)
2568 void dm_set_md_type(struct mapped_device *md, unsigned type)
2574 unsigned dm_get_md_type(struct mapped_device *md)
2580 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2589 struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2596 static void init_rq_based_worker_thread(struct mapped_device *md)
2607 static int dm_init_request_based_queue(struct mapped_device *md)
2635 struct mapped_device *md = data; (in dm_mq_init_request)
2652 struct mapped_device *md = tio->md; (in dm_mq_queue_rq)
2716 static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
2760 static unsigned filter_md_type(unsigned type, struct mapped_device *md)
2771 int dm_setup_md_queue(struct mapped_device *md)
2806 struct mapped_device *dm_get_md(dev_t dev)
2808 struct mapped_device *md; (in dm_get_md)
2835 void *dm_get_mdptr(struct mapped_device *md)
2840 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2845 void dm_get(struct mapped_device *md)
2851 int dm_hold(struct mapped_device *md)
2864 const char *dm_device_name(struct mapped_device *md)
2870 static void __dm_destroy(struct mapped_device *md, bool wait)
2902 * No one should increment the reference count of the mapped_device, (in __dm_destroy)
2903 * after the mapped_device state becomes DMF_FREEING. (in __dm_destroy)
2909 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", (in __dm_destroy)
2917 void dm_destroy(struct mapped_device *md)
2922 void dm_destroy_immediate(struct mapped_device *md)
2927 void dm_put(struct mapped_device *md)
2933 static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2966 struct mapped_device *md = container_of(work, struct mapped_device, (in dm_wq_work)
2991 static void dm_queue_flush(struct mapped_device *md)
3001 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
3045 static int lock_fs(struct mapped_device *md)
3063 static void unlock_fs(struct mapped_device *md)
3080 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
3185 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
3222 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
3245 int dm_resume(struct mapped_device *md)
3288 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
3315 static void __dm_internal_resume(struct mapped_device *md)
3337 void dm_internal_suspend_noflush(struct mapped_device *md)
3345 void dm_internal_resume(struct mapped_device *md)
3358 void dm_internal_suspend_fast(struct mapped_device *md)
3371 void dm_internal_resume_fast(struct mapped_device *md)
3386 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
3402 uint32_t dm_next_uevent_seq(struct mapped_device *md)
3407 uint32_t dm_get_event_nr(struct mapped_device *md)
3412 int dm_wait_event(struct mapped_device *md, int event_nr)
3418 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
3431 struct gendisk *dm_disk(struct mapped_device *md)
3437 struct kobject *dm_kobject(struct mapped_device *md)
3442 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3444 struct mapped_device *md; (in dm_get_from_kobject)
3446 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); (in dm_get_from_kobject)
3456 int dm_suspended_md(struct mapped_device *md)
3461 int dm_suspended_internally_md(struct mapped_device *md)
3466 int dm_test_deferred_remove_flag(struct mapped_device *md)
3483 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
3558 struct mapped_device *md = bdev->bd_disk->private_data; (in dm_pr_register)
3581 struct mapped_device *md = bdev->bd_disk->private_data; (in dm_pr_reserve)
3603 struct mapped_device *md = bdev->bd_disk->private_data; (in dm_pr_release)
3626 struct mapped_device *md = bdev->bd_disk->private_data; (in dm_pr_preempt)
3648 struct mapped_device *md = bdev->bd_disk->private_data; (in dm_pr_clear)
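The __acquires(md->io_barrier)/__releases(md->io_barrier) annotations on dm_get_live_table()/dm_put_live_table() (lines 738/745 above) describe an SRCU-style read side: pin the live table, use it, then drop the same srcu_idx. A minimal reader sketch under that contract; the size query is just an illustrative read-only use, with dm_table_get_size() taken from device-mapper.h.

static sector_t example_live_table_size(struct mapped_device *md)
{
	struct dm_table *map;
	int srcu_idx;
	sector_t size = 0;

	map = dm_get_live_table(md, &srcu_idx);	/* may return NULL */
	if (map)
		size = dm_table_get_size(map);	/* any read-only use of the table */
	dm_put_live_table(md, srcu_idx);	/* release the same srcu_idx */

	return size;
}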
dm-uevent.c
44 struct mapped_device *md;
57 static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
71 static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
186 struct mapped_device *md = dm_table_get_md(ti->table); (in dm_path_uevent)
dm-ioctl.c
35 struct mapped_device *md;
141 struct mapped_device *md; (in __get_dev_cell)
161 struct mapped_device *md) (in alloc_cell)
207 static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
283 struct mapped_device *md; (in dm_hash_remove_all)
368 static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
374 struct mapped_device *md; (in dm_hash_rename)
645 static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx)
668 static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
680 static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
737 struct mapped_device *md; (in dev_create)
812 static struct mapped_device *find_device(struct dm_ioctl *param)
815 struct mapped_device *md = NULL; (in find_device)
829 struct mapped_device *md; (in dev_remove)
895 struct mapped_device *md; (in dev_rename)
924 struct mapped_device *md; (in dev_set_geometry)
972 struct mapped_device *md; (in do_suspend)
1002 struct mapped_device *md; (in do_resume)
1084 struct mapped_device *md; (in dev_status)
1176 struct mapped_device *md; (in dev_wait)
1275 struct mapped_device *md; (in table_load)
1359 struct mapped_device *md; (in table_clear)
1432 struct mapped_device *md; (in table_deps)
1458 struct mapped_device *md; (in table_status)
1484 static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
1515 struct mapped_device *md; (in target_message)
1928 * @md: Pointer to mapped_device
1932 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
dm-stats.c
241 void (*suspend_callback)(struct mapped_device *), (in dm_stats_create)
242 void (*resume_callback)(struct mapped_device *), (in dm_stats_create)
243 struct mapped_device *md) (in dm_stats_create)
945 static int message_stats_create(struct mapped_device *md,
1070 static int message_stats_delete(struct mapped_device *md,
1085 static int message_stats_clear(struct mapped_device *md,
1100 static int message_stats_list(struct mapped_device *md,
1123 static int message_stats_print(struct mapped_device *md,
1150 static int message_stats_set_aux(struct mapped_device *md,
1165 int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
dm-table.c
32 struct mapped_device *md;
184 unsigned num_targets, struct mapped_device *md) (in dm_table_create)
215 static void free_devices(struct list_head *devices, struct mapped_device *md)
349 struct mapped_device *md) (in upgrade_mode)
945 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
1077 struct mapped_device *md = t->md; (in dm_table_register_integrity)
1676 struct mapped_device *dm_table_get_md(struct dm_table *t)
1684 struct mapped_device *md; (in dm_table_run_md_queue_async)
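dm_table_create() (line 184 above; full declaration in device-mapper.h below) takes the owning mapped_device at creation time, which is why dm_table_get_md() (line 1676) can later recover it, as the dm-uevent.c and dm-verity.c matches do. A hedged sketch of that round trip; the mode and target count are arbitrary illustration, and example_new_table() is not a function from the source.

static struct dm_table *example_new_table(struct mapped_device *md)
{
	struct dm_table *t;

	if (dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md))
		return NULL;

	/* ... targets would be added here before the table is used ... */

	WARN_ON(dm_table_get_md(t) != md);	/* the table remembers its owner */
	return t;
}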
dm-thin.c
227 struct mapped_device *pool_md;
305 struct mapped_device *thin_md;
482 * A global list of pools that uses a struct mapped_device as a key.
507 static struct pool *__pool_table_lookup(struct mapped_device *md)
2766 static struct pool *pool_create(struct mapped_device *pool_md,
2909 static struct pool *__pool_find(struct mapped_device *pool_md,
3961 struct mapped_device *pool_md; (in thin_ctr)
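The dm-thin.c comment at line 482 names the design: pools live on a global list keyed by the mapped_device they were created for, and __pool_table_lookup() walks it. A sketch of that lookup; the list head, lock omission, and struct layout here are assumptions for illustration, with only pool_md and the "md as key" idea coming from the listing.

struct pool {
	struct list_head list;
	struct mapped_device *pool_md;	/* the key, per dm-thin.c:227 */
	/* ... */
};

static LIST_HEAD(example_pool_list);	/* assumed global pool table */

static struct pool *example_pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool;

	/* Linear scan: pool counts are small, so a list beats a hash here. */
	list_for_each_entry(pool, &example_pool_list, list)
		if (pool->pool_md == md)
			return pool;

	return NULL;
}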
dm-verity.c
203 struct mapped_device *md = dm_table_get_md(v->ti->table); (in verity_handle_err)
dm-snap.c
1905 struct mapped_device *origin_md = NULL; (in snapshot_resume)
/linux-4.4.14/include/linux/
device-mapper.h
19 struct mapped_device;
369 int dm_create(int minor, struct mapped_device **md);
374 struct mapped_device *dm_get_md(dev_t dev);
375 void dm_get(struct mapped_device *md);
376 int dm_hold(struct mapped_device *md);
377 void dm_put(struct mapped_device *md);
382 void dm_set_mdptr(struct mapped_device *md, void *ptr);
383 void *dm_get_mdptr(struct mapped_device *md);
388 int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
389 int dm_resume(struct mapped_device *md);
394 uint32_t dm_get_event_nr(struct mapped_device *md);
395 int dm_wait_event(struct mapped_device *md, int event_nr);
396 uint32_t dm_next_uevent_seq(struct mapped_device *md);
397 void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
402 const char *dm_device_name(struct mapped_device *md);
403 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
404 struct gendisk *dm_disk(struct mapped_device *md);
410 struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
415 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
416 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
426 unsigned num_targets, struct mapped_device *md);
452 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
453 void dm_put_live_table(struct mapped_device *md, int srcu_idx);
454 void dm_sync_table(struct mapped_device *md);
462 struct mapped_device *dm_table_get_md(struct dm_table *t);
478 struct dm_table *dm_swap_table(struct mapped_device *md,
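The declarations above imply a reference-counting contract: dm_get_md() looks a device up by dev_t and returns it with a reference held (dm_get()/dm_hold() take additional ones), and the caller balances each with dm_put(). A minimal sketch of that lifecycle; the pr_info() reporting is illustrative only.

#include <linux/device-mapper.h>

static void example_describe_md(dev_t dev)
{
	struct mapped_device *md;

	md = dm_get_md(dev);	/* NULL if no such mapped device */
	if (!md)
		return;

	pr_info("dm device %u:%u is named %s\n",
		MAJOR(dev), MINOR(dev), dm_device_name(md));

	dm_put(md);		/* drop the reference dm_get_md() took */
}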
/linux-4.4.14/drivers/ssb/
pci.c
78 bus->mapped_device = dev; (in ssb_pci_switch_core)
977 if (unlikely(bus->mapped_device != dev)) { (in ssb_pci_read8)
990 if (unlikely(bus->mapped_device != dev)) { (in ssb_pci_read16)
1003 if (unlikely(bus->mapped_device != dev)) { (in ssb_pci_read32)
1019 if (unlikely(bus->mapped_device != dev)) { (in ssb_pci_block_read)
1051 if (unlikely(bus->mapped_device != dev)) { (in ssb_pci_write8)
1064 if (unlikely(bus->mapped_device != dev)) { (in ssb_pci_write16)
1077 if (unlikely(bus->mapped_device != dev)) { (in ssb_pci_write32)
1093 if (unlikely(bus->mapped_device != dev)) { (in ssb_pci_block_write)
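Every ssb_pci accessor above opens with the same guard: the PCI BAR window maps one core at a time, bus->mapped_device caches which core is mapped (set in ssb_pci_switch_core at line 78, cleared in ssb_iounmap/ssb_bus_resume below), and a mismatch triggers a window switch before the register access. A sketch reconstructing the read8 path from those guard lines; the exact prototype of ssb_pci_switch_core() and the 0xFF all-ones fallback on a failed switch are assumptions.

#include <linux/io.h>
#include <linux/ssb/ssb.h>

/* Assumed prototype of the switch routine matched at pci.c:78. */
extern int ssb_pci_switch_core(struct ssb_bus *bus, struct ssb_device *dev);

static u8 example_ssb_pci_read8(struct ssb_device *dev, u16 offset)
{
	struct ssb_bus *bus = dev->bus;

	/* Fast path: the window already points at this core. */
	if (unlikely(bus->mapped_device != dev)) {
		if (unlikely(ssb_pci_switch_core(bus, dev)))
			return 0xFF;	/* switch failed: fake an all-ones read */
	}
	return ioread8(bus->mmio + offset);
}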
pcmcia.c
162 bus->mapped_device = dev; (in ssb_pcmcia_switch_core)
210 if (unlikely(dev != bus->mapped_device)) { (in select_core_and_segment)
scan.c
220 bus->mapped_device = NULL; (in ssb_iounmap)
sdio.c
222 bus->mapped_device = dev; (in ssb_sdio_switch_core)
main.c
165 bus->mapped_device = NULL; (in ssb_bus_resume)
/linux-4.4.14/include/linux/ssb/
ssb.h
423 struct ssb_device *mapped_device; (member of struct ssb_bus)

Completed in 644 milliseconds