/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>

#include "dm-stats.h"

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)

/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG		(1 << 0)

/*
 * Type of table and mapped_device's mempool
 */
#define DM_TYPE_NONE			0
#define DM_TYPE_BIO_BASED		1
#define DM_TYPE_REQUEST_BASED		2
#define DM_TYPE_MQ_REQUEST_BASED	3

/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	atomic_t count;
	struct dm_dev *dm_dev;
};

struct dm_table;
struct dm_md_mempools;

/*-----------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------*/
void dm_table_destroy(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
bool dm_table_mq_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);

int dm_queue_merge_is_compulsory(struct request_queue *q);

void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type);
unsigned dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md);

/*
 * To check the return value from dm_table_find_target().
 */
#define dm_target_is_valid(t) ((t)->table)

/*
 * To check whether the target type is bio-based or not (request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based or not (bio-based).
 */
#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \
				    ((t)->type->clone_and_map_rq != NULL))

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))

/*-----------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);

int dm_split_args(int *argc, char ***argvp, char *input);

/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);

/*
 * Internal suspend and resume methods.
 */
int dm_suspended_internally_md(struct mapped_device *md);
void dm_internal_suspend_fast(struct mapped_device *md);
void dm_internal_resume_fast(struct mapped_device *md);
void dm_internal_suspend_noflush(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);

/*
 * The device-mapper can be driven through one of two interfaces;
 * ioctl or filesystem, depending which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * The kobject helper
 */
void dm_kobject_release(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int dm_linear_init(void);
void dm_linear_exit(void);

int dm_stripe_init(void);
void dm_stripe_exit(void);

/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct request_queue *dm_get_md_queue(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
struct dm_stats *dm_get_stats(struct mapped_device *md);

int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie);

void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

bool dm_use_blk_mq(struct mapped_device *md);

int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

/*
 * Mempool operations
 */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
					    unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);

/*
 * Helpers that are used by DM core
 */
unsigned dm_get_reserved_bio_based_ios(void);
unsigned dm_get_reserved_rq_based_ios(void);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count);

#endif