root/drivers/md/dm-delay.c


DEFINITIONS

This source file includes the following definitions:
  1. handle_delayed_timer
  2. queue_timeout
  3. flush_bios
  4. flush_delayed_bios
  5. flush_expired_bios
  6. delay_dtr
  7. delay_class_ctr
  8. delay_ctr
  9. delay_bio
  10. delay_presuspend
  11. delay_resume
  12. delay_map
  13. delay_status
  14. delay_iterate_devices
  15. dm_delay_init
  16. dm_delay_exit

/*
 * Copyright (C) 2005-2007 Red Hat GmbH
 *
 * A target that delays reads and/or writes and can send
 * them to different devices.
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "delay"

struct delay_class {
        struct dm_dev *dev;     /* backing device for this class */
        sector_t start;         /* start offset on dev, in sectors */
        unsigned delay;         /* delay to apply, in milliseconds */
        unsigned ops;           /* bios currently delayed in this class */
};

struct delay_c {
        struct timer_list delay_timer;
        struct mutex timer_lock;
        struct workqueue_struct *kdelayd_wq;
        struct work_struct flush_expired_bios;
        struct list_head delayed_bios;
        atomic_t may_delay;

        struct delay_class read;
        struct delay_class write;
        struct delay_class flush;

        int argc;
};

struct dm_delay_info {
        struct delay_c *context;
        struct delay_class *class;
        struct list_head list;
        unsigned long expires;
};

/* Serializes access to the delayed_bios lists of all delay targets. */
static DEFINE_MUTEX(delayed_bios_lock);

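/*
 * Timer callback: bios cannot be submitted from (soft)irq context, so
 * expired bios are handed off to the kdelayd workqueue for submission.
 */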
static void handle_delayed_timer(struct timer_list *t)
{
        struct delay_c *dc = from_timer(dc, t, delay_timer);

        queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}

static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
        mutex_lock(&dc->timer_lock);

        if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
                mod_timer(&dc->delay_timer, expires);

        mutex_unlock(&dc->timer_lock);
}

static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

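/*
 * Collect expired bios (or, with flush_all, every delayed bio) off the
 * list, and re-arm the timer for the earliest remaining expiry.
 */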
static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{
        struct dm_delay_info *delayed, *next;
        unsigned long next_expires = 0;
        unsigned long start_timer = 0;
        struct bio_list flush_bios = { };

        mutex_lock(&delayed_bios_lock);
        list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
                if (flush_all || time_after_eq(jiffies, delayed->expires)) {
                        struct bio *bio = dm_bio_from_per_bio_data(delayed,
                                                sizeof(struct dm_delay_info));
                        list_del(&delayed->list);
                        bio_list_add(&flush_bios, bio);
                        delayed->class->ops--;
                        continue;
                }

                if (!start_timer) {
                        start_timer = 1;
                        next_expires = delayed->expires;
                } else
                        next_expires = min(next_expires, delayed->expires);
        }
        mutex_unlock(&delayed_bios_lock);

        if (start_timer)
                queue_timeout(dc, next_expires);

        return bio_list_get(&flush_bios);
}

static void flush_expired_bios(struct work_struct *work)
{
        struct delay_c *dc;

        dc = container_of(work, struct delay_c, flush_expired_bios);
        flush_bios(flush_delayed_bios(dc, 0));
}

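/*
 * Also used to clean up after a failed constructor, hence the NULL
 * checks on members that may not have been set up yet.
 */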
static void delay_dtr(struct dm_target *ti)
{
        struct delay_c *dc = ti->private;

        if (dc->kdelayd_wq)
                destroy_workqueue(dc->kdelayd_wq);

        if (dc->read.dev)
                dm_put_device(ti, dc->read.dev);
        if (dc->write.dev)
                dm_put_device(ti, dc->write.dev);
        if (dc->flush.dev)
                dm_put_device(ti, dc->flush.dev);

        mutex_destroy(&dc->timer_lock);

        kfree(dc);
}

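/* Parse one <device> <offset> <delay> triple into a delay_class. */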
static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **argv)
{
        int ret;
        unsigned long long tmpll;
        char dummy;

        if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
                ti->error = "Invalid device sector";
                return -EINVAL;
        }
        c->start = tmpll;

        if (sscanf(argv[2], "%u%c", &c->delay, &dummy) != 1) {
                ti->error = "Invalid delay";
                return -EINVAL;
        }

        ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev);
        if (ret) {
                ti->error = "Device lookup failed";
                return ret;
        }

        return 0;
}

/*
 * Mapping parameters:
 *    <device> <offset> <delay>
 *    [<write_device> <write_offset> <write_delay>
 *     [<flush_device> <flush_offset> <flush_delay>]]
 *
 * With separate write parameters, the first set is only used for reads.
 * With separate flush parameters, the second set is only used for writes
 * and the third only for flushes.
 * Offsets are specified in sectors.
 * Delays are specified in milliseconds.
 */
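
/*
 * A minimal usage sketch (the device name /dev/sdc is hypothetical),
 * following the example style of Documentation/device-mapper/delay.txt:
 * delay reads by 50 ms, writes and flushes by 100 ms:
 *
 *   echo "0 `blockdev --getsz /dev/sdc` delay /dev/sdc 0 50 /dev/sdc 0 100" \
 *       | dmsetup create delayed
 */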
static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct delay_c *dc;
        int ret;

        if (argc != 3 && argc != 6 && argc != 9) {
                ti->error = "Requires exactly 3, 6 or 9 arguments";
                return -EINVAL;
        }

        dc = kzalloc(sizeof(*dc), GFP_KERNEL);
        if (!dc) {
                ti->error = "Cannot allocate context";
                return -ENOMEM;
        }

        ti->private = dc;
        timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
        INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
        INIT_LIST_HEAD(&dc->delayed_bios);
        mutex_init(&dc->timer_lock);
        atomic_set(&dc->may_delay, 1);
        dc->argc = argc;

        ret = delay_class_ctr(ti, &dc->read, argv);
        if (ret)
                goto bad;

        if (argc == 3) {
                ret = delay_class_ctr(ti, &dc->write, argv);
                if (ret)
                        goto bad;
                ret = delay_class_ctr(ti, &dc->flush, argv);
                if (ret)
                        goto bad;
                goto out;
        }

        ret = delay_class_ctr(ti, &dc->write, argv + 3);
        if (ret)
                goto bad;
        if (argc == 6) {
                ret = delay_class_ctr(ti, &dc->flush, argv + 3);
                if (ret)
                        goto bad;
                goto out;
        }

        ret = delay_class_ctr(ti, &dc->flush, argv + 6);
        if (ret)
                goto bad;

out:
        dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
        if (!dc->kdelayd_wq) {
                ret = -EINVAL;
                DMERR("Couldn't start kdelayd");
                goto bad;
        }

        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->per_io_data_size = sizeof(struct dm_delay_info);
        return 0;

bad:
        delay_dtr(ti);
        return ret;
}

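/*
 * Queue the bio on the delayed list and (re)arm the timer. Returns
 * DM_MAPIO_SUBMITTED when the bio was queued, or DM_MAPIO_REMAPPED to
 * have device-mapper submit it immediately (no delay configured, or the
 * target is suspending).
 */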
static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
{
        struct dm_delay_info *delayed;
        unsigned long expires = 0;

        if (!c->delay || !atomic_read(&dc->may_delay))
                return DM_MAPIO_REMAPPED;

        delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

        delayed->context = dc;
        delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);

        mutex_lock(&delayed_bios_lock);
        c->ops++;
        list_add_tail(&delayed->list, &dc->delayed_bios);
        mutex_unlock(&delayed_bios_lock);

        queue_timeout(dc, expires);

        return DM_MAPIO_SUBMITTED;
}

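/*
 * Stop delaying and push out everything still queued, so that no bios
 * remain in flight across a suspend.
 */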
static void delay_presuspend(struct dm_target *ti)
{
        struct delay_c *dc = ti->private;

        atomic_set(&dc->may_delay, 0);
        del_timer_sync(&dc->delay_timer);
        flush_bios(flush_delayed_bios(dc, 1));
}

static void delay_resume(struct dm_target *ti)
{
        struct delay_c *dc = ti->private;

        atomic_set(&dc->may_delay, 1);
}

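/*
 * Pick the delay class from the bio: flushes (REQ_PREFLUSH writes) go to
 * the flush class, other writes to the write class, reads to the read
 * class; then remap the bio to that class's device and offset.
 */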
static int delay_map(struct dm_target *ti, struct bio *bio)
{
        struct delay_c *dc = ti->private;
        struct delay_class *c;
        struct dm_delay_info *delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

        if (bio_data_dir(bio) == WRITE) {
                if (unlikely(bio->bi_opf & REQ_PREFLUSH))
                        c = &dc->flush;
                else
                        c = &dc->write;
        } else {
                c = &dc->read;
        }
        delayed->class = c;
        bio_set_dev(bio, c->dev->bdev);
        if (bio_sectors(bio))
                bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

        return delay_bio(dc, c, bio);
}

#define DMEMIT_DELAY_CLASS(c) \
        DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)

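/*
 * INFO status reports the number of currently delayed bios per class,
 * e.g. "8 0 0" for eight delayed reads and nothing else pending; TABLE
 * status echoes back the constructor arguments.
 */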
static void delay_status(struct dm_target *ti, status_type_t type,
                         unsigned status_flags, char *result, unsigned maxlen)
{
        struct delay_c *dc = ti->private;
        int sz = 0;

        switch (type) {
        case STATUSTYPE_INFO:
                DMEMIT("%u %u %u", dc->read.ops, dc->write.ops, dc->flush.ops);
                break;

        case STATUSTYPE_TABLE:
                DMEMIT_DELAY_CLASS(&dc->read);
                if (dc->argc >= 6) {
                        DMEMIT(" ");
                        DMEMIT_DELAY_CLASS(&dc->write);
                }
                if (dc->argc >= 9) {
                        DMEMIT(" ");
                        DMEMIT_DELAY_CLASS(&dc->flush);
                }
                break;
        }
}

static int delay_iterate_devices(struct dm_target *ti,
                                 iterate_devices_callout_fn fn, void *data)
{
        struct delay_c *dc = ti->private;
        int ret;

        ret = fn(ti, dc->read.dev, dc->read.start, ti->len, data);
        if (ret)
                goto out;
        ret = fn(ti, dc->write.dev, dc->write.start, ti->len, data);
        if (ret)
                goto out;
        ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data);

out:
        return ret;
}

static struct target_type delay_target = {
        .name        = "delay",
        .version     = {1, 2, 1},
        .features    = DM_TARGET_PASSES_INTEGRITY,
        .module      = THIS_MODULE,
        .ctr         = delay_ctr,
        .dtr         = delay_dtr,
        .map         = delay_map,
        .presuspend  = delay_presuspend,
        .resume      = delay_resume,
        .status      = delay_status,
        .iterate_devices = delay_iterate_devices,
};

static int __init dm_delay_init(void)
{
        int r;

        r = dm_register_target(&delay_target);
        if (r < 0)
                DMERR("register failed %d", r);

        return r;
}

static void __exit dm_delay_exit(void)
{
        dm_unregister_target(&delay_target);
}

/* Module hooks */
module_init(dm_delay_init);
module_exit(dm_delay_exit);

MODULE_DESCRIPTION(DM_NAME " delay target");
MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
MODULE_LICENSE("GPL");
