drivers/md/dm-mpath.c

DEFINITIONS

This source file includes the following definitions:
  1. alloc_pgpath
  2. free_pgpath
  3. alloc_priority_group
  4. free_pgpaths
  5. free_priority_group
  6. alloc_multipath
  7. alloc_multipath_stage2
  8. free_multipath
  9. get_mpio
  10. multipath_per_bio_data_size
  11. get_mpio_from_bio
  12. get_bio_details_from_mpio
  13. multipath_init_per_bio_data
  14. __pg_init_all_paths
  15. pg_init_all_paths
  16. __switch_pg
  17. choose_path_in_pg
  18. choose_pgpath
  19. __must_push_back
  20. must_push_back_rq
  21. must_push_back_bio
  22. multipath_clone_and_map
  23. multipath_release_clone
  24. __map_bio
  25. __multipath_map_bio
  26. multipath_map_bio
  27. process_queued_io_list
  28. process_queued_bios
  29. queue_if_no_path
  30. trigger_event
  31. parse_path_selector
  32. setup_scsi_dh
  33. parse_path
  34. parse_priority_group
  35. parse_hw_handler
  36. parse_features
  37. multipath_ctr
  38. multipath_wait_for_pg_init_completion
  39. flush_multipath_work
  40. multipath_dtr
  41. fail_path
  42. reinstate_path
  43. action_dev
  44. bypass_pg
  45. switch_pg_num
  46. bypass_pg_num
  47. pg_init_limit_reached
  48. pg_init_done
  49. activate_or_offline_path
  50. activate_path_work
  51. multipath_end_io
  52. multipath_end_io_bio
  53. multipath_presuspend
  54. multipath_postsuspend
  55. multipath_resume
  56. multipath_status
  57. multipath_message
  58. multipath_prepare_ioctl
  59. multipath_iterate_devices
  60. pgpath_busy
  61. multipath_busy
  62. dm_multipath_init
  63. dm_multipath_exit

   1 /*
   2  * Copyright (C) 2003 Sistina Software Limited.
   3  * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
   4  *
   5  * This file is released under the GPL.
   6  */
   7 
   8 #include <linux/device-mapper.h>
   9 
  10 #include "dm-rq.h"
  11 #include "dm-bio-record.h"
  12 #include "dm-path-selector.h"
  13 #include "dm-uevent.h"
  14 
  15 #include <linux/blkdev.h>
  16 #include <linux/ctype.h>
  17 #include <linux/init.h>
  18 #include <linux/mempool.h>
  19 #include <linux/module.h>
  20 #include <linux/pagemap.h>
  21 #include <linux/slab.h>
  22 #include <linux/time.h>
  23 #include <linux/workqueue.h>
  24 #include <linux/delay.h>
  25 #include <scsi/scsi_dh.h>
  26 #include <linux/atomic.h>
  27 #include <linux/blk-mq.h>
  28 
  29 #define DM_MSG_PREFIX "multipath"
  30 #define DM_PG_INIT_DELAY_MSECS 2000
  31 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
  32 
  33 /* Path properties */
  34 struct pgpath {
  35         struct list_head list;
  36 
  37         struct priority_group *pg;      /* Owning PG */
  38         unsigned fail_count;            /* Cumulative failure count */
  39 
  40         struct dm_path path;
  41         struct delayed_work activate_path;
  42 
  43         bool is_active:1;               /* Path status */
  44 };
  45 
  46 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
  47 
  48 /*
  49  * Paths are grouped into Priority Groups and numbered from 1 upwards.
  50  * Each has a path selector which controls which path gets used.
  51  */
  52 struct priority_group {
  53         struct list_head list;
  54 
  55         struct multipath *m;            /* Owning multipath instance */
  56         struct path_selector ps;
  57 
  58         unsigned pg_num;                /* Reference number */
  59         unsigned nr_pgpaths;            /* Number of paths in PG */
  60         struct list_head pgpaths;
  61 
  62         bool bypassed:1;                /* Temporarily bypass this PG? */
  63 };
  64 
  65 /* Multipath context */
  66 struct multipath {
  67         unsigned long flags;            /* Multipath state flags */
  68 
  69         spinlock_t lock;
  70         enum dm_queue_mode queue_mode;
  71 
  72         struct pgpath *current_pgpath;
  73         struct priority_group *current_pg;
  74         struct priority_group *next_pg; /* Switch to this PG if set */
  75 
  76         atomic_t nr_valid_paths;        /* Total number of usable paths */
  77         unsigned nr_priority_groups;
  78         struct list_head priority_groups;
  79 
  80         const char *hw_handler_name;
  81         char *hw_handler_params;
  82         wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
  83         unsigned pg_init_retries;       /* Number of times to retry pg_init */
  84         unsigned pg_init_delay_msecs;   /* Number of msecs before pg_init retry */
  85         atomic_t pg_init_in_progress;   /* Only one pg_init allowed at once */
  86         atomic_t pg_init_count;         /* Number of times pg_init called */
  87 
  88         struct mutex work_mutex;
  89         struct work_struct trigger_event;
  90         struct dm_target *ti;
  91 
  92         struct work_struct process_queued_bios;
  93         struct bio_list queued_bios;
  94 };
  95 
  96 /*
  97  * Context information attached to each io we process.
  98  */
  99 struct dm_mpath_io {
 100         struct pgpath *pgpath;
 101         size_t nr_bytes;
 102 };
 103 
 104 typedef int (*action_fn) (struct pgpath *pgpath);
 105 
 106 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 107 static void trigger_event(struct work_struct *work);
 108 static void activate_or_offline_path(struct pgpath *pgpath);
 109 static void activate_path_work(struct work_struct *work);
 110 static void process_queued_bios(struct work_struct *work);
 111 
 112 /*-----------------------------------------------
 113  * Multipath state flags.
 114  *-----------------------------------------------*/
 115 
 116 #define MPATHF_QUEUE_IO 0                       /* Must we queue all I/O? */
 117 #define MPATHF_QUEUE_IF_NO_PATH 1               /* Queue I/O if last path fails? */
 118 #define MPATHF_SAVED_QUEUE_IF_NO_PATH 2         /* Saved state during suspension */
 119 #define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3     /* If there's already a hw_handler present, don't change it. */
 120 #define MPATHF_PG_INIT_DISABLED 4               /* pg_init is not currently allowed */
 121 #define MPATHF_PG_INIT_REQUIRED 5               /* pg_init needs calling? */
 122 #define MPATHF_PG_INIT_DELAY_RETRY 6            /* Delay pg_init retry? */
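      /*
       * Typical flow when a hardware handler is configured: __switch_pg()
       * sets both MPATHF_PG_INIT_REQUIRED and MPATHF_QUEUE_IO, and
       * pg_init_done() clears MPATHF_QUEUE_IO again once all outstanding
       * path activations have completed.
       */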
 123 
 124 /*-----------------------------------------------
 125  * Allocation routines
 126  *-----------------------------------------------*/
 127 
 128 static struct pgpath *alloc_pgpath(void)
 129 {
 130         struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 131 
 132         if (!pgpath)
 133                 return NULL;
 134 
 135         pgpath->is_active = true;
 136 
 137         return pgpath;
 138 }
 139 
 140 static void free_pgpath(struct pgpath *pgpath)
 141 {
 142         kfree(pgpath);
 143 }
 144 
 145 static struct priority_group *alloc_priority_group(void)
 146 {
 147         struct priority_group *pg;
 148 
 149         pg = kzalloc(sizeof(*pg), GFP_KERNEL);
 150 
 151         if (pg)
 152                 INIT_LIST_HEAD(&pg->pgpaths);
 153 
 154         return pg;
 155 }
 156 
 157 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 158 {
 159         struct pgpath *pgpath, *tmp;
 160 
 161         list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
 162                 list_del(&pgpath->list);
 163                 dm_put_device(ti, pgpath->path.dev);
 164                 free_pgpath(pgpath);
 165         }
 166 }
 167 
 168 static void free_priority_group(struct priority_group *pg,
 169                                 struct dm_target *ti)
 170 {
 171         struct path_selector *ps = &pg->ps;
 172 
 173         if (ps->type) {
 174                 ps->type->destroy(ps);
 175                 dm_put_path_selector(ps->type);
 176         }
 177 
 178         free_pgpaths(&pg->pgpaths, ti);
 179         kfree(pg);
 180 }
 181 
 182 static struct multipath *alloc_multipath(struct dm_target *ti)
 183 {
 184         struct multipath *m;
 185 
 186         m = kzalloc(sizeof(*m), GFP_KERNEL);
 187         if (m) {
 188                 INIT_LIST_HEAD(&m->priority_groups);
 189                 spin_lock_init(&m->lock);
 190                 atomic_set(&m->nr_valid_paths, 0);
 191                 INIT_WORK(&m->trigger_event, trigger_event);
 192                 mutex_init(&m->work_mutex);
 193 
 194                 m->queue_mode = DM_TYPE_NONE;
 195 
 196                 m->ti = ti;
 197                 ti->private = m;
 198         }
 199 
 200         return m;
 201 }
 202 
 203 static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 204 {
 205         if (m->queue_mode == DM_TYPE_NONE) {
 206                 m->queue_mode = DM_TYPE_REQUEST_BASED;
 207         } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 208                 INIT_WORK(&m->process_queued_bios, process_queued_bios);
 209                 /*
 210                  * bio-based doesn't support any direct scsi_dh management;
 211                  * it just discovers if a scsi_dh is attached.
 212                  */
 213                 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 214         }
 215 
 216         dm_table_set_type(ti->table, m->queue_mode);
 217 
 218         /*
 219          * Init fields that are only used when a scsi_dh is attached
 220          * - must do this unconditionally (really doesn't hurt non-SCSI uses)
 221          */
 222         set_bit(MPATHF_QUEUE_IO, &m->flags);
 223         atomic_set(&m->pg_init_in_progress, 0);
 224         atomic_set(&m->pg_init_count, 0);
 225         m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 226         init_waitqueue_head(&m->pg_init_wait);
 227 
 228         return 0;
 229 }
 230 
 231 static void free_multipath(struct multipath *m)
 232 {
 233         struct priority_group *pg, *tmp;
 234 
 235         list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
 236                 list_del(&pg->list);
 237                 free_priority_group(pg, m->ti);
 238         }
 239 
 240         kfree(m->hw_handler_name);
 241         kfree(m->hw_handler_params);
 242         mutex_destroy(&m->work_mutex);
 243         kfree(m);
 244 }
 245 
 246 static struct dm_mpath_io *get_mpio(union map_info *info)
 247 {
 248         return info->ptr;
 249 }
 250 
 251 static size_t multipath_per_bio_data_size(void)
 252 {
 253         return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
 254 }
 255 
 256 static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
 257 {
 258         return dm_per_bio_data(bio, multipath_per_bio_data_size());
 259 }
 260 
 261 static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
 262 {
 263         /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
 264         void *bio_details = mpio + 1;
 265         return bio_details;
 266 }
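
      /*
       * Per-bio-data layout reserved via ti->per_io_data_size in the
       * bio-based case: a struct dm_mpath_io followed immediately by a
       * struct dm_bio_details, which is why get_bio_details_from_mpio()
       * simply returns mpio + 1.
       *
       *   +--------------------+-----------------------+
       *   | struct dm_mpath_io | struct dm_bio_details |
       *   +--------------------+-----------------------+
       */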
 267 
 268 static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
 269 {
 270         struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 271         struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
 272 
 273         mpio->nr_bytes = bio->bi_iter.bi_size;
 274         mpio->pgpath = NULL;
 275         *mpio_p = mpio;
 276 
 277         dm_bio_record(bio_details, bio);
 278 }
 279 
 280 /*-----------------------------------------------
 281  * Path selection
 282  *-----------------------------------------------*/
 283 
 284 static int __pg_init_all_paths(struct multipath *m)
 285 {
 286         struct pgpath *pgpath;
 287         unsigned long pg_init_delay = 0;
 288 
 289         lockdep_assert_held(&m->lock);
 290 
 291         if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 292                 return 0;
 293 
 294         atomic_inc(&m->pg_init_count);
 295         clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 296 
 297         /* Check here to reset pg_init_required */
 298         if (!m->current_pg)
 299                 return 0;
 300 
 301         if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
 302                 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 303                                                  m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 304         list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
 305                 /* Skip failed paths */
 306                 if (!pgpath->is_active)
 307                         continue;
 308                 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
 309                                        pg_init_delay))
 310                         atomic_inc(&m->pg_init_in_progress);
 311         }
 312         return atomic_read(&m->pg_init_in_progress);
 313 }
 314 
 315 static int pg_init_all_paths(struct multipath *m)
 316 {
 317         int ret;
 318         unsigned long flags;
 319 
 320         spin_lock_irqsave(&m->lock, flags);
 321         ret = __pg_init_all_paths(m);
 322         spin_unlock_irqrestore(&m->lock, flags);
 323 
 324         return ret;
 325 }
 326 
 327 static void __switch_pg(struct multipath *m, struct priority_group *pg)
 328 {
 329         m->current_pg = pg;
 330 
 331         /* Must we initialise the PG first, and queue I/O till it's ready? */
 332         if (m->hw_handler_name) {
 333                 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 334                 set_bit(MPATHF_QUEUE_IO, &m->flags);
 335         } else {
 336                 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 337                 clear_bit(MPATHF_QUEUE_IO, &m->flags);
 338         }
 339 
 340         atomic_set(&m->pg_init_count, 0);
 341 }
 342 
 343 static struct pgpath *choose_path_in_pg(struct multipath *m,
 344                                         struct priority_group *pg,
 345                                         size_t nr_bytes)
 346 {
 347         unsigned long flags;
 348         struct dm_path *path;
 349         struct pgpath *pgpath;
 350 
 351         path = pg->ps.type->select_path(&pg->ps, nr_bytes);
 352         if (!path)
 353                 return ERR_PTR(-ENXIO);
 354 
 355         pgpath = path_to_pgpath(path);
 356 
 357         if (unlikely(READ_ONCE(m->current_pg) != pg)) {
 358                 /* Only update current_pgpath if pg changed */
 359                 spin_lock_irqsave(&m->lock, flags);
 360                 m->current_pgpath = pgpath;
 361                 __switch_pg(m, pg);
 362                 spin_unlock_irqrestore(&m->lock, flags);
 363         }
 364 
 365         return pgpath;
 366 }
 367 
 368 static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 369 {
 370         unsigned long flags;
 371         struct priority_group *pg;
 372         struct pgpath *pgpath;
 373         unsigned bypassed = 1;
 374 
 375         if (!atomic_read(&m->nr_valid_paths)) {
 376                 clear_bit(MPATHF_QUEUE_IO, &m->flags);
 377                 goto failed;
 378         }
 379 
 380         /* Were we instructed to switch PG? */
 381         if (READ_ONCE(m->next_pg)) {
 382                 spin_lock_irqsave(&m->lock, flags);
 383                 pg = m->next_pg;
 384                 if (!pg) {
 385                         spin_unlock_irqrestore(&m->lock, flags);
 386                         goto check_current_pg;
 387                 }
 388                 m->next_pg = NULL;
 389                 spin_unlock_irqrestore(&m->lock, flags);
 390                 pgpath = choose_path_in_pg(m, pg, nr_bytes);
 391                 if (!IS_ERR_OR_NULL(pgpath))
 392                         return pgpath;
 393         }
 394 
 395         /* Don't change PG until it has no remaining paths */
 396 check_current_pg:
 397         pg = READ_ONCE(m->current_pg);
 398         if (pg) {
 399                 pgpath = choose_path_in_pg(m, pg, nr_bytes);
 400                 if (!IS_ERR_OR_NULL(pgpath))
 401                         return pgpath;
 402         }
 403 
 404         /*
 405          * Loop through priority groups until we find a valid path.
 406          * First time we skip PGs marked 'bypassed'.
 407          * Second time we only try the ones we skipped, but set
 408          * pg_init_delay_retry so we do not hammer controllers.
 409          */
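              /*
               * Example: with PG1 marked 'bypassed' and every path in PG2
               * failed, the first pass below tries only PG2 and finds
               * nothing; the second pass falls back to the bypassed PG1
               * and, if it yields a path, sets MPATHF_PG_INIT_DELAY_RETRY
               * so that pg_init retries are spaced out.
               */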
 410         do {
 411                 list_for_each_entry(pg, &m->priority_groups, list) {
 412                         if (pg->bypassed == !!bypassed)
 413                                 continue;
 414                         pgpath = choose_path_in_pg(m, pg, nr_bytes);
 415                         if (!IS_ERR_OR_NULL(pgpath)) {
 416                                 if (!bypassed)
 417                                         set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
 418                                 return pgpath;
 419                         }
 420                 }
 421         } while (bypassed--);
 422 
 423 failed:
 424         spin_lock_irqsave(&m->lock, flags);
 425         m->current_pgpath = NULL;
 426         m->current_pg = NULL;
 427         spin_unlock_irqrestore(&m->lock, flags);
 428 
 429         return NULL;
 430 }
 431 
 432 /*
 433  * dm_report_EIO() is a macro instead of a function to make pr_debug()
 434  * report the function name and line number of the function from which
 435  * it has been invoked.
 436  */
 437 #define dm_report_EIO(m)                                                \
 438 do {                                                                    \
 439         struct mapped_device *md = dm_table_get_md((m)->ti->table);     \
 440                                                                         \
 441         pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
 442                  dm_device_name(md),                                    \
 443                  test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags),        \
 444                  test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags),  \
 445                  dm_noflush_suspending((m)->ti));                       \
 446 } while (0)
 447 
 448 /*
 449  * Check whether bios must be queued in the device-mapper core rather
 450  * than here in the target.
 451  *
 452  * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
 453  * the same value then we are not between multipath_presuspend()
 454  * and multipath_resume() calls and we have no need to check
 455  * for the DMF_NOFLUSH_SUSPENDING flag.
 456  */
 457 static bool __must_push_back(struct multipath *m, unsigned long flags)
 458 {
 459         return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
 460                  test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
 461                 dm_noflush_suspending(m->ti));
 462 }
 463 
 464 /*
 465  * Following functions use READ_ONCE to get atomic access to
 466  * all m->flags to avoid taking spinlock
 467  */
 468 static bool must_push_back_rq(struct multipath *m)
 469 {
 470         unsigned long flags = READ_ONCE(m->flags);
 471         return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
 472 }
 473 
 474 static bool must_push_back_bio(struct multipath *m)
 475 {
 476         unsigned long flags = READ_ONCE(m->flags);
 477         return __must_push_back(m, flags);
 478 }
 479 
 480 /*
 481  * Map cloned requests (request-based multipath)
 482  */
 483 static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 484                                    union map_info *map_context,
 485                                    struct request **__clone)
 486 {
 487         struct multipath *m = ti->private;
 488         size_t nr_bytes = blk_rq_bytes(rq);
 489         struct pgpath *pgpath;
 490         struct block_device *bdev;
 491         struct dm_mpath_io *mpio = get_mpio(map_context);
 492         struct request_queue *q;
 493         struct request *clone;
 494 
 495         /* Do we need to select a new pgpath? */
 496         pgpath = READ_ONCE(m->current_pgpath);
 497         if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 498                 pgpath = choose_pgpath(m, nr_bytes);
 499 
 500         if (!pgpath) {
 501                 if (must_push_back_rq(m))
 502                         return DM_MAPIO_DELAY_REQUEUE;
 503                 dm_report_EIO(m);       /* Failed */
 504                 return DM_MAPIO_KILL;
 505         } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
 506                    test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 507                 pg_init_all_paths(m);
 508                 return DM_MAPIO_DELAY_REQUEUE;
 509         }
 510 
 511         mpio->pgpath = pgpath;
 512         mpio->nr_bytes = nr_bytes;
 513 
 514         bdev = pgpath->path.dev->bdev;
 515         q = bdev_get_queue(bdev);
 516         clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
 517                         BLK_MQ_REQ_NOWAIT);
 518         if (IS_ERR(clone)) {
 519                 /* EBUSY, ENODEV or EWOULDBLOCK: requeue */
 520                 if (blk_queue_dying(q)) {
 521                         atomic_inc(&m->pg_init_in_progress);
 522                         activate_or_offline_path(pgpath);
 523                         return DM_MAPIO_DELAY_REQUEUE;
 524                 }
 525 
 526                 /*
 527                  * blk-mq's SCHED_RESTART can cover this requeue, so we
 528                  * needn't deal with it by DELAY_REQUEUE. More importantly,
 529                  * we have to return DM_MAPIO_REQUEUE so that blk-mq can
 530                  * get the queue busy feedback (via BLK_STS_RESOURCE),
 531                  * otherwise I/O merging can suffer.
 532                  */
 533                 return DM_MAPIO_REQUEUE;
 534         }
 535         clone->bio = clone->biotail = NULL;
 536         clone->rq_disk = bdev->bd_disk;
 537         clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 538         *__clone = clone;
 539 
 540         if (pgpath->pg->ps.type->start_io)
 541                 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 542                                               &pgpath->path,
 543                                               nr_bytes);
 544         return DM_MAPIO_REMAPPED;
 545 }
 546 
 547 static void multipath_release_clone(struct request *clone,
 548                                     union map_info *map_context)
 549 {
 550         if (unlikely(map_context)) {
 551                 /*
 552                  * non-NULL map_context means caller is still map
 553                  * method; must undo multipath_clone_and_map()
 554                  */
 555                 struct dm_mpath_io *mpio = get_mpio(map_context);
 556                 struct pgpath *pgpath = mpio->pgpath;
 557 
 558                 if (pgpath && pgpath->pg->ps.type->end_io)
 559                         pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
 560                                                     &pgpath->path,
 561                                                     mpio->nr_bytes);
 562         }
 563 
 564         blk_put_request(clone);
 565 }
 566 
 567 /*
 568  * Map cloned bios (bio-based multipath)
 569  */
 570 
 571 static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 572 {
 573         struct pgpath *pgpath;
 574         unsigned long flags;
 575         bool queue_io;
 576 
 577         /* Do we need to select a new pgpath? */
 578         pgpath = READ_ONCE(m->current_pgpath);
 579         if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 580                 pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 581 
 582         /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
 583         queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
 584 
 585         if ((pgpath && queue_io) ||
 586             (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
 587                 /* Queue for the daemon to resubmit */
 588                 spin_lock_irqsave(&m->lock, flags);
 589                 bio_list_add(&m->queued_bios, bio);
 590                 spin_unlock_irqrestore(&m->lock, flags);
 591 
 592                 /* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
 593                 if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
 594                         pg_init_all_paths(m);
 595                 else if (!queue_io)
 596                         queue_work(kmultipathd, &m->process_queued_bios);
 597 
 598                 return ERR_PTR(-EAGAIN);
 599         }
 600 
 601         return pgpath;
 602 }
 603 
 604 static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 605                                struct dm_mpath_io *mpio)
 606 {
 607         struct pgpath *pgpath = __map_bio(m, bio);
 608 
 609         if (IS_ERR(pgpath))
 610                 return DM_MAPIO_SUBMITTED;
 611 
 612         if (!pgpath) {
 613                 if (must_push_back_bio(m))
 614                         return DM_MAPIO_REQUEUE;
 615                 dm_report_EIO(m);
 616                 return DM_MAPIO_KILL;
 617         }
 618 
 619         mpio->pgpath = pgpath;
 620 
 621         bio->bi_status = 0;
 622         bio_set_dev(bio, pgpath->path.dev->bdev);
 623         bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 624 
 625         if (pgpath->pg->ps.type->start_io)
 626                 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 627                                               &pgpath->path,
 628                                               mpio->nr_bytes);
 629         return DM_MAPIO_REMAPPED;
 630 }
 631 
 632 static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
 633 {
 634         struct multipath *m = ti->private;
 635         struct dm_mpath_io *mpio = NULL;
 636 
 637         multipath_init_per_bio_data(bio, &mpio);
 638         return __multipath_map_bio(m, bio, mpio);
 639 }
 640 
 641 static void process_queued_io_list(struct multipath *m)
 642 {
 643         if (m->queue_mode == DM_TYPE_REQUEST_BASED)
 644                 dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
 645         else if (m->queue_mode == DM_TYPE_BIO_BASED)
 646                 queue_work(kmultipathd, &m->process_queued_bios);
 647 }
 648 
 649 static void process_queued_bios(struct work_struct *work)
 650 {
 651         int r;
 652         unsigned long flags;
 653         struct bio *bio;
 654         struct bio_list bios;
 655         struct blk_plug plug;
 656         struct multipath *m =
 657                 container_of(work, struct multipath, process_queued_bios);
 658 
 659         bio_list_init(&bios);
 660 
 661         spin_lock_irqsave(&m->lock, flags);
 662 
 663         if (bio_list_empty(&m->queued_bios)) {
 664                 spin_unlock_irqrestore(&m->lock, flags);
 665                 return;
 666         }
 667 
 668         bio_list_merge(&bios, &m->queued_bios);
 669         bio_list_init(&m->queued_bios);
 670 
 671         spin_unlock_irqrestore(&m->lock, flags);
 672 
 673         blk_start_plug(&plug);
 674         while ((bio = bio_list_pop(&bios))) {
 675                 struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
 676                 dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
 677                 r = __multipath_map_bio(m, bio, mpio);
 678                 switch (r) {
 679                 case DM_MAPIO_KILL:
 680                         bio->bi_status = BLK_STS_IOERR;
 681                         bio_endio(bio);
 682                         break;
 683                 case DM_MAPIO_REQUEUE:
 684                         bio->bi_status = BLK_STS_DM_REQUEUE;
 685                         bio_endio(bio);
 686                         break;
 687                 case DM_MAPIO_REMAPPED:
 688                         generic_make_request(bio);
 689                         break;
 690                 case DM_MAPIO_SUBMITTED:
 691                         break;
 692                 default:
 693                         WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
 694                 }
 695         }
 696         blk_finish_plug(&plug);
 697 }
 698 
 699 /*
 700  * If we run out of usable paths, should we queue I/O or error it?
 701  */
 702 static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 703                             bool save_old_value)
 704 {
 705         unsigned long flags;
 706 
 707         spin_lock_irqsave(&m->lock, flags);
 708         assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
 709                    (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
 710                    (!save_old_value && queue_if_no_path));
 711         assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
 712         spin_unlock_irqrestore(&m->lock, flags);
 713 
 714         if (!queue_if_no_path) {
 715                 dm_table_run_md_queue_async(m->ti->table);
 716                 process_queued_io_list(m);
 717         }
 718 
 719         return 0;
 720 }
 721 
 722 /*
 723  * An event is triggered whenever a path is taken out of use.
 724  * Includes path failure and PG bypass.
 725  */
 726 static void trigger_event(struct work_struct *work)
 727 {
 728         struct multipath *m =
 729                 container_of(work, struct multipath, trigger_event);
 730 
 731         dm_table_event(m->ti->table);
 732 }
 733 
 734 /*-----------------------------------------------------------------
 735  * Constructor/argument parsing:
 736  * <#multipath feature args> [<arg>]*
 737  * <#hw_handler args> [hw_handler [<arg>]*]
 738  * <#priority groups>
 739  * <initial priority group>
 740  *     [<selector> <#selector args> [<arg>]*
 741  *      <#paths> <#per-path selector args>
 742  *         [<path> [<arg>]* ]+ ]+
 743  *---------------------------------------------------------------*/
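      /*
       * Purely illustrative example (not part of the original source; the
       * device length and device numbers are made up): a dmsetup table line
       * of the form "<start> <length> multipath <params described above>",
       * using the queue_if_no_path feature, no hardware handler, and two
       * priority groups (initial PG 1), each running round-robin over two
       * paths with a repeat count of 1:
       *
       *   0 71014400 multipath 1 queue_if_no_path 0 2 1 \
       *       round-robin 0 2 1 8:16 1 8:32 1 \
       *       round-robin 0 2 1 65:32 1 65:48 1
       */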
 744 static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 745                                struct dm_target *ti)
 746 {
 747         int r;
 748         struct path_selector_type *pst;
 749         unsigned ps_argc;
 750 
 751         static const struct dm_arg _args[] = {
 752                 {0, 1024, "invalid number of path selector args"},
 753         };
 754 
 755         pst = dm_get_path_selector(dm_shift_arg(as));
 756         if (!pst) {
 757                 ti->error = "unknown path selector type";
 758                 return -EINVAL;
 759         }
 760 
 761         r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
 762         if (r) {
 763                 dm_put_path_selector(pst);
 764                 return -EINVAL;
 765         }
 766 
 767         r = pst->create(&pg->ps, ps_argc, as->argv);
 768         if (r) {
 769                 dm_put_path_selector(pst);
 770                 ti->error = "path selector constructor failed";
 771                 return r;
 772         }
 773 
 774         pg->ps.type = pst;
 775         dm_consume_args(as, ps_argc);
 776 
 777         return 0;
 778 }
 779 
 780 static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
 781                          const char **attached_handler_name, char **error)
 782 {
 783         struct request_queue *q = bdev_get_queue(bdev);
 784         int r;
 785 
 786         if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 787 retain:
 788                 if (*attached_handler_name) {
 789                         /*
 790                          * Clear any hw_handler_params associated with a
 791                          * handler that isn't already attached.
 792                          */
 793                         if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
 794                                 kfree(m->hw_handler_params);
 795                                 m->hw_handler_params = NULL;
 796                         }
 797 
 798                         /*
 799                          * Reset hw_handler_name to match the attached handler
 800                          *
 801                          * NB. This modifies the table line to show the actual
 802                          * handler instead of the original table passed in.
 803                          */
 804                         kfree(m->hw_handler_name);
 805                         m->hw_handler_name = *attached_handler_name;
 806                         *attached_handler_name = NULL;
 807                 }
 808         }
 809 
 810         if (m->hw_handler_name) {
 811                 r = scsi_dh_attach(q, m->hw_handler_name);
 812                 if (r == -EBUSY) {
 813                         char b[BDEVNAME_SIZE];
 814 
 815                         printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
 816                                bdevname(bdev, b));
 817                         goto retain;
 818                 }
 819                 if (r < 0) {
 820                         *error = "error attaching hardware handler";
 821                         return r;
 822                 }
 823 
 824                 if (m->hw_handler_params) {
 825                         r = scsi_dh_set_params(q, m->hw_handler_params);
 826                         if (r < 0) {
 827                                 *error = "unable to set hardware handler parameters";
 828                                 return r;
 829                         }
 830                 }
 831         }
 832 
 833         return 0;
 834 }
 835 
 836 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
 837                                  struct dm_target *ti)
 838 {
 839         int r;
 840         struct pgpath *p;
 841         struct multipath *m = ti->private;
 842         struct request_queue *q;
 843         const char *attached_handler_name = NULL;
 844 
 845         /* we need at least a path arg */
 846         if (as->argc < 1) {
 847                 ti->error = "no device given";
 848                 return ERR_PTR(-EINVAL);
 849         }
 850 
 851         p = alloc_pgpath();
 852         if (!p)
 853                 return ERR_PTR(-ENOMEM);
 854 
 855         r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
 856                           &p->path.dev);
 857         if (r) {
 858                 ti->error = "error getting device";
 859                 goto bad;
 860         }
 861 
 862         q = bdev_get_queue(p->path.dev->bdev);
 863         attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 864         if (attached_handler_name || m->hw_handler_name) {
 865                 INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
 866                 r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
 867                 kfree(attached_handler_name);
 868                 if (r) {
 869                         dm_put_device(ti, p->path.dev);
 870                         goto bad;
 871                 }
 872         }
 873 
 874         r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
 875         if (r) {
 876                 dm_put_device(ti, p->path.dev);
 877                 goto bad;
 878         }
 879 
 880         return p;
 881  bad:
 882         free_pgpath(p);
 883         return ERR_PTR(r);
 884 }
 885 
 886 static struct priority_group *parse_priority_group(struct dm_arg_set *as,
 887                                                    struct multipath *m)
 888 {
 889         static const struct dm_arg _args[] = {
 890                 {1, 1024, "invalid number of paths"},
 891                 {0, 1024, "invalid number of selector args"}
 892         };
 893 
 894         int r;
 895         unsigned i, nr_selector_args, nr_args;
 896         struct priority_group *pg;
 897         struct dm_target *ti = m->ti;
 898 
 899         if (as->argc < 2) {
 900                 as->argc = 0;
 901                 ti->error = "not enough priority group arguments";
 902                 return ERR_PTR(-EINVAL);
 903         }
 904 
 905         pg = alloc_priority_group();
 906         if (!pg) {
 907                 ti->error = "couldn't allocate priority group";
 908                 return ERR_PTR(-ENOMEM);
 909         }
 910         pg->m = m;
 911 
 912         r = parse_path_selector(as, pg, ti);
 913         if (r)
 914                 goto bad;
 915 
 916         /*
 917          * read the paths
 918          */
 919         r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
 920         if (r)
 921                 goto bad;
 922 
 923         r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
 924         if (r)
 925                 goto bad;
 926 
 927         nr_args = 1 + nr_selector_args;
 928         for (i = 0; i < pg->nr_pgpaths; i++) {
 929                 struct pgpath *pgpath;
 930                 struct dm_arg_set path_args;
 931 
 932                 if (as->argc < nr_args) {
 933                         ti->error = "not enough path parameters";
 934                         r = -EINVAL;
 935                         goto bad;
 936                 }
 937 
 938                 path_args.argc = nr_args;
 939                 path_args.argv = as->argv;
 940 
 941                 pgpath = parse_path(&path_args, &pg->ps, ti);
 942                 if (IS_ERR(pgpath)) {
 943                         r = PTR_ERR(pgpath);
 944                         goto bad;
 945                 }
 946 
 947                 pgpath->pg = pg;
 948                 list_add_tail(&pgpath->list, &pg->pgpaths);
 949                 dm_consume_args(as, nr_args);
 950         }
 951 
 952         return pg;
 953 
 954  bad:
 955         free_priority_group(pg, ti);
 956         return ERR_PTR(r);
 957 }
 958 
 959 static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 960 {
 961         unsigned hw_argc;
 962         int ret;
 963         struct dm_target *ti = m->ti;
 964 
 965         static const struct dm_arg _args[] = {
 966                 {0, 1024, "invalid number of hardware handler args"},
 967         };
 968 
 969         if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
 970                 return -EINVAL;
 971 
 972         if (!hw_argc)
 973                 return 0;
 974 
 975         if (m->queue_mode == DM_TYPE_BIO_BASED) {
 976                 dm_consume_args(as, hw_argc);
 977                 DMERR("bio-based multipath doesn't allow hardware handler args");
 978                 return 0;
 979         }
 980 
 981         m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
 982         if (!m->hw_handler_name)
 983                 return -EINVAL;
 984 
 985         if (hw_argc > 1) {
 986                 char *p;
 987                 int i, j, len = 4;
 988 
 989                 for (i = 0; i <= hw_argc - 2; i++)
 990                         len += strlen(as->argv[i]) + 1;
 991                 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
 992                 if (!p) {
 993                         ti->error = "memory allocation failed";
 994                         ret = -ENOMEM;
 995                         goto fail;
 996                 }
 997                 j = sprintf(p, "%d", hw_argc - 1);
 998                 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
 999                         j = sprintf(p, "%s", as->argv[i]);
1000         }
1001         dm_consume_args(as, hw_argc - 1);
1002 
1003         return 0;
1004 fail:
1005         kfree(m->hw_handler_name);
1006         m->hw_handler_name = NULL;
1007         return ret;
1008 }
1009 
1010 static int parse_features(struct dm_arg_set *as, struct multipath *m)
1011 {
1012         int r;
1013         unsigned argc;
1014         struct dm_target *ti = m->ti;
1015         const char *arg_name;
1016 
1017         static const struct dm_arg _args[] = {
1018                 {0, 8, "invalid number of feature args"},
1019                 {1, 50, "pg_init_retries must be between 1 and 50"},
1020                 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
1021         };
1022 
1023         r = dm_read_arg_group(_args, as, &argc, &ti->error);
1024         if (r)
1025                 return -EINVAL;
1026 
1027         if (!argc)
1028                 return 0;
1029 
1030         do {
1031                 arg_name = dm_shift_arg(as);
1032                 argc--;
1033 
1034                 if (!strcasecmp(arg_name, "queue_if_no_path")) {
1035                         r = queue_if_no_path(m, true, false);
1036                         continue;
1037                 }
1038 
1039                 if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
1040                         set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
1041                         continue;
1042                 }
1043 
1044                 if (!strcasecmp(arg_name, "pg_init_retries") &&
1045                     (argc >= 1)) {
1046                         r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
1047                         argc--;
1048                         continue;
1049                 }
1050 
1051                 if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
1052                     (argc >= 1)) {
1053                         r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
1054                         argc--;
1055                         continue;
1056                 }
1057 
1058                 if (!strcasecmp(arg_name, "queue_mode") &&
1059                     (argc >= 1)) {
1060                         const char *queue_mode_name = dm_shift_arg(as);
1061 
1062                         if (!strcasecmp(queue_mode_name, "bio"))
1063                                 m->queue_mode = DM_TYPE_BIO_BASED;
1064                         else if (!strcasecmp(queue_mode_name, "rq") ||
1065                                  !strcasecmp(queue_mode_name, "mq"))
1066                                 m->queue_mode = DM_TYPE_REQUEST_BASED;
1067                         else {
1068                                 ti->error = "Unknown 'queue_mode' requested";
1069                                 r = -EINVAL;
1070                         }
1071                         argc--;
1072                         continue;
1073                 }
1074 
1075                 ti->error = "Unrecognised multipath feature request";
1076                 r = -EINVAL;
1077         } while (argc && !r);
1078 
1079         return r;
1080 }
1081 
1082 static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1083 {
1084         /* target arguments */
1085         static const struct dm_arg _args[] = {
1086                 {0, 1024, "invalid number of priority groups"},
1087                 {0, 1024, "invalid initial priority group number"},
1088         };
1089 
1090         int r;
1091         struct multipath *m;
1092         struct dm_arg_set as;
1093         unsigned pg_count = 0;
1094         unsigned next_pg_num;
1095 
1096         as.argc = argc;
1097         as.argv = argv;
1098 
1099         m = alloc_multipath(ti);
1100         if (!m) {
1101                 ti->error = "can't allocate multipath";
1102                 return -EINVAL;
1103         }
1104 
1105         r = parse_features(&as, m);
1106         if (r)
1107                 goto bad;
1108 
1109         r = alloc_multipath_stage2(ti, m);
1110         if (r)
1111                 goto bad;
1112 
1113         r = parse_hw_handler(&as, m);
1114         if (r)
1115                 goto bad;
1116 
1117         r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
1118         if (r)
1119                 goto bad;
1120 
1121         r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
1122         if (r)
1123                 goto bad;
1124 
1125         if ((!m->nr_priority_groups && next_pg_num) ||
1126             (m->nr_priority_groups && !next_pg_num)) {
1127                 ti->error = "invalid initial priority group";
1128                 r = -EINVAL;
1129                 goto bad;
1130         }
1131 
1132         /* parse the priority groups */
1133         while (as.argc) {
1134                 struct priority_group *pg;
1135                 unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
1136 
1137                 pg = parse_priority_group(&as, m);
1138                 if (IS_ERR(pg)) {
1139                         r = PTR_ERR(pg);
1140                         goto bad;
1141                 }
1142 
1143                 nr_valid_paths += pg->nr_pgpaths;
1144                 atomic_set(&m->nr_valid_paths, nr_valid_paths);
1145 
1146                 list_add_tail(&pg->list, &m->priority_groups);
1147                 pg_count++;
1148                 pg->pg_num = pg_count;
1149                 if (!--next_pg_num)
1150                         m->next_pg = pg;
1151         }
1152 
1153         if (pg_count != m->nr_priority_groups) {
1154                 ti->error = "priority group count mismatch";
1155                 r = -EINVAL;
1156                 goto bad;
1157         }
1158 
1159         ti->num_flush_bios = 1;
1160         ti->num_discard_bios = 1;
1161         ti->num_write_same_bios = 1;
1162         ti->num_write_zeroes_bios = 1;
1163         if (m->queue_mode == DM_TYPE_BIO_BASED)
1164                 ti->per_io_data_size = multipath_per_bio_data_size();
1165         else
1166                 ti->per_io_data_size = sizeof(struct dm_mpath_io);
1167 
1168         return 0;
1169 
1170  bad:
1171         free_multipath(m);
1172         return r;
1173 }
1174 
1175 static void multipath_wait_for_pg_init_completion(struct multipath *m)
1176 {
1177         DEFINE_WAIT(wait);
1178 
1179         while (1) {
1180                 prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
1181 
1182                 if (!atomic_read(&m->pg_init_in_progress))
1183                         break;
1184 
1185                 io_schedule();
1186         }
1187         finish_wait(&m->pg_init_wait, &wait);
1188 }
1189 
1190 static void flush_multipath_work(struct multipath *m)
1191 {
1192         if (m->hw_handler_name) {
1193                 set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1194                 smp_mb__after_atomic();
1195 
1196                 if (atomic_read(&m->pg_init_in_progress))
1197                         flush_workqueue(kmpath_handlerd);
1198                 multipath_wait_for_pg_init_completion(m);
1199 
1200                 clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
1201                 smp_mb__after_atomic();
1202         }
1203 
1204         if (m->queue_mode == DM_TYPE_BIO_BASED)
1205                 flush_work(&m->process_queued_bios);
1206         flush_work(&m->trigger_event);
1207 }
1208 
1209 static void multipath_dtr(struct dm_target *ti)
1210 {
1211         struct multipath *m = ti->private;
1212 
1213         flush_multipath_work(m);
1214         free_multipath(m);
1215 }
1216 
1217 /*
1218  * Take a path out of use.
1219  */
1220 static int fail_path(struct pgpath *pgpath)
1221 {
1222         unsigned long flags;
1223         struct multipath *m = pgpath->pg->m;
1224 
1225         spin_lock_irqsave(&m->lock, flags);
1226 
1227         if (!pgpath->is_active)
1228                 goto out;
1229 
1230         DMWARN("Failing path %s.", pgpath->path.dev->name);
1231 
1232         pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1233         pgpath->is_active = false;
1234         pgpath->fail_count++;
1235 
1236         atomic_dec(&m->nr_valid_paths);
1237 
1238         if (pgpath == m->current_pgpath)
1239                 m->current_pgpath = NULL;
1240 
1241         dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
1242                        pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1243 
1244         schedule_work(&m->trigger_event);
1245 
1246 out:
1247         spin_unlock_irqrestore(&m->lock, flags);
1248 
1249         return 0;
1250 }
1251 
1252 /*
1253  * Reinstate a previously-failed path
1254  */
1255 static int reinstate_path(struct pgpath *pgpath)
1256 {
1257         int r = 0, run_queue = 0;
1258         unsigned long flags;
1259         struct multipath *m = pgpath->pg->m;
1260         unsigned nr_valid_paths;
1261 
1262         spin_lock_irqsave(&m->lock, flags);
1263 
1264         if (pgpath->is_active)
1265                 goto out;
1266 
1267         DMWARN("Reinstating path %s.", pgpath->path.dev->name);
1268 
1269         r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1270         if (r)
1271                 goto out;
1272 
1273         pgpath->is_active = true;
1274 
1275         nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
1276         if (nr_valid_paths == 1) {
1277                 m->current_pgpath = NULL;
1278                 run_queue = 1;
1279         } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1280                 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1281                         atomic_inc(&m->pg_init_in_progress);
1282         }
1283 
1284         dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
1285                        pgpath->path.dev->name, nr_valid_paths);
1286 
1287         schedule_work(&m->trigger_event);
1288 
1289 out:
1290         spin_unlock_irqrestore(&m->lock, flags);
1291         if (run_queue) {
1292                 dm_table_run_md_queue_async(m->ti->table);
1293                 process_queued_io_list(m);
1294         }
1295 
1296         return r;
1297 }
1298 
1299 /*
1300  * Fail or reinstate all paths that match the provided struct dm_dev.
1301  */
1302 static int action_dev(struct multipath *m, struct dm_dev *dev,
1303                       action_fn action)
1304 {
1305         int r = -EINVAL;
1306         struct pgpath *pgpath;
1307         struct priority_group *pg;
1308 
1309         list_for_each_entry(pg, &m->priority_groups, list) {
1310                 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1311                         if (pgpath->path.dev == dev)
1312                                 r = action(pgpath);
1313                 }
1314         }
1315 
1316         return r;
1317 }
1318 
1319 /*
1320  * Temporarily try to avoid having to use the specified PG
1321  */
1322 static void bypass_pg(struct multipath *m, struct priority_group *pg,
1323                       bool bypassed)
1324 {
1325         unsigned long flags;
1326 
1327         spin_lock_irqsave(&m->lock, flags);
1328 
1329         pg->bypassed = bypassed;
1330         m->current_pgpath = NULL;
1331         m->current_pg = NULL;
1332 
1333         spin_unlock_irqrestore(&m->lock, flags);
1334 
1335         schedule_work(&m->trigger_event);
1336 }
1337 
1338 /*
1339  * Switch to using the specified PG from the next I/O that gets mapped
1340  */
1341 static int switch_pg_num(struct multipath *m, const char *pgstr)
1342 {
1343         struct priority_group *pg;
1344         unsigned pgnum;
1345         unsigned long flags;
1346         char dummy;
1347 
1348         if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1349             !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1350                 DMWARN("invalid PG number supplied to switch_pg_num");
1351                 return -EINVAL;
1352         }
1353 
1354         spin_lock_irqsave(&m->lock, flags);
1355         list_for_each_entry(pg, &m->priority_groups, list) {
1356                 pg->bypassed = false;
1357                 if (--pgnum)
1358                         continue;
1359 
1360                 m->current_pgpath = NULL;
1361                 m->current_pg = NULL;
1362                 m->next_pg = pg;
1363         }
1364         spin_unlock_irqrestore(&m->lock, flags);
1365 
1366         schedule_work(&m->trigger_event);
1367         return 0;
1368 }
1369 
1370 /*
1371  * Set/clear bypassed status of a PG.
1372  * PGs are numbered upwards from 1 in the order they were declared.
1373  */
1374 static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
1375 {
1376         struct priority_group *pg;
1377         unsigned pgnum;
1378         char dummy;
1379 
1380         if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
1381             !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
1382                 DMWARN("invalid PG number supplied to bypass_pg");
1383                 return -EINVAL;
1384         }
1385 
1386         list_for_each_entry(pg, &m->priority_groups, list) {
1387                 if (!--pgnum)
1388                         break;
1389         }
1390 
1391         bypass_pg(m, pg, bypassed);
1392         return 0;
1393 }
1394 
1395 /*
1396  * Should we retry pg_init immediately?
1397  */
1398 static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1399 {
1400         unsigned long flags;
1401         bool limit_reached = false;
1402 
1403         spin_lock_irqsave(&m->lock, flags);
1404 
1405         if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
1406             !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
1407                 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
1408         else
1409                 limit_reached = true;
1410 
1411         spin_unlock_irqrestore(&m->lock, flags);
1412 
1413         return limit_reached;
1414 }
1415 
1416 static void pg_init_done(void *data, int errors)
1417 {
1418         struct pgpath *pgpath = data;
1419         struct priority_group *pg = pgpath->pg;
1420         struct multipath *m = pg->m;
1421         unsigned long flags;
1422         bool delay_retry = false;
1423 
1424         /* device or driver problems */
1425         switch (errors) {
1426         case SCSI_DH_OK:
1427                 break;
1428         case SCSI_DH_NOSYS:
1429                 if (!m->hw_handler_name) {
1430                         errors = 0;
1431                         break;
1432                 }
1433                 DMERR("Could not failover the device: Handler scsi_dh_%s "
1434                       "Error %d.", m->hw_handler_name, errors);
1435                 /*
1436                  * Fail path for now, so we do not ping pong
1437                  */
1438                 fail_path(pgpath);
1439                 break;
1440         case SCSI_DH_DEV_TEMP_BUSY:
1441                 /*
1442                  * Probably doing something like FW upgrade on the
1443                  * controller so try the other pg.
1444                  */
1445                 bypass_pg(m, pg, true);
1446                 break;
1447         case SCSI_DH_RETRY:
1448                 /* Wait before retrying. */
1449                 delay_retry = 1;
1450                 /* fall through */
1451         case SCSI_DH_IMM_RETRY:
1452         case SCSI_DH_RES_TEMP_UNAVAIL:
1453                 if (pg_init_limit_reached(m, pgpath))
1454                         fail_path(pgpath);
1455                 errors = 0;
1456                 break;
1457         case SCSI_DH_DEV_OFFLINED:
1458         default:
1459                 /*
1460                  * We probably do not want to fail the path for a device
1461                  * error, but this is what the old dm did. In future
1462                  * patches we can do more advanced handling.
1463                  */
1464                 fail_path(pgpath);
1465         }
1466 
1467         spin_lock_irqsave(&m->lock, flags);
1468         if (errors) {
1469                 if (pgpath == m->current_pgpath) {
1470                         DMERR("Could not failover device. Error %d.", errors);
1471                         m->current_pgpath = NULL;
1472                         m->current_pg = NULL;
1473                 }
1474         } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1475                 pg->bypassed = false;
1476 
1477         if (atomic_dec_return(&m->pg_init_in_progress) > 0)
1478                 /* Activations of other paths are still on going */
1479                 goto out;
1480 
1481         if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
1482                 if (delay_retry)
1483                         set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1484                 else
1485                         clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
1486 
1487                 if (__pg_init_all_paths(m))
1488                         goto out;
1489         }
1490         clear_bit(MPATHF_QUEUE_IO, &m->flags);
1491 
1492         process_queued_io_list(m);
1493 
1494         /*
1495          * Wake up any thread waiting to suspend.
1496          */
1497         wake_up(&m->pg_init_wait);
1498 
1499 out:
1500         spin_unlock_irqrestore(&m->lock, flags);
1501 }
1502 
1503 static void activate_or_offline_path(struct pgpath *pgpath)
1504 {
1505         struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1506 
1507         if (pgpath->is_active && !blk_queue_dying(q))
1508                 scsi_dh_activate(q, pg_init_done, pgpath);
1509         else
1510                 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1511 }
1512 
1513 static void activate_path_work(struct work_struct *work)
1514 {
1515         struct pgpath *pgpath =
1516                 container_of(work, struct pgpath, activate_path.work);
1517 
1518         activate_or_offline_path(pgpath);
1519 }
1520 
1521 static int multipath_end_io(struct dm_target *ti, struct request *clone,
1522                             blk_status_t error, union map_info *map_context)
1523 {
1524         struct dm_mpath_io *mpio = get_mpio(map_context);
1525         struct pgpath *pgpath = mpio->pgpath;
1526         int r = DM_ENDIO_DONE;
1527 
1528         /*
1529          * We don't queue any clone request inside the multipath target
1530          * during end I/O handling, since those clone requests don't have
1531          * bio clones.  If we queue them inside the multipath target,
1532          * we need to make bio clones, which requires memory allocation.
1533          * (See drivers/md/dm-rq.c:end_clone_bio() for why the clone requests
1534          *  don't have bio clones.)
1535          * Instead of queueing the clone request here, we queue the original
1536          * request into dm core, which will remake a clone request and
1537          * clone bios for it and resubmit it later.
1538          */
1539         if (error && blk_path_error(error)) {
1540                 struct multipath *m = ti->private;
1541 
1542                 if (error == BLK_STS_RESOURCE)
1543                         r = DM_ENDIO_DELAY_REQUEUE;
1544                 else
1545                         r = DM_ENDIO_REQUEUE;
1546 
1547                 if (pgpath)
1548                         fail_path(pgpath);
1549 
1550                 if (atomic_read(&m->nr_valid_paths) == 0 &&
1551                     !must_push_back_rq(m)) {
1552                         if (error == BLK_STS_IOERR)
1553                                 dm_report_EIO(m);
1554                         /* complete with the original error */
1555                         r = DM_ENDIO_DONE;
1556                 }
1557         }
1558 
1559         if (pgpath) {
1560                 struct path_selector *ps = &pgpath->pg->ps;
1561 
1562                 if (ps->type->end_io)
1563                         ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1564         }
1565 
1566         return r;
1567 }
1568 
1569 static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1570                                 blk_status_t *error)
1571 {
1572         struct multipath *m = ti->private;
1573         struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
1574         struct pgpath *pgpath = mpio->pgpath;
1575         unsigned long flags;
1576         int r = DM_ENDIO_DONE;
1577 
1578         if (!*error || !blk_path_error(*error))
1579                 goto done;
1580 
1581         if (pgpath)
1582                 fail_path(pgpath);
1583 
1584         if (atomic_read(&m->nr_valid_paths) == 0 &&
1585             !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1586                 if (must_push_back_bio(m)) {
1587                         r = DM_ENDIO_REQUEUE;
1588                 } else {
1589                         dm_report_EIO(m);
1590                         *error = BLK_STS_IOERR;
1591                 }
1592                 goto done;
1593         }
1594 
1595         spin_lock_irqsave(&m->lock, flags);
1596         bio_list_add(&m->queued_bios, clone);
1597         spin_unlock_irqrestore(&m->lock, flags);
1598         if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
1599                 queue_work(kmultipathd, &m->process_queued_bios);
1600 
1601         r = DM_ENDIO_INCOMPLETE;
1602 done:
1603         if (pgpath) {
1604                 struct path_selector *ps = &pgpath->pg->ps;
1605 
1606                 if (ps->type->end_io)
1607                         ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1608         }
1609 
1610         return r;
1611 }
1612 
1613 /*
1614  * Suspend can't complete until all the I/O is processed, so if
1615  * the last path fails, we must error any remaining I/O.
1616  * Note that if freeze_bdev fails while suspending, the
1617  * queue_if_no_path state is lost - userspace should reset it.
1618  */
1619 static void multipath_presuspend(struct dm_target *ti)
1620 {
1621         struct multipath *m = ti->private;
1622 
1623         queue_if_no_path(m, false, true);
1624 }
1625 
1626 static void multipath_postsuspend(struct dm_target *ti)
1627 {
1628         struct multipath *m = ti->private;
1629 
1630         mutex_lock(&m->work_mutex);
1631         flush_multipath_work(m);
1632         mutex_unlock(&m->work_mutex);
1633 }
1634 
1635 /*
1636  * Restore the queue_if_no_path setting.
1637  */
1638 static void multipath_resume(struct dm_target *ti)
1639 {
1640         struct multipath *m = ti->private;
1641         unsigned long flags;
1642 
1643         spin_lock_irqsave(&m->lock, flags);
1644         assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
1645                    test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
1646         spin_unlock_irqrestore(&m->lock, flags);
1647 }
1648 
1649 /*
1650  * Info output has the following format:
1651  * num_multipath_feature_args [multipath_feature_args]*
1652  * num_handler_status_args [handler_status_args]*
1653  * num_groups init_group_number
1654  *            [A|D|E num_ps_status_args [ps_status_args]*
1655  *             num_paths num_selector_args
1656  *             [path_dev A|F fail_count [selector_args]* ]+ ]+
1657  *
1658  * Table output has the following format (identical to the constructor string):
1659  * num_feature_args [features_args]*
1660  * num_handler_args hw_handler [hw_handler_args]*
1661  * num_groups init_group_number
1662  *     [priority selector-name num_ps_args [ps_args]*
1663  *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
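 *
 * Hypothetical examples (not taken from a live device), assuming a single
 * priority group with two healthy paths (8:16 and 8:32), the round-robin
 * selector with a repeat_count of 1, no features and no hardware handler:
 *
 *   info:  2 0 0 0 1 1 A 0 2 0 8:16 A 0 8:32 A 0
 *   table: 0 0 1 1 round-robin 0 2 1 8:16 1 8:32 1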
1664  */
1665 static void multipath_status(struct dm_target *ti, status_type_t type,
1666                              unsigned status_flags, char *result, unsigned maxlen)
1667 {
1668         int sz = 0;
1669         unsigned long flags;
1670         struct multipath *m = ti->private;
1671         struct priority_group *pg;
1672         struct pgpath *p;
1673         unsigned pg_num;
1674         char state;
1675 
1676         spin_lock_irqsave(&m->lock, flags);
1677 
1678         /* Features */
1679         if (type == STATUSTYPE_INFO)
1680                 DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
1681                        atomic_read(&m->pg_init_count));
1682         else {
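                /*
                 * Feature argument count: "queue_if_no_path" and
                 * "retain_attached_hw_handler" contribute one argument each,
                 * while "pg_init_retries <n>", "pg_init_delay_msecs <n>" and
                 * "queue_mode <mode>" contribute a keyword plus a value,
                 * hence the "* 2" terms below.
                 */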
1683                 DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
1684                               (m->pg_init_retries > 0) * 2 +
1685                               (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
1686                               test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
1687                               (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
1688 
1689                 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1690                         DMEMIT("queue_if_no_path ");
1691                 if (m->pg_init_retries)
1692                         DMEMIT("pg_init_retries %u ", m->pg_init_retries);
1693                 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
1694                         DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
1695                 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
1696                         DMEMIT("retain_attached_hw_handler ");
1697                 if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
1698                         switch (m->queue_mode) {
1699                         case DM_TYPE_BIO_BASED:
1700                                 DMEMIT("queue_mode bio ");
1701                                 break;
1702                         default:
1703                                 WARN_ON_ONCE(true);
1704                                 break;
1705                         }
1706                 }
1707         }
1708 
1709         if (!m->hw_handler_name || type == STATUSTYPE_INFO)
1710                 DMEMIT("0 ");
1711         else
1712                 DMEMIT("1 %s ", m->hw_handler_name);
1713 
1714         DMEMIT("%u ", m->nr_priority_groups);
1715 
1716         if (m->next_pg)
1717                 pg_num = m->next_pg->pg_num;
1718         else if (m->current_pg)
1719                 pg_num = m->current_pg->pg_num;
1720         else
1721                 pg_num = (m->nr_priority_groups ? 1 : 0);
1722 
1723         DMEMIT("%u ", pg_num);
1724 
1725         switch (type) {
1726         case STATUSTYPE_INFO:
1727                 list_for_each_entry(pg, &m->priority_groups, list) {
1728                         if (pg->bypassed)
1729                                 state = 'D';    /* Disabled */
1730                         else if (pg == m->current_pg)
1731                                 state = 'A';    /* Currently Active */
1732                         else
1733                                 state = 'E';    /* Enabled */
1734 
1735                         DMEMIT("%c ", state);
1736 
1737                         if (pg->ps.type->status)
1738                                 sz += pg->ps.type->status(&pg->ps, NULL, type,
1739                                                           result + sz,
1740                                                           maxlen - sz);
1741                         else
1742                                 DMEMIT("0 ");
1743 
1744                         DMEMIT("%u %u ", pg->nr_pgpaths,
1745                                pg->ps.type->info_args);
1746 
1747                         list_for_each_entry(p, &pg->pgpaths, list) {
1748                                 DMEMIT("%s %s %u ", p->path.dev->name,
1749                                        p->is_active ? "A" : "F",
1750                                        p->fail_count);
1751                                 if (pg->ps.type->status)
1752                                         sz += pg->ps.type->status(&pg->ps,
1753                                               &p->path, type, result + sz,
1754                                               maxlen - sz);
1755                         }
1756                 }
1757                 break;
1758 
1759         case STATUSTYPE_TABLE:
1760                 list_for_each_entry(pg, &m->priority_groups, list) {
1761                         DMEMIT("%s ", pg->ps.type->name);
1762 
1763                         if (pg->ps.type->status)
1764                                 sz += pg->ps.type->status(&pg->ps, NULL, type,
1765                                                           result + sz,
1766                                                           maxlen - sz);
1767                         else
1768                                 DMEMIT("0 ");
1769 
1770                         DMEMIT("%u %u ", pg->nr_pgpaths,
1771                                pg->ps.type->table_args);
1772 
1773                         list_for_each_entry(p, &pg->pgpaths, list) {
1774                                 DMEMIT("%s ", p->path.dev->name);
1775                                 if (pg->ps.type->status)
1776                                         sz += pg->ps.type->status(&pg->ps,
1777                                               &p->path, type, result + sz,
1778                                               maxlen - sz);
1779                         }
1780                 }
1781                 break;
1782         }
1783 
1784         spin_unlock_irqrestore(&m->lock, flags);
1785 }
1786 
1787 static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
1788                              char *result, unsigned maxlen)
1789 {
1790         int r = -EINVAL;
1791         struct dm_dev *dev;
1792         struct multipath *m = ti->private;
1793         action_fn action;
1794 
1795         mutex_lock(&m->work_mutex);
1796 
1797         if (dm_suspended(ti)) {
1798                 r = -EBUSY;
1799                 goto out;
1800         }
1801 
1802         if (argc == 1) {
1803                 if (!strcasecmp(argv[0], "queue_if_no_path")) {
1804                         r = queue_if_no_path(m, true, false);
1805                         goto out;
1806                 } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
1807                         r = queue_if_no_path(m, false, false);
1808                         goto out;
1809                 }
1810         }
1811 
1812         if (argc != 2) {
1813                 DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %u.", argc);
1814                 goto out;
1815         }
1816 
1817         if (!strcasecmp(argv[0], "disable_group")) {
1818                 r = bypass_pg_num(m, argv[1], true);
1819                 goto out;
1820         } else if (!strcasecmp(argv[0], "enable_group")) {
1821                 r = bypass_pg_num(m, argv[1], false);
1822                 goto out;
1823         } else if (!strcasecmp(argv[0], "switch_group")) {
1824                 r = switch_pg_num(m, argv[1]);
1825                 goto out;
1826         } else if (!strcasecmp(argv[0], "reinstate_path"))
1827                 action = reinstate_path;
1828         else if (!strcasecmp(argv[0], "fail_path"))
1829                 action = fail_path;
1830         else {
1831                 DMWARN("Unrecognised multipath message received: %s", argv[0]);
1832                 goto out;
1833         }
1834 
1835         r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
1836         if (r) {
1837                 DMWARN("message: error getting device %s",
1838                        argv[1]);
1839                 goto out;
1840         }
1841 
1842         r = action_dev(m, dev, action);
1843 
1844         dm_put_device(ti, dev);
1845 
1846 out:
1847         mutex_unlock(&m->work_mutex);
1848         return r;
1849 }
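
/*
 * Usage sketch (not part of the driver): the messages handled above are
 * normally issued from userspace with dmsetup; the map name "mpatha" and
 * the path device 8:32 below are purely illustrative.
 *
 *   dmsetup message mpatha 0 queue_if_no_path
 *   dmsetup message mpatha 0 fail_if_no_path
 *   dmsetup message mpatha 0 switch_group 2
 *   dmsetup message mpatha 0 fail_path 8:32
 *   dmsetup message mpatha 0 reinstate_path 8:32
 */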
1850 
1851 static int multipath_prepare_ioctl(struct dm_target *ti,
1852                                    struct block_device **bdev)
1853 {
1854         struct multipath *m = ti->private;
1855         struct pgpath *current_pgpath;
1856         int r;
1857 
1858         current_pgpath = READ_ONCE(m->current_pgpath);
1859         if (!current_pgpath)
1860                 current_pgpath = choose_pgpath(m, 0);
1861 
1862         if (current_pgpath) {
1863                 if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
1864                         *bdev = current_pgpath->path.dev->bdev;
1865                         r = 0;
1866                 } else {
1867                         /* pg_init has not started or completed */
1868                         r = -ENOTCONN;
1869                 }
1870         } else {
1871                 /* No path is available */
1872                 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1873                         r = -ENOTCONN;
1874                 else
1875                         r = -EIO;
1876         }
1877 
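        /*
         * -ENOTCONN means a path may yet become usable (pg_init pending, or
         * no path but queue_if_no_path is set): retrigger path selection,
         * pg_init and the queued I/O so that a retried ioctl can make
         * progress.
         */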
1878         if (r == -ENOTCONN) {
1879                 if (!READ_ONCE(m->current_pg)) {
1880                         /* Path status changed, redo selection */
1881                         (void) choose_pgpath(m, 0);
1882                 }
1883                 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
1884                         pg_init_all_paths(m);
1885                 dm_table_run_md_queue_async(m->ti->table);
1886                 process_queued_io_list(m);
1887         }
1888 
1889         /*
1890          * Only pass ioctls through if the device sizes match exactly.
1891          */
1892         if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
1893                 return 1;
1894         return r;
1895 }
1896 
1897 static int multipath_iterate_devices(struct dm_target *ti,
1898                                      iterate_devices_callout_fn fn, void *data)
1899 {
1900         struct multipath *m = ti->private;
1901         struct priority_group *pg;
1902         struct pgpath *p;
1903         int ret = 0;
1904 
1905         list_for_each_entry(pg, &m->priority_groups, list) {
1906                 list_for_each_entry(p, &pg->pgpaths, list) {
1907                         ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
1908                         if (ret)
1909                                 goto out;
1910                 }
1911         }
1912 
1913 out:
1914         return ret;
1915 }
1916 
1917 static int pgpath_busy(struct pgpath *pgpath)
1918 {
1919         struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1920 
1921         return blk_lld_busy(q);
1922 }
1923 
1924 /*
1925  * We return "busy" only when we can map I/Os but the underlying devices
1926  * are busy (so even if we map I/Os now, the I/Os will wait on
1927  * the underlying queue).
1928  * In other words, if we want to kill I/Os or queue them inside us
1929  * due to map unavailability, we don't return "busy".  Otherwise,
1930  * dm core won't give us the I/Os and we can't do what we want.
1931  */
1932 static int multipath_busy(struct dm_target *ti)
1933 {
1934         bool busy = false, has_active = false;
1935         struct multipath *m = ti->private;
1936         struct priority_group *pg, *next_pg;
1937         struct pgpath *pgpath;
1938 
1939         /* pg_init in progress */
1940         if (atomic_read(&m->pg_init_in_progress))
1941                 return true;
1942 
1943         /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
1944         if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
1945                 return (m->queue_mode != DM_TYPE_REQUEST_BASED);
1946 
1947         /* Guess which priority_group will be used at next mapping time */
1948         pg = READ_ONCE(m->current_pg);
1949         next_pg = READ_ONCE(m->next_pg);
1950         if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
1951                 pg = next_pg;
1952 
1953         if (!pg) {
1954                 /*
1955                  * We don't know which pg will be used at next mapping time.
1956                  * We don't call choose_pgpath() here to avoid triggering
1957                  * pg_init just by busy checking.
1958                  * So we don't know whether underlying devices we will be using
1959                  * at next mapping time are busy or not. Just try mapping.
1960                  */
1961                 return busy;
1962         }
1963 
1964         /*
1965          * If there is one non-busy active path at least, the path selector
1966          * will be able to select it. So we consider such a pg as not busy.
1967          */
1968         busy = true;
1969         list_for_each_entry(pgpath, &pg->pgpaths, list) {
1970                 if (pgpath->is_active) {
1971                         has_active = true;
1972                         if (!pgpath_busy(pgpath)) {
1973                                 busy = false;
1974                                 break;
1975                         }
1976                 }
1977         }
1978 
1979         if (!has_active) {
1980                 /*
1981                  * No active path in this pg, so this pg won't be used and
1982                  * the current_pg will be changed at next mapping time.
1983                  * We need to try mapping to determine it.
1984                  */
1985                 busy = false;
1986         }
1987 
1988         return busy;
1989 }
1990 
1991 /*-----------------------------------------------------------------
1992  * Module setup
1993  *---------------------------------------------------------------*/
1994 static struct target_type multipath_target = {
1995         .name = "multipath",
1996         .version = {1, 13, 0},
1997         .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
1998                     DM_TARGET_PASSES_INTEGRITY,
1999         .module = THIS_MODULE,
2000         .ctr = multipath_ctr,
2001         .dtr = multipath_dtr,
2002         .clone_and_map_rq = multipath_clone_and_map,
2003         .release_clone_rq = multipath_release_clone,
2004         .rq_end_io = multipath_end_io,
2005         .map = multipath_map_bio,
2006         .end_io = multipath_end_io_bio,
2007         .presuspend = multipath_presuspend,
2008         .postsuspend = multipath_postsuspend,
2009         .resume = multipath_resume,
2010         .status = multipath_status,
2011         .message = multipath_message,
2012         .prepare_ioctl = multipath_prepare_ioctl,
2013         .iterate_devices = multipath_iterate_devices,
2014         .busy = multipath_busy,
2015 };
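
/*
 * Illustrative creation of a map using this target from userspace; the map
 * name "mpatha", the 2097152-sector length and the path devices are
 * hypothetical.  The table line follows the constructor format documented
 * above multipath_status():
 *
 *   dmsetup create mpatha --table \
 *       "0 2097152 multipath 1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1 8:32 1"
 */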
2016 
2017 static int __init dm_multipath_init(void)
2018 {
2019         int r;
2020 
2021         kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
2022         if (!kmultipathd) {
2023                 DMERR("failed to create workqueue kmpathd");
2024                 r = -ENOMEM;
2025                 goto bad_alloc_kmultipathd;
2026         }
2027 
2028         /*
2029          * A separate workqueue is used to handle the device handlers
2030          * to avoid overloading the existing workqueue. Overloading the
2031          * old workqueue would also create a bottleneck in the
2032          * path of the storage hardware device activation.
2033          */
2034         kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
2035                                                   WQ_MEM_RECLAIM);
2036         if (!kmpath_handlerd) {
2037                 DMERR("failed to create workqueue kmpath_handlerd");
2038                 r = -ENOMEM;
2039                 goto bad_alloc_kmpath_handlerd;
2040         }
2041 
2042         r = dm_register_target(&multipath_target);
2043         if (r < 0) {
2044                 DMERR("request-based register failed %d", r);
2045                 r = -EINVAL;
2046                 goto bad_register_target;
2047         }
2048 
2049         return 0;
2050 
2051 bad_register_target:
2052         destroy_workqueue(kmpath_handlerd);
2053 bad_alloc_kmpath_handlerd:
2054         destroy_workqueue(kmultipathd);
2055 bad_alloc_kmultipathd:
2056         return r;
2057 }
2058 
2059 static void __exit dm_multipath_exit(void)
2060 {
2061         destroy_workqueue(kmpath_handlerd);
2062         destroy_workqueue(kmultipathd);
2063 
2064         dm_unregister_target(&multipath_target);
2065 }
2066 
2067 module_init(dm_multipath_init);
2068 module_exit(dm_multipath_exit);
2069 
2070 MODULE_DESCRIPTION(DM_NAME " multipath target");
2071 MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
2072 MODULE_LICENSE("GPL");
