Lines matching references to the identifier m (the struct multipath * instance pointer) in the device-mapper multipath target, drivers/md/dm-mpath.c. Each entry gives the source line number in that file, the matching line, and the enclosing function plus the kind of reference (member, local, argument).
52 struct multipath *m; /* Owning multipath instance */ member
186 struct multipath *m; in alloc_multipath() local
189 m = kzalloc(sizeof(*m), GFP_KERNEL); in alloc_multipath()
190 if (m) { in alloc_multipath()
191 INIT_LIST_HEAD(&m->priority_groups); in alloc_multipath()
192 spin_lock_init(&m->lock); in alloc_multipath()
193 m->queue_io = 1; in alloc_multipath()
194 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; in alloc_multipath()
195 INIT_WORK(&m->trigger_event, trigger_event); in alloc_multipath()
196 init_waitqueue_head(&m->pg_init_wait); in alloc_multipath()
197 mutex_init(&m->work_mutex); in alloc_multipath()
198 m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache); in alloc_multipath()
199 if (!m->mpio_pool) { in alloc_multipath()
200 kfree(m); in alloc_multipath()
203 m->ti = ti; in alloc_multipath()
204 ti->private = m; in alloc_multipath()
207 return m; in alloc_multipath()
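
The alloc_multipath() lines above show the standard allocate-initialize-or-unwind shape: a zeroed allocation, lock/work/waitqueue setup, then a dependent allocation (the mempool) whose failure frees the outer object. A minimal userspace sketch of the same shape, with invented names (make_ctx, struct ctx), not the kernel API:

    #include <stdlib.h>

    struct ctx {
        int   queue_io;   /* stands in for m->queue_io */
        void *pool;       /* stands in for m->mpio_pool */
    };

    /* Hypothetical analogue of alloc_multipath(): zero the object,
     * set defaults, and unwind if the nested allocation fails. */
    static struct ctx *make_ctx(size_t pool_bytes)
    {
        struct ctx *c = calloc(1, sizeof(*c));  /* like kzalloc() */
        if (!c)
            return NULL;
        c->queue_io = 1;                        /* queue I/O until a path is chosen */
        c->pool = malloc(pool_bytes);           /* like mempool_create_slab_pool() */
        if (!c->pool) {
            free(c);                            /* undo the outer allocation */
            return NULL;
        }
        return c;
    }
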
210 static void free_multipath(struct multipath *m) in free_multipath() argument
214 list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) { in free_multipath()
216 free_priority_group(pg, m->ti); in free_multipath()
219 kfree(m->hw_handler_name); in free_multipath()
220 kfree(m->hw_handler_params); in free_multipath()
221 mempool_destroy(m->mpio_pool); in free_multipath()
222 kfree(m); in free_multipath()
225 static int set_mapinfo(struct multipath *m, union map_info *info) in set_mapinfo() argument
229 mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC); in set_mapinfo()
239 static void clear_mapinfo(struct multipath *m, union map_info *info) in clear_mapinfo() argument
244 mempool_free(mpio, m->mpio_pool); in clear_mapinfo()
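
set_mapinfo()/clear_mapinfo() bracket each request with a context taken from, and returned to, the same mempool; GFP_ATOMIC is used because the allocation happens on the I/O path, where sleeping is not allowed. A toy free-list pool in userspace C that captures the non-blocking allocate/free pairing (all names here are invented for the sketch):

    /* Toy fixed free list: allocation never blocks, loosely analogous
     * to drawing from a mempool's pre-populated reserve. */
    struct node { struct node *next; };
    struct freelist { struct node *head; };

    static void *fl_alloc(struct freelist *fl)
    {
        struct node *n = fl->head;
        if (n)
            fl->head = n->next;
        return n;                 /* NULL when the reserve is exhausted */
    }

    static void fl_free(struct freelist *fl, void *obj)
    {
        struct node *n = obj;     /* objects must be >= sizeof(struct node) */
        n->next = fl->head;
        fl->head = n;
    }
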
251 static int __pg_init_all_paths(struct multipath *m) in __pg_init_all_paths() argument
256 if (m->pg_init_in_progress || m->pg_init_disabled) in __pg_init_all_paths()
259 m->pg_init_count++; in __pg_init_all_paths()
260 m->pg_init_required = 0; in __pg_init_all_paths()
263 if (!m->current_pg) in __pg_init_all_paths()
266 if (m->pg_init_delay_retry) in __pg_init_all_paths()
267 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ? in __pg_init_all_paths()
268 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS); in __pg_init_all_paths()
269 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) { in __pg_init_all_paths()
275 m->pg_init_in_progress++; in __pg_init_all_paths()
277 return m->pg_init_in_progress; in __pg_init_all_paths()
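
The ternary in __pg_init_all_paths() picks the retry delay: an operator-configured pg_init_delay_msecs wins unless it still holds the "default" sentinel, in which case a built-in delay applies, and there is no delay at all unless pg_init_delay_retry was set. Sketched below with illustrative constants standing in for DM_PG_INIT_DELAY_DEFAULT (a sentinel) and DM_PG_INIT_DELAY_MSECS (the built-in default):

    #define DELAY_SENTINEL ((unsigned)-1)  /* stands in for DM_PG_INIT_DELAY_DEFAULT */
    #define DELAY_BUILTIN  2000            /* ms; stands in for DM_PG_INIT_DELAY_MSECS */

    static unsigned pick_pg_init_delay_ms(unsigned configured, int delay_retry)
    {
        if (!delay_retry)
            return 0;                      /* retry immediately */
        return configured != DELAY_SENTINEL ? configured : DELAY_BUILTIN;
    }
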
280 static void __switch_pg(struct multipath *m, struct pgpath *pgpath) in __switch_pg() argument
282 m->current_pg = pgpath->pg; in __switch_pg()
285 if (m->hw_handler_name) { in __switch_pg()
286 m->pg_init_required = 1; in __switch_pg()
287 m->queue_io = 1; in __switch_pg()
289 m->pg_init_required = 0; in __switch_pg()
290 m->queue_io = 0; in __switch_pg()
293 m->pg_init_count = 0; in __switch_pg()
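
__switch_pg() encodes one rule: if a hardware handler is configured, a newly selected priority group must run pg_init before it can carry I/O, so pg_init_required and queue_io are raised together; without a handler both stay clear. The retry counter resets either way. As a compact sketch (struct mp and its fields are stand-ins):

    struct mp { int has_hw_handler, pg_init_required, queue_io, pg_init_count; };

    static void switch_pg(struct mp *m)
    {
        m->pg_init_required = m->has_hw_handler;  /* new PG needs init first */
        m->queue_io         = m->has_hw_handler;  /* hold I/O until init is done */
        m->pg_init_count    = 0;                  /* fresh retry budget */
    }
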
296 static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg, in __choose_path_in_pg() argument
301 path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes); in __choose_path_in_pg()
305 m->current_pgpath = path_to_pgpath(path); in __choose_path_in_pg()
307 if (m->current_pg != pg) in __choose_path_in_pg()
308 __switch_pg(m, m->current_pgpath); in __choose_path_in_pg()
313 static void __choose_pgpath(struct multipath *m, size_t nr_bytes) in __choose_pgpath() argument
318 if (!m->nr_valid_paths) { in __choose_pgpath()
319 m->queue_io = 0; in __choose_pgpath()
324 if (m->next_pg) { in __choose_pgpath()
325 pg = m->next_pg; in __choose_pgpath()
326 m->next_pg = NULL; in __choose_pgpath()
327 if (!__choose_path_in_pg(m, pg, nr_bytes)) in __choose_pgpath()
332 if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes)) in __choose_pgpath()
342 list_for_each_entry(pg, &m->priority_groups, list) { in __choose_pgpath()
345 if (!__choose_path_in_pg(m, pg, nr_bytes)) { in __choose_pgpath()
347 m->pg_init_delay_retry = 1; in __choose_pgpath()
354 m->current_pgpath = NULL; in __choose_pgpath()
355 m->current_pg = NULL; in __choose_pgpath()
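
__choose_pgpath() falls back through three tiers: an operator-requested next_pg is honored once, the current group is reused while its selector still yields paths, and finally every group is scanned, trying non-bypassed groups first and scheduling a delayed pg_init retry if only a bypassed group worked. A hypothetical skeleton of that order (choose_in() stands in for __choose_path_in_pg(); all types are invented):

    #include <stddef.h>

    struct pg { struct pg *next; int bypassed, has_valid_path; };
    struct mp { struct pg *groups, *current_pg, *next_pg; int delay_retry; };

    /* Stand-in for __choose_path_in_pg(): succeed on any group
     * that still has a usable path. Returns 0 on success. */
    static int choose_in(struct mp *m, struct pg *pg)
    {
        if (!pg->has_valid_path)
            return -1;
        m->current_pg = pg;
        return 0;
    }

    static int choose_pgpath(struct mp *m)
    {
        struct pg *pg;
        int accept_bypassed;

        if (m->next_pg) {                       /* tier 1: one-shot override */
            pg = m->next_pg;
            m->next_pg = NULL;
            if (!choose_in(m, pg))
                return 0;
        }
        if (m->current_pg && !choose_in(m, m->current_pg))
            return 0;                           /* tier 2: keep the working group */

        for (accept_bypassed = 0; accept_bypassed < 2; accept_bypassed++)
            for (pg = m->groups; pg; pg = pg->next) {   /* tier 3: scan all */
                if (pg->bypassed != accept_bypassed)
                    continue;                   /* pass 0: normal; pass 1: bypassed */
                if (!choose_in(m, pg)) {
                    if (accept_bypassed)
                        m->delay_retry = 1;     /* only a bypassed PG worked */
                    return 0;
                }
            }
        m->current_pg = NULL;                   /* nothing usable anywhere */
        return -1;
    }
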
369 static int __must_push_back(struct multipath *m) in __must_push_back() argument
371 return (m->queue_if_no_path || in __must_push_back()
372 (m->queue_if_no_path != m->saved_queue_if_no_path && in __must_push_back()
373 dm_noflush_suspending(m->ti))); in __must_push_back()
383 struct multipath *m = (struct multipath *) ti->private; in __multipath_map() local
390 spin_lock_irq(&m->lock); in __multipath_map()
393 if (!m->current_pgpath || in __multipath_map()
394 (!m->queue_io && (m->repeat_count && --m->repeat_count == 0))) in __multipath_map()
395 __choose_pgpath(m, nr_bytes); in __multipath_map()
397 pgpath = m->current_pgpath; in __multipath_map()
400 if (!__must_push_back(m)) in __multipath_map()
403 } else if (m->queue_io || m->pg_init_required) { in __multipath_map()
404 __pg_init_all_paths(m); in __multipath_map()
408 if (set_mapinfo(m, map_context) < 0) in __multipath_map()
418 spin_unlock_irq(&m->lock); in __multipath_map()
431 clear_mapinfo(m, map_context); in __multipath_map()
446 spin_unlock_irq(&m->lock); in __multipath_map()
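
__multipath_map() makes its dispatch decision entirely under m->lock: re-run path selection when no path is cached or repeat_count expires, fail when there is no path and the request need not be pushed back, kick pg_init and ask the core to requeue while initialization is pending, and only then take a per-request context and remap. The decision tree as a simplified sketch, with invented helpers and return codes standing in for DM_MAPIO_*:

    enum map_ret { MAP_REMAPPED, MAP_REQUEUE, MAP_FAIL };

    struct mstate { int have_path, queue_io, pg_init_required, must_push_back; };

    static void pick_path(struct mstate *m)     { (void)m; } /* __choose_pgpath() */
    static void start_pg_init(struct mstate *m) { (void)m; } /* __pg_init_all_paths() */

    static enum map_ret map_request(struct mstate *m, int repeat_expired)
    {
        if (!m->have_path || repeat_expired)
            pick_path(m);
        if (!m->have_path)                        /* no usable path at all */
            return m->must_push_back ? MAP_REQUEUE : MAP_FAIL;
        if (m->queue_io || m->pg_init_required) { /* PG not initialized yet */
            start_pg_init(m);
            return MAP_REQUEUE;
        }
        return MAP_REMAPPED;                      /* dispatch to the chosen path */
    }
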
472 static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, in queue_if_no_path() argument
477 spin_lock_irqsave(&m->lock, flags); in queue_if_no_path()
480 m->saved_queue_if_no_path = m->queue_if_no_path; in queue_if_no_path()
482 m->saved_queue_if_no_path = queue_if_no_path; in queue_if_no_path()
483 m->queue_if_no_path = queue_if_no_path; in queue_if_no_path()
484 spin_unlock_irqrestore(&m->lock, flags); in queue_if_no_path()
487 dm_table_run_md_queue_async(m->ti->table); in queue_if_no_path()
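
queue_if_no_path() takes a save-old-value flag: during suspend the current setting is saved so multipath_resume() can restore it (line 1344), while an explicit user message overwrites both the live and the saved value so the decision sticks across a suspend/resume cycle. A condensed sketch of that save/restore contract (locking elided):

    struct qn { int queue_if_no_path, saved_queue_if_no_path; };

    static void set_queue_if_no_path(struct qn *m, int enable, int save_old)
    {
        if (save_old)
            m->saved_queue_if_no_path = m->queue_if_no_path; /* for resume */
        else
            m->saved_queue_if_no_path = enable;              /* user decision sticks */
        m->queue_if_no_path = enable;
    }

    static void resume_restore(struct qn *m)
    {
        m->queue_if_no_path = m->saved_queue_if_no_path;     /* multipath_resume() */
    }
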
498 struct multipath *m = in trigger_event() local
501 dm_table_event(m->ti->table); in trigger_event()
555 struct multipath *m = ti->private; in parse_path() local
576 if (m->retain_attached_hw_handler || m->hw_handler_name) in parse_path()
579 if (m->retain_attached_hw_handler) { in parse_path()
591 kfree(m->hw_handler_name); in parse_path()
592 m->hw_handler_name = attached_handler_name; in parse_path()
594 kfree(m->hw_handler_params); in parse_path()
595 m->hw_handler_params = NULL; in parse_path()
599 if (m->hw_handler_name) { in parse_path()
600 r = scsi_dh_attach(q, m->hw_handler_name); in parse_path()
614 if (m->hw_handler_params) { in parse_path()
615 r = scsi_dh_set_params(q, m->hw_handler_params); in parse_path()
639 struct multipath *m) in parse_priority_group() argument
649 struct dm_target *ti = m->ti; in parse_priority_group()
662 pg->m = m; in parse_priority_group()
711 static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) in parse_hw_handler() argument
715 struct dm_target *ti = m->ti; in parse_hw_handler()
727 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); in parse_hw_handler()
735 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL); in parse_hw_handler()
749 kfree(m->hw_handler_name); in parse_hw_handler()
750 m->hw_handler_name = NULL; in parse_hw_handler()
754 static int parse_features(struct dm_arg_set *as, struct multipath *m) in parse_features() argument
758 struct dm_target *ti = m->ti; in parse_features()
779 r = queue_if_no_path(m, 1, 0); in parse_features()
784 m->retain_attached_hw_handler = 1; in parse_features()
790 r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error); in parse_features()
797 r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error); in parse_features()
819 struct multipath *m; in multipath_ctr() local
827 m = alloc_multipath(ti); in multipath_ctr()
828 if (!m) { in multipath_ctr()
833 r = parse_features(&as, m); in multipath_ctr()
837 r = parse_hw_handler(&as, m); in multipath_ctr()
841 r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error); in multipath_ctr()
849 if ((!m->nr_priority_groups && next_pg_num) || in multipath_ctr()
850 (m->nr_priority_groups && !next_pg_num)) { in multipath_ctr()
860 pg = parse_priority_group(&as, m); in multipath_ctr()
866 m->nr_valid_paths += pg->nr_pgpaths; in multipath_ctr()
867 list_add_tail(&pg->list, &m->priority_groups); in multipath_ctr()
871 m->next_pg = pg; in multipath_ctr()
874 if (pg_count != m->nr_priority_groups) { in multipath_ctr()
887 free_multipath(m); in multipath_ctr()
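
multipath_ctr() consumes the table line strictly in order: features, hardware handler, number of priority groups plus the initial group, then each group in turn; it cross-checks that a zero group count comes with a zero initial-group number and that the parsed group count matches nr_priority_groups, and every failure funnels to free_multipath(). An illustrative dm table line of the kind this parser consumes (device names and sizes are made up; field order per the parse functions above):

    # <start> <len> multipath <#features> [features] <#hw args> [hw]
    #   <#PGs> <first PG> then per PG: <selector> <#sel args>
    #   <#paths> <#path args> <dev> [path args]...
    0 204800 multipath 1 queue_if_no_path 0 1 1 round-robin 0 2 1 /dev/sda 1000 /dev/sdb 1000
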
891 static void multipath_wait_for_pg_init_completion(struct multipath *m) in multipath_wait_for_pg_init_completion() argument
896 add_wait_queue(&m->pg_init_wait, &wait); in multipath_wait_for_pg_init_completion()
901 spin_lock_irqsave(&m->lock, flags); in multipath_wait_for_pg_init_completion()
902 if (!m->pg_init_in_progress) { in multipath_wait_for_pg_init_completion()
903 spin_unlock_irqrestore(&m->lock, flags); in multipath_wait_for_pg_init_completion()
906 spin_unlock_irqrestore(&m->lock, flags); in multipath_wait_for_pg_init_completion()
912 remove_wait_queue(&m->pg_init_wait, &wait); in multipath_wait_for_pg_init_completion()
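
multipath_wait_for_pg_init_completion() is the classic sleep-until-condition loop: register on the waitqueue, then repeatedly check pg_init_in_progress under the lock and sleep until it reaches zero; pg_init_done() wakes the queue (line 1213) when the last outstanding init finishes. The same check-under-lock, sleep, recheck pattern expressed with pthreads (a userspace analogue, not the kernel waitqueue API; fields assumed initialized elsewhere):

    #include <pthread.h>

    struct waiter {
        pthread_mutex_t lock;
        pthread_cond_t  cv;
        int             in_progress;    /* like m->pg_init_in_progress */
    };

    static void wait_for_completion(struct waiter *w)
    {
        pthread_mutex_lock(&w->lock);
        while (w->in_progress)          /* recheck after every wakeup */
            pthread_cond_wait(&w->cv, &w->lock);
        pthread_mutex_unlock(&w->lock);
    }

    static void one_init_done(struct waiter *w)
    {
        pthread_mutex_lock(&w->lock);
        if (--w->in_progress == 0)      /* last one out wakes the waiters */
            pthread_cond_broadcast(&w->cv);
        pthread_mutex_unlock(&w->lock);
    }
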
915 static void flush_multipath_work(struct multipath *m) in flush_multipath_work() argument
919 spin_lock_irqsave(&m->lock, flags); in flush_multipath_work()
920 m->pg_init_disabled = 1; in flush_multipath_work()
921 spin_unlock_irqrestore(&m->lock, flags); in flush_multipath_work()
924 multipath_wait_for_pg_init_completion(m); in flush_multipath_work()
926 flush_work(&m->trigger_event); in flush_multipath_work()
928 spin_lock_irqsave(&m->lock, flags); in flush_multipath_work()
929 m->pg_init_disabled = 0; in flush_multipath_work()
930 spin_unlock_irqrestore(&m->lock, flags); in flush_multipath_work()
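
flush_multipath_work() quiesces in a fixed order: raise pg_init_disabled under the lock so __pg_init_all_paths() refuses new rounds, wait for in-flight ones to drain, flush pending work, then clear the flag. Disabling before waiting closes the window in which a finishing pg_init could schedule another round. A schematic with C11 atomics (the listing elides some intermediate workqueue flushes; helpers here are hypothetical stand-ins):

    #include <stdatomic.h>

    static atomic_int pg_init_disabled, pg_init_in_progress;

    static void wait_drained(void)
    {
        while (atomic_load(&pg_init_in_progress))
            ;                                   /* the kernel sleeps on a waitqueue here */
    }

    static void flush_pending_work(void) { }    /* stand-in for flush_work() */

    static void flush_all_work(void)
    {
        atomic_store(&pg_init_disabled, 1);     /* refuse new pg_init rounds */
        wait_drained();                         /* drain in-flight rounds */
        flush_pending_work();                   /* trigger_event etc. */
        atomic_store(&pg_init_disabled, 0);     /* re-arm for resume */
    }
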
935 struct multipath *m = ti->private; in multipath_dtr() local
937 flush_multipath_work(m); in multipath_dtr()
938 free_multipath(m); in multipath_dtr()
947 struct multipath *m = pgpath->pg->m; in fail_path() local
949 spin_lock_irqsave(&m->lock, flags); in fail_path()
960 m->nr_valid_paths--; in fail_path()
962 if (pgpath == m->current_pgpath) in fail_path()
963 m->current_pgpath = NULL; in fail_path()
965 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti, in fail_path()
966 pgpath->path.dev->name, m->nr_valid_paths); in fail_path()
968 schedule_work(&m->trigger_event); in fail_path()
971 spin_unlock_irqrestore(&m->lock, flags); in fail_path()
983 struct multipath *m = pgpath->pg->m; in reinstate_path() local
985 spin_lock_irqsave(&m->lock, flags); in reinstate_path()
1003 if (!m->nr_valid_paths++) { in reinstate_path()
1004 m->current_pgpath = NULL; in reinstate_path()
1006 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { in reinstate_path()
1008 m->pg_init_in_progress++; in reinstate_path()
1011 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, in reinstate_path()
1012 pgpath->path.dev->name, m->nr_valid_paths); in reinstate_path()
1014 schedule_work(&m->trigger_event); in reinstate_path()
1017 spin_unlock_irqrestore(&m->lock, flags); in reinstate_path()
1019 dm_table_run_md_queue_async(m->ti->table); in reinstate_path()
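
fail_path() and reinstate_path() are mirror images around nr_valid_paths: failing the active path drops the counter and clears current_pgpath so the next map call re-selects; bringing the first valid path back forces re-selection too, while reinstating into the live hardware-handler group bumps pg_init_in_progress instead. Both emit a path uevent and schedule trigger_event, and reinstate additionally re-runs the request queue, since queued I/O may now make progress. The two counter transitions in miniature (simplified, locking elided):

    struct pathctl { int nr_valid_paths, failing_current, needs_reselect; };

    static void do_fail_path(struct pathctl *m)
    {
        m->nr_valid_paths--;
        if (m->failing_current)
            m->needs_reselect = 1;   /* next map call picks a new path */
    }

    static void do_reinstate_path(struct pathctl *m)
    {
        if (!m->nr_valid_paths++)    /* first path back: force re-selection */
            m->needs_reselect = 1;
    }
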
1027 static int action_dev(struct multipath *m, struct dm_dev *dev, in action_dev() argument
1034 list_for_each_entry(pg, &m->priority_groups, list) { in action_dev()
1047 static void bypass_pg(struct multipath *m, struct priority_group *pg, in bypass_pg() argument
1052 spin_lock_irqsave(&m->lock, flags); in bypass_pg()
1055 m->current_pgpath = NULL; in bypass_pg()
1056 m->current_pg = NULL; in bypass_pg()
1058 spin_unlock_irqrestore(&m->lock, flags); in bypass_pg()
1060 schedule_work(&m->trigger_event); in bypass_pg()
1066 static int switch_pg_num(struct multipath *m, const char *pgstr) in switch_pg_num() argument
1074 (pgnum > m->nr_priority_groups)) { in switch_pg_num()
1079 spin_lock_irqsave(&m->lock, flags); in switch_pg_num()
1080 list_for_each_entry(pg, &m->priority_groups, list) { in switch_pg_num()
1085 m->current_pgpath = NULL; in switch_pg_num()
1086 m->current_pg = NULL; in switch_pg_num()
1087 m->next_pg = pg; in switch_pg_num()
1089 spin_unlock_irqrestore(&m->lock, flags); in switch_pg_num()
1091 schedule_work(&m->trigger_event); in switch_pg_num()
1099 static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed) in bypass_pg_num() argument
1106 (pgnum > m->nr_priority_groups)) { in bypass_pg_num()
1111 list_for_each_entry(pg, &m->priority_groups, list) { in bypass_pg_num()
1116 bypass_pg(m, pg, bypassed); in bypass_pg_num()
1123 static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) in pg_init_limit_reached() argument
1128 spin_lock_irqsave(&m->lock, flags); in pg_init_limit_reached()
1130 if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled) in pg_init_limit_reached()
1131 m->pg_init_required = 1; in pg_init_limit_reached()
1135 spin_unlock_irqrestore(&m->lock, flags); in pg_init_limit_reached()
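
pg_init_limit_reached() implements the retry budget: as long as pg_init_count has not exceeded pg_init_retries and initialization is not disabled, it re-arms pg_init_required and reports "not limited"; otherwise the retry loop stops. In miniature:

    struct budget { unsigned count, retries; int disabled, required; };

    /* Returns 1 when the retry budget is exhausted (or pg_init disabled). */
    static int limit_reached(struct budget *b)
    {
        if (b->count <= b->retries && !b->disabled) {
            b->required = 1;     /* schedule another pg_init round */
            return 0;
        }
        return 1;
    }
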
1144 struct multipath *m = pg->m; in pg_init_done() local
1153 if (!m->hw_handler_name) { in pg_init_done()
1158 "Error %d.", m->hw_handler_name, errors); in pg_init_done()
1169 bypass_pg(m, pg, 1); in pg_init_done()
1176 if (pg_init_limit_reached(m, pgpath)) in pg_init_done()
1189 spin_lock_irqsave(&m->lock, flags); in pg_init_done()
1191 if (pgpath == m->current_pgpath) { in pg_init_done()
1193 m->current_pgpath = NULL; in pg_init_done()
1194 m->current_pg = NULL; in pg_init_done()
1196 } else if (!m->pg_init_required) in pg_init_done()
1199 if (--m->pg_init_in_progress) in pg_init_done()
1203 if (m->pg_init_required) { in pg_init_done()
1204 m->pg_init_delay_retry = delay_retry; in pg_init_done()
1205 if (__pg_init_all_paths(m)) in pg_init_done()
1208 m->queue_io = 0; in pg_init_done()
1213 wake_up(&m->pg_init_wait); in pg_init_done()
1216 spin_unlock_irqrestore(&m->lock, flags); in pg_init_done()
1249 static int do_end_io(struct multipath *m, struct request *clone, in do_end_io() argument
1275 spin_lock_irqsave(&m->lock, flags); in do_end_io()
1276 if (!m->nr_valid_paths) { in do_end_io()
1277 if (!m->queue_if_no_path) { in do_end_io()
1278 if (!__must_push_back(m)) in do_end_io()
1285 spin_unlock_irqrestore(&m->lock, flags); in do_end_io()
1293 struct multipath *m = ti->private; in multipath_end_io() local
1301 r = do_end_io(m, clone, error, mpio); in multipath_end_io()
1308 clear_mapinfo(m, map_context); in multipath_end_io()
1321 struct multipath *m = (struct multipath *) ti->private; in multipath_presuspend() local
1323 queue_if_no_path(m, 0, 1); in multipath_presuspend()
1328 struct multipath *m = ti->private; in multipath_postsuspend() local
1330 mutex_lock(&m->work_mutex); in multipath_postsuspend()
1331 flush_multipath_work(m); in multipath_postsuspend()
1332 mutex_unlock(&m->work_mutex); in multipath_postsuspend()
1340 struct multipath *m = (struct multipath *) ti->private; in multipath_resume() local
1343 spin_lock_irqsave(&m->lock, flags); in multipath_resume()
1344 m->queue_if_no_path = m->saved_queue_if_no_path; in multipath_resume()
1345 spin_unlock_irqrestore(&m->lock, flags); in multipath_resume()
1369 struct multipath *m = (struct multipath *) ti->private; in multipath_status() local
1375 spin_lock_irqsave(&m->lock, flags); in multipath_status()
1379 DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count); in multipath_status()
1381 DMEMIT("%u ", m->queue_if_no_path + in multipath_status()
1382 (m->pg_init_retries > 0) * 2 + in multipath_status()
1383 (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 + in multipath_status()
1384 m->retain_attached_hw_handler); in multipath_status()
1385 if (m->queue_if_no_path) in multipath_status()
1387 if (m->pg_init_retries) in multipath_status()
1388 DMEMIT("pg_init_retries %u ", m->pg_init_retries); in multipath_status()
1389 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) in multipath_status()
1390 DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); in multipath_status()
1391 if (m->retain_attached_hw_handler) in multipath_status()
1395 if (!m->hw_handler_name || type == STATUSTYPE_INFO) in multipath_status()
1398 DMEMIT("1 %s ", m->hw_handler_name); in multipath_status()
1400 DMEMIT("%u ", m->nr_priority_groups); in multipath_status()
1402 if (m->next_pg) in multipath_status()
1403 pg_num = m->next_pg->pg_num; in multipath_status()
1404 else if (m->current_pg) in multipath_status()
1405 pg_num = m->current_pg->pg_num; in multipath_status()
1407 pg_num = (m->nr_priority_groups ? 1 : 0); in multipath_status()
1413 list_for_each_entry(pg, &m->priority_groups, list) { in multipath_status()
1416 else if (pg == m->current_pg) in multipath_status()
1446 list_for_each_entry(pg, &m->priority_groups, list) { in multipath_status()
1470 spin_unlock_irqrestore(&m->lock, flags); in multipath_status()
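
The feature-count arithmetic on the DMEMIT line is worth unpacking: the first number in the table output states how many feature words follow, so each boolean feature contributes 1 and each name-plus-value feature contributes 2. In isolation (delay_sentinel stands in for DM_PG_INIT_DELAY_DEFAULT):

    static unsigned feature_words(unsigned queue_if_no_path,
                                  unsigned pg_init_retries,
                                  unsigned pg_init_delay_msecs,
                                  unsigned retain_hw_handler,
                                  unsigned delay_sentinel)
    {
        /* Booleans emit one word; "name value" features emit two. */
        return queue_if_no_path                              /* "queue_if_no_path" */
             + (pg_init_retries > 0) * 2                     /* "pg_init_retries <n>" */
             + (pg_init_delay_msecs != delay_sentinel) * 2   /* "pg_init_delay_msecs <n>" */
             + retain_hw_handler;                            /* "retain_attached_hw_handler" */
    }
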
1477 struct multipath *m = (struct multipath *) ti->private; in multipath_message() local
1480 mutex_lock(&m->work_mutex); in multipath_message()
1489 r = queue_if_no_path(m, 1, 0); in multipath_message()
1492 r = queue_if_no_path(m, 0, 0); in multipath_message()
1503 r = bypass_pg_num(m, argv[1], 1); in multipath_message()
1506 r = bypass_pg_num(m, argv[1], 0); in multipath_message()
1509 r = switch_pg_num(m, argv[1]); in multipath_message()
1527 r = action_dev(m, dev, action); in multipath_message()
1532 mutex_unlock(&m->work_mutex); in multipath_message()
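
multipath_message() is the sink for dmsetup message: one-word commands toggle queue_if_no_path, two-word commands name a priority group or a path device (resolved through action_dev()). Illustrative usage (device name and path are made up):

    # enable queueing when all paths are down
    dmsetup message mpatha 0 queue_if_no_path
    # fail a specific path; the second word is resolved to a dm_dev
    dmsetup message mpatha 0 "fail_path /dev/sdb"
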
1539 struct multipath *m = ti->private; in multipath_prepare_ioctl() local
1543 spin_lock_irqsave(&m->lock, flags); in multipath_prepare_ioctl()
1545 if (!m->current_pgpath) in multipath_prepare_ioctl()
1546 __choose_pgpath(m, 0); in multipath_prepare_ioctl()
1548 if (m->current_pgpath) { in multipath_prepare_ioctl()
1549 if (!m->queue_io) { in multipath_prepare_ioctl()
1550 *bdev = m->current_pgpath->path.dev->bdev; in multipath_prepare_ioctl()
1551 *mode = m->current_pgpath->path.dev->mode; in multipath_prepare_ioctl()
1559 if (m->queue_if_no_path) in multipath_prepare_ioctl()
1565 spin_unlock_irqrestore(&m->lock, flags); in multipath_prepare_ioctl()
1568 spin_lock_irqsave(&m->lock, flags); in multipath_prepare_ioctl()
1569 if (!m->current_pg) { in multipath_prepare_ioctl()
1571 __choose_pgpath(m, 0); in multipath_prepare_ioctl()
1573 if (m->pg_init_required) in multipath_prepare_ioctl()
1574 __pg_init_all_paths(m); in multipath_prepare_ioctl()
1575 spin_unlock_irqrestore(&m->lock, flags); in multipath_prepare_ioctl()
1576 dm_table_run_md_queue_async(m->ti->table); in multipath_prepare_ioctl()
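
multipath_prepare_ioctl() forwards ioctls to the current path's underlying block device, choosing a path on demand; when no path is usable it returns a soft "retry later" error if queue_if_no_path is set rather than a hard failure, and on that slow path it also opportunistically kicks pg_init and re-runs the queue. A schematic of the return-code choice (error values as I recall from this era of the driver; structure simplified):

    #include <errno.h>

    static int prepare_ioctl_status(int have_path, int queue_io, int queue_if_no_path)
    {
        if (have_path)
            return queue_io ? -ENOTCONN : 0;        /* usable once pg_init settles */
        return queue_if_no_path ? -ENOTCONN : -EIO; /* retry later vs. hard error */
    }
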
1590 struct multipath *m = ti->private; in multipath_iterate_devices() local
1595 list_for_each_entry(pg, &m->priority_groups, list) { in multipath_iterate_devices()
1625 struct multipath *m = ti->private; in multipath_busy() local
1630 spin_lock_irqsave(&m->lock, flags); in multipath_busy()
1633 if (m->pg_init_in_progress || in multipath_busy()
1634 (!m->nr_valid_paths && m->queue_if_no_path)) { in multipath_busy()
1639 if (unlikely(!m->current_pgpath && m->next_pg)) in multipath_busy()
1640 pg = m->next_pg; in multipath_busy()
1641 else if (likely(m->current_pg)) in multipath_busy()
1642 pg = m->current_pg; in multipath_busy()
1677 spin_unlock_irqrestore(&m->lock, flags); in multipath_busy()