This source file includes the following definitions:
- release_port_group
- submit_rtpg
- submit_stpg
- alua_find_get_pg
- alua_alloc_pg
- alua_check_tpgs
- alua_check_vpd
- print_alua_state
- alua_check_sense
- alua_tur
- alua_rtpg
- alua_stpg
- alua_rtpg_work
- alua_rtpg_queue
- alua_initialize
- alua_set_params
- alua_activate
- alua_check
- alua_prep_fn
- alua_rescan
- alua_bus_attach
- alua_bus_detach
- alua_init
- alua_exit
1 /*
2  * Generic SCSI-3 ALUA SCSI device handler ("alua").
3  *
4  * Implements Asymmetric Logical Unit Access support for the SCSI
5  * device handler framework; see the MODULE_* declarations at the
6  * end of this file for author and license information.
7  */
8 #include <linux/slab.h>
9 #include <linux/delay.h>
10 #include <linux/module.h>
11 #include <asm/unaligned.h>
12 #include <scsi/scsi.h>
13 #include <scsi/scsi_proto.h>
14 #include <scsi/scsi_dbg.h>
15 #include <scsi/scsi_eh.h>
16 #include <scsi/scsi_dh.h>
17
18 #define ALUA_DH_NAME "alua"
19 #define ALUA_DH_VER "2.0"
20
21 #define TPGS_SUPPORT_NONE 0x00
22 #define TPGS_SUPPORT_OPTIMIZED 0x01
23 #define TPGS_SUPPORT_NONOPTIMIZED 0x02
24 #define TPGS_SUPPORT_STANDBY 0x04
25 #define TPGS_SUPPORT_UNAVAILABLE 0x08
26 #define TPGS_SUPPORT_LBA_DEPENDENT 0x10
27 #define TPGS_SUPPORT_OFFLINE 0x40
28 #define TPGS_SUPPORT_TRANSITION 0x80
29 #define TPGS_SUPPORT_ALL 0xdf
30
31 #define RTPG_FMT_MASK 0x70
32 #define RTPG_FMT_EXT_HDR 0x10
33
34 #define TPGS_MODE_UNINITIALIZED -1
35 #define TPGS_MODE_NONE 0x0
36 #define TPGS_MODE_IMPLICIT 0x1
37 #define TPGS_MODE_EXPLICIT 0x2
38
39 #define ALUA_RTPG_SIZE 128
40 #define ALUA_FAILOVER_TIMEOUT 60
41 #define ALUA_FAILOVER_RETRIES 5
42 #define ALUA_RTPG_DELAY_MSECS 5
43 #define ALUA_RTPG_RETRY_DELAY 2
44
45
46 #define ALUA_OPTIMIZE_STPG 0x01
47 #define ALUA_RTPG_EXT_HDR_UNSUPP 0x02
48
49 #define ALUA_PG_RUN_RTPG 0x10
50 #define ALUA_PG_RUN_STPG 0x20
51 #define ALUA_PG_RUNNING 0x40
52
53 static uint optimize_stpg;
54 module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
55 MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
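
When optimize_stpg is set, every port group allocated by alua_alloc_pg() below starts with ALUA_OPTIMIZE_STPG in pg->flags, which lets alua_stpg() accept an active/non-optimized path without issuing an STPG as long as the path is not preferred and implicit ALUA is available. A minimal illustrative helper mirroring that check (it refers to the struct and flags defined further down and is not part of the driver):

/* Illustrative only: mirrors the ALUA_OPTIMIZE_STPG short-circuit in alua_stpg(). */
static bool example_may_skip_stpg(const struct alua_port_group *pg)
{
	return pg->state == SCSI_ACCESS_STATE_ACTIVE &&
	       (pg->flags & ALUA_OPTIMIZE_STPG) &&
	       !pg->pref &&
	       (pg->tpgs & TPGS_MODE_IMPLICIT);
}
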
56
57 static LIST_HEAD(port_group_list);
58 static DEFINE_SPINLOCK(port_group_lock);
59 static struct workqueue_struct *kaluad_wq;
60
61 struct alua_port_group {
62 struct kref kref;
63 struct rcu_head rcu;
64 struct list_head node;
65 struct list_head dh_list;
66 unsigned char device_id_str[256];
67 int device_id_len;
68 int group_id;
69 int tpgs;
70 int state;
71 int pref;
72 int valid_states;
73 unsigned flags;
74 unsigned char transition_tmo;
75 unsigned long expiry;
76 unsigned long interval;
77 struct delayed_work rtpg_work;
78 spinlock_t lock;
79 struct list_head rtpg_list;
80 struct scsi_device *rtpg_sdev;
81 };
82
83 struct alua_dh_data {
84 struct list_head node;
85 struct alua_port_group __rcu *pg;
86 int group_id;
87 spinlock_t pg_lock;
88 struct scsi_device *sdev;
89 int init_error;
90 struct mutex init_mutex;
91 };
92
93 struct alua_queue_data {
94 struct list_head entry;
95 activate_complete callback_fn;
96 void *callback_data;
97 };
98
99 #define ALUA_POLICY_SWITCH_CURRENT 0
100 #define ALUA_POLICY_SWITCH_ALL 1
101
102 static void alua_rtpg_work(struct work_struct *work);
103 static bool alua_rtpg_queue(struct alua_port_group *pg,
104 struct scsi_device *sdev,
105 struct alua_queue_data *qdata, bool force);
106 static void alua_check(struct scsi_device *sdev, bool force);
107
108 static void release_port_group(struct kref *kref)
109 {
110 struct alua_port_group *pg;
111
112 pg = container_of(kref, struct alua_port_group, kref);
113 if (pg->rtpg_sdev)
114 flush_delayed_work(&pg->rtpg_work);
115 spin_lock(&port_group_lock);
116 list_del(&pg->node);
117 spin_unlock(&port_group_lock);
118 kfree_rcu(pg, rcu);
119 }
120
121 /*
122  * submit_rtpg - Issue a REPORT TARGET PORT GROUPS command
123  * @sdev: sdev the command should be sent to
124  */
125 static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
126 int bufflen, struct scsi_sense_hdr *sshdr, int flags)
127 {
128 u8 cdb[MAX_COMMAND_SIZE];
129 int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
130 REQ_FAILFAST_DRIVER;
131
132 /* Prepare the command. */
133 memset(cdb, 0x0, MAX_COMMAND_SIZE);
134 cdb[0] = MAINTENANCE_IN;
135 if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP))
136 cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
137 else
138 cdb[1] = MI_REPORT_TARGET_PGS;
139 put_unaligned_be32(bufflen, &cdb[6]);
140
141 return scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buff, bufflen, NULL,
142 sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
143 ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
144 }
145
146 /*
147  * submit_stpg - Issue a SET TARGET PORT GROUPS command
148  * @sdev: sdev the command should be sent to
149  *
150  * Currently we only set the current target port group state to
151  * 'active/optimized' and let the array firmware figure out the rest.
152  */
153 static int submit_stpg(struct scsi_device *sdev, int group_id,
154 struct scsi_sense_hdr *sshdr)
155 {
156 u8 cdb[MAX_COMMAND_SIZE];
157 unsigned char stpg_data[8];
158 int stpg_len = 8;
159 int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
160 REQ_FAILFAST_DRIVER;
161
162 /* Prepare the data buffer */
163 memset(stpg_data, 0, stpg_len);
164 stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL;
165 put_unaligned_be16(group_id, &stpg_data[6]);
166
167 /* Prepare the command. */
168 memset(cdb, 0x0, MAX_COMMAND_SIZE);
169 cdb[0] = MAINTENANCE_OUT;
170 cdb[1] = MO_SET_TARGET_PGS;
171 put_unaligned_be32(stpg_len, &cdb[6]);
172
173 return scsi_execute(sdev, cdb, DMA_TO_DEVICE, stpg_data, stpg_len, NULL,
174 sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
175 ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
176 }
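
For reference, the two helpers above build standard SPC MAINTENANCE IN / MAINTENANCE OUT CDBs: REPORT TARGET PORT GROUPS carries its allocation length in CDB bytes 6-9, and SET TARGET PORT GROUPS sends an 8-byte parameter list (a 4-byte reserved header plus one descriptor) whose byte 4 holds the requested ALUA state and bytes 6-7 the target port group. A minimal sketch of that parameter list, matching the stpg_data buffer filled in above (the struct and field names are illustrative, not part of the driver):

/* Illustrative layout of the 8-byte SET TARGET PORT GROUPS parameter list. */
struct stpg_parameter_sketch {
	unsigned char reserved_header[4]; /* bytes 0-3: parameter list header (reserved) */
	unsigned char alua_state;         /* byte 4, bits 3:0: e.g. SCSI_ACCESS_STATE_OPTIMAL */
	unsigned char reserved;           /* byte 5 */
	unsigned char group_id[2];        /* bytes 6-7: target port group, big endian */
};
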
177
178 static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
179 int group_id)
180 {
181 struct alua_port_group *pg;
182
183 if (!id_str || !id_size || !strlen(id_str))
184 return NULL;
185
186 list_for_each_entry(pg, &port_group_list, node) {
187 if (pg->group_id != group_id)
188 continue;
189 if (!pg->device_id_len || pg->device_id_len != id_size)
190 continue;
191 if (strncmp(pg->device_id_str, id_str, id_size))
192 continue;
193 if (!kref_get_unless_zero(&pg->kref))
194 continue;
195 return pg;
196 }
197
198 return NULL;
199 }
200
201 /*
202  * alua_alloc_pg - Allocate a new port_group structure
203  * @sdev: scsi device
204  * @group_id: port group id
205  * @tpgs: target port group settings
206  *
207  * Allocate a new port_group structure for a given
208  * device.
209  */
210 static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
211 int group_id, int tpgs)
212 {
213 struct alua_port_group *pg, *tmp_pg;
214
215 pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL);
216 if (!pg)
217 return ERR_PTR(-ENOMEM);
218
219 pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str,
220 sizeof(pg->device_id_str));
221 if (pg->device_id_len <= 0) {
222 /*
223  * TPGS supported but no device identification found;
224  * fall back to a port group private to this device.
225  */
226 sdev_printk(KERN_INFO, sdev,
227 "%s: No device descriptors found\n",
228 ALUA_DH_NAME);
229 pg->device_id_str[0] = '\0';
230 pg->device_id_len = 0;
231 }
232 pg->group_id = group_id;
233 pg->tpgs = tpgs;
234 pg->state = SCSI_ACCESS_STATE_OPTIMAL;
235 pg->valid_states = TPGS_SUPPORT_ALL;
236 if (optimize_stpg)
237 pg->flags |= ALUA_OPTIMIZE_STPG;
238 kref_init(&pg->kref);
239 INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work);
240 INIT_LIST_HEAD(&pg->rtpg_list);
241 INIT_LIST_HEAD(&pg->node);
242 INIT_LIST_HEAD(&pg->dh_list);
243 spin_lock_init(&pg->lock);
244
245 spin_lock(&port_group_lock);
246 tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
247 group_id);
248 if (tmp_pg) {
249 spin_unlock(&port_group_lock);
250 kfree(pg);
251 return tmp_pg;
252 }
253
254 list_add(&pg->node, &port_group_list);
255 spin_unlock(&port_group_lock);
256
257 return pg;
258 }
259
260 /*
261  * alua_check_tpgs - Evaluate TPGS setting
262  * @sdev: device to be checked
263  *
264  * Examine the TPGS setting of the sdev (INQUIRY byte 5, bits 5:4)
265  * to find out if ALUA is supported.
266  */
267 static int alua_check_tpgs(struct scsi_device *sdev)
268 {
269 int tpgs = TPGS_MODE_NONE;
270
271 /*
272  * ALUA support for non-disk devices is fraught with
273  * difficulties, so disable it for now.
274  */
275 if (sdev->type != TYPE_DISK) {
276 sdev_printk(KERN_INFO, sdev,
277 "%s: disable for non-disk devices\n",
278 ALUA_DH_NAME);
279 return tpgs;
280 }
281
282 tpgs = scsi_device_tpgs(sdev);
283 switch (tpgs) {
284 case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
285 sdev_printk(KERN_INFO, sdev,
286 "%s: supports implicit and explicit TPGS\n",
287 ALUA_DH_NAME);
288 break;
289 case TPGS_MODE_EXPLICIT:
290 sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
291 ALUA_DH_NAME);
292 break;
293 case TPGS_MODE_IMPLICIT:
294 sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
295 ALUA_DH_NAME);
296 break;
297 case TPGS_MODE_NONE:
298 sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
299 ALUA_DH_NAME);
300 break;
301 default:
302 sdev_printk(KERN_INFO, sdev,
303 "%s: unsupported TPGS setting %d\n",
304 ALUA_DH_NAME, tpgs);
305 tpgs = TPGS_MODE_NONE;
306 break;
307 }
308
309 return tpgs;
310 }
311
312 /*
313  * alua_check_vpd - Evaluate INQUIRY VPD page 0x83
314  * @sdev: device to be checked
315  *
316  * Extract the relative target port and the target port group
317  * descriptor from the list of identification descriptors.
318  */
319 static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
320 int tpgs)
321 {
322 int rel_port = -1, group_id;
323 struct alua_port_group *pg, *old_pg = NULL;
324 bool pg_updated = false;
325 unsigned long flags;
326
327 group_id = scsi_vpd_tpg_id(sdev, &rel_port);
328 if (group_id < 0) {
329 /*
330  * Internal error: TPGS is supported but the required
331  * VPD identification descriptors are not present.
332  * Disable ALUA support.
333  */
334 sdev_printk(KERN_INFO, sdev,
335 "%s: No target port descriptors found\n",
336 ALUA_DH_NAME);
337 return SCSI_DH_DEV_UNSUPP;
338 }
339
340 pg = alua_alloc_pg(sdev, group_id, tpgs);
341 if (IS_ERR(pg)) {
342 if (PTR_ERR(pg) == -ENOMEM)
343 return SCSI_DH_NOMEM;
344 return SCSI_DH_DEV_UNSUPP;
345 }
346 if (pg->device_id_len)
347 sdev_printk(KERN_INFO, sdev,
348 "%s: device %s port group %x rel port %x\n",
349 ALUA_DH_NAME, pg->device_id_str,
350 group_id, rel_port);
351 else
352 sdev_printk(KERN_INFO, sdev,
353 "%s: port group %x rel port %x\n",
354 ALUA_DH_NAME, group_id, rel_port);
355
356 /* Check for existing port group references */
357 spin_lock(&h->pg_lock);
358 old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
359 if (old_pg != pg) {
360 /* Port group has changed; update to the new port group. */
361 if (h->pg) {
362 spin_lock_irqsave(&old_pg->lock, flags);
363 list_del_rcu(&h->node);
364 spin_unlock_irqrestore(&old_pg->lock, flags);
365 }
366 rcu_assign_pointer(h->pg, pg);
367 pg_updated = true;
368 }
369
370 spin_lock_irqsave(&pg->lock, flags);
371 if (pg_updated)
372 list_add_rcu(&h->node, &pg->dh_list);
373 spin_unlock_irqrestore(&pg->lock, flags);
374
375 alua_rtpg_queue(rcu_dereference_protected(h->pg,
376 lockdep_is_held(&h->pg_lock)),
377 sdev, NULL, true);
378 spin_unlock(&h->pg_lock);
379
380 if (old_pg)
381 kref_put(&old_pg->kref, release_port_group);
382
383 return SCSI_DH_OK;
384 }
385
386 static char print_alua_state(unsigned char state)
387 {
388 switch (state) {
389 case SCSI_ACCESS_STATE_OPTIMAL:
390 return 'A';
391 case SCSI_ACCESS_STATE_ACTIVE:
392 return 'N';
393 case SCSI_ACCESS_STATE_STANDBY:
394 return 'S';
395 case SCSI_ACCESS_STATE_UNAVAILABLE:
396 return 'U';
397 case SCSI_ACCESS_STATE_LBA:
398 return 'L';
399 case SCSI_ACCESS_STATE_OFFLINE:
400 return 'O';
401 case SCSI_ACCESS_STATE_TRANSITIONING:
402 return 'T';
403 default:
404 return 'X';
405 }
406 }
407
408 static int alua_check_sense(struct scsi_device *sdev,
409 struct scsi_sense_hdr *sense_hdr)
410 {
411 switch (sense_hdr->sense_key) {
412 case NOT_READY:
413 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
414 /*
415  * LUN Not Accessible - ALUA state transition
416  */
417 alua_check(sdev, false);
418 return NEEDS_RETRY;
419 }
420 break;
421 case UNIT_ATTENTION:
422 if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
423 /*
424  * Power On, Reset, or Bus Device Reset.
425  * Might have obscured a state transition,
426  * so schedule a recheck.
427  */
428 alua_check(sdev, true);
429 return ADD_TO_MLQUEUE;
430 }
431 if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
432 /*
433  * Device internal reset
434  */
435 return ADD_TO_MLQUEUE;
436 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
437 /*
438  * Mode Parameters Changed
439  */
440 return ADD_TO_MLQUEUE;
441 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
442 /*
443  * ALUA state changed
444  */
445 alua_check(sdev, true);
446 return ADD_TO_MLQUEUE;
447 }
448 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
449 /*
450  * Implicit ALUA state transition failed
451  */
452 alua_check(sdev, true);
453 return ADD_TO_MLQUEUE;
454 }
455 if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
456 /*
457  * Inquiry data has changed
458  */
459 return ADD_TO_MLQUEUE;
460 if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e)
461 /*
462  * REPORTED LUNS DATA HAS CHANGED;
463  * some arrays report this when switching
464  * controllers, so just retry.
465  */
466 return ADD_TO_MLQUEUE;
467 break;
468 }
469
470 return SCSI_RETURN_NOT_HANDLED;
471 }
472
473 /*
474  * alua_tur - Send a TEST UNIT READY
475  * @sdev: device to which the TEST UNIT READY command should be sent
476  *
477  * Send a TEST UNIT READY to @sdev to figure out the device state.
478  * Returns SCSI_DH_RETRY if an ALUA state transition is in progress,
479  * SCSI_DH_IO on other errors, and SCSI_DH_OK otherwise.
480  */
481 static int alua_tur(struct scsi_device *sdev)
482 {
483 struct scsi_sense_hdr sense_hdr;
484 int retval;
485
486 retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
487 ALUA_FAILOVER_RETRIES, &sense_hdr);
488 if (sense_hdr.sense_key == NOT_READY &&
489 sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
490 return SCSI_DH_RETRY;
491 else if (retval)
492 return SCSI_DH_IO;
493 else
494 return SCSI_DH_OK;
495 }
496
497 /*
498  * alua_rtpg - Evaluate REPORT TARGET PORT GROUPS
499  * @sdev: the device to be evaluated
500  * @pg: the port group associated with the device
501  *
502  * Issue a REPORT TARGET PORT GROUPS command to @sdev and
503  * evaluate the response.
504  */
505 static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
506 {
507 struct scsi_sense_hdr sense_hdr;
508 struct alua_port_group *tmp_pg;
509 int len, k, off, bufflen = ALUA_RTPG_SIZE;
510 unsigned char *desc, *buff;
511 unsigned err, retval;
512 unsigned int tpg_desc_tbl_off;
513 unsigned char orig_transition_tmo;
514 unsigned long flags;
515 bool transitioning_sense = false;
516
517 if (!pg->expiry) {
518 unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
519
520 if (pg->transition_tmo)
521 transition_tmo = pg->transition_tmo * HZ;
522
523 pg->expiry = round_jiffies_up(jiffies + transition_tmo);
524 }
525
526 buff = kzalloc(bufflen, GFP_KERNEL);
527 if (!buff)
528 return SCSI_DH_DEV_TEMP_BUSY;
529
530 retry:
531 err = 0;
532 retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
533
534 if (retval) {
535 /*
536  * Some arrays return an error for RTPG during
537  * events such as firmware updates.
538  * If the target only reports support for the
539  * active/optimized state there is no alternative
540  * path to switch to anyway, so ignore the error
541  * rather than failing the path; otherwise fall
542  * through to the normal error handling below.
543  */
544 if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
545 sdev_printk(KERN_INFO, sdev,
546 "%s: ignoring rtpg result %d\n",
547 ALUA_DH_NAME, retval);
548 kfree(buff);
549 return SCSI_DH_OK;
550 }
551 if (!scsi_sense_valid(&sense_hdr)) {
552 sdev_printk(KERN_INFO, sdev,
553 "%s: rtpg failed, result %d\n",
554 ALUA_DH_NAME, retval);
555 kfree(buff);
556 if (driver_byte(retval) == DRIVER_ERROR)
557 return SCSI_DH_DEV_TEMP_BUSY;
558 return SCSI_DH_IO;
559 }
560 /*
561  * Some arrays reject the extended header format
562  * (MI_EXT_HDR_PARAM_FMT) with ILLEGAL REQUEST /
563  * ASC 24h (INVALID FIELD IN CDB) even though they
564  * support RTPG itself.
565  *
566  * Remember that the extended header is unsupported
567  * and retry the command without it.
568  */
569 if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
570 sense_hdr.sense_key == ILLEGAL_REQUEST &&
571 sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
572 pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
573 goto retry;
574 }
575
576 /*
577  * If the array reports 'ALUA state transition' here,
578  * record the transitioning state and skip parsing.
579  */
580 if (sense_hdr.sense_key == NOT_READY &&
581 sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
582 transitioning_sense = true;
583 goto skip_rtpg;
584 }
585
586
587 /* Retry on any other UNIT ATTENTION condition. */
588 if (sense_hdr.sense_key == UNIT_ATTENTION)
589 err = SCSI_DH_RETRY;
590 if (err == SCSI_DH_RETRY &&
591 pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
592 sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
593 ALUA_DH_NAME);
594 scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
595 kfree(buff);
596 return err;
597 }
598 sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
599 ALUA_DH_NAME);
600 scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
601 kfree(buff);
602 pg->expiry = 0;
603 return SCSI_DH_IO;
604 }
605
606 len = get_unaligned_be32(&buff[0]) + 4;
607
608 if (len > bufflen) {
609 /* Resubmit with the correct length */
610 kfree(buff);
611 bufflen = len;
612 buff = kmalloc(bufflen, GFP_KERNEL);
613 if (!buff) {
614 sdev_printk(KERN_WARNING, sdev,
615 "%s: kmalloc buffer failed\n", __func__);
616 /* Temporary failure, bypass */
617 pg->expiry = 0;
618 return SCSI_DH_DEV_TEMP_BUSY;
619 }
620 goto retry;
621 }
622
623 orig_transition_tmo = pg->transition_tmo;
624 if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0)
625 pg->transition_tmo = buff[5];
626 else
627 pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;
628
629 if (orig_transition_tmo != pg->transition_tmo) {
630 sdev_printk(KERN_INFO, sdev,
631 "%s: transition timeout set to %d seconds\n",
632 ALUA_DH_NAME, pg->transition_tmo);
633 pg->expiry = jiffies + pg->transition_tmo * HZ;
634 }
635
636 if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
637 tpg_desc_tbl_off = 8;
638 else
639 tpg_desc_tbl_off = 4;
640
641 for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off;
642 k < len;
643 k += off, desc += off) {
644 u16 group_id = get_unaligned_be16(&desc[2]);
645
646 spin_lock_irqsave(&port_group_lock, flags);
647 tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
648 group_id);
649 spin_unlock_irqrestore(&port_group_lock, flags);
650 if (tmp_pg) {
651 if (spin_trylock_irqsave(&tmp_pg->lock, flags)) {
652 if ((tmp_pg == pg) ||
653 !(tmp_pg->flags & ALUA_PG_RUNNING)) {
654 struct alua_dh_data *h;
655
656 tmp_pg->state = desc[0] & 0x0f;
657 tmp_pg->pref = desc[0] >> 7;
658 rcu_read_lock();
659 list_for_each_entry_rcu(h,
660 &tmp_pg->dh_list, node) {
661 /* h->sdev should always be valid */
662 BUG_ON(!h->sdev);
663 h->sdev->access_state = desc[0];
664 }
665 rcu_read_unlock();
666 }
667 if (tmp_pg == pg)
668 tmp_pg->valid_states = desc[1];
669 spin_unlock_irqrestore(&tmp_pg->lock, flags);
670 }
671 kref_put(&tmp_pg->kref, release_port_group);
672 }
673 off = 8 + (desc[7] * 4);
674 }
675
676 skip_rtpg:
677 spin_lock_irqsave(&pg->lock, flags);
678 if (transitioning_sense)
679 pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
680
681 sdev_printk(KERN_INFO, sdev,
682 "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
683 ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
684 pg->pref ? "preferred" : "non-preferred",
685 pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
686 pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
687 pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
688 pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
689 pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
690 pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
691 pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
692
693 switch (pg->state) {
694 case SCSI_ACCESS_STATE_TRANSITIONING:
695 if (time_before(jiffies, pg->expiry)) {
696 /* State transition, retry */
697 pg->interval = ALUA_RTPG_RETRY_DELAY;
698 err = SCSI_DH_RETRY;
699 } else {
700 struct alua_dh_data *h;
701
702 /* Transitioning time exceeded, set port to standby */
703 err = SCSI_DH_IO;
704 pg->state = SCSI_ACCESS_STATE_STANDBY;
705 pg->expiry = 0;
706 rcu_read_lock();
707 list_for_each_entry_rcu(h, &pg->dh_list, node) {
708 BUG_ON(!h->sdev);
709 h->sdev->access_state =
710 (pg->state & SCSI_ACCESS_STATE_MASK);
711 if (pg->pref)
712 h->sdev->access_state |=
713 SCSI_ACCESS_STATE_PREFERRED;
714 }
715 rcu_read_unlock();
716 }
717 break;
718 case SCSI_ACCESS_STATE_OFFLINE:
719 /* Path unusable */
720 err = SCSI_DH_DEV_OFFLINED;
721 pg->expiry = 0;
722 break;
723 default:
724 /* Usable path if active */
725 err = SCSI_DH_OK;
726 pg->expiry = 0;
727 break;
728 }
729 spin_unlock_irqrestore(&pg->lock, flags);
730 kfree(buff);
731 return err;
732 }
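
The descriptor loop in alua_rtpg() above walks the REPORT TARGET PORT GROUPS response: a 4-byte length header, an optional 4-byte extended header (whose byte 5 carries the implicit transition timeout), then one descriptor per port group. A minimal sketch of the per-group descriptor layout the loop relies on (the struct and field names are illustrative, not part of the driver):

/* Illustrative layout of one RTPG target port group descriptor (desc[] above). */
struct rtpg_group_desc_sketch {
	unsigned char pref_and_state;    /* byte 0: bit 7 = preferred, bits 3:0 = ALUA state */
	unsigned char supported_states;  /* byte 1: supported-state bits, kept in valid_states */
	unsigned char group_id[2];       /* bytes 2-3: target port group, big endian */
	unsigned char reserved;          /* byte 4 */
	unsigned char status_code;       /* byte 5 */
	unsigned char vendor_specific;   /* byte 6 */
	unsigned char port_count;        /* byte 7: number of 4-byte target port entries */
	/* followed by port_count * 4 bytes, hence off = 8 + (desc[7] * 4) above */
};
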
733
734 /*
735  * alua_stpg - Issue a SET TARGET PORT GROUPS command
736  * @sdev: the device to be evaluated
737  * @pg: the port group the device belongs to
738  *
739  * Issue a SET TARGET PORT GROUPS command and evaluate the response.
740  * Returns SCSI_DH_RETRY per default to trigger a re-evaluation.
741  */
742 static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
743 {
744 int retval;
745 struct scsi_sense_hdr sense_hdr;
746
747 if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
748 /* Only implicit ALUA supported, retry */
749 return SCSI_DH_RETRY;
750 }
751 switch (pg->state) {
752 case SCSI_ACCESS_STATE_OPTIMAL:
753 return SCSI_DH_OK;
754 case SCSI_ACCESS_STATE_ACTIVE:
755 if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
756 !pg->pref &&
757 (pg->tpgs & TPGS_MODE_IMPLICIT))
758 return SCSI_DH_OK;
759 break;
760 case SCSI_ACCESS_STATE_STANDBY:
761 case SCSI_ACCESS_STATE_UNAVAILABLE:
762 break;
763 case SCSI_ACCESS_STATE_OFFLINE:
764 return SCSI_DH_IO;
765 case SCSI_ACCESS_STATE_TRANSITIONING:
766 break;
767 default:
768 sdev_printk(KERN_INFO, sdev,
769 "%s: stpg failed, unhandled TPGS state %d",
770 ALUA_DH_NAME, pg->state);
771 return SCSI_DH_NOSYS;
772 }
773 retval = submit_stpg(sdev, pg->group_id, &sense_hdr);
774
775 if (retval) {
776 if (!scsi_sense_valid(&sense_hdr)) {
777 sdev_printk(KERN_INFO, sdev,
778 "%s: stpg failed, result %d",
779 ALUA_DH_NAME, retval);
780 if (driver_byte(retval) == DRIVER_ERROR)
781 return SCSI_DH_DEV_TEMP_BUSY;
782 } else {
783 sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
784 ALUA_DH_NAME);
785 scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
786 }
787 }
788
789 return SCSI_DH_RETRY;
790 }
791
792 static void alua_rtpg_work(struct work_struct *work)
793 {
794 struct alua_port_group *pg =
795 container_of(work, struct alua_port_group, rtpg_work.work);
796 struct scsi_device *sdev;
797 LIST_HEAD(qdata_list);
798 int err = SCSI_DH_OK;
799 struct alua_queue_data *qdata, *tmp;
800 unsigned long flags;
801
802 spin_lock_irqsave(&pg->lock, flags);
803 sdev = pg->rtpg_sdev;
804 if (!sdev) {
805 WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
806 WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
807 spin_unlock_irqrestore(&pg->lock, flags);
808 kref_put(&pg->kref, release_port_group);
809 return;
810 }
811 pg->flags |= ALUA_PG_RUNNING;
812 if (pg->flags & ALUA_PG_RUN_RTPG) {
813 int state = pg->state;
814
815 pg->flags &= ~ALUA_PG_RUN_RTPG;
816 spin_unlock_irqrestore(&pg->lock, flags);
817 if (state == SCSI_ACCESS_STATE_TRANSITIONING) {
818 if (alua_tur(sdev) == SCSI_DH_RETRY) {
819 spin_lock_irqsave(&pg->lock, flags);
820 pg->flags &= ~ALUA_PG_RUNNING;
821 pg->flags |= ALUA_PG_RUN_RTPG;
822 if (!pg->interval)
823 pg->interval = ALUA_RTPG_RETRY_DELAY;
824 spin_unlock_irqrestore(&pg->lock, flags);
825 queue_delayed_work(kaluad_wq, &pg->rtpg_work,
826 pg->interval * HZ);
827 return;
828 }
829 /* Send RTPG on failure or if TUR indicates SUCCESS */
830 }
831 err = alua_rtpg(sdev, pg);
832 spin_lock_irqsave(&pg->lock, flags);
833 if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
834 pg->flags &= ~ALUA_PG_RUNNING;
835 if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
836 pg->interval = ALUA_RTPG_RETRY_DELAY;
837 pg->flags |= ALUA_PG_RUN_RTPG;
838 spin_unlock_irqrestore(&pg->lock, flags);
839 queue_delayed_work(kaluad_wq, &pg->rtpg_work,
840 pg->interval * HZ);
841 return;
842 }
843 if (err != SCSI_DH_OK)
844 pg->flags &= ~ALUA_PG_RUN_STPG;
845 }
846 if (pg->flags & ALUA_PG_RUN_STPG) {
847 pg->flags &= ~ALUA_PG_RUN_STPG;
848 spin_unlock_irqrestore(&pg->lock, flags);
849 err = alua_stpg(sdev, pg);
850 spin_lock_irqsave(&pg->lock, flags);
851 if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
852 pg->flags |= ALUA_PG_RUN_RTPG;
853 pg->interval = 0;
854 pg->flags &= ~ALUA_PG_RUNNING;
855 spin_unlock_irqrestore(&pg->lock, flags);
856 queue_delayed_work(kaluad_wq, &pg->rtpg_work,
857 pg->interval * HZ);
858 return;
859 }
860 }
861
862 list_splice_init(&pg->rtpg_list, &qdata_list);
863 pg->rtpg_sdev = NULL;
864 spin_unlock_irqrestore(&pg->lock, flags);
865
866 list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) {
867 list_del(&qdata->entry);
868 if (qdata->callback_fn)
869 qdata->callback_fn(qdata->callback_data, err);
870 kfree(qdata);
871 }
872 spin_lock_irqsave(&pg->lock, flags);
873 pg->flags &= ~ALUA_PG_RUNNING;
874 spin_unlock_irqrestore(&pg->lock, flags);
875 scsi_device_put(sdev);
876 kref_put(&pg->kref, release_port_group);
877 }
878
879 /*
880  * alua_rtpg_queue - cause an RTPG to be submitted asynchronously
881  * @pg: ALUA port group associated with @sdev
882  * @sdev: SCSI device for which to submit an RTPG
883  * @qdata: information about the callback to invoke after the RTPG
884  * @force: whether to schedule an RTPG even if a work item that will
885  *         submit an RTPG has already been scheduled
886  *
887  * Returns true if and only if alua_rtpg_work() will be called
888  * asynchronously; that function invokes @qdata->callback_fn().
889  */
890 static bool alua_rtpg_queue(struct alua_port_group *pg,
891 struct scsi_device *sdev,
892 struct alua_queue_data *qdata, bool force)
893 {
894 int start_queue = 0;
895 unsigned long flags;
896 if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
897 return false;
898
899 spin_lock_irqsave(&pg->lock, flags);
900 if (qdata) {
901 list_add_tail(&qdata->entry, &pg->rtpg_list);
902 pg->flags |= ALUA_PG_RUN_STPG;
903 force = true;
904 }
905 if (pg->rtpg_sdev == NULL) {
906 pg->interval = 0;
907 pg->flags |= ALUA_PG_RUN_RTPG;
908 kref_get(&pg->kref);
909 pg->rtpg_sdev = sdev;
910 start_queue = 1;
911 } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
912 pg->flags |= ALUA_PG_RUN_RTPG;
913 /* Do not queue if the worker is already running */
914 if (!(pg->flags & ALUA_PG_RUNNING)) {
915 kref_get(&pg->kref);
916 start_queue = 1;
917 }
918 }
919
920 spin_unlock_irqrestore(&pg->lock, flags);
921
922 if (start_queue) {
923 if (queue_delayed_work(kaluad_wq, &pg->rtpg_work,
924 msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
925 sdev = NULL;
926 else
927 kref_put(&pg->kref, release_port_group);
928 }
929 if (sdev)
930 scsi_device_put(sdev);
931
932 return true;
933 }
934
935 /*
936  * alua_initialize - Initialize ALUA state
937  * @sdev: the device to be initialized
938  *
939  * For the prep_fn to work correctly we have
940  * to initialize the ALUA state for the device.
941  */
942 static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
943 {
944 int err = SCSI_DH_DEV_UNSUPP, tpgs;
945
946 mutex_lock(&h->init_mutex);
947 tpgs = alua_check_tpgs(sdev);
948 if (tpgs != TPGS_MODE_NONE)
949 err = alua_check_vpd(sdev, h, tpgs);
950 h->init_error = err;
951 mutex_unlock(&h->init_mutex);
952 return err;
953 }
954
955 /*
956  * alua_set_params - set/unset the optimize flag
957  * @sdev: device on the path to be activated
958  * @params: NUL-separated parameters, "no_of_params\0param1\0param2..."
959  *
960  * For example, to set the flag pass the following from
961  * multipath.conf: hardware_handler "2 alua 1"
962  */
963 static int alua_set_params(struct scsi_device *sdev, const char *params)
964 {
965 struct alua_dh_data *h = sdev->handler_data;
966 struct alua_port_group *pg = NULL;
967 unsigned int optimize = 0, argc;
968 const char *p = params;
969 int result = SCSI_DH_OK;
970 unsigned long flags;
971
972 if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
973 return -EINVAL;
974
975 while (*p++)
976 ;
977 if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
978 return -EINVAL;
979
980 rcu_read_lock();
981 pg = rcu_dereference(h->pg);
982 if (!pg) {
983 rcu_read_unlock();
984 return -ENXIO;
985 }
986 spin_lock_irqsave(&pg->lock, flags);
987 if (optimize)
988 pg->flags |= ALUA_OPTIMIZE_STPG;
989 else
990 pg->flags &= ~ALUA_OPTIMIZE_STPG;
991 spin_unlock_irqrestore(&pg->lock, flags);
992 rcu_read_unlock();
993
994 return result;
995 }
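
Callers outside this file normally reach alua_set_params() through the scsi_dh framework (dm-multipath passes the string configured as hardware_handler). A minimal sketch, assuming the scsi_dh_set_params() interface from <scsi/scsi_dh.h>; the helper name and the request queue are illustrative:

#include <scsi/scsi_dh.h>

/*
 * Illustrative only: enable the optimize flag for the device behind @q.
 * The string is NUL-separated as alua_set_params() expects:
 * the argument count ("1") followed by the optimize value ("1").
 */
static int example_enable_optimize_stpg(struct request_queue *q)
{
	static const char params[] = "1\0" "1";

	return scsi_dh_set_params(q, params);
}
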
996
997 /*
998  * alua_activate - activate a path
999  * @sdev: device on the path to be activated
1000  * @fn: callback to invoke once activation has finished
1001  * @data: argument passed to @fn
1002  *
1003  * We currently only switch the port group to be activated
1004  * and let the array figure out the rest.
1005  * Always returns 0; the outcome is reported through @fn.
1006  */
1007 static int alua_activate(struct scsi_device *sdev,
1008 activate_complete fn, void *data)
1009 {
1010 struct alua_dh_data *h = sdev->handler_data;
1011 int err = SCSI_DH_OK;
1012 struct alua_queue_data *qdata;
1013 struct alua_port_group *pg;
1014
1015 qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
1016 if (!qdata) {
1017 err = SCSI_DH_RES_TEMP_UNAVAIL;
1018 goto out;
1019 }
1020 qdata->callback_fn = fn;
1021 qdata->callback_data = data;
1022
1023 mutex_lock(&h->init_mutex);
1024 rcu_read_lock();
1025 pg = rcu_dereference(h->pg);
1026 if (!pg || !kref_get_unless_zero(&pg->kref)) {
1027 rcu_read_unlock();
1028 kfree(qdata);
1029 err = h->init_error;
1030 mutex_unlock(&h->init_mutex);
1031 goto out;
1032 }
1033 rcu_read_unlock();
1034 mutex_unlock(&h->init_mutex);
1035
1036 if (alua_rtpg_queue(pg, sdev, qdata, true))
1037 fn = NULL;
1038 else
1039 err = SCSI_DH_DEV_OFFLINED;
1040 kref_put(&pg->kref, release_port_group);
1041 out:
1042 if (fn)
1043 fn(data, err);
1044 return 0;
1045 }
1046
1047 /*
1048  * alua_check - check path status
1049  * @sdev: device on the path to be checked
1050  *
1051  * Check the device status and schedule an RTPG update if needed.
1052  */
1053 static void alua_check(struct scsi_device *sdev, bool force)
1054 {
1055 struct alua_dh_data *h = sdev->handler_data;
1056 struct alua_port_group *pg;
1057
1058 rcu_read_lock();
1059 pg = rcu_dereference(h->pg);
1060 if (!pg || !kref_get_unless_zero(&pg->kref)) {
1061 rcu_read_unlock();
1062 return;
1063 }
1064 rcu_read_unlock();
1065
1066 alua_rtpg_queue(pg, sdev, NULL, force);
1067 kref_put(&pg->kref, release_port_group);
1068 }
1069
1070 /*
1071  * alua_prep_fn - request callback
1072  *
1073  * Allow I/O on active/optimized, active/non-optimized and
1074  * LBA-dependent paths; requeue while transitioning, fail otherwise.
1075  */
1076 static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
1077 {
1078 struct alua_dh_data *h = sdev->handler_data;
1079 struct alua_port_group *pg;
1080 unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
1081
1082 rcu_read_lock();
1083 pg = rcu_dereference(h->pg);
1084 if (pg)
1085 state = pg->state;
1086 rcu_read_unlock();
1087
1088 switch (state) {
1089 case SCSI_ACCESS_STATE_OPTIMAL:
1090 case SCSI_ACCESS_STATE_ACTIVE:
1091 case SCSI_ACCESS_STATE_LBA:
1092 return BLK_STS_OK;
1093 case SCSI_ACCESS_STATE_TRANSITIONING:
1094 return BLK_STS_RESOURCE;
1095 default:
1096 req->rq_flags |= RQF_QUIET;
1097 return BLK_STS_IOERR;
1098 }
1099 }
1100
1101 static void alua_rescan(struct scsi_device *sdev)
1102 {
1103 struct alua_dh_data *h = sdev->handler_data;
1104
1105 alua_initialize(sdev, h);
1106 }
1107
1108 /*
1109  * alua_bus_attach - Attach device handler
1110  * @sdev: device to be attached to
1111  */
1112 static int alua_bus_attach(struct scsi_device *sdev)
1113 {
1114 struct alua_dh_data *h;
1115 int err;
1116
1117 h = kzalloc(sizeof(*h) , GFP_KERNEL);
1118 if (!h)
1119 return SCSI_DH_NOMEM;
1120 spin_lock_init(&h->pg_lock);
1121 rcu_assign_pointer(h->pg, NULL);
1122 h->init_error = SCSI_DH_OK;
1123 h->sdev = sdev;
1124 INIT_LIST_HEAD(&h->node);
1125
1126 mutex_init(&h->init_mutex);
1127 err = alua_initialize(sdev, h);
1128 if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
1129 goto failed;
1130
1131 sdev->handler_data = h;
1132 return SCSI_DH_OK;
1133 failed:
1134 kfree(h);
1135 return err;
1136 }
1137
1138 /*
1139  * alua_bus_detach - Detach device handler
1140  * @sdev: device to be detached
1141  */
1142 static void alua_bus_detach(struct scsi_device *sdev)
1143 {
1144 struct alua_dh_data *h = sdev->handler_data;
1145 struct alua_port_group *pg;
1146
1147 spin_lock(&h->pg_lock);
1148 pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
1149 rcu_assign_pointer(h->pg, NULL);
1150 h->sdev = NULL;
1151 spin_unlock(&h->pg_lock);
1152 if (pg) {
1153 spin_lock_irq(&pg->lock);
1154 list_del_rcu(&h->node);
1155 spin_unlock_irq(&pg->lock);
1156 kref_put(&pg->kref, release_port_group);
1157 }
1158 sdev->handler_data = NULL;
1159 kfree(h);
1160 }
1161
1162 static struct scsi_device_handler alua_dh = {
1163 .name = ALUA_DH_NAME,
1164 .module = THIS_MODULE,
1165 .attach = alua_bus_attach,
1166 .detach = alua_bus_detach,
1167 .prep_fn = alua_prep_fn,
1168 .check_sense = alua_check_sense,
1169 .activate = alua_activate,
1170 .rescan = alua_rescan,
1171 .set_params = alua_set_params,
1172 };
1173
1174 static int __init alua_init(void)
1175 {
1176 int r;
1177
1178 kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
1179 if (!kaluad_wq)
1180 return -ENOMEM;
1181
1182 r = scsi_register_device_handler(&alua_dh);
1183 if (r != 0) {
1184 printk(KERN_ERR "%s: Failed to register scsi device handler",
1185 ALUA_DH_NAME);
1186 destroy_workqueue(kaluad_wq);
1187 }
1188 return r;
1189 }
1190
1191 static void __exit alua_exit(void)
1192 {
1193 scsi_unregister_device_handler(&alua_dh);
1194 destroy_workqueue(kaluad_wq);
1195 }
1196
1197 module_init(alua_init);
1198 module_exit(alua_exit);
1199
1200 MODULE_DESCRIPTION("DM Multipath ALUA support");
1201 MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
1202 MODULE_LICENSE("GPL");
1203 MODULE_VERSION(ALUA_DH_VER);