This source file includes the following definitions; a sketch of how they plug into libsas follows the list.
- isci_task_refuse
- isci_device_io_ready
- isci_task_execute_task
- isci_task_request_build
- isci_task_execute_tmf
- isci_task_build_tmf
- isci_task_build_abort_task_tmf
- isci_task_send_lu_reset_sas
- isci_task_lu_reset
- isci_task_clear_nexus_port
- isci_task_clear_nexus_ha
- isci_task_abort_task
- isci_task_abort_task_set
- isci_task_clear_aca
- isci_task_clear_task_set
- isci_task_query_task
- isci_task_request_complete
- isci_reset_device
- isci_task_I_T_nexus_reset
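These entry points are the I/O submission and task-management callbacks that libsas expects from a low-level driver. The registration itself is not part of this file; as a minimal sketch (assuming the sas_domain_function_template field names of the libsas version this code targets, with a purely illustrative variable name), the handlers would be wired up roughly like this:

/* Illustrative sketch only: the driver's real registration lives in its
 * init code, and the structure name below is hypothetical.
 */
static struct sas_domain_function_template isci_example_transport_ops = {
	.lldd_execute_task	= isci_task_execute_task,
	.lldd_abort_task	= isci_task_abort_task,
	.lldd_abort_task_set	= isci_task_abort_task_set,
	.lldd_clear_aca		= isci_task_clear_aca,
	.lldd_clear_task_set	= isci_task_clear_task_set,
	.lldd_clear_nexus_port	= isci_task_clear_nexus_port,
	.lldd_clear_nexus_ha	= isci_task_clear_nexus_ha,
	.lldd_I_T_nexus_reset	= isci_task_I_T_nexus_reset,
	.lldd_lu_reset		= isci_task_lu_reset,
	.lldd_query_task	= isci_task_query_task,
};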
#include <linux/completion.h>
#include <linux/irqflags.h>
#include "sas.h"
#include <scsi/libsas.h>
#include "remote_device.h"
#include "remote_node_context.h"
#include "isci.h"
#include "request.h"
#include "task.h"
#include "host.h"

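/* Complete a sas_task back to libsas without handing it to the hardware,
 * reporting the given service response and execution status.
 */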
static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
			     enum service_response response,
			     enum exec_status status)
{
	unsigned long flags;

	dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n",
		__func__, task, response, status);

	spin_lock_irqsave(&task->task_state_lock, flags);

	task->task_status.resp = response;
	task->task_status.stat = status;

	task->task_state_flags |= SAS_TASK_STATE_DONE;
	task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
				    SAS_TASK_STATE_PENDING);
	task->lldd_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	task->task_done(task);
}

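/* Iterate over 'num' sas_tasks linked through task->list, starting at 'task'. */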
#define for_each_sas_task(num, task) \
	for (; num > 0; num--,\
	     task = list_entry(task->list.next, struct sas_task, list))

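/* A device can accept I/O when it is marked IDEV_IO_READY, or when it is in
 * NCQ error recovery and the task is itself an NCQ recovery command.
 */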
static inline int isci_device_io_ready(struct isci_remote_device *idev,
				       struct sas_task *task)
{
	return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
		      (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
		       isci_task_is_ncq_recovery(task))
		    : 0;
}

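/*
 * isci_task_execute_task() - libsas entry point for submitting a sas_task.
 * Tasks that cannot be started (no device, device not ready, no free tag,
 * task already aborted, or a failed submission) are completed back to
 * libsas via isci_task_refuse().  Always returns 0.
 */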
int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
{
	struct isci_host *ihost = dev_to_ihost(task->dev);
	struct isci_remote_device *idev;
	unsigned long flags;
	enum sci_status status = SCI_FAILURE;
	bool io_ready;
	u16 tag;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_lookup_device(task->dev);
	io_ready = isci_device_io_ready(idev, task);
	tag = isci_alloc_tag(ihost);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	dev_dbg(&ihost->pdev->dev,
		"task: %p, dev: %p idev: %p:%#lx cmd = %p\n",
		task, task->dev, idev, idev ? idev->flags : 0,
		task->uldd_task);

	if (!idev) {
		isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
				 SAS_DEVICE_UNKNOWN);
	} else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		/* Device not ready or no tag available: report QUEUE_FULL so
		 * the SCSI midlayer retries.
		 */
		isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
				 SAS_QUEUE_FULL);
	} else {
		spin_lock_irqsave(&task->task_state_lock, flags);

		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);

			isci_task_refuse(ihost, task,
					 SAS_TASK_UNDELIVERED,
					 SAM_STAT_TASK_ABORTED);
		} else {
			task->task_state_flags |= SAS_TASK_AT_INITIATOR;
			spin_unlock_irqrestore(&task->task_state_lock, flags);

			status = isci_request_execute(ihost, idev, task, tag);

			if (status != SCI_SUCCESS) {
				spin_lock_irqsave(&task->task_state_lock, flags);
				task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
				spin_unlock_irqrestore(&task->task_state_lock, flags);

				if (test_bit(IDEV_GONE, &idev->flags)) {
					/* The device has gone away. */
					isci_task_refuse(ihost, task,
							 SAS_TASK_UNDELIVERED,
							 SAS_DEVICE_UNKNOWN);
				} else {
					/* Report QUEUE_FULL so the SCSI
					 * midlayer retries the command.
					 */
					isci_task_refuse(ihost, task,
							 SAS_TASK_COMPLETE,
							 SAS_QUEUE_FULL);
				}
			}
		}
	}

	if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
		spin_lock_irqsave(&ihost->scic_lock, flags);
		/* The command never reached the hardware; just free the tag. */
		isci_tci_free(ihost, ISCI_TAG_TCI(tag));
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
	}

	isci_put_device(idev);
	return 0;
}

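/* Allocate and construct an isci_request for the given TMF and tag; returns
 * NULL on any construction failure.
 */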
static struct isci_request *isci_task_request_build(struct isci_host *ihost,
						     struct isci_remote_device *idev,
						     u16 tag, struct isci_tmf *isci_tmf)
{
	enum sci_status status = SCI_FAILURE;
	struct isci_request *ireq = NULL;
	struct domain_device *dev;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_tmf = %p\n", __func__, isci_tmf);

	dev = idev->domain_dev;

	/* Allocate and initialize the TMF request for the given tag. */
	ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
	if (!ireq)
		return NULL;

	/* Let the SCI core construct the task request. */
	status = sci_task_request_construct(ihost, idev, tag,
					    ireq);

	if (status != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: sci_task_request_construct failed - "
			 "status = 0x%x\n",
			 __func__,
			 status);
		return NULL;
	}

	/* For SSP end devices, build the SSP task frame as well. */
	if (dev->dev_type == SAS_END_DEVICE) {
		isci_tmf->proto = SAS_PROTOCOL_SSP;
		status = sci_task_request_construct_ssp(ireq);
		if (status != SCI_SUCCESS)
			return NULL;
	}

	return ireq;
}

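/*
 * Build, start, and wait (up to timeout_ms) for a task management function
 * on the given remote device.  Returns TMF_RESP_FUNC_COMPLETE on success or
 * a valid response, TMF_RESP_FUNC_FAILED otherwise.
 */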
static int isci_task_execute_tmf(struct isci_host *ihost,
				 struct isci_remote_device *idev,
				 struct isci_tmf *tmf, unsigned long timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	enum sci_status status = SCI_FAILURE;
	struct isci_request *ireq;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	unsigned long timeleft;
	u16 tag;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	tag = isci_alloc_tag(ihost);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return ret;

	/* Sanity check: fail the TMF if the device is not there and ready. */
	if (!idev ||
	    (!test_bit(IDEV_IO_READY, &idev->flags) &&
	     !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p not ready (%#lx)\n",
			__func__,
			idev, idev ? idev->flags : 0);
		goto err_tci;
	} else
		dev_dbg(&ihost->pdev->dev,
			"%s: idev = %p\n",
			__func__, idev);

	/* Hook up the completion the TMF will signal when it finishes. */
	tmf->complete = &completion;
	tmf->status = SCI_FAILURE_TIMEOUT;

	ireq = isci_task_request_build(ihost, idev, tag, tmf);
	if (!ireq)
		goto err_tci;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* Start the TMF I/O. */
	status = sci_controller_start_task(ihost, idev, ireq);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: sci_controller_start_task failed - status = 0x%x, request = %p\n",
			__func__,
			status,
			ireq);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		goto err_tci;
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* Resume the remote device so the TMF can be processed. */
	isci_remote_device_resume_from_abort(ihost, idev);

	/* Wait for the TMF to complete, or the timeout. */
	timeleft = wait_for_completion_timeout(&completion,
					       msecs_to_jiffies(timeout_ms));

	if (timeleft == 0) {
		/* The TMF timed out; terminate the request. */
		isci_remote_device_suspend_terminate(ihost, idev, ireq);
	}

	isci_print_tmf(ihost, tmf);

	if (tmf->status == SCI_SUCCESS)
		ret = TMF_RESP_FUNC_COMPLETE;
	else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
		dev_dbg(&ihost->pdev->dev,
			"%s: tmf.status == "
			"SCI_FAILURE_IO_RESPONSE_VALID\n",
			__func__);
		ret = TMF_RESP_FUNC_COMPLETE;
	}

	dev_dbg(&ihost->pdev->dev,
		"%s: completed request = %p\n",
		__func__,
		ireq);

	return ret;

 err_tci:
	spin_lock_irqsave(&ihost->scic_lock, flags);
	isci_tci_free(ihost, ISCI_TAG_TCI(tag));
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return ret;
}

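/* Initialize a zeroed isci_tmf with the requested function code. */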
static void isci_task_build_tmf(struct isci_tmf *tmf,
				enum isci_tmf_function_codes code)
{
	memset(tmf, 0, sizeof(*tmf));
	tmf->tmf_code = code;
}

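/* Build an abort-task TMF targeting the I/O tag of the request being aborted. */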
static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
					   enum isci_tmf_function_codes code,
					   struct isci_request *old_request)
{
	isci_task_build_tmf(tmf, code);
	tmf->io_tag = old_request->io_tag;
}

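/* Issue an SSP LUN reset TMF to the device and report the TMF result. */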
static int isci_task_send_lu_reset_sas(
	struct isci_host *isci_host,
	struct isci_remote_device *isci_device,
	u8 *lun)
{
	struct isci_tmf tmf;
	int ret = TMF_RESP_FUNC_FAILED;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_host = %p, isci_device = %p\n",
		__func__, isci_host, isci_device);

	/* Build and send an SSP LUN-reset TMF; by the time the call below
	 * returns, the TMF has completed, failed, or timed out.
	 */
	isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset);

#define ISCI_LU_RESET_TIMEOUT_MS 2000
	ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);

	if (ret == TMF_RESP_FUNC_COMPLETE)
		dev_dbg(&isci_host->pdev->dev,
			"%s: %p: TMF_LU_RESET passed\n",
			__func__, isci_device);
	else
		dev_dbg(&isci_host->pdev->dev,
			"%s: %p: TMF_LU_RESET failed (%x)\n",
			__func__, isci_device, ret);

	return ret;
}

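/*
 * isci_task_lu_reset() - libsas logical unit reset handler.  Terminates
 * outstanding I/O to the device, then sends an SSP LUN reset (or schedules
 * a libata reset for SATA devices).
 */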
int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
{
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev;
	unsigned long flags;
	int ret = TMF_RESP_FUNC_COMPLETE;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_get_device(dev->lldd_dev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	dev_dbg(&ihost->pdev->dev,
		"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
		__func__, dev, ihost, idev);

	if (!idev) {
		/* No driver-side device: nothing to reset at this level. */
		dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__);

		ret = TMF_RESP_FUNC_FAILED;
		goto out;
	}

	/* Suspend the device and terminate all outstanding requests. */
	if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
	    != SCI_SUCCESS) {
		ret = TMF_RESP_FUNC_FAILED;
		goto out;
	}

	/* With outstanding I/O terminated, send the LU reset (or schedule a
	 * reset for SATA devices).
	 */
	if (!test_bit(IDEV_GONE, &idev->flags)) {
		if (dev_is_sata(dev))
			sas_ata_schedule_reset(dev);
		else
			ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
	}
 out:
	isci_put_device(idev);
	return ret;
}

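/* Clear nexus (port) is not implemented by this driver; always fails. */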
int isci_task_clear_nexus_port(struct asd_sas_port *port)
{
	return TMF_RESP_FUNC_FAILED;
}

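/* Clear nexus (HA) is not implemented by this driver; always fails. */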
int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
{
	return TMF_RESP_FUNC_FAILED;
}

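/*
 * isci_task_abort_task() - libsas abort handler for a single sas_task.
 * Terminates the in-flight request and, for SSP tasks that may still be
 * active in the target, follows up with an ABORT TASK TMF.
 */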
int isci_task_abort_task(struct sas_task *task)
{
	struct isci_host *ihost = dev_to_ihost(task->dev);
	DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
	struct isci_request *old_request = NULL;
	struct isci_remote_device *idev = NULL;
	struct isci_tmf tmf;
	int ret = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	int target_done_already = 0;

	/* Snapshot the request and device state under the host and task
	 * locks so that neither can be completed or freed underneath us.
	 */
	spin_lock_irqsave(&ihost->scic_lock, flags);
	spin_lock(&task->task_state_lock);

	old_request = task->lldd_task;

	if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
	    (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
	    old_request) {
		idev = isci_get_device(task->dev->lldd_dev);
		target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
					       &old_request->flags);
	}
	spin_unlock(&task->task_state_lock);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	dev_warn(&ihost->pdev->dev,
		 "%s: dev = %p (%s%s), task = %p, old_request == %p\n",
		 __func__, idev,
		 (dev_is_sata(task->dev) ? "STP/SATA"
			: ((dev_is_expander(task->dev->dev_type))
				? "SMP"
				: "SSP")),
		 ((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
				? " IDEV_GONE"
				: "")
			 : " <NULL>"),
		 task, old_request);

	if (!idev || !old_request) {
		/* The request has already completed, so there is nothing to
		 * abort; just mark the task done and report success.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
					    SAS_TASK_STATE_PENDING);
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		ret = TMF_RESP_FUNC_COMPLETE;

		dev_warn(&ihost->pdev->dev,
			 "%s: abort task not needed for %p\n",
			 __func__, task);
		goto out;
	}

	/* Suspend the device and terminate the old request. */
	if (isci_remote_device_suspend_terminate(ihost, idev, old_request)
	    != SCI_SUCCESS) {
		dev_warn(&ihost->pdev->dev,
			 "%s: isci_remote_device_suspend_terminate(dev=%p, "
			 "req=%p, task=%p) failed\n",
			 __func__, idev, old_request, task);
		ret = TMF_RESP_FUNC_FAILED;
		goto out;
	}
	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (task->task_proto == SAS_PROTOCOL_SMP ||
	    sas_protocol_ata(task->task_proto) ||
	    target_done_already ||
	    test_bit(IDEV_GONE, &idev->flags)) {

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

		/* No ABORT TASK TMF will be sent, so resume the device that
		 * was suspended above.
		 */
		isci_remote_device_resume_from_abort(ihost, idev);

		dev_warn(&ihost->pdev->dev,
			 "%s: %s request"
			 " or complete_in_target (%d), "
			 "or IDEV_GONE (%d), thus no TMF\n",
			 __func__,
			 ((task->task_proto == SAS_PROTOCOL_SMP)
			  ? "SMP"
			  : (sas_protocol_ata(task->task_proto)
				? "SATA/STP"
				: "<other>")),
			 test_bit(IREQ_COMPLETE_IN_TARGET,
				  &old_request->flags),
			 test_bit(IDEV_GONE, &idev->flags));

		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
					    SAS_TASK_STATE_PENDING);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		ret = TMF_RESP_FUNC_COMPLETE;
	} else {
		/* Build and send an ABORT TASK TMF for the old request. */
		isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
					       old_request);

		spin_unlock_irqrestore(&ihost->scic_lock, flags);

#define ISCI_ABORT_TASK_TIMEOUT_MS 500
		ret = isci_task_execute_tmf(ihost, idev, &tmf,
					    ISCI_ABORT_TASK_TIMEOUT_MS);
	}
 out:
	dev_warn(&ihost->pdev->dev,
		 "%s: Done; dev = %p, task = %p , old_request == %p\n",
		 __func__, idev, task, old_request);
	isci_put_device(idev);
	return ret;
}

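/* Abort task set is not implemented by this driver; always fails. */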
int isci_task_abort_task_set(
	struct domain_device *d_device,
	u8 *lun)
{
	return TMF_RESP_FUNC_FAILED;
}

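/* Clear ACA is not implemented by this driver; always fails. */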
int isci_task_clear_aca(
	struct domain_device *d_device,
	u8 *lun)
{
	return TMF_RESP_FUNC_FAILED;
}

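/* Clear task set is not implemented by this driver; always fails. */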
int isci_task_clear_task_set(
	struct domain_device *d_device,
	u8 *lun)
{
	return TMF_RESP_FUNC_FAILED;
}

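/*
 * isci_task_query_task() - libsas query-task handler.  Returns
 * TMF_RESP_FUNC_FAILED if a device reset is pending for the task's device,
 * TMF_RESP_FUNC_SUCC otherwise.
 */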
int isci_task_query_task(
	struct sas_task *task)
{
	/* Report failure if a device reset is pending for this task's device. */
	if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
		return TMF_RESP_FUNC_FAILED;
	else
		return TMF_RESP_FUNC_SUCC;
}

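/*
 * isci_task_request_complete() - completion callback for TMF requests.
 * Copies the response data into the isci_tmf, completes the core I/O,
 * frees the tag when allowed, and wakes any waiter on tmf->complete.
 */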
void
isci_task_request_complete(struct isci_host *ihost,
			   struct isci_request *ireq,
			   enum sci_task_status completion_status)
{
	struct isci_tmf *tmf = isci_request_access_tmf(ireq);
	struct completion *tmf_complete = NULL;

	dev_dbg(&ihost->pdev->dev,
		"%s: request = %p, status=%d\n",
		__func__, ireq, completion_status);

	set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);

	if (tmf) {
		tmf->status = completion_status;

		/* Copy the response data back into the TMF before the core
		 * request is completed.
		 */
		if (tmf->proto == SAS_PROTOCOL_SSP) {
			memcpy(&tmf->resp.resp_iu,
			       &ireq->ssp.rsp,
			       SSP_RESP_IU_MAX_SIZE);
		} else if (tmf->proto == SAS_PROTOCOL_SATA) {
			memcpy(&tmf->resp.d2h_fis,
			       &ireq->stp.rsp,
			       sizeof(struct dev_to_host_fis));
		}

		tmf_complete = tmf->complete;
	}
	sci_controller_complete_io(ihost, ireq->target_device, ireq);

	/* Mark the request as terminated. */
	set_bit(IREQ_TERMINATED, &ireq->flags);

	if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
		wake_up_all(&ihost->eventq);

	if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
		isci_free_tag(ihost, ireq->io_tag);

	/* Signal the TMF waiter last. */
	if (tmf_complete)
		complete(tmf_complete);
}

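/*
 * Terminate outstanding I/O, perform a hard reset (directly attached) or a
 * libsas phy reset (expander attached), then resume the device.
 */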
static int isci_reset_device(struct isci_host *ihost,
			     struct domain_device *dev,
			     struct isci_remote_device *idev)
{
	int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1;
	struct sas_phy *phy = sas_get_local_phy(dev);
	struct isci_port *iport = dev->port->lldd_port;

	dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);

	/* Suspend the device and terminate all outstanding requests. */
	if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
	    != SCI_SUCCESS) {
		rc = TMF_RESP_FUNC_FAILED;
		goto out;
	}

	/* Directly attached devices get a hard reset on the local phy;
	 * otherwise issue a phy reset through libsas (link reset for SATA).
	 */
	if (!test_bit(IDEV_GONE, &idev->flags)) {
		if (scsi_is_sas_phy_local(phy)) {
			struct isci_phy *iphy = &ihost->phys[phy->number];

			reset_stat = isci_port_perform_hard_reset(ihost, iport,
								  iphy);
		} else
			reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));
	}

	/* Resume the device now that the reset is done. */
	isci_remote_device_resume_from_abort(ihost, idev);

	dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n",
		__func__, idev, reset_stat);
 out:
	sas_put_local_phy(phy);
	return rc;
}

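/*
 * isci_task_I_T_nexus_reset() - libsas I_T nexus reset handler; looks up the
 * driver's device and delegates to isci_reset_device().
 */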
int isci_task_I_T_nexus_reset(struct domain_device *dev)
{
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_get_device(dev->lldd_dev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (!idev) {
		ret = -ENODEV;
		goto out;
	}

	ret = isci_reset_device(ihost, dev, idev);
 out:
	isci_put_device(idev);
	return ret;
}