This source file includes following definitions.
- scsi_select_sense_cache
- scsi_free_sense_buffer
- scsi_alloc_sense_buffer
- scsi_init_sense_cache
- scsi_set_blocked
- scsi_mq_requeue_cmd
- __scsi_queue_insert
- scsi_queue_insert
- __scsi_execute
- scsi_init_cmd_errh
- scsi_dec_host_busy
- scsi_device_unbusy
- scsi_kick_queue
- scsi_single_lun_run
- scsi_device_is_busy
- scsi_target_is_busy
- scsi_host_is_busy
- scsi_starved_list_run
- scsi_run_queue
- scsi_requeue_run_queue
- scsi_run_host_queues
- scsi_uninit_cmd
- scsi_mq_free_sgtables
- scsi_mq_uninit_cmd
- scsi_end_request
- scsi_result_to_blk_status
- scsi_io_completion_reprep
- scsi_io_completion_action
- scsi_io_completion_nz_result
- scsi_io_completion
- scsi_init_sgtable
- scsi_init_io
- scsi_initialize_rq
- scsi_cleanup_rq
- scsi_add_cmd_to_list
- scsi_del_cmd_from_list
- scsi_init_command
- scsi_setup_scsi_cmnd
- scsi_setup_fs_cmnd
- scsi_setup_cmnd
- scsi_prep_state_check
- scsi_dev_queue_ready
- scsi_target_queue_ready
- scsi_host_queue_ready
- scsi_mq_lld_busy
- scsi_softirq_done
- scsi_dispatch_cmd
- scsi_mq_inline_sgl_size
- scsi_mq_prep_fn
- scsi_mq_done
- scsi_mq_put_budget
- scsi_mq_get_budget
- scsi_queue_rq
- scsi_timeout
- scsi_mq_init_request
- scsi_mq_exit_request
- scsi_map_queues
- __scsi_init_queue
- scsi_commit_rqs
- scsi_mq_alloc_queue
- scsi_mq_setup_tags
- scsi_mq_destroy_tags
- scsi_device_from_queue
- scsi_block_requests
- scsi_unblock_requests
- scsi_init_queue
- scsi_exit_queue
- scsi_mode_select
- scsi_mode_sense
- scsi_test_unit_ready
- scsi_device_set_state
- scsi_evt_emit
- scsi_evt_thread
- sdev_evt_send
- sdev_evt_alloc
- sdev_evt_send_simple
- scsi_device_quiesce
- scsi_device_resume
- device_quiesce_fn
- scsi_target_quiesce
- device_resume_fn
- scsi_target_resume
- scsi_internal_device_block_nowait
- scsi_internal_device_block
- scsi_start_queue
- scsi_internal_device_unblock_nowait
- scsi_internal_device_unblock
- device_block
- target_block
- scsi_target_block
- device_unblock
- target_unblock
- scsi_target_unblock
- scsi_kmap_atomic_sg
- scsi_kunmap_atomic_sg
- sdev_disable_disk_events
- sdev_enable_disk_events
- scsi_vpd_lun_id
- scsi_vpd_tpg_id
1
2
3
4
5
6
7
8
9
10
11
12 #include <linux/bio.h>
13 #include <linux/bitops.h>
14 #include <linux/blkdev.h>
15 #include <linux/completion.h>
16 #include <linux/kernel.h>
17 #include <linux/export.h>
18 #include <linux/init.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/hardirq.h>
22 #include <linux/scatterlist.h>
23 #include <linux/blk-mq.h>
24 #include <linux/ratelimit.h>
25 #include <asm/unaligned.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_dbg.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_driver.h>
32 #include <scsi/scsi_eh.h>
33 #include <scsi/scsi_host.h>
34 #include <scsi/scsi_transport.h>
35 #include <scsi/scsi_dh.h>
36
37 #include <trace/events/scsi.h>
38
39 #include "scsi_debugfs.h"
40 #include "scsi_priv.h"
41 #include "scsi_logging.h"
42
43
44
45
46
47 #ifdef CONFIG_ARCH_NO_SG_CHAIN
48 #define SCSI_INLINE_PROT_SG_CNT 0
49 #define SCSI_INLINE_SG_CNT 0
50 #else
51 #define SCSI_INLINE_PROT_SG_CNT 1
52 #define SCSI_INLINE_SG_CNT 2
53 #endif
54
55 static struct kmem_cache *scsi_sdb_cache;
56 static struct kmem_cache *scsi_sense_cache;
57 static struct kmem_cache *scsi_sense_isadma_cache;
58 static DEFINE_MUTEX(scsi_sense_cache_mutex);
59
60 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
61
62 static inline struct kmem_cache *
63 scsi_select_sense_cache(bool unchecked_isa_dma)
64 {
65 return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
66 }
67
68 static void scsi_free_sense_buffer(bool unchecked_isa_dma,
69 unsigned char *sense_buffer)
70 {
71 kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
72 sense_buffer);
73 }
74
75 static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
76 gfp_t gfp_mask, int numa_node)
77 {
78 return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
79 gfp_mask, numa_node);
80 }
81
/*
 * scsi_init_sense_cache - lazily create the sense-buffer slab cache
 * @shost: host whose unchecked_isa_dma flag selects which cache is needed
 *
 * Creates either the DMA-capable or the regular (usercopy-whitelisted)
 * sense cache on first use; subsequent callers find the cache already
 * present and return immediately.  Serialized by scsi_sense_cache_mutex.
 *
 * Returns 0 on success or -ENOMEM if the cache could not be created.
 */
int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	struct kmem_cache *cache;
	int ret = 0;

	mutex_lock(&scsi_sense_cache_mutex);
	cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
	if (cache)
		goto exit;	/* already created by an earlier host */

	if (shost->unchecked_isa_dma) {
		scsi_sense_isadma_cache =
			kmem_cache_create("scsi_sense_cache(DMA)",
				SCSI_SENSE_BUFFERSIZE, 0,
				SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
		if (!scsi_sense_isadma_cache)
			ret = -ENOMEM;
	} else {
		/* whole buffer is whitelisted for copy_to_user (SG_IO) */
		scsi_sense_cache =
			kmem_cache_create_usercopy("scsi_sense_cache",
				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
				0, SCSI_SENSE_BUFFERSIZE, NULL);
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}
exit:
	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}
111
112
113
114
115
116
117 #define SCSI_QUEUE_DELAY 3
118
/*
 * scsi_set_blocked - throttle submission after a busy/retry status
 * @cmd:    command the LLD could not accept
 * @reason: one of the SCSI_MLQUEUE_* reason codes
 *
 * Arms the host-, device- or target-level "blocked" counter with its
 * configured maximum.  The ready-check path decrements these counters,
 * so the corresponding entity is skipped for that many submission
 * attempts before commands flow again.
 */
static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}
154
/*
 * Hand a command back to blk-mq for re-dispatch.  If the request was
 * already prepared (RQF_DONTPREP set), the per-command resources are torn
 * down first so the next prepare starts clean; the WARN fires if a caller
 * tries to requeue a command that was never prepared.
 */
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	if (cmd->request->rq_flags & RQF_DONTPREP) {
		cmd->request->rq_flags &= ~RQF_DONTPREP;
		scsi_mq_uninit_cmd(cmd);
	} else {
		WARN_ON_ONCE(true);
	}
	blk_mq_requeue_request(cmd->request, true);
}
165
166
167
168
169
170
171
172
173
174
175
176
177
/*
 * __scsi_queue_insert - private queue insertion
 * @cmd:    the SCSI command being requeued
 * @reason: SCSI_MLQUEUE_* reason for the requeue
 * @unbusy: whether the device/host busy counts should be dropped
 *
 * Arms the blocked counters according to @reason, optionally undoes the
 * busy accounting taken at dispatch time, clears the result and hands the
 * request back to blk-mq for a later retry.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
	struct scsi_device *device = cmd->device;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.  Clear the result so the retry
	 * starts from a clean slate.
	 */
	cmd->result = 0;

	blk_mq_requeue_request(cmd->request, true);
}
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
/**
 * scsi_queue_insert - Reinsert a command in the queue.
 * @cmd:    command that we are adding to queue.
 * @reason: why we are inserting command to queue.
 *
 * Used when the host could not accept more commands for the time being,
 * or when the device returned QUEUE_FULL.  Also drops the busy counts
 * taken at dispatch (unbusy == true).
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, true);
}
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
/**
 * __scsi_execute - insert a request, wait for completion and return result
 * @sdev:	scsi device to send the command to
 * @cmd:	the CDB to send
 * @data_direction: DMA_TO_DEVICE / DMA_FROM_DEVICE / DMA_NONE
 * @buffer:	optional data buffer
 * @bufflen:	length of @buffer
 * @sense:	optional raw sense buffer (SCSI_SENSE_BUFFERSIZE bytes)
 * @sshdr:	optional decoded sense header
 * @timeout:	request timeout
 * @retries:	number of times to retry the request
 * @flags:	extra ->cmd_flags bits
 * @rq_flags:	extra ->rq_flags bits (RQF_QUIET is always added)
 * @resid:	optional residual byte count
 *
 * Returns the request's result field, or DRIVER_ERROR << 24 if the
 * request could not even be allocated or mapped.
 */
int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		int data_direction, void *buffer, unsigned bufflen,
		unsigned char *sense, struct scsi_sense_hdr *sshdr,
		int timeout, int retries, u64 flags, req_flags_t rq_flags,
		int *resid)
{
	struct request *req;
	struct scsi_request *rq;
	int ret = DRIVER_ERROR << 24;

	/* BLK_MQ_REQ_PREEMPT lets this through while the device is quiesced */
	req = blk_get_request(sdev->request_queue,
			data_direction == DMA_TO_DEVICE ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
	if (IS_ERR(req))
		return ret;
	rq = scsi_req(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, GFP_NOIO))
		goto out;

	rq->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(rq->cmd, cmd, rq->cmd_len);
	rq->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags;
	req->rq_flags |= rq_flags | RQF_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
		memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

	if (resid)
		*resid = rq->resid_len;
	if (sense && rq->sense_len)
		memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
	if (sshdr)
		scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
	ret = rq->result;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(__scsi_execute);
303
304
305
306
307
308
309
310
311
312
313
314
315 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
316 {
317 scsi_set_resid(cmd, 0);
318 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
319 if (cmd->cmd_len == 0)
320 cmd->cmd_len = scsi_command_size(cmd->cmnd);
321 }
322
323
324
325
326
327
328
329
330
331
/*
 * Drop the host_busy count taken at dispatch time.  If the host is in
 * error recovery, wake the error handler once failed or scheduled EH work
 * is pending, so completion of the last outstanding command cannot be
 * missed.  NOTE(review): the rcu_read_lock here pairs with
 * synchronization in the EH/host teardown path outside this file --
 * confirm against scsi_error.c / hosts.c before changing.
 */
static void scsi_dec_host_busy(struct Scsi_Host *shost)
{
	unsigned long flags;

	rcu_read_lock();
	atomic_dec(&shost->host_busy);
	if (unlikely(scsi_host_in_recovery(shost))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (shost->host_failed || shost->host_eh_scheduled)
			scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	rcu_read_unlock();
}
346
/*
 * Undo the busy accounting performed when a command was dispatched:
 * host busy (possibly waking the error handler), target busy (only
 * maintained when the target advertises a queue limit) and device busy.
 */
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);

	scsi_dec_host_busy(shost);

	/* target_busy is only tracked when can_queue is set */
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	atomic_dec(&sdev->device_busy);
}
359
/* Ask blk-mq to run all hardware queues of @q (async = false). */
static void scsi_kick_queue(struct request_queue *q)
{
	blk_mq_run_hw_queues(q, false);
}
364
365
366
367
368
369
370
371
/*
 * Called for single_lun devices on IO completion.  Clears
 * starget_sdev_user and kicks the queues of all devices on the target,
 * current_sdev first, so a different LUN gets a chance to run.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Kick current_sdev first without the lock; if another command
	 * grabs the target meanwhile, starget_sdev_user becomes non-NULL
	 * again and the sibling walk below is skipped.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		/* drop the lock while running the sibling's queue */
		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}
410
411 static inline bool scsi_device_is_busy(struct scsi_device *sdev)
412 {
413 if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
414 return true;
415 if (atomic_read(&sdev->device_blocked) > 0)
416 return true;
417 return false;
418 }
419
420 static inline bool scsi_target_is_busy(struct scsi_target *starget)
421 {
422 if (starget->can_queue > 0) {
423 if (atomic_read(&starget->target_busy) >= starget->can_queue)
424 return true;
425 if (atomic_read(&starget->target_blocked) > 0)
426 return true;
427 }
428 return false;
429 }
430
431 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
432 {
433 if (shost->can_queue > 0 &&
434 atomic_read(&shost->host_busy) >= shost->can_queue)
435 return true;
436 if (atomic_read(&shost->host_blocked) > 0)
437 return true;
438 if (shost->host_self_blocked)
439 return true;
440 return false;
441 }
442
/*
 * Run the queues of devices parked on the host's starved list.  Stops as
 * soon as the host itself is busy again; devices whose target is still
 * busy are moved back onto the host list for a later pass.
 */
static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	/* work on a private copy so new starvers can keep arriving */
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * host_lock protects starved_list and starved_entry;
		 * running a queue below drops the lock, so the host can
		 * become busy again between iterations -- re-check and
		 * bail out as soon as it is.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * could destroy the sdev and its queue.  Take a reference
		 * to the queue and never touch the sdev again after
		 * unlocking; if the queue is already being cleaned up,
		 * blk_get_queue() fails and we skip it.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}
501
502
503
504
505
506
507
508
509
510
511
512
513
/**
 * scsi_run_queue - Select a proper request queue to serve next.
 * @q:  last request's queue
 *
 * The previous command was completely finished; start a new one if
 * possible, giving single-LUN siblings and starved devices a chance
 * before running this queue's hardware contexts.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	blk_mq_run_hw_queues(q, false);
}
525
526 void scsi_requeue_run_queue(struct work_struct *work)
527 {
528 struct scsi_device *sdev;
529 struct request_queue *q;
530
531 sdev = container_of(work, struct scsi_device, requeue_work);
532 q = sdev->request_queue;
533 scsi_run_queue(q);
534 }
535
/* Run the request queue of every device attached to @shost. */
void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}
543
544 static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
545 {
546 if (!blk_rq_is_passthrough(cmd->request)) {
547 struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
548
549 if (drv->uninit_command)
550 drv->uninit_command(cmd);
551 }
552 }
553
/* Free the (possibly chained) data and protection scatter-gather tables. */
static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table,
				SCSI_INLINE_SG_CNT);
	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table,
				SCSI_INLINE_PROT_SG_CNT);
}
563
/* Tear down all per-command state that was set up at prepare time. */
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);
	scsi_del_cmd_from_list(cmd);
}
570
571
/*
 * scsi_end_request - complete @bytes of @req and, when the whole request
 * is done, release the command and restart the queue.
 *
 * Returns true if bytes remain on @req (caller keeps the command), false
 * if the request was fully completed and the command has been released.
 */
static bool scsi_end_request(struct request *req, blk_status_t error,
		unsigned int bytes)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;	/* partial completion, more to do */

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (!blk_rq_is_scsi(req)) {
		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
		cmd->flags &= ~SCMD_INITIALIZED;
	}

	/*
	 * NOTE(review): no rcu_barrier() needed here -- presumably the SCSI
	 * error handler guarantees any call_rcu() callback on cmd->rcu has
	 * run before completion reaches this point; confirm in scsi_error.c.
	 */
	destroy_rcu_head(&cmd->rcu);

	/*
	 * The command is freed by __blk_mq_end_request(), so all cleanup
	 * that depends on it must happen first.
	 */
	scsi_mq_uninit_cmd(cmd);

	/*
	 * Queue is still alive; grab a usage ref to keep it from being
	 * cleaned up while we run it below.
	 */
	percpu_ref_get(&q->q_usage_counter);

	__blk_mq_end_request(req, error);

	/* defer to the workqueue when siblings/starved devices need a run */
	if (scsi_target(sdev)->single_lun ||
	    !list_empty(&sdev->host->starved_list))
		kblockd_schedule_work(&sdev->requeue_work);
	else
		blk_mq_run_hw_queues(q, true);

	percpu_ref_put(&q->q_usage_counter);
	return false;
}
624
625
626
627
628
629
630
631
632
/**
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @cmd:	SCSI command
 * @result:	scsi error code
 *
 * Translates a SCSI result code into a blk_status_t value.  May reset the
 * host byte of @cmd->result so later passes see DID_OK.
 */
static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
	switch (host_byte(result)) {
	case DID_OK:
		/*
		 * Also check bytes other than the status byte: an LLD may
		 * set e.g. the driver byte without SAM_STAT_CHECK_CONDITION,
		 * which must still be treated as an error.
		 */
		if (scsi_status_is_good(result) && (result & ~0xff) == 0)
			return BLK_STS_OK;
		return BLK_STS_IOERR;
	case DID_TRANSPORT_FAILFAST:
		return BLK_STS_TRANSPORT;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_TARGET;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_NEXUS;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_NOSPC;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_MEDIUM;
	default:
		return BLK_STS_IOERR;
	}
}
663
664
/*
 * Helper for scsi_io_completion() when "reprep" is required: the request
 * is unprepared and requeued so a new command gets built for it.
 * (@q is currently unused.)
 */
static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
				      struct request_queue *q)
{
	/* A new command will be prepared and issued. */
	scsi_mq_requeue_cmd(cmd);
}
671
672
/*
 * Helper for scsi_io_completion() when a special action is required:
 * inspects the sense data and decides whether to fail, re-prepare,
 * retry immediately, or retry after a delay.
 */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	/* total time this command is allowed to spend in flight */
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
	struct scsi_sense_hdr sshdr;
	bool sense_valid;
	bool sense_current = true;	/* false implies "deferred sense" */
	blk_status_t blk_stat;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	blk_stat = scsi_result_to_blk_status(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && sense_current) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/*
				 * Detected disc change.  Set a bit and
				 * quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/*
				 * Must have been a power glitch or a bus
				 * reset; not a media change, so retry.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/*
			 * Possibly an unsupported command: e.g. a 10-byte
			 * read on a device that only supports 6-byte
			 * reads -- downgrade and re-prepare in that case.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) { /* DIX */
				action = ACTION_FAIL;
				blk_stat = BLK_STS_PROTECTION;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_TARGET;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				blk_stat = BLK_STS_PROTECTION;
			break;
		case NOT_READY:
			/*
			 * If the device is becoming ready or has a
			 * temporary blockage, retry after a delay.
			 * The ascq values below are "... in progress"
			 * conditions -- TODO(review): confirm each
			 * against the current SPC ASC/ASCQ table.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01:
				case 0x04:
				case 0x05:
				case 0x06:
				case 0x07:
				case 0x08:
				case 0x09:
				case 0x14:
				case 0x1a:
				case 0x1b:
				case 0x1d:
				case 0x24:
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	/* out of total retry time: give up regardless of chosen action */
	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->rq_flags & RQF_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level =
				     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						    SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate
			 * messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) == DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
			return;
		/* fallthrough: bytes remain, re-prepare the leftover */
	case ACTION_REPREP:
		scsi_io_completion_reprep(cmd, q);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
		break;
	}
}
829
830
831
832
833
834
/*
 * Helper for scsi_io_completion() when cmd->result is non-zero.  Returns
 * a new result that may suppress further error checking, and may also
 * modify *blk_statp.
 */
static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
					blk_status_t *blk_statp)
{
	bool sense_valid;
	bool sense_current = true;	/* false implies "deferred sense" */
	struct request *req = cmd->request;
	struct scsi_sense_hdr sshdr;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	if (blk_rq_is_passthrough(req)) {
		if (sense_valid) {
			/*
			 * SG_IO wants current and deferred errors;
			 * sense_buffer[7] is the additional sense length,
			 * so 8 + it is the total sense size, capped at the
			 * buffer size.
			 */
			scsi_req(req)->sense_len =
				min(8 + cmd->sense_buffer[7],
				    SCSI_SENSE_BUFFERSIZE);
		}
		if (sense_current)
			*blk_statp = scsi_result_to_blk_status(cmd, result);
	} else if (blk_rq_bytes(req) == 0 && sense_current) {
		/*
		 * Zero-byte commands (e.g. flush) transfer no data and
		 * thus cannot use good_bytes != blk_rq_bytes(req) as the
		 * error signal; derive the status explicitly.
		 */
		*blk_statp = scsi_result_to_blk_status(cmd, result);
	}

	/*
	 * Recovered errors need reporting but are otherwise treated as
	 * success.  For passthrough requests the original result was
	 * already copied out above, so it is safe to clear here.
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		bool do_print = true;

		/*
		 * asc/ascq 0x0/0x1d: skip printing -- presumably ATA
		 * PASS-THROUGH with CK_COND=1, where the caller wants the
		 * ATA registers carried in the sense data, not a warning.
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			do_print = false;
		else if (req->rq_flags & RQF_QUIET)
			do_print = false;
		if (do_print)
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough, *blk_statp may already be set; clear it */
		*blk_statp = BLK_STS_OK;
	}

	/*
	 * Corner case: the status byte is non-zero but still "good"
	 * according to scsi_status_is_good() (e.g. CONDITION MET and
	 * related intermediate statuses).  Treat as success.
	 */
	if (status_byte(result) && scsi_status_is_good(result)) {
		result = 0;	/* does not necessarily mean "GOOD" */
		*blk_statp = BLK_STS_OK;
	}
	return result;
}
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
/**
 * scsi_io_completion - Completion processing for SCSI commands.
 * @cmd:	command that is finished.
 * @good_bytes:	number of processed bytes.
 *
 * Finishes off @good_bytes of the request.  If that completes the whole
 * request, the command is released and the queue restarted.  Otherwise
 * one of three things happens:
 *
 *   a) scsi_io_completion_reprep(): the request is unprepared, requeued
 *      and a new command built for it (forward progress was made, or the
 *      command form must change, e.g. READ(10) -> READ(6));
 *
 *   b) scsi_io_completion_action(): the request is requeued and retried
 *      with the same command, possibly after a delay;
 *
 *   c) scsi_end_request() with a non-OK blk_stat: fail the remainder.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	blk_status_t blk_stat = BLK_STS_OK;

	if (unlikely(result))	/* a nz result may or may not be an error */
		result = scsi_io_completion_nz_result(cmd, result, &blk_stat);

	if (unlikely(blk_rq_is_passthrough(req))) {
		/*
		 * scsi_result_to_blk_status may have reset the host_byte,
		 * so copy the (possibly updated) result out for SG_IO.
		 */
		scsi_req(req)->result = cmd->result;
	}

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Failed, zero-length commands always need to drop down to the
	 * retry code.  The fast path should return from this block.
	 */
	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
			return; /* no bytes remaining */
	}

	/* Kill remainder if no retries. */
	if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
			WARN_ONCE(true,
			    "Bytes remaining after failed, no-retry command");
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request, just queue the command up again.
	 */
	if (likely(result == 0))
		scsi_io_completion_reprep(cmd, q);
	else
		scsi_io_completion_action(cmd, result);
}
984
/*
 * Allocate a scatter-gather table sized for @req (chaining off the inline
 * sglist when needed) and map the request's physical segments into it.
 * Returns BLK_STS_OK, or BLK_STS_RESOURCE if allocation failed (caller
 * should requeue the request later).
 */
static blk_status_t scsi_init_sgtable(struct request *req,
		struct scsi_data_buffer *sdb)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(sg_alloc_table_chained(&sdb->table,
			blk_rq_nr_phys_segments(req), sdb->table.sgl,
			SCSI_INLINE_SG_CNT)))
		return BLK_STS_RESOURCE;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;	/* trim to segments actually mapped */
	sdb->length = blk_rq_payload_bytes(req);
	return BLK_STS_OK;
}
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
/**
 * scsi_init_io - SCSI I/O initialization function.
 * @cmd:  command descriptor we wish to initialize
 *
 * Maps the request's data (and, when present, integrity metadata) into
 * the command's scatter-gather tables.
 *
 * Returns:
 * * BLK_STS_OK       - on success
 * * BLK_STS_RESOURCE - if the failure is retryable
 * * BLK_STS_IOERR    - if the failure is fatal
 */
blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	blk_status_t ret;

	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
		return BLK_STS_IOERR;

	ret = scsi_init_sgtable(rq, &cmd->sdb);
	if (ret)
		return ret;

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (WARN_ON_ONCE(!prot_sdb)) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			ret = BLK_STS_IOERR;
			goto out_free_sgtables;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
				prot_sdb->table.sgl,
				SCSI_INLINE_PROT_SG_CNT)) {
			ret = BLK_STS_RESOURCE;
			goto out_free_sgtables;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(count > ivecs);
		BUG_ON(count > queue_max_integrity_segments(rq->q));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLK_STS_OK;
out_free_sgtables:
	scsi_mq_free_sgtables(cmd);
	return ret;
}
EXPORT_SYMBOL(scsi_init_io);
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
/**
 * scsi_initialize_rq - initialize struct scsi_cmnd partially
 * @rq: Request associated with the SCSI command to be initialized.
 *
 * Initializes the scsi_cmnd members that must be set before request
 * processing starts and that are NOT reinitialized when the command is
 * requeued (jiffies_at_alloc and retries survive requeues -- see
 * scsi_init_command()).
 */
static void scsi_initialize_rq(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	scsi_req_init(&cmd->req);
	init_rcu_head(&cmd->rcu);
	cmd->jiffies_at_alloc = jiffies;
	cmd->retries = 0;
}
1091
1092
1093
1094
1095
/*
 * blk-mq ->cleanup_rq callback: release per-command resources for a
 * request that was prepared but will not be completed or freed through
 * the normal SCSI completion path.
 */
static void scsi_cleanup_rq(struct request *rq)
{
	if (rq->rq_flags & RQF_DONTPREP) {
		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
		rq->rq_flags &= ~RQF_DONTPREP;
	}
}
1103
1104
1105 void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
1106 {
1107 struct scsi_device *sdev = cmd->device;
1108 struct Scsi_Host *shost = sdev->host;
1109 unsigned long flags;
1110
1111 if (shost->use_cmd_list) {
1112 spin_lock_irqsave(&sdev->list_lock, flags);
1113 list_add_tail(&cmd->list, &sdev->cmd_list);
1114 spin_unlock_irqrestore(&sdev->list_lock, flags);
1115 }
1116 }
1117
1118
1119 void scsi_del_cmd_from_list(struct scsi_cmnd *cmd)
1120 {
1121 struct scsi_device *sdev = cmd->device;
1122 struct Scsi_Host *shost = sdev->host;
1123 unsigned long flags;
1124
1125 if (shost->use_cmd_list) {
1126 spin_lock_irqsave(&sdev->list_lock, flags);
1127 BUG_ON(list_empty(&cmd->list));
1128 list_del_init(&cmd->list);
1129 spin_unlock_irqrestore(&sdev->list_lock, flags);
1130 }
1131 }
1132
1133
/*
 * Called after a request has been started: (re)initialize the scsi_cmnd
 * for dispatch.  Everything past cmd->req is wiped, so the handful of
 * fields that must survive a requeue are saved first and restored after.
 */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
	void *buf = cmd->sense_buffer;
	void *prot = cmd->prot_sdb;
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
	unsigned long jiffies_at_alloc;
	int retries;

	/* filesystem requests are first initialized here, not at alloc */
	if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
		flags |= SCMD_INITIALIZED;
		scsi_initialize_rq(rq);
	}

	jiffies_at_alloc = cmd->jiffies_at_alloc;
	retries = cmd->retries;
	/*
	 * Zero the command from just past cmd->req through the end,
	 * including the LLD's extra cmd_size bytes that follow the
	 * scsi_cmnd in the pdu.
	 */
	memset((char *)cmd + sizeof(cmd->req), 0,
		sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);

	cmd->device = dev;
	cmd->sense_buffer = buf;
	cmd->prot_sdb = prot;
	cmd->flags = flags;
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies_at_alloc;
	cmd->retries = retries;

	scsi_add_cmd_to_list(cmd);
}
1164
/* Set up a pass-through command (CDB supplied by the caller via scsi_req). */
static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	/*
	 * Passthrough requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		blk_status_t ret = scsi_init_io(cmd);
		if (unlikely(ret != BLK_STS_OK))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = scsi_req(req)->cmd_len;
	cmd->cmnd = scsi_req(req)->cmd;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = scsi_req(req)->retries;
	return BLK_STS_OK;
}
1192
1193
1194
1195
1196
/*
 * Setup a normal block command: requests from filesystems that still need
 * to be translated into a SCSI CDB by the upper-level driver's
 * init_command().  A device handler may veto or transform the request
 * first via its prep_fn.
 */
static blk_status_t scsi_setup_fs_cmnd(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
		blk_status_t ret = sdev->handler->prep_fn(sdev, req);
		if (ret != BLK_STS_OK)
			return ret;
	}

	/* point the command at the request's inline CDB storage */
	cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
1212
1213 static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
1214 struct request *req)
1215 {
1216 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1217
1218 if (!blk_rq_bytes(req))
1219 cmd->sc_data_direction = DMA_NONE;
1220 else if (rq_data_dir(req) == WRITE)
1221 cmd->sc_data_direction = DMA_TO_DEVICE;
1222 else
1223 cmd->sc_data_direction = DMA_FROM_DEVICE;
1224
1225 if (blk_rq_is_scsi(req))
1226 return scsi_setup_scsi_cmnd(sdev, req);
1227 else
1228 return scsi_setup_fs_cmnd(sdev, req);
1229 }
1230
/*
 * Check whether the device state allows @req to be prepared.  Returns
 * BLK_STS_OK to proceed, BLK_STS_RESOURCE to try again later, or
 * BLK_STS_IOERR to fail the request outright.
 */
static blk_status_t
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	switch (sdev->sdev_state) {
	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		/*
		 * If the device is offline we refuse to process any
		 * commands.  The device must be brought online
		 * before trying any recovery commands.
		 */
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to offline device\n");
		return BLK_STS_IOERR;
	case SDEV_DEL:
		/*
		 * If the device is fully deleted, we refuse to
		 * process any commands as well.
		 */
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to dead device\n");
		return BLK_STS_IOERR;
	case SDEV_BLOCK:
	case SDEV_CREATED_BLOCK:
		return BLK_STS_RESOURCE;
	case SDEV_QUIESCE:
		/*
		 * While quiesced, only RQF_PREEMPT requests go through;
		 * everything else is deferred.
		 */
		if (req && !(req->rq_flags & RQF_PREEMPT))
			return BLK_STS_RESOURCE;
		return BLK_STS_OK;
	default:
		/*
		 * For any other not fully online state we only allow
		 * RQF_PREEMPT (special) commands.  In particular any
		 * user-initiated command is not allowed.
		 */
		if (req && !(req->rq_flags & RQF_PREEMPT))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	}
}
1274
1275
1276
1277
1278
1279
1280
/*
 * scsi_dev_queue_ready: returns 1 if we can send a request to @sdev,
 * 0 otherwise.  Takes (and, on failure, releases) one device_busy count;
 * on success the count stays taken and is dropped by
 * scsi_device_unbusy() at completion.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		/* only the lone in-flight command may unblock the device */
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0)
			goto out_dec;
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}
1308
1309
1310
1311
1312
/*
 * scsi_target_queue_ready: checks whether the target can accept another
 * command from @sdev.  Enforces single-LUN exclusivity, the target queue
 * limit and the target_blocked throttle.  On failure the device is put
 * on the host's starved list so it gets kicked when capacity frees up.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		/* another LUN of this target currently owns it */
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	/* no per-target queue limit: nothing further to check */
	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}
1362
1363
1364
1365
1366
1367
/*
 * scsi_host_queue_ready: returns 1 if the host can accept another command
 * for @sdev, 0 otherwise.  On success one host_busy count stays taken and
 * the device is removed from the starved list; on failure the device is
 * added to the starved list and the count is released.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	unsigned int busy;

	if (scsi_host_in_recovery(shost))
		return 0;

	busy = atomic_inc_return(&shost->host_busy) - 1;
	if (atomic_read(&shost->host_blocked) > 0) {
		/* only the lone in-flight command may unblock the host */
		if (busy)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	scsi_dec_host_busy(shost);
	return 0;
}
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430 static bool scsi_mq_lld_busy(struct request_queue *q)
1431 {
1432 struct scsi_device *sdev = q->queuedata;
1433 struct Scsi_Host *shost;
1434
1435 if (blk_queue_dying(q))
1436 return false;
1437
1438 shost = sdev->host;
1439
1440
1441
1442
1443
1444
1445
1446 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1447 return true;
1448
1449 return false;
1450 }
1451
/*
 * Softirq completion handler: account the completion, decide the
 * command's disposition and act on it (finish, retry, requeue with
 * delay, or escalate to the error handler).
 */
static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	/* total time the command is allowed to spend being retried */
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	/* out of retry budget: force completion rather than retry forever */
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		scmd_printk(KERN_ERR, cmd,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		scsi_eh_scmd_add(cmd);
		break;
	}
}
1490
1491
1492
1493
1494
1495
1496
1497
/*
 * Function:    scsi_dispatch_cmd
 *
 * Purpose:     Dispatch a command to the low-level driver.
 *
 * Returns:     0 on success, or a SCSI_MLQUEUE_* busy code when the
 *              command must be requeued.  Terminal failures complete the
 *              command via ->scsi_done() and return 0.
 */
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/*
		 * in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present
		 */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  No further processing is expected
		 * until the device transitions out of the blocked state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		/* normalize unknown busy codes to HOST_BUSY */
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	cmd->scsi_done(cmd);
	return 0;
}
1571
1572
1573 static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
1574 {
1575 return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
1576 sizeof(struct scatterlist);
1577 }
1578
/*
 * Prepare a request for dispatch: initialize the scsi_cmnd, wire up the
 * inline scatterlists and translate the block request into a SCSI command.
 */
static blk_status_t scsi_mq_prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scatterlist *sg;

	scsi_init_command(sdev, cmd);

	cmd->request = req;
	cmd->tag = req->tag;
	cmd->prot_op = SCSI_PROT_NORMAL;

	/* inline data scatterlist lives right behind the LLD private data */
	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		/* protection scatterlist immediately follows the prot_sdb */
		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	blk_mq_start_request(req);

	return scsi_setup_cmnd(sdev, req);
}
1606
/* LLD completion callback: hand the request back to blk-mq exactly once. */
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
	/* guard against the LLD completing the same command twice */
	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
		return;
	trace_scsi_dispatch_cmd_done(cmd);

	/*
	 * If the block layer refused the completion (blk_mq_complete_request()
	 * returned false, e.g. because the request already timed out), clear
	 * the bit again so a later completion attempt is not lost.
	 */
	if (unlikely(!blk_mq_complete_request(cmd->request)))
		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
}
1622
1623 static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
1624 {
1625 struct request_queue *q = hctx->queue;
1626 struct scsi_device *sdev = q->queuedata;
1627
1628 atomic_dec(&sdev->device_busy);
1629 }
1630
/*
 * Reserve a slot in the device queue depth ("budget") before dispatch.
 * Returns true on success.  On failure, if nothing is in flight that could
 * rerun the queue on completion, schedule a delayed queue run ourselves.
 */
static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct scsi_device *sdev = q->queuedata;

	if (scsi_dev_queue_ready(q, sdev))
		return true;

	/* no in-flight command will kick the queue: do it after a delay */
	if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
		blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
	return false;
}
1643
/* blk-mq ->queue_rq: dispatch a single request to the SCSI LLD. */
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	blk_status_t ret;
	int reason;

	/*
	 * If the device is not in running state we will reject some or all
	 * commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		ret = scsi_prep_state_check(sdev, req);
		if (ret != BLK_STS_OK)
			goto out_put_budget;
	}

	ret = BLK_STS_RESOURCE;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_put_budget;
	if (!scsi_host_queue_ready(q, shost, sdev))
		goto out_dec_target_busy;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		ret = scsi_mq_prep_fn(req);
		if (ret != BLK_STS_OK)
			goto out_dec_host_busy;
		req->rq_flags |= RQF_DONTPREP;
	} else {
		/* already prepared on an earlier pass: just restart it */
		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
		blk_mq_start_request(req);
	}

	cmd->flags &= SCMD_PRESERVED_FLAGS;
	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	if (bd->last)
		cmd->flags |= SCMD_LAST;

	scsi_init_cmd_errh(cmd);
	cmd->scsi_done = scsi_mq_done;

	reason = scsi_dispatch_cmd(cmd);
	if (reason) {
		scsi_set_blocked(cmd, reason);
		ret = BLK_STS_RESOURCE;
		goto out_dec_host_busy;
	}

	return BLK_STS_OK;

out_dec_host_busy:
	scsi_dec_host_busy(shost);
out_dec_target_busy:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
out_put_budget:
	scsi_mq_put_budget(hctx);
	switch (ret) {
	case BLK_STS_OK:
		break;
	case BLK_STS_RESOURCE:
		/*
		 * Only report DEV_RESOURCE (no automatic rerun) when an
		 * in-flight or blocked command is guaranteed to rerun the
		 * queue later.
		 */
		if (atomic_read(&sdev->device_busy) ||
		    scsi_device_blocked(sdev))
			ret = BLK_STS_DEV_RESOURCE;
		break;
	default:
		if (unlikely(!scsi_device_online(sdev)))
			scsi_req(req)->result = DID_NO_CONNECT << 16;
		else
			scsi_req(req)->result = DID_ERROR << 16;
		/*
		 * Make sure to release all allocated resources when
		 * we hit an error, as we will never see this command
		 * again.
		 */
		if (req->rq_flags & RQF_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		break;
	}
	return ret;
}
1730
1731 static enum blk_eh_timer_return scsi_timeout(struct request *req,
1732 bool reserved)
1733 {
1734 if (reserved)
1735 return BLK_EH_RESET_TIMER;
1736 return scsi_times_out(req);
1737 }
1738
/*
 * blk-mq ->init_request: allocate the per-command sense buffer and, if the
 * host supports protection information, locate the protection sdb that
 * lives behind the inline data scatterlist.
 */
static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{
	struct Scsi_Host *shost = set->driver_data;
	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct scatterlist *sg;

	if (unchecked_isa_dma)
		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
						    GFP_KERNEL, numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;
	cmd->req.sense = cmd->sense_buffer;

	if (scsi_host_get_prot(shost)) {
		/* prot_sdb sits behind cmd + LLD data + inline sgl */
		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
			shost->hostt->cmd_size;
		cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
	}

	return 0;
}
1763
1764 static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
1765 unsigned int hctx_idx)
1766 {
1767 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1768
1769 scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
1770 cmd->sense_buffer);
1771 }
1772
1773 static int scsi_map_queues(struct blk_mq_tag_set *set)
1774 {
1775 struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
1776
1777 if (shost->hostt->map_queues)
1778 return shost->hostt->map_queues(shost);
1779 return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
1780 }
1781
/* Apply a host's DMA and scatter/gather capabilities to a request queue. */
void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
	struct device *dev = shost->dma_dev;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SG_MAX_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	if (dev->dma_mask) {
		/* cap transfer size by what the DMA engine can map at once */
		shost->max_sectors = min_t(unsigned int, shost->max_sectors,
				dma_max_mapping_size(dev) >> SECTOR_SHIFT);
	}
	blk_queue_max_hw_sectors(q, shost->max_sectors);
	if (shost->unchecked_isa_dma)
		blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, shost->max_segment_size);
	blk_queue_virt_boundary(q, shost->virt_boundary_mask);
	dma_set_max_seg_size(dev, queue_max_segment_size(q));

	/*
	 * Set a reasonable default alignment: the larger of 4 bytes and
	 * the platform's minimum DMA cache alignment.  Devices that need a
	 * bigger alignment can increase it later.
	 */
	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
}
1823 EXPORT_SYMBOL_GPL(__scsi_init_queue);
1824
/* blk-mq operations for hosts whose driver has no ->commit_rqs method. */
static const struct blk_mq_ops scsi_mq_ops_no_commit = {
	.get_budget	= scsi_mq_get_budget,
	.put_budget	= scsi_mq_put_budget,
	.queue_rq	= scsi_queue_rq,
	.complete	= scsi_softirq_done,
	.timeout	= scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
	.show_rq	= scsi_show_rq,
#endif
	.init_request	= scsi_mq_init_request,
	.exit_request	= scsi_mq_exit_request,
	.initialize_rq_fn = scsi_initialize_rq,
	.cleanup_rq	= scsi_cleanup_rq,
	.busy		= scsi_mq_lld_busy,
	.map_queues	= scsi_map_queues,
};
1841
1842
1843 static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
1844 {
1845 struct request_queue *q = hctx->queue;
1846 struct scsi_device *sdev = q->queuedata;
1847 struct Scsi_Host *shost = sdev->host;
1848
1849 shost->hostt->commit_rqs(shost, hctx->queue_num);
1850 }
1851
/* blk-mq operations for hosts that do provide a ->commit_rqs method. */
static const struct blk_mq_ops scsi_mq_ops = {
	.get_budget	= scsi_mq_get_budget,
	.put_budget	= scsi_mq_put_budget,
	.queue_rq	= scsi_queue_rq,
	.commit_rqs	= scsi_commit_rqs,
	.complete	= scsi_softirq_done,
	.timeout	= scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
	.show_rq	= scsi_show_rq,
#endif
	.init_request	= scsi_mq_init_request,
	.exit_request	= scsi_mq_exit_request,
	.initialize_rq_fn = scsi_initialize_rq,
	.cleanup_rq	= scsi_cleanup_rq,
	.busy		= scsi_mq_lld_busy,
	.map_queues	= scsi_map_queues,
};
1869
1870 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
1871 {
1872 sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
1873 if (IS_ERR(sdev->request_queue))
1874 return NULL;
1875
1876 sdev->request_queue->queuedata = sdev;
1877 __scsi_init_queue(sdev->host, sdev->request_queue);
1878 blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue);
1879 return sdev->request_queue;
1880 }
1881
/*
 * Compute the per-command allocation size (scsi_cmnd + LLD private data +
 * inline scatterlists) and allocate the host's shared blk-mq tag set.
 */
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
	unsigned int cmd_size, sgl_size;

	sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
			 scsi_mq_inline_sgl_size(shost));
	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
	if (scsi_host_get_prot(shost))
		/* room for the protection sdb plus its inline scatterlist */
		cmd_size += sizeof(struct scsi_data_buffer) +
			sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;

	memset(&shost->tag_set, 0, sizeof(shost->tag_set));
	/* pick the ops table matching the driver's ->commit_rqs support */
	if (shost->hostt->commit_rqs)
		shost->tag_set.ops = &scsi_mq_ops;
	else
		shost->tag_set.ops = &scsi_mq_ops_no_commit;
	shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
	shost->tag_set.queue_depth = shost->can_queue;
	shost->tag_set.cmd_size = cmd_size;
	shost->tag_set.numa_node = NUMA_NO_NODE;
	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	shost->tag_set.flags |=
		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
	shost->tag_set.driver_data = shost;

	return blk_mq_alloc_tag_set(&shost->tag_set);
}
1909
/* Release the host's shared blk-mq tag set allocated by scsi_mq_setup_tags(). */
void scsi_mq_destroy_tags(struct Scsi_Host *shost)
{
	blk_mq_free_tag_set(&shost->tag_set);
}
1914
1915
1916
1917
1918
1919
1920
1921
/**
 * scsi_device_from_queue - return sdev associated with a request_queue
 * @q: The request queue to return the sdev from
 *
 * Return the sdev associated with a request queue or NULL if the
 * request_queue does not reference a SCSI device.  On success a reference
 * is taken that the caller must drop with put_device().
 */
struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
	struct scsi_device *sdev = NULL;

	/* only trust ->queuedata when the queue really belongs to SCSI */
	if (q->mq_ops == &scsi_mq_ops_no_commit ||
	    q->mq_ops == &scsi_mq_ops)
		sdev = q->queuedata;
	if (!sdev || !get_device(&sdev->sdev_gendev))
		sdev = NULL;

	return sdev;
}
1934 EXPORT_SYMBOL_GPL(scsi_device_from_queue);
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
/**
 * scsi_block_requests - Utility function used by low-level drivers to prevent
 * further commands from being queued to the device.
 * @shost: host in question
 *
 * There is no timer nor any other means by which the requests get unblocked
 * other than the low-level driver calling scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
1956 EXPORT_SYMBOL(scsi_block_requests);
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
/**
 * scsi_unblock_requests - Utility function used by low-level drivers to allow
 * further commands to be queued to the device.
 * @shost: host in question
 *
 * Clears the self-blocked flag and restarts the host's queues so that any
 * deferred commands are dispatched again.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
1983 EXPORT_SYMBOL(scsi_unblock_requests);
1984
/* Boot-time setup: create the slab cache for scsi_data_buffer objects. */
int __init scsi_init_queue(void)
{
	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	return 0;
}
1997
/* Tear down the slab caches used by the SCSI queueing code. */
void scsi_exit_queue(void)
{
	/* kmem_cache_destroy() tolerates NULL, so no existence checks needed */
	kmem_cache_destroy(scsi_sense_cache);
	kmem_cache_destroy(scsi_sense_isadma_cache);
	kmem_cache_destroy(scsi_sdb_cache);
}
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error.
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		/* MODE SELECT(10): 8-byte parameter header precedes the data */
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		/* MODE SELECT(6): 4-byte header, no long LBA support */
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;


		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
2080 EXPORT_SYMBOL_GPL(scsi_mode_select);
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099 int
2100 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
2101 unsigned char *buffer, int len, int timeout, int retries,
2102 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
2103 {
2104 unsigned char cmd[12];
2105 int use_10_for_ms;
2106 int header_length;
2107 int result, retry_count = retries;
2108 struct scsi_sense_hdr my_sshdr;
2109
2110 memset(data, 0, sizeof(*data));
2111 memset(&cmd[0], 0, 12);
2112 cmd[1] = dbd & 0x18;
2113 cmd[2] = modepage;
2114
2115
2116 if (!sshdr)
2117 sshdr = &my_sshdr;
2118
2119 retry:
2120 use_10_for_ms = sdev->use_10_for_ms;
2121
2122 if (use_10_for_ms) {
2123 if (len < 8)
2124 len = 8;
2125
2126 cmd[0] = MODE_SENSE_10;
2127 cmd[8] = len;
2128 header_length = 8;
2129 } else {
2130 if (len < 4)
2131 len = 4;
2132
2133 cmd[0] = MODE_SENSE;
2134 cmd[4] = len;
2135 header_length = 4;
2136 }
2137
2138 memset(buffer, 0, len);
2139
2140 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
2141 sshdr, timeout, retries, NULL);
2142
2143
2144
2145
2146
2147
2148 if (use_10_for_ms && !scsi_status_is_good(result) &&
2149 driver_byte(result) == DRIVER_SENSE) {
2150 if (scsi_sense_valid(sshdr)) {
2151 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
2152 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
2153
2154
2155
2156 sdev->use_10_for_ms = 0;
2157 goto retry;
2158 }
2159 }
2160 }
2161
2162 if(scsi_status_is_good(result)) {
2163 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
2164 (modepage == 6 || modepage == 8))) {
2165
2166 header_length = 0;
2167 data->length = 13;
2168 data->medium_type = 0;
2169 data->device_specific = 0;
2170 data->longlba = 0;
2171 data->block_descriptor_length = 0;
2172 } else if(use_10_for_ms) {
2173 data->length = buffer[0]*256 + buffer[1] + 2;
2174 data->medium_type = buffer[2];
2175 data->device_specific = buffer[3];
2176 data->longlba = buffer[4] & 0x01;
2177 data->block_descriptor_length = buffer[6]*256
2178 + buffer[7];
2179 } else {
2180 data->length = buffer[0] + 1;
2181 data->medium_type = buffer[1];
2182 data->device_specific = buffer[2];
2183 data->block_descriptor_length = buffer[3];
2184 }
2185 data->header_length = header_length;
2186 } else if ((status_byte(result) == CHECK_CONDITION) &&
2187 scsi_sense_valid(sshdr) &&
2188 sshdr->sense_key == UNIT_ATTENTION && retry_count) {
2189 retry_count--;
2190 goto retry;
2191 }
2192
2193 return result;
2194 }
2195 EXPORT_SYMBOL(scsi_mode_sense);
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr: outgoing sense header, may be NULL
 *		(NOTE(review): relies on scsi_sense_valid() handling a NULL
 *		sshdr — confirm against its definition)
 *
 *	Returns zero if unsuccessful or an error if TUR failed.  For
 *	removable media, UNIT_ATTENTION sets ->changed flag.
 */
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	int result;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, 1, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	return result;
}
2228 EXPORT_SYMBOL(scsi_test_unit_ready);
2229
2230
2231
2232
2233
2234
2235
2236
2237
/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	/* each target state lists the source states it may be entered from */
	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_CANCEL:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
2354 EXPORT_SYMBOL(scsi_device_set_state);
2355
2356
2357
2358
2359
2360
2361
2362
/**
 * 	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
		/* inquiry data changed: rescan before notifying userspace */
		scsi_rescan_device(&sdev->sdev_gendev);
		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
		envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
		break;
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
		break;
	case SDEV_EVT_LUN_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
		break;
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
		envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
		break;
	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}
2403
2404
2405
2406
2407
2408
2409
2410
/**
 * 	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch all queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	enum scsi_device_event evt_type;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	/* convert pending-event bits (set from completion context) to events */
	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
		if (test_and_clear_bit(evt_type, sdev->pending_events))
			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		/* drain the list under the lock, emit events outside it */
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}
2443
2444
2445
2446
2447
2448
2449
2450
/**
 * 	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.  Ownership of @evt passes
 *	to the event thread, which frees it after emission.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
2470 EXPORT_SYMBOL_GPL(sdev_evt_send);
2471
2472
2473
2474
2475
2476
2477
2478
/**
 * 	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event, or NULL on allocation failure.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any (none needed today) */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
	case SDEV_EVT_LUN_CHANGE_REPORTED:
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
2506 EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516 void sdev_evt_send_simple(struct scsi_device *sdev,
2517 enum scsi_device_event evt_type, gfp_t gfpflags)
2518 {
2519 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2520 if (!evt) {
2521 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2522 evt_type);
2523 return;
2524 }
2525
2526 sdev_evt_send(sdev, evt);
2527 }
2528 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	Transitions the device to SDEV_QUIESCE after marking the queue
 *	pm-only and draining in-flight requests via a freeze/unfreeze cycle.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;

	/*
	 * It is allowed to call scsi_device_quiesce() multiple times from
	 * the same context but concurrent scsi_device_quiesce() calls are
	 * not allowed.
	 */
	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);

	if (sdev->quiesced_by == current)
		return 0;

	blk_set_pm_only(q);

	blk_mq_freeze_queue(q);
	/*
	 * Ensure that the effect of blk_set_pm_only() will be visible
	 * for percpu_ref_tryget() callers that occur after the queue
	 * unfreeze even if the queue was already frozen before this function
	 * was called. See also https://lwn.net/Articles/573497/.
	 */
	synchronize_rcu();
	blk_mq_unfreeze_queue(q);

	mutex_lock(&sdev->state_mutex);
	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err == 0)
		sdev->quiesced_by = current;
	else
		/* undo the pm-only marking if the state change failed */
		blk_clear_pm_only(q);
	mutex_unlock(&sdev->state_mutex);

	return err;
}
2583 EXPORT_SYMBOL(scsi_device_quiesce);
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.  Must be called with user context, may sleep.
 */
void scsi_device_resume(struct scsi_device *sdev)
{
	/*
	 * check if the device state changed in between. resume only if it
	 * was quiesced by us (quiesced_by is set) or is still SDEV_QUIESCE.
	 */
	mutex_lock(&sdev->state_mutex);
	if (sdev->quiesced_by) {
		sdev->quiesced_by = NULL;
		blk_clear_pm_only(sdev->request_queue);
	}
	if (sdev->sdev_state == SDEV_QUIESCE)
		scsi_device_set_state(sdev, SDEV_RUNNING);
	mutex_unlock(&sdev->state_mutex);
}
2609 EXPORT_SYMBOL(scsi_device_resume);
2610
/* starget_for_each_device() callback: quiesce a single device. */
static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}
2616
/* Quiesce every device attached to @starget. */
void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
2622 EXPORT_SYMBOL(scsi_target_quiesce);
2623
/* starget_for_each_device() callback: resume a single device. */
static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}
2629
/* Resume every device attached to @starget. */
void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
2635 EXPORT_SYMBOL(scsi_target_resume);
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
/**
 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device.  Does not sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_BLOCK state (which must be
 * a legal transition).  When the device is in this state, command processing
 * is paused until the device leaves the SDEV_BLOCK state.
 */
int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		/* fall back for devices that have not finished creation */
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK, stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	blk_mq_quiesce_queue_nowait(q);
	return 0;
}
2672 EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
/**
 * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device and wait until all
 * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished.  May sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 */
static int scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;

	mutex_lock(&sdev->state_mutex);
	err = scsi_internal_device_block_nowait(sdev);
	if (err == 0)
		/* wait for in-flight dispatches to drain */
		blk_mq_quiesce_queue(q);
	mutex_unlock(&sdev->state_mutex);

	return err;
}
2702
/* Unquiesce the device's request queue so dispatch can resume. */
void scsi_start_queue(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;

	blk_mq_unquiesce_queue(q);
}
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
/**
 * scsi_internal_device_unblock_nowait - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device.  Does not
 * sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 */
int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	/* only RUNNING and TRANSPORT_OFFLINE are valid unblock targets */
	switch (new_state) {
	case SDEV_RUNNING:
	case SDEV_TRANSPORT_OFFLINE:
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	switch (sdev->sdev_state) {
	case SDEV_BLOCK:
	case SDEV_TRANSPORT_OFFLINE:
		sdev->sdev_state = new_state;
		break;
	case SDEV_CREATED_BLOCK:
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
		break;
	case SDEV_CANCEL:
	case SDEV_OFFLINE:
		/* not blocked: keep the state, just restart the queue */
		break;
	default:
		return -EINVAL;
	}
	scsi_start_queue(sdev);

	return 0;
}
2762 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device.  May
 * sleep (takes the device state mutex).
 *
 * Returns zero if successful or a negative error code upon failure.
 */
static int scsi_internal_device_unblock(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	int ret;

	mutex_lock(&sdev->state_mutex);
	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
	mutex_unlock(&sdev->state_mutex);

	return ret;
}
2789
/* starget_for_each_device() callback: block one device, warn on failure. */
static void
device_block(struct scsi_device *sdev, void *data)
{
	int ret;

	ret = scsi_internal_device_block(sdev);

	WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
		  dev_name(&sdev->sdev_gendev), ret);
}
2800
/* device_for_each_child() callback: block every device of a target child. */
static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}
2809
/*
 * Block all devices of @dev: if @dev is itself a target, block its devices
 * directly, otherwise walk its children and block each target's devices.
 */
void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
2819 EXPORT_SYMBOL_GPL(scsi_target_block);
2820
/* starget_for_each_device() callback: unblock one device; @data carries
 * the new scsi_device_state to transition to. */
static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}
2826
/* device_for_each_child() callback: unblock every device of a target child. */
static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}
2835
/*
 * Unblock all devices of @dev into @new_state: if @dev is itself a target,
 * unblock its devices directly, otherwise walk its children.
 */
void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
2845 EXPORT_SYMBOL_GPL(scsi_target_unblock);
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page, or NULL when
 * @offset lies beyond the end of the list.
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	/* walk the list until we find the element that contains *offset */
	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
2895 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2896
2897
2898
2899
2900
/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously
 *	mapped with scsi_kmap_atomic_sg()
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
2906
/**
 * sdev_disable_disk_events - suppress disk (media change) events for @sdev
 * @sdev: SCSI device
 *
 * Bumps a disable-depth counter; calls nest, so each call must be paired
 * with sdev_enable_disk_events().
 */
void sdev_disable_disk_events(struct scsi_device *sdev)
{
	atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);
2912
/**
 * sdev_enable_disk_events - re-enable disk (media change) events for @sdev
 * @sdev: SCSI device
 *
 * Drops one level of the disable-depth counter taken by
 * sdev_disable_disk_events().  An unbalanced call (depth already <= 0)
 * triggers a one-time warning and is ignored instead of going negative.
 */
void sdev_enable_disk_events(struct scsi_device *sdev)
{
	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
		return;
	atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
/**
 * scsi_vpd_lun_id - return a unique device identification
 * @sdev: SCSI device
 * @id: buffer for the identification
 * @id_len: length of the buffer
 *
 * Copies a unique device identification into @id based on the
 * information in the VPD page 0x83 (device identification) of the
 * device, preferring the "best" designator found (see priority notes
 * below).
 *
 * Returns the length of the identification (the snprintf()-style length
 * for string formats, or the raw designator length for SCSI name
 * strings), or a negative errno: -ENXIO if no VPD page 0x83 is cached,
 * -EINVAL if the buffer is too small or no usable designator was found.
 */
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
	/*
	 * cur_id_type/cur_id_size track the designator currently held in
	 * @id so later descriptors only replace it when they are
	 * preferred.  0xff means "nothing selected yet".
	 */
	u8 cur_id_type = 0xff;
	u8 cur_id_size = 0;
	const unsigned char *d, *cur_id_str;
	const struct scsi_vpd *vpd_pg83;
	int id_size = -EINVAL;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/*
	 * Designator preference implied by the per-case guards below,
	 * from most to least preferred:
	 * - SCSI name string (0x8)
	 * - NAA (0x3)
	 * - EUI-64 (0x2), longer sizes beating shorter ones
	 * - T10 Vendor ID (0x1), accepted only when nothing better
	 *   has been seen.
	 */

	/*
	 * The formatted id needs room for a prefixed descriptor plus a
	 * terminating NUL, hence the 21-byte minimum.
	 */
	if (id_len < 21) {
		rcu_read_unlock();
		return -EINVAL;
	}

	memset(id, 0, id_len);
	/* Designation descriptors start after the 4-byte page header */
	d = vpd_pg83->data + 4;
	while (d < vpd_pg83->data + vpd_pg83->len) {
		/* Skip designators not associated with the LUN itself */
		if ((d[1] & 0x30) != 0x00)
			goto next_desig;

		switch (d[1] & 0xf) {
		case 0x1:
			/* T10 Vendor ID */
			if (cur_id_size > d[3])
				break;
			/* Only use if nothing of higher type was selected */
			if (cur_id_type > 0x01 && cur_id_type != 0xff)
				break;
			cur_id_size = d[3];
			if (cur_id_size + 4 > id_len)
				cur_id_size = id_len - 4;
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			id_size = snprintf(id, id_len, "t10.%*pE",
					   cur_id_size, cur_id_str);
			break;
		case 0x2:
			/* EUI-64 */
			if (cur_id_size > d[3])
				break;
			/* Keep an equally-sized NAA id over an EUI-64 one */
			if (cur_id_type == 0x3 &&
			    cur_id_size == d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
				break;
			case 12:
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
				break;
			default:
				/* Non-standard EUI-64 length: ignore it */
				cur_id_size = 0;
				break;
			}
			break;
		case 0x3:
			/* NAA */
			if (cur_id_size > d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
						   cur_id_str);
				break;
			default:
				/* Non-standard NAA length: ignore it */
				cur_id_size = 0;
				break;
			}
			break;
		case 0x8:
			/*
			 * SCSI name string.
			 * NOTE(review): the "+ 4" here skews the size
			 * comparison against the other cases (which
			 * compare cur_id_size > d[3] directly) —
			 * presumably to bias toward name strings; confirm
			 * against upstream history before changing.
			 */
			if (cur_id_size + 4 > d[3])
				break;
			/* Prefer others over a truncated name string */
			if (cur_id_size && d[3] > id_len)
				break;
			cur_id_size = id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			/* Copy raw, leaving room for the NUL from memset() */
			if (cur_id_size >= id_len)
				cur_id_size = id_len - 1;
			memcpy(id, cur_id_str, cur_id_size);
			/* Decrease priority for truncated descriptor */
			if (cur_id_size != id_size)
				cur_id_size = 6;
			break;
		default:
			break;
		}
next_desig:
		/* Advance past this descriptor: 4-byte header + length */
		d += d[3] + 4;
	}
	rcu_read_unlock();

	return id_size;
}
EXPORT_SYMBOL(scsi_vpd_lun_id);
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086 int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
3087 {
3088 const unsigned char *d;
3089 const struct scsi_vpd *vpd_pg83;
3090 int group_id = -EAGAIN, rel_port = -1;
3091
3092 rcu_read_lock();
3093 vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
3094 if (!vpd_pg83) {
3095 rcu_read_unlock();
3096 return -ENXIO;
3097 }
3098
3099 d = vpd_pg83->data + 4;
3100 while (d < vpd_pg83->data + vpd_pg83->len) {
3101 switch (d[1] & 0xf) {
3102 case 0x4:
3103
3104 rel_port = get_unaligned_be16(&d[6]);
3105 break;
3106 case 0x5:
3107
3108 group_id = get_unaligned_be16(&d[6]);
3109 break;
3110 default:
3111 break;
3112 }
3113 d += d[3] + 4;
3114 }
3115 rcu_read_unlock();
3116
3117 if (group_id >= 0 && rel_id && rel_port != -1)
3118 *rel_id = rel_port;
3119
3120 return group_id;
3121 }
3122 EXPORT_SYMBOL(scsi_vpd_tpg_id);