Lines matching refs:shost in drivers/scsi/scsi_lib.c
311 struct Scsi_Host *shost = sdev->host; in scsi_device_unbusy() local
315 atomic_dec(&shost->host_busy); in scsi_device_unbusy()
319 if (unlikely(scsi_host_in_recovery(shost) && in scsi_device_unbusy()
320 (shost->host_failed || shost->host_eh_scheduled))) { in scsi_device_unbusy()
321 spin_lock_irqsave(shost->host_lock, flags); in scsi_device_unbusy()
322 scsi_eh_wakeup(shost); in scsi_device_unbusy()
323 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_device_unbusy()
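The matches at 311-323 are most of scsi_device_unbusy(). A reconstruction of the whole function, assuming a mid-4.x scsi_lib.c; the starget and sdev accounting lines do not contain shost and are filled in from context, so details may differ by version:

void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        struct scsi_target *starget = scsi_target(sdev);
        unsigned long flags;

        /* drop the host-wide in-flight count first */
        atomic_dec(&shost->host_busy);
        if (starget->can_queue > 0)
                atomic_dec(&starget->target_busy);

        /*
         * If error handling is pending and this was a command it was
         * waiting on, wake the EH thread under the host lock.
         */
        if (unlikely(scsi_host_in_recovery(shost) &&
                     (shost->host_failed || shost->host_eh_scheduled))) {
                spin_lock_irqsave(shost->host_lock, flags);
                scsi_eh_wakeup(shost);
                spin_unlock_irqrestore(shost->host_lock, flags);
        }

        atomic_dec(&sdev->device_busy);
}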
346 struct Scsi_Host *shost = current_sdev->host; in scsi_single_lun_run() local
351 spin_lock_irqsave(shost->host_lock, flags); in scsi_single_lun_run()
353 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_single_lun_run()
363 spin_lock_irqsave(shost->host_lock, flags); in scsi_single_lun_run()
373 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_single_lun_run()
375 spin_lock_irqsave(shost->host_lock, flags); in scsi_single_lun_run()
380 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_single_lun_run()
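The lock/unlock pairs at 351-380 come from the pattern of dropping the host lock around every blk_run_queue() call. A condensed sketch of scsi_single_lun_run(); the starget_sdev_user handling and the same_target_siblings walk are reconstructed from memory of this era's source:

static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_target *starget = scsi_target(current_sdev);
        struct scsi_device *sdev, *tmp;
        unsigned long flags;

        /* release the single-LUN claim under the host lock */
        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(current_sdev->request_queue);

        /* then kick the sibling LUNs, dropping the lock around each run */
        spin_lock_irqsave(shost->host_lock, flags);
        if (!starget->starget_sdev_user) {
                list_for_each_entry_safe(sdev, tmp, &starget->devices,
                                         same_target_siblings) {
                        if (sdev == current_sdev || scsi_device_get(sdev))
                                continue;
                        spin_unlock_irqrestore(shost->host_lock, flags);
                        blk_run_queue(sdev->request_queue);
                        spin_lock_irqsave(shost->host_lock, flags);
                        scsi_device_put(sdev);
                }
        }
        spin_unlock_irqrestore(shost->host_lock, flags);
}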
403 static inline bool scsi_host_is_busy(struct Scsi_Host *shost) in scsi_host_is_busy() argument
405 if (shost->can_queue > 0 && in scsi_host_is_busy()
406 atomic_read(&shost->host_busy) >= shost->can_queue) in scsi_host_is_busy()
408 if (atomic_read(&shost->host_blocked) > 0) in scsi_host_is_busy()
410 if (shost->host_self_blocked) in scsi_host_is_busy()
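Only the conditions match shost here; the lines the search skipped are just the return statements. Filled in, the predicate reads (a reconstruction, not a verbatim quote):

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
        if (shost->can_queue > 0 &&
            atomic_read(&shost->host_busy) >= shost->can_queue)
                return true;
        if (atomic_read(&shost->host_blocked) > 0)
                return true;
        if (shost->host_self_blocked)
                return true;
        return false;
}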
415 static void scsi_starved_list_run(struct Scsi_Host *shost) in scsi_starved_list_run() argument
421 spin_lock_irqsave(shost->host_lock, flags); in scsi_starved_list_run()
422 list_splice_init(&shost->starved_list, &starved_list); in scsi_starved_list_run()
437 if (scsi_host_is_busy(shost)) in scsi_starved_list_run()
445 &shost->starved_list); in scsi_starved_list_run()
462 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_starved_list_run()
467 spin_lock_irqsave(shost->host_lock, flags); in scsi_starved_list_run()
470 list_splice(&starved_list, &shost->starved_list); in scsi_starved_list_run()
471 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_starved_list_run()
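Lines 421-471 show the splice-out/run/splice-back shape of scsi_starved_list_run(). A condensed reconstruction; the scsi_target_is_busy() check and the blk_get_queue() guard are filled in from context and may differ by version:

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
        LIST_HEAD(starved_list);
        struct scsi_device *sdev;
        unsigned long flags;

        /* steal the whole list so the lock can be dropped while running queues */
        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->starved_list, &starved_list);

        while (!list_empty(&starved_list)) {
                struct request_queue *slq;

                if (scsi_host_is_busy(shost))
                        break;

                sdev = list_entry(starved_list.next,
                                  struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                if (scsi_target_is_busy(scsi_target(sdev))) {
                        list_move_tail(&sdev->starved_entry,
                                       &shost->starved_list);
                        continue;
                }

                /*
                 * Hold a queue reference so a racing scsi_remove_device()
                 * cannot free the queue once the host lock is dropped.
                 */
                slq = sdev->request_queue;
                if (!blk_get_queue(slq))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(slq);
                blk_put_queue(slq);
                spin_lock_irqsave(shost->host_lock, flags);
        }
        /* put any unprocessed entries back */
        list_splice(&starved_list, &shost->starved_list);
        spin_unlock_irqrestore(shost->host_lock, flags);
}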
547 void scsi_run_host_queues(struct Scsi_Host *shost) in scsi_run_host_queues() argument
551 shost_for_each_device(sdev, shost) in scsi_run_host_queues()
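The body between 547 and 551 is only the sdev declaration; the complete function is small enough to reconstruct in full (assuming this era's source):

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        /* kick every device queue on the host */
        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}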
638 struct Scsi_Host *shost = sdev->host; in scsi_mq_uninit_cmd() local
644 if (shost->use_cmd_list) { in scsi_mq_uninit_cmd()
1445 static inline int scsi_target_queue_ready(struct Scsi_Host *shost, in scsi_target_queue_ready() argument
1452 spin_lock_irq(shost->host_lock); in scsi_target_queue_ready()
1455 spin_unlock_irq(shost->host_lock); in scsi_target_queue_ready()
1459 spin_unlock_irq(shost->host_lock); in scsi_target_queue_ready()
1486 spin_lock_irq(shost->host_lock); in scsi_target_queue_ready()
1487 list_move_tail(&sdev->starved_entry, &shost->starved_list); in scsi_target_queue_ready()
1488 spin_unlock_irq(shost->host_lock); in scsi_target_queue_ready()
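Lines 1445-1488 are the single-LUN gate and the starved-list fallback of scsi_target_queue_ready(). A condensed reconstruction; the goto labels and the target_busy/target_blocked accounting are assumptions from memory of this era's source:

static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
                                          struct scsi_device *sdev)
{
        struct scsi_target *starget = scsi_target(sdev);
        unsigned int busy;

        /* single-LUN targets: only one sdev may own the target at a time */
        if (starget->single_lun) {
                spin_lock_irq(shost->host_lock);
                if (starget->starget_sdev_user &&
                    starget->starget_sdev_user != sdev) {
                        spin_unlock_irq(shost->host_lock);
                        return 0;
                }
                starget->starget_sdev_user = sdev;
                spin_unlock_irq(shost->host_lock);
        }

        if (starget->can_queue <= 0)
                return 1;

        busy = atomic_inc_return(&starget->target_busy) - 1;
        if (atomic_read(&starget->target_blocked) > 0) {
                if (busy)
                        goto starved;
                /* unblock once target_blocked counts down to zero */
                if (atomic_dec_return(&starget->target_blocked) > 0)
                        goto out_dec;
        }

        if (busy >= starget->can_queue)
                goto starved;

        return 1;

starved:
        /* park the device on the host's starved list for a later retry */
        spin_lock_irq(shost->host_lock);
        list_move_tail(&sdev->starved_entry, &shost->starved_list);
        spin_unlock_irq(shost->host_lock);
out_dec:
        atomic_dec(&starget->target_busy);
        return 0;
}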
1501 struct Scsi_Host *shost, in scsi_host_queue_ready() argument
1506 if (scsi_host_in_recovery(shost)) in scsi_host_queue_ready()
1509 busy = atomic_inc_return(&shost->host_busy) - 1; in scsi_host_queue_ready()
1510 if (atomic_read(&shost->host_blocked) > 0) { in scsi_host_queue_ready()
1517 if (atomic_dec_return(&shost->host_blocked) > 0) in scsi_host_queue_ready()
1521 shost_printk(KERN_INFO, shost, in scsi_host_queue_ready()
1525 if (shost->can_queue > 0 && busy >= shost->can_queue) in scsi_host_queue_ready()
1527 if (shost->host_self_blocked) in scsi_host_queue_ready()
1532 spin_lock_irq(shost->host_lock); in scsi_host_queue_ready()
1535 spin_unlock_irq(shost->host_lock); in scsi_host_queue_ready()
1541 spin_lock_irq(shost->host_lock); in scsi_host_queue_ready()
1543 list_add_tail(&sdev->starved_entry, &shost->starved_list); in scsi_host_queue_ready()
1544 spin_unlock_irq(shost->host_lock); in scsi_host_queue_ready()
1546 atomic_dec(&shost->host_busy); in scsi_host_queue_ready()
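Every listed line from 1506 to 1546 slots into the following reconstruction of scsi_host_queue_ready(); the goto labels and return values are filled in from context:

static inline int scsi_host_queue_ready(struct request_queue *q,
                                        struct Scsi_Host *shost,
                                        struct scsi_device *sdev)
{
        unsigned int busy;

        if (scsi_host_in_recovery(shost))
                return 0;

        busy = atomic_inc_return(&shost->host_busy) - 1;
        if (atomic_read(&shost->host_blocked) > 0) {
                if (busy)
                        goto starved;

                /* unblock after host_blocked iterates down to zero */
                if (atomic_dec_return(&shost->host_blocked) > 0)
                        goto out_dec;

                SCSI_LOG_MLQUEUE(3,
                        shost_printk(KERN_INFO, shost,
                                     "unblocking host at zero depth\n"));
        }

        if (shost->can_queue > 0 && busy >= shost->can_queue)
                goto starved;
        if (shost->host_self_blocked)
                goto starved;

        /* we're OK to process the command, so we can't be starved */
        if (!list_empty(&sdev->starved_entry)) {
                spin_lock_irq(shost->host_lock);
                if (!list_empty(&sdev->starved_entry))
                        list_del_init(&sdev->starved_entry);
                spin_unlock_irq(shost->host_lock);
        }

        return 1;

starved:
        spin_lock_irq(shost->host_lock);
        if (list_empty(&sdev->starved_entry))
                list_add_tail(&sdev->starved_entry, &shost->starved_list);
        spin_unlock_irq(shost->host_lock);
out_dec:
        atomic_dec(&shost->host_busy);
        return 0;
}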
1565 struct Scsi_Host *shost; in scsi_lld_busy() local
1570 shost = sdev->host; in scsi_lld_busy()
1578 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) in scsi_lld_busy()
1592 struct Scsi_Host *shost; in scsi_kill_request() local
1600 shost = sdev->host; in scsi_kill_request()
1611 atomic_inc(&shost->host_busy); in scsi_kill_request()
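The increment at 1611 looks backwards for a kill path, but it balances the decrement at line 315: completing the dead request goes through scsi_device_unbusy(), which drops host_busy again. A sketch of scsi_kill_request(), reconstructed from memory of this era's source:

static void scsi_kill_request(struct request *req, struct request_queue *q)
{
        struct scsi_cmnd *cmd = req->special;
        struct scsi_device *sdev;
        struct scsi_target *starget;
        struct Scsi_Host *shost;

        blk_start_request(req);

        scmd_printk(KERN_INFO, cmd, "killing request\n");

        sdev = cmd->device;
        starget = scsi_target(sdev);
        shost = sdev->host;
        scsi_init_cmd_errh(cmd);
        cmd->result = DID_NO_CONNECT << 16;

        /*
         * The completion path will call scsi_device_unbusy(), which
         * decrements these counters, so bump them as the normal issue
         * path would have.
         */
        atomic_inc(&sdev->device_busy);
        atomic_inc(&shost->host_busy);
        if (starget->can_queue > 0)
                atomic_inc(&starget->target_busy);

        blk_complete_request(req);
}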
1771 struct Scsi_Host *shost; in scsi_request_fn() local
1779 shost = sdev->host; in scsi_request_fn()
1827 spin_lock_irq(shost->host_lock); in scsi_request_fn()
1830 &shost->starved_list); in scsi_request_fn()
1831 spin_unlock_irq(shost->host_lock); in scsi_request_fn()
1835 if (!scsi_target_queue_ready(shost, sdev)) in scsi_request_fn()
1838 if (!scsi_host_queue_ready(q, shost, sdev)) in scsi_request_fn()
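Lines 1827-1838 show the gating order inside the legacy scsi_request_fn() dispatch loop: tag starvation first, then target readiness, then host readiness. A fragment-level sketch; the blk_queue_tagged()/REQ_QUEUED branch is an assumption from memory of this era's source:

        /* inside the dispatch loop of scsi_request_fn() */
        if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
                /* no tag available: park on the starved list and retry later */
                spin_lock_irq(shost->host_lock);
                if (list_empty(&sdev->starved_entry))
                        list_add_tail(&sdev->starved_entry,
                                      &shost->starved_list);
                spin_unlock_irq(shost->host_lock);
                goto not_ready;
        }

        if (!scsi_target_queue_ready(shost, sdev))
                goto not_ready;

        if (!scsi_host_queue_ready(q, shost, sdev))
                goto host_not_ready;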
1903 struct Scsi_Host *shost = sdev->host; in scsi_mq_prep_fn() local
1924 if (shost->use_cmd_list) { in scsi_mq_prep_fn()
1930 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; in scsi_mq_prep_fn()
1933 if (scsi_host_get_prot(shost)) { in scsi_mq_prep_fn()
1936 shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) * in scsi_mq_prep_fn()
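The pointer arithmetic at 1930 and 1936 implies a per-command memory layout that matches cmd_size in scsi_mq_setup_tags() (line 2194). A sketch of the relevant part of scsi_mq_prep_fn(), with the layout spelled out; the surrounding lines are reconstructed from context:

        /*
         * Per-command allocation layout:
         *   struct scsi_cmnd
         *   LLD private data (shost->hostt->cmd_size bytes)
         *   scatterlist[min(sg_tablesize, SCSI_MAX_SG_SEGMENTS)]
         *   struct scsi_data_buffer + prot scatterlist
         *       (only if scsi_host_get_prot(shost) is set)
         */
        sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
        cmd->sdb.table.sgl = sg;

        if (scsi_host_get_prot(shost)) {
                cmd->prot_sdb = (void *)sg +
                        min_t(unsigned int,
                              shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
                        sizeof(struct scatterlist);
                memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
                cmd->prot_sdb->table.sgl =
                        (struct scatterlist *)(cmd->prot_sdb + 1);
        }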
1972 struct Scsi_Host *shost = sdev->host; in scsi_queue_rq() local
1987 if (!scsi_target_queue_ready(shost, sdev)) in scsi_queue_rq()
1989 if (!scsi_host_queue_ready(q, shost, sdev)) in scsi_queue_rq()
2020 atomic_dec(&shost->host_busy); in scsi_queue_rq()
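The blk-mq path in scsi_queue_rq() applies the same readiness gates as scsi_request_fn() and unwinds them in reverse on failure; the decrement matched at 2020 is the host-level unwind. A fragment-level sketch; the label names are assumptions from memory of this era's source:

        if (!scsi_target_queue_ready(shost, sdev))
                goto out_dec_device_busy;
        if (!scsi_host_queue_ready(q, shost, sdev))
                goto out_dec_target_busy;

        /* ... build the command and hand it to the LLD ... */

out_dec_host_busy:
        atomic_dec(&shost->host_busy);
out_dec_target_busy:
        if (scsi_target(sdev)->can_queue > 0)
                atomic_dec(&scsi_target(sdev)->target_busy);
out_dec_device_busy:
        atomic_dec(&sdev->device_busy);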
2080 static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) in scsi_calculate_bounce_limit() argument
2085 if (shost->unchecked_isa_dma) in scsi_calculate_bounce_limit()
2094 host_dev = scsi_get_device(shost); in scsi_calculate_bounce_limit()
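Lines 2080-2094 frame the bounce-limit calculation. A reconstruction of the full function, assuming this era's source (PCI_DMA_BUS_IS_PHYS and dma_max_pfn() existed then; both are version-dependent):

static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
        struct device *host_dev;
        u64 bounce_limit = 0xffffffff;

        /* ISA DMA can only reach the low 16MB */
        if (shost->unchecked_isa_dma)
                return BLK_BOUNCE_ISA;
        /*
         * Platforms with virtual DMA translation hardware have no
         * practical limit.
         */
        if (!PCI_DMA_BUS_IS_PHYS)
                return BLK_BOUNCE_ANY;

        host_dev = scsi_get_device(shost);
        if (host_dev && host_dev->dma_mask)
                bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;

        return bounce_limit;
}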
2101 static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) in __scsi_init_queue() argument
2103 struct device *dev = shost->dma_dev; in __scsi_init_queue()
2108 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, in __scsi_init_queue()
2111 if (scsi_host_prot_dma(shost)) { in __scsi_init_queue()
2112 shost->sg_prot_tablesize = in __scsi_init_queue()
2113 min_not_zero(shost->sg_prot_tablesize, in __scsi_init_queue()
2115 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); in __scsi_init_queue()
2116 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); in __scsi_init_queue()
2119 blk_queue_max_hw_sectors(q, shost->max_sectors); in __scsi_init_queue()
2120 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); in __scsi_init_queue()
2121 blk_queue_segment_boundary(q, shost->dma_boundary); in __scsi_init_queue()
2122 dma_set_seg_boundary(dev, shost->dma_boundary); in __scsi_init_queue()
2126 if (!shost->use_clustering) in __scsi_init_queue()
2137 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, in __scsi_alloc_queue() argument
2145 __scsi_init_queue(shost, q); in __scsi_alloc_queue()
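Between 2137 and 2145 the only missing steps are allocating the queue and checking for failure. A reconstruction of __scsi_alloc_queue(), the legacy (non-mq) queue constructor:

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
                                         request_fn_proc *request_fn)
{
        struct request_queue *q;

        q = blk_init_queue(request_fn, NULL);
        if (!q)
                return NULL;
        /* apply the host's DMA and segment limits to the new queue */
        __scsi_init_queue(shost, q);
        return q;
}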
2186 int scsi_mq_setup_tags(struct Scsi_Host *shost) in scsi_mq_setup_tags() argument
2190 tbl_size = shost->sg_tablesize; in scsi_mq_setup_tags()
2194 cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; in scsi_mq_setup_tags()
2195 if (scsi_host_get_prot(shost)) in scsi_mq_setup_tags()
2198 memset(&shost->tag_set, 0, sizeof(shost->tag_set)); in scsi_mq_setup_tags()
2199 shost->tag_set.ops = &scsi_mq_ops; in scsi_mq_setup_tags()
2200 shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1; in scsi_mq_setup_tags()
2201 shost->tag_set.queue_depth = shost->can_queue; in scsi_mq_setup_tags()
2202 shost->tag_set.cmd_size = cmd_size; in scsi_mq_setup_tags()
2203 shost->tag_set.numa_node = NUMA_NO_NODE; in scsi_mq_setup_tags()
2204 shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; in scsi_mq_setup_tags()
2205 shost->tag_set.flags |= in scsi_mq_setup_tags()
2206 BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); in scsi_mq_setup_tags()
2207 shost->tag_set.driver_data = shost; in scsi_mq_setup_tags()
2209 return blk_mq_alloc_tag_set(&shost->tag_set); in scsi_mq_setup_tags()
2212 void scsi_mq_destroy_tags(struct Scsi_Host *shost) in scsi_mq_destroy_tags() argument
2214 blk_mq_free_tag_set(&shost->tag_set); in scsi_mq_destroy_tags()
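For context, a sketch of how these two entry points are used from host registration and teardown; the call sites (scsi_add_host_with_dma() and scsi_host_dev_release() in hosts.c) and the tag_set.tags guard are assumptions from memory, not part of this listing:

        /* during scsi_add_host_with_dma(), hypothetical sketch */
        if (shost_use_blk_mq(shost)) {
                error = scsi_mq_setup_tags(shost);
                if (error)
                        goto fail;
        }

        /* and on host release */
        if (shost_use_blk_mq(shost) && shost->tag_set.tags)
                scsi_mq_destroy_tags(shost);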
2233 void scsi_block_requests(struct Scsi_Host *shost) in scsi_block_requests() argument
2235 shost->host_self_blocked = 1; in scsi_block_requests()
2259 void scsi_unblock_requests(struct Scsi_Host *shost) in scsi_unblock_requests() argument
2261 shost->host_self_blocked = 0; in scsi_unblock_requests()
2262 scsi_run_host_queues(shost); in scsi_unblock_requests()
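The matched lines at 2235 and 2261-2262 are the entire bodies of this exported pair; only the braces and EXPORT_SYMBOL lines are filled in (assumed, since they don't contain shost). Self-blocking is just a flag that scsi_host_is_busy() and scsi_host_queue_ready() consult, so unblocking must also rerun the queues:

void scsi_block_requests(struct Scsi_Host *shost)
{
        shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

void scsi_unblock_requests(struct Scsi_Host *shost)
{
        shost->host_self_blocked = 0;
        /* restart everything that queued up while blocked */
        scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);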