Lines matching refs: q  (SCSI midlayer, drivers/scsi/scsi_lib.c)
Each entry gives the source line number, the matching line, and the enclosing function; declaration sites are tagged "local" or "argument".
122 struct request_queue *q = cmd->request->q; in scsi_mq_requeue_cmd() local
125 blk_mq_kick_requeue_list(q); in scsi_mq_requeue_cmd()
144 struct request_queue *q = device->request_queue; in __scsi_queue_insert() local
166 if (q->mq_ops) { in __scsi_queue_insert()
170 spin_lock_irqsave(q->queue_lock, flags); in __scsi_queue_insert()
171 blk_requeue_request(q, cmd->request); in __scsi_queue_insert()
173 spin_unlock_irqrestore(q->queue_lock, flags); in __scsi_queue_insert()
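Lines 122–173 pair the two requeue paths: scsi_mq_requeue_cmd() parks the request on the blk-mq requeue list and kicks it, while __scsi_queue_insert() on a legacy queue must hold queue_lock around blk_requeue_request(). A condensed sketch of that split (hypothetical helper name; assumes the single-argument blk_mq_requeue_request() of this kernel generation, and note the real legacy path defers the re-run to a work item, cf. scsi_requeue_run_queue() at lines 503–507 below):

static void requeue_cmd_sketch(struct scsi_cmnd *cmd)
{
    struct request_queue *q = cmd->device->request_queue;
    unsigned long flags;

    if (q->mq_ops) {
        /* blk-mq: put the request on the requeue list, then kick it */
        blk_mq_requeue_request(cmd->request);
        blk_mq_kick_requeue_list(q);
    } else {
        /* legacy: blk_requeue_request() requires queue_lock held */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);
        blk_run_queue(q);
    }
}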
244 blk_execute_rq(req->q, NULL, req, 1); in scsi_execute()
328 static void scsi_kick_queue(struct request_queue *q) in scsi_kick_queue() argument
330 if (q->mq_ops) in scsi_kick_queue()
331 blk_mq_start_hw_queues(q); in scsi_kick_queue()
333 blk_run_queue(q); in scsi_kick_queue()
485 static void scsi_run_queue(struct request_queue *q) in scsi_run_queue() argument
487 struct scsi_device *sdev = q->queuedata; in scsi_run_queue()
494 if (q->mq_ops) in scsi_run_queue()
495 blk_mq_start_stopped_hw_queues(q, false); in scsi_run_queue()
497 blk_run_queue(q); in scsi_run_queue()
503 struct request_queue *q; in scsi_requeue_run_queue() local
506 q = sdev->request_queue; in scsi_requeue_run_queue()
507 scsi_run_queue(q); in scsi_requeue_run_queue()
528 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) in scsi_requeue_command() argument
534 spin_lock_irqsave(q->queue_lock, flags); in scsi_requeue_command()
538 blk_requeue_request(q, req); in scsi_requeue_command()
539 spin_unlock_irqrestore(q->queue_lock, flags); in scsi_requeue_command()
541 scsi_run_queue(q); in scsi_requeue_command()
692 struct request_queue *q = sdev->request_queue; in scsi_end_request() local
702 if (blk_queue_add_random(q)) in scsi_end_request()
721 blk_mq_start_stopped_hw_queues(q, true); in scsi_end_request()
728 spin_lock_irqsave(q->queue_lock, flags); in scsi_end_request()
730 spin_unlock_irqrestore(q->queue_lock, flags); in scsi_end_request()
735 scsi_run_queue(q); in scsi_end_request()
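scsi_end_request() (lines 692–735) restarts queues from the completion side: on blk-mq the stopped hw queues are kicked asynchronously (the true argument runs them from kblockd rather than in completion context), while the legacy path finishes the request under queue_lock and then re-runs the queue. A rough sketch of just that tail (helper name invented; the request bookkeeping inside the lock is elided):

static void end_request_restart_sketch(struct request_queue *q)
{
    unsigned long flags;

    if (q->mq_ops) {
        /* 'true': restart the hw queues async, from kblockd */
        blk_mq_start_stopped_hw_queues(q, true);
        return;
    }

    spin_lock_irqsave(q->queue_lock, flags);
    /* ... the request is finished/unprepped here under the lock ... */
    spin_unlock_irqrestore(q->queue_lock, flags);
    scsi_run_queue(q);
}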
820 struct request_queue *q = cmd->device->request_queue; in scsi_io_completion() local
1062 if (q->mq_ops) { in scsi_io_completion()
1068 scsi_requeue_command(q, cmd); in scsi_io_completion()
1097 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); in scsi_init_sgtable()
1129 if (!rq->q->mq_ops) { in scsi_init_io()
1160 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); in scsi_init_io()
1167 count = blk_rq_map_integrity_sg(rq->q, rq->bio, in scsi_init_io()
1170 BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q))); in scsi_init_io()
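Lines 1097–1170 are the scatter-gather mapping step: blk_rq_map_sg() folds the request's bios into the command's sg table, and when DIX protection is active blk_rq_count_integrity_sg()/blk_rq_map_integrity_sg() do the same for the protection segments. A minimal sketch of the data-path half (hypothetical helper name; the real scsi_init_sgtable() also records the byte length):

static int map_data_sketch(struct request *req, struct scsi_data_buffer *sdb)
{
    int count;

    /* fold the request's bio segments into the preallocated sg table */
    count = blk_rq_map_sg(req->q, req, sdb->table.sgl);

    /* merging can only shrink the segment count, never grow it */
    BUG_ON(count > sdb->table.nents);
    sdb->table.nents = count;

    return BLKPREP_OK;
}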
1343 scsi_prep_return(struct request_queue *q, struct request *req, int ret) in scsi_prep_return() argument
1345 struct scsi_device *sdev = q->queuedata; in scsi_prep_return()
1366 blk_delay_queue(q, SCSI_QUEUE_DELAY); in scsi_prep_return()
1375 static int scsi_prep_fn(struct request_queue *q, struct request *req) in scsi_prep_fn() argument
1377 struct scsi_device *sdev = q->queuedata; in scsi_prep_fn()
1393 return scsi_prep_return(q, req, ret); in scsi_prep_fn()
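scsi_prep_fn()/scsi_prep_return() (lines 1343–1393) implement the legacy ->prep_rq_fn contract: the return value is a BLKPREP_* code, and on BLKPREP_DEFER the queue is delayed by SCSI_QUEUE_DELAY rather than re-run immediately, so a resource-starved device is not spun on. A simplified sketch of that convention (hypothetical helper name; the real function also handles unpreparing and failing the request on the kill path):

static int prep_return_sketch(struct request_queue *q, struct request *req, int ret)
{
    switch (ret) {
    case BLKPREP_KILL:
        /* unsalvageable request: it will be completed with an error */
        break;
    case BLKPREP_DEFER:
        /* resources are short: retry after a small delay */
        blk_delay_queue(q, SCSI_QUEUE_DELAY);
        break;
    default:
        req->cmd_flags |= REQ_DONTPREP;   /* prep succeeded; don't prep again */
    }
    return ret;
}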
1396 static void scsi_unprep_fn(struct request_queue *q, struct request *req) in scsi_unprep_fn() argument
1407 static inline int scsi_dev_queue_ready(struct request_queue *q, in scsi_dev_queue_ready() argument
1424 if (!q->mq_ops) in scsi_dev_queue_ready()
1425 blk_delay_queue(q, SCSI_QUEUE_DELAY); in scsi_dev_queue_ready()
1500 static inline int scsi_host_queue_ready(struct request_queue *q, in scsi_host_queue_ready() argument
1562 static int scsi_lld_busy(struct request_queue *q) in scsi_lld_busy() argument
1564 struct scsi_device *sdev = q->queuedata; in scsi_lld_busy()
1567 if (blk_queue_dying(q)) in scsi_lld_busy()
1587 static void scsi_kill_request(struct request *req, struct request_queue *q) in scsi_kill_request() argument
1766 static void scsi_request_fn(struct request_queue *q) in scsi_request_fn() argument
1767 __releases(q->queue_lock) in scsi_request_fn()
1768 __acquires(q->queue_lock) in scsi_request_fn()
1770 struct scsi_device *sdev = q->queuedata; in scsi_request_fn()
1787 req = blk_peek_request(q); in scsi_request_fn()
1794 scsi_kill_request(req, q); in scsi_request_fn()
1798 if (!scsi_dev_queue_ready(q, sdev)) in scsi_request_fn()
1804 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) in scsi_request_fn()
1807 spin_unlock_irq(q->queue_lock); in scsi_request_fn()
1826 if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) { in scsi_request_fn()
1838 if (!scsi_host_queue_ready(q, shost, sdev)) in scsi_request_fn()
1859 spin_lock_irq(q->queue_lock); in scsi_request_fn()
1862 spin_lock_irq(q->queue_lock); in scsi_request_fn()
1879 spin_lock_irq(q->queue_lock); in scsi_request_fn()
1880 blk_requeue_request(q, req); in scsi_request_fn()
1884 blk_delay_queue(q, SCSI_QUEUE_DELAY); in scsi_request_fn()
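The scsi_request_fn() entries above (lines 1766–1884) trace the legacy dispatch loop: requests are peeked and started under queue_lock, the lock is dropped around the actual dispatch, and busy conditions lead to a requeue plus a short delay. A heavily abridged skeleton of that shape, with the same sparse annotations (helper name is made up; all readiness checks and error paths are omitted):

static void request_fn_skeleton(struct request_queue *q)
    __releases(q->queue_lock)
    __acquires(q->queue_lock)
{
    struct request *req;

    for (;;) {
        /* peek the next request; NULL means the queue is drained */
        req = blk_peek_request(q);
        if (!req)
            break;

        /* dequeue it, via the tag machinery when the queue is tagged */
        if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
            blk_start_request(req);

        /* dispatch happens without queue_lock held */
        spin_unlock_irq(q->queue_lock);
        /* ... build the scsi_cmnd and send it to the LLD ... */
        spin_lock_irq(q->queue_lock);
    }
}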
1902 struct scsi_device *sdev = req->q->queuedata; in scsi_mq_prep_fn()
1970 struct request_queue *q = req->q; in scsi_queue_rq() local
1971 struct scsi_device *sdev = q->queuedata; in scsi_queue_rq()
1985 if (!scsi_dev_queue_ready(q, sdev)) in scsi_queue_rq()
1989 if (!scsi_host_queue_ready(q, shost, sdev)) in scsi_queue_rq()
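scsi_queue_rq() (lines 1970–1989) is the blk-mq mirror of that loop: the same scsi_dev_queue_ready()/scsi_host_queue_ready() gates are checked, but instead of delaying the queue a busy status is returned and blk-mq re-dispatches later. A sketch of the gating, assuming the BLK_MQ_RQ_QUEUE_* return codes of this kernel generation (helper name is made up):

static int queue_rq_gate_sketch(struct request *req)
{
    struct request_queue *q = req->q;
    struct scsi_device *sdev = q->queuedata;
    struct Scsi_Host *shost = sdev->host;

    if (!scsi_dev_queue_ready(q, sdev))
        return BLK_MQ_RQ_QUEUE_BUSY;    /* per-device queue depth exhausted */
    if (!scsi_host_queue_ready(q, shost, sdev))
        return BLK_MQ_RQ_QUEUE_BUSY;    /* host is saturated or blocked */

    /* ... prep the command and hand it to the LLD ... */
    return BLK_MQ_RQ_QUEUE_OK;
}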
2101 static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) in __scsi_init_queue() argument
2108 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, in __scsi_init_queue()
2116 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); in __scsi_init_queue()
2119 blk_queue_max_hw_sectors(q, shost->max_sectors); in __scsi_init_queue()
2120 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); in __scsi_init_queue()
2121 blk_queue_segment_boundary(q, shost->dma_boundary); in __scsi_init_queue()
2124 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); in __scsi_init_queue()
2127 q->limits.cluster = 0; in __scsi_init_queue()
2134 blk_queue_dma_alignment(q, 0x03); in __scsi_init_queue()
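Lines 2101–2134 show __scsi_init_queue() translating the host's capabilities into block-layer queue limits (including clearing q->limits.cluster when the host forbids segment clustering). A toy example of the struct Scsi_Host fields that feed those calls, with invented values for illustration:

static void fill_host_limits_example(struct Scsi_Host *shost)
{
    shost->sg_tablesize = 128;          /* -> blk_queue_max_segments()     */
    shost->max_sectors  = 1024;         /* -> blk_queue_max_hw_sectors()   */
    shost->dma_boundary = 0xffffffff;   /* -> blk_queue_segment_boundary() */
}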
2140 struct request_queue *q; in __scsi_alloc_queue() local
2142 q = blk_init_queue(request_fn, NULL); in __scsi_alloc_queue()
2143 if (!q) in __scsi_alloc_queue()
2145 __scsi_init_queue(shost, q); in __scsi_alloc_queue()
2146 return q; in __scsi_alloc_queue()
2152 struct request_queue *q; in scsi_alloc_queue() local
2154 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); in scsi_alloc_queue()
2155 if (!q) in scsi_alloc_queue()
2158 blk_queue_prep_rq(q, scsi_prep_fn); in scsi_alloc_queue()
2159 blk_queue_unprep_rq(q, scsi_unprep_fn); in scsi_alloc_queue()
2160 blk_queue_softirq_done(q, scsi_softirq_done); in scsi_alloc_queue()
2161 blk_queue_rq_timed_out(q, scsi_times_out); in scsi_alloc_queue()
2162 blk_queue_lld_busy(q, scsi_lld_busy); in scsi_alloc_queue()
2163 return q; in scsi_alloc_queue()
2945 struct request_queue *q = sdev->request_queue; in scsi_internal_device_block() local
2962 if (q->mq_ops) { in scsi_internal_device_block()
2963 blk_mq_stop_hw_queues(q); in scsi_internal_device_block()
2965 spin_lock_irqsave(q->queue_lock, flags); in scsi_internal_device_block()
2966 blk_stop_queue(q); in scsi_internal_device_block()
2967 spin_unlock_irqrestore(q->queue_lock, flags); in scsi_internal_device_block()
2994 struct request_queue *q = sdev->request_queue; in scsi_internal_device_unblock() local
3014 if (q->mq_ops) { in scsi_internal_device_unblock()
3015 blk_mq_start_stopped_hw_queues(q, false); in scsi_internal_device_unblock()
3017 spin_lock_irqsave(q->queue_lock, flags); in scsi_internal_device_unblock()
3018 blk_start_queue(q); in scsi_internal_device_unblock()
3019 spin_unlock_irqrestore(q->queue_lock, flags); in scsi_internal_device_unblock()
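scsi_internal_device_block()/scsi_internal_device_unblock() (lines 2945–3019) are the symmetric stop/start pair; note that blk_stop_queue()/blk_start_queue() require queue_lock, while the blk-mq helpers take no lock. Folded into one hypothetical helper for illustration:

static void set_queue_stopped_sketch(struct request_queue *q, bool stop)
{
    unsigned long flags;

    if (q->mq_ops) {
        if (stop)
            blk_mq_stop_hw_queues(q);
        else
            blk_mq_start_stopped_hw_queues(q, false);
        return;
    }

    /* legacy stop/start must run under queue_lock */
    spin_lock_irqsave(q->queue_lock, flags);
    if (stop)
        blk_stop_queue(q);
    else
        blk_start_queue(q);
    spin_unlock_irqrestore(q->queue_lock, flags);
}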