Searched refs:mq_ops (Results 1 – 13 of 13) sorted by relevance

/linux-4.4.14/block/
blk-flush.c
135 if (rq->q->mq_ops) { in blk_flush_queue_rq()
206 if (q->mq_ops) in blk_flush_complete_seq()
229 if (q->mq_ops) { in flush_end_io()
234 hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu); in flush_end_io()
245 if (!q->mq_ops) in flush_end_io()
268 WARN_ON(q->mq_ops); in flush_end_io()
272 if (q->mq_ops) in flush_end_io()
320 if (q->mq_ops) { in blk_kick_flush()
327 hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu); in blk_kick_flush()
360 hctx = q->mq_ops->map_queue(q, ctx->cpu); in mq_flush_data_end_io()
[all …]
blk-timeout.c
162 if (req->q->mq_ops) { in blk_abort_request()
199 if (!q->mq_ops && !q->rq_timed_out_fn) in blk_add_timer()
212 if (!q->mq_ops) in blk_add_timer()
blk-sysfs.c
55 if (!q->request_fn && !q->mq_ops) in queue_requests_store()
334 if (!q->mq_ops || !q->mq_ops->poll) in queue_poll_store()
596 if (!q->mq_ops) in blk_release_queue()
657 if (q->mq_ops) in blk_register_queue()
682 if (q->mq_ops) in blk_unregister_queue()
blk-mq.c
246 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
256 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
301 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); in blk_mq_free_request()
560 struct blk_mq_ops *ops = req->q->mq_ops; in blk_mq_rq_timed_out()
777 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_run_hw_queue()
1008 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_request()
1036 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_requests()
1172 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1187 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1205 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, in blk_mq_direct_issue_request()
[all …]
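
Note: nearly every blk-mq.c hit above is the same dispatch idiom: the submitting CPU's software context is translated to a hardware context through the driver-supplied map_queue hook. A minimal sketch of that idiom, assuming the linux-4.4 blk-mq headers (cpu_to_hctx is a made-up helper name; the block-layer callers pass ctx->cpu, where struct blk_mq_ctx is private to block/):

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    /* Translate a submitting CPU to the hardware queue that services it,
     * via the driver-supplied map_queue callback in struct blk_mq_ops.
     * Only valid on queues where q->mq_ops is non-NULL. */
    static struct blk_mq_hw_ctx *cpu_to_hctx(struct request_queue *q, int cpu)
    {
            return q->mq_ops->map_queue(q, cpu);
    }
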
blk.h
44 if (!q->mq_ops) in blk_get_flush_queue()
47 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_get_flush_queue()
blk-core.c
286 if (q->mq_ops) { in blk_sync_queue()
520 if (q->mq_ops) in blk_set_queue_dying()
575 if (!q->mq_ops) in blk_cleanup_queue()
587 if (q->mq_ops) in blk_cleanup_queue()
1294 if (q->mq_ops) in blk_get_request()
1466 if (q->mq_ops) { in __blk_put_request()
1500 if (q->mq_ops) in blk_put_request()
1625 if (q->mq_ops) in blk_attempt_plug_merge()
1673 if (q->mq_ops) in blk_plug_queued_count()
2189 if (q->mq_ops) { in blk_insert_cloned_request()
[all …]
blk-exec.c
67 if (q->mq_ops) { in blk_execute_rq_nowait()
blk-mq-tag.c
304 data->hctx = data->q->mq_ops->map_queue(data->q, in bt_get()
692 if (q->mq_ops) { in blk_mq_unique_tag()
693 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); in blk_mq_unique_tag()
blk-merge.c
550 return !q->mq_ops && req->special; in req_no_special_merge()
/linux-4.4.14/drivers/md/
dm.c
1079 return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special); in tio_from_request()
1112 if (!md->queue->mq_ops && run_queue) in rq_completed()
1131 else if (!md->queue->mq_ops) in free_rq_clone()
1140 if (!md->queue->mq_ops) in free_rq_clone()
1171 if (!rq->q->mq_ops) in dm_end_request()
1183 if (!rq->q->mq_ops) { in dm_unprep_request()
1190 else if (!tio->md->queue->mq_ops) in dm_unprep_request()
1216 if (!rq->q->mq_ops) in dm_requeue_original_request()
1240 if (!q->mq_ops) in stop_queue()
1258 if (!q->mq_ops) in start_queue()
[all …]
dm-table.c
901 if (q->mq_ops) in dm_table_set_type()
908 if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) { in dm_table_set_type()
1694 if (queue->mq_ops) in dm_table_run_md_queue_async()
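
Note: the dm-table.c hits show device-mapper deciding whether a table may be request-based on blk-mq by inspecting the mq_ops pointer of every underlying device's queue. A hedged sketch of that per-device test, assuming linux-4.4 (bdev_uses_blk_mq is a made-up name; dm_table_set_type() open-codes the same check):

    #include <linux/blkdev.h>
    #include <linux/types.h>

    /* True if the queue behind @bdev is driven by blk-mq, i.e. its mq_ops
     * table is populated. dm_table_set_type() applies this test to each
     * underlying device before choosing a blk-mq request-based table type. */
    static bool bdev_uses_blk_mq(struct block_device *bdev)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            return q && q->mq_ops;
    }
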
/linux-4.4.14/drivers/scsi/
scsi_lib.c
167 if (q->mq_ops) { in __scsi_queue_insert()
331 if (q->mq_ops) in scsi_kick_queue()
495 if (q->mq_ops) in scsi_run_queue()
1063 if (q->mq_ops) { in scsi_io_completion()
1130 if (!rq->q->mq_ops) { in scsi_init_io()
1424 if (!q->mq_ops) in scsi_dev_queue_ready()
2971 if (q->mq_ops) { in scsi_internal_device_block()
3023 if (q->mq_ops) { in scsi_internal_device_unblock()
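
Note: the scsi_lib.c hits, like the blk-core.c ones, follow a dual-path idiom: wherever the queue has to be kicked, the code branches on q->mq_ops and calls either the blk-mq helper or the legacy one. A sketch of that branch, assuming linux-4.4 (run_either_queue is illustrative; scsi_run_queue() at line 495 branches in roughly this way):

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    /* Kick a queue on whichever I/O path it uses: blk-mq queues are run
     * through their hardware contexts, legacy queues through request_fn. */
    static void run_either_queue(struct request_queue *q)
    {
            if (q->mq_ops)
                    blk_mq_run_hw_queues(q, false);
            else
                    blk_run_queue(q);
    }
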
/linux-4.4.14/include/linux/
blkdev.h
309 struct blk_mq_ops *mq_ops; member
604 return q->request_fn || q->mq_ops; in queue_is_rq_based()
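
Note: the blkdev.h hits tie the listing together: mq_ops is the struct blk_mq_ops pointer embedded in struct request_queue, and queue_is_rq_based() counts a queue as request-based if it has either a legacy request_fn or an mq_ops table. Every "if (q->mq_ops)" above is therefore a test for "is this a blk-mq queue". A minimal helper of that shape, assuming linux-4.4 (queue_uses_blk_mq is a made-up name; later kernels added an upstream helper much like it, queue_is_mq(), while this tree open-codes the check):

    #include <linux/blkdev.h>
    #include <linux/types.h>

    /* A queue is managed by blk-mq iff its mq_ops table is set; otherwise
     * it is a legacy request_fn queue or a purely bio-based one. */
    static inline bool queue_uses_blk_mq(struct request_queue *q)
    {
            return q->mq_ops != NULL;
    }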