Searched refs:hctx (Results 1 – 22 of 22) sorted by relevance

/linux-4.4.14/block/
blk-mq.c
36 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
41 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_pending() argument
45 for (i = 0; i < hctx->ctx_map.size; i++) in blk_mq_hctx_has_pending()
46 if (hctx->ctx_map.map[i].word) in blk_mq_hctx_has_pending()
52 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx, in get_bm() argument
55 return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word]; in get_bm()
58 #define CTX_TO_BIT(hctx, ctx) \ argument
59 ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
64 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
67 struct blk_align_bitmap *bm = get_bm(hctx, ctx); in blk_mq_hctx_mark_pending()
[all …]
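
The blk-mq.c hits all touch the per-hardware-queue pending bitmap: each software context owns one bit in hctx->ctx_map, with the word chosen by index_hw / bits_per_word and the bit by index_hw & (bits_per_word - 1), a mask that is only correct because bits_per_word is a power of two. A minimal user-space sketch of the same arithmetic (names hypothetical; the kernel uses an atomic set_bit() where this ORs):

#include <stdio.h>

#define BITS_PER_WORD 32UL	/* stand-in for ctx_map.bits_per_word */

static unsigned long map[4];	/* one bit per software context */

static void mark_pending(unsigned int index_hw)
{
	unsigned long *word = &map[index_hw / BITS_PER_WORD];

	*word |= 1UL << (index_hw & (BITS_PER_WORD - 1));
}

static int has_pending(void)
{
	unsigned int i;

	/* Same word scan as blk_mq_hctx_has_pending(). */
	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i])
			return 1;
	return 0;
}

int main(void)
{
	mark_pending(37);	/* lands in word 1, bit 5 */
	printf("pending=%d\n", has_pending());
	return 0;
}
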
blk-mq-sysfs.c
82 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_show() local
87 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_show()
88 q = hctx->queue; in blk_mq_hw_sysfs_show()
96 res = entry->show(hctx, page); in blk_mq_hw_sysfs_show()
106 struct blk_mq_hw_ctx *hctx; in blk_mq_hw_sysfs_store() local
111 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); in blk_mq_hw_sysfs_store()
112 q = hctx->queue; in blk_mq_hw_sysfs_store()
120 res = entry->store(hctx, page, length); in blk_mq_hw_sysfs_store()
177 static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page) in blk_mq_hw_sysfs_poll_show() argument
179 return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success); in blk_mq_hw_sysfs_poll_show()
[all …]
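
blk_mq_hw_sysfs_show() and _store() receive only the kobject embedded in the hardware context and climb back to the owner with container_of(). A self-contained sketch of that recovery, with a simplified local version of the kernel macro:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins: a kobject-like member embedded in a larger struct. */
struct kobj { int refcount; };

struct hw_ctx {
	int queue_num;
	struct kobj kobj;	/* sysfs hands callbacks only this */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct hw_ctx ctx = { .queue_num = 3 };
	struct kobj *k = &ctx.kobj;	/* what the callback receives */

	struct hw_ctx *back = container_of(k, struct hw_ctx, kobj);
	printf("queue_num=%d\n", back->queue_num);	/* prints 3 */
	return 0;
}
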
blk-mq-tag.c
61 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_busy() argument
63 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && in __blk_mq_tag_busy()
64 !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in __blk_mq_tag_busy()
65 atomic_inc(&hctx->tags->active_queues); in __blk_mq_tag_busy()
104 void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in __blk_mq_tag_idle() argument
106 struct blk_mq_tags *tags = hctx->tags; in __blk_mq_tag_idle()
108 if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in __blk_mq_tag_idle()
120 static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, in hctx_may_queue() argument
125 if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED)) in hctx_may_queue()
127 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) in hctx_may_queue()
[all …]
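
__blk_mq_tag_busy() uses the common test-bit-then-test_and_set_bit idiom: a plain read filters out the already-active case without a locked operation, and only the caller whose atomic actually flips the bit increments active_queues. A hedged C11 analogue (all names hypothetical):

#include <stdatomic.h>

#define TAG_ACTIVE (1u << 0)	/* stand-in for BLK_MQ_S_TAG_ACTIVE */

static atomic_uint state;
static atomic_int active_queues;

static void tag_busy(void)
{
	/* Cheap load first; the atomic RMW runs only on the transition. */
	if (!(atomic_load(&state) & TAG_ACTIVE) &&
	    !(atomic_fetch_or(&state, TAG_ACTIVE) & TAG_ACTIVE))
		atomic_fetch_add(&active_queues, 1);
}

int main(void)
{
	tag_busy();
	tag_busy();	/* second call sees the bit set and does nothing */
	return atomic_load(&active_queues) == 1 ? 0 : 1;
}
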
blk-mq-tag.h
55 extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
78 static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_busy() argument
80 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) in blk_mq_tag_busy()
83 return __blk_mq_tag_busy(hctx); in blk_mq_tag_busy()
86 static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) in blk_mq_tag_idle() argument
88 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) in blk_mq_tag_idle()
91 __blk_mq_tag_idle(hctx); in blk_mq_tag_idle()
100 static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx, in blk_mq_tag_set_rq() argument
103 hctx->tags->rqs[tag] = rq; in blk_mq_tag_set_rq()
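
The wrappers above keep the BLK_MQ_F_TAG_SHARED test inline, so queues that do not share a tag set pay a single branch and never reach the out-of-line __blk_mq_tag_busy()/__blk_mq_tag_idle() slow paths. The same fast-path/slow-path split in a user-space sketch (names invented):

#include <stdio.h>

#define F_TAG_SHARED (1u << 1)	/* hypothetical mirror of BLK_MQ_F_TAG_SHARED */

struct hw_ctx { unsigned int flags; };

/* Out-of-line slow path: only shared-tag queues ever get here. */
static void __tag_busy(struct hw_ctx *hctx)
{
	printf("accounting queue, flags=%#x\n", hctx->flags);
}

/* Header-style inline wrapper: one branch for the common case. */
static inline void tag_busy(struct hw_ctx *hctx)
{
	if (!(hctx->flags & F_TAG_SHARED))
		return;
	__tag_busy(hctx);
}

int main(void)
{
	struct hw_ctx solo = { 0 }, shared = { F_TAG_SHARED };

	tag_busy(&solo);	/* no-op */
	tag_busy(&shared);	/* slow path runs */
	return 0;
}
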
blk-mq.h
28 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
104 struct blk_mq_hw_ctx *hctx; member
110 struct blk_mq_hw_ctx *hctx) in blk_mq_set_alloc_data() argument
116 data->hctx = hctx; in blk_mq_set_alloc_data()
119 static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) in blk_mq_hw_queue_mapped() argument
121 return hctx->nr_ctx && hctx->tags; in blk_mq_hw_queue_mapped()
blk-flush.c
230 struct blk_mq_hw_ctx *hctx; in flush_end_io() local
234 hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu); in flush_end_io()
235 blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); in flush_end_io()
321 struct blk_mq_hw_ctx *hctx; in blk_kick_flush() local
327 hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu); in blk_kick_flush()
328 blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq); in blk_kick_flush()
355 struct blk_mq_hw_ctx *hctx; in mq_flush_data_end_io() local
360 hctx = q->mq_ops->map_queue(q, ctx->cpu); in mq_flush_data_end_io()
368 blk_mq_run_hw_queue(hctx, true); in mq_flush_data_end_io()
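
Both flush hunks re-point a live driver tag: blk_kick_flush() makes hctx->tags->rqs[tag] name the flush request, and flush_end_io() hands the tag back to the original request, so a driver completing by tag always finds whichever request currently owns it. A toy tag table showing the hand-off (struct and names hypothetical):

#include <stdio.h>

struct request { const char *name; };

static struct request *rqs[16];	/* stand-in for hctx->tags->rqs[] */

static void tag_set_rq(unsigned int tag, struct request *rq)
{
	rqs[tag] = rq;	/* blk_mq_tag_set_rq() is exactly this store */
}

int main(void)
{
	struct request orig = { "data-rq" }, flush = { "flush-rq" };
	unsigned int tag = 5;

	tag_set_rq(tag, &flush);	/* blk_kick_flush(): flush borrows the tag */
	printf("tag %u -> %s\n", tag, rqs[tag]->name);

	tag_set_rq(tag, &orig);		/* flush_end_io(): hand the tag back */
	printf("tag %u -> %s\n", tag, rqs[tag]->name);
	return 0;
}
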
blk.h
42 struct blk_mq_hw_ctx *hctx; in blk_get_flush_queue() local
47 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_get_flush_queue()
49 return hctx->fq; in blk_get_flush_queue()
blk-core.c
287 struct blk_mq_hw_ctx *hctx; in blk_sync_queue() local
290 queue_for_each_hw_ctx(q, hctx, i) { in blk_sync_queue()
291 cancel_delayed_work_sync(&hctx->run_work); in blk_sync_queue()
292 cancel_delayed_work_sync(&hctx->delay_work); in blk_sync_queue()
3343 struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[queue_num]; in blk_poll() local
3346 hctx->poll_invoked++; in blk_poll()
3348 ret = q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie)); in blk_poll()
3350 hctx->poll_success++; in blk_poll()
/linux-4.4.14/include/linux/
blk-mq.h
228 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
229 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
234 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
254 #define queue_for_each_hw_ctx(q, hctx, i) \ argument
256 ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
262 #define hctx_for_each_ctx(hctx, ctx, i) \ argument
263 for ((i) = 0; (i) < (hctx)->nr_ctx && \
264 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
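
Both iterator macros lean on a GNU statement expression in the loop condition: ({ hctx = (q)->queue_hw_ctx[i]; 1; }) assigns the cursor and then evaluates to 1, so the condition stays true and the body runs with the cursor already loaded. A runnable user-space version of the same shape (GCC/Clang extension, names invented):

#include <stdio.h>

#define for_each_item(arr, n, item, i)				\
	for ((i) = 0; (i) < (n) &&				\
	     ({ (item) = (arr)[(i)]; 1; }); (i)++)

int main(void)
{
	const char *names[] = { "hctx0", "hctx1", "hctx2" };
	const char *item;
	int i;

	for_each_item(names, 3, item, i)
		printf("%d: %s\n", i, item);
	return 0;
}
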
/linux-4.4.14/net/dccp/ccids/
ccid3.h
116 struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid); in ccid3_hc_tx_sk() local
117 BUG_ON(hctx == NULL); in ccid3_hc_tx_sk()
118 return hctx; in ccid3_hc_tx_sk()
/linux-4.4.14/drivers/block/
virtio_blk.c
160 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, in virtio_queue_rq() argument
163 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_queue_rq()
168 int qid = hctx->queue_num; in virtio_queue_rq()
204 num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg); in virtio_queue_rq()
216 blk_mq_stop_hw_queue(hctx); in virtio_queue_rq()
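
virtio_queue_rq() shows the standard ->queue_rq() backpressure pattern, also visible in xen-blkfront and scsi_lib below: when the ring has no room, stop the hardware queue and report busy so the block layer retries later, then restart the queue from the completion path. A hypothetical user-space analogue of that state machine:

#include <stdbool.h>
#include <stdio.h>

enum { QUEUE_OK, QUEUE_BUSY };

struct hwq {
	bool stopped;
	int inflight, capacity;
};

static int queue_rq(struct hwq *q)
{
	if (q->inflight == q->capacity) {
		q->stopped = true;	/* blk_mq_stop_hw_queue(hctx) */
		return QUEUE_BUSY;
	}
	q->inflight++;
	return QUEUE_OK;
}

/* Completion restarts the queue, mirroring the driver's IRQ path. */
static void complete_rq(struct hwq *q)
{
	q->inflight--;
	q->stopped = false;
}

int main(void)
{
	struct hwq q = { .capacity = 1 };

	printf("%d %d\n", queue_rq(&q), queue_rq(&q));	/* 0 1 */
	complete_rq(&q);
	printf("%d\n", queue_rq(&q));			/* 0 */
	return 0;
}
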
null_blk.c
354 static int null_queue_rq(struct blk_mq_hw_ctx *hctx, in null_queue_rq() argument
364 cmd->nq = hctx->driver_data; in null_queue_rq()
381 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in null_init_hctx() argument
387 hctx->driver_data = nq; in null_init_hctx()
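
null_init_hctx() illustrates the usual init_hctx/queue_rq pairing, which the nvme results below follow as well: init stashes the driver's per-queue object in hctx->driver_data, and every later queue_rq() on that hctx reads it back. A minimal sketch under invented names:

#include <stdio.h>

struct hw_ctx { void *driver_data; };
struct nullb_queue { int id; };

static int init_hctx(struct hw_ctx *hctx, void *driver, unsigned int idx)
{
	static struct nullb_queue queues[4];

	(void)driver;	/* unused in this sketch */
	queues[idx].id = idx;
	hctx->driver_data = &queues[idx];	/* as null_init_hctx() does */
	return 0;
}

static void queue_rq(struct hw_ctx *hctx)
{
	struct nullb_queue *nq = hctx->driver_data;	/* as null_queue_rq() does */

	printf("request on queue %d\n", nq->id);
}

int main(void)
{
	struct hw_ctx hctx;

	init_hctx(&hctx, NULL, 2);
	queue_rq(&hctx);
	return 0;
}
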
xen-blkfront.c
747 static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx, in blkif_queue_rq() argument
773 blk_mq_stop_hw_queue(hctx); in blkif_queue_rq()
loop.c
1651 static int loop_queue_rq(struct blk_mq_hw_ctx *hctx, in loop_queue_rq() argument
rbd.c
3473 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx, in rbd_queue_rq() argument
/linux-4.4.14/drivers/nvme/host/
pci.c
191 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_admin_init_hctx() argument
198 WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); in nvme_admin_init_hctx()
201 hctx->driver_data = nvmeq; in nvme_admin_init_hctx()
206 static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in nvme_admin_exit_hctx() argument
208 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_admin_exit_hctx()
226 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, in nvme_init_hctx() argument
235 WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); in nvme_init_hctx()
236 hctx->driver_data = nvmeq; in nvme_init_hctx()
841 static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, in nvme_queue_rq() argument
844 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_queue_rq()
[all …]
/linux-4.4.14/drivers/mtd/ubi/
block.c
319 static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx, in ubiblock_queue_rq() argument
323 struct ubiblock *dev = hctx->queue->queuedata; in ubiblock_queue_rq()
/linux-4.4.14/drivers/block/mtip32xx/
mtip32xx.c
197 struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0]; in mtip_rq_from_tag() local
199 return blk_mq_tag_to_rq(hctx->tags, tag); in mtip_rq_from_tag()
2383 struct blk_mq_hw_ctx *hctx) in mtip_hw_submit_io() argument
3748 static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq) in mtip_submit_request() argument
3750 struct driver_data *dd = hctx->queue->queuedata; in mtip_submit_request()
3784 nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg); in mtip_submit_request()
3787 mtip_hw_submit_io(dd, rq, cmd, nents, hctx); in mtip_submit_request()
3791 static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, in mtip_check_unal_depth() argument
3794 struct driver_data *dd = hctx->queue->queuedata; in mtip_check_unal_depth()
3815 static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, in mtip_queue_rq() argument
[all …]
/linux-4.4.14/crypto/
mcryptd.c
577 struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); in mcryptd_free() local
581 crypto_drop_shash(&hctx->spawn); in mcryptd_free()
cryptd.c
856 struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); in cryptd_free() local
861 crypto_drop_shash(&hctx->spawn); in cryptd_free()
/linux-4.4.14/drivers/scsi/
scsi_lib.c
1966 static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, in scsi_queue_rq() argument
2031 blk_mq_stop_hw_queue(hctx); in scsi_queue_rq()
2034 blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY); in scsi_queue_rq()
/linux-4.4.14/drivers/md/
dm.c
2647 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, in dm_mq_queue_rq() argument