flush_rq          207 block/blk-flush.c static void flush_end_io(struct request *flush_rq, blk_status_t error)
flush_rq          209 block/blk-flush.c 	struct request_queue *q = flush_rq->q;
flush_rq          213 block/blk-flush.c 	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
flush_rq          219 block/blk-flush.c 	if (!refcount_dec_and_test(&flush_rq->ref)) {
flush_rq          228 block/blk-flush.c 	hctx = flush_rq->mq_hctx;
flush_rq          230 block/blk-flush.c 		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
flush_rq          231 block/blk-flush.c 		flush_rq->tag = -1;
flush_rq          233 block/blk-flush.c 		blk_mq_put_driver_tag(flush_rq);
flush_rq          234 block/blk-flush.c 		flush_rq->internal_tag = -1;
flush_rq          274 block/blk-flush.c 	struct request *flush_rq = fq->flush_rq;
flush_rq          297 block/blk-flush.c 	blk_rq_init(q, flush_rq);
flush_rq          307 block/blk-flush.c 	flush_rq->mq_ctx = first_rq->mq_ctx;
flush_rq          308 block/blk-flush.c 	flush_rq->mq_hctx = first_rq->mq_hctx;
flush_rq          312 block/blk-flush.c 		flush_rq->tag = first_rq->tag;
flush_rq          313 block/blk-flush.c 		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
flush_rq          315 block/blk-flush.c 		flush_rq->internal_tag = first_rq->internal_tag;
flush_rq          318 block/blk-flush.c 	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
flush_rq          319 block/blk-flush.c 	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
flush_rq          320 block/blk-flush.c 	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
flush_rq          321 block/blk-flush.c 	flush_rq->rq_disk = first_rq->rq_disk;
flush_rq          322 block/blk-flush.c 	flush_rq->end_io = flush_end_io;
flush_rq          324 block/blk-flush.c 	blk_flush_queue_rq(flush_rq, false);
flush_rq          488 block/blk-flush.c 	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
flush_rq          489 block/blk-flush.c 	if (!fq->flush_rq)
flush_rq          514 block/blk-flush.c 	kfree(fq->flush_rq);
flush_rq         2283 block/blk-mq.c 		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
flush_rq         2337 block/blk-mq.c 	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
flush_rq           26 block/blk.h    	struct request		*flush_rq;
flush_rq           55 block/blk.h    	return hctx->fq->flush_rq == req;
flush_rq         2607 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	bool flush_sq = false, flush_rq = false;
flush_rq         2609 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	if (info->rq && !qp->flush_rq)
flush_rq         2610 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 		flush_rq = true;
flush_rq         2616 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	qp->flush_rq |= flush_rq;
flush_rq         2617 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	if (!flush_sq && !flush_rq)
flush_rq         2625 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 		if (flush_rq) {
flush_rq         2646 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 		 LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
flush_rq          398 drivers/infiniband/hw/i40iw/i40iw_type.h 	bool flush_rq;
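
The block-layer hits above (block/blk-flush.c, block/blk-mq.c, block/blk.h) all concern the single flush request preallocated per flush queue: fq->flush_rq is kzalloc'd in blk_alloc_flush_queue(), reinitialized via blk_rq_init() for each flush sequence in blk_kick_flush(), borrows the tag and hw context of the first pending request, and is completed through flush_end_io() only once refcount_dec_and_test() drops its last reference. The i40iw hits (drivers/infiniband/hw/i40iw/) are an unrelated boolean that merely records whether a QP's receive queue needs flushing. Below is a minimal, hypothetical userspace sketch of the block-layer pattern only, under the assumption of a simplified request model; every struct and function name here is invented for illustration and is not kernel API.

/*
 * Illustrative model of the reuse pattern visible above: one
 * preallocated "flush request" per queue, reinitialized for every
 * flush sequence, whose completion handler runs only when the last
 * reference is dropped (cf. refcount_dec_and_test() in flush_end_io()).
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct model_request {
	int tag;                                /* borrowed from the first pending request */
	void (*end_io)(struct model_request *, int error);
	atomic_int ref;
};

struct model_flush_queue {
	struct model_request *flush_rq;         /* preallocated once, like fq->flush_rq */
};

static void model_flush_end_io(struct model_request *rq, int error)
{
	/* Only the final reference performs the real completion work. */
	if (atomic_fetch_sub(&rq->ref, 1) != 1)
		return;
	printf("flush (tag %d) completed, error=%d\n", rq->tag, error);
	rq->tag = -1;                           /* hand the borrowed tag back */
}

static void model_kick_flush(struct model_flush_queue *fq, int first_rq_tag)
{
	struct model_request *flush_rq = fq->flush_rq;

	/* Reinitialize the shared request and borrow the first request's tag. */
	flush_rq->tag = first_rq_tag;
	flush_rq->end_io = model_flush_end_io;
	/*
	 * Two references stand in for the submitter plus anyone else still
	 * inspecting the request (the timeout path in the real code); the
	 * count here is purely illustrative.
	 */
	atomic_store(&flush_rq->ref, 2);
}

int main(void)
{
	struct model_flush_queue fq = { .flush_rq = calloc(1, sizeof(*fq.flush_rq)) };

	if (!fq.flush_rq)
		return 1;
	model_kick_flush(&fq, 42);
	fq.flush_rq->end_io(fq.flush_rq, 0);    /* first drop: not yet complete */
	fq.flush_rq->end_io(fq.flush_rq, 0);    /* last drop: completion runs */
	free(fq.flush_rq);
	return 0;
}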