rqs               129 block/blk-iolatency.c 		struct blk_rq_stat rqs;
rqs               199 block/blk-iolatency.c 		blk_rq_stat_init(&stat->rqs);
rqs               210 block/blk-iolatency.c 		blk_rq_stat_sum(&sum->rqs, &stat->rqs);
rqs               222 block/blk-iolatency.c 		blk_rq_stat_add(&stat->rqs, req_time);
rqs               234 block/blk-iolatency.c 	return stat->rqs.mean <= iolat->min_lat_nsec;
rqs               242 block/blk-iolatency.c 	return stat->rqs.nr_samples;
rqs               265 block/blk-iolatency.c 				   stat->rqs.mean);
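
In block/blk-iolatency.c, rqs is a struct blk_rq_stat embedded in the per-cgroup latency accounting: each completed request's latency is folded in with blk_rq_stat_add(), per-cpu copies are merged with blk_rq_stat_sum(), and the running mean is compared against the cgroup's latency target. A minimal sketch of that pattern, assuming kernel headers; the wrapper names lat_stat_record(), lat_stat_samples() and lat_stat_met() are invented for illustration, while the helpers and fields are the ones visible in the listing:

    /*
     * Sketch only: struct latency_stat is reduced to the rqs member
     * (the real one also carries percentile state).
     */
    struct latency_stat {
    	struct blk_rq_stat rqs;
    };

    static void lat_stat_record(struct latency_stat *stat, u64 req_time)
    {
    	/* fold one request's completion time into the running stats */
    	blk_rq_stat_add(&stat->rqs, req_time);
    }

    static u64 lat_stat_samples(struct latency_stat *stat)
    {
    	return stat->rqs.nr_samples;
    }

    static bool lat_stat_met(struct latency_stat *stat, u64 min_lat_nsec)
    {
    	/* the cgroup meets its target while the mean stays under it */
    	return stat->rqs.mean <= min_lat_nsec;
    }
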
rqs               225 block/blk-mq-tag.c 	rq = tags->rqs[bitnr];
rqs               284 block/blk-mq-tag.c 	rq = tags->rqs[bitnr];
rqs               314 block/blk-mq-tag.c 	if (tags->rqs)
rqs                19 block/blk-mq-tag.h 	struct request **rqs;
rqs                81 block/blk-mq-tag.h 	hctx->tags->rqs[tag] = rq;
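
Across blk-mq-tag.h, blk-mq-tag.c and blk-mq.c, rqs is the tag-indexed request table of struct blk_mq_tags: when a driver tag is assigned, the request is stored at tags->rqs[tag] (blk-mq-tag.h line 81, blk-mq.c lines 311 and 1079), and the tag iterators and blk_mq_tag_to_rq() read it back. A minimal sketch of the mapping, assuming kernel headers; the _sketch struct name and the tags_set_rq()/tags_to_rq() accessors are hypothetical stand-ins:

    struct blk_mq_tags_sketch {
    	unsigned int nr_tags;
    	struct request **rqs;	/* indexed by driver tag */
    };

    static inline void tags_set_rq(struct blk_mq_tags_sketch *tags,
    			       unsigned int tag, struct request *rq)
    {
    	tags->rqs[tag] = rq;
    }

    static inline struct request *tags_to_rq(struct blk_mq_tags_sketch *tags,
    					 unsigned int tag)
    {
    	if (tag >= tags->nr_tags)
    		return NULL;
    	return tags->rqs[tag];	/* what blk-mq.c line 821 returns */
    }
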
rqs               311 block/blk-mq.c 		data->hctx->tags->rqs[rq->tag] = rq;
rqs               820 block/blk-mq.c 		prefetch(tags->rqs[tag]);
rqs               821 block/blk-mq.c 		return tags->rqs[tag];
rqs              1079 block/blk-mq.c 		data.hctx->tags->rqs[rq->tag] = rq;
rqs              2065 block/blk-mq.c 	if (tags->rqs && set->ops->exit_request) {
rqs              2092 block/blk-mq.c 	kfree(tags->rqs);
rqs              2093 block/blk-mq.c 	tags->rqs = NULL;
rqs              2117 block/blk-mq.c 	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
rqs              2120 block/blk-mq.c 	if (!tags->rqs) {
rqs              2129 block/blk-mq.c 		kfree(tags->rqs);
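
The table's lifecycle in blk-mq.c is a plain kcalloc_node()/kfree() pair: one request pointer per tag, allocated on the hctx's NUMA node and zero-filled so unused slots read as NULL. A condensed sketch using the _sketch struct from above; tags_alloc_rqs()/tags_free_rqs() are invented names, and the GFP flags mirror the allocation around line 2117:

    static int tags_alloc_rqs(struct blk_mq_tags_sketch *tags,
    			  unsigned int nr_tags, int node)
    {
    	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
    				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
    				 node);
    	if (!tags->rqs)
    		return -ENOMEM;
    	tags->nr_tags = nr_tags;
    	return 0;
    }

    static void tags_free_rqs(struct blk_mq_tags_sketch *tags)
    {
    	kfree(tags->rqs);
    	tags->rqs = NULL;	/* matches lines 2092-2093 above */
    }
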
rqs               182 block/kyber-iosched.c 	struct list_head rqs[KYBER_NUM_DOMAINS];
rqs               492 block/kyber-iosched.c 		INIT_LIST_HEAD(&khd->rqs[i]);
rqs               754 block/kyber-iosched.c 	struct list_head *rqs;
rqs               758 block/kyber-iosched.c 	rqs = &khd->rqs[khd->cur_domain];
rqs               768 block/kyber-iosched.c 	rq = list_first_entry_or_null(rqs, struct request, queuelist);
rqs               783 block/kyber-iosched.c 			kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
rqs               784 block/kyber-iosched.c 			rq = list_first_entry(rqs, struct request, queuelist);
rqs               851 block/kyber-iosched.c 		if (!list_empty_careful(&khd->rqs[i]) ||
rqs               913 block/kyber-iosched.c 	return seq_list_start(&khd->rqs[domain], *pos);			\
rqs               922 block/kyber-iosched.c 	return seq_list_next(v, &khd->rqs[domain], pos);		\
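
In Kyber, rqs is an array of per-domain FIFO lists on the per-hctx data: each scheduling domain (reads, sync writes, and so on) owns its own list, dispatch peeks the list for the current domain, and only when it runs dry are the per-cpu software queues flushed into it (lines 754-784). A reduced sketch of the peek step, with made-up names around the real rqs/cur_domain fields and an illustrative domain count:

    #define NUM_DOMAINS_SKETCH 4	/* stand-in for KYBER_NUM_DOMAINS */

    struct khd_sketch {
    	struct list_head rqs[NUM_DOMAINS_SKETCH];
    	unsigned int cur_domain;
    };

    static struct request *khd_peek(struct khd_sketch *khd)
    {
    	struct list_head *rqs = &khd->rqs[khd->cur_domain];

    	/*
    	 * NULL on an empty list is the caller's cue to flush the
    	 * busy software queues into rqs and retry, as at line 783.
    	 */
    	return list_first_entry_or_null(rqs, struct request, queuelist);
    }
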
rqs               125 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 		struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
rqs               184 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 			rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
rqs               189 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 						  rqs, num_rqs, &ctx->guilty);
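
In amdgpu_ctx.c, rqs is a local scratch array: for each usable ring, the drm_sched_rq matching the context's priority is collected, and the whole set is handed to the scheduler entity so work can be balanced across rings. A fragment-level sketch of that loop, with the ring count, rings[] source and entity target simplified; the four-argument drm_sched_entity_init() shape is taken from line 189 of this listing:

    struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
    unsigned int num_rqs = 0;
    unsigned int j;
    int r;

    for (j = 0; j < num_rings; j++)	/* num_rings: assumed local */
    	rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];

    /* one entity scheduled over every collected run queue */
    r = drm_sched_entity_init(&entity->entity, rqs, num_rqs, &ctx->guilty);
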
rqs               196 drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c 	unsigned int rqs = fifosz / 256 - 1;
rqs               219 drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c 	mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
rqs               143 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c 	unsigned int rqs = fifosz / 256 - 1;
rqs               160 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c 	value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
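
In both stmmac DMA backends (DWMAC4 and XGMAC2), rqs is the RQS ("RX queue size") register field: the hardware encodes the RX FIFO size in 256-byte units minus one, so a 4 KiB FIFO programs as 4096 / 256 - 1 = 15. A fragment-level sketch of the DWMAC4 variant, assuming the MTL_OP_MODE_RQS_* constants from the stmmac headers and an mtl_rx_op value read from the channel's RX operation mode register; the XGMAC2 path at line 160 does the same with XGMAC_RQS/XGMAC_RQS_SHIFT:

    unsigned int fifosz = 4096;			/* example RX FIFO size */
    unsigned int rqs = fifosz / 256 - 1;	/* 4096 / 256 - 1 = 15 */

    mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;		/* clear the old field */
    mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;	/* program the new size */
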