rq                 15 arch/alpha/include/asm/agp_backend.h 		u32 rq : 8;
rq               1015 arch/alpha/kernel/core_marvel.c 	       agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq);
rq               1139 arch/alpha/kernel/core_marvel.c 	agp->capability.bits.rq = 0xf;
rq                790 arch/alpha/kernel/core_titan.c 	agp->capability.bits.rq = 7;	/* 8 - 1 */
rq                799 arch/alpha/kernel/core_titan.c 	agp->mode.bits.rq = 7;	/* RQ Depth? */
rq                525 arch/ia64/include/asm/pal.h 			rq		: 1,	/* Requester identifier
rq                562 arch/ia64/include/asm/pal.h 			rq		: 1,	/* Requester identifier
rq                598 arch/ia64/include/asm/pal.h 			rq		: 1,	/* Requester identifier
rq                658 arch/ia64/include/asm/pal.h 			rq		: 1,	/* Requester identifier
rq                736 arch/ia64/include/asm/pal.h #define pmci_bus_req_address_valid		pme_bus.rq
rq                130 arch/powerpc/include/asm/spu.h 	struct spu_runqueue *rq;
rq                 50 arch/powerpc/platforms/cell/spufs/context.c 	INIT_LIST_HEAD(&ctx->rq);
rq                 82 arch/powerpc/platforms/cell/spufs/context.c 	BUG_ON(!list_empty(&ctx->rq));
rq               2531 arch/powerpc/platforms/cell/spufs/file.c 		!list_empty(&ctx->rq) ? 'q' : ' ',
rq                102 arch/powerpc/platforms/cell/spufs/sched.c 	BUG_ON(!list_empty(&ctx->rq));
rq                496 arch/powerpc/platforms/cell/spufs/sched.c 	if (list_empty(&ctx->rq)) {
rq                497 arch/powerpc/platforms/cell/spufs/sched.c 		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
rq                515 arch/powerpc/platforms/cell/spufs/sched.c 	if (!list_empty(&ctx->rq)) {
rq                518 arch/powerpc/platforms/cell/spufs/sched.c 		list_del_init(&ctx->rq);
rq                833 arch/powerpc/platforms/cell/spufs/sched.c 		struct list_head *rq = &spu_prio->runq[best];
rq                835 arch/powerpc/platforms/cell/spufs/sched.c 		list_for_each_entry(ctx, rq, rq) {
rq                108 arch/powerpc/platforms/cell/spufs/spufs.h 	struct list_head rq;
rq               4085 arch/powerpc/xmon/xmon.c 	DUMP_FIELD(spu, "0x%p", rq);
rq               1379 arch/um/drivers/ubd_kern.c 	struct request *req = bd->rq;
rq                230 block/bfq-iosched.c #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
rq                231 block/bfq-iosched.c 	(get_sdist(last_pos, rq) >			\
rq                234 block/bfq-iosched.c 	  blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
rq                365 block/bfq-iosched.c #define RQ_BIC(rq)		icq_to_bic((rq)->elv.priv[0])
rq                366 block/bfq-iosched.c #define RQ_BFQQ(rq)		((rq)->elv.priv[1])
rq                874 block/bfq-iosched.c 	struct request *rq;
rq                881 block/bfq-iosched.c 	rq = rq_entry_fifo(bfqq->fifo.next);
rq                883 block/bfq-iosched.c 	if (rq == last || ktime_get_ns() < rq->fifo_time)
rq                886 block/bfq-iosched.c 	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
rq                887 block/bfq-iosched.c 	return rq;
rq                918 block/bfq-iosched.c static unsigned long bfq_serv_to_charge(struct request *rq,
rq                923 block/bfq-iosched.c 		return blk_rq_sectors(rq);
rq                925 block/bfq-iosched.c 	return blk_rq_sectors(rq) * bfq_async_charge_factor;
rq               1653 block/bfq-iosched.c 					     struct request *rq,
rq               1864 block/bfq-iosched.c static void bfq_add_request(struct request *rq)
rq               1866 block/bfq-iosched.c 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
rq               1872 block/bfq-iosched.c 	bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
rq               1873 block/bfq-iosched.c 	bfqq->queued[rq_is_sync(rq)]++;
rq               2050 block/bfq-iosched.c 	elv_rb_add(&bfqq->sort_list, rq);
rq               2056 block/bfq-iosched.c 	next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
rq               2068 block/bfq-iosched.c 						 rq, &interactive);
rq               2070 block/bfq-iosched.c 		if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
rq               2128 block/bfq-iosched.c static sector_t get_sdist(sector_t last_pos, struct request *rq)
rq               2131 block/bfq-iosched.c 		return abs(blk_rq_pos(rq) - last_pos);
rq               2137 block/bfq-iosched.c static void bfq_activate_request(struct request_queue *q, struct request *rq)
rq               2144 block/bfq-iosched.c static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
rq               2153 block/bfq-iosched.c 			       struct request *rq)
rq               2155 block/bfq-iosched.c 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
rq               2157 block/bfq-iosched.c 	const int sync = rq_is_sync(rq);
rq               2159 block/bfq-iosched.c 	if (bfqq->next_rq == rq) {
rq               2160 block/bfq-iosched.c 		bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
rq               2164 block/bfq-iosched.c 	if (rq->queuelist.prev != &rq->queuelist)
rq               2165 block/bfq-iosched.c 		list_del_init(&rq->queuelist);
rq               2168 block/bfq-iosched.c 	elv_rb_del(&bfqq->sort_list, rq);
rq               2170 block/bfq-iosched.c 	elv_rqhash_del(q, rq);
rq               2171 block/bfq-iosched.c 	if (q->last_merge == rq)
rq               2208 block/bfq-iosched.c 	if (rq->cmd_flags & REQ_META)
rq               2261 block/bfq-iosched.c static struct bfq_queue *bfq_init_rq(struct request *rq);
rq               2320 block/bfq-iosched.c static void bfq_requests_merged(struct request_queue *q, struct request *rq,
rq               2323 block/bfq-iosched.c 	struct bfq_queue *bfqq = bfq_init_rq(rq),
rq               2339 block/bfq-iosched.c 	    !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
rq               2340 block/bfq-iosched.c 	    next->fifo_time < rq->fifo_time) {
rq               2341 block/bfq-iosched.c 		list_del_init(&rq->queuelist);
rq               2342 block/bfq-iosched.c 		list_replace_init(&next->queuelist, &rq->queuelist);
rq               2343 block/bfq-iosched.c 		rq->fifo_time = next->fifo_time;
rq               2347 block/bfq-iosched.c 		bfqq->next_rq = rq;
rq               2813 block/bfq-iosched.c static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
rq               2823 block/bfq-iosched.c 	if (is_sync && !rq_is_sync(rq))
rq               2864 block/bfq-iosched.c 	return bfqq == RQ_BFQQ(rq);
rq               3018 block/bfq-iosched.c 				       struct request *rq)
rq               3020 block/bfq-iosched.c 	if (rq != NULL) { /* new rq dispatch now, reset accordingly */
rq               3025 block/bfq-iosched.c 			blk_rq_sectors(rq);
rq               3035 block/bfq-iosched.c static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
rq               3141 block/bfq-iosched.c 	bfq_reset_rate_computation(bfqd, rq);
rq               3176 block/bfq-iosched.c static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
rq               3183 block/bfq-iosched.c 		bfq_reset_rate_computation(bfqd, rq);
rq               3208 block/bfq-iosched.c 	    && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
rq               3211 block/bfq-iosched.c 	bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
rq               3216 block/bfq-iosched.c 			max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
rq               3218 block/bfq-iosched.c 		bfqd->last_rq_max_size = blk_rq_sectors(rq);
rq               3227 block/bfq-iosched.c 	bfq_update_rate_reset(bfqd, rq);
rq               3229 block/bfq-iosched.c 	bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
rq               3230 block/bfq-iosched.c 	if (RQ_BFQQ(rq) == bfqd->in_service_queue)
rq               3238 block/bfq-iosched.c static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
rq               3240 block/bfq-iosched.c 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
rq               3255 block/bfq-iosched.c 	bfq_update_peak_rate(q->elevator->elevator_data, rq);
rq               3257 block/bfq-iosched.c 	bfq_remove_request(q, rq);
rq               4595 block/bfq-iosched.c 	struct request *rq = bfqq->next_rq;
rq               4598 block/bfq-iosched.c 	service_to_charge = bfq_serv_to_charge(rq, bfqq);
rq               4604 block/bfq-iosched.c 		bfqd->waited_rq = rq;
rq               4607 block/bfq-iosched.c 	bfq_dispatch_remove(bfqd->queue, rq);
rq               4636 block/bfq-iosched.c 	return rq;
rq               4654 block/bfq-iosched.c 	struct request *rq = NULL;
rq               4658 block/bfq-iosched.c 		rq = list_first_entry(&bfqd->dispatch, struct request,
rq               4660 block/bfq-iosched.c 		list_del_init(&rq->queuelist);
rq               4662 block/bfq-iosched.c 		bfqq = RQ_BFQQ(rq);
rq               4727 block/bfq-iosched.c 	rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
rq               4729 block/bfq-iosched.c 	if (rq) {
rq               4733 block/bfq-iosched.c 		rq->rq_flags |= RQF_STARTED;
rq               4736 block/bfq-iosched.c 	return rq;
rq               4741 block/bfq-iosched.c 				      struct request *rq,
rq               4745 block/bfq-iosched.c 	struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
rq               4780 block/bfq-iosched.c 		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
rq               4786 block/bfq-iosched.c 					     struct request *rq,
rq               4794 block/bfq-iosched.c 	struct request *rq;
rq               4803 block/bfq-iosched.c 	rq = __bfq_dispatch_request(hctx);
rq               4810 block/bfq-iosched.c 	bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
rq               4813 block/bfq-iosched.c 	return rq;
rq               5205 block/bfq-iosched.c 		       struct request *rq)
rq               5208 block/bfq-iosched.c 	bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
rq               5348 block/bfq-iosched.c 			    struct request *rq)
rq               5350 block/bfq-iosched.c 	if (rq->cmd_flags & REQ_META)
rq               5353 block/bfq-iosched.c 	bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
rq               5356 block/bfq-iosched.c 		bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
rq               5357 block/bfq-iosched.c 				 blk_rq_sectors(rq) < 32;
rq               5404 block/bfq-iosched.c static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
rq               5406 block/bfq-iosched.c 	struct bfq_queue *bfqq = RQ_BFQQ(rq),
rq               5407 block/bfq-iosched.c 		*new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
rq               5426 block/bfq-iosched.c 		if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
rq               5427 block/bfq-iosched.c 			bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
rq               5436 block/bfq-iosched.c 		rq->elv.priv[1] = new_bfqq;
rq               5441 block/bfq-iosched.c 	bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
rq               5442 block/bfq-iosched.c 	bfq_update_io_seektime(bfqd, bfqq, rq);
rq               5445 block/bfq-iosched.c 	bfq_add_request(rq);
rq               5448 block/bfq-iosched.c 	rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
rq               5449 block/bfq-iosched.c 	list_add_tail(&rq->queuelist, &bfqq->fifo);
rq               5451 block/bfq-iosched.c 	bfq_rq_enqueued(bfqd, bfqq, rq);
rq               5488 block/bfq-iosched.c static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
rq               5498 block/bfq-iosched.c 	if (blk_mq_sched_try_insert_merge(q, rq)) {
rq               5505 block/bfq-iosched.c 	blk_mq_sched_request_inserted(rq);
rq               5508 block/bfq-iosched.c 	bfqq = bfq_init_rq(rq);
rq               5509 block/bfq-iosched.c 	if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
rq               5511 block/bfq-iosched.c 			list_add(&rq->queuelist, &bfqd->dispatch);
rq               5513 block/bfq-iosched.c 			list_add_tail(&rq->queuelist, &bfqd->dispatch);
rq               5515 block/bfq-iosched.c 		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
rq               5521 block/bfq-iosched.c 		bfqq = RQ_BFQQ(rq);
rq               5523 block/bfq-iosched.c 		if (rq_mergeable(rq)) {
rq               5524 block/bfq-iosched.c 			elv_rqhash_add(q, rq);
rq               5526 block/bfq-iosched.c 				q->last_merge = rq;
rq               5535 block/bfq-iosched.c 	cmd_flags = rq->cmd_flags;
rq               5547 block/bfq-iosched.c 		struct request *rq;
rq               5549 block/bfq-iosched.c 		rq = list_first_entry(list, struct request, queuelist);
rq               5550 block/bfq-iosched.c 		list_del_init(&rq->queuelist);
rq               5551 block/bfq-iosched.c 		bfq_insert_request(hctx, rq, at_head);
rq               5888 block/bfq-iosched.c static void bfq_finish_requeue_request(struct request *rq)
rq               5890 block/bfq-iosched.c 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
rq               5902 block/bfq-iosched.c 	if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
rq               5910 block/bfq-iosched.c 	if (!rq->elv.icq || !bfqq)
rq               5915 block/bfq-iosched.c 	if (rq->rq_flags & RQF_STARTED)
rq               5917 block/bfq-iosched.c 					     rq->start_time_ns,
rq               5918 block/bfq-iosched.c 					     rq->io_start_time_ns,
rq               5919 block/bfq-iosched.c 					     rq->cmd_flags);
rq               5921 block/bfq-iosched.c 	if (likely(rq->rq_flags & RQF_STARTED)) {
rq               5926 block/bfq-iosched.c 		if (rq == bfqd->waited_rq)
rq               5947 block/bfq-iosched.c 		if (!RB_EMPTY_NODE(&rq->rb_node)) {
rq               5948 block/bfq-iosched.c 			bfq_remove_request(rq->q, rq);
rq               5950 block/bfq-iosched.c 						    rq->cmd_flags);
rq               5972 block/bfq-iosched.c 	rq->elv.priv[0] = NULL;
rq               5973 block/bfq-iosched.c 	rq->elv.priv[1] = NULL;
rq               6069 block/bfq-iosched.c static void bfq_prepare_request(struct request *rq, struct bio *bio)
rq               6076 block/bfq-iosched.c 	rq->elv.priv[0] = rq->elv.priv[1] = NULL;
rq               6102 block/bfq-iosched.c static struct bfq_queue *bfq_init_rq(struct request *rq)
rq               6104 block/bfq-iosched.c 	struct request_queue *q = rq->q;
rq               6105 block/bfq-iosched.c 	struct bio *bio = rq->bio;
rq               6108 block/bfq-iosched.c 	const int is_sync = rq_is_sync(rq);
rq               6113 block/bfq-iosched.c 	if (unlikely(!rq->elv.icq))
rq               6123 block/bfq-iosched.c 	if (rq->elv.priv[1])
rq               6124 block/bfq-iosched.c 		return rq->elv.priv[1];
rq               6126 block/bfq-iosched.c 	bic = icq_to_bic(rq->elv.icq);
rq               6159 block/bfq-iosched.c 		     rq, bfqq, bfqq->ref);
rq               6161 block/bfq-iosched.c 	rq->elv.priv[0] = bic;
rq               6162 block/bfq-iosched.c 	rq->elv.priv[1] = bfqq;
rq                109 block/blk-core.c void blk_rq_init(struct request_queue *q, struct request *rq)
rq                111 block/blk-core.c 	memset(rq, 0, sizeof(*rq));
rq                113 block/blk-core.c 	INIT_LIST_HEAD(&rq->queuelist);
rq                114 block/blk-core.c 	rq->q = q;
rq                115 block/blk-core.c 	rq->__sector = (sector_t) -1;
rq                116 block/blk-core.c 	INIT_HLIST_NODE(&rq->hash);
rq                117 block/blk-core.c 	RB_CLEAR_NODE(&rq->rb_node);
rq                118 block/blk-core.c 	rq->tag = -1;
rq                119 block/blk-core.c 	rq->internal_tag = -1;
rq                120 block/blk-core.c 	rq->start_time_ns = ktime_get_ns();
rq                121 block/blk-core.c 	rq->part = NULL;
rq                122 block/blk-core.c 	refcount_set(&rq->ref, 1);
rq                229 block/blk-core.c static void req_bio_endio(struct request *rq, struct bio *bio,
rq                235 block/blk-core.c 	if (unlikely(rq->rq_flags & RQF_QUIET))
rq                241 block/blk-core.c 	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
rq                245 block/blk-core.c void blk_dump_rq_flags(struct request *rq, char *msg)
rq                248 block/blk-core.c 		rq->rq_disk ? rq->rq_disk->disk_name : "?",
rq                249 block/blk-core.c 		(unsigned long long) rq->cmd_flags);
rq                252 block/blk-core.c 	       (unsigned long long)blk_rq_pos(rq),
rq                253 block/blk-core.c 	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
rq                255 block/blk-core.c 	       rq->bio, rq->biotail, blk_rq_bytes(rq));
rq                696 block/blk-core.c 	struct request *rq;
rq                705 block/blk-core.c 	list_for_each_entry_reverse(rq, plug_list, queuelist) {
rq                708 block/blk-core.c 		if (rq->q == q && same_queue_rq) {
rq                714 block/blk-core.c 			*same_queue_rq = rq;
rq                717 block/blk-core.c 		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
rq                720 block/blk-core.c 		switch (blk_try_merge(rq, bio)) {
rq                722 block/blk-core.c 			merged = bio_attempt_back_merge(rq, bio, nr_segs);
rq                725 block/blk-core.c 			merged = bio_attempt_front_merge(rq, bio, nr_segs);
rq                728 block/blk-core.c 			merged = bio_attempt_discard_merge(q, rq, bio);
rq               1217 block/blk-core.c 				      struct request *rq)
rq               1219 block/blk-core.c 	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
rq               1221 block/blk-core.c 			__func__, blk_rq_sectors(rq),
rq               1222 block/blk-core.c 			blk_queue_get_max_sectors(q, req_op(rq)));
rq               1232 block/blk-core.c 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
rq               1233 block/blk-core.c 	if (rq->nr_phys_segments > queue_max_segments(q)) {
rq               1235 block/blk-core.c 			__func__, rq->nr_phys_segments, queue_max_segments(q));
rq               1247 block/blk-core.c blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
rq               1249 block/blk-core.c 	if (blk_cloned_rq_check_limits(q, rq))
rq               1252 block/blk-core.c 	if (rq->rq_disk &&
rq               1253 block/blk-core.c 	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
rq               1257 block/blk-core.c 		blk_account_io_start(rq, true);
rq               1264 block/blk-core.c 	return blk_mq_request_issue_directly(rq, true);
rq               1281 block/blk-core.c unsigned int blk_rq_err_bytes(const struct request *rq)
rq               1283 block/blk-core.c 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
rq               1287 block/blk-core.c 	if (!(rq->rq_flags & RQF_MIXED_MERGE))
rq               1288 block/blk-core.c 		return blk_rq_bytes(rq);
rq               1297 block/blk-core.c 	for (bio = rq->bio; bio; bio = bio->bi_next) {
rq               1304 block/blk-core.c 	BUG_ON(blk_rq_bytes(rq) && !bytes);
rq               1347 block/blk-core.c void blk_account_io_start(struct request *rq, bool new_io)
rq               1350 block/blk-core.c 	int rw = rq_data_dir(rq);
rq               1352 block/blk-core.c 	if (!blk_do_io_stat(rq))
rq               1358 block/blk-core.c 		part = rq->part;
rq               1361 block/blk-core.c 		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
rq               1371 block/blk-core.c 			part = &rq->rq_disk->part0;
rq               1374 block/blk-core.c 		part_inc_in_flight(rq->q, part, rw);
rq               1375 block/blk-core.c 		rq->part = part;
rq               1387 block/blk-core.c void blk_steal_bios(struct bio_list *list, struct request *rq)
rq               1389 block/blk-core.c 	if (rq->bio) {
rq               1391 block/blk-core.c 			list->tail->bi_next = rq->bio;
rq               1393 block/blk-core.c 			list->head = rq->bio;
rq               1394 block/blk-core.c 		list->tail = rq->biotail;
rq               1396 block/blk-core.c 		rq->bio = NULL;
rq               1397 block/blk-core.c 		rq->biotail = NULL;
rq               1400 block/blk-core.c 	rq->__data_len = 0;
rq               1522 block/blk-core.c void rq_flush_dcache_pages(struct request *rq)
rq               1527 block/blk-core.c 	rq_for_each_segment(bvec, rq, iter)
rq               1568 block/blk-core.c void blk_rq_unprep_clone(struct request *rq)
rq               1572 block/blk-core.c 	while ((bio = rq->bio) != NULL) {
rq               1573 block/blk-core.c 		rq->bio = bio->bi_next;
rq               1616 block/blk-core.c int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
rq               1634 block/blk-core.c 		if (rq->bio) {
rq               1635 block/blk-core.c 			rq->biotail->bi_next = bio;
rq               1636 block/blk-core.c 			rq->biotail = bio;
rq               1638 block/blk-core.c 			rq->bio = rq->biotail = bio;
rq               1641 block/blk-core.c 	__blk_rq_prep_clone(rq, rq_src);
rq               1648 block/blk-core.c 	blk_rq_unprep_clone(rq);
rq                 20 block/blk-exec.c static void blk_end_sync_rq(struct request *rq, blk_status_t error)
rq                 22 block/blk-exec.c 	struct completion *waiting = rq->end_io_data;
rq                 24 block/blk-exec.c 	rq->end_io_data = NULL;
rq                 49 block/blk-exec.c 			   struct request *rq, int at_head,
rq                 53 block/blk-exec.c 	WARN_ON(!blk_rq_is_passthrough(rq));
rq                 55 block/blk-exec.c 	rq->rq_disk = bd_disk;
rq                 56 block/blk-exec.c 	rq->end_io = done;
rq                 62 block/blk-exec.c 	blk_mq_sched_insert_request(rq, at_head, true, false);
rq                 78 block/blk-exec.c 		   struct request *rq, int at_head)
rq                 83 block/blk-exec.c 	rq->end_io_data = &wait;
rq                 84 block/blk-exec.c 	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
rq                 99 block/blk-flush.c static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
rq                103 block/blk-flush.c 	if (blk_rq_sectors(rq))
rq                107 block/blk-flush.c 		if (rq->cmd_flags & REQ_PREFLUSH)
rq                110 block/blk-flush.c 		    (rq->cmd_flags & REQ_FUA))
rq                116 block/blk-flush.c static unsigned int blk_flush_cur_seq(struct request *rq)
rq                118 block/blk-flush.c 	return 1 << ffz(rq->flush.seq);
rq                121 block/blk-flush.c static void blk_flush_restore_request(struct request *rq)
rq                128 block/blk-flush.c 	rq->bio = rq->biotail;
rq                131 block/blk-flush.c 	rq->rq_flags &= ~RQF_FLUSH_SEQ;
rq                132 block/blk-flush.c 	rq->end_io = rq->flush.saved_end_io;
rq                135 block/blk-flush.c static void blk_flush_queue_rq(struct request *rq, bool add_front)
rq                137 block/blk-flush.c 	blk_mq_add_to_requeue_list(rq, add_front, true);
rq                156 block/blk-flush.c static void blk_flush_complete_seq(struct request *rq,
rq                160 block/blk-flush.c 	struct request_queue *q = rq->q;
rq                164 block/blk-flush.c 	BUG_ON(rq->flush.seq & seq);
rq                165 block/blk-flush.c 	rq->flush.seq |= seq;
rq                166 block/blk-flush.c 	cmd_flags = rq->cmd_flags;
rq                169 block/blk-flush.c 		seq = blk_flush_cur_seq(rq);
rq                179 block/blk-flush.c 		list_move_tail(&rq->flush.list, pending);
rq                183 block/blk-flush.c 		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
rq                184 block/blk-flush.c 		blk_flush_queue_rq(rq, true);
rq                194 block/blk-flush.c 		BUG_ON(!list_empty(&rq->queuelist));
rq                195 block/blk-flush.c 		list_del_init(&rq->flush.list);
rq                196 block/blk-flush.c 		blk_flush_restore_request(rq);
rq                197 block/blk-flush.c 		blk_mq_end_request(rq, error);
rq                211 block/blk-flush.c 	struct request *rq, *n;
rq                244 block/blk-flush.c 	list_for_each_entry_safe(rq, n, running, flush.list) {
rq                245 block/blk-flush.c 		unsigned int seq = blk_flush_cur_seq(rq);
rq                248 block/blk-flush.c 		blk_flush_complete_seq(rq, fq, seq, error);
rq                327 block/blk-flush.c static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
rq                329 block/blk-flush.c 	struct request_queue *q = rq->q;
rq                330 block/blk-flush.c 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
rq                331 block/blk-flush.c 	struct blk_mq_ctx *ctx = rq->mq_ctx;
rq                336 block/blk-flush.c 		WARN_ON(rq->tag < 0);
rq                337 block/blk-flush.c 		blk_mq_put_driver_tag(rq);
rq                345 block/blk-flush.c 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
rq                360 block/blk-flush.c void blk_insert_flush(struct request *rq)
rq                362 block/blk-flush.c 	struct request_queue *q = rq->q;
rq                364 block/blk-flush.c 	unsigned int policy = blk_flush_policy(fflags, rq);
rq                365 block/blk-flush.c 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
rq                371 block/blk-flush.c 	rq->cmd_flags &= ~REQ_PREFLUSH;
rq                373 block/blk-flush.c 		rq->cmd_flags &= ~REQ_FUA;
rq                380 block/blk-flush.c 	rq->cmd_flags |= REQ_SYNC;
rq                389 block/blk-flush.c 		blk_mq_end_request(rq, 0);
rq                393 block/blk-flush.c 	BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
rq                402 block/blk-flush.c 		blk_mq_request_bypass_insert(rq, false, false);
rq                410 block/blk-flush.c 	memset(&rq->flush, 0, sizeof(rq->flush));
rq                411 block/blk-flush.c 	INIT_LIST_HEAD(&rq->flush.list);
rq                412 block/blk-flush.c 	rq->rq_flags |= RQF_FLUSH_SEQ;
rq                413 block/blk-flush.c 	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
rq                415 block/blk-flush.c 	rq->end_io = mq_flush_data_end_io;
rq                418 block/blk-flush.c 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
rq                371 block/blk-integrity.c static void blk_integrity_nop_prepare(struct request *rq)
rq                375 block/blk-integrity.c static void blk_integrity_nop_complete(struct request *rq,
rq               1810 block/blk-iocost.c static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
rq               1834 block/blk-iocost.c 	if (blk_rq_pos(rq) < bio_end &&
rq               1835 block/blk-iocost.c 	    blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
rq               1842 block/blk-iocost.c 	if (rq->bio && rq->bio->bi_iocost_cost &&
rq               1871 block/blk-iocost.c static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
rq               1877 block/blk-iocost.c 	if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
rq               1880 block/blk-iocost.c 	switch (req_op(rq) & REQ_OP_MASK) {
rq               1893 block/blk-iocost.c 	on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
rq               1894 block/blk-iocost.c 	rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
rq                 18 block/blk-map.c int blk_rq_append_bio(struct request *rq, struct bio **bio)
rq                 25 block/blk-map.c 	blk_queue_bounce(rq->q, bio);
rq                 30 block/blk-map.c 	if (!rq->bio) {
rq                 31 block/blk-map.c 		blk_rq_bio_prep(rq, *bio, nr_segs);
rq                 33 block/blk-map.c 		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
rq                 41 block/blk-map.c 		rq->biotail->bi_next = *bio;
rq                 42 block/blk-map.c 		rq->biotail = *bio;
rq                 43 block/blk-map.c 		rq->__data_len += (*bio)->bi_iter.bi_size;
rq                 64 block/blk-map.c static int __blk_rq_map_user_iov(struct request *rq,
rq                 68 block/blk-map.c 	struct request_queue *q = rq->q;
rq                 81 block/blk-map.c 	bio->bi_opf |= req_op(rq);
rq                 89 block/blk-map.c 	ret = blk_rq_append_bio(rq, &bio);
rq                120 block/blk-map.c int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
rq                142 block/blk-map.c 		ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
rq                146 block/blk-map.c 			bio = rq->bio;
rq                150 block/blk-map.c 		rq->rq_flags |= RQF_COPY_USER;
rq                156 block/blk-map.c 	rq->bio = NULL;
rq                161 block/blk-map.c int blk_rq_map_user(struct request_queue *q, struct request *rq,
rq                167 block/blk-map.c 	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);
rq                172 block/blk-map.c 	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
rq                221 block/blk-map.c int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
rq                224 block/blk-map.c 	int reading = rq_data_dir(rq) == READ;
rq                245 block/blk-map.c 	bio->bi_opf |= req_op(rq);
rq                248 block/blk-map.c 		rq->rq_flags |= RQF_COPY_USER;
rq                251 block/blk-map.c 	ret = blk_rq_append_bio(rq, &bio);
rq                356 block/blk-merge.c unsigned int blk_recalc_rq_segments(struct request *rq)
rq                363 block/blk-merge.c 	if (!rq->bio)
rq                366 block/blk-merge.c 	switch (bio_op(rq->bio)) {
rq                375 block/blk-merge.c 	rq_for_each_bvec(bv, rq, iter)
rq                376 block/blk-merge.c 		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
rq                502 block/blk-merge.c int blk_rq_map_sg(struct request_queue *q, struct request *rq,
rq                508 block/blk-merge.c 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
rq                509 block/blk-merge.c 		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
rq                510 block/blk-merge.c 	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
rq                511 block/blk-merge.c 		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
rq                512 block/blk-merge.c 	else if (rq->bio)
rq                513 block/blk-merge.c 		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
rq                515 block/blk-merge.c 	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
rq                516 block/blk-merge.c 	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
rq                518 block/blk-merge.c 			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
rq                521 block/blk-merge.c 		rq->extra_len += pad_len;
rq                524 block/blk-merge.c 	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
rq                525 block/blk-merge.c 		if (op_is_write(req_op(rq)))
rq                535 block/blk-merge.c 		rq->extra_len += q->dma_drain_size;
rq                545 block/blk-merge.c 	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
rq                658 block/blk-merge.c void blk_rq_set_mixed_merge(struct request *rq)
rq                660 block/blk-merge.c 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
rq                663 block/blk-merge.c 	if (rq->rq_flags & RQF_MIXED_MERGE)
rq                671 block/blk-merge.c 	for (bio = rq->bio; bio; bio = bio->bi_next) {
rq                676 block/blk-merge.c 	rq->rq_flags |= RQF_MIXED_MERGE;
rq                814 block/blk-merge.c struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
rq                816 block/blk-merge.c 	struct request *next = elv_latter_request(q, rq);
rq                819 block/blk-merge.c 		return attempt_merge(q, rq, next);
rq                824 block/blk-merge.c struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
rq                826 block/blk-merge.c 	struct request *prev = elv_former_request(q, rq);
rq                829 block/blk-merge.c 		return attempt_merge(q, prev, rq);
rq                834 block/blk-merge.c int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
rq                839 block/blk-merge.c 	free = attempt_merge(q, rq, next);
rq                848 block/blk-merge.c bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
rq                850 block/blk-merge.c 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
rq                853 block/blk-merge.c 	if (req_op(rq) != bio_op(bio))
rq                857 block/blk-merge.c 	if (bio_data_dir(bio) != rq_data_dir(rq))
rq                861 block/blk-merge.c 	if (rq->rq_disk != bio->bi_disk)
rq                865 block/blk-merge.c 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
rq                869 block/blk-merge.c 	if (req_op(rq) == REQ_OP_WRITE_SAME &&
rq                870 block/blk-merge.c 	    !blk_write_same_mergeable(rq->bio, bio))
rq                877 block/blk-merge.c 	if (rq->write_hint != bio->bi_write_hint)
rq                880 block/blk-merge.c 	if (rq->ioprio != bio_prio(bio))
rq                886 block/blk-merge.c enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
rq                888 block/blk-merge.c 	if (blk_discard_mergable(rq))
rq                890 block/blk-merge.c 	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
rq                892 block/blk-merge.c 	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
rq                324 block/blk-mq-debugfs.c int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
rq                326 block/blk-mq-debugfs.c 	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
rq                327 block/blk-mq-debugfs.c 	const unsigned int op = req_op(rq);
rq                330 block/blk-mq-debugfs.c 	seq_printf(m, "%p {.op=", rq);
rq                336 block/blk-mq-debugfs.c 	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
rq                339 block/blk-mq-debugfs.c 	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
rq                341 block/blk-mq-debugfs.c 	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
rq                342 block/blk-mq-debugfs.c 	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
rq                343 block/blk-mq-debugfs.c 		   rq->internal_tag);
rq                345 block/blk-mq-debugfs.c 		mq_ops->show_rq(m, rq);
rq                398 block/blk-mq-debugfs.c static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
rq                402 block/blk-mq-debugfs.c 	if (rq->mq_hctx == params->hctx)
rq                404 block/blk-mq-debugfs.c 					 list_entry_rq(&rq->queuelist));
rq                 18 block/blk-mq-debugfs.h int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
rq                 35 block/blk-mq-sched.c void blk_mq_sched_assign_ioc(struct request *rq)
rq                 37 block/blk-mq-sched.c 	struct request_queue *q = rq->q;
rq                 58 block/blk-mq-sched.c 	rq->elv.icq = icq;
rq                 95 block/blk-mq-sched.c 		struct request *rq;
rq                103 block/blk-mq-sched.c 		rq = e->type->ops.dispatch_request(hctx);
rq                104 block/blk-mq-sched.c 		if (!rq) {
rq                114 block/blk-mq-sched.c 		list_add(&rq->queuelist, &rq_list);
rq                141 block/blk-mq-sched.c 		struct request *rq;
rq                149 block/blk-mq-sched.c 		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
rq                150 block/blk-mq-sched.c 		if (!rq) {
rq                160 block/blk-mq-sched.c 		list_add(&rq->queuelist, &rq_list);
rq                163 block/blk-mq-sched.c 		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
rq                229 block/blk-mq-sched.c 	struct request *rq;
rq                231 block/blk-mq-sched.c 	switch (elv_merge(q, &rq, bio)) {
rq                233 block/blk-mq-sched.c 		if (!blk_mq_sched_allow_merge(q, rq, bio))
rq                235 block/blk-mq-sched.c 		if (!bio_attempt_back_merge(rq, bio, nr_segs))
rq                237 block/blk-mq-sched.c 		*merged_request = attempt_back_merge(q, rq);
rq                239 block/blk-mq-sched.c 			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
rq                242 block/blk-mq-sched.c 		if (!blk_mq_sched_allow_merge(q, rq, bio))
rq                244 block/blk-mq-sched.c 		if (!bio_attempt_front_merge(rq, bio, nr_segs))
rq                246 block/blk-mq-sched.c 		*merged_request = attempt_front_merge(q, rq);
rq                248 block/blk-mq-sched.c 			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
rq                251 block/blk-mq-sched.c 		return bio_attempt_discard_merge(q, rq, bio);
rq                265 block/blk-mq-sched.c 	struct request *rq;
rq                268 block/blk-mq-sched.c 	list_for_each_entry_reverse(rq, list, queuelist) {
rq                274 block/blk-mq-sched.c 		if (!blk_rq_merge_ok(rq, bio))
rq                277 block/blk-mq-sched.c 		switch (blk_try_merge(rq, bio)) {
rq                279 block/blk-mq-sched.c 			if (blk_mq_sched_allow_merge(q, rq, bio))
rq                280 block/blk-mq-sched.c 				merged = bio_attempt_back_merge(rq, bio,
rq                284 block/blk-mq-sched.c 			if (blk_mq_sched_allow_merge(q, rq, bio))
rq                285 block/blk-mq-sched.c 				merged = bio_attempt_front_merge(rq, bio,
rq                289 block/blk-mq-sched.c 			merged = bio_attempt_discard_merge(q, rq, bio);
rq                348 block/blk-mq-sched.c bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
rq                350 block/blk-mq-sched.c 	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
rq                354 block/blk-mq-sched.c void blk_mq_sched_request_inserted(struct request *rq)
rq                356 block/blk-mq-sched.c 	trace_block_rq_insert(rq->q, rq);
rq                362 block/blk-mq-sched.c 				       struct request *rq)
rq                375 block/blk-mq-sched.c 	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
rq                379 block/blk-mq-sched.c 		rq->rq_flags |= RQF_SORTED;
rq                384 block/blk-mq-sched.c void blk_mq_sched_insert_request(struct request *rq, bool at_head,
rq                387 block/blk-mq-sched.c 	struct request_queue *q = rq->q;
rq                389 block/blk-mq-sched.c 	struct blk_mq_ctx *ctx = rq->mq_ctx;
rq                390 block/blk-mq-sched.c 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
rq                393 block/blk-mq-sched.c 	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
rq                394 block/blk-mq-sched.c 		blk_insert_flush(rq);
rq                398 block/blk-mq-sched.c 	WARN_ON(e && (rq->tag != -1));
rq                400 block/blk-mq-sched.c 	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
rq                422 block/blk-mq-sched.c 		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
rq                423 block/blk-mq-sched.c 		blk_mq_request_bypass_insert(rq, at_head, false);
rq                430 block/blk-mq-sched.c 		list_add(&rq->queuelist, &list);
rq                434 block/blk-mq-sched.c 		__blk_mq_insert_request(hctx, rq, at_head);
rq                 11 block/blk-mq-sched.h void blk_mq_sched_assign_ioc(struct request *rq);
rq                 13 block/blk-mq-sched.h void blk_mq_sched_request_inserted(struct request *rq);
rq                 18 block/blk-mq-sched.h bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
rq                 22 block/blk-mq-sched.h void blk_mq_sched_insert_request(struct request *rq, bool at_head,
rq                 45 block/blk-mq-sched.h blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
rq                 51 block/blk-mq-sched.h 		return e->type->ops.allow_merge(q, rq, bio);
rq                 56 block/blk-mq-sched.h static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
rq                 58 block/blk-mq-sched.h 	struct elevator_queue *e = rq->q->elevator;
rq                 61 block/blk-mq-sched.h 		e->type->ops.completed_request(rq, now);
rq                 64 block/blk-mq-sched.h static inline void blk_mq_sched_requeue_request(struct request *rq)
rq                 66 block/blk-mq-sched.h 	struct request_queue *q = rq->q;
rq                 70 block/blk-mq-sched.h 		e->type->ops.requeue_request(rq);
rq                221 block/blk-mq-tag.c 	struct request *rq;
rq                225 block/blk-mq-tag.c 	rq = tags->rqs[bitnr];
rq                231 block/blk-mq-tag.c 	if (rq && rq->q == hctx->queue)
rq                232 block/blk-mq-tag.c 		return iter_data->fn(hctx, rq, iter_data->data, reserved);
rq                275 block/blk-mq-tag.c 	struct request *rq;
rq                284 block/blk-mq-tag.c 	rq = tags->rqs[bitnr];
rq                285 block/blk-mq-tag.c 	if (rq && blk_mq_request_started(rq))
rq                286 block/blk-mq-tag.c 		return iter_data->fn(rq, iter_data->data, reserved);
rq                358 block/blk-mq-tag.c static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
rq                363 block/blk-mq-tag.c 	if (blk_mq_request_completed(rq))
rq                556 block/blk-mq-tag.c u32 blk_mq_unique_tag(struct request *rq)
rq                558 block/blk-mq-tag.c 	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
rq                559 block/blk-mq-tag.c 		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
rq                 79 block/blk-mq-tag.h 		unsigned int tag, struct request *rq)
rq                 81 block/blk-mq-tag.h 	hctx->tags->rqs[tag] = rq;
rq                 46 block/blk-mq.c static int blk_mq_poll_stats_bkt(const struct request *rq)
rq                 50 block/blk-mq.c 	ddir = rq_data_dir(rq);
rq                 51 block/blk-mq.c 	sectors = blk_rq_stats_sectors(rq);
rq                100 block/blk-mq.c 				  struct request *rq, void *priv,
rq                108 block/blk-mq.c 	if (rq->part == mi->part)
rq                126 block/blk-mq.c 				     struct request *rq, void *priv,
rq                131 block/blk-mq.c 	if (rq->part == mi->part)
rq                132 block/blk-mq.c 		mi->inflight[rq_data_dir(rq)]++;
rq                289 block/blk-mq.c static inline bool blk_mq_need_time_stamp(struct request *rq)
rq                291 block/blk-mq.c 	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator;
rq                298 block/blk-mq.c 	struct request *rq = tags->static_rqs[tag];
rq                302 block/blk-mq.c 		rq->tag = -1;
rq                303 block/blk-mq.c 		rq->internal_tag = tag;
rq                309 block/blk-mq.c 		rq->tag = tag;
rq                310 block/blk-mq.c 		rq->internal_tag = -1;
rq                311 block/blk-mq.c 		data->hctx->tags->rqs[rq->tag] = rq;
rq                315 block/blk-mq.c 	rq->q = data->q;
rq                316 block/blk-mq.c 	rq->mq_ctx = data->ctx;
rq                317 block/blk-mq.c 	rq->mq_hctx = data->hctx;
rq                318 block/blk-mq.c 	rq->rq_flags = rq_flags;
rq                319 block/blk-mq.c 	rq->cmd_flags = op;
rq                321 block/blk-mq.c 		rq->rq_flags |= RQF_PREEMPT;
rq                323 block/blk-mq.c 		rq->rq_flags |= RQF_IO_STAT;
rq                324 block/blk-mq.c 	INIT_LIST_HEAD(&rq->queuelist);
rq                325 block/blk-mq.c 	INIT_HLIST_NODE(&rq->hash);
rq                326 block/blk-mq.c 	RB_CLEAR_NODE(&rq->rb_node);
rq                327 block/blk-mq.c 	rq->rq_disk = NULL;
rq                328 block/blk-mq.c 	rq->part = NULL;
rq                330 block/blk-mq.c 	rq->alloc_time_ns = alloc_time_ns;
rq                332 block/blk-mq.c 	if (blk_mq_need_time_stamp(rq))
rq                333 block/blk-mq.c 		rq->start_time_ns = ktime_get_ns();
rq                335 block/blk-mq.c 		rq->start_time_ns = 0;
rq                336 block/blk-mq.c 	rq->io_start_time_ns = 0;
rq                337 block/blk-mq.c 	rq->stats_sectors = 0;
rq                338 block/blk-mq.c 	rq->nr_phys_segments = 0;
rq                340 block/blk-mq.c 	rq->nr_integrity_segments = 0;
rq                343 block/blk-mq.c 	rq->extra_len = 0;
rq                344 block/blk-mq.c 	WRITE_ONCE(rq->deadline, 0);
rq                346 block/blk-mq.c 	rq->timeout = 0;
rq                348 block/blk-mq.c 	rq->end_io = NULL;
rq                349 block/blk-mq.c 	rq->end_io_data = NULL;
rq                352 block/blk-mq.c 	refcount_set(&rq->ref, 1);
rq                353 block/blk-mq.c 	return rq;
rq                361 block/blk-mq.c 	struct request *rq;
rq                407 block/blk-mq.c 	rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags, alloc_time_ns);
rq                409 block/blk-mq.c 		rq->elv.icq = NULL;
rq                412 block/blk-mq.c 				blk_mq_sched_assign_ioc(rq);
rq                414 block/blk-mq.c 			e->type->ops.prepare_request(rq, bio);
rq                415 block/blk-mq.c 			rq->rq_flags |= RQF_ELVPRIV;
rq                419 block/blk-mq.c 	return rq;
rq                426 block/blk-mq.c 	struct request *rq;
rq                433 block/blk-mq.c 	rq = blk_mq_get_request(q, NULL, &alloc_data);
rq                436 block/blk-mq.c 	if (!rq)
rq                439 block/blk-mq.c 	rq->__data_len = 0;
rq                440 block/blk-mq.c 	rq->__sector = (sector_t) -1;
rq                441 block/blk-mq.c 	rq->bio = rq->biotail = NULL;
rq                442 block/blk-mq.c 	return rq;
rq                450 block/blk-mq.c 	struct request *rq;
rq                482 block/blk-mq.c 	rq = blk_mq_get_request(q, NULL, &alloc_data);
rq                485 block/blk-mq.c 	if (!rq)
rq                488 block/blk-mq.c 	return rq;
rq                492 block/blk-mq.c static void __blk_mq_free_request(struct request *rq)
rq                494 block/blk-mq.c 	struct request_queue *q = rq->q;
rq                495 block/blk-mq.c 	struct blk_mq_ctx *ctx = rq->mq_ctx;
rq                496 block/blk-mq.c 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
rq                497 block/blk-mq.c 	const int sched_tag = rq->internal_tag;
rq                499 block/blk-mq.c 	blk_pm_mark_last_busy(rq);
rq                500 block/blk-mq.c 	rq->mq_hctx = NULL;
rq                501 block/blk-mq.c 	if (rq->tag != -1)
rq                502 block/blk-mq.c 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
rq                509 block/blk-mq.c void blk_mq_free_request(struct request *rq)
rq                511 block/blk-mq.c 	struct request_queue *q = rq->q;
rq                513 block/blk-mq.c 	struct blk_mq_ctx *ctx = rq->mq_ctx;
rq                514 block/blk-mq.c 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
rq                516 block/blk-mq.c 	if (rq->rq_flags & RQF_ELVPRIV) {
rq                518 block/blk-mq.c 			e->type->ops.finish_request(rq);
rq                519 block/blk-mq.c 		if (rq->elv.icq) {
rq                520 block/blk-mq.c 			put_io_context(rq->elv.icq->ioc);
rq                521 block/blk-mq.c 			rq->elv.icq = NULL;
rq                525 block/blk-mq.c 	ctx->rq_completed[rq_is_sync(rq)]++;
rq                526 block/blk-mq.c 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
rq                529 block/blk-mq.c 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
rq                532 block/blk-mq.c 	rq_qos_done(q, rq);
rq                534 block/blk-mq.c 	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
rq                535 block/blk-mq.c 	if (refcount_dec_and_test(&rq->ref))
rq                536 block/blk-mq.c 		__blk_mq_free_request(rq);
rq                540 block/blk-mq.c inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
rq                544 block/blk-mq.c 	if (blk_mq_need_time_stamp(rq))
rq                547 block/blk-mq.c 	if (rq->rq_flags & RQF_STATS) {
rq                548 block/blk-mq.c 		blk_mq_poll_stats_start(rq->q);
rq                549 block/blk-mq.c 		blk_stat_add(rq, now);
rq                552 block/blk-mq.c 	if (rq->internal_tag != -1)
rq                553 block/blk-mq.c 		blk_mq_sched_completed_request(rq, now);
rq                555 block/blk-mq.c 	blk_account_io_done(rq, now);
rq                557 block/blk-mq.c 	if (rq->end_io) {
rq                558 block/blk-mq.c 		rq_qos_done(rq->q, rq);
rq                559 block/blk-mq.c 		rq->end_io(rq, error);
rq                561 block/blk-mq.c 		blk_mq_free_request(rq);
rq                566 block/blk-mq.c void blk_mq_end_request(struct request *rq, blk_status_t error)
rq                568 block/blk-mq.c 	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
rq                570 block/blk-mq.c 	__blk_mq_end_request(rq, error);
rq                576 block/blk-mq.c 	struct request *rq = data;
rq                577 block/blk-mq.c 	struct request_queue *q = rq->q;
rq                579 block/blk-mq.c 	q->mq_ops->complete(rq);
rq                582 block/blk-mq.c static void __blk_mq_complete_request(struct request *rq)
rq                584 block/blk-mq.c 	struct blk_mq_ctx *ctx = rq->mq_ctx;
rq                585 block/blk-mq.c 	struct request_queue *q = rq->q;
rq                589 block/blk-mq.c 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
rq                600 block/blk-mq.c 		__blk_complete_request(rq);
rq                608 block/blk-mq.c 	if ((rq->cmd_flags & REQ_HIPRI) ||
rq                610 block/blk-mq.c 		q->mq_ops->complete(rq);
rq                619 block/blk-mq.c 		rq->csd.func = __blk_mq_complete_request_remote;
rq                620 block/blk-mq.c 		rq->csd.info = rq;
rq                621 block/blk-mq.c 		rq->csd.flags = 0;
rq                622 block/blk-mq.c 		smp_call_function_single_async(ctx->cpu, &rq->csd);
rq                624 block/blk-mq.c 		q->mq_ops->complete(rq);
rq                657 block/blk-mq.c bool blk_mq_complete_request(struct request *rq)
rq                659 block/blk-mq.c 	if (unlikely(blk_should_fake_timeout(rq->q)))
rq                661 block/blk-mq.c 	__blk_mq_complete_request(rq);
rq                666 block/blk-mq.c int blk_mq_request_started(struct request *rq)
rq                668 block/blk-mq.c 	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
rq                672 block/blk-mq.c int blk_mq_request_completed(struct request *rq)
rq                674 block/blk-mq.c 	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
rq                678 block/blk-mq.c void blk_mq_start_request(struct request *rq)
rq                680 block/blk-mq.c 	struct request_queue *q = rq->q;
rq                682 block/blk-mq.c 	trace_block_rq_issue(q, rq);
rq                685 block/blk-mq.c 		rq->io_start_time_ns = ktime_get_ns();
rq                686 block/blk-mq.c 		rq->stats_sectors = blk_rq_sectors(rq);
rq                687 block/blk-mq.c 		rq->rq_flags |= RQF_STATS;
rq                688 block/blk-mq.c 		rq_qos_issue(q, rq);
rq                691 block/blk-mq.c 	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
rq                693 block/blk-mq.c 	blk_add_timer(rq);
rq                694 block/blk-mq.c 	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
rq                696 block/blk-mq.c 	if (q->dma_drain_size && blk_rq_bytes(rq)) {
rq                702 block/blk-mq.c 		rq->nr_phys_segments++;
rq                706 block/blk-mq.c 	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
rq                707 block/blk-mq.c 		q->integrity.profile->prepare_fn(rq);
rq                712 block/blk-mq.c static void __blk_mq_requeue_request(struct request *rq)
rq                714 block/blk-mq.c 	struct request_queue *q = rq->q;
rq                716 block/blk-mq.c 	blk_mq_put_driver_tag(rq);
rq                718 block/blk-mq.c 	trace_block_rq_requeue(q, rq);
rq                719 block/blk-mq.c 	rq_qos_requeue(q, rq);
rq                721 block/blk-mq.c 	if (blk_mq_request_started(rq)) {
rq                722 block/blk-mq.c 		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
rq                723 block/blk-mq.c 		rq->rq_flags &= ~RQF_TIMED_OUT;
rq                724 block/blk-mq.c 		if (q->dma_drain_size && blk_rq_bytes(rq))
rq                725 block/blk-mq.c 			rq->nr_phys_segments--;
rq                729 block/blk-mq.c void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
rq                731 block/blk-mq.c 	__blk_mq_requeue_request(rq);
rq                734 block/blk-mq.c 	blk_mq_sched_requeue_request(rq);
rq                736 block/blk-mq.c 	BUG_ON(!list_empty(&rq->queuelist));
rq                737 block/blk-mq.c 	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
rq                746 block/blk-mq.c 	struct request *rq, *next;
rq                752 block/blk-mq.c 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
rq                753 block/blk-mq.c 		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
rq                756 block/blk-mq.c 		rq->rq_flags &= ~RQF_SOFTBARRIER;
rq                757 block/blk-mq.c 		list_del_init(&rq->queuelist);
rq                763 block/blk-mq.c 		if (rq->rq_flags & RQF_DONTPREP)
rq                764 block/blk-mq.c 			blk_mq_request_bypass_insert(rq, false, false);
rq                766 block/blk-mq.c 			blk_mq_sched_insert_request(rq, true, false, false);
rq                770 block/blk-mq.c 		rq = list_entry(rq_list.next, struct request, queuelist);
rq                771 block/blk-mq.c 		list_del_init(&rq->queuelist);
rq                772 block/blk-mq.c 		blk_mq_sched_insert_request(rq, false, false, false);
rq                778 block/blk-mq.c void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
rq                781 block/blk-mq.c 	struct request_queue *q = rq->q;
rq                788 block/blk-mq.c 	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
rq                792 block/blk-mq.c 		rq->rq_flags |= RQF_SOFTBARRIER;
rq                793 block/blk-mq.c 		list_add(&rq->queuelist, &q->requeue_list);
rq                795 block/blk-mq.c 		list_add_tail(&rq->queuelist, &q->requeue_list);
rq                828 block/blk-mq.c static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
rq                835 block/blk-mq.c 	if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
rq                869 block/blk-mq.c static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
rq                873 block/blk-mq.c 	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
rq                875 block/blk-mq.c 	if (rq->rq_flags & RQF_TIMED_OUT)
rq                878 block/blk-mq.c 	deadline = READ_ONCE(rq->deadline);
rq                890 block/blk-mq.c 		struct request *rq, void *priv, bool reserved)
rq                898 block/blk-mq.c 	if (!blk_mq_req_expired(rq, next))
rq                910 block/blk-mq.c 	if (!refcount_inc_not_zero(&rq->ref))
rq                919 block/blk-mq.c 	if (blk_mq_req_expired(rq, next))
rq                920 block/blk-mq.c 		blk_mq_rq_timed_out(rq, reserved);
rq                922 block/blk-mq.c 	if (is_flush_rq(rq, hctx))
rq                923 block/blk-mq.c 		rq->end_io(rq, 0);
rq                924 block/blk-mq.c 	else if (refcount_dec_and_test(&rq->ref))
rq                925 block/blk-mq.c 		__blk_mq_free_request(rq);
rq               1010 block/blk-mq.c 	struct request *rq;
rq               1023 block/blk-mq.c 		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
rq               1024 block/blk-mq.c 		list_del_init(&dispatch_data->rq->queuelist);
rq               1030 block/blk-mq.c 	return !dispatch_data->rq;
rq               1039 block/blk-mq.c 		.rq   = NULL,
rq               1045 block/blk-mq.c 	return data.rq;
rq               1056 block/blk-mq.c bool blk_mq_get_driver_tag(struct request *rq)
rq               1059 block/blk-mq.c 		.q = rq->q,
rq               1060 block/blk-mq.c 		.hctx = rq->mq_hctx,
rq               1062 block/blk-mq.c 		.cmd_flags = rq->cmd_flags,
rq               1066 block/blk-mq.c 	if (rq->tag != -1)
rq               1069 block/blk-mq.c 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
rq               1073 block/blk-mq.c 	rq->tag = blk_mq_get_tag(&data);
rq               1074 block/blk-mq.c 	if (rq->tag >= 0) {
rq               1076 block/blk-mq.c 			rq->rq_flags |= RQF_MQ_INFLIGHT;
rq               1079 block/blk-mq.c 		data.hctx->tags->rqs[rq->tag] = rq;
rq               1083 block/blk-mq.c 	return rq->tag != -1;
rq               1114 block/blk-mq.c 				 struct request *rq)
rq               1132 block/blk-mq.c 		return blk_mq_get_driver_tag(rq);
rq               1158 block/blk-mq.c 	ret = blk_mq_get_driver_tag(rq);
rq               1215 block/blk-mq.c 	struct request *rq, *nxt;
rq               1232 block/blk-mq.c 		rq = list_first_entry(list, struct request, queuelist);
rq               1234 block/blk-mq.c 		hctx = rq->mq_hctx;
rq               1236 block/blk-mq.c 			blk_mq_put_driver_tag(rq);
rq               1240 block/blk-mq.c 		if (!blk_mq_get_driver_tag(rq)) {
rq               1248 block/blk-mq.c 			if (!blk_mq_mark_tag_wait(hctx, rq)) {
rq               1260 block/blk-mq.c 		list_del_init(&rq->queuelist);
rq               1262 block/blk-mq.c 		bd.rq = rq;
rq               1286 block/blk-mq.c 			list_add(&rq->queuelist, list);
rq               1287 block/blk-mq.c 			__blk_mq_requeue_request(rq);
rq               1293 block/blk-mq.c 			blk_mq_end_request(rq, BLK_STS_IOERR);
rq               1642 block/blk-mq.c 					    struct request *rq,
rq               1645 block/blk-mq.c 	struct blk_mq_ctx *ctx = rq->mq_ctx;
rq               1650 block/blk-mq.c 	trace_block_rq_insert(hctx->queue, rq);
rq               1653 block/blk-mq.c 		list_add(&rq->queuelist, &ctx->rq_lists[type]);
rq               1655 block/blk-mq.c 		list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
rq               1658 block/blk-mq.c void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
rq               1661 block/blk-mq.c 	struct blk_mq_ctx *ctx = rq->mq_ctx;
rq               1665 block/blk-mq.c 	__blk_mq_insert_req_list(hctx, rq, at_head);
rq               1673 block/blk-mq.c void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
rq               1676 block/blk-mq.c 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
rq               1680 block/blk-mq.c 		list_add(&rq->queuelist, &hctx->dispatch);
rq               1682 block/blk-mq.c 		list_add_tail(&rq->queuelist, &hctx->dispatch);
rq               1693 block/blk-mq.c 	struct request *rq;
rq               1700 block/blk-mq.c 	list_for_each_entry(rq, list, queuelist) {
rq               1701 block/blk-mq.c 		BUG_ON(rq->mq_ctx != ctx);
rq               1702 block/blk-mq.c 		trace_block_rq_insert(hctx->queue, rq);
rq               1733 block/blk-mq.c 	struct request *rq;
rq               1751 block/blk-mq.c 		rq = list_entry_rq(list.next);
rq               1752 block/blk-mq.c 		list_del_init(&rq->queuelist);
rq               1753 block/blk-mq.c 		BUG_ON(!rq->q);
rq               1754 block/blk-mq.c 		if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
rq               1762 block/blk-mq.c 			this_q = rq->q;
rq               1763 block/blk-mq.c 			this_ctx = rq->mq_ctx;
rq               1764 block/blk-mq.c 			this_hctx = rq->mq_hctx;
rq               1769 block/blk-mq.c 		list_add_tail(&rq->queuelist, &rq_list);
rq               1783 block/blk-mq.c static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
rq               1787 block/blk-mq.c 		rq->cmd_flags |= REQ_FAILFAST_MASK;
rq               1789 block/blk-mq.c 	rq->__sector = bio->bi_iter.bi_sector;
rq               1790 block/blk-mq.c 	rq->write_hint = bio->bi_write_hint;
rq               1791 block/blk-mq.c 	blk_rq_bio_prep(rq, bio, nr_segs);
rq               1793 block/blk-mq.c 	blk_account_io_start(rq, true);
rq               1797 block/blk-mq.c 					    struct request *rq,
rq               1800 block/blk-mq.c 	struct request_queue *q = rq->q;
rq               1802 block/blk-mq.c 		.rq = rq,
rq               1808 block/blk-mq.c 	new_cookie = request_to_qc_t(hctx, rq);
rq               1824 block/blk-mq.c 		__blk_mq_requeue_request(rq);
rq               1836 block/blk-mq.c 						struct request *rq,
rq               1840 block/blk-mq.c 	struct request_queue *q = rq->q;
rq               1862 block/blk-mq.c 	if (!blk_mq_get_driver_tag(rq)) {
rq               1867 block/blk-mq.c 	return __blk_mq_issue_directly(hctx, rq, cookie, last);
rq               1872 block/blk-mq.c 	blk_mq_request_bypass_insert(rq, false, run_queue);
rq               1877 block/blk-mq.c 		struct request *rq, blk_qc_t *cookie)
rq               1886 block/blk-mq.c 	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
rq               1888 block/blk-mq.c 		blk_mq_request_bypass_insert(rq, false, true);
rq               1890 block/blk-mq.c 		blk_mq_end_request(rq, ret);
rq               1895 block/blk-mq.c blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
rq               1900 block/blk-mq.c 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
rq               1903 block/blk-mq.c 	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
rq               1914 block/blk-mq.c 		struct request *rq = list_first_entry(list, struct request,
rq               1917 block/blk-mq.c 		list_del_init(&rq->queuelist);
rq               1918 block/blk-mq.c 		ret = blk_mq_request_issue_directly(rq, list_empty(list));
rq               1922 block/blk-mq.c 				blk_mq_request_bypass_insert(rq, false,
rq               1926 block/blk-mq.c 			blk_mq_end_request(rq, ret);
rq               1939 block/blk-mq.c static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
rq               1941 block/blk-mq.c 	list_add_tail(&rq->queuelist, &plug->mq_list);
rq               1948 block/blk-mq.c 		if (tmp->q != rq->q)
rq               1958 block/blk-mq.c 	struct request *rq;
rq               1980 block/blk-mq.c 	rq = blk_mq_get_request(q, bio, &data);
rq               1981 block/blk-mq.c 	if (unlikely(!rq)) {
rq               1990 block/blk-mq.c 	rq_qos_track(q, rq, bio);
rq               1992 block/blk-mq.c 	cookie = request_to_qc_t(data.hctx, rq);
rq               1994 block/blk-mq.c 	blk_mq_bio_to_request(rq, bio, nr_segs);
rq               1999 block/blk-mq.c 		blk_insert_flush(rq);
rq               2024 block/blk-mq.c 		blk_add_rq_to_plug(plug, rq);
rq               2026 block/blk-mq.c 		blk_mq_sched_insert_request(rq, false, true, true);
rq               2041 block/blk-mq.c 		blk_add_rq_to_plug(plug, rq);
rq               2052 block/blk-mq.c 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
rq               2054 block/blk-mq.c 		blk_mq_sched_insert_request(rq, false, true, true);
rq               2069 block/blk-mq.c 			struct request *rq = tags->static_rqs[i];
rq               2071 block/blk-mq.c 			if (!rq)
rq               2073 block/blk-mq.c 			set->ops->exit_request(set, rq, hctx_idx);
rq               2142 block/blk-mq.c static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
rq               2148 block/blk-mq.c 		ret = set->ops->init_request(set, rq, hctx_idx, node);
rq               2153 block/blk-mq.c 	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
rq               2215 block/blk-mq.c 			struct request *rq = p;
rq               2217 block/blk-mq.c 			tags->static_rqs[i] = rq;
rq               2218 block/blk-mq.c 			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
rq               3378 block/blk-mq.c 				       struct request *rq)
rq               3399 block/blk-mq.c 	bucket = blk_mq_poll_stats_bkt(rq);
rq               3411 block/blk-mq.c 				     struct request *rq)
rq               3418 block/blk-mq.c 	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
rq               3430 block/blk-mq.c 		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
rq               3435 block/blk-mq.c 	rq->rq_flags |= RQF_MQ_POLL_SLEPT;
rq               3448 block/blk-mq.c 		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
rq               3466 block/blk-mq.c 	struct request *rq;
rq               3472 block/blk-mq.c 		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
rq               3474 block/blk-mq.c 		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
rq               3481 block/blk-mq.c 		if (!rq)
rq               3485 block/blk-mq.c 	return blk_mq_poll_hybrid_sleep(q, hctx, rq);
rq               3554 block/blk-mq.c unsigned int blk_mq_rq_cpu(struct request *rq)
rq               3556 block/blk-mq.c 	return rq->mq_ctx->cpu;
rq                 44 block/blk-mq.h void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
rq                 47 block/blk-mq.h bool blk_mq_get_driver_tag(struct request *rq);
rq                 67 block/blk-mq.h void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
rq                 69 block/blk-mq.h void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
rq                 75 block/blk-mq.h blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
rq                136 block/blk-mq.h static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
rq                138 block/blk-mq.h 	return READ_ONCE(rq->state);
rq                210 block/blk-mq.h 					   struct request *rq)
rq                212 block/blk-mq.h 	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
rq                213 block/blk-mq.h 	rq->tag = -1;
rq                215 block/blk-mq.h 	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
rq                216 block/blk-mq.h 		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
rq                221 block/blk-mq.h static inline void blk_mq_put_driver_tag(struct request *rq)
rq                223 block/blk-mq.h 	if (rq->tag == -1 || rq->internal_tag == -1)
rq                226 block/blk-mq.h 	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
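The block/blk-mq.c and block/blk-mq.h entries above trace the blk-mq request lifecycle: the driver receives bd->rq in its ->queue_rq handler, marks it started, and completes it. The fragment below is a minimal, hypothetical handler stitched together only from calls that appear verbatim in this index (bd->rq, blk_mq_rq_to_pdu(), blk_mq_start_request(), blk_mq_end_request()); my_queue_rq, struct my_cmd and the synchronous completion are illustrative assumptions, not code from any indexed file, and real drivers (see the loop, mtip32xx and null_blk entries further down) usually complete asynchronously.

#include <linux/blk-mq.h>

/* Hypothetical per-request driver data, sized via tag_set->cmd_size. */
struct my_cmd {
	blk_status_t status;
};

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	blk_mq_start_request(rq);

	/* Transfer blk_rq_sectors(rq) sectors starting at blk_rq_pos(rq). */
	cmd->status = BLK_STS_OK;

	blk_mq_end_request(rq, cmd->status);
	return BLK_STS_OK;
}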
rq                 16 block/blk-pm.h static inline void blk_pm_mark_last_busy(struct request *rq)
rq                 18 block/blk-pm.h 	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
rq                 19 block/blk-pm.h 		pm_runtime_mark_last_busy(rq->q->dev);
rq                 22 block/blk-pm.h static inline void blk_pm_requeue_request(struct request *rq)
rq                 24 block/blk-pm.h 	lockdep_assert_held(&rq->q->queue_lock);
rq                 26 block/blk-pm.h 	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
rq                 27 block/blk-pm.h 		rq->q->nr_pending--;
rq                 31 block/blk-pm.h 				      struct request *rq)
rq                 35 block/blk-pm.h 	if (q->dev && !(rq->rq_flags & RQF_PM))
rq                 39 block/blk-pm.h static inline void blk_pm_put_request(struct request *rq)
rq                 41 block/blk-pm.h 	lockdep_assert_held(&rq->q->queue_lock);
rq                 43 block/blk-pm.h 	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
rq                 44 block/blk-pm.h 		--rq->q->nr_pending;
rq                 51 block/blk-pm.h static inline void blk_pm_mark_last_busy(struct request *rq)
rq                 55 block/blk-pm.h static inline void blk_pm_requeue_request(struct request *rq)
rq                 60 block/blk-pm.h 				      struct request *rq)
rq                 64 block/blk-pm.h static inline void blk_pm_put_request(struct request *rq)
rq                 41 block/blk-rq-qos.c void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
rq                 45 block/blk-rq-qos.c 			rqos->ops->done(rqos, rq);
rq                 50 block/blk-rq-qos.c void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
rq                 54 block/blk-rq-qos.c 			rqos->ops->issue(rqos, rq);
rq                 59 block/blk-rq-qos.c void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
rq                 63 block/blk-rq-qos.c 			rqos->ops->requeue(rqos, rq);
rq                 77 block/blk-rq-qos.c void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
rq                 81 block/blk-rq-qos.c 			rqos->ops->track(rqos, rq, bio);
rq                 86 block/blk-rq-qos.c void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
rq                 90 block/blk-rq-qos.c 			rqos->ops->merge(rqos, rq, bio);
rq                135 block/blk-rq-qos.h void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
rq                136 block/blk-rq-qos.h void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
rq                137 block/blk-rq-qos.h void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
rq                139 block/blk-rq-qos.h void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
rq                140 block/blk-rq-qos.h void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
rq                150 block/blk-rq-qos.h static inline void rq_qos_done(struct request_queue *q, struct request *rq)
rq                153 block/blk-rq-qos.h 		__rq_qos_done(q->rq_qos, rq);
rq                156 block/blk-rq-qos.h static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
rq                159 block/blk-rq-qos.h 		__rq_qos_issue(q->rq_qos, rq);
rq                162 block/blk-rq-qos.h static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
rq                165 block/blk-rq-qos.h 		__rq_qos_requeue(q->rq_qos, rq);
rq                185 block/blk-rq-qos.h static inline void rq_qos_track(struct request_queue *q, struct request *rq,
rq                189 block/blk-rq-qos.h 		__rq_qos_track(q->rq_qos, rq, bio);
rq                192 block/blk-rq-qos.h static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
rq                196 block/blk-rq-qos.h 		__rq_qos_merge(q->rq_qos, rq, bio);
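The blk-rq-qos.c/.h entries above show the dispatch side of the rq_qos hooks: each rq_qos_issue()/rq_qos_done() wrapper walks q->rq_qos and invokes the policy's ops callback with the request. The sketch below is the provider side of that interface, assuming only the callback signatures visible in those entries; my_qos, my_qos_ops and the inflight counter are invented for illustration, and the sketch only builds inside block/ because blk-rq-qos.h is an in-tree header. wbt, indexed further down, is the in-tree reference implementation of these hooks.

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/blkdev.h>

#include "blk-rq-qos.h"

/* Hypothetical policy: count requests between ->issue and ->done. */
struct my_qos {
	struct rq_qos rqos;	/* embedded handle chained off q->rq_qos */
	atomic_t inflight;
};

static void my_qos_issue(struct rq_qos *rqos, struct request *rq)
{
	struct my_qos *mq = container_of(rqos, struct my_qos, rqos);

	atomic_inc(&mq->inflight);
}

static void my_qos_done(struct rq_qos *rqos, struct request *rq)
{
	struct my_qos *mq = container_of(rqos, struct my_qos, rqos);

	atomic_dec(&mq->inflight);
}

static struct rq_qos_ops my_qos_ops = {
	.issue	= my_qos_issue,
	.done	= my_qos_done,
};

Registration would set mq->rqos.q and mq->rqos.ops (plus an rq_qos_id, as the in-tree policies do) and call rq_qos_add(q, &mq->rqos); those details follow the wbt entries below rather than anything shown here, so treat them as assumptions.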
rq                 33 block/blk-softirq.c 		struct request *rq;
rq                 35 block/blk-softirq.c 		rq = list_entry(local_list.next, struct request, ipi_list);
rq                 36 block/blk-softirq.c 		list_del_init(&rq->ipi_list);
rq                 37 block/blk-softirq.c 		rq->q->mq_ops->complete(rq);
rq                 44 block/blk-softirq.c 	struct request *rq = data;
rq                 50 block/blk-softirq.c 	list_add_tail(&rq->ipi_list, list);
rq                 52 block/blk-softirq.c 	if (list->next == &rq->ipi_list)
rq                 61 block/blk-softirq.c static int raise_blk_irq(int cpu, struct request *rq)
rq                 64 block/blk-softirq.c 		call_single_data_t *data = &rq->csd;
rq                 67 block/blk-softirq.c 		data->info = rq;
rq                 77 block/blk-softirq.c static int raise_blk_irq(int cpu, struct request *rq)
rq                 51 block/blk-stat.c void blk_stat_add(struct request *rq, u64 now)
rq                 53 block/blk-stat.c 	struct request_queue *q = rq->q;
rq                 59 block/blk-stat.c 	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
rq                 61 block/blk-stat.c 	blk_throtl_stat_add(rq, value);
rq                 68 block/blk-stat.c 		bucket = cb->bucket_fn(rq);
rq                 68 block/blk-stat.h void blk_stat_add(struct request *rq, u64 now);
rq               2246 block/blk-throttle.c void blk_throtl_stat_add(struct request *rq, u64 time_ns)
rq               2248 block/blk-throttle.c 	struct request_queue *q = rq->q;
rq               2251 block/blk-throttle.c 	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
rq                 34 block/blk-wbt.c static inline void wbt_clear_state(struct request *rq)
rq                 36 block/blk-wbt.c 	rq->wbt_flags = 0;
rq                 39 block/blk-wbt.c static inline enum wbt_flags wbt_flags(struct request *rq)
rq                 41 block/blk-wbt.c 	return rq->wbt_flags;
rq                 44 block/blk-wbt.c static inline bool wbt_is_tracked(struct request *rq)
rq                 46 block/blk-wbt.c 	return rq->wbt_flags & WBT_TRACKED;
rq                 49 block/blk-wbt.c static inline bool wbt_is_read(struct request *rq)
rq                 51 block/blk-wbt.c 	return rq->wbt_flags & WBT_READ;
rq                185 block/blk-wbt.c static void wbt_done(struct rq_qos *rqos, struct request *rq)
rq                189 block/blk-wbt.c 	if (!wbt_is_tracked(rq)) {
rq                190 block/blk-wbt.c 		if (rwb->sync_cookie == rq) {
rq                195 block/blk-wbt.c 		if (wbt_is_read(rq))
rq                198 block/blk-wbt.c 		WARN_ON_ONCE(rq == rwb->sync_cookie);
rq                199 block/blk-wbt.c 		__wbt_done(rqos, wbt_flags(rq));
rq                201 block/blk-wbt.c 	wbt_clear_state(rq);
rq                597 block/blk-wbt.c static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
rq                600 block/blk-wbt.c 	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
rq                603 block/blk-wbt.c static void wbt_issue(struct rq_qos *rqos, struct request *rq)
rq                617 block/blk-wbt.c 	if (wbt_is_read(rq) && !rwb->sync_issue) {
rq                618 block/blk-wbt.c 		rwb->sync_cookie = rq;
rq                619 block/blk-wbt.c 		rwb->sync_issue = rq->io_start_time_ns;
rq                623 block/blk-wbt.c static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
rq                628 block/blk-wbt.c 	if (rq == rwb->sync_cookie) {
rq                672 block/blk-wbt.c static int wbt_data_dir(const struct request *rq)
rq                674 block/blk-wbt.c 	const int op = req_op(rq);
rq                104 block/blk-wbt.h static inline void wbt_track(struct request *rq, enum wbt_flags flags)
rq                 34 block/blk-zoned.c bool blk_req_needs_zone_write_lock(struct request *rq)
rq                 36 block/blk-zoned.c 	if (!rq->q->seq_zones_wlock)
rq                 39 block/blk-zoned.c 	if (blk_rq_is_passthrough(rq))
rq                 42 block/blk-zoned.c 	switch (req_op(rq)) {
rq                 46 block/blk-zoned.c 		return blk_rq_zone_is_seq(rq);
rq                 53 block/blk-zoned.c void __blk_req_zone_write_lock(struct request *rq)
rq                 55 block/blk-zoned.c 	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
rq                 56 block/blk-zoned.c 					  rq->q->seq_zones_wlock)))
rq                 59 block/blk-zoned.c 	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
rq                 60 block/blk-zoned.c 	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
rq                 64 block/blk-zoned.c void __blk_req_zone_write_unlock(struct request *rq)
rq                 66 block/blk-zoned.c 	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
rq                 67 block/blk-zoned.c 	if (rq->q->seq_zones_wlock)
rq                 68 block/blk-zoned.c 		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
rq                 69 block/blk-zoned.c 						 rq->q->seq_zones_wlock));
rq                110 block/blk.h    static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
rq                113 block/blk.h    	rq->nr_phys_segments = nr_segs;
rq                114 block/blk.h    	rq->__data_len = bio->bi_iter.bi_size;
rq                115 block/blk.h    	rq->bio = rq->biotail = bio;
rq                116 block/blk.h    	rq->ioprio = bio_prio(bio);
rq                119 block/blk.h    		rq->rq_disk = bio->bi_disk;
rq                195 block/blk.h    #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
rq                197 block/blk.h    void blk_insert_flush(struct request *rq);
rq                235 block/blk.h    struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
rq                236 block/blk.h    struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
rq                237 block/blk.h    int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
rq                239 block/blk.h    unsigned int blk_recalc_rq_segments(struct request *rq);
rq                240 block/blk.h    void blk_rq_set_mixed_merge(struct request *rq);
rq                241 block/blk.h    bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
rq                242 block/blk.h    enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
rq                253 block/blk.h    static inline bool blk_do_io_stat(struct request *rq)
rq                255 block/blk.h    	return rq->rq_disk &&
rq                256 block/blk.h    	       (rq->rq_flags & RQF_IO_STAT) &&
rq                257 block/blk.h    		!blk_rq_is_passthrough(rq);
rq                327 block/blk.h    extern void blk_throtl_stat_add(struct request *rq, u64 time);
rq                330 block/blk.h    static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
rq                 36 block/bsg-lib.c static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
rq                 39 block/bsg-lib.c 	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
rq                 48 block/bsg-lib.c 		job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
rq                 54 block/bsg-lib.c 		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
rq                 76 block/bsg-lib.c static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
rq                 78 block/bsg-lib.c 	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
rq                125 block/bsg-lib.c static void bsg_transport_free_rq(struct request *rq)
rq                127 block/bsg-lib.c 	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
rq                151 block/bsg-lib.c 	struct request *rq = blk_mq_rq_from_pdu(job);
rq                158 block/bsg-lib.c 	blk_mq_end_request(rq, BLK_STS_OK);
rq                194 block/bsg-lib.c static void bsg_complete(struct request *rq)
rq                196 block/bsg-lib.c 	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
rq                266 block/bsg-lib.c 	struct request *req = bd->rq;
rq                335 block/bsg-lib.c static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
rq                338 block/bsg-lib.c 		container_of(rq->q->tag_set, struct bsg_set, tag_set);
rq                342 block/bsg-lib.c 	return bset->timeout_fn(rq);
rq                 65 block/bsg.c    static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
rq                 68 block/bsg.c    	struct scsi_request *sreq = scsi_req(rq);
rq                 89 block/bsg.c    static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
rq                 91 block/bsg.c    	struct scsi_request *sreq = scsi_req(rq);
rq                115 block/bsg.c    	if (rq_data_dir(rq) == READ)
rq                123 block/bsg.c    static void bsg_scsi_free_rq(struct request *rq)
rq                125 block/bsg.c    	scsi_req_free_cmd(scsi_req(rq));
rq                137 block/bsg.c    	struct request *rq;
rq                154 block/bsg.c    	rq = blk_get_request(q, hdr.dout_xfer_len ?
rq                156 block/bsg.c    	if (IS_ERR(rq))
rq                157 block/bsg.c    		return PTR_ERR(rq);
rq                159 block/bsg.c    	ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
rq                163 block/bsg.c    	rq->timeout = msecs_to_jiffies(hdr.timeout);
rq                164 block/bsg.c    	if (!rq->timeout)
rq                165 block/bsg.c    		rq->timeout = q->sg_timeout;
rq                166 block/bsg.c    	if (!rq->timeout)
rq                167 block/bsg.c    		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq                168 block/bsg.c    	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq                169 block/bsg.c    		rq->timeout = BLK_MIN_SG_TIMEOUT;
rq                172 block/bsg.c    		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
rq                175 block/bsg.c    		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
rq                182 block/bsg.c    	bio = rq->bio;
rq                184 block/bsg.c    	blk_execute_rq(q, NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
rq                185 block/bsg.c    	ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
rq                189 block/bsg.c    	rq->q->bsg_dev.ops->free_rq(rq);
rq                190 block/bsg.c    	blk_put_request(rq);
rq                235 block/bsg.c    					 struct request_queue *rq,
rq                243 block/bsg.c    	if (!blk_get_queue(rq))
rq                248 block/bsg.c    		blk_put_queue(rq);
rq                252 block/bsg.c    	bd->queue = rq;
rq                257 block/bsg.c    	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
rq                 54 block/elevator.c #define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
rq                 60 block/elevator.c static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
rq                 62 block/elevator.c 	struct request_queue *q = rq->q;
rq                 66 block/elevator.c 		return e->type->ops.allow_merge(q, rq, bio);
rq                 74 block/elevator.c bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
rq                 76 block/elevator.c 	if (!blk_rq_merge_ok(rq, bio))
rq                 79 block/elevator.c 	if (!elv_iosched_allow_bio_merge(rq, bio))
rq                201 block/elevator.c static inline void __elv_rqhash_del(struct request *rq)
rq                203 block/elevator.c 	hash_del(&rq->hash);
rq                204 block/elevator.c 	rq->rq_flags &= ~RQF_HASHED;
rq                207 block/elevator.c void elv_rqhash_del(struct request_queue *q, struct request *rq)
rq                209 block/elevator.c 	if (ELV_ON_HASH(rq))
rq                210 block/elevator.c 		__elv_rqhash_del(rq);
rq                214 block/elevator.c void elv_rqhash_add(struct request_queue *q, struct request *rq)
rq                218 block/elevator.c 	BUG_ON(ELV_ON_HASH(rq));
rq                219 block/elevator.c 	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
rq                220 block/elevator.c 	rq->rq_flags |= RQF_HASHED;
rq                224 block/elevator.c void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
rq                226 block/elevator.c 	__elv_rqhash_del(rq);
rq                227 block/elevator.c 	elv_rqhash_add(q, rq);
rq                234 block/elevator.c 	struct request *rq;
rq                236 block/elevator.c 	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
rq                237 block/elevator.c 		BUG_ON(!ELV_ON_HASH(rq));
rq                239 block/elevator.c 		if (unlikely(!rq_mergeable(rq))) {
rq                240 block/elevator.c 			__elv_rqhash_del(rq);
rq                244 block/elevator.c 		if (rq_hash_key(rq) == offset)
rq                245 block/elevator.c 			return rq;
rq                255 block/elevator.c void elv_rb_add(struct rb_root *root, struct request *rq)
rq                265 block/elevator.c 		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
rq                267 block/elevator.c 		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
rq                271 block/elevator.c 	rb_link_node(&rq->rb_node, parent, p);
rq                272 block/elevator.c 	rb_insert_color(&rq->rb_node, root);
rq                276 block/elevator.c void elv_rb_del(struct rb_root *root, struct request *rq)
rq                278 block/elevator.c 	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
rq                279 block/elevator.c 	rb_erase(&rq->rb_node, root);
rq                280 block/elevator.c 	RB_CLEAR_NODE(&rq->rb_node);
rq                287 block/elevator.c 	struct request *rq;
rq                290 block/elevator.c 		rq = rb_entry(n, struct request, rb_node);
rq                292 block/elevator.c 		if (sector < blk_rq_pos(rq))
rq                294 block/elevator.c 		else if (sector > blk_rq_pos(rq))
rq                297 block/elevator.c 			return rq;
rq                356 block/elevator.c bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
rq                367 block/elevator.c 	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
rq                378 block/elevator.c 		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
rq                379 block/elevator.c 		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
rq                384 block/elevator.c 		rq = __rq;
rq                390 block/elevator.c void elv_merged_request(struct request_queue *q, struct request *rq,
rq                396 block/elevator.c 		e->type->ops.request_merged(q, rq, type);
rq                399 block/elevator.c 		elv_rqhash_reposition(q, rq);
rq                401 block/elevator.c 	q->last_merge = rq;
rq                404 block/elevator.c void elv_merge_requests(struct request_queue *q, struct request *rq,
rq                410 block/elevator.c 		e->type->ops.requests_merged(q, rq, next);
rq                412 block/elevator.c 	elv_rqhash_reposition(q, rq);
rq                413 block/elevator.c 	q->last_merge = rq;
rq                416 block/elevator.c struct request *elv_latter_request(struct request_queue *q, struct request *rq)
rq                421 block/elevator.c 		return e->type->ops.next_request(q, rq);
rq                426 block/elevator.c struct request *elv_former_request(struct request_queue *q, struct request *rq)
rq                431 block/elevator.c 		return e->type->ops.former_request(q, rq);
rq                813 block/elevator.c 				      struct request *rq)
rq                815 block/elevator.c 	struct rb_node *rbprev = rb_prev(&rq->rb_node);
rq                825 block/elevator.c 				      struct request *rq)
rq                827 block/elevator.c 	struct rb_node *rbnext = rb_next(&rq->rb_node);
rq                528 block/kyber-iosched.c static int rq_get_domain_token(struct request *rq)
rq                530 block/kyber-iosched.c 	return (long)rq->elv.priv[0];
rq                533 block/kyber-iosched.c static void rq_set_domain_token(struct request *rq, int token)
rq                535 block/kyber-iosched.c 	rq->elv.priv[0] = (void *)(long)token;
rq                539 block/kyber-iosched.c 				  struct request *rq)
rq                544 block/kyber-iosched.c 	nr = rq_get_domain_token(rq);
rq                546 block/kyber-iosched.c 		sched_domain = kyber_sched_domain(rq->cmd_flags);
rq                548 block/kyber-iosched.c 				    rq->mq_ctx->cpu);
rq                582 block/kyber-iosched.c static void kyber_prepare_request(struct request *rq, struct bio *bio)
rq                584 block/kyber-iosched.c 	rq_set_domain_token(rq, -1);
rq                591 block/kyber-iosched.c 	struct request *rq, *next;
rq                593 block/kyber-iosched.c 	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
rq                594 block/kyber-iosched.c 		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
rq                595 block/kyber-iosched.c 		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
rq                600 block/kyber-iosched.c 			list_move(&rq->queuelist, head);
rq                602 block/kyber-iosched.c 			list_move_tail(&rq->queuelist, head);
rq                604 block/kyber-iosched.c 				rq->mq_ctx->index_hw[hctx->type]);
rq                605 block/kyber-iosched.c 		blk_mq_sched_request_inserted(rq);
rq                610 block/kyber-iosched.c static void kyber_finish_request(struct request *rq)
rq                612 block/kyber-iosched.c 	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
rq                614 block/kyber-iosched.c 	rq_clear_domain_token(kqd, rq);
rq                635 block/kyber-iosched.c static void kyber_completed_request(struct request *rq, u64 now)
rq                637 block/kyber-iosched.c 	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
rq                642 block/kyber-iosched.c 	sched_domain = kyber_sched_domain(rq->cmd_flags);
rq                649 block/kyber-iosched.c 			   target, now - rq->start_time_ns);
rq                651 block/kyber-iosched.c 			   now - rq->io_start_time_ns);
rq                755 block/kyber-iosched.c 	struct request *rq;
rq                768 block/kyber-iosched.c 	rq = list_first_entry_or_null(rqs, struct request, queuelist);
rq                769 block/kyber-iosched.c 	if (rq) {
rq                773 block/kyber-iosched.c 			rq_set_domain_token(rq, nr);
rq                774 block/kyber-iosched.c 			list_del_init(&rq->queuelist);
rq                775 block/kyber-iosched.c 			return rq;
rq                784 block/kyber-iosched.c 			rq = list_first_entry(rqs, struct request, queuelist);
rq                786 block/kyber-iosched.c 			rq_set_domain_token(rq, nr);
rq                787 block/kyber-iosched.c 			list_del_init(&rq->queuelist);
rq                788 block/kyber-iosched.c 			return rq;
rq                803 block/kyber-iosched.c 	struct request *rq;
rq                813 block/kyber-iosched.c 		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
rq                814 block/kyber-iosched.c 		if (rq)
rq                834 block/kyber-iosched.c 		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
rq                835 block/kyber-iosched.c 		if (rq)
rq                839 block/kyber-iosched.c 	rq = NULL;
rq                842 block/kyber-iosched.c 	return rq;
rq                 68 block/mq-deadline.c deadline_rb_root(struct deadline_data *dd, struct request *rq)
rq                 70 block/mq-deadline.c 	return &dd->sort_list[rq_data_dir(rq)];
rq                 77 block/mq-deadline.c deadline_latter_request(struct request *rq)
rq                 79 block/mq-deadline.c 	struct rb_node *node = rb_next(&rq->rb_node);
rq                 88 block/mq-deadline.c deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
rq                 90 block/mq-deadline.c 	struct rb_root *root = deadline_rb_root(dd, rq);
rq                 92 block/mq-deadline.c 	elv_rb_add(root, rq);
rq                 96 block/mq-deadline.c deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
rq                 98 block/mq-deadline.c 	const int data_dir = rq_data_dir(rq);
rq                100 block/mq-deadline.c 	if (dd->next_rq[data_dir] == rq)
rq                101 block/mq-deadline.c 		dd->next_rq[data_dir] = deadline_latter_request(rq);
rq                103 block/mq-deadline.c 	elv_rb_del(deadline_rb_root(dd, rq), rq);
rq                109 block/mq-deadline.c static void deadline_remove_request(struct request_queue *q, struct request *rq)
rq                113 block/mq-deadline.c 	list_del_init(&rq->queuelist);
rq                118 block/mq-deadline.c 	if (!RB_EMPTY_NODE(&rq->rb_node))
rq                119 block/mq-deadline.c 		deadline_del_rq_rb(dd, rq);
rq                121 block/mq-deadline.c 	elv_rqhash_del(q, rq);
rq                122 block/mq-deadline.c 	if (q->last_merge == rq)
rq                165 block/mq-deadline.c deadline_move_request(struct deadline_data *dd, struct request *rq)
rq                167 block/mq-deadline.c 	const int data_dir = rq_data_dir(rq);
rq                171 block/mq-deadline.c 	dd->next_rq[data_dir] = deadline_latter_request(rq);
rq                176 block/mq-deadline.c 	deadline_remove_request(rq->q, rq);
rq                185 block/mq-deadline.c 	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
rq                190 block/mq-deadline.c 	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
rq                203 block/mq-deadline.c 	struct request *rq;
rq                212 block/mq-deadline.c 	rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
rq                213 block/mq-deadline.c 	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
rq                214 block/mq-deadline.c 		return rq;
rq                221 block/mq-deadline.c 	list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
rq                222 block/mq-deadline.c 		if (blk_req_can_dispatch_to_zone(rq))
rq                225 block/mq-deadline.c 	rq = NULL;
rq                229 block/mq-deadline.c 	return rq;
rq                239 block/mq-deadline.c 	struct request *rq;
rq                245 block/mq-deadline.c 	rq = dd->next_rq[data_dir];
rq                246 block/mq-deadline.c 	if (!rq)
rq                249 block/mq-deadline.c 	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
rq                250 block/mq-deadline.c 		return rq;
rq                257 block/mq-deadline.c 	while (rq) {
rq                258 block/mq-deadline.c 		if (blk_req_can_dispatch_to_zone(rq))
rq                260 block/mq-deadline.c 		rq = deadline_latter_request(rq);
rq                264 block/mq-deadline.c 	return rq;
rq                273 block/mq-deadline.c 	struct request *rq, *next_rq;
rq                278 block/mq-deadline.c 		rq = list_first_entry(&dd->dispatch, struct request, queuelist);
rq                279 block/mq-deadline.c 		list_del_init(&rq->queuelist);
rq                289 block/mq-deadline.c 	rq = deadline_next_request(dd, WRITE);
rq                290 block/mq-deadline.c 	if (!rq)
rq                291 block/mq-deadline.c 		rq = deadline_next_request(dd, READ);
rq                293 block/mq-deadline.c 	if (rq && dd->batching < dd->fifo_batch)
rq                342 block/mq-deadline.c 		rq = deadline_fifo_request(dd, data_dir);
rq                348 block/mq-deadline.c 		rq = next_rq;
rq                355 block/mq-deadline.c 	if (!rq)
rq                365 block/mq-deadline.c 	deadline_move_request(dd, rq);
rq                370 block/mq-deadline.c 	blk_req_zone_write_lock(rq);
rq                371 block/mq-deadline.c 	rq->rq_flags |= RQF_STARTED;
rq                372 block/mq-deadline.c 	return rq;
rq                384 block/mq-deadline.c 	struct request *rq;
rq                387 block/mq-deadline.c 	rq = __dd_dispatch_request(dd);
rq                390 block/mq-deadline.c 	return rq;
rq                439 block/mq-deadline.c static int dd_request_merge(struct request_queue *q, struct request **rq,
rq                454 block/mq-deadline.c 			*rq = __rq;
rq                483 block/mq-deadline.c static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
rq                488 block/mq-deadline.c 	const int data_dir = rq_data_dir(rq);
rq                494 block/mq-deadline.c 	blk_req_zone_write_unlock(rq);
rq                496 block/mq-deadline.c 	if (blk_mq_sched_try_insert_merge(q, rq))
rq                499 block/mq-deadline.c 	blk_mq_sched_request_inserted(rq);
rq                501 block/mq-deadline.c 	if (at_head || blk_rq_is_passthrough(rq)) {
rq                503 block/mq-deadline.c 			list_add(&rq->queuelist, &dd->dispatch);
rq                505 block/mq-deadline.c 			list_add_tail(&rq->queuelist, &dd->dispatch);
rq                507 block/mq-deadline.c 		deadline_add_rq_rb(dd, rq);
rq                509 block/mq-deadline.c 		if (rq_mergeable(rq)) {
rq                510 block/mq-deadline.c 			elv_rqhash_add(q, rq);
rq                512 block/mq-deadline.c 				q->last_merge = rq;
rq                518 block/mq-deadline.c 		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
rq                519 block/mq-deadline.c 		list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
rq                531 block/mq-deadline.c 		struct request *rq;
rq                533 block/mq-deadline.c 		rq = list_first_entry(list, struct request, queuelist);
rq                534 block/mq-deadline.c 		list_del_init(&rq->queuelist);
rq                535 block/mq-deadline.c 		dd_insert_request(hctx, rq, at_head);
rq                544 block/mq-deadline.c static void dd_prepare_request(struct request *rq, struct bio *bio)
rq                562 block/mq-deadline.c static void dd_finish_request(struct request *rq)
rq                564 block/mq-deadline.c 	struct request_queue *q = rq->q;
rq                571 block/mq-deadline.c 		blk_req_zone_write_unlock(rq);
rq                573 block/mq-deadline.c 			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
rq                698 block/mq-deadline.c 	struct request *rq = dd->next_rq[ddir];				\
rq                700 block/mq-deadline.c 	if (rq)								\
rq                701 block/mq-deadline.c 		__blk_mq_debugfs_rq_show(m, rq);			\
rq                216 block/scsi_ioctl.c static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
rq                219 block/scsi_ioctl.c 	struct scsi_request *req = scsi_req(rq);
rq                231 block/scsi_ioctl.c 	rq->timeout = msecs_to_jiffies(hdr->timeout);
rq                232 block/scsi_ioctl.c 	if (!rq->timeout)
rq                233 block/scsi_ioctl.c 		rq->timeout = q->sg_timeout;
rq                234 block/scsi_ioctl.c 	if (!rq->timeout)
rq                235 block/scsi_ioctl.c 		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq                236 block/scsi_ioctl.c 	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq                237 block/scsi_ioctl.c 		rq->timeout = BLK_MIN_SG_TIMEOUT;
rq                242 block/scsi_ioctl.c static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
rq                245 block/scsi_ioctl.c 	struct scsi_request *req = scsi_req(rq);
rq                285 block/scsi_ioctl.c 	struct request *rq;
rq                310 block/scsi_ioctl.c 	rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
rq                311 block/scsi_ioctl.c 	if (IS_ERR(rq))
rq                312 block/scsi_ioctl.c 		return PTR_ERR(rq);
rq                313 block/scsi_ioctl.c 	req = scsi_req(rq);
rq                321 block/scsi_ioctl.c 	ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
rq                330 block/scsi_ioctl.c 		ret = import_iovec(rq_data_dir(rq),
rq                339 block/scsi_ioctl.c 		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
rq                342 block/scsi_ioctl.c 		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
rq                348 block/scsi_ioctl.c 	bio = rq->bio;
rq                357 block/scsi_ioctl.c 	blk_execute_rq(q, bd_disk, rq, at_head);
rq                361 block/scsi_ioctl.c 	ret = blk_complete_sghdr_rq(rq, hdr, bio);
rq                366 block/scsi_ioctl.c 	blk_put_request(rq);
rq                408 block/scsi_ioctl.c 	struct request *rq;
rq                437 block/scsi_ioctl.c 	rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
rq                438 block/scsi_ioctl.c 	if (IS_ERR(rq)) {
rq                439 block/scsi_ioctl.c 		err = PTR_ERR(rq);
rq                442 block/scsi_ioctl.c 	req = scsi_req(rq);
rq                467 block/scsi_ioctl.c 		rq->timeout = FORMAT_UNIT_TIMEOUT;
rq                471 block/scsi_ioctl.c 		rq->timeout = START_STOP_TIMEOUT;
rq                474 block/scsi_ioctl.c 		rq->timeout = MOVE_MEDIUM_TIMEOUT;
rq                477 block/scsi_ioctl.c 		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
rq                480 block/scsi_ioctl.c 		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
rq                484 block/scsi_ioctl.c 		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq                488 block/scsi_ioctl.c 	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO)) {
rq                493 block/scsi_ioctl.c 	blk_execute_rq(q, disk, rq, 0);
rq                509 block/scsi_ioctl.c 	blk_put_request(rq);
rq                522 block/scsi_ioctl.c 	struct request *rq;
rq                525 block/scsi_ioctl.c 	rq = blk_get_request(q, REQ_OP_SCSI_OUT, 0);
rq                526 block/scsi_ioctl.c 	if (IS_ERR(rq))
rq                527 block/scsi_ioctl.c 		return PTR_ERR(rq);
rq                528 block/scsi_ioctl.c 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq                529 block/scsi_ioctl.c 	scsi_req(rq)->cmd[0] = cmd;
rq                530 block/scsi_ioctl.c 	scsi_req(rq)->cmd[4] = data;
rq                531 block/scsi_ioctl.c 	scsi_req(rq)->cmd_len = 6;
rq                532 block/scsi_ioctl.c 	blk_execute_rq(q, bd_disk, rq, 0);
rq                533 block/scsi_ioctl.c 	err = scsi_req(rq)->result ? -EIO : 0;
rq                534 block/scsi_ioctl.c 	blk_put_request(rq);
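The bsg.c and scsi_ioctl.c entries above all follow the same kernel-side passthrough pattern: allocate with blk_get_request(), fill scsi_req(rq), execute with blk_execute_rq(), inspect scsi_req(rq)->result, then blk_put_request(). Below is a condensed, hypothetical helper in that style, mirroring the scsi_ioctl.c lines around 522-534 indexed just above; the function name is invented, and the four-argument blk_execute_rq() matches the kernel version this index was generated from.

#include <linux/blkdev.h>
#include <linux/err.h>
#include <scsi/scsi_request.h>

/* Send a 6-byte CDB with no data transfer and report success/failure. */
static int my_send_simple_cmd(struct request_queue *q, struct gendisk *disk,
			      unsigned char opcode)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, REQ_OP_SCSI_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	scsi_req(rq)->cmd[0] = opcode;
	scsi_req(rq)->cmd_len = 6;

	blk_execute_rq(q, disk, rq, 0);
	err = scsi_req(rq)->result ? -EIO : 0;

	blk_put_request(rq);
	return err;
}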
rq                132 block/t10-pi.c static void t10_pi_type1_prepare(struct request *rq)
rq                134 block/t10-pi.c 	const int tuple_sz = rq->q->integrity.tuple_size;
rq                135 block/t10-pi.c 	u32 ref_tag = t10_pi_ref_tag(rq);
rq                138 block/t10-pi.c 	__rq_for_each_bio(bio, rq) {
rq                183 block/t10-pi.c static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
rq                185 block/t10-pi.c 	unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
rq                186 block/t10-pi.c 	const int tuple_sz = rq->q->integrity.tuple_size;
rq                187 block/t10-pi.c 	u32 ref_tag = t10_pi_ref_tag(rq);
rq                190 block/t10-pi.c 	__rq_for_each_bio(bio, rq) {
rq                241 block/t10-pi.c static void t10_pi_type3_prepare(struct request *rq)
rq                248 block/t10-pi.c static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
rq               1238 drivers/ata/libata-scsi.c static int atapi_drain_needed(struct request *rq)
rq               1240 drivers/ata/libata-scsi.c 	if (likely(!blk_rq_is_passthrough(rq)))
rq               1243 drivers/ata/libata-scsi.c 	if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
rq               1246 drivers/ata/libata-scsi.c 	return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
rq               1791 drivers/ata/libata-scsi.c 	struct request *rq = scmd->request;
rq               1794 drivers/ata/libata-scsi.c 	if (!blk_rq_is_passthrough(rq))
rq               1797 drivers/ata/libata-scsi.c 	req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
rq               1826 drivers/ata/libata-scsi.c 	struct request *rq = scmd->request;
rq               1827 drivers/ata/libata-scsi.c 	int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
rq                668 drivers/atm/firestream.c 	long rq;
rq                672 drivers/atm/firestream.c 	while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
rq                673 drivers/atm/firestream.c 		fs_dprintk (FS_DEBUG_QUEUE, "reaping return queue entry at %lx\n", rq); 
rq                674 drivers/atm/firestream.c 		qe = bus_to_virt (rq);
rq                694 drivers/atm/firestream.c 	long rq;
rq                700 drivers/atm/firestream.c 	while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
rq                701 drivers/atm/firestream.c 		fs_dprintk (FS_DEBUG_QUEUE, "reaping txdone entry at %lx\n", rq); 
rq                702 drivers/atm/firestream.c 		qe = bus_to_virt (rq);
rq                764 drivers/atm/firestream.c 	long rq;
rq                771 drivers/atm/firestream.c 	while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) {
rq                772 drivers/atm/firestream.c 		fs_dprintk (FS_DEBUG_QUEUE, "reaping incoming queue entry at %lx\n", rq); 
rq                773 drivers/atm/firestream.c 		qe = bus_to_virt (rq);
rq               1459 drivers/block/amiflop.c 					   struct request *rq)
rq               1465 drivers/block/amiflop.c 	for (cnt = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
rq               1468 drivers/block/amiflop.c 		       blk_rq_pos(rq), cnt,
rq               1469 drivers/block/amiflop.c 		       (rq_data_dir(rq) == READ) ? "read" : "write");
rq               1471 drivers/block/amiflop.c 		block = blk_rq_pos(rq) + cnt;
rq               1474 drivers/block/amiflop.c 		data = bio_data(rq->bio) + 512 * cnt;
rq               1483 drivers/block/amiflop.c 		if (rq_data_dir(rq) == READ) {
rq               1507 drivers/block/amiflop.c 	struct request *rq = bd->rq;
rq               1508 drivers/block/amiflop.c 	struct amiga_floppy_struct *floppy = rq->rq_disk->private_data;
rq               1514 drivers/block/amiflop.c 	blk_mq_start_request(rq);
rq               1517 drivers/block/amiflop.c 		err = amiflop_rw_cur_segment(floppy, rq);
rq               1518 drivers/block/amiflop.c 	} while (blk_update_request(rq, err, blk_rq_cur_bytes(rq)));
rq               1519 drivers/block/amiflop.c 	blk_mq_end_request(rq, err);
rq                111 drivers/block/aoe/aoe.h 	struct request *rq;
rq                184 drivers/block/aoe/aoe.h 		struct request *rq;
rq                274 drivers/block/aoe/aoeblk.c 		blk_mq_start_request(bd->rq);
rq                278 drivers/block/aoe/aoeblk.c 	list_add_tail(&bd->rq->queuelist, &d->rq_list);
rq                826 drivers/block/aoe/aoecmd.c bufinit(struct buf *buf, struct request *rq, struct bio *bio)
rq                829 drivers/block/aoe/aoecmd.c 	buf->rq = rq;
rq                837 drivers/block/aoe/aoecmd.c 	struct request *rq;
rq                848 drivers/block/aoe/aoecmd.c 	rq = d->ip.rq;
rq                849 drivers/block/aoe/aoecmd.c 	if (rq == NULL) {
rq                850 drivers/block/aoe/aoecmd.c 		rq = list_first_entry_or_null(&d->rq_list, struct request,
rq                852 drivers/block/aoe/aoecmd.c 		if (rq == NULL)
rq                854 drivers/block/aoe/aoecmd.c 		list_del_init(&rq->queuelist);
rq                855 drivers/block/aoe/aoecmd.c 		blk_mq_start_request(rq);
rq                856 drivers/block/aoe/aoecmd.c 		d->ip.rq = rq;
rq                857 drivers/block/aoe/aoecmd.c 		d->ip.nxbio = rq->bio;
rq                859 drivers/block/aoe/aoecmd.c 		req = blk_mq_rq_to_pdu(rq);
rq                861 drivers/block/aoe/aoecmd.c 		__rq_for_each_bio(bio, rq)
rq                870 drivers/block/aoe/aoecmd.c 	bufinit(buf, rq, bio);
rq                874 drivers/block/aoe/aoecmd.c 		d->ip.rq = NULL;
rq               1039 drivers/block/aoe/aoecmd.c aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
rq               1047 drivers/block/aoe/aoecmd.c 	if (rq == d->ip.rq)
rq               1048 drivers/block/aoe/aoecmd.c 		d->ip.rq = NULL;
rq               1050 drivers/block/aoe/aoecmd.c 		bio = rq->bio;
rq               1054 drivers/block/aoe/aoecmd.c 	} while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
rq               1056 drivers/block/aoe/aoecmd.c 	__blk_mq_end_request(rq, err);
rq               1066 drivers/block/aoe/aoecmd.c 	struct request *rq = buf->rq;
rq               1067 drivers/block/aoe/aoecmd.c 	struct aoe_req *req = blk_mq_rq_to_pdu(rq);
rq               1073 drivers/block/aoe/aoecmd.c 		aoe_end_request(d, rq, 0);
rq                162 drivers/block/aoe/aoedev.c 	struct request *rq;
rq                167 drivers/block/aoe/aoedev.c 	rq = d->ip.rq;
rq                168 drivers/block/aoe/aoedev.c 	if (rq == NULL)
rq                171 drivers/block/aoe/aoedev.c 	req = blk_mq_rq_to_pdu(rq);
rq                179 drivers/block/aoe/aoedev.c 		aoe_end_request(d, rq, 0);
rq               1487 drivers/block/ataflop.c 	struct atari_floppy_struct *floppy = bd->rq->rq_disk->private_data;
rq               1500 drivers/block/ataflop.c 	fd_request = bd->rq;
rq               2893 drivers/block/floppy.c 	blk_mq_start_request(bd->rq);
rq               2913 drivers/block/floppy.c 	list_add_tail(&bd->rq->queuelist, &floppy_reqs);
rq                289 drivers/block/loop.c static int lo_write_simple(struct loop_device *lo, struct request *rq,
rq                296 drivers/block/loop.c 	rq_for_each_segment(bvec, rq, iter) {
rq                311 drivers/block/loop.c static int lo_write_transfer(struct loop_device *lo, struct request *rq,
rq                323 drivers/block/loop.c 	rq_for_each_segment(bvec, rq, iter) {
rq                341 drivers/block/loop.c static int lo_read_simple(struct loop_device *lo, struct request *rq,
rq                349 drivers/block/loop.c 	rq_for_each_segment(bvec, rq, iter) {
rq                360 drivers/block/loop.c 			__rq_for_each_bio(bio, rq)
rq                370 drivers/block/loop.c static int lo_read_transfer(struct loop_device *lo, struct request *rq,
rq                384 drivers/block/loop.c 	rq_for_each_segment(bvec, rq, iter) {
rq                408 drivers/block/loop.c 			__rq_for_each_bio(bio, rq)
rq                420 drivers/block/loop.c static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
rq                440 drivers/block/loop.c 	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
rq                447 drivers/block/loop.c static int lo_req_flush(struct loop_device *lo, struct request *rq)
rq                457 drivers/block/loop.c static void lo_complete_rq(struct request *rq)
rq                459 drivers/block/loop.c 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq                462 drivers/block/loop.c 	if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
rq                463 drivers/block/loop.c 	    req_op(rq) != REQ_OP_READ) {
rq                474 drivers/block/loop.c 		blk_update_request(rq, BLK_STS_OK, cmd->ret);
rq                476 drivers/block/loop.c 		blk_mq_requeue_request(rq, true);
rq                479 drivers/block/loop.c 			struct bio *bio = rq->bio;
rq                488 drivers/block/loop.c 		blk_mq_end_request(rq, ret);
rq                494 drivers/block/loop.c 	struct request *rq = blk_mq_rq_from_pdu(cmd);
rq                500 drivers/block/loop.c 	blk_mq_complete_request(rq);
rq                519 drivers/block/loop.c 	struct request *rq = blk_mq_rq_from_pdu(cmd);
rq                520 drivers/block/loop.c 	struct bio *bio = rq->bio;
rq                527 drivers/block/loop.c 	rq_for_each_bvec(tmp, rq, rq_iter)
rq                530 drivers/block/loop.c 	if (rq->bio != rq->biotail) {
rq                544 drivers/block/loop.c 		rq_for_each_bvec(tmp, rq, rq_iter) {
rq                561 drivers/block/loop.c 	iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
rq                585 drivers/block/loop.c static int do_req_filebacked(struct loop_device *lo, struct request *rq)
rq                587 drivers/block/loop.c 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq                588 drivers/block/loop.c 	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
rq                599 drivers/block/loop.c 	switch (req_op(rq)) {
rq                601 drivers/block/loop.c 		return lo_req_flush(lo, rq);
rq                607 drivers/block/loop.c 		return lo_fallocate(lo, rq, pos,
rq                608 drivers/block/loop.c 			(rq->cmd_flags & REQ_NOUNMAP) ?
rq                612 drivers/block/loop.c 		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
rq                615 drivers/block/loop.c 			return lo_write_transfer(lo, rq, pos);
rq                619 drivers/block/loop.c 			return lo_write_simple(lo, rq, pos);
rq                622 drivers/block/loop.c 			return lo_read_transfer(lo, rq, pos);
rq                626 drivers/block/loop.c 			return lo_read_simple(lo, rq, pos);
rq               1924 drivers/block/loop.c 	struct request *rq = bd->rq;
rq               1925 drivers/block/loop.c 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq               1926 drivers/block/loop.c 	struct loop_device *lo = rq->q->queuedata;
rq               1928 drivers/block/loop.c 	blk_mq_start_request(rq);
rq               1933 drivers/block/loop.c 	switch (req_op(rq)) {
rq               1946 drivers/block/loop.c 	if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
rq               1947 drivers/block/loop.c 		cmd->css = &bio_blkcg(rq->bio)->css;
rq               1959 drivers/block/loop.c 	struct request *rq = blk_mq_rq_from_pdu(cmd);
rq               1960 drivers/block/loop.c 	const bool write = op_is_write(req_op(rq));
rq               1961 drivers/block/loop.c 	struct loop_device *lo = rq->q->queuedata;
rq               1969 drivers/block/loop.c 	ret = do_req_filebacked(lo, rq);
rq               1974 drivers/block/loop.c 		blk_mq_complete_request(rq);
rq               1986 drivers/block/loop.c static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
rq               1989 drivers/block/loop.c 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq                967 drivers/block/mtip32xx/mtip32xx.c 	struct request *rq;
rq                985 drivers/block/mtip32xx/mtip32xx.c 	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
rq                986 drivers/block/mtip32xx/mtip32xx.c 	if (IS_ERR(rq)) {
rq               1002 drivers/block/mtip32xx/mtip32xx.c 			blk_mq_free_request(rq);
rq               1010 drivers/block/mtip32xx/mtip32xx.c 	int_cmd = blk_mq_rq_to_pdu(rq);
rq               1014 drivers/block/mtip32xx/mtip32xx.c 	rq->timeout = timeout;
rq               1017 drivers/block/mtip32xx/mtip32xx.c 	blk_execute_rq(rq->q, NULL, rq, true);
rq               1048 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_free_request(rq);
rq               2056 drivers/block/mtip32xx/mtip32xx.c static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
rq               2061 drivers/block/mtip32xx/mtip32xx.c 		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
rq               2064 drivers/block/mtip32xx/mtip32xx.c 	int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
rq               2065 drivers/block/mtip32xx/mtip32xx.c 	u64 start = blk_rq_pos(rq);
rq               2066 drivers/block/mtip32xx/mtip32xx.c 	unsigned int nsect = blk_rq_sectors(rq);
rq               2070 drivers/block/mtip32xx/mtip32xx.c 	nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
rq               2100 drivers/block/mtip32xx/mtip32xx.c 	fis->sect_count  = ((rq->tag << 3) | (rq->tag >> 5));
rq               2124 drivers/block/mtip32xx/mtip32xx.c 		set_bit(rq->tag, port->cmds_to_issue);
rq               2130 drivers/block/mtip32xx/mtip32xx.c 	mtip_issue_ncq_command(port, rq->tag);
rq               2584 drivers/block/mtip32xx/mtip32xx.c static void mtip_softirq_done_fn(struct request *rq)
rq               2586 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq               2587 drivers/block/mtip32xx/mtip32xx.c 	struct driver_data *dd = rq->q->queuedata;
rq               2596 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_end_request(rq, cmd->status);
rq               3402 drivers/block/mtip32xx/mtip32xx.c static inline bool is_stopped(struct driver_data *dd, struct request *rq)
rq               3412 drivers/block/mtip32xx/mtip32xx.c 	    rq_data_dir(rq))
rq               3423 drivers/block/mtip32xx/mtip32xx.c 				  struct request *rq)
rq               3426 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq               3428 drivers/block/mtip32xx/mtip32xx.c 	if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
rq               3435 drivers/block/mtip32xx/mtip32xx.c 	if (blk_rq_sectors(rq) <= 64) {
rq               3436 drivers/block/mtip32xx/mtip32xx.c 		if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))
rq               3447 drivers/block/mtip32xx/mtip32xx.c 		struct request *rq)
rq               3450 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq               3453 drivers/block/mtip32xx/mtip32xx.c 		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
rq               3478 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_start_request(rq);
rq               3479 drivers/block/mtip32xx/mtip32xx.c 	mtip_issue_non_ncq_command(dd->port, rq->tag);
rq               3487 drivers/block/mtip32xx/mtip32xx.c 	struct request *rq = bd->rq;
rq               3488 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq               3490 drivers/block/mtip32xx/mtip32xx.c 	if (blk_rq_is_passthrough(rq))
rq               3491 drivers/block/mtip32xx/mtip32xx.c 		return mtip_issue_reserved_cmd(hctx, rq);
rq               3493 drivers/block/mtip32xx/mtip32xx.c 	if (unlikely(mtip_check_unal_depth(hctx, rq)))
rq               3496 drivers/block/mtip32xx/mtip32xx.c 	if (is_se_active(dd) || is_stopped(dd, rq))
rq               3499 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_start_request(rq);
rq               3501 drivers/block/mtip32xx/mtip32xx.c 	mtip_hw_submit_io(dd, rq, cmd, hctx);
rq               3505 drivers/block/mtip32xx/mtip32xx.c static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
rq               3509 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq               3518 drivers/block/mtip32xx/mtip32xx.c static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
rq               3522 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq               3749 drivers/block/mtip32xx/mtip32xx.c static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
rq               3751 drivers/block/mtip32xx/mtip32xx.c 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq               3754 drivers/block/mtip32xx/mtip32xx.c 	blk_mq_complete_request(rq);
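The mtip32xx lines above also show how a driver issues an internal (non-filesystem) command: allocate a passthrough request on a reserved tag, set a timeout, execute it synchronously, then free it. A hedged sketch mirroring those calls; the queue pointer and timeout come from the surrounding driver:

#include <linux/blk-mq.h>
#include <linux/err.h>

/* Sketch: run one driver-internal command on a reserved tag and wait. */
static int my_internal_command(struct request_queue *q, unsigned long timeout)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = timeout;

	/* Synchronous execution; the final argument asks for head insertion. */
	blk_execute_rq(rq->q, NULL, rq, true);

	blk_mq_free_request(rq);
	return 0;
}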
rq                953 drivers/block/nbd.c 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
rq               1641 drivers/block/nbd.c static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
rq               1644 drivers/block/nbd.c 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
rq                 20 drivers/block/null_blk.h 	struct request *rq;
rq                623 drivers/block/null_blk_main.c 		blk_mq_end_request(cmd->rq, cmd->error);
rq                648 drivers/block/null_blk_main.c static void null_complete_rq(struct request *rq)
rq                650 drivers/block/null_blk_main.c 	end_cmd(blk_mq_rq_to_pdu(rq));
rq               1055 drivers/block/null_blk_main.c 	struct request *rq = cmd->rq;
rq               1063 drivers/block/null_blk_main.c 	sector = blk_rq_pos(rq);
rq               1065 drivers/block/null_blk_main.c 	if (req_op(rq) == REQ_OP_DISCARD) {
rq               1066 drivers/block/null_blk_main.c 		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
rq               1071 drivers/block/null_blk_main.c 	rq_for_each_segment(bvec, rq, iter) {
rq               1074 drivers/block/null_blk_main.c 				     op_is_write(req_op(rq)), sector,
rq               1075 drivers/block/null_blk_main.c 				     req_op(rq) & REQ_FUA);
rq               1142 drivers/block/null_blk_main.c 	struct request *rq = cmd->rq;
rq               1147 drivers/block/null_blk_main.c 	if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
rq               1193 drivers/block/null_blk_main.c 			blk_mq_complete_request(cmd->rq);
rq               1299 drivers/block/null_blk_main.c static bool should_timeout_request(struct request *rq)
rq               1308 drivers/block/null_blk_main.c static bool should_requeue_request(struct request *rq)
rq               1317 drivers/block/null_blk_main.c static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
rq               1319 drivers/block/null_blk_main.c 	pr_info("rq %p timed out\n", rq);
rq               1320 drivers/block/null_blk_main.c 	blk_mq_complete_request(rq);
rq               1327 drivers/block/null_blk_main.c 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
rq               1329 drivers/block/null_blk_main.c 	sector_t nr_sectors = blk_rq_sectors(bd->rq);
rq               1330 drivers/block/null_blk_main.c 	sector_t sector = blk_rq_pos(bd->rq);
rq               1338 drivers/block/null_blk_main.c 	cmd->rq = bd->rq;
rq               1342 drivers/block/null_blk_main.c 	blk_mq_start_request(bd->rq);
rq               1344 drivers/block/null_blk_main.c 	if (should_requeue_request(bd->rq)) {
rq               1353 drivers/block/null_blk_main.c 			blk_mq_requeue_request(bd->rq, true);
rq               1357 drivers/block/null_blk_main.c 	if (should_timeout_request(bd->rq))
rq               1360 drivers/block/null_blk_main.c 	return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
rq                132 drivers/block/null_blk_zoned.c 	switch (req_op(cmd->rq)) {
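null_blk's queue_rq above deliberately exercises the requeue and timeout paths that real drivers hit under resource pressure: either return BLK_STS_RESOURCE and let the core retry, or hand the request back with blk_mq_requeue_request(). A hedged sketch of both choices (the "busy" condition is a placeholder for real device state):

#include <linux/blk-mq.h>

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	bool busy = false;	/* hypothetical: set when the device is saturated */

	blk_mq_start_request(rq);

	if (busy) {
		/* Core-driven retry: the block layer re-dispatches later. */
		return BLK_STS_RESOURCE;
		/* Driver-driven alternative, which null_blk also exercises:
		 *   blk_mq_requeue_request(rq, true);
		 *   return BLK_STS_OK;
		 */
	}

	/* ... hand rq to the hardware; complete it from the IRQ path ... */
	return BLK_STS_OK;
}

/* .timeout callback, mirroring null_timeout_rq() above. */
static enum blk_eh_timer_return my_timeout_rq(struct request *rq, bool reserved)
{
	pr_info("rq %p timed out\n", rq);
	blk_mq_complete_request(rq);	/* or return BLK_EH_RESET_TIMER to keep waiting */
	return BLK_EH_DONE;
}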
rq                818 drivers/block/paride/pcd.c 	if (rq_data_dir(bd->rq) != READ) {
rq                819 drivers/block/paride/pcd.c 		blk_mq_start_request(bd->rq);
rq                824 drivers/block/paride/pcd.c 	list_add_tail(&bd->rq->queuelist, &cd->rq_list);
rq                762 drivers/block/paride/pd.c 		pd_req = bd->rq;
rq                765 drivers/block/paride/pd.c 		list_add_tail(&bd->rq->queuelist, &disk->rq_list);
rq                775 drivers/block/paride/pd.c 	struct request *rq;
rq                778 drivers/block/paride/pd.c 	rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
rq                779 drivers/block/paride/pd.c 	if (IS_ERR(rq))
rq                780 drivers/block/paride/pd.c 		return PTR_ERR(rq);
rq                781 drivers/block/paride/pd.c 	req = blk_mq_rq_to_pdu(rq);
rq                784 drivers/block/paride/pd.c 	blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
rq                785 drivers/block/paride/pd.c 	blk_put_request(rq);
rq                877 drivers/block/paride/pf.c 	list_add_tail(&bd->rq->queuelist, &pf->rq_list);
rq                703 drivers/block/pktcdvd.c 	struct request *rq;
rq                706 drivers/block/pktcdvd.c 	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
rq                708 drivers/block/pktcdvd.c 	if (IS_ERR(rq))
rq                709 drivers/block/pktcdvd.c 		return PTR_ERR(rq);
rq                712 drivers/block/pktcdvd.c 		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
rq                718 drivers/block/pktcdvd.c 	scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
rq                719 drivers/block/pktcdvd.c 	memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);
rq                721 drivers/block/pktcdvd.c 	rq->timeout = 60*HZ;
rq                723 drivers/block/pktcdvd.c 		rq->rq_flags |= RQF_QUIET;
rq                725 drivers/block/pktcdvd.c 	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
rq                726 drivers/block/pktcdvd.c 	if (scsi_req(rq)->result)
rq                729 drivers/block/pktcdvd.c 	blk_put_request(rq);
rq               1099 drivers/block/pktcdvd.c 	struct request *rq = pkt->rq;
rq               1100 drivers/block/pktcdvd.c 	struct pktcdvd_device *pd = rq->rq_disk->private_data;
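The pktcdvd lines above are the classic SCSI packet passthrough sequence: get a request on the device queue, attach a kernel data buffer, fill in the CDB through scsi_req(), execute synchronously and check the result. A hedged sketch of that sequence with simplified parameters (my_send_packet is hypothetical; the block-layer calls mirror those above):

#include <linux/blkdev.h>
#include <linux/string.h>
#include <scsi/scsi_request.h>

static int my_send_packet(struct request_queue *q, struct gendisk *disk,
			  const u8 *cdb, unsigned int cdb_len,
			  void *buf, unsigned int buflen, bool is_write)
{
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, is_write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (buflen) {
		ret = blk_rq_map_kern(q, rq, buf, buflen, GFP_NOIO);
		if (ret)
			goto out;
	}

	scsi_req(rq)->cmd_len = cdb_len;
	memcpy(scsi_req(rq)->cmd, cdb, cdb_len);
	rq->timeout = 60 * HZ;

	blk_execute_rq(rq->q, disk, rq, 0);
	if (scsi_req(rq)->result)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}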
rq                203 drivers/block/ps3disk.c 	blk_mq_start_request(bd->rq);
rq                206 drivers/block/ps3disk.c 	ret = ps3disk_do_request(dev, bd->rq);
rq                341 drivers/block/rbd.c 		struct request		*rq;		/* block request */
rq               3738 drivers/block/rbd.c 		struct request *rq = img_req->rq;
rq               3741 drivers/block/rbd.c 		blk_mq_end_request(rq, errno_to_blk_status(result));
rq               4796 drivers/block/rbd.c 	struct request *rq = blk_mq_rq_from_pdu(work);
rq               4797 drivers/block/rbd.c 	struct rbd_device *rbd_dev = rq->q->queuedata;
rq               4800 drivers/block/rbd.c 	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
rq               4801 drivers/block/rbd.c 	u64 length = blk_rq_bytes(rq);
rq               4806 drivers/block/rbd.c 	switch (req_op(rq)) {
rq               4820 drivers/block/rbd.c 		dout("%s: non-fs request type %d\n", __func__, req_op(rq));
rq               4860 drivers/block/rbd.c 	blk_mq_start_request(rq);
rq               4882 drivers/block/rbd.c 	img_request->rq = rq;
rq               4892 drivers/block/rbd.c 					       rq->bio);
rq               4907 drivers/block/rbd.c 	blk_mq_end_request(rq, errno_to_blk_status(result));
rq               4913 drivers/block/rbd.c 	struct request *rq = bd->rq;
rq               4914 drivers/block/rbd.c 	struct work_struct *work = blk_mq_rq_to_pdu(rq);
rq               5106 drivers/block/rbd.c static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
rq               5109 drivers/block/rbd.c 	struct work_struct *work = blk_mq_rq_to_pdu(rq);
rq                384 drivers/block/skd_main.c static bool skd_inc_in_flight(struct request *rq, void *data, bool reserved)
rq                481 drivers/block/skd_main.c 	struct request *const req = mqd->rq;
rq               1465 drivers/block/skd_main.c 	struct request *rq;
rq               1526 drivers/block/skd_main.c 		rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
rq               1527 drivers/block/skd_main.c 		if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
rq               1530 drivers/block/skd_main.c 		skreq = blk_mq_rq_to_pdu(rq);
rq               1563 drivers/block/skd_main.c 			blk_mq_complete_request(rq);
rq               1565 drivers/block/skd_main.c 			skd_resolve_req_exception(skdev, skreq, rq);
rq               2739 drivers/block/skd_main.c static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
rq               2743 drivers/block/skd_main.c 	struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
rq               2754 drivers/block/skd_main.c static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
rq               2758 drivers/block/skd_main.c 	struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
rq                543 drivers/block/sunvdc.c 	blk_mq_start_request(bd->rq);
rq                561 drivers/block/sunvdc.c 	if (__send_request(bd->rq) < 0) {
rq                529 drivers/block/swim.c 	struct request *req = bd->rq;
rq                312 drivers/block/swim3.c 	struct request *req = bd->rq;
rq                508 drivers/block/sx8.c 	struct request *rq;
rq                511 drivers/block/sx8.c 	rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
rq                512 drivers/block/sx8.c 	if (IS_ERR(rq)) {
rq                516 drivers/block/sx8.c 	crq = blk_mq_rq_to_pdu(rq);
rq                518 drivers/block/sx8.c 	ioc = carm_ref_msg(host, rq->tag);
rq                519 drivers/block/sx8.c 	msg_dma = carm_ref_msg_dma(host, rq->tag);
rq                533 drivers/block/sx8.c 	ioc->handle	= cpu_to_le32(TAG_ENCODE(rq->tag));
rq                541 drivers/block/sx8.c 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
rq                542 drivers/block/sx8.c 	blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
rq                557 drivers/block/sx8.c 	struct request *rq;
rq                564 drivers/block/sx8.c 	rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
rq                565 drivers/block/sx8.c 	if (IS_ERR(rq))
rq                567 drivers/block/sx8.c 	crq = blk_mq_rq_to_pdu(rq);
rq                569 drivers/block/sx8.c 	mem = carm_ref_msg(host, rq->tag);
rq                571 drivers/block/sx8.c 	msg_size = func(host, rq->tag, mem);
rq                580 drivers/block/sx8.c 	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
rq                581 drivers/block/sx8.c 	blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
rq                697 drivers/block/sx8.c static inline enum dma_data_direction carm_rq_dir(struct request *rq)
rq                699 drivers/block/sx8.c 	return op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
rq                706 drivers/block/sx8.c 	struct request *rq = bd->rq;
rq                709 drivers/block/sx8.c 	struct carm_request *crq = blk_mq_rq_to_pdu(rq);
rq                719 drivers/block/sx8.c 	blk_mq_start_request(rq);
rq                722 drivers/block/sx8.c 	if (req_op(rq) == REQ_OP_DRV_OUT)
rq                727 drivers/block/sx8.c 	n_elem = blk_rq_map_sg(q, rq, sg);
rq                732 drivers/block/sx8.c 	n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, carm_rq_dir(rq));
rq                748 drivers/block/sx8.c 	msg = (struct carm_msg_rw *) carm_ref_msg(host, rq->tag);
rq                750 drivers/block/sx8.c 	if (rq_data_dir(rq) == WRITE) {
rq                761 drivers/block/sx8.c 	msg->handle	= cpu_to_le32(TAG_ENCODE(rq->tag));
rq                762 drivers/block/sx8.c 	msg->lba	= cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
rq                763 drivers/block/sx8.c 	tmp		= (blk_rq_pos(rq) >> 16) >> 16;
rq                765 drivers/block/sx8.c 	msg->lba_count	= cpu_to_le16(blk_rq_sectors(rq));
rq                782 drivers/block/sx8.c 	VPRINTK("send msg, tag == %u\n", rq->tag);
rq                783 drivers/block/sx8.c 	rc = carm_send_msg(host, crq, rq->tag);
rq                792 drivers/block/sx8.c 	dma_unmap_sg(&host->pdev->dev, &crq->sg[0], n_elem, carm_rq_dir(rq));
rq                910 drivers/block/sx8.c 	struct request *rq;
rq                926 drivers/block/sx8.c 	rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx);
rq                927 drivers/block/sx8.c 	crq = blk_mq_rq_to_pdu(rq);
rq                933 drivers/block/sx8.c 			     carm_rq_dir(rq));
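The sx8 queue_rq above shows how a blk-mq driver turns a request into a DMA scatterlist: blk_rq_map_sg() builds the sg entries from the request's bios, then dma_map_sg() maps them for the device, with the direction derived from the request. A hedged sketch of that mapping step (the caller is assumed to supply an sg array sized for the queue's max_segments):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: build and DMA-map a scatterlist for one request. */
static int my_map_request(struct device *dev, struct request *rq,
			  struct scatterlist *sg)
{
	enum dma_data_direction dir =
		op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int nents, n_mapped;

	nents = blk_rq_map_sg(rq->q, rq, sg);
	if (!nents)
		return -EIO;

	n_mapped = dma_map_sg(dev, sg, nents, dir);
	if (!n_mapped)
		return -ENOMEM;

	/* Program n_mapped entries into the hardware descriptors here; on
	 * completion undo with dma_unmap_sg(dev, sg, nents, dir). */
	return n_mapped;
}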
rq                291 drivers/block/virtio_blk.c 	struct request *req = bd->rq;
rq                752 drivers/block/virtio_blk.c static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
rq                756 drivers/block/virtio_blk.c 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
rq                119 drivers/block/xen-blkfront.c static inline struct blkif_req *blkif_req(struct request *rq)
rq                121 drivers/block/xen-blkfront.c 	return blk_mq_rq_to_pdu(rq);
rq                206 drivers/block/xen-blkfront.c 	struct request_queue *rq;
rq                892 drivers/block/xen-blkfront.c 	blk_mq_start_request(qd->rq);
rq                897 drivers/block/xen-blkfront.c 	if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
rq                900 drivers/block/xen-blkfront.c 	if (blkif_queue_request(qd->rq, rinfo))
rq                917 drivers/block/xen-blkfront.c static void blkif_complete_rq(struct request *rq)
rq                919 drivers/block/xen-blkfront.c 	blk_mq_end_request(rq, blkif_req(rq)->error);
rq                929 drivers/block/xen-blkfront.c 	struct request_queue *rq = info->rq;
rq                934 drivers/block/xen-blkfront.c 	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
rq                937 drivers/block/xen-blkfront.c 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
rq                938 drivers/block/xen-blkfront.c 		blk_queue_max_discard_sectors(rq, get_capacity(gd));
rq                939 drivers/block/xen-blkfront.c 		rq->limits.discard_granularity = info->discard_granularity;
rq                940 drivers/block/xen-blkfront.c 		rq->limits.discard_alignment = info->discard_alignment;
rq                942 drivers/block/xen-blkfront.c 			blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
rq                946 drivers/block/xen-blkfront.c 	blk_queue_logical_block_size(rq, info->sector_size);
rq                947 drivers/block/xen-blkfront.c 	blk_queue_physical_block_size(rq, info->physical_sector_size);
rq                948 drivers/block/xen-blkfront.c 	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
rq                951 drivers/block/xen-blkfront.c 	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
rq                952 drivers/block/xen-blkfront.c 	blk_queue_max_segment_size(rq, PAGE_SIZE);
rq                955 drivers/block/xen-blkfront.c 	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
rq                958 drivers/block/xen-blkfront.c 	blk_queue_dma_alignment(rq, 511);
rq                964 drivers/block/xen-blkfront.c 	struct request_queue *rq;
rq                987 drivers/block/xen-blkfront.c 	rq = blk_mq_init_queue(&info->tag_set);
rq                988 drivers/block/xen-blkfront.c 	if (IS_ERR(rq)) {
rq                990 drivers/block/xen-blkfront.c 		return PTR_ERR(rq);
rq                993 drivers/block/xen-blkfront.c 	rq->queuedata = info;
rq                994 drivers/block/xen-blkfront.c 	info->rq = gd->queue = rq;
rq               1015 drivers/block/xen-blkfront.c 	blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
rq               1106 drivers/block/xen-blkfront.c 	BUG_ON(info->rq != NULL);
rq               1189 drivers/block/xen-blkfront.c 	if (info->rq == NULL)
rq               1193 drivers/block/xen-blkfront.c 	blk_mq_stop_hw_queues(info->rq);
rq               1211 drivers/block/xen-blkfront.c 	blk_cleanup_queue(info->rq);
rq               1213 drivers/block/xen-blkfront.c 	info->rq = NULL;
rq               1223 drivers/block/xen-blkfront.c 		blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
rq               1351 drivers/block/xen-blkfront.c 	if (info->rq)
rq               1352 drivers/block/xen-blkfront.c 		blk_mq_stop_hw_queues(info->rq);
rq               1607 drivers/block/xen-blkfront.c 				struct request_queue *rq = info->rq;
rq               1613 drivers/block/xen-blkfront.c 				blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
rq               1614 drivers/block/xen-blkfront.c 				blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
rq               2028 drivers/block/xen-blkfront.c 	blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
rq               2056 drivers/block/xen-blkfront.c 	blk_mq_start_stopped_hw_queues(info->rq, true);
rq               2057 drivers/block/xen-blkfront.c 	blk_mq_kick_requeue_list(info->rq);
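The xen-blkfront lines above are a compact example of advertising a device's capabilities on its request queue with the blk_queue_* limit helpers. A hedged sketch of the same kind of setup for a hypothetical device; the specific numbers are placeholders, not Xen's values:

#include <linux/blkdev.h>

static void my_setup_queue(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_VIRT, q);

	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_max_hw_sectors(q, 1024);		/* 512 KiB per request */
	blk_queue_max_segments(q, 128);
	blk_queue_max_segment_size(q, PAGE_SIZE);
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);

	/* Require 512-byte alignment of data buffers. */
	blk_queue_dma_alignment(q, 511);
}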
rq                475 drivers/block/xsysace.c 	struct request *rq;
rq                477 drivers/block/xsysace.c 	rq = list_first_entry_or_null(&ace->rq_list, struct request, queuelist);
rq                478 drivers/block/xsysace.c 	if (rq) {
rq                479 drivers/block/xsysace.c 		list_del_init(&rq->queuelist);
rq                480 drivers/block/xsysace.c 		blk_mq_start_request(rq);
rq                868 drivers/block/xsysace.c 	struct request *req = bd->rq;
rq                 72 drivers/block/z2ram.c 	struct request *req = bd->rq;
rq               2177 drivers/cdrom/cdrom.c 	struct request *rq;
rq               2203 drivers/cdrom/cdrom.c 		rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
rq               2204 drivers/cdrom/cdrom.c 		if (IS_ERR(rq)) {
rq               2205 drivers/cdrom/cdrom.c 			ret = PTR_ERR(rq);
rq               2208 drivers/cdrom/cdrom.c 		req = scsi_req(rq);
rq               2210 drivers/cdrom/cdrom.c 		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
rq               2212 drivers/cdrom/cdrom.c 			blk_put_request(rq);
rq               2228 drivers/cdrom/cdrom.c 		rq->timeout = 60 * HZ;
rq               2229 drivers/cdrom/cdrom.c 		bio = rq->bio;
rq               2231 drivers/cdrom/cdrom.c 		blk_execute_rq(q, cdi->disk, rq, 0);
rq               2232 drivers/cdrom/cdrom.c 		if (scsi_req(rq)->result) {
rq               2243 drivers/cdrom/cdrom.c 		blk_put_request(rq);
rq                634 drivers/cdrom/gdrom.c 	blk_mq_start_request(bd->rq);
rq                636 drivers/cdrom/gdrom.c 	switch (req_op(bd->rq)) {
rq                638 drivers/cdrom/gdrom.c 		return gdrom_readdisk_dma(bd->rq);
rq                 76 drivers/char/agp/isoch.c 		u32 rq;
rq                128 drivers/char/agp/isoch.c 	target.rq    = (tstatus >> 24) & 0xff;
rq                218 drivers/char/agp/isoch.c 		master[cdev].rq = master[cdev].n;
rq                220 drivers/char/agp/isoch.c 			master[cdev].rq *= (1 << (master[cdev].y - 1));
rq                222 drivers/char/agp/isoch.c 		tot_rq += master[cdev].rq;
rq                229 drivers/char/agp/isoch.c 	rq_async = target.rq - rq_isoch;
rq                256 drivers/char/agp/isoch.c 		master[cdev].rq += (cdev == ndevs - 1)
rq                268 drivers/char/agp/isoch.c 		mcmd   |= master[cdev].rq << 24;
rq                212 drivers/char/raw.c 	struct raw_config_request rq;
rq                218 drivers/char/raw.c 		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
rq                221 drivers/char/raw.c 		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
rq                224 drivers/char/raw.c 		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
rq                227 drivers/char/raw.c 		err = bind_get(rq.raw_minor, &dev);
rq                231 drivers/char/raw.c 		rq.block_major = MAJOR(dev);
rq                232 drivers/char/raw.c 		rq.block_minor = MINOR(dev);
rq                234 drivers/char/raw.c 		if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
rq                254 drivers/char/raw.c 	struct raw32_config_request rq;
rq                260 drivers/char/raw.c 		if (copy_from_user(&rq, user_req, sizeof(rq)))
rq                263 drivers/char/raw.c 		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
rq                266 drivers/char/raw.c 		if (copy_from_user(&rq, user_req, sizeof(rq)))
rq                269 drivers/char/raw.c 		err = bind_get(rq.raw_minor, &dev);
rq                273 drivers/char/raw.c 		rq.block_major = MAJOR(dev);
rq                274 drivers/char/raw.c 		rq.block_minor = MINOR(dev);
rq                276 drivers/char/raw.c 		if (copy_to_user(user_req, &rq, sizeof(rq)))
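The drivers/char/raw.c hits show the usual ioctl argument handling: copy a small request struct in from user space, act on it, and for "get" operations fill it in and copy it back. A hedged sketch with a hypothetical payload struct (my_config_request is not a kernel type):

#include <linux/types.h>
#include <linux/uaccess.h>

struct my_config_request {
	int minor;
	__u64 value;
};

static long my_ioctl_set(unsigned long arg)
{
	struct my_config_request rq;

	if (copy_from_user(&rq, (void __user *)arg, sizeof(rq)))
		return -EFAULT;

	/* ... act on rq.minor / rq.value ... */
	return 0;
}

static long my_ioctl_get(unsigned long arg)
{
	struct my_config_request rq;

	if (copy_from_user(&rq, (void __user *)arg, sizeof(rq)))
		return -EFAULT;

	rq.value = 0;	/* ... fill from driver state ... */

	if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
		return -EFAULT;
	return 0;
}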
rq                786 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
rq                991 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		ring = to_amdgpu_ring(entity->rq->sched);
rq               1007 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	ring = to_amdgpu_ring(parser->entity->rq->sched);
rq               1331 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	ring = to_amdgpu_ring(entity->rq->sched);
rq                162 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	ring = to_amdgpu_ring(entity->rq->sched);
rq                186 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c 	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
rq                154 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
rq                157 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 				to_amdgpu_ring(p->entity->rq->sched));
rq               1861 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		struct drm_sched_rq *rq;
rq               1864 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
rq               1865 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
rq                330 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	struct drm_sched_rq *rq;
rq                334 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
rq                335 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 	r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
rq                237 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	struct drm_sched_rq *rq;
rq                241 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
rq                242 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c 	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
rq                102 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c 	ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
rq               1263 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
rq                 68 drivers/gpu/drm/etnaviv/etnaviv_drv.c 		struct drm_sched_rq *rq;
rq                 71 drivers/gpu/drm/etnaviv/etnaviv_drv.c 			rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
rq                 73 drivers/gpu/drm/etnaviv/etnaviv_drv.c 					      &rq, 1, NULL);
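The amdgpu and etnaviv hits both initialize a GPU scheduler entity against a single run queue of a chosen priority. A hedged sketch of that call as it appears in this tree, where drm_sched_entity_init() still takes an rq_list (the scheduler pointer is assumed to come from the driver's ring/GPU structure):

#include <drm/gpu_scheduler.h>

/* Sketch: attach an entity to one normal-priority run queue. */
static int my_entity_init(struct drm_gpu_scheduler *sched,
			  struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* Signature in this tree: (entity, rq_list, num_rq_list, guilty). */
	return drm_sched_entity_init(entity, &rq, 1, NULL);
}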
rq               14255 drivers/gpu/drm/i915/display/intel_display.c 	struct i915_request *rq = wait->request;
rq               14262 drivers/gpu/drm/i915/display/intel_display.c 	if (!i915_request_started(rq))
rq               14263 drivers/gpu/drm/i915/display/intel_display.c 		gen6_rps_boost(rq);
rq               14264 drivers/gpu/drm/i915/display/intel_display.c 	i915_request_put(rq);
rq                224 drivers/gpu/drm/i915/display/intel_overlay.c 	struct i915_request *rq;
rq                229 drivers/gpu/drm/i915/display/intel_overlay.c 	rq = i915_request_create(overlay->context);
rq                230 drivers/gpu/drm/i915/display/intel_overlay.c 	if (IS_ERR(rq))
rq                231 drivers/gpu/drm/i915/display/intel_overlay.c 		return rq;
rq                233 drivers/gpu/drm/i915/display/intel_overlay.c 	err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
rq                235 drivers/gpu/drm/i915/display/intel_overlay.c 		i915_request_add(rq);
rq                239 drivers/gpu/drm/i915/display/intel_overlay.c 	return rq;
rq                246 drivers/gpu/drm/i915/display/intel_overlay.c 	struct i915_request *rq;
rq                251 drivers/gpu/drm/i915/display/intel_overlay.c 	rq = alloc_request(overlay, NULL);
rq                252 drivers/gpu/drm/i915/display/intel_overlay.c 	if (IS_ERR(rq))
rq                253 drivers/gpu/drm/i915/display/intel_overlay.c 		return PTR_ERR(rq);
rq                255 drivers/gpu/drm/i915/display/intel_overlay.c 	cs = intel_ring_begin(rq, 4);
rq                257 drivers/gpu/drm/i915/display/intel_overlay.c 		i915_request_add(rq);
rq                270 drivers/gpu/drm/i915/display/intel_overlay.c 	intel_ring_advance(rq, cs);
rq                272 drivers/gpu/drm/i915/display/intel_overlay.c 	i915_request_add(rq);
rq                304 drivers/gpu/drm/i915/display/intel_overlay.c 	struct i915_request *rq;
rq                318 drivers/gpu/drm/i915/display/intel_overlay.c 	rq = alloc_request(overlay, NULL);
rq                319 drivers/gpu/drm/i915/display/intel_overlay.c 	if (IS_ERR(rq))
rq                320 drivers/gpu/drm/i915/display/intel_overlay.c 		return PTR_ERR(rq);
rq                322 drivers/gpu/drm/i915/display/intel_overlay.c 	cs = intel_ring_begin(rq, 2);
rq                324 drivers/gpu/drm/i915/display/intel_overlay.c 		i915_request_add(rq);
rq                330 drivers/gpu/drm/i915/display/intel_overlay.c 	intel_ring_advance(rq, cs);
rq                333 drivers/gpu/drm/i915/display/intel_overlay.c 	i915_request_add(rq);
rq                386 drivers/gpu/drm/i915/display/intel_overlay.c 	struct i915_request *rq;
rq                397 drivers/gpu/drm/i915/display/intel_overlay.c 	rq = alloc_request(overlay, intel_overlay_off_tail);
rq                398 drivers/gpu/drm/i915/display/intel_overlay.c 	if (IS_ERR(rq))
rq                399 drivers/gpu/drm/i915/display/intel_overlay.c 		return PTR_ERR(rq);
rq                401 drivers/gpu/drm/i915/display/intel_overlay.c 	cs = intel_ring_begin(rq, 6);
rq                403 drivers/gpu/drm/i915/display/intel_overlay.c 		i915_request_add(rq);
rq                417 drivers/gpu/drm/i915/display/intel_overlay.c 	intel_ring_advance(rq, cs);
rq                420 drivers/gpu/drm/i915/display/intel_overlay.c 	i915_request_add(rq);
rq                439 drivers/gpu/drm/i915/display/intel_overlay.c 	struct i915_request *rq;
rq                456 drivers/gpu/drm/i915/display/intel_overlay.c 	rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
rq                457 drivers/gpu/drm/i915/display/intel_overlay.c 	if (IS_ERR(rq))
rq                458 drivers/gpu/drm/i915/display/intel_overlay.c 		return PTR_ERR(rq);
rq                460 drivers/gpu/drm/i915/display/intel_overlay.c 	cs = intel_ring_begin(rq, 2);
rq                462 drivers/gpu/drm/i915/display/intel_overlay.c 		i915_request_add(rq);
rq                468 drivers/gpu/drm/i915/display/intel_overlay.c 	intel_ring_advance(rq, cs);
rq                470 drivers/gpu/drm/i915/display/intel_overlay.c 	i915_request_add(rq);
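The intel_overlay code above illustrates i915's request emission flow: create an i915_request, reserve ring space with intel_ring_begin(), write the dwords, close with intel_ring_advance(), and submit with i915_request_add(). A hedged sketch of that flow; it assumes the i915 driver's internal headers (i915_request.h, gt/intel_engine.h) and emits only no-ops:

/* Sketch: emit four MI_NOOPs on a context and submit the request. */
static int my_emit_noops(struct intel_context *ce)
{
	struct i915_request *rq;
	u32 *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 4);	/* 4 dwords, as in the callers above */
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	return 0;
}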
rq                 41 drivers/gpu/drm/i915/gem/i915_gem_busy.c 	const struct i915_request *rq;
rq                 55 drivers/gpu/drm/i915/gem/i915_gem_busy.c 	rq = container_of(fence, const struct i915_request, fence);
rq                 56 drivers/gpu/drm/i915/gem/i915_gem_busy.c 	if (i915_request_completed(rq))
rq                 60 drivers/gpu/drm/i915/gem/i915_gem_busy.c 	BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
rq                 61 drivers/gpu/drm/i915/gem/i915_gem_busy.c 	return flag(rq->engine->uabi_class);
rq                161 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	struct i915_request *rq;
rq                188 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	rq = intel_context_create_request(w->ce);
rq                189 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	if (IS_ERR(rq)) {
rq                190 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 		err = PTR_ERR(rq);
rq                195 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	if (dma_fence_add_callback(&rq->fence, &w->cb,
rq                199 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	err = intel_emit_vma_mark_active(batch, rq);
rq                204 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 		err = w->ce->engine->emit_init_breadcrumb(rq);
rq                214 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	err = i915_active_ref(&vma->active, rq->timeline, rq);
rq                218 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	err = w->ce->engine->emit_bb_start(rq,
rq                223 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 		i915_request_skip(rq, err);
rq                227 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 	i915_request_add(rq);
rq                869 drivers/gpu/drm/i915/gem/i915_gem_context.c 				int (*emit)(struct i915_request *rq, void *data),
rq                894 drivers/gpu/drm/i915/gem/i915_gem_context.c 		struct i915_request *rq;
rq                908 drivers/gpu/drm/i915/gem/i915_gem_context.c 		rq = intel_context_create_request(ce);
rq                909 drivers/gpu/drm/i915/gem/i915_gem_context.c 		if (IS_ERR(rq)) {
rq                910 drivers/gpu/drm/i915/gem/i915_gem_context.c 			err = PTR_ERR(rq);
rq                916 drivers/gpu/drm/i915/gem/i915_gem_context.c 			err = emit(rq, data);
rq                918 drivers/gpu/drm/i915/gem/i915_gem_context.c 			err = i915_active_ref(&cb->base, rq->timeline, rq);
rq                920 drivers/gpu/drm/i915/gem/i915_gem_context.c 		i915_request_add(rq);
rq                984 drivers/gpu/drm/i915/gem/i915_gem_context.c static int emit_ppgtt_update(struct i915_request *rq, void *data)
rq                986 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct i915_address_space *vm = rq->hw_context->vm;
rq                987 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct intel_engine_cs *engine = rq->engine;
rq                996 drivers/gpu/drm/i915/gem/i915_gem_context.c 		cs = intel_ring_begin(rq, 6);
rq               1008 drivers/gpu/drm/i915/gem/i915_gem_context.c 		intel_ring_advance(rq, cs);
rq               1012 drivers/gpu/drm/i915/gem/i915_gem_context.c 		cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
rq               1026 drivers/gpu/drm/i915/gem/i915_gem_context.c 		intel_ring_advance(rq, cs);
rq               1107 drivers/gpu/drm/i915/gem/i915_gem_context.c static int gen8_emit_rpcs_config(struct i915_request *rq,
rq               1114 drivers/gpu/drm/i915/gem/i915_gem_context.c 	cs = intel_ring_begin(rq, 4);
rq               1125 drivers/gpu/drm/i915/gem/i915_gem_context.c 	*cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
rq               1127 drivers/gpu/drm/i915/gem/i915_gem_context.c 	intel_ring_advance(rq, cs);
rq               1135 drivers/gpu/drm/i915/gem/i915_gem_context.c 	struct i915_request *rq;
rq               1149 drivers/gpu/drm/i915/gem/i915_gem_context.c 	rq = i915_request_create(ce->engine->kernel_context);
rq               1150 drivers/gpu/drm/i915/gem/i915_gem_context.c 	if (IS_ERR(rq))
rq               1151 drivers/gpu/drm/i915/gem/i915_gem_context.c 		return PTR_ERR(rq);
rq               1154 drivers/gpu/drm/i915/gem/i915_gem_context.c 	ret = intel_context_prepare_remote_request(ce, rq);
rq               1156 drivers/gpu/drm/i915/gem/i915_gem_context.c 		ret = gen8_emit_rpcs_config(rq, ce, sseu);
rq               1158 drivers/gpu/drm/i915/gem/i915_gem_context.c 	i915_request_add(rq);
rq                255 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		struct i915_request *rq;
rq                884 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	GEM_BUG_ON(eb->reloc_cache.rq);
rq                909 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	cache->rq = NULL;
rq                934 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct drm_i915_gem_object *obj = cache->rq->batch->obj;
rq                942 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	intel_gt_chipset_flush(cache->rq->engine->gt);
rq                944 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	i915_request_add(cache->rq);
rq                945 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	cache->rq = NULL;
rq                952 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (cache->rq)
rq               1122 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
rq               1133 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	err = i915_request_await_object(rq, vma->obj, true);
rq               1135 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
rq               1148 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct i915_request *rq;
rq               1176 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	rq = i915_request_create(eb->context);
rq               1177 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (IS_ERR(rq)) {
rq               1178 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		err = PTR_ERR(rq);
rq               1182 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	err = intel_engine_pool_mark_active(pool, rq);
rq               1186 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	err = reloc_move_to_gpu(rq, vma);
rq               1190 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	err = eb->engine->emit_bb_start(rq,
rq               1197 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	err = i915_request_await_object(rq, batch->obj, false);
rq               1199 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		err = i915_vma_move_to_active(batch, rq, 0);
rq               1204 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	rq->batch = batch;
rq               1207 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	cache->rq = rq;
rq               1215 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	i915_request_skip(rq, err);
rq               1217 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	i915_request_add(rq);
rq               1237 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (unlikely(!cache->rq)) {
rq               1938 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
rq               1943 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
rq               1948 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	cs = intel_ring_begin(rq, 4 * 2 + 2);
rq               1958 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	intel_ring_advance(rq, cs);
rq               2060 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c add_to_client(struct i915_request *rq, struct drm_file *file)
rq               2064 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	rq->file_priv = file_priv;
rq               2067 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	list_add_tail(&rq->client_link, &file_priv->mm.request_list);
rq               2144 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct i915_request *rq;
rq               2160 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	list_for_each_entry(rq, &tl->requests, link) {
rq               2161 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (rq->ring != ring)
rq               2164 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (__intel_ring_space(rq->postfix,
rq               2168 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (&rq->link == &tl->requests)
rq               2171 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	return i915_request_get(rq);
rq               2206 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct i915_request *rq;
rq               2241 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	rq = eb_throttle(ce);
rq               2245 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	if (rq) {
rq               2246 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		if (i915_request_wait(rq,
rq               2249 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			i915_request_put(rq);
rq               2254 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		i915_request_put(rq);
rq               2640 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	GEM_BUG_ON(eb.reloc_cache.rq);
rq                104 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
rq                109 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = i915_request_await_object(rq, vma->obj, false);
rq                111 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = i915_vma_move_to_active(vma, rq, 0);
rq                116 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	return intel_engine_pool_mark_active(vma->private, rq);
rq                130 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	struct i915_request *rq;
rq                155 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	rq = intel_context_create_request(ce);
rq                156 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	if (IS_ERR(rq)) {
rq                157 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = PTR_ERR(rq);
rq                161 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = intel_emit_vma_mark_active(batch, rq);
rq                165 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = i915_request_await_object(rq, obj, true);
rq                170 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = ce->engine->emit_init_breadcrumb(rq);
rq                176 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = i915_request_await_object(rq, vma->obj, true);
rq                178 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
rq                183 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = ce->engine->emit_bb_start(rq,
rq                188 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		i915_request_skip(rq, err);
rq                190 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_request_add(rq);
rq                303 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
rq                310 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	return i915_request_await_object(rq, obj, write);
rq                321 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	struct i915_request *rq;
rq                346 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	rq = intel_context_create_request(ce);
rq                347 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	if (IS_ERR(rq)) {
rq                348 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = PTR_ERR(rq);
rq                352 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = intel_emit_vma_mark_active(batch, rq);
rq                361 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = move_to_gpu(vma[i], rq, i);
rq                369 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = i915_vma_move_to_active(vma[i], rq, flags);
rq                374 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	if (rq->engine->emit_init_breadcrumb) {
rq                375 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		err = rq->engine->emit_init_breadcrumb(rq);
rq                380 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	err = rq->engine->emit_bb_start(rq,
rq                387 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 		i915_request_skip(rq, err);
rq                389 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c 	i915_request_add(rq);
rq                 26 drivers/gpu/drm/i915/gem/i915_gem_object_blt.h int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq);
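i915_gem_object_blt.c repeats a pairing worth calling out: before a request touches a vma, it first awaits the object's existing fences (i915_request_await_object) and then marks the vma active for this request (i915_vma_move_to_active), with the write flag governing both. A hedged sketch of that pairing, again assuming the driver's internal headers:

/* Sketch: serialise rq against obj, then track the vma as busy. */
static int my_move_to_gpu(struct i915_vma *vma, struct i915_request *rq,
			  bool write)
{
	int err;

	/* Order this request after fences already attached to the object. */
	err = i915_request_await_object(rq, vma->obj, write);
	if (err)
		return err;

	/* Publish the request as the vma's new (read or write) activity. */
	return i915_vma_move_to_active(vma, rq, write ? EXEC_OBJECT_WRITE : 0);
}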
rq                 99 drivers/gpu/drm/i915/gem/i915_gem_wait.c 	struct i915_request *rq;
rq                105 drivers/gpu/drm/i915/gem/i915_gem_wait.c 	rq = to_request(fence);
rq                106 drivers/gpu/drm/i915/gem/i915_gem_wait.c 	engine = rq->engine;
rq                111 drivers/gpu/drm/i915/gem/i915_gem_wait.c 		engine->schedule(rq, attr);
rq                184 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	struct i915_request *rq;
rq                199 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	rq = i915_request_create(i915->engine[RCS0]->kernel_context);
rq                200 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	if (IS_ERR(rq)) {
rq                202 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		return PTR_ERR(rq);
rq                205 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	cs = intel_ring_begin(rq, 4);
rq                207 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		i915_request_add(rq);
rq                228 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	intel_ring_advance(rq, cs);
rq                231 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	err = i915_request_await_object(rq, vma->obj, true);
rq                233 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
rq                237 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c 	i915_request_add(rq);
rq                 72 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		struct i915_request *rq;
rq                 78 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 			rq = igt_request_alloc(ctx[n], engine);
rq                 79 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 			if (IS_ERR(rq)) {
rq                 80 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 				err = PTR_ERR(rq);
rq                 83 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 			i915_request_add(rq);
rq                 85 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
rq                106 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 				rq = igt_request_alloc(ctx[n % nctx], engine);
rq                107 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 				if (IS_ERR(rq)) {
rq                108 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 					err = PTR_ERR(rq);
rq                126 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 				i915_request_add(rq);
rq                128 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
rq                629 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct i915_request *rq;
rq                656 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	rq = i915_request_create(ce);
rq                657 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (IS_ERR(rq)) {
rq                658 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = PTR_ERR(rq);
rq                662 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = rq->engine->emit_bb_start(rq,
rq                669 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_request_await_object(rq, batch->obj, false);
rq                671 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = i915_vma_move_to_active(batch, rq, 0);
rq                677 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_request_await_object(rq, vma->obj, true);
rq                679 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
rq                690 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	*rq_out = i915_request_get(rq);
rq                692 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_add(rq);
rq                697 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_skip(rq, err);
rq                699 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_add(rq);
rq                719 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct i915_request *rq;
rq                734 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
rq                735 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (IS_ERR(rq)) {
rq                736 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		ret = PTR_ERR(rq);
rq                740 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_add(rq);
rq                742 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (!igt_wait_for_spinner(*spin, rq)) {
rq                765 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct i915_request *rq = NULL;
rq                771 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	ret = emit_rpcs_query(obj, ce, &rq);
rq                778 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
rq                779 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_put(rq);
rq               1170 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct i915_request *rq;
rq               1214 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	rq = igt_request_alloc(ctx, engine);
rq               1215 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (IS_ERR(rq)) {
rq               1216 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = PTR_ERR(rq);
rq               1220 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
rq               1225 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_request_await_object(rq, vma->obj, false);
rq               1227 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = i915_vma_move_to_active(vma, rq, 0);
rq               1236 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_add(rq);
rq               1241 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_skip(rq, err);
rq               1243 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_add(rq);
rq               1259 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct i915_request *rq;
rq               1313 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	rq = igt_request_alloc(ctx, engine);
rq               1314 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (IS_ERR(rq)) {
rq               1315 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = PTR_ERR(rq);
rq               1319 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
rq               1324 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	err = i915_request_await_object(rq, vma->obj, true);
rq               1326 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
rq               1334 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_add(rq);
rq               1355 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_skip(rq, err);
rq               1357 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_add(rq);
rq               1500 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	struct i915_request *rq;
rq               1546 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	rq = igt_request_alloc(ctx, i915->engine[RCS0]);
rq               1547 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	if (IS_ERR(rq)) {
rq               1551 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 	i915_request_add(rq);
rq                345 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		struct i915_request *rq;
rq                347 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		rq = i915_request_create(engine->kernel_context);
rq                348 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		if (IS_ERR(rq)) {
rq                350 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			return PTR_ERR(rq);
rq                354 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		err = i915_request_await_object(rq, vma->obj, true);
rq                356 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 			err = i915_vma_move_to_active(vma, rq,
rq                360 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 		i915_request_add(rq);
rq                 21 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	struct i915_request *rq;
rq                 32 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	rq = intel_context_create_request(ce);
rq                 35 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	return rq;
rq                112 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	struct i915_request *rq;
rq                125 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	rq = igt_request_alloc(ctx, engine);
rq                126 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	if (IS_ERR(rq)) {
rq                127 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 		err = PTR_ERR(rq);
rq                135 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	err = engine->emit_bb_start(rq,
rq                142 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	err = i915_request_await_object(rq, batch->obj, false);
rq                144 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 		err = i915_vma_move_to_active(batch, rq, 0);
rq                150 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	err = i915_request_await_object(rq, vma->obj, true);
rq                152 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
rq                157 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	i915_request_add(rq);
rq                166 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	i915_request_skip(rq, err);
rq                168 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c 	i915_request_add(rq);
rq                 81 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c static inline bool __request_completed(const struct i915_request *rq)
rq                 83 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
rq                 87 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c check_signal_order(struct intel_context *ce, struct i915_request *rq)
rq                 89 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	if (!list_is_last(&rq->signal_link, &ce->signals) &&
rq                 90 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	    i915_seqno_passed(rq->fence.seqno,
rq                 91 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			      list_next_entry(rq, signal_link)->fence.seqno))
rq                 94 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	if (!list_is_first(&rq->signal_link, &ce->signals) &&
rq                 95 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	    i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
rq                 96 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			      rq->fence.seqno))
rq                148 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			struct i915_request *rq =
rq                149 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 				list_entry(pos, typeof(*rq), signal_link);
rq                151 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			GEM_BUG_ON(!check_signal_order(ce, rq));
rq                153 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			if (!__request_completed(rq))
rq                157 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 					     &rq->fence.flags));
rq                158 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
rq                160 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			if (!__dma_fence_signal(&rq->fence))
rq                168 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			i915_request_get(rq);
rq                169 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			list_add_tail(&rq->signal_link, &signal);
rq                188 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		struct i915_request *rq =
rq                189 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			list_entry(pos, typeof(*rq), signal_link);
rq                192 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		spin_lock(&rq->lock);
rq                193 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		list_replace(&rq->fence.cb_list, &cb_list);
rq                194 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		__dma_fence_signal__timestamp(&rq->fence, timestamp);
rq                195 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		__dma_fence_signal__notify(&rq->fence, &cb_list);
rq                196 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		spin_unlock(&rq->lock);
rq                198 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		i915_request_put(rq);
rq                275 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c bool i915_request_enable_breadcrumb(struct i915_request *rq)
rq                277 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	lockdep_assert_held(&rq->lock);
rq                280 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
rq                281 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
rq                282 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		struct intel_context *ce = rq->hw_context;
rq                286 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
rq                308 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 			if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))
rq                311 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		list_add(&rq->signal_link, pos);
rq                314 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		GEM_BUG_ON(!check_signal_order(ce, rq));
rq                316 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
rq                320 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	return !__request_completed(rq);
rq                323 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c void i915_request_cancel_breadcrumb(struct i915_request *rq)
rq                325 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
rq                327 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	lockdep_assert_held(&rq->lock);
rq                337 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
rq                338 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		struct intel_context *ce = rq->hw_context;
rq                340 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		list_del(&rq->signal_link);
rq                344 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
rq                354 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 	struct i915_request *rq;
rq                363 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 		list_for_each_entry(rq, &ce->signals, signal_link) {
rq                365 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 				   rq->fence.context, rq->fence.seqno,
rq                366 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 				   i915_request_completed(rq) ? "!" :
rq                367 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 				   i915_request_started(rq) ? "*" :
rq                369 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 				   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
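The breadcrumbs code above leans on i915_seqno_passed() both to keep ce->signals sorted and to test completion against the hardware status page. The comparison is the standard wrap-safe trick of taking the signed view of the unsigned difference; a minimal illustration (my_seqno_passed mirrors the driver's helper but is named as a sketch):

#include <linux/types.h>

/* Wrap-safe "has seq1 reached or passed seq2?" */
static inline bool my_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}

/* e.g. my_seqno_passed(0x00000002, 0xfffffffe) is true: 2 lies four steps
 * after 0xfffffffe once the 32-bit counter wraps. */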
rq                293 drivers/gpu/drm/i915/gt/intel_context.c 					 struct i915_request *rq)
rq                299 drivers/gpu/drm/i915/gt/intel_context.c 	GEM_BUG_ON(rq->hw_context == ce);
rq                301 drivers/gpu/drm/i915/gt/intel_context.c 	if (rq->timeline != tl) { /* beware timeline sharing */
rq                308 drivers/gpu/drm/i915/gt/intel_context.c 		err = i915_active_request_set(&tl->last_request, rq);
rq                322 drivers/gpu/drm/i915/gt/intel_context.c 	return i915_active_ref(&ce->active, rq->timeline, rq);
rq                327 drivers/gpu/drm/i915/gt/intel_context.c 	struct i915_request *rq;
rq                334 drivers/gpu/drm/i915/gt/intel_context.c 	rq = i915_request_create(ce);
rq                337 drivers/gpu/drm/i915/gt/intel_context.c 	return rq;
rq                146 drivers/gpu/drm/i915/gt/intel_context.h 					 struct i915_request *rq);
rq                231 drivers/gpu/drm/i915/gt/intel_engine.h int __must_check intel_ring_cacheline_align(struct i915_request *rq);
rq                233 drivers/gpu/drm/i915/gt/intel_engine.h u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
rq                235 drivers/gpu/drm/i915/gt/intel_engine.h static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
rq                245 drivers/gpu/drm/i915/gt/intel_engine.h 	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
rq                274 drivers/gpu/drm/i915/gt/intel_engine.h static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
rq                277 drivers/gpu/drm/i915/gt/intel_engine.h 	u32 offset = addr - rq->ring->vaddr;
rq                278 drivers/gpu/drm/i915/gt/intel_engine.h 	GEM_BUG_ON(offset > rq->ring->size);
rq                279 drivers/gpu/drm/i915/gt/intel_engine.h 	return intel_ring_wrap(rq->ring, offset);
rq                661 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	struct i915_request rq;
rq                688 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	frame->rq.i915 = engine->i915;
rq                689 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	frame->rq.engine = engine;
rq                690 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	frame->rq.ring = &frame->ring;
rq                691 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	frame->rq.timeline = &frame->timeline;
rq                697 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
rq               1142 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			  struct i915_request *rq,
rq               1145 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
rq               1149 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
rq               1153 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		   rq->fence.context, rq->fence.seqno,
rq               1154 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		   i915_request_completed(rq) ? "!" :
rq               1155 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		   i915_request_started(rq) ? "*" :
rq               1158 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			    &rq->fence.flags) ? "+" :
rq               1160 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			    &rq->fence.flags) ? "-" :
rq               1163 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
rq               1250 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		struct i915_request * const *port, *rq;
rq               1283 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		for (port = execlists->active; (rq = *port); port++) {
rq               1290 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			if (!i915_request_signaled(rq))
rq               1293 drivers/gpu/drm/i915/gt/intel_engine_cs.c 						i915_ggtt_offset(rq->ring->vma),
rq               1294 drivers/gpu/drm/i915/gt/intel_engine_cs.c 						rq->timeline->hwsp_offset,
rq               1295 drivers/gpu/drm/i915/gt/intel_engine_cs.c 						hwsp_seqno(rq));
rq               1297 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			print_request(m, rq, hdr);
rq               1299 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		for (port = execlists->pending; (rq = *port); port++) {
rq               1305 drivers/gpu/drm/i915/gt/intel_engine_cs.c 				 i915_ggtt_offset(rq->ring->vma),
rq               1306 drivers/gpu/drm/i915/gt/intel_engine_cs.c 				 rq->timeline->hwsp_offset,
rq               1307 drivers/gpu/drm/i915/gt/intel_engine_cs.c 				 hwsp_seqno(rq));
rq               1308 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			print_request(m, rq, hdr);
rq               1321 drivers/gpu/drm/i915/gt/intel_engine_cs.c static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
rq               1328 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		   rq->head, rq->postfix, rq->tail,
rq               1329 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
rq               1330 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
rq               1332 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	size = rq->tail - rq->head;
rq               1333 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	if (rq->tail < rq->head)
rq               1334 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		size += rq->ring->size;
rq               1338 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		const void *vaddr = rq->ring->vaddr;
rq               1339 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		unsigned int head = rq->head;
rq               1342 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		if (rq->tail < head) {
rq               1343 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			len = rq->ring->size - head;
rq               1359 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	struct i915_request *rq;
rq               1384 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	rq = intel_engine_find_active_request(engine);
rq               1385 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	if (rq) {
rq               1386 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		print_request(m, rq, "\t\tactive ");
rq               1389 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   i915_ggtt_offset(rq->ring->vma));
rq               1391 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   rq->ring->head);
rq               1393 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   rq->ring->tail);
rq               1395 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   rq->ring->emit);
rq               1397 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   rq->ring->space);
rq               1399 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			   rq->timeline->hwsp_offset);
rq               1401 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		print_request_ring(m, rq);
rq               1451 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		struct i915_request *rq;
rq               1456 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		for (port = execlists->active; (rq = *port); port++)
rq               1459 drivers/gpu/drm/i915/gt/intel_engine_cs.c 		for (port = execlists->pending; (rq = *port); port++) {
rq               1461 drivers/gpu/drm/i915/gt/intel_engine_cs.c 			if (!intel_context_inflight_count(rq->hw_context))
rq               1532 drivers/gpu/drm/i915/gt/intel_engine_cs.c static bool match_ring(struct i915_request *rq)
rq               1534 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	u32 ring = ENGINE_READ(rq->engine, RING_START);
rq               1536 drivers/gpu/drm/i915/gt/intel_engine_cs.c 	return ring == i915_ggtt_offset(rq->ring->vma);
rq                 75 drivers/gpu/drm/i915/gt/intel_engine_pm.c 	struct i915_request *rq;
rq                101 drivers/gpu/drm/i915/gt/intel_engine_pm.c 	rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
rq                102 drivers/gpu/drm/i915/gt/intel_engine_pm.c 	if (IS_ERR(rq))
rq                106 drivers/gpu/drm/i915/gt/intel_engine_pm.c 	intel_timeline_enter(rq->timeline);
rq                110 drivers/gpu/drm/i915/gt/intel_engine_pm.c 	i915_request_add_active_barriers(rq);
rq                113 drivers/gpu/drm/i915/gt/intel_engine_pm.c 	rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
rq                114 drivers/gpu/drm/i915/gt/intel_engine_pm.c 	__i915_request_commit(rq);
rq                118 drivers/gpu/drm/i915/gt/intel_engine_pm.c 	__i915_request_queue(rq, NULL);
rq                 19 drivers/gpu/drm/i915/gt/intel_engine_pool.h 			      struct i915_request *rq)
rq                 21 drivers/gpu/drm/i915/gt/intel_engine_pool.h 	return i915_active_ref(&node->active, rq->timeline, rq);
rq                423 drivers/gpu/drm/i915/gt/intel_engine_types.h 	int		(*request_alloc)(struct i915_request *rq);
rq                429 drivers/gpu/drm/i915/gt/intel_engine_types.h 	int		(*emit_bb_start)(struct i915_request *rq,
rq                434 drivers/gpu/drm/i915/gt/intel_engine_types.h 	int		 (*emit_init_breadcrumb)(struct i915_request *rq);
rq                435 drivers/gpu/drm/i915/gt/intel_engine_types.h 	u32		*(*emit_fini_breadcrumb)(struct i915_request *rq,
rq                445 drivers/gpu/drm/i915/gt/intel_engine_types.h 	void		(*submit_request)(struct i915_request *rq);
rq                451 drivers/gpu/drm/i915/gt/intel_engine_types.h 	void            (*bond_execute)(struct i915_request *rq,
rq                237 drivers/gpu/drm/i915/gt/intel_lrc.c static void mark_eio(struct i915_request *rq)
rq                239 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!i915_request_signaled(rq))
rq                240 drivers/gpu/drm/i915/gt/intel_lrc.c 		dma_fence_set_error(&rq->fence, -EIO);
rq                241 drivers/gpu/drm/i915/gt/intel_lrc.c 	i915_request_mark_complete(rq);
rq                269 drivers/gpu/drm/i915/gt/intel_lrc.c static inline int rq_prio(const struct i915_request *rq)
rq                271 drivers/gpu/drm/i915/gt/intel_lrc.c 	return rq->sched.attr.priority;
rq                274 drivers/gpu/drm/i915/gt/intel_lrc.c static int effective_prio(const struct i915_request *rq)
rq                276 drivers/gpu/drm/i915/gt/intel_lrc.c 	int prio = rq_prio(rq);
rq                286 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (i915_request_has_nopreempt(rq))
rq                295 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (__i915_request_has_started(rq))
rq                321 drivers/gpu/drm/i915/gt/intel_lrc.c 				const struct i915_request *rq,
rq                341 drivers/gpu/drm/i915/gt/intel_lrc.c 	last_prio = effective_prio(rq);
rq                350 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
rq                351 drivers/gpu/drm/i915/gt/intel_lrc.c 	    rq_prio(list_next_entry(rq, sched.link)) > last_prio)
rq                477 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_request *rq, *rn, *active = NULL;
rq                483 drivers/gpu/drm/i915/gt/intel_lrc.c 	list_for_each_entry_safe_reverse(rq, rn,
rq                488 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (i915_request_completed(rq))
rq                491 drivers/gpu/drm/i915/gt/intel_lrc.c 		__i915_request_unsubmit(rq);
rq                500 drivers/gpu/drm/i915/gt/intel_lrc.c 		owner = rq->hw_context->engine;
rq                502 drivers/gpu/drm/i915/gt/intel_lrc.c 			GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
rq                503 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (rq_prio(rq) != prio) {
rq                504 drivers/gpu/drm/i915/gt/intel_lrc.c 				prio = rq_prio(rq);
rq                509 drivers/gpu/drm/i915/gt/intel_lrc.c 			list_move(&rq->sched.link, pl);
rq                510 drivers/gpu/drm/i915/gt/intel_lrc.c 			active = rq;
rq                520 drivers/gpu/drm/i915/gt/intel_lrc.c 				     &rq->fence.flags)) {
rq                521 drivers/gpu/drm/i915/gt/intel_lrc.c 				spin_lock_nested(&rq->lock,
rq                523 drivers/gpu/drm/i915/gt/intel_lrc.c 				i915_request_cancel_breadcrumb(rq);
rq                524 drivers/gpu/drm/i915/gt/intel_lrc.c 				spin_unlock(&rq->lock);
rq                526 drivers/gpu/drm/i915/gt/intel_lrc.c 			rq->engine = owner;
rq                527 drivers/gpu/drm/i915/gt/intel_lrc.c 			owner->submit_request(rq);
rq                545 drivers/gpu/drm/i915/gt/intel_lrc.c execlists_context_status_change(struct i915_request *rq, unsigned long status)
rq                554 drivers/gpu/drm/i915/gt/intel_lrc.c 	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
rq                555 drivers/gpu/drm/i915/gt/intel_lrc.c 				   status, rq);
rq                559 drivers/gpu/drm/i915/gt/intel_lrc.c __execlists_schedule_in(struct i915_request *rq)
rq                561 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct intel_engine_cs * const engine = rq->engine;
rq                562 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct intel_context * const ce = rq->hw_context;
rq                567 drivers/gpu/drm/i915/gt/intel_lrc.c 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
rq                574 drivers/gpu/drm/i915/gt/intel_lrc.c execlists_schedule_in(struct i915_request *rq, int idx)
rq                576 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct intel_context * const ce = rq->hw_context;
rq                579 drivers/gpu/drm/i915/gt/intel_lrc.c 	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
rq                580 drivers/gpu/drm/i915/gt/intel_lrc.c 	trace_i915_request_in(rq, idx);
rq                585 drivers/gpu/drm/i915/gt/intel_lrc.c 			WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
rq                590 drivers/gpu/drm/i915/gt/intel_lrc.c 	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
rq                591 drivers/gpu/drm/i915/gt/intel_lrc.c 	return i915_request_get(rq);
rq                594 drivers/gpu/drm/i915/gt/intel_lrc.c static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
rq                599 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (next && next->execution_mask & ~rq->execution_mask)
rq                604 drivers/gpu/drm/i915/gt/intel_lrc.c __execlists_schedule_out(struct i915_request *rq,
rq                607 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct intel_context * const ce = rq->hw_context;
rq                610 drivers/gpu/drm/i915/gt/intel_lrc.c 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
rq                623 drivers/gpu/drm/i915/gt/intel_lrc.c 		kick_siblings(rq, ce);
rq                629 drivers/gpu/drm/i915/gt/intel_lrc.c execlists_schedule_out(struct i915_request *rq)
rq                631 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct intel_context * const ce = rq->hw_context;
rq                634 drivers/gpu/drm/i915/gt/intel_lrc.c 	trace_i915_request_out(rq);
rq                641 drivers/gpu/drm/i915/gt/intel_lrc.c 		__execlists_schedule_out(rq, old);
rq                643 drivers/gpu/drm/i915/gt/intel_lrc.c 	i915_request_put(rq);
rq                646 drivers/gpu/drm/i915/gt/intel_lrc.c static u64 execlists_update_context(struct i915_request *rq)
rq                648 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct intel_context *ce = rq->hw_context;
rq                669 drivers/gpu/drm/i915/gt/intel_lrc.c 	tail = intel_ring_set_tail(rq->ring, rq->tail);
rq                671 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
rq                674 drivers/gpu/drm/i915/gt/intel_lrc.c 	rq->tail = rq->wa_tail;
rq                732 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_request * const *port, *rq;
rq                743 drivers/gpu/drm/i915/gt/intel_lrc.c 	for (port = execlists->pending; (rq = *port); port++) {
rq                744 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (ce == rq->hw_context)
rq                747 drivers/gpu/drm/i915/gt/intel_lrc.c 		ce = rq->hw_context;
rq                748 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (i915_request_completed(rq))
rq                785 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_request *rq = execlists->pending[n];
rq                788 drivers/gpu/drm/i915/gt/intel_lrc.c 			   rq ? execlists_update_context(rq) : 0,
rq                885 drivers/gpu/drm/i915/gt/intel_lrc.c 			    const struct i915_request *rq,
rq                890 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
rq                941 drivers/gpu/drm/i915/gt/intel_lrc.c static void defer_request(struct i915_request *rq, struct list_head * const pl)
rq                955 drivers/gpu/drm/i915/gt/intel_lrc.c 		GEM_BUG_ON(i915_request_is_active(rq));
rq                956 drivers/gpu/drm/i915/gt/intel_lrc.c 		list_move_tail(&rq->sched.link, pl);
rq                958 drivers/gpu/drm/i915/gt/intel_lrc.c 		for_each_waiter(p, rq) {
rq                963 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (w->engine != rq->engine)
rq                968 drivers/gpu/drm/i915/gt/intel_lrc.c 				   !i915_request_completed(rq));
rq                974 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (rq_prio(w) < rq_prio(rq))
rq                977 drivers/gpu/drm/i915/gt/intel_lrc.c 			GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
rq                981 drivers/gpu/drm/i915/gt/intel_lrc.c 		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
rq                982 drivers/gpu/drm/i915/gt/intel_lrc.c 	} while (rq);
rq                987 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_request *rq;
rq                989 drivers/gpu/drm/i915/gt/intel_lrc.c 	rq = __unwind_incomplete_requests(engine);
rq                990 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!rq)
rq                993 drivers/gpu/drm/i915/gt/intel_lrc.c 	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
rq                997 drivers/gpu/drm/i915/gt/intel_lrc.c need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
rq               1004 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (list_is_last(&rq->sched.link, &engine->active.requests))
rq               1007 drivers/gpu/drm/i915/gt/intel_lrc.c 	hint = max(rq_prio(list_next_entry(rq, sched.link)),
rq               1010 drivers/gpu/drm/i915/gt/intel_lrc.c 	return hint >= effective_prio(rq);
rq               1014 drivers/gpu/drm/i915/gt/intel_lrc.c switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
rq               1016 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (list_is_last(&rq->sched.link, &engine->active.requests))
rq               1019 drivers/gpu/drm/i915/gt/intel_lrc.c 	return rq_prio(list_next_entry(rq, sched.link));
rq               1025 drivers/gpu/drm/i915/gt/intel_lrc.c 	const struct i915_request *rq = *execlists->active;
rq               1027 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (i915_request_completed(rq))
rq               1030 drivers/gpu/drm/i915/gt/intel_lrc.c 	return execlists->switch_priority_hint >= effective_prio(rq);
rq               1072 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_request *rq = READ_ONCE(ve->request);
rq               1074 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (!rq) { /* lazily cleanup after another engine handled rq */
rq               1081 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (!virtual_matches(ve, rq, engine)) {
rq               1170 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_request *rq;
rq               1174 drivers/gpu/drm/i915/gt/intel_lrc.c 		rq = ve->request;
rq               1175 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (unlikely(!rq)) { /* lost the race to a sibling */
rq               1183 drivers/gpu/drm/i915/gt/intel_lrc.c 		GEM_BUG_ON(rq != ve->request);
rq               1184 drivers/gpu/drm/i915/gt/intel_lrc.c 		GEM_BUG_ON(rq->engine != &ve->base);
rq               1185 drivers/gpu/drm/i915/gt/intel_lrc.c 		GEM_BUG_ON(rq->hw_context != &ve->context);
rq               1187 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (rq_prio(rq) >= queue_prio(execlists)) {
rq               1188 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (!virtual_matches(ve, rq, engine)) {
rq               1194 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (last && !can_merge_rq(last, rq)) {
rq               1201 drivers/gpu/drm/i915/gt/intel_lrc.c 				  rq->fence.context,
rq               1202 drivers/gpu/drm/i915/gt/intel_lrc.c 				  rq->fence.seqno,
rq               1203 drivers/gpu/drm/i915/gt/intel_lrc.c 				  i915_request_completed(rq) ? "!" :
rq               1204 drivers/gpu/drm/i915/gt/intel_lrc.c 				  i915_request_started(rq) ? "*" :
rq               1213 drivers/gpu/drm/i915/gt/intel_lrc.c 			GEM_BUG_ON(!(rq->execution_mask & engine->mask));
rq               1214 drivers/gpu/drm/i915/gt/intel_lrc.c 			rq->engine = engine;
rq               1244 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (__i915_request_submit(rq)) {
rq               1246 drivers/gpu/drm/i915/gt/intel_lrc.c 				last = rq;
rq               1248 drivers/gpu/drm/i915/gt/intel_lrc.c 			i915_request_put(rq);
rq               1270 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_request *rq, *rn;
rq               1273 drivers/gpu/drm/i915/gt/intel_lrc.c 		priolist_for_each_request_consume(rq, rn, p, i) {
rq               1287 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (last && !can_merge_rq(last, rq)) {
rq               1301 drivers/gpu/drm/i915/gt/intel_lrc.c 				if (last->hw_context == rq->hw_context)
rq               1312 drivers/gpu/drm/i915/gt/intel_lrc.c 				    ctx_single_port_submission(rq->hw_context))
rq               1318 drivers/gpu/drm/i915/gt/intel_lrc.c 			if (__i915_request_submit(rq)) {
rq               1327 drivers/gpu/drm/i915/gt/intel_lrc.c 							  rq->hw_context));
rq               1330 drivers/gpu/drm/i915/gt/intel_lrc.c 				last = rq;
rq               1374 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_request * const *port, *rq;
rq               1376 drivers/gpu/drm/i915/gt/intel_lrc.c 	for (port = execlists->pending; (rq = *port); port++)
rq               1377 drivers/gpu/drm/i915/gt/intel_lrc.c 		execlists_schedule_out(rq);
rq               1380 drivers/gpu/drm/i915/gt/intel_lrc.c 	for (port = execlists->active; (rq = *port); port++)
rq               1381 drivers/gpu/drm/i915/gt/intel_lrc.c 		execlists_schedule_out(rq);
rq               1676 drivers/gpu/drm/i915/gt/intel_lrc.c 			 const struct i915_request *rq)
rq               1680 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (rq_prio(rq) <= execlists->queue_priority_hint)
rq               1683 drivers/gpu/drm/i915/gt/intel_lrc.c 	execlists->queue_priority_hint = rq_prio(rq);
rq               1870 drivers/gpu/drm/i915/gt/intel_lrc.c static int gen8_emit_init_breadcrumb(struct i915_request *rq)
rq               1874 drivers/gpu/drm/i915/gt/intel_lrc.c 	GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb);
rq               1876 drivers/gpu/drm/i915/gt/intel_lrc.c 	cs = intel_ring_begin(rq, 6);
rq               1890 drivers/gpu/drm/i915/gt/intel_lrc.c 	*cs++ = rq->timeline->hwsp_offset;
rq               1892 drivers/gpu/drm/i915/gt/intel_lrc.c 	*cs++ = rq->fence.seqno - 1;
rq               1894 drivers/gpu/drm/i915/gt/intel_lrc.c 	intel_ring_advance(rq, cs);
rq               1897 drivers/gpu/drm/i915/gt/intel_lrc.c 	rq->infix = intel_ring_offset(rq, cs);
rq               1902 drivers/gpu/drm/i915/gt/intel_lrc.c static int emit_pdps(struct i915_request *rq)
rq               1904 drivers/gpu/drm/i915/gt/intel_lrc.c 	const struct intel_engine_cs * const engine = rq->engine;
rq               1905 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm);
rq               1909 drivers/gpu/drm/i915/gt/intel_lrc.c 	GEM_BUG_ON(intel_vgpu_active(rq->i915));
rq               1919 drivers/gpu/drm/i915/gt/intel_lrc.c 	err = engine->emit_flush(rq, EMIT_FLUSH);
rq               1924 drivers/gpu/drm/i915/gt/intel_lrc.c 	err = engine->emit_flush(rq, EMIT_INVALIDATE);
rq               1928 drivers/gpu/drm/i915/gt/intel_lrc.c 	cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
rq               1945 drivers/gpu/drm/i915/gt/intel_lrc.c 	intel_ring_advance(rq, cs);
rq               1948 drivers/gpu/drm/i915/gt/intel_lrc.c 	err = engine->emit_flush(rq, EMIT_FLUSH);
rq               1953 drivers/gpu/drm/i915/gt/intel_lrc.c 	return engine->emit_flush(rq, EMIT_INVALIDATE);
rq               2436 drivers/gpu/drm/i915/gt/intel_lrc.c static struct i915_request *active_request(struct i915_request *rq)
rq               2438 drivers/gpu/drm/i915/gt/intel_lrc.c 	const struct intel_context * const ce = rq->hw_context;
rq               2442 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
rq               2443 drivers/gpu/drm/i915/gt/intel_lrc.c 		return rq;
rq               2445 drivers/gpu/drm/i915/gt/intel_lrc.c 	list = &rq->timeline->requests;
rq               2446 drivers/gpu/drm/i915/gt/intel_lrc.c 	list_for_each_entry_from_reverse(rq, list, link) {
rq               2447 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (i915_request_completed(rq))
rq               2450 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (rq->hw_context != ce)
rq               2453 drivers/gpu/drm/i915/gt/intel_lrc.c 		active = rq;
rq               2463 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_request *rq;
rq               2476 drivers/gpu/drm/i915/gt/intel_lrc.c 	rq = execlists_active(execlists);
rq               2477 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!rq)
rq               2480 drivers/gpu/drm/i915/gt/intel_lrc.c 	ce = rq->hw_context;
rq               2483 drivers/gpu/drm/i915/gt/intel_lrc.c 	rq = active_request(rq);
rq               2484 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!rq) {
rq               2489 drivers/gpu/drm/i915/gt/intel_lrc.c 	ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
rq               2503 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!i915_request_started(rq))
rq               2517 drivers/gpu/drm/i915/gt/intel_lrc.c 	__i915_request_reset(rq, stalled);
rq               2570 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_request *rq, *rn;
rq               2595 drivers/gpu/drm/i915/gt/intel_lrc.c 	list_for_each_entry(rq, &engine->active.requests, sched.link)
rq               2596 drivers/gpu/drm/i915/gt/intel_lrc.c 		mark_eio(rq);
rq               2603 drivers/gpu/drm/i915/gt/intel_lrc.c 		priolist_for_each_request_consume(rq, rn, p, i) {
rq               2604 drivers/gpu/drm/i915/gt/intel_lrc.c 			mark_eio(rq);
rq               2605 drivers/gpu/drm/i915/gt/intel_lrc.c 			__i915_request_submit(rq);
rq               2621 drivers/gpu/drm/i915/gt/intel_lrc.c 		rq = fetch_and_zero(&ve->request);
rq               2622 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (rq) {
rq               2623 drivers/gpu/drm/i915/gt/intel_lrc.c 			mark_eio(rq);
rq               2625 drivers/gpu/drm/i915/gt/intel_lrc.c 			rq->engine = engine;
rq               2626 drivers/gpu/drm/i915/gt/intel_lrc.c 			__i915_request_submit(rq);
rq               2627 drivers/gpu/drm/i915/gt/intel_lrc.c 			i915_request_put(rq);
rq               2665 drivers/gpu/drm/i915/gt/intel_lrc.c static int gen8_emit_bb_start(struct i915_request *rq,
rq               2671 drivers/gpu/drm/i915/gt/intel_lrc.c 	cs = intel_ring_begin(rq, 4);
rq               2696 drivers/gpu/drm/i915/gt/intel_lrc.c 	intel_ring_advance(rq, cs);
rq               2701 drivers/gpu/drm/i915/gt/intel_lrc.c static int gen9_emit_bb_start(struct i915_request *rq,
rq               2707 drivers/gpu/drm/i915/gt/intel_lrc.c 	cs = intel_ring_begin(rq, 6);
rq               2721 drivers/gpu/drm/i915/gt/intel_lrc.c 	intel_ring_advance(rq, cs);
rq               3527 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_request *rq;
rq               3530 drivers/gpu/drm/i915/gt/intel_lrc.c 	rq = READ_ONCE(ve->request);
rq               3531 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!rq)
rq               3535 drivers/gpu/drm/i915/gt/intel_lrc.c 	mask = rq->execution_mask;
rq               3538 drivers/gpu/drm/i915/gt/intel_lrc.c 		i915_request_skip(rq, -ENODEV);
rq               3544 drivers/gpu/drm/i915/gt/intel_lrc.c 		  rq->fence.context, rq->fence.seqno,
rq               3630 drivers/gpu/drm/i915/gt/intel_lrc.c static void virtual_submit_request(struct i915_request *rq)
rq               3632 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct virtual_engine *ve = to_virtual_engine(rq->engine);
rq               3638 drivers/gpu/drm/i915/gt/intel_lrc.c 		  rq->fence.context,
rq               3639 drivers/gpu/drm/i915/gt/intel_lrc.c 		  rq->fence.seqno);
rq               3652 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (i915_request_completed(rq)) {
rq               3653 drivers/gpu/drm/i915/gt/intel_lrc.c 		__i915_request_submit(rq);
rq               3658 drivers/gpu/drm/i915/gt/intel_lrc.c 		ve->base.execlists.queue_priority_hint = rq_prio(rq);
rq               3659 drivers/gpu/drm/i915/gt/intel_lrc.c 		ve->request = i915_request_get(rq);
rq               3662 drivers/gpu/drm/i915/gt/intel_lrc.c 		list_move_tail(&rq->sched.link, virtual_queue(ve));
rq               3685 drivers/gpu/drm/i915/gt/intel_lrc.c virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
rq               3687 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct virtual_engine *ve = to_virtual_engine(rq->engine);
rq               3698 drivers/gpu/drm/i915/gt/intel_lrc.c 	exec = READ_ONCE(rq->execution_mask);
rq               3699 drivers/gpu/drm/i915/gt/intel_lrc.c 	while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
rq               3917 drivers/gpu/drm/i915/gt/intel_lrc.c 							struct i915_request *rq,
rq               3922 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct i915_request *rq, *last;
rq               3931 drivers/gpu/drm/i915/gt/intel_lrc.c 	list_for_each_entry(rq, &engine->active.requests, sched.link) {
rq               3933 drivers/gpu/drm/i915/gt/intel_lrc.c 			show_request(m, rq, "\t\tE ");
rq               3935 drivers/gpu/drm/i915/gt/intel_lrc.c 			last = rq;
rq               3955 drivers/gpu/drm/i915/gt/intel_lrc.c 		priolist_for_each_request(rq, p, i) {
rq               3957 drivers/gpu/drm/i915/gt/intel_lrc.c 				show_request(m, rq, "\t\tQ ");
rq               3959 drivers/gpu/drm/i915/gt/intel_lrc.c 				last = rq;
rq               3976 drivers/gpu/drm/i915/gt/intel_lrc.c 		struct i915_request *rq = READ_ONCE(ve->request);
rq               3978 drivers/gpu/drm/i915/gt/intel_lrc.c 		if (rq) {
rq               3980 drivers/gpu/drm/i915/gt/intel_lrc.c 				show_request(m, rq, "\t\tV ");
rq               3982 drivers/gpu/drm/i915/gt/intel_lrc.c 				last = rq;
rq                117 drivers/gpu/drm/i915/gt/intel_lrc.h 							struct i915_request *rq,
rq                433 drivers/gpu/drm/i915/gt/intel_mocs.c static int emit_mocs_control_table(struct i915_request *rq,
rq                436 drivers/gpu/drm/i915/gt/intel_mocs.c 	enum intel_engine_id engine = rq->engine->id;
rq                447 drivers/gpu/drm/i915/gt/intel_mocs.c 	cs = intel_ring_begin(rq, 2 + 2 * table->n_entries);
rq                467 drivers/gpu/drm/i915/gt/intel_mocs.c 	intel_ring_advance(rq, cs);
rq                492 drivers/gpu/drm/i915/gt/intel_mocs.c static int emit_mocs_l3cc_table(struct i915_request *rq,
rq                505 drivers/gpu/drm/i915/gt/intel_mocs.c 	cs = intel_ring_begin(rq, 2 + table->n_entries);
rq                535 drivers/gpu/drm/i915/gt/intel_mocs.c 	intel_ring_advance(rq, cs);
rq                596 drivers/gpu/drm/i915/gt/intel_mocs.c int intel_mocs_emit(struct i915_request *rq)
rq                601 drivers/gpu/drm/i915/gt/intel_mocs.c 	if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915) ||
rq                602 drivers/gpu/drm/i915/gt/intel_mocs.c 	    rq->engine->class != RENDER_CLASS)
rq                605 drivers/gpu/drm/i915/gt/intel_mocs.c 	if (get_mocs_settings(rq->engine->gt, &t)) {
rq                607 drivers/gpu/drm/i915/gt/intel_mocs.c 		ret = emit_mocs_control_table(rq, &t);
rq                612 drivers/gpu/drm/i915/gt/intel_mocs.c 		ret = emit_mocs_l3cc_table(rq, &t);
rq                 59 drivers/gpu/drm/i915/gt/intel_mocs.h int intel_mocs_emit(struct i915_request *rq);
rq                179 drivers/gpu/drm/i915/gt/intel_renderstate.c int intel_renderstate_emit(struct i915_request *rq)
rq                181 drivers/gpu/drm/i915/gt/intel_renderstate.c 	struct intel_engine_cs *engine = rq->engine;
rq                206 drivers/gpu/drm/i915/gt/intel_renderstate.c 	err = render_state_setup(&so, rq->i915);
rq                210 drivers/gpu/drm/i915/gt/intel_renderstate.c 	err = engine->emit_bb_start(rq,
rq                217 drivers/gpu/drm/i915/gt/intel_renderstate.c 		err = engine->emit_bb_start(rq,
rq                225 drivers/gpu/drm/i915/gt/intel_renderstate.c 	err = i915_request_await_object(rq, so.vma->obj, false);
rq                227 drivers/gpu/drm/i915/gt/intel_renderstate.c 		err = i915_vma_move_to_active(so.vma, rq, 0);
rq                 49 drivers/gpu/drm/i915/gt/intel_renderstate.h int intel_renderstate_emit(struct i915_request *rq);
rq                 40 drivers/gpu/drm/i915/gt/intel_reset.c static void engine_skip_context(struct i915_request *rq)
rq                 42 drivers/gpu/drm/i915/gt/intel_reset.c 	struct intel_engine_cs *engine = rq->engine;
rq                 43 drivers/gpu/drm/i915/gt/intel_reset.c 	struct i915_gem_context *hung_ctx = rq->gem_context;
rq                 45 drivers/gpu/drm/i915/gt/intel_reset.c 	if (!i915_request_is_active(rq))
rq                 49 drivers/gpu/drm/i915/gt/intel_reset.c 	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
rq                 50 drivers/gpu/drm/i915/gt/intel_reset.c 		if (rq->gem_context == hung_ctx)
rq                 51 drivers/gpu/drm/i915/gt/intel_reset.c 			i915_request_skip(rq, -EIO);
rq                117 drivers/gpu/drm/i915/gt/intel_reset.c void __i915_request_reset(struct i915_request *rq, bool guilty)
rq                120 drivers/gpu/drm/i915/gt/intel_reset.c 		  rq->engine->name,
rq                121 drivers/gpu/drm/i915/gt/intel_reset.c 		  rq->fence.context,
rq                122 drivers/gpu/drm/i915/gt/intel_reset.c 		  rq->fence.seqno,
rq                125 drivers/gpu/drm/i915/gt/intel_reset.c 	GEM_BUG_ON(i915_request_completed(rq));
rq                128 drivers/gpu/drm/i915/gt/intel_reset.c 		i915_request_skip(rq, -EIO);
rq                129 drivers/gpu/drm/i915/gt/intel_reset.c 		if (context_mark_guilty(rq->gem_context))
rq                130 drivers/gpu/drm/i915/gt/intel_reset.c 			engine_skip_context(rq);
rq                132 drivers/gpu/drm/i915/gt/intel_reset.c 		dma_fence_set_error(&rq->fence, -EAGAIN);
rq                133 drivers/gpu/drm/i915/gt/intel_reset.c 		context_mark_innocent(rq->gem_context);
rq                815 drivers/gpu/drm/i915/gt/intel_reset.c 		struct i915_request *rq;
rq                817 drivers/gpu/drm/i915/gt/intel_reset.c 		rq = i915_active_request_get_unlocked(&tl->last_request);
rq                818 drivers/gpu/drm/i915/gt/intel_reset.c 		if (!rq)
rq                830 drivers/gpu/drm/i915/gt/intel_reset.c 		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
rq                831 drivers/gpu/drm/i915/gt/intel_reset.c 		i915_request_put(rq);
rq                 39 drivers/gpu/drm/i915/gt/intel_reset.h void __i915_request_reset(struct i915_request *rq, bool guilty);
rq                 61 drivers/gpu/drm/i915/gt/intel_ringbuffer.c gen2_render_ring_flush(struct i915_request *rq, u32 mode)
rq                 73 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
rq                 80 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
rq                 86 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq                 92 drivers/gpu/drm/i915/gt/intel_ringbuffer.c gen4_render_ring_flush(struct i915_request *rq, u32 mode)
rq                128 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
rq                136 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, i);
rq                154 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
rq                164 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
rq                173 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq                216 drivers/gpu/drm/i915/gt/intel_ringbuffer.c gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
rq                219 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		intel_gt_scratch_offset(rq->engine->gt,
rq                223 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 6);
rq                233 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq                235 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 6);
rq                245 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq                251 drivers/gpu/drm/i915/gt/intel_ringbuffer.c gen6_render_ring_flush(struct i915_request *rq, u32 mode)
rq                254 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		intel_gt_scratch_offset(rq->engine->gt,
rq                260 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ret = gen6_emit_post_sync_nonzero_flush(rq);
rq                290 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 4);
rq                298 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq                303 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
rq                313 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
rq                325 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	*cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
rq                326 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	*cs++ = rq->fence.seqno;
rq                331 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	rq->tail = intel_ring_offset(rq, cs);
rq                332 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
rq                338 drivers/gpu/drm/i915/gt/intel_ringbuffer.c gen7_render_ring_cs_stall_wa(struct i915_request *rq)
rq                342 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 4);
rq                350 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq                356 drivers/gpu/drm/i915/gt/intel_ringbuffer.c gen7_render_ring_flush(struct i915_request *rq, u32 mode)
rq                359 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		intel_gt_scratch_offset(rq->engine->gt,
rq                402 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		gen7_render_ring_cs_stall_wa(rq);
rq                405 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 4);
rq                413 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq                418 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
rq                428 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	*cs++ = rq->timeline->hwsp_offset;
rq                429 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	*cs++ = rq->fence.seqno;
rq                434 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	rq->tail = intel_ring_offset(rq, cs);
rq                435 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
rq                440 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
rq                442 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
rq                443 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
rq                447 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	*cs++ = rq->fence.seqno;
rq                451 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	rq->tail = intel_ring_offset(rq, cs);
rq                452 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
rq                458 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
rq                462 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
rq                463 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
rq                467 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	*cs++ = rq->fence.seqno;
rq                472 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		*cs++ = rq->fence.seqno;
rq                482 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	rq->tail = intel_ring_offset(rq, cs);
rq                483 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
rq                784 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct i915_request *pos, *rq;
rq                788 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	rq = NULL;
rq                792 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 			rq = pos;
rq                819 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (rq) {
rq                835 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		__i915_request_reset(rq, stalled);
rq                837 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		GEM_BUG_ON(rq->ring != engine->legacy.ring);
rq                838 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		head = rq->head;
rq                938 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
rq                940 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
rq                941 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
rq                947 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	*cs++ = rq->fence.seqno;
rq                952 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	rq->tail = intel_ring_offset(rq, cs);
rq                953 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
rq                959 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
rq                963 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
rq                964 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
rq                972 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		*cs++ = rq->fence.seqno;
rq                977 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	rq->tail = intel_ring_offset(rq, cs);
rq                978 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	assert_ring_tail_valid(rq->ring, rq->tail);
rq               1031 drivers/gpu/drm/i915/gt/intel_ringbuffer.c bsd_ring_flush(struct i915_request *rq, u32 mode)
rq               1035 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 2);
rq               1041 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               1083 drivers/gpu/drm/i915/gt/intel_ringbuffer.c i965_emit_bb_start(struct i915_request *rq,
rq               1089 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 2);
rq               1096 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               1106 drivers/gpu/drm/i915/gt/intel_ringbuffer.c i830_emit_bb_start(struct i915_request *rq,
rq               1111 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		intel_gt_scratch_offset(rq->engine->gt,
rq               1114 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
rq               1116 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 6);
rq               1127 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               1133 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		cs = intel_ring_begin(rq, 6 + 2);
rq               1150 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		intel_ring_advance(rq, cs);
rq               1156 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 2);
rq               1163 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               1169 drivers/gpu/drm/i915/gt/intel_ringbuffer.c i915_emit_bb_start(struct i915_request *rq,
rq               1175 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 2);
rq               1182 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               1530 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
rq               1532 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	const struct intel_engine_cs * const engine = rq->engine;
rq               1535 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 6);
rq               1547 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               1552 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static int flush_pd_dir(struct i915_request *rq)
rq               1554 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	const struct intel_engine_cs * const engine = rq->engine;
rq               1557 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 4);
rq               1564 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
rq               1568 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               1572 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static inline int mi_set_context(struct i915_request *rq, u32 flags)
rq               1574 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct drm_i915_private *i915 = rq->i915;
rq               1575 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct intel_engine_cs *engine = rq->engine;
rq               1603 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, len);
rq               1655 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	*cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
rq               1681 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 			*cs++ = intel_gt_scratch_offset(rq->engine->gt,
rq               1690 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               1695 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static int remap_l3_slice(struct i915_request *rq, int slice)
rq               1697 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
rq               1703 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
rq               1718 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               1723 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static int remap_l3(struct i915_request *rq)
rq               1725 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct i915_gem_context *ctx = rq->gem_context;
rq               1735 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		err = remap_l3_slice(rq, i);
rq               1744 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static int switch_context(struct i915_request *rq)
rq               1746 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct intel_engine_cs *engine = rq->engine;
rq               1747 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct i915_address_space *vm = vm_alias(rq->hw_context);
rq               1752 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
rq               1772 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 			ret = load_pd_dir(rq, ppgtt);
rq               1784 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	if (rq->hw_context->state) {
rq               1794 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		if (i915_gem_context_is_kernel(rq->gem_context))
rq               1797 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ret = mi_set_context(rq, hw_flags);
rq               1803 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
rq               1807 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ret = flush_pd_dir(rq);
rq               1819 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
rq               1823 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ret = engine->emit_flush(rq, EMIT_FLUSH);
rq               1828 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	ret = remap_l3(rq);
rq               1906 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
rq               1908 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	struct intel_ring *ring = rq->ring;
rq               1918 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	total_bytes = bytes + rq->reserved_space;
rq               1939 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 			total_bytes = rq->reserved_space + remain_actual;
rq               1955 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		GEM_BUG_ON(!rq->reserved_space);
rq               1957 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 		ret = wait_for_space(ring, rq->timeline, total_bytes);
rq               1985 drivers/gpu/drm/i915/gt/intel_ringbuffer.c int intel_ring_cacheline_align(struct i915_request *rq)
rq               1990 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
rq               1997 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, num_dwords);
rq               2002 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               2004 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
rq               2045 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static int mi_flush_dw(struct i915_request *rq, u32 flags)
rq               2049 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 4);
rq               2076 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               2081 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
rq               2083 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
rq               2086 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
rq               2088 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
rq               2092 drivers/gpu/drm/i915/gt/intel_ringbuffer.c hsw_emit_bb_start(struct i915_request *rq,
rq               2098 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 2);
rq               2106 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               2112 drivers/gpu/drm/i915/gt/intel_ringbuffer.c gen6_emit_bb_start(struct i915_request *rq,
rq               2118 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	cs = intel_ring_begin(rq, 2);
rq               2126 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	intel_ring_advance(rq, cs);
rq               2133 drivers/gpu/drm/i915/gt/intel_ringbuffer.c static int gen6_ring_flush(struct i915_request *rq, u32 mode)
rq               2135 drivers/gpu/drm/i915/gt/intel_ringbuffer.c 	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
rq                392 drivers/gpu/drm/i915/gt/intel_timeline.c 			   struct i915_request *rq,
rq                445 drivers/gpu/drm/i915/gt/intel_timeline.c 	err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
rq                481 drivers/gpu/drm/i915/gt/intel_timeline.c 			     struct i915_request *rq,
rq                488 drivers/gpu/drm/i915/gt/intel_timeline.c 		return __intel_timeline_get_seqno(tl, rq, seqno);
rq                494 drivers/gpu/drm/i915/gt/intel_timeline.c 			 struct i915_request *rq)
rq                496 drivers/gpu/drm/i915/gt/intel_timeline.c 	return i915_active_ref(&cl->active, rq->timeline, rq);
rq                 82 drivers/gpu/drm/i915/gt/intel_timeline.h 			     struct i915_request *rq,
rq                617 drivers/gpu/drm/i915/gt/intel_workarounds.c int intel_engine_emit_ctx_wa(struct i915_request *rq)
rq                619 drivers/gpu/drm/i915/gt/intel_workarounds.c 	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
rq                628 drivers/gpu/drm/i915/gt/intel_workarounds.c 	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
rq                632 drivers/gpu/drm/i915/gt/intel_workarounds.c 	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
rq                643 drivers/gpu/drm/i915/gt/intel_workarounds.c 	intel_ring_advance(rq, cs);
rq                645 drivers/gpu/drm/i915/gt/intel_workarounds.c 	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
rq               1462 drivers/gpu/drm/i915/gt/intel_workarounds.c wa_list_srm(struct i915_request *rq,
rq               1466 drivers/gpu/drm/i915/gt/intel_workarounds.c 	struct drm_i915_private *i915 = rq->i915;
rq               1480 drivers/gpu/drm/i915/gt/intel_workarounds.c 	cs = intel_ring_begin(rq, 4 * count);
rq               1495 drivers/gpu/drm/i915/gt/intel_workarounds.c 	intel_ring_advance(rq, cs);
rq               1505 drivers/gpu/drm/i915/gt/intel_workarounds.c 	struct i915_request *rq;
rq               1518 drivers/gpu/drm/i915/gt/intel_workarounds.c 	rq = intel_context_create_request(ce);
rq               1519 drivers/gpu/drm/i915/gt/intel_workarounds.c 	if (IS_ERR(rq)) {
rq               1520 drivers/gpu/drm/i915/gt/intel_workarounds.c 		err = PTR_ERR(rq);
rq               1524 drivers/gpu/drm/i915/gt/intel_workarounds.c 	err = wa_list_srm(rq, wal, vma);
rq               1528 drivers/gpu/drm/i915/gt/intel_workarounds.c 	i915_request_add(rq);
rq               1529 drivers/gpu/drm/i915/gt/intel_workarounds.c 	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
rq               1542 drivers/gpu/drm/i915/gt/intel_workarounds.c 		if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
rq                 26 drivers/gpu/drm/i915/gt/intel_workarounds.h int intel_engine_emit_ctx_wa(struct i915_request *rq);
rq                 15 drivers/gpu/drm/i915/gt/selftest_context.c static int request_sync(struct i915_request *rq)
rq                 20 drivers/gpu/drm/i915/gt/selftest_context.c 	i915_request_get(rq);
rq                 22 drivers/gpu/drm/i915/gt/selftest_context.c 	i915_request_add(rq);
rq                 23 drivers/gpu/drm/i915/gt/selftest_context.c 	timeout = i915_request_wait(rq, 0, HZ / 10);
rq                 27 drivers/gpu/drm/i915/gt/selftest_context.c 		mutex_lock(&rq->timeline->mutex);
rq                 28 drivers/gpu/drm/i915/gt/selftest_context.c 		i915_request_retire_upto(rq);
rq                 29 drivers/gpu/drm/i915/gt/selftest_context.c 		mutex_unlock(&rq->timeline->mutex);
rq                 32 drivers/gpu/drm/i915/gt/selftest_context.c 	i915_request_put(rq);
rq                 44 drivers/gpu/drm/i915/gt/selftest_context.c 		struct i915_request *rq;
rq                 48 drivers/gpu/drm/i915/gt/selftest_context.c 		rq = rcu_dereference(tl->last_request.request);
rq                 49 drivers/gpu/drm/i915/gt/selftest_context.c 		if (rq)
rq                 50 drivers/gpu/drm/i915/gt/selftest_context.c 			rq = i915_request_get_rcu(rq);
rq                 52 drivers/gpu/drm/i915/gt/selftest_context.c 		if (!rq)
rq                 55 drivers/gpu/drm/i915/gt/selftest_context.c 		timeout = i915_request_wait(rq, 0, HZ / 10);
rq                 59 drivers/gpu/drm/i915/gt/selftest_context.c 			i915_request_retire_upto(rq);
rq                 61 drivers/gpu/drm/i915/gt/selftest_context.c 		i915_request_put(rq);
rq                 72 drivers/gpu/drm/i915/gt/selftest_context.c 	struct i915_request *rq;
rq                110 drivers/gpu/drm/i915/gt/selftest_context.c 	rq = intel_context_create_request(ce);
rq                112 drivers/gpu/drm/i915/gt/selftest_context.c 	if (IS_ERR(rq)) {
rq                113 drivers/gpu/drm/i915/gt/selftest_context.c 		err = PTR_ERR(rq);
rq                117 drivers/gpu/drm/i915/gt/selftest_context.c 	err = request_sync(rq);
rq                122 drivers/gpu/drm/i915/gt/selftest_context.c 	rq = i915_request_create(engine->kernel_context);
rq                123 drivers/gpu/drm/i915/gt/selftest_context.c 	if (IS_ERR(rq)) {
rq                124 drivers/gpu/drm/i915/gt/selftest_context.c 		err = PTR_ERR(rq);
rq                127 drivers/gpu/drm/i915/gt/selftest_context.c 	err = request_sync(rq);
rq                238 drivers/gpu/drm/i915/gt/selftest_context.c 		struct i915_request *rq;
rq                240 drivers/gpu/drm/i915/gt/selftest_context.c 		rq = intel_context_create_request(ce);
rq                241 drivers/gpu/drm/i915/gt/selftest_context.c 		if (IS_ERR(rq)) {
rq                242 drivers/gpu/drm/i915/gt/selftest_context.c 			err = PTR_ERR(rq);
rq                246 drivers/gpu/drm/i915/gt/selftest_context.c 		err = request_sync(rq);
rq                332 drivers/gpu/drm/i915/gt/selftest_context.c 	struct i915_request *rq;
rq                339 drivers/gpu/drm/i915/gt/selftest_context.c 	rq = intel_context_create_request(ce);
rq                340 drivers/gpu/drm/i915/gt/selftest_context.c 	if (IS_ERR(rq)) {
rq                341 drivers/gpu/drm/i915/gt/selftest_context.c 		err = PTR_ERR(rq);
rq                345 drivers/gpu/drm/i915/gt/selftest_context.c 	err = intel_context_prepare_remote_request(remote, rq);
rq                347 drivers/gpu/drm/i915/gt/selftest_context.c 		i915_request_add(rq);
rq                351 drivers/gpu/drm/i915/gt/selftest_context.c 	err = request_sync(rq);
rq                109 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		       const struct i915_request *rq)
rq                111 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
rq                115 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			  struct i915_request *rq,
rq                121 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = i915_request_await_object(rq, vma->obj,
rq                124 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = i915_vma_move_to_active(vma, rq, flags);
rq                136 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct i915_request *rq = NULL;
rq                175 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	rq = igt_request_alloc(h->ctx, engine);
rq                176 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (IS_ERR(rq)) {
rq                177 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = PTR_ERR(rq);
rq                181 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = move_to_active(vma, rq, 0);
rq                185 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = move_to_active(hws, rq, 0);
rq                192 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = lower_32_bits(hws_address(hws, rq));
rq                193 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = upper_32_bits(hws_address(hws, rq));
rq                194 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = rq->fence.seqno;
rq                207 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = lower_32_bits(hws_address(hws, rq));
rq                208 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = rq->fence.seqno;
rq                220 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = lower_32_bits(hws_address(hws, rq));
rq                221 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = rq->fence.seqno;
rq                232 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = lower_32_bits(hws_address(hws, rq));
rq                233 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		*batch++ = rq->fence.seqno;
rq                246 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (rq->engine->emit_init_breadcrumb) {
rq                247 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = rq->engine->emit_init_breadcrumb(rq);
rq                256 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
rq                260 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		i915_request_skip(rq, err);
rq                261 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		i915_request_add(rq);
rq                267 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	return err ? ERR_PTR(err) : rq;
rq                270 drivers/gpu/drm/i915/gt/selftest_hangcheck.c static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
rq                272 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
rq                291 drivers/gpu/drm/i915/gt/selftest_hangcheck.c static bool wait_until_running(struct hang *h, struct i915_request *rq)
rq                293 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
rq                294 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					       rq->fence.seqno),
rq                296 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		 wait_for(i915_seqno_passed(hws_seqno(h, rq),
rq                297 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					    rq->fence.seqno),
rq                304 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct i915_request *rq;
rq                324 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		rq = hang_create_request(&h, engine);
rq                325 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		if (IS_ERR(rq)) {
rq                326 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			err = PTR_ERR(rq);
rq                332 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		i915_request_get(rq);
rq                337 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		i915_request_add(rq);
rq                341 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			timeout = i915_request_wait(rq, 0,
rq                346 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		i915_request_put(rq);
rq                404 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				struct i915_request *rq;
rq                406 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				rq = igt_request_alloc(ctx, engine);
rq                407 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				if (IS_ERR(rq)) {
rq                408 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					err = PTR_ERR(rq);
rq                412 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_add(rq);
rq                499 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				struct i915_request *rq;
rq                501 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				rq = igt_request_alloc(ctx, engine);
rq                502 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				if (IS_ERR(rq)) {
rq                503 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					err = PTR_ERR(rq);
rq                507 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_add(rq);
rq                594 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				struct i915_request *rq;
rq                597 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				rq = hang_create_request(&h, engine);
rq                598 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				if (IS_ERR(rq)) {
rq                599 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					err = PTR_ERR(rq);
rq                604 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_get(rq);
rq                605 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_add(rq);
rq                608 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				if (!wait_until_running(&h, rq)) {
rq                612 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
rq                616 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					i915_request_put(rq);
rq                621 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_put(rq);
rq                689 drivers/gpu/drm/i915/gt/selftest_hangcheck.c static int active_request_put(struct i915_request *rq)
rq                693 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (!rq)
rq                696 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
rq                698 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			  rq->engine->name,
rq                699 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			  rq->fence.context,
rq                700 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			  rq->fence.seqno);
rq                703 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		intel_gt_set_wedged(rq->engine->gt);
rq                707 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_put(rq);
rq                717 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct i915_request *rq[8] = {};
rq                718 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct i915_gem_context *ctx[ARRAY_SIZE(rq)];
rq                740 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
rq                741 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		struct i915_request *old = rq[idx];
rq                756 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		rq[idx] = i915_request_get(new);
rq                767 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	for (count = 0; count < ARRAY_SIZE(rq); count++) {
rq                768 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		int err__ = active_request_put(rq[count]);
rq                855 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			struct i915_request *rq = NULL;
rq                859 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				rq = hang_create_request(&h, engine);
rq                860 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				if (IS_ERR(rq)) {
rq                861 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					err = PTR_ERR(rq);
rq                866 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_get(rq);
rq                867 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_add(rq);
rq                870 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				if (!wait_until_running(&h, rq)) {
rq                874 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
rq                878 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					i915_request_put(rq);
rq                893 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			if (rq) {
rq                894 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				if (i915_request_wait(rq, 0, HZ / 5) < 0) {
rq                903 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 					i915_request_put(rq);
rq                911 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_put(rq);
rq               1051 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct i915_request *rq;
rq               1069 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	rq = hang_create_request(&h, engine);
rq               1070 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (IS_ERR(rq)) {
rq               1071 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = PTR_ERR(rq);
rq               1075 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_get(rq);
rq               1076 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_add(rq);
rq               1078 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (!wait_until_running(&h, rq)) {
rq               1082 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
rq               1083 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
rq               1093 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	timeout = i915_request_wait(rq, 0, 10);
rq               1108 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_put(rq);
rq               1189 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct i915_request *rq;
rq               1224 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	rq = hang_create_request(&h, engine);
rq               1225 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (IS_ERR(rq)) {
rq               1226 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = PTR_ERR(rq);
rq               1235 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		i915_request_add(rq);
rq               1244 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			i915_request_add(rq);
rq               1250 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	err = i915_request_await_object(rq, arg.vma->obj,
rq               1253 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = i915_vma_move_to_active(arg.vma, rq, flags);
rq               1260 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_get(rq);
rq               1261 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_add(rq);
rq               1267 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (!wait_until_running(&h, rq)) {
rq               1271 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
rq               1272 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
rq               1290 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
rq               1294 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
rq               1302 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	fake_hangcheck(gt, rq->engine->mask);
rq               1317 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_put(rq);
rq               1430 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			struct i915_request *rq;
rq               1433 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			rq = hang_create_request(&h, engine);
rq               1434 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			if (IS_ERR(rq)) {
rq               1435 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				err = PTR_ERR(rq);
rq               1439 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			i915_request_get(rq);
rq               1440 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			i915_request_add(rq);
rq               1456 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_put(rq);
rq               1473 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_put(rq);
rq               1487 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_put(rq);
rq               1493 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			if (rq->fence.error) {
rq               1495 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				       rq->fence.error);
rq               1496 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_put(rq);
rq               1504 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				i915_request_put(rq);
rq               1511 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			prev = rq;
rq               1544 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct i915_request *rq;
rq               1562 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	rq = hang_create_request(&h, engine);
rq               1563 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (IS_ERR(rq)) {
rq               1564 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = PTR_ERR(rq);
rq               1568 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_get(rq);
rq               1569 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_add(rq);
rq               1571 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (!wait_until_running(&h, rq)) {
rq               1575 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
rq               1576 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
rq               1595 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (rq->fence.error != -EIO) {
rq               1602 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_put(rq);
rq               1638 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct i915_request *rq;
rq               1650 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	rq = hang_create_request(&h, engine);
rq               1651 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (IS_ERR(rq)) {
rq               1652 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = PTR_ERR(rq);
rq               1656 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_get(rq);
rq               1657 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_add(rq);
rq               1659 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (wait_until_running(&h, rq)) {
rq               1664 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		       rq->fence.seqno, hws_seqno(&h, rq));
rq               1673 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
rq               1678 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	i915_request_put(rq);
rq                 46 drivers/gpu/drm/i915/gt/selftest_lrc.c 		struct i915_request *rq;
rq                 48 drivers/gpu/drm/i915/gt/selftest_lrc.c 		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
rq                 49 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR(rq)) {
rq                 50 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = PTR_ERR(rq);
rq                 54 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_add(rq);
rq                 55 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (!igt_wait_for_spinner(&spin, rq)) {
rq                 83 drivers/gpu/drm/i915/gt/selftest_lrc.c emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
rq                 87 drivers/gpu/drm/i915/gt/selftest_lrc.c 	cs = intel_ring_begin(rq, 10);
rq                115 drivers/gpu/drm/i915/gt/selftest_lrc.c 	intel_ring_advance(rq, cs);
rq                123 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct i915_request *rq;
rq                130 drivers/gpu/drm/i915/gt/selftest_lrc.c 	rq = igt_request_alloc(ctx, engine);
rq                131 drivers/gpu/drm/i915/gt/selftest_lrc.c 	if (IS_ERR(rq))
rq                134 drivers/gpu/drm/i915/gt/selftest_lrc.c 	err = emit_semaphore_chain(rq, vma, idx);
rq                135 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_request_add(rq);
rq                137 drivers/gpu/drm/i915/gt/selftest_lrc.c 		rq = ERR_PTR(err);
rq                141 drivers/gpu/drm/i915/gt/selftest_lrc.c 	return rq;
rq                152 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct i915_request *rq;
rq                155 drivers/gpu/drm/i915/gt/selftest_lrc.c 	rq = i915_request_create(engine->kernel_context);
rq                156 drivers/gpu/drm/i915/gt/selftest_lrc.c 	if (IS_ERR(rq))
rq                157 drivers/gpu/drm/i915/gt/selftest_lrc.c 		return PTR_ERR(rq);
rq                159 drivers/gpu/drm/i915/gt/selftest_lrc.c 	cs = intel_ring_begin(rq, 4);
rq                161 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_add(rq);
rq                170 drivers/gpu/drm/i915/gt/selftest_lrc.c 	intel_ring_advance(rq, cs);
rq                171 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_request_add(rq);
rq                173 drivers/gpu/drm/i915/gt/selftest_lrc.c 	engine->schedule(rq, &attr);
rq                195 drivers/gpu/drm/i915/gt/selftest_lrc.c 			struct i915_request *rq;
rq                197 drivers/gpu/drm/i915/gt/selftest_lrc.c 			rq = semaphore_queue(engine, vma, n++);
rq                198 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (IS_ERR(rq)) {
rq                199 drivers/gpu/drm/i915/gt/selftest_lrc.c 				err = PTR_ERR(rq);
rq                491 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct i915_request *rq;
rq                497 drivers/gpu/drm/i915/gt/selftest_lrc.c 	rq = igt_spinner_create_request(spin, ce, arb);
rq                499 drivers/gpu/drm/i915/gt/selftest_lrc.c 	return rq;
rq                541 drivers/gpu/drm/i915/gt/selftest_lrc.c 		struct i915_request *rq;
rq                551 drivers/gpu/drm/i915/gt/selftest_lrc.c 		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
rq                553 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR(rq)) {
rq                554 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = PTR_ERR(rq);
rq                558 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_add(rq);
rq                559 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (!igt_wait_for_spinner(&spin_lo, rq)) {
rq                567 drivers/gpu/drm/i915/gt/selftest_lrc.c 		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
rq                569 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR(rq)) {
rq                571 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = PTR_ERR(rq);
rq                575 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_add(rq);
rq                576 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (!igt_wait_for_spinner(&spin_hi, rq)) {
rq                644 drivers/gpu/drm/i915/gt/selftest_lrc.c 		struct i915_request *rq;
rq                654 drivers/gpu/drm/i915/gt/selftest_lrc.c 		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
rq                656 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR(rq)) {
rq                657 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = PTR_ERR(rq);
rq                661 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_add(rq);
rq                662 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (!igt_wait_for_spinner(&spin_lo, rq)) {
rq                667 drivers/gpu/drm/i915/gt/selftest_lrc.c 		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
rq                669 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR(rq)) {
rq                671 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = PTR_ERR(rq);
rq                675 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_add(rq);
rq                676 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (igt_wait_for_spinner(&spin_hi, rq)) {
rq                682 drivers/gpu/drm/i915/gt/selftest_lrc.c 		engine->schedule(rq, &attr);
rq                684 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (!igt_wait_for_spinner(&spin_hi, rq)) {
rq                981 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct i915_request *rq;
rq                983 drivers/gpu/drm/i915/gt/selftest_lrc.c 	rq = kzalloc(sizeof(*rq), GFP_KERNEL);
rq                984 drivers/gpu/drm/i915/gt/selftest_lrc.c 	if (!rq)
rq                987 drivers/gpu/drm/i915/gt/selftest_lrc.c 	INIT_LIST_HEAD(&rq->active_list);
rq                988 drivers/gpu/drm/i915/gt/selftest_lrc.c 	rq->engine = engine;
rq                990 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_sched_node_init(&rq->sched);
rq                993 drivers/gpu/drm/i915/gt/selftest_lrc.c 	rq->fence.seqno = 1;
rq                994 drivers/gpu/drm/i915/gt/selftest_lrc.c 	BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
rq                995 drivers/gpu/drm/i915/gt/selftest_lrc.c 	rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
rq                996 drivers/gpu/drm/i915/gt/selftest_lrc.c 	GEM_BUG_ON(i915_request_completed(rq));
rq                998 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_sw_fence_init(&rq->submit, dummy_notify);
rq                999 drivers/gpu/drm/i915/gt/selftest_lrc.c 	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
rq               1001 drivers/gpu/drm/i915/gt/selftest_lrc.c 	spin_lock_init(&rq->lock);
rq               1002 drivers/gpu/drm/i915/gt/selftest_lrc.c 	rq->fence.lock = &rq->lock;
rq               1003 drivers/gpu/drm/i915/gt/selftest_lrc.c 	INIT_LIST_HEAD(&rq->fence.cb_list);
rq               1005 drivers/gpu/drm/i915/gt/selftest_lrc.c 	return rq;
rq               1063 drivers/gpu/drm/i915/gt/selftest_lrc.c 			struct i915_request *rq[ARRAY_SIZE(client)];
rq               1073 drivers/gpu/drm/i915/gt/selftest_lrc.c 				rq[i] = spinner_create_request(&client[i].spin,
rq               1076 drivers/gpu/drm/i915/gt/selftest_lrc.c 				if (IS_ERR(rq[i])) {
rq               1077 drivers/gpu/drm/i915/gt/selftest_lrc.c 					err = PTR_ERR(rq[i]);
rq               1082 drivers/gpu/drm/i915/gt/selftest_lrc.c 				__i915_active_request_set(&rq[i]->timeline->last_request,
rq               1084 drivers/gpu/drm/i915/gt/selftest_lrc.c 				i915_request_add(rq[i]);
rq               1089 drivers/gpu/drm/i915/gt/selftest_lrc.c 			GEM_BUG_ON(i915_request_completed(rq[0]));
rq               1090 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
rq               1095 drivers/gpu/drm/i915/gt/selftest_lrc.c 			GEM_BUG_ON(!i915_request_started(rq[0]));
rq               1097 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (i915_request_wait(rq[depth],
rq               1176 drivers/gpu/drm/i915/gt/selftest_lrc.c 		struct i915_request *rq;
rq               1182 drivers/gpu/drm/i915/gt/selftest_lrc.c 		rq = spinner_create_request(&lo.spin,
rq               1185 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR(rq))
rq               1187 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_add(rq);
rq               1189 drivers/gpu/drm/i915/gt/selftest_lrc.c 		ring_size = rq->wa_tail - rq->head;
rq               1191 drivers/gpu/drm/i915/gt/selftest_lrc.c 			ring_size += rq->ring->size;
rq               1192 drivers/gpu/drm/i915/gt/selftest_lrc.c 		ring_size = rq->ring->size / ring_size;
rq               1197 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (i915_request_wait(rq, 0, HZ / 2) < 0) {
rq               1208 drivers/gpu/drm/i915/gt/selftest_lrc.c 			rq = spinner_create_request(&hi.spin,
rq               1211 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (IS_ERR(rq))
rq               1213 drivers/gpu/drm/i915/gt/selftest_lrc.c 			i915_request_add(rq);
rq               1214 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (!igt_wait_for_spinner(&hi.spin, rq))
rq               1217 drivers/gpu/drm/i915/gt/selftest_lrc.c 			rq = spinner_create_request(&lo.spin,
rq               1220 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (IS_ERR(rq))
rq               1222 drivers/gpu/drm/i915/gt/selftest_lrc.c 			i915_request_add(rq);
rq               1225 drivers/gpu/drm/i915/gt/selftest_lrc.c 				rq = igt_request_alloc(lo.ctx, engine);
rq               1226 drivers/gpu/drm/i915/gt/selftest_lrc.c 				if (IS_ERR(rq))
rq               1228 drivers/gpu/drm/i915/gt/selftest_lrc.c 				i915_request_add(rq);
rq               1231 drivers/gpu/drm/i915/gt/selftest_lrc.c 			rq = igt_request_alloc(hi.ctx, engine);
rq               1232 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (IS_ERR(rq))
rq               1234 drivers/gpu/drm/i915/gt/selftest_lrc.c 			i915_request_add(rq);
rq               1235 drivers/gpu/drm/i915/gt/selftest_lrc.c 			engine->schedule(rq, &attr);
rq               1238 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
rq               1250 drivers/gpu/drm/i915/gt/selftest_lrc.c 			rq = igt_request_alloc(lo.ctx, engine);
rq               1251 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (IS_ERR(rq))
rq               1253 drivers/gpu/drm/i915/gt/selftest_lrc.c 			i915_request_add(rq);
rq               1254 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
rq               1328 drivers/gpu/drm/i915/gt/selftest_lrc.c 		struct i915_request *rq;
rq               1333 drivers/gpu/drm/i915/gt/selftest_lrc.c 		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
rq               1335 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR(rq)) {
rq               1336 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = PTR_ERR(rq);
rq               1340 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_add(rq);
rq               1341 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (!igt_wait_for_spinner(&spin_lo, rq)) {
rq               1349 drivers/gpu/drm/i915/gt/selftest_lrc.c 		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
rq               1351 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR(rq)) {
rq               1353 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = PTR_ERR(rq);
rq               1360 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_add(rq);
rq               1377 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (!igt_wait_for_spinner(&spin_hi, rq)) {
rq               1438 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct i915_request *rq;
rq               1454 drivers/gpu/drm/i915/gt/selftest_lrc.c 	rq = igt_request_alloc(ctx, smoke->engine);
rq               1455 drivers/gpu/drm/i915/gt/selftest_lrc.c 	if (IS_ERR(rq)) {
rq               1456 drivers/gpu/drm/i915/gt/selftest_lrc.c 		err = PTR_ERR(rq);
rq               1462 drivers/gpu/drm/i915/gt/selftest_lrc.c 		err = i915_request_await_object(rq, vma->obj, false);
rq               1464 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = i915_vma_move_to_active(vma, rq, 0);
rq               1466 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = rq->engine->emit_bb_start(rq,
rq               1472 drivers/gpu/drm/i915/gt/selftest_lrc.c 	i915_request_add(rq);
rq               1989 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct i915_request *rq[16];
rq               1994 drivers/gpu/drm/i915/gt/selftest_lrc.c 	GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
rq               2001 drivers/gpu/drm/i915/gt/selftest_lrc.c 	rq[0] = ERR_PTR(-ENOMEM);
rq               2008 drivers/gpu/drm/i915/gt/selftest_lrc.c 		memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
rq               2010 drivers/gpu/drm/i915/gt/selftest_lrc.c 		rq[0] = igt_request_alloc(ctx, master);
rq               2011 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR(rq[0])) {
rq               2012 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = PTR_ERR(rq[0]);
rq               2015 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_get(rq[0]);
rq               2019 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
rq               2023 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_add(rq[0]);
rq               2055 drivers/gpu/drm/i915/gt/selftest_lrc.c 			rq[n + 1] = i915_request_create(ve);
rq               2057 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (IS_ERR(rq[n + 1])) {
rq               2058 drivers/gpu/drm/i915/gt/selftest_lrc.c 				err = PTR_ERR(rq[n + 1]);
rq               2062 drivers/gpu/drm/i915/gt/selftest_lrc.c 			i915_request_get(rq[n + 1]);
rq               2064 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = i915_request_await_execution(rq[n + 1],
rq               2065 drivers/gpu/drm/i915/gt/selftest_lrc.c 							   &rq[0]->fence,
rq               2067 drivers/gpu/drm/i915/gt/selftest_lrc.c 			i915_request_add(rq[n + 1]);
rq               2075 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
rq               2077 drivers/gpu/drm/i915/gt/selftest_lrc.c 			       rq[0]->engine->name);
rq               2083 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (i915_request_wait(rq[n + 1], 0,
rq               2089 drivers/gpu/drm/i915/gt/selftest_lrc.c 			if (rq[n + 1]->engine != siblings[n]) {
rq               2092 drivers/gpu/drm/i915/gt/selftest_lrc.c 				       rq[n + 1]->engine->name,
rq               2093 drivers/gpu/drm/i915/gt/selftest_lrc.c 				       rq[0]->engine->name);
rq               2099 drivers/gpu/drm/i915/gt/selftest_lrc.c 		for (n = 0; !IS_ERR(rq[n]); n++)
rq               2100 drivers/gpu/drm/i915/gt/selftest_lrc.c 			i915_request_put(rq[n]);
rq               2101 drivers/gpu/drm/i915/gt/selftest_lrc.c 		rq[0] = ERR_PTR(-ENOMEM);
rq               2105 drivers/gpu/drm/i915/gt/selftest_lrc.c 	for (n = 0; !IS_ERR(rq[n]); n++)
rq               2106 drivers/gpu/drm/i915/gt/selftest_lrc.c 		i915_request_put(rq[n]);
rq                416 drivers/gpu/drm/i915/gt/selftest_timeline.c static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
rq                420 drivers/gpu/drm/i915/gt/selftest_timeline.c 	cs = intel_ring_begin(rq, 4);
rq                424 drivers/gpu/drm/i915/gt/selftest_timeline.c 	if (INTEL_GEN(rq->i915) >= 8) {
rq                429 drivers/gpu/drm/i915/gt/selftest_timeline.c 	} else if (INTEL_GEN(rq->i915) >= 4) {
rq                441 drivers/gpu/drm/i915/gt/selftest_timeline.c 	intel_ring_advance(rq, cs);
rq                449 drivers/gpu/drm/i915/gt/selftest_timeline.c 	struct i915_request *rq;
rq                456 drivers/gpu/drm/i915/gt/selftest_timeline.c 		rq = ERR_PTR(err);
rq                460 drivers/gpu/drm/i915/gt/selftest_timeline.c 	rq = i915_request_create(engine->kernel_context);
rq                461 drivers/gpu/drm/i915/gt/selftest_timeline.c 	if (IS_ERR(rq))
rq                464 drivers/gpu/drm/i915/gt/selftest_timeline.c 	err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
rq                465 drivers/gpu/drm/i915/gt/selftest_timeline.c 	i915_request_add(rq);
rq                467 drivers/gpu/drm/i915/gt/selftest_timeline.c 		rq = ERR_PTR(err);
rq                472 drivers/gpu/drm/i915/gt/selftest_timeline.c 	if (IS_ERR(rq))
rq                474 drivers/gpu/drm/i915/gt/selftest_timeline.c 	return rq;
rq                528 drivers/gpu/drm/i915/gt/selftest_timeline.c 			struct i915_request *rq;
rq                536 drivers/gpu/drm/i915/gt/selftest_timeline.c 			rq = tl_write(tl, engine, count);
rq                537 drivers/gpu/drm/i915/gt/selftest_timeline.c 			if (IS_ERR(rq)) {
rq                539 drivers/gpu/drm/i915/gt/selftest_timeline.c 				err = PTR_ERR(rq);
rq                601 drivers/gpu/drm/i915/gt/selftest_timeline.c 			struct i915_request *rq;
rq                612 drivers/gpu/drm/i915/gt/selftest_timeline.c 			rq = tl_write(tl, engine, count);
rq                613 drivers/gpu/drm/i915/gt/selftest_timeline.c 			if (IS_ERR(rq)) {
rq                615 drivers/gpu/drm/i915/gt/selftest_timeline.c 				err = PTR_ERR(rq);
rq                678 drivers/gpu/drm/i915/gt/selftest_timeline.c 		struct i915_request *rq;
rq                684 drivers/gpu/drm/i915/gt/selftest_timeline.c 		rq = i915_request_create(engine->kernel_context);
rq                685 drivers/gpu/drm/i915/gt/selftest_timeline.c 		if (IS_ERR(rq)) {
rq                686 drivers/gpu/drm/i915/gt/selftest_timeline.c 			err = PTR_ERR(rq);
rq                693 drivers/gpu/drm/i915/gt/selftest_timeline.c 		err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
rq                696 drivers/gpu/drm/i915/gt/selftest_timeline.c 			i915_request_add(rq);
rq                702 drivers/gpu/drm/i915/gt/selftest_timeline.c 		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
rq                704 drivers/gpu/drm/i915/gt/selftest_timeline.c 			i915_request_add(rq);
rq                710 drivers/gpu/drm/i915/gt/selftest_timeline.c 		err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
rq                713 drivers/gpu/drm/i915/gt/selftest_timeline.c 			i915_request_add(rq);
rq                719 drivers/gpu/drm/i915/gt/selftest_timeline.c 		err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
rq                721 drivers/gpu/drm/i915/gt/selftest_timeline.c 			i915_request_add(rq);
rq                730 drivers/gpu/drm/i915/gt/selftest_timeline.c 		i915_request_add(rq);
rq                732 drivers/gpu/drm/i915/gt/selftest_timeline.c 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
rq                790 drivers/gpu/drm/i915/gt/selftest_timeline.c 			struct i915_request *rq;
rq                798 drivers/gpu/drm/i915/gt/selftest_timeline.c 			rq = tl_write(tl, engine, count);
rq                799 drivers/gpu/drm/i915/gt/selftest_timeline.c 			if (IS_ERR(rq)) {
rq                801 drivers/gpu/drm/i915/gt/selftest_timeline.c 				err = PTR_ERR(rq);
rq                805 drivers/gpu/drm/i915/gt/selftest_timeline.c 			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
rq                 78 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	struct i915_request *rq;
rq                109 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	rq = igt_request_alloc(ctx, engine);
rq                110 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	if (IS_ERR(rq)) {
rq                111 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		err = PTR_ERR(rq);
rq                116 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	err = i915_request_await_object(rq, vma->obj, true);
rq                118 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
rq                127 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
rq                139 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	intel_ring_advance(rq, cs);
rq                141 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	i915_request_add(rq);
rq                147 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	i915_request_add(rq);
rq                245 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	struct i915_request *rq;
rq                258 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	rq = ERR_PTR(-ENODEV);
rq                260 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		rq = igt_spinner_create_request(spin, ce, MI_NOOP);
rq                265 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	if (IS_ERR(rq)) {
rq                267 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		err = PTR_ERR(rq);
rq                271 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	i915_request_add(rq);
rq                273 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	if (spin && !igt_wait_for_spinner(spin, rq)) {
rq                484 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		struct i915_request *rq;
rq                556 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		rq = igt_request_alloc(ctx, engine);
rq                557 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		if (IS_ERR(rq)) {
rq                558 drivers/gpu/drm/i915/gt/selftest_workarounds.c 			err = PTR_ERR(rq);
rq                563 drivers/gpu/drm/i915/gt/selftest_workarounds.c 			err = engine->emit_init_breadcrumb(rq);
rq                568 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		err = engine->emit_bb_start(rq,
rq                575 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		i915_request_add(rq);
rq                579 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
rq                767 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	struct i915_request *rq;
rq                771 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	rq = igt_request_alloc(ctx, engine);
rq                772 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	if (IS_ERR(rq))
rq                773 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		return PTR_ERR(rq);
rq                779 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
rq                797 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	intel_ring_advance(rq, cs);
rq                800 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	i915_request_add(rq);
rq                802 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	if (i915_request_wait(rq, 0, HZ / 5) < 0)
rq                811 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	struct i915_request *rq;
rq                841 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	rq = igt_request_alloc(ctx, engine);
rq                842 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	if (IS_ERR(rq)) {
rq                843 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		err = PTR_ERR(rq);
rq                848 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		err = engine->emit_init_breadcrumb(rq);
rq                854 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
rq                857 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	i915_request_add(rq);
rq                858 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	if (i915_request_wait(rq, 0, HZ / 5) < 0)
rq               1160 drivers/gpu/drm/i915/gt/selftest_workarounds.c 	struct i915_request *rq;
rq               1201 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
rq               1202 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		if (IS_ERR(rq)) {
rq               1203 drivers/gpu/drm/i915/gt/selftest_workarounds.c 			ret = PTR_ERR(rq);
rq               1208 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		i915_request_add(rq);
rq               1210 drivers/gpu/drm/i915/gt/selftest_workarounds.c 		if (!igt_wait_for_spinner(&spin, rq)) {
rq                463 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
rq                466 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	struct intel_engine_cs *engine = rq->engine;
rq                467 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
rq                468 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
rq                471 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			   ring_tail, rq->fence.seqno);
rq                500 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		struct i915_request *rq = *out++;
rq                502 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		flush_ggtt_writes(rq->ring->vma);
rq                503 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		guc_add_request(guc, rq);
rq                509 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c static inline int rq_prio(const struct i915_request *rq)
rq                511 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	return rq->sched.attr.priority | __NO_PREEMPTION;
rq                514 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c static struct i915_request *schedule_in(struct i915_request *rq, int idx)
rq                516 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	trace_i915_request_in(rq, idx);
rq                525 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	intel_gt_pm_get(rq->engine->gt);
rq                526 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	return i915_request_get(rq);
rq                529 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c static void schedule_out(struct i915_request *rq)
rq                531 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	trace_i915_request_out(rq);
rq                533 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	intel_gt_pm_put(rq->engine->gt);
rq                534 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	i915_request_put(rq);
rq                564 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		struct i915_request *rq, *rn;
rq                567 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		priolist_for_each_request_consume(rq, rn, p, i) {
rq                568 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			if (last && rq->hw_context != last->hw_context) {
rq                577 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			list_del_init(&rq->sched.link);
rq                578 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			__i915_request_submit(rq);
rq                580 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			last = rq;
rq                601 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	struct i915_request **port, *rq;
rq                606 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	for (port = execlists->inflight; (rq = *port); port++) {
rq                607 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		if (!i915_request_completed(rq))
rq                610 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		schedule_out(rq);
rq                644 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	struct i915_request * const *port, *rq;
rq                648 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	for (port = execlists->active; (rq = *port); port++)
rq                649 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		schedule_out(rq);
rq                657 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	struct i915_request *rq;
rq                665 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	rq = execlists_unwind_incomplete_requests(execlists);
rq                666 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	if (!rq)
rq                669 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	if (!i915_request_started(rq))
rq                672 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	__i915_request_reset(rq, stalled);
rq                673 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
rq                682 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	struct i915_request *rq, *rn;
rq                708 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 	list_for_each_entry(rq, &engine->active.requests, sched.link) {
rq                709 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		if (!i915_request_signaled(rq))
rq                710 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			dma_fence_set_error(&rq->fence, -EIO);
rq                712 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		i915_request_mark_complete(rq);
rq                720 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 		priolist_for_each_request_consume(rq, rn, p, i) {
rq                721 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			list_del_init(&rq->sched.link);
rq                722 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			__i915_request_submit(rq);
rq                723 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			dma_fence_set_error(&rq->fence, -EIO);
rq                724 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 			i915_request_mark_complete(rq);
rq                393 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *rq;
rq                400 drivers/gpu/drm/i915/gvt/scheduler.c 	rq = i915_request_create(s->shadow[workload->ring_id]);
rq                401 drivers/gpu/drm/i915/gvt/scheduler.c 	if (IS_ERR(rq)) {
rq                403 drivers/gpu/drm/i915/gvt/scheduler.c 		return PTR_ERR(rq);
rq                406 drivers/gpu/drm/i915/gvt/scheduler.c 	workload->req = i915_request_get(rq);
rq                534 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *rq = workload->req;
rq                536 drivers/gpu/drm/i915/gvt/scheduler.c 		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
rq                693 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *rq;
rq                723 drivers/gpu/drm/i915/gvt/scheduler.c 		rq = fetch_and_zero(&workload->req);
rq                724 drivers/gpu/drm/i915/gvt/scheduler.c 		i915_request_put(rq);
rq                800 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *rq = workload->req;
rq                803 drivers/gpu/drm/i915/gvt/scheduler.c 	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
rq                814 drivers/gpu/drm/i915/gvt/scheduler.c 	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
rq                834 drivers/gpu/drm/i915/gvt/scheduler.c 	context_page_num = rq->engine->context_size;
rq                837 drivers/gpu/drm/i915/gvt/scheduler.c 	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
rq                911 drivers/gpu/drm/i915/gvt/scheduler.c 	struct i915_request *rq = workload->req;
rq                921 drivers/gpu/drm/i915/gvt/scheduler.c 	if (rq) {
rq                170 drivers/gpu/drm/i915/i915_active.c node_retire(struct i915_active_request *base, struct i915_request *rq)
rq                176 drivers/gpu/drm/i915/i915_active.c node_retire_nolock(struct i915_active_request *base, struct i915_request *rq)
rq                309 drivers/gpu/drm/i915/i915_active.c 		    struct i915_request *rq)
rq                341 drivers/gpu/drm/i915/i915_active.c 	__i915_active_request_set(active, rq);
rq                451 drivers/gpu/drm/i915/i915_active.c int i915_request_await_active_request(struct i915_request *rq,
rq                455 drivers/gpu/drm/i915/i915_active.c 		i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
rq                457 drivers/gpu/drm/i915/i915_active.c 	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
rq                460 drivers/gpu/drm/i915/i915_active.c int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
rq                475 drivers/gpu/drm/i915/i915_active.c 		err = i915_request_await_active_request(rq, &it->base);
rq                697 drivers/gpu/drm/i915/i915_active.c void i915_request_add_active_barriers(struct i915_request *rq)
rq                699 drivers/gpu/drm/i915/i915_active.c 	struct intel_engine_cs *engine = rq->engine;
rq                703 drivers/gpu/drm/i915/i915_active.c 	GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);
rq                711 drivers/gpu/drm/i915/i915_active.c 		RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq);
rq                713 drivers/gpu/drm/i915/i915_active.c 		list_add_tail((struct list_head *)node, &rq->active_list);
rq                718 drivers/gpu/drm/i915/i915_active.c 			    struct i915_request *rq)
rq                727 drivers/gpu/drm/i915/i915_active.c 	err = i915_request_await_active_request(rq, active);
rq                731 drivers/gpu/drm/i915/i915_active.c 	__i915_active_request_set(active, rq);
rq                 62 drivers/gpu/drm/i915/i915_active.h 			 struct i915_request *rq,
rq                 65 drivers/gpu/drm/i915/i915_active.h 	RCU_INIT_POINTER(active->request, rq);
rq                 98 drivers/gpu/drm/i915/i915_active.h 			struct i915_request *rq);
rq                374 drivers/gpu/drm/i915/i915_active.h 		    struct i915_request *rq);
rq                378 drivers/gpu/drm/i915/i915_active.h int i915_request_await_active(struct i915_request *rq,
rq                380 drivers/gpu/drm/i915/i915_active.h int i915_request_await_active_request(struct i915_request *rq,
rq                405 drivers/gpu/drm/i915/i915_active.h void i915_request_add_active_barriers(struct i915_request *rq);
rq                900 drivers/gpu/drm/i915/i915_gem.c 		struct i915_request *rq;
rq                902 drivers/gpu/drm/i915/i915_gem.c 		rq = i915_active_request_get_unlocked(&tl->last_request);
rq                903 drivers/gpu/drm/i915/i915_gem.c 		if (!rq)
rq                918 drivers/gpu/drm/i915/i915_gem.c 			gen6_rps_boost(rq);
rq                920 drivers/gpu/drm/i915/i915_gem.c 		timeout = i915_request_wait(rq, wait, timeout);
rq                921 drivers/gpu/drm/i915/i915_gem.c 		i915_request_put(rq);
rq               1275 drivers/gpu/drm/i915/i915_gem.c 		struct i915_request *rq;
rq               1287 drivers/gpu/drm/i915/i915_gem.c 		rq = intel_context_create_request(ce);
rq               1288 drivers/gpu/drm/i915/i915_gem.c 		if (IS_ERR(rq)) {
rq               1289 drivers/gpu/drm/i915/i915_gem.c 			err = PTR_ERR(rq);
rq               1294 drivers/gpu/drm/i915/i915_gem.c 		err = intel_engine_emit_ctx_wa(rq);
rq               1302 drivers/gpu/drm/i915/i915_gem.c 		err = intel_mocs_emit(rq);
rq               1307 drivers/gpu/drm/i915/i915_gem.c 		err = intel_renderstate_emit(rq);
rq               1312 drivers/gpu/drm/i915/i915_gem.c 		requests[id] = i915_request_get(rq);
rq               1313 drivers/gpu/drm/i915/i915_gem.c 		i915_request_add(rq);
rq               1325 drivers/gpu/drm/i915/i915_gem.c 		struct i915_request *rq;
rq               1329 drivers/gpu/drm/i915/i915_gem.c 		rq = requests[id];
rq               1330 drivers/gpu/drm/i915/i915_gem.c 		if (!rq)
rq               1334 drivers/gpu/drm/i915/i915_gem.c 		GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
rq               1336 drivers/gpu/drm/i915/i915_gem.c 		state = rq->hw_context->state;
rq               1367 drivers/gpu/drm/i915/i915_gem.c 		rq->engine->default_state = i915_gem_object_get(state->obj);
rq               1382 drivers/gpu/drm/i915/i915_gem.c 		struct i915_request *rq;
rq               1384 drivers/gpu/drm/i915/i915_gem.c 		rq = requests[id];
rq               1385 drivers/gpu/drm/i915/i915_gem.c 		if (!rq)
rq               1388 drivers/gpu/drm/i915/i915_gem.c 		ce = rq->hw_context;
rq               1389 drivers/gpu/drm/i915/i915_gem.c 		i915_request_put(rq);
rq               1250 drivers/gpu/drm/i915/i915_gpu_error.c 			   const struct i915_request *rq)
rq               1252 drivers/gpu/drm/i915/i915_gpu_error.c 	const struct i915_gem_context *ctx = rq->gem_context;
rq               1717 drivers/gpu/drm/i915/i915_perf.c gen8_store_flex(struct i915_request *rq,
rq               1724 drivers/gpu/drm/i915/i915_perf.c 	cs = intel_ring_begin(rq, 4 * count);
rq               1736 drivers/gpu/drm/i915/i915_perf.c 	intel_ring_advance(rq, cs);
rq               1742 drivers/gpu/drm/i915/i915_perf.c gen8_load_flex(struct i915_request *rq,
rq               1750 drivers/gpu/drm/i915/i915_perf.c 	cs = intel_ring_begin(rq, 2 * count + 2);
rq               1761 drivers/gpu/drm/i915/i915_perf.c 	intel_ring_advance(rq, cs);
rq               1769 drivers/gpu/drm/i915/i915_perf.c 	struct i915_request *rq;
rq               1774 drivers/gpu/drm/i915/i915_perf.c 	rq = i915_request_create(ce->engine->kernel_context);
rq               1775 drivers/gpu/drm/i915/i915_perf.c 	if (IS_ERR(rq))
rq               1776 drivers/gpu/drm/i915/i915_perf.c 		return PTR_ERR(rq);
rq               1779 drivers/gpu/drm/i915/i915_perf.c 	err = intel_context_prepare_remote_request(ce, rq);
rq               1781 drivers/gpu/drm/i915/i915_perf.c 		err = gen8_store_flex(rq, ce, flex, count);
rq               1783 drivers/gpu/drm/i915/i915_perf.c 	i915_request_add(rq);
rq               1790 drivers/gpu/drm/i915/i915_perf.c 	struct i915_request *rq;
rq               1793 drivers/gpu/drm/i915/i915_perf.c 	rq = i915_request_create(ce);
rq               1794 drivers/gpu/drm/i915/i915_perf.c 	if (IS_ERR(rq))
rq               1795 drivers/gpu/drm/i915/i915_perf.c 		return PTR_ERR(rq);
rq               1797 drivers/gpu/drm/i915/i915_perf.c 	err = gen8_load_flex(rq, ce, flex, count);
rq               1799 drivers/gpu/drm/i915/i915_perf.c 	i915_request_add(rq);
rq                 45 drivers/gpu/drm/i915/i915_request.c 	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
rq                 99 drivers/gpu/drm/i915/i915_request.c 	struct i915_request *rq = to_request(fence);
rq                108 drivers/gpu/drm/i915/i915_request.c 	i915_sw_fence_fini(&rq->submit);
rq                109 drivers/gpu/drm/i915/i915_request.c 	i915_sw_fence_fini(&rq->semaphore);
rq                111 drivers/gpu/drm/i915/i915_request.c 	kmem_cache_free(global.slab_requests, rq);
rq                142 drivers/gpu/drm/i915/i915_request.c static void __notify_execute_cb(struct i915_request *rq)
rq                146 drivers/gpu/drm/i915/i915_request.c 	lockdep_assert_held(&rq->lock);
rq                148 drivers/gpu/drm/i915/i915_request.c 	if (list_empty(&rq->execute_cb))
rq                151 drivers/gpu/drm/i915/i915_request.c 	list_for_each_entry(cb, &rq->execute_cb, link)
rq                164 drivers/gpu/drm/i915/i915_request.c 	INIT_LIST_HEAD(&rq->execute_cb);
rq                197 drivers/gpu/drm/i915/i915_request.c static void remove_from_engine(struct i915_request *rq)
rq                207 drivers/gpu/drm/i915/i915_request.c 	locked = READ_ONCE(rq->engine);
rq                209 drivers/gpu/drm/i915/i915_request.c 	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
rq                214 drivers/gpu/drm/i915/i915_request.c 	list_del(&rq->sched.link);
rq                218 drivers/gpu/drm/i915/i915_request.c static bool i915_request_retire(struct i915_request *rq)
rq                222 drivers/gpu/drm/i915/i915_request.c 	lockdep_assert_held(&rq->timeline->mutex);
rq                223 drivers/gpu/drm/i915/i915_request.c 	if (!i915_request_completed(rq))
rq                227 drivers/gpu/drm/i915/i915_request.c 		  rq->engine->name,
rq                228 drivers/gpu/drm/i915/i915_request.c 		  rq->fence.context, rq->fence.seqno,
rq                229 drivers/gpu/drm/i915/i915_request.c 		  hwsp_seqno(rq));
rq                231 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
rq                232 drivers/gpu/drm/i915/i915_request.c 	trace_i915_request_retire(rq);
rq                243 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
rq                244 drivers/gpu/drm/i915/i915_request.c 	rq->ring->head = rq->postfix;
rq                256 drivers/gpu/drm/i915/i915_request.c 	list_for_each_entry_safe(active, next, &rq->active_list, link) {
rq                272 drivers/gpu/drm/i915/i915_request.c 		active->retire(active, rq);
rq                283 drivers/gpu/drm/i915/i915_request.c 	remove_from_engine(rq);
rq                285 drivers/gpu/drm/i915/i915_request.c 	spin_lock(&rq->lock);
rq                286 drivers/gpu/drm/i915/i915_request.c 	i915_request_mark_complete(rq);
rq                287 drivers/gpu/drm/i915/i915_request.c 	if (!i915_request_signaled(rq))
rq                288 drivers/gpu/drm/i915/i915_request.c 		dma_fence_signal_locked(&rq->fence);
rq                289 drivers/gpu/drm/i915/i915_request.c 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
rq                290 drivers/gpu/drm/i915/i915_request.c 		i915_request_cancel_breadcrumb(rq);
rq                291 drivers/gpu/drm/i915/i915_request.c 	if (i915_request_has_waitboost(rq)) {
rq                292 drivers/gpu/drm/i915/i915_request.c 		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
rq                293 drivers/gpu/drm/i915/i915_request.c 		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
rq                295 drivers/gpu/drm/i915/i915_request.c 	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
rq                296 drivers/gpu/drm/i915/i915_request.c 		set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
rq                297 drivers/gpu/drm/i915/i915_request.c 		__notify_execute_cb(rq);
rq                299 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(!list_empty(&rq->execute_cb));
rq                300 drivers/gpu/drm/i915/i915_request.c 	spin_unlock(&rq->lock);
rq                304 drivers/gpu/drm/i915/i915_request.c 	remove_from_client(rq);
rq                305 drivers/gpu/drm/i915/i915_request.c 	list_del(&rq->link);
rq                307 drivers/gpu/drm/i915/i915_request.c 	intel_context_exit(rq->hw_context);
rq                308 drivers/gpu/drm/i915/i915_request.c 	intel_context_unpin(rq->hw_context);
rq                310 drivers/gpu/drm/i915/i915_request.c 	free_capture_list(rq);
rq                311 drivers/gpu/drm/i915/i915_request.c 	i915_sched_node_fini(&rq->sched);
rq                312 drivers/gpu/drm/i915/i915_request.c 	i915_request_put(rq);
rq                317 drivers/gpu/drm/i915/i915_request.c void i915_request_retire_upto(struct i915_request *rq)
rq                319 drivers/gpu/drm/i915/i915_request.c 	struct intel_timeline * const tl = rq->timeline;
rq                323 drivers/gpu/drm/i915/i915_request.c 		  rq->engine->name,
rq                324 drivers/gpu/drm/i915/i915_request.c 		  rq->fence.context, rq->fence.seqno,
rq                325 drivers/gpu/drm/i915/i915_request.c 		  hwsp_seqno(rq));
rq                328 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(!i915_request_completed(rq));
rq                332 drivers/gpu/drm/i915/i915_request.c 	} while (i915_request_retire(tmp) && tmp != rq);
rq                336 drivers/gpu/drm/i915/i915_request.c __i915_request_await_execution(struct i915_request *rq,
rq                338 drivers/gpu/drm/i915/i915_request.c 			       void (*hook)(struct i915_request *rq,
rq                346 drivers/gpu/drm/i915/i915_request.c 			hook(rq, &signal->fence);
rq                354 drivers/gpu/drm/i915/i915_request.c 	cb->fence = &rq->submit;
rq                367 drivers/gpu/drm/i915/i915_request.c 			hook(rq, &signal->fence);
rq                565 drivers/gpu/drm/i915/i915_request.c 	struct i915_request *rq =
rq                566 drivers/gpu/drm/i915/i915_request.c 		container_of(wrk, typeof(*rq), semaphore_work);
rq                568 drivers/gpu/drm/i915/i915_request.c 	i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
rq                569 drivers/gpu/drm/i915/i915_request.c 	i915_request_put(rq);
rq                575 drivers/gpu/drm/i915/i915_request.c 	struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
rq                579 drivers/gpu/drm/i915/i915_request.c 		if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
rq                580 drivers/gpu/drm/i915/i915_request.c 			i915_request_get(rq);
rq                581 drivers/gpu/drm/i915/i915_request.c 			init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
rq                582 drivers/gpu/drm/i915/i915_request.c 			irq_work_queue(&rq->semaphore_work);
rq                587 drivers/gpu/drm/i915/i915_request.c 		i915_request_put(rq);
rq                596 drivers/gpu/drm/i915/i915_request.c 	struct i915_request *rq, *rn;
rq                598 drivers/gpu/drm/i915/i915_request.c 	list_for_each_entry_safe(rq, rn, &tl->requests, link)
rq                599 drivers/gpu/drm/i915/i915_request.c 		if (!i915_request_retire(rq))
rq                606 drivers/gpu/drm/i915/i915_request.c 	struct i915_request *rq;
rq                615 drivers/gpu/drm/i915/i915_request.c 	rq = list_first_entry(&tl->requests, typeof(*rq), link);
rq                616 drivers/gpu/drm/i915/i915_request.c 	i915_request_retire(rq);
rq                618 drivers/gpu/drm/i915/i915_request.c 	rq = kmem_cache_alloc(global.slab_requests,
rq                620 drivers/gpu/drm/i915/i915_request.c 	if (rq)
rq                621 drivers/gpu/drm/i915/i915_request.c 		return rq;
rq                624 drivers/gpu/drm/i915/i915_request.c 	rq = list_last_entry(&tl->requests, typeof(*rq), link);
rq                625 drivers/gpu/drm/i915/i915_request.c 	cond_synchronize_rcu(rq->rcustate);
rq                638 drivers/gpu/drm/i915/i915_request.c 	struct i915_request *rq;
rq                676 drivers/gpu/drm/i915/i915_request.c 	rq = kmem_cache_alloc(global.slab_requests,
rq                678 drivers/gpu/drm/i915/i915_request.c 	if (unlikely(!rq)) {
rq                679 drivers/gpu/drm/i915/i915_request.c 		rq = request_alloc_slow(tl, gfp);
rq                680 drivers/gpu/drm/i915/i915_request.c 		if (!rq) {
rq                686 drivers/gpu/drm/i915/i915_request.c 	ret = intel_timeline_get_seqno(tl, rq, &seqno);
rq                690 drivers/gpu/drm/i915/i915_request.c 	rq->i915 = ce->engine->i915;
rq                691 drivers/gpu/drm/i915/i915_request.c 	rq->hw_context = ce;
rq                692 drivers/gpu/drm/i915/i915_request.c 	rq->gem_context = ce->gem_context;
rq                693 drivers/gpu/drm/i915/i915_request.c 	rq->engine = ce->engine;
rq                694 drivers/gpu/drm/i915/i915_request.c 	rq->ring = ce->ring;
rq                695 drivers/gpu/drm/i915/i915_request.c 	rq->timeline = tl;
rq                696 drivers/gpu/drm/i915/i915_request.c 	rq->hwsp_seqno = tl->hwsp_seqno;
rq                697 drivers/gpu/drm/i915/i915_request.c 	rq->hwsp_cacheline = tl->hwsp_cacheline;
rq                698 drivers/gpu/drm/i915/i915_request.c 	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
rq                700 drivers/gpu/drm/i915/i915_request.c 	spin_lock_init(&rq->lock);
rq                701 drivers/gpu/drm/i915/i915_request.c 	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
rq                705 drivers/gpu/drm/i915/i915_request.c 	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
rq                706 drivers/gpu/drm/i915/i915_request.c 	i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
rq                708 drivers/gpu/drm/i915/i915_request.c 	i915_sched_node_init(&rq->sched);
rq                711 drivers/gpu/drm/i915/i915_request.c 	rq->file_priv = NULL;
rq                712 drivers/gpu/drm/i915/i915_request.c 	rq->batch = NULL;
rq                713 drivers/gpu/drm/i915/i915_request.c 	rq->capture_list = NULL;
rq                714 drivers/gpu/drm/i915/i915_request.c 	rq->flags = 0;
rq                715 drivers/gpu/drm/i915/i915_request.c 	rq->execution_mask = ALL_ENGINES;
rq                717 drivers/gpu/drm/i915/i915_request.c 	INIT_LIST_HEAD(&rq->active_list);
rq                718 drivers/gpu/drm/i915/i915_request.c 	INIT_LIST_HEAD(&rq->execute_cb);
rq                732 drivers/gpu/drm/i915/i915_request.c 	rq->reserved_space =
rq                733 drivers/gpu/drm/i915/i915_request.c 		2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
rq                741 drivers/gpu/drm/i915/i915_request.c 	rq->head = rq->ring->emit;
rq                743 drivers/gpu/drm/i915/i915_request.c 	ret = rq->engine->request_alloc(rq);
rq                747 drivers/gpu/drm/i915/i915_request.c 	rq->infix = rq->ring->emit; /* end of header; start of user payload */
rq                750 drivers/gpu/drm/i915/i915_request.c 	return rq;
rq                753 drivers/gpu/drm/i915/i915_request.c 	ce->ring->emit = rq->head;
rq                756 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(!list_empty(&rq->active_list));
rq                757 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
rq                758 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
rq                761 drivers/gpu/drm/i915/i915_request.c 	kmem_cache_free(global.slab_requests, rq);
rq                770 drivers/gpu/drm/i915/i915_request.c 	struct i915_request *rq;
rq                778 drivers/gpu/drm/i915/i915_request.c 	rq = list_first_entry(&tl->requests, typeof(*rq), link);
rq                779 drivers/gpu/drm/i915/i915_request.c 	if (!list_is_last(&rq->link, &tl->requests))
rq                780 drivers/gpu/drm/i915/i915_request.c 		i915_request_retire(rq);
rq                783 drivers/gpu/drm/i915/i915_request.c 	rq = __i915_request_create(ce, GFP_KERNEL);
rq                785 drivers/gpu/drm/i915/i915_request.c 	if (IS_ERR(rq))
rq                789 drivers/gpu/drm/i915/i915_request.c 	rq->cookie = lockdep_pin_lock(&tl->mutex);
rq                791 drivers/gpu/drm/i915/i915_request.c 	return rq;
rq                795 drivers/gpu/drm/i915/i915_request.c 	return rq;
rq                799 drivers/gpu/drm/i915/i915_request.c i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
rq                805 drivers/gpu/drm/i915/i915_request.c 	if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
rq                808 drivers/gpu/drm/i915/i915_request.c 	return i915_sw_fence_await_dma_fence(&rq->submit,
rq                814 drivers/gpu/drm/i915/i915_request.c already_busywaiting(struct i915_request *rq)
rq                828 drivers/gpu/drm/i915/i915_request.c 	return rq->sched.semaphores | rq->engine->saturated;
rq                935 drivers/gpu/drm/i915/i915_request.c i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
rq                967 drivers/gpu/drm/i915/i915_request.c 		if (fence->context == rq->fence.context)
rq                972 drivers/gpu/drm/i915/i915_request.c 		    intel_timeline_sync_is_later(rq->timeline, fence))
rq                976 drivers/gpu/drm/i915/i915_request.c 			ret = i915_request_await_request(rq, to_request(fence));
rq                978 drivers/gpu/drm/i915/i915_request.c 			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
rq                986 drivers/gpu/drm/i915/i915_request.c 			intel_timeline_sync_set(rq->timeline, fence);
rq                993 drivers/gpu/drm/i915/i915_request.c i915_request_await_execution(struct i915_request *rq,
rq                995 drivers/gpu/drm/i915/i915_request.c 			     void (*hook)(struct i915_request *rq,
rq               1023 drivers/gpu/drm/i915/i915_request.c 			ret = __i915_request_await_execution(rq,
rq               1028 drivers/gpu/drm/i915/i915_request.c 			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
rq               1100 drivers/gpu/drm/i915/i915_request.c void i915_request_skip(struct i915_request *rq, int error)
rq               1102 drivers/gpu/drm/i915/i915_request.c 	void *vaddr = rq->ring->vaddr;
rq               1106 drivers/gpu/drm/i915/i915_request.c 	dma_fence_set_error(&rq->fence, error);
rq               1108 drivers/gpu/drm/i915/i915_request.c 	if (rq->infix == rq->postfix)
rq               1116 drivers/gpu/drm/i915/i915_request.c 	head = rq->infix;
rq               1117 drivers/gpu/drm/i915/i915_request.c 	if (rq->postfix < head) {
rq               1118 drivers/gpu/drm/i915/i915_request.c 		memset(vaddr + head, 0, rq->ring->size - head);
rq               1121 drivers/gpu/drm/i915/i915_request.c 	memset(vaddr + head, 0, rq->postfix - head);
rq               1122 drivers/gpu/drm/i915/i915_request.c 	rq->infix = rq->postfix;
rq               1126 drivers/gpu/drm/i915/i915_request.c __i915_request_add_to_timeline(struct i915_request *rq)
rq               1128 drivers/gpu/drm/i915/i915_request.c 	struct intel_timeline *timeline = rq->timeline;
rq               1154 drivers/gpu/drm/i915/i915_request.c 		if (is_power_of_2(prev->engine->mask | rq->engine->mask))
rq               1155 drivers/gpu/drm/i915/i915_request.c 			i915_sw_fence_await_sw_fence(&rq->submit,
rq               1157 drivers/gpu/drm/i915/i915_request.c 						     &rq->submitq);
rq               1159 drivers/gpu/drm/i915/i915_request.c 			__i915_sw_fence_await_dma_fence(&rq->submit,
rq               1161 drivers/gpu/drm/i915/i915_request.c 							&rq->dmaq);
rq               1162 drivers/gpu/drm/i915/i915_request.c 		if (rq->engine->schedule)
rq               1163 drivers/gpu/drm/i915/i915_request.c 			__i915_sched_node_add_dependency(&rq->sched,
rq               1165 drivers/gpu/drm/i915/i915_request.c 							 &rq->dep,
rq               1169 drivers/gpu/drm/i915/i915_request.c 	list_add_tail(&rq->link, &timeline->requests);
rq               1176 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
rq               1177 drivers/gpu/drm/i915/i915_request.c 	__i915_active_request_set(&timeline->last_request, rq);
rq               1187 drivers/gpu/drm/i915/i915_request.c struct i915_request *__i915_request_commit(struct i915_request *rq)
rq               1189 drivers/gpu/drm/i915/i915_request.c 	struct intel_engine_cs *engine = rq->engine;
rq               1190 drivers/gpu/drm/i915/i915_request.c 	struct intel_ring *ring = rq->ring;
rq               1194 drivers/gpu/drm/i915/i915_request.c 		  engine->name, rq->fence.context, rq->fence.seqno);
rq               1201 drivers/gpu/drm/i915/i915_request.c 	GEM_BUG_ON(rq->reserved_space > ring->space);
rq               1202 drivers/gpu/drm/i915/i915_request.c 	rq->reserved_space = 0;
rq               1203 drivers/gpu/drm/i915/i915_request.c 	rq->emitted_jiffies = jiffies;
rq               1211 drivers/gpu/drm/i915/i915_request.c 	cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
rq               1213 drivers/gpu/drm/i915/i915_request.c 	rq->postfix = intel_ring_offset(rq, cs);
rq               1215 drivers/gpu/drm/i915/i915_request.c 	return __i915_request_add_to_timeline(rq);
rq               1218 drivers/gpu/drm/i915/i915_request.c void __i915_request_queue(struct i915_request *rq,
rq               1232 drivers/gpu/drm/i915/i915_request.c 	if (attr && rq->engine->schedule)
rq               1233 drivers/gpu/drm/i915/i915_request.c 		rq->engine->schedule(rq, attr);
rq               1234 drivers/gpu/drm/i915/i915_request.c 	i915_sw_fence_commit(&rq->semaphore);
rq               1235 drivers/gpu/drm/i915/i915_request.c 	i915_sw_fence_commit(&rq->submit);
rq               1238 drivers/gpu/drm/i915/i915_request.c void i915_request_add(struct i915_request *rq)
rq               1240 drivers/gpu/drm/i915/i915_request.c 	struct i915_sched_attr attr = rq->gem_context->sched;
rq               1241 drivers/gpu/drm/i915/i915_request.c 	struct intel_timeline * const tl = rq->timeline;
rq               1245 drivers/gpu/drm/i915/i915_request.c 	lockdep_unpin_lock(&tl->mutex, rq->cookie);
rq               1247 drivers/gpu/drm/i915/i915_request.c 	trace_i915_request_add(rq);
rq               1249 drivers/gpu/drm/i915/i915_request.c 	prev = __i915_request_commit(rq);
rq               1263 drivers/gpu/drm/i915/i915_request.c 	if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
rq               1272 drivers/gpu/drm/i915/i915_request.c 	if (list_empty(&rq->sched.signalers_list))
rq               1276 drivers/gpu/drm/i915/i915_request.c 	__i915_request_queue(rq, &attr);
rq               1335 drivers/gpu/drm/i915/i915_request.c static bool __i915_spin_request(const struct i915_request * const rq,
rq               1351 drivers/gpu/drm/i915/i915_request.c 	if (!i915_request_is_running(rq))
rq               1367 drivers/gpu/drm/i915/i915_request.c 		if (i915_request_completed(rq))
rq               1409 drivers/gpu/drm/i915/i915_request.c long i915_request_wait(struct i915_request *rq,
rq               1420 drivers/gpu/drm/i915/i915_request.c 	if (dma_fence_is_signaled(&rq->fence))
rq               1426 drivers/gpu/drm/i915/i915_request.c 	trace_i915_request_wait_begin(rq, flags);
rq               1434 drivers/gpu/drm/i915/i915_request.c 	mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
rq               1460 drivers/gpu/drm/i915/i915_request.c 	    __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
rq               1461 drivers/gpu/drm/i915/i915_request.c 		dma_fence_signal(&rq->fence);
rq               1478 drivers/gpu/drm/i915/i915_request.c 		if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
rq               1479 drivers/gpu/drm/i915/i915_request.c 			gen6_rps_boost(rq);
rq               1480 drivers/gpu/drm/i915/i915_request.c 		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
rq               1484 drivers/gpu/drm/i915/i915_request.c 	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
rq               1490 drivers/gpu/drm/i915/i915_request.c 		if (i915_request_completed(rq)) {
rq               1491 drivers/gpu/drm/i915/i915_request.c 			dma_fence_signal(&rq->fence);
rq               1509 drivers/gpu/drm/i915/i915_request.c 	dma_fence_remove_callback(&rq->fence, &wait.cb);
rq               1512 drivers/gpu/drm/i915/i915_request.c 	mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_);
rq               1513 drivers/gpu/drm/i915/i915_request.c 	trace_i915_request_wait_end(rq);
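
The i915_request_wait() hits above show the usual two-stage wait: spin briefly via __i915_spin_request(), then fall back to a fence callback and schedule(). The following is a minimal userspace sketch of that spin-then-sleep pattern using C11 atomics and a condition variable; struct waiter, spin_then_wait() and the 5us budget are illustrative assumptions, not part of the i915 code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct waiter {
        atomic_bool done;               /* set by the completing side */
        pthread_mutex_t lock;
        pthread_cond_t cond;
};

static long elapsed_ns(const struct timespec *start)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - start->tv_sec) * 1000000000L +
               (now.tv_nsec - start->tv_nsec);
}

/* Spin for up to spin_ns hoping for a quick completion, then sleep. */
static void spin_then_wait(struct waiter *w, long spin_ns)
{
        struct timespec start;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while (elapsed_ns(&start) < spin_ns)
                if (atomic_load(&w->done))
                        return;         /* completed while spinning */

        pthread_mutex_lock(&w->lock);
        while (!atomic_load(&w->done))
                pthread_cond_wait(&w->cond, &w->lock);
        pthread_mutex_unlock(&w->lock);
}

static void *completer(void *arg)
{
        struct waiter *w = arg;

        atomic_store(&w->done, true);
        pthread_mutex_lock(&w->lock);
        pthread_cond_broadcast(&w->cond);       /* wake any sleeper */
        pthread_mutex_unlock(&w->lock);
        return NULL;
}

int main(void)
{
        struct waiter w = { .lock = PTHREAD_MUTEX_INITIALIZER,
                            .cond = PTHREAD_COND_INITIALIZER };
        pthread_t t;

        atomic_store(&w.done, false);
        pthread_create(&t, NULL, completer, &w);
        spin_then_wait(&w, 5000);       /* ~5us spin budget */
        pthread_join(t, NULL);
        printf("request completed\n");
        return 0;
}
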
rq                253 drivers/gpu/drm/i915/i915_request.h void __i915_request_queue(struct i915_request *rq,
rq                256 drivers/gpu/drm/i915/i915_request.h void i915_request_retire_upto(struct i915_request *rq);
rq                268 drivers/gpu/drm/i915/i915_request.h i915_request_get(struct i915_request *rq)
rq                270 drivers/gpu/drm/i915/i915_request.h 	return to_request(dma_fence_get(&rq->fence));
rq                274 drivers/gpu/drm/i915/i915_request.h i915_request_get_rcu(struct i915_request *rq)
rq                276 drivers/gpu/drm/i915/i915_request.h 	return to_request(dma_fence_get_rcu(&rq->fence));
rq                280 drivers/gpu/drm/i915/i915_request.h i915_request_put(struct i915_request *rq)
rq                282 drivers/gpu/drm/i915/i915_request.h 	dma_fence_put(&rq->fence);
rq                288 drivers/gpu/drm/i915/i915_request.h int i915_request_await_dma_fence(struct i915_request *rq,
rq                290 drivers/gpu/drm/i915/i915_request.h int i915_request_await_execution(struct i915_request *rq,
rq                292 drivers/gpu/drm/i915/i915_request.h 				 void (*hook)(struct i915_request *rq,
rq                295 drivers/gpu/drm/i915/i915_request.h void i915_request_add(struct i915_request *rq);
rq                309 drivers/gpu/drm/i915/i915_request.h long i915_request_wait(struct i915_request *rq,
rq                319 drivers/gpu/drm/i915/i915_request.h static inline bool i915_request_signaled(const struct i915_request *rq)
rq                322 drivers/gpu/drm/i915/i915_request.h 	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
rq                325 drivers/gpu/drm/i915/i915_request.h static inline bool i915_request_is_active(const struct i915_request *rq)
rq                327 drivers/gpu/drm/i915/i915_request.h 	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
rq                338 drivers/gpu/drm/i915/i915_request.h static inline u32 __hwsp_seqno(const struct i915_request *rq)
rq                340 drivers/gpu/drm/i915/i915_request.h 	return READ_ONCE(*rq->hwsp_seqno);
rq                356 drivers/gpu/drm/i915/i915_request.h static inline u32 hwsp_seqno(const struct i915_request *rq)
rq                361 drivers/gpu/drm/i915/i915_request.h 	seqno = __hwsp_seqno(rq);
rq                367 drivers/gpu/drm/i915/i915_request.h static inline bool __i915_request_has_started(const struct i915_request *rq)
rq                369 drivers/gpu/drm/i915/i915_request.h 	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
rq                398 drivers/gpu/drm/i915/i915_request.h static inline bool i915_request_started(const struct i915_request *rq)
rq                400 drivers/gpu/drm/i915/i915_request.h 	if (i915_request_signaled(rq))
rq                404 drivers/gpu/drm/i915/i915_request.h 	return __i915_request_has_started(rq);
rq                415 drivers/gpu/drm/i915/i915_request.h static inline bool i915_request_is_running(const struct i915_request *rq)
rq                417 drivers/gpu/drm/i915/i915_request.h 	if (!i915_request_is_active(rq))
rq                420 drivers/gpu/drm/i915/i915_request.h 	return __i915_request_has_started(rq);
rq                423 drivers/gpu/drm/i915/i915_request.h static inline bool i915_request_completed(const struct i915_request *rq)
rq                425 drivers/gpu/drm/i915/i915_request.h 	if (i915_request_signaled(rq))
rq                428 drivers/gpu/drm/i915/i915_request.h 	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
rq                431 drivers/gpu/drm/i915/i915_request.h static inline void i915_request_mark_complete(struct i915_request *rq)
rq                433 drivers/gpu/drm/i915/i915_request.h 	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
rq                436 drivers/gpu/drm/i915/i915_request.h static inline bool i915_request_has_waitboost(const struct i915_request *rq)
rq                438 drivers/gpu/drm/i915/i915_request.h 	return rq->flags & I915_REQUEST_WAITBOOST;
rq                441 drivers/gpu/drm/i915/i915_request.h static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
rq                444 drivers/gpu/drm/i915/i915_request.h 	return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
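
i915_request_completed() and __i915_request_has_started() above boil down to comparing a hardware status page seqno against the request's own seqno with i915_seqno_passed(). A self-contained sketch of the usual wrap-safe comparison (signed subtraction of unsigned 32-bit counters) follows; seqno_passed() is a local stand-in name, not the driver's symbol.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if seq1 is at or after seq2, even across a u32 wrap. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        assert(seqno_passed(10, 5));
        assert(!seqno_passed(5, 10));
        /* 3 is "after" 0xfffffffe once the counter has wrapped */
        assert(seqno_passed(3, 0xfffffffeu));
        printf("wrap-safe comparisons hold\n");
        return 0;
}
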
rq                153 drivers/gpu/drm/i915/i915_scheduler.c 	const struct i915_request *rq = node_to_request(node);
rq                164 drivers/gpu/drm/i915/i915_scheduler.c 	while (locked != (engine = READ_ONCE(rq->engine))) {
rq                175 drivers/gpu/drm/i915/i915_scheduler.c static inline int rq_prio(const struct i915_request *rq)
rq                177 drivers/gpu/drm/i915/i915_scheduler.c 	return rq->sched.attr.priority | __NO_PREEMPTION;
rq                193 drivers/gpu/drm/i915/i915_scheduler.c 			    const struct i915_request *rq,
rq                219 drivers/gpu/drm/i915/i915_scheduler.c 	if (inflight->hw_context == rq->hw_context)
rq                360 drivers/gpu/drm/i915/i915_scheduler.c void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
rq                363 drivers/gpu/drm/i915/i915_scheduler.c 	__i915_schedule(&rq->sched, attr);
rq                375 drivers/gpu/drm/i915/i915_scheduler.c void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
rq                380 drivers/gpu/drm/i915/i915_scheduler.c 	if (READ_ONCE(rq->sched.attr.priority) & bump)
rq                384 drivers/gpu/drm/i915/i915_scheduler.c 	__bump_priority(&rq->sched, bump);
rq                 43 drivers/gpu/drm/i915/i915_scheduler.h void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
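
i915_schedule_bump_priority() above only applies a boost when the priority value does not already carry that bit, which suggests priorities whose low bits act as one-shot boost flags. The sketch below models that check-then-OR bump; the specific bit layout (PRIO_BOOST_*) is an assumption made purely for illustration.

#include <stdio.h>

/* Illustrative layout: low bits of the priority act as one-shot boosts. */
#define PRIO_BOOST_WAIT         (1 << 0)
#define PRIO_BOOST_NOSEMA       (1 << 1)
#define PRIO_BOOST_BITS         2

struct sched_node {
        int priority;   /* (base << PRIO_BOOST_BITS) | boost flags */
};

static void bump_priority(struct sched_node *node, int bump)
{
        if (node->priority & bump)      /* boost already applied */
                return;
        node->priority |= bump;
}

int main(void)
{
        struct sched_node node = { .priority = 3 << PRIO_BOOST_BITS };

        bump_priority(&node, PRIO_BOOST_WAIT);
        bump_priority(&node, PRIO_BOOST_WAIT); /* no-op the second time */
        printf("priority now 0x%x\n", node.priority);
        return 0;
}
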
rq                663 drivers/gpu/drm/i915/i915_trace.h 	    TP_PROTO(struct i915_request *rq, u32 flags),
rq                664 drivers/gpu/drm/i915/i915_trace.h 	    TP_ARGS(rq, flags),
rq                677 drivers/gpu/drm/i915/i915_trace.h 			   __entry->dev = rq->i915->drm.primary->index;
rq                678 drivers/gpu/drm/i915/i915_trace.h 			   __entry->hw_id = rq->gem_context->hw_id;
rq                679 drivers/gpu/drm/i915/i915_trace.h 			   __entry->class = rq->engine->uabi_class;
rq                680 drivers/gpu/drm/i915/i915_trace.h 			   __entry->instance = rq->engine->uabi_instance;
rq                681 drivers/gpu/drm/i915/i915_trace.h 			   __entry->ctx = rq->fence.context;
rq                682 drivers/gpu/drm/i915/i915_trace.h 			   __entry->seqno = rq->fence.seqno;
rq                693 drivers/gpu/drm/i915/i915_trace.h 	    TP_PROTO(struct i915_request *rq),
rq                694 drivers/gpu/drm/i915/i915_trace.h 	    TP_ARGS(rq),
rq                706 drivers/gpu/drm/i915/i915_trace.h 			   __entry->dev = rq->i915->drm.primary->index;
rq                707 drivers/gpu/drm/i915/i915_trace.h 			   __entry->hw_id = rq->gem_context->hw_id;
rq                708 drivers/gpu/drm/i915/i915_trace.h 			   __entry->class = rq->engine->uabi_class;
rq                709 drivers/gpu/drm/i915/i915_trace.h 			   __entry->instance = rq->engine->uabi_instance;
rq                710 drivers/gpu/drm/i915/i915_trace.h 			   __entry->ctx = rq->fence.context;
rq                711 drivers/gpu/drm/i915/i915_trace.h 			   __entry->seqno = rq->fence.seqno;
rq                720 drivers/gpu/drm/i915/i915_trace.h 	    TP_PROTO(struct i915_request *rq),
rq                721 drivers/gpu/drm/i915/i915_trace.h 	    TP_ARGS(rq)
rq                726 drivers/gpu/drm/i915/i915_trace.h 	     TP_PROTO(struct i915_request *rq),
rq                727 drivers/gpu/drm/i915/i915_trace.h 	     TP_ARGS(rq)
rq                731 drivers/gpu/drm/i915/i915_trace.h 	     TP_PROTO(struct i915_request *rq),
rq                732 drivers/gpu/drm/i915/i915_trace.h 	     TP_ARGS(rq)
rq                736 drivers/gpu/drm/i915/i915_trace.h 	    TP_PROTO(struct i915_request *rq, unsigned int port),
rq                737 drivers/gpu/drm/i915/i915_trace.h 	    TP_ARGS(rq, port),
rq                751 drivers/gpu/drm/i915/i915_trace.h 			   __entry->dev = rq->i915->drm.primary->index;
rq                752 drivers/gpu/drm/i915/i915_trace.h 			   __entry->hw_id = rq->gem_context->hw_id;
rq                753 drivers/gpu/drm/i915/i915_trace.h 			   __entry->class = rq->engine->uabi_class;
rq                754 drivers/gpu/drm/i915/i915_trace.h 			   __entry->instance = rq->engine->uabi_instance;
rq                755 drivers/gpu/drm/i915/i915_trace.h 			   __entry->ctx = rq->fence.context;
rq                756 drivers/gpu/drm/i915/i915_trace.h 			   __entry->seqno = rq->fence.seqno;
rq                757 drivers/gpu/drm/i915/i915_trace.h 			   __entry->prio = rq->sched.attr.priority;
rq                768 drivers/gpu/drm/i915/i915_trace.h 	    TP_PROTO(struct i915_request *rq),
rq                769 drivers/gpu/drm/i915/i915_trace.h 	    TP_ARGS(rq),
rq                782 drivers/gpu/drm/i915/i915_trace.h 			   __entry->dev = rq->i915->drm.primary->index;
rq                783 drivers/gpu/drm/i915/i915_trace.h 			   __entry->hw_id = rq->gem_context->hw_id;
rq                784 drivers/gpu/drm/i915/i915_trace.h 			   __entry->class = rq->engine->uabi_class;
rq                785 drivers/gpu/drm/i915/i915_trace.h 			   __entry->instance = rq->engine->uabi_instance;
rq                786 drivers/gpu/drm/i915/i915_trace.h 			   __entry->ctx = rq->fence.context;
rq                787 drivers/gpu/drm/i915/i915_trace.h 			   __entry->seqno = rq->fence.seqno;
rq                788 drivers/gpu/drm/i915/i915_trace.h 			   __entry->completed = i915_request_completed(rq);
rq                800 drivers/gpu/drm/i915/i915_trace.h trace_i915_request_submit(struct i915_request *rq)
rq                805 drivers/gpu/drm/i915/i915_trace.h trace_i915_request_execute(struct i915_request *rq)
rq                810 drivers/gpu/drm/i915/i915_trace.h trace_i915_request_in(struct i915_request *rq, unsigned int port)
rq                815 drivers/gpu/drm/i915/i915_trace.h trace_i915_request_out(struct i915_request *rq)
rq                822 drivers/gpu/drm/i915/i915_trace.h 	    TP_PROTO(struct i915_request *rq),
rq                823 drivers/gpu/drm/i915/i915_trace.h 	    TP_ARGS(rq)
rq                827 drivers/gpu/drm/i915/i915_trace.h 	    TP_PROTO(struct i915_request *rq, unsigned int flags),
rq                828 drivers/gpu/drm/i915/i915_trace.h 	    TP_ARGS(rq, flags),
rq                847 drivers/gpu/drm/i915/i915_trace.h 			   __entry->dev = rq->i915->drm.primary->index;
rq                848 drivers/gpu/drm/i915/i915_trace.h 			   __entry->hw_id = rq->gem_context->hw_id;
rq                849 drivers/gpu/drm/i915/i915_trace.h 			   __entry->class = rq->engine->uabi_class;
rq                850 drivers/gpu/drm/i915/i915_trace.h 			   __entry->instance = rq->engine->uabi_instance;
rq                851 drivers/gpu/drm/i915/i915_trace.h 			   __entry->ctx = rq->fence.context;
rq                852 drivers/gpu/drm/i915/i915_trace.h 			   __entry->seqno = rq->fence.seqno;
rq                863 drivers/gpu/drm/i915/i915_trace.h 	    TP_PROTO(struct i915_request *rq),
rq                864 drivers/gpu/drm/i915/i915_trace.h 	    TP_ARGS(rq)
rq                887 drivers/gpu/drm/i915/i915_vma.c 			    struct i915_request *rq,
rq                905 drivers/gpu/drm/i915/i915_vma.c 	err = i915_active_ref(&vma->active, rq->timeline, rq);
rq                912 drivers/gpu/drm/i915/i915_vma.c 					rq->timeline,
rq                913 drivers/gpu/drm/i915/i915_vma.c 					rq);
rq                915 drivers/gpu/drm/i915/i915_vma.c 		dma_resv_add_excl_fence(vma->resv, &rq->fence);
rq                923 drivers/gpu/drm/i915/i915_vma.c 		dma_resv_add_shared_fence(vma->resv, &rq->fence);
rq                162 drivers/gpu/drm/i915/i915_vma.h 					 struct i915_request *rq,
rq               6871 drivers/gpu/drm/i915/intel_pm.c void gen6_rps_boost(struct i915_request *rq)
rq               6873 drivers/gpu/drm/i915/intel_pm.c 	struct intel_rps *rps = &rq->i915->gt_pm.rps;
rq               6883 drivers/gpu/drm/i915/intel_pm.c 	if (i915_request_signaled(rq))
rq               6888 drivers/gpu/drm/i915/intel_pm.c 	spin_lock_irqsave(&rq->lock, flags);
rq               6889 drivers/gpu/drm/i915/intel_pm.c 	if (!i915_request_has_waitboost(rq) &&
rq               6890 drivers/gpu/drm/i915/intel_pm.c 	    !dma_fence_is_signaled_locked(&rq->fence)) {
rq               6892 drivers/gpu/drm/i915/intel_pm.c 		rq->flags |= I915_REQUEST_WAITBOOST;
rq               6894 drivers/gpu/drm/i915/intel_pm.c 	spin_unlock_irqrestore(&rq->lock, flags);
rq                 44 drivers/gpu/drm/i915/intel_pm.h void gen6_rps_boost(struct i915_request *rq);
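
gen6_rps_boost() above takes rq->lock, skips already-signalled or already-boosted requests, sets a waitboost flag and bumps a waiter count that request retirement later drops. A small userspace model of that boost-at-most-once bookkeeping follows; rps_boost()/retire() and the mutex-plus-atomic layout are illustrative, not the driver's types.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int num_waiters;          /* global count of boosted waits */

struct request {
        pthread_mutex_t lock;
        bool signaled;
        bool waitboost;
};

/* Boost at most once per request, and only while it is still pending. */
static void rps_boost(struct request *rq)
{
        pthread_mutex_lock(&rq->lock);
        if (!rq->waitboost && !rq->signaled) {
                rq->waitboost = true;
                atomic_fetch_add(&num_waiters, 1);
        }
        pthread_mutex_unlock(&rq->lock);
}

/* On retire, drop the boost reference if this request took one. */
static void retire(struct request *rq)
{
        pthread_mutex_lock(&rq->lock);
        rq->signaled = true;
        if (rq->waitboost)
                atomic_fetch_sub(&num_waiters, 1);
        pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
        struct request rq = { .lock = PTHREAD_MUTEX_INITIALIZER };

        rps_boost(&rq);
        rps_boost(&rq);         /* second boost is ignored */
        printf("waiters after boost: %d\n", atomic_load(&num_waiters));
        retire(&rq);
        printf("waiters after retire: %d\n", atomic_load(&num_waiters));
        return 0;
}
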
rq                101 drivers/gpu/drm/i915/selftests/i915_active.c 		struct i915_request *rq;
rq                103 drivers/gpu/drm/i915/selftests/i915_active.c 		rq = i915_request_create(engine->kernel_context);
rq                104 drivers/gpu/drm/i915/selftests/i915_active.c 		if (IS_ERR(rq)) {
rq                105 drivers/gpu/drm/i915/selftests/i915_active.c 			err = PTR_ERR(rq);
rq                109 drivers/gpu/drm/i915/selftests/i915_active.c 		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
rq                113 drivers/gpu/drm/i915/selftests/i915_active.c 			err = i915_active_ref(&active->base, rq->timeline, rq);
rq                114 drivers/gpu/drm/i915/selftests/i915_active.c 		i915_request_add(rq);
rq                 25 drivers/gpu/drm/i915/selftests/i915_gem.c 		struct i915_request *rq;
rq                 27 drivers/gpu/drm/i915/selftests/i915_gem.c 		rq = igt_request_alloc(ctx, engine);
rq                 28 drivers/gpu/drm/i915/selftests/i915_gem.c 		if (IS_ERR(rq))
rq                 29 drivers/gpu/drm/i915/selftests/i915_gem.c 			return PTR_ERR(rq);
rq                 31 drivers/gpu/drm/i915/selftests/i915_gem.c 		i915_request_add(rq);
rq                466 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 			struct i915_request *rq;
rq                475 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 			rq = igt_request_alloc(ctx, engine);
rq                478 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 			if (IS_ERR(rq)) {
rq                480 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 				if (PTR_ERR(rq) != -EBUSY) {
rq                483 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 					       (int)PTR_ERR(rq));
rq                484 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 					err = PTR_ERR(rq);
rq                490 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
rq                496 drivers/gpu/drm/i915/selftests/i915_gem_evict.c 			i915_request_add(rq);
rq                337 drivers/gpu/drm/i915/selftests/i915_request.c 			struct i915_request *rq;
rq                344 drivers/gpu/drm/i915/selftests/i915_request.c 			rq = t->request_alloc(ce);
rq                346 drivers/gpu/drm/i915/selftests/i915_request.c 			if (IS_ERR(rq)) {
rq                348 drivers/gpu/drm/i915/selftests/i915_request.c 				err = PTR_ERR(rq);
rq                353 drivers/gpu/drm/i915/selftests/i915_request.c 			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
rq                357 drivers/gpu/drm/i915/selftests/i915_request.c 			requests[n] = i915_request_get(rq);
rq                358 drivers/gpu/drm/i915/selftests/i915_request.c 			i915_request_add(rq);
rq                364 drivers/gpu/drm/i915/selftests/i915_request.c 								    &rq->fence,
rq                369 drivers/gpu/drm/i915/selftests/i915_request.c 				i915_request_put(rq);
rq                381 drivers/gpu/drm/i915/selftests/i915_request.c 			struct i915_request *rq = requests[count - 1];
rq                385 drivers/gpu/drm/i915/selftests/i915_request.c 			       rq->fence.context, rq->fence.seqno,
rq                390 drivers/gpu/drm/i915/selftests/i915_request.c 			GEM_BUG_ON(!i915_request_completed(rq));
rq                396 drivers/gpu/drm/i915/selftests/i915_request.c 			struct i915_request *rq = requests[n];
rq                399 drivers/gpu/drm/i915/selftests/i915_request.c 				      &rq->fence.flags)) {
rq                401 drivers/gpu/drm/i915/selftests/i915_request.c 				       rq->fence.context, rq->fence.seqno);
rq                405 drivers/gpu/drm/i915/selftests/i915_request.c 			i915_request_put(rq);
rq               1068 drivers/gpu/drm/i915/selftests/i915_request.c 	struct i915_request *rq;
rq               1083 drivers/gpu/drm/i915/selftests/i915_request.c 	rq = igt_request_alloc(ctx, engine);
rq               1084 drivers/gpu/drm/i915/selftests/i915_request.c 	if (IS_ERR(rq)) {
rq               1085 drivers/gpu/drm/i915/selftests/i915_request.c 		ret = PTR_ERR(rq);
rq               1089 drivers/gpu/drm/i915/selftests/i915_request.c 		ret = rq->ring->size - rq->reserved_space;
rq               1090 drivers/gpu/drm/i915/selftests/i915_request.c 		i915_request_add(rq);
rq               1092 drivers/gpu/drm/i915/selftests/i915_request.c 		sz = rq->ring->emit - rq->head;
rq               1094 drivers/gpu/drm/i915/selftests/i915_request.c 			sz += rq->ring->size;
rq                 69 drivers/gpu/drm/i915/selftests/igt_spinner.c 		       const struct i915_request *rq)
rq                 71 drivers/gpu/drm/i915/selftests/igt_spinner.c 	return hws->node.start + seqno_offset(rq->fence.context);
rq                 75 drivers/gpu/drm/i915/selftests/igt_spinner.c 			  struct i915_request *rq,
rq                 81 drivers/gpu/drm/i915/selftests/igt_spinner.c 	err = i915_request_await_object(rq, vma->obj,
rq                 84 drivers/gpu/drm/i915/selftests/igt_spinner.c 		err = i915_vma_move_to_active(vma, rq, flags);
rq                 96 drivers/gpu/drm/i915/selftests/igt_spinner.c 	struct i915_request *rq = NULL;
rq                119 drivers/gpu/drm/i915/selftests/igt_spinner.c 	rq = intel_context_create_request(ce);
rq                120 drivers/gpu/drm/i915/selftests/igt_spinner.c 	if (IS_ERR(rq)) {
rq                121 drivers/gpu/drm/i915/selftests/igt_spinner.c 		err = PTR_ERR(rq);
rq                125 drivers/gpu/drm/i915/selftests/igt_spinner.c 	err = move_to_active(vma, rq, 0);
rq                129 drivers/gpu/drm/i915/selftests/igt_spinner.c 	err = move_to_active(hws, rq, 0);
rq                136 drivers/gpu/drm/i915/selftests/igt_spinner.c 	*batch++ = lower_32_bits(hws_address(hws, rq));
rq                137 drivers/gpu/drm/i915/selftests/igt_spinner.c 	*batch++ = upper_32_bits(hws_address(hws, rq));
rq                138 drivers/gpu/drm/i915/selftests/igt_spinner.c 	*batch++ = rq->fence.seqno;
rq                150 drivers/gpu/drm/i915/selftests/igt_spinner.c 	    rq->timeline->has_initial_breadcrumb) {
rq                151 drivers/gpu/drm/i915/selftests/igt_spinner.c 		err = engine->emit_init_breadcrumb(rq);
rq                156 drivers/gpu/drm/i915/selftests/igt_spinner.c 	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
rq                160 drivers/gpu/drm/i915/selftests/igt_spinner.c 		i915_request_skip(rq, err);
rq                161 drivers/gpu/drm/i915/selftests/igt_spinner.c 		i915_request_add(rq);
rq                167 drivers/gpu/drm/i915/selftests/igt_spinner.c 	return err ? ERR_PTR(err) : rq;
rq                171 drivers/gpu/drm/i915/selftests/igt_spinner.c hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
rq                173 drivers/gpu/drm/i915/selftests/igt_spinner.c 	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
rq                195 drivers/gpu/drm/i915/selftests/igt_spinner.c bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
rq                197 drivers/gpu/drm/i915/selftests/igt_spinner.c 	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq                198 drivers/gpu/drm/i915/selftests/igt_spinner.c 					       rq->fence.seqno),
rq                200 drivers/gpu/drm/i915/selftests/igt_spinner.c 		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
rq                201 drivers/gpu/drm/i915/selftests/igt_spinner.c 					    rq->fence.seqno),
rq                 36 drivers/gpu/drm/i915/selftests/igt_spinner.h bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
rq                162 drivers/gpu/drm/lima/lima_sched.c 	struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
rq                164 drivers/gpu/drm/lima/lima_sched.c 	return drm_sched_entity_init(&context->base, &rq, 1, guilty);
rq                 39 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c 	int rq = ram->freq < 1000000; /* XXX */
rq                 96 drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr5.c 	ram->mr[3] |= (rq & 0x01) << 5;
rq                133 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c 	u32 hi, lo, rq, tx;
rq                138 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c 	i2c->func->aux_stat(i2c, &hi, &lo, &rq, &tx);
rq                139 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c 	if (!hi && !lo && !rq && !tx)
rq                146 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c 		if (rq & aux->intr) mask |= NVKM_I2C_IRQ;
rq                 28 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
rq                 33 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c 	for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
rq                 36 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/g94.c 		if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
rq                 28 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
rq                 33 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c 	for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) {
rq                 36 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk104.c 		if ((stat & (4 << (i * 4)))) *rq |= 1 << i;
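
The g94/gk104 aux_stat() hits above extract the per-port "rq" bit from a packed status word in which every port owns a 4-bit field. The standalone decoder below mirrors that loop; only the value-4 bit appears in the listing, so treating bits 1, 2 and 8 of each nibble as hi/lo/tx here is an assumption made for symmetry.

#include <stdint.h>
#include <stdio.h>

/*
 * Decode a packed status word where each of 8 ports owns a 4-bit field,
 * accumulating per-event bitmasks indexed by port.
 */
static void aux_stat_decode(uint32_t stat, uint32_t *hi, uint32_t *lo,
                            uint32_t *rq, uint32_t *tx)
{
        int i;

        *hi = *lo = *rq = *tx = 0;
        for (i = 0; i < 8; i++) {
                if (stat & (1u << (i * 4)))
                        *hi |= 1u << i;
                if (stat & (2u << (i * 4)))
                        *lo |= 1u << i;
                if (stat & (4u << (i * 4)))     /* the "rq" bit shown above */
                        *rq |= 1u << i;
                if (stat & (8u << (i * 4)))
                        *tx |= 1u << i;
        }
}

int main(void)
{
        uint32_t hi, lo, rq, tx;

        aux_stat_decode(0x00000040, &hi, &lo, &rq, &tx); /* rq on port 1 */
        printf("rq mask: 0x%02x\n", rq);
        return 0;
}
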
rq                564 drivers/gpu/drm/panfrost/panfrost_job.c 	struct drm_sched_rq *rq;
rq                568 drivers/gpu/drm/panfrost/panfrost_job.c 		rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
rq                569 drivers/gpu/drm/panfrost/panfrost_job.c 		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
rq                 63 drivers/gpu/drm/scheduler/sched_entity.c 	entity->rq = NULL;
rq                 75 drivers/gpu/drm/scheduler/sched_entity.c 		entity->rq = rq_list[0];
rq                135 drivers/gpu/drm/scheduler/sched_entity.c 	struct drm_sched_rq *rq = NULL;
rq                150 drivers/gpu/drm/scheduler/sched_entity.c 			rq = entity->rq_list[i];
rq                154 drivers/gpu/drm/scheduler/sched_entity.c 	return rq;
rq                175 drivers/gpu/drm/scheduler/sched_entity.c 	if (!entity->rq)
rq                178 drivers/gpu/drm/scheduler/sched_entity.c 	sched = entity->rq->sched;
rq                200 drivers/gpu/drm/scheduler/sched_entity.c 		drm_sched_rq_remove_entity(entity->rq, entity);
rq                279 drivers/gpu/drm/scheduler/sched_entity.c 	if (entity->rq) {
rq                280 drivers/gpu/drm/scheduler/sched_entity.c 		sched = entity->rq->sched;
rq                281 drivers/gpu/drm/scheduler/sched_entity.c 		drm_sched_rq_remove_entity(entity->rq, entity);
rq                349 drivers/gpu/drm/scheduler/sched_entity.c 	drm_sched_wakeup(entity->rq->sched);
rq                355 drivers/gpu/drm/scheduler/sched_entity.c static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
rq                358 drivers/gpu/drm/scheduler/sched_entity.c 	*rq = &(*rq)->sched->sched_rq[priority];
rq                379 drivers/gpu/drm/scheduler/sched_entity.c 	if (entity->rq) {
rq                380 drivers/gpu/drm/scheduler/sched_entity.c 		drm_sched_rq_remove_entity(entity->rq, entity);
rq                381 drivers/gpu/drm/scheduler/sched_entity.c 		drm_sched_entity_set_rq_priority(&entity->rq, priority);
rq                382 drivers/gpu/drm/scheduler/sched_entity.c 		drm_sched_rq_add_entity(entity->rq, entity);
rq                399 drivers/gpu/drm/scheduler/sched_entity.c 	struct drm_gpu_scheduler *sched = entity->rq->sched;
rq                450 drivers/gpu/drm/scheduler/sched_entity.c 	struct drm_gpu_scheduler *sched = entity->rq->sched;
rq                487 drivers/gpu/drm/scheduler/sched_entity.c 	struct drm_sched_rq *rq;
rq                496 drivers/gpu/drm/scheduler/sched_entity.c 	rq = drm_sched_entity_get_free_sched(entity);
rq                497 drivers/gpu/drm/scheduler/sched_entity.c 	if (rq == entity->rq)
rq                501 drivers/gpu/drm/scheduler/sched_entity.c 	drm_sched_rq_remove_entity(entity->rq, entity);
rq                502 drivers/gpu/drm/scheduler/sched_entity.c 	entity->rq = rq;
rq                524 drivers/gpu/drm/scheduler/sched_entity.c 	atomic_inc(&entity->rq->sched->num_jobs);
rq                538 drivers/gpu/drm/scheduler/sched_entity.c 		drm_sched_rq_add_entity(entity->rq, entity);
rq                540 drivers/gpu/drm/scheduler/sched_entity.c 		drm_sched_wakeup(entity->rq->sched);
rq                166 drivers/gpu/drm/scheduler/sched_fence.c 	fence->sched = entity->rq->sched;
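
drm_sched_entity_set_priority() above retargets entity->rq by removing the entity from its current run queue, pointing it at sched_rq[priority] and re-adding it. The toy model below keeps only that remove/retarget/add sequence, reducing queue membership to a counter; the real code uses per-queue lists and spinlocks, and these type names are invented for the sketch.

#include <stdio.h>

enum prio { PRIO_MIN, PRIO_NORMAL, PRIO_HIGH, PRIO_COUNT };

struct run_queue {
        int nr_entities;
};

struct scheduler {
        struct run_queue sched_rq[PRIO_COUNT];
};

struct entity {
        struct scheduler *sched;
        struct run_queue *rq;
};

/* Leave the old queue, repoint entity->rq, join the queue for priority. */
static void entity_set_priority(struct entity *entity, enum prio priority)
{
        if (entity->rq)
                entity->rq->nr_entities--;
        entity->rq = &entity->sched->sched_rq[priority];
        entity->rq->nr_entities++;
}

int main(void)
{
        struct scheduler sched = { 0 };
        struct entity entity = { .sched = &sched };

        entity_set_priority(&entity, PRIO_NORMAL);
        entity_set_priority(&entity, PRIO_HIGH);
        printf("normal=%d high=%d\n",
               sched.sched_rq[PRIO_NORMAL].nr_entities,
               sched.sched_rq[PRIO_HIGH].nr_entities);
        return 0;
}
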
rq                 72 drivers/gpu/drm/scheduler/sched_main.c 			      struct drm_sched_rq *rq)
rq                 74 drivers/gpu/drm/scheduler/sched_main.c 	spin_lock_init(&rq->lock);
rq                 75 drivers/gpu/drm/scheduler/sched_main.c 	INIT_LIST_HEAD(&rq->entities);
rq                 76 drivers/gpu/drm/scheduler/sched_main.c 	rq->current_entity = NULL;
rq                 77 drivers/gpu/drm/scheduler/sched_main.c 	rq->sched = sched;
rq                 88 drivers/gpu/drm/scheduler/sched_main.c void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
rq                 93 drivers/gpu/drm/scheduler/sched_main.c 	spin_lock(&rq->lock);
rq                 94 drivers/gpu/drm/scheduler/sched_main.c 	list_add_tail(&entity->list, &rq->entities);
rq                 95 drivers/gpu/drm/scheduler/sched_main.c 	spin_unlock(&rq->lock);
rq                106 drivers/gpu/drm/scheduler/sched_main.c void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
rq                111 drivers/gpu/drm/scheduler/sched_main.c 	spin_lock(&rq->lock);
rq                113 drivers/gpu/drm/scheduler/sched_main.c 	if (rq->current_entity == entity)
rq                114 drivers/gpu/drm/scheduler/sched_main.c 		rq->current_entity = NULL;
rq                115 drivers/gpu/drm/scheduler/sched_main.c 	spin_unlock(&rq->lock);
rq                126 drivers/gpu/drm/scheduler/sched_main.c drm_sched_rq_select_entity(struct drm_sched_rq *rq)
rq                130 drivers/gpu/drm/scheduler/sched_main.c 	spin_lock(&rq->lock);
rq                132 drivers/gpu/drm/scheduler/sched_main.c 	entity = rq->current_entity;
rq                134 drivers/gpu/drm/scheduler/sched_main.c 		list_for_each_entry_continue(entity, &rq->entities, list) {
rq                136 drivers/gpu/drm/scheduler/sched_main.c 				rq->current_entity = entity;
rq                137 drivers/gpu/drm/scheduler/sched_main.c 				spin_unlock(&rq->lock);
rq                143 drivers/gpu/drm/scheduler/sched_main.c 	list_for_each_entry(entity, &rq->entities, list) {
rq                146 drivers/gpu/drm/scheduler/sched_main.c 			rq->current_entity = entity;
rq                147 drivers/gpu/drm/scheduler/sched_main.c 			spin_unlock(&rq->lock);
rq                151 drivers/gpu/drm/scheduler/sched_main.c 		if (entity == rq->current_entity)
rq                155 drivers/gpu/drm/scheduler/sched_main.c 	spin_unlock(&rq->lock);
rq                171 drivers/gpu/drm/scheduler/sched_main.c 	struct drm_gpu_scheduler *sched = entity->rq->sched;
rq                332 drivers/gpu/drm/scheduler/sched_main.c 			struct drm_sched_rq *rq = &sched->sched_rq[i];
rq                334 drivers/gpu/drm/scheduler/sched_main.c 			spin_lock(&rq->lock);
rq                335 drivers/gpu/drm/scheduler/sched_main.c 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
rq                345 drivers/gpu/drm/scheduler/sched_main.c 			spin_unlock(&rq->lock);
rq                346 drivers/gpu/drm/scheduler/sched_main.c 			if (&entity->list != &rq->entities)
rq                529 drivers/gpu/drm/scheduler/sched_main.c 	if (!entity->rq)
rq                532 drivers/gpu/drm/scheduler/sched_main.c 	sched = entity->rq->sched;
rq                536 drivers/gpu/drm/scheduler/sched_main.c 	job->s_priority = entity->rq - sched->sched_rq;
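
drm_sched_rq_select_entity() above walks the run queue starting after current_entity so ready entities are served round-robin. The array-based sketch below keeps just that fairness idea (resume the scan after the last pick); the real code iterates a spinlock-protected linked list and checks whether each entity actually has a job ready.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct entity {
        const char *name;
        bool ready;             /* has a job queued */
};

struct run_queue {
        struct entity *entities;
        size_t count;
        size_t current;         /* index of the last entity picked */
};

/* Scan one full lap starting after the previous pick; return first ready. */
static struct entity *rq_select_entity(struct run_queue *rq)
{
        size_t i;

        for (i = 1; i <= rq->count; i++) {
                size_t idx = (rq->current + i) % rq->count;

                if (rq->entities[idx].ready) {
                        rq->current = idx;
                        return &rq->entities[idx];
                }
        }
        return NULL;            /* nothing runnable */
}

int main(void)
{
        struct entity ents[] = {
                { "A", true }, { "B", true }, { "C", false },
        };
        struct run_queue rq = { ents, 3, 0 };
        int i;

        for (i = 0; i < 4; i++) {
                struct entity *e = rq_select_entity(&rq);

                printf("picked %s\n", e ? e->name : "none");
        }
        return 0;
}
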
rq                140 drivers/gpu/drm/v3d/v3d_drv.c 	struct drm_sched_rq *rq;
rq                150 drivers/gpu/drm/v3d/v3d_drv.c 		rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
rq                151 drivers/gpu/drm/v3d/v3d_drv.c 		drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
rq                836 drivers/ide/hpt366.c static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
rq                838 drivers/ide/hpt366.c 	hpt3xxn_set_clock(drive->hwif, rq_data_dir(rq) ? 0x21 : 0x23);
rq                 93 drivers/ide/ide-atapi.c 	struct request *rq;
rq                 96 drivers/ide/ide-atapi.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
rq                 97 drivers/ide/ide-atapi.c 	ide_req(rq)->type = ATA_PRIV_MISC;
rq                 98 drivers/ide/ide-atapi.c 	ide_req(rq)->special = pc;
rq                101 drivers/ide/ide-atapi.c 		error = blk_rq_map_kern(drive->queue, rq, buf, bufflen,
rq                107 drivers/ide/ide-atapi.c 	memcpy(scsi_req(rq)->cmd, pc->c, 12);
rq                109 drivers/ide/ide-atapi.c 		scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1;
rq                110 drivers/ide/ide-atapi.c 	blk_execute_rq(drive->queue, disk, rq, 0);
rq                111 drivers/ide/ide-atapi.c 	error = scsi_req(rq)->result ? -EIO : 0;
rq                113 drivers/ide/ide-atapi.c 	blk_put_request(rq);
rq                173 drivers/ide/ide-atapi.c void ide_prep_sense(ide_drive_t *drive, struct request *rq)
rq                197 drivers/ide/ide-atapi.c 	if (ata_sense_request(rq) || drive->sense_rq_armed)
rq                223 drivers/ide/ide-atapi.c 	sense_rq->rq_disk = rq->rq_disk;
rq                257 drivers/ide/ide-atapi.c 	drive->hwif->rq = NULL;
rq                272 drivers/ide/ide-atapi.c 	struct request *failed_rq = drive->hwif->rq;
rq                290 drivers/ide/ide-atapi.c 	drive->hwif->rq = NULL;
rq                299 drivers/ide/ide-atapi.c 	struct request *rq = drive->hwif->rq;
rq                302 drivers/ide/ide-atapi.c 	debug_log("%s: scsi_req(rq)->cmd[0]: 0x%x\n", __func__, scsi_req(rq)->cmd[0]);
rq                310 drivers/ide/ide-atapi.c 	switch (scsi_req(rq)->cmd[0]) {
rq                319 drivers/ide/ide-atapi.c 		if (!(rq->rq_flags & RQF_QUIET))
rq                321 drivers/ide/ide-atapi.c 					 scsi_req(rq)->cmd[0]);
rq                329 drivers/ide/ide-atapi.c int ide_cd_get_xferlen(struct request *rq)
rq                331 drivers/ide/ide-atapi.c 	switch (req_op(rq)) {
rq                336 drivers/ide/ide-atapi.c 		return blk_rq_bytes(rq);
rq                339 drivers/ide/ide-atapi.c 		switch (ide_req(rq)->type) {
rq                342 drivers/ide/ide-atapi.c 			return blk_rq_bytes(rq);
rq                370 drivers/ide/ide-atapi.c int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
rq                404 drivers/ide/ide-atapi.c 	if (dev_is_idecd(drive) && ata_pc_request(rq))
rq                405 drivers/ide/ide-atapi.c 		rq->rq_flags |= RQF_FAILED;
rq                421 drivers/ide/ide-atapi.c 	struct request *rq = hwif->rq;
rq                446 drivers/ide/ide-atapi.c 					drive->name, rq_data_dir(pc->rq)
rq                450 drivers/ide/ide-atapi.c 			scsi_req(rq)->resid_len = 0;
rq                460 drivers/ide/ide-atapi.c 			  blk_rq_bytes(rq));
rq                467 drivers/ide/ide-atapi.c 		    (stat & ATA_ERR) && scsi_req(rq)->cmd[0] == REQUEST_SENSE)
rq                475 drivers/ide/ide-atapi.c 				scsi_req(pc->rq)->result++;
rq                477 drivers/ide/ide-atapi.c 			if (scsi_req(rq)->cmd[0] == REQUEST_SENSE) {
rq                483 drivers/ide/ide-atapi.c 			debug_log("[cmd %x]: check condition\n", scsi_req(rq)->cmd[0]);
rq                500 drivers/ide/ide-atapi.c 		done = blk_rq_bytes(rq);
rq                508 drivers/ide/ide-atapi.c 		if (ata_misc_request(rq)) {
rq                509 drivers/ide/ide-atapi.c 			scsi_req(rq)->result = 0;
rq                513 drivers/ide/ide-atapi.c 			if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
rq                514 drivers/ide/ide-atapi.c 				if (scsi_req(rq)->result == 0)
rq                515 drivers/ide/ide-atapi.c 					scsi_req(rq)->result = -EIO;
rq                521 drivers/ide/ide-atapi.c 		ide_complete_rq(drive, error, blk_rq_bytes(rq));
rq                536 drivers/ide/ide-atapi.c 	if (ide_check_ireason(drive, rq, bcount, ireason, write))
rq                543 drivers/ide/ide-atapi.c 	scsi_req(rq)->resid_len -= done;
rq                551 drivers/ide/ide-atapi.c 		  scsi_req(rq)->cmd[0], done, bcount, scsi_req(rq)->resid_len);
rq                614 drivers/ide/ide-atapi.c 	struct request *rq = hwif->rq;
rq                634 drivers/ide/ide-atapi.c 		cmd_len = COMMAND_SIZE(scsi_req(rq)->cmd[0]);
rq                638 drivers/ide/ide-atapi.c 		timeout = rq->timeout;
rq                681 drivers/ide/ide-atapi.c 		hwif->tp_ops->output_data(drive, NULL, scsi_req(rq)->cmd, cmd_len);
rq                702 drivers/ide/ide-atapi.c 	struct request *rq = hwif->rq;
rq                710 drivers/ide/ide-atapi.c 		bcount = ide_cd_get_xferlen(rq);
rq                720 drivers/ide/ide-atapi.c 		bytes = blk_rq_bytes(rq);
rq                726 drivers/ide/ide-atapi.c 		scsi_req(rq)->resid_len = bcount;
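
Several ide-atapi.c hits above maintain scsi_req(rq)->resid_len: it starts at the full transfer size (bcount) and is decremented as each chunk completes, so the leftover value reports how many bytes never moved. A trivial standalone model of that residual accounting follows; struct xfer and its helpers are invented names used only for illustration.

#include <stdio.h>

struct xfer {
        unsigned int requested;
        unsigned int resid_len;
};

static void xfer_start(struct xfer *x, unsigned int bytes)
{
        x->requested = bytes;
        x->resid_len = bytes;           /* nothing moved yet */
}

static void xfer_chunk_done(struct xfer *x, unsigned int done)
{
        if (done > x->resid_len)        /* defensive clamp, illustration only */
                done = x->resid_len;
        x->resid_len -= done;
}

int main(void)
{
        struct xfer x;

        xfer_start(&x, 4096);
        xfer_chunk_done(&x, 2048);
        xfer_chunk_done(&x, 1024);
        printf("residual: %u of %u bytes\n", x.resid_len, x.requested);
        return 0;
}
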
rq                 97 drivers/ide/ide-cd.c static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
rq                102 drivers/ide/ide-cd.c 	if (!sense || !rq || (rq->rq_flags & RQF_QUIET))
rq                125 drivers/ide/ide-cd.c 		if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
rq                211 drivers/ide/ide-cd.c static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
rq                219 drivers/ide/ide-cd.c 	struct request *failed = ide_req(rq)->special;
rq                220 drivers/ide/ide-cd.c 	void *sense = bio_data(rq->bio);
rq                228 drivers/ide/ide-cd.c 		scsi_req(failed)->sense_len = scsi_req(rq)->sense_len;
rq                245 drivers/ide/ide-cd.c static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)
rq                250 drivers/ide/ide-cd.c 	if (!scsi_req(rq)->result)
rq                253 drivers/ide/ide-cd.c 	scsi_req(rq)->result = 1;
rq                261 drivers/ide/ide-cd.c 		blk_mq_requeue_request(rq, false);
rq                286 drivers/ide/ide-cd.c 	struct request *rq = hwif->rq;
rq                295 drivers/ide/ide-cd.c 				  rq->cmd[0], rq->cmd_type, err, stat);
rq                297 drivers/ide/ide-cd.c 	if (ata_sense_request(rq)) {
rq                303 drivers/ide/ide-cd.c 		rq->rq_flags |= RQF_FAILED;
rq                308 drivers/ide/ide-cd.c 	if (blk_rq_is_scsi(rq) && !scsi_req(rq)->result)
rq                309 drivers/ide/ide-cd.c 		scsi_req(rq)->result = SAM_STAT_CHECK_CONDITION;
rq                311 drivers/ide/ide-cd.c 	if (blk_noretry_request(rq))
rq                316 drivers/ide/ide-cd.c 		if (req_op(rq) == REQ_OP_WRITE) {
rq                317 drivers/ide/ide-cd.c 			if (ide_cd_breathe(drive, rq))
rq                322 drivers/ide/ide-cd.c 			if (!blk_rq_is_passthrough(rq) &&
rq                323 drivers/ide/ide-cd.c 			    !(rq->rq_flags & RQF_QUIET))
rq                332 drivers/ide/ide-cd.c 		if (blk_rq_is_passthrough(rq))
rq                339 drivers/ide/ide-cd.c 		if (++scsi_req(rq)->result > ERROR_MAX)
rq                350 drivers/ide/ide-cd.c 		if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT)
rq                358 drivers/ide/ide-cd.c 		if (!(rq->rq_flags & RQF_QUIET))
rq                367 drivers/ide/ide-cd.c 		if (!(rq->rq_flags & RQF_QUIET))
rq                374 drivers/ide/ide-cd.c 		if (!(rq->rq_flags & RQF_QUIET))
rq                380 drivers/ide/ide-cd.c 		if (blk_rq_is_passthrough(rq))
rq                386 drivers/ide/ide-cd.c 		} else if (++scsi_req(rq)->result > ERROR_MAX)
rq                391 drivers/ide/ide-cd.c 	if (blk_rq_is_passthrough(rq)) {
rq                392 drivers/ide/ide-cd.c 		rq->rq_flags |= RQF_FAILED;
rq                410 drivers/ide/ide-cd.c 		hwif->rq = NULL;
rq                411 drivers/ide/ide-cd.c 		return ide_queue_sense_rq(drive, rq) ? 2 : 1;
rq                418 drivers/ide/ide-cd.c 	struct request *rq = cmd->rq;
rq                420 drivers/ide/ide-cd.c 	ide_debug_log(IDE_DBG_FUNC, "rq->cmd[0]: 0x%x", rq->cmd[0]);
rq                426 drivers/ide/ide-cd.c 	if (scsi_req(rq)->cmd[0] == GPCMD_REQUEST_SENSE &&
rq                450 drivers/ide/ide-cd.c 		struct request *rq;
rq                454 drivers/ide/ide-cd.c 		rq = blk_get_request(drive->queue,
rq                456 drivers/ide/ide-cd.c 		memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
rq                457 drivers/ide/ide-cd.c 		ide_req(rq)->type = ATA_PRIV_PC;
rq                458 drivers/ide/ide-cd.c 		rq->rq_flags |= rq_flags;
rq                459 drivers/ide/ide-cd.c 		rq->timeout = timeout;
rq                461 drivers/ide/ide-cd.c 			error = blk_rq_map_kern(drive->queue, rq, buffer,
rq                464 drivers/ide/ide-cd.c 				blk_put_request(rq);
rq                469 drivers/ide/ide-cd.c 		blk_execute_rq(drive->queue, info->disk, rq, 0);
rq                470 drivers/ide/ide-cd.c 		error = scsi_req(rq)->result ? -EIO : 0;
rq                473 drivers/ide/ide-cd.c 			*bufflen = scsi_req(rq)->resid_len;
rq                474 drivers/ide/ide-cd.c 		scsi_normalize_sense(scsi_req(rq)->sense,
rq                475 drivers/ide/ide-cd.c 				     scsi_req(rq)->sense_len, sshdr);
rq                481 drivers/ide/ide-cd.c 		failed = (rq->rq_flags & RQF_FAILED) != 0;
rq                503 drivers/ide/ide-cd.c 		blk_put_request(rq);
rq                531 drivers/ide/ide-cd.c static bool ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
rq                534 drivers/ide/ide-cd.c 	long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
rq                535 drivers/ide/ide-cd.c 	unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
rq                536 drivers/ide/ide-cd.c 	struct scsi_request *req = scsi_req(rq);
rq                538 drivers/ide/ide-cd.c 	if (rq_data_dir(rq) == READ)
rq                564 drivers/ide/ide-cd.c static bool ide_cdrom_prep_pc(struct request *rq)
rq                566 drivers/ide/ide-cd.c 	u8 *c = scsi_req(rq)->cmd;
rq                577 drivers/ide/ide-cd.c 		scsi_req(rq)->cmd_len = 10;
rq                587 drivers/ide/ide-cd.c 		scsi_req(rq)->result = ILLEGAL_REQUEST;
rq                594 drivers/ide/ide-cd.c static bool ide_cdrom_prep_rq(ide_drive_t *drive, struct request *rq)
rq                596 drivers/ide/ide-cd.c 	if (!blk_rq_is_passthrough(rq)) {
rq                597 drivers/ide/ide-cd.c 		scsi_req_init(scsi_req(rq));
rq                599 drivers/ide/ide-cd.c 		return ide_cdrom_prep_fs(drive->queue, rq);
rq                600 drivers/ide/ide-cd.c 	} else if (blk_rq_is_scsi(rq))
rq                601 drivers/ide/ide-cd.c 		return ide_cdrom_prep_pc(rq);
rq                610 drivers/ide/ide-cd.c 	struct request *rq = hwif->rq;
rq                613 drivers/ide/ide-cd.c 	int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
rq                614 drivers/ide/ide-cd.c 	int sense = ata_sense_request(rq);
rq                619 drivers/ide/ide-cd.c 	ide_debug_log(IDE_DBG_PC, "cmd: 0x%x, write: 0x%x", rq->cmd[0], write);
rq                657 drivers/ide/ide-cd.c 	thislen = !blk_rq_is_passthrough(rq) ? len : cmd->nleft;
rq                666 drivers/ide/ide-cd.c 		switch (req_op(rq)) {
rq                678 drivers/ide/ide-cd.c 					rq->rq_flags |= RQF_FAILED;
rq                698 drivers/ide/ide-cd.c 				rq->rq_flags |= RQF_FAILED;
rq                706 drivers/ide/ide-cd.c 	rc = ide_check_ireason(drive, rq, len, ireason, write);
rq                714 drivers/ide/ide-cd.c 				  rq->cmd_type, ireason);
rq                730 drivers/ide/ide-cd.c 			scsi_req(rq)->sense_len += blen;
rq                735 drivers/ide/ide-cd.c 		if (blk_rq_is_passthrough(rq) || write == 0)
rq                740 drivers/ide/ide-cd.c 			blk_dump_rq_flags(rq, "cdrom_newpc_intr");
rq                744 drivers/ide/ide-cd.c 	switch (req_op(rq)) {
rq                747 drivers/ide/ide-cd.c 		timeout = rq->timeout;
rq                763 drivers/ide/ide-cd.c 	if (blk_rq_is_scsi(rq) && rc == 0) {
rq                764 drivers/ide/ide-cd.c 		scsi_req(rq)->resid_len = 0;
rq                765 drivers/ide/ide-cd.c 		blk_mq_end_request(rq, BLK_STS_OK);
rq                766 drivers/ide/ide-cd.c 		hwif->rq = NULL;
rq                769 drivers/ide/ide-cd.c 			ide_cd_complete_failed_rq(drive, rq);
rq                771 drivers/ide/ide-cd.c 		if (!blk_rq_is_passthrough(rq)) {
rq                775 drivers/ide/ide-cd.c 			if (uptodate <= 0 && scsi_req(rq)->result == 0)
rq                776 drivers/ide/ide-cd.c 				scsi_req(rq)->result = -EIO;
rq                779 drivers/ide/ide-cd.c 		if (uptodate == 0 && rq->bio)
rq                784 drivers/ide/ide-cd.c 		if (blk_rq_is_passthrough(rq)) {
rq                785 drivers/ide/ide-cd.c 			scsi_req(rq)->resid_len -= cmd->nbytes - cmd->nleft;
rq                787 drivers/ide/ide-cd.c 				scsi_req(rq)->resid_len += cmd->last_xfer_len;
rq                790 drivers/ide/ide-cd.c 		ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, blk_rq_bytes(rq));
rq                800 drivers/ide/ide-cd.c static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
rq                804 drivers/ide/ide-cd.c 	int write = rq_data_dir(rq) == WRITE;
rq                810 drivers/ide/ide-cd.c 				  rq->cmd[0], rq->cmd_flags, sectors_per_frame);
rq                821 drivers/ide/ide-cd.c 		ide_cdrom_prep_rq(drive, rq);
rq                825 drivers/ide/ide-cd.c 	if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
rq                826 drivers/ide/ide-cd.c 	    (blk_rq_pos(rq) & (sectors_per_frame - 1)))
rq                835 drivers/ide/ide-cd.c 	rq->timeout = ATAPI_WAIT_PC;
rq                840 drivers/ide/ide-cd.c static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
rq                844 drivers/ide/ide-cd.c 				  rq->cmd[0], rq->cmd_type);
rq                846 drivers/ide/ide-cd.c 	if (blk_rq_is_scsi(rq))
rq                847 drivers/ide/ide-cd.c 		rq->rq_flags |= RQF_QUIET;
rq                849 drivers/ide/ide-cd.c 		rq->rq_flags &= ~RQF_FAILED;
rq                854 drivers/ide/ide-cd.c 	if (rq->bio) {
rq                856 drivers/ide/ide-cd.c 		char *buf = bio_data(rq->bio);
rq                869 drivers/ide/ide-cd.c 		    || blk_rq_bytes(rq) & q->dma_pad_mask
rq                875 drivers/ide/ide-cd.c static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
rq                883 drivers/ide/ide-cd.c 				  rq->cmd[0], (unsigned long long)block);
rq                886 drivers/ide/ide-cd.c 		blk_dump_rq_flags(rq, "ide_cd_do_request");
rq                888 drivers/ide/ide-cd.c 	switch (req_op(rq)) {
rq                890 drivers/ide/ide-cd.c 		if (cdrom_start_rw(drive, rq) == ide_stopped)
rq                896 drivers/ide/ide-cd.c 		if (!rq->timeout)
rq                897 drivers/ide/ide-cd.c 			rq->timeout = ATAPI_WAIT_PC;
rq                898 drivers/ide/ide-cd.c 		cdrom_do_block_pc(drive, rq);
rq                902 drivers/ide/ide-cd.c 		switch (ide_req(rq)->type) {
rq                916 drivers/ide/ide-cd.c 	ide_prep_sense(drive, rq);
rq                920 drivers/ide/ide-cd.c 	if (rq_data_dir(rq))
rq                923 drivers/ide/ide-cd.c 	cmd.rq = rq;
rq                925 drivers/ide/ide-cd.c 	if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
rq                926 drivers/ide/ide-cd.c 		ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
rq                932 drivers/ide/ide-cd.c 	nsectors = blk_rq_sectors(rq);
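
Note on the ide-cd.c entries above: ide_cdrom_prep_fs() (lines 534-535) converts the request's 512-byte sector position and length into device-frame units by dividing by (hard_sect >> 9), and cdrom_start_rw() (lines 825-826) rejects requests that are not frame-aligned. A standalone illustration of that arithmetic follows; the 2048-byte frame size and all names are assumptions for illustration, not taken from the driver.

/* Standalone sketch of the sector->frame conversion used by
 * ide_cdrom_prep_fs(); a 2048-byte CD-ROM frame is assumed. */
#include <stdio.h>

int main(void)
{
	unsigned int hard_sect = 2048;            /* assumed frame size        */
	unsigned int spf = hard_sect >> 9;        /* sectors per frame = 4     */
	unsigned long long rq_pos = 1000;         /* example 512-byte sector   */
	unsigned long rq_sectors = 64;            /* example request length    */

	if ((rq_pos & (spf - 1)) || (rq_sectors & (spf - 1))) {
		printf("misaligned request, would be failed\n");
		return 1;
	}
	printf("frame %llu, %lu frames\n", rq_pos / spf, rq_sectors / spf);
	return 0;
}
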
rq                296 drivers/ide/ide-cd_ioctl.c 	struct request *rq;
rq                299 drivers/ide/ide-cd_ioctl.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
rq                300 drivers/ide/ide-cd_ioctl.c 	ide_req(rq)->type = ATA_PRIV_MISC;
rq                301 drivers/ide/ide-cd_ioctl.c 	rq->rq_flags = RQF_QUIET;
rq                302 drivers/ide/ide-cd_ioctl.c 	blk_execute_rq(drive->queue, cd->disk, rq, 0);
rq                303 drivers/ide/ide-cd_ioctl.c 	ret = scsi_req(rq)->result ? -EIO : 0;
rq                304 drivers/ide/ide-cd_ioctl.c 	blk_put_request(rq);
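
The ide-cd_ioctl.c entries above (lines 296-304) show the driver-private request pattern that recurs throughout this listing: allocate a REQ_OP_DRV_IN request, tag it ATA_PRIV_MISC, execute it synchronously, map the SCSI result to an errno, and release it. A hedged kernel-style sketch of that pattern; the function name and its parameters are illustrative, only the block/IDE/SCSI calls come from the listed lines.

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/ide.h>
#include <scsi/scsi_request.h>

/* Sketch, modelled on the ide-cd_ioctl.c lines above. */
static int example_issue_misc_rq(ide_drive_t *drive, struct gendisk *disk)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ide_req(rq)->type = ATA_PRIV_MISC;
	rq->rq_flags = RQF_QUIET;		/* suppress error logging */

	blk_execute_rq(drive->queue, disk, rq, 0);	/* waits for completion */
	ret = scsi_req(rq)->result ? -EIO : 0;

	blk_put_request(rq);
	return ret;
}
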
rq                163 drivers/ide/ide-devsets.c 	struct request *rq;
rq                169 drivers/ide/ide-devsets.c 	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
rq                170 drivers/ide/ide-devsets.c 	ide_req(rq)->type = ATA_PRIV_MISC;
rq                171 drivers/ide/ide-devsets.c 	scsi_req(rq)->cmd_len = 5;
rq                172 drivers/ide/ide-devsets.c 	scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
rq                173 drivers/ide/ide-devsets.c 	*(int *)&scsi_req(rq)->cmd[1] = arg;
rq                174 drivers/ide/ide-devsets.c 	ide_req(rq)->special = setting->set;
rq                176 drivers/ide/ide-devsets.c 	blk_execute_rq(q, NULL, rq, 0);
rq                177 drivers/ide/ide-devsets.c 	ret = scsi_req(rq)->result;
rq                178 drivers/ide/ide-devsets.c 	blk_put_request(rq);
rq                183 drivers/ide/ide-devsets.c ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
rq                185 drivers/ide/ide-devsets.c 	int err, (*setfunc)(ide_drive_t *, int) = ide_req(rq)->special;
rq                187 drivers/ide/ide-devsets.c 	err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]);
rq                189 drivers/ide/ide-devsets.c 		scsi_req(rq)->result = err;
rq                190 drivers/ide/ide-devsets.c 	ide_complete_rq(drive, 0, blk_rq_bytes(rq));
rq                 82 drivers/ide/ide-disk.c static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
rq                 86 drivers/ide/ide-disk.c 	u16 nsectors		= (u16)blk_rq_sectors(rq);
rq                 94 drivers/ide/ide-disk.c 		if (block + blk_rq_sectors(rq) > 1ULL << 28)
rq                152 drivers/ide/ide-disk.c 	if (rq_data_dir(rq))
rq                156 drivers/ide/ide-disk.c 	cmd.rq = rq;
rq                182 drivers/ide/ide-disk.c static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
rq                188 drivers/ide/ide-disk.c 	BUG_ON(blk_rq_is_passthrough(rq));
rq                190 drivers/ide/ide-disk.c 	ledtrig_disk_activity(rq_data_dir(rq) == WRITE);
rq                193 drivers/ide/ide-disk.c 		 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
rq                194 drivers/ide/ide-disk.c 		 (unsigned long long)block, blk_rq_sectors(rq));
rq                197 drivers/ide/ide-disk.c 		hwif->rw_disk(drive, rq);
rq                199 drivers/ide/ide-disk.c 	return __ide_do_rw_disk(drive, rq, block);
rq                430 drivers/ide/ide-disk.c static bool idedisk_prep_rq(ide_drive_t *drive, struct request *rq)
rq                434 drivers/ide/ide-disk.c 	if (req_op(rq) != REQ_OP_FLUSH)
rq                437 drivers/ide/ide-disk.c 	if (ide_req(rq)->special) {
rq                438 drivers/ide/ide-disk.c 		cmd = ide_req(rq)->special;
rq                455 drivers/ide/ide-disk.c 	rq->cmd_flags &= ~REQ_OP_MASK;
rq                456 drivers/ide/ide-disk.c 	rq->cmd_flags |= REQ_OP_DRV_OUT;
rq                457 drivers/ide/ide-disk.c 	ide_req(rq)->type = ATA_PRIV_TASKFILE;
rq                458 drivers/ide/ide-disk.c 	ide_req(rq)->special = cmd;
rq                459 drivers/ide/ide-disk.c 	cmd->rq = rq;
rq                472 drivers/ide/ide-disk.c 	struct request *rq;
rq                480 drivers/ide/ide-disk.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
rq                481 drivers/ide/ide-disk.c 	ide_req(rq)->type = ATA_PRIV_TASKFILE;
rq                485 drivers/ide/ide-disk.c 	blk_execute_rq(drive->queue, NULL, rq, 0);
rq                486 drivers/ide/ide-disk.c 	blk_put_request(rq);
rq                108 drivers/ide/ide-dma.c 						blk_rq_sectors(cmd->rq) << 9);
rq                490 drivers/ide/ide-dma.c 	if (hwif->rq)
rq                491 drivers/ide/ide-dma.c 		scsi_req(hwif->rq)->result = 0;
rq                  8 drivers/ide/ide-eh.c static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
rq                 16 drivers/ide/ide-eh.c 		scsi_req(rq)->result |= ERROR_RESET;
rq                 29 drivers/ide/ide-eh.c 			scsi_req(rq)->result = ERROR_MAX;
rq                 32 drivers/ide/ide-eh.c 			scsi_req(rq)->result |= ERROR_RECAL;
rq                 36 drivers/ide/ide-eh.c 	if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
rq                 43 drivers/ide/ide-eh.c 	if (scsi_req(rq)->result >= ERROR_MAX || blk_noretry_request(rq)) {
rq                 44 drivers/ide/ide-eh.c 		ide_kill_rq(drive, rq);
rq                 49 drivers/ide/ide-eh.c 		scsi_req(rq)->result |= ERROR_RESET;
rq                 51 drivers/ide/ide-eh.c 	if ((scsi_req(rq)->result & ERROR_RESET) == ERROR_RESET) {
rq                 52 drivers/ide/ide-eh.c 		++scsi_req(rq)->result;
rq                 56 drivers/ide/ide-eh.c 	if ((scsi_req(rq)->result & ERROR_RECAL) == ERROR_RECAL)
rq                 59 drivers/ide/ide-eh.c 	++scsi_req(rq)->result;
rq                 64 drivers/ide/ide-eh.c static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq,
rq                 72 drivers/ide/ide-eh.c 		scsi_req(rq)->result |= ERROR_RESET;
rq                 81 drivers/ide/ide-eh.c 	if (scsi_req(rq)->result >= ERROR_MAX) {
rq                 82 drivers/ide/ide-eh.c 		ide_kill_rq(drive, rq);
rq                 84 drivers/ide/ide-eh.c 		if ((scsi_req(rq)->result & ERROR_RESET) == ERROR_RESET) {
rq                 85 drivers/ide/ide-eh.c 			++scsi_req(rq)->result;
rq                 88 drivers/ide/ide-eh.c 		++scsi_req(rq)->result;
rq                 94 drivers/ide/ide-eh.c static ide_startstop_t __ide_error(ide_drive_t *drive, struct request *rq,
rq                 98 drivers/ide/ide-eh.c 		return ide_ata_error(drive, rq, stat, err);
rq                 99 drivers/ide/ide-eh.c 	return ide_atapi_error(drive, rq, stat, err);
rq                117 drivers/ide/ide-eh.c 	struct request *rq;
rq                122 drivers/ide/ide-eh.c 	rq = drive->hwif->rq;
rq                123 drivers/ide/ide-eh.c 	if (rq == NULL)
rq                127 drivers/ide/ide-eh.c 	if (blk_rq_is_passthrough(rq)) {
rq                128 drivers/ide/ide-eh.c 		if (ata_taskfile_request(rq)) {
rq                129 drivers/ide/ide-eh.c 			struct ide_cmd *cmd = ide_req(rq)->special;
rq                133 drivers/ide/ide-eh.c 		} else if (ata_pm_request(rq)) {
rq                134 drivers/ide/ide-eh.c 			scsi_req(rq)->result = 1;
rq                135 drivers/ide/ide-eh.c 			ide_complete_pm_rq(drive, rq);
rq                138 drivers/ide/ide-eh.c 		scsi_req(rq)->result = err;
rq                139 drivers/ide/ide-eh.c 		ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
rq                143 drivers/ide/ide-eh.c 	return __ide_error(drive, rq, stat, err);
rq                149 drivers/ide/ide-eh.c 	struct request *rq = drive->hwif->rq;
rq                151 drivers/ide/ide-eh.c 	if (rq && ata_misc_request(rq) &&
rq                152 drivers/ide/ide-eh.c 	    scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
rq                153 drivers/ide/ide-eh.c 		if (err <= 0 && scsi_req(rq)->result == 0)
rq                154 drivers/ide/ide-eh.c 			scsi_req(rq)->result = -EIO;
rq                155 drivers/ide/ide-eh.c 		ide_complete_rq(drive, err, blk_rq_bytes(rq));
rq                 67 drivers/ide/ide-floppy.c 	struct request *rq = pc->rq;
rq                 76 drivers/ide/ide-floppy.c 	    blk_rq_is_scsi(rq))
rq                 80 drivers/ide/ide-floppy.c 		u8 *buf = bio_data(rq->bio);
rq                101 drivers/ide/ide-floppy.c 	if (ata_misc_request(rq))
rq                102 drivers/ide/ide-floppy.c 		scsi_req(rq)->result = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
rq                137 drivers/ide/ide-floppy.c 		unsigned int done = blk_rq_bytes(drive->hwif->rq);
rq                192 drivers/ide/ide-floppy.c 				    struct ide_atapi_pc *pc, struct request *rq,
rq                197 drivers/ide/ide-floppy.c 	int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
rq                198 drivers/ide/ide-floppy.c 	int cmd = rq_data_dir(rq);
rq                207 drivers/ide/ide-floppy.c 	memcpy(scsi_req(rq)->cmd, pc->c, 12);
rq                209 drivers/ide/ide-floppy.c 	pc->rq = rq;
rq                217 drivers/ide/ide-floppy.c 		struct ide_atapi_pc *pc, struct request *rq)
rq                220 drivers/ide/ide-floppy.c 	memcpy(pc->c, scsi_req(rq)->cmd, sizeof(pc->c));
rq                221 drivers/ide/ide-floppy.c 	pc->rq = rq;
rq                222 drivers/ide/ide-floppy.c 	if (blk_rq_bytes(rq)) {
rq                224 drivers/ide/ide-floppy.c 		if (rq_data_dir(rq) == WRITE)
rq                230 drivers/ide/ide-floppy.c 					     struct request *rq, sector_t block)
rq                236 drivers/ide/ide-floppy.c 	ide_debug_log(IDE_DBG_FUNC, "enter, cmd: 0x%x\n", rq->cmd[0]);
rq                239 drivers/ide/ide-floppy.c 		blk_dump_rq_flags(rq, (rq->rq_disk
rq                240 drivers/ide/ide-floppy.c 					? rq->rq_disk->disk_name
rq                243 drivers/ide/ide-floppy.c 	if (scsi_req(rq)->result >= ERROR_MAX) {
rq                250 drivers/ide/ide-floppy.c 		if (ata_misc_request(rq)) {
rq                251 drivers/ide/ide-floppy.c 			scsi_req(rq)->result = 0;
rq                252 drivers/ide/ide-floppy.c 			ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
rq                258 drivers/ide/ide-floppy.c 	switch (req_op(rq)) {
rq                260 drivers/ide/ide-floppy.c 		if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
rq                261 drivers/ide/ide-floppy.c 		    (blk_rq_sectors(rq) % floppy->bs_factor)) {
rq                267 drivers/ide/ide-floppy.c 		idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
rq                272 drivers/ide/ide-floppy.c 		idefloppy_blockpc_cmd(floppy, pc, rq);
rq                276 drivers/ide/ide-floppy.c 		switch (ide_req(rq)->type) {
rq                279 drivers/ide/ide-floppy.c 			pc = (struct ide_atapi_pc *)ide_req(rq)->special;
rq                286 drivers/ide/ide-floppy.c 	ide_prep_sense(drive, rq);
rq                290 drivers/ide/ide-floppy.c 	if (rq_data_dir(rq))
rq                293 drivers/ide/ide-floppy.c 	cmd.rq = rq;
rq                295 drivers/ide/ide-floppy.c 	if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
rq                296 drivers/ide/ide-floppy.c 		ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
rq                300 drivers/ide/ide-floppy.c 	pc->rq = rq;
rq                305 drivers/ide/ide-floppy.c 	if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
rq                306 drivers/ide/ide-floppy.c 		scsi_req(rq)->result = -EIO;
rq                307 drivers/ide/ide-floppy.c 	ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
rq                161 drivers/ide/ide-gd.c 					 struct request *rq, sector_t sector)
rq                163 drivers/ide/ide-gd.c 	return drive->disk_ops->do_request(drive, rq, sector);
rq                 57 drivers/ide/ide-io.c int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
rq                 70 drivers/ide/ide-io.c 	if (!blk_update_request(rq, error, nr_bytes)) {
rq                 71 drivers/ide/ide-io.c 		if (rq == drive->sense_rq) {
rq                 76 drivers/ide/ide-io.c 		__blk_mq_end_request(rq, error);
rq                 88 drivers/ide/ide-io.c 	struct request *rq = cmd->rq;
rq                115 drivers/ide/ide-io.c 	if (rq && ata_taskfile_request(rq)) {
rq                116 drivers/ide/ide-io.c 		struct ide_cmd *orig_cmd = ide_req(rq)->special;
rq                128 drivers/ide/ide-io.c 	struct request *rq = hwif->rq;
rq                135 drivers/ide/ide-io.c 	if (blk_noretry_request(rq) && error)
rq                136 drivers/ide/ide-io.c 		nr_bytes = blk_rq_sectors(rq) << 9;
rq                138 drivers/ide/ide-io.c 	rc = ide_end_rq(drive, rq, error, nr_bytes);
rq                140 drivers/ide/ide-io.c 		hwif->rq = NULL;
rq                146 drivers/ide/ide-io.c void ide_kill_rq(ide_drive_t *drive, struct request *rq)
rq                148 drivers/ide/ide-io.c 	u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
rq                154 drivers/ide/ide-io.c 		scsi_req(rq)->result = 0;
rq                157 drivers/ide/ide-io.c 			scsi_req(rq)->result = IDE_DRV_ERROR_GENERAL;
rq                158 drivers/ide/ide-io.c 		else if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
rq                159 drivers/ide/ide-io.c 			scsi_req(rq)->result = -EIO;
rq                162 drivers/ide/ide-io.c 	ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
rq                237 drivers/ide/ide-io.c 	struct request *rq = cmd->rq;
rq                239 drivers/ide/ide-io.c 	cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
rq                264 drivers/ide/ide-io.c 		struct request *rq)
rq                266 drivers/ide/ide-io.c 	struct ide_cmd *cmd = ide_req(rq)->special;
rq                270 drivers/ide/ide-io.c 			ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
rq                284 drivers/ide/ide-io.c 	scsi_req(rq)->result = 0;
rq                285 drivers/ide/ide-io.c 	ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
rq                290 drivers/ide/ide-io.c static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
rq                292 drivers/ide/ide-io.c 	u8 cmd = scsi_req(rq)->cmd[0];
rq                297 drivers/ide/ide-io.c 		return ide_do_park_unpark(drive, rq);
rq                299 drivers/ide/ide-io.c 		return ide_do_devset(drive, rq);
rq                316 drivers/ide/ide-io.c static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
rq                322 drivers/ide/ide-io.c 		drive->hwif->name, (unsigned long) rq);
rq                327 drivers/ide/ide-io.c 		rq->rq_flags |= RQF_FAILED;
rq                331 drivers/ide/ide-io.c 	if (drive->prep_rq && !drive->prep_rq(drive, rq))
rq                334 drivers/ide/ide-io.c 	if (ata_pm_request(rq))
rq                335 drivers/ide/ide-io.c 		ide_check_pm_state(drive, rq);
rq                354 drivers/ide/ide-io.c 		if (ata_taskfile_request(rq))
rq                355 drivers/ide/ide-io.c 			return execute_drive_cmd(drive, rq);
rq                356 drivers/ide/ide-io.c 		else if (ata_pm_request(rq)) {
rq                357 drivers/ide/ide-io.c 			struct ide_pm_state *pm = ide_req(rq)->special;
rq                362 drivers/ide/ide-io.c 			startstop = ide_start_power_step(drive, rq);
rq                365 drivers/ide/ide-io.c 				ide_complete_pm_rq(drive, rq);
rq                367 drivers/ide/ide-io.c 		} else if (!rq->rq_disk && ata_misc_request(rq))
rq                376 drivers/ide/ide-io.c 			return ide_special_rq(drive, rq);
rq                378 drivers/ide/ide-io.c 		drv = *(struct ide_driver **)rq->rq_disk->private_data;
rq                380 drivers/ide/ide-io.c 		return drv->do_request(drive, rq, blk_rq_pos(rq));
rq                384 drivers/ide/ide-io.c 	ide_kill_rq(drive, rq);
rq                444 drivers/ide/ide-io.c void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
rq                449 drivers/ide/ide-io.c 	if (rq) {
rq                450 drivers/ide/ide-io.c 		blk_mq_requeue_request(rq, false);
rq                456 drivers/ide/ide-io.c blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
rq                463 drivers/ide/ide-io.c 	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
rq                464 drivers/ide/ide-io.c 		rq->rq_flags |= RQF_DONTPREP;
rq                465 drivers/ide/ide-io.c 		ide_req(rq)->special = NULL;
rq                479 drivers/ide/ide-io.c 		WARN_ON_ONCE(hwif->rq);
rq                522 drivers/ide/ide-io.c 		    ata_pm_request(rq) == 0 &&
rq                523 drivers/ide/ide-io.c 		    (rq->rq_flags & RQF_PREEMPT) == 0) {
rq                529 drivers/ide/ide-io.c 		scsi_req(rq)->resid_len = blk_rq_bytes(rq);
rq                530 drivers/ide/ide-io.c 		hwif->rq = rq;
rq                533 drivers/ide/ide-io.c 		startstop = start_request(drive, rq);
rq                537 drivers/ide/ide-io.c 			rq = hwif->rq;
rq                538 drivers/ide/ide-io.c 			hwif->rq = NULL;
rq                539 drivers/ide/ide-io.c 			if (rq)
rq                547 drivers/ide/ide-io.c 			list_add(&rq->queuelist, &drive->rq_list);
rq                551 drivers/ide/ide-io.c 			ide_requeue_and_plug(drive, rq);
rq                557 drivers/ide/ide-io.c 	if (rq == NULL)
rq                578 drivers/ide/ide-io.c 	blk_mq_start_request(bd->rq);
rq                579 drivers/ide/ide-io.c 	return ide_issue_rq(drive, bd->rq, false);
rq                690 drivers/ide/ide-io.c 			rq_in_flight = hwif->rq;
rq                691 drivers/ide/ide-io.c 			hwif->rq = NULL;
rq                867 drivers/ide/ide-io.c 		rq_in_flight = hwif->rq;
rq                868 drivers/ide/ide-io.c 		hwif->rq = NULL;
rq                900 drivers/ide/ide-io.c void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
rq                903 drivers/ide/ide-io.c 	list_add_tail(&rq->queuelist, &drive->rq_list);
rq                127 drivers/ide/ide-ioctls.c 		struct request *rq;
rq                129 drivers/ide/ide-ioctls.c 		rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
rq                130 drivers/ide/ide-ioctls.c 		ide_req(rq)->type = ATA_PRIV_TASKFILE;
rq                131 drivers/ide/ide-ioctls.c 		blk_execute_rq(drive->queue, NULL, rq, 0);
rq                132 drivers/ide/ide-ioctls.c 		err = scsi_req(rq)->result ? -EIO : 0;
rq                133 drivers/ide/ide-ioctls.c 		blk_put_request(rq);
rq                223 drivers/ide/ide-ioctls.c 	struct request *rq;
rq                226 drivers/ide/ide-ioctls.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
rq                227 drivers/ide/ide-ioctls.c 	ide_req(rq)->type = ATA_PRIV_MISC;
rq                228 drivers/ide/ide-ioctls.c 	scsi_req(rq)->cmd_len = 1;
rq                229 drivers/ide/ide-ioctls.c 	scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
rq                230 drivers/ide/ide-ioctls.c 	blk_execute_rq(drive->queue, NULL, rq, 1);
rq                231 drivers/ide/ide-ioctls.c 	ret = scsi_req(rq)->result;
rq                232 drivers/ide/ide-ioctls.c 	blk_put_request(rq);
rq                 69 drivers/ide/ide-lib.c 		struct request *rq = drive->hwif->rq;
rq                 73 drivers/ide/ide-lib.c 		if (rq)
rq                 75 drivers/ide/ide-lib.c 			       (unsigned long long)blk_rq_pos(rq));
rq                 14 drivers/ide/ide-park.c 	struct request *rq;
rq                 35 drivers/ide/ide-park.c 	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
rq                 36 drivers/ide/ide-park.c 	scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
rq                 37 drivers/ide/ide-park.c 	scsi_req(rq)->cmd_len = 1;
rq                 38 drivers/ide/ide-park.c 	ide_req(rq)->type = ATA_PRIV_MISC;
rq                 39 drivers/ide/ide-park.c 	ide_req(rq)->special = &timeout;
rq                 40 drivers/ide/ide-park.c 	blk_execute_rq(q, NULL, rq, 1);
rq                 41 drivers/ide/ide-park.c 	rc = scsi_req(rq)->result ? -EIO : 0;
rq                 42 drivers/ide/ide-park.c 	blk_put_request(rq);
rq                 50 drivers/ide/ide-park.c 	rq = blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
rq                 51 drivers/ide/ide-park.c 	if (IS_ERR(rq))
rq                 54 drivers/ide/ide-park.c 	scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
rq                 55 drivers/ide/ide-park.c 	scsi_req(rq)->cmd_len = 1;
rq                 56 drivers/ide/ide-park.c 	ide_req(rq)->type = ATA_PRIV_MISC;
rq                 58 drivers/ide/ide-park.c 	ide_insert_request_head(drive, rq);
rq                 65 drivers/ide/ide-park.c ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq)
rq                 71 drivers/ide/ide-park.c 	if (scsi_req(rq)->cmd[0] == REQ_PARK_HEADS) {
rq                 72 drivers/ide/ide-park.c 		drive->sleep = *(unsigned long *)ide_req(rq)->special;
rq                 87 drivers/ide/ide-park.c 	cmd.rq = rq;
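
The ide-park.c entries above show the two halves of head parking: a REQ_PARK_HEADS misc request carries its timeout through ide_req(rq)->special and is executed synchronously, and the matching REQ_UNPARK_HEADS request is allocated with BLK_MQ_REQ_NOWAIT and pushed to the front of the drive's queue via ide_insert_request_head() (lines 50-58). A hedged sketch of the unpark half; error handling and locking around it are omitted and the function name is illustrative.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/ide.h>
#include <scsi/scsi_request.h>

/* Sketch: queue an unpark command without sleeping and let it jump to
 * the head of the drive's request list, as the listed lines do. */
static void example_queue_unpark(ide_drive_t *drive)
{
	struct request *rq;

	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return;

	scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
	scsi_req(rq)->cmd_len = 1;
	ide_req(rq)->type = ATA_PRIV_MISC;

	ide_insert_request_head(drive, rq);
}
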
rq                 11 drivers/ide/ide-pm.c 	struct request *rq;
rq                 22 drivers/ide/ide-pm.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
rq                 23 drivers/ide/ide-pm.c 	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
rq                 24 drivers/ide/ide-pm.c 	ide_req(rq)->special = &rqpm;
rq                 30 drivers/ide/ide-pm.c 	blk_execute_rq(drive->queue, NULL, rq, 0);
rq                 31 drivers/ide/ide-pm.c 	ret = scsi_req(rq)->result ? -EIO : 0;
rq                 32 drivers/ide/ide-pm.c 	blk_put_request(rq);
rq                 43 drivers/ide/ide-pm.c static int ide_pm_execute_rq(struct request *rq)
rq                 45 drivers/ide/ide-pm.c 	struct request_queue *q = rq->q;
rq                 48 drivers/ide/ide-pm.c 		rq->rq_flags |= RQF_QUIET;
rq                 49 drivers/ide/ide-pm.c 		scsi_req(rq)->result = -ENXIO;
rq                 50 drivers/ide/ide-pm.c 		blk_mq_end_request(rq, BLK_STS_OK);
rq                 53 drivers/ide/ide-pm.c 	blk_execute_rq(q, NULL, rq, true);
rq                 55 drivers/ide/ide-pm.c 	return scsi_req(rq)->result ? -EIO : 0;
rq                 63 drivers/ide/ide-pm.c 	struct request *rq;
rq                 80 drivers/ide/ide-pm.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
rq                 81 drivers/ide/ide-pm.c 	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
rq                 82 drivers/ide/ide-pm.c 	ide_req(rq)->special = &rqpm;
rq                 86 drivers/ide/ide-pm.c 	err = ide_pm_execute_rq(rq);
rq                 87 drivers/ide/ide-pm.c 	blk_put_request(rq);
rq                 99 drivers/ide/ide-pm.c void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
rq                101 drivers/ide/ide-pm.c 	struct ide_pm_state *pm = ide_req(rq)->special;
rq                129 drivers/ide/ide-pm.c ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
rq                131 drivers/ide/ide-pm.c 	struct ide_pm_state *pm = ide_req(rq)->special;
rq                141 drivers/ide/ide-pm.c 			ide_complete_power_step(drive, rq);
rq                160 drivers/ide/ide-pm.c 			ide_complete_power_step(drive, rq);
rq                200 drivers/ide/ide-pm.c void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
rq                203 drivers/ide/ide-pm.c 	struct ide_pm_state *pm = ide_req(rq)->special;
rq                205 drivers/ide/ide-pm.c 	ide_complete_power_step(drive, rq);
rq                211 drivers/ide/ide-pm.c 	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
rq                213 drivers/ide/ide-pm.c 	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
rq                218 drivers/ide/ide-pm.c 	drive->hwif->rq = NULL;
rq                220 drivers/ide/ide-pm.c 	blk_mq_end_request(rq, BLK_STS_OK);
rq                223 drivers/ide/ide-pm.c void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
rq                225 drivers/ide/ide-pm.c 	struct ide_pm_state *pm = ide_req(rq)->special;
rq                227 drivers/ide/ide-pm.c 	if (blk_rq_is_private(rq) &&
rq                228 drivers/ide/ide-pm.c 	    ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
rq                232 drivers/ide/ide-pm.c 	else if (blk_rq_is_private(rq) &&
rq                233 drivers/ide/ide-pm.c 	         ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
rq                746 drivers/ide/ide-probe.c static void ide_initialize_rq(struct request *rq)
rq                748 drivers/ide/ide-probe.c 	struct ide_request *req = blk_mq_rq_to_pdu(rq);
rq               1162 drivers/ide/ide-probe.c 	struct request *rq;
rq               1171 drivers/ide/ide-probe.c 		rq = list_first_entry(&drive->rq_list, struct request, queuelist);
rq               1172 drivers/ide/ide-probe.c 		list_del_init(&rq->queuelist);
rq               1175 drivers/ide/ide-probe.c 		ret = ide_issue_rq(drive, rq, true);
rq                272 drivers/ide/ide-tape.c 	struct request *rq = drive->hwif->rq;
rq                273 drivers/ide/ide-tape.c 	u8 *sense = bio_data(rq->bio);
rq                281 drivers/ide/ide-tape.c 		      rq->cmd[0], tape->sense_key, tape->asc, tape->ascq);
rq                285 drivers/ide/ide-tape.c 		scsi_req(rq)->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
rq                319 drivers/ide/ide-tape.c 		    (blk_rq_bytes(rq) - scsi_req(rq)->resid_len))
rq                330 drivers/ide/ide-tape.c 	struct request *rq = drive->hwif->rq;
rq                334 drivers/ide/ide-tape.c 	ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x, dsc: %d, err: %d", rq->cmd[0],
rq                351 drivers/ide/ide-tape.c 			(blk_rq_bytes(rq) - scsi_req(rq)->resid_len) / tape->blk_size;
rq                369 drivers/ide/ide-tape.c 	scsi_req(rq)->result = err;
rq                383 drivers/ide/ide-tape.c 		      drive->hwif->rq->cmd[0], tape->dsc_poll_freq);
rq                444 drivers/ide/ide-tape.c 	struct request *rq = drive->hwif->rq;
rq                477 drivers/ide/ide-tape.c 		ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
rq                544 drivers/ide/ide-tape.c 				   struct ide_atapi_pc *pc, struct request *rq,
rq                547 drivers/ide/ide-tape.c 	unsigned int length = blk_rq_sectors(rq) / (tape->blk_size >> 9);
rq                553 drivers/ide/ide-tape.c 	if (blk_rq_bytes(rq) == tape->buffer_size)
rq                563 drivers/ide/ide-tape.c 	memcpy(scsi_req(rq)->cmd, pc->c, 12);
rq                567 drivers/ide/ide-tape.c 					  struct request *rq, sector_t block)
rq                573 drivers/ide/ide-tape.c 	struct scsi_request *req = scsi_req(rq);
rq                577 drivers/ide/ide-tape.c 		      req->cmd[0], (unsigned long long)blk_rq_pos(rq),
rq                578 drivers/ide/ide-tape.c 		      blk_rq_sectors(rq));
rq                580 drivers/ide/ide-tape.c 	BUG_ON(!blk_rq_is_private(rq));
rq                581 drivers/ide/ide-tape.c 	BUG_ON(ide_req(rq)->type != ATA_PRIV_MISC &&
rq                582 drivers/ide/ide-tape.c 	       ide_req(rq)->type != ATA_PRIV_SENSE);
rq                633 drivers/ide/ide-tape.c 		ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
rq                638 drivers/ide/ide-tape.c 		ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
rq                642 drivers/ide/ide-tape.c 		pc = (struct ide_atapi_pc *)ide_req(rq)->special;
rq                655 drivers/ide/ide-tape.c 	ide_prep_sense(drive, rq);
rq                659 drivers/ide/ide-tape.c 	if (rq_data_dir(rq))
rq                662 drivers/ide/ide-tape.c 	cmd.rq = rq;
rq                664 drivers/ide/ide-tape.c 	ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
rq                849 drivers/ide/ide-tape.c 	struct request *rq;
rq                857 drivers/ide/ide-tape.c 	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
rq                858 drivers/ide/ide-tape.c 	ide_req(rq)->type = ATA_PRIV_MISC;
rq                859 drivers/ide/ide-tape.c 	scsi_req(rq)->cmd[13] = cmd;
rq                860 drivers/ide/ide-tape.c 	rq->rq_disk = tape->disk;
rq                861 drivers/ide/ide-tape.c 	rq->__sector = tape->first_frame;
rq                864 drivers/ide/ide-tape.c 		ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
rq                870 drivers/ide/ide-tape.c 	blk_execute_rq(drive->queue, tape->disk, rq, 0);
rq                873 drivers/ide/ide-tape.c 	size -= scsi_req(rq)->resid_len;
rq                881 drivers/ide/ide-tape.c 	if (scsi_req(rq)->result == IDE_DRV_ERROR_GENERAL)
rq                884 drivers/ide/ide-tape.c 	blk_put_request(rq);
rq                190 drivers/ide/ide-taskfile.c 		struct request *rq = hwif->rq;
rq                192 drivers/ide/ide-taskfile.c 		if (ata_pm_request(rq))
rq                193 drivers/ide/ide-taskfile.c 			ide_complete_pm_rq(drive, rq);
rq                283 drivers/ide/ide-taskfile.c 		scsi_req(cmd->rq)->result = 0;
rq                320 drivers/ide/ide-taskfile.c 	struct request *rq = drive->hwif->rq;
rq                325 drivers/ide/ide-taskfile.c 	scsi_req(rq)->result = err;
rq                332 drivers/ide/ide-taskfile.c 	ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
rq                390 drivers/ide/ide-taskfile.c 		ide_complete_rq(drive, BLK_STS_OK, blk_rq_sectors(cmd->rq) << 9);
rq                423 drivers/ide/ide-taskfile.c 	struct request *rq;
rq                426 drivers/ide/ide-taskfile.c 	rq = blk_get_request(drive->queue,
rq                429 drivers/ide/ide-taskfile.c 	ide_req(rq)->type = ATA_PRIV_TASKFILE;
rq                438 drivers/ide/ide-taskfile.c 		error = blk_rq_map_kern(drive->queue, rq, buf,
rq                444 drivers/ide/ide-taskfile.c 	ide_req(rq)->special = cmd;
rq                445 drivers/ide/ide-taskfile.c 	cmd->rq = rq;
rq                447 drivers/ide/ide-taskfile.c 	blk_execute_rq(drive->queue, NULL, rq, 0);
rq                448 drivers/ide/ide-taskfile.c 	error = scsi_req(rq)->result ? -EIO : 0;
rq                450 drivers/ide/ide-taskfile.c 	blk_put_request(rq);
rq                153 drivers/ide/pdc202xx_old.c 		struct request *rq	= hwif->rq;
rq                160 drivers/ide/pdc202xx_old.c 		word_count = (blk_rq_sectors(rq) << 8);
rq                161 drivers/ide/pdc202xx_old.c 		word_count = (rq_data_dir(rq) == READ) ?
rq                117 drivers/ide/tc86c001.c 	unsigned long nsectors	= blk_rq_sectors(hwif->rq);
rq                309 drivers/ide/tx4939ide.c 	tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
rq                869 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
rq                876 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
rq                877 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
rq                878 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qplib_qp->rq.sg_info.nmap = umem->nmap;
rq                964 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
rq                972 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
rq                973 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
rq                975 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qp->qplib_qp.rq.q_full_delta = 1;
rq               1081 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qp->qplib_qp.rq.max_wqe = 0;
rq               1087 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
rq               1090 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
rq               1093 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
rq               1094 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
rq               1095 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
rq               1108 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
rq               1109 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
rq               1110 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
rq               1693 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
rq               1749 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		if (qp->qplib_qp.rq.max_wqe) {
rq               1751 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			qp->qplib_qp.rq.max_wqe =
rq               1753 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
rq               1755 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
rq               1813 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qp_attr->rq_psn = qplib_qp->rq.psn;
rq               1823 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
rq               1824 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
rq               2436 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
rq               2473 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
rq                 83 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		if (!qp->rq.flushed) {
rq                 87 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			qp->rq.flushed = true;
rq                130 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		if (qp->rq.flushed) {
rq                131 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			qp->rq.flushed = false;
rq                146 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	qp->rq.hwq.prod = 0;
rq                147 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	qp->rq.hwq.cons = 0;
rq                177 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *rq = &qp->rq;
rq                182 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				  rq->hwq.max_elements * qp->rq_hdr_buf_size,
rq                199 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *rq = &qp->rq;
rq                216 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
rq                218 drivers/infiniband/hw/bnxt_re/qplib_fp.c 						    rq->hwq.max_elements *
rq                729 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *rq = &qp->rq;
rq                780 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (rq->max_wqe) {
rq                781 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq->hwq.max_elements = qp->rq.max_wqe;
rq                782 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL,
rq                783 drivers/infiniband/hw/bnxt_re/qplib_fp.c 					       &rq->hwq.max_elements,
rq                789 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
rq                791 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		if (!rq->swq) {
rq                795 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		pbl = &rq->hwq.pbl[PBL_LVL_0];
rq                798 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
rq                825 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
rq                831 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
rq                851 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
rq                852 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	kfree(rq->swq);
rq                866 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *rq = &qp->rq;
rq                958 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (rq->max_wqe) {
rq                959 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq->hwq.max_elements = rq->max_wqe;
rq                960 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq,
rq                961 drivers/infiniband/hw/bnxt_re/qplib_fp.c 					       &rq->sg_info,
rq                962 drivers/infiniband/hw/bnxt_re/qplib_fp.c 					       &rq->hwq.max_elements,
rq                968 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
rq                970 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		if (!rq->swq) {
rq                974 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		pbl = &rq->hwq.pbl[PBL_LVL_0];
rq                977 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
rq               1004 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
rq               1020 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	max_rsge = bnxt_qplib_is_chip_gen_p5(res->cctx) ? 6 : rq->max_sge;
rq               1084 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
rq               1085 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	kfree(rq->swq);
rq               1257 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		req.rq_psn = cpu_to_le32(qp->rq.psn);
rq               1271 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
rq               1273 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
rq               1353 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	qp->rq.psn = le32_to_cpu(sb->rq_psn);
rq               1359 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	qp->rq.max_wqe = qp->rq.hwq.max_elements;
rq               1361 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
rq               1446 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
rq               1447 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	kfree(qp->rq.swq);
rq               1477 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *rq = &qp->rq;
rq               1479 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	return HWQ_CMP(rq->hwq.prod, &rq->hwq);
rq               1490 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *rq = &qp->rq;
rq               1496 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
rq               1809 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *rq = &qp->rq;
rq               1816 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
rq               1825 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *rq = &qp->rq;
rq               1835 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		dev_dbg(&rq->hwq.pdev->dev,
rq               1839 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (bnxt_qplib_queue_full(rq)) {
rq               1840 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		dev_err(&rq->hwq.pdev->dev,
rq               1845 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
rq               1846 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rq->swq[sw_prod].wr_id = wqe->wr_id;
rq               1848 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
rq               1876 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
rq               1877 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq->swq[sw_prod].wr_id = wqe->wr_id;
rq               1880 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rq->hwq.prod++;
rq               1889 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			dev_err(&rq->hwq.pdev->dev,
rq               2055 drivers/infiniband/hw/bnxt_re/qplib_fp.c static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
rq               2077 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
rq               2080 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
rq               2088 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = rq->swq[sw_cons].wr_id;
rq               2091 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq->hwq.cons++;
rq               2094 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
rq               2335 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *rq;
rq               2347 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (qp->rq.flushed) {
rq               2380 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq = &qp->rq;
rq               2381 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		if (wr_id_idx >= rq->hwq.max_elements) {
rq               2384 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				wr_id_idx, rq->hwq.max_elements);
rq               2387 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
rq               2390 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq->hwq.cons++;
rq               2410 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *rq;
rq               2422 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (qp->rq.flushed) {
rq               2461 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq = &qp->rq;
rq               2462 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		if (wr_id_idx >= rq->hwq.max_elements) {
rq               2465 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				wr_id_idx, rq->hwq.max_elements);
rq               2469 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
rq               2472 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq->hwq.cons++;
rq               2507 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *rq;
rq               2519 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (qp->rq.flushed) {
rq               2565 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq = &qp->rq;
rq               2566 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		if (wr_id_idx >= rq->hwq.max_elements) {
rq               2569 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				wr_id_idx, rq->hwq.max_elements);
rq               2572 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
rq               2575 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		rq->hwq.cons++;
rq               2595 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_q *sq, *rq;
rq               2618 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	rq = &qp->rq;
rq               2672 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	} else if (cqe_cons > rq->hwq.max_elements) {
rq               2675 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			cqe_cons, rq->hwq.max_elements);
rq               2679 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (qp->rq.flushed) {
rq               2729 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		__flush_rq(&qp->rq, qp, &cqe, &budget);
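
The bnxt_qplib_post_recv() entries above (qplib_fp.c lines 1825-1880) follow the usual producer pattern for the software RQ: refuse the post when the queue is full, record the caller's wr_id at the current producer slot so the completion path can return it, build the hardware RQE, and advance the producer index. A condensed, compilable sketch of that bookkeeping; HWQ_CMP() is assumed here to wrap an index to a power-of-two ring size, the queue-full test is simplified, and the structures are reduced to the fields the listing shows.

#include <errno.h>

struct hwq  { unsigned int prod, cons, max_elements; };
struct swqe { unsigned long long wr_id; };
struct q    { struct hwq hwq; struct swqe *swq; };

/* Assumed behaviour of HWQ_CMP(): wrap to the ring size. */
#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))

/* Simplified stand-in for bnxt_qplib_queue_full(). */
static int queue_full(struct q *q)
{
	return q->hwq.prod - q->hwq.cons >= q->hwq.max_elements;
}

static int post_recv(struct q *rq, unsigned long long wr_id)
{
	unsigned int sw_prod;

	if (queue_full(rq))
		return -ENOMEM;			/* "RQ is full" case   */

	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	rq->swq[sw_prod].wr_id = wr_id;		/* read back at poll   */
	/* ... hardware RQE for slot sw_prod would be filled here ... */
	rq->hwq.prod++;
	return 0;
}
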
rq                290 drivers/infiniband/hw/bnxt_re/qplib_fp.h 	struct bnxt_qplib_q		rq;
rq                267 drivers/infiniband/hw/cxgb3/cxio_hal.c 	wq->rq = kcalloc(depth, sizeof(struct t3_swrq), GFP_KERNEL);
rq                268 drivers/infiniband/hw/cxgb3/cxio_hal.c 	if (!wq->rq)
rq                299 drivers/infiniband/hw/cxgb3/cxio_hal.c 	kfree(wq->rq);
rq                325 drivers/infiniband/hw/cxgb3/cxio_hal.c 	kfree(wq->rq);
rq               1277 drivers/infiniband/hw/cxgb3/cxio_hal.c 		*cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id;
rq               1278 drivers/infiniband/hw/cxgb3/cxio_hal.c 		if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr)
rq               1280 drivers/infiniband/hw/cxgb3/cxio_hal.c 				wq->rq[Q_PTR2IDX(wq->rq_rptr,
rq                704 drivers/infiniband/hw/cxgb3/cxio_wr.h 	struct t3_swrq *rq;		/* SW RQ (holds consumer wr_ids */
rq                281 drivers/infiniband/hw/cxgb3/iwch_qp.c 	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
rq                283 drivers/infiniband/hw/cxgb3/iwch_qp.c 	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
rq                344 drivers/infiniband/hw/cxgb3/iwch_qp.c 	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
rq                346 drivers/infiniband/hw/cxgb3/iwch_qp.c 	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
rq                206 drivers/infiniband/hw/cxgb4/cq.c 	int in_use = wq->rq.in_use - count;
rq                209 drivers/infiniband/hw/cxgb4/cq.c 		 wq, cq, wq->rq.in_use, count);
rq                662 drivers/infiniband/hw/cxgb4/cq.c 			     CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
rq                723 drivers/infiniband/hw/cxgb4/cq.c 			pr_debug("completing rq idx %u\n", wq->rq.cidx);
rq                724 drivers/infiniband/hw/cxgb4/cq.c 			*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
rq                731 drivers/infiniband/hw/cxgb4/cq.c 		wq->rq.msn++;
rq                113 drivers/infiniband/hw/cxgb4/device.c 		le.qid = wq->rq.qid;
rq                115 drivers/infiniband/hw/cxgb4/device.c 		le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
rq                116 drivers/infiniband/hw/cxgb4/device.c 		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
rq                272 drivers/infiniband/hw/cxgb4/device.c 				      qp->srq ? qp->srq->idx : qp->wq.rq.qid,
rq                292 drivers/infiniband/hw/cxgb4/device.c 				      qp->wq.sq.qid, qp->wq.rq.qid,
rq                306 drivers/infiniband/hw/cxgb4/device.c 			      qp->wq.sq.qid, qp->wq.rq.qid,
rq                827 drivers/infiniband/hw/cxgb4/device.c 		 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
rq                828 drivers/infiniband/hw/cxgb4/device.c 		 rdev->lldi.vr->rq.size,
rq                845 drivers/infiniband/hw/cxgb4/device.c 	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
rq                963 drivers/infiniband/hw/cxgb4/device.c 	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
rq               1289 drivers/infiniband/hw/cxgb4/device.c 	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
rq               1290 drivers/infiniband/hw/cxgb4/device.c 	qp->wq.rq.wq_pidx_inc = 0;
rq               1391 drivers/infiniband/hw/cxgb4/device.c 					  qp->wq.rq.qid,
rq               1397 drivers/infiniband/hw/cxgb4/device.c 			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
rq               1402 drivers/infiniband/hw/cxgb4/device.c 		qp->wq.rq.wq_pidx_inc = 0;
rq                 77 drivers/infiniband/hw/cxgb4/iw_cxgb4.h #define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
rq                163 drivers/infiniband/hw/cxgb4/qp.c 				  wq->rq.memsize, wq->rq.queue,
rq                164 drivers/infiniband/hw/cxgb4/qp.c 				  dma_unmap_addr(&wq->rq, mapping));
rq                165 drivers/infiniband/hw/cxgb4/qp.c 		c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
rq                166 drivers/infiniband/hw/cxgb4/qp.c 		kfree(wq->rq.sw_rq);
rq                167 drivers/infiniband/hw/cxgb4/qp.c 		c4iw_put_qpid(rdev, wq->rq.qid, uctx);
rq                218 drivers/infiniband/hw/cxgb4/qp.c 		wq->rq.qid = c4iw_get_qpid(rdev, uctx);
rq                219 drivers/infiniband/hw/cxgb4/qp.c 		if (!wq->rq.qid) {
rq                234 drivers/infiniband/hw/cxgb4/qp.c 			wq->rq.sw_rq = kcalloc(wq->rq.size,
rq                235 drivers/infiniband/hw/cxgb4/qp.c 					       sizeof(*wq->rq.sw_rq),
rq                237 drivers/infiniband/hw/cxgb4/qp.c 			if (!wq->rq.sw_rq) {
rq                248 drivers/infiniband/hw/cxgb4/qp.c 		wq->rq.rqt_size =
rq                249 drivers/infiniband/hw/cxgb4/qp.c 			roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
rq                250 drivers/infiniband/hw/cxgb4/qp.c 		wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
rq                251 drivers/infiniband/hw/cxgb4/qp.c 		if (!wq->rq.rqt_hwaddr) {
rq                264 drivers/infiniband/hw/cxgb4/qp.c 		wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
rq                265 drivers/infiniband/hw/cxgb4/qp.c 						  wq->rq.memsize,
rq                266 drivers/infiniband/hw/cxgb4/qp.c 						  &wq->rq.dma_addr,
rq                268 drivers/infiniband/hw/cxgb4/qp.c 		if (!wq->rq.queue) {
rq                275 drivers/infiniband/hw/cxgb4/qp.c 			 wq->rq.queue,
rq                276 drivers/infiniband/hw/cxgb4/qp.c 			 (unsigned long long)virt_to_phys(wq->rq.queue));
rq                277 drivers/infiniband/hw/cxgb4/qp.c 		dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
rq                287 drivers/infiniband/hw/cxgb4/qp.c 		wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
rq                289 drivers/infiniband/hw/cxgb4/qp.c 						 &wq->rq.bar2_qid,
rq                290 drivers/infiniband/hw/cxgb4/qp.c 						 user ? &wq->rq.bar2_pa : NULL);
rq                295 drivers/infiniband/hw/cxgb4/qp.c 	if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
rq                297 drivers/infiniband/hw/cxgb4/qp.c 			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
rq                302 drivers/infiniband/hw/cxgb4/qp.c 	wq->rq.msn = 1;
rq                358 drivers/infiniband/hw/cxgb4/qp.c 		eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
rq                376 drivers/infiniband/hw/cxgb4/qp.c 		res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
rq                377 drivers/infiniband/hw/cxgb4/qp.c 		res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
rq                386 drivers/infiniband/hw/cxgb4/qp.c 		 wq->sq.qid, wq->rq.qid, wq->db,
rq                387 drivers/infiniband/hw/cxgb4/qp.c 		 wq->sq.bar2_va, wq->rq.bar2_va);
rq                393 drivers/infiniband/hw/cxgb4/qp.c 				  wq->rq.memsize, wq->rq.queue,
rq                394 drivers/infiniband/hw/cxgb4/qp.c 				  dma_unmap_addr(&wq->rq, mapping));
rq                399 drivers/infiniband/hw/cxgb4/qp.c 		c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
rq                402 drivers/infiniband/hw/cxgb4/qp.c 		kfree(wq->rq.sw_rq);
rq                407 drivers/infiniband/hw/cxgb4/qp.c 		c4iw_put_qpid(rdev, wq->rq.qid, uctx);
rq                763 drivers/infiniband/hw/cxgb4/qp.c 	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
rq                764 drivers/infiniband/hw/cxgb4/qp.c 			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
rq                938 drivers/infiniband/hw/cxgb4/qp.c 		qhp->wq.rq.wq_pidx_inc += inc;
rq               1295 drivers/infiniband/hw/cxgb4/qp.c 		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
rq               1296 drivers/infiniband/hw/cxgb4/qp.c 					   qhp->wq.rq.wq_pidx *
rq               1307 drivers/infiniband/hw/cxgb4/qp.c 		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
rq               1309 drivers/infiniband/hw/cxgb4/qp.c 			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
rq               1312 drivers/infiniband/hw/cxgb4/qp.c 			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
rq               1318 drivers/infiniband/hw/cxgb4/qp.c 		wqe->recv.wrid = qhp->wq.rq.pidx;
rq               1324 drivers/infiniband/hw/cxgb4/qp.c 			 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
rq               1806 drivers/infiniband/hw/cxgb4/qp.c 		wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
rq               1807 drivers/infiniband/hw/cxgb4/qp.c 		wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
rq               1808 drivers/infiniband/hw/cxgb4/qp.c 		wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
rq               1809 drivers/infiniband/hw/cxgb4/qp.c 						   rhp->rdev.lldi.vr->rq.start);
rq               1845 drivers/infiniband/hw/cxgb4/qp.c 		 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
rq               2172 drivers/infiniband/hw/cxgb4/qp.c 		qhp->wq.rq.size = rqsize;
rq               2173 drivers/infiniband/hw/cxgb4/qp.c 		qhp->wq.rq.memsize =
rq               2175 drivers/infiniband/hw/cxgb4/qp.c 			sizeof(*qhp->wq.rq.queue);
rq               2181 drivers/infiniband/hw/cxgb4/qp.c 			qhp->wq.rq.memsize =
rq               2182 drivers/infiniband/hw/cxgb4/qp.c 				roundup(qhp->wq.rq.memsize, PAGE_SIZE);
rq               2267 drivers/infiniband/hw/cxgb4/qp.c 			uresp.rqid = qhp->wq.rq.qid;
rq               2268 drivers/infiniband/hw/cxgb4/qp.c 			uresp.rq_size = qhp->wq.rq.size;
rq               2269 drivers/infiniband/hw/cxgb4/qp.c 			uresp.rq_memsize = qhp->wq.rq.memsize;
rq               2298 drivers/infiniband/hw/cxgb4/qp.c 			rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
rq               2299 drivers/infiniband/hw/cxgb4/qp.c 			rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
rq               2309 drivers/infiniband/hw/cxgb4/qp.c 				(u64)(unsigned long)qhp->wq.rq.bar2_pa;
rq               2326 drivers/infiniband/hw/cxgb4/qp.c 			&qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
rq               2340 drivers/infiniband/hw/cxgb4/qp.c 		 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
rq               2341 drivers/infiniband/hw/cxgb4/qp.c 		 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
rq               2550 drivers/infiniband/hw/cxgb4/qp.c 	wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
rq               2619 drivers/infiniband/hw/cxgb4/qp.c 			rdev->lldi.vr->rq.start);
rq                398 drivers/infiniband/hw/cxgb4/resource.c 	rqt_start = rdev->lldi.vr->rq.start + skip;
rq                399 drivers/infiniband/hw/cxgb4/resource.c 	rqt_chunk = rdev->lldi.vr->rq.size - skip;
rq                 70 drivers/infiniband/hw/cxgb4/restrack.c 	if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
rq                 72 drivers/infiniband/hw/cxgb4/restrack.c 	if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize))
rq                 74 drivers/infiniband/hw/cxgb4/restrack.c 	if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx))
rq                 76 drivers/infiniband/hw/cxgb4/restrack.c 	if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx))
rq                 78 drivers/infiniband/hw/cxgb4/restrack.c 	if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx))
rq                 80 drivers/infiniband/hw/cxgb4/restrack.c 	if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn))
rq                 82 drivers/infiniband/hw/cxgb4/restrack.c 	if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr))
rq                 84 drivers/infiniband/hw/cxgb4/restrack.c 	if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size))
rq                 86 drivers/infiniband/hw/cxgb4/restrack.c 	if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use))
rq                 88 drivers/infiniband/hw/cxgb4/restrack.c 	if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
rq                384 drivers/infiniband/hw/cxgb4/t4.h 	struct t4_rq rq;
rq                482 drivers/infiniband/hw/cxgb4/t4.h 	return wq->rq.in_use;
rq                487 drivers/infiniband/hw/cxgb4/t4.h 	return wq->rq.in_use == 0;
rq                492 drivers/infiniband/hw/cxgb4/t4.h 	return wq->rq.in_use == (wq->rq.size - 1);
rq                497 drivers/infiniband/hw/cxgb4/t4.h 	return wq->rq.size - 1 - wq->rq.in_use;
rq                502 drivers/infiniband/hw/cxgb4/t4.h 	wq->rq.in_use++;
rq                503 drivers/infiniband/hw/cxgb4/t4.h 	if (++wq->rq.pidx == wq->rq.size)
rq                504 drivers/infiniband/hw/cxgb4/t4.h 		wq->rq.pidx = 0;
rq                505 drivers/infiniband/hw/cxgb4/t4.h 	wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
rq                506 drivers/infiniband/hw/cxgb4/t4.h 	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
rq                507 drivers/infiniband/hw/cxgb4/t4.h 		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
rq                512 drivers/infiniband/hw/cxgb4/t4.h 	wq->rq.in_use--;
rq                513 drivers/infiniband/hw/cxgb4/t4.h 	if (++wq->rq.cidx == wq->rq.size)
rq                514 drivers/infiniband/hw/cxgb4/t4.h 		wq->rq.cidx = 0;
rq                519 drivers/infiniband/hw/cxgb4/t4.h 	return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
rq                524 drivers/infiniband/hw/cxgb4/t4.h 		return wq->rq.size * T4_RQ_NUM_SLOTS;
rq                641 drivers/infiniband/hw/cxgb4/t4.h 	if (wq->rq.bar2_va) {
rq                642 drivers/infiniband/hw/cxgb4/t4.h 		if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
rq                643 drivers/infiniband/hw/cxgb4/t4.h 			pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
rq                645 drivers/infiniband/hw/cxgb4/t4.h 				 (wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
rq                648 drivers/infiniband/hw/cxgb4/t4.h 			pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
rq                649 drivers/infiniband/hw/cxgb4/t4.h 			writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
rq                650 drivers/infiniband/hw/cxgb4/t4.h 			       wq->rq.bar2_va + SGE_UDB_KDOORBELL);
rq                657 drivers/infiniband/hw/cxgb4/t4.h 	writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
rq                674 drivers/infiniband/hw/cxgb4/t4.h 	wq->rq.queue[wq->rq.size].status.db_off = 1;
rq                679 drivers/infiniband/hw/cxgb4/t4.h 	wq->rq.queue[wq->rq.size].status.db_off = 0;
rq                684 drivers/infiniband/hw/cxgb4/t4.h 	return !wq->rq.queue[wq->rq.size].status.db_off;
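The t4.h helpers above account for receive-queue occupancy with an in_use counter, wrap pidx/cidx by comparison against size, and treat the queue as full at size - 1 entries. A self-contained sketch of that accounting; the struct and function names are hypothetical, not cxgb4's types:

#include <assert.h>
#include <stdio.h>

/* Hypothetical ring modelled on the in_use/pidx/cidx accounting above. */
struct rq_ring {
    unsigned size;    /* total slots */
    unsigned in_use;  /* posted but not yet completed */
    unsigned pidx;    /* producer index, wraps at size */
    unsigned cidx;    /* consumer index, wraps at size */
};

static int rq_full(const struct rq_ring *rq)  { return rq->in_use == rq->size - 1; }
static int rq_empty(const struct rq_ring *rq) { return rq->in_use == 0; }
static unsigned rq_avail(const struct rq_ring *rq) { return rq->size - 1 - rq->in_use; }

static void rq_produce(struct rq_ring *rq)
{
    assert(!rq_full(rq));
    rq->in_use++;
    if (++rq->pidx == rq->size)   /* wrap by comparison; size need not be a power of two */
        rq->pidx = 0;
}

static void rq_consume(struct rq_ring *rq)
{
    assert(!rq_empty(rq));
    rq->in_use--;
    if (++rq->cidx == rq->size)
        rq->cidx = 0;
}

int main(void)
{
    struct rq_ring rq = { .size = 5 };

    while (!rq_full(&rq))
        rq_produce(&rq);
    printf("full: in_use=%u avail=%u\n", rq.in_use, rq_avail(&rq));
    while (!rq_empty(&rq))
        rq_consume(&rq);
    printf("empty: pidx=%u cidx=%u\n", rq.pidx, rq.cidx);
    return 0;
}

Capping usable depth at size - 1 keeps "full" distinguishable from "empty" without a separate flag, which matches the in_use == size - 1 test quoted above.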
rq                716 drivers/infiniband/hw/hfi1/qp.c 		   srq ? srq->rq.size : qp->r_rq.size
rq                654 drivers/infiniband/hw/hns/hns_roce_device.h 	struct hns_roce_wq	rq;
rq                363 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	spin_lock_irqsave(&hr_qp->rq.lock, flags);
rq                366 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
rq                373 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
rq                375 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
rq                377 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 				wr->num_sge, hr_qp->rq.max_gs);
rq                395 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
rq                400 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		hr_qp->rq.head += nreq;
rq                415 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 				       hr_qp->rq.head);
rq                426 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 				       hr_qp->rq.head);
rq                437 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
rq                440 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
rq               2368 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		wq = &(*cur_qp)->rq;
rq               2603 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
rq               2614 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			       QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
rq               2632 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
rq               2698 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		hr_qp->rq.head = 0;
rq               2699 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		hr_qp->rq.tail = 0;
rq               2787 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
rq               2853 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
rq               2996 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			       hr_qp->rq.head);
rq               3001 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
rq               3279 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			       RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
rq               3287 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			hr_qp->rq.db_reg_l = hr_dev->reg_base +
rq               3292 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
rq               3311 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		hr_qp->rq.head = 0;
rq               3312 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		hr_qp->rq.tail = 0;
rq               3436 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
rq               3437 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
rq               3572 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
rq               3573 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
rq               3637 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		kfree(hr_qp->rq.wrid);
rq                635 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	spin_lock_irqsave(&hr_qp->rq.lock, flags);
rq                638 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
rq                644 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
rq                651 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
rq                653 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
rq                655 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 				wr->num_sge, hr_qp->rq.max_gs);
rq                670 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		if (i < hr_qp->rq.max_gs) {
rq                687 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
rq                692 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		hr_qp->rq.head += nreq;
rq                696 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
rq                706 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 				spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
rq                712 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
rq               2671 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
rq               2766 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		wq = &(*cur_qp)->rq;
rq               3250 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
rq               3287 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		       V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
rq               3647 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	if (hr_qp->rq.wqe_cnt < 1)
rq               3657 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		(hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
rq               3691 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 				  hr_qp->rq.offset / page_size, mtts,
rq               3877 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
rq               4425 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			       hr_qp->rq.head);
rq               4467 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		hr_qp->rq.head = 0;
rq               4468 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		hr_qp->rq.tail = 0;
rq               4472 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		if (hr_qp->rq.wqe_cnt)
rq               4631 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
rq               4632 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
rq               4701 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
rq               4705 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		kfree(hr_qp->rq.wrid);
rq               4707 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		if (hr_qp->rq.wqe_cnt)
rq               4713 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	     hr_qp->rq.wqe_cnt) {
rq               4851 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
rq                288 drivers/infiniband/hw/hns/hns_roce_qp.c 		hr_qp->rq.wqe_cnt = 0;
rq                289 drivers/infiniband/hw/hns/hns_roce_qp.c 		hr_qp->rq.max_gs = 0;
rq                303 drivers/infiniband/hw/hns/hns_roce_qp.c 		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
rq                305 drivers/infiniband/hw/hns/hns_roce_qp.c 		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
rq                311 drivers/infiniband/hw/hns/hns_roce_qp.c 		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
rq                313 drivers/infiniband/hw/hns/hns_roce_qp.c 			hr_qp->rq.wqe_shift =
rq                316 drivers/infiniband/hw/hns/hns_roce_qp.c 			hr_qp->rq.wqe_shift =
rq                318 drivers/infiniband/hw/hns/hns_roce_qp.c 					      * hr_qp->rq.max_gs);
rq                321 drivers/infiniband/hw/hns/hns_roce_qp.c 	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
rq                322 drivers/infiniband/hw/hns/hns_roce_qp.c 	cap->max_recv_sge = hr_qp->rq.max_gs;
rq                396 drivers/infiniband/hw/hns/hns_roce_qp.c 		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
rq                397 drivers/infiniband/hw/hns/hns_roce_qp.c 					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
rq                402 drivers/infiniband/hw/hns/hns_roce_qp.c 		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
rq                408 drivers/infiniband/hw/hns/hns_roce_qp.c 		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
rq                409 drivers/infiniband/hw/hns/hns_roce_qp.c 					     hr_qp->rq.wqe_shift), page_size) +
rq                421 drivers/infiniband/hw/hns/hns_roce_qp.c 			hr_qp->rq.offset = hr_qp->sge.offset +
rq                426 drivers/infiniband/hw/hns/hns_roce_qp.c 			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
rq                459 drivers/infiniband/hw/hns/hns_roce_qp.c 		buf_size = hr_qp->rq.offset - hr_qp->sq.offset;
rq                472 drivers/infiniband/hw/hns/hns_roce_qp.c 		buf_size = hr_qp->rq.offset - hr_qp->sge.offset;
rq                484 drivers/infiniband/hw/hns/hns_roce_qp.c 	buf_size = hr_qp->buff_size - hr_qp->rq.offset;
rq                489 drivers/infiniband/hw/hns/hns_roce_qp.c 					 hr_qp->rq.offset / page_size,
rq                607 drivers/infiniband/hw/hns/hns_roce_qp.c 	hr_qp->rq.offset = size;
rq                608 drivers/infiniband/hw/hns/hns_roce_qp.c 	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
rq                645 drivers/infiniband/hw/hns/hns_roce_qp.c 	u32 wqe_cnt = hr_qp->rq.wqe_cnt;
rq                705 drivers/infiniband/hw/hns/hns_roce_qp.c 	spin_lock_init(&hr_qp->rq.lock);
rq                833 drivers/infiniband/hw/hns/hns_roce_qp.c 		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
rq                886 drivers/infiniband/hw/hns/hns_roce_qp.c 		if (hr_qp->rq.wqe_cnt) {
rq                887 drivers/infiniband/hw/hns/hns_roce_qp.c 			hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
rq                889 drivers/infiniband/hw/hns/hns_roce_qp.c 			if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) {
rq                978 drivers/infiniband/hw/hns/hns_roce_qp.c 		if (hr_qp->rq.wqe_cnt)
rq                979 drivers/infiniband/hw/hns/hns_roce_qp.c 			kfree(hr_qp->rq.wrid);
rq               1194 drivers/infiniband/hw/hns/hns_roce_qp.c 				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
rq               1273 drivers/infiniband/hw/hns/hns_roce_qp.c 	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
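The hns_roce receive path above rounds wqe_cnt up to a power of two and indexes the ring with (head + nreq) & (wqe_cnt - 1), then publishes the low 16 bits of the software head to the doorbell record. A small sketch of that masking; round_up_pow2() and the sample values are assumptions for illustration, not the driver's code:

#include <stdio.h>

/* Smallest power of two >= v (v > 0), mirroring roundup_pow_of_two(). */
static unsigned round_up_pow2(unsigned v)
{
    unsigned p = 1;
    while (p < v)
        p <<= 1;
    return p;
}

int main(void)
{
    unsigned requested = 100;                      /* e.g. a max_recv_wr request */
    unsigned wqe_cnt = round_up_pow2(requested);   /* 128, so mask indexing works */
    unsigned head = 0xfffd;                        /* software head, never masked itself */

    for (unsigned nreq = 0; nreq < 5; nreq++)
        printf("nreq=%u -> wqe_idx=%u\n", nreq, (head + nreq) & (wqe_cnt - 1));

    head += 5;
    printf("doorbell record = 0x%04x\n", head & 0xffff);  /* low 16 bits only */
    return 0;
}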
rq               2609 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	if (info->rq && !qp->flush_rq)
rq                844 drivers/infiniband/hw/i40iw/i40iw_hw.c 	info.rq = true;
rq                947 drivers/infiniband/hw/i40iw/i40iw_type.h 	bool rq;
rq               1005 drivers/infiniband/hw/i40iw/i40iw_uk.c 	qp->rq_base = info->rq;
rq                379 drivers/infiniband/hw/i40iw/i40iw_user.h 	struct i40iw_qp_quanta *rq;
rq                499 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	ukinfo->rq = &ukinfo->sq[sqdepth];
rq                502 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
rq                619 drivers/infiniband/hw/mlx4/cq.c 	wq = is_send ? &qp->sq : &qp->rq;
rq                755 drivers/infiniband/hw/mlx4/cq.c 		wq	  = &(*cur_qp)->rq;
rq               3136 drivers/infiniband/hw/mlx4/main.c 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
rq               3139 drivers/infiniband/hw/mlx4/main.c 			if (mqp->rq.tail != mqp->rq.head) {
rq               3154 drivers/infiniband/hw/mlx4/main.c 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
rq                315 drivers/infiniband/hw/mlx4/mlx4_ib.h 	struct mlx4_ib_wq	rq;
rq                199 drivers/infiniband/hw/mlx4/qp.c 	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
rq                340 drivers/infiniband/hw/mlx4/qp.c 		qp->rq.wqe_cnt = qp->rq.max_gs = 0;
rq                351 drivers/infiniband/hw/mlx4/qp.c 		qp->rq.wqe_cnt	 = roundup_pow_of_two(max(1U, cap->max_recv_wr));
rq                352 drivers/infiniband/hw/mlx4/qp.c 		qp->rq.max_gs	 = roundup_pow_of_two(max(1U, cap->max_recv_sge));
rq                353 drivers/infiniband/hw/mlx4/qp.c 		wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg);
rq                354 drivers/infiniband/hw/mlx4/qp.c 		qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz));
rq                359 drivers/infiniband/hw/mlx4/qp.c 		cap->max_recv_wr  = qp->rq.max_post = qp->rq.wqe_cnt;
rq                360 drivers/infiniband/hw/mlx4/qp.c 		cap->max_recv_sge = qp->rq.max_gs;
rq                362 drivers/infiniband/hw/mlx4/qp.c 		cap->max_recv_wr  = qp->rq.max_post =
rq                363 drivers/infiniband/hw/mlx4/qp.c 			min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
rq                364 drivers/infiniband/hw/mlx4/qp.c 		cap->max_recv_sge = min(qp->rq.max_gs,
rq                416 drivers/infiniband/hw/mlx4/qp.c 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
rq                418 drivers/infiniband/hw/mlx4/qp.c 	if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
rq                419 drivers/infiniband/hw/mlx4/qp.c 		qp->rq.offset = 0;
rq                420 drivers/infiniband/hw/mlx4/qp.c 		qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
rq                422 drivers/infiniband/hw/mlx4/qp.c 		qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
rq                451 drivers/infiniband/hw/mlx4/qp.c 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
rq                462 drivers/infiniband/hw/mlx4/qp.c 		kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf),
rq                466 drivers/infiniband/hw/mlx4/qp.c 	for (i = 0; i < qp->rq.wqe_cnt; i++) {
rq                500 drivers/infiniband/hw/mlx4/qp.c 	for (i = 0; i < qp->rq.wqe_cnt; i++) {
rq                878 drivers/infiniband/hw/mlx4/qp.c 	spin_lock_init(&qp->rq.lock);
rq                916 drivers/infiniband/hw/mlx4/qp.c 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
rq               1068 drivers/infiniband/hw/mlx4/qp.c 	spin_lock_init(&qp->rq.lock);
rq               1185 drivers/infiniband/hw/mlx4/qp.c 		qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
rq               1187 drivers/infiniband/hw/mlx4/qp.c 		if (!qp->sq.wrid || !qp->rq.wrid) {
rq               1280 drivers/infiniband/hw/mlx4/qp.c 		kvfree(qp->rq.wrid);
rq               1491 drivers/infiniband/hw/mlx4/qp.c 		if (qp->rq.wqe_cnt) {
rq               1502 drivers/infiniband/hw/mlx4/qp.c 		kvfree(qp->rq.wrid);
rq               1507 drivers/infiniband/hw/mlx4/qp.c 		if (qp->rq.wqe_cnt)
rq               2233 drivers/infiniband/hw/mlx4/qp.c 		if (qp->rq.wqe_cnt)
rq               2234 drivers/infiniband/hw/mlx4/qp.c 			context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
rq               2235 drivers/infiniband/hw/mlx4/qp.c 		context->rq_size_stride |= qp->rq.wqe_shift - 4;
rq               2458 drivers/infiniband/hw/mlx4/qp.c 	if (qp->rq.wqe_cnt &&
rq               2606 drivers/infiniband/hw/mlx4/qp.c 			qp->rq.head = 0;
rq               2607 drivers/infiniband/hw/mlx4/qp.c 			qp->rq.tail = 0;
rq               2611 drivers/infiniband/hw/mlx4/qp.c 			if (qp->rq.wqe_cnt)
rq               3868 drivers/infiniband/hw/mlx4/qp.c 	max_gs = qp->rq.max_gs;
rq               3869 drivers/infiniband/hw/mlx4/qp.c 	spin_lock_irqsave(&qp->rq.lock, flags);
rq               3879 drivers/infiniband/hw/mlx4/qp.c 	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
rq               3882 drivers/infiniband/hw/mlx4/qp.c 		if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
rq               3888 drivers/infiniband/hw/mlx4/qp.c 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
rq               3920 drivers/infiniband/hw/mlx4/qp.c 		qp->rq.wrid[ind] = wr->wr_id;
rq               3922 drivers/infiniband/hw/mlx4/qp.c 		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
rq               3927 drivers/infiniband/hw/mlx4/qp.c 		qp->rq.head += nreq;
rq               3935 drivers/infiniband/hw/mlx4/qp.c 		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
rq               3938 drivers/infiniband/hw/mlx4/qp.c 	spin_unlock_irqrestore(&qp->rq.lock, flags);
rq               4091 drivers/infiniband/hw/mlx4/qp.c 	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
rq               4092 drivers/infiniband/hw/mlx4/qp.c 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
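The mlx4 sizing lines above carve a single buffer into RQ and SQ regions, placing whichever queue has the larger WQE stride at offset 0; a receive WQE then lives at rq.offset + (n << rq.wqe_shift). A sketch of that layout arithmetic with a hypothetical struct and example counts, not the driver's values:

#include <stdio.h>

struct wq_dims { unsigned wqe_cnt, wqe_shift, offset; };

int main(void)
{
    struct wq_dims rq = { .wqe_cnt = 128, .wqe_shift = 6 };  /* 64-byte receive WQEs */
    struct wq_dims sq = { .wqe_cnt = 256, .wqe_shift = 7 };  /* 128-byte send WQEs */

    unsigned buf_size = (rq.wqe_cnt << rq.wqe_shift) + (sq.wqe_cnt << sq.wqe_shift);

    /* The queue with the larger stride goes first in the shared buffer. */
    if (rq.wqe_shift > sq.wqe_shift) {
        rq.offset = 0;
        sq.offset = rq.wqe_cnt << rq.wqe_shift;
    } else {
        sq.offset = 0;
        rq.offset = sq.wqe_cnt << sq.wqe_shift;
    }

    printf("buf_size=%u rq.offset=%u sq.offset=%u\n", buf_size, rq.offset, sq.offset);
    /* Receive WQE n would then sit at rq.offset + (n << rq.wqe_shift). */
    return 0;
}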
rq                194 drivers/infiniband/hw/mlx5/cq.c 		wq	  = &qp->rq;
rq                394 drivers/infiniband/hw/mlx5/cq.c 	wq = (is_send) ? &qp->sq : &qp->rq;
rq                539 drivers/infiniband/hw/mlx5/cq.c 				wq = &(*cur_qp)->rq;
rq                615 drivers/infiniband/hw/mlx5/devx.c 			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
rq                619 drivers/infiniband/hw/mlx5/devx.c 					       rq->base.mqp.qpn) == obj_id ||
rq                623 drivers/infiniband/hw/mlx5/devx.c 					       rq->tirn) == obj_id ||
rq                143 drivers/infiniband/hw/mlx5/flow.c 			dest_id = mqp->raw_packet_qp.rq.tirn;
rq               3880 drivers/infiniband/hw/mlx5/main.c 			dst->tir_num = mqp->raw_packet_qp.rq.tirn;
rq               4497 drivers/infiniband/hw/mlx5/main.c 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
rq               4500 drivers/infiniband/hw/mlx5/main.c 			if (mqp->rq.tail != mqp->rq.head) {
rq               4515 drivers/infiniband/hw/mlx5/main.c 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
rq                366 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct mlx5_ib_wq	*rq;
rq                386 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct mlx5_ib_rq rq;
rq                411 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct mlx5_ib_wq	rq;
rq               1108 drivers/infiniband/hw/mlx5/odp.c 	struct mlx5_ib_wq *wq = &qp->rq;
rq                237 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_ib_wq *wq = &qp->rq;
rq                341 drivers/infiniband/hw/mlx5/qp.c 		qp->rq.max_gs = 0;
rq                342 drivers/infiniband/hw/mlx5/qp.c 		qp->rq.wqe_cnt = 0;
rq                343 drivers/infiniband/hw/mlx5/qp.c 		qp->rq.wqe_shift = 0;
rq                348 drivers/infiniband/hw/mlx5/qp.c 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
rq                351 drivers/infiniband/hw/mlx5/qp.c 			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
rq                352 drivers/infiniband/hw/mlx5/qp.c 			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
rq                354 drivers/infiniband/hw/mlx5/qp.c 			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
rq                355 drivers/infiniband/hw/mlx5/qp.c 			qp->rq.max_post = qp->rq.wqe_cnt;
rq                362 drivers/infiniband/hw/mlx5/qp.c 			qp->rq.wqe_cnt = wq_size / wqe_size;
rq                370 drivers/infiniband/hw/mlx5/qp.c 			qp->rq.wqe_shift = ilog2(wqe_size);
rq                371 drivers/infiniband/hw/mlx5/qp.c 			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
rq                372 drivers/infiniband/hw/mlx5/qp.c 			qp->rq.max_post = qp->rq.wqe_cnt;
rq                552 drivers/infiniband/hw/mlx5/qp.c 		base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
rq                555 drivers/infiniband/hw/mlx5/qp.c 		base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
rq                909 drivers/infiniband/hw/mlx5/qp.c 	qp->rq.offset = 0;
rq                911 drivers/infiniband/hw/mlx5/qp.c 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
rq               1064 drivers/infiniband/hw/mlx5/qp.c 	qp->rq.offset = 0;
rq               1065 drivers/infiniband/hw/mlx5/qp.c 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
rq               1066 drivers/infiniband/hw/mlx5/qp.c 	base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
rq               1075 drivers/infiniband/hw/mlx5/qp.c 	if (qp->rq.wqe_cnt)
rq               1076 drivers/infiniband/hw/mlx5/qp.c 		mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
rq               1077 drivers/infiniband/hw/mlx5/qp.c 			      ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);
rq               1126 drivers/infiniband/hw/mlx5/qp.c 	qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
rq               1127 drivers/infiniband/hw/mlx5/qp.c 				     sizeof(*qp->rq.wrid), GFP_KERNEL);
rq               1133 drivers/infiniband/hw/mlx5/qp.c 	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
rq               1147 drivers/infiniband/hw/mlx5/qp.c 	kvfree(qp->rq.wrid);
rq               1164 drivers/infiniband/hw/mlx5/qp.c 	kvfree(qp->rq.wrid);
rq               1317 drivers/infiniband/hw/mlx5/qp.c 				   struct mlx5_ib_rq *rq, void *qpin,
rq               1320 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
rq               1341 drivers/infiniband/hw/mlx5/qp.c 	if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
rq               1354 drivers/infiniband/hw/mlx5/qp.c 	if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
rq               1367 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);
rq               1375 drivers/infiniband/hw/mlx5/qp.c 				     struct mlx5_ib_rq *rq)
rq               1377 drivers/infiniband/hw/mlx5/qp.c 	mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
rq               1388 drivers/infiniband/hw/mlx5/qp.c 				      struct mlx5_ib_rq *rq,
rq               1395 drivers/infiniband/hw/mlx5/qp.c 	mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
rq               1399 drivers/infiniband/hw/mlx5/qp.c 				    struct mlx5_ib_rq *rq, u32 tdn,
rq               1418 drivers/infiniband/hw/mlx5/qp.c 	MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
rq               1438 drivers/infiniband/hw/mlx5/qp.c 	rq->tirn = MLX5_GET(create_tir_out, out, tirn);
rq               1443 drivers/infiniband/hw/mlx5/qp.c 			destroy_raw_packet_qp_tir(dev, rq, 0, pd);
rq               1458 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
rq               1486 drivers/infiniband/hw/mlx5/qp.c 	if (qp->rq.wqe_cnt) {
rq               1487 drivers/infiniband/hw/mlx5/qp.c 		rq->base.container_mibqp = qp;
rq               1490 drivers/infiniband/hw/mlx5/qp.c 			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
rq               1492 drivers/infiniband/hw/mlx5/qp.c 			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
rq               1493 drivers/infiniband/hw/mlx5/qp.c 		err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
rq               1498 drivers/infiniband/hw/mlx5/qp.c 			dev, rq, tdn, &qp->flags_en, pd, out,
rq               1504 drivers/infiniband/hw/mlx5/qp.c 			resp->rqn = rq->base.mqp.qpn;
rq               1506 drivers/infiniband/hw/mlx5/qp.c 			resp->tirn = rq->tirn;
rq               1526 drivers/infiniband/hw/mlx5/qp.c 						     rq->base.mqp.qpn;
rq               1534 drivers/infiniband/hw/mlx5/qp.c 	destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
rq               1536 drivers/infiniband/hw/mlx5/qp.c 	destroy_raw_packet_qp_rq(dev, rq);
rq               1552 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
rq               1554 drivers/infiniband/hw/mlx5/qp.c 	if (qp->rq.wqe_cnt) {
rq               1555 drivers/infiniband/hw/mlx5/qp.c 		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
rq               1556 drivers/infiniband/hw/mlx5/qp.c 		destroy_raw_packet_qp_rq(dev, rq);
rq               1569 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
rq               1572 drivers/infiniband/hw/mlx5/qp.c 	rq->rq = &qp->rq;
rq               1574 drivers/infiniband/hw/mlx5/qp.c 	rq->doorbell = &qp->db;
rq               1968 drivers/infiniband/hw/mlx5/qp.c 	spin_lock_init(&qp->rq.lock);
rq               2116 drivers/infiniband/hw/mlx5/qp.c 	       &qp->raw_packet_qp.rq.base :
rq               2132 drivers/infiniband/hw/mlx5/qp.c 			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
rq               2133 drivers/infiniband/hw/mlx5/qp.c 			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
rq               2203 drivers/infiniband/hw/mlx5/qp.c 	if (qp->rq.wqe_cnt) {
rq               2204 drivers/infiniband/hw/mlx5/qp.c 		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
rq               2205 drivers/infiniband/hw/mlx5/qp.c 		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
rq               2447 drivers/infiniband/hw/mlx5/qp.c 	       &qp->raw_packet_qp.rq.base :
rq               3150 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
rq               3163 drivers/infiniband/hw/mlx5/qp.c 	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
rq               3180 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
rq               3184 drivers/infiniband/hw/mlx5/qp.c 	rq->state = new_state;
rq               3272 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
rq               3274 drivers/infiniband/hw/mlx5/qp.c 	int modify_rq = !!qp->rq.wqe_cnt;
rq               3315 drivers/infiniband/hw/mlx5/qp.c 		err =  modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
rq               3611 drivers/infiniband/hw/mlx5/qp.c 	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
rq               3721 drivers/infiniband/hw/mlx5/qp.c 		qp->rq.head = 0;
rq               3722 drivers/infiniband/hw/mlx5/qp.c 		qp->rq.tail = 0;
rq               5378 drivers/infiniband/hw/mlx5/qp.c 	spin_lock_irqsave(&qp->rq.lock, flags);
rq               5380 drivers/infiniband/hw/mlx5/qp.c 	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
rq               5383 drivers/infiniband/hw/mlx5/qp.c 		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
rq               5389 drivers/infiniband/hw/mlx5/qp.c 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
rq               5395 drivers/infiniband/hw/mlx5/qp.c 		scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
rq               5402 drivers/infiniband/hw/mlx5/qp.c 		if (i < qp->rq.max_gs) {
rq               5410 drivers/infiniband/hw/mlx5/qp.c 			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
rq               5413 drivers/infiniband/hw/mlx5/qp.c 		qp->rq.wrid[ind] = wr->wr_id;
rq               5415 drivers/infiniband/hw/mlx5/qp.c 		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
rq               5420 drivers/infiniband/hw/mlx5/qp.c 		qp->rq.head += nreq;
rq               5427 drivers/infiniband/hw/mlx5/qp.c 		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
rq               5430 drivers/infiniband/hw/mlx5/qp.c 	spin_unlock_irqrestore(&qp->rq.lock, flags);
rq               5529 drivers/infiniband/hw/mlx5/qp.c 					struct mlx5_ib_rq *rq,
rq               5542 drivers/infiniband/hw/mlx5/qp.c 	err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
rq               5548 drivers/infiniband/hw/mlx5/qp.c 	rq->state = *rq_state;
rq               5590 drivers/infiniband/hw/mlx5/qp.c 		     qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
rq               5606 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
rq               5617 drivers/infiniband/hw/mlx5/qp.c 	if (qp->rq.wqe_cnt) {
rq               5618 drivers/infiniband/hw/mlx5/qp.c 		err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
rq               5797 drivers/infiniband/hw/mlx5/qp.c 	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
rq               5798 drivers/infiniband/hw/mlx5/qp.c 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
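The mlx5 post-receive loop above rejects a work request when the queue would overflow and only advances the software head after the loop. A minimal model of such a head/tail overflow test, assuming occupancy is head minus tail with monotonically increasing counters; the struct and helper names are hypothetical, not mlx5's:

#include <stdio.h>

struct soft_wq {
    unsigned head;      /* monotonically increasing producer count */
    unsigned tail;      /* monotonically increasing consumer count */
    unsigned max_post;  /* maximum outstanding WQEs */
};

/* Nonzero if posting one more request (after nreq already staged in this
 * call) would exceed the queue depth. Unsigned subtraction keeps the
 * occupancy correct even after the 32-bit counters wrap. */
static int wq_overflow(const struct soft_wq *wq, unsigned nreq)
{
    unsigned cur = wq->head - wq->tail;
    return cur + nreq >= wq->max_post;
}

int main(void)
{
    struct soft_wq wq = { .head = 10, .tail = 4, .max_post = 8 };

    for (unsigned nreq = 0; nreq < 4; nreq++)
        printf("nreq=%u overflow=%d\n", nreq, wq_overflow(&wq, nreq));
    return 0;
}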
rq                538 drivers/infiniband/hw/mthca/mthca_cq.c 					       (*cur_qp)->rq.max];
rq                548 drivers/infiniband/hw/mthca/mthca_cq.c 		wq = &(*cur_qp)->rq;
rq                518 drivers/infiniband/hw/mthca/mthca_provider.c 			qp->rq.db_index  = ucmd.rq_db_index;
rq                573 drivers/infiniband/hw/mthca/mthca_provider.c 	init_attr->cap.max_recv_wr     = qp->rq.max;
rq                575 drivers/infiniband/hw/mthca/mthca_provider.c 	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
rq                597 drivers/infiniband/hw/mthca/mthca_provider.c 				    to_mqp(qp)->rq.db_index);
rq                275 drivers/infiniband/hw/mthca/mthca_provider.h 	struct mthca_wq        rq;
rq                211 drivers/infiniband/hw/mthca/mthca_qp.c 		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
rq                213 drivers/infiniband/hw/mthca/mthca_qp.c 		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
rq                214 drivers/infiniband/hw/mthca/mthca_qp.c 			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
rq                506 drivers/infiniband/hw/mthca/mthca_qp.c 	qp_attr->cap.max_recv_wr     = qp->rq.max;
rq                508 drivers/infiniband/hw/mthca/mthca_qp.c 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
rq                615 drivers/infiniband/hw/mthca/mthca_qp.c 		if (qp->rq.max)
rq                616 drivers/infiniband/hw/mthca/mthca_qp.c 			qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
rq                617 drivers/infiniband/hw/mthca/mthca_qp.c 		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
rq                777 drivers/infiniband/hw/mthca/mthca_qp.c 		qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);
rq                843 drivers/infiniband/hw/mthca/mthca_qp.c 		mthca_wq_reset(&qp->rq);
rq                844 drivers/infiniband/hw/mthca/mthca_qp.c 		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
rq                848 drivers/infiniband/hw/mthca/mthca_qp.c 			*qp->rq.db = 0;
rq                871 drivers/infiniband/hw/mthca/mthca_qp.c 		spin_lock(&qp->rq.lock);
rq                873 drivers/infiniband/hw/mthca/mthca_qp.c 		spin_unlock(&qp->rq.lock);
rq                974 drivers/infiniband/hw/mthca/mthca_qp.c 	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
rq                975 drivers/infiniband/hw/mthca/mthca_qp.c 			       (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
rq                996 drivers/infiniband/hw/mthca/mthca_qp.c 		qp->rq.max_gs * sizeof (struct mthca_data_seg);
rq               1001 drivers/infiniband/hw/mthca/mthca_qp.c 	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
rq               1002 drivers/infiniband/hw/mthca/mthca_qp.c 	     qp->rq.wqe_shift++)
rq               1049 drivers/infiniband/hw/mthca/mthca_qp.c 	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
rq               1063 drivers/infiniband/hw/mthca/mthca_qp.c 	qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64),
rq               1134 drivers/infiniband/hw/mthca/mthca_qp.c 		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
rq               1135 drivers/infiniband/hw/mthca/mthca_qp.c 						 qp->qpn, &qp->rq.db);
rq               1136 drivers/infiniband/hw/mthca/mthca_qp.c 		if (qp->rq.db_index < 0)
rq               1142 drivers/infiniband/hw/mthca/mthca_qp.c 			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
rq               1155 drivers/infiniband/hw/mthca/mthca_qp.c 		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
rq               1179 drivers/infiniband/hw/mthca/mthca_qp.c 	mthca_wq_reset(&qp->rq);
rq               1182 drivers/infiniband/hw/mthca/mthca_qp.c 	spin_lock_init(&qp->rq.lock);
rq               1214 drivers/infiniband/hw/mthca/mthca_qp.c 			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
rq               1216 drivers/infiniband/hw/mthca/mthca_qp.c 		for (i = 0; i < qp->rq.max; ++i) {
rq               1218 drivers/infiniband/hw/mthca/mthca_qp.c 			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
rq               1219 drivers/infiniband/hw/mthca/mthca_qp.c 						   qp->rq.wqe_shift);
rq               1223 drivers/infiniband/hw/mthca/mthca_qp.c 			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
rq               1235 drivers/infiniband/hw/mthca/mthca_qp.c 		for (i = 0; i < qp->rq.max; ++i) {
rq               1237 drivers/infiniband/hw/mthca/mthca_qp.c 			next->nda_op = htonl((((i + 1) % qp->rq.max) <<
rq               1238 drivers/infiniband/hw/mthca/mthca_qp.c 					      qp->rq.wqe_shift) | 1);
rq               1244 drivers/infiniband/hw/mthca/mthca_qp.c 	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
rq               1270 drivers/infiniband/hw/mthca/mthca_qp.c 		qp->rq.max = cap->max_recv_wr ?
rq               1275 drivers/infiniband/hw/mthca/mthca_qp.c 		qp->rq.max = cap->max_recv_wr;
rq               1279 drivers/infiniband/hw/mthca/mthca_qp.c 	qp->rq.max_gs = cap->max_recv_sge;
rq               1772 drivers/infiniband/hw/mthca/mthca_qp.c 		qp->wrid[ind + qp->rq.max] = wr->wr_id;
rq               1843 drivers/infiniband/hw/mthca/mthca_qp.c 	spin_lock_irqsave(&qp->rq.lock, flags);
rq               1847 drivers/infiniband/hw/mthca/mthca_qp.c 	ind = qp->rq.next_ind;
rq               1850 drivers/infiniband/hw/mthca/mthca_qp.c 		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
rq               1853 drivers/infiniband/hw/mthca/mthca_qp.c 					qp->rq.head, qp->rq.tail,
rq               1854 drivers/infiniband/hw/mthca/mthca_qp.c 					qp->rq.max, nreq);
rq               1861 drivers/infiniband/hw/mthca/mthca_qp.c 		prev_wqe = qp->rq.last;
rq               1862 drivers/infiniband/hw/mthca/mthca_qp.c 		qp->rq.last = wqe;
rq               1871 drivers/infiniband/hw/mthca/mthca_qp.c 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
rq               1892 drivers/infiniband/hw/mthca/mthca_qp.c 		if (unlikely(ind >= qp->rq.max))
rq               1893 drivers/infiniband/hw/mthca/mthca_qp.c 			ind -= qp->rq.max;
rq               1901 drivers/infiniband/hw/mthca/mthca_qp.c 			mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
rq               1905 drivers/infiniband/hw/mthca/mthca_qp.c 			qp->rq.next_ind = ind;
rq               1906 drivers/infiniband/hw/mthca/mthca_qp.c 			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
rq               1914 drivers/infiniband/hw/mthca/mthca_qp.c 		mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
rq               1919 drivers/infiniband/hw/mthca/mthca_qp.c 	qp->rq.next_ind = ind;
rq               1920 drivers/infiniband/hw/mthca/mthca_qp.c 	qp->rq.head    += nreq;
rq               1922 drivers/infiniband/hw/mthca/mthca_qp.c 	spin_unlock_irqrestore(&qp->rq.lock, flags);
rq               2102 drivers/infiniband/hw/mthca/mthca_qp.c 		qp->wrid[ind + qp->rq.max] = wr->wr_id;
rq               2172 drivers/infiniband/hw/mthca/mthca_qp.c 	spin_lock_irqsave(&qp->rq.lock, flags);
rq               2176 drivers/infiniband/hw/mthca/mthca_qp.c 	ind = qp->rq.head & (qp->rq.max - 1);
rq               2179 drivers/infiniband/hw/mthca/mthca_qp.c 		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
rq               2182 drivers/infiniband/hw/mthca/mthca_qp.c 					qp->rq.head, qp->rq.tail,
rq               2183 drivers/infiniband/hw/mthca/mthca_qp.c 					qp->rq.max, nreq);
rq               2195 drivers/infiniband/hw/mthca/mthca_qp.c 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
rq               2206 drivers/infiniband/hw/mthca/mthca_qp.c 		if (i < qp->rq.max_gs)
rq               2212 drivers/infiniband/hw/mthca/mthca_qp.c 		if (unlikely(ind >= qp->rq.max))
rq               2213 drivers/infiniband/hw/mthca/mthca_qp.c 			ind -= qp->rq.max;
rq               2217 drivers/infiniband/hw/mthca/mthca_qp.c 		qp->rq.head += nreq;
rq               2224 drivers/infiniband/hw/mthca/mthca_qp.c 		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
rq               2227 drivers/infiniband/hw/mthca/mthca_qp.c 	spin_unlock_irqrestore(&qp->rq.lock, flags);
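mthca picks rq.wqe_shift by walking the shift up from 6 until 1 << wqe_shift covers the receive descriptor (next-WQE header plus max_gs data segments). A sketch of that search with made-up segment sizes; the constants are assumptions, not mthca's layout:

#include <stdio.h>

int main(void)
{
    unsigned hdr_size = 16;   /* hypothetical next-WQE header size */
    unsigned seg_size = 16;   /* hypothetical scatter/gather segment size */
    unsigned max_gs   = 5;    /* requested max_recv_sge */

    unsigned size = hdr_size + max_gs * seg_size;   /* 96 bytes */

    unsigned wqe_shift;
    for (wqe_shift = 6; (1u << wqe_shift) < size; wqe_shift++)
        ;                     /* smallest power-of-two stride >= size */

    printf("descriptor=%u bytes -> wqe_shift=%u (stride %u)\n",
           size, wqe_shift, 1u << wqe_shift);
    return 0;
}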
rq                378 drivers/infiniband/hw/ocrdma/ocrdma.h 	struct ocrdma_qp_hwq_info rq;
rq                411 drivers/infiniband/hw/ocrdma/ocrdma.h 	struct ocrdma_qp_hwq_info rq;
rq               2130 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	qp->rq.head = 0;
rq               2131 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	qp->rq.tail = 0;
rq               2255 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	qp->rq.max_cnt = max_rqe_allocated;
rq               2258 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
rq               2259 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	if (!qp->rq.va)
rq               2261 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	qp->rq.pa = pa;
rq               2262 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	qp->rq.len = len;
rq               2263 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	qp->rq.entry_size = dev->attr.rqe_size;
rq               2274 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
rq               2339 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
rq               2362 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		qp->rq.max_cnt = max_rqe_allocated;
rq               2363 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		qp->rq.max_wqe_idx = max_rqe_allocated - 1;
rq               2455 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	if (qp->rq.va)
rq               2456 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
rq               2736 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	if (!qp->srq && qp->rq.va)
rq               2737 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
rq               2772 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
rq               2773 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	if (!srq->rq.va) {
rq               2779 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	srq->rq.entry_size = dev->attr.rqe_size;
rq               2780 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	srq->rq.pa = pa;
rq               2781 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	srq->rq.len = len;
rq               2782 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	srq->rq.max_cnt = max_rqe_allocated;
rq               2800 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	srq->rq.dbid = rsp->id;
rq               2805 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	srq->rq.max_cnt = max_rqe_allocated;
rq               2806 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	srq->rq.max_wqe_idx = max_rqe_allocated - 1;
rq               2807 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
rq               2812 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
rq               2845 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	cmd->id = srq->rq.dbid;
rq               2871 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	if (srq->rq.va)
rq               2872 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		dma_free_coherent(&pdev->dev, srq->rq.len,
rq               2873 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 				  srq->rq.va, srq->rq.pa);
rq               1789 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 		} rq;
rq               1204 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		uresp.rq_dbid = qp->rq.dbid;
rq               1206 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
rq               1207 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
rq               1208 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		uresp.num_rqe_allocated = qp->rq.max_cnt;
rq               1271 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	    kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
rq               1291 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	qp->rq.max_sges = attrs->cap.max_recv_sge;
rq               1498 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
rq               1500 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
rq               1576 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	return (qp->rq.tail == qp->rq.head);
rq               1641 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
rq               1643 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 					qp->srq->rq.max_wqe_idx;
rq               1646 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				ocrdma_hwq_inc_tail(&qp->srq->rq);
rq               1651 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				ocrdma_hwq_inc_tail(&qp->rq);
rq               1735 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
rq               1736 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 					PAGE_ALIGN(qp->rq.len));
rq               1754 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	uresp.rq_dbid = srq->rq.dbid;
rq               1756 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
rq               1757 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	uresp.rq_page_size = srq->rq.len;
rq               1761 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	uresp.num_rqe_allocated = srq->rq.max_cnt;
rq               1801 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
rq               1808 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
rq               1809 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		    (srq->rq.max_cnt % 32 ? 1 : 0);
rq               1878 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
rq               1879 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				PAGE_ALIGN(srq->rq.len));
rq               2208 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
rq               2249 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
rq               2250 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		    wr->num_sge > qp->rq.max_sges) {
rq               2255 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		rqe = ocrdma_hwq_head(&qp->rq);
rq               2258 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
rq               2266 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ocrdma_hwq_inc_head(&qp->rq);
rq               2287 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			BUG_ON(indx >= srq->rq.max_cnt);
rq               2299 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	u32 val = srq->rq.dbid | (1 << 16);
rq               2317 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
rq               2318 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		    wr->num_sge > srq->rq.max_sges) {
rq               2324 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		rqe = ocrdma_hwq_head(&srq->rq);
rq               2333 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ocrdma_hwq_inc_head(&srq->rq);
rq               2511 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
rq               2512 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	ocrdma_hwq_inc_tail(&qp->rq);
rq               2649 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
rq               2650 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
rq               2657 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	ocrdma_hwq_inc_tail(&srq->rq);
rq               2708 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
rq               2711 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
rq               2715 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
rq               2718 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
rq               2724 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
rq               2725 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ocrdma_hwq_inc_tail(&qp->rq);
rq               2839 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
rq               2840 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			ocrdma_hwq_inc_tail(&qp->rq);
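The ocrdma completion path above recovers the receive slot by shifting the CQE buftag and masking with max_wqe_idx (max_cnt - 1). A toy version of that extraction, assuming the allocated count is a power of two so the mask works; the shift position and the values are invented for illustration:

#include <stdio.h>

#define BUFTAG_SHIFT 16u   /* hypothetical bit position of the index field */

int main(void)
{
    unsigned max_cnt = 256;              /* assumed power of two */
    unsigned max_wqe_idx = max_cnt - 1;  /* then doubles as an index mask */

    unsigned buftag_qpn = (300u << BUFTAG_SHIFT) | 0xab;  /* raw index 300, qpn 0xab */
    unsigned wqe_idx = (buftag_qpn >> BUFTAG_SHIFT) & max_wqe_idx;

    printf("raw index 300 masks to slot %u (range 0..%u)\n", wqe_idx, max_wqe_idx);
    return 0;
}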
rq                396 drivers/infiniband/hw/qedr/qedr.h 	struct qedr_qp_hwq_info rq;
rq                106 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
rq                108 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
rq                110 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
rq                112 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	*((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
rq                114 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	*((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
rq                117 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	qedr_inc_sw_gsi_cons(&qp->rq);
rq                340 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	qp->rq.max_wr = attrs->cap.max_recv_wr;
rq                343 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
rq                653 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		memset(&qp->rqe_wr_id[qp->rq.prod], 0,
rq                654 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		       sizeof(qp->rqe_wr_id[qp->rq.prod]));
rq                655 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
rq                656 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
rq                658 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		qedr_inc_sw_prod(&qp->rq);
rq                683 drivers/infiniband/hw/qedr/qedr_roce_cm.c 	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
rq                687 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
rq                690 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
rq                693 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
rq                695 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
rq                698 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
rq                703 drivers/infiniband/hw/qedr/qedr_roce_cm.c 			wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
rq                707 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		qedr_inc_sw_cons(&qp->rq);
rq                727 drivers/infiniband/hw/qedr/qedr_roce_cm.c 		 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
rq               1214 drivers/infiniband/hw/qedr/verbs.c 		qp->rq.max_sges = attrs->cap.max_recv_sge;
rq               1217 drivers/infiniband/hw/qedr/verbs.c 			 qp->rq.max_sges, qp->rq_cq->icid);
rq               1235 drivers/infiniband/hw/qedr/verbs.c 		qp->rq.db = dev->db_addr +
rq               1237 drivers/infiniband/hw/qedr/verbs.c 		qp->rq.db_data.data.icid = qp->icid;
rq               1677 drivers/infiniband/hw/qedr/verbs.c 	qp->rq.db = dev->db_addr +
rq               1679 drivers/infiniband/hw/qedr/verbs.c 	qp->rq.db_data.data.icid = qp->icid;
rq               1680 drivers/infiniband/hw/qedr/verbs.c 	qp->rq.iwarp_db2 = dev->db_addr +
rq               1682 drivers/infiniband/hw/qedr/verbs.c 	qp->rq.iwarp_db2_data.data.icid = qp->icid;
rq               1683 drivers/infiniband/hw/qedr/verbs.c 	qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
rq               1715 drivers/infiniband/hw/qedr/verbs.c 					   &qp->rq.pbl, NULL);
rq               1719 drivers/infiniband/hw/qedr/verbs.c 	in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
rq               1720 drivers/infiniband/hw/qedr/verbs.c 	in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
rq               1782 drivers/infiniband/hw/qedr/verbs.c 					   &qp->rq.pbl, &ext_pbl);
rq               1804 drivers/infiniband/hw/qedr/verbs.c 	dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
rq               1851 drivers/infiniband/hw/qedr/verbs.c 	qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
rq               1854 drivers/infiniband/hw/qedr/verbs.c 	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
rq               1870 drivers/infiniband/hw/qedr/verbs.c 	n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
rq               2015 drivers/infiniband/hw/qedr/verbs.c 			qedr_reset_qp_hwq_info(&qp->rq);
rq               2030 drivers/infiniband/hw/qedr/verbs.c 				writel(qp->rq.db_data.raw, qp->rq.db);
rq               2083 drivers/infiniband/hw/qedr/verbs.c 			if ((qp->rq.prod != qp->rq.cons) ||
rq               2087 drivers/infiniband/hw/qedr/verbs.c 					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
rq               2405 drivers/infiniband/hw/qedr/verbs.c 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
rq               2407 drivers/infiniband/hw/qedr/verbs.c 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
rq               3573 drivers/infiniband/hw/qedr/verbs.c 		if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
rq               3575 drivers/infiniband/hw/qedr/verbs.c 		    wr->num_sge > qp->rq.max_sges) {
rq               3577 drivers/infiniband/hw/qedr/verbs.c 			       qed_chain_get_elem_left_u32(&qp->rq.pbl),
rq               3579 drivers/infiniband/hw/qedr/verbs.c 			       qp->rq.max_sges);
rq               3587 drivers/infiniband/hw/qedr/verbs.c 			    qed_chain_produce(&qp->rq.pbl);
rq               3610 drivers/infiniband/hw/qedr/verbs.c 			    qed_chain_produce(&qp->rq.pbl);
rq               3622 drivers/infiniband/hw/qedr/verbs.c 		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
rq               3623 drivers/infiniband/hw/qedr/verbs.c 		qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
rq               3625 drivers/infiniband/hw/qedr/verbs.c 		qedr_inc_sw_prod(&qp->rq);
rq               3636 drivers/infiniband/hw/qedr/verbs.c 		qp->rq.db_data.data.value++;
rq               3638 drivers/infiniband/hw/qedr/verbs.c 		writel(qp->rq.db_data.raw, qp->rq.db);
rq               3641 drivers/infiniband/hw/qedr/verbs.c 			writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
rq               3967 drivers/infiniband/hw/qedr/verbs.c 	u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
rq               3971 drivers/infiniband/hw/qedr/verbs.c 	while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
rq               3972 drivers/infiniband/hw/qedr/verbs.c 		qed_chain_consume(&qp->rq.pbl);
rq               3973 drivers/infiniband/hw/qedr/verbs.c 	qedr_inc_sw_cons(&qp->rq);
rq               3983 drivers/infiniband/hw/qedr/verbs.c 	while (num_entries && qp->rq.wqe_cons != hw_cons) {
rq               3990 drivers/infiniband/hw/qedr/verbs.c 		wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
rq               3995 drivers/infiniband/hw/qedr/verbs.c 		while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
rq               3996 drivers/infiniband/hw/qedr/verbs.c 			qed_chain_consume(&qp->rq.pbl);
rq               3997 drivers/infiniband/hw/qedr/verbs.c 		qedr_inc_sw_cons(&qp->rq);
rq               4006 drivers/infiniband/hw/qedr/verbs.c 	if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
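qedr remembers, per posted receive, how many chain elements the WR produced (wqe_size) so the completion path can consume exactly that many again. A compact model of that bookkeeping with a plain counter standing in for the hardware chain; all names and sizes are hypothetical, not qedr's:

#include <stdio.h>

#define MAX_WR 8

struct rqe_info { unsigned long long wr_id; unsigned wqe_size; };

int main(void)
{
    struct rqe_info tbl[MAX_WR];
    unsigned prod = 0, cons = 0, chain_elems = 0;

    /* Post three receives with 2, 1 and 3 SGEs respectively. */
    unsigned sges[] = { 2, 1, 3 };
    for (unsigned i = 0; i < 3; i++) {
        tbl[prod].wr_id = 100 + i;
        tbl[prod].wqe_size = sges[i];   /* elements produced on the chain */
        chain_elems += sges[i];
        prod = (prod + 1) % MAX_WR;
    }

    /* Completion path: consume exactly wqe_size elements per WR. */
    while (cons != prod) {
        while (tbl[cons].wqe_size--)
            chain_elems--;              /* stands in for consuming one chain element */
        printf("completed wr_id=%llu, chain now holds %u elems\n",
               tbl[cons].wr_id, chain_elems);
        cons = (cons + 1) % MAX_WR;
    }
    return 0;
}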
rq                186 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h 	struct pvrdma_wq rq;
rq                117 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	if (qp->rq.ring) {
rq                118 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 		atomic_set(&qp->rq.ring->cons_head, 0);
rq                119 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 		atomic_set(&qp->rq.ring->prod_tail, 0);
rq                137 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
rq                138 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));
rq                141 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	req_cap->max_recv_wr = qp->rq.wqe_cnt;
rq                142 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	req_cap->max_recv_sge = qp->rq.max_sg;
rq                144 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
rq                146 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 					     qp->rq.max_sg);
rq                147 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
rq                246 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 		spin_lock_init(&qp->rq.lock);
rq                309 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 			qp->rq.offset = qp->npages_send * PAGE_SIZE;
rq                336 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 			qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
rq                590 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 				       qp->rq.offset + n * qp->rq.wqe_size);
rq                854 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	spin_lock_irqsave(&qp->rq.lock, flags);
rq                859 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 		if (unlikely(wr->num_sge > qp->rq.max_sg ||
rq                869 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 				qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
rq                894 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
rq                895 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 				    qp->rq.wqe_cnt);
rq                900 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	spin_unlock_irqrestore(&qp->rq.lock, flags);
rq                907 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	spin_unlock_irqrestore(&qp->rq.lock, flags);
rq                822 drivers/infiniband/sw/rdmavt/qp.c int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
rq                826 drivers/infiniband/sw/rdmavt/qp.c 		rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
rq                827 drivers/infiniband/sw/rdmavt/qp.c 		if (!rq->wq)
rq                830 drivers/infiniband/sw/rdmavt/qp.c 		rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
rq                831 drivers/infiniband/sw/rdmavt/qp.c 		if (!rq->kwq)
rq                833 drivers/infiniband/sw/rdmavt/qp.c 		rq->kwq->curr_wq = rq->wq->wq;
rq                836 drivers/infiniband/sw/rdmavt/qp.c 		rq->kwq =
rq                838 drivers/infiniband/sw/rdmavt/qp.c 		if (!rq->kwq)
rq                840 drivers/infiniband/sw/rdmavt/qp.c 		rq->kwq->curr_wq = rq->kwq->wq;
rq                843 drivers/infiniband/sw/rdmavt/qp.c 	spin_lock_init(&rq->kwq->p_lock);
rq                844 drivers/infiniband/sw/rdmavt/qp.c 	spin_lock_init(&rq->kwq->c_lock);
rq                847 drivers/infiniband/sw/rdmavt/qp.c 	rvt_free_rq(rq);
rq               1122 drivers/infiniband/sw/rdmavt/qp.c 			if (srq->rq.max_sge > 1)
rq               1124 drivers/infiniband/sw/rdmavt/qp.c 					(srq->rq.max_sge - 1);
rq               2255 drivers/infiniband/sw/rdmavt/qp.c 		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
rq               2260 drivers/infiniband/sw/rdmavt/qp.c 		spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
rq               2261 drivers/infiniband/sw/rdmavt/qp.c 		wq = srq->rq.kwq;
rq               2263 drivers/infiniband/sw/rdmavt/qp.c 		if (next >= srq->rq.size)
rq               2266 drivers/infiniband/sw/rdmavt/qp.c 			spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
rq               2271 drivers/infiniband/sw/rdmavt/qp.c 		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
rq               2281 drivers/infiniband/sw/rdmavt/qp.c 		spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
rq               2362 drivers/infiniband/sw/rdmavt/qp.c static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
rq               2368 drivers/infiniband/sw/rdmavt/qp.c 	if (count >= rq->size)
rq               2371 drivers/infiniband/sw/rdmavt/qp.c 		count += rq->size - tail;
rq               2385 drivers/infiniband/sw/rdmavt/qp.c static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
rq               2390 drivers/infiniband/sw/rdmavt/qp.c 		head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
rq               2392 drivers/infiniband/sw/rdmavt/qp.c 		head = rq->kwq->head;
rq               2410 drivers/infiniband/sw/rdmavt/qp.c 	struct rvt_rq *rq;
rq               2424 drivers/infiniband/sw/rdmavt/qp.c 		rq = &srq->rq;
rq               2429 drivers/infiniband/sw/rdmavt/qp.c 		rq = &qp->r_rq;
rq               2433 drivers/infiniband/sw/rdmavt/qp.c 	spin_lock_irqsave(&rq->kwq->c_lock, flags);
rq               2438 drivers/infiniband/sw/rdmavt/qp.c 	kwq = rq->kwq;
rq               2440 drivers/infiniband/sw/rdmavt/qp.c 		wq = rq->wq;
rq               2447 drivers/infiniband/sw/rdmavt/qp.c 	if (tail >= rq->size)
rq               2451 drivers/infiniband/sw/rdmavt/qp.c 		head = get_rvt_head(rq, ip);
rq               2452 drivers/infiniband/sw/rdmavt/qp.c 		kwq->count = get_count(rq, tail, head);
rq               2460 drivers/infiniband/sw/rdmavt/qp.c 	wqe = rvt_get_rwqe_ptr(rq, tail);
rq               2466 drivers/infiniband/sw/rdmavt/qp.c 	if (++tail >= rq->size)
rq               2487 drivers/infiniband/sw/rdmavt/qp.c 			kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
rq               2492 drivers/infiniband/sw/rdmavt/qp.c 				spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
rq               2502 drivers/infiniband/sw/rdmavt/qp.c 	spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
rq                 71 drivers/infiniband/sw/rdmavt/qp.h int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
rq                 96 drivers/infiniband/sw/rdmavt/srq.c 	srq->rq.size = srq_init_attr->attr.max_wr + 1;
rq                 97 drivers/infiniband/sw/rdmavt/srq.c 	srq->rq.max_sge = srq_init_attr->attr.max_sge;
rq                 98 drivers/infiniband/sw/rdmavt/srq.c 	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
rq                100 drivers/infiniband/sw/rdmavt/srq.c 	if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
rq                111 drivers/infiniband/sw/rdmavt/srq.c 		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
rq                113 drivers/infiniband/sw/rdmavt/srq.c 		srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
rq                128 drivers/infiniband/sw/rdmavt/srq.c 	spin_lock_init(&srq->rq.lock);
rq                152 drivers/infiniband/sw/rdmavt/srq.c 	rvt_free_rq(&srq->rq);
rq                187 drivers/infiniband/sw/rdmavt/srq.c 			srq->rq.max_sge * sizeof(struct ib_sge);
rq                209 drivers/infiniband/sw/rdmavt/srq.c 		spin_lock_irq(&srq->rq.kwq->c_lock);
rq                215 drivers/infiniband/sw/rdmavt/srq.c 			owq = srq->rq.wq;
rq                219 drivers/infiniband/sw/rdmavt/srq.c 			okwq = srq->rq.kwq;
rq                223 drivers/infiniband/sw/rdmavt/srq.c 		if (head >= srq->rq.size || tail >= srq->rq.size) {
rq                229 drivers/infiniband/sw/rdmavt/srq.c 			n += srq->rq.size - tail;
rq                242 drivers/infiniband/sw/rdmavt/srq.c 			wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
rq                249 drivers/infiniband/sw/rdmavt/srq.c 			if (++tail >= srq->rq.size)
rq                252 drivers/infiniband/sw/rdmavt/srq.c 		srq->rq.kwq = tmp_rq.kwq;
rq                254 drivers/infiniband/sw/rdmavt/srq.c 			srq->rq.wq = tmp_rq.wq;
rq                261 drivers/infiniband/sw/rdmavt/srq.c 		srq->rq.size = size;
rq                264 drivers/infiniband/sw/rdmavt/srq.c 		spin_unlock_irq(&srq->rq.kwq->c_lock);
rq                298 drivers/infiniband/sw/rdmavt/srq.c 		spin_lock_irq(&srq->rq.kwq->c_lock);
rq                299 drivers/infiniband/sw/rdmavt/srq.c 		if (attr->srq_limit >= srq->rq.size)
rq                303 drivers/infiniband/sw/rdmavt/srq.c 		spin_unlock_irq(&srq->rq.kwq->c_lock);
rq                308 drivers/infiniband/sw/rdmavt/srq.c 	spin_unlock_irq(&srq->rq.kwq->c_lock);
rq                324 drivers/infiniband/sw/rdmavt/srq.c 	attr->max_wr = srq->rq.size - 1;
rq                325 drivers/infiniband/sw/rdmavt/srq.c 	attr->max_sge = srq->rq.max_sge;
rq                345 drivers/infiniband/sw/rdmavt/srq.c 	kvfree(srq->rq.kwq);
rq                296 drivers/infiniband/sw/rxe/rxe_qp.c 		qp->rq.max_wr		= init->cap.max_recv_wr;
rq                297 drivers/infiniband/sw/rxe/rxe_qp.c 		qp->rq.max_sge		= init->cap.max_recv_sge;
rq                299 drivers/infiniband/sw/rxe/rxe_qp.c 		wqe_size = rcv_wqe_size(qp->rq.max_sge);
rq                302 drivers/infiniband/sw/rxe/rxe_qp.c 			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
rq                304 drivers/infiniband/sw/rxe/rxe_qp.c 		qp->rq.queue = rxe_queue_init(rxe,
rq                305 drivers/infiniband/sw/rxe/rxe_qp.c 					      &qp->rq.max_wr,
rq                307 drivers/infiniband/sw/rxe/rxe_qp.c 		if (!qp->rq.queue)
rq                311 drivers/infiniband/sw/rxe/rxe_qp.c 				   qp->rq.queue->buf, qp->rq.queue->buf_size,
rq                312 drivers/infiniband/sw/rxe/rxe_qp.c 				   &qp->rq.queue->ip);
rq                314 drivers/infiniband/sw/rxe/rxe_qp.c 			vfree(qp->rq.queue->buf);
rq                315 drivers/infiniband/sw/rxe/rxe_qp.c 			kfree(qp->rq.queue);
rq                320 drivers/infiniband/sw/rxe/rxe_qp.c 	spin_lock_init(&qp->rq.producer_lock);
rq                321 drivers/infiniband/sw/rxe/rxe_qp.c 	spin_lock_init(&qp->rq.consumer_lock);
rq                399 drivers/infiniband/sw/rxe/rxe_qp.c 		init->cap.max_recv_wr		= qp->rq.max_wr;
rq                400 drivers/infiniband/sw/rxe/rxe_qp.c 		init->cap.max_recv_sge		= qp->rq.max_sge;
rq                758 drivers/infiniband/sw/rxe/rxe_qp.c 		attr->cap.max_recv_wr		= qp->rq.max_wr;
rq                759 drivers/infiniband/sw/rxe/rxe_qp.c 		attr->cap.max_recv_sge		= qp->rq.max_sge;
rq                817 drivers/infiniband/sw/rxe/rxe_qp.c 	if (qp->rq.queue)
rq                818 drivers/infiniband/sw/rxe/rxe_qp.c 		rxe_queue_cleanup(qp->rq.queue);
rq                319 drivers/infiniband/sw/rxe/rxe_resp.c 	struct rxe_queue *q = srq->rq.queue;
rq                326 drivers/infiniband/sw/rxe/rxe_resp.c 	spin_lock_bh(&srq->rq.consumer_lock);
rq                330 drivers/infiniband/sw/rxe/rxe_resp.c 		spin_unlock_bh(&srq->rq.consumer_lock);
rq                346 drivers/infiniband/sw/rxe/rxe_resp.c 	spin_unlock_bh(&srq->rq.consumer_lock);
rq                350 drivers/infiniband/sw/rxe/rxe_resp.c 	spin_unlock_bh(&srq->rq.consumer_lock);
rq                368 drivers/infiniband/sw/rxe/rxe_resp.c 			qp->resp.wqe = queue_head(qp->rq.queue);
rq                395 drivers/infiniband/sw/rxe/rxe_resp.c 		qp->resp.wqe = queue_head(qp->rq.queue);
rq                940 drivers/infiniband/sw/rxe/rxe_resp.c 		advance_consumer(qp->rq.queue);
rq               1216 drivers/infiniband/sw/rxe/rxe_resp.c 	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
rq               1217 drivers/infiniband/sw/rxe/rxe_resp.c 		advance_consumer(qp->rq.queue);
rq                 76 drivers/infiniband/sw/rxe/rxe_srq.c 		if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
rq                 79 drivers/infiniband/sw/rxe/rxe_srq.c 				 srq->rq.queue->buf->index_mask);
rq                113 drivers/infiniband/sw/rxe/rxe_srq.c 	srq->rq.max_wr		= init->attr.max_wr;
rq                114 drivers/infiniband/sw/rxe/rxe_srq.c 	srq->rq.max_sge		= init->attr.max_sge;
rq                116 drivers/infiniband/sw/rxe/rxe_srq.c 	srq_wqe_size		= rcv_wqe_size(srq->rq.max_sge);
rq                118 drivers/infiniband/sw/rxe/rxe_srq.c 	spin_lock_init(&srq->rq.producer_lock);
rq                119 drivers/infiniband/sw/rxe/rxe_srq.c 	spin_lock_init(&srq->rq.consumer_lock);
rq                121 drivers/infiniband/sw/rxe/rxe_srq.c 	q = rxe_queue_init(rxe, &srq->rq.max_wr,
rq                128 drivers/infiniband/sw/rxe/rxe_srq.c 	srq->rq.queue = q;
rq                154 drivers/infiniband/sw/rxe/rxe_srq.c 	struct rxe_queue *q = srq->rq.queue;
rq                165 drivers/infiniband/sw/rxe/rxe_srq.c 				       rcv_wqe_size(srq->rq.max_sge), udata, mi,
rq                166 drivers/infiniband/sw/rxe/rxe_srq.c 				       &srq->rq.producer_lock,
rq                167 drivers/infiniband/sw/rxe/rxe_srq.c 				       &srq->rq.consumer_lock);
rq                179 drivers/infiniband/sw/rxe/rxe_srq.c 	srq->rq.queue = NULL;
rq                245 drivers/infiniband/sw/rxe/rxe_verbs.c static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
rq                253 drivers/infiniband/sw/rxe/rxe_verbs.c 	if (unlikely(queue_full(rq->queue))) {
rq                258 drivers/infiniband/sw/rxe/rxe_verbs.c 	if (unlikely(num_sge > rq->max_sge)) {
rq                267 drivers/infiniband/sw/rxe/rxe_verbs.c 	recv_wqe = producer_addr(rq->queue);
rq                285 drivers/infiniband/sw/rxe/rxe_verbs.c 	advance_producer(rq->queue);
rq                370 drivers/infiniband/sw/rxe/rxe_verbs.c 	attr->max_wr = srq->rq.queue->buf->index_mask;
rq                371 drivers/infiniband/sw/rxe/rxe_verbs.c 	attr->max_sge = srq->rq.max_sge;
rq                380 drivers/infiniband/sw/rxe/rxe_verbs.c 	if (srq->rq.queue)
rq                381 drivers/infiniband/sw/rxe/rxe_verbs.c 		rxe_queue_cleanup(srq->rq.queue);
rq                394 drivers/infiniband/sw/rxe/rxe_verbs.c 	spin_lock_irqsave(&srq->rq.producer_lock, flags);
rq                397 drivers/infiniband/sw/rxe/rxe_verbs.c 		err = post_one_recv(&srq->rq, wr);
rq                403 drivers/infiniband/sw/rxe/rxe_verbs.c 	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
rq                746 drivers/infiniband/sw/rxe/rxe_verbs.c 	struct rxe_rq *rq = &qp->rq;
rq                761 drivers/infiniband/sw/rxe/rxe_verbs.c 	spin_lock_irqsave(&rq->producer_lock, flags);
rq                764 drivers/infiniband/sw/rxe/rxe_verbs.c 		err = post_one_recv(rq, wr);
rq                772 drivers/infiniband/sw/rxe/rxe_verbs.c 	spin_unlock_irqrestore(&rq->producer_lock, flags);
rq                126 drivers/infiniband/sw/rxe/rxe_verbs.h 	struct rxe_rq		rq;
rq                251 drivers/infiniband/sw/rxe/rxe_verbs.h 	struct rxe_rq		rq;
rq                181 drivers/input/misc/xen-kbdfront.c static irqreturn_t input_handler(int rq, void *dev_id)
rq                895 drivers/isdn/hardware/mISDN/avmfritz.c open_bchannel(struct fritzcard *fc, struct channel_req *rq)
rq                899 drivers/isdn/hardware/mISDN/avmfritz.c 	if (rq->adr.channel == 0 || rq->adr.channel > 2)
rq                901 drivers/isdn/hardware/mISDN/avmfritz.c 	if (rq->protocol == ISDN_P_NONE)
rq                903 drivers/isdn/hardware/mISDN/avmfritz.c 	bch = &fc->bch[rq->adr.channel - 1];
rq                906 drivers/isdn/hardware/mISDN/avmfritz.c 	bch->ch.protocol = rq->protocol;
rq                907 drivers/isdn/hardware/mISDN/avmfritz.c 	rq->ch = &bch->ch;
rq                920 drivers/isdn/hardware/mISDN/avmfritz.c 	struct channel_req	*rq;
rq                926 drivers/isdn/hardware/mISDN/avmfritz.c 		rq = arg;
rq                927 drivers/isdn/hardware/mISDN/avmfritz.c 		if (rq->protocol == ISDN_P_TE_S0)
rq                928 drivers/isdn/hardware/mISDN/avmfritz.c 			err = fc->isac.open(&fc->isac, rq);
rq                930 drivers/isdn/hardware/mISDN/avmfritz.c 			err = open_bchannel(fc, rq);
rq               4042 drivers/isdn/hardware/mISDN/hfcmulti.c 	      struct channel_req *rq)
rq               4050 drivers/isdn/hardware/mISDN/hfcmulti.c 	if (rq->protocol == ISDN_P_NONE)
rq               4053 drivers/isdn/hardware/mISDN/hfcmulti.c 	    (dch->dev.D.protocol != rq->protocol)) {
rq               4056 drivers/isdn/hardware/mISDN/hfcmulti.c 			       __func__, dch->dev.D.protocol, rq->protocol);
rq               4059 drivers/isdn/hardware/mISDN/hfcmulti.c 	    (rq->protocol != ISDN_P_TE_S0))
rq               4061 drivers/isdn/hardware/mISDN/hfcmulti.c 	if (dch->dev.D.protocol != rq->protocol) {
rq               4062 drivers/isdn/hardware/mISDN/hfcmulti.c 		if (rq->protocol == ISDN_P_TE_S0) {
rq               4067 drivers/isdn/hardware/mISDN/hfcmulti.c 		dch->dev.D.protocol = rq->protocol;
rq               4075 drivers/isdn/hardware/mISDN/hfcmulti.c 	rq->ch = &dch->dev.D;
rq               4083 drivers/isdn/hardware/mISDN/hfcmulti.c 	      struct channel_req *rq)
rq               4088 drivers/isdn/hardware/mISDN/hfcmulti.c 	if (!test_channelmap(rq->adr.channel, dch->dev.channelmap))
rq               4090 drivers/isdn/hardware/mISDN/hfcmulti.c 	if (rq->protocol == ISDN_P_NONE)
rq               4093 drivers/isdn/hardware/mISDN/hfcmulti.c 		ch = rq->adr.channel;
rq               4095 drivers/isdn/hardware/mISDN/hfcmulti.c 		ch = (rq->adr.channel - 1) + (dch->slot - 2);
rq               4104 drivers/isdn/hardware/mISDN/hfcmulti.c 	bch->ch.protocol = rq->protocol;
rq               4106 drivers/isdn/hardware/mISDN/hfcmulti.c 	rq->ch = &bch->ch;
rq               4172 drivers/isdn/hardware/mISDN/hfcmulti.c 	struct channel_req	*rq;
rq               4181 drivers/isdn/hardware/mISDN/hfcmulti.c 		rq = arg;
rq               4182 drivers/isdn/hardware/mISDN/hfcmulti.c 		switch (rq->protocol) {
rq               4189 drivers/isdn/hardware/mISDN/hfcmulti.c 			err = open_dchannel(hc, dch, rq); /* locked there */
rq               4197 drivers/isdn/hardware/mISDN/hfcmulti.c 			err = open_dchannel(hc, dch, rq); /* locked there */
rq               4201 drivers/isdn/hardware/mISDN/hfcmulti.c 			err = open_bchannel(hc, dch, rq);
rq               1874 drivers/isdn/hardware/mISDN/hfcpci.c 	      struct channel_req *rq)
rq               1881 drivers/isdn/hardware/mISDN/hfcpci.c 	if (rq->protocol == ISDN_P_NONE)
rq               1883 drivers/isdn/hardware/mISDN/hfcpci.c 	if (rq->adr.channel == 1) {
rq               1888 drivers/isdn/hardware/mISDN/hfcpci.c 		if (rq->protocol == ISDN_P_TE_S0) {
rq               1893 drivers/isdn/hardware/mISDN/hfcpci.c 		hc->hw.protocol = rq->protocol;
rq               1894 drivers/isdn/hardware/mISDN/hfcpci.c 		ch->protocol = rq->protocol;
rq               1899 drivers/isdn/hardware/mISDN/hfcpci.c 		if (rq->protocol != ch->protocol) {
rq               1902 drivers/isdn/hardware/mISDN/hfcpci.c 			if (rq->protocol == ISDN_P_TE_S0) {
rq               1907 drivers/isdn/hardware/mISDN/hfcpci.c 			hc->hw.protocol = rq->protocol;
rq               1908 drivers/isdn/hardware/mISDN/hfcpci.c 			ch->protocol = rq->protocol;
rq               1918 drivers/isdn/hardware/mISDN/hfcpci.c 	rq->ch = ch;
rq               1925 drivers/isdn/hardware/mISDN/hfcpci.c open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
rq               1929 drivers/isdn/hardware/mISDN/hfcpci.c 	if (rq->adr.channel == 0 || rq->adr.channel > 2)
rq               1931 drivers/isdn/hardware/mISDN/hfcpci.c 	if (rq->protocol == ISDN_P_NONE)
rq               1933 drivers/isdn/hardware/mISDN/hfcpci.c 	bch = &hc->bch[rq->adr.channel - 1];
rq               1936 drivers/isdn/hardware/mISDN/hfcpci.c 	bch->ch.protocol = rq->protocol;
rq               1937 drivers/isdn/hardware/mISDN/hfcpci.c 	rq->ch = &bch->ch; /* TODO: E-channel */
rq               1952 drivers/isdn/hardware/mISDN/hfcpci.c 	struct channel_req	*rq;
rq               1960 drivers/isdn/hardware/mISDN/hfcpci.c 		rq = arg;
rq               1961 drivers/isdn/hardware/mISDN/hfcpci.c 		if ((rq->protocol == ISDN_P_TE_S0) ||
rq               1962 drivers/isdn/hardware/mISDN/hfcpci.c 		    (rq->protocol == ISDN_P_NT_S0))
rq               1963 drivers/isdn/hardware/mISDN/hfcpci.c 			err = open_dchannel(hc, ch, rq);
rq               1965 drivers/isdn/hardware/mISDN/hfcpci.c 			err = open_bchannel(hc, rq);
rq                413 drivers/isdn/hardware/mISDN/hfcsusb.c 	      struct channel_req *rq)
rq                419 drivers/isdn/hardware/mISDN/hfcsusb.c 		       hw->name, __func__, hw->dch.dev.id, rq->adr.channel,
rq                421 drivers/isdn/hardware/mISDN/hfcsusb.c 	if (rq->protocol == ISDN_P_NONE)
rq                429 drivers/isdn/hardware/mISDN/hfcsusb.c 	if (rq->adr.channel == 1) {
rq                440 drivers/isdn/hardware/mISDN/hfcsusb.c 		hw->protocol = rq->protocol;
rq                441 drivers/isdn/hardware/mISDN/hfcsusb.c 		if (rq->protocol == ISDN_P_TE_S0) {
rq                447 drivers/isdn/hardware/mISDN/hfcsusb.c 		ch->protocol = rq->protocol;
rq                450 drivers/isdn/hardware/mISDN/hfcsusb.c 		if (rq->protocol != ch->protocol)
rq                458 drivers/isdn/hardware/mISDN/hfcsusb.c 	rq->ch = ch;
rq                466 drivers/isdn/hardware/mISDN/hfcsusb.c open_bchannel(struct hfcsusb *hw, struct channel_req *rq)
rq                470 drivers/isdn/hardware/mISDN/hfcsusb.c 	if (rq->adr.channel == 0 || rq->adr.channel > 2)
rq                472 drivers/isdn/hardware/mISDN/hfcsusb.c 	if (rq->protocol == ISDN_P_NONE)
rq                477 drivers/isdn/hardware/mISDN/hfcsusb.c 		       hw->name, __func__, rq->adr.channel);
rq                479 drivers/isdn/hardware/mISDN/hfcsusb.c 	bch = &hw->bch[rq->adr.channel - 1];
rq                482 drivers/isdn/hardware/mISDN/hfcsusb.c 	bch->ch.protocol = rq->protocol;
rq                483 drivers/isdn/hardware/mISDN/hfcsusb.c 	rq->ch = &bch->ch;
rq                523 drivers/isdn/hardware/mISDN/hfcsusb.c 	struct channel_req	*rq;
rq                531 drivers/isdn/hardware/mISDN/hfcsusb.c 		rq = arg;
rq                532 drivers/isdn/hardware/mISDN/hfcsusb.c 		if ((rq->protocol == ISDN_P_TE_S0) ||
rq                533 drivers/isdn/hardware/mISDN/hfcsusb.c 		    (rq->protocol == ISDN_P_NT_S0))
rq                534 drivers/isdn/hardware/mISDN/hfcsusb.c 			err = open_dchannel(hw, ch, rq);
rq                536 drivers/isdn/hardware/mISDN/hfcsusb.c 			err = open_bchannel(hw, rq);
rq                743 drivers/isdn/hardware/mISDN/mISDNipac.c open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller)
rq                747 drivers/isdn/hardware/mISDN/mISDNipac.c 	if (rq->protocol != ISDN_P_TE_S0)
rq                749 drivers/isdn/hardware/mISDN/mISDNipac.c 	if (rq->adr.channel == 1)
rq                752 drivers/isdn/hardware/mISDN/mISDNipac.c 	rq->ch = &isac->dch.dev.D;
rq                753 drivers/isdn/hardware/mISDN/mISDNipac.c 	rq->ch->protocol = rq->protocol;
rq                755 drivers/isdn/hardware/mISDN/mISDNipac.c 		_queue_data(rq->ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
rq                761 drivers/isdn/hardware/mISDN/mISDNipac.c open_dchannel(struct isac_hw *isac, struct channel_req *rq)
rq                763 drivers/isdn/hardware/mISDN/mISDNipac.c 	return open_dchannel_caller(isac, rq, __builtin_return_address(0));
rq               1479 drivers/isdn/hardware/mISDN/mISDNipac.c open_bchannel(struct ipac_hw *ipac, struct channel_req *rq)
rq               1483 drivers/isdn/hardware/mISDN/mISDNipac.c 	if (rq->adr.channel == 0 || rq->adr.channel > 2)
rq               1485 drivers/isdn/hardware/mISDN/mISDNipac.c 	if (rq->protocol == ISDN_P_NONE)
rq               1487 drivers/isdn/hardware/mISDN/mISDNipac.c 	bch = &ipac->hscx[rq->adr.channel - 1].bch;
rq               1491 drivers/isdn/hardware/mISDN/mISDNipac.c 	bch->ch.protocol = rq->protocol;
rq               1492 drivers/isdn/hardware/mISDN/mISDNipac.c 	rq->ch = &bch->ch;
rq               1531 drivers/isdn/hardware/mISDN/mISDNipac.c 	struct channel_req *rq;
rq               1537 drivers/isdn/hardware/mISDN/mISDNipac.c 		rq = arg;
rq               1538 drivers/isdn/hardware/mISDN/mISDNipac.c 		if (rq->protocol == ISDN_P_TE_S0)
rq               1539 drivers/isdn/hardware/mISDN/mISDNipac.c 			err = open_dchannel_caller(isac, rq, __builtin_return_address(0));
rq               1541 drivers/isdn/hardware/mISDN/mISDNipac.c 			err = open_bchannel(ipac, rq);
rq               1636 drivers/isdn/hardware/mISDN/mISDNisar.c isar_open(struct isar_hw *isar, struct channel_req *rq)
rq               1640 drivers/isdn/hardware/mISDN/mISDNisar.c 	if (rq->adr.channel == 0 || rq->adr.channel > 2)
rq               1642 drivers/isdn/hardware/mISDN/mISDNisar.c 	if (rq->protocol == ISDN_P_NONE)
rq               1644 drivers/isdn/hardware/mISDN/mISDNisar.c 	bch = &isar->ch[rq->adr.channel - 1].bch;
rq               1647 drivers/isdn/hardware/mISDN/mISDNisar.c 	bch->ch.protocol = rq->protocol;
rq               1648 drivers/isdn/hardware/mISDN/mISDNisar.c 	rq->ch = &bch->ch;
rq                849 drivers/isdn/hardware/mISDN/netjet.c open_bchannel(struct tiger_hw *card, struct channel_req *rq)
rq                853 drivers/isdn/hardware/mISDN/netjet.c 	if (rq->adr.channel == 0 || rq->adr.channel > 2)
rq                855 drivers/isdn/hardware/mISDN/netjet.c 	if (rq->protocol == ISDN_P_NONE)
rq                857 drivers/isdn/hardware/mISDN/netjet.c 	bch = &card->bc[rq->adr.channel - 1].bch;
rq                861 drivers/isdn/hardware/mISDN/netjet.c 	bch->ch.protocol = rq->protocol;
rq                862 drivers/isdn/hardware/mISDN/netjet.c 	rq->ch = &bch->ch;
rq                875 drivers/isdn/hardware/mISDN/netjet.c 	struct channel_req	*rq;
rq                881 drivers/isdn/hardware/mISDN/netjet.c 		rq = arg;
rq                882 drivers/isdn/hardware/mISDN/netjet.c 		if (rq->protocol == ISDN_P_TE_S0)
rq                883 drivers/isdn/hardware/mISDN/netjet.c 			err = card->isac.open(&card->isac, rq);
rq                885 drivers/isdn/hardware/mISDN/netjet.c 			err = open_bchannel(card, rq);
rq                241 drivers/isdn/hardware/mISDN/speedfax.c 	struct channel_req	*rq;
rq                247 drivers/isdn/hardware/mISDN/speedfax.c 		rq = arg;
rq                248 drivers/isdn/hardware/mISDN/speedfax.c 		if (rq->protocol == ISDN_P_TE_S0)
rq                249 drivers/isdn/hardware/mISDN/speedfax.c 			err = sf->isac.open(&sf->isac, rq);
rq                251 drivers/isdn/hardware/mISDN/speedfax.c 			err = sf->isar.open(&sf->isar, rq);
rq                987 drivers/isdn/hardware/mISDN/w6692.c open_bchannel(struct w6692_hw *card, struct channel_req *rq)
rq                991 drivers/isdn/hardware/mISDN/w6692.c 	if (rq->adr.channel == 0 || rq->adr.channel > 2)
rq                993 drivers/isdn/hardware/mISDN/w6692.c 	if (rq->protocol == ISDN_P_NONE)
rq                995 drivers/isdn/hardware/mISDN/w6692.c 	bch = &card->bc[rq->adr.channel - 1].bch;
rq                998 drivers/isdn/hardware/mISDN/w6692.c 	bch->ch.protocol = rq->protocol;
rq                999 drivers/isdn/hardware/mISDN/w6692.c 	rq->ch = &bch->ch;
rq               1158 drivers/isdn/hardware/mISDN/w6692.c open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller)
rq               1162 drivers/isdn/hardware/mISDN/w6692.c 	if (rq->protocol != ISDN_P_TE_S0)
rq               1164 drivers/isdn/hardware/mISDN/w6692.c 	if (rq->adr.channel == 1)
rq               1167 drivers/isdn/hardware/mISDN/w6692.c 	rq->ch = &card->dch.dev.D;
rq               1168 drivers/isdn/hardware/mISDN/w6692.c 	rq->ch->protocol = rq->protocol;
rq               1170 drivers/isdn/hardware/mISDN/w6692.c 		_queue_data(rq->ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
rq               1181 drivers/isdn/hardware/mISDN/w6692.c 	struct channel_req *rq;
rq               1187 drivers/isdn/hardware/mISDN/w6692.c 		rq = arg;
rq               1188 drivers/isdn/hardware/mISDN/w6692.c 		if (rq->protocol == ISDN_P_TE_S0)
rq               1189 drivers/isdn/hardware/mISDN/w6692.c 			err = open_dchannel(card, rq, __builtin_return_address(0));
rq               1191 drivers/isdn/hardware/mISDN/w6692.c 			err = open_bchannel(card, rq);
rq                976 drivers/isdn/mISDN/l1oip_core.c open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
rq                981 drivers/isdn/mISDN/l1oip_core.c 	if (rq->protocol == ISDN_P_NONE)
rq                984 drivers/isdn/mISDN/l1oip_core.c 	    (dch->dev.D.protocol != rq->protocol)) {
rq                987 drivers/isdn/mISDN/l1oip_core.c 			       __func__, dch->dev.D.protocol, rq->protocol);
rq                989 drivers/isdn/mISDN/l1oip_core.c 	if (dch->dev.D.protocol != rq->protocol)
rq                990 drivers/isdn/mISDN/l1oip_core.c 		dch->dev.D.protocol = rq->protocol;
rq                996 drivers/isdn/mISDN/l1oip_core.c 	rq->ch = &dch->dev.D;
rq               1003 drivers/isdn/mISDN/l1oip_core.c open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
rq               1008 drivers/isdn/mISDN/l1oip_core.c 	if (!test_channelmap(rq->adr.channel, dch->dev.channelmap))
rq               1010 drivers/isdn/mISDN/l1oip_core.c 	if (rq->protocol == ISDN_P_NONE)
rq               1012 drivers/isdn/mISDN/l1oip_core.c 	ch = rq->adr.channel; /* BRI: 1=B1 2=B2  PRI: 1..15,17.. */
rq               1021 drivers/isdn/mISDN/l1oip_core.c 	bch->ch.protocol = rq->protocol;
rq               1022 drivers/isdn/mISDN/l1oip_core.c 	rq->ch = &bch->ch;
rq               1034 drivers/isdn/mISDN/l1oip_core.c 	struct channel_req	*rq;
rq               1042 drivers/isdn/mISDN/l1oip_core.c 		rq = arg;
rq               1043 drivers/isdn/mISDN/l1oip_core.c 		switch (rq->protocol) {
rq               1050 drivers/isdn/mISDN/l1oip_core.c 			err = open_dchannel(hc, dch, rq);
rq               1058 drivers/isdn/mISDN/l1oip_core.c 			err = open_dchannel(hc, dch, rq);
rq               1061 drivers/isdn/mISDN/l1oip_core.c 			err = open_bchannel(hc, dch, rq);
rq               2112 drivers/isdn/mISDN/layer2.c 	struct channel_req	rq;
rq               2145 drivers/isdn/mISDN/layer2.c 			rq.protocol = ISDN_P_NT_E1;
rq               2147 drivers/isdn/mISDN/layer2.c 			rq.protocol = ISDN_P_NT_S0;
rq               2148 drivers/isdn/mISDN/layer2.c 		rq.adr.channel = 0;
rq               2149 drivers/isdn/mISDN/layer2.c 		l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
rq               2170 drivers/isdn/mISDN/layer2.c 			rq.protocol = ISDN_P_TE_E1;
rq               2172 drivers/isdn/mISDN/layer2.c 			rq.protocol = ISDN_P_TE_S0;
rq               2173 drivers/isdn/mISDN/layer2.c 		rq.adr.channel = 0;
rq               2174 drivers/isdn/mISDN/layer2.c 		l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
rq                421 drivers/isdn/mISDN/stack.c 	struct channel_req	rq;
rq                437 drivers/isdn/mISDN/stack.c 		rq.protocol = protocol;
rq                438 drivers/isdn/mISDN/stack.c 		rq.adr.channel = adr->channel;
rq                439 drivers/isdn/mISDN/stack.c 		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
rq                458 drivers/isdn/mISDN/stack.c 	struct channel_req	rq, rq2;
rq                470 drivers/isdn/mISDN/stack.c 		rq.protocol = protocol;
rq                471 drivers/isdn/mISDN/stack.c 		rq.adr = *adr;
rq                472 drivers/isdn/mISDN/stack.c 		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
rq                475 drivers/isdn/mISDN/stack.c 		ch->recv = rq.ch->send;
rq                476 drivers/isdn/mISDN/stack.c 		ch->peer = rq.ch;
rq                477 drivers/isdn/mISDN/stack.c 		rq.ch->recv = ch->send;
rq                478 drivers/isdn/mISDN/stack.c 		rq.ch->peer = ch;
rq                479 drivers/isdn/mISDN/stack.c 		rq.ch->st = dev->D.st;
rq                493 drivers/isdn/mISDN/stack.c 		rq.protocol = rq2.protocol;
rq                494 drivers/isdn/mISDN/stack.c 		rq.adr = *adr;
rq                495 drivers/isdn/mISDN/stack.c 		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
rq                500 drivers/isdn/mISDN/stack.c 		rq2.ch->recv = rq.ch->send;
rq                501 drivers/isdn/mISDN/stack.c 		rq2.ch->peer = rq.ch;
rq                502 drivers/isdn/mISDN/stack.c 		rq.ch->recv = rq2.ch->send;
rq                503 drivers/isdn/mISDN/stack.c 		rq.ch->peer = rq2.ch;
rq                504 drivers/isdn/mISDN/stack.c 		rq.ch->st = dev->D.st;
rq                507 drivers/isdn/mISDN/stack.c 	ch->nr = rq.ch->nr;
rq                515 drivers/isdn/mISDN/stack.c 	struct channel_req	rq;
rq                523 drivers/isdn/mISDN/stack.c 	rq.protocol = ISDN_P_TE_S0;
rq                525 drivers/isdn/mISDN/stack.c 		rq.protocol = ISDN_P_TE_E1;
rq                528 drivers/isdn/mISDN/stack.c 		rq.protocol = ISDN_P_NT_S0;
rq                530 drivers/isdn/mISDN/stack.c 			rq.protocol = ISDN_P_NT_E1;
rq                536 drivers/isdn/mISDN/stack.c 		rq.adr.channel = 0;
rq                537 drivers/isdn/mISDN/stack.c 		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
rq                541 drivers/isdn/mISDN/stack.c 		rq.protocol = protocol;
rq                542 drivers/isdn/mISDN/stack.c 		rq.adr = *adr;
rq                543 drivers/isdn/mISDN/stack.c 		rq.ch = ch;
rq                544 drivers/isdn/mISDN/stack.c 		err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
rq                547 drivers/isdn/mISDN/stack.c 			if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
rq                549 drivers/isdn/mISDN/stack.c 			add_layer2(rq.ch, dev->D.st);
rq                550 drivers/isdn/mISDN/stack.c 			rq.ch->recv = mISDN_queue_message;
rq                551 drivers/isdn/mISDN/stack.c 			rq.ch->peer = &dev->D.st->own;
rq                552 drivers/isdn/mISDN/stack.c 			rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */
rq                788 drivers/isdn/mISDN/tei.c 	struct channel_req	rq;
rq                797 drivers/isdn/mISDN/tei.c 		rq.protocol = ISDN_P_NT_E1;
rq                799 drivers/isdn/mISDN/tei.c 		rq.protocol = ISDN_P_NT_S0;
rq                836 drivers/isdn/mISDN/tei.c 		rq.adr.dev = mgr->ch.st->dev->id;
rq                837 drivers/isdn/mISDN/tei.c 		id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL, &rq);
rq                348 drivers/md/dm-integrity.c static void dm_integrity_prepare(struct request *rq)
rq                352 drivers/md/dm-integrity.c static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
rq                483 drivers/md/dm-mpath.c static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
rq                488 drivers/md/dm-mpath.c 	size_t nr_bytes = blk_rq_bytes(rq);
rq                516 drivers/md/dm-mpath.c 	clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
rq                126 drivers/md/dm-rq.c static struct dm_rq_target_io *tio_from_request(struct request *rq)
rq                128 drivers/md/dm-rq.c 	return blk_mq_rq_to_pdu(rq);
rq                168 drivers/md/dm-rq.c 	struct request *rq = tio->orig;
rq                173 drivers/md/dm-rq.c 	rq_end_stats(md, rq);
rq                174 drivers/md/dm-rq.c 	blk_mq_end_request(rq, error);
rq                189 drivers/md/dm-rq.c static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
rq                191 drivers/md/dm-rq.c 	blk_mq_requeue_request(rq, false);
rq                192 drivers/md/dm-rq.c 	__dm_mq_kick_requeue_list(rq->q, msecs);
rq                198 drivers/md/dm-rq.c 	struct request *rq = tio->orig;
rq                201 drivers/md/dm-rq.c 	rq_end_stats(md, rq);
rq                207 drivers/md/dm-rq.c 	dm_mq_delay_requeue_request(rq, delay_ms);
rq                261 drivers/md/dm-rq.c static void dm_softirq_done(struct request *rq)
rq                264 drivers/md/dm-rq.c 	struct dm_rq_target_io *tio = tio_from_request(rq);
rq                270 drivers/md/dm-rq.c 		rq_end_stats(md, rq);
rq                271 drivers/md/dm-rq.c 		blk_mq_end_request(rq, tio->error);
rq                276 drivers/md/dm-rq.c 	if (rq->rq_flags & RQF_FAILED)
rq                286 drivers/md/dm-rq.c static void dm_complete_request(struct request *rq, blk_status_t error)
rq                288 drivers/md/dm-rq.c 	struct dm_rq_target_io *tio = tio_from_request(rq);
rq                291 drivers/md/dm-rq.c 	blk_mq_complete_request(rq);
rq                300 drivers/md/dm-rq.c static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
rq                302 drivers/md/dm-rq.c 	rq->rq_flags |= RQF_FAILED;
rq                303 drivers/md/dm-rq.c 	dm_complete_request(rq, error);
rq                313 drivers/md/dm-rq.c static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
rq                324 drivers/md/dm-rq.c 		dm_complete_request(rq, r);
rq                342 drivers/md/dm-rq.c static int setup_clone(struct request *clone, struct request *rq,
rq                347 drivers/md/dm-rq.c 	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
rq                360 drivers/md/dm-rq.c static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
rq                366 drivers/md/dm-rq.c 	tio->orig = rq;
rq                389 drivers/md/dm-rq.c 	struct request *rq = tio->orig;
rq                393 drivers/md/dm-rq.c 	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
rq                399 drivers/md/dm-rq.c 		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
rq                407 drivers/md/dm-rq.c 				     blk_rq_pos(rq));
rq                408 drivers/md/dm-rq.c 		ret = dm_dispatch_clone_request(clone, rq);
rq                426 drivers/md/dm-rq.c 		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
rq                471 drivers/md/dm-rq.c static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
rq                475 drivers/md/dm-rq.c 	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
rq                494 drivers/md/dm-rq.c 	struct request *rq = bd->rq;
rq                495 drivers/md/dm-rq.c 	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
rq                510 drivers/md/dm-rq.c 	dm_start_request(md, rq);
rq                513 drivers/md/dm-rq.c 	init_tio(tio, rq, md);
rq                523 drivers/md/dm-rq.c 		rq_end_stats(md, rq);
rq                132 drivers/md/dm-target.c static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
rq               2002 drivers/memstick/core/ms_block.c 	struct request *req = bd->rq;
rq                840 drivers/memstick/core/mspro_block.c 		blk_mq_start_request(bd->rq);
rq                844 drivers/memstick/core/mspro_block.c 	msb->block_req = bd->rq;
rq                845 drivers/memstick/core/mspro_block.c 	blk_mq_start_request(bd->rq);
rq                779 drivers/mmc/core/mmc_test.c static void mmc_test_req_reset(struct mmc_test_req *rq)
rq                781 drivers/mmc/core/mmc_test.c 	memset(rq, 0, sizeof(struct mmc_test_req));
rq                783 drivers/mmc/core/mmc_test.c 	rq->mrq.cmd = &rq->cmd;
rq                784 drivers/mmc/core/mmc_test.c 	rq->mrq.data = &rq->data;
rq                785 drivers/mmc/core/mmc_test.c 	rq->mrq.stop = &rq->stop;
rq                790 drivers/mmc/core/mmc_test.c 	struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);
rq                792 drivers/mmc/core/mmc_test.c 	if (rq)
rq                793 drivers/mmc/core/mmc_test.c 		mmc_test_req_reset(rq);
rq                795 drivers/mmc/core/mmc_test.c 	return rq;
rq               2351 drivers/mmc/core/mmc_test.c 	struct mmc_test_req *rq = mmc_test_req_alloc();
rq               2361 drivers/mmc/core/mmc_test.c 	if (!rq)
rq               2364 drivers/mmc/core/mmc_test.c 	mrq = &rq->mrq;
rq               2366 drivers/mmc/core/mmc_test.c 		mrq->sbc = &rq->sbc;
rq               2393 drivers/mmc/core/mmc_test.c 		cmd_ret = mmc_test_send_status(test, &rq->status);
rq               2397 drivers/mmc/core/mmc_test.c 		status = rq->status.resp[0];
rq               2456 drivers/mmc/core/mmc_test.c 	kfree(rq);
rq                243 drivers/mmc/core/queue.c 	struct request *req = bd->rq;
rq                 25 drivers/mmc/core/queue.h static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
rq                 27 drivers/mmc/core/queue.h 	return blk_mq_rq_to_pdu(rq);
rq                245 drivers/mmc/host/omap.c 	struct mmc_request *rq;
rq                250 drivers/mmc/host/omap.c 	rq = next_slot->mrq;
rq                252 drivers/mmc/host/omap.c 	mmc_omap_start_request(host, rq);
rq                 34 drivers/mtd/mtd_blkdevs.c 	blk_cleanup_queue(dev->rq);
rq                128 drivers/mtd/mtd_blkdevs.c 	struct request *rq;
rq                130 drivers/mtd/mtd_blkdevs.c 	rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
rq                131 drivers/mtd/mtd_blkdevs.c 	if (rq) {
rq                132 drivers/mtd/mtd_blkdevs.c 		list_del_init(&rq->queuelist);
rq                133 drivers/mtd/mtd_blkdevs.c 		blk_mq_start_request(rq);
rq                134 drivers/mtd/mtd_blkdevs.c 		return rq;
rq                192 drivers/mtd/mtd_blkdevs.c 		blk_mq_start_request(bd->rq);
rq                197 drivers/mtd/mtd_blkdevs.c 	list_add_tail(&bd->rq->queuelist, &dev->rq_list);
rq                433 drivers/mtd/mtd_blkdevs.c 	new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,
rq                435 drivers/mtd/mtd_blkdevs.c 	if (IS_ERR(new->rq)) {
rq                436 drivers/mtd/mtd_blkdevs.c 		ret = PTR_ERR(new->rq);
rq                437 drivers/mtd/mtd_blkdevs.c 		new->rq = NULL;
rq                442 drivers/mtd/mtd_blkdevs.c 		blk_queue_write_cache(new->rq, true, false);
rq                444 drivers/mtd/mtd_blkdevs.c 	new->rq->queuedata = new;
rq                445 drivers/mtd/mtd_blkdevs.c 	blk_queue_logical_block_size(new->rq, tr->blksize);
rq                447 drivers/mtd/mtd_blkdevs.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
rq                448 drivers/mtd/mtd_blkdevs.c 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
rq                451 drivers/mtd/mtd_blkdevs.c 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
rq                452 drivers/mtd/mtd_blkdevs.c 		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
rq                455 drivers/mtd/mtd_blkdevs.c 	gd->queue = new->rq;
rq                496 drivers/mtd/mtd_blkdevs.c 	old->rq->queuedata = NULL;
rq                500 drivers/mtd/mtd_blkdevs.c 	blk_mq_freeze_queue(old->rq);
rq                501 drivers/mtd/mtd_blkdevs.c 	blk_mq_quiesce_queue(old->rq);
rq                502 drivers/mtd/mtd_blkdevs.c 	blk_mq_unquiesce_queue(old->rq);
rq                503 drivers/mtd/mtd_blkdevs.c 	blk_mq_unfreeze_queue(old->rq);
rq                 83 drivers/mtd/ubi/block.c 	struct request_queue *rq;
rq                316 drivers/mtd/ubi/block.c 	struct request *req = bd->rq;
rq                433 drivers/mtd/ubi/block.c 	dev->rq = blk_mq_init_queue(&dev->tag_set);
rq                434 drivers/mtd/ubi/block.c 	if (IS_ERR(dev->rq)) {
rq                436 drivers/mtd/ubi/block.c 		ret = PTR_ERR(dev->rq);
rq                439 drivers/mtd/ubi/block.c 	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
rq                441 drivers/mtd/ubi/block.c 	dev->rq->queuedata = dev;
rq                442 drivers/mtd/ubi/block.c 	dev->gd->queue = dev->rq;
rq                464 drivers/mtd/ubi/block.c 	blk_cleanup_queue(dev->rq);
rq                486 drivers/mtd/ubi/block.c 	blk_cleanup_queue(dev->rq);
rq                197 drivers/net/appletalk/cops.c static int  cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
rq                572 drivers/net/dsa/mv88e6xxx/chip.h 			  struct ptp_clock_request *rq, int on);
rq                269 drivers/net/dsa/mv88e6xxx/ptp.c 				      struct ptp_clock_request *rq, int on)
rq                271 drivers/net/dsa/mv88e6xxx/ptp.c 	int rising = (rq->extts.flags & PTP_RISING_EDGE);
rq                277 drivers/net/dsa/mv88e6xxx/ptp.c 	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
rq                284 drivers/net/dsa/mv88e6xxx/ptp.c 	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
rq                285 drivers/net/dsa/mv88e6xxx/ptp.c 	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
rq                286 drivers/net/dsa/mv88e6xxx/ptp.c 	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
rq                289 drivers/net/dsa/mv88e6xxx/ptp.c 	pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);
rq                322 drivers/net/dsa/mv88e6xxx/ptp.c 				struct ptp_clock_request *rq, int on)
rq                326 drivers/net/dsa/mv88e6xxx/ptp.c 	switch (rq->type) {
rq                328 drivers/net/dsa/mv88e6xxx/ptp.c 		return mv88e6352_ptp_enable_extts(chip, rq, on);
rq                238 drivers/net/ethernet/3com/3c574_cs.c static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1035 drivers/net/ethernet/3com/3c574_cs.c static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1039 drivers/net/ethernet/3com/3c574_cs.c 	struct mii_ioctl_data *data = if_mii(rq);
rq               1043 drivers/net/ethernet/3com/3c574_cs.c 		  dev->name, rq->ifr_ifrn.ifrn_name, cmd,
rq                777 drivers/net/ethernet/3com/3c59x.c static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               3027 drivers/net/ethernet/3com/3c59x.c static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               3040 drivers/net/ethernet/3com/3c59x.c 	err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
rq                 81 drivers/net/ethernet/8390/axnet_cs.c static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq                605 drivers/net/ethernet/8390/axnet_cs.c static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                608 drivers/net/ethernet/8390/axnet_cs.c     struct mii_ioctl_data *data = if_mii(rq);
rq                 99 drivers/net/ethernet/8390/pcnet_cs.c static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1099 drivers/net/ethernet/8390/pcnet_cs.c static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1102 drivers/net/ethernet/8390/pcnet_cs.c     struct mii_ioctl_data *data = if_mii(rq);
rq                590 drivers/net/ethernet/adaptec/starfire.c static int	netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1915 drivers/net/ethernet/adaptec/starfire.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1918 drivers/net/ethernet/adaptec/starfire.c 	struct mii_ioctl_data *data = if_mii(rq);
rq                210 drivers/net/ethernet/allwinner/sun4i-emac.c static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                220 drivers/net/ethernet/allwinner/sun4i-emac.c 	return phy_mii_ioctl(phydev, rq, cmd);
rq               1056 drivers/net/ethernet/amd/au1000_eth.c static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1064 drivers/net/ethernet/amd/au1000_eth.c 	return phy_mii_ioctl(dev->phydev, rq, cmd);
rq               2784 drivers/net/ethernet/amd/pcnet32.c static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               2793 drivers/net/ethernet/amd/pcnet32.c 		rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
rq                784 drivers/net/ethernet/arc/emac_main.c static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                792 drivers/net/ethernet/arc/emac_main.c 	return phy_mii_ioctl(dev->phydev, rq, cmd);
rq               1008 drivers/net/ethernet/aurora/nb8800.c static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1010 drivers/net/ethernet/aurora/nb8800.c 	return phy_mii_ioctl(dev->phydev, rq, cmd);
rq               1581 drivers/net/ethernet/broadcom/bcm63xx_enet.c static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1589 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		return phy_mii_ioctl(dev->phydev, rq, cmd);
rq               1599 drivers/net/ethernet/broadcom/bcm63xx_enet.c 		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
rq               2432 drivers/net/ethernet/broadcom/bcm63xx_enet.c static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               2442 drivers/net/ethernet/broadcom/bcm63xx_enet.c 	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
rq               13917 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 			    struct ptp_clock_request *rq, int on)
rq               1781 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
rq               1783 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
rq               1785 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
rq               1786 drivers/net/ethernet/broadcom/cnic.c 	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
rq               3891 drivers/net/ethernet/broadcom/cnic_defs.h 	struct ustorm_iscsi_rq_db rq;
rq               1224 drivers/net/ethernet/broadcom/genet/bcmgenet.c static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1232 drivers/net/ethernet/broadcom/genet/bcmgenet.c 	return phy_mii_ioctl(dev->phydev, rq, cmd);
rq                299 drivers/net/ethernet/broadcom/sb1250-mac.c static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               2469 drivers/net/ethernet/broadcom/sb1250-mac.c static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               2476 drivers/net/ethernet/broadcom/sb1250-mac.c 	return phy_mii_ioctl(sc->phy_dev, rq, cmd);
rq               6275 drivers/net/ethernet/broadcom/tg3.c 			  struct ptp_clock_request *rq, int on)
rq               6281 drivers/net/ethernet/broadcom/tg3.c 	switch (rq->type) {
rq               6284 drivers/net/ethernet/broadcom/tg3.c 		if (rq->perout.flags)
rq               6287 drivers/net/ethernet/broadcom/tg3.c 		if (rq->perout.index != 0)
rq               6297 drivers/net/ethernet/broadcom/tg3.c 			nsec = rq->perout.start.sec * 1000000000ULL +
rq               6298 drivers/net/ethernet/broadcom/tg3.c 			       rq->perout.start.nsec;
rq               6300 drivers/net/ethernet/broadcom/tg3.c 			if (rq->perout.period.sec || rq->perout.period.nsec) {
rq               1268 drivers/net/ethernet/cadence/macb.h int gem_get_hwtst(struct net_device *dev, struct ifreq *rq);
rq               3195 drivers/net/ethernet/cadence/macb_main.c static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               3207 drivers/net/ethernet/cadence/macb_main.c 		return phy_mii_ioctl(phydev, rq, cmd);
rq               3211 drivers/net/ethernet/cadence/macb_main.c 		return bp->ptp_info->set_hwtst(dev, rq, cmd);
rq               3213 drivers/net/ethernet/cadence/macb_main.c 		return bp->ptp_info->get_hwtst(dev, rq);
rq               3215 drivers/net/ethernet/cadence/macb_main.c 		return phy_mii_ioctl(phydev, rq, cmd);
rq                179 drivers/net/ethernet/cadence/macb_ptp.c 			  struct ptp_clock_request *rq, int on)
rq                408 drivers/net/ethernet/cadence/macb_ptp.c int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
rq                417 drivers/net/ethernet/cadence/macb_ptp.c 	if (copy_to_user(rq->ifr_data, tstamp_config, sizeof(*tstamp_config)))
rq                204 drivers/net/ethernet/cavium/common/cavium_ptp.c 			     struct ptp_clock_request *rq, int on)
rq               1663 drivers/net/ethernet/cavium/liquidio/lio_main.c 		    struct ptp_clock_request *rq __attribute__((unused)),
rq                690 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 				      struct ifreq *rq, int cmd)
rq                698 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
rq                780 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
rq                787 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			     struct ifreq *rq, int cmd)
rq                791 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
rq                794 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c 			return phy_mii_ioctl(netdev->phydev, rq, cmd);
rq                588 drivers/net/ethernet/cavium/thunder/nic.h 	struct rq_cfg_msg	rq;
rq                993 drivers/net/ethernet/cavium/thunder/nic_main.c 			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
rq                994 drivers/net/ethernet/cavium/thunder/nic_main.c 			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
rq                995 drivers/net/ethernet/cavium/thunder/nic_main.c 		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
rq               1008 drivers/net/ethernet/cavium/thunder/nic_main.c 			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
rq               1009 drivers/net/ethernet/cavium/thunder/nic_main.c 			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
rq               1010 drivers/net/ethernet/cavium/thunder/nic_main.c 		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
rq               1017 drivers/net/ethernet/cavium/thunder/nic_main.c 			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
rq               1018 drivers/net/ethernet/cavium/thunder/nic_main.c 			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
rq               1019 drivers/net/ethernet/cavium/thunder/nic_main.c 		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
rq                310 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 			*((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
rq                532 drivers/net/ethernet/cavium/thunder/nicvf_main.c 				struct rcv_queue *rq, struct sk_buff **skb)
rq                555 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	xdp.rxq = &rq->xdp_rxq;
rq                776 drivers/net/ethernet/cavium/thunder/nicvf_main.c 				  struct snd_queue *sq, struct rcv_queue *rq)
rq                802 drivers/net/ethernet/cavium/thunder/nicvf_main.c 		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
rq                860 drivers/net/ethernet/cavium/thunder/nicvf_main.c 	struct rcv_queue *rq = &qs->rq[cq_idx];
rq                891 drivers/net/ethernet/cavium/thunder/nicvf_main.c 			nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
rq                748 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct rcv_queue *rq;
rq                751 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq = &qs->rq[qidx];
rq                752 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->enable = enable;
rq                757 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	if (!rq->enable) {
rq                759 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		xdp_rxq_info_unreg(&rq->xdp_rxq);
rq                763 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->cq_qs = qs->vnic_id;
rq                764 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->cq_idx = qidx;
rq                765 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->start_rbdr_qs = qs->vnic_id;
rq                766 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
rq                767 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->cont_rbdr_qs = qs->vnic_id;
rq                768 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
rq                770 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->caching = 1;
rq                773 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);
rq                776 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
rq                777 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.rq.qs_num = qs->vnic_id;
rq                778 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.rq.rq_num = qidx;
rq                779 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
rq                780 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			  (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
rq                781 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			  (rq->cont_qs_rbdr_idx << 8) |
rq                782 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 			  (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
rq                785 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
rq                786 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
rq                794 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
rq                795 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
rq               1813 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	struct rcv_queue *rq;
rq               1819 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq = &nic->qs->rq[rq_idx];
rq               1820 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
rq               1821 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
rq                299 drivers/net/ethernet/cavium/thunder/nicvf_queues.h 	struct	rcv_queue	rq[MAX_RCV_QUEUES_PER_QS];
rq               1956 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
rq               1964 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (rq->polling) {
rq               1969 drivers/net/ethernet/chelsio/cxgb3/sge.c 			rq->offload_bundles++;
rq               1972 drivers/net/ethernet/chelsio/cxgb3/sge.c 		offload_enqueue(rq, skb);
rq               2078 drivers/net/ethernet/chelsio/cxgb3/sge.c static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
rq               2082 drivers/net/ethernet/chelsio/cxgb3/sge.c 	struct sge_qset *qs = rspq_to_qset(rq);
rq               2100 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (rq->polling) {
rq               2715 drivers/net/ethernet/chelsio/cxgb3/sge.c 					struct sge_rspq *rq)
rq               2719 drivers/net/ethernet/chelsio/cxgb3/sge.c 	work = process_responses(adap, rspq_to_qset(rq), -1);
rq               2720 drivers/net/ethernet/chelsio/cxgb3/sge.c 	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
rq               2721 drivers/net/ethernet/chelsio/cxgb3/sge.c 		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
rq               1891 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
rq               1100 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
rq               1102 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	c->rx_coalesce_usecs = qtimer_val(adap, rq);
rq               1103 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
rq               1104 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c 		adap->sge.counter_val[rq->pktcnt_idx] : 0;
rq               4747 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		adap->vres.rq.start = val[2];
rq               4748 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c 		adap->vres.rq.size = val[3] - val[2] + 1;
rq                287 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h 	struct cxgb4_range rq;
rq               3959 drivers/net/ethernet/chelsio/cxgb4/sge.c void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
rq               3965 drivers/net/ethernet/chelsio/cxgb4/sge.c 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
rq               3967 drivers/net/ethernet/chelsio/cxgb4/sge.c 		   rq->cntxt_id, fl_id, 0xffff);
rq               3968 drivers/net/ethernet/chelsio/cxgb4/sge.c 	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
rq               3969 drivers/net/ethernet/chelsio/cxgb4/sge.c 			  rq->desc, rq->phys_addr);
rq               3970 drivers/net/ethernet/chelsio/cxgb4/sge.c 	netif_napi_del(&rq->napi);
rq               3971 drivers/net/ethernet/chelsio/cxgb4/sge.c 	rq->netdev = NULL;
rq               3972 drivers/net/ethernet/chelsio/cxgb4/sge.c 	rq->cntxt_id = rq->abs_id = 0;
rq               3973 drivers/net/ethernet/chelsio/cxgb4/sge.c 	rq->desc = NULL;
rq                184 drivers/net/ethernet/cisco/enic/enic.h 	____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
rq                233 drivers/net/ethernet/cisco/enic/enic.h static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
rq                235 drivers/net/ethernet/cisco/enic/enic.h 	return rq;
rq                259 drivers/net/ethernet/cisco/enic/enic.h 	unsigned int rq)
rq                261 drivers/net/ethernet/cisco/enic/enic.h 	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
rq                 21 drivers/net/ethernet/cisco/enic/enic_clsf.c int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
rq                 45 drivers/net/ethernet/cisco/enic/enic_clsf.c 	res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
rq                 47 drivers/net/ethernet/cisco/enic/enic_clsf.c 	res = (res == 0) ? rq : res;
rq                 10 drivers/net/ethernet/cisco/enic/enic_clsf.h int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq);
rq                416 drivers/net/ethernet/cisco/enic/enic_main.c 		error_status = vnic_rq_error_status(&enic->rq[i]);
rq               1254 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
rq               1256 drivers/net/ethernet/cisco/enic/enic_main.c 	struct enic *enic = vnic_dev_priv(rq->vdev);
rq               1267 drivers/net/ethernet/cisco/enic/enic_main.c static int enic_rq_alloc_buf(struct vnic_rq *rq)
rq               1269 drivers/net/ethernet/cisco/enic/enic_main.c 	struct enic *enic = vnic_dev_priv(rq->vdev);
rq               1275 drivers/net/ethernet/cisco/enic/enic_main.c 	struct vnic_rq_buf *buf = rq->to_use;
rq               1278 drivers/net/ethernet/cisco/enic/enic_main.c 		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
rq               1294 drivers/net/ethernet/cisco/enic/enic_main.c 	enic_queue_rq_desc(rq, skb, os_buf_index,
rq               1328 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_rq_indicate_buf(struct vnic_rq *rq,
rq               1332 drivers/net/ethernet/cisco/enic/enic_main.c 	struct enic *enic = vnic_dev_priv(rq->vdev);
rq               1335 drivers/net/ethernet/cisco/enic/enic_main.c 	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
rq               1446 drivers/net/ethernet/cisco/enic/enic_main.c 		skb_mark_napi_id(skb, &enic->napi[rq->index]);
rq               1471 drivers/net/ethernet/cisco/enic/enic_main.c 	vnic_rq_service(&enic->rq[q_number], cq_desc,
rq               1478 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
rq               1480 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
rq               1481 drivers/net/ethernet/cisco/enic/enic_main.c 	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
rq               1490 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
rq               1493 drivers/net/ethernet/cisco/enic/enic_main.c 	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
rq               1568 drivers/net/ethernet/cisco/enic/enic_main.c 	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
rq               1580 drivers/net/ethernet/cisco/enic/enic_main.c 		enic_calc_int_moderation(enic, &enic->rq[0]);
rq               1589 drivers/net/ethernet/cisco/enic/enic_main.c 			enic_set_int_moderation(enic, &enic->rq[0]);
rq               1668 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int rq = (napi - &enic->napi[0]);
rq               1669 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int cq = enic_cq_rq(enic, rq);
rq               1670 drivers/net/ethernet/cisco/enic/enic_main.c 	unsigned int intr = enic_msix_rq_intr(enic, rq);
rq               1693 drivers/net/ethernet/cisco/enic/enic_main.c 	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
rq               1705 drivers/net/ethernet/cisco/enic/enic_main.c 		enic_calc_int_moderation(enic, &enic->rq[rq]);
rq               1714 drivers/net/ethernet/cisco/enic/enic_main.c 			enic_set_int_moderation(enic, &enic->rq[rq]);
rq               1943 drivers/net/ethernet/cisco/enic/enic_main.c 		vnic_rq_enable(&enic->rq[i]);
rq               1944 drivers/net/ethernet/cisco/enic/enic_main.c 		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
rq               1946 drivers/net/ethernet/cisco/enic/enic_main.c 		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
rq               1981 drivers/net/ethernet/cisco/enic/enic_main.c 		ret = vnic_rq_disable(&enic->rq[i]);
rq               1983 drivers/net/ethernet/cisco/enic/enic_main.c 			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
rq               2030 drivers/net/ethernet/cisco/enic/enic_main.c 		err = vnic_rq_disable(&enic->rq[i]);
rq               2042 drivers/net/ethernet/cisco/enic/enic_main.c 		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
rq                194 drivers/net/ethernet/cisco/enic/enic_res.c 		vnic_rq_free(&enic->rq[i]);
rq                249 drivers/net/ethernet/cisco/enic/enic_res.c 		vnic_rq_init(&enic->rq[i],
rq                346 drivers/net/ethernet/cisco/enic/enic_res.c 		err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
rq                122 drivers/net/ethernet/cisco/enic/enic_res.h static inline void enic_queue_rq_desc(struct vnic_rq *rq,
rq                126 drivers/net/ethernet/cisco/enic/enic_res.h 	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
rq                135 drivers/net/ethernet/cisco/enic/enic_res.h 	vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
rq                 31 drivers/net/ethernet/cisco/enic/vnic_rq.c static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
rq                 34 drivers/net/ethernet/cisco/enic/vnic_rq.c 	unsigned int i, j, count = rq->ring.desc_count;
rq                 38 drivers/net/ethernet/cisco/enic/vnic_rq.c 		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);
rq                 39 drivers/net/ethernet/cisco/enic/vnic_rq.c 		if (!rq->bufs[i])
rq                 44 drivers/net/ethernet/cisco/enic/vnic_rq.c 		buf = rq->bufs[i];
rq                 47 drivers/net/ethernet/cisco/enic/vnic_rq.c 			buf->desc = (u8 *)rq->ring.descs +
rq                 48 drivers/net/ethernet/cisco/enic/vnic_rq.c 				rq->ring.desc_size * buf->index;
rq                 50 drivers/net/ethernet/cisco/enic/vnic_rq.c 				buf->next = rq->bufs[0];
rq                 53 drivers/net/ethernet/cisco/enic/vnic_rq.c 				buf->next = rq->bufs[i + 1];
rq                 61 drivers/net/ethernet/cisco/enic/vnic_rq.c 	rq->to_use = rq->to_clean = rq->bufs[0];
rq                 66 drivers/net/ethernet/cisco/enic/vnic_rq.c void vnic_rq_free(struct vnic_rq *rq)
rq                 71 drivers/net/ethernet/cisco/enic/vnic_rq.c 	vdev = rq->vdev;
rq                 73 drivers/net/ethernet/cisco/enic/vnic_rq.c 	vnic_dev_free_desc_ring(vdev, &rq->ring);
rq                 76 drivers/net/ethernet/cisco/enic/vnic_rq.c 		if (rq->bufs[i]) {
rq                 77 drivers/net/ethernet/cisco/enic/vnic_rq.c 			kfree(rq->bufs[i]);
rq                 78 drivers/net/ethernet/cisco/enic/vnic_rq.c 			rq->bufs[i] = NULL;
rq                 82 drivers/net/ethernet/cisco/enic/vnic_rq.c 	rq->ctrl = NULL;
rq                 85 drivers/net/ethernet/cisco/enic/vnic_rq.c int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
rq                 90 drivers/net/ethernet/cisco/enic/vnic_rq.c 	rq->index = index;
rq                 91 drivers/net/ethernet/cisco/enic/vnic_rq.c 	rq->vdev = vdev;
rq                 93 drivers/net/ethernet/cisco/enic/vnic_rq.c 	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
rq                 94 drivers/net/ethernet/cisco/enic/vnic_rq.c 	if (!rq->ctrl) {
rq                 99 drivers/net/ethernet/cisco/enic/vnic_rq.c 	vnic_rq_disable(rq);
rq                101 drivers/net/ethernet/cisco/enic/vnic_rq.c 	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
rq                105 drivers/net/ethernet/cisco/enic/vnic_rq.c 	err = vnic_rq_alloc_bufs(rq);
rq                107 drivers/net/ethernet/cisco/enic/vnic_rq.c 		vnic_rq_free(rq);
rq                114 drivers/net/ethernet/cisco/enic/vnic_rq.c static void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
rq                120 drivers/net/ethernet/cisco/enic/vnic_rq.c 	unsigned int count = rq->ring.desc_count;
rq                122 drivers/net/ethernet/cisco/enic/vnic_rq.c 	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
rq                123 drivers/net/ethernet/cisco/enic/vnic_rq.c 	writeq(paddr, &rq->ctrl->ring_base);
rq                124 drivers/net/ethernet/cisco/enic/vnic_rq.c 	iowrite32(count, &rq->ctrl->ring_size);
rq                125 drivers/net/ethernet/cisco/enic/vnic_rq.c 	iowrite32(cq_index, &rq->ctrl->cq_index);
rq                126 drivers/net/ethernet/cisco/enic/vnic_rq.c 	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
rq                127 drivers/net/ethernet/cisco/enic/vnic_rq.c 	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
rq                128 drivers/net/ethernet/cisco/enic/vnic_rq.c 	iowrite32(0, &rq->ctrl->dropped_packet_count);
rq                129 drivers/net/ethernet/cisco/enic/vnic_rq.c 	iowrite32(0, &rq->ctrl->error_status);
rq                130 drivers/net/ethernet/cisco/enic/vnic_rq.c 	iowrite32(fetch_index, &rq->ctrl->fetch_index);
rq                131 drivers/net/ethernet/cisco/enic/vnic_rq.c 	iowrite32(posted_index, &rq->ctrl->posted_index);
rq                133 drivers/net/ethernet/cisco/enic/vnic_rq.c 	rq->to_use = rq->to_clean =
rq                134 drivers/net/ethernet/cisco/enic/vnic_rq.c 		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
rq                138 drivers/net/ethernet/cisco/enic/vnic_rq.c void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
rq                142 drivers/net/ethernet/cisco/enic/vnic_rq.c 	vnic_rq_init_start(rq, cq_index, 0, 0, error_interrupt_enable,
rq                146 drivers/net/ethernet/cisco/enic/vnic_rq.c unsigned int vnic_rq_error_status(struct vnic_rq *rq)
rq                148 drivers/net/ethernet/cisco/enic/vnic_rq.c 	return ioread32(&rq->ctrl->error_status);
rq                151 drivers/net/ethernet/cisco/enic/vnic_rq.c void vnic_rq_enable(struct vnic_rq *rq)
rq                153 drivers/net/ethernet/cisco/enic/vnic_rq.c 	iowrite32(1, &rq->ctrl->enable);
rq                156 drivers/net/ethernet/cisco/enic/vnic_rq.c int vnic_rq_disable(struct vnic_rq *rq)
rq                159 drivers/net/ethernet/cisco/enic/vnic_rq.c 	struct vnic_dev *vdev = rq->vdev;
rq                167 drivers/net/ethernet/cisco/enic/vnic_rq.c 		iowrite32(0, &rq->ctrl->enable);
rq                171 drivers/net/ethernet/cisco/enic/vnic_rq.c 			if (!ioread32(&rq->ctrl->running))
rq                175 drivers/net/ethernet/cisco/enic/vnic_rq.c 				    rq->index);
rq                184 drivers/net/ethernet/cisco/enic/vnic_rq.c void vnic_rq_clean(struct vnic_rq *rq,
rq                185 drivers/net/ethernet/cisco/enic/vnic_rq.c 	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
rq                189 drivers/net/ethernet/cisco/enic/vnic_rq.c 	unsigned int count = rq->ring.desc_count;
rq                192 drivers/net/ethernet/cisco/enic/vnic_rq.c 	buf = rq->to_clean;
rq                194 drivers/net/ethernet/cisco/enic/vnic_rq.c 	for (i = 0; i < rq->ring.desc_count; i++) {
rq                195 drivers/net/ethernet/cisco/enic/vnic_rq.c 		(*buf_clean)(rq, buf);
rq                198 drivers/net/ethernet/cisco/enic/vnic_rq.c 	rq->ring.desc_avail = rq->ring.desc_count - 1;
rq                201 drivers/net/ethernet/cisco/enic/vnic_rq.c 	fetch_index = ioread32(&rq->ctrl->fetch_index);
rq                207 drivers/net/ethernet/cisco/enic/vnic_rq.c 	rq->to_use = rq->to_clean =
rq                208 drivers/net/ethernet/cisco/enic/vnic_rq.c 		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
rq                210 drivers/net/ethernet/cisco/enic/vnic_rq.c 	iowrite32(fetch_index, &rq->ctrl->posted_index);
rq                215 drivers/net/ethernet/cisco/enic/vnic_rq.c 	iowrite32(0, &rq->ctrl->enable);
rq                217 drivers/net/ethernet/cisco/enic/vnic_rq.c 	vnic_dev_clear_desc_ring(&rq->ring);
rq                 97 drivers/net/ethernet/cisco/enic/vnic_rq.h static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
rq                100 drivers/net/ethernet/cisco/enic/vnic_rq.h 	return rq->ring.desc_avail;
rq                103 drivers/net/ethernet/cisco/enic/vnic_rq.h static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
rq                106 drivers/net/ethernet/cisco/enic/vnic_rq.h 	return rq->ring.desc_count - rq->ring.desc_avail - 1;
rq                109 drivers/net/ethernet/cisco/enic/vnic_rq.h static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
rq                111 drivers/net/ethernet/cisco/enic/vnic_rq.h 	return rq->to_use->desc;
rq                114 drivers/net/ethernet/cisco/enic/vnic_rq.h static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
rq                116 drivers/net/ethernet/cisco/enic/vnic_rq.h 	return rq->to_use->index;
rq                119 drivers/net/ethernet/cisco/enic/vnic_rq.h static inline void vnic_rq_post(struct vnic_rq *rq,
rq                124 drivers/net/ethernet/cisco/enic/vnic_rq.h 	struct vnic_rq_buf *buf = rq->to_use;
rq                133 drivers/net/ethernet/cisco/enic/vnic_rq.h 	rq->to_use = buf;
rq                134 drivers/net/ethernet/cisco/enic/vnic_rq.h 	rq->ring.desc_avail--;
rq                150 drivers/net/ethernet/cisco/enic/vnic_rq.h 		iowrite32(buf->index, &rq->ctrl->posted_index);
rq                154 drivers/net/ethernet/cisco/enic/vnic_rq.h static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
rq                156 drivers/net/ethernet/cisco/enic/vnic_rq.h 	rq->ring.desc_avail += count;
rq                164 drivers/net/ethernet/cisco/enic/vnic_rq.h static inline void vnic_rq_service(struct vnic_rq *rq,
rq                166 drivers/net/ethernet/cisco/enic/vnic_rq.h 	int desc_return, void (*buf_service)(struct vnic_rq *rq,
rq                173 drivers/net/ethernet/cisco/enic/vnic_rq.h 	buf = rq->to_clean;
rq                178 drivers/net/ethernet/cisco/enic/vnic_rq.h 		(*buf_service)(rq, cq_desc, buf, skipped, opaque);
rq                181 drivers/net/ethernet/cisco/enic/vnic_rq.h 			rq->ring.desc_avail++;
rq                183 drivers/net/ethernet/cisco/enic/vnic_rq.h 		rq->to_clean = buf->next;
rq                188 drivers/net/ethernet/cisco/enic/vnic_rq.h 		buf = rq->to_clean;
rq                192 drivers/net/ethernet/cisco/enic/vnic_rq.h static inline int vnic_rq_fill(struct vnic_rq *rq,
rq                193 drivers/net/ethernet/cisco/enic/vnic_rq.h 	int (*buf_fill)(struct vnic_rq *rq))
rq                197 drivers/net/ethernet/cisco/enic/vnic_rq.h 	while (vnic_rq_desc_avail(rq) > 0) {
rq                199 drivers/net/ethernet/cisco/enic/vnic_rq.h 		err = (*buf_fill)(rq);
rq                207 drivers/net/ethernet/cisco/enic/vnic_rq.h void vnic_rq_free(struct vnic_rq *rq);
rq                208 drivers/net/ethernet/cisco/enic/vnic_rq.h int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
rq                210 drivers/net/ethernet/cisco/enic/vnic_rq.h void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
rq                213 drivers/net/ethernet/cisco/enic/vnic_rq.h unsigned int vnic_rq_error_status(struct vnic_rq *rq);
rq                214 drivers/net/ethernet/cisco/enic/vnic_rq.h void vnic_rq_enable(struct vnic_rq *rq);
rq                215 drivers/net/ethernet/cisco/enic/vnic_rq.h int vnic_rq_disable(struct vnic_rq *rq);
rq                216 drivers/net/ethernet/cisco/enic/vnic_rq.h void vnic_rq_clean(struct vnic_rq *rq,
rq                217 drivers/net/ethernet/cisco/enic/vnic_rq.h 	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
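
The enic entries above trace a complete receive-queue lifecycle through enic_res.c, enic_main.c and vnic_rq.c/.h: allocate the descriptor ring, program the queue, enable and fill it, then disable, clean and free it on teardown. The following sketch only restates that call order; the trailing parameters of vnic_rq_alloc()/vnic_rq_init() are truncated in the listing and are assumptions here, and the my_* names are hypothetical stand-ins for the driver's own callbacks (enic_rq_alloc_buf/enic_free_rq_buf).

	/* Sketch of the receive-queue lifecycle seen above in enic_res.c and
	 * enic_main.c; it builds only inside drivers/net/ethernet/cisco/enic.
	 * desc_count/desc_size/cq_index and the trailing vnic_rq_init()
	 * arguments are assumptions (the listing truncates those lines).
	 */
	#include <linux/errno.h>
	#include "vnic_dev.h"
	#include "vnic_rq.h"

	/* Hypothetical buffer callbacks.  The real enic_rq_alloc_buf() posts an
	 * skb with enic_queue_rq_desc(); enic_free_rq_buf() unmaps and frees it.
	 * Returning an error here simply stops vnic_rq_fill() in this sketch.
	 */
	static int my_rq_alloc_buf(struct vnic_rq *rq)
	{
		return -ENOMEM;
	}

	static void my_rq_free_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
	{
	}

	static int my_rq_bringup(struct vnic_dev *vdev, struct vnic_rq *rq,
				 unsigned int index, unsigned int desc_count,
				 unsigned int desc_size, unsigned int cq_index)
	{
		int err;

		err = vnic_rq_alloc(vdev, rq, index, desc_count, desc_size);
		if (err)
			return err;

		/* assumed trailing args: error interrupt enable and offset */
		vnic_rq_init(rq, cq_index, 1, 0);
		vnic_rq_enable(rq);
		vnic_rq_fill(rq, my_rq_alloc_buf);
		return 0;
	}

	static void my_rq_teardown(struct vnic_rq *rq)
	{
		if (!vnic_rq_disable(rq))
			vnic_rq_clean(rq, my_rq_free_buf);
		vnic_rq_free(rq);
	}
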
rq                905 drivers/net/ethernet/dec/tulip/de4x5.c static int     de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               5362 drivers/net/ethernet/dec/tulip/de4x5.c de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               5365 drivers/net/ethernet/dec/tulip/de4x5.c     struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
rq                268 drivers/net/ethernet/dec/tulip/tulip_core.c static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq                907 drivers/net/ethernet/dec/tulip/tulip_core.c static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
rq                911 drivers/net/ethernet/dec/tulip/tulip_core.c 	struct mii_ioctl_data *data = if_mii(rq);
rq                344 drivers/net/ethernet/dec/tulip/winbond-840.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1449 drivers/net/ethernet/dec/tulip/winbond-840.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1451 drivers/net/ethernet/dec/tulip/winbond-840.c 	struct mii_ioctl_data *data = if_mii(rq);
rq                 79 drivers/net/ethernet/dlink/dl2k.c static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
rq               1351 drivers/net/ethernet/dlink/dl2k.c rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
rq               1355 drivers/net/ethernet/dlink/dl2k.c 	struct mii_ioctl_data *miidata = if_mii(rq);
rq                449 drivers/net/ethernet/dlink/sundance.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1815 drivers/net/ethernet/dlink/sundance.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1824 drivers/net/ethernet/dlink/sundance.c 	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
rq                728 drivers/net/ethernet/dnet.c static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                738 drivers/net/ethernet/dnet.c 	return phy_mii_ioctl(phydev, rq, cmd);
rq                439 drivers/net/ethernet/fealnx.c static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1873 drivers/net/ethernet/fealnx.c static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1882 drivers/net/ethernet/fealnx.c 	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
rq               2577 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               2582 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
rq               2612 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
rq               2616 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
rq               2622 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 			return phy_mii_ioctl(net_dev->phydev, rq, cmd);
rq               2626 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 		return dpaa_ts_ioctl(net_dev, rq, cmd);
rq               1639 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1644 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
rq               1666 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
rq               1670 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1673 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		return dpaa2_eth_ts_ioctl(dev, rq, cmd);
rq                 17 drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c 			    struct ptp_clock_request *rq, int on)
rq                 29 drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c 	switch (rq->type) {
rq               1594 drivers/net/ethernet/freescale/enetc/enetc.c int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
rq               1598 drivers/net/ethernet/freescale/enetc/enetc.c 		return enetc_hwtstamp_set(ndev, rq);
rq               1600 drivers/net/ethernet/freescale/enetc/enetc.c 		return enetc_hwtstamp_get(ndev, rq);
rq                231 drivers/net/ethernet/freescale/enetc/enetc.h int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
rq               2721 drivers/net/ethernet/freescale/fec_main.c static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
rq               2734 drivers/net/ethernet/freescale/fec_main.c 			return fec_ptp_set(ndev, rq);
rq               2736 drivers/net/ethernet/freescale/fec_main.c 			return fec_ptp_get(ndev, rq);
rq               2739 drivers/net/ethernet/freescale/fec_main.c 	return phy_mii_ioctl(phydev, rq, cmd);
rq                788 drivers/net/ethernet/freescale/fec_mpc52xx.c static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                795 drivers/net/ethernet/freescale/fec_mpc52xx.c 	return phy_mii_ioctl(phydev, rq, cmd);
rq                441 drivers/net/ethernet/freescale/fec_ptp.c 			  struct ptp_clock_request *rq, int on)
rq                447 drivers/net/ethernet/freescale/fec_ptp.c 	if (rq->type == PTP_CLK_REQ_PPS) {
rq                885 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                890 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c 	return phy_mii_ioctl(dev->phydev, rq, cmd);
rq               1803 drivers/net/ethernet/freescale/gianfar.c 	int i, rq = 0;
rq               1808 drivers/net/ethernet/freescale/gianfar.c 	rq = skb->queue_mapping;
rq               1809 drivers/net/ethernet/freescale/gianfar.c 	tx_queue = priv->tx_queue[rq];
rq               1810 drivers/net/ethernet/freescale/gianfar.c 	txq = netdev_get_tx_queue(dev, rq);
rq               2164 drivers/net/ethernet/freescale/gianfar.c static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               2172 drivers/net/ethernet/freescale/gianfar.c 		return gfar_hwtstamp_set(dev, rq);
rq               2174 drivers/net/ethernet/freescale/gianfar.c 		return gfar_hwtstamp_get(dev, rq);
rq               2179 drivers/net/ethernet/freescale/gianfar.c 	return phy_mii_ioctl(phydev, rq, cmd);
rq                549 drivers/net/ethernet/freescale/gianfar.h 	u8	rq;	/* Receive Queue index */
rq               3661 drivers/net/ethernet/freescale/ucc_geth.c static int ucc_geth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               3671 drivers/net/ethernet/freescale/ucc_geth.c 	return phy_mii_ioctl(ugeth->phydev, rq, cmd);
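
The ioctl entries above (dnet.c, fec_main.c, fec_mpc52xx.c, fs_enet-main.c, gianfar.c, ucc_geth.c and others) repeat one pattern: the ndo_do_ioctl handler takes struct ifreq *rq, services hardware-timestamping requests itself and forwards everything else to phylib via phy_mii_ioctl(). A minimal sketch of that shape follows; my_ioctl and the my_hwtstamp_* helpers are hypothetical placeholders, not taken from any of the drivers listed.

	#include <linux/errno.h>
	#include <linux/netdevice.h>
	#include <linux/phy.h>
	#include <linux/sockios.h>

	/* Hypothetical timestamping helpers, standing in for e.g. fec_ptp_set/get. */
	static int my_hwtstamp_set(struct net_device *dev, struct ifreq *rq)
	{
		return -EOPNOTSUPP;
	}

	static int my_hwtstamp_get(struct net_device *dev, struct ifreq *rq)
	{
		return -EOPNOTSUPP;
	}

	/* Sketch of the common .ndo_do_ioctl shape seen in the entries above. */
	static int my_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	{
		if (!netif_running(dev))
			return -EINVAL;

		switch (cmd) {
		case SIOCSHWTSTAMP:
			return my_hwtstamp_set(dev, rq);
		case SIOCGHWTSTAMP:
			return my_hwtstamp_get(dev, rq);
		default:
			if (!dev->phydev)
				return -ENODEV;
			return phy_mii_ioctl(dev->phydev, rq, cmd);
		}
	}
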
rq                881 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c 	return &qp->rq;
rq                172 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c 		hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
rq                314 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c 	err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
rq                347 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c 	hinic_clean_rq(&qp->rq);
rq                 62 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c #define RQ_MASKED_IDX(rq, idx)  ((idx) & (rq)->wq->mask)
rq                150 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 			   struct hinic_rq *rq, u16 global_qid)
rq                157 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	wq = rq->wq;
rq                176 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 				HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);
rq                197 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
rq                198 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
rq                242 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c static int alloc_rq_skb_arr(struct hinic_rq *rq)
rq                244 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_wq *wq = rq->wq;
rq                247 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
rq                248 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->saved_skb = vzalloc(skb_arr_size);
rq                249 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	if (!rq->saved_skb)
rq                259 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c static void free_rq_skb_arr(struct hinic_rq *rq)
rq                261 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	vfree(rq->saved_skb);
rq                311 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c static int alloc_rq_cqe(struct hinic_rq *rq)
rq                313 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_hwif *hwif = rq->hwif;
rq                316 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_wq *wq = rq->wq;
rq                319 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	cqe_size = wq->q_depth * sizeof(*rq->cqe);
rq                320 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->cqe = vzalloc(cqe_size);
rq                321 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	if (!rq->cqe)
rq                324 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
rq                325 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->cqe_dma = vzalloc(cqe_dma_size);
rq                326 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	if (!rq->cqe_dma)
rq                330 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 		rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
rq                331 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 						sizeof(*rq->cqe[i]),
rq                332 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 						&rq->cqe_dma[i], GFP_KERNEL);
rq                333 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 		if (!rq->cqe[i])
rq                341 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 		dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],
rq                342 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 				  rq->cqe_dma[j]);
rq                344 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	vfree(rq->cqe_dma);
rq                347 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	vfree(rq->cqe);
rq                355 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c static void free_rq_cqe(struct hinic_rq *rq)
rq                357 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_hwif *hwif = rq->hwif;
rq                359 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_wq *wq = rq->wq;
rq                363 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 		dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],
rq                364 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 				  rq->cqe_dma[i]);
rq                366 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	vfree(rq->cqe_dma);
rq                367 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	vfree(rq->cqe);
rq                379 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
rq                386 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->hwif = hwif;
rq                388 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->wq = wq;
rq                390 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->irq = entry->vector;
rq                391 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->msix_entry = entry->entry;
rq                393 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->buf_sz = HINIC_RX_BUF_SZ;
rq                395 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	err = alloc_rq_skb_arr(rq);
rq                401 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	err = alloc_rq_cqe(rq);
rq                408 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
rq                409 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
rq                410 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 					      &rq->pi_dma_addr, GFP_KERNEL);
rq                411 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	if (!rq->pi_virt_addr) {
rq                420 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	free_rq_cqe(rq);
rq                423 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	free_rq_skb_arr(rq);
rq                431 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_clean_rq(struct hinic_rq *rq)
rq                433 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_hwif *hwif = rq->hwif;
rq                437 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
rq                438 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr,
rq                439 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 			  rq->pi_dma_addr);
rq                441 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	free_rq_cqe(rq);
rq                442 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	free_rq_skb_arr(rq);
rq                464 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
rq                466 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_wq *wq = rq->wq;
rq                794 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
rq                797 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size,
rq                813 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
rq                818 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->saved_skb[prod_idx] = skb;
rq                823 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe));
rq                835 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
rq                844 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx);
rq                848 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	cqe = rq->cqe[*cons_idx];
rq                856 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	*skb = rq->saved_skb[*cons_idx];
rq                870 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
rq                875 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_wq *wq = rq->wq;
rq                882 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	*cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs);
rq                884 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	*skb = rq->saved_skb[*cons_idx];
rq                897 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
rq                900 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
rq                910 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	hinic_put_wqe(rq->wq, wqe_size);
rq                920 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe,
rq                923 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
rq                938 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
rq                943 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_rq_cqe *cqe = rq->cqe[prod_idx];
rq                945 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	dma_addr_t cqe_dma = rq->cqe_dma[prod_idx];
rq                966 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx)
rq                968 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	*rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1));
rq                114 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h 	struct hinic_rq         rq;
rq                127 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h 			   struct hinic_rq *rq, u16 global_qid);
rq                135 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
rq                138 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_clean_rq(struct hinic_rq *rq);
rq                142 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
rq                198 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
rq                201 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
rq                204 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
rq                208 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
rq                213 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
rq                216 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe,
rq                219 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
rq                222 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx);
rq                207 drivers/net/ethernet/huawei/hinic/hinic_main.c 		struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
rq                209 drivers/net/ethernet/huawei/hinic/hinic_main.c 		err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev);
rq                139 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
rq                145 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
rq                173 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
rq                207 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
rq                222 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
rq                229 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
rq                231 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
rq                238 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_update(rxq->rq, prod_idx);
rq                250 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rq                255 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
rq                259 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);
rq                261 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);
rq                263 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
rq                286 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
rq                291 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
rq                328 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
rq                331 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rq                348 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
rq                356 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		cqe = rq->cqe[ci];
rq                358 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
rq                376 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		hinic_rq_put_wqe(rq, ci,
rq                411 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
rq                427 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rq                436 drivers/net/ethernet/huawei/hinic/hinic_rx.c 				   rq->msix_entry,
rq                459 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rq                465 drivers/net/ethernet/huawei/hinic/hinic_rx.c 				   rq->msix_entry,
rq                469 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);
rq                479 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rq                485 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	hinic_hwdev_msix_set(hwdev, rq->msix_entry,
rq                490 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
rq                496 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	qp = container_of(rq, struct hinic_qp, rq);
rq                497 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
rq                498 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
rq                503 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq *rq = rxq->rq;
rq                505 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	irq_set_affinity_hint(rq->irq, NULL);
rq                506 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	free_irq(rq->irq, rxq);
rq                518 drivers/net/ethernet/huawei/hinic/hinic_rx.c int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
rq                521 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
rq                525 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	rxq->rq = rq;
rq                 33 drivers/net/ethernet/huawei/hinic/hinic_rx.h 	struct hinic_rq         *rq;
rq                 48 drivers/net/ethernet/huawei/hinic/hinic_rx.h int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
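
The hinic_rx.c lines above show how the driver refills its receive queue with the hinic_rq_* API from hinic_hw_qp.h: query the free WQEBB count, fetch a WQE per buffer, prepare and write it, then publish the new producer index once. A sketch of that loop follows; my_alloc_rx_skb() is hypothetical, and the &prod_idx argument to hinic_rq_get_wqe() is an assumption because the listing truncates that call.

	/* Sketch of the RX refill flow visible above in hinic_rx.c; builds only
	 * inside drivers/net/ethernet/huawei/hinic.
	 */
	#include <linux/skbuff.h>
	#include "hinic_hw_qp.h"
	#include "hinic_rx.h"

	/* Hypothetical helper: allocate an skb of rxq->rq->buf_sz bytes,
	 * DMA-map it and describe it in @sge.  Returning NULL here just ends
	 * the refill loop in this sketch.
	 */
	static struct sk_buff *my_alloc_rx_skb(struct hinic_rxq *rxq,
					       struct hinic_sge *sge)
	{
		return NULL;
	}

	static void my_rx_refill(struct hinic_rxq *rxq)
	{
		int i, free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
		struct hinic_rq_wqe *rq_wqe;
		struct hinic_sge sge;
		struct sk_buff *skb;
		u16 prod_idx = 0;

		for (i = 0; i < free_wqebbs; i++) {
			skb = my_alloc_rx_skb(rxq, &sge);
			if (!skb)
				break;

			/* assumed third argument: &prod_idx (truncated above) */
			rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
						  &prod_idx);
			if (!rq_wqe)
				break;	/* a real driver frees the skb here */

			hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
			hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
		}

		if (i)
			hinic_rq_update(rxq->rq, prod_idx);	/* publish new PI */
	}
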
rq                613 drivers/net/ethernet/ibm/ehea/ehea_main.c static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
rq                626 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (rq == 2) {
rq                630 drivers/net/ethernet/ibm/ehea/ehea_main.c 	} else if (rq == 3) {
rq                665 drivers/net/ethernet/ibm/ehea/ehea_main.c 	int wqe_index, last_wqe_index, rq, port_reset;
rq                680 drivers/net/ethernet/ibm/ehea/ehea_main.c 		if (!ehea_check_cqe(cqe, &rq)) {
rq                681 drivers/net/ethernet/ibm/ehea/ehea_main.c 			if (rq == 1) {
rq                698 drivers/net/ethernet/ibm/ehea/ehea_main.c 			} else if (rq == 2) {
rq                731 drivers/net/ethernet/ibm/ehea/ehea_main.c 			port_reset = ehea_treat_poll_error(pr, rq, cqe,
rq               2310 drivers/net/ethernet/ibm/emac/core.c static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
rq               2313 drivers/net/ethernet/ibm/emac/core.c 	struct mii_ioctl_data *data = if_mii(rq);
rq                213 drivers/net/ethernet/intel/i40e/i40e_ptp.c 				   struct ptp_clock_request *rq, int on)
rq                 16 drivers/net/ethernet/intel/ice/ice_controlq.c 	(qinfo)->rq.head = prefix##_ARQH;			\
rq                 17 drivers/net/ethernet/intel/ice/ice_controlq.c 	(qinfo)->rq.tail = prefix##_ARQT;			\
rq                 18 drivers/net/ethernet/intel/ice/ice_controlq.c 	(qinfo)->rq.len = prefix##_ARQLEN;			\
rq                 19 drivers/net/ethernet/intel/ice/ice_controlq.c 	(qinfo)->rq.bah = prefix##_ARQBAH;			\
rq                 20 drivers/net/ethernet/intel/ice/ice_controlq.c 	(qinfo)->rq.bal = prefix##_ARQBAL;			\
rq                 21 drivers/net/ethernet/intel/ice/ice_controlq.c 	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
rq                 22 drivers/net/ethernet/intel/ice/ice_controlq.c 	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
rq                 23 drivers/net/ethernet/intel/ice/ice_controlq.c 	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
rq                111 drivers/net/ethernet/intel/ice/ice_controlq.c 	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
rq                112 drivers/net/ethernet/intel/ice/ice_controlq.c 						 &cq->rq.desc_buf.pa,
rq                114 drivers/net/ethernet/intel/ice/ice_controlq.c 	if (!cq->rq.desc_buf.va)
rq                116 drivers/net/ethernet/intel/ice/ice_controlq.c 	cq->rq.desc_buf.size = size;
rq                150 drivers/net/ethernet/intel/ice/ice_controlq.c 	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
rq                151 drivers/net/ethernet/intel/ice/ice_controlq.c 				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
rq                152 drivers/net/ethernet/intel/ice/ice_controlq.c 	if (!cq->rq.dma_head)
rq                154 drivers/net/ethernet/intel/ice/ice_controlq.c 	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
rq                161 drivers/net/ethernet/intel/ice/ice_controlq.c 		bi = &cq->rq.r.rq_bi[i];
rq                170 drivers/net/ethernet/intel/ice/ice_controlq.c 		desc = ICE_CTL_Q_DESC(cq->rq, i);
rq                196 drivers/net/ethernet/intel/ice/ice_controlq.c 		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
rq                197 drivers/net/ethernet/intel/ice/ice_controlq.c 				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
rq                198 drivers/net/ethernet/intel/ice/ice_controlq.c 		cq->rq.r.rq_bi[i].va = NULL;
rq                199 drivers/net/ethernet/intel/ice/ice_controlq.c 		cq->rq.r.rq_bi[i].pa = 0;
rq                200 drivers/net/ethernet/intel/ice/ice_controlq.c 		cq->rq.r.rq_bi[i].size = 0;
rq                202 drivers/net/ethernet/intel/ice/ice_controlq.c 	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
rq                297 drivers/net/ethernet/intel/ice/ice_controlq.c 	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
rq                302 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
rq                384 drivers/net/ethernet/intel/ice/ice_controlq.c 	if (cq->rq.count > 0) {
rq                396 drivers/net/ethernet/intel/ice/ice_controlq.c 	cq->rq.next_to_use = 0;
rq                397 drivers/net/ethernet/intel/ice/ice_controlq.c 	cq->rq.next_to_clean = 0;
rq                415 drivers/net/ethernet/intel/ice/ice_controlq.c 	cq->rq.count = cq->num_rq_entries;
rq                419 drivers/net/ethernet/intel/ice/ice_controlq.c 	ice_free_cq_ring(hw, &cq->rq);
rq                527 drivers/net/ethernet/intel/ice/ice_controlq.c 	if (!cq->rq.count) {
rq                533 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, cq->rq.head, 0);
rq                534 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, cq->rq.tail, 0);
rq                535 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, cq->rq.len, 0);
rq                536 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, cq->rq.bal, 0);
rq                537 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, cq->rq.bah, 0);
rq                540 drivers/net/ethernet/intel/ice/ice_controlq.c 	cq->rq.count = 0;
rq                543 drivers/net/ethernet/intel/ice/ice_controlq.c 	ICE_FREE_CQ_BUFS(hw, cq, rq);
rq                544 drivers/net/ethernet/intel/ice/ice_controlq.c 	ice_free_cq_ring(hw, &cq->rq);
rq               1038 drivers/net/ethernet/intel/ice/ice_controlq.c 	u16 ntc = cq->rq.next_to_clean;
rq               1053 drivers/net/ethernet/intel/ice/ice_controlq.c 	if (!cq->rq.count) {
rq               1061 drivers/net/ethernet/intel/ice/ice_controlq.c 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
rq               1070 drivers/net/ethernet/intel/ice/ice_controlq.c 	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
rq               1085 drivers/net/ethernet/intel/ice/ice_controlq.c 		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
rq               1095 drivers/net/ethernet/intel/ice/ice_controlq.c 	bi = &cq->rq.r.rq_bi[ntc];
rq               1106 drivers/net/ethernet/intel/ice/ice_controlq.c 	wr32(hw, cq->rq.tail, ntc);
rq               1111 drivers/net/ethernet/intel/ice/ice_controlq.c 	cq->rq.next_to_clean = ntc;
rq               1112 drivers/net/ethernet/intel/ice/ice_controlq.c 	cq->rq.next_to_use = ntu;
rq               1118 drivers/net/ethernet/intel/ice/ice_controlq.c 		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
rq               1119 drivers/net/ethernet/intel/ice/ice_controlq.c 		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
rq                 84 drivers/net/ethernet/intel/ice/ice_controlq.h 	struct ice_ctl_q_ring rq;	/* receive queue */
rq                992 drivers/net/ethernet/intel/ice/ice_main.c 	val = rd32(hw, cq->rq.len);
rq               1011 drivers/net/ethernet/intel/ice/ice_main.c 			wr32(hw, cq->rq.len, val);
rq               1097 drivers/net/ethernet/intel/ice/ice_main.c 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
rq               1098 drivers/net/ethernet/intel/ice/ice_main.c 	return cq->rq.next_to_clean != ntu;
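
The ice control-queue entries above model the admin receive queue as the rq ring inside struct ice_ctl_q_info, with its head/tail/len registers filled in by the ICE_CQ_INIT_REGS macro; ice_main.c decides whether work is pending by comparing the hardware head register against the driver's next_to_clean. A sketch of that check, using only the fields and accessors shown above, follows.

	/* Sketch of the "pending admin receive queue work?" check seen above in
	 * ice_main.c; builds only inside drivers/net/ethernet/intel/ice.
	 */
	#include "ice.h"

	static bool my_ctrlq_rq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
	{
		u16 ntu;

		/* hardware head index, masked to the ring's index width */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

		/* events are pending while next_to_clean lags the hardware head */
		return cq->rq.next_to_clean != ntu;
	}
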
rq                511 drivers/net/ethernet/intel/igb/igb_ptp.c 				       struct ptp_clock_request *rq, int on)
rq                522 drivers/net/ethernet/intel/igb/igb_ptp.c 	switch (rq->type) {
rq                525 drivers/net/ethernet/intel/igb/igb_ptp.c 		if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
rq                532 drivers/net/ethernet/intel/igb/igb_ptp.c 		if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
rq                533 drivers/net/ethernet/intel/igb/igb_ptp.c 		    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
rq                534 drivers/net/ethernet/intel/igb/igb_ptp.c 		    (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
rq                539 drivers/net/ethernet/intel/igb/igb_ptp.c 					   rq->extts.index);
rq                543 drivers/net/ethernet/intel/igb/igb_ptp.c 		if (rq->extts.index == 1) {
rq                554 drivers/net/ethernet/intel/igb/igb_ptp.c 			igb_pin_extts(igb, rq->extts.index, pin);
rq                568 drivers/net/ethernet/intel/igb/igb_ptp.c 		if (rq->perout.flags)
rq                573 drivers/net/ethernet/intel/igb/igb_ptp.c 					   rq->perout.index);
rq                577 drivers/net/ethernet/intel/igb/igb_ptp.c 		ts.tv_sec = rq->perout.period.sec;
rq                578 drivers/net/ethernet/intel/igb/igb_ptp.c 		ts.tv_nsec = rq->perout.period.nsec;
rq                588 drivers/net/ethernet/intel/igb/igb_ptp.c 		if (rq->perout.index == 1) {
rq                614 drivers/net/ethernet/intel/igb/igb_ptp.c 		if (rq->perout.index == 1) {
rq                622 drivers/net/ethernet/intel/igb/igb_ptp.c 			int i = rq->perout.index;
rq                624 drivers/net/ethernet/intel/igb/igb_ptp.c 			igb->perout[i].start.tv_sec = rq->perout.start.sec;
rq                625 drivers/net/ethernet/intel/igb/igb_ptp.c 			igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
rq                628 drivers/net/ethernet/intel/igb/igb_ptp.c 			wr32(trgttimh, rq->perout.start.sec);
rq                629 drivers/net/ethernet/intel/igb/igb_ptp.c 			wr32(trgttiml, rq->perout.start.nsec);
rq                657 drivers/net/ethernet/intel/igb/igb_ptp.c 				  struct ptp_clock_request *rq, int on)
rq                637 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 				    struct ptp_clock_request *rq, int on)
rq                648 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c 	if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp)
rq               2636 drivers/net/ethernet/jme.c jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
rq               2640 drivers/net/ethernet/jme.c 	struct mii_ioctl_data *mii_data = if_mii(rq);
rq                675 drivers/net/ethernet/korina.c static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                678 drivers/net/ethernet/korina.c 	struct mii_ioctl_data *data = if_mii(rq);
rq                513 drivers/net/ethernet/lantiq_etop.c ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                516 drivers/net/ethernet/lantiq_etop.c 	return phy_mii_ioctl(dev->phydev, rq, cmd);
rq                470 drivers/net/ethernet/marvell/octeontx2/af/mbox.h 		struct nix_rq_ctx_s rq;
rq                488 drivers/net/ethernet/marvell/octeontx2/af/mbox.h 		struct nix_rq_ctx_s rq;
rq                572 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
rq                603 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
rq                613 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			ena = (req->rq.ena & req->rq_mask.ena) |
rq                622 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			ena = (req->rq.ena & req->sq_mask.ena) |
rq                631 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 			ena = (req->rq.ena & req->cq_mask.ena) |
rq                645 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 				memcpy(&rsp->rq, ctx,
rq                693 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		aq_req.rq.ena = 0;
rq                818 drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h 	uint32_t rq			: 20;
rq                820 drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h 	uint32_t rq			: 20;
rq                380 drivers/net/ethernet/mellanox/mlx5/core/en.h 			struct mlx5e_rq *rq;
rq                468 drivers/net/ethernet/mellanox/mlx5/core/en.h 			struct mlx5e_rq *rq;
rq                609 drivers/net/ethernet/mellanox/mlx5/core/en.h typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
rq                709 drivers/net/ethernet/mellanox/mlx5/core/en.h 	struct mlx5e_rq            rq;
rq                754 drivers/net/ethernet/mellanox/mlx5/core/en.h 	struct mlx5e_rq_stats rq;
rq                921 drivers/net/ethernet/mellanox/mlx5/core/en.h static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
rq                923 drivers/net/ethernet/mellanox/mlx5/core/en.h 	switch (rq->wq_type) {
rq                925 drivers/net/ethernet/mellanox/mlx5/core/en.h 		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
rq                927 drivers/net/ethernet/mellanox/mlx5/core/en.h 		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
rq                931 drivers/net/ethernet/mellanox/mlx5/core/en.h static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
rq                933 drivers/net/ethernet/mellanox/mlx5/core/en.h 	switch (rq->wq_type) {
rq                935 drivers/net/ethernet/mellanox/mlx5/core/en.h 		return rq->mpwqe.wq.cur_sz;
rq                937 drivers/net/ethernet/mellanox/mlx5/core/en.h 		return rq->wqe.wq.cur_sz;
rq                945 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
rq                946 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
rq                949 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
rq                950 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
rq                951 drivers/net/ethernet/mellanox/mlx5/core/en.h bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
rq                953 drivers/net/ethernet/mellanox/mlx5/core/en.h bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
rq                954 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
rq                955 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
rq                957 drivers/net/ethernet/mellanox/mlx5/core/en.h mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
rq                960 drivers/net/ethernet/mellanox/mlx5/core/en.h mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
rq                963 drivers/net/ethernet/mellanox/mlx5/core/en.h mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
rq                966 drivers/net/ethernet/mellanox/mlx5/core/en.h mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
rq               1013 drivers/net/ethernet/mellanox/mlx5/core/en.h 		  struct xdp_umem *umem, struct mlx5e_rq *rq);
rq               1014 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
rq               1015 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
rq               1016 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_close_rq(struct mlx5e_rq *rq);
rq               1060 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
rq               1061 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_activate_rq(struct mlx5e_rq *rq);
rq               1062 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
rq               1063 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
rq               1064 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
rq                 31 drivers/net/ethernet/mellanox/mlx5/core/en/health.h void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
rq                 32 drivers/net/ethernet/mellanox/mlx5/core/en/health.h void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
rq                 24 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c 	data->rx_packets = stats->rq.packets;
rq                 25 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c 	data->rx_bytes   = stats->rq.bytes;
rq                 34 drivers/net/ethernet/mellanox/mlx5/core/en/params.h 	struct mlx5e_rq_param      rq;
rq                 61 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	struct mlx5e_rq *rq;
rq                 66 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	rq = &icosq->channel->rq;
rq                 79 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	mlx5e_deactivate_rq(rq);
rq                 93 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	mlx5e_free_rx_in_progress_descs(rq);
rq                 96 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	mlx5e_activate_rq(rq);
rq                 98 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	rq->stats->recover++;
rq                118 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
rq                120 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	struct net_device *dev = rq->netdev;
rq                123 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
rq                125 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 		netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
rq                128 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
rq                130 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 		netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
rq                141 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	struct mlx5e_rq *rq;
rq                145 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	rq = ctx;
rq                146 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	mdev = rq->mdev;
rq                147 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	dev = rq->netdev;
rq                148 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = mlx5e_query_rq_state(mdev, rq->rqn, &state);
rq                151 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 			   rq->rqn, err);
rq                158 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	mlx5e_deactivate_rq(rq);
rq                159 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	mlx5e_free_rx_descs(rq);
rq                161 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR);
rq                165 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
rq                166 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	mlx5e_activate_rq(rq);
rq                167 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	rq->stats->recover++;
rq                170 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
rq                174 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
rq                176 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	struct mlx5e_priv *priv = rq->channel->priv;
rq                180 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err_ctx.ctx = rq;
rq                182 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	sprintf(err_str, "ERR CQE on RQ: 0x%x", rq->rqn);
rq                191 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	struct mlx5e_rq *rq;
rq                194 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	rq = ctx;
rq                195 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	icosq = &rq->channel->icosq;
rq                196 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	eq = rq->cq.mcq.eq;
rq                197 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = mlx5e_health_channel_eq_recover(eq, rq->channel);
rq                204 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
rq                206 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	struct mlx5e_icosq *icosq = &rq->channel->icosq;
rq                207 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	struct mlx5e_priv *priv = rq->channel->priv;
rq                211 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err_ctx.ctx = rq;
rq                214 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 		icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
rq                234 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
rq                237 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	struct mlx5e_priv *priv = rq->channel->priv;
rq                247 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	icosq = &rq->channel->icosq;
rq                248 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state);
rq                256 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	wqes_sz = mlx5e_rqwq_get_cur_sz(rq);
rq                258 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 		  rq->mpwqe.wq.head : mlx5_wq_cyc_get_head(&rq->wqe.wq);
rq                264 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix);
rq                268 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
rq                276 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = devlink_fmsg_u8_pair_put(fmsg, "SW state", rq->state);
rq                292 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	err = mlx5e_reporter_cq_diagnose(&rq->cq, fmsg);
rq                317 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 	generic_rq = &priv->channels.c[0]->rq;
rq                358 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 		struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
rq                360 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c 		err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
rq                182 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
rq                184 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
rq                185 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h 		mlx5_wq_ll_reset(&rq->mpwqe.wq);
rq                186 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h 		rq->mpwqe.actual_wq_head = 0;
rq                188 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h 		mlx5_wq_cyc_reset(&rq->wqe.wq);
rq                 59 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
rq                 85 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
rq                113 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		xdpi.page.rq    = rq;
rq                121 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
rq                124 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
rq                125 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	struct xdp_umem *umem = rq->umem;
rq                139 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	xdp.rxq = &rq->xdp_rxq;
rq                153 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, &xdp)))
rq                155 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
rq                159 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		err = xdp_do_redirect(rq->netdev, &xdp, prog);
rq                162 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
rq                163 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
rq                165 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 			mlx5e_page_dma_unmap(rq, di);
rq                166 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		rq->stats->xdp_redirect++;
rq                173 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		trace_xdp_exception(rq->netdev, prog, act);
rq                176 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		rq->stats->xdp_drop++;
rq                375 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 			mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
rq                531 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
rq                533 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	struct mlx5e_xdpsq *xdpsq = rq->xdpsq;
rq                540 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
rq                542 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
rq                 65 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
rq                 71 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
rq                 10 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count)
rq                 15 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	return xsk_umem_has_addrs_rq(rq->umem, count);
rq                 18 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
rq                 21 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	struct xdp_umem *umem = rq->umem;
rq                 28 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 						      rq->buff.umem_headroom);
rq                 40 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	dma_sync_single_for_device(rq->pdev, dma_info->addr, PAGE_SIZE,
rq                 46 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c static inline void mlx5e_xsk_recycle_frame(struct mlx5e_rq *rq, u64 handle)
rq                 48 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	xsk_umem_fq_reuse(rq->umem, handle & rq->umem->chunk_mask);
rq                 55 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c void mlx5e_xsk_page_release(struct mlx5e_rq *rq,
rq                 58 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	mlx5e_xsk_recycle_frame(rq, dma_info->xsk.handle);
rq                 66 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	struct mlx5e_rq *rq = container_of(zca, struct mlx5e_rq, zca);
rq                 68 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	mlx5e_xsk_recycle_frame(rq, handle);
rq                 71 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
rq                 76 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
rq                 78 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 		rq->stats->buff_alloc_err++;
rq                 87 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
rq                 94 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	u16 rx_headroom = rq->buff.headroom - rq->buff.umem_headroom;
rq                101 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
rq                102 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 		rq->stats->oversize_pkts_sw_drop++;
rq                115 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	frag_size      = rq->buff.headroom + cqe_bcnt32;
rq                117 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	dma_sync_single_for_cpu(rq->pdev, di->addr, frag_size, DMA_BIDIRECTIONAL);
rq                121 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, true);
rq                140 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 		if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
rq                148 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	return mlx5e_xsk_construct_skb(rq, data, cqe_bcnt32);
rq                151 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
rq                157 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	u16 rx_headroom = rq->buff.headroom - rq->buff.umem_headroom;
rq                171 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	frag_size      = rq->buff.headroom + cqe_bcnt;
rq                173 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	dma_sync_single_for_cpu(rq->pdev, di->addr, frag_size, DMA_BIDIRECTIONAL);
rq                177 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 		rq->stats->wqe_err++;
rq                182 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, true);
rq                192 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	return mlx5e_xsk_construct_skb(rq, data, cqe_bcnt);
rq                 12 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h bool mlx5e_xsk_pages_enough_umem(struct mlx5e_rq *rq, int count);
rq                 13 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
rq                 15 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h void mlx5e_xsk_page_release(struct mlx5e_rq *rq,
rq                 18 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
rq                 23 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
rq                 28 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
rq                 30 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h 	if (!xsk_umem_uses_need_wakeup(rq->umem))
rq                 34 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h 		xsk_set_rx_need_wakeup(rq->umem);
rq                 36 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h 		xsk_clear_rx_need_wakeup(rq->umem);
rq                 56 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c 	mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
rq                 85 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c 	err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
rq                 46 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
rq                540 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c 		priv->channel_stats[arfs_rule->rxq].rq.arfs_err++;
rq                 47 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c 	struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
rq                 51 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c 	mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);
rq                530 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 		mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
rq               1860 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 			__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
rq               1862 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c 			__clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
rq                232 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
rq                243 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	cseg->imm       = rq->mkey_be;
rq                251 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
rq                254 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
rq                256 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
rq                257 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 						  sizeof(*rq->mpwqe.info)),
rq                259 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!rq->mpwqe.info)
rq                262 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
rq                301 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
rq                303 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
rq                305 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
rq                308 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
rq                313 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
rq                319 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	next_frag.di = &rq->wqe.di[0];
rq                321 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
rq                322 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
rq                324 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			&rq->wqe.frags[i << rq->wqe.info.log_num_frags];
rq                327 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
rq                346 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_init_di_list(struct mlx5e_rq *rq,
rq                349 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	int len = wq_sz << rq->wqe.info.log_num_frags;
rq                351 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
rq                353 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (!rq->wqe.di)
rq                356 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_init_frags_partition(rq);
rq                361 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_free_di_list(struct mlx5e_rq *rq)
rq                363 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	kvfree(rq->wqe.di);
rq                368 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
rq                370 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_reporter_rq_cqe_err(rq);
rq                378 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			  struct mlx5e_rq *rq)
rq                393 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->wq_type = params->rq_wq_type;
rq                394 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->pdev    = c->pdev;
rq                395 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->netdev  = c->netdev;
rq                396 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->tstamp  = c->tstamp;
rq                397 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->clock   = &mdev->clock;
rq                398 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->channel = c;
rq                399 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->ix      = c->ix;
rq                400 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->mdev    = mdev;
rq                401 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq                402 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->xdpsq   = &c->rq_xdpsq;
rq                403 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->umem    = umem;
rq                405 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (rq->umem)
rq                406 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->stats = &c->priv->channel_stats[c->ix].xskrq;
rq                408 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->stats = &c->priv->channel_stats[c->ix].rq;
rq                409 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
rq                411 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
rq                412 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (IS_ERR(rq->xdp_prog)) {
rq                413 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = PTR_ERR(rq->xdp_prog);
rq                414 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->xdp_prog = NULL;
rq                418 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq_xdp_ix = rq->ix;
rq                421 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix);
rq                425 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq                426 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
rq                427 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->buff.umem_headroom = xsk ? xsk->headroom : 0;
rq                430 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	switch (rq->wq_type) {
rq                432 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
rq                433 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 					&rq->wq_ctrl);
rq                437 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
rq                439 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
rq                448 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->post_wqes = mlx5e_post_rx_mpwqes;
rq                449 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq                451 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
rq                459 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (!rq->handle_rx_cqe) {
rq                465 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
rq                471 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq                472 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->mpwqe.num_strides =
rq                475 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_create_rq_umr_mkey(mdev, rq);
rq                478 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
rq                480 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
rq                485 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
rq                486 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 					 &rq->wq_ctrl);
rq                490 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
rq                492 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
rq                495 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			num_xsk_frames = wq_sz << rq->wqe.info.log_num_frags;
rq                497 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->wqe.info = rqp->frags_info;
rq                498 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->wqe.frags =
rq                499 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
rq                500 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 					(wq_sz << rq->wqe.info.log_num_frags)),
rq                502 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (!rq->wqe.frags) {
rq                507 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
rq                511 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->post_wqes = mlx5e_post_rx_wqes;
rq                512 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
rq                516 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
rq                519 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
rq                520 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (!rq->handle_rx_cqe) {
rq                526 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->wqe.skb_from_cqe = xsk ?
rq                531 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->mkey_be = c->mkey_be;
rq                542 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->zca.free = mlx5e_xsk_zca_free;
rq                543 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
rq                545 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 						 &rq->zca);
rq                553 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		pp_params.dma_dir   = rq->buff.map_dir;
rq                560 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->page_pool = page_pool_create(&pp_params);
rq                561 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (IS_ERR(rq->page_pool)) {
rq                562 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			err = PTR_ERR(rq->page_pool);
rq                563 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			rq->page_pool = NULL;
rq                566 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
rq                567 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 						 MEM_TYPE_PAGE_POOL, rq->page_pool);
rq                573 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
rq                575 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
rq                577 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
rq                578 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
rq                580 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
rq                582 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			wqe->data[0].lkey = rq->mkey_be;
rq                585 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
rq                588 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			for (f = 0; f < rq->wqe.info.num_frags; f++) {
rq                589 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				u32 frag_size = rq->wqe.info.arr[f].frag_size |
rq                593 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				wqe->data[f].lkey = rq->mkey_be;
rq                596 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
rq                604 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
rq                608 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
rq                612 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
rq                615 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->page_cache.head = 0;
rq                616 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->page_cache.tail = 0;
rq                621 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	switch (rq->wq_type) {
rq                623 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		kvfree(rq->mpwqe.info);
rq                624 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
rq                627 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		kvfree(rq->wqe.frags);
rq                628 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_free_di_list(rq);
rq                632 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (rq->xdp_prog)
rq                633 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		bpf_prog_put(rq->xdp_prog);
rq                634 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	xdp_rxq_info_unreg(&rq->xdp_rxq);
rq                635 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	page_pool_destroy(rq->page_pool);
rq                636 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_wq_destroy(&rq->wq_ctrl);
rq                641 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_free_rq(struct mlx5e_rq *rq)
rq                645 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (rq->xdp_prog)
rq                646 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		bpf_prog_put(rq->xdp_prog);
rq                648 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	switch (rq->wq_type) {
rq                650 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		kvfree(rq->mpwqe.info);
rq                651 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
rq                654 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		kvfree(rq->wqe.frags);
rq                655 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_free_di_list(rq);
rq                658 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	for (i = rq->page_cache.head; i != rq->page_cache.tail;
rq                660 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
rq                666 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_page_release_dynamic(rq, dma_info, false);
rq                669 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	xdp_rxq_info_unreg(&rq->xdp_rxq);
rq                670 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	page_pool_destroy(rq->page_pool);
rq                671 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_wq_destroy(&rq->wq_ctrl);
rq                674 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_create_rq(struct mlx5e_rq *rq,
rq                677 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = rq->mdev;
rq                686 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		sizeof(u64) * rq->wq_ctrl.buf.npages;
rq                696 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
rq                698 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
rq                700 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);
rq                702 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
rq                705 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
rq                712 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
rq                714 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5_core_dev *mdev = rq->mdev;
rq                727 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_rqwq_reset(rq);
rq                734 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
rq                741 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
rq                743 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5e_channel *c = rq->channel;
rq                765 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
rq                772 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
rq                774 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5e_channel *c = rq->channel;
rq                794 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
rq                801 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
rq                803 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
rq                806 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
rq                809 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	struct mlx5e_channel *c = rq->channel;
rq                811 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
rq                814 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
rq                821 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		    c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
rq                823 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_reporter_rx_timeout(rq);
rq                827 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
rq                833 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
rq                836 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	wq = &rq->mpwqe.wq;
rq                840 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
rq                841 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		rq->dealloc_wqe(rq, head);
rq                845 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->mpwqe.actual_wq_head = wq->head;
rq                846 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->mpwqe.umr_in_progress = 0;
rq                847 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->mpwqe.umr_completed = 0;
rq                850 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
rq                855 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
rq                856 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
rq                858 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		mlx5e_free_rx_in_progress_descs(rq);
rq                866 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			rq->dealloc_wqe(rq, wqe_ix);
rq                871 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		struct mlx5_wq_cyc *wq = &rq->wqe.wq;
rq                875 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			rq->dealloc_wqe(rq, wqe_ix);
rq                884 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		  struct xdp_umem *umem, struct mlx5e_rq *rq)
rq                888 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_alloc_rq(c, params, xsk, umem, param, rq);
rq                892 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_create_rq(rq, param);
rq                896 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
rq                901 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
rq                904 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
rq                911 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
rq                916 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_destroy_rq(rq);
rq                918 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_free_rq(rq);
rq                923 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_activate_rq(struct mlx5e_rq *rq)
rq                925 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
rq                926 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_trigger_irq(&rq->channel->icosq);
rq                929 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
rq                931 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
rq                932 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
rq                935 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_close_rq(struct mlx5e_rq *rq)
rq                937 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	cancel_work_sync(&rq->dim.work);
rq                938 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	cancel_work_sync(&rq->channel->icosq.recover_work);
rq                939 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	cancel_work_sync(&rq->recover_work);
rq                940 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_destroy_rq(rq);
rq                941 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_free_rx_descs(rq);
rq                942 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_free_rq(rq);
rq               1867 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
rq               1894 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5e_open_rq(c, params, &cparam->rq, NULL, NULL, &c->rq);
rq               1905 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_close_rq(&c->rq);
rq               1924 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_close_cq(&c->rq.cq);
rq               1941 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_close_rq(&c->rq);
rq               1949 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_close_cq(&c->rq.cq);
rq               2039 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_activate_rq(&c->rq);
rq               2053 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_deactivate_rq(&c->rq);
rq               2334 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);
rq               2336 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
rq               2405 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
rq               2544 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			rqn = rrp.rss.channels->c[ix]->rq.rqn;
rq               2586 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	return rrp.rss.channels->c[ix]->rq.rqn;
rq               3118 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 			       struct mlx5e_rq *rq,
rq               3127 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
rq               3128 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 				 &rq->wq_ctrl);
rq               3133 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	xdp_rxq_info_unused(&rq->xdp_rxq);
rq               3135 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 	rq->mdev = mdev;
rq               3438 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
rq               3452 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
rq               3561 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
rq               4479 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
rq               4485 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		old_prog = xchg(&c->rq.xdp_prog, prog);
rq               4495 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
rq                192 drivers/net/ethernet/mellanox/mlx5/core/en_rep.h void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
rq                 66 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
rq                 70 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
rq                 76 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	rq->stats->cqe_compress_blks++;
rq                111 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
rq                115 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
rq                125 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
rq                129 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
rq                132 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
rq                136 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
rq                138 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_decompress_cqe(rq, wq, cqcc);
rq                143 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
rq                148 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
rq                160 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
rq                161 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->handle_rx_cqe(rq, &cqd->title);
rq                166 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	rq->stats->cqe_compress_pkts += cqe_count;
rq                171 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
rq                175 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
rq                178 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_read_title_slot(rq, wq, cc);
rq                180 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_decompress_cqe(rq, wq, cc);
rq                181 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	rq->handle_rx_cqe(rq, &cqd->title);
rq                184 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
rq                192 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
rq                195 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_page_cache *cache = &rq->page_cache;
rq                197 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_rq_stats *stats = rq->stats;
rq                214 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
rq                217 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_page_cache *cache = &rq->page_cache;
rq                218 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_rq_stats *stats = rq->stats;
rq                234 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	dma_sync_single_for_device(rq->pdev, dma_info->addr,
rq                240 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
rq                243 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (mlx5e_rx_cache_get(rq, dma_info))
rq                246 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
rq                250 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
rq                251 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				      PAGE_SIZE, rq->buff.map_dir);
rq                252 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
rq                253 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		page_pool_recycle_direct(rq->page_pool, dma_info->page);
rq                261 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
rq                264 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rq->umem)
rq                265 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		return mlx5e_xsk_page_alloc_umem(rq, dma_info);
rq                267 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		return mlx5e_page_alloc_pool(rq, dma_info);
rq                270 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
rq                272 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
rq                275 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
rq                280 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		if (mlx5e_rx_cache_put(rq, dma_info))
rq                283 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_page_dma_unmap(rq, dma_info);
rq                284 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		page_pool_recycle_direct(rq->page_pool, dma_info->page);
rq                286 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_page_dma_unmap(rq, dma_info);
rq                287 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		page_pool_release_page(rq->page_pool, dma_info->page);
rq                292 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_page_release(struct mlx5e_rq *rq,
rq                296 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rq->umem)
rq                301 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_xsk_page_release(rq, dma_info);
rq                303 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_page_release_dynamic(rq, dma_info, recycle);
rq                306 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
rq                317 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		err = mlx5e_page_alloc(rq, frag->di);
rq                322 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
rq                327 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_page_release(rq, frag->di, recycle);
rq                330 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
rq                332 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
rq                335 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
rq                338 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
rq                342 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
rq                343 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		err = mlx5e_get_rx_frag(rq, frag);
rq                348 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 						frag->offset + rq->buff.headroom);
rq                355 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_put_rx_frag(rq, --frag, true);
rq                360 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
rq                366 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
rq                367 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_put_rx_frag(rq, wi, recycle);
rq                370 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
rq                372 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
rq                374 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_free_rx_wqe(rq, wi, false);
rq                377 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
rq                379 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
rq                383 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rq->umem) {
rq                384 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
rq                386 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		if (unlikely(!mlx5e_xsk_pages_enough_umem(rq, pages_desired)))
rq                393 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
rq                402 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_dealloc_rx_wqe(rq, ix + i);
rq                408 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
rq                412 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	dma_sync_single_for_cpu(rq->pdev,
rq                435 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
rq                450 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			mlx5e_page_release(rq, &dma_info[i], recycle);
rq                453 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
rq                455 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
rq                485 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
rq                487 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
rq                489 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_icosq *sq = &rq->channel->icosq;
rq                497 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rq->umem &&
rq                498 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	    unlikely(!mlx5e_xsk_pages_enough_umem(rq, MLX5_MPWRQ_PAGES_PER_WQE))) {
rq                511 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
rq                514 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		err = mlx5e_page_alloc(rq, dma_info);
rq                530 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	sq->db.ico_wqe[pi].umr.rq = rq;
rq                540 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_page_release(rq, dma_info, true);
rq                544 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	rq->stats->buff_alloc_err++;
rq                549 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
rq                551 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
rq                553 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_free_rx_mpwqe(rq, wi, false);
rq                556 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
rq                558 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
rq                562 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
rq                565 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	wqe_bulk = rq->wqe.info.wqe_bulk;
rq                573 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
rq                575 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			rq->stats->buff_alloc_err++;
rq                636 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				wi->umr.rq->mpwqe.umr_completed++;
rq                653 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
rq                655 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_icosq *sq = &rq->channel->icosq;
rq                656 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
rq                657 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	u8  umr_completed = rq->mpwqe.umr_completed;
rq                662 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
rq                666 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_post_rx_mpwqe(rq, umr_completed);
rq                667 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->mpwqe.umr_in_progress -= umr_completed;
rq                668 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->mpwqe.umr_completed = 0;
rq                671 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
rq                673 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
rq                674 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->stats->congst_umr++;
rq                680 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	head = rq->mpwqe.actual_wq_head;
rq                683 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		alloc_err = mlx5e_alloc_rx_mpwqe(rq, head);
rq                690 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	rq->mpwqe.umr_last_bulk    = missing - i;
rq                696 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
rq                697 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	rq->mpwqe.actual_wq_head   = head;
rq                705 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(alloc_err == -ENOMEM && rq->umem))
rq                806 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
rq                820 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	rq->stats->ecn_mark += !!rc;
rq                908 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				     struct mlx5e_rq *rq,
rq                912 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_rq_stats *stats = rq->stats;
rq                926 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
rq                948 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
rq                978 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				      struct mlx5e_rq *rq,
rq                982 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_rq_stats *stats = rq->stats;
rq                983 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct net_device *netdev = rq->netdev;
rq               1002 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
rq               1004 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
rq               1006 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_record_rx_queue(skb, rq->ix);
rq               1019 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
rq               1022 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_enable_ecn(rq, skb);
rq               1027 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
rq               1032 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_rq_stats *stats = rq->stats;
rq               1036 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
rq               1040 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
rq               1047 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->stats->buff_alloc_err++;
rq               1058 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
rq               1062 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	u16 rx_headroom = rq->buff.headroom;
rq               1072 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
rq               1078 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false);
rq               1083 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
rq               1094 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
rq               1097 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
rq               1107 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = napi_alloc_skb(rq->cq.napi,
rq               1110 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->stats->buff_alloc_err++;
rq               1120 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
rq               1129 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
rq               1137 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
rq               1142 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	    !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state))
rq               1143 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		queue_work(rq->channel->priv->wq, &rq->recover_work);
rq               1146 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
rq               1148 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
rq               1155 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	wi       = get_frag(rq, ci);
rq               1159 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		trigger_report(rq, cqe);
rq               1160 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->stats->wqe_err++;
rq               1164 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
rq               1167 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			      rq, cqe, wi, cqe_bcnt);
rq               1170 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
rq               1179 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
rq               1180 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	napi_gro_receive(rq->cq.napi, skb);
rq               1183 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_free_rx_wqe(rq, wi, true);
rq               1189 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
rq               1191 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct net_device *netdev = rq->netdev;
rq               1195 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
rq               1202 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	wi       = get_frag(rq, ci);
rq               1206 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->stats->wqe_err++;
rq               1210 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
rq               1213 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
rq               1222 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
rq               1227 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	napi_gro_receive(rq->cq.napi, skb);
rq               1230 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_free_rx_wqe(rq, wi, true);
rq               1237 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
rq               1247 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = napi_alloc_skb(rq->cq.napi,
rq               1250 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->stats->buff_alloc_err++;
rq               1265 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
rq               1267 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_add_skb_frag(rq, skb, di, frag_offset,
rq               1274 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
rq               1283 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
rq               1287 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	u16 rx_headroom = rq->buff.headroom;
rq               1295 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
rq               1296 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->stats->oversize_pkts_sw_drop++;
rq               1304 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
rq               1310 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32, false);
rq               1313 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
rq               1318 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
rq               1328 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
rq               1332 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
rq               1334 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
rq               1345 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		trigger_report(rq, cqe);
rq               1346 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->stats->wqe_err++;
rq               1351 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		struct mlx5e_rq_stats *stats = rq->stats;
rq               1360 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
rq               1363 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			      rq, wi, cqe_bcnt, head_offset, page_idx);
rq               1367 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
rq               1368 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	napi_gro_receive(rq->cq.napi, skb);
rq               1371 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
rq               1374 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	wq  = &rq->mpwqe.wq;
rq               1376 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_free_rx_mpwqe(rq, wi, true);
rq               1382 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
rq               1387 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
rq               1390 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rq->cqd.left) {
rq               1391 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
rq               1392 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		if (rq->cqd.left || work_done >= budget)
rq               1406 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				mlx5e_decompress_cqes_start(rq, cqwq,
rq               1413 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
rq               1414 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				mlx5e_handle_rx_cqe, rq, cqe);
rq               1418 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (rq->xdp_prog)
rq               1419 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_xdp_rx_poll_complete(rq);
rq               1434 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
rq               1449 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
rq               1463 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	stats = &priv->channel_stats[rq->ix].rq;
rq               1493 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
rq               1495 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_record_rx_queue(skb, rq->ix);
rq               1512 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
rq               1514 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
rq               1521 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	wi       = get_frag(rq, ci);
rq               1525 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->stats->wqe_err++;
rq               1529 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
rq               1532 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			      rq, cqe, wi, cqe_bcnt);
rq               1536 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
rq               1541 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	napi_gro_receive(rq->cq.napi, skb);
rq               1544 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_free_rx_wqe(rq, wi, true);
rq               1552 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
rq               1554 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
rq               1561 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	wi       = get_frag(rq, ci);
rq               1565 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		rq->stats->wqe_err++;
rq               1569 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
rq               1572 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			      rq, cqe, wi, cqe_bcnt);
rq               1576 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
rq               1580 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
rq               1581 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	napi_gro_receive(rq->cq.napi, skb);
rq               1584 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_free_rx_wqe(rq, wi, true);
rq                186 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
rq               1637 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
rq                 62 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
rq                 64 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c 	struct mlx5e_rq_stats *stats = rq->stats;
rq                 67 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c 	if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
rq                 70 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c 	dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
rq                 71 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c 	net_dim(&rq->dim, dim_sample);
rq                116 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c 	struct mlx5e_rq *rq = &c->rq;
rq                139 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c 			work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
rq                146 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c 	busy |= rq->post_wqes(rq);
rq                178 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c 	mlx5e_handle_rx_dim(rq);
rq                180 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c 	mlx5e_cq_arm(&rq->cq);
rq                110 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
rq                115 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
rq                116 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
rq                121 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	conn->qp.rq.pc++;
rq                122 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	conn->qp.rq.bufs[ix] = buf;
rq                126 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	*conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
rq                256 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
rq                257 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	buf = conn->qp.rq.bufs[ix];
rq                258 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	conn->qp.rq.bufs[ix] = NULL;
rq                259 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	conn->qp.rq.cc++;
rq                542 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	conn->qp.rq.pc = 0;
rq                543 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	conn->qp.rq.cc = 0;
rq                544 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	conn->qp.rq.size = roundup_pow_of_two(rx_size);
rq                550 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
rq                556 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	conn->qp.rq.bufs = kvcalloc(conn->qp.rq.size,
rq                557 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 				    sizeof(conn->qp.rq.bufs[0]),
rq                559 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	if (!conn->qp.rq.bufs) {
rq                591 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
rq                615 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	kvfree(conn->qp.rq.bufs);
rq                627 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	for (ix = 0; ix < conn->qp.rq.size; ix++) {
rq                628 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		if (!conn->qp.rq.bufs[ix])
rq                630 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]);
rq                631 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		kfree(conn->qp.rq.bufs[ix]);
rq                632 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		conn->qp.rq.bufs[ix] = NULL;
rq                665 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	kvfree(conn->qp.rq.bufs);
rq                 82 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h 		} rq;
rq                128 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 		rq_stats = &channel_stats->rq;
rq                126 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
rq                222 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 				struct ptp_clock_request *rq,
rq                240 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
rq                247 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
rq                248 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
rq                249 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
rq                252 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	if (rq->extts.index >= clock->ptp_info.n_pins)
rq                256 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
rq                260 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
rq                265 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		pin = rq->extts.index;
rq                284 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 				 struct ptp_clock_request *rq,
rq                307 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	if (rq->perout.flags)
rq                310 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	if (rq->perout.index >= clock->ptp_info.n_pins)
rq                315 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 				   rq->perout.index);
rq                321 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		ts.tv_sec = rq->perout.period.sec;
rq                322 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		ts.tv_nsec = rq->perout.period.nsec;
rq                328 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		ts.tv_sec = rq->perout.start.sec;
rq                329 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		ts.tv_nsec = rq->perout.start.nsec;
rq                344 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		pin = rq->perout.index;
rq                364 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 			      struct ptp_clock_request *rq,
rq                375 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 			   struct ptp_clock_request *rq,
rq                378 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 	switch (rq->type) {
rq                380 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		return mlx5_extts_configure(ptp, rq, on);
rq                382 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		return mlx5_perout_configure(ptp, rq, on);
rq                384 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c 		return mlx5_pps_configure(ptp, rq, on);
rq                607 drivers/net/ethernet/mellanox/mlx5/core/qp.c 				struct mlx5_core_qp *rq)
rq                616 drivers/net/ethernet/mellanox/mlx5/core/qp.c 	rq->uid = MLX5_GET(create_rq_in, in, uid);
rq                617 drivers/net/ethernet/mellanox/mlx5/core/qp.c 	rq->qpn = rqn;
rq                618 drivers/net/ethernet/mellanox/mlx5/core/qp.c 	err = create_resource_common(dev, rq, MLX5_RES_RQ);
rq                625 drivers/net/ethernet/mellanox/mlx5/core/qp.c 	destroy_rq_tracked(dev, rq->qpn, rq->uid);
rq                632 drivers/net/ethernet/mellanox/mlx5/core/qp.c 				  struct mlx5_core_qp *rq)
rq                634 drivers/net/ethernet/mellanox/mlx5/core/qp.c 	destroy_resource_common(dev, rq);
rq                635 drivers/net/ethernet/mellanox/mlx5/core/qp.c 	destroy_rq_tracked(dev, rq->qpn, rq->uid);
rq                126 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dr_qp->rq.pc = 0;
rq                127 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dr_qp->rq.cc = 0;
rq                128 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	dr_qp->rq.wqe_cnt = 4;
rq                134 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
rq                173 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
rq                988 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h 	} rq;
rq                135 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc);
rq                153 drivers/net/ethernet/mellanox/mlx5/core/wq.c 	wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
rq                 60 drivers/net/ethernet/mellanox/mlx5/core/wq.h 	struct mlx5_wq_cyc	rq;
rq                638 drivers/net/ethernet/natsemi/natsemi.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               3076 drivers/net/ethernet/natsemi/natsemi.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               3078 drivers/net/ethernet/natsemi/natsemi.c 	struct mii_ioctl_data *data = if_mii(rq);
rq               6616 drivers/net/ethernet/neterion/s2io.c static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               3254 drivers/net/ethernet/neterion/vxge/vxge-main.c static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               3260 drivers/net/ethernet/neterion/vxge/vxge-main.c 		return vxge_hwtstamp_set(vdev, rq->ifr_data);
rq               3262 drivers/net/ethernet/neterion/vxge/vxge-main.c 		return vxge_hwtstamp_get(vdev, rq->ifr_data);
rq                549 drivers/net/ethernet/packetengines/hamachi.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1859 drivers/net/ethernet/packetengines/hamachi.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1862 drivers/net/ethernet/packetengines/hamachi.c 	struct mii_ioctl_data *data = if_mii(rq);
rq               1869 drivers/net/ethernet/packetengines/hamachi.c 		u32 *d = (u32 *)&rq->ifr_ifru;
rq                344 drivers/net/ethernet/packetengines/yellowfin.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1337 drivers/net/ethernet/packetengines/yellowfin.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1341 drivers/net/ethernet/packetengines/yellowfin.c 	struct mii_ioctl_data *data = if_mii(rq);
rq                145 drivers/net/ethernet/qlogic/qede/qede_ptp.c 					     struct ptp_clock_request *rq,
rq                501 drivers/net/ethernet/rdc/r6040.c static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                506 drivers/net/ethernet/rdc/r6040.c 	return phy_mii_ioctl(dev->phydev, rq, cmd);
rq               1604 drivers/net/ethernet/realtek/8139cp.c static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
rq               1614 drivers/net/ethernet/realtek/8139cp.c 	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
rq                656 drivers/net/ethernet/realtek/8139too.c static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
rq               2500 drivers/net/ethernet/realtek/8139too.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               2509 drivers/net/ethernet/realtek/8139too.c 	rc = generic_mii_ioctl(&tp->mii, if_mii(rq), cmd, NULL);
rq               2651 drivers/net/ethernet/renesas/sh_eth.c static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
rq               2661 drivers/net/ethernet/renesas/sh_eth.c 	return phy_mii_ioctl(phydev, rq, cmd);
rq               1931 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1944 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c 		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
rq                113 drivers/net/ethernet/sgi/ioc3-eth.c static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1611 drivers/net/ethernet/sgi/ioc3-eth.c static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1617 drivers/net/ethernet/sgi/ioc3-eth.c 	rc = generic_mii_ioctl(&ip->mii, if_mii(rq), cmd, NULL);
rq                765 drivers/net/ethernet/sgi/meth.c static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                234 drivers/net/ethernet/sis/sis900.c static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd);
rq               2214 drivers/net/ethernet/sis/sis900.c static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
rq               2217 drivers/net/ethernet/sis/sis900.c 	struct mii_ioctl_data *data = if_mii(rq);
rq                301 drivers/net/ethernet/smsc/epic100.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1470 drivers/net/ethernet/smsc/epic100.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1474 drivers/net/ethernet/smsc/epic100.c 	struct mii_ioctl_data *data = if_mii(rq);
rq                273 drivers/net/ethernet/smsc/smc91c92_cs.c static int smc_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1994 drivers/net/ethernet/smsc/smc91c92_cs.c static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
rq               1997 drivers/net/ethernet/smsc/smc91c92_cs.c 	struct mii_ioctl_data *mii = if_mii(rq);
rq               3930 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               3942 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
rq               3945 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		ret = stmmac_hwtstamp_set(dev, rq);
rq               3948 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		ret = stmmac_hwtstamp_get(dev, rq);
rq                133 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 			 struct ptp_clock_request *rq, int on)
rq                141 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 	switch (rq->type) {
rq                144 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 		if (rq->perout.flags)
rq                147 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 		cfg = &priv->pps[rq->perout.index];
rq                149 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 		cfg->start.tv_sec = rq->perout.start.sec;
rq                150 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 		cfg->start.tv_nsec = rq->perout.start.nsec;
rq                151 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 		cfg->period.tv_sec = rq->perout.period.sec;
rq                152 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 		cfg->period.tv_nsec = rq->perout.period.nsec;
rq                156 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c 					     rq->perout.index, cfg, on,
rq                288 drivers/net/ethernet/ti/cpts.c 			   struct ptp_clock_request *rq, int on)
rq                161 drivers/net/ethernet/ti/tlan.c static int	tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq                958 drivers/net/ethernet/ti/tlan.c static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                961 drivers/net/ethernet/ti/tlan.c 	struct mii_ioctl_data *data = if_mii(rq);
rq                487 drivers/net/ethernet/toshiba/tc35815.c static int	tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               2012 drivers/net/ethernet/toshiba/tc35815.c static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               2018 drivers/net/ethernet/toshiba/tc35815.c 	return phy_mii_ioctl(dev->phydev, rq, cmd);
rq               1521 drivers/net/ethernet/tundra/tsi108_eth.c static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1526 drivers/net/ethernet/tundra/tsi108_eth.c 	return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
rq                518 drivers/net/ethernet/via/via-rhine.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               2400 drivers/net/ethernet/via/via-rhine.c static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               2409 drivers/net/ethernet/via/via-rhine.c 	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
rq               2408 drivers/net/ethernet/via/via-velocity.c static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               2423 drivers/net/ethernet/via/via-velocity.c 		ret = velocity_mii_ioctl(dev, rq, cmd);
rq               1211 drivers/net/ethernet/xilinx/ll_temac_main.c static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
rq               1219 drivers/net/ethernet/xilinx/ll_temac_main.c 	return phy_mii_ioctl(ndev->phydev, rq, cmd);
rq               1248 drivers/net/ethernet/xilinx/xilinx_emaclite.c static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1257 drivers/net/ethernet/xilinx/xilinx_emaclite.c 		return phy_mii_ioctl(dev->phydev, rq, cmd);
rq                298 drivers/net/ethernet/xircom/xirc2ps_cs.c static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1421 drivers/net/ethernet/xircom/xirc2ps_cs.c do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1425 drivers/net/ethernet/xircom/xirc2ps_cs.c     struct mii_ioctl_data *data = if_mii(rq);
rq               1428 drivers/net/ethernet/xircom/xirc2ps_cs.c 	  dev->name, rq->ifr_ifrn.ifrn_name, cmd,
rq                106 drivers/net/fddi/skfp/skfddi.c static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq                957 drivers/net/fddi/skfp/skfddi.c static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                964 drivers/net/fddi/skfp/skfddi.c 	if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
rq               1564 drivers/net/hippi/rrunner.c static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1599 drivers/net/hippi/rrunner.c 		error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
rq               1611 drivers/net/hippi/rrunner.c 		image = memdup_user(rq->ifr_data, EEPROM_BYTES);
rq               1654 drivers/net/hippi/rrunner.c 		return put_user(0x52523032, (int __user *)rq->ifr_data);
rq                838 drivers/net/hippi/rrunner.h static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq                 43 drivers/net/ifb.c 	struct sk_buff_head     rq;
rq                 73 drivers/net/ifb.c 		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
rq                108 drivers/net/ifb.c 		skb = skb_peek(&txp->rq);
rq                169 drivers/net/ifb.c 		__skb_queue_head_init(&txp->rq);
rq                203 drivers/net/ifb.c 		__skb_queue_purge(&txp->rq);
rq                252 drivers/net/ifb.c 	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
rq                255 drivers/net/ifb.c 	__skb_queue_tail(&txp->rq, skb);
rq                462 drivers/net/phy/dp83640.c 			      struct ptp_clock_request *rq, int on)
rq                470 drivers/net/phy/dp83640.c 	switch (rq->type) {
rq                473 drivers/net/phy/dp83640.c 		if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
rq                480 drivers/net/phy/dp83640.c 		if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
rq                481 drivers/net/phy/dp83640.c 		    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
rq                482 drivers/net/phy/dp83640.c 		    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
rq                485 drivers/net/phy/dp83640.c 		index = rq->extts.index;
rq                496 drivers/net/phy/dp83640.c 			if (rq->extts.flags & PTP_FALLING_EDGE)
rq                508 drivers/net/phy/dp83640.c 		if (rq->perout.flags)
rq                510 drivers/net/phy/dp83640.c 		if (rq->perout.index >= N_PER_OUT)
rq                512 drivers/net/phy/dp83640.c 		return periodic_output(clock, rq, on, rq->perout.index);
rq               1208 drivers/net/plip/plip.c plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1211 drivers/net/plip/plip.c 	struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
rq                 82 drivers/net/ppp/ppp_generic.c 	struct sk_buff_head rq;		/* receive queue for pppd */
rq                439 drivers/net/ppp/ppp_generic.c 		skb = skb_dequeue(&pf->rq);
rq                538 drivers/net/ppp/ppp_generic.c 	if (skb_peek(&pf->rq))
rq               1599 drivers/net/ppp/ppp_generic.c 		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
rq               1601 drivers/net/ppp/ppp_generic.c 		skb_queue_tail(&ppp->file.rq, skb);
rq               2028 drivers/net/ppp/ppp_generic.c 		skb_queue_tail(&pch->file.rq, skb);
rq               2030 drivers/net/ppp/ppp_generic.c 		while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
rq               2031 drivers/net/ppp/ppp_generic.c 		       (skb = skb_dequeue(&pch->file.rq)))
rq               2181 drivers/net/ppp/ppp_generic.c 		skb_queue_tail(&ppp->file.rq, skb);
rq               2183 drivers/net/ppp/ppp_generic.c 		while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
rq               2184 drivers/net/ppp/ppp_generic.c 		       (skb = skb_dequeue(&ppp->file.rq)))
rq               3082 drivers/net/ppp/ppp_generic.c 	skb_queue_head_init(&pf->rq);
rq               3110 drivers/net/ppp/ppp_generic.c 	skb_queue_purge(&ppp->file.rq);
rq               3264 drivers/net/ppp/ppp_generic.c 	skb_queue_purge(&pch->file.rq);
rq                111 drivers/net/slip/slip.c static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1189 drivers/net/slip/slip.c static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1192 drivers/net/slip/slip.c 	unsigned long *p = (unsigned long *)&rq->ifr_ifru;
rq                107 drivers/net/usb/asix_devices.c static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
rq                111 drivers/net/usb/asix_devices.c 	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
rq                 42 drivers/net/usb/ax88172a.c static int ax88172a_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
rq                 50 drivers/net/usb/ax88172a.c 	return phy_mii_ioctl(net->phydev, rq, cmd);
rq                811 drivers/net/usb/ax88179_178a.c static int ax88179_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
rq                814 drivers/net/usb/ax88179_178a.c 	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
rq                270 drivers/net/usb/dm9601.c static int dm9601_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
rq                274 drivers/net/usb/dm9601.c 	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
rq               1667 drivers/net/usb/lan78xx.c static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
rq               1672 drivers/net/usb/lan78xx.c 	return phy_mii_ioctl(netdev->phydev, rq, cmd);
rq                319 drivers/net/usb/mcs7830.c static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
rq                322 drivers/net/usb/mcs7830.c 	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
rq               1002 drivers/net/usb/pegasus.c static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
rq               1004 drivers/net/usb/pegasus.c 	__u16 *data = (__u16 *) &rq->ifr_ifru;
rq               5393 drivers/net/usb/r8152.c static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
rq               5396 drivers/net/usb/r8152.c 	struct mii_ioctl_data *data = if_mii(rq);
rq                837 drivers/net/usb/rtl8150.c static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
rq                840 drivers/net/usb/rtl8150.c 	u16 *data = (u16 *) & rq->ifr_ifru;
rq                748 drivers/net/usb/smsc75xx.c static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
rq                755 drivers/net/usb/smsc75xx.c 	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
rq                892 drivers/net/usb/smsc95xx.c static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
rq                899 drivers/net/usb/smsc95xx.c 	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
rq                238 drivers/net/usb/sr9700.c static int sr9700_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
rq                242 drivers/net/usb/sr9700.c 	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
rq                488 drivers/net/usb/sr9800.c static int sr_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
rq                492 drivers/net/usb/sr9800.c 	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
rq                 65 drivers/net/veth.c 	struct veth_rq		*rq;
rq                157 drivers/net/veth.c 		const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
rq                208 drivers/net/veth.c static void __veth_xdp_flush(struct veth_rq *rq)
rq                212 drivers/net/veth.c 	if (!rq->rx_notify_masked) {
rq                213 drivers/net/veth.c 		rq->rx_notify_masked = true;
rq                214 drivers/net/veth.c 		napi_schedule(&rq->xdp_napi);
rq                218 drivers/net/veth.c static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
rq                220 drivers/net/veth.c 	if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
rq                229 drivers/net/veth.c 			    struct veth_rq *rq, bool xdp)
rq                232 drivers/net/veth.c 		veth_xdp_rx(rq, skb) :
rq                239 drivers/net/veth.c 	struct veth_rq *rq = NULL;
rq                255 drivers/net/veth.c 		rq = &rcv_priv->rq[rxq];
rq                256 drivers/net/veth.c 		rcv_xdp = rcu_access_pointer(rq->xdp_prog);
rq                262 drivers/net/veth.c 	if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) {
rq                277 drivers/net/veth.c 		__veth_xdp_flush(rq);
rq                316 drivers/net/veth.c 		struct veth_rq_stats *stats = &priv->rq[i].stats;
rq                399 drivers/net/veth.c 	struct veth_rq *rq;
rq                413 drivers/net/veth.c 	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
rq                418 drivers/net/veth.c 	if (!rcu_access_pointer(rq->xdp_prog)) {
rq                426 drivers/net/veth.c 	spin_lock(&rq->xdp_ring.producer_lock);
rq                432 drivers/net/veth.c 			     __ptr_ring_produce(&rq->xdp_ring, ptr))) {
rq                437 drivers/net/veth.c 	spin_unlock(&rq->xdp_ring.producer_lock);
rq                440 drivers/net/veth.c 		__veth_xdp_flush(rq);
rq                472 drivers/net/veth.c 	struct veth_rq *rq;
rq                481 drivers/net/veth.c 	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
rq                483 drivers/net/veth.c 	if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
rq                486 drivers/net/veth.c 	__veth_xdp_flush(rq);
rq                507 drivers/net/veth.c static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
rq                521 drivers/net/veth.c 	xdp_prog = rcu_dereference(rq->xdp_prog);
rq                530 drivers/net/veth.c 		xdp.rxq = &rq->xdp_rxq;
rq                543 drivers/net/veth.c 			if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
rq                544 drivers/net/veth.c 				trace_xdp_exception(rq->dev, xdp_prog, act);
rq                555 drivers/net/veth.c 			if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
rq                566 drivers/net/veth.c 			trace_xdp_exception(rq->dev, xdp_prog, act);
rq                583 drivers/net/veth.c 	skb->protocol = eth_type_trans(skb, rq->dev);
rq                593 drivers/net/veth.c static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
rq                606 drivers/net/veth.c 	xdp_prog = rcu_dereference(rq->xdp_prog);
rq                658 drivers/net/veth.c 	xdp.rxq = &rq->xdp_rxq;
rq                670 drivers/net/veth.c 		xdp.rxq->mem = rq->xdp_mem;
rq                671 drivers/net/veth.c 		if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
rq                672 drivers/net/veth.c 			trace_xdp_exception(rq->dev, xdp_prog, act);
rq                681 drivers/net/veth.c 		xdp.rxq->mem = rq->xdp_mem;
rq                682 drivers/net/veth.c 		if (xdp_do_redirect(rq->dev, &xdp, xdp_prog))
rq                691 drivers/net/veth.c 		trace_xdp_exception(rq->dev, xdp_prog, act);
rq                708 drivers/net/veth.c 	skb->protocol = eth_type_trans(skb, rq->dev);
rq                726 drivers/net/veth.c static int veth_xdp_rcv(struct veth_rq *rq, int budget, unsigned int *xdp_xmit,
rq                732 drivers/net/veth.c 		void *ptr = __ptr_ring_consume(&rq->xdp_ring);
rq                743 drivers/net/veth.c 			skb = veth_xdp_rcv_one(rq, frame, &xdp_xmit_one, bq);
rq                747 drivers/net/veth.c 			skb = veth_xdp_rcv_skb(rq, skb, &xdp_xmit_one, bq);
rq                752 drivers/net/veth.c 			napi_gro_receive(&rq->xdp_napi, skb);
rq                759 drivers/net/veth.c 	u64_stats_update_begin(&rq->stats.syncp);
rq                760 drivers/net/veth.c 	rq->stats.xdp_packets += done;
rq                761 drivers/net/veth.c 	rq->stats.xdp_bytes += bytes;
rq                762 drivers/net/veth.c 	rq->stats.xdp_drops += drops;
rq                763 drivers/net/veth.c 	u64_stats_update_end(&rq->stats.syncp);
rq                770 drivers/net/veth.c 	struct veth_rq *rq =
rq                779 drivers/net/veth.c 	done = veth_xdp_rcv(rq, budget, &xdp_xmit, &bq);
rq                783 drivers/net/veth.c 		smp_store_mb(rq->rx_notify_masked, false);
rq                784 drivers/net/veth.c 		if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
rq                785 drivers/net/veth.c 			rq->rx_notify_masked = true;
rq                786 drivers/net/veth.c 			napi_schedule(&rq->xdp_napi);
rq                791 drivers/net/veth.c 		veth_xdp_flush(rq->dev, &bq);
rq                805 drivers/net/veth.c 		struct veth_rq *rq = &priv->rq[i];
rq                807 drivers/net/veth.c 		err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
rq                813 drivers/net/veth.c 		struct veth_rq *rq = &priv->rq[i];
rq                815 drivers/net/veth.c 		netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
rq                816 drivers/net/veth.c 		napi_enable(&rq->xdp_napi);
rq                822 drivers/net/veth.c 		ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
rq                833 drivers/net/veth.c 		struct veth_rq *rq = &priv->rq[i];
rq                835 drivers/net/veth.c 		napi_disable(&rq->xdp_napi);
rq                836 drivers/net/veth.c 		napi_hash_del(&rq->xdp_napi);
rq                841 drivers/net/veth.c 		struct veth_rq *rq = &priv->rq[i];
rq                843 drivers/net/veth.c 		netif_napi_del(&rq->xdp_napi);
rq                844 drivers/net/veth.c 		rq->rx_notify_masked = false;
rq                845 drivers/net/veth.c 		ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
rq                854 drivers/net/veth.c 	if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
rq                856 drivers/net/veth.c 			struct veth_rq *rq = &priv->rq[i];
rq                858 drivers/net/veth.c 			err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i);
rq                862 drivers/net/veth.c 			err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
rq                869 drivers/net/veth.c 			rq->xdp_mem = rq->xdp_rxq.mem;
rq                878 drivers/net/veth.c 		rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
rq                882 drivers/net/veth.c 	xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
rq                885 drivers/net/veth.c 		xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
rq                896 drivers/net/veth.c 		rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
rq                899 drivers/net/veth.c 		struct veth_rq *rq = &priv->rq[i];
rq                901 drivers/net/veth.c 		rq->xdp_rxq.mem = rq->xdp_mem;
rq                902 drivers/net/veth.c 		xdp_rxq_info_unreg(&rq->xdp_rxq);
rq                954 drivers/net/veth.c 	priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL);
rq                955 drivers/net/veth.c 	if (!priv->rq)
rq                959 drivers/net/veth.c 		priv->rq[i].dev = dev;
rq                960 drivers/net/veth.c 		u64_stats_init(&priv->rq[i].stats.syncp);
rq                970 drivers/net/veth.c 	kfree(priv->rq);
rq                181 drivers/net/virtio_net.c 	struct receive_queue *rq;
rq                291 drivers/net/virtio_net.c static void give_pages(struct receive_queue *rq, struct page *page)
rq                297 drivers/net/virtio_net.c 	end->private = (unsigned long)rq->pages;
rq                298 drivers/net/virtio_net.c 	rq->pages = page;
rq                301 drivers/net/virtio_net.c static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
rq                303 drivers/net/virtio_net.c 	struct page *p = rq->pages;
rq                306 drivers/net/virtio_net.c 		rq->pages = (struct page *)p->private;
rq                371 drivers/net/virtio_net.c 				   struct receive_queue *rq,
rq                384 drivers/net/virtio_net.c 	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
rq                441 drivers/net/virtio_net.c 		give_pages(rq, page);
rq                489 drivers/net/virtio_net.c 	struct receive_queue *rq = vi->rq;
rq                504 drivers/net/virtio_net.c 	xdp_prog = rcu_dereference(rq->xdp_prog);
rq                578 drivers/net/virtio_net.c static struct page *xdp_linearize_page(struct receive_queue *rq,
rq                599 drivers/net/virtio_net.c 		buf = virtqueue_get_buf(rq->vq, &buflen);
rq                630 drivers/net/virtio_net.c 				     struct receive_queue *rq,
rq                652 drivers/net/virtio_net.c 	xdp_prog = rcu_dereference(rq->xdp_prog);
rq                673 drivers/net/virtio_net.c 			xdp_page = xdp_linearize_page(rq, &num_buf, page,
rq                688 drivers/net/virtio_net.c 		xdp.rxq = &rq->xdp_rxq;
rq                757 drivers/net/virtio_net.c 				   struct receive_queue *rq,
rq                763 drivers/net/virtio_net.c 	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
rq                774 drivers/net/virtio_net.c 	give_pages(rq, page);
rq                780 drivers/net/virtio_net.c 					 struct receive_queue *rq,
rq                801 drivers/net/virtio_net.c 	xdp_prog = rcu_dereference(rq->xdp_prog);
rq                825 drivers/net/virtio_net.c 			xdp_page = xdp_linearize_page(rq, &num_buf,
rq                844 drivers/net/virtio_net.c 		xdp.rxq = &rq->xdp_rxq;
rq                866 drivers/net/virtio_net.c 				head_skb = page_to_skb(vi, rq, xdp_page,
rq                924 drivers/net/virtio_net.c 	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
rq                932 drivers/net/virtio_net.c 		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
rq                983 drivers/net/virtio_net.c 	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
rq                992 drivers/net/virtio_net.c 		buf = virtqueue_get_buf(rq->vq, &len);
rq               1010 drivers/net/virtio_net.c static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
rq               1025 drivers/net/virtio_net.c 			give_pages(rq, buf);
rq               1033 drivers/net/virtio_net.c 		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
rq               1036 drivers/net/virtio_net.c 		skb = receive_big(dev, vi, rq, buf, len, stats);
rq               1038 drivers/net/virtio_net.c 		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
rq               1056 drivers/net/virtio_net.c 	skb_record_rx_queue(skb, vq2rxq(rq->vq));
rq               1061 drivers/net/virtio_net.c 	napi_gro_receive(&rq->napi, skb);
rq               1074 drivers/net/virtio_net.c static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
rq               1077 drivers/net/virtio_net.c 	struct page_frag *alloc_frag = &rq->alloc_frag;
rq               1092 drivers/net/virtio_net.c 	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
rq               1094 drivers/net/virtio_net.c 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
rq               1100 drivers/net/virtio_net.c static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
rq               1107 drivers/net/virtio_net.c 	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
rq               1111 drivers/net/virtio_net.c 		first = get_a_page(rq, gfp);
rq               1114 drivers/net/virtio_net.c 				give_pages(rq, list);
rq               1117 drivers/net/virtio_net.c 		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
rq               1124 drivers/net/virtio_net.c 	first = get_a_page(rq, gfp);
rq               1126 drivers/net/virtio_net.c 		give_pages(rq, list);
rq               1133 drivers/net/virtio_net.c 	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
rq               1137 drivers/net/virtio_net.c 	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
rq               1141 drivers/net/virtio_net.c 	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
rq               1144 drivers/net/virtio_net.c 		give_pages(rq, first);
rq               1149 drivers/net/virtio_net.c static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
rq               1160 drivers/net/virtio_net.c 				rq->min_buf_len, PAGE_SIZE - hdr_len);
rq               1166 drivers/net/virtio_net.c 				 struct receive_queue *rq, gfp_t gfp)
rq               1168 drivers/net/virtio_net.c 	struct page_frag *alloc_frag = &rq->alloc_frag;
rq               1181 drivers/net/virtio_net.c 	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
rq               1199 drivers/net/virtio_net.c 	sg_init_one(rq->sg, buf, len);
rq               1201 drivers/net/virtio_net.c 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
rq               1215 drivers/net/virtio_net.c static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
rq               1223 drivers/net/virtio_net.c 			err = add_recvbuf_mergeable(vi, rq, gfp);
rq               1225 drivers/net/virtio_net.c 			err = add_recvbuf_big(vi, rq, gfp);
rq               1227 drivers/net/virtio_net.c 			err = add_recvbuf_small(vi, rq, gfp);
rq               1232 drivers/net/virtio_net.c 	} while (rq->vq->num_free);
rq               1233 drivers/net/virtio_net.c 	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
rq               1236 drivers/net/virtio_net.c 		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
rq               1237 drivers/net/virtio_net.c 		rq->stats.kicks++;
rq               1238 drivers/net/virtio_net.c 		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
rq               1247 drivers/net/virtio_net.c 	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
rq               1249 drivers/net/virtio_net.c 	virtqueue_napi_schedule(&rq->napi, rvq);
rq               1297 drivers/net/virtio_net.c 		struct receive_queue *rq = &vi->rq[i];
rq               1299 drivers/net/virtio_net.c 		napi_disable(&rq->napi);
rq               1300 drivers/net/virtio_net.c 		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
rq               1301 drivers/net/virtio_net.c 		virtnet_napi_enable(rq->vq, &rq->napi);
rq               1311 drivers/net/virtio_net.c static int virtnet_receive(struct receive_queue *rq, int budget,
rq               1314 drivers/net/virtio_net.c 	struct virtnet_info *vi = rq->vq->vdev->priv;
rq               1324 drivers/net/virtio_net.c 		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
rq               1325 drivers/net/virtio_net.c 			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
rq               1330 drivers/net/virtio_net.c 		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
rq               1331 drivers/net/virtio_net.c 			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
rq               1336 drivers/net/virtio_net.c 	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
rq               1337 drivers/net/virtio_net.c 		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
rq               1341 drivers/net/virtio_net.c 	u64_stats_update_begin(&rq->stats.syncp);
rq               1346 drivers/net/virtio_net.c 		item = (u64 *)((u8 *)&rq->stats + offset);
rq               1349 drivers/net/virtio_net.c 	u64_stats_update_end(&rq->stats.syncp);
rq               1400 drivers/net/virtio_net.c static void virtnet_poll_cleantx(struct receive_queue *rq)
rq               1402 drivers/net/virtio_net.c 	struct virtnet_info *vi = rq->vq->vdev->priv;
rq               1403 drivers/net/virtio_net.c 	unsigned int index = vq2rxq(rq->vq);
rq               1421 drivers/net/virtio_net.c 	struct receive_queue *rq =
rq               1423 drivers/net/virtio_net.c 	struct virtnet_info *vi = rq->vq->vdev->priv;
rq               1428 drivers/net/virtio_net.c 	virtnet_poll_cleantx(rq);
rq               1430 drivers/net/virtio_net.c 	received = virtnet_receive(rq, budget, &xdp_xmit);
rq               1434 drivers/net/virtio_net.c 		virtqueue_napi_complete(napi, rq->vq, received);
rq               1459 drivers/net/virtio_net.c 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
rq               1462 drivers/net/virtio_net.c 		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
rq               1466 drivers/net/virtio_net.c 		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
rq               1469 drivers/net/virtio_net.c 			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
rq               1473 drivers/net/virtio_net.c 		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
rq               1727 drivers/net/virtio_net.c 		struct receive_queue *rq = &vi->rq[i];
rq               1737 drivers/net/virtio_net.c 			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
rq               1738 drivers/net/virtio_net.c 			rpackets = rq->stats.packets;
rq               1739 drivers/net/virtio_net.c 			rbytes   = rq->stats.bytes;
rq               1740 drivers/net/virtio_net.c 			rdrops   = rq->stats.drops;
rq               1741 drivers/net/virtio_net.c 		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
rq               1810 drivers/net/virtio_net.c 		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
rq               1811 drivers/net/virtio_net.c 		napi_disable(&vi->rq[i].napi);
rq               1924 drivers/net/virtio_net.c 			virtqueue_set_affinity(vi->rq[i].vq, NULL);
rq               1961 drivers/net/virtio_net.c 		virtqueue_set_affinity(vi->rq[i].vq, mask);
rq               2025 drivers/net/virtio_net.c 	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
rq               2065 drivers/net/virtio_net.c 	if (vi->rq[0].xdp_prog)
rq               2130 drivers/net/virtio_net.c 		struct receive_queue *rq = &vi->rq[i];
rq               2132 drivers/net/virtio_net.c 		stats_base = (u8 *)&rq->stats;
rq               2134 drivers/net/virtio_net.c 			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
rq               2139 drivers/net/virtio_net.c 		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
rq               2331 drivers/net/virtio_net.c 			napi_disable(&vi->rq[i].napi);
rq               2352 drivers/net/virtio_net.c 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
rq               2356 drivers/net/virtio_net.c 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
rq               2446 drivers/net/virtio_net.c 	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
rq               2459 drivers/net/virtio_net.c 			napi_disable(&vi->rq[i].napi);
rq               2466 drivers/net/virtio_net.c 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
rq               2481 drivers/net/virtio_net.c 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
rq               2491 drivers/net/virtio_net.c 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
rq               2503 drivers/net/virtio_net.c 			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
rq               2508 drivers/net/virtio_net.c 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
rq               2525 drivers/net/virtio_net.c 		xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog);
rq               2648 drivers/net/virtio_net.c 		napi_hash_del(&vi->rq[i].napi);
rq               2649 drivers/net/virtio_net.c 		netif_napi_del(&vi->rq[i].napi);
rq               2658 drivers/net/virtio_net.c 	kfree(vi->rq);
rq               2669 drivers/net/virtio_net.c 		while (vi->rq[i].pages)
rq               2670 drivers/net/virtio_net.c 			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
rq               2672 drivers/net/virtio_net.c 		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
rq               2673 drivers/net/virtio_net.c 		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
rq               2690 drivers/net/virtio_net.c 		if (vi->rq[i].alloc_frag.page)
rq               2691 drivers/net/virtio_net.c 			put_page(vi->rq[i].alloc_frag.page);
rq               2710 drivers/net/virtio_net.c 		struct virtqueue *vq = vi->rq[i].vq;
rq               2716 drivers/net/virtio_net.c 				give_pages(&vi->rq[i], buf);
rq               2795 drivers/net/virtio_net.c 		sprintf(vi->rq[i].name, "input.%d", i);
rq               2797 drivers/net/virtio_net.c 		names[rxq2vq(i)] = vi->rq[i].name;
rq               2815 drivers/net/virtio_net.c 		vi->rq[i].vq = vqs[rxq2vq(i)];
rq               2816 drivers/net/virtio_net.c 		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
rq               2845 drivers/net/virtio_net.c 	vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
rq               2846 drivers/net/virtio_net.c 	if (!vi->rq)
rq               2851 drivers/net/virtio_net.c 		vi->rq[i].pages = NULL;
rq               2852 drivers/net/virtio_net.c 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
rq               2857 drivers/net/virtio_net.c 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
rq               2858 drivers/net/virtio_net.c 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
rq               2861 drivers/net/virtio_net.c 		u64_stats_init(&vi->rq[i].stats.syncp);
rq               2911 drivers/net/virtio_net.c 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
rq               2913 drivers/net/virtio_net.c 		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
rq                566 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
rq                570 drivers/net/vmxnet3/vmxnet3_drv.c 	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
rq                571 drivers/net/vmxnet3/vmxnet3_drv.c 	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
rq                587 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->stats.rx_buf_alloc_failure++;
rq                598 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->stats.rx_buf_alloc_failure++;
rq                612 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->stats.rx_buf_alloc_failure++;
rq                622 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->stats.rx_buf_alloc_failure++;
rq               1196 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
rq               1199 drivers/net/vmxnet3/vmxnet3_drv.c 	rq->stats.drop_err++;
rq               1201 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->stats.drop_fcs++;
rq               1203 drivers/net/vmxnet3/vmxnet3_drv.c 	rq->stats.drop_total++;
rq               1278 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
rq               1287 drivers/net/vmxnet3/vmxnet3_drv.c 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
rq               1293 drivers/net/vmxnet3/vmxnet3_drv.c 	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
rq               1295 drivers/net/vmxnet3/vmxnet3_drv.c 	while (rcd->gen == rq->comp_ring.gen) {
rq               1316 drivers/net/vmxnet3/vmxnet3_drv.c 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
rq               1317 drivers/net/vmxnet3/vmxnet3_drv.c 		       rcd->rqID != rq->dataRingQid);
rq               1320 drivers/net/vmxnet3/vmxnet3_drv.c 		ring = rq->rx_ring + ring_idx;
rq               1321 drivers/net/vmxnet3/vmxnet3_drv.c 		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
rq               1323 drivers/net/vmxnet3/vmxnet3_drv.c 		rbi = rq->buf_info[ring_idx] + idx;
rq               1329 drivers/net/vmxnet3/vmxnet3_drv.c 			vmxnet3_rx_error(rq, rcd, ctx, adapter);
rq               1338 drivers/net/vmxnet3/vmxnet3_drv.c 			       (rcd->rqID != rq->qid &&
rq               1339 drivers/net/vmxnet3/vmxnet3_drv.c 				rcd->rqID != rq->dataRingQid));
rq               1365 drivers/net/vmxnet3/vmxnet3_drv.c 				rq->stats.rx_buf_alloc_failure++;
rq               1367 drivers/net/vmxnet3/vmxnet3_drv.c 				rq->stats.drop_total++;
rq               1375 drivers/net/vmxnet3/vmxnet3_drv.c 				BUG_ON(rcd->len > rq->data_ring.desc_size);
rq               1378 drivers/net/vmxnet3/vmxnet3_drv.c 				sz = rcd->rxdIdx * rq->data_ring.desc_size;
rq               1380 drivers/net/vmxnet3/vmxnet3_drv.c 				       &rq->data_ring.base[sz], rcd->len);
rq               1395 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->stats.rx_buf_alloc_failure++;
rq               1397 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->stats.drop_total++;
rq               1457 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->stats.rx_buf_alloc_failure++;
rq               1470 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->stats.rx_buf_alloc_failure++;
rq               1535 drivers/net/vmxnet3/vmxnet3_drv.c 				napi_gro_receive(&rq->napi, skb);
rq               1545 drivers/net/vmxnet3/vmxnet3_drv.c 		ring = rq->rx_ring + ring_idx;
rq               1564 drivers/net/vmxnet3/vmxnet3_drv.c 		if (unlikely(rq->shared->updateRxProd)) {
rq               1566 drivers/net/vmxnet3/vmxnet3_drv.c 					       rxprod_reg[ring_idx] + rq->qid * 8,
rq               1570 drivers/net/vmxnet3/vmxnet3_drv.c 		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
rq               1572 drivers/net/vmxnet3/vmxnet3_drv.c 				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
rq               1580 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
rq               1587 drivers/net/vmxnet3/vmxnet3_drv.c 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
rq               1592 drivers/net/vmxnet3/vmxnet3_drv.c 				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
rq               1595 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->buf_info[ring_idx][i].skb) {
rq               1598 drivers/net/vmxnet3/vmxnet3_drv.c 				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
rq               1599 drivers/net/vmxnet3/vmxnet3_drv.c 				rq->buf_info[ring_idx][i].skb = NULL;
rq               1601 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->buf_info[ring_idx][i].page) {
rq               1604 drivers/net/vmxnet3/vmxnet3_drv.c 				put_page(rq->buf_info[ring_idx][i].page);
rq               1605 drivers/net/vmxnet3/vmxnet3_drv.c 				rq->buf_info[ring_idx][i].page = NULL;
rq               1609 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
rq               1610 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->rx_ring[ring_idx].next2fill =
rq               1611 drivers/net/vmxnet3/vmxnet3_drv.c 					rq->rx_ring[ring_idx].next2comp = 0;
rq               1614 drivers/net/vmxnet3/vmxnet3_drv.c 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
rq               1615 drivers/net/vmxnet3/vmxnet3_drv.c 	rq->comp_ring.next2proc = 0;
rq               1629 drivers/net/vmxnet3/vmxnet3_drv.c static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
rq               1637 drivers/net/vmxnet3/vmxnet3_drv.c 		if (rq->buf_info[i]) {
rq               1638 drivers/net/vmxnet3/vmxnet3_drv.c 			for (j = 0; j < rq->rx_ring[i].size; j++)
rq               1639 drivers/net/vmxnet3/vmxnet3_drv.c 				BUG_ON(rq->buf_info[i][j].page != NULL);
rq               1645 drivers/net/vmxnet3/vmxnet3_drv.c 		if (rq->rx_ring[i].base) {
rq               1647 drivers/net/vmxnet3/vmxnet3_drv.c 					  rq->rx_ring[i].size
rq               1649 drivers/net/vmxnet3/vmxnet3_drv.c 					  rq->rx_ring[i].base,
rq               1650 drivers/net/vmxnet3/vmxnet3_drv.c 					  rq->rx_ring[i].basePA);
rq               1651 drivers/net/vmxnet3/vmxnet3_drv.c 			rq->rx_ring[i].base = NULL;
rq               1655 drivers/net/vmxnet3/vmxnet3_drv.c 	if (rq->data_ring.base) {
rq               1657 drivers/net/vmxnet3/vmxnet3_drv.c 				  rq->rx_ring[0].size * rq->data_ring.desc_size,
rq               1658 drivers/net/vmxnet3/vmxnet3_drv.c 				  rq->data_ring.base, rq->data_ring.basePA);
rq               1659 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->data_ring.base = NULL;
rq               1662 drivers/net/vmxnet3/vmxnet3_drv.c 	if (rq->comp_ring.base) {
rq               1663 drivers/net/vmxnet3/vmxnet3_drv.c 		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
rq               1665 drivers/net/vmxnet3/vmxnet3_drv.c 				  rq->comp_ring.base, rq->comp_ring.basePA);
rq               1666 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->comp_ring.base = NULL;
rq               1669 drivers/net/vmxnet3/vmxnet3_drv.c 	if (rq->buf_info[0]) {
rq               1671 drivers/net/vmxnet3/vmxnet3_drv.c 			(rq->rx_ring[0].size + rq->rx_ring[1].size);
rq               1672 drivers/net/vmxnet3/vmxnet3_drv.c 		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
rq               1673 drivers/net/vmxnet3/vmxnet3_drv.c 				  rq->buf_info_pa);
rq               1674 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->buf_info[0] = rq->buf_info[1] = NULL;
rq               1684 drivers/net/vmxnet3/vmxnet3_drv.c 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
rq               1686 drivers/net/vmxnet3/vmxnet3_drv.c 		if (rq->data_ring.base) {
rq               1688 drivers/net/vmxnet3/vmxnet3_drv.c 					  (rq->rx_ring[0].size *
rq               1689 drivers/net/vmxnet3/vmxnet3_drv.c 					  rq->data_ring.desc_size),
rq               1690 drivers/net/vmxnet3/vmxnet3_drv.c 					  rq->data_ring.base,
rq               1691 drivers/net/vmxnet3/vmxnet3_drv.c 					  rq->data_ring.basePA);
rq               1692 drivers/net/vmxnet3/vmxnet3_drv.c 			rq->data_ring.base = NULL;
rq               1693 drivers/net/vmxnet3/vmxnet3_drv.c 			rq->data_ring.desc_size = 0;
rq               1699 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
rq               1705 drivers/net/vmxnet3/vmxnet3_drv.c 	for (i = 0; i < rq->rx_ring[0].size; i++) {
rq               1709 drivers/net/vmxnet3/vmxnet3_drv.c 			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
rq               1710 drivers/net/vmxnet3/vmxnet3_drv.c 			rq->buf_info[0][i].len = adapter->skb_buf_size;
rq               1712 drivers/net/vmxnet3/vmxnet3_drv.c 			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
rq               1713 drivers/net/vmxnet3/vmxnet3_drv.c 			rq->buf_info[0][i].len = PAGE_SIZE;
rq               1716 drivers/net/vmxnet3/vmxnet3_drv.c 	for (i = 0; i < rq->rx_ring[1].size; i++) {
rq               1717 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
rq               1718 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->buf_info[1][i].len = PAGE_SIZE;
rq               1723 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
rq               1725 drivers/net/vmxnet3/vmxnet3_drv.c 		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
rq               1727 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
rq               1729 drivers/net/vmxnet3/vmxnet3_drv.c 	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
rq               1734 drivers/net/vmxnet3/vmxnet3_drv.c 	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
rq               1737 drivers/net/vmxnet3/vmxnet3_drv.c 	rq->comp_ring.next2proc = 0;
rq               1738 drivers/net/vmxnet3/vmxnet3_drv.c 	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
rq               1740 drivers/net/vmxnet3/vmxnet3_drv.c 	rq->comp_ring.gen = VMXNET3_INIT_GEN;
rq               1743 drivers/net/vmxnet3/vmxnet3_drv.c 	rq->rx_ctx.skb = NULL;
rq               1770 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
rq               1778 drivers/net/vmxnet3/vmxnet3_drv.c 		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
rq               1779 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->rx_ring[i].base = dma_alloc_coherent(
rq               1781 drivers/net/vmxnet3/vmxnet3_drv.c 						&rq->rx_ring[i].basePA,
rq               1783 drivers/net/vmxnet3/vmxnet3_drv.c 		if (!rq->rx_ring[i].base) {
rq               1790 drivers/net/vmxnet3/vmxnet3_drv.c 	if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
rq               1791 drivers/net/vmxnet3/vmxnet3_drv.c 		sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
rq               1792 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->data_ring.base =
rq               1794 drivers/net/vmxnet3/vmxnet3_drv.c 					   &rq->data_ring.basePA,
rq               1796 drivers/net/vmxnet3/vmxnet3_drv.c 		if (!rq->data_ring.base) {
rq               1802 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->data_ring.base = NULL;
rq               1803 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->data_ring.desc_size = 0;
rq               1806 drivers/net/vmxnet3/vmxnet3_drv.c 	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
rq               1807 drivers/net/vmxnet3/vmxnet3_drv.c 	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
rq               1808 drivers/net/vmxnet3/vmxnet3_drv.c 						&rq->comp_ring.basePA,
rq               1810 drivers/net/vmxnet3/vmxnet3_drv.c 	if (!rq->comp_ring.base) {
rq               1815 drivers/net/vmxnet3/vmxnet3_drv.c 	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq               1816 drivers/net/vmxnet3/vmxnet3_drv.c 						   rq->rx_ring[1].size);
rq               1817 drivers/net/vmxnet3/vmxnet3_drv.c 	bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
rq               1822 drivers/net/vmxnet3/vmxnet3_drv.c 	rq->buf_info[0] = bi;
rq               1823 drivers/net/vmxnet3/vmxnet3_drv.c 	rq->buf_info[1] = bi + rq->rx_ring[0].size;
rq               1828 drivers/net/vmxnet3/vmxnet3_drv.c 	vmxnet3_rq_destroy(rq, adapter);
rq               1902 drivers/net/vmxnet3/vmxnet3_drv.c 	struct vmxnet3_rx_queue *rq = container_of(napi,
rq               1904 drivers/net/vmxnet3/vmxnet3_drv.c 	struct vmxnet3_adapter *adapter = rq->adapter;
rq               1912 drivers/net/vmxnet3/vmxnet3_drv.c 				&adapter->tx_queue[rq - adapter->rx_queue];
rq               1916 drivers/net/vmxnet3/vmxnet3_drv.c 	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
rq               1920 drivers/net/vmxnet3/vmxnet3_drv.c 		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
rq               1966 drivers/net/vmxnet3/vmxnet3_drv.c 	struct vmxnet3_rx_queue *rq = data;
rq               1967 drivers/net/vmxnet3/vmxnet3_drv.c 	struct vmxnet3_adapter *adapter = rq->adapter;
rq               1971 drivers/net/vmxnet3/vmxnet3_drv.c 		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
rq               1972 drivers/net/vmxnet3/vmxnet3_drv.c 	napi_schedule(&rq->napi);
rq               2157 drivers/net/vmxnet3/vmxnet3_drv.c 			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
rq               2158 drivers/net/vmxnet3/vmxnet3_drv.c 			rq->qid = i;
rq               2159 drivers/net/vmxnet3/vmxnet3_drv.c 			rq->qid2 = i + adapter->num_rx_queues;
rq               2160 drivers/net/vmxnet3/vmxnet3_drv.c 			rq->dataRingQid = i + 2 * adapter->num_rx_queues;
rq               2464 drivers/net/vmxnet3/vmxnet3_drv.c 		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
rq               2466 drivers/net/vmxnet3/vmxnet3_drv.c 		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
rq               2467 drivers/net/vmxnet3/vmxnet3_drv.c 		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
rq               2468 drivers/net/vmxnet3/vmxnet3_drv.c 		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
rq               2469 drivers/net/vmxnet3/vmxnet3_drv.c 		rqc->ddPA            = cpu_to_le64(rq->buf_info_pa);
rq               2470 drivers/net/vmxnet3/vmxnet3_drv.c 		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
rq               2471 drivers/net/vmxnet3/vmxnet3_drv.c 		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
rq               2472 drivers/net/vmxnet3/vmxnet3_drv.c 		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
rq               2477 drivers/net/vmxnet3/vmxnet3_drv.c 		rqc->intrIdx         = rq->comp_ring.intr_idx;
rq               2480 drivers/net/vmxnet3/vmxnet3_drv.c 				cpu_to_le64(rq->data_ring.basePA);
rq               2482 drivers/net/vmxnet3/vmxnet3_drv.c 				cpu_to_le16(rq->data_ring.desc_size);
rq               2810 drivers/net/vmxnet3/vmxnet3_drv.c 		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
rq               2812 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->rx_ring[0].size = ring0_size;
rq               2813 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->rx_ring[1].size = ring1_size;
rq               2814 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->comp_ring.size = comp_size;
rq               2851 drivers/net/vmxnet3/vmxnet3_drv.c 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
rq               2854 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->shared = &adapter->rqd_start[i].ctrl;
rq               2855 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->adapter = adapter;
rq               2856 drivers/net/vmxnet3/vmxnet3_drv.c 		rq->data_ring.desc_size = rxdata_desc_size;
rq               2857 drivers/net/vmxnet3/vmxnet3_drv.c 		err = vmxnet3_rq_create(rq, adapter);
rq                420 drivers/net/vmxnet3/vmxnet3_ethtool.c 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
rq                427 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
rq                428 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA);
rq                429 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->rx_ring[0].size;
rq                430 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->rx_ring[0].next2fill;
rq                431 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->rx_ring[0].next2comp;
rq                432 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->rx_ring[0].gen;
rq                434 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA);
rq                435 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA);
rq                436 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->rx_ring[1].size;
rq                437 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->rx_ring[1].next2fill;
rq                438 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->rx_ring[1].next2comp;
rq                439 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->rx_ring[1].gen;
rq                441 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
rq                442 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
rq                443 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->rx_ring[0].size;
rq                444 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->data_ring.desc_size;
rq                446 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
rq                447 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
rq                448 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->comp_ring.size;
rq                449 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->comp_ring.next2proc;
rq                450 drivers/net/vmxnet3/vmxnet3_ethtool.c 		buf[j++] = rq->comp_ring.gen;
rq                405 drivers/net/vmxnet3/vmxnet3_int.h #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
rq                406 drivers/net/vmxnet3/vmxnet3_int.h 	((rq)->rx_ring[ring_idx].size >> 3)
rq                575 drivers/net/wireless/atmel/atmel.c static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               2625 drivers/net/wireless/atmel/atmel.c static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               2630 drivers/net/wireless/atmel/atmel.c 	struct iwreq *wrq = (struct iwreq *) rq;
rq               2640 drivers/net/wireless/atmel/atmel.c 		if (copy_from_user(&com, rq->ifr_data, sizeof(com))) {
rq               2665 drivers/net/wireless/atmel/atmel.c 		if (copy_from_user(domain, rq->ifr_data, REGDOMAINSZ)) {
rq               1142 drivers/net/wireless/cisco/airo.c static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               7630 drivers/net/wireless/cisco/airo.c static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               7647 drivers/net/wireless/cisco/airo.c 		if (copy_from_user(&com,rq->ifr_data,sizeof(com)))
rq               7663 drivers/net/wireless/cisco/airo.c 		if (copy_from_user(&com,rq->ifr_data,sizeof(com))) {
rq                789 drivers/nvme/host/core.c static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
rq                791 drivers/nvme/host/core.c 	struct completion *waiting = rq->end_io_data;
rq                793 drivers/nvme/host/core.c 	rq->end_io_data = NULL;
rq                798 drivers/nvme/host/core.c 		struct gendisk *bd_disk, struct request *rq, int at_head)
rq                804 drivers/nvme/host/core.c 	rq->cmd_flags |= REQ_HIPRI;
rq                805 drivers/nvme/host/core.c 	rq->end_io_data = &wait;
rq                806 drivers/nvme/host/core.c 	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
rq                809 drivers/nvme/host/core.c 		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
rq                954 drivers/nvme/host/core.c static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
rq                956 drivers/nvme/host/core.c 	struct nvme_ctrl *ctrl = rq->end_io_data;
rq                960 drivers/nvme/host/core.c 	blk_mq_free_request(rq);
rq                981 drivers/nvme/host/core.c 	struct request *rq;
rq                983 drivers/nvme/host/core.c 	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
rq                985 drivers/nvme/host/core.c 	if (IS_ERR(rq))
rq                986 drivers/nvme/host/core.c 		return PTR_ERR(rq);
rq                988 drivers/nvme/host/core.c 	rq->timeout = ctrl->kato * HZ;
rq                989 drivers/nvme/host/core.c 	rq->end_io_data = ctrl;
rq                991 drivers/nvme/host/core.c 	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
rq                548 drivers/nvme/host/fabrics.c 		struct request *rq)
rq                552 drivers/nvme/host/fabrics.c 	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
rq                555 drivers/nvme/host/fabrics.c 	nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
rq                556 drivers/nvme/host/fabrics.c 	blk_mq_start_request(rq);
rq                557 drivers/nvme/host/fabrics.c 	nvme_complete_rq(rq);
rq                562 drivers/nvme/host/fabrics.c bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
rq                565 drivers/nvme/host/fabrics.c 	struct nvme_request *req = nvme_req(rq);
rq                571 drivers/nvme/host/fabrics.c 	if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
rq                176 drivers/nvme/host/fabrics.h 		struct request *rq);
rq                177 drivers/nvme/host/fabrics.h bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
rq                182 drivers/nvme/host/fabrics.h static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
rq                187 drivers/nvme/host/fabrics.h 	return __nvmf_check_ready(ctrl, rq, queue_live);
rq                 55 drivers/nvme/host/fc.c 	struct request		*rq;
rq                 85 drivers/nvme/host/fc.c 	struct request		*rq;
rq               1490 drivers/nvme/host/fc.c nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
rq               1493 drivers/nvme/host/fc.c 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
rq               1557 drivers/nvme/host/fc.c 	struct request *rq = op->rq;
rq               1706 drivers/nvme/host/fc.c 	nvme_end_request(rq, status, result);
rq               1716 drivers/nvme/host/fc.c 		struct request *rq, u32 rqno)
rq               1731 drivers/nvme/host/fc.c 	op->rq = rq;
rq               1762 drivers/nvme/host/fc.c nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
rq               1766 drivers/nvme/host/fc.c 	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
rq               1771 drivers/nvme/host/fc.c 	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
rq               1776 drivers/nvme/host/fc.c 	nvme_req(rq)->ctrl = &ctrl->ctrl;
rq               2107 drivers/nvme/host/fc.c nvme_fc_timeout(struct request *rq, bool reserved)
rq               2109 drivers/nvme/host/fc.c 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
rq               2130 drivers/nvme/host/fc.c nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
rq               2138 drivers/nvme/host/fc.c 	if (!blk_rq_nr_phys_segments(rq))
rq               2143 drivers/nvme/host/fc.c 			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
rq               2148 drivers/nvme/host/fc.c 	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
rq               2149 drivers/nvme/host/fc.c 	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
rq               2151 drivers/nvme/host/fc.c 				op->nents, rq_dma_dir(rq));
rq               2165 drivers/nvme/host/fc.c nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
rq               2174 drivers/nvme/host/fc.c 			rq_dma_dir(rq));
rq               2176 drivers/nvme/host/fc.c 	nvme_cleanup_cmd(rq);
rq               2266 drivers/nvme/host/fc.c 		ret = nvme_fc_map_data(ctrl, op->rq, op);
rq               2268 drivers/nvme/host/fc.c 			nvme_cleanup_cmd(op->rq);
rq               2282 drivers/nvme/host/fc.c 		blk_mq_start_request(op->rq);
rq               2306 drivers/nvme/host/fc.c 			nvme_fc_unmap_data(ctrl, op->rq, op);
rq               2327 drivers/nvme/host/fc.c 	struct request *rq = bd->rq;
rq               2328 drivers/nvme/host/fc.c 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
rq               2337 drivers/nvme/host/fc.c 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
rq               2338 drivers/nvme/host/fc.c 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
rq               2340 drivers/nvme/host/fc.c 	ret = nvme_setup_cmd(ns, rq, sqe);
rq               2352 drivers/nvme/host/fc.c 	if (blk_rq_nr_phys_segments(rq)) {
rq               2353 drivers/nvme/host/fc.c 		data_len = blk_rq_payload_bytes(rq);
rq               2354 drivers/nvme/host/fc.c 		io_dir = ((rq_data_dir(rq) == WRITE) ?
rq               2392 drivers/nvme/host/fc.c nvme_fc_complete_rq(struct request *rq)
rq               2394 drivers/nvme/host/fc.c 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
rq               2399 drivers/nvme/host/fc.c 	nvme_fc_unmap_data(ctrl, rq, op);
rq               2400 drivers/nvme/host/fc.c 	nvme_complete_rq(rq);
rq                635 drivers/nvme/host/lightnvm.c static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
rq                637 drivers/nvme/host/lightnvm.c 	struct nvm_rq *rqd = rq->end_io_data;
rq                639 drivers/nvme/host/lightnvm.c 	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
rq                640 drivers/nvme/host/lightnvm.c 	rqd->error = nvme_req(rq)->status;
rq                643 drivers/nvme/host/lightnvm.c 	kfree(nvme_req(rq)->cmd);
rq                644 drivers/nvme/host/lightnvm.c 	blk_mq_free_request(rq);
rq                652 drivers/nvme/host/lightnvm.c 	struct request *rq;
rq                656 drivers/nvme/host/lightnvm.c 	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
rq                657 drivers/nvme/host/lightnvm.c 	if (IS_ERR(rq))
rq                658 drivers/nvme/host/lightnvm.c 		return rq;
rq                660 drivers/nvme/host/lightnvm.c 	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
rq                663 drivers/nvme/host/lightnvm.c 		blk_rq_append_bio(rq, &rqd->bio);
rq                665 drivers/nvme/host/lightnvm.c 		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
rq                667 drivers/nvme/host/lightnvm.c 	return rq;
rq                676 drivers/nvme/host/lightnvm.c 	struct request *rq;
rq                683 drivers/nvme/host/lightnvm.c 	rq = nvme_nvm_alloc_request(q, rqd, cmd);
rq                684 drivers/nvme/host/lightnvm.c 	if (IS_ERR(rq)) {
rq                685 drivers/nvme/host/lightnvm.c 		ret = PTR_ERR(rq);
rq                690 drivers/nvme/host/lightnvm.c 		ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
rq                696 drivers/nvme/host/lightnvm.c 	rq->end_io_data = rqd;
rq                698 drivers/nvme/host/lightnvm.c 	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
rq                761 drivers/nvme/host/lightnvm.c 	struct request *rq;
rq                770 drivers/nvme/host/lightnvm.c 	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
rq                772 drivers/nvme/host/lightnvm.c 	if (IS_ERR(rq)) {
rq                777 drivers/nvme/host/lightnvm.c 	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
rq                796 drivers/nvme/host/lightnvm.c 		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
rq                799 drivers/nvme/host/lightnvm.c 		bio = rq->bio;
rq                823 drivers/nvme/host/lightnvm.c 	blk_execute_rq(q, NULL, rq, 0);
rq                825 drivers/nvme/host/lightnvm.c 	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
rq                827 drivers/nvme/host/lightnvm.c 	else if (nvme_req(rq)->status & 0x7ff)
rq                830 drivers/nvme/host/lightnvm.c 		*result = nvme_req(rq)->status & 0x7ff;
rq                832 drivers/nvme/host/lightnvm.c 		*status = le64_to_cpu(nvme_req(rq)->result.u64);
rq                848 drivers/nvme/host/lightnvm.c 	blk_mq_free_request(rq);
rq                430 drivers/nvme/host/nvme.h 	struct nvme_request *rq = nvme_req(req);
rq                432 drivers/nvme/host/nvme.h 	rq->status = le16_to_cpu(status) >> 1;
rq                433 drivers/nvme/host/nvme.h 	rq->result = result;
rq                868 drivers/nvme/host/pci.c 	struct request *req = bd->rq;
rq                280 drivers/nvme/host/rdma.c 		struct request *rq, unsigned int hctx_idx)
rq                282 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
rq                288 drivers/nvme/host/rdma.c 		struct request *rq, unsigned int hctx_idx,
rq                292 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
rq                296 drivers/nvme/host/rdma.c 	nvme_req(rq)->ctrl = &ctrl->ctrl;
rq               1120 drivers/nvme/host/rdma.c 	struct request *rq = blk_mq_rq_from_pdu(req);
rq               1128 drivers/nvme/host/rdma.c 		nvme_end_request(rq, req->status, req->result);
rq               1150 drivers/nvme/host/rdma.c 		struct request *rq)
rq               1152 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
rq               1156 drivers/nvme/host/rdma.c 	if (!blk_rq_nr_phys_segments(rq))
rq               1164 drivers/nvme/host/rdma.c 	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
rq               1166 drivers/nvme/host/rdma.c 	nvme_cleanup_cmd(rq);
rq               1265 drivers/nvme/host/rdma.c 		struct request *rq, struct nvme_command *c)
rq               1267 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
rq               1277 drivers/nvme/host/rdma.c 	if (!blk_rq_nr_phys_segments(rq))
rq               1282 drivers/nvme/host/rdma.c 			blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
rq               1287 drivers/nvme/host/rdma.c 	req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
rq               1290 drivers/nvme/host/rdma.c 			      rq_dma_dir(rq));
rq               1297 drivers/nvme/host/rdma.c 		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
rq               1299 drivers/nvme/host/rdma.c 		    blk_rq_payload_bytes(rq) <=
rq               1319 drivers/nvme/host/rdma.c 	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
rq               1331 drivers/nvme/host/rdma.c 	struct request *rq = blk_mq_rq_from_pdu(req);
rq               1339 drivers/nvme/host/rdma.c 		nvme_end_request(rq, req->status, req->result);
rq               1444 drivers/nvme/host/rdma.c 	struct request *rq;
rq               1447 drivers/nvme/host/rdma.c 	rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
rq               1448 drivers/nvme/host/rdma.c 	if (!rq) {
rq               1455 drivers/nvme/host/rdma.c 	req = blk_mq_rq_to_pdu(rq);
rq               1482 drivers/nvme/host/rdma.c 		nvme_end_request(rq, req->status, req->result);
rq               1697 drivers/nvme/host/rdma.c nvme_rdma_timeout(struct request *rq, bool reserved)
rq               1699 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
rq               1704 drivers/nvme/host/rdma.c 		 rq->tag, nvme_rdma_queue_idx(queue));
rq               1737 drivers/nvme/host/rdma.c 	struct request *rq = bd->rq;
rq               1738 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
rq               1746 drivers/nvme/host/rdma.c 	WARN_ON_ONCE(rq->tag < 0);
rq               1748 drivers/nvme/host/rdma.c 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
rq               1749 drivers/nvme/host/rdma.c 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
rq               1763 drivers/nvme/host/rdma.c 	ret = nvme_setup_cmd(ns, rq, c);
rq               1767 drivers/nvme/host/rdma.c 	blk_mq_start_request(rq);
rq               1769 drivers/nvme/host/rdma.c 	err = nvme_rdma_map_data(queue, rq, c);
rq               1773 drivers/nvme/host/rdma.c 		nvme_cleanup_cmd(rq);
rq               1785 drivers/nvme/host/rdma.c 		nvme_rdma_unmap_data(queue, rq);
rq               1809 drivers/nvme/host/rdma.c static void nvme_rdma_complete_rq(struct request *rq)
rq               1811 drivers/nvme/host/rdma.c 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
rq               1815 drivers/nvme/host/rdma.c 	nvme_rdma_unmap_data(queue, rq);
rq               1818 drivers/nvme/host/rdma.c 	nvme_complete_rq(rq);
rq                166 drivers/nvme/host/tcp.c 	struct request *rq;
rq                171 drivers/nvme/host/tcp.c 	rq = blk_mq_rq_from_pdu(req);
rq                173 drivers/nvme/host/tcp.c 	return rq_data_dir(rq) == WRITE && req->data_len &&
rq                213 drivers/nvme/host/tcp.c 	struct request *rq = blk_mq_rq_from_pdu(req);
rq                219 drivers/nvme/host/tcp.c 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
rq                220 drivers/nvme/host/tcp.c 		vec = &rq->special_vec;
rq                222 drivers/nvme/host/tcp.c 		size = blk_rq_payload_bytes(rq);
rq                352 drivers/nvme/host/tcp.c 		struct request *rq, unsigned int hctx_idx)
rq                354 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
rq                360 drivers/nvme/host/tcp.c 		struct request *rq, unsigned int hctx_idx,
rq                364 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
rq                376 drivers/nvme/host/tcp.c 	nvme_req(rq)->ctrl = &ctrl->ctrl;
rq                429 drivers/nvme/host/tcp.c 	struct request *rq;
rq                431 drivers/nvme/host/tcp.c 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
rq                432 drivers/nvme/host/tcp.c 	if (!rq) {
rq                440 drivers/nvme/host/tcp.c 	nvme_end_request(rq, cqe->status, cqe->result);
rq                449 drivers/nvme/host/tcp.c 	struct request *rq;
rq                451 drivers/nvme/host/tcp.c 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
rq                452 drivers/nvme/host/tcp.c 	if (!rq) {
rq                459 drivers/nvme/host/tcp.c 	if (!blk_rq_payload_bytes(rq)) {
rq                462 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), rq->tag);
rq                472 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), rq->tag);
rq                507 drivers/nvme/host/tcp.c 	struct request *rq = blk_mq_rq_from_pdu(req);
rq                517 drivers/nvme/host/tcp.c 			rq->tag, req->pdu_len, req->data_len,
rq                525 drivers/nvme/host/tcp.c 			rq->tag, le32_to_cpu(pdu->r2t_offset),
rq                542 drivers/nvme/host/tcp.c 	data->command_id = rq->tag;
rq                552 drivers/nvme/host/tcp.c 	struct request *rq;
rq                555 drivers/nvme/host/tcp.c 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
rq                556 drivers/nvme/host/tcp.c 	if (!rq) {
rq                562 drivers/nvme/host/tcp.c 	req = blk_mq_rq_to_pdu(rq);
rq                626 drivers/nvme/host/tcp.c static inline void nvme_tcp_end_request(struct request *rq, u16 status)
rq                630 drivers/nvme/host/tcp.c 	nvme_end_request(rq, cpu_to_le16(status << 1), res);
rq                638 drivers/nvme/host/tcp.c 	struct request *rq;
rq                640 drivers/nvme/host/tcp.c 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
rq                641 drivers/nvme/host/tcp.c 	if (!rq) {
rq                647 drivers/nvme/host/tcp.c 	req = blk_mq_rq_to_pdu(rq);
rq                666 drivers/nvme/host/tcp.c 					nvme_tcp_queue_id(queue), rq->tag);
rq                686 drivers/nvme/host/tcp.c 				nvme_tcp_queue_id(queue), rq->tag);
rq                701 drivers/nvme/host/tcp.c 				nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
rq                739 drivers/nvme/host/tcp.c 		struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
rq                742 drivers/nvme/host/tcp.c 		nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
rq               2046 drivers/nvme/host/tcp.c nvme_tcp_timeout(struct request *rq, bool reserved)
rq               2048 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
rq               2062 drivers/nvme/host/tcp.c 		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
rq               2083 drivers/nvme/host/tcp.c 			struct request *rq)
rq               2085 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
rq               2091 drivers/nvme/host/tcp.c 	if (!blk_rq_nr_phys_segments(rq))
rq               2093 drivers/nvme/host/tcp.c 	else if (rq_data_dir(rq) == WRITE &&
rq               2103 drivers/nvme/host/tcp.c 		struct request *rq)
rq               2105 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
rq               2111 drivers/nvme/host/tcp.c 	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
rq               2120 drivers/nvme/host/tcp.c 	req->data_len = blk_rq_nr_phys_segments(rq) ?
rq               2121 drivers/nvme/host/tcp.c 				blk_rq_payload_bytes(rq) : 0;
rq               2122 drivers/nvme/host/tcp.c 	req->curr_bio = rq->bio;
rq               2124 drivers/nvme/host/tcp.c 	if (rq_data_dir(rq) == WRITE &&
rq               2143 drivers/nvme/host/tcp.c 	ret = nvme_tcp_map_data(queue, rq);
rq               2145 drivers/nvme/host/tcp.c 		nvme_cleanup_cmd(rq);
rq               2159 drivers/nvme/host/tcp.c 	struct request *rq = bd->rq;
rq               2160 drivers/nvme/host/tcp.c 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
rq               2164 drivers/nvme/host/tcp.c 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
rq               2165 drivers/nvme/host/tcp.c 		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
rq               2167 drivers/nvme/host/tcp.c 	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
rq               2171 drivers/nvme/host/tcp.c 	blk_mq_start_request(rq);
rq                110 drivers/nvme/target/loop.c 		struct request *rq;
rq                112 drivers/nvme/target/loop.c 		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
rq                113 drivers/nvme/target/loop.c 		if (!rq) {
rq                120 drivers/nvme/target/loop.c 		nvme_end_request(rq, cqe->status, cqe->result);
rq                137 drivers/nvme/target/loop.c 	struct request *req = bd->rq;
rq               5646 drivers/pci/pci.c int pcie_set_readrq(struct pci_dev *dev, int rq)
rq               5650 drivers/pci/pci.c 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
rq               5661 drivers/pci/pci.c 		if (mps < rq)
rq               5662 drivers/pci/pci.c 			rq = mps;
rq               5665 drivers/pci/pci.c 	v = (ffs(rq) - 8) << 12;
rq                178 drivers/platform/chrome/wilco_ec/debugfs.c 	struct h1_gpio_status_request rq;
rq                183 drivers/platform/chrome/wilco_ec/debugfs.c 	memset(&rq, 0, sizeof(rq));
rq                184 drivers/platform/chrome/wilco_ec/debugfs.c 	rq.cmd = CMD_KB_CHROME;
rq                185 drivers/platform/chrome/wilco_ec/debugfs.c 	rq.sub_cmd = SUB_CMD_H1_GPIO;
rq                189 drivers/platform/chrome/wilco_ec/debugfs.c 	msg.request_data = &rq;
rq                190 drivers/platform/chrome/wilco_ec/debugfs.c 	msg.request_size = sizeof(rq);
rq                 92 drivers/platform/chrome/wilco_ec/mailbox.c 			     struct wilco_ec_request *rq)
rq                 94 drivers/platform/chrome/wilco_ec/mailbox.c 	memset(rq, 0, sizeof(*rq));
rq                 95 drivers/platform/chrome/wilco_ec/mailbox.c 	rq->struct_version = EC_MAILBOX_PROTO_VERSION;
rq                 96 drivers/platform/chrome/wilco_ec/mailbox.c 	rq->mailbox_id = msg->type;
rq                 97 drivers/platform/chrome/wilco_ec/mailbox.c 	rq->mailbox_version = EC_MAILBOX_VERSION;
rq                 98 drivers/platform/chrome/wilco_ec/mailbox.c 	rq->data_size = msg->request_size;
rq                101 drivers/platform/chrome/wilco_ec/mailbox.c 	rq->checksum = wilco_ec_checksum(rq, sizeof(*rq));
rq                102 drivers/platform/chrome/wilco_ec/mailbox.c 	rq->checksum += wilco_ec_checksum(msg->request_data, msg->request_size);
rq                103 drivers/platform/chrome/wilco_ec/mailbox.c 	rq->checksum = -rq->checksum;
rq                117 drivers/platform/chrome/wilco_ec/mailbox.c 			     struct wilco_ec_request *rq)
rq                124 drivers/platform/chrome/wilco_ec/mailbox.c 	cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, 0, sizeof(*rq), (u8 *)rq);
rq                125 drivers/platform/chrome/wilco_ec/mailbox.c 	cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, sizeof(*rq), msg->request_size,
rq                198 drivers/platform/chrome/wilco_ec/mailbox.c 	struct wilco_ec_request *rq;
rq                206 drivers/platform/chrome/wilco_ec/mailbox.c 	rq = ec->data_buffer;
rq                207 drivers/platform/chrome/wilco_ec/mailbox.c 	wilco_ec_prepare(msg, rq);
rq                209 drivers/platform/chrome/wilco_ec/mailbox.c 	ret = wilco_ec_transfer(ec, msg, rq);
rq                 32 drivers/platform/chrome/wilco_ec/properties.c 			     struct ec_property_request *rq,
rq                 40 drivers/platform/chrome/wilco_ec/properties.c 	ec_msg.request_data = rq;
rq                 41 drivers/platform/chrome/wilco_ec/properties.c 	ec_msg.request_size = sizeof(*rq);
rq                 48 drivers/platform/chrome/wilco_ec/properties.c 	if (rs->op != rq->op)
rq                 50 drivers/platform/chrome/wilco_ec/properties.c 	if (memcmp(rq->property_id, rs->property_id, sizeof(rs->property_id)))
rq                 59 drivers/platform/chrome/wilco_ec/properties.c 	struct ec_property_request rq;
rq                 63 drivers/platform/chrome/wilco_ec/properties.c 	memset(&rq, 0, sizeof(rq));
rq                 64 drivers/platform/chrome/wilco_ec/properties.c 	rq.op = EC_OP_GET;
rq                 65 drivers/platform/chrome/wilco_ec/properties.c 	put_unaligned_le32(prop_msg->property_id, rq.property_id);
rq                 67 drivers/platform/chrome/wilco_ec/properties.c 	ret = send_property_msg(ec, &rq, &rs);
rq                 81 drivers/platform/chrome/wilco_ec/properties.c 	struct ec_property_request rq;
rq                 85 drivers/platform/chrome/wilco_ec/properties.c 	memset(&rq, 0, sizeof(rq));
rq                 86 drivers/platform/chrome/wilco_ec/properties.c 	rq.op = EC_OP_SET;
rq                 87 drivers/platform/chrome/wilco_ec/properties.c 	put_unaligned_le32(prop_msg->property_id, rq.property_id);
rq                 88 drivers/platform/chrome/wilco_ec/properties.c 	rq.length = prop_msg->length;
rq                 89 drivers/platform/chrome/wilco_ec/properties.c 	memcpy(rq.data, prop_msg->data, prop_msg->length);
rq                 91 drivers/platform/chrome/wilco_ec/properties.c 	ret = send_property_msg(ec, &rq, &rs);
rq                 50 drivers/platform/chrome/wilco_ec/sysfs.c 	struct boot_on_ac_request rq;
rq                 61 drivers/platform/chrome/wilco_ec/sysfs.c 	memset(&rq, 0, sizeof(rq));
rq                 62 drivers/platform/chrome/wilco_ec/sysfs.c 	rq.cmd = CMD_KB_CMOS;
rq                 63 drivers/platform/chrome/wilco_ec/sysfs.c 	rq.sub_cmd = SUB_CMD_KB_CMOS_AUTO_ON;
rq                 64 drivers/platform/chrome/wilco_ec/sysfs.c 	rq.val = val;
rq                 68 drivers/platform/chrome/wilco_ec/sysfs.c 	msg.request_data = &rq;
rq                 69 drivers/platform/chrome/wilco_ec/sysfs.c 	msg.request_size = sizeof(rq);
rq                154 drivers/platform/chrome/wilco_ec/telemetry.c static int check_telem_request(struct wilco_ec_telem_request *rq,
rq                159 drivers/platform/chrome/wilco_ec/telemetry.c 	if (rq->reserved)
rq                162 drivers/platform/chrome/wilco_ec/telemetry.c 	switch (rq->command) {
rq                164 drivers/platform/chrome/wilco_ec/telemetry.c 		max_size += sizeof(rq->args.get_log);
rq                167 drivers/platform/chrome/wilco_ec/telemetry.c 		max_size += sizeof(rq->args.get_version);
rq                170 drivers/platform/chrome/wilco_ec/telemetry.c 		max_size += sizeof(rq->args.get_fan_info);
rq                173 drivers/platform/chrome/wilco_ec/telemetry.c 		max_size += sizeof(rq->args.get_diag_info);
rq                176 drivers/platform/chrome/wilco_ec/telemetry.c 		max_size += sizeof(rq->args.get_temp_info);
rq                179 drivers/platform/chrome/wilco_ec/telemetry.c 		max_size += sizeof(rq->args.get_temp_read);
rq                182 drivers/platform/chrome/wilco_ec/telemetry.c 		max_size += sizeof(rq->args.get_batt_ext_info);
rq                185 drivers/platform/chrome/wilco_ec/telemetry.c 		if (rq->args.get_batt_ppid_info.always1 != 1)
rq                188 drivers/platform/chrome/wilco_ec/telemetry.c 		max_size += sizeof(rq->args.get_batt_ppid_info);
rq                 21 drivers/ptp/ptp_chardev.c 	struct ptp_clock_request rq;
rq                 24 drivers/ptp/ptp_chardev.c 	memset(&rq, 0, sizeof(rq));
rq                 30 drivers/ptp/ptp_chardev.c 		rq.type = PTP_CLK_REQ_EXTTS;
rq                 31 drivers/ptp/ptp_chardev.c 		rq.extts.index = chan;
rq                 32 drivers/ptp/ptp_chardev.c 		err = ops->enable(ops, &rq, 0);
rq                 35 drivers/ptp/ptp_chardev.c 		rq.type = PTP_CLK_REQ_PEROUT;
rq                 36 drivers/ptp/ptp_chardev.c 		rq.perout.index = chan;
rq                 37 drivers/ptp/ptp_chardev.c 		err = ops->enable(ops, &rq, 0);
rq                220 drivers/ptp/ptp_dte.c 			    struct ptp_clock_request *rq, int on)
rq                202 drivers/ptp/ptp_ixp46x.c 			  struct ptp_clock_request *rq, int on)
rq                206 drivers/ptp/ptp_ixp46x.c 	switch (rq->type) {
rq                208 drivers/ptp/ptp_ixp46x.c 		switch (rq->extts.index) {
rq                138 drivers/ptp/ptp_kvm.c 			  struct ptp_clock_request *rq, int on)
rq                473 drivers/ptp/ptp_pch.c 			  struct ptp_clock_request *rq, int on)
rq                477 drivers/ptp/ptp_pch.c 	switch (rq->type) {
rq                479 drivers/ptp/ptp_pch.c 		switch (rq->extts.index) {
rq                283 drivers/ptp/ptp_qoriq.c 		     struct ptp_clock_request *rq, int on)
rq                290 drivers/ptp/ptp_qoriq.c 	switch (rq->type) {
rq                292 drivers/ptp/ptp_qoriq.c 		switch (rq->extts.index) {
rq                304 drivers/ptp/ptp_qoriq.c 			extts_clean_up(ptp_qoriq, rq->extts.index, false);
rq               3077 drivers/s390/block/dasd.c 	struct request *req = qd->rq;
rq               3647 drivers/s390/block/dasd_eckd.c 	void *rq;
rq               3653 drivers/s390/block/dasd_eckd.c 	rq = req ? blk_mq_rq_to_pdu(req) : NULL;
rq               3664 drivers/s390/block/dasd_eckd.c 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
rq                247 drivers/s390/block/scm_blk.c 	blk_mq_kick_requeue_list(bdev->rq);
rq                288 drivers/s390/block/scm_blk.c 	struct request *req = qd->rq;
rq                438 drivers/s390/block/scm_blk.c 	struct request_queue *rq;
rq                464 drivers/s390/block/scm_blk.c 	rq = blk_mq_init_queue(&bdev->tag_set);
rq                465 drivers/s390/block/scm_blk.c 	if (IS_ERR(rq)) {
rq                466 drivers/s390/block/scm_blk.c 		ret = PTR_ERR(rq);
rq                469 drivers/s390/block/scm_blk.c 	bdev->rq = rq;
rq                473 drivers/s390/block/scm_blk.c 	blk_queue_logical_block_size(rq, 1 << 12);
rq                474 drivers/s390/block/scm_blk.c 	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
rq                475 drivers/s390/block/scm_blk.c 	blk_queue_max_segments(rq, nr_max_blk);
rq                476 drivers/s390/block/scm_blk.c 	blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
rq                477 drivers/s390/block/scm_blk.c 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
rq                484 drivers/s390/block/scm_blk.c 	rq->queuedata = scmdev;
rq                487 drivers/s390/block/scm_blk.c 	bdev->gendisk->queue = rq;
rq                507 drivers/s390/block/scm_blk.c 	blk_cleanup_queue(rq);
rq                 19 drivers/s390/block/scm_blk.h 	struct request_queue *rq;
rq                 39 drivers/s390/block/scm_blk.h #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
rq                198 drivers/s390/char/con3270.c con3270_write_callback(struct raw3270_request *rq, void *data)
rq                200 drivers/s390/char/con3270.c 	raw3270_request_reset(rq);
rq                201 drivers/s390/char/con3270.c 	xchg(&((struct con3270 *) rq->view)->write, rq);
rq                357 drivers/s390/char/con3270.c con3270_read_callback(struct raw3270_request *rq, void *data)
rq                359 drivers/s390/char/con3270.c 	raw3270_get_view(rq->view);
rq                361 drivers/s390/char/con3270.c 	tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
rq                411 drivers/s390/char/con3270.c con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
rq                417 drivers/s390/char/con3270.c 	if (rq) {
rq                419 drivers/s390/char/con3270.c 			rq->rc = -EIO;
rq                422 drivers/s390/char/con3270.c 			rq->rescnt = irb->scsw.cmd.count;
rq                 48 drivers/s390/char/fs3270.c fs3270_wake_up(struct raw3270_request *rq, void *data)
rq                 64 drivers/s390/char/fs3270.c fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
rq                 70 drivers/s390/char/fs3270.c 	rq->callback = fs3270_wake_up;
rq                 71 drivers/s390/char/fs3270.c 	rq->callback_data = &fp->wait;
rq                 81 drivers/s390/char/fs3270.c 		rc = raw3270_start(view, rq);
rq                 84 drivers/s390/char/fs3270.c 			wait_event(fp->wait, raw3270_request_final(rq));
rq                 94 drivers/s390/char/fs3270.c fs3270_reset_callback(struct raw3270_request *rq, void *data)
rq                 98 drivers/s390/char/fs3270.c 	fp = (struct fs3270 *) rq->view;
rq                 99 drivers/s390/char/fs3270.c 	raw3270_request_reset(rq);
rq                104 drivers/s390/char/fs3270.c fs3270_restore_callback(struct raw3270_request *rq, void *data)
rq                108 drivers/s390/char/fs3270.c 	fp = (struct fs3270 *) rq->view;
rq                109 drivers/s390/char/fs3270.c 	if (rq->rc != 0 || rq->rescnt != 0) {
rq                114 drivers/s390/char/fs3270.c 	raw3270_request_reset(rq);
rq                164 drivers/s390/char/fs3270.c fs3270_save_callback(struct raw3270_request *rq, void *data)
rq                168 drivers/s390/char/fs3270.c 	fp = (struct fs3270 *) rq->view;
rq                180 drivers/s390/char/fs3270.c 	if (rq->rc != 0 || rq->rescnt == 0) {
rq                185 drivers/s390/char/fs3270.c 		fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
rq                186 drivers/s390/char/fs3270.c 	raw3270_request_reset(rq);
rq                222 drivers/s390/char/fs3270.c fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
rq                230 drivers/s390/char/fs3270.c 	if (rq) {
rq                232 drivers/s390/char/fs3270.c 			rq->rc = -EIO;
rq                235 drivers/s390/char/fs3270.c 			rq->rescnt = irb->scsw.cmd.count;
rq                246 drivers/s390/char/fs3270.c 	struct raw3270_request *rq;
rq                258 drivers/s390/char/fs3270.c 	rq = raw3270_request_alloc(0);
rq                259 drivers/s390/char/fs3270.c 	if (!IS_ERR(rq)) {
rq                262 drivers/s390/char/fs3270.c 		raw3270_request_set_cmd(rq, fp->read_command ? : 2);
rq                263 drivers/s390/char/fs3270.c 		raw3270_request_set_idal(rq, ib);
rq                267 drivers/s390/char/fs3270.c 			rc = fs3270_do_io(&fp->view, rq);
rq                269 drivers/s390/char/fs3270.c 				count -= rq->rescnt;
rq                277 drivers/s390/char/fs3270.c 		raw3270_request_free(rq);
rq                279 drivers/s390/char/fs3270.c 		rc = PTR_ERR(rq);
rq                291 drivers/s390/char/fs3270.c 	struct raw3270_request *rq;
rq                302 drivers/s390/char/fs3270.c 	rq = raw3270_request_alloc(0);
rq                303 drivers/s390/char/fs3270.c 	if (!IS_ERR(rq)) {
rq                308 drivers/s390/char/fs3270.c 			raw3270_request_set_cmd(rq, write_command);
rq                309 drivers/s390/char/fs3270.c 			raw3270_request_set_idal(rq, ib);
rq                310 drivers/s390/char/fs3270.c 			rc = fs3270_do_io(&fp->view, rq);
rq                312 drivers/s390/char/fs3270.c 				rc = count - rq->rescnt;
rq                315 drivers/s390/char/fs3270.c 		raw3270_request_free(rq);
rq                317 drivers/s390/char/fs3270.c 		rc = PTR_ERR(rq);
rq                139 drivers/s390/char/raw3270.c 	struct raw3270_request *rq;
rq                142 drivers/s390/char/raw3270.c 	rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);
rq                143 drivers/s390/char/raw3270.c 	if (!rq)
rq                148 drivers/s390/char/raw3270.c 		rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
rq                149 drivers/s390/char/raw3270.c 		if (!rq->buffer) {
rq                150 drivers/s390/char/raw3270.c 			kfree(rq);
rq                154 drivers/s390/char/raw3270.c 	rq->size = size;
rq                155 drivers/s390/char/raw3270.c 	INIT_LIST_HEAD(&rq->list);
rq                160 drivers/s390/char/raw3270.c 	rq->ccw.cda = __pa(rq->buffer);
rq                161 drivers/s390/char/raw3270.c 	rq->ccw.flags = CCW_FLAG_SLI;
rq                163 drivers/s390/char/raw3270.c 	return rq;
rq                170 drivers/s390/char/raw3270.c raw3270_request_free (struct raw3270_request *rq)
rq                172 drivers/s390/char/raw3270.c 	kfree(rq->buffer);
rq                173 drivers/s390/char/raw3270.c 	kfree(rq);
rq                180 drivers/s390/char/raw3270.c raw3270_request_reset(struct raw3270_request *rq)
rq                182 drivers/s390/char/raw3270.c 	BUG_ON(!list_empty(&rq->list));
rq                183 drivers/s390/char/raw3270.c 	rq->ccw.cmd_code = 0;
rq                184 drivers/s390/char/raw3270.c 	rq->ccw.count = 0;
rq                185 drivers/s390/char/raw3270.c 	rq->ccw.cda = __pa(rq->buffer);
rq                186 drivers/s390/char/raw3270.c 	rq->ccw.flags = CCW_FLAG_SLI;
rq                187 drivers/s390/char/raw3270.c 	rq->rescnt = 0;
rq                188 drivers/s390/char/raw3270.c 	rq->rc = 0;
rq                195 drivers/s390/char/raw3270.c raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
rq                197 drivers/s390/char/raw3270.c 	rq->ccw.cmd_code = cmd;
rq                204 drivers/s390/char/raw3270.c raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
rq                206 drivers/s390/char/raw3270.c 	if (size + rq->ccw.count > rq->size)
rq                208 drivers/s390/char/raw3270.c 	memcpy(rq->buffer + rq->ccw.count, data, size);
rq                209 drivers/s390/char/raw3270.c 	rq->ccw.count += size;
rq                217 drivers/s390/char/raw3270.c raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
rq                219 drivers/s390/char/raw3270.c 	rq->ccw.cda = __pa(data);
rq                220 drivers/s390/char/raw3270.c 	rq->ccw.count = size;
rq                227 drivers/s390/char/raw3270.c raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
rq                229 drivers/s390/char/raw3270.c 	rq->ccw.cda = __pa(ib->data);
rq                230 drivers/s390/char/raw3270.c 	rq->ccw.count = ib->size;
rq                231 drivers/s390/char/raw3270.c 	rq->ccw.flags |= CCW_FLAG_IDA;
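
The raw3270.c constructor hits above (alloc, reset, set_cmd, add_data, set_data, set_idal) all just fill the single CCW embedded in the request; add_data appends into the preallocated buffer and refuses to overflow it. A self-contained sketch of that append-with-bounds-check pattern follows; the structs are mocks, not the s390 CCW layout, and the error code is an assumption.

  /* Mock of the raw3270_request_add_data() pattern: append into a
   * fixed-size request buffer, tracking the CCW count, and fail when
   * the data would overflow rq->size. */
  #include <errno.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  struct mock_ccw { size_t count; };

  struct mock_request {
      struct mock_ccw ccw;
      char *buffer;
      size_t size;
  };

  static struct mock_request *mock_request_alloc(size_t size)
  {
      struct mock_request *rq = calloc(1, sizeof(*rq));

      if (!rq)
          return NULL;
      rq->buffer = malloc(size);
      if (!rq->buffer) {
          free(rq);
          return NULL;
      }
      rq->size = size;
      return rq;
  }

  static int mock_request_add_data(struct mock_request *rq,
                                   const void *data, size_t size)
  {
      if (size + rq->ccw.count > rq->size)
          return -E2BIG;                        /* would overflow buffer */
      memcpy(rq->buffer + rq->ccw.count, data, size);
      rq->ccw.count += size;
      return 0;
  }

  int main(void)
  {
      struct mock_request *rq = mock_request_alloc(16);

      if (!rq)
          return 1;
      printf("first add:  %d\n", mock_request_add_data(rq, "0123456789", 10));
      printf("second add: %d\n", mock_request_add_data(rq, "0123456789", 10));
      free(rq->buffer);
      free(rq);
      return 0;
  }
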
rq                240 drivers/s390/char/raw3270.c 		struct raw3270_request *rq)
rq                242 drivers/s390/char/raw3270.c 	rq->view = view;
rq                247 drivers/s390/char/raw3270.c 		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
rq                248 drivers/s390/char/raw3270.c 					       (unsigned long) rq, 0, 0);
rq                249 drivers/s390/char/raw3270.c 		if (rq->rc) {
rq                251 drivers/s390/char/raw3270.c 			return rq->rc;
rq                254 drivers/s390/char/raw3270.c 	list_add_tail(&rq->list, &rp->req_queue);
rq                268 drivers/s390/char/raw3270.c raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
rq                282 drivers/s390/char/raw3270.c 		rc =  __raw3270_start(rp, view, rq);
rq                288 drivers/s390/char/raw3270.c raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
rq                300 drivers/s390/char/raw3270.c 		rc =  __raw3270_start(rp, view, rq);
rq                305 drivers/s390/char/raw3270.c raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
rq                310 drivers/s390/char/raw3270.c 	rq->view = view;
rq                312 drivers/s390/char/raw3270.c 	list_add_tail(&rq->list, &rp->req_queue);
rq                324 drivers/s390/char/raw3270.c 	struct raw3270_request *rq;
rq                329 drivers/s390/char/raw3270.c 	rq = (struct raw3270_request *) intparm;
rq                330 drivers/s390/char/raw3270.c 	view = rq ? rq->view : rp->view;
rq                349 drivers/s390/char/raw3270.c 			view->fn->intv(view, rq, irb);
rq                356 drivers/s390/char/raw3270.c 	if (rq && !list_empty(&rq->list)) {
rq                358 drivers/s390/char/raw3270.c 		list_del_init(&rq->list);
rq                359 drivers/s390/char/raw3270.c 		if (rq->callback)
rq                360 drivers/s390/char/raw3270.c 			rq->callback(rq, rq->callback_data);
rq                370 drivers/s390/char/raw3270.c 		rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
rq                371 drivers/s390/char/raw3270.c 		rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
rq                372 drivers/s390/char/raw3270.c 					  (unsigned long) rq, 0, 0);
rq                373 drivers/s390/char/raw3270.c 		if (rq->rc == 0)
rq                376 drivers/s390/char/raw3270.c 		list_del_init(&rq->list);
rq                377 drivers/s390/char/raw3270.c 		if (rq->callback)
rq                378 drivers/s390/char/raw3270.c 			rq->callback(rq, rq->callback_data);
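
The __raw3270_start()/raw3270_irq() hits show the queueing discipline: a request is handed to ccw_device_start() only when it sits at the head of rp->req_queue; the interrupt handler deletes the finished request, runs its callback, and starts the next queued one. The sketch below is a userspace model of that "start head, pop on completion, kick next" loop using a plain ring of pointers; every name in it is invented for illustration.

  /* Model of the raw3270 request queue: only the head request is
   * active on the (fake) device; completing it runs its callback and
   * starts the next one. */
  #include <stdio.h>

  #define QDEPTH 4

  struct mock_request {
      const char *name;
      void (*callback)(struct mock_request *rq);
  };

  static struct mock_request *queue[QDEPTH];
  static int head, tail;

  static void start_on_device(struct mock_request *rq)
  {
      /* stands in for ccw_device_start(rp->cdev, &rq->ccw, ...) */
      printf("started %s\n", rq->name);
  }

  static void submit(struct mock_request *rq)
  {
      int was_empty = (head == tail);

      queue[tail++ % QDEPTH] = rq;      /* list_add_tail(&rq->list, ...) */
      if (was_empty)
          start_on_device(rq);          /* idle device: start at once */
  }

  static void irq_complete(void)
  {
      struct mock_request *rq = queue[head++ % QDEPTH];

      if (rq->callback)
          rq->callback(rq);             /* rq->callback(rq, callback_data) */
      if (head != tail)
          start_on_device(queue[head % QDEPTH]);  /* kick next request */
  }

  static void done(struct mock_request *rq)
  {
      printf("completed %s\n", rq->name);
  }

  int main(void)
  {
      struct mock_request a = { "write", done }, b = { "read", done };

      submit(&a);
      submit(&b);        /* queued behind 'a', not started yet */
      irq_complete();    /* finishes 'a', starts 'b' */
      irq_complete();    /* finishes 'b' */
      return 0;
  }
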
rq                528 drivers/s390/char/raw3270.c raw3270_read_modified_cb(struct raw3270_request *rq, void *data)
rq                530 drivers/s390/char/raw3270.c 	struct raw3270 *rp = rq->view->dev;
rq                575 drivers/s390/char/raw3270.c raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
rq                577 drivers/s390/char/raw3270.c 	struct raw3270 *rp = rq->view->dev;
rq                581 drivers/s390/char/raw3270.c 	if (rq->rc) {
rq                645 drivers/s390/char/raw3270.c 	struct raw3270_request *rq;
rq                652 drivers/s390/char/raw3270.c 		rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
rq                653 drivers/s390/char/raw3270.c 		view = rq->view;
rq                654 drivers/s390/char/raw3270.c 		rq->rc = -EACCES;
rq                655 drivers/s390/char/raw3270.c 		list_del_init(&rq->list);
rq                656 drivers/s390/char/raw3270.c 		if (rq->callback)
rq                657 drivers/s390/char/raw3270.c 			rq->callback(rq, rq->callback_data);
rq                665 drivers/s390/char/raw3270.c raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
rq                670 drivers/s390/char/raw3270.c 	if (rq) {
rq                673 drivers/s390/char/raw3270.c 				rq->rc = -EOPNOTSUPP;
rq                675 drivers/s390/char/raw3270.c 				rq->rc = -EIO;
rq                122 drivers/s390/char/raw3270.h raw3270_request_final(struct raw3270_request *rq)
rq                124 drivers/s390/char/raw3270.h 	return list_empty(&rq->list);
rq                348 drivers/s390/char/tty3270.c tty3270_write_callback(struct raw3270_request *rq, void *data)
rq                350 drivers/s390/char/tty3270.c 	struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
rq                352 drivers/s390/char/tty3270.c 	if (rq->rc != 0) {
rq                357 drivers/s390/char/tty3270.c 	raw3270_request_reset(rq);
rq                358 drivers/s390/char/tty3270.c 	xchg(&tp->write, rq);
rq                615 drivers/s390/char/tty3270.c tty3270_read_callback(struct raw3270_request *rq, void *data)
rq                617 drivers/s390/char/tty3270.c 	struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
rq                618 drivers/s390/char/tty3270.c 	raw3270_get_view(rq->view);
rq                683 drivers/s390/char/tty3270.c tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
rq                693 drivers/s390/char/tty3270.c 	if (rq) {
rq                695 drivers/s390/char/tty3270.c 			rq->rc = -EIO;
rq                700 drivers/s390/char/tty3270.c 			rq->rescnt = irb->scsw.cmd.count;
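
The tty3270 hits show write-request recycling: the completion callback resets the request and publishes it back through xchg(&tp->write, rq), so the writer side can atomically take it and sees NULL while a write is still in flight. A minimal userspace model of that one-slot exchange, with xchg() approximated by C11 atomic_exchange() and mock types throughout:

  /* Model of the tty3270 write-request recycling. */
  #include <stdatomic.h>
  #include <stdio.h>

  struct mock_request { int rc; };

  static struct mock_request the_request;
  static _Atomic(struct mock_request *) write_slot;

  static struct mock_request *take_write_request(void)
  {
      return atomic_exchange(&write_slot, NULL);    /* xchg(&tp->write, 0) */
  }

  static void write_callback(struct mock_request *rq)
  {
      rq->rc = 0;                                   /* raw3270_request_reset */
      atomic_exchange(&write_slot, rq);             /* xchg(&tp->write, rq)  */
  }

  int main(void)
  {
      struct mock_request *rq;

      atomic_store(&write_slot, &the_request);
      rq = take_write_request();
      printf("first take:  %p\n", (void *)rq);
      printf("second take: %p (write in flight)\n",
             (void *)take_write_request());
      write_callback(rq);                           /* completion returns it */
      printf("third take:  %p\n", (void *)take_write_request());
      return 0;
  }
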
rq                192 drivers/s390/cio/chsc.h 	u8 rq;
rq                770 drivers/s390/net/qeth_core.h 	int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
rq               1077 drivers/s390/net/qeth_core.h int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               5858 drivers/s390/net/qeth_core_main.c int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               5869 drivers/s390/net/qeth_core_main.c 		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
rq               5877 drivers/s390/net/qeth_core_main.c 		mii_data = if_mii(rq);
rq               5881 drivers/s390/net/qeth_core_main.c 		mii_data = if_mii(rq);
rq               5889 drivers/s390/net/qeth_core_main.c 		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
rq               5893 drivers/s390/net/qeth_core_main.c 			rc = card->discipline->do_ioctl(dev, rq, cmd);
rq               1825 drivers/s390/net/qeth_l3_main.c static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               1838 drivers/s390/net/qeth_l3_main.c 		rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue);
rq               1845 drivers/s390/net/qeth_l3_main.c 		rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
rq               1851 drivers/s390/net/qeth_l3_main.c 		if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry)))
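
The qeth do_ioctl hits are the kernel side of SIOCGMIIPHY/SIOCGMIIREG: if_mii(rq) reinterprets the ifreq union as struct mii_ioctl_data. The same path is exercised from userspace with an ordinary ioctl on a socket; a minimal example follows, with only light error handling and "eth0" as a placeholder interface name.

  /* Userspace counterpart of the SIOCGMIIPHY/SIOCGMIIREG handling:
   * the MII data travels inside the ifreq union that if_mii() unpacks
   * on the kernel side. */
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/socket.h>
  #include <net/if.h>
  #include <linux/mii.h>
  #include <linux/sockios.h>

  int main(void)
  {
      struct ifreq ifr;
      struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
      int fd = socket(AF_INET, SOCK_DGRAM, 0);

      if (fd < 0)
          return 1;
      memset(&ifr, 0, sizeof(ifr));
      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

      if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {        /* fills mii->phy_id  */
          mii->reg_num = MII_BMSR;                    /* basic status reg   */
          if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)      /* fills mii->val_out */
              printf("phy %u BMSR=0x%04x\n",
                     (unsigned)mii->phy_id, (unsigned)mii->val_out);
      }
      close(fd);
      return 0;
  }
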
rq                334 drivers/scsi/bnx2fc/bnx2fc.h 	void *rq;
rq               1425 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
rq                701 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
rq                703 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (!tgt->rq) {
rq                876 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	if (tgt->rq) {
rq                878 drivers/scsi/bnx2fc/bnx2fc_tgt.c 				    tgt->rq, tgt->rq_dma);
rq                879 drivers/scsi/bnx2fc/bnx2fc_tgt.c 		tgt->rq = NULL;
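
In the bnx2fc hits, tgt->rq is a single dma_alloc_coherent() region carved into fixed-size receive-queue buffers; the hwi.c line indexes it as rq + rq_cons_idx * BNX2FC_RQ_BUF_SZ. A tiny model of that carving and consumer-index wraparound, with made-up sizes:

  /* Model of the bnx2fc RQ buffer carving: one contiguous allocation,
   * addressed as base + consumer_index * buffer_size, with the index
   * wrapping at the queue depth. */
  #include <stdio.h>
  #include <stdlib.h>

  #define RQ_BUF_SZ   256   /* stands in for BNX2FC_RQ_BUF_SZ */
  #define RQ_ENTRIES   16   /* illustrative queue depth       */

  int main(void)
  {
      char *rq = calloc(RQ_ENTRIES, RQ_BUF_SZ);  /* dma_alloc_coherent() slot */
      unsigned int cons_idx = 0;

      if (!rq)
          return 1;
      for (int i = 0; i < 20; i++) {
          char *buf = rq + cons_idx * RQ_BUF_SZ;     /* per-entry buffer */

          printf("entry %2u at offset %5td\n", cons_idx, buf - rq);
          cons_idx = (cons_idx + 1) % RQ_ENTRIES;    /* wrap at depth */
      }
      free(rq);                                  /* dma_free_coherent() */
      return 0;
  }
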
rq                406 drivers/scsi/esas2r/esas2r.h 			      struct esas2r_request *rq);
rq                966 drivers/scsi/esas2r/esas2r.h int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
rq               1003 drivers/scsi/esas2r/esas2r.h bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
rq               1008 drivers/scsi/esas2r/esas2r.h 				struct esas2r_request *rq);
rq               1014 drivers/scsi/esas2r/esas2r.h void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
rq               1021 drivers/scsi/esas2r/esas2r.h void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
rq               1035 drivers/scsi/esas2r/esas2r.h 			    struct esas2r_request *rq,
rq               1041 drivers/scsi/esas2r/esas2r.h 			  struct esas2r_request *rq,
rq               1047 drivers/scsi/esas2r/esas2r.h void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
rq               1049 drivers/scsi/esas2r/esas2r.h 			  struct esas2r_request *rq,
rq               1053 drivers/scsi/esas2r/esas2r.h 			    struct esas2r_request *rq,
rq               1057 drivers/scsi/esas2r/esas2r.h 			  struct esas2r_request *rq,
rq               1063 drivers/scsi/esas2r/esas2r.h void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq);
rq               1067 drivers/scsi/esas2r/esas2r.h 			     struct esas2r_request *rq,
rq               1074 drivers/scsi/esas2r/esas2r.h 		   struct esas2r_request *rq, struct esas2r_sg_context *sgc);
rq               1077 drivers/scsi/esas2r/esas2r.h 				struct esas2r_request *rq);
rq               1080 drivers/scsi/esas2r/esas2r.h 			     struct esas2r_request *rq);
rq               1082 drivers/scsi/esas2r/esas2r.h 			   struct esas2r_request *rq);
rq               1083 drivers/scsi/esas2r/esas2r.h void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq);
rq               1085 drivers/scsi/esas2r/esas2r.h 			      struct esas2r_request *rq);
rq               1100 drivers/scsi/esas2r/esas2r.h 				struct esas2r_request *rq);
rq               1102 drivers/scsi/esas2r/esas2r.h bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
rq               1134 drivers/scsi/esas2r/esas2r.h 				     struct esas2r_request *rq);
rq               1138 drivers/scsi/esas2r/esas2r.h 			      struct esas2r_request *rq,
rq               1168 drivers/scsi/esas2r/esas2r.h 				   struct esas2r_request *rq,
rq               1172 drivers/scsi/esas2r/esas2r.h 	sgc->first_req = rq;
rq               1178 drivers/scsi/esas2r/esas2r.h 	sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq
rq               1187 drivers/scsi/esas2r/esas2r.h 		rq->vrq->scsi.sg_list_offset = (u8)
rq               1189 drivers/scsi/esas2r/esas2r.h 						(u8 *)rq->vrq);
rq               1192 drivers/scsi/esas2r/esas2r.h 			sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0];
rq               1193 drivers/scsi/esas2r/esas2r.h 		rq->vrq->scsi.sg_list_offset =
rq               1199 drivers/scsi/esas2r/esas2r.h static inline void esas2r_rq_init_request(struct esas2r_request *rq,
rq               1202 drivers/scsi/esas2r/esas2r.h 	union atto_vda_req *vrq = rq->vrq;
rq               1204 drivers/scsi/esas2r/esas2r.h 	INIT_LIST_HEAD(&rq->sg_table_head);
rq               1205 drivers/scsi/esas2r/esas2r.h 	rq->data_buf = (void *)(vrq + 1);
rq               1206 drivers/scsi/esas2r/esas2r.h 	rq->interrupt_cb = NULL;
rq               1207 drivers/scsi/esas2r/esas2r.h 	rq->comp_cb = esas2r_complete_request_cb;
rq               1208 drivers/scsi/esas2r/esas2r.h 	rq->flags = 0;
rq               1209 drivers/scsi/esas2r/esas2r.h 	rq->timeout = 0;
rq               1210 drivers/scsi/esas2r/esas2r.h 	rq->req_stat = RS_PENDING;
rq               1211 drivers/scsi/esas2r/esas2r.h 	rq->req_type = RT_INI_REQ;
rq               1214 drivers/scsi/esas2r/esas2r.h 	rq->func_rsp.dwords[0] = 0;
rq               1215 drivers/scsi/esas2r/esas2r.h 	rq->func_rsp.dwords[1] = 0;
rq               1224 drivers/scsi/esas2r/esas2r.h 	rq->vda_req_sz = RQ_SIZE_DEFAULT;
rq               1234 drivers/scsi/esas2r/esas2r.h 	a->req_table[LOWORD(vrq->scsi.handle)] = rq;
rq               1258 drivers/scsi/esas2r/esas2r.h 		= cpu_to_le64(rq->vrq_md->phys_addr +
rq               1262 drivers/scsi/esas2r/esas2r.h static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq,
rq               1267 drivers/scsi/esas2r/esas2r.h 	if (list_empty(&rq->sg_table_head))
rq               1271 drivers/scsi/esas2r/esas2r.h 	list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head);
rq               1275 drivers/scsi/esas2r/esas2r.h static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
rq               1279 drivers/scsi/esas2r/esas2r.h 	esas2r_rq_free_sg_lists(rq, a);
rq               1280 drivers/scsi/esas2r/esas2r.h 	a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL;
rq               1281 drivers/scsi/esas2r/esas2r.h 	rq->data_buf = NULL;
rq               1302 drivers/scsi/esas2r/esas2r.h 					struct esas2r_request *rq,
rq               1305 drivers/scsi/esas2r/esas2r.h 	if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0))
rq               1392 drivers/scsi/esas2r/esas2r.h 					   struct esas2r_request *rq)
rq               1396 drivers/scsi/esas2r/esas2r.h 	esas2r_build_ae_req(a, rq);
rq               1399 drivers/scsi/esas2r/esas2r.h 	esas2r_start_vda_request(a, rq);
rq               1406 drivers/scsi/esas2r/esas2r.h 	struct esas2r_request *rq;
rq               1410 drivers/scsi/esas2r/esas2r.h 		rq = list_entry(element, struct esas2r_request, comp_list);
rq               1412 drivers/scsi/esas2r/esas2r.h 		esas2r_complete_request(a, rq);
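
The esas2r.h inline helpers show the request bookkeeping: esas2r_rq_init_request() resets the per-request fields and registers the request in a->req_table keyed by LOWORD of its firmware handle, and esas2r_rq_destroy_request() clears that slot again. A compact model of such a handle-indexed table follows; the table size, LOWORD() definition, and the handle-mismatch check are assumptions drawn from the hits above.

  /* Model of a handle -> request table: init registers the request
   * under LOWORD(handle), destroy clears the slot, and lookups verify
   * the full handle before trusting the entry. */
  #include <stdio.h>
  #include <stdint.h>
  #include <string.h>

  #define LOWORD(x)      ((uint16_t)(x))
  #define REQ_TABLE_SIZE 256

  struct mock_request {
      uint32_t handle;
      int req_stat;          /* e.g. RS_PENDING in the driver */
  };

  static struct mock_request *req_table[REQ_TABLE_SIZE];

  static void rq_init_request(struct mock_request *rq, uint32_t handle)
  {
      memset(rq, 0, sizeof(*rq));
      rq->handle = handle;
      req_table[LOWORD(handle) % REQ_TABLE_SIZE] = rq;
  }

  static void rq_destroy_request(struct mock_request *rq)
  {
      req_table[LOWORD(rq->handle) % REQ_TABLE_SIZE] = NULL;
  }

  static struct mock_request *lookup(uint32_t handle)
  {
      struct mock_request *rq = req_table[LOWORD(handle) % REQ_TABLE_SIZE];

      /* mirrors the sanity check in esas2r_int.c: handle must match */
      return (rq && rq->handle == handle) ? rq : NULL;
  }

  int main(void)
  {
      struct mock_request rq;

      rq_init_request(&rq, 0x00010042);
      printf("lookup ok:     %p\n", (void *)lookup(0x00010042));
      printf("stale handle:  %p\n", (void *)lookup(0x00020042));
      rq_destroy_request(&rq);
      printf("after destroy: %p\n", (void *)lookup(0x00010042));
      return 0;
  }
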
rq                 49 drivers/scsi/esas2r/esas2r_disc.c 			      struct esas2r_request *rq);
rq                 51 drivers/scsi/esas2r/esas2r_disc.c 				 struct esas2r_request *rq);
rq                 55 drivers/scsi/esas2r/esas2r_disc.c 				      struct esas2r_request *rq);
rq                 59 drivers/scsi/esas2r/esas2r_disc.c 				       struct esas2r_request *rq);
rq                 61 drivers/scsi/esas2r/esas2r_disc.c 					  struct esas2r_request *rq);
rq                 63 drivers/scsi/esas2r/esas2r_disc.c 				struct esas2r_request *rq);
rq                 65 drivers/scsi/esas2r/esas2r_disc.c 				   struct esas2r_request *rq);
rq                 67 drivers/scsi/esas2r/esas2r_disc.c 				  struct esas2r_request *rq);
rq                 69 drivers/scsi/esas2r/esas2r_disc.c 				     struct esas2r_request *rq);
rq                 71 drivers/scsi/esas2r/esas2r_disc.c 					  struct esas2r_request *rq);
rq                 73 drivers/scsi/esas2r/esas2r_disc.c 					     struct esas2r_request *rq);
rq                 75 drivers/scsi/esas2r/esas2r_disc.c 					  struct esas2r_request *rq);
rq                 77 drivers/scsi/esas2r/esas2r_disc.c 					     struct esas2r_request *rq);
rq                 79 drivers/scsi/esas2r/esas2r_disc.c 				      struct esas2r_request *rq);
rq                 81 drivers/scsi/esas2r/esas2r_disc.c 					 struct esas2r_request *rq);
rq                160 drivers/scsi/esas2r/esas2r_disc.c 	struct esas2r_request *rq = &a->general_req;
rq                174 drivers/scsi/esas2r/esas2r_disc.c 	if (rq->interrupt_cx == NULL)
rq                177 drivers/scsi/esas2r/esas2r_disc.c 	if (rq->req_stat == RS_STARTED
rq                178 drivers/scsi/esas2r/esas2r_disc.c 	    && rq->timeout <= RQ_MAX_TIMEOUT) {
rq                180 drivers/scsi/esas2r/esas2r_disc.c 		esas2r_wait_request(a, rq);
rq                182 drivers/scsi/esas2r/esas2r_disc.c 		if (rq->req_stat == RS_TIMEOUT) {
rq                183 drivers/scsi/esas2r/esas2r_disc.c 			esas2r_disc_abort(a, rq);
rq                189 drivers/scsi/esas2r/esas2r_disc.c 	if (rq->req_stat == RS_PENDING
rq                190 drivers/scsi/esas2r/esas2r_disc.c 	    || rq->req_stat == RS_STARTED)
rq                193 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_disc_continue(a, rq);
rq                313 drivers/scsi/esas2r/esas2r_disc.c 	struct esas2r_request *rq = &a->general_req;
rq                359 drivers/scsi/esas2r/esas2r_disc.c 	rq->interrupt_cx = dc;
rq                360 drivers/scsi/esas2r/esas2r_disc.c 	rq->req_stat = RS_SUCCESS;
rq                377 drivers/scsi/esas2r/esas2r_disc.c 		ret = esas2r_disc_continue(a, rq);
rq                387 drivers/scsi/esas2r/esas2r_disc.c 				 struct esas2r_request *rq)
rq                390 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                400 drivers/scsi/esas2r/esas2r_disc.c 			rslt = esas2r_disc_dev_remove(a, rq);
rq                405 drivers/scsi/esas2r/esas2r_disc.c 			rslt = esas2r_disc_dev_add(a, rq);
rq                410 drivers/scsi/esas2r/esas2r_disc.c 			rslt = esas2r_disc_block_dev_scan(a, rq);
rq                415 drivers/scsi/esas2r/esas2r_disc.c 			rslt = esas2r_disc_raid_grp_info(a, rq);
rq                420 drivers/scsi/esas2r/esas2r_disc.c 			rslt = esas2r_disc_part_info(a, rq);
rq                425 drivers/scsi/esas2r/esas2r_disc.c 			rslt = esas2r_disc_passthru_dev_info(a, rq);
rq                429 drivers/scsi/esas2r/esas2r_disc.c 			rslt = esas2r_disc_passthru_dev_addr(a, rq);
rq                448 drivers/scsi/esas2r/esas2r_disc.c 	rq->interrupt_cx = NULL;
rq                460 drivers/scsi/esas2r/esas2r_disc.c 				      struct esas2r_request *rq)
rq                465 drivers/scsi/esas2r/esas2r_disc.c 	if (rq->timeout < ESAS2R_DEFAULT_TMO)
rq                466 drivers/scsi/esas2r/esas2r_disc.c 		rq->timeout = ESAS2R_DEFAULT_TMO;
rq                473 drivers/scsi/esas2r/esas2r_disc.c 	rq->req_type = RT_DISC_REQ;
rq                479 drivers/scsi/esas2r/esas2r_disc.c 		esas2r_disc_local_start_request(a, rq);
rq                481 drivers/scsi/esas2r/esas2r_disc.c 		list_add_tail(&rq->req_list, &a->defer_list);
rq                489 drivers/scsi/esas2r/esas2r_disc.c 				     struct esas2r_request *rq)
rq                493 drivers/scsi/esas2r/esas2r_disc.c 	list_add_tail(&rq->req_list, &a->active_list);
rq                495 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_start_vda_request(a, rq);
rq                503 drivers/scsi/esas2r/esas2r_disc.c 			      struct esas2r_request *rq)
rq                506 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                518 drivers/scsi/esas2r/esas2r_disc.c 				       struct esas2r_request *rq)
rq                521 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                526 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_rq_init_request(rq, a);
rq                529 drivers/scsi/esas2r/esas2r_disc.c 			     rq,
rq                536 drivers/scsi/esas2r/esas2r_disc.c 	rq->comp_cb = esas2r_disc_block_dev_scan_cb;
rq                538 drivers/scsi/esas2r/esas2r_disc.c 	rq->timeout = 30000;
rq                539 drivers/scsi/esas2r/esas2r_disc.c 	rq->interrupt_cx = dc;
rq                541 drivers/scsi/esas2r/esas2r_disc.c 	rslt = esas2r_disc_start_request(a, rq);
rq                549 drivers/scsi/esas2r/esas2r_disc.c 					  struct esas2r_request *rq)
rq                552 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                559 drivers/scsi/esas2r/esas2r_disc.c 	if (rq->req_stat == RS_SUCCESS)
rq                560 drivers/scsi/esas2r/esas2r_disc.c 		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
rq                565 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_rq_destroy_request(rq, a);
rq                570 drivers/scsi/esas2r/esas2r_disc.c 		esas2r_disc_continue(a, rq);
rq                578 drivers/scsi/esas2r/esas2r_disc.c 				      struct esas2r_request *rq)
rq                581 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                597 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_rq_init_request(rq, a);
rq                599 drivers/scsi/esas2r/esas2r_disc.c 	grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
rq                604 drivers/scsi/esas2r/esas2r_disc.c 			     rq,
rq                613 drivers/scsi/esas2r/esas2r_disc.c 	rq->comp_cb = esas2r_disc_raid_grp_info_cb;
rq                615 drivers/scsi/esas2r/esas2r_disc.c 	rq->interrupt_cx = dc;
rq                617 drivers/scsi/esas2r/esas2r_disc.c 	rslt = esas2r_disc_start_request(a, rq);
rq                625 drivers/scsi/esas2r/esas2r_disc.c 					 struct esas2r_request *rq)
rq                628 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                636 drivers/scsi/esas2r/esas2r_disc.c 	if (rq->req_stat == RS_SCAN_GEN) {
rq                637 drivers/scsi/esas2r/esas2r_disc.c 		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
rq                642 drivers/scsi/esas2r/esas2r_disc.c 	if (rq->req_stat == RS_SUCCESS) {
rq                643 drivers/scsi/esas2r/esas2r_disc.c 		grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
rq                662 drivers/scsi/esas2r/esas2r_disc.c 		if (!(rq->req_stat == RS_GRP_INVALID)) {
rq                666 drivers/scsi/esas2r/esas2r_disc.c 				   rq->req_stat);
rq                675 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_rq_destroy_request(rq, a);
rq                680 drivers/scsi/esas2r/esas2r_disc.c 		esas2r_disc_continue(a, rq);
rq                688 drivers/scsi/esas2r/esas2r_disc.c 				  struct esas2r_request *rq)
rq                691 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                708 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_rq_init_request(rq, a);
rq                710 drivers/scsi/esas2r/esas2r_disc.c 	partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
rq                715 drivers/scsi/esas2r/esas2r_disc.c 			     rq,
rq                728 drivers/scsi/esas2r/esas2r_disc.c 	rq->comp_cb = esas2r_disc_part_info_cb;
rq                730 drivers/scsi/esas2r/esas2r_disc.c 	rq->interrupt_cx = dc;
rq                732 drivers/scsi/esas2r/esas2r_disc.c 	rslt = esas2r_disc_start_request(a, rq);
rq                740 drivers/scsi/esas2r/esas2r_disc.c 				     struct esas2r_request *rq)
rq                743 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                751 drivers/scsi/esas2r/esas2r_disc.c 	if (rq->req_stat == RS_SCAN_GEN) {
rq                752 drivers/scsi/esas2r/esas2r_disc.c 		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
rq                755 drivers/scsi/esas2r/esas2r_disc.c 	} else if (rq->req_stat == RS_SUCCESS) {
rq                756 drivers/scsi/esas2r/esas2r_disc.c 		partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
rq                766 drivers/scsi/esas2r/esas2r_disc.c 		if (!(rq->req_stat == RS_PART_LAST)) {
rq                769 drivers/scsi/esas2r/esas2r_disc.c 				   "failed - status:%d", rq->req_stat);
rq                776 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_rq_destroy_request(rq, a);
rq                781 drivers/scsi/esas2r/esas2r_disc.c 		esas2r_disc_continue(a, rq);
rq                789 drivers/scsi/esas2r/esas2r_disc.c 					  struct esas2r_request *rq)
rq                792 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                800 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_rq_init_request(rq, a);
rq                802 drivers/scsi/esas2r/esas2r_disc.c 	devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
rq                807 drivers/scsi/esas2r/esas2r_disc.c 			     rq,
rq                814 drivers/scsi/esas2r/esas2r_disc.c 	rq->comp_cb = esas2r_disc_passthru_dev_info_cb;
rq                816 drivers/scsi/esas2r/esas2r_disc.c 	rq->interrupt_cx = dc;
rq                818 drivers/scsi/esas2r/esas2r_disc.c 	rslt = esas2r_disc_start_request(a, rq);
rq                826 drivers/scsi/esas2r/esas2r_disc.c 					     struct esas2r_request *rq)
rq                829 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                837 drivers/scsi/esas2r/esas2r_disc.c 	if (rq->req_stat == RS_SCAN_GEN) {
rq                838 drivers/scsi/esas2r/esas2r_disc.c 		dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
rq                841 drivers/scsi/esas2r/esas2r_disc.c 	} else if (rq->req_stat == RS_SUCCESS) {
rq                842 drivers/scsi/esas2r/esas2r_disc.c 		devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
rq                844 drivers/scsi/esas2r/esas2r_disc.c 		dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);
rq                860 drivers/scsi/esas2r/esas2r_disc.c 		if (!(rq->req_stat == RS_DEV_INVALID)) {
rq                863 drivers/scsi/esas2r/esas2r_disc.c 				   "status:%d", rq->req_stat);
rq                869 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_rq_destroy_request(rq, a);
rq                874 drivers/scsi/esas2r/esas2r_disc.c 		esas2r_disc_continue(a, rq);
rq                882 drivers/scsi/esas2r/esas2r_disc.c 					  struct esas2r_request *rq)
rq                885 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                892 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_rq_init_request(rq, a);
rq                901 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);
rq                903 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);
rq                905 drivers/scsi/esas2r/esas2r_disc.c 	if (!esas2r_build_sg_list(a, rq, &sgc)) {
rq                906 drivers/scsi/esas2r/esas2r_disc.c 		esas2r_rq_destroy_request(rq, a);
rq                913 drivers/scsi/esas2r/esas2r_disc.c 	rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;
rq                915 drivers/scsi/esas2r/esas2r_disc.c 	rq->interrupt_cx = dc;
rq                932 drivers/scsi/esas2r/esas2r_disc.c 	rslt = esas2r_disc_start_request(a, rq);
rq                940 drivers/scsi/esas2r/esas2r_disc.c 					     struct esas2r_request *rq)
rq                943 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq                955 drivers/scsi/esas2r/esas2r_disc.c 	if (rq->req_stat == RS_SUCCESS
rq               1002 drivers/scsi/esas2r/esas2r_disc.c 			   rq->req_stat, hi->status);
rq               1018 drivers/scsi/esas2r/esas2r_disc.c 	esas2r_rq_destroy_request(rq, a);
rq               1023 drivers/scsi/esas2r/esas2r_disc.c 		esas2r_disc_continue(a, rq);
rq               1044 drivers/scsi/esas2r/esas2r_disc.c 				   struct esas2r_request *rq)
rq               1047 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq               1083 drivers/scsi/esas2r/esas2r_disc.c 				struct esas2r_request *rq)
rq               1086 drivers/scsi/esas2r/esas2r_disc.c 		(struct esas2r_disc_context *)rq->interrupt_cx;
rq               1162 drivers/scsi/esas2r/esas2r_disc.c 	struct esas2r_request *rq;
rq               1170 drivers/scsi/esas2r/esas2r_disc.c 		rq = list_entry(element, struct esas2r_request, req_list);
rq               1171 drivers/scsi/esas2r/esas2r_disc.c 		if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
rq               1172 drivers/scsi/esas2r/esas2r_disc.c 			t = a->targetdb + rq->target_id;
rq               1175 drivers/scsi/esas2r/esas2r_disc.c 				rq->vrq->scsi.target_id = le16_to_cpu(
rq               1178 drivers/scsi/esas2r/esas2r_disc.c 				rq->req_stat = RS_SEL;
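
The esas2r_disc.c hits outline a callback-driven state machine: esas2r_disc_continue() dispatches on the state held in rq->interrupt_cx, each step issues a request with its own comp_cb, and the callback records results, destroys the request, and re-enters the dispatcher. The sketch below strips that down to the bare control flow; the states and names are invented for illustration, and "I/O completion" is simulated by calling the callback directly.

  /* Stripped-down model of a discovery state machine driven by
   * completion callbacks. */
  #include <stdio.h>

  enum disc_state { DCS_BLOCK_DEV_SCAN, DCS_PART_INFO, DCS_DISC_DONE };

  struct disc_context { enum disc_state state; };

  struct mock_request {
      struct disc_context *interrupt_cx;
      void (*comp_cb)(struct mock_request *rq);
  };

  static void disc_continue(struct mock_request *rq);

  static void step_done(struct mock_request *rq)
  {
      struct disc_context *dc = rq->interrupt_cx;

      printf("state %d complete\n", dc->state);
      dc->state++;              /* record results, pick next state */
      disc_continue(rq);        /* re-enter the dispatcher         */
  }

  static void disc_continue(struct mock_request *rq)
  {
      struct disc_context *dc = rq->interrupt_cx;

      switch (dc->state) {
      case DCS_BLOCK_DEV_SCAN:
      case DCS_PART_INFO:
          rq->comp_cb = step_done;    /* as each state handler does */
          rq->comp_cb(rq);            /* pretend the I/O finished   */
          break;
      case DCS_DISC_DONE:
          printf("discovery finished\n");
          rq->interrupt_cx = NULL;
          break;
      }
  }

  int main(void)
  {
      struct disc_context dc = { DCS_BLOCK_DEV_SCAN };
      struct mock_request rq = { &dc, NULL };

      disc_continue(&rq);
      return 0;
  }
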
rq                134 drivers/scsi/esas2r/esas2r_flash.c 				  struct esas2r_request *rq)
rq                136 drivers/scsi/esas2r/esas2r_flash.c 	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
rq                138 drivers/scsi/esas2r/esas2r_flash.c 		(struct esas2r_flash_context *)rq->interrupt_cx;
rq                140 drivers/scsi/esas2r/esas2r_flash.c 	if (rq->req_stat == RS_SUCCESS) {
rq                148 drivers/scsi/esas2r/esas2r_flash.c 			rq->req_stat = RS_PENDING;
rq                154 drivers/scsi/esas2r/esas2r_flash.c 			rq->req_stat = RS_PENDING;
rq                155 drivers/scsi/esas2r/esas2r_flash.c 			rq->interrupt_cb = fc->interrupt_cb;
rq                163 drivers/scsi/esas2r/esas2r_flash.c 	if (rq->req_stat != RS_PENDING)
rq                169 drivers/scsi/esas2r/esas2r_flash.c 		(*fc->interrupt_cb)(a, rq);
rq                177 drivers/scsi/esas2r/esas2r_flash.c 			    struct esas2r_request *rq)
rq                180 drivers/scsi/esas2r/esas2r_flash.c 		(struct esas2r_flash_context *)rq->interrupt_cx;
rq                190 drivers/scsi/esas2r/esas2r_flash.c 		rq->interrupt_cb = esas2r_fmapi_callback;
rq                192 drivers/scsi/esas2r/esas2r_flash.c 		rq->interrupt_cb = fc->interrupt_cb;
rq                195 drivers/scsi/esas2r/esas2r_flash.c 			       rq,
rq                201 drivers/scsi/esas2r/esas2r_flash.c 	esas2r_rq_free_sg_lists(rq, a);
rq                212 drivers/scsi/esas2r/esas2r_flash.c 		esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]);
rq                214 drivers/scsi/esas2r/esas2r_flash.c 		if (!esas2r_build_sg_list(a, rq, sgc)) {
rq                215 drivers/scsi/esas2r/esas2r_flash.c 			rq->req_stat = RS_BUSY;
rq                227 drivers/scsi/esas2r/esas2r_flash.c static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
rq                233 drivers/scsi/esas2r/esas2r_flash.c 	rq->req_stat = RS_PENDING;
rq                237 drivers/scsi/esas2r/esas2r_flash.c 		build_flash_msg(a, rq);
rq                239 drivers/scsi/esas2r/esas2r_flash.c 	return rq->req_stat == RS_PENDING;
rq                303 drivers/scsi/esas2r/esas2r_flash.c 			       struct esas2r_request *rq, u8 fi_stat)
rq                306 drivers/scsi/esas2r/esas2r_flash.c 		(struct esas2r_flash_context *)rq->interrupt_cx;
rq                310 drivers/scsi/esas2r/esas2r_flash.c 	fi->driver_error = rq->req_stat;
rq                311 drivers/scsi/esas2r/esas2r_flash.c 	rq->interrupt_cb = NULL;
rq                312 drivers/scsi/esas2r/esas2r_flash.c 	rq->req_stat = RS_SUCCESS;
rq                324 drivers/scsi/esas2r/esas2r_flash.c 			     struct esas2r_request *rq)
rq                327 drivers/scsi/esas2r/esas2r_flash.c 		(struct esas2r_flash_context *)rq->interrupt_cx;
rq                334 drivers/scsi/esas2r/esas2r_flash.c 	if (rq->req_stat != RS_SUCCESS)
rq                538 drivers/scsi/esas2r/esas2r_flash.c 			complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
rq                549 drivers/scsi/esas2r/esas2r_flash.c 	if (!load_image(a, rq)) {
rq                556 drivers/scsi/esas2r/esas2r_flash.c 		complete_fmapi_req(a, rq, FI_STAT_FAILED);
rq                828 drivers/scsi/esas2r/esas2r_flash.c 				     struct esas2r_request *rq)
rq                831 drivers/scsi/esas2r/esas2r_flash.c 		(struct esas2r_ioctl_fs *)rq->interrupt_cx;
rq                833 drivers/scsi/esas2r/esas2r_flash.c 	if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
rq                836 drivers/scsi/esas2r/esas2r_flash.c 	fs->driver_error = rq->req_stat;
rq                847 drivers/scsi/esas2r/esas2r_flash.c 			     struct esas2r_request *rq,
rq                898 drivers/scsi/esas2r/esas2r_flash.c 	rq->interrupt_cb = esas2r_complete_fs_ioctl;
rq                899 drivers/scsi/esas2r/esas2r_flash.c 	rq->interrupt_cx = fs;
rq                902 drivers/scsi/esas2r/esas2r_flash.c 			       rq,
rq                915 drivers/scsi/esas2r/esas2r_flash.c 		esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge);
rq                918 drivers/scsi/esas2r/esas2r_flash.c 		if (!esas2r_build_sg_list(a, rq, sgc)) {
rq                927 drivers/scsi/esas2r/esas2r_flash.c 	esas2r_start_request(a, rq);
rq               1213 drivers/scsi/esas2r/esas2r_flash.c 				  struct esas2r_request *rq)
rq               1215 drivers/scsi/esas2r/esas2r_flash.c 	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
rq               1217 drivers/scsi/esas2r/esas2r_flash.c 	if (rq->req_stat == RS_SUCCESS) {
rq               1223 drivers/scsi/esas2r/esas2r_flash.c 			rq->req_stat = RS_PENDING;
rq               1228 drivers/scsi/esas2r/esas2r_flash.c 			rq->req_stat = RS_PENDING;
rq               1241 drivers/scsi/esas2r/esas2r_flash.c 	if (rq->req_stat != RS_PENDING) {
rq               1243 drivers/scsi/esas2r/esas2r_flash.c 		if (rq->req_stat == RS_SUCCESS)
rq               1258 drivers/scsi/esas2r/esas2r_flash.c bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
rq               1264 drivers/scsi/esas2r/esas2r_flash.c 	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
rq               1304 drivers/scsi/esas2r/esas2r_flash.c 			       rq,
rq               1324 drivers/scsi/esas2r/esas2r_flash.c 	rq->interrupt_cb = esas2r_nvram_callback;
rq               1325 drivers/scsi/esas2r/esas2r_flash.c 	esas2r_start_request(a, rq);
rq               1391 drivers/scsi/esas2r/esas2r_flash.c 		   struct esas2r_request *rq, struct esas2r_sg_context *sgc)
rq               1407 drivers/scsi/esas2r/esas2r_flash.c 	rq->req_stat = RS_SUCCESS;
rq               1408 drivers/scsi/esas2r/esas2r_flash.c 	rq->interrupt_cx = fc;
rq               1418 drivers/scsi/esas2r/esas2r_flash.c 		return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
rq               1422 drivers/scsi/esas2r/esas2r_flash.c 		return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
rq               1428 drivers/scsi/esas2r/esas2r_flash.c 			return complete_fmapi_req(a, rq, fi->status);
rq               1500 drivers/scsi/esas2r/esas2r_flash.c 			return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
rq               1507 drivers/scsi/esas2r/esas2r_flash.c 		return complete_fmapi_req(a, rq, FI_STAT_INVALID);
rq               1516 drivers/scsi/esas2r/esas2r_flash.c 	if (!load_image(a, rq))
rq               1517 drivers/scsi/esas2r/esas2r_flash.c 		return complete_fmapi_req(a, rq, FI_STAT_FAILED);
rq               1519 drivers/scsi/esas2r/esas2r_flash.c 	esas2r_start_request(a, rq);
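
The esas2r_flash.c hits show chunked flash I/O: after each chunk completes, the interrupt callback either sets req_stat back to RS_PENDING to re-arm the request for the next chunk or lets it complete for good. A minimal model of that re-arm-while-data-remains loop; the chunk size and status values are illustrative only.

  /* Model of a callback-driven chunked write: each completion either
   * re-arms the request (RS_PENDING) or finishes it (RS_SUCCESS). */
  #include <stdio.h>

  enum { RS_SUCCESS, RS_PENDING };

  struct flash_context { unsigned int remaining; };

  struct mock_request {
      int req_stat;
      struct flash_context *interrupt_cx;
  };

  #define CHUNK 64

  static void fmapi_callback(struct mock_request *rq)
  {
      struct flash_context *fc = rq->interrupt_cx;
      unsigned int step = fc->remaining < CHUNK ? fc->remaining : CHUNK;

      fc->remaining -= step;
      printf("wrote %u, %u left\n", step, fc->remaining);
      if (fc->remaining)
          rq->req_stat = RS_PENDING;    /* build next chunk, restart  */
      else
          rq->req_stat = RS_SUCCESS;    /* fall through to completion */
  }

  int main(void)
  {
      struct flash_context fc = { 200 };
      struct mock_request rq = { RS_PENDING, &fc };

      /* stands in for the complete/restart loop in the interrupt path */
      do {
          fmapi_callback(&rq);
      } while (rq.req_stat == RS_PENDING);
      return 0;
  }
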
rq                104 drivers/scsi/esas2r/esas2r_init.c 			  struct esas2r_request *rq)
rq                126 drivers/scsi/esas2r/esas2r_init.c 	rq->vrq_md = memdesc;
rq                127 drivers/scsi/esas2r/esas2r_init.c 	rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
rq                128 drivers/scsi/esas2r/esas2r_init.c 	rq->vrq->scsi.handle = a->num_vrqs;
rq                793 drivers/scsi/esas2r/esas2r_init.c 	struct esas2r_request *rq;
rq                940 drivers/scsi/esas2r/esas2r_init.c 		for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
rq                942 drivers/scsi/esas2r/esas2r_init.c 			INIT_LIST_HEAD(&rq->req_list);
rq                943 drivers/scsi/esas2r/esas2r_init.c 			if (!alloc_vda_req(a, rq)) {
rq                949 drivers/scsi/esas2r/esas2r_init.c 			esas2r_rq_init_request(rq, a);
rq                952 drivers/scsi/esas2r/esas2r_init.c 			rq->comp_cb = esas2r_ae_complete;
rq               1169 drivers/scsi/esas2r/esas2r_init.c 				   struct esas2r_request *rq)
rq               1182 drivers/scsi/esas2r/esas2r_init.c 				     rq,
rq               1186 drivers/scsi/esas2r/esas2r_init.c 		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
rq               1190 drivers/scsi/esas2r/esas2r_init.c 		rq->flags |= RF_FAILURE_OK;
rq               1196 drivers/scsi/esas2r/esas2r_init.c 		if (rq->req_stat == RS_SUCCESS) {
rq               1202 drivers/scsi/esas2r/esas2r_init.c 				rq->func_rsp.cfg_rsp.vda_version);
rq               1203 drivers/scsi/esas2r/esas2r_init.c 			a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
rq               1205 drivers/scsi/esas2r/esas2r_init.c 				rq->func_rsp.cfg_rsp.fw_release);
rq               1222 drivers/scsi/esas2r/esas2r_init.c 					     rq,
rq               1227 drivers/scsi/esas2r/esas2r_init.c 			rq->vrq->cfg.sg_list_offset = offsetof(
rq               1230 drivers/scsi/esas2r/esas2r_init.c 			rq->vrq->cfg.data.prde.ctl_len =
rq               1232 drivers/scsi/esas2r/esas2r_init.c 			rq->vrq->cfg.data.prde.address = cpu_to_le64(
rq               1233 drivers/scsi/esas2r/esas2r_init.c 				rq->vrq_md->phys_addr +
rq               1235 drivers/scsi/esas2r/esas2r_init.c 			rq->flags |= RF_FAILURE_OK;
rq               1243 drivers/scsi/esas2r/esas2r_init.c 			ci = (struct atto_vda_cfg_init *)rq->data_buf;
rq               1244 drivers/scsi/esas2r/esas2r_init.c 			if (rq->req_stat == RS_SUCCESS) {
rq               1256 drivers/scsi/esas2r/esas2r_init.c 		rq->req_stat = RS_SUCCESS;
rq               1269 drivers/scsi/esas2r/esas2r_init.c 	struct esas2r_request *rq = &a->general_req;
rq               1271 drivers/scsi/esas2r/esas2r_init.c 	esas2r_rq_init_request(rq, a);
rq               1272 drivers/scsi/esas2r/esas2r_init.c 	rq->comp_cb = esas2r_dummy_complete;
rq               1278 drivers/scsi/esas2r/esas2r_init.c 		if (esas2r_format_init_msg(a, rq)) {
rq               1282 drivers/scsi/esas2r/esas2r_init.c 				esas2r_start_vda_request(a, rq);
rq               1284 drivers/scsi/esas2r/esas2r_init.c 				esas2r_wait_request(a, rq);
rq               1285 drivers/scsi/esas2r/esas2r_init.c 				if (rq->req_stat != RS_PENDING)
rq               1290 drivers/scsi/esas2r/esas2r_init.c 		if (rq->req_stat == RS_SUCCESS
rq               1291 drivers/scsi/esas2r/esas2r_init.c 		    || ((rq->flags & RF_FAILURE_OK)
rq               1292 drivers/scsi/esas2r/esas2r_init.c 			&& rq->req_stat != RS_TIMEOUT))
rq               1296 drivers/scsi/esas2r/esas2r_init.c 			   a->init_msg, rq->req_stat, rq->flags);
rq               1302 drivers/scsi/esas2r/esas2r_init.c 	esas2r_rq_destroy_request(rq, a);
rq               1310 drivers/scsi/esas2r/esas2r_init.c 	struct esas2r_request *rq;
rq               1332 drivers/scsi/esas2r/esas2r_init.c 	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
rq               1333 drivers/scsi/esas2r/esas2r_init.c 		esas2r_start_ae_request(a, rq);
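
The esas2r_init.c hits around the init-message loop suggest the acceptance test: a step passes if it succeeded outright, or if it carries RF_FAILURE_OK and did not time out. A tiny model of that predicate; the status codes and flag value are stand-ins, not the driver's definitions.

  /* Model of the init-step acceptance test. */
  #include <stdbool.h>
  #include <stdio.h>

  enum { RS_SUCCESS, RS_TIMEOUT, RS_DEGRADED };
  #define RF_FAILURE_OK 0x01

  static bool init_step_ok(int req_stat, unsigned int flags)
  {
      return req_stat == RS_SUCCESS ||
             ((flags & RF_FAILURE_OK) && req_stat != RS_TIMEOUT);
  }

  int main(void)
  {
      printf("%d %d %d\n",
             init_step_ok(RS_SUCCESS, 0),               /* 1               */
             init_step_ok(RS_DEGRADED, RF_FAILURE_OK),  /* 1: tolerated    */
             init_step_ok(RS_TIMEOUT, RF_FAILURE_OK));  /* 0: timeout fatal */
      return 0;
  }
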
rq                173 drivers/scsi/esas2r/esas2r_int.c 					   struct esas2r_request *rq,
rq                181 drivers/scsi/esas2r/esas2r_int.c 	if (unlikely(rq->req_stat != RS_SUCCESS)) {
rq                182 drivers/scsi/esas2r/esas2r_int.c 		memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
rq                184 drivers/scsi/esas2r/esas2r_int.c 		if (rq->req_stat == RS_ABORTED) {
rq                185 drivers/scsi/esas2r/esas2r_int.c 			if (rq->timeout > RQ_MAX_TIMEOUT)
rq                186 drivers/scsi/esas2r/esas2r_int.c 				rq->req_stat = RS_TIMEOUT;
rq                187 drivers/scsi/esas2r/esas2r_int.c 		} else if (rq->req_stat == RS_SCSI_ERROR) {
rq                188 drivers/scsi/esas2r/esas2r_int.c 			u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
rq                197 drivers/scsi/esas2r/esas2r_int.c 				rq->req_stat = RS_SUCCESS;
rq                198 drivers/scsi/esas2r/esas2r_int.c 				rq->func_rsp.scsi_rsp.scsi_stat =
rq                210 drivers/scsi/esas2r/esas2r_int.c 	struct esas2r_request *rq;
rq                262 drivers/scsi/esas2r/esas2r_int.c 		rq = a->req_table[LOWORD(handle)];
rq                264 drivers/scsi/esas2r/esas2r_int.c 		if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
rq                269 drivers/scsi/esas2r/esas2r_int.c 		list_del(&rq->req_list);
rq                272 drivers/scsi/esas2r/esas2r_int.c 		rq->req_stat = rsp->req_stat;
rq                275 drivers/scsi/esas2r/esas2r_int.c 		esas2r_trace("rq: %p", rq);
rq                276 drivers/scsi/esas2r/esas2r_int.c 		esas2r_trace("req_status: %x", rq->req_stat);
rq                278 drivers/scsi/esas2r/esas2r_int.c 		if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
rq                279 drivers/scsi/esas2r/esas2r_int.c 			esas2r_handle_outbound_rsp_err(a, rq, rsp);
rq                285 drivers/scsi/esas2r/esas2r_int.c 			memcpy(&rq->func_rsp, &rsp->func_rsp,
rq                290 drivers/scsi/esas2r/esas2r_int.c 		list_add_tail(&rq->comp_list, &comp_list);
rq                309 drivers/scsi/esas2r/esas2r_int.c 	struct esas2r_request *rq;
rq                347 drivers/scsi/esas2r/esas2r_int.c 			rq = list_entry(element, struct esas2r_request,
rq                350 drivers/scsi/esas2r/esas2r_int.c 			if (rq->req_stat != RS_PENDING) {
rq                352 drivers/scsi/esas2r/esas2r_int.c 				list_add_tail(&rq->comp_list, &comp_list);
rq                360 drivers/scsi/esas2r/esas2r_int.c 			else if (rq->req_type == RT_DISC_REQ) {
rq                362 drivers/scsi/esas2r/esas2r_int.c 				esas2r_disc_local_start_request(a, rq);
rq                365 drivers/scsi/esas2r/esas2r_int.c 				esas2r_local_start_request(a, rq);
rq                390 drivers/scsi/esas2r/esas2r_int.c 	struct esas2r_request *rq = &a->general_req;
rq                403 drivers/scsi/esas2r/esas2r_int.c 	if (rq->interrupt_cx) {
rq                404 drivers/scsi/esas2r/esas2r_int.c 		dc = (struct esas2r_disc_context *)rq->interrupt_cx;
rq                418 drivers/scsi/esas2r/esas2r_int.c 	rq->interrupt_cx = NULL;
rq                419 drivers/scsi/esas2r/esas2r_int.c 	rq->interrupt_cb = NULL;
rq                421 drivers/scsi/esas2r/esas2r_int.c 	rq->comp_cb = esas2r_dummy_complete;
rq                433 drivers/scsi/esas2r/esas2r_int.c 		rq = list_entry(element, struct esas2r_request, req_list);
rq                435 drivers/scsi/esas2r/esas2r_int.c 		if (rq->req_stat == RS_STARTED)
rq                436 drivers/scsi/esas2r/esas2r_int.c 			if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
rq                437 drivers/scsi/esas2r/esas2r_int.c 				list_add_tail(&rq->comp_list, &comp_list);
rq                448 drivers/scsi/esas2r/esas2r_int.c 	struct esas2r_request *rq;
rq                462 drivers/scsi/esas2r/esas2r_int.c 		rq = list_entry(element, struct esas2r_request, req_list);
rq                463 drivers/scsi/esas2r/esas2r_int.c 		if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
rq                464 drivers/scsi/esas2r/esas2r_int.c 			list_add_tail(&rq->comp_list, &comp_list);
rq                748 drivers/scsi/esas2r/esas2r_int.c void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
rq                751 drivers/scsi/esas2r/esas2r_int.c 		(union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
rq                752 drivers/scsi/esas2r/esas2r_int.c 	u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
rq                754 drivers/scsi/esas2r/esas2r_int.c 		(union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
rq                765 drivers/scsi/esas2r/esas2r_int.c 			   rq, length);
rq                847 drivers/scsi/esas2r/esas2r_int.c 	esas2r_start_ae_request(a, rq);
rq                874 drivers/scsi/esas2r/esas2r_int.c void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
rq                878 drivers/scsi/esas2r/esas2r_int.c 				       struct esas2r_request *rq)
rq                882 drivers/scsi/esas2r/esas2r_int.c 	snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;
rq                884 drivers/scsi/esas2r/esas2r_int.c 	if (snslen > rq->sense_len)
rq                885 drivers/scsi/esas2r/esas2r_int.c 		snslen = rq->sense_len;
rq                888 drivers/scsi/esas2r/esas2r_int.c 		if (rq->sense_buf)
rq                889 drivers/scsi/esas2r/esas2r_int.c 			memcpy(rq->sense_buf, rq->data_buf, snslen);
rq                891 drivers/scsi/esas2r/esas2r_int.c 			rq->sense_buf = (u8 *)rq->data_buf;
rq                895 drivers/scsi/esas2r/esas2r_int.c 			u8 *s = (u8 *)rq->data_buf;
rq                902 drivers/scsi/esas2r/esas2r_int.c 					     rq->target_id);
rq                903 drivers/scsi/esas2r/esas2r_int.c 				esas2r_target_state_changed(a, rq->target_id,
rq                913 drivers/scsi/esas2r/esas2r_int.c 	rq->sense_len = snslen;
rq                918 drivers/scsi/esas2r/esas2r_int.c 			     struct esas2r_request *rq)
rq                920 drivers/scsi/esas2r/esas2r_int.c 	if (rq->vrq->scsi.function == VDA_FUNC_FLASH
rq                921 drivers/scsi/esas2r/esas2r_int.c 	    && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
rq                926 drivers/scsi/esas2r/esas2r_int.c 	if (rq->interrupt_cb) {
rq                927 drivers/scsi/esas2r/esas2r_int.c 		(*rq->interrupt_cb)(a, rq);
rq                929 drivers/scsi/esas2r/esas2r_int.c 		if (rq->req_stat == RS_PENDING) {
rq                930 drivers/scsi/esas2r/esas2r_int.c 			esas2r_start_request(a, rq);
rq                935 drivers/scsi/esas2r/esas2r_int.c 	if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
rq                936 drivers/scsi/esas2r/esas2r_int.c 	    && unlikely(rq->req_stat != RS_SUCCESS)) {
rq                937 drivers/scsi/esas2r/esas2r_int.c 		esas2r_check_req_rsp_sense(a, rq);
rq                938 drivers/scsi/esas2r/esas2r_int.c 		esas2r_log_request_failure(a, rq);
rq                941 drivers/scsi/esas2r/esas2r_int.c 	(*rq->comp_cb)(a, rq);
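
The esas2r_int.c hits show response post-processing: an aborted request whose timeout already exceeded RQ_MAX_TIMEOUT is reported as a timeout, and a SCSI-error status the driver chooses to tolerate is folded back into success (the exact sense condition is not visible in the hits, so it is parameterised here). A small model of that status remapping, with invented numeric codes:

  /* Model of the outbound-response status remapping. */
  #include <stdio.h>

  enum { RS_SUCCESS, RS_ABORTED, RS_TIMEOUT, RS_SCSI_ERROR };
  #define RQ_MAX_TIMEOUT 10000u

  struct mock_request {
      int req_stat;
      unsigned int timeout;
      unsigned char scsi_stat;     /* func_rsp.scsi_rsp.scsi_stat */
  };

  static void post_process(struct mock_request *rq, int tolerated_error)
  {
      if (rq->req_stat == RS_ABORTED) {
          if (rq->timeout > RQ_MAX_TIMEOUT)
              rq->req_stat = RS_TIMEOUT;     /* abort after timeout */
      } else if (rq->req_stat == RS_SCSI_ERROR) {
          if (tolerated_error)               /* e.g. benign check condition */
              rq->req_stat = RS_SUCCESS;
      }
  }

  int main(void)
  {
      struct mock_request a = { RS_ABORTED, 20000, 0 };
      struct mock_request b = { RS_SCSI_ERROR, 0, 2 };

      post_process(&a, 0);
      post_process(&b, 1);
      printf("a=%d (timeout)  b=%d (success)\n", a.req_stat, b.req_stat);
      return 0;
  }
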
rq                 46 drivers/scsi/esas2r/esas2r_io.c void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
rq                 49 drivers/scsi/esas2r/esas2r_io.c 	struct esas2r_request *startrq = rq;
rq                 54 drivers/scsi/esas2r/esas2r_io.c 		if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
rq                 55 drivers/scsi/esas2r/esas2r_io.c 			rq->req_stat = RS_SEL2;
rq                 57 drivers/scsi/esas2r/esas2r_io.c 			rq->req_stat = RS_DEGRADED;
rq                 58 drivers/scsi/esas2r/esas2r_io.c 	} else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
rq                 59 drivers/scsi/esas2r/esas2r_io.c 		t = a->targetdb + rq->target_id;
rq                 63 drivers/scsi/esas2r/esas2r_io.c 			rq->req_stat = RS_SEL;
rq                 66 drivers/scsi/esas2r/esas2r_io.c 			rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
rq                 75 drivers/scsi/esas2r/esas2r_io.c 				rq->req_stat = RS_SEL;
rq                 79 drivers/scsi/esas2r/esas2r_io.c 	if (unlikely(rq->req_stat != RS_PENDING)) {
rq                 80 drivers/scsi/esas2r/esas2r_io.c 		esas2r_complete_request(a, rq);
rq                 84 drivers/scsi/esas2r/esas2r_io.c 	esas2r_trace("rq=%p", rq);
rq                 85 drivers/scsi/esas2r/esas2r_io.c 	esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);
rq                 87 drivers/scsi/esas2r/esas2r_io.c 	if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
rq                 88 drivers/scsi/esas2r/esas2r_io.c 		esas2r_trace("rq->target_id=%d", rq->target_id);
rq                 89 drivers/scsi/esas2r/esas2r_io.c 		esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
rq                120 drivers/scsi/esas2r/esas2r_io.c 				struct esas2r_request *rq)
rq                123 drivers/scsi/esas2r/esas2r_io.c 	esas2r_trace("rq=%p", rq);
rq                124 drivers/scsi/esas2r/esas2r_io.c 	esas2r_trace("rq->vrq:%p", rq->vrq);
rq                125 drivers/scsi/esas2r/esas2r_io.c 	esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);
rq                127 drivers/scsi/esas2r/esas2r_io.c 	if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
rq                128 drivers/scsi/esas2r/esas2r_io.c 		     && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
rq                131 drivers/scsi/esas2r/esas2r_io.c 	list_add_tail(&rq->req_list, &a->active_list);
rq                132 drivers/scsi/esas2r/esas2r_io.c 	esas2r_start_vda_request(a, rq);
rq                138 drivers/scsi/esas2r/esas2r_io.c 			      struct esas2r_request *rq)
rq                143 drivers/scsi/esas2r/esas2r_io.c 	rq->req_stat = RS_STARTED;
rq                164 drivers/scsi/esas2r/esas2r_io.c 	if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
rq                165 drivers/scsi/esas2r/esas2r_io.c 		rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));
rq                167 drivers/scsi/esas2r/esas2r_io.c 	element->address = cpu_to_le64(rq->vrq_md->phys_addr);
rq                168 drivers/scsi/esas2r/esas2r_io.c 	element->length = cpu_to_le32(rq->vda_req_sz);
rq                176 drivers/scsi/esas2r/esas2r_io.c 	esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
rq                178 drivers/scsi/esas2r/esas2r_io.c 	esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
rq                190 drivers/scsi/esas2r/esas2r_io.c 	struct esas2r_request *rq = sgc->first_req;
rq                191 drivers/scsi/esas2r/esas2r_io.c 	union atto_vda_req *vrq = rq->vrq;
rq                273 drivers/scsi/esas2r/esas2r_io.c 						 - (u8 *)rq->sg_table->
rq                288 drivers/scsi/esas2r/esas2r_io.c 				rq->vda_req_sz =
rq                302 drivers/scsi/esas2r/esas2r_io.c 			list_add(&sgl->next_desc, &rq->sg_table_head);
rq                337 drivers/scsi/esas2r/esas2r_io.c 			 (u8 *)rq->sg_table->virt_addr));
rq                354 drivers/scsi/esas2r/esas2r_io.c 		if (reqsize > rq->vda_req_sz)
rq                355 drivers/scsi/esas2r/esas2r_io.c 			rq->vda_req_sz = reqsize;
rq                373 drivers/scsi/esas2r/esas2r_io.c 	struct esas2r_request *rq = sgc->first_req;
rq                458 drivers/scsi/esas2r/esas2r_io.c 			list_add(&sgl->next_desc, &rq->sg_table_head);
rq                512 drivers/scsi/esas2r/esas2r_io.c 	if (!list_empty(&rq->sg_table_head)) {
rq                527 drivers/scsi/esas2r/esas2r_io.c 	struct esas2r_request *rq = sgc->first_req;
rq                529 drivers/scsi/esas2r/esas2r_io.c 	struct esas2r_target *t = a->targetdb + rq->target_id;
rq                533 drivers/scsi/esas2r/esas2r_io.c 	u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];
rq                540 drivers/scsi/esas2r/esas2r_io.c 	if (rq->vrq->scsi.function == VDA_FUNC_SCSI
rq                545 drivers/scsi/esas2r/esas2r_io.c 		switch (rq->vrq->scsi.cdb[0]) {
rq                591 drivers/scsi/esas2r/esas2r_io.c 			rq->vrq->scsi.iblk_cnt_prd = 0;
rq                600 drivers/scsi/esas2r/esas2r_io.c 				rq->flags |= RF_1ST_IBLK_BASE;
rq                639 drivers/scsi/esas2r/esas2r_io.c 			rq->vrq->scsi.iblk_cnt_prd++;
rq                650 drivers/scsi/esas2r/esas2r_io.c 	reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
rq                659 drivers/scsi/esas2r/esas2r_io.c 	if (reqsize > rq->vda_req_sz)
rq                660 drivers/scsi/esas2r/esas2r_io.c 		rq->vda_req_sz = reqsize;
rq                770 drivers/scsi/esas2r/esas2r_io.c 	struct esas2r_request *rq;
rq                783 drivers/scsi/esas2r/esas2r_io.c 		rq = list_entry(element, struct esas2r_request, req_list);
rq                785 drivers/scsi/esas2r/esas2r_io.c 		if (rq->vrq->scsi.function == VDA_FUNC_SCSI
rq                786 drivers/scsi/esas2r/esas2r_io.c 		    && rq->target_id == targetid
rq                787 drivers/scsi/esas2r/esas2r_io.c 		    && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
rq                790 drivers/scsi/esas2r/esas2r_io.c 			if (rq->req_stat == RS_PENDING) {
rq                795 drivers/scsi/esas2r/esas2r_io.c 				if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
rq                796 drivers/scsi/esas2r/esas2r_io.c 					list_add_tail(&rq->comp_list,
rq                816 drivers/scsi/esas2r/esas2r_io.c 			rq = list_entry(element, struct esas2r_request,
rq                818 drivers/scsi/esas2r/esas2r_io.c 			if (rq->vrq->scsi.function == VDA_FUNC_SCSI
rq                819 drivers/scsi/esas2r/esas2r_io.c 			    && rq->target_id == targetid
rq                820 drivers/scsi/esas2r/esas2r_io.c 			    && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
rq                858 drivers/scsi/esas2r/esas2r_io.c bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
rq                862 drivers/scsi/esas2r/esas2r_io.c 	esas2r_trace("rq:%p", rq);
rq                863 drivers/scsi/esas2r/esas2r_io.c 	list_del_init(&rq->req_list);
rq                864 drivers/scsi/esas2r/esas2r_io.c 	if (rq->timeout > RQ_MAX_TIMEOUT) {
rq                869 drivers/scsi/esas2r/esas2r_io.c 		rq->req_stat = RS_BUSY;
rq                874 drivers/scsi/esas2r/esas2r_io.c 	rq->req_stat = status;
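
The esas2r_io.c hits around esas2r_start_request() show the routing decision: a request aimed at a missing target, or issued while the adapter is degraded, gets an error status and is completed immediately instead of being queued; otherwise the target id is translated and the request is started. The sketch below models just that branch; states, statuses, and the target table are stand-ins.

  /* Model of "fail fast or queue" request routing. */
  #include <stdbool.h>
  #include <stdio.h>

  enum { RS_PENDING, RS_STARTED, RS_SEL, RS_DEGRADED };

  struct mock_target { bool present; unsigned short virt_targ_id; };

  struct mock_request {
      int req_stat;
      unsigned short target_id;
      unsigned short hw_target_id;
  };

  static void complete_request(struct mock_request *rq)
  {
      printf("completed immediately, status %d\n", rq->req_stat);
  }

  static void start_request(struct mock_request *rq,
                            struct mock_target *targetdb,
                            bool adapter_degraded)
  {
      struct mock_target *t = &targetdb[rq->target_id];

      if (adapter_degraded)
          rq->req_stat = RS_DEGRADED;
      else if (!t->present)
          rq->req_stat = RS_SEL;                  /* selection failure */
      else
          rq->hw_target_id = t->virt_targ_id;     /* translate for firmware */

      if (rq->req_stat != RS_PENDING) {
          complete_request(rq);                   /* never reaches the queue */
          return;
      }
      rq->req_stat = RS_STARTED;
      printf("queued for hw target %u\n", rq->hw_target_id);
  }

  int main(void)
  {
      struct mock_target targetdb[2] = { { true, 7 }, { false, 0 } };
      struct mock_request ok = { RS_PENDING, 0, 0 };
      struct mock_request missing = { RS_PENDING, 1, 0 };

      start_request(&ok, targetdb, false);
      start_request(&missing, targetdb, false);
      return 0;
  }
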
rq                 83 drivers/scsi/esas2r/esas2r_ioctl.c 				struct esas2r_request *rq)
rq                111 drivers/scsi/esas2r/esas2r_ioctl.c 	struct esas2r_request *rq;
rq                118 drivers/scsi/esas2r/esas2r_ioctl.c 	rq = esas2r_alloc_request(a);
rq                119 drivers/scsi/esas2r/esas2r_ioctl.c 	if (rq == NULL) {
rq                151 drivers/scsi/esas2r/esas2r_ioctl.c 	rq->comp_cb = complete_fm_api_req;
rq                155 drivers/scsi/esas2r/esas2r_ioctl.c 	if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
rq                174 drivers/scsi/esas2r/esas2r_ioctl.c 	esas2r_free_request(a, (struct esas2r_request *)rq);
rq                182 drivers/scsi/esas2r/esas2r_ioctl.c 			     struct esas2r_request *rq)
rq                199 drivers/scsi/esas2r/esas2r_ioctl.c 					struct esas2r_request *rq)
rq                208 drivers/scsi/esas2r/esas2r_ioctl.c 	struct esas2r_request *rq;
rq                251 drivers/scsi/esas2r/esas2r_ioctl.c 	rq = esas2r_alloc_request(a);
rq                252 drivers/scsi/esas2r/esas2r_ioctl.c 	if (rq == NULL) {
rq                262 drivers/scsi/esas2r/esas2r_ioctl.c 	rq->comp_cb = complete_buffered_ioctl_req;
rq                267 drivers/scsi/esas2r/esas2r_ioctl.c 	if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
rq                280 drivers/scsi/esas2r/esas2r_ioctl.c 		(*bi->done_callback)(a, rq, bi->done_context);
rq                282 drivers/scsi/esas2r/esas2r_ioctl.c 	esas2r_free_request(a, rq);
rq                294 drivers/scsi/esas2r/esas2r_ioctl.c 			      struct esas2r_request *rq,
rq                300 drivers/scsi/esas2r/esas2r_ioctl.c 	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
rq                301 drivers/scsi/esas2r/esas2r_ioctl.c 	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);
rq                303 drivers/scsi/esas2r/esas2r_ioctl.c 	if (!esas2r_build_sg_list(a, rq, sgc)) {
rq                308 drivers/scsi/esas2r/esas2r_ioctl.c 	esas2r_start_request(a, rq);
rq                331 drivers/scsi/esas2r/esas2r_ioctl.c 					     struct esas2r_request *rq)
rq                333 drivers/scsi/esas2r/esas2r_ioctl.c 	rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
rq                334 drivers/scsi/esas2r/esas2r_ioctl.c 	rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);
rq                337 drivers/scsi/esas2r/esas2r_ioctl.c 	(*rq->aux_req_cb)(a, rq);
rq                343 drivers/scsi/esas2r/esas2r_ioctl.c 			      struct esas2r_request *rq,
rq                348 drivers/scsi/esas2r/esas2r_ioctl.c 	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
rq                353 drivers/scsi/esas2r/esas2r_ioctl.c 	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
rq                354 drivers/scsi/esas2r/esas2r_ioctl.c 	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
rq                357 drivers/scsi/esas2r/esas2r_ioctl.c 	ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);
rq                363 drivers/scsi/esas2r/esas2r_ioctl.c 	rq->aux_req_cx = ci;
rq                364 drivers/scsi/esas2r/esas2r_ioctl.c 	rq->aux_req_cb = rq->comp_cb;
rq                365 drivers/scsi/esas2r/esas2r_ioctl.c 	rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;
rq                367 drivers/scsi/esas2r/esas2r_ioctl.c 	if (!esas2r_build_sg_list(a, rq, sgc))
rq                370 drivers/scsi/esas2r/esas2r_ioctl.c 	esas2r_start_request(a, rq);
rq                391 drivers/scsi/esas2r/esas2r_ioctl.c 			       struct esas2r_request *rq,
rq                419 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
rq                424 drivers/scsi/esas2r/esas2r_ioctl.c 	rq->target_id = tid;
rq                425 drivers/scsi/esas2r/esas2r_ioctl.c 	rq->vrq->scsi.flags |= cpu_to_le32(lun);
rq                514 drivers/scsi/esas2r/esas2r_ioctl.c 		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
rq                548 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->target_id = esas2r_targ_get_id(t, a);
rq                557 drivers/scsi/esas2r/esas2r_ioctl.c 		t = a->targetdb + rq->target_id;
rq                569 drivers/scsi/esas2r/esas2r_ioctl.c 		gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
rq                576 drivers/scsi/esas2r/esas2r_ioctl.c 		t = a->targetdb + rq->target_id;
rq                585 drivers/scsi/esas2r/esas2r_ioctl.c 		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
rq                600 drivers/scsi/esas2r/esas2r_ioctl.c 	rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);
rq                607 drivers/scsi/esas2r/esas2r_ioctl.c 				     struct esas2r_request *rq, void *context)
rq                632 drivers/scsi/esas2r/esas2r_ioctl.c 		if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
rq                634 drivers/scsi/esas2r/esas2r_ioctl.c 			gsa->target_id = rq->target_id;
rq                642 drivers/scsi/esas2r/esas2r_ioctl.c 	ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
rq                669 drivers/scsi/esas2r/esas2r_ioctl.c 			     struct esas2r_request *rq,
rq                672 drivers/scsi/esas2r/esas2r_ioctl.c 	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
rq                674 drivers/scsi/esas2r/esas2r_ioctl.c 	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);
rq                676 drivers/scsi/esas2r/esas2r_ioctl.c 	if (!esas2r_build_sg_list(a, rq, sgc)) {
rq                682 drivers/scsi/esas2r/esas2r_ioctl.c 	esas2r_start_request(a, rq);
rq                688 drivers/scsi/esas2r/esas2r_ioctl.c 				  struct esas2r_request *rq)
rq                690 drivers/scsi/esas2r/esas2r_ioctl.c 	struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
rq                694 drivers/scsi/esas2r/esas2r_ioctl.c 	spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
rq                695 drivers/scsi/esas2r/esas2r_ioctl.c 	spt->sense_length = rq->sense_len;
rq                697 drivers/scsi/esas2r/esas2r_ioctl.c 		le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);
rq                699 drivers/scsi/esas2r/esas2r_ioctl.c 	switch (rq->req_stat) {
rq                741 drivers/scsi/esas2r/esas2r_ioctl.c 	(*rq->aux_req_cb)(a, rq);
rq                745 drivers/scsi/esas2r/esas2r_ioctl.c 			      struct esas2r_request *rq,
rq                901 drivers/scsi/esas2r/esas2r_ioctl.c 			if (hba_ioctl_tunnel(a, hi, rq, sgc))
rq                917 drivers/scsi/esas2r/esas2r_ioctl.c 			if (hba_ioctl_tunnel(a, hi, rq, sgc))
rq                988 drivers/scsi/esas2r/esas2r_ioctl.c 			if (hba_ioctl_tunnel(a, hi, rq, sgc))
rq               1005 drivers/scsi/esas2r/esas2r_ioctl.c 		esas2r_sgc_init(sgc, a, rq, NULL);
rq               1012 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->target_id = (u16)spt->target_id;
rq               1013 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
rq               1014 drivers/scsi/esas2r/esas2r_ioctl.c 		memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
rq               1015 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
rq               1016 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->sense_len = spt->sense_length;
rq               1017 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->sense_buf = (u8 *)spt->sense_data;
rq               1025 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->aux_req_cx = hi;
rq               1026 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->aux_req_cb = rq->comp_cb;
rq               1027 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->comp_cb = scsi_passthru_comp_cb;
rq               1030 drivers/scsi/esas2r/esas2r_ioctl.c 			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
rq               1032 drivers/scsi/esas2r/esas2r_ioctl.c 			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
rq               1041 drivers/scsi/esas2r/esas2r_ioctl.c 			rq->vrq->scsi.flags |=
rq               1044 drivers/scsi/esas2r/esas2r_ioctl.c 			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
rq               1047 drivers/scsi/esas2r/esas2r_ioctl.c 		if (!esas2r_build_sg_list(a, rq, sgc)) {
rq               1052 drivers/scsi/esas2r/esas2r_ioctl.c 		esas2r_start_request(a, rq);
rq               1064 drivers/scsi/esas2r/esas2r_ioctl.c 			if (hba_ioctl_tunnel(a, hi, rq, sgc))
rq               1110 drivers/scsi/esas2r/esas2r_ioctl.c 		if (hba_ioctl_tunnel(a, hi, rq, sgc))
rq               1160 drivers/scsi/esas2r/esas2r_ioctl.c 			if (hba_ioctl_tunnel(a, hi, rq, sgc))
rq               1204 drivers/scsi/esas2r/esas2r_ioctl.c 				    struct esas2r_request *rq, void *context)
rq               1249 drivers/scsi/esas2r/esas2r_ioctl.c int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
rq               1255 drivers/scsi/esas2r/esas2r_ioctl.c 	rq->comp_cb = complete_nvr_req;
rq               1257 drivers/scsi/esas2r/esas2r_ioctl.c 	if (esas2r_nvram_write(a, rq, data)) {
rq               1265 drivers/scsi/esas2r/esas2r_ioctl.c 		if (rq->req_stat == RS_SUCCESS)
rq               1277 drivers/scsi/esas2r/esas2r_ioctl.c 	struct esas2r_request *rq;
rq               1398 drivers/scsi/esas2r/esas2r_ioctl.c 		rq = esas2r_alloc_request(a);
rq               1399 drivers/scsi/esas2r/esas2r_ioctl.c 		if (rq == NULL) {
rq               1406 drivers/scsi/esas2r/esas2r_ioctl.c 		code = esas2r_write_params(a, rq,
rq               1410 drivers/scsi/esas2r/esas2r_ioctl.c 		esas2r_free_request(a, rq);
rq               1808 drivers/scsi/esas2r/esas2r_ioctl.c 			     struct esas2r_request *rq)
rq               1831 drivers/scsi/esas2r/esas2r_ioctl.c 		struct esas2r_request *rq;
rq               1844 drivers/scsi/esas2r/esas2r_ioctl.c 		rq = esas2r_alloc_request(a);
rq               1845 drivers/scsi/esas2r/esas2r_ioctl.c 		if (rq == NULL) {
rq               1850 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->comp_cb = vda_complete_req;
rq               1852 drivers/scsi/esas2r/esas2r_ioctl.c 		sgc.first_req = rq;
rq               1860 drivers/scsi/esas2r/esas2r_ioctl.c 			esas2r_process_vda_ioctl(a, vi, rq, &sgc);
rq               1870 drivers/scsi/esas2r/esas2r_ioctl.c 		esas2r_free_request(a, (struct esas2r_request *)rq);
rq               1926 drivers/scsi/esas2r/esas2r_ioctl.c 				struct esas2r_request *rq)
rq               1953 drivers/scsi/esas2r/esas2r_ioctl.c 		struct esas2r_request *rq;
rq               1972 drivers/scsi/esas2r/esas2r_ioctl.c 		rq = esas2r_alloc_request(a);
rq               1973 drivers/scsi/esas2r/esas2r_ioctl.c 		if (rq == NULL) {
rq               1979 drivers/scsi/esas2r/esas2r_ioctl.c 		rq->comp_cb = fs_api_complete_req;
rq               1988 drivers/scsi/esas2r/esas2r_ioctl.c 		if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
rq               2004 drivers/scsi/esas2r/esas2r_ioctl.c 		esas2r_free_request(a, (struct esas2r_request *)rq);
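
The esas2r_ioctl.c entries above repeatedly save the request's completion callback into rq->aux_req_cb, install a tunnel completion callback, and have that callback invoke the saved one after post-processing. Below is a minimal, self-contained userspace C sketch of that "wrap the completion callback" idiom; the struct demo_req type, its field names, and the main() driver are illustrative stand-ins, not the esas2r structures.

#include <stdio.h>

struct demo_req;
typedef void (*demo_cb)(struct demo_req *rq);

struct demo_req {
        int status;
        demo_cb comp_cb;      /* callback run when the request completes */
        demo_cb aux_req_cb;   /* saved original callback while tunnelling */
};

/* Original completion handler supplied by the caller. */
static void caller_done(struct demo_req *rq)
{
        printf("caller_done: status=%d\n", rq->status);
}

/* Tunnel completion: post-process, then hand off to the saved callback. */
static void tunnel_comp_cb(struct demo_req *rq)
{
        rq->status += 100;        /* stand-in for copying response fields */
        (*rq->aux_req_cb)(rq);    /* return control to the original owner */
}

/* Wrap the current completion callback before starting the request. */
static void start_tunnelled(struct demo_req *rq)
{
        rq->aux_req_cb = rq->comp_cb;
        rq->comp_cb = tunnel_comp_cb;
}

int main(void)
{
        struct demo_req rq = { .status = 1, .comp_cb = caller_done };

        start_tunnelled(&rq);
        rq.comp_cb(&rq);          /* simulate the hardware completion */
        return 0;
}
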
rq                145 drivers/scsi/esas2r/esas2r_main.c 	struct esas2r_request *rq;
rq                148 drivers/scsi/esas2r/esas2r_main.c 	rq = esas2r_alloc_request(a);
rq                149 drivers/scsi/esas2r/esas2r_main.c 	if (rq == NULL)
rq                152 drivers/scsi/esas2r/esas2r_main.c 	if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
rq                155 drivers/scsi/esas2r/esas2r_main.c 	esas2r_free_request(a, rq);
rq                816 drivers/scsi/esas2r/esas2r_main.c 	struct esas2r_request *rq;
rq                829 drivers/scsi/esas2r/esas2r_main.c 	rq = esas2r_alloc_request(a);
rq                830 drivers/scsi/esas2r/esas2r_main.c 	if (unlikely(rq == NULL)) {
rq                835 drivers/scsi/esas2r/esas2r_main.c 	rq->cmd = cmd;
rq                840 drivers/scsi/esas2r/esas2r_main.c 			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
rq                842 drivers/scsi/esas2r/esas2r_main.c 			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
rq                845 drivers/scsi/esas2r/esas2r_main.c 	memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
rq                846 drivers/scsi/esas2r/esas2r_main.c 	rq->vrq->scsi.length = cpu_to_le32(bufflen);
rq                847 drivers/scsi/esas2r/esas2r_main.c 	rq->target_id = cmd->device->id;
rq                848 drivers/scsi/esas2r/esas2r_main.c 	rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
rq                849 drivers/scsi/esas2r/esas2r_main.c 	rq->sense_buf = cmd->sense_buffer;
rq                850 drivers/scsi/esas2r/esas2r_main.c 	rq->sense_len = SCSI_SENSE_BUFFERSIZE;
rq                852 drivers/scsi/esas2r/esas2r_main.c 	esas2r_sgc_init(&sgc, a, rq, NULL);
rq                863 drivers/scsi/esas2r/esas2r_main.c 		esas2r_free_request(a, rq);
rq                869 drivers/scsi/esas2r/esas2r_main.c 	if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
rq                871 drivers/scsi/esas2r/esas2r_main.c 		esas2r_free_request(a, rq);
rq                875 drivers/scsi/esas2r/esas2r_main.c 	esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
rq                878 drivers/scsi/esas2r/esas2r_main.c 	esas2r_start_request(a, rq);
rq                884 drivers/scsi/esas2r/esas2r_main.c 					     struct esas2r_request *rq)
rq                886 drivers/scsi/esas2r/esas2r_main.c 	(*rq->task_management_status_ptr) = rq->req_stat;
rq                887 drivers/scsi/esas2r/esas2r_main.c 	esas2r_free_request(a, rq);
rq                907 drivers/scsi/esas2r/esas2r_main.c 	struct esas2r_request *rq;
rq                912 drivers/scsi/esas2r/esas2r_main.c 		rq = list_entry(element, struct esas2r_request, req_list);
rq                914 drivers/scsi/esas2r/esas2r_main.c 		if (rq->cmd == cmd) {
rq                938 drivers/scsi/esas2r/esas2r_main.c 				ar->target_id = rq->target_id;
rq                940 drivers/scsi/esas2r/esas2r_main.c 					(u8)le32_to_cpu(rq->vrq->scsi.flags));
rq                948 drivers/scsi/esas2r/esas2r_main.c 					rq->vrq->scsi.handle;
rq                955 drivers/scsi/esas2r/esas2r_main.c 				list_del_init(&rq->req_list);
rq                956 drivers/scsi/esas2r/esas2r_main.c 				esas2r_free_request(a, rq);
rq               1106 drivers/scsi/esas2r/esas2r_main.c 	struct esas2r_request *rq;
rq               1114 drivers/scsi/esas2r/esas2r_main.c 	rq = esas2r_alloc_request(a);
rq               1115 drivers/scsi/esas2r/esas2r_main.c 	if (rq == NULL) {
rq               1133 drivers/scsi/esas2r/esas2r_main.c 	rq->target_id = cmd->device->id;
rq               1134 drivers/scsi/esas2r/esas2r_main.c 	rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
rq               1135 drivers/scsi/esas2r/esas2r_main.c 	rq->req_stat = RS_PENDING;
rq               1137 drivers/scsi/esas2r/esas2r_main.c 	rq->comp_cb = complete_task_management_request;
rq               1138 drivers/scsi/esas2r/esas2r_main.c 	rq->task_management_status_ptr = &task_management_status;
rq               1141 drivers/scsi/esas2r/esas2r_main.c 		esas2r_debug("issuing target reset (%p) to id %d", rq,
rq               1143 drivers/scsi/esas2r/esas2r_main.c 		completed = esas2r_send_task_mgmt(a, rq, 0x20);
rq               1145 drivers/scsi/esas2r/esas2r_main.c 		esas2r_debug("issuing device reset (%p) to id %d lun %d", rq,
rq               1147 drivers/scsi/esas2r/esas2r_main.c 		completed = esas2r_send_task_mgmt(a, rq, 0x10);
rq               1153 drivers/scsi/esas2r/esas2r_main.c 		esas2r_free_request(a, rq);
rq               1194 drivers/scsi/esas2r/esas2r_main.c 				struct esas2r_request *rq)
rq               1196 drivers/scsi/esas2r/esas2r_main.c 	u8 reqstatus = rq->req_stat;
rq               1201 drivers/scsi/esas2r/esas2r_main.c 	if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
rq               1203 drivers/scsi/esas2r/esas2r_main.c 			if (rq->func_rsp.scsi_rsp.sense_len >= 13) {
rq               1206 drivers/scsi/esas2r/esas2r_main.c 					   rq->sense_buf[2], rq->sense_buf[12],
rq               1207 drivers/scsi/esas2r/esas2r_main.c 					   rq->sense_buf[13],
rq               1208 drivers/scsi/esas2r/esas2r_main.c 					   rq->vrq->scsi.cdb[0]);
rq               1212 drivers/scsi/esas2r/esas2r_main.c 					   rq->vrq->scsi.cdb[0]);
rq               1214 drivers/scsi/esas2r/esas2r_main.c 		} else if ((rq->vrq->scsi.cdb[0] != INQUIRY
rq               1215 drivers/scsi/esas2r/esas2r_main.c 			    && rq->vrq->scsi.cdb[0] != REPORT_LUNS)
rq               1219 drivers/scsi/esas2r/esas2r_main.c 			    (rq->vrq->scsi.cdb[0] == INQUIRY)) {
rq               1224 drivers/scsi/esas2r/esas2r_main.c 					   rq->vrq->scsi.cdb[0], reqstatus,
rq               1225 drivers/scsi/esas2r/esas2r_main.c 					   rq->target_id);
rq               1231 drivers/scsi/esas2r/esas2r_main.c void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq)
rq               1237 drivers/scsi/esas2r/esas2r_main.c 	timeout = rq->timeout ? rq->timeout : 5000;
rq               1242 drivers/scsi/esas2r/esas2r_main.c 		if (rq->req_stat != RS_STARTED)
rq               1251 drivers/scsi/esas2r/esas2r_main.c 			rq->req_stat = RS_TIMEOUT;
rq               1479 drivers/scsi/esas2r/esas2r_main.c void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq)
rq               1483 drivers/scsi/esas2r/esas2r_main.c 	esas2r_rq_destroy_request(rq, a);
rq               1485 drivers/scsi/esas2r/esas2r_main.c 	list_add(&rq->comp_list, &a->avail_request);
rq               1491 drivers/scsi/esas2r/esas2r_main.c 	struct esas2r_request *rq;
rq               1501 drivers/scsi/esas2r/esas2r_main.c 	rq = list_first_entry(&a->avail_request, struct esas2r_request,
rq               1503 drivers/scsi/esas2r/esas2r_main.c 	list_del(&rq->comp_list);
rq               1505 drivers/scsi/esas2r/esas2r_main.c 	esas2r_rq_init_request(rq, a);
rq               1507 drivers/scsi/esas2r/esas2r_main.c 	return rq;
rq               1512 drivers/scsi/esas2r/esas2r_main.c 				struct esas2r_request *rq)
rq               1514 drivers/scsi/esas2r/esas2r_main.c 	esas2r_debug("completing request %p\n", rq);
rq               1516 drivers/scsi/esas2r/esas2r_main.c 	scsi_dma_unmap(rq->cmd);
rq               1518 drivers/scsi/esas2r/esas2r_main.c 	if (unlikely(rq->req_stat != RS_SUCCESS)) {
rq               1519 drivers/scsi/esas2r/esas2r_main.c 		esas2r_debug("[%x STATUS %x:%x (%x)]", rq->target_id,
rq               1520 drivers/scsi/esas2r/esas2r_main.c 			     rq->req_stat,
rq               1521 drivers/scsi/esas2r/esas2r_main.c 			     rq->func_rsp.scsi_rsp.scsi_stat,
rq               1522 drivers/scsi/esas2r/esas2r_main.c 			     rq->cmd);
rq               1524 drivers/scsi/esas2r/esas2r_main.c 		rq->cmd->result =
rq               1525 drivers/scsi/esas2r/esas2r_main.c 			((esas2r_req_status_to_error(rq->req_stat) << 16)
rq               1526 drivers/scsi/esas2r/esas2r_main.c 			 | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
rq               1528 drivers/scsi/esas2r/esas2r_main.c 		if (rq->req_stat == RS_UNDERRUN)
rq               1529 drivers/scsi/esas2r/esas2r_main.c 			scsi_set_resid(rq->cmd,
rq               1530 drivers/scsi/esas2r/esas2r_main.c 				       le32_to_cpu(rq->func_rsp.scsi_rsp.
rq               1533 drivers/scsi/esas2r/esas2r_main.c 			scsi_set_resid(rq->cmd, 0);
rq               1536 drivers/scsi/esas2r/esas2r_main.c 	rq->cmd->scsi_done(rq->cmd);
rq               1538 drivers/scsi/esas2r/esas2r_main.c 	esas2r_free_request(a, rq);
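
The esas2r_main.c entries above show esas2r_alloc_request popping a pre-allocated request off a->avail_request and esas2r_free_request pushing it back once the command completes. A minimal sketch of that free-list request pool follows, using a singly linked list instead of the kernel's list_head and with no locking (the real driver serializes access to the list); the names are illustrative.

#include <stdio.h>
#include <stddef.h>

struct pool_req {
        int id;
        struct pool_req *next;   /* free-list linkage */
};

static struct pool_req *avail_head;   /* head of the free list */

static void pool_free(struct pool_req *rq)
{
        rq->next = avail_head;   /* push back onto the free list */
        avail_head = rq;
}

static struct pool_req *pool_alloc(void)
{
        struct pool_req *rq = avail_head;

        if (!rq)
                return NULL;     /* pool exhausted */
        avail_head = rq->next;
        rq->next = NULL;
        return rq;
}

int main(void)
{
        static struct pool_req reqs[4];
        int i;

        for (i = 0; i < 4; i++) {      /* seed the pool at init time */
                reqs[i].id = i;
                pool_free(&reqs[i]);
        }

        struct pool_req *rq = pool_alloc();
        printf("got request %d\n", rq ? rq->id : -1);
        pool_free(rq);
        return 0;
}
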
rq                 59 drivers/scsi/esas2r/esas2r_vda.c static void clear_vda_request(struct esas2r_request *rq);
rq                 62 drivers/scsi/esas2r/esas2r_vda.c 				      struct esas2r_request *rq);
rq                 67 drivers/scsi/esas2r/esas2r_vda.c 			      struct esas2r_request *rq,
rq                 93 drivers/scsi/esas2r/esas2r_vda.c 		clear_vda_request(rq);
rq                 95 drivers/scsi/esas2r/esas2r_vda.c 	rq->vrq->scsi.function = vi->function;
rq                 96 drivers/scsi/esas2r/esas2r_vda.c 	rq->interrupt_cb = esas2r_complete_vda_ioctl;
rq                 97 drivers/scsi/esas2r/esas2r_vda.c 	rq->interrupt_cx = vi;
rq                112 drivers/scsi/esas2r/esas2r_vda.c 		rq->vrq->flash.length = cpu_to_le32(datalen);
rq                113 drivers/scsi/esas2r/esas2r_vda.c 		rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
rq                115 drivers/scsi/esas2r/esas2r_vda.c 		memcpy(rq->vrq->flash.data.file.file_name,
rq                119 drivers/scsi/esas2r/esas2r_vda.c 		firstsg = rq->vrq->flash.data.file.sge;
rq                126 drivers/scsi/esas2r/esas2r_vda.c 		rq->vrq->cli.cmd_rsp_len =
rq                128 drivers/scsi/esas2r/esas2r_vda.c 		rq->vrq->cli.length = cpu_to_le32(datalen);
rq                130 drivers/scsi/esas2r/esas2r_vda.c 		firstsg = rq->vrq->cli.sge;
rq                152 drivers/scsi/esas2r/esas2r_vda.c 				rq->vrq->mgt.payld_sglst_offset =
rq                169 drivers/scsi/esas2r/esas2r_vda.c 			rq->vrq->mgt.length = cpu_to_le32(datalen);
rq                172 drivers/scsi/esas2r/esas2r_vda.c 				rq->vrq->mgt.payld_length =
rq                175 drivers/scsi/esas2r/esas2r_vda.c 				esas2r_sgc_init(sgc, a, rq,
rq                176 drivers/scsi/esas2r/esas2r_vda.c 						rq->vrq->mgt.payld_sge);
rq                179 drivers/scsi/esas2r/esas2r_vda.c 				if (!esas2r_build_sg_list(a, rq, sgc)) {
rq                187 drivers/scsi/esas2r/esas2r_vda.c 			rq->vrq->mgt.length = cpu_to_le32(datalen);
rq                194 drivers/scsi/esas2r/esas2r_vda.c 		firstsg = rq->vrq->mgt.sge;
rq                198 drivers/scsi/esas2r/esas2r_vda.c 		rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func;
rq                199 drivers/scsi/esas2r/esas2r_vda.c 		rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation;
rq                200 drivers/scsi/esas2r/esas2r_vda.c 		rq->vrq->mgt.dev_index =
rq                203 drivers/scsi/esas2r/esas2r_vda.c 		esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
rq                220 drivers/scsi/esas2r/esas2r_vda.c 		rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func;
rq                221 drivers/scsi/esas2r/esas2r_vda.c 		rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length);
rq                224 drivers/scsi/esas2r/esas2r_vda.c 			memcpy(&rq->vrq->cfg.data,
rq                228 drivers/scsi/esas2r/esas2r_vda.c 			esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
rq                229 drivers/scsi/esas2r/esas2r_vda.c 					     &rq->vrq->cfg.data);
rq                255 drivers/scsi/esas2r/esas2r_vda.c 		esas2r_sgc_init(sgc, a, rq, firstsg);
rq                258 drivers/scsi/esas2r/esas2r_vda.c 		if (!esas2r_build_sg_list(a, rq, sgc)) {
rq                264 drivers/scsi/esas2r/esas2r_vda.c 	esas2r_start_request(a, rq);
rq                270 drivers/scsi/esas2r/esas2r_vda.c 				      struct esas2r_request *rq)
rq                272 drivers/scsi/esas2r/esas2r_vda.c 	struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx;
rq                274 drivers/scsi/esas2r/esas2r_vda.c 	vi->vda_status = rq->req_stat;
rq                282 drivers/scsi/esas2r/esas2r_vda.c 				le32_to_cpu(rq->func_rsp.flash_rsp.file_size);
rq                289 drivers/scsi/esas2r/esas2r_vda.c 			rq->func_rsp.mgt_rsp.scan_generation;
rq                291 drivers/scsi/esas2r/esas2r_vda.c 			rq->func_rsp.mgt_rsp.dev_index);
rq                295 drivers/scsi/esas2r/esas2r_vda.c 				le32_to_cpu(rq->func_rsp.mgt_rsp.length);
rq                297 drivers/scsi/esas2r/esas2r_vda.c 		esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
rq                304 drivers/scsi/esas2r/esas2r_vda.c 			struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
rq                327 drivers/scsi/esas2r/esas2r_vda.c 			esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
rq                336 drivers/scsi/esas2r/esas2r_vda.c 			le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len);
rq                347 drivers/scsi/esas2r/esas2r_vda.c 			    struct esas2r_request *rq,
rq                353 drivers/scsi/esas2r/esas2r_vda.c 	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
rq                355 drivers/scsi/esas2r/esas2r_vda.c 	clear_vda_request(rq);
rq                357 drivers/scsi/esas2r/esas2r_vda.c 	rq->vrq->scsi.function = VDA_FUNC_FLASH;
rq                373 drivers/scsi/esas2r/esas2r_vda.c 			  struct esas2r_request *rq,
rq                380 drivers/scsi/esas2r/esas2r_vda.c 	struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt;
rq                382 drivers/scsi/esas2r/esas2r_vda.c 	clear_vda_request(rq);
rq                384 drivers/scsi/esas2r/esas2r_vda.c 	rq->vrq->scsi.function = VDA_FUNC_MGT;
rq                398 drivers/scsi/esas2r/esas2r_vda.c 				rq->vrq_md->phys_addr +
rq                406 drivers/scsi/esas2r/esas2r_vda.c 				rq->vrq_md->phys_addr +
rq                414 drivers/scsi/esas2r/esas2r_vda.c 		memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data,
rq                420 drivers/scsi/esas2r/esas2r_vda.c void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
rq                422 drivers/scsi/esas2r/esas2r_vda.c 	struct atto_vda_ae_req *vrq = &rq->vrq->ae;
rq                424 drivers/scsi/esas2r/esas2r_vda.c 	clear_vda_request(rq);
rq                426 drivers/scsi/esas2r/esas2r_vda.c 	rq->vrq->scsi.function = VDA_FUNC_AE;
rq                435 drivers/scsi/esas2r/esas2r_vda.c 			rq->vrq_md->phys_addr +
rq                442 drivers/scsi/esas2r/esas2r_vda.c 			rq->vrq_md->phys_addr +
rq                449 drivers/scsi/esas2r/esas2r_vda.c 			  struct esas2r_request *rq,
rq                453 drivers/scsi/esas2r/esas2r_vda.c 	struct atto_vda_cli_req *vrq = &rq->vrq->cli;
rq                455 drivers/scsi/esas2r/esas2r_vda.c 	clear_vda_request(rq);
rq                457 drivers/scsi/esas2r/esas2r_vda.c 	rq->vrq->scsi.function = VDA_FUNC_CLI;
rq                466 drivers/scsi/esas2r/esas2r_vda.c 			    struct esas2r_request *rq,
rq                470 drivers/scsi/esas2r/esas2r_vda.c 	struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl;
rq                472 drivers/scsi/esas2r/esas2r_vda.c 	clear_vda_request(rq);
rq                474 drivers/scsi/esas2r/esas2r_vda.c 	rq->vrq->scsi.function = VDA_FUNC_IOCTL;
rq                483 drivers/scsi/esas2r/esas2r_vda.c 			  struct esas2r_request *rq,
rq                488 drivers/scsi/esas2r/esas2r_vda.c 	struct atto_vda_cfg_req *vrq = &rq->vrq->cfg;
rq                490 drivers/scsi/esas2r/esas2r_vda.c 	clear_vda_request(rq);
rq                492 drivers/scsi/esas2r/esas2r_vda.c 	rq->vrq->scsi.function = VDA_FUNC_CFG;
rq                504 drivers/scsi/esas2r/esas2r_vda.c static void clear_vda_request(struct esas2r_request *rq)
rq                506 drivers/scsi/esas2r/esas2r_vda.c 	u32 handle = rq->vrq->scsi.handle;
rq                508 drivers/scsi/esas2r/esas2r_vda.c 	memset(rq->vrq, 0, sizeof(*rq->vrq));
rq                510 drivers/scsi/esas2r/esas2r_vda.c 	rq->vrq->scsi.handle = handle;
rq                512 drivers/scsi/esas2r/esas2r_vda.c 	rq->req_stat = RS_PENDING;
rq                516 drivers/scsi/esas2r/esas2r_vda.c 	memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN);
rq                523 drivers/scsi/esas2r/esas2r_vda.c 	INIT_LIST_HEAD(&rq->req_list);
rq                311 drivers/scsi/fnic/fnic.h 	____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX];
rq                337 drivers/scsi/fnic/fnic.h int fnic_alloc_rq_frame(struct vnic_rq *rq);
rq                338 drivers/scsi/fnic/fnic.h void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
rq                831 drivers/scsi/fnic/fnic_fcs.c static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
rq                836 drivers/scsi/fnic/fnic_fcs.c 	struct fnic *fnic = vnic_dev_priv(rq->vdev);
rq                943 drivers/scsi/fnic/fnic_fcs.c 	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
rq                960 drivers/scsi/fnic/fnic_fcs.c 			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
rq                977 drivers/scsi/fnic/fnic_fcs.c int fnic_alloc_rq_frame(struct vnic_rq *rq)
rq                979 drivers/scsi/fnic/fnic_fcs.c 	struct fnic *fnic = vnic_dev_priv(rq->vdev);
rq               1003 drivers/scsi/fnic/fnic_fcs.c 	fnic_queue_rq_desc(rq, skb, pa, len);
rq               1011 drivers/scsi/fnic/fnic_fcs.c void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
rq               1014 drivers/scsi/fnic/fnic_fcs.c 	struct fnic *fnic = vnic_dev_priv(rq->vdev);
rq                237 drivers/scsi/fnic/fnic_isr.c 	unsigned int n = ARRAY_SIZE(fnic->rq);
rq                360 drivers/scsi/fnic/fnic_main.c 		error_status = ioread32(&fnic->rq[i].ctrl->error_status);
rq                495 drivers/scsi/fnic/fnic_main.c 		err = vnic_rq_disable(&fnic->rq[i]);
rq                519 drivers/scsi/fnic/fnic_main.c 		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
rq                810 drivers/scsi/fnic/fnic_main.c 		vnic_rq_enable(&fnic->rq[i]);
rq                811 drivers/scsi/fnic/fnic_main.c 		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
rq                913 drivers/scsi/fnic/fnic_main.c 		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
rq                221 drivers/scsi/fnic/fnic_res.c 		vnic_rq_free(&fnic->rq[i]);
rq                275 drivers/scsi/fnic/fnic_res.c 		err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
rq                357 drivers/scsi/fnic/fnic_res.c 		vnic_rq_init(&fnic->rq[i],
rq                223 drivers/scsi/fnic/fnic_res.h static inline void fnic_queue_rq_desc(struct vnic_rq *rq,
rq                227 drivers/scsi/fnic/fnic_res.h 	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
rq                234 drivers/scsi/fnic/fnic_res.h 	vnic_rq_post(rq, os_buf, 0, dma_addr, len);
rq                 27 drivers/scsi/fnic/vnic_rq.c static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
rq                 30 drivers/scsi/fnic/vnic_rq.c 	unsigned int i, j, count = rq->ring.desc_count;
rq                 34 drivers/scsi/fnic/vnic_rq.c 		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
rq                 35 drivers/scsi/fnic/vnic_rq.c 		if (!rq->bufs[i]) {
rq                 42 drivers/scsi/fnic/vnic_rq.c 		buf = rq->bufs[i];
rq                 45 drivers/scsi/fnic/vnic_rq.c 			buf->desc = (u8 *)rq->ring.descs +
rq                 46 drivers/scsi/fnic/vnic_rq.c 				rq->ring.desc_size * buf->index;
rq                 48 drivers/scsi/fnic/vnic_rq.c 				buf->next = rq->bufs[0];
rq                 51 drivers/scsi/fnic/vnic_rq.c 				buf->next = rq->bufs[i + 1];
rq                 59 drivers/scsi/fnic/vnic_rq.c 	rq->to_use = rq->to_clean = rq->bufs[0];
rq                 60 drivers/scsi/fnic/vnic_rq.c 	rq->buf_index = 0;
rq                 65 drivers/scsi/fnic/vnic_rq.c void vnic_rq_free(struct vnic_rq *rq)
rq                 70 drivers/scsi/fnic/vnic_rq.c 	vdev = rq->vdev;
rq                 72 drivers/scsi/fnic/vnic_rq.c 	vnic_dev_free_desc_ring(vdev, &rq->ring);
rq                 75 drivers/scsi/fnic/vnic_rq.c 		kfree(rq->bufs[i]);
rq                 76 drivers/scsi/fnic/vnic_rq.c 		rq->bufs[i] = NULL;
rq                 79 drivers/scsi/fnic/vnic_rq.c 	rq->ctrl = NULL;
rq                 82 drivers/scsi/fnic/vnic_rq.c int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
rq                 87 drivers/scsi/fnic/vnic_rq.c 	rq->index = index;
rq                 88 drivers/scsi/fnic/vnic_rq.c 	rq->vdev = vdev;
rq                 90 drivers/scsi/fnic/vnic_rq.c 	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
rq                 91 drivers/scsi/fnic/vnic_rq.c 	if (!rq->ctrl) {
rq                 96 drivers/scsi/fnic/vnic_rq.c 	vnic_rq_disable(rq);
rq                 98 drivers/scsi/fnic/vnic_rq.c 	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
rq                102 drivers/scsi/fnic/vnic_rq.c 	err = vnic_rq_alloc_bufs(rq);
rq                104 drivers/scsi/fnic/vnic_rq.c 		vnic_rq_free(rq);
rq                111 drivers/scsi/fnic/vnic_rq.c void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
rq                118 drivers/scsi/fnic/vnic_rq.c 	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
rq                119 drivers/scsi/fnic/vnic_rq.c 	writeq(paddr, &rq->ctrl->ring_base);
rq                120 drivers/scsi/fnic/vnic_rq.c 	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
rq                121 drivers/scsi/fnic/vnic_rq.c 	iowrite32(cq_index, &rq->ctrl->cq_index);
rq                122 drivers/scsi/fnic/vnic_rq.c 	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
rq                123 drivers/scsi/fnic/vnic_rq.c 	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
rq                124 drivers/scsi/fnic/vnic_rq.c 	iowrite32(0, &rq->ctrl->dropped_packet_count);
rq                125 drivers/scsi/fnic/vnic_rq.c 	iowrite32(0, &rq->ctrl->error_status);
rq                128 drivers/scsi/fnic/vnic_rq.c 	fetch_index = ioread32(&rq->ctrl->fetch_index);
rq                129 drivers/scsi/fnic/vnic_rq.c 	rq->to_use = rq->to_clean =
rq                130 drivers/scsi/fnic/vnic_rq.c 		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
rq                132 drivers/scsi/fnic/vnic_rq.c 	iowrite32(fetch_index, &rq->ctrl->posted_index);
rq                134 drivers/scsi/fnic/vnic_rq.c 	rq->buf_index = 0;
rq                137 drivers/scsi/fnic/vnic_rq.c unsigned int vnic_rq_error_status(struct vnic_rq *rq)
rq                139 drivers/scsi/fnic/vnic_rq.c 	return ioread32(&rq->ctrl->error_status);
rq                142 drivers/scsi/fnic/vnic_rq.c void vnic_rq_enable(struct vnic_rq *rq)
rq                144 drivers/scsi/fnic/vnic_rq.c 	iowrite32(1, &rq->ctrl->enable);
rq                147 drivers/scsi/fnic/vnic_rq.c int vnic_rq_disable(struct vnic_rq *rq)
rq                151 drivers/scsi/fnic/vnic_rq.c 	iowrite32(0, &rq->ctrl->enable);
rq                155 drivers/scsi/fnic/vnic_rq.c 		if (!(ioread32(&rq->ctrl->running)))
rq                160 drivers/scsi/fnic/vnic_rq.c 	printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
rq                165 drivers/scsi/fnic/vnic_rq.c void vnic_rq_clean(struct vnic_rq *rq,
rq                166 drivers/scsi/fnic/vnic_rq.c 	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
rq                171 drivers/scsi/fnic/vnic_rq.c 	WARN_ON(ioread32(&rq->ctrl->enable));
rq                173 drivers/scsi/fnic/vnic_rq.c 	buf = rq->to_clean;
rq                175 drivers/scsi/fnic/vnic_rq.c 	while (vnic_rq_desc_used(rq) > 0) {
rq                177 drivers/scsi/fnic/vnic_rq.c 		(*buf_clean)(rq, buf);
rq                179 drivers/scsi/fnic/vnic_rq.c 		buf = rq->to_clean = buf->next;
rq                180 drivers/scsi/fnic/vnic_rq.c 		rq->ring.desc_avail++;
rq                184 drivers/scsi/fnic/vnic_rq.c 	fetch_index = ioread32(&rq->ctrl->fetch_index);
rq                185 drivers/scsi/fnic/vnic_rq.c 	rq->to_use = rq->to_clean =
rq                186 drivers/scsi/fnic/vnic_rq.c 		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
rq                188 drivers/scsi/fnic/vnic_rq.c 	iowrite32(fetch_index, &rq->ctrl->posted_index);
rq                190 drivers/scsi/fnic/vnic_rq.c 	rq->buf_index = 0;
rq                192 drivers/scsi/fnic/vnic_rq.c 	vnic_dev_clear_desc_ring(&rq->ring);
rq                105 drivers/scsi/fnic/vnic_rq.h static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
rq                108 drivers/scsi/fnic/vnic_rq.h 	return rq->ring.desc_avail;
rq                111 drivers/scsi/fnic/vnic_rq.h static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
rq                114 drivers/scsi/fnic/vnic_rq.h 	return rq->ring.desc_count - rq->ring.desc_avail - 1;
rq                117 drivers/scsi/fnic/vnic_rq.h static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
rq                119 drivers/scsi/fnic/vnic_rq.h 	return rq->to_use->desc;
rq                122 drivers/scsi/fnic/vnic_rq.h static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
rq                124 drivers/scsi/fnic/vnic_rq.h 	return rq->to_use->index;
rq                127 drivers/scsi/fnic/vnic_rq.h static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
rq                129 drivers/scsi/fnic/vnic_rq.h 	return rq->buf_index++;
rq                132 drivers/scsi/fnic/vnic_rq.h static inline void vnic_rq_post(struct vnic_rq *rq,
rq                136 drivers/scsi/fnic/vnic_rq.h 	struct vnic_rq_buf *buf = rq->to_use;
rq                144 drivers/scsi/fnic/vnic_rq.h 	rq->to_use = buf;
rq                145 drivers/scsi/fnic/vnic_rq.h 	rq->ring.desc_avail--;
rq                161 drivers/scsi/fnic/vnic_rq.h 		iowrite32(buf->index, &rq->ctrl->posted_index);
rq                165 drivers/scsi/fnic/vnic_rq.h static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
rq                167 drivers/scsi/fnic/vnic_rq.h 	return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
rq                170 drivers/scsi/fnic/vnic_rq.h static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
rq                172 drivers/scsi/fnic/vnic_rq.h 	rq->ring.desc_avail += count;
rq                180 drivers/scsi/fnic/vnic_rq.h static inline void vnic_rq_service(struct vnic_rq *rq,
rq                182 drivers/scsi/fnic/vnic_rq.h 	int desc_return, void (*buf_service)(struct vnic_rq *rq,
rq                189 drivers/scsi/fnic/vnic_rq.h 	buf = rq->to_clean;
rq                194 drivers/scsi/fnic/vnic_rq.h 		(*buf_service)(rq, cq_desc, buf, skipped, opaque);
rq                197 drivers/scsi/fnic/vnic_rq.h 			rq->ring.desc_avail++;
rq                199 drivers/scsi/fnic/vnic_rq.h 		rq->to_clean = buf->next;
rq                204 drivers/scsi/fnic/vnic_rq.h 		buf = rq->to_clean;
rq                208 drivers/scsi/fnic/vnic_rq.h static inline int vnic_rq_fill(struct vnic_rq *rq,
rq                209 drivers/scsi/fnic/vnic_rq.h 	int (*buf_fill)(struct vnic_rq *rq))
rq                213 drivers/scsi/fnic/vnic_rq.h 	while (vnic_rq_desc_avail(rq) > 1) {
rq                215 drivers/scsi/fnic/vnic_rq.h 		err = (*buf_fill)(rq);
rq                223 drivers/scsi/fnic/vnic_rq.h void vnic_rq_free(struct vnic_rq *rq);
rq                224 drivers/scsi/fnic/vnic_rq.h int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
rq                226 drivers/scsi/fnic/vnic_rq.h void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
rq                229 drivers/scsi/fnic/vnic_rq.h unsigned int vnic_rq_error_status(struct vnic_rq *rq);
rq                230 drivers/scsi/fnic/vnic_rq.h void vnic_rq_enable(struct vnic_rq *rq);
rq                231 drivers/scsi/fnic/vnic_rq.h int vnic_rq_disable(struct vnic_rq *rq);
rq                232 drivers/scsi/fnic/vnic_rq.h void vnic_rq_clean(struct vnic_rq *rq,
rq                233 drivers/scsi/fnic/vnic_rq.h 	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
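
The vnic_rq entries above describe a receive descriptor ring that always keeps one slot unused (vnic_rq_desc_used returns desc_count - desc_avail - 1) and is walked with a posting cursor (to_use) and a servicing cursor (to_clean), with vnic_rq_fill posting while more than one descriptor remains available. A minimal sketch of that accounting with plain array indices in place of the vnic_rq_buf chain; it assumes the ring starts with desc_avail = desc_count - 1, and the names are illustrative.

#include <stdio.h>

#define RING_SIZE 8

struct demo_ring {
        unsigned int desc_count;  /* total descriptors in the ring */
        unsigned int desc_avail;  /* descriptors free for posting */
        unsigned int to_use;      /* next index to post a buffer at */
        unsigned int to_clean;    /* next index to service/complete */
};

static unsigned int ring_used(const struct demo_ring *r)
{
        /* one descriptor is always left unused */
        return r->desc_count - r->desc_avail - 1;
}

static int ring_post(struct demo_ring *r)
{
        if (r->desc_avail <= 1)   /* keep one slot free */
                return -1;
        r->to_use = (r->to_use + 1) % r->desc_count;
        r->desc_avail--;
        return 0;
}

static void ring_service(struct demo_ring *r)
{
        r->to_clean = (r->to_clean + 1) % r->desc_count;
        r->desc_avail++;
}

int main(void)
{
        struct demo_ring r = {
                .desc_count = RING_SIZE,
                .desc_avail = RING_SIZE - 1,  /* one slot reserved */
        };

        while (ring_post(&r) == 0)
                ;
        printf("used after fill: %u\n", ring_used(&r));
        ring_service(&r);
        printf("used after one completion: %u\n", ring_used(&r));
        return 0;
}
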
rq                712 drivers/scsi/gdth.h         u8          rq;                     /* IRQ/DRQ configuration */
rq                982 drivers/scsi/hpsa.c 	struct reply_queue_buffer *rq = &h->reply_queue[q];
rq                990 drivers/scsi/hpsa.c 	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
rq                991 drivers/scsi/hpsa.c 		a = rq->head[rq->current_entry];
rq                992 drivers/scsi/hpsa.c 		rq->current_entry++;
rq                998 drivers/scsi/hpsa.c 	if (rq->current_entry == h->max_commands) {
rq                999 drivers/scsi/hpsa.c 		rq->current_entry = 0;
rq               1000 drivers/scsi/hpsa.c 		rq->wraparound ^= 1;
rq               5926 drivers/scsi/hpsa.c 	int rq;
rq               5945 drivers/scsi/hpsa.c 	for (rq = first_queue; rq <= last_queue; rq++) {
rq               5946 drivers/scsi/hpsa.c 		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
rq                490 drivers/scsi/hpsa.h 	struct reply_queue_buffer *rq = &h->reply_queue[q];
rq                506 drivers/scsi/hpsa.h 	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
rq                507 drivers/scsi/hpsa.h 		register_value = rq->head[rq->current_entry];
rq                508 drivers/scsi/hpsa.h 		rq->current_entry++;
rq                514 drivers/scsi/hpsa.h 	if (rq->current_entry == h->max_commands) {
rq                515 drivers/scsi/hpsa.h 		rq->current_entry = 0;
rq                516 drivers/scsi/hpsa.h 		rq->wraparound ^= 1;
rq                592 drivers/scsi/hpsa.h 	struct reply_queue_buffer *rq = &h->reply_queue[q];
rq                596 drivers/scsi/hpsa.h 	register_value = rq->head[rq->current_entry];
rq                598 drivers/scsi/hpsa.h 		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
rq                599 drivers/scsi/hpsa.h 		if (++rq->current_entry == rq->size)
rq                600 drivers/scsi/hpsa.h 			rq->current_entry = 0;
rq                608 drivers/scsi/hpsa.h 		writel((q << 24) | rq->current_entry, h->vaddr +
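
The hpsa reply-queue entries above accept an entry only when bit 0 of the reply word matches the consumer's wraparound flag, advance current_entry, and flip the flag each time the cursor wraps past max_commands. Below is a minimal single-threaded sketch of that phase-bit scheme; the tag-in-upper-bits encoding, the initial phase value, and the simulated producer are choices made only for this demo.

#include <stdio.h>
#include <stdint.h>

#define QDEPTH 4

struct reply_queue {
        uint32_t head[QDEPTH];        /* reply words, bit 0 is the phase bit */
        unsigned int current_entry;   /* consumer cursor */
        unsigned int wraparound;      /* expected phase bit, flips on wrap */
};

/* Producer: write a tag with the current phase encoded in bit 0. */
static void produce(struct reply_queue *q, unsigned int slot,
                    unsigned int phase, uint32_t tag)
{
        q->head[slot] = (tag << 1) | (phase & 1);
}

/* Consumer: return the tag if the next slot carries the expected phase. */
static int consume(struct reply_queue *q, uint32_t *tag)
{
        uint32_t v = q->head[q->current_entry];

        if ((v & 1) != q->wraparound)
                return 0;                       /* nothing new */
        *tag = v >> 1;
        if (++q->current_entry == QDEPTH) {     /* wrap and flip the phase */
                q->current_entry = 0;
                q->wraparound ^= 1;
        }
        return 1;
}

int main(void)
{
        struct reply_queue q = { .wraparound = 1 };
        uint32_t tag;
        unsigned int i;

        for (i = 0; i < QDEPTH; i++)
                produce(&q, i, 1, 100 + i);     /* first pass uses phase 1 */
        while (consume(&q, &tag))
                printf("completed tag %u\n", tag);
        return 0;
}
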
rq               9219 drivers/scsi/lpfc/lpfc_init.c lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
rq               9225 drivers/scsi/lpfc/lpfc_init.c 	rqbp = rq->rqbp;
rq               4646 drivers/scsi/mpt3sas/mpt3sas_scsih.c 	struct request *rq = scmd->request;
rq               4716 drivers/scsi/mpt3sas/mpt3sas_scsih.c 		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
rq                 34 drivers/scsi/scsi_debugfs.c void scsi_show_rq(struct seq_file *m, struct request *rq)
rq                 36 drivers/scsi/scsi_debugfs.c 	struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
rq                 38 drivers/scsi/scsi_debugfs.c 	int timeout_ms = jiffies_to_msecs(rq->timeout);
rq                  5 drivers/scsi/scsi_debugfs.h void scsi_show_rq(struct seq_file *m, struct request *rq);
rq               1975 drivers/scsi/scsi_error.c 	struct scsi_request *rq;
rq               1980 drivers/scsi/scsi_error.c 	rq = scsi_req(req);
rq               1982 drivers/scsi/scsi_error.c 	rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
rq               1983 drivers/scsi/scsi_error.c 	rq->cmd[1] = 0;
rq               1984 drivers/scsi/scsi_error.c 	rq->cmd[2] = 0;
rq               1985 drivers/scsi/scsi_error.c 	rq->cmd[3] = 0;
rq               1986 drivers/scsi/scsi_error.c 	rq->cmd[4] = SCSI_REMOVAL_PREVENT;
rq               1987 drivers/scsi/scsi_error.c 	rq->cmd[5] = 0;
rq               1988 drivers/scsi/scsi_error.c 	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
rq               1992 drivers/scsi/scsi_error.c 	rq->retries = 5;
rq               2332 drivers/scsi/scsi_error.c 	struct request *rq;
rq               2347 drivers/scsi/scsi_error.c 	rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) +
rq               2349 drivers/scsi/scsi_error.c 	if (!rq)
rq               2351 drivers/scsi/scsi_error.c 	blk_rq_init(NULL, rq);
rq               2353 drivers/scsi/scsi_error.c 	scmd = (struct scsi_cmnd *)(rq + 1);
rq               2355 drivers/scsi/scsi_error.c 	scmd->request = rq;
rq               2356 drivers/scsi/scsi_error.c 	scmd->cmnd = scsi_req(rq)->cmd;
rq               2416 drivers/scsi/scsi_error.c 	kfree(rq);
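
The scsi_error.c entries above build the PREVENT ALLOW MEDIUM REMOVAL CDB byte by byte before issuing it. A small standalone sketch of the same 6-byte CDB construction follows; the opcode (0x1e) and the PREVENT field in byte 4 follow the SCSI spec, while the helper name and the hex dump are only for illustration.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e   /* 6-byte CDB opcode */

/* Fill a 6-byte CDB; prevent=1 locks the medium, 0 unlocks it. */
static size_t build_prevent_allow_cdb(uint8_t cdb[6], int prevent)
{
        memset(cdb, 0, 6);
        cdb[0] = PREVENT_ALLOW_MEDIUM_REMOVAL;
        cdb[4] = prevent ? 0x01 : 0x00;     /* PREVENT field, byte 4 */
        return 6;                           /* CDB length */
}

int main(void)
{
        uint8_t cdb[6];
        size_t len = build_prevent_allow_cdb(cdb, 1);
        size_t i;

        for (i = 0; i < len; i++)
                printf("%02x ", cdb[i]);
        printf("\n");
        return 0;
}
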
rq                255 drivers/scsi/scsi_lib.c 	struct scsi_request *rq;
rq                263 drivers/scsi/scsi_lib.c 	rq = scsi_req(req);
rq                269 drivers/scsi/scsi_lib.c 	rq->cmd_len = COMMAND_SIZE(cmd[0]);
rq                270 drivers/scsi/scsi_lib.c 	memcpy(rq->cmd, cmd, rq->cmd_len);
rq                271 drivers/scsi/scsi_lib.c 	rq->retries = retries;
rq                287 drivers/scsi/scsi_lib.c 	if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
rq                288 drivers/scsi/scsi_lib.c 		memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);
rq                291 drivers/scsi/scsi_lib.c 		*resid = rq->resid_len;
rq                292 drivers/scsi/scsi_lib.c 	if (sense && rq->sense_len)
rq                293 drivers/scsi/scsi_lib.c 		memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
rq                295 drivers/scsi/scsi_lib.c 		scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
rq                296 drivers/scsi/scsi_lib.c 	ret = rq->result;
rq               1022 drivers/scsi/scsi_lib.c 	struct request *rq = cmd->request;
rq               1025 drivers/scsi/scsi_lib.c 	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
rq               1028 drivers/scsi/scsi_lib.c 	ret = scsi_init_sgtable(rq, &cmd->sdb);
rq               1032 drivers/scsi/scsi_lib.c 	if (blk_integrity_rq(rq)) {
rq               1046 drivers/scsi/scsi_lib.c 		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
rq               1055 drivers/scsi/scsi_lib.c 		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
rq               1058 drivers/scsi/scsi_lib.c 		BUG_ON(count > queue_max_integrity_segments(rq->q));
rq               1082 drivers/scsi/scsi_lib.c static void scsi_initialize_rq(struct request *rq)
rq               1084 drivers/scsi/scsi_lib.c 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
rq               1096 drivers/scsi/scsi_lib.c static void scsi_cleanup_rq(struct request *rq)
rq               1098 drivers/scsi/scsi_lib.c 	if (rq->rq_flags & RQF_DONTPREP) {
rq               1099 drivers/scsi/scsi_lib.c 		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
rq               1100 drivers/scsi/scsi_lib.c 		rq->rq_flags &= ~RQF_DONTPREP;
rq               1138 drivers/scsi/scsi_lib.c 	struct request *rq = blk_mq_rq_from_pdu(cmd);
rq               1143 drivers/scsi/scsi_lib.c 	if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
rq               1145 drivers/scsi/scsi_lib.c 		scsi_initialize_rq(rq);
rq               1452 drivers/scsi/scsi_lib.c static void scsi_softirq_done(struct request *rq)
rq               1454 drivers/scsi/scsi_lib.c 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
rq               1455 drivers/scsi/scsi_lib.c 	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
rq               1647 drivers/scsi/scsi_lib.c 	struct request *req = bd->rq;
rq               1739 drivers/scsi/scsi_lib.c static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
rq               1744 drivers/scsi/scsi_lib.c 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
rq               1764 drivers/scsi/scsi_lib.c static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
rq               1767 drivers/scsi/scsi_lib.c 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
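
Several scsi_lib.c entries above recover the driver command from a request with blk_mq_rq_to_pdu(), and the scsi_error.c fake-request path shows the same layout explicitly with scmd = (struct scsi_cmnd *)(rq + 1): the per-command payload sits immediately after the generic request header in one allocation. A minimal sketch of that header-plus-PDU layout; the structure names here are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct demo_request {        /* generic header, like struct request */
        int tag;
};

struct demo_cmd {            /* driver payload, like struct scsi_cmnd */
        unsigned char cdb[16];
};

/* The PDU lives directly after the header in the same allocation. */
static struct demo_cmd *rq_to_pdu(struct demo_request *rq)
{
        return (struct demo_cmd *)(rq + 1);
}

static struct demo_request *pdu_to_rq(struct demo_cmd *cmd)
{
        return (struct demo_request *)cmd - 1;
}

int main(void)
{
        struct demo_request *rq;
        struct demo_cmd *cmd;

        rq = calloc(1, sizeof(*rq) + sizeof(*cmd));  /* one allocation */
        if (!rq)
                return 1;
        rq->tag = 7;

        cmd = rq_to_pdu(rq);
        cmd->cdb[0] = 0x12;                          /* INQUIRY opcode */
        printf("tag=%d cdb0=0x%02x\n", pdu_to_rq(cmd)->tag, cmd->cdb[0]);

        free(rq);
        return 0;
}
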
rq               1300 drivers/scsi/scsi_sysfs.c 	struct request_queue *rq = sdev->request_queue;
rq               1338 drivers/scsi/scsi_sysfs.c 	error = bsg_scsi_register_queue(rq, &sdev->sdev_gendev);
rq                827 drivers/scsi/sd.c 	struct request *rq = cmd->request;
rq                828 drivers/scsi/sd.c 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
rq                829 drivers/scsi/sd.c 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
rq                833 drivers/scsi/sd.c 	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
rq                834 drivers/scsi/sd.c 	if (!rq->special_vec.bv_page)
rq                836 drivers/scsi/sd.c 	clear_highpage(rq->special_vec.bv_page);
rq                837 drivers/scsi/sd.c 	rq->special_vec.bv_offset = 0;
rq                838 drivers/scsi/sd.c 	rq->special_vec.bv_len = data_len;
rq                839 drivers/scsi/sd.c 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
rq                845 drivers/scsi/sd.c 	buf = page_address(rq->special_vec.bv_page);
rq                853 drivers/scsi/sd.c 	rq->timeout = SD_TIMEOUT;
rq                862 drivers/scsi/sd.c 	struct request *rq = cmd->request;
rq                863 drivers/scsi/sd.c 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
rq                864 drivers/scsi/sd.c 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
rq                867 drivers/scsi/sd.c 	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
rq                868 drivers/scsi/sd.c 	if (!rq->special_vec.bv_page)
rq                870 drivers/scsi/sd.c 	clear_highpage(rq->special_vec.bv_page);
rq                871 drivers/scsi/sd.c 	rq->special_vec.bv_offset = 0;
rq                872 drivers/scsi/sd.c 	rq->special_vec.bv_len = data_len;
rq                873 drivers/scsi/sd.c 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
rq                884 drivers/scsi/sd.c 	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
rq                893 drivers/scsi/sd.c 	struct request *rq = cmd->request;
rq                894 drivers/scsi/sd.c 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
rq                895 drivers/scsi/sd.c 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
rq                898 drivers/scsi/sd.c 	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
rq                899 drivers/scsi/sd.c 	if (!rq->special_vec.bv_page)
rq                901 drivers/scsi/sd.c 	clear_highpage(rq->special_vec.bv_page);
rq                902 drivers/scsi/sd.c 	rq->special_vec.bv_offset = 0;
rq                903 drivers/scsi/sd.c 	rq->special_vec.bv_len = data_len;
rq                904 drivers/scsi/sd.c 	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
rq                915 drivers/scsi/sd.c 	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
rq                922 drivers/scsi/sd.c 	struct request *rq = cmd->request;
rq                924 drivers/scsi/sd.c 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
rq                925 drivers/scsi/sd.c 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
rq                926 drivers/scsi/sd.c 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
rq                928 drivers/scsi/sd.c 	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
rq               1017 drivers/scsi/sd.c 	struct request *rq = cmd->request;
rq               1019 drivers/scsi/sd.c 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
rq               1020 drivers/scsi/sd.c 	struct bio *bio = rq->bio;
rq               1021 drivers/scsi/sd.c 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
rq               1022 drivers/scsi/sd.c 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
rq               1030 drivers/scsi/sd.c 	rq->timeout = SD_WRITE_SAME_TIMEOUT;
rq               1057 drivers/scsi/sd.c 	rq->__data_len = sdp->sector_size;
rq               1059 drivers/scsi/sd.c 	rq->__data_len = blk_rq_bytes(rq);
rq               1066 drivers/scsi/sd.c 	struct request *rq = cmd->request;
rq               1076 drivers/scsi/sd.c 	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
rq               1163 drivers/scsi/sd.c 	struct request *rq = cmd->request;
rq               1165 drivers/scsi/sd.c 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
rq               1166 drivers/scsi/sd.c 	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
rq               1168 drivers/scsi/sd.c 	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
rq               1170 drivers/scsi/sd.c 	bool write = rq_data_dir(rq) == WRITE;
rq               1185 drivers/scsi/sd.c 	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
rq               1190 drivers/scsi/sd.c 	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
rq               1211 drivers/scsi/sd.c 	fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
rq               1251 drivers/scsi/sd.c 				     (unsigned long long)blk_rq_pos(rq),
rq               1252 drivers/scsi/sd.c 				     blk_rq_sectors(rq)));
rq               1257 drivers/scsi/sd.c 				     blk_rq_sectors(rq)));
rq               1268 drivers/scsi/sd.c 	struct request *rq = cmd->request;
rq               1270 drivers/scsi/sd.c 	switch (req_op(rq)) {
rq               1272 drivers/scsi/sd.c 		switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
rq               1305 drivers/scsi/sd.c 	struct request *rq = SCpnt->request;
rq               1308 drivers/scsi/sd.c 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
rq               1309 drivers/scsi/sd.c 		mempool_free(rq->special_vec.bv_page, sd_page_pool);
rq               1311 drivers/scsi/sd.c 	if (SCpnt->cmnd != scsi_req(rq)->cmd) {
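
The sd.c entries above convert the block layer's 512-byte sector units into device logical blocks (via sectors_to_logical) before building READ/WRITE, UNMAP, and WRITE SAME CDBs. A small sketch of that conversion follows; the shift-based form assumes a power-of-two logical block size of at least 512 bytes, and the helper name is made up for the example.

#include <stdio.h>
#include <stdint.h>

/* Convert 512-byte block-layer sectors into device logical blocks. */
static uint64_t sectors_to_logical_blocks(uint64_t sector,
                                          unsigned int logical_block_size)
{
        unsigned int shift = 0;

        /* equivalent to ilog2(logical_block_size) - 9 */
        while ((512u << shift) < logical_block_size)
                shift++;
        return sector >> shift;
}

int main(void)
{
        /* A request starting at sector 80 on a 4096-byte-block device
         * addresses logical block 10 (4096 / 512 = 8 sectors per block). */
        printf("lba = %llu\n",
               (unsigned long long)sectors_to_logical_blocks(80, 4096));
        return 0;
}
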
rq                218 drivers/scsi/sd_zbc.c 	struct request *rq = cmd->request;
rq                219 drivers/scsi/sd_zbc.c 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
rq                220 drivers/scsi/sd_zbc.c 	sector_t sector = blk_rq_pos(rq);
rq                243 drivers/scsi/sd_zbc.c 	rq->timeout = SD_TIMEOUT;
rq                264 drivers/scsi/sd_zbc.c 	struct request *rq = cmd->request;
rq                266 drivers/scsi/sd_zbc.c 	if (req_op(rq) == REQ_OP_ZONE_RESET &&
rq                275 drivers/scsi/sd_zbc.c 		rq->rq_flags |= RQF_QUIET;
rq                130 drivers/scsi/sg.c 	struct request *rq;
rq                175 drivers/scsi/sg.c static void sg_rq_end_io(struct request *rq, blk_status_t status);
rq                823 drivers/scsi/sg.c 			scsi_req_free_cmd(scsi_req(srp->rq));
rq                824 drivers/scsi/sg.c 			blk_put_request(srp->rq);
rq                825 drivers/scsi/sg.c 			srp->rq = NULL;
rq                840 drivers/scsi/sg.c 	srp->rq->timeout = timeout;
rq                843 drivers/scsi/sg.c 			      srp->rq, at_head, sg_rq_end_io);
rq               1320 drivers/scsi/sg.c sg_rq_end_io(struct request *rq, blk_status_t status)
rq               1322 drivers/scsi/sg.c 	struct sg_request *srp = rq->end_io_data;
rq               1323 drivers/scsi/sg.c 	struct scsi_request *req = scsi_req(rq);
rq               1390 drivers/scsi/sg.c 	srp->rq = NULL;
rq               1391 drivers/scsi/sg.c 	scsi_req_free_cmd(scsi_req(rq));
rq               1392 drivers/scsi/sg.c 	blk_put_request(rq);
rq               1705 drivers/scsi/sg.c 	struct request *rq;
rq               1740 drivers/scsi/sg.c 	rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
rq               1742 drivers/scsi/sg.c 	if (IS_ERR(rq)) {
rq               1744 drivers/scsi/sg.c 		return PTR_ERR(rq);
rq               1746 drivers/scsi/sg.c 	req = scsi_req(rq);
rq               1753 drivers/scsi/sg.c 	srp->rq = rq;
rq               1754 drivers/scsi/sg.c 	rq->end_io_data = srp;
rq               1814 drivers/scsi/sg.c 		res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
rq               1817 drivers/scsi/sg.c 		res = blk_rq_map_user(q, rq, md, hp->dxferp,
rq               1821 drivers/scsi/sg.c 		srp->bio = rq->bio;
rq               1845 drivers/scsi/sg.c 	if (srp->rq) {
rq               1846 drivers/scsi/sg.c 		scsi_req_free_cmd(scsi_req(srp->rq));
rq               1847 drivers/scsi/sg.c 		blk_put_request(srp->rq);
rq                392 drivers/scsi/sr.c 	struct request *rq = SCpnt->request;
rq                398 drivers/scsi/sr.c 	cd = scsi_cd(rq->rq_disk);
rq                409 drivers/scsi/sr.c 			"Finishing %u sectors\n", blk_rq_sectors(rq)));
rq                441 drivers/scsi/sr.c 	switch (req_op(rq)) {
rq                452 drivers/scsi/sr.c 		blk_dump_rq_flags(rq, "Unknown sr command");
rq                475 drivers/scsi/sr.c 	if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
rq                486 drivers/scsi/sr.c 					(rq_data_dir(rq) == WRITE) ?
rq                488 drivers/scsi/sr.c 					this_count, blk_rq_sectors(rq)));
rq                491 drivers/scsi/sr.c 	block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
rq                519 drivers/scsi/st.c 	struct scsi_request *rq = scsi_req(req);
rq                523 drivers/scsi/st.c 	STp->buffer->cmdstat.midlevel_result = SRpnt->result = rq->result;
rq                524 drivers/scsi/st.c 	STp->buffer->cmdstat.residual = rq->resid_len;
rq                529 drivers/scsi/st.c 	if (rq->sense_len)
rq                530 drivers/scsi/st.c 		memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
rq                543 drivers/scsi/st.c 	struct scsi_request *rq;
rq                553 drivers/scsi/st.c 	rq = scsi_req(req);
rq                579 drivers/scsi/st.c 	rq->cmd_len = COMMAND_SIZE(cmd[0]);
rq                580 drivers/scsi/st.c 	memset(rq->cmd, 0, BLK_MAX_CDB);
rq                581 drivers/scsi/st.c 	memcpy(rq->cmd, cmd, rq->cmd_len);
rq                583 drivers/scsi/st.c 	rq->retries = retries;
rq                305 drivers/scsi/ufs/ufshcd.c 	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
rq                307 drivers/scsi/ufs/ufshcd.c 	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
rq                313 drivers/scsi/ufs/ufshcd.c 	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
rq                315 drivers/scsi/ufs/ufshcd.c 	trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
rq                511 drivers/scsi/virtio_scsi.c 	struct request *rq = sc->request;
rq                516 drivers/scsi/virtio_scsi.c 	if (!rq || !scsi_prot_sg_count(sc))
rq                519 drivers/scsi/virtio_scsi.c 	bi = blk_get_integrity(rq->rq_disk);
rq                524 drivers/scsi/virtio_scsi.c 							blk_rq_sectors(rq)));
rq                528 drivers/scsi/virtio_scsi.c 							blk_rq_sectors(rq)));
rq                 54 drivers/staging/ks7010/ks_wlan_net.c static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
rq               2454 drivers/staging/ks7010/ks_wlan_net.c static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
rq               2458 drivers/staging/ks7010/ks_wlan_net.c 	struct iwreq *wrq = (struct iwreq *)rq;
rq                 56 drivers/staging/octeon/ethernet-mdio.c int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                 64 drivers/staging/octeon/ethernet-mdio.c 	return phy_mii_ioctl(dev->phydev, rq, cmd);
rq                 27 drivers/staging/octeon/ethernet-mdio.h int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq                 24 drivers/staging/rtl8188eu/include/osdep_intf.h int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               3045 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               3047 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 	struct iwreq *wrq = (struct iwreq *)rq;
rq               3060 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c 		ret = rtw_android_priv_cmd(dev, rq, cmd);
rq               3522 drivers/staging/rtl8192u/r8192U_core.c static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               3525 drivers/staging/rtl8192u/r8192U_core.c 	struct iwreq *wrq = (struct iwreq *)rq;
rq                 30 drivers/staging/rtl8712/osdep_intf.h int r871x_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               2184 drivers/staging/rtl8712/rtl871x_ioctl_linux.c int r871x_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               2186 drivers/staging/rtl8712/rtl871x_ioctl_linux.c 	struct iwreq *wrq = (struct iwreq *)rq;
rq                 56 drivers/staging/rtl8723bs/include/osdep_intf.h int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq               5201 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq               5203 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c 	struct iwreq *wrq = (struct iwreq *)rq;
rq               1734 drivers/tty/ipwireless/hardware.c 	struct ipw_rx_packet *rp, *rq;
rq               1750 drivers/tty/ipwireless/hardware.c 	list_for_each_entry_safe(rp, rq, &hw->rx_queue, queue) {
rq               1755 drivers/tty/ipwireless/hardware.c 	list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) {
rq                 79 drivers/usb/misc/uss720.c 	struct uss720_async_request *rq = container_of(kref, struct uss720_async_request, ref_count);
rq                 80 drivers/usb/misc/uss720.c 	struct parport_uss720_private *priv = rq->priv;
rq                 83 drivers/usb/misc/uss720.c 	if (likely(rq->urb))
rq                 84 drivers/usb/misc/uss720.c 		usb_free_urb(rq->urb);
rq                 85 drivers/usb/misc/uss720.c 	kfree(rq->dr);
rq                 87 drivers/usb/misc/uss720.c 	list_del_init(&rq->asynclist);
rq                 89 drivers/usb/misc/uss720.c 	kfree(rq);
rq                 97 drivers/usb/misc/uss720.c 	struct uss720_async_request *rq;
rq                102 drivers/usb/misc/uss720.c 	rq = urb->context;
rq                103 drivers/usb/misc/uss720.c 	priv = rq->priv;
rq                108 drivers/usb/misc/uss720.c 	} else if (rq->dr->bRequest == 3) {
rq                109 drivers/usb/misc/uss720.c 		memcpy(priv->reg, rq->reg, sizeof(priv->reg));
rq                115 drivers/usb/misc/uss720.c 		if (rq->reg[2] & rq->reg[1] & 0x10 && pp)
rq                118 drivers/usb/misc/uss720.c 	complete(&rq->compl);
rq                119 drivers/usb/misc/uss720.c 	kref_put(&rq->ref_count, destroy_async);
rq                127 drivers/usb/misc/uss720.c 	struct uss720_async_request *rq;
rq                136 drivers/usb/misc/uss720.c 	rq = kzalloc(sizeof(struct uss720_async_request), mem_flags);
rq                137 drivers/usb/misc/uss720.c 	if (!rq)
rq                139 drivers/usb/misc/uss720.c 	kref_init(&rq->ref_count);
rq                140 drivers/usb/misc/uss720.c 	INIT_LIST_HEAD(&rq->asynclist);
rq                141 drivers/usb/misc/uss720.c 	init_completion(&rq->compl);
rq                143 drivers/usb/misc/uss720.c 	rq->priv = priv;
rq                144 drivers/usb/misc/uss720.c 	rq->urb = usb_alloc_urb(0, mem_flags);
rq                145 drivers/usb/misc/uss720.c 	if (!rq->urb) {
rq                146 drivers/usb/misc/uss720.c 		kref_put(&rq->ref_count, destroy_async);
rq                149 drivers/usb/misc/uss720.c 	rq->dr = kmalloc(sizeof(*rq->dr), mem_flags);
rq                150 drivers/usb/misc/uss720.c 	if (!rq->dr) {
rq                151 drivers/usb/misc/uss720.c 		kref_put(&rq->ref_count, destroy_async);
rq                154 drivers/usb/misc/uss720.c 	rq->dr->bRequestType = requesttype;
rq                155 drivers/usb/misc/uss720.c 	rq->dr->bRequest = request;
rq                156 drivers/usb/misc/uss720.c 	rq->dr->wValue = cpu_to_le16(value);
rq                157 drivers/usb/misc/uss720.c 	rq->dr->wIndex = cpu_to_le16(index);
rq                158 drivers/usb/misc/uss720.c 	rq->dr->wLength = cpu_to_le16((request == 3) ? sizeof(rq->reg) : 0);
rq                159 drivers/usb/misc/uss720.c 	usb_fill_control_urb(rq->urb, usbdev, (requesttype & 0x80) ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0),
rq                160 drivers/usb/misc/uss720.c 			     (unsigned char *)rq->dr,
rq                161 drivers/usb/misc/uss720.c 			     (request == 3) ? rq->reg : NULL, (request == 3) ? sizeof(rq->reg) : 0, async_complete, rq);
rq                164 drivers/usb/misc/uss720.c 	list_add_tail(&rq->asynclist, &priv->asynclist);
rq                166 drivers/usb/misc/uss720.c 	kref_get(&rq->ref_count);
rq                167 drivers/usb/misc/uss720.c 	ret = usb_submit_urb(rq->urb, mem_flags);
rq                169 drivers/usb/misc/uss720.c 		return rq;
rq                170 drivers/usb/misc/uss720.c 	destroy_async(&rq->ref_count);
rq                177 drivers/usb/misc/uss720.c 	struct uss720_async_request *rq;
rq                182 drivers/usb/misc/uss720.c 	list_for_each_entry(rq, &priv->asynclist, asynclist) {
rq                183 drivers/usb/misc/uss720.c 		usb_unlink_urb(rq->urb);
rq                195 drivers/usb/misc/uss720.c 	struct uss720_async_request *rq;
rq                204 drivers/usb/misc/uss720.c 	rq = submit_async_request(priv, 3, 0xc0, ((unsigned int)reg) << 8, 0, mem_flags);
rq                205 drivers/usb/misc/uss720.c 	if (!rq) {
rq                211 drivers/usb/misc/uss720.c 		kref_put(&rq->ref_count, destroy_async);
rq                214 drivers/usb/misc/uss720.c 	if (wait_for_completion_timeout(&rq->compl, HZ)) {
rq                215 drivers/usb/misc/uss720.c 		ret = rq->urb->status;
rq                220 drivers/usb/misc/uss720.c 		kref_put(&rq->ref_count, destroy_async);
rq                231 drivers/usb/misc/uss720.c 	struct uss720_async_request *rq;
rq                236 drivers/usb/misc/uss720.c 	rq = submit_async_request(priv, 4, 0x40, (((unsigned int)reg) << 8) | val, 0, mem_flags);
rq                237 drivers/usb/misc/uss720.c 	if (!rq) {
rq                242 drivers/usb/misc/uss720.c 	kref_put(&rq->ref_count, destroy_async);
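
The uss720.c entries above follow the usual kref lifetime pattern for asynchronous requests: kref_init() at allocation, an extra kref_get() just before usb_submit_urb(), and kref_put(..., destroy_async) from both the submitter and the completion handler, so whichever side finishes last frees the request. Below is a minimal user-space model of that discipline, with invented names and a plain C11 atomic counter standing in for struct kref; it is a sketch of the pattern, not the driver's code.

/* Illustrative user-space model of the kref lifetime pattern used by
 * drivers/usb/misc/uss720.c above: one reference for the submitter,
 * one for the in-flight completion; whoever drops the last one frees. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct async_request {
	atomic_int refcount;		/* models struct kref */
	int status;			/* models rq->urb->status */
};

static void request_get(struct async_request *rq)
{
	atomic_fetch_add(&rq->refcount, 1);
}

static void request_put(struct async_request *rq)
{
	if (atomic_fetch_sub(&rq->refcount, 1) == 1) {
		printf("last reference dropped, freeing request\n");
		free(rq);			/* models destroy_async() */
	}
}

static void completion_handler(struct async_request *rq)
{
	printf("completion: status=%d\n", rq->status);
	request_put(rq);		/* drop the in-flight reference */
}

int main(void)
{
	struct async_request *rq = calloc(1, sizeof(*rq));

	if (!rq)
		return 1;
	atomic_init(&rq->refcount, 1);	/* models kref_init() */
	request_get(rq);		/* extra ref taken before "submit" */
	completion_handler(rq);		/* asynchronous side finishes */
	request_put(rq);		/* submitter drops its own reference */
	return 0;
}

The key property is that the submit path never frees the request directly; it only drops its own reference, so a completion that runs before or after the submit call returns is equally safe.
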
rq               1525 drivers/video/fbdev/mx3fb.c 	struct dma_chan_request *rq = arg;
rq               1532 drivers/video/fbdev/mx3fb.c 	if (!rq)
rq               1535 drivers/video/fbdev/mx3fb.c 	dev = rq->mx3fb->dev;
rq               1538 drivers/video/fbdev/mx3fb.c 	return rq->id == chan->chan_id &&
rq               1560 drivers/video/fbdev/mx3fb.c 	struct dma_chan_request rq;
rq               1590 drivers/video/fbdev/mx3fb.c 	rq.mx3fb = mx3fb;
rq               1595 drivers/video/fbdev/mx3fb.c 	rq.id = IDMAC_SDC_0;
rq               1596 drivers/video/fbdev/mx3fb.c 	chan = dma_request_channel(mask, chan_filter, &rq);
rq                343 drivers/video/fbdev/xen-fbfront.c static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
rq                134 fs/dlm/lock.c  #define modes_compat(gr, rq) \
rq                135 fs/dlm/lock.c  	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
rq               2647 fs/dlm/lock.c  static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
rq               2649 fs/dlm/lock.c  	if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
rq               2650 fs/dlm/lock.c  	    (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
rq               2656 fs/dlm/lock.c  	if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
rq                 56 fs/erofs/compress.h int z_erofs_decompress(struct z_erofs_decompress_req *rq,
rq                 25 fs/erofs/decompressor.c 	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
rq                 27 fs/erofs/decompressor.c 	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
rq                 31 fs/erofs/decompressor.c static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
rq                 35 fs/erofs/decompressor.c 		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
rq                 44 fs/erofs/decompressor.c 		struct page *const page = rq->out[i];
rq                 54 fs/erofs/decompressor.c 			availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
rq                 81 fs/erofs/decompressor.c 		rq->out[i] = victim;
rq                 86 fs/erofs/decompressor.c static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
rq                 93 fs/erofs/decompressor.c 	struct page **in = rq->in;
rq                 96 fs/erofs/decompressor.c 	unsigned int inlen = rq->inputsize - pageofs_in;
rq                113 fs/erofs/decompressor.c static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
rq                120 fs/erofs/decompressor.c 	if (rq->inputsize > PAGE_SIZE)
rq                123 fs/erofs/decompressor.c 	src = kmap_atomic(*rq->in);
rq                128 fs/erofs/decompressor.c 	if (EROFS_SB(rq->sb)->feature_incompat &
rq                136 fs/erofs/decompressor.c 		if (inputmargin >= rq->inputsize) {
rq                143 fs/erofs/decompressor.c 	inlen = rq->inputsize - inputmargin;
rq                144 fs/erofs/decompressor.c 	if (rq->inplace_io) {
rq                145 fs/erofs/decompressor.c 		const uint oend = (rq->pageofs_out +
rq                146 fs/erofs/decompressor.c 				   rq->outputsize) & ~PAGE_MASK;
rq                147 fs/erofs/decompressor.c 		const uint nr = PAGE_ALIGN(rq->pageofs_out +
rq                148 fs/erofs/decompressor.c 					   rq->outputsize) >> PAGE_SHIFT;
rq                150 fs/erofs/decompressor.c 		if (rq->partial_decoding || !support_0padding ||
rq                151 fs/erofs/decompressor.c 		    rq->out[nr - 1] != rq->in[0] ||
rq                152 fs/erofs/decompressor.c 		    rq->inputsize - oend <
rq                154 fs/erofs/decompressor.c 			src = generic_copy_inplace_data(rq, src, inputmargin);
rq                161 fs/erofs/decompressor.c 					  inlen, rq->outputsize,
rq                162 fs/erofs/decompressor.c 					  rq->outputsize);
rq                164 fs/erofs/decompressor.c 		erofs_err(rq->sb, "failed to decompress, in[%u, %u] out[%u]",
rq                165 fs/erofs/decompressor.c 			  inlen, inputmargin, rq->outputsize);
rq                170 fs/erofs/decompressor.c 			       16, 1, out, rq->outputsize, true);
rq                219 fs/erofs/decompressor.c static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
rq                223 fs/erofs/decompressor.c 		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
rq                224 fs/erofs/decompressor.c 	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
rq                229 fs/erofs/decompressor.c 	if (nrpages_out == 1 && !rq->inplace_io) {
rq                230 fs/erofs/decompressor.c 		DBG_BUGON(!*rq->out);
rq                231 fs/erofs/decompressor.c 		dst = kmap_atomic(*rq->out);
rq                241 fs/erofs/decompressor.c 	if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
rq                246 fs/erofs/decompressor.c 		rq->inplace_io = false;
rq                247 fs/erofs/decompressor.c 		ret = alg->decompress(rq, dst);
rq                249 fs/erofs/decompressor.c 			copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
rq                250 fs/erofs/decompressor.c 					  rq->outputsize);
rq                256 fs/erofs/decompressor.c 	ret = alg->prepare_destpages(rq, pagepool);
rq                260 fs/erofs/decompressor.c 		dst = page_address(*rq->out);
rq                267 fs/erofs/decompressor.c 		dst = vm_map_ram(rq->out, nrpages_out, -1, PAGE_KERNEL);
rq                281 fs/erofs/decompressor.c 	ret = alg->decompress(rq, dst + rq->pageofs_out);
rq                290 fs/erofs/decompressor.c static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
rq                294 fs/erofs/decompressor.c 		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
rq                295 fs/erofs/decompressor.c 	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
rq                303 fs/erofs/decompressor.c 	if (rq->out[0] == *rq->in) {
rq                308 fs/erofs/decompressor.c 	src = kmap_atomic(*rq->in);
rq                309 fs/erofs/decompressor.c 	if (rq->out[0]) {
rq                310 fs/erofs/decompressor.c 		dst = kmap_atomic(rq->out[0]);
rq                311 fs/erofs/decompressor.c 		memcpy(dst + rq->pageofs_out, src, righthalf);
rq                316 fs/erofs/decompressor.c 		DBG_BUGON(!rq->out[1]);
rq                317 fs/erofs/decompressor.c 		if (rq->out[1] == *rq->in) {
rq                318 fs/erofs/decompressor.c 			memmove(src, src + righthalf, rq->pageofs_out);
rq                320 fs/erofs/decompressor.c 			dst = kmap_atomic(rq->out[1]);
rq                321 fs/erofs/decompressor.c 			memcpy(dst, src + righthalf, rq->pageofs_out);
rq                329 fs/erofs/decompressor.c int z_erofs_decompress(struct z_erofs_decompress_req *rq,
rq                332 fs/erofs/decompressor.c 	if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
rq                333 fs/erofs/decompressor.c 		return z_erofs_shifted_transform(rq, pagepool);
rq                334 fs/erofs/decompressor.c 	return z_erofs_decompress_generic(rq, pagepool);
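
The erofs lines above revolve around a small per-algorithm ops table: z_erofs_decompress_generic() indexes decompressors by rq->alg and calls the entry's prepare_destpages() and decompress() callbacks, while shifted (uncompressed) clusters bypass the table via z_erofs_shifted_transform(). A self-contained sketch of that dispatch shape follows; all types and names are invented for illustration and only the shape mirrors fs/erofs/decompressor.c.

/* Illustrative sketch of the decompressor dispatch visible above:
 * an ops table indexed by rq->alg, each entry supplying
 * prepare_destpages() and decompress() callbacks. */
#include <stdio.h>

struct demo_req {
	int alg;			/* models rq->alg */
	unsigned int outputsize;
};

struct demo_decompressor {
	const char *name;
	int (*prepare_destpages)(struct demo_req *rq);
	int (*decompress)(struct demo_req *rq, unsigned char *out);
};

static int lz4_prepare(struct demo_req *rq) { (void)rq; return 0; }

static int lz4_decompress(struct demo_req *rq, unsigned char *out)
{
	(void)out;
	printf("lz4: decompressing %u bytes\n", rq->outputsize);
	return 0;
}

static const struct demo_decompressor decompressors[] = {
	{ .name = "lz4", .prepare_destpages = lz4_prepare,
	  .decompress = lz4_decompress },
};

static int demo_decompress(struct demo_req *rq, unsigned char *out)
{
	const struct demo_decompressor *alg = decompressors + rq->alg;

	/* the generic path first sets up destination pages, then decompresses */
	if (alg->prepare_destpages(rq))
		return -1;
	return alg->decompress(rq, out);
}

int main(void)
{
	struct demo_req rq = { .alg = 0, .outputsize = 4096 };
	unsigned char out[4096];

	return demo_decompress(&rq, out);
}

Keeping the algorithm-specific page setup behind the same table as the decompression call lets the generic path stay ignorant of constraints such as the LZ4_MAX_DISTANCE_PAGES window handled in z_erofs_lz4_prepare_destpages().
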
rq                218 fs/nfsd/blocklayout.c 	struct request *rq;
rq                239 fs/nfsd/blocklayout.c 	rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
rq                240 fs/nfsd/blocklayout.c 	if (IS_ERR(rq)) {
rq                244 fs/nfsd/blocklayout.c 	req = scsi_req(rq);
rq                246 fs/nfsd/blocklayout.c 	error = blk_rq_map_kern(q, rq, buf, bufflen, GFP_KERNEL);
rq                257 fs/nfsd/blocklayout.c 	blk_execute_rq(rq->q, NULL, rq, 1);
rq                268 fs/nfsd/blocklayout.c 			blk_put_request(rq);
rq                311 fs/nfsd/blocklayout.c 	blk_put_request(rq);
rq                123 fs/nfsd/nfsd.h static inline int nfsd_v4client(struct svc_rqst *rq)
rq                125 fs/nfsd/nfsd.h 	return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
rq                 81 include/drm/gpu_scheduler.h 	struct drm_sched_rq		*rq;
rq                306 include/drm/gpu_scheduler.h void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
rq                308 include/drm/gpu_scheduler.h void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
rq                119 include/linux/blk-mq.h 	struct request *rq;
rq                354 include/linux/blk-mq.h static inline void *blk_mq_rq_to_pdu(struct request *rq)
rq                368 include/linux/blk-mq.h 		struct request *rq)
rq                370 include/linux/blk-mq.h 	if (rq->tag != -1)
rq                371 include/linux/blk-mq.h 		return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
rq                373 include/linux/blk-mq.h 	return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
rq                377 include/linux/blk-mq.h static inline void blk_mq_cleanup_rq(struct request *rq)
rq                379 include/linux/blk-mq.h 	if (rq->q->mq_ops->cleanup_rq)
rq                380 include/linux/blk-mq.h 		rq->q->mq_ops->cleanup_rq(rq);
rq                259 include/linux/blkdev.h static inline bool blk_rq_is_scsi(struct request *rq)
rq                261 include/linux/blkdev.h 	return blk_op_is_scsi(req_op(rq));
rq                264 include/linux/blkdev.h static inline bool blk_rq_is_private(struct request *rq)
rq                266 include/linux/blkdev.h 	return blk_op_is_private(req_op(rq));
rq                269 include/linux/blkdev.h static inline bool blk_rq_is_passthrough(struct request *rq)
rq                271 include/linux/blkdev.h 	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
rq                657 include/linux/blkdev.h #define blk_noretry_request(rq) \
rq                658 include/linux/blkdev.h 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
rq                668 include/linux/blkdev.h static inline bool blk_account_rq(struct request *rq)
rq                670 include/linux/blkdev.h 	return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
rq                675 include/linux/blkdev.h #define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)
rq                677 include/linux/blkdev.h #define rq_dma_dir(rq) \
rq                678 include/linux/blkdev.h 	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
rq                739 include/linux/blkdev.h static inline bool rq_is_sync(struct request *rq)
rq                741 include/linux/blkdev.h 	return op_is_sync(rq->cmd_flags);
rq                744 include/linux/blkdev.h static inline bool rq_mergeable(struct request *rq)
rq                746 include/linux/blkdev.h 	if (blk_rq_is_passthrough(rq))
rq                749 include/linux/blkdev.h 	if (req_op(rq) == REQ_OP_FLUSH)
rq                752 include/linux/blkdev.h 	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
rq                755 include/linux/blkdev.h 	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
rq                757 include/linux/blkdev.h 	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
rq                821 include/linux/blkdev.h #define __rq_for_each_bio(_bio, rq)	\
rq                822 include/linux/blkdev.h 	if ((rq->bio))			\
rq                823 include/linux/blkdev.h 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
rq                841 include/linux/blkdev.h extern void rq_flush_dcache_pages(struct request *rq);
rq                843 include/linux/blkdev.h static inline void rq_flush_dcache_pages(struct request *rq)
rq                852 include/linux/blkdev.h extern void blk_rq_init(struct request_queue *q, struct request *rq);
rq                857 include/linux/blkdev.h extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
rq                861 include/linux/blkdev.h extern void blk_rq_unprep_clone(struct request *rq);
rq                863 include/linux/blkdev.h 				     struct request *rq);
rq                864 include/linux/blkdev.h extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
rq                925 include/linux/blkdev.h static inline sector_t blk_rq_pos(const struct request *rq)
rq                927 include/linux/blkdev.h 	return rq->__sector;
rq                930 include/linux/blkdev.h static inline unsigned int blk_rq_bytes(const struct request *rq)
rq                932 include/linux/blkdev.h 	return rq->__data_len;
rq                935 include/linux/blkdev.h static inline int blk_rq_cur_bytes(const struct request *rq)
rq                937 include/linux/blkdev.h 	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
rq                940 include/linux/blkdev.h extern unsigned int blk_rq_err_bytes(const struct request *rq);
rq                942 include/linux/blkdev.h static inline unsigned int blk_rq_sectors(const struct request *rq)
rq                944 include/linux/blkdev.h 	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
rq                947 include/linux/blkdev.h static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
rq                949 include/linux/blkdev.h 	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
rq                952 include/linux/blkdev.h static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
rq                954 include/linux/blkdev.h 	return rq->stats_sectors;
rq                958 include/linux/blkdev.h static inline unsigned int blk_rq_zone_no(struct request *rq)
rq                960 include/linux/blkdev.h 	return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
rq                963 include/linux/blkdev.h static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
rq                965 include/linux/blkdev.h 	return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
rq                975 include/linux/blkdev.h static inline unsigned int blk_rq_payload_bytes(struct request *rq)
rq                977 include/linux/blkdev.h 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
rq                978 include/linux/blkdev.h 		return rq->special_vec.bv_len;
rq                979 include/linux/blkdev.h 	return blk_rq_bytes(rq);
rq                986 include/linux/blkdev.h static inline struct bio_vec req_bvec(struct request *rq)
rq                988 include/linux/blkdev.h 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
rq                989 include/linux/blkdev.h 		return rq->special_vec;
rq                990 include/linux/blkdev.h 	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
rq               1023 include/linux/blkdev.h static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
rq               1026 include/linux/blkdev.h 	struct request_queue *q = rq->q;
rq               1028 include/linux/blkdev.h 	if (blk_rq_is_passthrough(rq))
rq               1032 include/linux/blkdev.h 	    req_op(rq) == REQ_OP_DISCARD ||
rq               1033 include/linux/blkdev.h 	    req_op(rq) == REQ_OP_SECURE_ERASE)
rq               1034 include/linux/blkdev.h 		return blk_queue_get_max_sectors(q, req_op(rq));
rq               1037 include/linux/blkdev.h 			blk_queue_get_max_sectors(q, req_op(rq)));
rq               1040 include/linux/blkdev.h static inline unsigned int blk_rq_count_bios(struct request *rq)
rq               1045 include/linux/blkdev.h 	__rq_for_each_bio(bio, rq)
rq               1051 include/linux/blkdev.h void blk_steal_bios(struct bio_list *list, struct request *rq);
rq               1059 include/linux/blkdev.h extern bool blk_update_request(struct request *rq, blk_status_t error,
rq               1125 include/linux/blkdev.h static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
rq               1127 include/linux/blkdev.h 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
rq               1129 include/linux/blkdev.h 	return rq->nr_phys_segments;
rq               1136 include/linux/blkdev.h static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
rq               1138 include/linux/blkdev.h 	return max_t(unsigned short, rq->nr_phys_segments, 1);
rq               1565 include/linux/blkdev.h static inline bool blk_integrity_rq(struct request *rq)
rq               1567 include/linux/blkdev.h 	return rq->cmd_flags & REQ_INTEGRITY;
rq               1608 include/linux/blkdev.h static inline struct bio_vec *rq_integrity_vec(struct request *rq)
rq               1610 include/linux/blkdev.h 	if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
rq               1612 include/linux/blkdev.h 	return rq->bio->bi_integrity->bip_vec;
rq               1622 include/linux/blkdev.h static inline int blk_integrity_rq(struct request *rq)
rq               1664 include/linux/blkdev.h static inline bool blk_integrity_merge_rq(struct request_queue *rq,
rq               1670 include/linux/blkdev.h static inline bool blk_integrity_merge_bio(struct request_queue *rq,
rq               1689 include/linux/blkdev.h static inline struct bio_vec *rq_integrity_vec(struct request *rq)
rq               1724 include/linux/blkdev.h bool blk_req_needs_zone_write_lock(struct request *rq);
rq               1725 include/linux/blkdev.h void __blk_req_zone_write_lock(struct request *rq);
rq               1726 include/linux/blkdev.h void __blk_req_zone_write_unlock(struct request *rq);
rq               1728 include/linux/blkdev.h static inline void blk_req_zone_write_lock(struct request *rq)
rq               1730 include/linux/blkdev.h 	if (blk_req_needs_zone_write_lock(rq))
rq               1731 include/linux/blkdev.h 		__blk_req_zone_write_lock(rq);
rq               1734 include/linux/blkdev.h static inline void blk_req_zone_write_unlock(struct request *rq)
rq               1736 include/linux/blkdev.h 	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
rq               1737 include/linux/blkdev.h 		__blk_req_zone_write_unlock(rq);
rq               1740 include/linux/blkdev.h static inline bool blk_req_zone_is_write_locked(struct request *rq)
rq               1742 include/linux/blkdev.h 	return rq->q->seq_zones_wlock &&
rq               1743 include/linux/blkdev.h 		test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
rq               1746 include/linux/blkdev.h static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
rq               1748 include/linux/blkdev.h 	if (!blk_req_needs_zone_write_lock(rq))
rq               1750 include/linux/blkdev.h 	return !blk_req_zone_is_write_locked(rq);
rq               1753 include/linux/blkdev.h static inline bool blk_req_needs_zone_write_lock(struct request *rq)
rq               1758 include/linux/blkdev.h static inline void blk_req_zone_write_lock(struct request *rq)
rq               1762 include/linux/blkdev.h static inline void blk_req_zone_write_unlock(struct request *rq)
rq               1765 include/linux/blkdev.h static inline bool blk_req_zone_is_write_locked(struct request *rq)
rq               1770 include/linux/blkdev.h static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
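
Almost every helper in the blkdev.h block above reduces to two request fields: blk_rq_pos() reads the starting sector (rq->__sector) and blk_rq_bytes() the remaining byte count (rq->__data_len); blk_rq_sectors() is then blk_rq_bytes() >> SECTOR_SHIFT with 512-byte sectors, and rq_data_dir() collapses the operation into READ or WRITE. A stand-alone illustration of those derivations follows; the struct and macro below are toy stand-ins, not the kernel definitions.

/* Stand-alone illustration of the size/position helpers listed above:
 * blk_rq_sectors() is blk_rq_bytes() >> SECTOR_SHIFT, with SECTOR_SHIFT = 9
 * (512-byte sectors). */
#include <stdio.h>

#define DEMO_SECTOR_SHIFT 9	/* kernel SECTOR_SHIFT: 512-byte sectors */

struct demo_request {
	unsigned long long sector;	/* models rq->__sector */
	unsigned int data_len;		/* models rq->__data_len */
	int is_write;			/* models op_is_write(req_op(rq)) */
};

static unsigned long long demo_rq_pos(const struct demo_request *rq)
{
	return rq->sector;
}

static unsigned int demo_rq_bytes(const struct demo_request *rq)
{
	return rq->data_len;
}

static unsigned int demo_rq_sectors(const struct demo_request *rq)
{
	return demo_rq_bytes(rq) >> DEMO_SECTOR_SHIFT;
}

int main(void)
{
	struct demo_request rq = { .sector = 2048, .data_len = 8192, .is_write = 1 };

	printf("%s %u sectors at sector %llu\n",
	       rq.is_write ? "WRITE" : "READ",
	       demo_rq_sectors(&rq), demo_rq_pos(&rq));
	return 0;
}

So an 8 KiB request starting at sector 2048 reports 16 sectors, which is what the example prints.
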
rq                 78 include/linux/blktrace_api.h extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
rq                 93 include/linux/blktrace_api.h # define blk_add_driver_data(q, rq, data, len)		do {} while (0)
rq                125 include/linux/blktrace_api.h static inline sector_t blk_rq_trace_sector(struct request *rq)
rq                131 include/linux/blktrace_api.h 	if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
rq                133 include/linux/blktrace_api.h 	return blk_rq_pos(rq);
rq                136 include/linux/blktrace_api.h static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
rq                138 include/linux/blktrace_api.h 	return blk_rq_is_passthrough(rq) ? 0 : blk_rq_sectors(rq);
rq                 12 include/linux/bsg.h 	int	(*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
rq                 14 include/linux/bsg.h 	int	(*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
rq                 15 include/linux/bsg.h 	void	(*free_rq)(struct request *rq);
rq                 62 include/linux/device-mapper.h 					    struct request *rq,
rq                427 include/linux/device-mapper.h union map_info *dm_get_rq_mapinfo(struct request *rq);
rq                 93 include/linux/elevator.h void elv_rqhash_del(struct request_queue *q, struct request *rq);
rq                 94 include/linux/elevator.h void elv_rqhash_add(struct request_queue *q, struct request *rq);
rq                 95 include/linux/elevator.h void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
rq                163 include/linux/elevator.h #define rq_end_sector(rq)	(blk_rq_pos(rq) + blk_rq_sectors(rq))
rq                167 include/linux/elevator.h #define rq_fifo_clear(rq)	list_del_init(&(rq)->queuelist)
rq                194 include/linux/fsl/ptp_qoriq.h 		     struct ptp_clock_request *rq, int on);
rq                 56 include/linux/ide.h static inline struct ide_request *ide_req(struct request *rq)
rq                 58 include/linux/ide.h 	return blk_mq_rq_to_pdu(rq);
rq                 61 include/linux/ide.h static inline bool ata_misc_request(struct request *rq)
rq                 63 include/linux/ide.h 	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
rq                 66 include/linux/ide.h static inline bool ata_taskfile_request(struct request *rq)
rq                 68 include/linux/ide.h 	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
rq                 71 include/linux/ide.h static inline bool ata_pc_request(struct request *rq)
rq                 73 include/linux/ide.h 	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
rq                 76 include/linux/ide.h static inline bool ata_sense_request(struct request *rq)
rq                 78 include/linux/ide.h 	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
rq                 81 include/linux/ide.h static inline bool ata_pm_request(struct request *rq)
rq                 83 include/linux/ide.h 	return blk_rq_is_private(rq) &&
rq                 84 include/linux/ide.h 		(ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
rq                 85 include/linux/ide.h 		 ide_req(rq)->type == ATA_PRIV_PM_RESUME);
rq                354 include/linux/ide.h 	struct request		*rq;		/* copy of request */
rq                382 include/linux/ide.h 	struct request *rq;
rq                537 include/linux/ide.h 	struct request		*rq;	/* current request */
rq                808 include/linux/ide.h 	struct request *rq;
rq               1189 include/linux/ide.h void ide_prep_sense(ide_drive_t *drive, struct request *rq);
rq               1224 include/linux/ide.h extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
rq                 51 include/linux/mii.h static inline struct mii_ioctl_data *if_mii(struct ifreq *rq)
rq                 53 include/linux/mii.h 	return (struct mii_ioctl_data *) &rq->ifr_ifru;
rq                591 include/linux/mlx5/qp.h 				struct mlx5_core_qp *rq);
rq                593 include/linux/mlx5/qp.h 				  struct mlx5_core_qp *rq);
rq                 32 include/linux/mtd/blktrans.h 	struct request_queue *rq;
rq               1182 include/linux/pci.h int pcie_set_readrq(struct pci_dev *dev, int rq);
rq                 55 include/linux/sched.h struct rq;
rq               1992 include/linux/sched.h const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
rq               1993 include/linux/sched.h const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
rq               1994 include/linux/sched.h const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
rq               1996 include/linux/sched.h int sched_trace_rq_cpu(struct rq *rq);
rq                 18 include/linux/sched/nohz.h void calc_load_nohz_remote(struct rq *rq);
rq                 22 include/linux/sched/nohz.h static inline void calc_load_nohz_remote(struct rq *rq) { }
rq                132 include/linux/sunrpc/svcauth.h 	int	(*accept)(struct svc_rqst *rq, __be32 *authp);
rq                133 include/linux/sunrpc/svcauth.h 	int	(*release)(struct svc_rqst *rq);
rq                135 include/linux/sunrpc/svcauth.h 	int	(*set_client)(struct svc_rqst *rq);
rq                 40 include/linux/t10-pi.h static inline u32 t10_pi_ref_tag(struct request *rq)
rq                 42 include/linux/t10-pi.h 	unsigned int shift = ilog2(queue_logical_block_size(rq->q));
rq                 45 include/linux/t10-pi.h 	if (rq->q->integrity.interval_exp)
rq                 46 include/linux/t10-pi.h 		shift = rq->q->integrity.interval_exp;
rq                 48 include/linux/t10-pi.h 	return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
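
t10_pi_ref_tag() above derives the T10 PI reference tag from the request's starting 512-byte sector. With a 512-byte logical block size (or protection interval), shift is 9, shift - SECTOR_SHIFT is 0 and the tag is simply blk_rq_pos(rq) truncated to 32 bits; with a 4096-byte interval, shift is 12 and the tag becomes blk_rq_pos(rq) >> 3, i.e. the 4 KiB block number. As a worked example, sector 1000000 on a 4 KiB-formatted device yields a reference tag of 1000000 >> 3 = 125000.
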
rq                125 include/net/xdp_sock.h void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
rq                173 include/net/xdp_sock.h 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
rq                175 include/net/xdp_sock.h 	if (rq->length >= cnt)
rq                178 include/net/xdp_sock.h 	return xsk_umem_has_addrs(umem, cnt - rq->length);
rq                183 include/net/xdp_sock.h 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
rq                185 include/net/xdp_sock.h 	if (!rq->length)
rq                188 include/net/xdp_sock.h 	*addr = rq->handles[rq->length - 1];
rq                194 include/net/xdp_sock.h 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
rq                196 include/net/xdp_sock.h 	if (!rq->length)
rq                199 include/net/xdp_sock.h 		rq->length--;
rq                204 include/net/xdp_sock.h 	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
rq                206 include/net/xdp_sock.h 	rq->handles[rq->length++] = addr;
rq                280 include/net/xdp_sock.h static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
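
The xdp_sock.h reuse-queue helpers above implement the fill-queue recycle cache as a plain LIFO over an array: peek reads handles[length - 1], discard just decrements length, and the recycle path stores at handles[length++]. Below is a toy, self-contained model of that stack discipline; the struct name and fixed capacity are invented for the example, the kernel structure is sized elsewhere at allocation time.

/* Toy model of the LIFO reuse queue visible in include/net/xdp_sock.h above:
 * peek looks at handles[length - 1], discard drops it, recycle pushes. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_CAP 8

struct demo_fq_reuse {
	unsigned int length;
	unsigned long long handles[DEMO_CAP];
};

static bool demo_peek(struct demo_fq_reuse *rq, unsigned long long *addr)
{
	if (!rq->length)
		return false;
	*addr = rq->handles[rq->length - 1];
	return true;
}

static void demo_discard(struct demo_fq_reuse *rq)
{
	if (rq->length)
		rq->length--;
}

static void demo_recycle(struct demo_fq_reuse *rq, unsigned long long addr)
{
	if (rq->length < DEMO_CAP)
		rq->handles[rq->length++] = addr;
}

int main(void)
{
	struct demo_fq_reuse rq = { 0 };
	unsigned long long addr;

	demo_recycle(&rq, 0x1000);
	demo_recycle(&rq, 0x2000);
	if (demo_peek(&rq, &addr))
		printf("reusing 0x%llx\n", addr);	/* 0x2000: last in, first out */
	demo_discard(&rq);
	return 0;
}
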
rq                449 include/rdma/rdmavt_qp.h 	struct rvt_rq rq;
rq                540 include/rdma/rdmavt_qp.h static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
rq                543 include/rdma/rdmavt_qp.h 		((char *)rq->kwq->curr_wq +
rq                545 include/rdma/rdmavt_qp.h 		  rq->max_sge * sizeof(struct ib_sge)) * n);
rq                968 include/rdma/rdmavt_qp.h static inline void rvt_free_rq(struct rvt_rq *rq)
rq                970 include/rdma/rdmavt_qp.h 	kvfree(rq->kwq);
rq                971 include/rdma/rdmavt_qp.h 	rq->kwq = NULL;
rq                972 include/rdma/rdmavt_qp.h 	vfree(rq->wq);
rq                973 include/rdma/rdmavt_qp.h 	rq->wq = NULL;
rq                 20 include/scsi/scsi_request.h static inline struct scsi_request *scsi_req(struct request *rq)
rq                 22 include/scsi/scsi_request.h 	return blk_mq_rq_to_pdu(rq);
rq                 76 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct request *rq),
rq                 78 include/trace/events/block.h 	TP_ARGS(q, rq),
rq                 89 include/trace/events/block.h 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
rq                 90 include/trace/events/block.h 		__entry->sector    = blk_rq_trace_sector(rq);
rq                 91 include/trace/events/block.h 		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
rq                 93 include/trace/events/block.h 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
rq                118 include/trace/events/block.h 	TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),
rq                120 include/trace/events/block.h 	TP_ARGS(rq, error, nr_bytes),
rq                132 include/trace/events/block.h 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
rq                133 include/trace/events/block.h 		__entry->sector    = blk_rq_pos(rq);
rq                137 include/trace/events/block.h 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
rq                150 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct request *rq),
rq                152 include/trace/events/block.h 	TP_ARGS(q, rq),
rq                165 include/trace/events/block.h 		__entry->dev	   = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
rq                166 include/trace/events/block.h 		__entry->sector    = blk_rq_trace_sector(rq);
rq                167 include/trace/events/block.h 		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
rq                168 include/trace/events/block.h 		__entry->bytes     = blk_rq_bytes(rq);
rq                170 include/trace/events/block.h 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
rq                194 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct request *rq),
rq                196 include/trace/events/block.h 	TP_ARGS(q, rq)
rq                209 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct request *rq),
rq                211 include/trace/events/block.h 	TP_ARGS(q, rq)
rq                292 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
rq                294 include/trace/events/block.h 	TP_ARGS(q, rq, bio),
rq                329 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
rq                331 include/trace/events/block.h 	TP_ARGS(q, rq, bio)
rq                345 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
rq                347 include/trace/events/block.h 	TP_ARGS(q, rq, bio)
rq                605 include/trace/events/block.h 	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
rq                608 include/trace/events/block.h 	TP_ARGS(q, rq, dev, from),
rq                621 include/trace/events/block.h 		__entry->dev		= disk_devt(rq->rq_disk);
rq                622 include/trace/events/block.h 		__entry->sector		= blk_rq_pos(rq);
rq                623 include/trace/events/block.h 		__entry->nr_sector	= blk_rq_sectors(rq);
rq                626 include/trace/events/block.h 		__entry->nr_bios	= blk_rq_count_bios(rq);
rq                627 include/trace/events/block.h 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
rq                 64 include/trace/events/nbd.h 		 struct request *rq),
rq                 66 include/trace/events/nbd.h 	TP_ARGS(nbd_request, index, rq),
rq                 77 include/trace/events/nbd.h 		__entry->request = rq;
rq                 97 include/trace/events/nbd.h 		 struct request *rq),
rq                 99 include/trace/events/nbd.h 	TP_ARGS(nbd_request, index, rq),
rq                609 include/trace/events/sched.h 	TP_PROTO(struct rq *rq),
rq                610 include/trace/events/sched.h 	TP_ARGS(rq));
rq                613 include/trace/events/sched.h 	TP_PROTO(struct rq *rq),
rq                614 include/trace/events/sched.h 	TP_ARGS(rq));
rq                617 include/trace/events/sched.h 	TP_PROTO(struct rq *rq),
rq                618 include/trace/events/sched.h 	TP_ARGS(rq));
rq                284 kernel/livepatch/transition.c 	struct rq *rq;
rq                307 kernel/livepatch/transition.c 	rq = task_rq_lock(task, &flags);
rq                309 kernel/livepatch/transition.c 	if (task_running(rq, task) && task != current) {
rq                326 kernel/livepatch/transition.c 	task_rq_unlock(rq, task, &flags);
rq                 37 kernel/sched/core.c DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
rq                 78 kernel/sched/core.c struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
rq                 79 kernel/sched/core.c 	__acquires(rq->lock)
rq                 81 kernel/sched/core.c 	struct rq *rq;
rq                 86 kernel/sched/core.c 		rq = task_rq(p);
rq                 87 kernel/sched/core.c 		raw_spin_lock(&rq->lock);
rq                 88 kernel/sched/core.c 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
rq                 89 kernel/sched/core.c 			rq_pin_lock(rq, rf);
rq                 90 kernel/sched/core.c 			return rq;
rq                 92 kernel/sched/core.c 		raw_spin_unlock(&rq->lock);
rq                102 kernel/sched/core.c struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
rq                104 kernel/sched/core.c 	__acquires(rq->lock)
rq                106 kernel/sched/core.c 	struct rq *rq;
rq                110 kernel/sched/core.c 		rq = task_rq(p);
rq                111 kernel/sched/core.c 		raw_spin_lock(&rq->lock);
rq                129 kernel/sched/core.c 		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
rq                130 kernel/sched/core.c 			rq_pin_lock(rq, rf);
rq                131 kernel/sched/core.c 			return rq;
rq                133 kernel/sched/core.c 		raw_spin_unlock(&rq->lock);
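
__task_rq_lock() and task_rq_lock() above both use the lock-and-revalidate idiom: read task_rq(p), take that runqueue's lock, then confirm the task still belongs to the locked runqueue and is not mid-migration before pinning and returning; on a lost race the lock is dropped and the loop retries. Below is a user-space analogue of just the revalidation loop, using a pthread mutex and an atomic pointer; the names are invented, and rq_pin_lock() and the migration states are not modelled.

/* User-space analogue of the lock-and-revalidate loop in task_rq_lock():
 * lock the runqueue the task currently points at, then confirm the task
 * still points at it; if it moved in the meantime, drop the lock and retry. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_rq {
	pthread_mutex_t lock;
	int cpu;
};

struct demo_task {
	_Atomic(struct demo_rq *) rq;	/* may be changed by a concurrent "migration" */
};

static struct demo_rq *demo_task_rq_lock(struct demo_task *p)
{
	struct demo_rq *rq;

	for (;;) {
		rq = atomic_load(&p->rq);
		pthread_mutex_lock(&rq->lock);
		if (atomic_load(&p->rq) == rq)	/* still on the rq we locked? */
			return rq;		/* caller now holds rq->lock */
		pthread_mutex_unlock(&rq->lock);
		/* raced with a migration; retry against the new runqueue */
	}
}

int main(void)
{
	struct demo_rq rq0 = { .lock = PTHREAD_MUTEX_INITIALIZER, .cpu = 0 };
	struct demo_task p = { &rq0 };
	struct demo_rq *rq = demo_task_rq_lock(&p);

	printf("task locked on cpu %d\n", rq->cpu);
	pthread_mutex_unlock(&rq->lock);
	return 0;
}

Without the re-check, a task migrated between the task_rq(p) read and the lock acquisition could be manipulated under the wrong runqueue's lock.
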
rq                145 kernel/sched/core.c static void update_rq_clock_task(struct rq *rq, s64 delta)
rq                154 kernel/sched/core.c 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
rq                174 kernel/sched/core.c 	rq->prev_irq_time += irq_delta;
rq                179 kernel/sched/core.c 		steal = paravirt_steal_clock(cpu_of(rq));
rq                180 kernel/sched/core.c 		steal -= rq->prev_steal_time_rq;
rq                185 kernel/sched/core.c 		rq->prev_steal_time_rq += steal;
rq                190 kernel/sched/core.c 	rq->clock_task += delta;
rq                194 kernel/sched/core.c 		update_irq_load_avg(rq, irq_delta + steal);
rq                196 kernel/sched/core.c 	update_rq_clock_pelt(rq, delta);
rq                199 kernel/sched/core.c void update_rq_clock(struct rq *rq)
rq                203 kernel/sched/core.c 	lockdep_assert_held(&rq->lock);
rq                205 kernel/sched/core.c 	if (rq->clock_update_flags & RQCF_ACT_SKIP)
rq                210 kernel/sched/core.c 		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
rq                211 kernel/sched/core.c 	rq->clock_update_flags |= RQCF_UPDATED;
rq                214 kernel/sched/core.c 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
rq                217 kernel/sched/core.c 	rq->clock += delta;
rq                218 kernel/sched/core.c 	update_rq_clock_task(rq, delta);
rq                227 kernel/sched/core.c static void hrtick_clear(struct rq *rq)
rq                229 kernel/sched/core.c 	if (hrtimer_active(&rq->hrtick_timer))
rq                230 kernel/sched/core.c 		hrtimer_cancel(&rq->hrtick_timer);
rq                239 kernel/sched/core.c 	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
rq                242 kernel/sched/core.c 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
rq                244 kernel/sched/core.c 	rq_lock(rq, &rf);
rq                245 kernel/sched/core.c 	update_rq_clock(rq);
rq                246 kernel/sched/core.c 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
rq                247 kernel/sched/core.c 	rq_unlock(rq, &rf);
rq                254 kernel/sched/core.c static void __hrtick_restart(struct rq *rq)
rq                256 kernel/sched/core.c 	struct hrtimer *timer = &rq->hrtick_timer;
rq                266 kernel/sched/core.c 	struct rq *rq = arg;
rq                269 kernel/sched/core.c 	rq_lock(rq, &rf);
rq                270 kernel/sched/core.c 	__hrtick_restart(rq);
rq                271 kernel/sched/core.c 	rq->hrtick_csd_pending = 0;
rq                272 kernel/sched/core.c 	rq_unlock(rq, &rf);
rq                280 kernel/sched/core.c void hrtick_start(struct rq *rq, u64 delay)
rq                282 kernel/sched/core.c 	struct hrtimer *timer = &rq->hrtick_timer;
rq                295 kernel/sched/core.c 	if (rq == this_rq()) {
rq                296 kernel/sched/core.c 		__hrtick_restart(rq);
rq                297 kernel/sched/core.c 	} else if (!rq->hrtick_csd_pending) {
rq                298 kernel/sched/core.c 		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
rq                299 kernel/sched/core.c 		rq->hrtick_csd_pending = 1;
rq                309 kernel/sched/core.c void hrtick_start(struct rq *rq, u64 delay)
rq                316 kernel/sched/core.c 	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
rq                321 kernel/sched/core.c static void hrtick_rq_init(struct rq *rq)
rq                324 kernel/sched/core.c 	rq->hrtick_csd_pending = 0;
rq                326 kernel/sched/core.c 	rq->hrtick_csd.flags = 0;
rq                327 kernel/sched/core.c 	rq->hrtick_csd.func = __hrtick_start;
rq                328 kernel/sched/core.c 	rq->hrtick_csd.info = rq;
rq                331 kernel/sched/core.c 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
rq                332 kernel/sched/core.c 	rq->hrtick_timer.function = hrtick;
rq                335 kernel/sched/core.c static inline void hrtick_clear(struct rq *rq)
rq                339 kernel/sched/core.c static inline void hrtick_rq_init(struct rq *rq)
rq                507 kernel/sched/core.c void resched_curr(struct rq *rq)
rq                509 kernel/sched/core.c 	struct task_struct *curr = rq->curr;
rq                512 kernel/sched/core.c 	lockdep_assert_held(&rq->lock);
rq                517 kernel/sched/core.c 	cpu = cpu_of(rq);
rq                533 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq                536 kernel/sched/core.c 	raw_spin_lock_irqsave(&rq->lock, flags);
rq                538 kernel/sched/core.c 		resched_curr(rq);
rq                539 kernel/sched/core.c 	raw_spin_unlock_irqrestore(&rq->lock, flags);
rq                592 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq                597 kernel/sched/core.c 	if (set_nr_and_not_polling(rq->idle))
rq                662 kernel/sched/core.c bool sched_can_stop_tick(struct rq *rq)
rq                667 kernel/sched/core.c 	if (rq->dl.dl_nr_running)
rq                674 kernel/sched/core.c 	if (rq->rt.rr_nr_running) {
rq                675 kernel/sched/core.c 		if (rq->rt.rr_nr_running == 1)
rq                685 kernel/sched/core.c 	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
rq                694 kernel/sched/core.c 	if (rq->nr_running > 1)
rq                829 kernel/sched/core.c uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
rq                838 kernel/sched/core.c 		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
rq                845 kernel/sched/core.c static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
rq                849 kernel/sched/core.c 	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
rq                852 kernel/sched/core.c 	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
rq                856 kernel/sched/core.c unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
rq                859 kernel/sched/core.c 	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
rq                873 kernel/sched/core.c 	return uclamp_idle_value(rq, clamp_id, clamp_value);
rq                944 kernel/sched/core.c static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
rq                947 kernel/sched/core.c 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
rq                951 kernel/sched/core.c 	lockdep_assert_held(&rq->lock);
rq                960 kernel/sched/core.c 	uclamp_idle_reset(rq, clamp_id, uc_se->value);
rq                982 kernel/sched/core.c static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
rq                985 kernel/sched/core.c 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
rq                991 kernel/sched/core.c 	lockdep_assert_held(&rq->lock);
rq               1015 kernel/sched/core.c 		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
rq               1020 kernel/sched/core.c static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
rq               1028 kernel/sched/core.c 		uclamp_rq_inc_id(rq, p, clamp_id);
rq               1031 kernel/sched/core.c 	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
rq               1032 kernel/sched/core.c 		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
rq               1035 kernel/sched/core.c static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
rq               1043 kernel/sched/core.c 		uclamp_rq_dec_id(rq, p, clamp_id);
rq               1050 kernel/sched/core.c 	struct rq *rq;
rq               1060 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
rq               1069 kernel/sched/core.c 		uclamp_rq_dec_id(rq, p, clamp_id);
rq               1070 kernel/sched/core.c 		uclamp_rq_inc_id(rq, p, clamp_id);
rq               1073 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
rq               1272 kernel/sched/core.c static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { }
rq               1273 kernel/sched/core.c static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
rq               1285 kernel/sched/core.c static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
rq               1288 kernel/sched/core.c 		update_rq_clock(rq);
rq               1291 kernel/sched/core.c 		sched_info_queued(rq, p);
rq               1295 kernel/sched/core.c 	uclamp_rq_inc(rq, p);
rq               1296 kernel/sched/core.c 	p->sched_class->enqueue_task(rq, p, flags);
rq               1299 kernel/sched/core.c static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
rq               1302 kernel/sched/core.c 		update_rq_clock(rq);
rq               1305 kernel/sched/core.c 		sched_info_dequeued(rq, p);
rq               1309 kernel/sched/core.c 	uclamp_rq_dec(rq, p);
rq               1310 kernel/sched/core.c 	p->sched_class->dequeue_task(rq, p, flags);
rq               1313 kernel/sched/core.c void activate_task(struct rq *rq, struct task_struct *p, int flags)
rq               1316 kernel/sched/core.c 		rq->nr_uninterruptible--;
rq               1318 kernel/sched/core.c 	enqueue_task(rq, p, flags);
rq               1323 kernel/sched/core.c void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
rq               1328 kernel/sched/core.c 		rq->nr_uninterruptible++;
rq               1330 kernel/sched/core.c 	dequeue_task(rq, p, flags);
rq               1399 kernel/sched/core.c static inline void check_class_changed(struct rq *rq, struct task_struct *p,
rq               1405 kernel/sched/core.c 			prev_class->switched_from(rq, p);
rq               1407 kernel/sched/core.c 		p->sched_class->switched_to(rq, p);
rq               1409 kernel/sched/core.c 		p->sched_class->prio_changed(rq, p, oldprio);
rq               1412 kernel/sched/core.c void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
rq               1416 kernel/sched/core.c 	if (p->sched_class == rq->curr->sched_class) {
rq               1417 kernel/sched/core.c 		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
rq               1420 kernel/sched/core.c 			if (class == rq->curr->sched_class)
rq               1423 kernel/sched/core.c 				resched_curr(rq);
rq               1433 kernel/sched/core.c 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
rq               1434 kernel/sched/core.c 		rq_clock_skip_update(rq);
rq               1484 kernel/sched/core.c static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
rq               1487 kernel/sched/core.c 	lockdep_assert_held(&rq->lock);
rq               1490 kernel/sched/core.c 	dequeue_task(rq, p, DEQUEUE_NOCLOCK);
rq               1492 kernel/sched/core.c 	rq_unlock(rq, rf);
rq               1494 kernel/sched/core.c 	rq = cpu_rq(new_cpu);
rq               1496 kernel/sched/core.c 	rq_lock(rq, rf);
rq               1498 kernel/sched/core.c 	enqueue_task(rq, p, 0);
rq               1500 kernel/sched/core.c 	check_preempt_curr(rq, p, 0);
rq               1502 kernel/sched/core.c 	return rq;
rq               1519 kernel/sched/core.c static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
rq               1524 kernel/sched/core.c 		return rq;
rq               1526 kernel/sched/core.c 	update_rq_clock(rq);
rq               1527 kernel/sched/core.c 	rq = move_queued_task(rq, rf, p, dest_cpu);
rq               1529 kernel/sched/core.c 	return rq;
rq               1541 kernel/sched/core.c 	struct rq *rq = this_rq();
rq               1557 kernel/sched/core.c 	rq_lock(rq, &rf);
rq               1563 kernel/sched/core.c 	if (task_rq(p) == rq) {
rq               1565 kernel/sched/core.c 			rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
rq               1569 kernel/sched/core.c 	rq_unlock(rq, &rf);
rq               1588 kernel/sched/core.c 	struct rq *rq = task_rq(p);
rq               1594 kernel/sched/core.c 	running = task_current(rq, p);
rq               1601 kernel/sched/core.c 		lockdep_assert_held(&rq->lock);
rq               1602 kernel/sched/core.c 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
rq               1605 kernel/sched/core.c 		put_prev_task(rq, p);
rq               1610 kernel/sched/core.c 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
rq               1612 kernel/sched/core.c 		set_next_task(rq, p);
rq               1630 kernel/sched/core.c 	struct rq *rq;
rq               1633 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
rq               1634 kernel/sched/core.c 	update_rq_clock(rq);
rq               1677 kernel/sched/core.c 	if (task_running(rq, p) || p->state == TASK_WAKING) {
rq               1680 kernel/sched/core.c 		task_rq_unlock(rq, p, &rf);
rq               1681 kernel/sched/core.c 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
rq               1688 kernel/sched/core.c 		rq = move_queued_task(rq, &rf, p, dest_cpu);
rq               1691 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
rq               1758 kernel/sched/core.c 		struct rq *src_rq, *dst_rq;
rq               1793 kernel/sched/core.c 	struct rq *src_rq, *dst_rq;
rq               1892 kernel/sched/core.c 	struct rq *rq;
rq               1901 kernel/sched/core.c 		rq = task_rq(p);
rq               1914 kernel/sched/core.c 		while (task_running(rq, p)) {
rq               1925 kernel/sched/core.c 		rq = task_rq_lock(p, &rf);
rq               1927 kernel/sched/core.c 		running = task_running(rq, p);
rq               1932 kernel/sched/core.c 		task_rq_unlock(rq, p, &rf);
rq               2173 kernel/sched/core.c 	struct rq *rq;
rq               2178 kernel/sched/core.c 	rq = this_rq();
rq               2181 kernel/sched/core.c 	if (cpu == rq->cpu) {
rq               2182 kernel/sched/core.c 		__schedstat_inc(rq->ttwu_local);
rq               2189 kernel/sched/core.c 		for_each_domain(rq->cpu, sd) {
rq               2202 kernel/sched/core.c 	__schedstat_inc(rq->ttwu_count);
rq               2212 kernel/sched/core.c static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
rq               2215 kernel/sched/core.c 	check_preempt_curr(rq, p, wake_flags);
rq               2225 kernel/sched/core.c 		rq_unpin_lock(rq, rf);
rq               2226 kernel/sched/core.c 		p->sched_class->task_woken(rq, p);
rq               2227 kernel/sched/core.c 		rq_repin_lock(rq, rf);
rq               2230 kernel/sched/core.c 	if (rq->idle_stamp) {
rq               2231 kernel/sched/core.c 		u64 delta = rq_clock(rq) - rq->idle_stamp;
rq               2232 kernel/sched/core.c 		u64 max = 2*rq->max_idle_balance_cost;
rq               2234 kernel/sched/core.c 		update_avg(&rq->avg_idle, delta);
rq               2236 kernel/sched/core.c 		if (rq->avg_idle > max)
rq               2237 kernel/sched/core.c 			rq->avg_idle = max;
rq               2239 kernel/sched/core.c 		rq->idle_stamp = 0;
rq               2245 kernel/sched/core.c ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
rq               2250 kernel/sched/core.c 	lockdep_assert_held(&rq->lock);
rq               2254 kernel/sched/core.c 		rq->nr_uninterruptible--;
rq               2260 kernel/sched/core.c 	activate_task(rq, p, en_flags);
rq               2261 kernel/sched/core.c 	ttwu_do_wakeup(rq, p, wake_flags, rf);
rq               2273 kernel/sched/core.c 	struct rq *rq;
rq               2276 kernel/sched/core.c 	rq = __task_rq_lock(p, &rf);
rq               2279 kernel/sched/core.c 		update_rq_clock(rq);
rq               2280 kernel/sched/core.c 		ttwu_do_wakeup(rq, p, wake_flags, &rf);
rq               2283 kernel/sched/core.c 	__task_rq_unlock(rq, &rf);
rq               2291 kernel/sched/core.c 	struct rq *rq = this_rq();
rq               2292 kernel/sched/core.c 	struct llist_node *llist = llist_del_all(&rq->wake_list);
rq               2299 kernel/sched/core.c 	rq_lock_irqsave(rq, &rf);
rq               2300 kernel/sched/core.c 	update_rq_clock(rq);
rq               2303 kernel/sched/core.c 		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
rq               2305 kernel/sched/core.c 	rq_unlock_irqrestore(rq, &rf);
rq               2348 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq               2353 kernel/sched/core.c 		if (!set_nr_if_polling(rq->idle))
rq               2362 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq               2367 kernel/sched/core.c 	if (!is_idle_task(rcu_dereference(rq->curr)))
rq               2370 kernel/sched/core.c 	if (set_nr_if_polling(rq->idle)) {
rq               2373 kernel/sched/core.c 		rq_lock_irqsave(rq, &rf);
rq               2374 kernel/sched/core.c 		if (is_idle_task(rq->curr))
rq               2377 kernel/sched/core.c 		rq_unlock_irqrestore(rq, &rf);
rq               2392 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq               2403 kernel/sched/core.c 	rq_lock(rq, &rf);
rq               2404 kernel/sched/core.c 	update_rq_clock(rq);
rq               2405 kernel/sched/core.c 	ttwu_do_activate(rq, p, wake_flags, &rf);
rq               2406 kernel/sched/core.c 	rq_unlock(rq, &rf);
rq               2942 kernel/sched/core.c 	struct rq *rq;
rq               2958 kernel/sched/core.c 	rq = __task_rq_lock(p, &rf);
rq               2959 kernel/sched/core.c 	update_rq_clock(rq);
rq               2962 kernel/sched/core.c 	activate_task(rq, p, ENQUEUE_NOCLOCK);
rq               2964 kernel/sched/core.c 	check_preempt_curr(rq, p, WF_FORK);
rq               2971 kernel/sched/core.c 		rq_unpin_lock(rq, &rf);
rq               2972 kernel/sched/core.c 		p->sched_class->task_woken(rq, p);
rq               2973 kernel/sched/core.c 		rq_repin_lock(rq, &rf);
rq               2976 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
rq               3095 kernel/sched/core.c prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
rq               3103 kernel/sched/core.c 	rq_unpin_lock(rq, rf);
rq               3104 kernel/sched/core.c 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
rq               3107 kernel/sched/core.c 	rq->lock.owner = next;
rq               3111 kernel/sched/core.c static inline void finish_lock_switch(struct rq *rq)
rq               3118 kernel/sched/core.c 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
rq               3119 kernel/sched/core.c 	raw_spin_unlock_irq(&rq->lock);
rq               3148 kernel/sched/core.c prepare_task_switch(struct rq *rq, struct task_struct *prev,
rq               3152 kernel/sched/core.c 	sched_info_switch(rq, prev, next);
rq               3179 kernel/sched/core.c static struct rq *finish_task_switch(struct task_struct *prev)
rq               3180 kernel/sched/core.c 	__releases(rq->lock)
rq               3182 kernel/sched/core.c 	struct rq *rq = this_rq();
rq               3183 kernel/sched/core.c 	struct mm_struct *mm = rq->prev_mm;
rq               3202 kernel/sched/core.c 	rq->prev_mm = NULL;
rq               3219 kernel/sched/core.c 	finish_lock_switch(rq);
rq               3257 kernel/sched/core.c 	return rq;
rq               3263 kernel/sched/core.c static void __balance_callback(struct rq *rq)
rq               3266 kernel/sched/core.c 	void (*func)(struct rq *rq);
rq               3269 kernel/sched/core.c 	raw_spin_lock_irqsave(&rq->lock, flags);
rq               3270 kernel/sched/core.c 	head = rq->balance_callback;
rq               3271 kernel/sched/core.c 	rq->balance_callback = NULL;
rq               3273 kernel/sched/core.c 		func = (void (*)(struct rq *))head->func;
rq               3278 kernel/sched/core.c 		func(rq);
rq               3280 kernel/sched/core.c 	raw_spin_unlock_irqrestore(&rq->lock, flags);
rq               3283 kernel/sched/core.c static inline void balance_callback(struct rq *rq)
rq               3285 kernel/sched/core.c 	if (unlikely(rq->balance_callback))
rq               3286 kernel/sched/core.c 		__balance_callback(rq);
rq               3291 kernel/sched/core.c static inline void balance_callback(struct rq *rq)
rq               3302 kernel/sched/core.c 	__releases(rq->lock)
rq               3304 kernel/sched/core.c 	struct rq *rq;
rq               3315 kernel/sched/core.c 	rq = finish_task_switch(prev);
rq               3316 kernel/sched/core.c 	balance_callback(rq);
rq               3328 kernel/sched/core.c static __always_inline struct rq *
rq               3329 kernel/sched/core.c context_switch(struct rq *rq, struct task_struct *prev,
rq               3332 kernel/sched/core.c 	prepare_task_switch(rq, prev, next);
rq               3357 kernel/sched/core.c 		membarrier_switch_mm(rq, prev->active_mm, next->mm);
rq               3370 kernel/sched/core.c 			rq->prev_mm = prev->active_mm;
rq               3375 kernel/sched/core.c 	rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
rq               3377 kernel/sched/core.c 	prepare_lock_switch(rq, next, rf);
rq               3545 kernel/sched/core.c 	struct rq *rq;
rq               3564 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
rq               3570 kernel/sched/core.c 	if (task_current(rq, p) && task_on_rq_queued(p)) {
rq               3572 kernel/sched/core.c 		update_rq_clock(rq);
rq               3573 kernel/sched/core.c 		p->sched_class->update_curr(rq);
rq               3576 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
rq               3588 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq               3589 kernel/sched/core.c 	struct task_struct *curr = rq->curr;
rq               3594 kernel/sched/core.c 	rq_lock(rq, &rf);
rq               3596 kernel/sched/core.c 	update_rq_clock(rq);
rq               3597 kernel/sched/core.c 	curr->sched_class->task_tick(rq, curr, 0);
rq               3598 kernel/sched/core.c 	calc_global_load_tick(rq);
rq               3599 kernel/sched/core.c 	psi_task_tick(rq);
rq               3601 kernel/sched/core.c 	rq_unlock(rq, &rf);
rq               3606 kernel/sched/core.c 	rq->idle_balance = idle_cpu(cpu);
rq               3607 kernel/sched/core.c 	trigger_load_balance(rq);
rq               3653 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq               3669 kernel/sched/core.c 	rq_lock_irq(rq, &rf);
rq               3670 kernel/sched/core.c 	curr = rq->curr;
rq               3674 kernel/sched/core.c 	update_rq_clock(rq);
rq               3681 kernel/sched/core.c 		delta = rq_clock_task(rq) - curr->se.exec_start;
rq               3684 kernel/sched/core.c 	curr->sched_class->task_tick(rq, curr, 0);
rq               3686 kernel/sched/core.c 	calc_load_nohz_remote(rq);
rq               3688 kernel/sched/core.c 	rq_unlock_irq(rq, &rf);
rq               3904 kernel/sched/core.c pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq               3917 kernel/sched/core.c 		   rq->nr_running == rq->cfs.h_nr_running)) {
rq               3919 kernel/sched/core.c 		p = fair_sched_class.pick_next_task(rq, prev, rf);
rq               3925 kernel/sched/core.c 			p = idle_sched_class.pick_next_task(rq, prev, rf);
rq               3941 kernel/sched/core.c 		if (class->balance(rq, prev, rf))
rq               3946 kernel/sched/core.c 	put_prev_task(rq, prev);
rq               3949 kernel/sched/core.c 		p = class->pick_next_task(rq, NULL, NULL);
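The pick_next_task() hits show two paths: a fast path that picks directly from CFS when every runnable task on the runqueue is a fair task (falling back to the idle class if CFS returns nothing), and a slow path that walks the scheduling classes in priority order until one hands back a task. The sketch below illustrates only the priority-ordered walk, assuming a plain array of classes rather than the kernel's linked class list.

/* Sketch of picking the next task by querying scheduling classes in
 * priority order (assumption: classes kept in a simple array). */
#include <stddef.h>
#include <stdio.h>

struct rq;
struct task_struct { const char *comm; };

struct sched_class {
	const char *name;
	struct task_struct *(*pick_next_task)(struct rq *rq);
};

static struct task_struct *pick_none(struct rq *rq)
{
	(void)rq;
	return NULL;
}

static struct task_struct fair_task = { .comm = "cfs-task" };

static struct task_struct *pick_fair(struct rq *rq)
{
	(void)rq;
	return &fair_task;
}

/* Highest priority first (abridged: deadline, realtime, fair). */
static const struct sched_class classes[] = {
	{ "dl",   pick_none },
	{ "rt",   pick_none },
	{ "fair", pick_fair },
};

static struct task_struct *pick_next_task(struct rq *rq)
{
	size_t i;
	struct task_struct *p;

	for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		p = classes[i].pick_next_task(rq);
		if (p) {
			printf("picked %s from class %s\n", p->comm, classes[i].name);
			return p;
		}
	}
	return NULL;	/* the kernel's idle class would always return a task */
}

int main(void)
{
	pick_next_task(NULL);
	return 0;
}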
rq               4002 kernel/sched/core.c 	struct rq *rq;
rq               4006 kernel/sched/core.c 	rq = cpu_rq(cpu);
rq               4007 kernel/sched/core.c 	prev = rq->curr;
rq               4012 kernel/sched/core.c 		hrtick_clear(rq);
rq               4025 kernel/sched/core.c 	rq_lock(rq, &rf);
rq               4029 kernel/sched/core.c 	rq->clock_update_flags <<= 1;
rq               4030 kernel/sched/core.c 	update_rq_clock(rq);
rq               4037 kernel/sched/core.c 			deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
rq               4040 kernel/sched/core.c 				atomic_inc(&rq->nr_iowait);
rq               4047 kernel/sched/core.c 	next = pick_next_task(rq, prev, &rf);
rq               4052 kernel/sched/core.c 		rq->nr_switches++;
rq               4057 kernel/sched/core.c 		RCU_INIT_POINTER(rq->curr, next);
rq               4077 kernel/sched/core.c 		rq = context_switch(rq, prev, next, &rf);
rq               4079 kernel/sched/core.c 		rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
rq               4080 kernel/sched/core.c 		rq_unlock_irq(rq, &rf);
rq               4083 kernel/sched/core.c 	balance_callback(rq);
rq               4376 kernel/sched/core.c 	struct rq *rq;
rq               4387 kernel/sched/core.c 	rq = __task_rq_lock(p, &rf);
rq               4388 kernel/sched/core.c 	update_rq_clock(rq);
rq               4419 kernel/sched/core.c 	if (unlikely(p == rq->idle)) {
rq               4420 kernel/sched/core.c 		WARN_ON(p != rq->curr);
rq               4433 kernel/sched/core.c 	running = task_current(rq, p);
rq               4435 kernel/sched/core.c 		dequeue_task(rq, p, queue_flag);
rq               4437 kernel/sched/core.c 		put_prev_task(rq, p);
rq               4473 kernel/sched/core.c 		enqueue_task(rq, p, queue_flag);
rq               4475 kernel/sched/core.c 		set_next_task(rq, p);
rq               4477 kernel/sched/core.c 	check_class_changed(rq, p, prev_class, oldprio);
rq               4481 kernel/sched/core.c 	__task_rq_unlock(rq, &rf);
rq               4483 kernel/sched/core.c 	balance_callback(rq);
rq               4498 kernel/sched/core.c 	struct rq *rq;
rq               4506 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
rq               4507 kernel/sched/core.c 	update_rq_clock(rq);
rq               4520 kernel/sched/core.c 	running = task_current(rq, p);
rq               4522 kernel/sched/core.c 		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
rq               4524 kernel/sched/core.c 		put_prev_task(rq, p);
rq               4533 kernel/sched/core.c 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
rq               4538 kernel/sched/core.c 		if (delta < 0 || (delta > 0 && task_running(rq, p)))
rq               4539 kernel/sched/core.c 			resched_curr(rq);
rq               4542 kernel/sched/core.c 		set_next_task(rq, p);
rq               4544 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
rq               4618 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq               4620 kernel/sched/core.c 	if (rq->curr != rq->idle)
rq               4623 kernel/sched/core.c 	if (rq->nr_running)
rq               4627 kernel/sched/core.c 	if (!llist_empty(&rq->wake_list))
rq               4705 kernel/sched/core.c static void __setscheduler(struct rq *rq, struct task_struct *p,
rq               4761 kernel/sched/core.c 	struct rq *rq;
rq               4869 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
rq               4870 kernel/sched/core.c 	update_rq_clock(rq);
rq               4875 kernel/sched/core.c 	if (p == rq->stop) {
rq               4916 kernel/sched/core.c 			cpumask_t *span = rq->rd->span;
rq               4924 kernel/sched/core.c 			    rq->rd->dl_bw.bw == 0) {
rq               4935 kernel/sched/core.c 		task_rq_unlock(rq, p, &rf);
rq               4968 kernel/sched/core.c 	running = task_current(rq, p);
rq               4970 kernel/sched/core.c 		dequeue_task(rq, p, queue_flags);
rq               4972 kernel/sched/core.c 		put_prev_task(rq, p);
rq               4976 kernel/sched/core.c 	__setscheduler(rq, p, attr, pi);
rq               4987 kernel/sched/core.c 		enqueue_task(rq, p, queue_flags);
rq               4990 kernel/sched/core.c 		set_next_task(rq, p);
rq               4992 kernel/sched/core.c 	check_class_changed(rq, p, prev_class, oldprio);
rq               4996 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
rq               5004 kernel/sched/core.c 	balance_callback(rq);
rq               5010 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
rq               5583 kernel/sched/core.c 	struct rq *rq;
rq               5585 kernel/sched/core.c 	rq = this_rq_lock_irq(&rf);
rq               5587 kernel/sched/core.c 	schedstat_inc(rq->yld_count);
rq               5588 kernel/sched/core.c 	current->sched_class->yield_task(rq);
rq               5595 kernel/sched/core.c 	rq_unlock(rq, &rf);
rq               5695 kernel/sched/core.c 	struct rq *rq, *p_rq;
rq               5700 kernel/sched/core.c 	rq = this_rq();
rq               5708 kernel/sched/core.c 	if (rq->nr_running == 1 && p_rq->nr_running == 1) {
rq               5713 kernel/sched/core.c 	double_rq_lock(rq, p_rq);
rq               5715 kernel/sched/core.c 		double_rq_unlock(rq, p_rq);
rq               5728 kernel/sched/core.c 	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
rq               5730 kernel/sched/core.c 		schedstat_inc(rq->yld_count);
rq               5735 kernel/sched/core.c 		if (preempt && rq != p_rq)
rq               5740 kernel/sched/core.c 	double_rq_unlock(rq, p_rq);
rq               5851 kernel/sched/core.c 	struct rq *rq;
rq               5867 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
rq               5870 kernel/sched/core.c 		time_slice = p->sched_class->get_rr_interval(rq, p);
rq               5871 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
rq               6018 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq               6024 kernel/sched/core.c 	raw_spin_lock(&rq->lock);
rq               6055 kernel/sched/core.c 	rq->idle = idle;
rq               6056 kernel/sched/core.c 	rcu_assign_pointer(rq->curr, idle);
rq               6061 kernel/sched/core.c 	raw_spin_unlock(&rq->lock);
rq               6149 kernel/sched/core.c 	struct rq *rq;
rq               6151 kernel/sched/core.c 	rq = task_rq_lock(p, &rf);
rq               6153 kernel/sched/core.c 	running = task_current(rq, p);
rq               6156 kernel/sched/core.c 		dequeue_task(rq, p, DEQUEUE_SAVE);
rq               6158 kernel/sched/core.c 		put_prev_task(rq, p);
rq               6163 kernel/sched/core.c 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
rq               6165 kernel/sched/core.c 		set_next_task(rq, p);
rq               6166 kernel/sched/core.c 	task_rq_unlock(rq, p, &rf);
rq               6198 kernel/sched/core.c static void calc_load_migrate(struct rq *rq)
rq               6200 kernel/sched/core.c 	long delta = calc_load_fold_active(rq, 1);
rq               6205 kernel/sched/core.c static struct task_struct *__pick_migrate_task(struct rq *rq)
rq               6211 kernel/sched/core.c 		next = class->pick_next_task(rq, NULL, NULL);
rq               6213 kernel/sched/core.c 			next->sched_class->put_prev_task(rq, next);
rq               6230 kernel/sched/core.c static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)
rq               6232 kernel/sched/core.c 	struct rq *rq = dead_rq;
rq               6233 kernel/sched/core.c 	struct task_struct *next, *stop = rq->stop;
rq               6246 kernel/sched/core.c 	rq->stop = NULL;
rq               6253 kernel/sched/core.c 	update_rq_clock(rq);
rq               6260 kernel/sched/core.c 		if (rq->nr_running == 1)
rq               6263 kernel/sched/core.c 		next = __pick_migrate_task(rq);
rq               6274 kernel/sched/core.c 		rq_unlock(rq, rf);
rq               6276 kernel/sched/core.c 		rq_relock(rq, rf);
rq               6283 kernel/sched/core.c 		if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
rq               6290 kernel/sched/core.c 		rq = __migrate_task(rq, rf, next, dest_cpu);
rq               6291 kernel/sched/core.c 		if (rq != dead_rq) {
rq               6292 kernel/sched/core.c 			rq_unlock(rq, rf);
rq               6293 kernel/sched/core.c 			rq = dead_rq;
rq               6295 kernel/sched/core.c 			rq_relock(rq, rf);
rq               6300 kernel/sched/core.c 	rq->stop = stop;
rq               6304 kernel/sched/core.c void set_rq_online(struct rq *rq)
rq               6306 kernel/sched/core.c 	if (!rq->online) {
rq               6309 kernel/sched/core.c 		cpumask_set_cpu(rq->cpu, rq->rd->online);
rq               6310 kernel/sched/core.c 		rq->online = 1;
rq               6314 kernel/sched/core.c 				class->rq_online(rq);
rq               6319 kernel/sched/core.c void set_rq_offline(struct rq *rq)
rq               6321 kernel/sched/core.c 	if (rq->online) {
rq               6326 kernel/sched/core.c 				class->rq_offline(rq);
rq               6329 kernel/sched/core.c 		cpumask_clear_cpu(rq->cpu, rq->rd->online);
rq               6330 kernel/sched/core.c 		rq->online = 0;
rq               6384 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq               6410 kernel/sched/core.c 	rq_lock_irqsave(rq, &rf);
rq               6411 kernel/sched/core.c 	if (rq->rd) {
rq               6412 kernel/sched/core.c 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
rq               6413 kernel/sched/core.c 		set_rq_online(rq);
rq               6415 kernel/sched/core.c 	rq_unlock_irqrestore(rq, &rf);
rq               6456 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq               6458 kernel/sched/core.c 	rq->calc_load_update = calc_load_update;
rq               6472 kernel/sched/core.c 	struct rq *rq = cpu_rq(cpu);
rq               6479 kernel/sched/core.c 	rq_lock_irqsave(rq, &rf);
rq               6480 kernel/sched/core.c 	if (rq->rd) {
rq               6481 kernel/sched/core.c 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
rq               6482 kernel/sched/core.c 		set_rq_offline(rq);
rq               6484 kernel/sched/core.c 	migrate_tasks(rq, &rf);
rq               6485 kernel/sched/core.c 	BUG_ON(rq->nr_running != 1);
rq               6486 kernel/sched/core.c 	rq_unlock_irqrestore(rq, &rf);
rq               6488 kernel/sched/core.c 	calc_load_migrate(rq);
rq               6490 kernel/sched/core.c 	nohz_balance_exit_idle(rq);
rq               6491 kernel/sched/core.c 	hrtick_clear(rq);
rq               6620 kernel/sched/core.c 		struct rq *rq;
rq               6622 kernel/sched/core.c 		rq = cpu_rq(i);
rq               6623 kernel/sched/core.c 		raw_spin_lock_init(&rq->lock);
rq               6624 kernel/sched/core.c 		rq->nr_running = 0;
rq               6625 kernel/sched/core.c 		rq->calc_load_active = 0;
rq               6626 kernel/sched/core.c 		rq->calc_load_update = jiffies + LOAD_FREQ;
rq               6627 kernel/sched/core.c 		init_cfs_rq(&rq->cfs);
rq               6628 kernel/sched/core.c 		init_rt_rq(&rq->rt);
rq               6629 kernel/sched/core.c 		init_dl_rq(&rq->dl);
rq               6632 kernel/sched/core.c 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
rq               6633 kernel/sched/core.c 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
rq               6654 kernel/sched/core.c 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
rq               6657 kernel/sched/core.c 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
rq               6659 kernel/sched/core.c 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
rq               6662 kernel/sched/core.c 		rq->sd = NULL;
rq               6663 kernel/sched/core.c 		rq->rd = NULL;
rq               6664 kernel/sched/core.c 		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
rq               6665 kernel/sched/core.c 		rq->balance_callback = NULL;
rq               6666 kernel/sched/core.c 		rq->active_balance = 0;
rq               6667 kernel/sched/core.c 		rq->next_balance = jiffies;
rq               6668 kernel/sched/core.c 		rq->push_cpu = 0;
rq               6669 kernel/sched/core.c 		rq->cpu = i;
rq               6670 kernel/sched/core.c 		rq->online = 0;
rq               6671 kernel/sched/core.c 		rq->idle_stamp = 0;
rq               6672 kernel/sched/core.c 		rq->avg_idle = 2*sysctl_sched_migration_cost;
rq               6673 kernel/sched/core.c 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
rq               6675 kernel/sched/core.c 		INIT_LIST_HEAD(&rq->cfs_tasks);
rq               6677 kernel/sched/core.c 		rq_attach_root(rq, &def_root_domain);
rq               6679 kernel/sched/core.c 		rq->last_load_update_tick = jiffies;
rq               6680 kernel/sched/core.c 		rq->last_blocked_load_update_tick = jiffies;
rq               6681 kernel/sched/core.c 		atomic_set(&rq->nohz_flags, 0);
rq               6684 kernel/sched/core.c 		hrtick_rq_init(rq);
rq               6685 kernel/sched/core.c 		atomic_set(&rq->nr_iowait, 0);
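The sched_init() hits show each per-CPU runqueue being initialized in a loop: the lock, the run counters, the per-class sub-runqueues and the load-balancing bookkeeping all start from known values before any task runs. A heavily reduced sketch of that per-CPU initialization shape follows; the fields are a small illustrative subset, not the real struct rq.

/* Sketch of per-CPU runqueue initialization with a reduced struct rq
 * (fields chosen for illustration only). */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

struct rq {
	pthread_mutex_t lock;
	unsigned int nr_running;
	long calc_load_active;
	int cpu;
	int online;
	unsigned long next_balance;
};

static struct rq runqueues[NR_CPUS];

static void sched_init(unsigned long now_jiffies)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		struct rq *rq = &runqueues[i];

		pthread_mutex_init(&rq->lock, NULL);
		rq->nr_running = 0;
		rq->calc_load_active = 0;
		rq->cpu = i;
		rq->online = 0;
		rq->next_balance = now_jiffies;
	}
}

int main(void)
{
	sched_init(1000);
	printf("rq[2].cpu = %d\n", runqueues[2].cpu);
	return 0;
}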
rq               7038 kernel/sched/core.c 	struct rq *rq;
rq               7040 kernel/sched/core.c 	rq = task_rq_lock(tsk, &rf);
rq               7041 kernel/sched/core.c 	update_rq_clock(rq);
rq               7043 kernel/sched/core.c 	running = task_current(rq, tsk);
rq               7047 kernel/sched/core.c 		dequeue_task(rq, tsk, queue_flags);
rq               7049 kernel/sched/core.c 		put_prev_task(rq, tsk);
rq               7054 kernel/sched/core.c 		enqueue_task(rq, tsk, queue_flags);
rq               7056 kernel/sched/core.c 		set_next_task(rq, tsk);
rq               7062 kernel/sched/core.c 		resched_curr(rq);
rq               7065 kernel/sched/core.c 	task_rq_unlock(rq, tsk, &rf);
rq               7132 kernel/sched/core.c 	struct rq *rq;
rq               7134 kernel/sched/core.c 	rq = task_rq_lock(task, &rf);
rq               7136 kernel/sched/core.c 	update_rq_clock(rq);
rq               7139 kernel/sched/core.c 	task_rq_unlock(rq, task, &rf);
rq               7435 kernel/sched/core.c 		struct rq *rq = cfs_rq->rq;
rq               7438 kernel/sched/core.c 		rq_lock_irq(rq, &rf);
rq               7444 kernel/sched/core.c 		rq_unlock_irq(rq, &rf);
rq                211 kernel/sched/cpufreq_schedutil.c 	struct rq *rq = cpu_rq(cpu);
rq                214 kernel/sched/cpufreq_schedutil.c 	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
rq                223 kernel/sched/cpufreq_schedutil.c 	irq = cpu_util_irq(rq);
rq                239 kernel/sched/cpufreq_schedutil.c 	util = util_cfs + cpu_util_rt(rq);
rq                241 kernel/sched/cpufreq_schedutil.c 		util = uclamp_util_with(rq, util, p);
rq                243 kernel/sched/cpufreq_schedutil.c 	dl_util = cpu_util_dl(rq);
rq                287 kernel/sched/cpufreq_schedutil.c 		util += cpu_bw_dl(rq);
rq                294 kernel/sched/cpufreq_schedutil.c 	struct rq *rq = cpu_rq(sg_cpu->cpu);
rq                295 kernel/sched/cpufreq_schedutil.c 	unsigned long util = cpu_util_cfs(rq);
rq                299 kernel/sched/cpufreq_schedutil.c 	sg_cpu->bw_dl = cpu_bw_dl(rq);
rq                221 kernel/sched/cputime.c 	struct rq *rq = this_rq();
rq                223 kernel/sched/cputime.c 	if (atomic_read(&rq->nr_iowait) > 0)
rq                279 kernel/sched/cputime.c 	struct rq *rq;
rq                281 kernel/sched/cputime.c 	rq = task_rq_lock(t, &rf);
rq                283 kernel/sched/cputime.c 	task_rq_unlock(rq, t, &rf);
rq                358 kernel/sched/cputime.c 					 struct rq *rq, int ticks)
rq                384 kernel/sched/cputime.c 	} else if (p == rq->idle) {
rq                395 kernel/sched/cputime.c 	struct rq *rq = this_rq();
rq                397 kernel/sched/cputime.c 	irqtime_account_process_tick(current, 0, rq, ticks);
rq                402 kernel/sched/cputime.c 						struct rq *rq, int nr_ticks) { }
rq                478 kernel/sched/cputime.c 	struct rq *rq = this_rq();
rq                484 kernel/sched/cputime.c 		irqtime_account_process_tick(p, user_tick, rq, 1);
rq                498 kernel/sched/cputime.c 	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
rq                 28 kernel/sched/deadline.c static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
rq                 30 kernel/sched/deadline.c 	return container_of(dl_rq, struct rq, dl);
rq                 36 kernel/sched/deadline.c 	struct rq *rq = task_rq(p);
rq                 38 kernel/sched/deadline.c 	return &rq->dl;
rq                158 kernel/sched/deadline.c 	struct rq *rq;
rq                165 kernel/sched/deadline.c 	rq = task_rq(p);
rq                167 kernel/sched/deadline.c 		sub_running_bw(&p->dl, &rq->dl);
rq                179 kernel/sched/deadline.c 	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
rq                180 kernel/sched/deadline.c 	__add_rq_bw(new_bw, &rq->dl);
rq                242 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
rq                265 kernel/sched/deadline.c 	zerolag_time -= rq_clock(rq);
rq                278 kernel/sched/deadline.c 				sub_rq_bw(&p->dl, &rq->dl);
rq                378 kernel/sched/deadline.c static inline int dl_overloaded(struct rq *rq)
rq                380 kernel/sched/deadline.c 	return atomic_read(&rq->rd->dlo_count);
rq                383 kernel/sched/deadline.c static inline void dl_set_overload(struct rq *rq)
rq                385 kernel/sched/deadline.c 	if (!rq->online)
rq                388 kernel/sched/deadline.c 	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
rq                396 kernel/sched/deadline.c 	atomic_inc(&rq->rd->dlo_count);
rq                399 kernel/sched/deadline.c static inline void dl_clear_overload(struct rq *rq)
rq                401 kernel/sched/deadline.c 	if (!rq->online)
rq                404 kernel/sched/deadline.c 	atomic_dec(&rq->rd->dlo_count);
rq                405 kernel/sched/deadline.c 	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
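The dl_set_overload()/dl_clear_overload() hits show overload tracking on the root domain: on overload the CPU's bit is set in rd->dlo_mask before rd->dlo_count is incremented, and clearing happens in the reverse order, so any observer that sees a non-zero count can trust the mask. The sketch below mimics that ordering with C11 atomics; the types, and the use of release ordering where the kernel relies on its own barriers, are simplifications rather than the kernel's exact code.

/* Sketch of the overload-tracking pattern: mask bit first, count second
 * on set; count first, mask bit second on clear. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct root_domain {
	atomic_int dlo_count;
	_Atomic unsigned long dlo_mask;		/* one bit per CPU */
};

static void dl_set_overload(struct root_domain *rd, int cpu)
{
	atomic_fetch_or(&rd->dlo_mask, 1UL << cpu);
	/* make the mask update visible before the count (assumed intent) */
	atomic_fetch_add_explicit(&rd->dlo_count, 1, memory_order_release);
}

static void dl_clear_overload(struct root_domain *rd, int cpu)
{
	atomic_fetch_sub(&rd->dlo_count, 1);
	atomic_fetch_and(&rd->dlo_mask, ~(1UL << cpu));
}

static bool dl_overloaded(const struct root_domain *rd)
{
	return atomic_load((atomic_int *)&rd->dlo_count) > 0;
}

int main(void)
{
	struct root_domain rd = { 0 };

	dl_set_overload(&rd, 3);
	printf("overloaded=%d mask=%#lx\n", dl_overloaded(&rd),
	       atomic_load(&rd.dlo_mask));
	dl_clear_overload(&rd, 3);
	printf("overloaded=%d\n", dl_overloaded(&rd));
	return 0;
}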
rq                445 kernel/sched/deadline.c static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
rq                447 kernel/sched/deadline.c 	struct dl_rq *dl_rq = &rq->dl;
rq                475 kernel/sched/deadline.c static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
rq                477 kernel/sched/deadline.c 	struct dl_rq *dl_rq = &rq->dl;
rq                496 kernel/sched/deadline.c static inline int has_pushable_dl_tasks(struct rq *rq)
rq                498 kernel/sched/deadline.c 	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
rq                501 kernel/sched/deadline.c static int push_dl_task(struct rq *rq);
rq                503 kernel/sched/deadline.c static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
rq                511 kernel/sched/deadline.c static void push_dl_tasks(struct rq *);
rq                512 kernel/sched/deadline.c static void pull_dl_task(struct rq *);
rq                514 kernel/sched/deadline.c static inline void deadline_queue_push_tasks(struct rq *rq)
rq                516 kernel/sched/deadline.c 	if (!has_pushable_dl_tasks(rq))
rq                519 kernel/sched/deadline.c 	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
rq                522 kernel/sched/deadline.c static inline void deadline_queue_pull_task(struct rq *rq)
rq                524 kernel/sched/deadline.c 	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
rq                527 kernel/sched/deadline.c static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
rq                529 kernel/sched/deadline.c static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
rq                531 kernel/sched/deadline.c 	struct rq *later_rq = NULL;
rq                534 kernel/sched/deadline.c 	later_rq = find_lock_later_rq(p, rq);
rq                558 kernel/sched/deadline.c 		double_lock_balance(rq, later_rq);
rq                568 kernel/sched/deadline.c 		sub_running_bw(&p->dl, &rq->dl);
rq                569 kernel/sched/deadline.c 		sub_rq_bw(&p->dl, &rq->dl);
rq                574 kernel/sched/deadline.c 		sub_rq_bw(&p->dl, &rq->dl);
rq                583 kernel/sched/deadline.c 	dl_b = &rq->rd->dl_bw;
rq                585 kernel/sched/deadline.c 	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
rq                594 kernel/sched/deadline.c 	double_unlock_balance(later_rq, rq);
rq                602 kernel/sched/deadline.c void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
rq                607 kernel/sched/deadline.c void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
rq                621 kernel/sched/deadline.c static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
rq                626 kernel/sched/deadline.c static inline void pull_dl_task(struct rq *rq)
rq                630 kernel/sched/deadline.c static inline void deadline_queue_push_tasks(struct rq *rq)
rq                634 kernel/sched/deadline.c static inline void deadline_queue_pull_task(struct rq *rq)
rq                639 kernel/sched/deadline.c static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
rq                640 kernel/sched/deadline.c static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
rq                641 kernel/sched/deadline.c static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
rq                658 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
rq                661 kernel/sched/deadline.c 	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
rq                676 kernel/sched/deadline.c 	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
rq                702 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
rq                711 kernel/sched/deadline.c 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
rq                738 kernel/sched/deadline.c 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
rq                740 kernel/sched/deadline.c 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
rq                823 kernel/sched/deadline.c update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
rq                825 kernel/sched/deadline.c 	u64 laxity = dl_se->deadline - rq_clock(rq);
rq                833 kernel/sched/deadline.c 	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
rq                888 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
rq                890 kernel/sched/deadline.c 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
rq                891 kernel/sched/deadline.c 	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
rq                894 kernel/sched/deadline.c 			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
rq                896 kernel/sched/deadline.c 			update_dl_revised_wakeup(dl_se, rq);
rq                900 kernel/sched/deadline.c 		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
rq                924 kernel/sched/deadline.c 	struct rq *rq = task_rq(p);
rq                928 kernel/sched/deadline.c 	lockdep_assert_held(&rq->lock);
rq                937 kernel/sched/deadline.c 	delta = ktime_to_ns(now) - rq_clock(rq);
rq                985 kernel/sched/deadline.c 	struct rq *rq;
rq                987 kernel/sched/deadline.c 	rq = task_rq_lock(p, &rf);
rq               1011 kernel/sched/deadline.c 	update_rq_clock(rq);
rq               1033 kernel/sched/deadline.c 	if (unlikely(!rq->online)) {
rq               1038 kernel/sched/deadline.c 		lockdep_unpin_lock(&rq->lock, rf.cookie);
rq               1039 kernel/sched/deadline.c 		rq = dl_task_offline_migration(rq, p);
rq               1040 kernel/sched/deadline.c 		rf.cookie = lockdep_pin_lock(&rq->lock);
rq               1041 kernel/sched/deadline.c 		update_rq_clock(rq);
rq               1051 kernel/sched/deadline.c 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
rq               1052 kernel/sched/deadline.c 	if (dl_task(rq->curr))
rq               1053 kernel/sched/deadline.c 		check_preempt_curr_dl(rq, p, 0);
rq               1055 kernel/sched/deadline.c 		resched_curr(rq);
rq               1062 kernel/sched/deadline.c 	if (has_pushable_dl_tasks(rq)) {
rq               1067 kernel/sched/deadline.c 		rq_unpin_lock(rq, &rf);
rq               1068 kernel/sched/deadline.c 		push_dl_task(rq);
rq               1069 kernel/sched/deadline.c 		rq_repin_lock(rq, &rf);
rq               1074 kernel/sched/deadline.c 	task_rq_unlock(rq, p, &rf);
rq               1114 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
rq               1116 kernel/sched/deadline.c 	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
rq               1117 kernel/sched/deadline.c 	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
rq               1153 kernel/sched/deadline.c static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
rq               1155 kernel/sched/deadline.c 	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
rq               1157 kernel/sched/deadline.c 	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
rq               1167 kernel/sched/deadline.c 	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
rq               1170 kernel/sched/deadline.c 		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
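The grub_reclaim() hits show GRUB-style runtime reclaiming: the runtime delta actually charged is scaled by an active-utilization factor built from this_bw, running_bw and extra_bw, clamped from below so a task cannot reclaim past its own share. The sketch below reproduces that arithmetic in the same fixed-point convention (BW_UNIT as 1.0), but folds the bw_ratio scaling of the clamp into a precomputed u_act_min and uses made-up sample numbers.

/* Sketch of GRUB runtime reclaiming arithmetic (illustrative values). */
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20
#define BW_UNIT		(1ULL << BW_SHIFT)

struct dl_bw_state {
	uint64_t this_bw;	/* total bandwidth of DL tasks on this CPU */
	uint64_t running_bw;	/* bandwidth of the currently active ones */
	uint64_t extra_bw;	/* bandwidth reserved for other activity */
	uint64_t u_act_min;	/* lower clamp for this task's share */
};

static uint64_t grub_reclaim(uint64_t delta, const struct dl_bw_state *st)
{
	uint64_t u_inact = st->this_bw - st->running_bw;
	uint64_t u_act;

	if (u_inact + st->extra_bw > BW_UNIT - st->u_act_min)
		u_act = st->u_act_min;
	else
		u_act = BW_UNIT - u_inact - st->extra_bw;

	return (delta * u_act) >> BW_SHIFT;
}

int main(void)
{
	struct dl_bw_state st = {
		.this_bw    = BW_UNIT / 2,	/* 0.50 */
		.running_bw = BW_UNIT / 4,	/* 0.25 */
		.extra_bw   = BW_UNIT / 10,	/* 0.10 */
		.u_act_min  = BW_UNIT / 20,	/* 0.05 */
	};

	/* 1 ms of consumed runtime is charged as roughly 0.65 ms here. */
	printf("charged = %llu ns\n",
	       (unsigned long long)grub_reclaim(1000000, &st));
	return 0;
}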
rq               1179 kernel/sched/deadline.c static void update_curr_dl(struct rq *rq)
rq               1181 kernel/sched/deadline.c 	struct task_struct *curr = rq->curr;
rq               1184 kernel/sched/deadline.c 	int cpu = cpu_of(rq);
rq               1198 kernel/sched/deadline.c 	now = rq_clock_task(rq);
rq               1227 kernel/sched/deadline.c 						 rq,
rq               1248 kernel/sched/deadline.c 		__dequeue_task_dl(rq, curr, 0);
rq               1250 kernel/sched/deadline.c 			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
rq               1252 kernel/sched/deadline.c 		if (!is_leftmost(curr, &rq->dl))
rq               1253 kernel/sched/deadline.c 			resched_curr(rq);
rq               1268 kernel/sched/deadline.c 		struct rt_rq *rt_rq = &rq->rt;
rq               1289 kernel/sched/deadline.c 	struct rq *rq;
rq               1291 kernel/sched/deadline.c 	rq = task_rq_lock(p, &rf);
rq               1294 kernel/sched/deadline.c 	update_rq_clock(rq);
rq               1315 kernel/sched/deadline.c 	sub_running_bw(dl_se, &rq->dl);
rq               1318 kernel/sched/deadline.c 	task_rq_unlock(rq, p, &rf);
rq               1336 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
rq               1341 kernel/sched/deadline.c 		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
rq               1347 kernel/sched/deadline.c 	struct rq *rq = rq_of_dl_rq(dl_rq);
rq               1356 kernel/sched/deadline.c 		cpudl_clear(&rq->rd->cpudl, rq->cpu);
rq               1363 kernel/sched/deadline.c 		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
rq               1472 kernel/sched/deadline.c static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
rq               1509 kernel/sched/deadline.c 		add_rq_bw(&p->dl, &rq->dl);
rq               1510 kernel/sched/deadline.c 		add_running_bw(&p->dl, &rq->dl);
rq               1534 kernel/sched/deadline.c 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
rq               1535 kernel/sched/deadline.c 		enqueue_pushable_dl_task(rq, p);
rq               1538 kernel/sched/deadline.c static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
rq               1541 kernel/sched/deadline.c 	dequeue_pushable_dl_task(rq, p);
rq               1544 kernel/sched/deadline.c static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
rq               1546 kernel/sched/deadline.c 	update_curr_dl(rq);
rq               1547 kernel/sched/deadline.c 	__dequeue_task_dl(rq, p, flags);
rq               1550 kernel/sched/deadline.c 		sub_running_bw(&p->dl, &rq->dl);
rq               1551 kernel/sched/deadline.c 		sub_rq_bw(&p->dl, &rq->dl);
rq               1577 kernel/sched/deadline.c static void yield_task_dl(struct rq *rq)
rq               1585 kernel/sched/deadline.c 	rq->curr->dl.dl_yielded = 1;
rq               1587 kernel/sched/deadline.c 	update_rq_clock(rq);
rq               1588 kernel/sched/deadline.c 	update_curr_dl(rq);
rq               1594 kernel/sched/deadline.c 	rq_clock_skip_update(rq);
rq               1605 kernel/sched/deadline.c 	struct rq *rq;
rq               1610 kernel/sched/deadline.c 	rq = cpu_rq(cpu);
rq               1613 kernel/sched/deadline.c 	curr = READ_ONCE(rq->curr); /* unlocked access */
rq               1644 kernel/sched/deadline.c 	struct rq *rq;
rq               1649 kernel/sched/deadline.c 	rq = task_rq(p);
rq               1655 kernel/sched/deadline.c 	raw_spin_lock(&rq->lock);
rq               1657 kernel/sched/deadline.c 		sub_running_bw(&p->dl, &rq->dl);
rq               1669 kernel/sched/deadline.c 	sub_rq_bw(&p->dl, &rq->dl);
rq               1670 kernel/sched/deadline.c 	raw_spin_unlock(&rq->lock);
rq               1673 kernel/sched/deadline.c static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
rq               1679 kernel/sched/deadline.c 	if (rq->curr->nr_cpus_allowed == 1 ||
rq               1680 kernel/sched/deadline.c 	    !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
rq               1688 kernel/sched/deadline.c 	    cpudl_find(&rq->rd->cpudl, p, NULL))
rq               1691 kernel/sched/deadline.c 	resched_curr(rq);
rq               1694 kernel/sched/deadline.c static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
rq               1696 kernel/sched/deadline.c 	if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
rq               1703 kernel/sched/deadline.c 		rq_unpin_lock(rq, rf);
rq               1704 kernel/sched/deadline.c 		pull_dl_task(rq);
rq               1705 kernel/sched/deadline.c 		rq_repin_lock(rq, rf);
rq               1708 kernel/sched/deadline.c 	return sched_stop_runnable(rq) || sched_dl_runnable(rq);
rq               1716 kernel/sched/deadline.c static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
rq               1719 kernel/sched/deadline.c 	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
rq               1720 kernel/sched/deadline.c 		resched_curr(rq);
rq               1729 kernel/sched/deadline.c 	if ((p->dl.deadline == rq->curr->dl.deadline) &&
rq               1730 kernel/sched/deadline.c 	    !test_tsk_need_resched(rq->curr))
rq               1731 kernel/sched/deadline.c 		check_preempt_equal_dl(rq, p);
rq               1736 kernel/sched/deadline.c static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
rq               1738 kernel/sched/deadline.c 	hrtick_start(rq, p->dl.runtime);
rq               1741 kernel/sched/deadline.c static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
rq               1746 kernel/sched/deadline.c static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
rq               1748 kernel/sched/deadline.c 	p->se.exec_start = rq_clock_task(rq);
rq               1751 kernel/sched/deadline.c 	dequeue_pushable_dl_task(rq, p);
rq               1756 kernel/sched/deadline.c 	if (hrtick_enabled(rq))
rq               1757 kernel/sched/deadline.c 		start_hrtick_dl(rq, p);
rq               1759 kernel/sched/deadline.c 	if (rq->curr->sched_class != &dl_sched_class)
rq               1760 kernel/sched/deadline.c 		update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
rq               1762 kernel/sched/deadline.c 	deadline_queue_push_tasks(rq);
rq               1765 kernel/sched/deadline.c static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
rq               1777 kernel/sched/deadline.c pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq               1780 kernel/sched/deadline.c 	struct dl_rq *dl_rq = &rq->dl;
rq               1785 kernel/sched/deadline.c 	if (!sched_dl_runnable(rq))
rq               1788 kernel/sched/deadline.c 	dl_se = pick_next_dl_entity(rq, dl_rq);
rq               1791 kernel/sched/deadline.c 	set_next_task_dl(rq, p, true);
rq               1795 kernel/sched/deadline.c static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
rq               1797 kernel/sched/deadline.c 	update_curr_dl(rq);
rq               1799 kernel/sched/deadline.c 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
rq               1801 kernel/sched/deadline.c 		enqueue_pushable_dl_task(rq, p);
rq               1812 kernel/sched/deadline.c static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
rq               1814 kernel/sched/deadline.c 	update_curr_dl(rq);
rq               1816 kernel/sched/deadline.c 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
rq               1822 kernel/sched/deadline.c 	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
rq               1823 kernel/sched/deadline.c 	    is_leftmost(p, &rq->dl))
rq               1824 kernel/sched/deadline.c 		start_hrtick_dl(rq, p);
rq               1840 kernel/sched/deadline.c static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
rq               1842 kernel/sched/deadline.c 	if (!task_running(rq, p) &&
rq               1852 kernel/sched/deadline.c static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
rq               1854 kernel/sched/deadline.c 	struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
rq               1857 kernel/sched/deadline.c 	if (!has_pushable_dl_tasks(rq))
rq               1864 kernel/sched/deadline.c 		if (pick_dl_task(rq, p, cpu))
rq               1964 kernel/sched/deadline.c static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
rq               1966 kernel/sched/deadline.c 	struct rq *later_rq = NULL;
rq               1973 kernel/sched/deadline.c 		if ((cpu == -1) || (cpu == rq->cpu))
rq               1991 kernel/sched/deadline.c 		if (double_lock_balance(rq, later_rq)) {
rq               1992 kernel/sched/deadline.c 			if (unlikely(task_rq(task) != rq ||
rq               1994 kernel/sched/deadline.c 				     task_running(rq, task) ||
rq               1997 kernel/sched/deadline.c 				double_unlock_balance(rq, later_rq);
rq               2014 kernel/sched/deadline.c 		double_unlock_balance(rq, later_rq);
rq               2021 kernel/sched/deadline.c static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
rq               2025 kernel/sched/deadline.c 	if (!has_pushable_dl_tasks(rq))
rq               2028 kernel/sched/deadline.c 	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
rq               2031 kernel/sched/deadline.c 	BUG_ON(rq->cpu != task_cpu(p));
rq               2032 kernel/sched/deadline.c 	BUG_ON(task_current(rq, p));
rq               2046 kernel/sched/deadline.c static int push_dl_task(struct rq *rq)
rq               2049 kernel/sched/deadline.c 	struct rq *later_rq;
rq               2052 kernel/sched/deadline.c 	if (!rq->dl.overloaded)
rq               2055 kernel/sched/deadline.c 	next_task = pick_next_pushable_dl_task(rq);
rq               2060 kernel/sched/deadline.c 	if (WARN_ON(next_task == rq->curr))
rq               2068 kernel/sched/deadline.c 	if (dl_task(rq->curr) &&
rq               2069 kernel/sched/deadline.c 	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
rq               2070 kernel/sched/deadline.c 	    rq->curr->nr_cpus_allowed > 1) {
rq               2071 kernel/sched/deadline.c 		resched_curr(rq);
rq               2079 kernel/sched/deadline.c 	later_rq = find_lock_later_rq(next_task, rq);
rq               2088 kernel/sched/deadline.c 		task = pick_next_pushable_dl_task(rq);
rq               2106 kernel/sched/deadline.c 	deactivate_task(rq, next_task, 0);
rq               2119 kernel/sched/deadline.c 	double_unlock_balance(rq, later_rq);
rq               2127 kernel/sched/deadline.c static void push_dl_tasks(struct rq *rq)
rq               2130 kernel/sched/deadline.c 	while (push_dl_task(rq))
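The pick_next_pushable_dl_task()/push_dl_task() hits show the push side of deadline balancing: pushable tasks sit in a deadline-ordered rbtree, the cached leftmost (earliest-deadline) entry is the push candidate, and push_dl_tasks() keeps pushing until no candidate is left. The sketch below illustrates only "pick the earliest-deadline pushable task", using a plain array scan where the kernel uses the rbtree's leftmost node.

/* Sketch of selecting the earliest-deadline pushable task
 * (array scan standing in for the kernel's rbtree). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dl_task {
	const char *comm;
	uint64_t deadline;
	int pushable;		/* queued and allowed to run elsewhere */
};

static const struct dl_task *pick_next_pushable(const struct dl_task *t, size_t n)
{
	const struct dl_task *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!t[i].pushable)
			continue;
		if (!best || t[i].deadline < best->deadline)
			best = &t[i];
	}
	return best;
}

int main(void)
{
	struct dl_task tasks[] = {
		{ "a", 300, 1 },
		{ "b", 150, 1 },
		{ "c", 100, 0 },	/* not pushable, e.g. pinned to this CPU */
	};
	const struct dl_task *p = pick_next_pushable(tasks, 3);

	if (p)
		printf("push candidate: %s (deadline %llu)\n",
		       p->comm, (unsigned long long)p->deadline);
	return 0;
}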
rq               2134 kernel/sched/deadline.c static void pull_dl_task(struct rq *this_rq)
rq               2139 kernel/sched/deadline.c 	struct rq *src_rq;
rq               2219 kernel/sched/deadline.c static void task_woken_dl(struct rq *rq, struct task_struct *p)
rq               2221 kernel/sched/deadline.c 	if (!task_running(rq, p) &&
rq               2222 kernel/sched/deadline.c 	    !test_tsk_need_resched(rq->curr) &&
rq               2224 kernel/sched/deadline.c 	    dl_task(rq->curr) &&
rq               2225 kernel/sched/deadline.c 	    (rq->curr->nr_cpus_allowed < 2 ||
rq               2226 kernel/sched/deadline.c 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
rq               2227 kernel/sched/deadline.c 		push_dl_tasks(rq);
rq               2235 kernel/sched/deadline.c 	struct rq *rq;
rq               2239 kernel/sched/deadline.c 	rq = task_rq(p);
rq               2240 kernel/sched/deadline.c 	src_rd = rq->rd;
rq               2250 kernel/sched/deadline.c 		src_dl_b = dl_bw_of(cpu_of(rq));
rq               2265 kernel/sched/deadline.c static void rq_online_dl(struct rq *rq)
rq               2267 kernel/sched/deadline.c 	if (rq->dl.overloaded)
rq               2268 kernel/sched/deadline.c 		dl_set_overload(rq);
rq               2270 kernel/sched/deadline.c 	cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
rq               2271 kernel/sched/deadline.c 	if (rq->dl.dl_nr_running > 0)
rq               2272 kernel/sched/deadline.c 		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
rq               2276 kernel/sched/deadline.c static void rq_offline_dl(struct rq *rq)
rq               2278 kernel/sched/deadline.c 	if (rq->dl.overloaded)
rq               2279 kernel/sched/deadline.c 		dl_clear_overload(rq);
rq               2281 kernel/sched/deadline.c 	cpudl_clear(&rq->rd->cpudl, rq->cpu);
rq               2282 kernel/sched/deadline.c 	cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
rq               2297 kernel/sched/deadline.c 	struct rq *rq;
rq               2300 kernel/sched/deadline.c 	rq = task_rq_lock(p, &rf);
rq               2304 kernel/sched/deadline.c 	dl_b = &rq->rd->dl_bw;
rq               2307 kernel/sched/deadline.c 	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
rq               2312 kernel/sched/deadline.c 	task_rq_unlock(rq, p, &rf);
rq               2326 kernel/sched/deadline.c static void switched_from_dl(struct rq *rq, struct task_struct *p)
rq               2347 kernel/sched/deadline.c 			sub_running_bw(&p->dl, &rq->dl);
rq               2348 kernel/sched/deadline.c 		sub_rq_bw(&p->dl, &rq->dl);
rq               2364 kernel/sched/deadline.c 	if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
rq               2367 kernel/sched/deadline.c 	deadline_queue_pull_task(rq);
rq               2374 kernel/sched/deadline.c static void switched_to_dl(struct rq *rq, struct task_struct *p)
rq               2381 kernel/sched/deadline.c 		add_rq_bw(&p->dl, &rq->dl);
rq               2386 kernel/sched/deadline.c 	if (rq->curr != p) {
rq               2388 kernel/sched/deadline.c 		if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
rq               2389 kernel/sched/deadline.c 			deadline_queue_push_tasks(rq);
rq               2391 kernel/sched/deadline.c 		if (dl_task(rq->curr))
rq               2392 kernel/sched/deadline.c 			check_preempt_curr_dl(rq, p, 0);
rq               2394 kernel/sched/deadline.c 			resched_curr(rq);
rq               2402 kernel/sched/deadline.c static void prio_changed_dl(struct rq *rq, struct task_struct *p,
rq               2405 kernel/sched/deadline.c 	if (task_on_rq_queued(p) || rq->curr == p) {
rq               2413 kernel/sched/deadline.c 		if (!rq->dl.overloaded)
rq               2414 kernel/sched/deadline.c 			deadline_queue_pull_task(rq);
rq               2421 kernel/sched/deadline.c 		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
rq               2422 kernel/sched/deadline.c 			resched_curr(rq);
rq               2429 kernel/sched/deadline.c 		resched_curr(rq);
rq                434 kernel/sched/debug.c print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
rq                436 kernel/sched/debug.c 	if (rq->curr == p)
rq                462 kernel/sched/debug.c static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
rq                478 kernel/sched/debug.c 		print_task(m, rq, p);
rq                487 kernel/sched/debug.c 	struct rq *rq = cpu_rq(cpu);
rq                501 kernel/sched/debug.c 	raw_spin_lock_irqsave(&rq->lock, flags);
rq                509 kernel/sched/debug.c 	raw_spin_unlock_irqrestore(&rq->lock, flags);
rq                616 kernel/sched/debug.c 	struct rq *rq = cpu_rq(cpu);
rq                632 kernel/sched/debug.c 	if (sizeof(rq->x) == 4)						\
rq                633 kernel/sched/debug.c 		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
rq                635 kernel/sched/debug.c 		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
rq                639 kernel/sched/debug.c 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
rq                646 kernel/sched/debug.c 	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
rq                653 kernel/sched/debug.c #define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
rq                659 kernel/sched/debug.c #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
rq                674 kernel/sched/debug.c 	print_rq(m, rq, cpu);
rq                293 kernel/sched/fair.c 	struct rq *rq = rq_of(cfs_rq);
rq                294 kernel/sched/fair.c 	int cpu = cpu_of(rq);
rq                297 kernel/sched/fair.c 		return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
rq                325 kernel/sched/fair.c 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
rq                335 kernel/sched/fair.c 			&rq->leaf_cfs_rq_list);
rq                340 kernel/sched/fair.c 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
rq                350 kernel/sched/fair.c 	list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
rq                355 kernel/sched/fair.c 	rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
rq                362 kernel/sched/fair.c 		struct rq *rq = rq_of(cfs_rq);
rq                371 kernel/sched/fair.c 		if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
rq                372 kernel/sched/fair.c 			rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
rq                379 kernel/sched/fair.c static inline void assert_list_leaf_cfs_rq(struct rq *rq)
rq                381 kernel/sched/fair.c 	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
rq                385 kernel/sched/fair.c #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
rq                386 kernel/sched/fair.c 	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
rq                454 kernel/sched/fair.c 	struct rq *rq = task_rq(p);
rq                456 kernel/sched/fair.c 	return &rq->cfs;
rq                480 kernel/sched/fair.c static inline void assert_list_leaf_cfs_rq(struct rq *rq)
rq                484 kernel/sched/fair.c #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
rq                485 kernel/sched/fair.c 		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
rq                868 kernel/sched/fair.c static void update_curr_fair(struct rq *rq)
rq                870 kernel/sched/fair.c 	update_curr(cfs_rq_of(&rq->curr->se));
rq               1190 kernel/sched/fair.c static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
rq               1192 kernel/sched/fair.c 	rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
rq               1193 kernel/sched/fair.c 	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
rq               1196 kernel/sched/fair.c static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
rq               1198 kernel/sched/fair.c 	rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
rq               1199 kernel/sched/fair.c 	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
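The account_numa_enqueue()/account_numa_dequeue() hits show two per-runqueue counters kept symmetric across enqueue and dequeue: how many runnable tasks have a preferred NUMA node at all, and how many are currently on that preferred node. The sketch below mirrors those two updates with reduced types; cur_node stands in for the kernel's task_node(p).

/* Sketch of symmetric NUMA accounting on enqueue/dequeue (reduced types). */
#include <stdio.h>

#define NUMA_NO_NODE (-1)

struct task { int numa_preferred_nid; int cur_node; };

struct rq {
	unsigned int nr_numa_running;
	unsigned int nr_preferred_running;
};

static void account_numa_enqueue(struct rq *rq, const struct task *p)
{
	rq->nr_numa_running      += (p->numa_preferred_nid != NUMA_NO_NODE);
	rq->nr_preferred_running += (p->numa_preferred_nid == p->cur_node);
}

static void account_numa_dequeue(struct rq *rq, const struct task *p)
{
	rq->nr_numa_running      -= (p->numa_preferred_nid != NUMA_NO_NODE);
	rq->nr_preferred_running -= (p->numa_preferred_nid == p->cur_node);
}

int main(void)
{
	struct rq rq = { 0 };
	struct task p = { .numa_preferred_nid = 1, .cur_node = 1 };

	account_numa_enqueue(&rq, &p);
	printf("numa=%u preferred=%u\n", rq.nr_numa_running, rq.nr_preferred_running);
	account_numa_dequeue(&rq, &p);
	printf("numa=%u preferred=%u\n", rq.nr_numa_running, rq.nr_preferred_running);
	return 0;
}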
rq               1477 kernel/sched/fair.c static unsigned long cpu_runnable_load(struct rq *rq);
rq               1496 kernel/sched/fair.c 		struct rq *rq = cpu_rq(cpu);
rq               1498 kernel/sched/fair.c 		ns->load += cpu_runnable_load(rq);
rq               1523 kernel/sched/fair.c 	struct rq *rq = cpu_rq(env->dst_cpu);
rq               1526 kernel/sched/fair.c 	if (xchg(&rq->numa_migrate_on, 1))
rq               1534 kernel/sched/fair.c 		rq = cpu_rq(env->best_cpu);
rq               1535 kernel/sched/fair.c 		WRITE_ONCE(rq->numa_migrate_on, 0);
rq               1593 kernel/sched/fair.c 	struct rq *dst_rq = cpu_rq(env->dst_cpu);
rq               1755 kernel/sched/fair.c 	struct rq *best_rq;
rq               2673 kernel/sched/fair.c static void task_tick_numa(struct rq *rq, struct task_struct *curr)
rq               2738 kernel/sched/fair.c static void task_tick_numa(struct rq *rq, struct task_struct *curr)
rq               2742 kernel/sched/fair.c static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
rq               2746 kernel/sched/fair.c static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
rq               2762 kernel/sched/fair.c 		struct rq *rq = rq_of(cfs_rq);
rq               2764 kernel/sched/fair.c 		account_numa_enqueue(rq, task_of(se));
rq               2765 kernel/sched/fair.c 		list_add(&se->group_node, &rq->cfs_tasks);
rq               3111 kernel/sched/fair.c 	struct rq *rq = rq_of(cfs_rq);
rq               3113 kernel/sched/fair.c 	if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
rq               3128 kernel/sched/fair.c 		cpufreq_update_util(rq, flags);
rq               3812 kernel/sched/fair.c static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
rq               3818 kernel/sched/fair.c 		rq->misfit_task_load = 0;
rq               3822 kernel/sched/fair.c 	if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
rq               3823 kernel/sched/fair.c 		rq->misfit_task_load = 0;
rq               3827 kernel/sched/fair.c 	rq->misfit_task_load = task_h_load(p);
rq               3848 kernel/sched/fair.c static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
rq               3859 kernel/sched/fair.c static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
rq               4467 kernel/sched/fair.c 	struct rq *rq = data;
rq               4468 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
rq               4472 kernel/sched/fair.c 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
rq               4485 kernel/sched/fair.c 	struct rq *rq = data;
rq               4486 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
rq               4490 kernel/sched/fair.c 		cfs_rq->throttled_clock_task = rq_clock_task(rq);
rq               4500 kernel/sched/fair.c 	struct rq *rq = rq_of(cfs_rq);
rq               4510 kernel/sched/fair.c 	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
rq               4531 kernel/sched/fair.c 		sub_nr_running(rq, task_delta);
rq               4534 kernel/sched/fair.c 	cfs_rq->throttled_clock = rq_clock(rq);
rq               4560 kernel/sched/fair.c 	struct rq *rq = rq_of(cfs_rq);
rq               4566 kernel/sched/fair.c 	se = cfs_rq->tg->se[cpu_of(rq)];
rq               4570 kernel/sched/fair.c 	update_rq_clock(rq);
rq               4573 kernel/sched/fair.c 	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
rq               4578 kernel/sched/fair.c 	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
rq               4600 kernel/sched/fair.c 		add_nr_running(rq, task_delta);
rq               4613 kernel/sched/fair.c 	assert_list_leaf_cfs_rq(rq);
rq               4616 kernel/sched/fair.c 	if (rq->curr == rq->idle && rq->cfs.nr_running)
rq               4617 kernel/sched/fair.c 		resched_curr(rq);
rq               4629 kernel/sched/fair.c 		struct rq *rq = rq_of(cfs_rq);
rq               4632 kernel/sched/fair.c 		rq_lock_irqsave(rq, &rf);
rq               4651 kernel/sched/fair.c 		rq_unlock_irqrestore(rq, &rf);
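The throttle/unthrottle hits from the CFS bandwidth code show the time accounting around throttling: when a cfs_rq is throttled its start time is stamped from the runqueue clock, and on unthrottle the elapsed interval is added to the bandwidth pool's throttled_time before the hierarchy is re-enqueued. The sketch below keeps just that stamp-and-accumulate step, storing the total locally instead of in the bandwidth pool and taking the clock value as a plain argument.

/* Sketch of throttle-time accounting (reduced types, caller-supplied clock). */
#include <stdint.h>
#include <stdio.h>

struct cfs_rq_demo {
	int throttled;
	uint64_t throttled_clock;	/* when throttling started */
	uint64_t throttled_time;	/* total time spent throttled */
};

static void throttle(struct cfs_rq_demo *cfs_rq, uint64_t now)
{
	cfs_rq->throttled = 1;
	cfs_rq->throttled_clock = now;
}

static void unthrottle(struct cfs_rq_demo *cfs_rq, uint64_t now)
{
	cfs_rq->throttled = 0;
	cfs_rq->throttled_time += now - cfs_rq->throttled_clock;
}

int main(void)
{
	struct cfs_rq_demo cfs_rq = { 0 };

	throttle(&cfs_rq, 1000);
	unthrottle(&cfs_rq, 4500);
	printf("throttled for %llu ns\n",
	       (unsigned long long)cfs_rq.throttled_time);
	return 0;
}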
rq               5036 kernel/sched/fair.c static void __maybe_unused update_runtime_enabled(struct rq *rq)
rq               5040 kernel/sched/fair.c 	lockdep_assert_held(&rq->lock);
rq               5045 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
rq               5055 kernel/sched/fair.c static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
rq               5059 kernel/sched/fair.c 	lockdep_assert_held(&rq->lock);
rq               5063 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
rq               5125 kernel/sched/fair.c static inline void update_runtime_enabled(struct rq *rq) {}
rq               5126 kernel/sched/fair.c static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
rq               5135 kernel/sched/fair.c static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
rq               5140 kernel/sched/fair.c 	SCHED_WARN_ON(task_rq(p) != rq);
rq               5142 kernel/sched/fair.c 	if (rq->cfs.h_nr_running > 1) {
rq               5148 kernel/sched/fair.c 			if (rq->curr == p)
rq               5149 kernel/sched/fair.c 				resched_curr(rq);
rq               5152 kernel/sched/fair.c 		hrtick_start(rq, delta);
rq               5161 kernel/sched/fair.c static void hrtick_update(struct rq *rq)
rq               5163 kernel/sched/fair.c 	struct task_struct *curr = rq->curr;
rq               5165 kernel/sched/fair.c 	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
rq               5169 kernel/sched/fair.c 		hrtick_start_fair(rq, curr);
rq               5173 kernel/sched/fair.c hrtick_start_fair(struct rq *rq, struct task_struct *p)
rq               5177 kernel/sched/fair.c static inline void hrtick_update(struct rq *rq)
rq               5190 kernel/sched/fair.c static inline void update_overutilized_status(struct rq *rq)
rq               5192 kernel/sched/fair.c 	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
rq               5193 kernel/sched/fair.c 		WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
rq               5194 kernel/sched/fair.c 		trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
rq               5198 kernel/sched/fair.c static inline void update_overutilized_status(struct rq *rq) { }
rq               5207 kernel/sched/fair.c enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
rq               5219 kernel/sched/fair.c 	util_est_enqueue(&rq->cfs, p);
rq               5227 kernel/sched/fair.c 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
rq               5268 kernel/sched/fair.c 		add_nr_running(rq, 1);
rq               5284 kernel/sched/fair.c 			update_overutilized_status(rq);
rq               5303 kernel/sched/fair.c 	assert_list_leaf_cfs_rq(rq);
rq               5305 kernel/sched/fair.c 	hrtick_update(rq);
rq               5315 kernel/sched/fair.c static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
rq               5365 kernel/sched/fair.c 		sub_nr_running(rq, 1);
rq               5367 kernel/sched/fair.c 	util_est_dequeue(&rq->cfs, p, task_sleep);
rq               5368 kernel/sched/fair.c 	hrtick_update(rq);
rq               5392 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
rq               5394 kernel/sched/fair.c 	return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
rq               5395 kernel/sched/fair.c 			rq->nr_running);
rq               5398 kernel/sched/fair.c static unsigned long cpu_runnable_load(struct rq *rq)
rq               5400 kernel/sched/fair.c 	return cfs_rq_runnable_load_avg(&rq->cfs);
rq               5410 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
rq               5411 kernel/sched/fair.c 	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
rq               5412 kernel/sched/fair.c 	unsigned long load_avg = cpu_runnable_load(rq);
rq               5730 kernel/sched/fair.c 			struct rq *rq = cpu_rq(i);
rq               5731 kernel/sched/fair.c 			struct cpuidle_state *idle = idle_get_state(rq);
rq               5739 kernel/sched/fair.c 				latest_idle_timestamp = rq->idle_stamp;
rq               5742 kernel/sched/fair.c 				   rq->idle_stamp > latest_idle_timestamp) {
rq               5748 kernel/sched/fair.c 				latest_idle_timestamp = rq->idle_stamp;
rq               5856 kernel/sched/fair.c void __update_idle_core(struct rq *rq)
rq               5858 kernel/sched/fair.c 	int core = cpu_of(rq);
rq               6601 kernel/sched/fair.c balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq               6603 kernel/sched/fair.c 	if (rq->nr_running)
rq               6606 kernel/sched/fair.c 	return newidle_balance(rq, rf) != 0;
rq               6692 kernel/sched/fair.c static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
rq               6694 kernel/sched/fair.c 	struct task_struct *curr = rq->curr;
rq               6758 kernel/sched/fair.c 	resched_curr(rq);
rq               6768 kernel/sched/fair.c 	if (unlikely(!se->on_rq || curr == rq->idle))
rq               6776 kernel/sched/fair.c pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq               6778 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = &rq->cfs;
rq               6784 kernel/sched/fair.c 	if (!sched_fair_runnable(rq))
rq               6821 kernel/sched/fair.c 				cfs_rq = &rq->cfs;
rq               6866 kernel/sched/fair.c 		put_prev_task(rq, prev);
rq               6883 kernel/sched/fair.c 	list_move(&p->se.group_node, &rq->cfs_tasks);
rq               6886 kernel/sched/fair.c 	if (hrtick_enabled(rq))
rq               6887 kernel/sched/fair.c 		hrtick_start_fair(rq, p);
rq               6889 kernel/sched/fair.c 	update_misfit_status(p, rq);
rq               6897 kernel/sched/fair.c 	new_tasks = newidle_balance(rq, rf);
rq               6914 kernel/sched/fair.c 	update_idle_rq_clock_pelt(rq);
rq               6922 kernel/sched/fair.c static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
rq               6938 kernel/sched/fair.c static void yield_task_fair(struct rq *rq)
rq               6940 kernel/sched/fair.c 	struct task_struct *curr = rq->curr;
rq               6947 kernel/sched/fair.c 	if (unlikely(rq->nr_running == 1))
rq               6953 kernel/sched/fair.c 		update_rq_clock(rq);
rq               6963 kernel/sched/fair.c 		rq_clock_skip_update(rq);
rq               6969 kernel/sched/fair.c static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
rq               6980 kernel/sched/fair.c 	yield_task_fair(rq);
rq               7125 kernel/sched/fair.c 	struct rq		*src_rq;
rq               7129 kernel/sched/fair.c 	struct rq		*dst_rq;
rq               7459 kernel/sched/fair.c static void attach_task(struct rq *rq, struct task_struct *p)
rq               7461 kernel/sched/fair.c 	lockdep_assert_held(&rq->lock);
rq               7463 kernel/sched/fair.c 	BUG_ON(task_rq(p) != rq);
rq               7464 kernel/sched/fair.c 	activate_task(rq, p, ENQUEUE_NOCLOCK);
rq               7465 kernel/sched/fair.c 	check_preempt_curr(rq, p, 0);
rq               7472 kernel/sched/fair.c static void attach_one_task(struct rq *rq, struct task_struct *p)
rq               7476 kernel/sched/fair.c 	rq_lock(rq, &rf);
rq               7477 kernel/sched/fair.c 	update_rq_clock(rq);
rq               7478 kernel/sched/fair.c 	attach_task(rq, p);
rq               7479 kernel/sched/fair.c 	rq_unlock(rq, &rf);
rq               7517 kernel/sched/fair.c static inline bool others_have_blocked(struct rq *rq)
rq               7519 kernel/sched/fair.c 	if (READ_ONCE(rq->avg_rt.util_avg))
rq               7522 kernel/sched/fair.c 	if (READ_ONCE(rq->avg_dl.util_avg))
rq               7526 kernel/sched/fair.c 	if (READ_ONCE(rq->avg_irq.util_avg))
rq               7533 kernel/sched/fair.c static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
rq               7535 kernel/sched/fair.c 	rq->last_blocked_load_update_tick = jiffies;
rq               7538 kernel/sched/fair.c 		rq->has_blocked_load = 0;
rq               7542 kernel/sched/fair.c static inline bool others_have_blocked(struct rq *rq) { return false; }
rq               7543 kernel/sched/fair.c static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
rq               7546 kernel/sched/fair.c static bool __update_blocked_others(struct rq *rq, bool *done)
rq               7549 kernel/sched/fair.c 	u64 now = rq_clock_pelt(rq);
rq               7556 kernel/sched/fair.c 	curr_class = rq->curr->sched_class;
rq               7558 kernel/sched/fair.c 	decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
rq               7559 kernel/sched/fair.c 		  update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
rq               7560 kernel/sched/fair.c 		  update_irq_load_avg(rq, 0);
rq               7562 kernel/sched/fair.c 	if (others_have_blocked(rq))
rq               7587 kernel/sched/fair.c static bool __update_blocked_fair(struct rq *rq, bool *done)
rq               7591 kernel/sched/fair.c 	int cpu = cpu_of(rq);
rq               7597 kernel/sched/fair.c 	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
rq               7603 kernel/sched/fair.c 			if (cfs_rq == &rq->cfs)
rq               7634 kernel/sched/fair.c 	struct rq *rq = rq_of(cfs_rq);
rq               7635 kernel/sched/fair.c 	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
rq               7674 kernel/sched/fair.c static bool __update_blocked_fair(struct rq *rq, bool *done)
rq               7676 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = &rq->cfs;
rq               7695 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
rq               7698 kernel/sched/fair.c 	rq_lock_irqsave(rq, &rf);
rq               7699 kernel/sched/fair.c 	update_rq_clock(rq);
rq               7701 kernel/sched/fair.c 	decayed |= __update_blocked_others(rq, &done);
rq               7702 kernel/sched/fair.c 	decayed |= __update_blocked_fair(rq, &done);
rq               7704 kernel/sched/fair.c 	update_blocked_load_status(rq, !done);
rq               7706 kernel/sched/fair.c 		cpufreq_update_util(rq, 0);
rq               7707 kernel/sched/fair.c 	rq_unlock_irqrestore(rq, &rf);
rq               7773 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
rq               7778 kernel/sched/fair.c 	irq = cpu_util_irq(rq);
rq               7783 kernel/sched/fair.c 	used = READ_ONCE(rq->avg_rt.util_avg);
rq               7784 kernel/sched/fair.c 	used += READ_ONCE(rq->avg_dl.util_avg);
rq               7838 kernel/sched/fair.c 			struct rq *rq = cpu_rq(cpu);
rq               7851 kernel/sched/fair.c 			if (unlikely(!rq->sd)) {
rq               7854 kernel/sched/fair.c 				sgc = rq->sd->groups->sgc;
rq               7889 kernel/sched/fair.c check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
rq               7891 kernel/sched/fair.c 	return ((rq->cpu_capacity * sd->imbalance_pct) <
rq               7892 kernel/sched/fair.c 				(rq->cpu_capacity_orig * 100));
rq               7900 kernel/sched/fair.c static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
rq               7902 kernel/sched/fair.c 	return rq->misfit_task_load &&
rq               7903 kernel/sched/fair.c 		(rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
rq               7904 kernel/sched/fair.c 		 check_cpu_capacity(rq, sd));
rq               8023 kernel/sched/fair.c static bool update_nohz_stats(struct rq *rq, bool force)
rq               8026 kernel/sched/fair.c 	unsigned int cpu = rq->cpu;
rq               8028 kernel/sched/fair.c 	if (!rq->has_blocked_load)
rq               8034 kernel/sched/fair.c 	if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
rq               8039 kernel/sched/fair.c 	return rq->has_blocked_load;
rq               8062 kernel/sched/fair.c 		struct rq *rq = cpu_rq(i);
rq               8064 kernel/sched/fair.c 		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
rq               8067 kernel/sched/fair.c 		sgs->group_load += cpu_runnable_load(rq);
rq               8069 kernel/sched/fair.c 		sgs->sum_nr_running += rq->cfs.h_nr_running;
rq               8071 kernel/sched/fair.c 		nr_running = rq->nr_running;
rq               8079 kernel/sched/fair.c 		sgs->nr_numa_running += rq->nr_numa_running;
rq               8080 kernel/sched/fair.c 		sgs->nr_preferred_running += rq->nr_preferred_running;
rq               8089 kernel/sched/fair.c 		    sgs->group_misfit_task_load < rq->misfit_task_load) {
rq               8090 kernel/sched/fair.c 			sgs->group_misfit_task_load = rq->misfit_task_load;
rq               8205 kernel/sched/fair.c static inline enum fbq_type fbq_classify_rq(struct rq *rq)
rq               8207 kernel/sched/fair.c 	if (rq->nr_running > rq->nr_numa_running)
rq               8209 kernel/sched/fair.c 	if (rq->nr_running > rq->nr_preferred_running)
rq               8219 kernel/sched/fair.c static inline enum fbq_type fbq_classify_rq(struct rq *rq)
rq               8636 kernel/sched/fair.c static struct rq *find_busiest_queue(struct lb_env *env,
rq               8639 kernel/sched/fair.c 	struct rq *busiest = NULL, *rq;
rq               8647 kernel/sched/fair.c 		rq = cpu_rq(i);
rq               8648 kernel/sched/fair.c 		rt = fbq_classify_rq(rq);
rq               8677 kernel/sched/fair.c 			if (rq->misfit_task_load > busiest_load) {
rq               8678 kernel/sched/fair.c 				busiest_load = rq->misfit_task_load;
rq               8679 kernel/sched/fair.c 				busiest = rq;
rq               8695 kernel/sched/fair.c 		    rq->nr_running == 1)
rq               8698 kernel/sched/fair.c 		load = cpu_runnable_load(rq);
rq               8705 kernel/sched/fair.c 		if (rq->nr_running == 1 && load > env->imbalance &&
rq               8706 kernel/sched/fair.c 		    !check_cpu_capacity(rq, env->sd))
rq               8723 kernel/sched/fair.c 			busiest = rq;
rq               8829 kernel/sched/fair.c static int load_balance(int this_cpu, struct rq *this_rq,
rq               8836 kernel/sched/fair.c 	struct rq *busiest;
rq               9139 kernel/sched/fair.c 	struct rq *busiest_rq = data;
rq               9142 kernel/sched/fair.c 	struct rq *target_rq = cpu_rq(target_cpu);
rq               9239 kernel/sched/fair.c static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
rq               9242 kernel/sched/fair.c 	int cpu = rq->cpu;
rq               9288 kernel/sched/fair.c 			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
rq               9312 kernel/sched/fair.c 		rq->max_idle_balance_cost =
rq               9323 kernel/sched/fair.c 		rq->next_balance = next_balance;
rq               9334 kernel/sched/fair.c 		if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
rq               9335 kernel/sched/fair.c 			nohz.next_balance = rq->next_balance;
rq               9340 kernel/sched/fair.c static inline int on_null_domain(struct rq *rq)
rq               9342 kernel/sched/fair.c 	return unlikely(!rcu_dereference_sched(rq->sd));
rq               9400 kernel/sched/fair.c static void nohz_balancer_kick(struct rq *rq)
rq               9405 kernel/sched/fair.c 	int nr_busy, i, cpu = rq->cpu;
rq               9408 kernel/sched/fair.c 	if (unlikely(rq->idle_balance))
rq               9415 kernel/sched/fair.c 	nohz_balance_exit_idle(rq);
rq               9431 kernel/sched/fair.c 	if (rq->nr_running >= 2) {
rq               9438 kernel/sched/fair.c 	sd = rcu_dereference(rq->sd);
rq               9445 kernel/sched/fair.c 		if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
rq               9472 kernel/sched/fair.c 		if (check_misfit_status(rq, sd)) {
rq               9527 kernel/sched/fair.c void nohz_balance_exit_idle(struct rq *rq)
rq               9529 kernel/sched/fair.c 	SCHED_WARN_ON(rq != this_rq());
rq               9531 kernel/sched/fair.c 	if (likely(!rq->nohz_tick_stopped))
rq               9534 kernel/sched/fair.c 	rq->nohz_tick_stopped = 0;
rq               9535 kernel/sched/fair.c 	cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
rq               9538 kernel/sched/fair.c 	set_cpu_sd_state_busy(rq->cpu);
rq               9563 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
rq               9580 kernel/sched/fair.c 	rq->has_blocked_load = 1;
rq               9588 kernel/sched/fair.c 	if (rq->nohz_tick_stopped)
rq               9592 kernel/sched/fair.c 	if (on_null_domain(rq))
rq               9595 kernel/sched/fair.c 	rq->nohz_tick_stopped = 1;
rq               9624 kernel/sched/fair.c static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
rq               9635 kernel/sched/fair.c 	struct rq *rq;
rq               9669 kernel/sched/fair.c 		rq = cpu_rq(balance_cpu);
rq               9671 kernel/sched/fair.c 		has_blocked_load |= update_nohz_stats(rq, true);
rq               9677 kernel/sched/fair.c 		if (time_after_eq(jiffies, rq->next_balance)) {
rq               9680 kernel/sched/fair.c 			rq_lock_irqsave(rq, &rf);
rq               9681 kernel/sched/fair.c 			update_rq_clock(rq);
rq               9682 kernel/sched/fair.c 			rq_unlock_irqrestore(rq, &rf);
rq               9685 kernel/sched/fair.c 				rebalance_domains(rq, CPU_IDLE);
rq               9688 kernel/sched/fair.c 		if (time_after(next_balance, rq->next_balance)) {
rq               9689 kernel/sched/fair.c 			next_balance = rq->next_balance;
rq               9729 kernel/sched/fair.c static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
rq               9752 kernel/sched/fair.c static void nohz_newidle_balance(struct rq *this_rq)
rq               9785 kernel/sched/fair.c static inline void nohz_balancer_kick(struct rq *rq) { }
rq               9787 kernel/sched/fair.c static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
rq               9792 kernel/sched/fair.c static inline void nohz_newidle_balance(struct rq *this_rq) { }
rq               9799 kernel/sched/fair.c int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
rq               9919 kernel/sched/fair.c 	struct rq *this_rq = this_rq();
rq               9942 kernel/sched/fair.c void trigger_load_balance(struct rq *rq)
rq               9945 kernel/sched/fair.c 	if (unlikely(on_null_domain(rq)))
rq               9948 kernel/sched/fair.c 	if (time_after_eq(jiffies, rq->next_balance))
rq               9951 kernel/sched/fair.c 	nohz_balancer_kick(rq);
rq               9954 kernel/sched/fair.c static void rq_online_fair(struct rq *rq)
rq               9958 kernel/sched/fair.c 	update_runtime_enabled(rq);
rq               9961 kernel/sched/fair.c static void rq_offline_fair(struct rq *rq)
rq               9966 kernel/sched/fair.c 	unthrottle_offline_cfs_rqs(rq);
rq               9979 kernel/sched/fair.c static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
rq               9990 kernel/sched/fair.c 		task_tick_numa(rq, curr);
rq               9992 kernel/sched/fair.c 	update_misfit_status(curr, rq);
rq               10005 kernel/sched/fair.c 	struct rq *rq = this_rq();
rq               10008 kernel/sched/fair.c 	rq_lock(rq, &rf);
rq               10009 kernel/sched/fair.c 	update_rq_clock(rq);
rq               10025 kernel/sched/fair.c 		resched_curr(rq);
rq               10029 kernel/sched/fair.c 	rq_unlock(rq, &rf);
rq               10037 kernel/sched/fair.c prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
rq               10047 kernel/sched/fair.c 	if (rq->curr == p) {
rq               10049 kernel/sched/fair.c 			resched_curr(rq);
rq               10051 kernel/sched/fair.c 		check_preempt_curr(rq, p, 0);
rq               10165 kernel/sched/fair.c static void switched_from_fair(struct rq *rq, struct task_struct *p)
rq               10170 kernel/sched/fair.c static void switched_to_fair(struct rq *rq, struct task_struct *p)
rq               10180 kernel/sched/fair.c 		if (rq->curr == p)
rq               10181 kernel/sched/fair.c 			resched_curr(rq);
rq               10183 kernel/sched/fair.c 			check_preempt_curr(rq, p, 0);
rq               10192 kernel/sched/fair.c static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
rq               10202 kernel/sched/fair.c 		list_move(&se->group_node, &rq->cfs_tasks);
rq               10323 kernel/sched/fair.c 	struct rq *rq;
rq               10327 kernel/sched/fair.c 		rq = cpu_rq(i);
rq               10329 kernel/sched/fair.c 		rq_lock_irq(rq, &rf);
rq               10330 kernel/sched/fair.c 		update_rq_clock(rq);
rq               10333 kernel/sched/fair.c 		rq_unlock_irq(rq, &rf);
rq               10340 kernel/sched/fair.c 	struct rq *rq;
rq               10354 kernel/sched/fair.c 		rq = cpu_rq(cpu);
rq               10356 kernel/sched/fair.c 		raw_spin_lock_irqsave(&rq->lock, flags);
rq               10358 kernel/sched/fair.c 		raw_spin_unlock_irqrestore(&rq->lock, flags);
rq               10366 kernel/sched/fair.c 	struct rq *rq = cpu_rq(cpu);
rq               10369 kernel/sched/fair.c 	cfs_rq->rq = rq;
rq               10380 kernel/sched/fair.c 		se->cfs_rq = &rq->cfs;
rq               10413 kernel/sched/fair.c 		struct rq *rq = cpu_rq(i);
rq               10418 kernel/sched/fair.c 		rq_lock_irqsave(rq, &rf);
rq               10419 kernel/sched/fair.c 		update_rq_clock(rq);
rq               10424 kernel/sched/fair.c 		rq_unlock_irqrestore(rq, &rf);
rq               10447 kernel/sched/fair.c static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
rq               10456 kernel/sched/fair.c 	if (rq->cfs.load.weight)
rq               10594 kernel/sched/fair.c const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq)
rq               10597 kernel/sched/fair.c 	return rq ? &rq->avg_rt : NULL;
rq               10604 kernel/sched/fair.c const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq)
rq               10607 kernel/sched/fair.c 	return rq ? &rq->avg_dl : NULL;
rq               10614 kernel/sched/fair.c const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq)
rq               10617 kernel/sched/fair.c 	return rq ? &rq->avg_irq : NULL;
rq               10624 kernel/sched/fair.c int sched_trace_rq_cpu(struct rq *rq)
rq               10626 kernel/sched/fair.c 	return rq ? cpu_of(rq) : -1;
rq                370 kernel/sched/idle.c balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq                379 kernel/sched/idle.c static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
rq                381 kernel/sched/idle.c 	resched_curr(rq);
rq                384 kernel/sched/idle.c static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
rq                388 kernel/sched/idle.c static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
rq                390 kernel/sched/idle.c 	update_idle_core(rq);
rq                391 kernel/sched/idle.c 	schedstat_inc(rq->sched_goidle);
rq                395 kernel/sched/idle.c pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq                397 kernel/sched/idle.c 	struct task_struct *next = rq->idle;
rq                400 kernel/sched/idle.c 		put_prev_task(rq, prev);
rq                402 kernel/sched/idle.c 	set_next_task_idle(rq, next, true);
rq                412 kernel/sched/idle.c dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
rq                414 kernel/sched/idle.c 	raw_spin_unlock_irq(&rq->lock);
rq                417 kernel/sched/idle.c 	raw_spin_lock_irq(&rq->lock);
rq                428 kernel/sched/idle.c static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
rq                432 kernel/sched/idle.c static void switched_to_idle(struct rq *rq, struct task_struct *p)
rq                438 kernel/sched/idle.c prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
rq                443 kernel/sched/idle.c static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
rq                448 kernel/sched/idle.c static void update_curr_idle(struct rq *rq)
rq                 79 kernel/sched/loadavg.c long calc_load_fold_active(struct rq *this_rq, long adjust)
rq                234 kernel/sched/loadavg.c static void calc_load_nohz_fold(struct rq *rq)
rq                238 kernel/sched/loadavg.c 	delta = calc_load_fold_active(rq, 0);
rq                259 kernel/sched/loadavg.c void calc_load_nohz_remote(struct rq *rq)
rq                261 kernel/sched/loadavg.c 	calc_load_nohz_fold(rq);
rq                266 kernel/sched/loadavg.c 	struct rq *this_rq = this_rq();
rq                386 kernel/sched/loadavg.c void calc_global_load_tick(struct rq *this_rq)
rq                240 kernel/sched/membarrier.c 		struct rq *rq = cpu_rq(cpu);
rq                243 kernel/sched/membarrier.c 		p = rcu_dereference(rq->curr);
rq                317 kernel/sched/pelt.c int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
rq                319 kernel/sched/pelt.c 	if (___update_load_sum(now, &rq->avg_rt,
rq                324 kernel/sched/pelt.c 		___update_load_avg(&rq->avg_rt, 1, 1);
rq                325 kernel/sched/pelt.c 		trace_pelt_rt_tp(rq);
rq                341 kernel/sched/pelt.c int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
rq                343 kernel/sched/pelt.c 	if (___update_load_sum(now, &rq->avg_dl,
rq                348 kernel/sched/pelt.c 		___update_load_avg(&rq->avg_dl, 1, 1);
rq                349 kernel/sched/pelt.c 		trace_pelt_dl_tp(rq);
rq                366 kernel/sched/pelt.c int update_irq_load_avg(struct rq *rq, u64 running)
rq                375 kernel/sched/pelt.c 	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
rq                376 kernel/sched/pelt.c 	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
rq                389 kernel/sched/pelt.c 	ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
rq                393 kernel/sched/pelt.c 	ret += ___update_load_sum(rq->clock, &rq->avg_irq,
rq                399 kernel/sched/pelt.c 		___update_load_avg(&rq->avg_irq, 1, 1);
rq                400 kernel/sched/pelt.c 		trace_pelt_irq_tp(rq);
rq                  7 kernel/sched/pelt.h int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
rq                  8 kernel/sched/pelt.h int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
rq                 11 kernel/sched/pelt.h int update_irq_load_avg(struct rq *rq, u64 running);
rq                 14 kernel/sched/pelt.h update_irq_load_avg(struct rq *rq, u64 running)
rq                 58 kernel/sched/pelt.h static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
rq                 60 kernel/sched/pelt.h 	if (unlikely(is_idle_task(rq->curr))) {
rq                 62 kernel/sched/pelt.h 		rq->clock_pelt  = rq_clock_task(rq);
rq                 82 kernel/sched/pelt.h 	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
rq                 83 kernel/sched/pelt.h 	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
rq                 85 kernel/sched/pelt.h 	rq->clock_pelt += delta;
rq                 97 kernel/sched/pelt.h static inline void update_idle_rq_clock_pelt(struct rq *rq)
rq                100 kernel/sched/pelt.h 	u32 util_sum = rq->cfs.avg.util_sum;
rq                101 kernel/sched/pelt.h 	util_sum += rq->avg_rt.util_sum;
rq                102 kernel/sched/pelt.h 	util_sum += rq->avg_dl.util_sum;
rq                114 kernel/sched/pelt.h 		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
rq                117 kernel/sched/pelt.h static inline u64 rq_clock_pelt(struct rq *rq)
rq                119 kernel/sched/pelt.h 	lockdep_assert_held(&rq->lock);
rq                120 kernel/sched/pelt.h 	assert_clock_updated(rq);
rq                122 kernel/sched/pelt.h 	return rq->clock_pelt - rq->lost_idle_time;
rq                150 kernel/sched/pelt.h update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
rq                156 kernel/sched/pelt.h update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
rq                162 kernel/sched/pelt.h update_irq_load_avg(struct rq *rq, u64 running)
rq                167 kernel/sched/pelt.h static inline u64 rq_clock_pelt(struct rq *rq)
rq                169 kernel/sched/pelt.h 	return rq_clock_task(rq);
rq                173 kernel/sched/pelt.h update_rq_clock_pelt(struct rq *rq, s64 delta) { }
rq                176 kernel/sched/pelt.h update_idle_rq_clock_pelt(struct rq *rq) { }
rq                816 kernel/sched/psi.c 	struct rq *rq;
rq                829 kernel/sched/psi.c 	rq = this_rq_lock_irq(&rf);
rq                834 kernel/sched/psi.c 	rq_unlock_irq(rq, &rf);
rq                846 kernel/sched/psi.c 	struct rq *rq;
rq                858 kernel/sched/psi.c 	rq = this_rq_lock_irq(&rf);
rq                863 kernel/sched/psi.c 	rq_unlock_irq(rq, &rf);
rq                906 kernel/sched/psi.c 	struct rq *rq;
rq                917 kernel/sched/psi.c 	rq = task_rq_lock(task, &rf);
rq                936 kernel/sched/psi.c 	task_rq_unlock(rq, task, &rf);
rq                121 kernel/sched/rt.c static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
rq                123 kernel/sched/rt.c 	return rt_rq->rq;
rq                131 kernel/sched/rt.c static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
rq                135 kernel/sched/rt.c 	return rt_rq->rq;
rq                160 kernel/sched/rt.c 	struct rq *rq = cpu_rq(cpu);
rq                164 kernel/sched/rt.c 	rt_rq->rq = rq;
rq                174 kernel/sched/rt.c 		rt_se->rt_rq = &rq->rt;
rq                232 kernel/sched/rt.c static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
rq                234 kernel/sched/rt.c 	return container_of(rt_rq, struct rq, rt);
rq                237 kernel/sched/rt.c static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
rq                246 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_se(rt_se);
rq                248 kernel/sched/rt.c 	return &rq->rt;
rq                261 kernel/sched/rt.c static void pull_rt_task(struct rq *this_rq);
rq                263 kernel/sched/rt.c static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
rq                266 kernel/sched/rt.c 	return rq->rt.highest_prio.curr > prev->prio;
rq                269 kernel/sched/rt.c static inline int rt_overloaded(struct rq *rq)
rq                271 kernel/sched/rt.c 	return atomic_read(&rq->rd->rto_count);
rq                274 kernel/sched/rt.c static inline void rt_set_overload(struct rq *rq)
rq                276 kernel/sched/rt.c 	if (!rq->online)
rq                279 kernel/sched/rt.c 	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
rq                290 kernel/sched/rt.c 	atomic_inc(&rq->rd->rto_count);
rq                293 kernel/sched/rt.c static inline void rt_clear_overload(struct rq *rq)
rq                295 kernel/sched/rt.c 	if (!rq->online)
rq                299 kernel/sched/rt.c 	atomic_dec(&rq->rd->rto_count);
rq                300 kernel/sched/rt.c 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
rq                350 kernel/sched/rt.c static inline int has_pushable_tasks(struct rq *rq)
rq                352 kernel/sched/rt.c 	return !plist_head_empty(&rq->rt.pushable_tasks);
rq                358 kernel/sched/rt.c static void push_rt_tasks(struct rq *);
rq                359 kernel/sched/rt.c static void pull_rt_task(struct rq *);
rq                361 kernel/sched/rt.c static inline void rt_queue_push_tasks(struct rq *rq)
rq                363 kernel/sched/rt.c 	if (!has_pushable_tasks(rq))
rq                366 kernel/sched/rt.c 	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
rq                369 kernel/sched/rt.c static inline void rt_queue_pull_task(struct rq *rq)
rq                371 kernel/sched/rt.c 	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
rq                374 kernel/sched/rt.c static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
rq                376 kernel/sched/rt.c 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
rq                378 kernel/sched/rt.c 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
rq                381 kernel/sched/rt.c 	if (p->prio < rq->rt.highest_prio.next)
rq                382 kernel/sched/rt.c 		rq->rt.highest_prio.next = p->prio;
rq                385 kernel/sched/rt.c static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
rq                387 kernel/sched/rt.c 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
rq                390 kernel/sched/rt.c 	if (has_pushable_tasks(rq)) {
rq                391 kernel/sched/rt.c 		p = plist_first_entry(&rq->rt.pushable_tasks,
rq                393 kernel/sched/rt.c 		rq->rt.highest_prio.next = p->prio;
rq                395 kernel/sched/rt.c 		rq->rt.highest_prio.next = MAX_RT_PRIO;
rq                400 kernel/sched/rt.c static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
rq                404 kernel/sched/rt.c static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
rq                418 kernel/sched/rt.c static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
rq                423 kernel/sched/rt.c static inline void pull_rt_task(struct rq *this_rq)
rq                427 kernel/sched/rt.c static inline void rt_queue_push_tasks(struct rq *rq)
rq                470 kernel/sched/rt.c #define for_each_rt_rq(rt_rq, iter, rq)					\
rq                473 kernel/sched/rt.c 		(rt_rq = iter->rt_rq[cpu_of(rq)]);)
rq                489 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_rq(rt_rq);
rq                492 kernel/sched/rt.c 	int cpu = cpu_of(rq);
rq                503 kernel/sched/rt.c 			resched_curr(rq);
rq                577 kernel/sched/rt.c #define for_each_rt_rq(rt_rq, iter, rq) \
rq                578 kernel/sched/rt.c 	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
rq                590 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_rq(rt_rq);
rq                596 kernel/sched/rt.c 	resched_curr(rq);
rq                691 kernel/sched/rt.c static void __disable_runtime(struct rq *rq)
rq                693 kernel/sched/rt.c 	struct root_domain *rd = rq->rd;
rq                700 kernel/sched/rt.c 	for_each_rt_rq(rt_rq, iter, rq) {
rq                773 kernel/sched/rt.c static void __enable_runtime(struct rq *rq)
rq                784 kernel/sched/rt.c 	for_each_rt_rq(rt_rq, iter, rq) {
rq                834 kernel/sched/rt.c 		struct rq *rq = rq_of_rt_rq(rt_rq);
rq                849 kernel/sched/rt.c 		raw_spin_lock(&rq->lock);
rq                850 kernel/sched/rt.c 		update_rq_clock(rq);
rq                871 kernel/sched/rt.c 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
rq                872 kernel/sched/rt.c 					rq_clock_cancel_skipupdate(rq);
rq                887 kernel/sched/rt.c 		raw_spin_unlock(&rq->lock);
rq                955 kernel/sched/rt.c static void update_curr_rt(struct rq *rq)
rq                957 kernel/sched/rt.c 	struct task_struct *curr = rq->curr;
rq                965 kernel/sched/rt.c 	now = rq_clock_task(rq);
rq                989 kernel/sched/rt.c 				resched_curr(rq);
rq                998 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_rq(rt_rq);
rq               1000 kernel/sched/rt.c 	BUG_ON(&rq->rt != rt_rq);
rq               1005 kernel/sched/rt.c 	BUG_ON(!rq->nr_running);
rq               1007 kernel/sched/rt.c 	sub_nr_running(rq, rt_rq->rt_nr_running);
rq               1015 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_rq(rt_rq);
rq               1017 kernel/sched/rt.c 	BUG_ON(&rq->rt != rt_rq);
rq               1026 kernel/sched/rt.c 		add_nr_running(rq, rt_rq->rt_nr_running);
rq               1031 kernel/sched/rt.c 	cpufreq_update_util(rq, 0);
rq               1039 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_rq(rt_rq);
rq               1045 kernel/sched/rt.c 	if (&rq->rt != rt_rq)
rq               1048 kernel/sched/rt.c 	if (rq->online && prio < prev_prio)
rq               1049 kernel/sched/rt.c 		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
rq               1055 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_rq(rt_rq);
rq               1061 kernel/sched/rt.c 	if (&rq->rt != rt_rq)
rq               1064 kernel/sched/rt.c 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
rq               1065 kernel/sched/rt.c 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
rq               1302 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_se(rt_se);
rq               1307 kernel/sched/rt.c 	enqueue_top_rt_rq(&rq->rt);
rq               1312 kernel/sched/rt.c 	struct rq *rq = rq_of_rt_se(rt_se);
rq               1322 kernel/sched/rt.c 	enqueue_top_rt_rq(&rq->rt);
rq               1329 kernel/sched/rt.c enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
rq               1338 kernel/sched/rt.c 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
rq               1339 kernel/sched/rt.c 		enqueue_pushable_task(rq, p);
rq               1342 kernel/sched/rt.c static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
rq               1346 kernel/sched/rt.c 	update_curr_rt(rq);
rq               1349 kernel/sched/rt.c 	dequeue_pushable_task(rq, p);
rq               1370 kernel/sched/rt.c static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
rq               1381 kernel/sched/rt.c static void yield_task_rt(struct rq *rq)
rq               1383 kernel/sched/rt.c 	requeue_task_rt(rq, rq->curr, 0);
rq               1393 kernel/sched/rt.c 	struct rq *rq;
rq               1399 kernel/sched/rt.c 	rq = cpu_rq(cpu);
rq               1402 kernel/sched/rt.c 	curr = READ_ONCE(rq->curr); /* unlocked access */
rq               1445 kernel/sched/rt.c static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
rq               1451 kernel/sched/rt.c 	if (rq->curr->nr_cpus_allowed == 1 ||
rq               1452 kernel/sched/rt.c 	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
rq               1460 kernel/sched/rt.c 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
rq               1468 kernel/sched/rt.c 	requeue_task_rt(rq, p, 1);
rq               1469 kernel/sched/rt.c 	resched_curr(rq);
rq               1472 kernel/sched/rt.c static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
rq               1474 kernel/sched/rt.c 	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
rq               1481 kernel/sched/rt.c 		rq_unpin_lock(rq, rf);
rq               1482 kernel/sched/rt.c 		pull_rt_task(rq);
rq               1483 kernel/sched/rt.c 		rq_repin_lock(rq, rf);
rq               1486 kernel/sched/rt.c 	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
rq               1493 kernel/sched/rt.c static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
rq               1495 kernel/sched/rt.c 	if (p->prio < rq->curr->prio) {
rq               1496 kernel/sched/rt.c 		resched_curr(rq);
rq               1513 kernel/sched/rt.c 	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
rq               1514 kernel/sched/rt.c 		check_preempt_equal_prio(rq, p);
rq               1518 kernel/sched/rt.c static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
rq               1520 kernel/sched/rt.c 	p->se.exec_start = rq_clock_task(rq);
rq               1523 kernel/sched/rt.c 	dequeue_pushable_task(rq, p);
rq               1533 kernel/sched/rt.c 	if (rq->curr->sched_class != &rt_sched_class)
rq               1534 kernel/sched/rt.c 		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
rq               1536 kernel/sched/rt.c 	rt_queue_push_tasks(rq);
rq               1539 kernel/sched/rt.c static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
rq               1556 kernel/sched/rt.c static struct task_struct *_pick_next_task_rt(struct rq *rq)
rq               1559 kernel/sched/rt.c 	struct rt_rq *rt_rq  = &rq->rt;
rq               1562 kernel/sched/rt.c 		rt_se = pick_next_rt_entity(rq, rt_rq);
rq               1571 kernel/sched/rt.c pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq               1577 kernel/sched/rt.c 	if (!sched_rt_runnable(rq))
rq               1580 kernel/sched/rt.c 	p = _pick_next_task_rt(rq);
rq               1581 kernel/sched/rt.c 	set_next_task_rt(rq, p, true);
rq               1585 kernel/sched/rt.c static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
rq               1587 kernel/sched/rt.c 	update_curr_rt(rq);
rq               1589 kernel/sched/rt.c 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
rq               1596 kernel/sched/rt.c 		enqueue_pushable_task(rq, p);
rq               1604 kernel/sched/rt.c static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
rq               1606 kernel/sched/rt.c 	if (!task_running(rq, p) &&
rq               1617 kernel/sched/rt.c static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
rq               1619 kernel/sched/rt.c 	struct plist_head *head = &rq->rt.pushable_tasks;
rq               1622 kernel/sched/rt.c 	if (!has_pushable_tasks(rq))
rq               1626 kernel/sched/rt.c 		if (pick_rt_task(rq, p, cpu))
rq               1711 kernel/sched/rt.c static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
rq               1713 kernel/sched/rt.c 	struct rq *lowest_rq = NULL;
rq               1720 kernel/sched/rt.c 		if ((cpu == -1) || (cpu == rq->cpu))
rq               1736 kernel/sched/rt.c 		if (double_lock_balance(rq, lowest_rq)) {
rq               1743 kernel/sched/rt.c 			if (unlikely(task_rq(task) != rq ||
rq               1745 kernel/sched/rt.c 				     task_running(rq, task) ||
rq               1749 kernel/sched/rt.c 				double_unlock_balance(rq, lowest_rq);
rq               1760 kernel/sched/rt.c 		double_unlock_balance(rq, lowest_rq);
rq               1767 kernel/sched/rt.c static struct task_struct *pick_next_pushable_task(struct rq *rq)
rq               1771 kernel/sched/rt.c 	if (!has_pushable_tasks(rq))
rq               1774 kernel/sched/rt.c 	p = plist_first_entry(&rq->rt.pushable_tasks,
rq               1777 kernel/sched/rt.c 	BUG_ON(rq->cpu != task_cpu(p));
rq               1778 kernel/sched/rt.c 	BUG_ON(task_current(rq, p));
rq               1792 kernel/sched/rt.c static int push_rt_task(struct rq *rq)
rq               1795 kernel/sched/rt.c 	struct rq *lowest_rq;
rq               1798 kernel/sched/rt.c 	if (!rq->rt.overloaded)
rq               1801 kernel/sched/rt.c 	next_task = pick_next_pushable_task(rq);
rq               1806 kernel/sched/rt.c 	if (WARN_ON(next_task == rq->curr))
rq               1814 kernel/sched/rt.c 	if (unlikely(next_task->prio < rq->curr->prio)) {
rq               1815 kernel/sched/rt.c 		resched_curr(rq);
rq               1823 kernel/sched/rt.c 	lowest_rq = find_lock_lowest_rq(next_task, rq);
rq               1834 kernel/sched/rt.c 		task = pick_next_pushable_task(rq);
rq               1857 kernel/sched/rt.c 	deactivate_task(rq, next_task, 0);
rq               1864 kernel/sched/rt.c 	double_unlock_balance(rq, lowest_rq);
rq               1872 kernel/sched/rt.c static void push_rt_tasks(struct rq *rq)
rq               1875 kernel/sched/rt.c 	while (push_rt_task(rq))
rq               1979 kernel/sched/rt.c static void tell_cpu_to_push(struct rq *rq)
rq               1984 kernel/sched/rt.c 	atomic_inc(&rq->rd->rto_loop_next);
rq               1987 kernel/sched/rt.c 	if (!rto_start_trylock(&rq->rd->rto_loop_start))
rq               1990 kernel/sched/rt.c 	raw_spin_lock(&rq->rd->rto_lock);
rq               1998 kernel/sched/rt.c 	if (rq->rd->rto_cpu < 0)
rq               1999 kernel/sched/rt.c 		cpu = rto_next_cpu(rq->rd);
rq               2001 kernel/sched/rt.c 	raw_spin_unlock(&rq->rd->rto_lock);
rq               2003 kernel/sched/rt.c 	rto_start_unlock(&rq->rd->rto_loop_start);
rq               2007 kernel/sched/rt.c 		sched_get_rd(rq->rd);
rq               2008 kernel/sched/rt.c 		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
rq               2017 kernel/sched/rt.c 	struct rq *rq;
rq               2020 kernel/sched/rt.c 	rq = this_rq();
rq               2026 kernel/sched/rt.c 	if (has_pushable_tasks(rq)) {
rq               2027 kernel/sched/rt.c 		raw_spin_lock(&rq->lock);
rq               2028 kernel/sched/rt.c 		push_rt_tasks(rq);
rq               2029 kernel/sched/rt.c 		raw_spin_unlock(&rq->lock);
rq               2049 kernel/sched/rt.c static void pull_rt_task(struct rq *this_rq)
rq               2054 kernel/sched/rt.c 	struct rq *src_rq;
rq               2151 kernel/sched/rt.c static void task_woken_rt(struct rq *rq, struct task_struct *p)
rq               2153 kernel/sched/rt.c 	if (!task_running(rq, p) &&
rq               2154 kernel/sched/rt.c 	    !test_tsk_need_resched(rq->curr) &&
rq               2156 kernel/sched/rt.c 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
rq               2157 kernel/sched/rt.c 	    (rq->curr->nr_cpus_allowed < 2 ||
rq               2158 kernel/sched/rt.c 	     rq->curr->prio <= p->prio))
rq               2159 kernel/sched/rt.c 		push_rt_tasks(rq);
rq               2163 kernel/sched/rt.c static void rq_online_rt(struct rq *rq)
rq               2165 kernel/sched/rt.c 	if (rq->rt.overloaded)
rq               2166 kernel/sched/rt.c 		rt_set_overload(rq);
rq               2168 kernel/sched/rt.c 	__enable_runtime(rq);
rq               2170 kernel/sched/rt.c 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
rq               2174 kernel/sched/rt.c static void rq_offline_rt(struct rq *rq)
rq               2176 kernel/sched/rt.c 	if (rq->rt.overloaded)
rq               2177 kernel/sched/rt.c 		rt_clear_overload(rq);
rq               2179 kernel/sched/rt.c 	__disable_runtime(rq);
rq               2181 kernel/sched/rt.c 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
rq               2188 kernel/sched/rt.c static void switched_from_rt(struct rq *rq, struct task_struct *p)
rq               2197 kernel/sched/rt.c 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
rq               2200 kernel/sched/rt.c 	rt_queue_pull_task(rq);
rq               2219 kernel/sched/rt.c static void switched_to_rt(struct rq *rq, struct task_struct *p)
rq               2228 kernel/sched/rt.c 	if (task_on_rq_queued(p) && rq->curr != p) {
rq               2230 kernel/sched/rt.c 		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
rq               2231 kernel/sched/rt.c 			rt_queue_push_tasks(rq);
rq               2233 kernel/sched/rt.c 		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
rq               2234 kernel/sched/rt.c 			resched_curr(rq);
rq               2243 kernel/sched/rt.c prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
rq               2248 kernel/sched/rt.c 	if (rq->curr == p) {
rq               2255 kernel/sched/rt.c 			rt_queue_pull_task(rq);
rq               2261 kernel/sched/rt.c 		if (p->prio > rq->rt.highest_prio.curr)
rq               2262 kernel/sched/rt.c 			resched_curr(rq);
rq               2266 kernel/sched/rt.c 			resched_curr(rq);
rq               2274 kernel/sched/rt.c 		if (p->prio < rq->curr->prio)
rq               2275 kernel/sched/rt.c 			resched_curr(rq);
rq               2280 kernel/sched/rt.c static void watchdog(struct rq *rq, struct task_struct *p)
rq               2304 kernel/sched/rt.c static inline void watchdog(struct rq *rq, struct task_struct *p) { }
rq               2315 kernel/sched/rt.c static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
rq               2319 kernel/sched/rt.c 	update_curr_rt(rq);
rq               2320 kernel/sched/rt.c 	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
rq               2322 kernel/sched/rt.c 	watchdog(rq, p);
rq               2342 kernel/sched/rt.c 			requeue_task_rt(rq, p, 0);
rq               2343 kernel/sched/rt.c 			resched_curr(rq);
rq               2349 kernel/sched/rt.c static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
rq                 84 kernel/sched/sched.h struct rq;
rq                 96 kernel/sched/sched.h extern void calc_global_load_tick(struct rq *this_rq);
rq                 97 kernel/sched/sched.h extern long calc_load_fold_active(struct rq *this_rq, long adjust);
rq                558 kernel/sched/sched.h 	struct rq		*rq;	/* CPU runqueue to which this cfs_rq is attached */
rq                627 kernel/sched/sched.h 	struct rq		*rq;
rq                795 kernel/sched/sched.h extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
rq               1013 kernel/sched/sched.h static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
rq               1015 kernel/sched/sched.h 	return cfs_rq->rq;
rq               1020 kernel/sched/sched.h static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
rq               1022 kernel/sched/sched.h 	return container_of(cfs_rq, struct rq, cfs);
rq               1026 kernel/sched/sched.h static inline int cpu_of(struct rq *rq)
rq               1029 kernel/sched/sched.h 	return rq->cpu;
rq               1037 kernel/sched/sched.h extern void __update_idle_core(struct rq *rq);
rq               1039 kernel/sched/sched.h static inline void update_idle_core(struct rq *rq)
rq               1042 kernel/sched/sched.h 		__update_idle_core(rq);
rq               1046 kernel/sched/sched.h static inline void update_idle_core(struct rq *rq) { }
rq               1049 kernel/sched/sched.h DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
rq               1057 kernel/sched/sched.h extern void update_rq_clock(struct rq *rq);
rq               1059 kernel/sched/sched.h static inline u64 __rq_clock_broken(struct rq *rq)
rq               1061 kernel/sched/sched.h 	return READ_ONCE(rq->clock);
rq               1091 kernel/sched/sched.h static inline void assert_clock_updated(struct rq *rq)
rq               1097 kernel/sched/sched.h 	SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
rq               1100 kernel/sched/sched.h static inline u64 rq_clock(struct rq *rq)
rq               1102 kernel/sched/sched.h 	lockdep_assert_held(&rq->lock);
rq               1103 kernel/sched/sched.h 	assert_clock_updated(rq);
rq               1105 kernel/sched/sched.h 	return rq->clock;
rq               1108 kernel/sched/sched.h static inline u64 rq_clock_task(struct rq *rq)
rq               1110 kernel/sched/sched.h 	lockdep_assert_held(&rq->lock);
rq               1111 kernel/sched/sched.h 	assert_clock_updated(rq);
rq               1113 kernel/sched/sched.h 	return rq->clock_task;
rq               1116 kernel/sched/sched.h static inline void rq_clock_skip_update(struct rq *rq)
rq               1118 kernel/sched/sched.h 	lockdep_assert_held(&rq->lock);
rq               1119 kernel/sched/sched.h 	rq->clock_update_flags |= RQCF_REQ_SKIP;
rq               1126 kernel/sched/sched.h static inline void rq_clock_cancel_skipupdate(struct rq *rq)
rq               1128 kernel/sched/sched.h 	lockdep_assert_held(&rq->lock);
rq               1129 kernel/sched/sched.h 	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
rq               1145 kernel/sched/sched.h static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
rq               1147 kernel/sched/sched.h 	rf->cookie = lockdep_pin_lock(&rq->lock);
rq               1150 kernel/sched/sched.h 	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
rq               1155 kernel/sched/sched.h static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
rq               1158 kernel/sched/sched.h 	if (rq->clock_update_flags > RQCF_ACT_SKIP)
rq               1162 kernel/sched/sched.h 	lockdep_unpin_lock(&rq->lock, rf->cookie);
rq               1165 kernel/sched/sched.h static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
rq               1167 kernel/sched/sched.h 	lockdep_repin_lock(&rq->lock, rf->cookie);
rq               1173 kernel/sched/sched.h 	rq->clock_update_flags |= rf->clock_update_flags;
rq               1177 kernel/sched/sched.h struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
rq               1178 kernel/sched/sched.h 	__acquires(rq->lock);
rq               1180 kernel/sched/sched.h struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
rq               1182 kernel/sched/sched.h 	__acquires(rq->lock);
rq               1184 kernel/sched/sched.h static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
rq               1185 kernel/sched/sched.h 	__releases(rq->lock)
rq               1187 kernel/sched/sched.h 	rq_unpin_lock(rq, rf);
rq               1188 kernel/sched/sched.h 	raw_spin_unlock(&rq->lock);
rq               1192 kernel/sched/sched.h task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
rq               1193 kernel/sched/sched.h 	__releases(rq->lock)
rq               1196 kernel/sched/sched.h 	rq_unpin_lock(rq, rf);
rq               1197 kernel/sched/sched.h 	raw_spin_unlock(&rq->lock);
rq               1202 kernel/sched/sched.h rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
rq               1203 kernel/sched/sched.h 	__acquires(rq->lock)
rq               1205 kernel/sched/sched.h 	raw_spin_lock_irqsave(&rq->lock, rf->flags);
rq               1206 kernel/sched/sched.h 	rq_pin_lock(rq, rf);
rq               1210 kernel/sched/sched.h rq_lock_irq(struct rq *rq, struct rq_flags *rf)
rq               1211 kernel/sched/sched.h 	__acquires(rq->lock)
rq               1213 kernel/sched/sched.h 	raw_spin_lock_irq(&rq->lock);
rq               1214 kernel/sched/sched.h 	rq_pin_lock(rq, rf);
rq               1218 kernel/sched/sched.h rq_lock(struct rq *rq, struct rq_flags *rf)
rq               1219 kernel/sched/sched.h 	__acquires(rq->lock)
rq               1221 kernel/sched/sched.h 	raw_spin_lock(&rq->lock);
rq               1222 kernel/sched/sched.h 	rq_pin_lock(rq, rf);
rq               1226 kernel/sched/sched.h rq_relock(struct rq *rq, struct rq_flags *rf)
rq               1227 kernel/sched/sched.h 	__acquires(rq->lock)
rq               1229 kernel/sched/sched.h 	raw_spin_lock(&rq->lock);
rq               1230 kernel/sched/sched.h 	rq_repin_lock(rq, rf);
rq               1234 kernel/sched/sched.h rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
rq               1235 kernel/sched/sched.h 	__releases(rq->lock)
rq               1237 kernel/sched/sched.h 	rq_unpin_lock(rq, rf);
rq               1238 kernel/sched/sched.h 	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
rq               1242 kernel/sched/sched.h rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
rq               1243 kernel/sched/sched.h 	__releases(rq->lock)
rq               1245 kernel/sched/sched.h 	rq_unpin_lock(rq, rf);
rq               1246 kernel/sched/sched.h 	raw_spin_unlock_irq(&rq->lock);
rq               1250 kernel/sched/sched.h rq_unlock(struct rq *rq, struct rq_flags *rf)
rq               1251 kernel/sched/sched.h 	__releases(rq->lock)
rq               1253 kernel/sched/sched.h 	rq_unpin_lock(rq, rf);
rq               1254 kernel/sched/sched.h 	raw_spin_unlock(&rq->lock);
rq               1257 kernel/sched/sched.h static inline struct rq *
rq               1259 kernel/sched/sched.h 	__acquires(rq->lock)
rq               1261 kernel/sched/sched.h 	struct rq *rq;
rq               1264 kernel/sched/sched.h 	rq = this_rq();
rq               1265 kernel/sched/sched.h 	rq_lock(rq, rf);
rq               1266 kernel/sched/sched.h 	return rq;
rq               1315 kernel/sched/sched.h queue_balance_callback(struct rq *rq,
rq               1317 kernel/sched/sched.h 		       void (*func)(struct rq *rq))
rq               1319 kernel/sched/sched.h 	lockdep_assert_held(&rq->lock);
rq               1325 kernel/sched/sched.h 	head->next = rq->balance_callback;
rq               1326 kernel/sched/sched.h 	rq->balance_callback = head;
rq               1468 kernel/sched/sched.h extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
rq               1474 kernel/sched/sched.h static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; }
rq               1623 kernel/sched/sched.h static inline int task_current(struct rq *rq, struct task_struct *p)
rq               1625 kernel/sched/sched.h 	return rq->curr == p;
rq               1628 kernel/sched/sched.h static inline int task_running(struct rq *rq, struct task_struct *p)
rq               1633 kernel/sched/sched.h 	return task_current(rq, p);
rq               1715 kernel/sched/sched.h 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
rq               1716 kernel/sched/sched.h 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
rq               1717 kernel/sched/sched.h 	void (*yield_task)   (struct rq *rq);
rq               1718 kernel/sched/sched.h 	bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
rq               1720 kernel/sched/sched.h 	void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
rq               1733 kernel/sched/sched.h 	struct task_struct * (*pick_next_task)(struct rq *rq,
rq               1736 kernel/sched/sched.h 	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
rq               1737 kernel/sched/sched.h 	void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
rq               1740 kernel/sched/sched.h 	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
rq               1744 kernel/sched/sched.h 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
rq               1749 kernel/sched/sched.h 	void (*rq_online)(struct rq *rq);
rq               1750 kernel/sched/sched.h 	void (*rq_offline)(struct rq *rq);
rq               1753 kernel/sched/sched.h 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
rq               1762 kernel/sched/sched.h 	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
rq               1763 kernel/sched/sched.h 	void (*switched_to)  (struct rq *this_rq, struct task_struct *task);
rq               1764 kernel/sched/sched.h 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
rq               1767 kernel/sched/sched.h 	unsigned int (*get_rr_interval)(struct rq *rq,
rq               1770 kernel/sched/sched.h 	void (*update_curr)(struct rq *rq);
rq               1780 kernel/sched/sched.h static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
rq               1782 kernel/sched/sched.h 	WARN_ON_ONCE(rq->curr != prev);
rq               1783 kernel/sched/sched.h 	prev->sched_class->put_prev_task(rq, prev);
rq               1786 kernel/sched/sched.h static inline void set_next_task(struct rq *rq, struct task_struct *next)
rq               1788 kernel/sched/sched.h 	WARN_ON_ONCE(rq->curr != next);
rq               1789 kernel/sched/sched.h 	next->sched_class->set_next_task(rq, next, false);
rq               1810 kernel/sched/sched.h static inline bool sched_stop_runnable(struct rq *rq)
rq               1812 kernel/sched/sched.h 	return rq->stop && task_on_rq_queued(rq->stop);
rq               1815 kernel/sched/sched.h static inline bool sched_dl_runnable(struct rq *rq)
rq               1817 kernel/sched/sched.h 	return rq->dl.dl_nr_running > 0;
rq               1820 kernel/sched/sched.h static inline bool sched_rt_runnable(struct rq *rq)
rq               1822 kernel/sched/sched.h 	return rq->rt.rt_queued > 0;
rq               1825 kernel/sched/sched.h static inline bool sched_fair_runnable(struct rq *rq)
rq               1827 kernel/sched/sched.h 	return rq->cfs.nr_running > 0;
rq               1834 kernel/sched/sched.h extern void trigger_load_balance(struct rq *rq);
rq               1841 kernel/sched/sched.h static inline void idle_set_state(struct rq *rq,
rq               1844 kernel/sched/sched.h 	rq->idle_state = idle_state;
rq               1847 kernel/sched/sched.h static inline struct cpuidle_state *idle_get_state(struct rq *rq)
rq               1851 kernel/sched/sched.h 	return rq->idle_state;
rq               1854 kernel/sched/sched.h static inline void idle_set_state(struct rq *rq,
rq               1859 kernel/sched/sched.h static inline struct cpuidle_state *idle_get_state(struct rq *rq)
rq               1877 kernel/sched/sched.h extern void resched_curr(struct rq *rq);
rq               1898 kernel/sched/sched.h extern bool sched_can_stop_tick(struct rq *rq);
rq               1906 kernel/sched/sched.h static inline void sched_update_tick_dependency(struct rq *rq)
rq               1913 kernel/sched/sched.h 	cpu = cpu_of(rq);
rq               1918 kernel/sched/sched.h 	if (sched_can_stop_tick(rq))
rq               1925 kernel/sched/sched.h static inline void sched_update_tick_dependency(struct rq *rq) { }
rq               1928 kernel/sched/sched.h static inline void add_nr_running(struct rq *rq, unsigned count)
rq               1930 kernel/sched/sched.h 	unsigned prev_nr = rq->nr_running;
rq               1932 kernel/sched/sched.h 	rq->nr_running = prev_nr + count;
rq               1935 kernel/sched/sched.h 	if (prev_nr < 2 && rq->nr_running >= 2) {
rq               1936 kernel/sched/sched.h 		if (!READ_ONCE(rq->rd->overload))
rq               1937 kernel/sched/sched.h 			WRITE_ONCE(rq->rd->overload, 1);
rq               1941 kernel/sched/sched.h 	sched_update_tick_dependency(rq);
rq               1944 kernel/sched/sched.h static inline void sub_nr_running(struct rq *rq, unsigned count)
rq               1946 kernel/sched/sched.h 	rq->nr_running -= count;
rq               1948 kernel/sched/sched.h 	sched_update_tick_dependency(rq);
rq               1951 kernel/sched/sched.h extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
rq               1952 kernel/sched/sched.h extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
rq               1954 kernel/sched/sched.h extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
rq               1966 kernel/sched/sched.h static inline int hrtick_enabled(struct rq *rq)
rq               1970 kernel/sched/sched.h 	if (!cpu_active(cpu_of(rq)))
rq               1972 kernel/sched/sched.h 	return hrtimer_is_hres_active(&rq->hrtick_timer);
rq               1975 kernel/sched/sched.h void hrtick_start(struct rq *rq, u64 delay);
rq               1979 kernel/sched/sched.h static inline int hrtick_enabled(struct rq *rq)
rq               1997 kernel/sched/sched.h static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
rq               2007 kernel/sched/sched.h static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
rq               2026 kernel/sched/sched.h static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
rq               2052 kernel/sched/sched.h static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
rq               2063 kernel/sched/sched.h static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
rq               2103 kernel/sched/sched.h static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
rq               2128 kernel/sched/sched.h static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
rq               2139 kernel/sched/sched.h extern void set_rq_online (struct rq *rq);
rq               2140 kernel/sched/sched.h extern void set_rq_offline(struct rq *rq);
rq               2151 kernel/sched/sched.h static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
rq               2167 kernel/sched/sched.h static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
rq               2217 kernel/sched/sched.h extern void nohz_balance_exit_idle(struct rq *rq);
rq               2219 kernel/sched/sched.h static inline void nohz_balance_exit_idle(struct rq *rq) { }
rq               2233 kernel/sched/sched.h 		struct rq *rq = cpu_rq(i);
rq               2235 kernel/sched/sched.h 		rq->dl.extra_bw += bw;
rq               2304 kernel/sched/sched.h static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
rq               2309 kernel/sched/sched.h 						  cpu_of(rq)));
rq               2311 kernel/sched/sched.h 		data->func(data, rq_clock(rq), flags);
rq               2314 kernel/sched/sched.h static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
rq               2321 kernel/sched/sched.h unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
rq               2324 kernel/sched/sched.h 	unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
rq               2325 kernel/sched/sched.h 	unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
rq               2343 kernel/sched/sched.h static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
rq               2345 kernel/sched/sched.h 	return uclamp_util_with(rq, util, NULL);
rq               2348 kernel/sched/sched.h static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
rq               2353 kernel/sched/sched.h static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
rq               2395 kernel/sched/sched.h static inline unsigned long cpu_bw_dl(struct rq *rq)
rq               2397 kernel/sched/sched.h 	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
rq               2400 kernel/sched/sched.h static inline unsigned long cpu_util_dl(struct rq *rq)
rq               2402 kernel/sched/sched.h 	return READ_ONCE(rq->avg_dl.util_avg);
rq               2405 kernel/sched/sched.h static inline unsigned long cpu_util_cfs(struct rq *rq)
rq               2407 kernel/sched/sched.h 	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
rq               2411 kernel/sched/sched.h 			     READ_ONCE(rq->cfs.avg.util_est.enqueued));
rq               2417 kernel/sched/sched.h static inline unsigned long cpu_util_rt(struct rq *rq)
rq               2419 kernel/sched/sched.h 	return READ_ONCE(rq->avg_rt.util_avg);
rq               2431 kernel/sched/sched.h static inline unsigned long cpu_util_irq(struct rq *rq)
rq               2433 kernel/sched/sched.h 	return rq->avg_irq.util_avg;
rq               2446 kernel/sched/sched.h static inline unsigned long cpu_util_irq(struct rq *rq)
rq               2483 kernel/sched/sched.h static inline void membarrier_switch_mm(struct rq *rq,
rq               2493 kernel/sched/sched.h 	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
rq               2496 kernel/sched/sched.h 	WRITE_ONCE(rq->membarrier_state, membarrier_state);
rq               2499 kernel/sched/sched.h static inline void membarrier_switch_mm(struct rq *rq,
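
The uclamp_util_with() entries above clamp a raw utilization value into the window defined by the per-rq UCLAMP_MIN and UCLAMP_MAX aggregates. A minimal user-space sketch of just that clamping step, with a hypothetical rq_clamp struct standing in for the relevant struct rq fields (this is an illustration, not the kernel implementation):

#include <stdio.h>

/* Hypothetical stand-in for the per-rq uclamp aggregates in struct rq. */
struct rq_clamp {
	unsigned int min_value;	/* rq->uclamp[UCLAMP_MIN].value */
	unsigned int max_value;	/* rq->uclamp[UCLAMP_MAX].value */
};

/* Clamp a raw utilization into [min, max], mirroring what uclamp_util() does. */
static unsigned int clamp_util(const struct rq_clamp *rc, unsigned int util)
{
	if (util < rc->min_value)
		return rc->min_value;
	if (util > rc->max_value)
		return rc->max_value;
	return util;
}

int main(void)
{
	struct rq_clamp rc = { .min_value = 128, .max_value = 512 };

	printf("%u %u %u\n",
	       clamp_util(&rc, 50),	/* boosted up to 128 */
	       clamp_util(&rc, 300),	/* unchanged */
	       clamp_util(&rc, 900));	/* capped at 512 */
	return 0;
}
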
rq                 23 kernel/sched/stats.c 		struct rq *rq;
rq                 29 kernel/sched/stats.c 		rq = cpu_rq(cpu);
rq                 34 kernel/sched/stats.c 		    cpu, rq->yld_count,
rq                 35 kernel/sched/stats.c 		    rq->sched_count, rq->sched_goidle,
rq                 36 kernel/sched/stats.c 		    rq->ttwu_count, rq->ttwu_local,
rq                 37 kernel/sched/stats.c 		    rq->rq_cpu_time,
rq                 38 kernel/sched/stats.c 		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
rq                  9 kernel/sched/stats.h rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
rq                 11 kernel/sched/stats.h 	if (rq) {
rq                 12 kernel/sched/stats.h 		rq->rq_sched_info.run_delay += delta;
rq                 13 kernel/sched/stats.h 		rq->rq_sched_info.pcount++;
rq                 21 kernel/sched/stats.h rq_sched_info_depart(struct rq *rq, unsigned long long delta)
rq                 23 kernel/sched/stats.h 	if (rq)
rq                 24 kernel/sched/stats.h 		rq->rq_cpu_time += delta;
rq                 28 kernel/sched/stats.h rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
rq                 30 kernel/sched/stats.h 	if (rq)
rq                 31 kernel/sched/stats.h 		rq->rq_sched_info.run_delay += delta;
rq                 44 kernel/sched/stats.h static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
rq                 45 kernel/sched/stats.h static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
rq                 46 kernel/sched/stats.h static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
rq                114 kernel/sched/stats.h 		struct rq *rq;
rq                122 kernel/sched/stats.h 		rq = __task_rq_lock(p, &rf);
rq                125 kernel/sched/stats.h 		__task_rq_unlock(rq, &rf);
rq                129 kernel/sched/stats.h static inline void psi_task_tick(struct rq *rq)
rq                134 kernel/sched/stats.h 	if (unlikely(rq->curr->flags & PF_MEMSTALL))
rq                135 kernel/sched/stats.h 		psi_memstall_tick(rq->curr, cpu_of(rq));
rq                141 kernel/sched/stats.h static inline void psi_task_tick(struct rq *rq) {}
rq                156 kernel/sched/stats.h static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
rq                158 kernel/sched/stats.h 	unsigned long long now = rq_clock(rq), delta = 0;
rq                167 kernel/sched/stats.h 	rq_sched_info_dequeued(rq, delta);
rq                175 kernel/sched/stats.h static void sched_info_arrive(struct rq *rq, struct task_struct *t)
rq                177 kernel/sched/stats.h 	unsigned long long now = rq_clock(rq), delta = 0;
rq                186 kernel/sched/stats.h 	rq_sched_info_arrive(rq, delta);
rq                194 kernel/sched/stats.h static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
rq                198 kernel/sched/stats.h 			t->sched_info.last_queued = rq_clock(rq);
rq                210 kernel/sched/stats.h static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
rq                212 kernel/sched/stats.h 	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;
rq                214 kernel/sched/stats.h 	rq_sched_info_depart(rq, delta);
rq                217 kernel/sched/stats.h 		sched_info_queued(rq, t);
rq                226 kernel/sched/stats.h __sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
rq                233 kernel/sched/stats.h 	if (prev != rq->idle)
rq                234 kernel/sched/stats.h 		sched_info_depart(rq, prev);
rq                236 kernel/sched/stats.h 	if (next != rq->idle)
rq                237 kernel/sched/stats.h 		sched_info_arrive(rq, next);
rq                241 kernel/sched/stats.h sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
rq                244 kernel/sched/stats.h 		__sched_info_switch(rq, prev, next);
rq                248 kernel/sched/stats.h # define sched_info_queued(rq, t)	do { } while (0)
rq                250 kernel/sched/stats.h # define sched_info_dequeued(rq, t)	do { } while (0)
rq                251 kernel/sched/stats.h # define sched_info_depart(rq, t)	do { } while (0)
rq                252 kernel/sched/stats.h # define sched_info_arrive(rq, next)	do { } while (0)
rq                253 kernel/sched/stats.h # define sched_info_switch(rq, t, next)	do { } while (0)
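
The stats.h entries above implement run-delay accounting: a timestamp is recorded when a task is queued, and the difference to its arrival time is added to run_delay. A small user-space model of that bookkeeping, with simplified stand-ins for struct sched_info and rq_clock() (a sketch of the pattern, not the kernel code):

#include <stdio.h>

/* Simplified stand-in for the kernel's sched_info fields. */
struct sched_info {
	unsigned long long last_queued;   /* when the task became runnable */
	unsigned long long last_arrival;  /* when it last got the CPU */
	unsigned long long run_delay;     /* total time spent waiting on a runqueue */
	unsigned long pcount;             /* number of times it ran */
};

/* Models sched_info_queued(): remember when the task hit the runqueue. */
static void model_queued(struct sched_info *si, unsigned long long now)
{
	if (!si->last_queued)
		si->last_queued = now;
}

/* Models sched_info_arrive(): charge the wait and start a new run interval. */
static void model_arrive(struct sched_info *si, unsigned long long now)
{
	if (si->last_queued) {
		si->run_delay += now - si->last_queued;
		si->last_queued = 0;
	}
	si->last_arrival = now;
	si->pcount++;
}

int main(void)
{
	struct sched_info si = { 0 };

	model_queued(&si, 100);  /* woken at t=100 */
	model_arrive(&si, 140);  /* scheduled at t=140 */
	printf("run_delay=%llu pcount=%lu\n", si.run_delay, si.pcount);
	return 0;
}
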
rq                 20 kernel/sched/stop_task.c balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq                 22 kernel/sched/stop_task.c 	return sched_stop_runnable(rq);
rq                 27 kernel/sched/stop_task.c check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
rq                 32 kernel/sched/stop_task.c static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
rq                 34 kernel/sched/stop_task.c 	stop->se.exec_start = rq_clock_task(rq);
rq                 38 kernel/sched/stop_task.c pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
rq                 42 kernel/sched/stop_task.c 	if (!sched_stop_runnable(rq))
rq                 45 kernel/sched/stop_task.c 	set_next_task_stop(rq, rq->stop, true);
rq                 46 kernel/sched/stop_task.c 	return rq->stop;
rq                 50 kernel/sched/stop_task.c enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
rq                 52 kernel/sched/stop_task.c 	add_nr_running(rq, 1);
rq                 56 kernel/sched/stop_task.c dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
rq                 58 kernel/sched/stop_task.c 	sub_nr_running(rq, 1);
rq                 61 kernel/sched/stop_task.c static void yield_task_stop(struct rq *rq)
rq                 66 kernel/sched/stop_task.c static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
rq                 68 kernel/sched/stop_task.c 	struct task_struct *curr = rq->curr;
rq                 71 kernel/sched/stop_task.c 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
rq                 81 kernel/sched/stop_task.c 	curr->se.exec_start = rq_clock_task(rq);
rq                 93 kernel/sched/stop_task.c static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
rq                 97 kernel/sched/stop_task.c static void switched_to_stop(struct rq *rq, struct task_struct *p)
rq                103 kernel/sched/stop_task.c prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
rq                109 kernel/sched/stop_task.c get_rr_interval_stop(struct rq *rq, struct task_struct *task)
rq                114 kernel/sched/stop_task.c static void update_curr_stop(struct rq *rq)
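
The stop_task.c entries above belong to the simplest scheduling class in the tree: the per-CPU stop task is picked whenever it is runnable, and the other hooks do little more than adjust nr_running. A toy user-space model of that "pick the stop task if runnable" decision, with a hypothetical mini_rq struct (illustrative only):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical, heavily reduced runqueue: only the fields this class touches. */
struct mini_rq {
	unsigned int nr_running;
	bool stop_runnable;	/* stands in for sched_stop_runnable(rq) */
	const char *stop;	/* stands in for rq->stop, here just a name */
};

/* Models pick_next_task_stop(): return rq->stop only when it is runnable. */
static const char *pick_next_stop(struct mini_rq *rq)
{
	if (!rq->stop_runnable)
		return NULL;
	return rq->stop;
}

/* Enqueue/dequeue just bump nr_running, as in the real class. */
static void enqueue_stop(struct mini_rq *rq) { rq->nr_running++; }
static void dequeue_stop(struct mini_rq *rq) { rq->nr_running--; }

int main(void)
{
	struct mini_rq rq = { .nr_running = 0, .stop_runnable = false, .stop = "migration/0" };
	const char *next = pick_next_stop(&rq);

	printf("pick: %s\n", next ? next : "(none)");
	enqueue_stop(&rq);
	rq.stop_runnable = true;
	printf("pick: %s, nr_running=%u\n", pick_next_stop(&rq), rq.nr_running);
	dequeue_stop(&rq);
	return 0;
}
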
rq                440 kernel/sched/topology.c void rq_attach_root(struct rq *rq, struct root_domain *rd)
rq                445 kernel/sched/topology.c 	raw_spin_lock_irqsave(&rq->lock, flags);
rq                447 kernel/sched/topology.c 	if (rq->rd) {
rq                448 kernel/sched/topology.c 		old_rd = rq->rd;
rq                450 kernel/sched/topology.c 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
rq                451 kernel/sched/topology.c 			set_rq_offline(rq);
rq                453 kernel/sched/topology.c 		cpumask_clear_cpu(rq->cpu, old_rd->span);
rq                465 kernel/sched/topology.c 	rq->rd = rd;
rq                467 kernel/sched/topology.c 	cpumask_set_cpu(rq->cpu, rd->span);
rq                468 kernel/sched/topology.c 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
rq                469 kernel/sched/topology.c 		set_rq_online(rq);
rq                471 kernel/sched/topology.c 	raw_spin_unlock_irqrestore(&rq->lock, flags);
rq                663 kernel/sched/topology.c 	struct rq *rq = cpu_rq(cpu);
rq                698 kernel/sched/topology.c 	rq_attach_root(rq, rd);
rq                699 kernel/sched/topology.c 	tmp = rq->sd;
rq                700 kernel/sched/topology.c 	rcu_assign_pointer(rq->sd, sd);
rq               1990 kernel/sched/topology.c 	struct rq *rq = NULL;
rq               2059 kernel/sched/topology.c 		rq = cpu_rq(i);
rq               2063 kernel/sched/topology.c 		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
rq               2064 kernel/sched/topology.c 			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
rq               2073 kernel/sched/topology.c 	if (rq && sched_debug_enabled) {
rq               2075 kernel/sched/topology.c 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
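
The rq_attach_root() entries above move a runqueue from its old root_domain to a new one: take the rq offline in the old domain, drop its CPU from the old span, then add it to the new span and bring it back online if the CPU is active. A compact user-space model of that sequence, using plain bitmasks in place of cpumasks and skipping the locking and refcounting the real code needs:

#include <stdio.h>

/* Hypothetical reduced root_domain: span and online as plain bitmasks. */
struct root_domain { unsigned long span, online; };
struct mini_rq { int cpu; struct root_domain *rd; int online; };

static void set_rq_offline_model(struct mini_rq *rq) { rq->online = 0; }
static void set_rq_online_model(struct mini_rq *rq)  { rq->online = 1; }

/* Mirrors the shape of rq_attach_root(): detach from the old domain, attach to the new. */
static void attach_root(struct mini_rq *rq, struct root_domain *rd, unsigned long active_mask)
{
	unsigned long bit = 1UL << rq->cpu;

	if (rq->rd) {
		if (rq->rd->online & bit)
			set_rq_offline_model(rq);
		rq->rd->span &= ~bit;
	}

	rq->rd = rd;
	rd->span |= bit;
	if (active_mask & bit)		/* stands in for cpu_active_mask test */
		set_rq_online_model(rq);
}

int main(void)
{
	struct root_domain old_rd = { .span = 0x1, .online = 0x1 };
	struct root_domain new_rd = { 0 };
	struct mini_rq rq = { .cpu = 0, .rd = &old_rd, .online = 1 };

	attach_root(&rq, &new_rd, 0x1);
	printf("old span=%lx new span=%lx online=%d\n", old_rd.span, new_rd.span, rq.online);
	return 0;
}
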
rq                780 kernel/trace/blktrace.c blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
rq                782 kernel/trace/blktrace.c 	if (!rq->bio)
rq                785 kernel/trace/blktrace.c 	return blk_trace_bio_get_cgid(q, rq->bio);
rq                804 kernel/trace/blktrace.c static void blk_add_trace_rq(struct request *rq, int error,
rq                811 kernel/trace/blktrace.c 	bt = rcu_dereference(rq->q->blk_trace);
rq                817 kernel/trace/blktrace.c 	if (blk_rq_is_passthrough(rq))
rq                822 kernel/trace/blktrace.c 	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
rq                823 kernel/trace/blktrace.c 			rq->cmd_flags, what, error, 0, NULL, cgid);
rq                828 kernel/trace/blktrace.c 				    struct request_queue *q, struct request *rq)
rq                830 kernel/trace/blktrace.c 	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
rq                831 kernel/trace/blktrace.c 			 blk_trace_request_get_cgid(q, rq));
rq                835 kernel/trace/blktrace.c 				   struct request_queue *q, struct request *rq)
rq                837 kernel/trace/blktrace.c 	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
rq                838 kernel/trace/blktrace.c 			 blk_trace_request_get_cgid(q, rq));
rq                843 kernel/trace/blktrace.c 				     struct request *rq)
rq                845 kernel/trace/blktrace.c 	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
rq                846 kernel/trace/blktrace.c 			 blk_trace_request_get_cgid(q, rq));
rq                849 kernel/trace/blktrace.c static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
rq                852 kernel/trace/blktrace.c 	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
rq                853 kernel/trace/blktrace.c 			 blk_trace_request_get_cgid(rq->q, rq));
rq                900 kernel/trace/blktrace.c 					struct request *rq,
rq                908 kernel/trace/blktrace.c 					 struct request *rq,
rq               1060 kernel/trace/blktrace.c 				   struct request *rq, dev_t dev,
rq               1074 kernel/trace/blktrace.c 	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
rq               1077 kernel/trace/blktrace.c 	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
rq               1078 kernel/trace/blktrace.c 			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
rq               1079 kernel/trace/blktrace.c 			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
rq               1095 kernel/trace/blktrace.c 			 struct request *rq,
rq               1107 kernel/trace/blktrace.c 	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
rq               1109 kernel/trace/blktrace.c 				blk_trace_request_get_cgid(q, rq));
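
The blktrace.c entries above funnel all request-level events through one helper, blk_add_trace_rq(), with thin per-event wrappers that differ only in the action code they pass. A user-space sketch of that fan-in pattern, with hypothetical action constants and a stub emit function in place of __blk_add_trace():

#include <stdio.h>

/* Hypothetical action codes standing in for BLK_TA_INSERT and friends. */
enum { TA_INSERT, TA_ISSUE, TA_REQUEUE, TA_COMPLETE };

/* Toy request: just enough state for the trace record. */
struct sample_req { unsigned long long sector; unsigned int bytes; };

/* Single funnel, as blk_add_trace_rq() is for the real tracepoints. */
static void add_trace_rq(struct sample_req *rq, int error, unsigned int nr_bytes, int what)
{
	printf("sector=%llu bytes=%u what=%d error=%d\n",
	       rq->sector, nr_bytes, what, error);
}

/* Thin wrappers: only the action code differs. */
static void trace_rq_insert(struct sample_req *rq)  { add_trace_rq(rq, 0, rq->bytes, TA_INSERT); }
static void trace_rq_issue(struct sample_req *rq)   { add_trace_rq(rq, 0, rq->bytes, TA_ISSUE); }
static void trace_rq_requeue(struct sample_req *rq) { add_trace_rq(rq, 0, rq->bytes, TA_REQUEUE); }
static void trace_rq_complete(struct sample_req *rq, int error, unsigned int nr_bytes)
{
	add_trace_rq(rq, error, nr_bytes, TA_COMPLETE);
}

int main(void)
{
	struct sample_req rq = { .sector = 2048, .bytes = 4096 };

	trace_rq_insert(&rq);
	trace_rq_issue(&rq);
	trace_rq_complete(&rq, 0, 4096);
	trace_rq_requeue(&rq);
	return 0;
}
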
rq                129 net/9p/trans_fd.c 	struct work_struct rq;
rq                281 net/9p/trans_fd.c 	m = container_of(work, struct p9_conn, rq);
rq                392 net/9p/trans_fd.c 			schedule_work(&m->rq);
rq                588 net/9p/trans_fd.c 	INIT_WORK(&m->rq, p9_read_work);
rq                630 net/9p/trans_fd.c 			schedule_work(&m->rq);
rq                863 net/9p/trans_fd.c 	cancel_work_sync(&m->rq);
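
The trans_fd.c entries above use the classic embedded work-item pattern: the read work m->rq is scheduled when data arrives, and the handler recovers the owning p9_conn with container_of(work, struct p9_conn, rq). A self-contained user-space demonstration of that pointer recovery, with container_of spelled out and a hypothetical conn struct in place of the kernel types:

#include <stdio.h>
#include <stddef.h>

/* Standard container_of idiom, spelled out for user space. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };		/* stand-in for struct work_struct */

struct conn {				/* stand-in for struct p9_conn */
	int fd;
	struct work rq;			/* embedded read-work item */
};

/* The handler receives only the work pointer and recovers its owner. */
static void read_work(struct work *w)
{
	struct conn *m = container_of(w, struct conn, rq);

	printf("read work for conn fd=%d\n", m->fd);
}

int main(void)
{
	struct conn c = { .fd = 7 };

	/* The real code would schedule_work(&m->rq); here we invoke the handler directly. */
	read_work(&c.rq);
	return 0;
}
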
rq                227 net/atm/common.c 	struct sk_buff_head queue, *rq;
rq                232 net/atm/common.c 	rq = &sk_atm(vcc)->sk_receive_queue;
rq                234 net/atm/common.c 	spin_lock_irqsave(&rq->lock, flags);
rq                235 net/atm/common.c 	skb_queue_splice_init(rq, &queue);
rq                236 net/atm/common.c 	spin_unlock_irqrestore(&rq->lock, flags);
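
The net/atm/common.c entries above drain sk_receive_queue by splicing it onto a private list while holding the queue lock, then handling the packets with the lock released. A small pthread-based model of that drain-then-process pattern, using a toy linked list instead of sk_buff_head and a mutex instead of the irq-safe spinlock:

#include <stdio.h>
#include <pthread.h>

/* Toy node standing in for an sk_buff. */
struct pkt { int id; struct pkt *next; };

/* Toy queue standing in for sk_buff_head: head pointer plus its lock. */
struct pkt_queue {
	struct pkt *head;
	pthread_mutex_t lock;
};

/* Splice: steal the whole list under the lock, leaving the queue empty. */
static struct pkt *queue_splice_init(struct pkt_queue *rq)
{
	struct pkt *batch;

	pthread_mutex_lock(&rq->lock);
	batch = rq->head;
	rq->head = NULL;
	pthread_mutex_unlock(&rq->lock);
	return batch;
}

int main(void)
{
	struct pkt a = { .id = 1 }, b = { .id = 2, .next = NULL };
	struct pkt_queue rq = { .head = &a, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct pkt *p;

	a.next = &b;

	/* Process outside the lock, as the surrounding helper does. */
	for (p = queue_splice_init(&rq); p; p = p->next)
		printf("handling pkt %d\n", p->id);
	return 0;
}
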
rq                109 net/bridge/br_ioctl.c static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                116 net/bridge/br_ioctl.c 	if (copy_from_user(args, rq->ifr_data, sizeof(args)))
rq                382 net/bridge/br_ioctl.c int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rq                388 net/bridge/br_ioctl.c 		return old_dev_ioctl(dev, rq, cmd);
rq                392 net/bridge/br_ioctl.c 		return add_del_if(br, rq->ifr_ifindex, cmd == SIOCBRADDIF);
rq                653 net/bridge/br_private.h int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
rq                265 net/dccp/dccp.h int dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
rq               1937 net/key/af_key.c parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
rq               1946 net/key/af_key.c 	if (rq->sadb_x_ipsecrequest_mode == 0)
rq               1948 net/key/af_key.c 	if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
rq               1951 net/key/af_key.c 	t->id.proto = rq->sadb_x_ipsecrequest_proto;
rq               1952 net/key/af_key.c 	if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
rq               1955 net/key/af_key.c 	if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE)
rq               1957 net/key/af_key.c 	else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
rq               1958 net/key/af_key.c 		t->reqid = rq->sadb_x_ipsecrequest_reqid;
rq               1970 net/key/af_key.c 			(struct sockaddr *)(rq + 1),
rq               1971 net/key/af_key.c 			rq->sadb_x_ipsecrequest_len - sizeof(*rq),
rq               1989 net/key/af_key.c 	struct sadb_x_ipsecrequest *rq = (void*)(pol+1);
rq               1994 net/key/af_key.c 	while (len >= sizeof(*rq)) {
rq               1995 net/key/af_key.c 		if (len < rq->sadb_x_ipsecrequest_len ||
rq               1996 net/key/af_key.c 		    rq->sadb_x_ipsecrequest_len < sizeof(*rq))
rq               1999 net/key/af_key.c 		if ((err = parse_ipsecrequest(xp, rq)) < 0)
rq               2001 net/key/af_key.c 		len -= rq->sadb_x_ipsecrequest_len;
rq               2002 net/key/af_key.c 		rq = (void*)((u8*)rq + rq->sadb_x_ipsecrequest_len);
rq               2148 net/key/af_key.c 		struct sadb_x_ipsecrequest *rq;
rq               2159 net/key/af_key.c 		rq = skb_put(skb, req_size);
rq               2161 net/key/af_key.c 		memset(rq, 0, sizeof(*rq));
rq               2162 net/key/af_key.c 		rq->sadb_x_ipsecrequest_len = req_size;
rq               2163 net/key/af_key.c 		rq->sadb_x_ipsecrequest_proto = t->id.proto;
rq               2166 net/key/af_key.c 		rq->sadb_x_ipsecrequest_mode = mode;
rq               2167 net/key/af_key.c 		rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE;
rq               2169 net/key/af_key.c 			rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE;
rq               2171 net/key/af_key.c 			rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE;
rq               2172 net/key/af_key.c 		rq->sadb_x_ipsecrequest_reqid = t->reqid;
rq               2175 net/key/af_key.c 			u8 *sa = (void *)(rq + 1);
rq               2545 net/key/af_key.c 	struct sadb_x_ipsecrequest *rq;
rq               2599 net/key/af_key.c 	rq = (struct sadb_x_ipsecrequest *)(pol + 1);
rq               2606 net/key/af_key.c 		ret = ipsecrequests_to_migrate(rq, len, &m[i]);
rq               2611 net/key/af_key.c 			rq = (struct sadb_x_ipsecrequest *)((u8 *)rq + ret);
rq               3498 net/key/af_key.c 	struct sadb_x_ipsecrequest *rq;
rq               3506 net/key/af_key.c 	rq = skb_put_zero(skb, size_req);
rq               3507 net/key/af_key.c 	rq->sadb_x_ipsecrequest_len = size_req;
rq               3508 net/key/af_key.c 	rq->sadb_x_ipsecrequest_proto = proto;
rq               3509 net/key/af_key.c 	rq->sadb_x_ipsecrequest_mode = mode;
rq               3510 net/key/af_key.c 	rq->sadb_x_ipsecrequest_level = level;
rq               3511 net/key/af_key.c 	rq->sadb_x_ipsecrequest_reqid = reqid;
rq               3513 net/key/af_key.c 	sa = (u8 *) (rq + 1);
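
The af_key.c entries above walk a packed sequence of variable-length sadb_x_ipsecrequest records: check that the remaining length can hold a header, validate the record's own length field, process it, then advance by that length. A user-space model of that walk, with a hypothetical simplified record layout and made-up payload handling:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical simplified record: a total length (in bytes) and a protocol id. */
struct ipsec_req {
	uint16_t len;	/* total record length, header + payload */
	uint16_t proto;
};

/* Mirrors the parse loop shape: validate lengths before trusting them. */
static int parse_requests(const uint8_t *buf, size_t len)
{
	while (len >= sizeof(struct ipsec_req)) {
		struct ipsec_req rq;

		memcpy(&rq, buf, sizeof(rq));
		if (len < rq.len || rq.len < sizeof(rq))
			return -1;	/* malformed record */

		printf("request: proto=%u len=%u\n", rq.proto, rq.len);

		len -= rq.len;
		buf += rq.len;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[12];
	struct ipsec_req r1 = { .len = 8, .proto = 50 };
	struct ipsec_req r2 = { .len = 4, .proto = 51 };

	memset(buf, 0, sizeof(buf));
	memcpy(buf, &r1, sizeof(r1));		/* 4-byte header + 4 bytes of payload */
	memcpy(buf + 8, &r2, sizeof(r2));	/* header-only record */

	return parse_requests(buf, sizeof(buf)) ? 1 : 0;
}
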
rq                800 net/sunrpc/cache.c 	struct cache_request *rq;
rq                824 net/sunrpc/cache.c 	rq = container_of(rp->q.list.next, struct cache_request, q.list);
rq                825 net/sunrpc/cache.c 	WARN_ON_ONCE(rq->q.reader);
rq                827 net/sunrpc/cache.c 		rq->readers++;
rq                830 net/sunrpc/cache.c 	if (rq->len == 0) {
rq                831 net/sunrpc/cache.c 		err = cache_request(cd, rq);
rq                834 net/sunrpc/cache.c 		rq->len = err;
rq                837 net/sunrpc/cache.c 	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
rq                840 net/sunrpc/cache.c 		list_move(&rp->q.list, &rq->q.list);
rq                843 net/sunrpc/cache.c 		if (rp->offset + count > rq->len)
rq                844 net/sunrpc/cache.c 			count = rq->len - rp->offset;
rq                846 net/sunrpc/cache.c 		if (copy_to_user(buf, rq->buf + rp->offset, count))
rq                849 net/sunrpc/cache.c 		if (rp->offset >= rq->len) {
rq                852 net/sunrpc/cache.c 			list_move(&rp->q.list, &rq->q.list);
rq                861 net/sunrpc/cache.c 		rq->readers--;
rq                862 net/sunrpc/cache.c 		if (rq->readers == 0 &&
rq                863 net/sunrpc/cache.c 		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
rq                864 net/sunrpc/cache.c 			list_del(&rq->q.list);
rq                866 net/sunrpc/cache.c 			cache_put(rq->item, cd);
rq                867 net/sunrpc/cache.c 			kfree(rq->buf);
rq                868 net/sunrpc/cache.c 			kfree(rq);
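
The sunrpc/cache.c entries above serve one queued cache_request to userspace across arbitrarily small reads: clamp the requested count to what remains past the reader's offset, copy that slice, and advance the offset until the request is consumed. A user-space model of that offset/count clamping, with a hypothetical in-memory request buffer instead of copy_to_user():

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the reader position and the queued request. */
struct reader { size_t offset; };
struct creq   { const char *buf; size_t len; };

/* One read() call: copy at most 'count' bytes from the current offset. */
static size_t cache_read_chunk(struct reader *rp, const struct creq *rq,
			       char *out, size_t count)
{
	if (rp->offset >= rq->len)
		return 0;			/* request fully consumed */
	if (rp->offset + count > rq->len)
		count = rq->len - rp->offset;	/* clamp to what is left */

	memcpy(out, rq->buf + rp->offset, count);
	rp->offset += count;
	return count;
}

int main(void)
{
	struct creq rq = { .buf = "nfsd upcall line\n", .len = 17 };
	struct reader rp = { 0 };
	char out[8];
	size_t n;

	while ((n = cache_read_chunk(&rp, &rq, out, sizeof(out))) > 0)
		printf("read %zu bytes\n", n);
	return 0;
}
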
rq                109 net/xdp/xsk_queue.c void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
rq                111 net/xdp/xsk_queue.c 	kvfree(rq);
rq                 26 samples/bpf/tracex3_kern.c 	long rq = PT_REGS_PARM1(ctx);
rq                 29 samples/bpf/tracex3_kern.c 	bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY);
rq                 54 samples/bpf/tracex3_kern.c 	long rq = PT_REGS_PARM1(ctx);
rq                 58 samples/bpf/tracex3_kern.c 	value = bpf_map_lookup_elem(&my_map, &rq);
rq                 65 samples/bpf/tracex3_kern.c 	bpf_map_delete_elem(&my_map, &rq);
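
The tracex3_kern.c entries above use the classic two-probe latency pattern: store a start timestamp keyed by the request pointer when the request is issued, then look it up, compute the delta, and delete it at completion. A tiny user-space model of that pattern, with a fixed-size array standing in for the BPF hash map and fake timestamps in place of bpf_ktime_get_ns():

#include <stdio.h>

#define MAP_SIZE 16

/* Toy 'hash map': parallel arrays keyed by the request pointer value. */
static long keys[MAP_SIZE];
static unsigned long long vals[MAP_SIZE];

static void map_update(long key, unsigned long long val)
{
	for (int i = 0; i < MAP_SIZE; i++) {
		if (!keys[i] || keys[i] == key) {
			keys[i] = key;
			vals[i] = val;
			return;
		}
	}
}

static unsigned long long *map_lookup(long key)
{
	for (int i = 0; i < MAP_SIZE; i++)
		if (keys[i] == key)
			return &vals[i];
	return NULL;
}

static void map_delete(long key)
{
	for (int i = 0; i < MAP_SIZE; i++)
		if (keys[i] == key)
			keys[i] = 0;
}

/* Probe 1: at issue time, remember when this request started. */
static void on_issue(long rq, unsigned long long now)
{
	map_update(rq, now);
}

/* Probe 2: at completion, compute and report the latency, then drop the entry. */
static void on_complete(long rq, unsigned long long now)
{
	unsigned long long *start = map_lookup(rq);

	if (start)
		printf("rq %#lx latency=%llu\n", rq, now - *start);
	map_delete(rq);
}

int main(void)
{
	on_issue(0x1000, 100);
	on_complete(0x1000, 175);
	return 0;
}
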