rq_list 92 block/blk-mq-sched.c LIST_HEAD(rq_list);
rq_list 114 block/blk-mq-sched.c list_add(&rq->queuelist, &rq_list);
rq_list 115 block/blk-mq-sched.c } while (blk_mq_dispatch_rq_list(q, &rq_list, true));
rq_list 137 block/blk-mq-sched.c LIST_HEAD(rq_list);
rq_list 160 block/blk-mq-sched.c list_add(&rq->queuelist, &rq_list);
rq_list 165 block/blk-mq-sched.c } while (blk_mq_dispatch_rq_list(q, &rq_list, true));
rq_list 175 block/blk-mq-sched.c LIST_HEAD(rq_list);
rq_list 190 block/blk-mq-sched.c list_splice_init(&hctx->dispatch, &rq_list);
rq_list 207 block/blk-mq-sched.c if (!list_empty(&rq_list)) {
rq_list 209 block/blk-mq-sched.c if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
rq_list 221 block/blk-mq-sched.c blk_mq_flush_busy_ctxs(hctx, &rq_list);
rq_list 222 block/blk-mq-sched.c blk_mq_dispatch_rq_list(q, &rq_list, false);
rq_list 745 block/blk-mq.c LIST_HEAD(rq_list);
rq_list 749 block/blk-mq.c list_splice_init(&q->requeue_list, &rq_list);
rq_list 752 block/blk-mq.c list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
rq_list 769 block/blk-mq.c while (!list_empty(&rq_list)) {
rq_list 770 block/blk-mq.c rq = list_entry(rq_list.next, struct request, queuelist);
rq_list 1735 block/blk-mq.c LIST_HEAD(rq_list);
rq_list 1758 block/blk-mq.c &rq_list,
rq_list 1769 block/blk-mq.c list_add_tail(&rq->queuelist, &rq_list);
rq_list 1778 block/blk-mq.c blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
rq_list 147 block/kyber-iosched.c struct list_head rq_list[KYBER_NUM_DOMAINS];
rq_list 458 block/kyber-iosched.c INIT_LIST_HEAD(&kcq->rq_list[i]);
rq_list 572 block/kyber-iosched.c struct list_head *rq_list = &kcq->rq_list[sched_domain];
rq_list 576 block/kyber-iosched.c merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
rq_list 588 block/kyber-iosched.c struct list_head *rq_list, bool at_head)
rq_list 593 block/kyber-iosched.c list_for_each_entry_safe(rq, next, rq_list, queuelist) {
rq_list 596 block/kyber-iosched.c struct list_head *head = &kcq->rq_list[sched_domain];
rq_list 669 block/kyber-iosched.c list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
rq_list 173 drivers/block/aoe/aoe.h struct list_head rq_list;
rq_list 278 drivers/block/aoe/aoeblk.c list_add_tail(&bd->rq->queuelist, &d->rq_list);
rq_list 850 drivers/block/aoe/aoecmd.c rq = list_first_entry_or_null(&d->rq_list, struct request,
rq_list 475 drivers/block/aoe/aoedev.c INIT_LIST_HEAD(&d->rq_list);
rq_list 204 drivers/block/paride/pcd.c struct list_head rq_list;
rq_list 322 drivers/block/paride/pcd.c INIT_LIST_HEAD(&cd->rq_list);
rq_list 780 drivers/block/paride/pcd.c if (cd->present && !list_empty(&cd->rq_list)) {
rq_list 781 drivers/block/paride/pcd.c pcd_req = list_first_entry(&cd->rq_list, struct request,
rq_list 824 drivers/block/paride/pcd.c list_add_tail(&bd->rq->queuelist, &cd->rq_list);
rq_list 240 drivers/block/paride/pd.c struct list_head rq_list;
rq_list 411 drivers/block/paride/pd.c if (list_empty(&disk->rq_list))
rq_list 414 drivers/block/paride/pd.c pd_req = list_first_entry(&disk->rq_list,
rq_list 765 drivers/block/paride/pd.c list_add_tail(&bd->rq->queuelist, &disk->rq_list);
rq_list 955 drivers/block/paride/pd.c INIT_LIST_HEAD(&disk->rq_list);
rq_list 243 drivers/block/paride/pf.c struct list_head rq_list;
rq_list 308 drivers/block/paride/pf.c INIT_LIST_HEAD(&pf->rq_list);
rq_list 815 drivers/block/paride/pf.c if (pf->present && !list_empty(&pf->rq_list)) {
rq_list 816 drivers/block/paride/pf.c pf_req = list_first_entry(&pf->rq_list, struct request,
rq_list 877 drivers/block/paride/pf.c list_add_tail(&bd->rq->queuelist, &pf->rq_list);
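The aoe, paride and (further down) mtd_blkdevs hits all follow the same shape: a driver-private rq_list that queue_rq() appends to through the request's queuelist node, an INIT_LIST_HEAD() at device setup, and a worker that later pops the oldest entry. Below is a minimal sketch of that pattern, assuming hypothetical my_dev/my_queue_rq/my_next_request names; only the list_head and blk-mq calls are the real kernel APIs.

/*
 * Sketch of the per-device rq_list pattern, not taken verbatim from any
 * driver above. Names here are hypothetical.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/blk-mq.h>

struct my_dev {
	spinlock_t		lock;
	struct list_head	rq_list;	/* requests parked for the worker */
};

static void my_dev_init(struct my_dev *d)
{
	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->rq_list);		/* as in aoedev.c:475, pcd.c:322 */
}

/* ->queue_rq(): defer the request by chaining its queuelist node. */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_dev *d = hctx->queue->queuedata;

	blk_mq_start_request(bd->rq);
	spin_lock_irq(&d->lock);
	list_add_tail(&bd->rq->queuelist, &d->rq_list);
	spin_unlock_irq(&d->lock);
	return BLK_STS_OK;
}

/* Worker side: pull the oldest deferred request, if any. */
static struct request *my_next_request(struct my_dev *d)
{
	struct request *rq;

	spin_lock_irq(&d->lock);
	rq = list_first_entry_or_null(&d->rq_list, struct request, queuelist);
	if (rq)
		list_del_init(&rq->queuelist);
	spin_unlock_irq(&d->lock);
	return rq;
}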
rq_list 210 drivers/block/xsysace.c struct list_head rq_list;
rq_list 468 drivers/block/xsysace.c return !list_empty(&ace->rq_list);
rq_list 477 drivers/block/xsysace.c rq = list_first_entry_or_null(&ace->rq_list, struct request, queuelist);
rq_list 876 drivers/block/xsysace.c list_add_tail(&req->queuelist, &ace->rq_list);
rq_list 992 drivers/block/xsysace.c INIT_LIST_HEAD(&ace->rq_list);
rq_list 52 drivers/gpu/drm/scheduler/sched_entity.c struct drm_sched_rq **rq_list,
rq_list 58 drivers/gpu/drm/scheduler/sched_entity.c if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
rq_list 66 drivers/gpu/drm/scheduler/sched_entity.c entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
rq_list 68 drivers/gpu/drm/scheduler/sched_entity.c if (!entity->rq_list)
rq_list 72 drivers/gpu/drm/scheduler/sched_entity.c entity->rq_list[i] = rq_list[i];
rq_list 75 drivers/gpu/drm/scheduler/sched_entity.c entity->rq = rq_list[0];
rq_list 140 drivers/gpu/drm/scheduler/sched_entity.c struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
rq_list 142 drivers/gpu/drm/scheduler/sched_entity.c if (!entity->rq_list[i]->sched->ready) {
rq_list 150 drivers/gpu/drm/scheduler/sched_entity.c rq = entity->rq_list[i];
rq_list 307 drivers/gpu/drm/scheduler/sched_entity.c kfree(entity->rq_list);
rq_list 377 drivers/gpu/drm/scheduler/sched_entity.c drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
rq_list 547 drivers/ide/ide-io.c list_add(&rq->queuelist, &drive->rq_list);
rq_list 903 drivers/ide/ide-io.c list_add_tail(&rq->queuelist, &drive->rq_list);
rq_list 1170 drivers/ide/ide-probe.c while (!list_empty(&drive->rq_list)) {
rq_list 1171 drivers/ide/ide-probe.c rq = list_first_entry(&drive->rq_list, struct request, queuelist);
rq_list 1219 drivers/ide/ide-probe.c INIT_LIST_HEAD(&drive->rq_list);
rq_list 130 drivers/mtd/mtd_blkdevs.c rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
rq_list 197 drivers/mtd/mtd_blkdevs.c list_add_tail(&bd->rq->queuelist, &dev->rq_list);
rq_list 427 drivers/mtd/mtd_blkdevs.c INIT_LIST_HEAD(&new->rq_list);
rq_list 144 drivers/scsi/sg.c struct list_head rq_list; /* head of request list */
rq_list 875 drivers/scsi/sg.c list_for_each_entry(srp, &sfp->rq_list, entry) {
rq_list 1002 drivers/scsi/sg.c list_for_each_entry(srp, &sfp->rq_list, entry) {
rq_list 1016 drivers/scsi/sg.c list_for_each_entry(srp, &sfp->rq_list, entry) {
rq_list 1188 drivers/scsi/sg.c list_for_each_entry(srp, &sfp->rq_list, entry) {
rq_list 2083 drivers/scsi/sg.c list_for_each_entry(resp, &sfp->rq_list, entry) {
rq_list 2105 drivers/scsi/sg.c if (!list_empty(&sfp->rq_list)) {
rq_list 2119 drivers/scsi/sg.c list_add_tail(&rp->entry, &sfp->rq_list);
rq_list 2134 drivers/scsi/sg.c if (!sfp || !srp || list_empty(&sfp->rq_list))
rq_list 2159 drivers/scsi/sg.c INIT_LIST_HEAD(&sfp->rq_list);
rq_list 2204 drivers/scsi/sg.c while (!list_empty(&sfp->rq_list)) {
rq_list 2205 drivers/scsi/sg.c srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
rq_list 2539 drivers/scsi/sg.c list_for_each_entry(srp, &fp->rq_list, entry) {
rq_list 2574 drivers/scsi/sg.c if (list_empty(&fp->rq_list))
rq_list 82 include/drm/gpu_scheduler.h struct drm_sched_rq **rq_list;
rq_list 312 include/drm/gpu_scheduler.h struct drm_sched_rq **rq_list,
rq_list 624 include/linux/ide.h struct list_head rq_list;
rq_list 33 include/linux/mtd/blktrans.h struct list_head rq_list;
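The drivers/scsi/sg.c hits use rq_list differently: the per-fd list links driver-private Sg_request objects through their own "entry" member rather than struct request::queuelist, and lookups walk it with list_for_each_entry(). A standalone sketch of that flavour follows, with hypothetical my_fd/my_req types standing in for Sg_fd/Sg_request.

/*
 * Sketch of the sg.c-style rq_list: a list of driver-private objects linked
 * by their own node field. Names here are hypothetical; the list API is real.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_req {
	int			id;
	struct list_head	entry;		/* node on my_fd::rq_list */
};

struct my_fd {
	spinlock_t		lock;
	struct list_head	rq_list;	/* all my_req objects of this fd */
};

static void my_fd_init(struct my_fd *fp)
{
	spin_lock_init(&fp->lock);
	INIT_LIST_HEAD(&fp->rq_list);			/* cf. sg.c:2159 */
}

static void my_add_req(struct my_fd *fp, struct my_req *rp)
{
	spin_lock_irq(&fp->lock);
	list_add_tail(&rp->entry, &fp->rq_list);	/* cf. sg.c:2119 */
	spin_unlock_irq(&fp->lock);
}

/* Linear search by id, the way sg.c scans sfp->rq_list for a matching srp. */
static struct my_req *my_find_req(struct my_fd *fp, int id)
{
	struct my_req *rp, *found = NULL;

	spin_lock_irq(&fp->lock);
	list_for_each_entry(rp, &fp->rq_list, entry) {
		if (rp->id == id) {
			found = rp;
			break;
		}
	}
	spin_unlock_irq(&fp->lock);
	return found;
}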
rq_list 87 include/linux/sunrpc/xprt.h struct list_head rq_list; /* Slot allocation list */
rq_list 1593 net/sunrpc/xprt.c req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
rq_list 1594 net/sunrpc/xprt.c list_del(&req->rq_list);
rq_list 1630 net/sunrpc/xprt.c list_add(&req->rq_list, &xprt->free);
rq_list 1641 net/sunrpc/xprt.c req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
rq_list 1642 net/sunrpc/xprt.c list_del(&req->rq_list);
rq_list 1665 net/sunrpc/xprt.c list_add(&req->rq_list, &xprt->free);
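The net/sunrpc/xprt.c hits show a third use: rq_list is the node by which free rpc_rqst slots sit on the transport's free list, taken with list_first_entry() plus list_del() and returned with list_add(). A minimal sketch of that free-slot pool, with hypothetical my_xprt/my_slot types standing in for rpc_xprt/rpc_rqst:

/*
 * Sketch of a SUNRPC-style free-slot pool. Types and helpers are hypothetical
 * stand-ins; only the list_head and spinlock APIs are real.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_slot {
	struct list_head	rq_list;	/* node on my_xprt::free */
};

struct my_xprt {
	spinlock_t		lock;
	struct list_head	free;		/* unused slots */
};

static struct my_slot *my_alloc_slot(struct my_xprt *xprt)
{
	struct my_slot *slot = NULL;

	spin_lock(&xprt->lock);
	if (!list_empty(&xprt->free)) {
		slot = list_first_entry(&xprt->free, struct my_slot, rq_list);
		list_del(&slot->rq_list);	/* cf. xprt.c:1641-1642 */
	}
	spin_unlock(&xprt->lock);
	return slot;
}

static void my_free_slot(struct my_xprt *xprt, struct my_slot *slot)
{
	spin_lock(&xprt->lock);
	list_add(&slot->rq_list, &xprt->free);	/* cf. xprt.c:1630 */
	spin_unlock(&xprt->lock);
}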