icq               380 block/bfq-iosched.c 	return bic->icq.q->elevator->elevator_data;
icq               387 block/bfq-iosched.c static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
icq               390 block/bfq-iosched.c 	return container_of(icq, struct bfq_io_cq, icq);
icq               405 block/bfq-iosched.c 		struct bfq_io_cq *icq;
icq               408 block/bfq-iosched.c 		icq = icq_to_bic(ioc_lookup_icq(ioc, q));
icq               411 block/bfq-iosched.c 		return icq;
icq              4496 block/bfq-iosched.c 		    icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
icq              4955 block/bfq-iosched.c static void bfq_exit_icq(struct io_cq *icq)
icq              4957 block/bfq-iosched.c 	struct bfq_io_cq *bic = icq_to_bic(icq);
icq              5022 block/bfq-iosched.c 	int ioprio = bic->icq.ioc->ioprio;
icq              5240 block/bfq-iosched.c 	if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
icq              5910 block/bfq-iosched.c 	if (!rq->elv.icq || !bfqq)
icq              6113 block/bfq-iosched.c 	if (unlikely(!rq->elv.icq))
icq              6126 block/bfq-iosched.c 	bic = icq_to_bic(rq->elv.icq);
icq               388 block/bfq-iosched.h 	struct io_cq icq; /* must be the first member */
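The bfq-iosched.c and bfq-iosched.h hits above all lean on one layout rule: struct bfq_io_cq embeds the generic struct io_cq as its first member, so generic blk-ioc.c code can allocate and free the object through the elevator's icq_cache while only ever touching the leading struct io_cq, and BFQ recovers its private structure with container_of() in icq_to_bic(). Below is a minimal userspace sketch of that embedding pattern; the fake_* names are illustrative stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Models the generic per-(io_context, queue) link, struct io_cq. */
struct fake_io_cq {
	int q_id;
};

/* Models struct bfq_io_cq: the generic part must stay the first member. */
struct fake_bfq_io_cq {
	struct fake_io_cq icq;		/* generic code only sees this part */
	int bfq_private_state;		/* scheduler-private data follows it */
};

/* Models icq_to_bic(): recover the scheduler object from the generic one. */
static struct fake_bfq_io_cq *fake_icq_to_bic(struct fake_io_cq *icq)
{
	return container_of(icq, struct fake_bfq_io_cq, icq);
}

int main(void)
{
	struct fake_bfq_io_cq bic = { .icq = { .q_id = 3 }, .bfq_private_state = 42 };
	struct fake_io_cq *icq = &bic.icq;	/* what generic code hands back */

	printf("private state: %d\n", fake_icq_to_bic(icq)->bfq_private_state);
	return 0;
}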
icq                34 block/blk-ioc.c 	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
icq                36 block/blk-ioc.c 	kmem_cache_free(icq->__rcu_icq_cache, icq);
icq                43 block/blk-ioc.c static void ioc_exit_icq(struct io_cq *icq)
icq                45 block/blk-ioc.c 	struct elevator_type *et = icq->q->elevator->type;
icq                47 block/blk-ioc.c 	if (icq->flags & ICQ_EXITED)
icq                51 block/blk-ioc.c 		et->ops.exit_icq(icq);
icq                53 block/blk-ioc.c 	icq->flags |= ICQ_EXITED;
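ioc_exit_icq() above shows the idempotent-teardown idiom used in this file: the ICQ_EXITED flag guards against running the elevator's optional exit_icq() hook twice, which matters because an icq can be torn down from either the io_context side or the request_queue side (both loops appear further down in this listing). A small self-contained sketch of that idiom, assuming made-up fake_* names rather than the kernel API:

#include <stdio.h>

#define FAKE_ICQ_EXITED	(1 << 0)

struct fake_icq;

/* Models the optional elevator hook, et->ops.exit_icq. */
struct fake_elevator_ops {
	void (*exit_icq)(struct fake_icq *icq);
};

struct fake_icq {
	unsigned int flags;
	struct fake_elevator_ops *ops;
};

/* Models ioc_exit_icq(): safe to call more than once, hook is optional. */
static void fake_exit_icq(struct fake_icq *icq)
{
	if (icq->flags & FAKE_ICQ_EXITED)
		return;			/* already exited from the other side */

	if (icq->ops->exit_icq)
		icq->ops->exit_icq(icq);

	icq->flags |= FAKE_ICQ_EXITED;
}

static void say_bye(struct fake_icq *icq)
{
	(void)icq;
	puts("elevator exit hook ran once");
}

int main(void)
{
	struct fake_elevator_ops ops = { .exit_icq = say_bye };
	struct fake_icq icq = { .flags = 0, .ops = &ops };

	fake_exit_icq(&icq);
	fake_exit_icq(&icq);	/* second call is a no-op */
	return 0;
}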
icq                60 block/blk-ioc.c static void ioc_destroy_icq(struct io_cq *icq)
icq                62 block/blk-ioc.c 	struct io_context *ioc = icq->ioc;
icq                63 block/blk-ioc.c 	struct request_queue *q = icq->q;
icq                68 block/blk-ioc.c 	radix_tree_delete(&ioc->icq_tree, icq->q->id);
icq                69 block/blk-ioc.c 	hlist_del_init(&icq->ioc_node);
icq                70 block/blk-ioc.c 	list_del_init(&icq->q_node);
icq                77 block/blk-ioc.c 	if (rcu_access_pointer(ioc->icq_hint) == icq)
icq                80 block/blk-ioc.c 	ioc_exit_icq(icq);
icq                86 block/blk-ioc.c 	icq->__rcu_icq_cache = et->icq_cache;
icq                87 block/blk-ioc.c 	icq->flags |= ICQ_DESTROYED;
icq                88 block/blk-ioc.c 	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
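ioc_destroy_icq() above orders its teardown so that readers holding an RCU-protected reference (for example via icq_hint) never see freed memory: the icq is first unlinked from every lookup path (radix tree, ioc hash list, queue list, and the hint if it points here), then the exit hook runs and ICQ_DESTROYED is set, and only then is the memory handed to call_rcu() so kmem_cache_free() happens after the grace period. A single-threaded userspace sketch of that unlink-then-defer-the-free ordering, with a plain pending-free list standing in for an RCU grace period and an array standing in for the radix tree:

#include <stdlib.h>

#define MAX_QUEUES 16

struct fake_icq {
	int q_id;
	struct fake_icq *next_free;	/* chains deferred frees */
};

struct fake_ioc {
	struct fake_icq *table[MAX_QUEUES];
	struct fake_icq *hint;
};

static struct fake_icq *deferred_frees;	/* stands in for the RCU grace period */

/* Models ioc_destroy_icq(): unlink everywhere first, free only later. */
static void fake_destroy_icq(struct fake_ioc *ioc, struct fake_icq *icq)
{
	ioc->table[icq->q_id] = NULL;	/* kernel: radix_tree_delete + list/hlist del */
	if (ioc->hint == icq)
		ioc->hint = NULL;	/* never leave the hint dangling */

	/* kernel: ioc_exit_icq(), then flag ICQ_DESTROYED and call_rcu() */
	icq->next_free = deferred_frees;
	deferred_frees = icq;
}

/* Models the RCU callbacks running once the grace period has elapsed. */
static void fake_drain_deferred_frees(void)
{
	while (deferred_frees) {
		struct fake_icq *icq = deferred_frees;

		deferred_frees = icq->next_free;
		free(icq);		/* kernel: kmem_cache_free(__rcu_icq_cache, icq) */
	}
}

int main(void)
{
	struct fake_ioc ioc = { 0 };
	struct fake_icq *icq = calloc(1, sizeof(*icq));

	icq->q_id = 4;
	ioc.table[4] = icq;
	ioc.hint = icq;

	fake_destroy_icq(&ioc, icq);
	/* readers could still hold a stale reference here; the free is deferred */
	fake_drain_deferred_frees();
	return 0;
}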
icq               110 block/blk-ioc.c 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
icq               112 block/blk-ioc.c 		struct request_queue *q = icq->q;
icq               115 block/blk-ioc.c 			ioc_destroy_icq(icq);
icq               174 block/blk-ioc.c 	struct io_cq *icq;
icq               187 block/blk-ioc.c 	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
icq               188 block/blk-ioc.c 		if (icq->flags & ICQ_EXITED)
icq               191 block/blk-ioc.c 		ioc_exit_icq(icq);
icq               218 block/blk-ioc.c 		struct io_cq *icq = list_entry(icq_list->next,
icq               220 block/blk-ioc.c 		struct io_context *ioc = icq->ioc;
icq               223 block/blk-ioc.c 		if (icq->flags & ICQ_DESTROYED) {
icq               227 block/blk-ioc.c 		ioc_destroy_icq(icq);
icq               334 block/blk-ioc.c 	struct io_cq *icq;
icq               345 block/blk-ioc.c 	icq = rcu_dereference(ioc->icq_hint);
icq               346 block/blk-ioc.c 	if (icq && icq->q == q)
icq               349 block/blk-ioc.c 	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
icq               350 block/blk-ioc.c 	if (icq && icq->q == q)
icq               351 block/blk-ioc.c 		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
icq               353 block/blk-ioc.c 		icq = NULL;
icq               356 block/blk-ioc.c 	return icq;
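ioc_lookup_icq() above is a two-level lookup: it first tries the icq_hint pointer cached in the io_context, and only on a miss falls back to the radix tree keyed by q->id, refreshing the hint on a successful fallback (the "allowed to race" comment means a stale or lost hint is harmless, it only costs the slower lookup). A simplified single-threaded userspace model of that hint-plus-table lookup, assuming a plain array in place of the radix tree and no RCU:

#include <stdio.h>

#define MAX_QUEUES 16

struct fake_icq {
	int q_id;
};

/* Models struct io_context: a lookup table plus a last-hit hint. */
struct fake_ioc {
	struct fake_icq *table[MAX_QUEUES];	/* stands in for icq_tree */
	struct fake_icq *hint;			/* stands in for icq_hint */
};

/* Models ioc_lookup_icq(): check the hint first, then the table. */
static struct fake_icq *fake_lookup_icq(struct fake_ioc *ioc, int q_id)
{
	struct fake_icq *icq = ioc->hint;

	if (icq && icq->q_id == q_id)
		return icq;

	icq = ioc->table[q_id];
	if (icq && icq->q_id == q_id)
		ioc->hint = icq;		/* refresh the hint for next time */
	else
		icq = NULL;

	return icq;
}

int main(void)
{
	struct fake_ioc ioc = { 0 };
	struct fake_icq icq = { .q_id = 5 };

	ioc.table[5] = &icq;
	printf("miss on hint, hit in table: %p\n", (void *)fake_lookup_icq(&ioc, 5));
	printf("hit on hint:                %p\n", (void *)fake_lookup_icq(&ioc, 5));
	return 0;
}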
icq               376 block/blk-ioc.c 	struct io_cq *icq;
icq               379 block/blk-ioc.c 	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
icq               381 block/blk-ioc.c 	if (!icq)
icq               385 block/blk-ioc.c 		kmem_cache_free(et->icq_cache, icq);
icq               389 block/blk-ioc.c 	icq->ioc = ioc;
icq               390 block/blk-ioc.c 	icq->q = q;
icq               391 block/blk-ioc.c 	INIT_LIST_HEAD(&icq->q_node);
icq               392 block/blk-ioc.c 	INIT_HLIST_NODE(&icq->ioc_node);
icq               398 block/blk-ioc.c 	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
icq               399 block/blk-ioc.c 		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
icq               400 block/blk-ioc.c 		list_add(&icq->q_node, &q->icq_list);
icq               402 block/blk-ioc.c 			et->ops.init_icq(icq);
icq               404 block/blk-ioc.c 		kmem_cache_free(et->icq_cache, icq);
icq               405 block/blk-ioc.c 		icq = ioc_lookup_icq(ioc, q);
icq               406 block/blk-ioc.c 		if (!icq)
icq               413 block/blk-ioc.c 	return icq;
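ioc_create_icq() above follows an optimistic-insert pattern for racy creation: allocate a zeroed icq from the elevator's cache, preload the radix tree, then under the locks try to insert it at q->id; on success it is linked into both the io_context's and the queue's lists and the elevator's optional init_icq() hook runs, while on failure (another task linked one first) the local allocation is freed and the already-linked icq is returned via ioc_lookup_icq(). A condensed userspace sketch of that alloc/insert-or-reuse flow, again with an array standing in for the radix tree and no locking:

#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUES 16

struct fake_icq {
	int q_id;
};

struct fake_ioc {
	struct fake_icq *table[MAX_QUEUES];
};

static struct fake_icq *fake_lookup_icq(struct fake_ioc *ioc, int q_id)
{
	return ioc->table[q_id];
}

/* Models ioc_create_icq(): allocate, try to insert, fall back to lookup. */
static struct fake_icq *fake_create_icq(struct fake_ioc *ioc, int q_id)
{
	struct fake_icq *icq = calloc(1, sizeof(*icq));

	if (!icq)
		return NULL;
	icq->q_id = q_id;

	if (!ioc->table[q_id]) {
		ioc->table[q_id] = icq;	/* kernel: radix_tree_insert + list linking */
		/* kernel: et->ops.init_icq(icq) would run here if provided */
	} else {
		free(icq);		/* lost the race: one is already linked */
		icq = fake_lookup_icq(ioc, q_id);
	}

	return icq;
}

int main(void)
{
	struct fake_ioc ioc = { 0 };
	struct fake_icq *first = fake_create_icq(&ioc, 2);
	struct fake_icq *second = fake_create_icq(&ioc, 2);

	printf("same icq returned both times: %s\n", first == second ? "yes" : "no");
	return 0;
}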
icq                39 block/blk-mq-sched.c 	struct io_cq *icq;
icq                49 block/blk-mq-sched.c 	icq = ioc_lookup_icq(ioc, q);
icq                52 block/blk-mq-sched.c 	if (!icq) {
icq                53 block/blk-mq-sched.c 		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
icq                54 block/blk-mq-sched.c 		if (!icq)
icq                57 block/blk-mq-sched.c 	get_io_context(icq->ioc);
icq                58 block/blk-mq-sched.c 	rq->elv.icq = icq;
icq               409 block/blk-mq.c 		rq->elv.icq = NULL;
icq               519 block/blk-mq.c 		if (rq->elv.icq) {
icq               520 block/blk-mq.c 			put_io_context(rq->elv.icq->ioc);
icq               521 block/blk-mq.c 			rq->elv.icq = NULL;
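Taken together, the blk-mq-sched.c and blk-mq.c hits show the reference lifecycle of an icq around a request: on the prepare side the existing icq is looked up (or created) and get_io_context() pins the owning io_context before the icq is stored in rq->elv.icq; on the free side the matching put_io_context() drops that pin and the field is cleared. A minimal sketch of that paired get/put ownership, with a plain counter standing in for the io_context refcount and fake_* names that are not the kernel API:

#include <assert.h>
#include <stdio.h>

/* Models struct io_context with a plain reference counter. */
struct fake_ioc {
	int refcount;
};

struct fake_icq {
	struct fake_ioc *ioc;
};

/* Models the rq->elv.icq field on a request. */
struct fake_request {
	struct fake_icq *icq;
};

/* Models the prepare path: pin the ioc, then attach the icq. */
static void fake_rq_attach_icq(struct fake_request *rq, struct fake_icq *icq)
{
	icq->ioc->refcount++;		/* kernel: get_io_context(icq->ioc) */
	rq->icq = icq;
}

/* Models the request-free path: drop the pin and clear the pointer. */
static void fake_rq_detach_icq(struct fake_request *rq)
{
	if (rq->icq) {
		rq->icq->ioc->refcount--;	/* kernel: put_io_context() */
		rq->icq = NULL;
	}
}

int main(void)
{
	struct fake_ioc ioc = { .refcount = 1 };
	struct fake_icq icq = { .ioc = &ioc };
	struct fake_request rq = { .icq = NULL };

	fake_rq_attach_icq(&rq, &icq);
	assert(ioc.refcount == 2);
	fake_rq_detach_icq(&rq);
	assert(ioc.refcount == 1);
	printf("refcount balanced: %d\n", ioc.refcount);
	return 0;
}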
icq               184 include/linux/blkdev.h 			struct io_cq		*icq;