rq_depth          138 block/blk-iolatency.c 	struct rq_depth rq_depth;
rq_depth          277 block/blk-iolatency.c 	return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
rq_depth          369 block/blk-iolatency.c 	unsigned long old = iolat->rq_depth.max_depth;
rq_depth          381 block/blk-iolatency.c 			iolat->rq_depth.max_depth = old;
rq_depth          386 block/blk-iolatency.c 		iolat->rq_depth.max_depth = max(old, 1UL);
rq_depth          444 block/blk-iolatency.c 	if (iolat->rq_depth.max_depth == 1 && direction < 0) {
rq_depth          452 block/blk-iolatency.c 		iolat->rq_depth.max_depth = UINT_MAX;
rq_depth          507 block/blk-iolatency.c 	if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
rq_depth          903 block/blk-iolatency.c 	if (iolat->rq_depth.max_depth == UINT_MAX)
rq_depth          910 block/blk-iolatency.c 			 iolat->rq_depth.max_depth);
rq_depth          928 block/blk-iolatency.c 	if (iolat->rq_depth.max_depth == UINT_MAX)
rq_depth          933 block/blk-iolatency.c 			 iolat->rq_depth.max_depth, avg_lat, cur_win);
rq_depth          978 block/blk-iolatency.c 	iolat->rq_depth.queue_depth = blkg->q->nr_requests;
rq_depth          979 block/blk-iolatency.c 	iolat->rq_depth.max_depth = UINT_MAX;
rq_depth          980 block/blk-iolatency.c 	iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
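
In block/blk-iolatency.c, rq_depth.max_depth acts as a per-cgroup inflight cap: requests are admitted through rq_wait_inc_below() (line 277), the cap is halved with a floor of 1 when latency targets are missed (lines 369-386), and UINT_MAX marks a group as unthrottled (lines 452, 979), which is also how the stat output at lines 903-933 tells throttled groups apart. Below is a minimal userspace sketch of that shape; the struct layout, helper names, and exact scaling step are illustrative assumptions, not the kernel's literal code.

    #include <limits.h>
    #include <stdio.h>

    /* Illustrative stand-in for blk-iolatency's use of rq_depth. */
    struct iolat_sketch {
            unsigned int max_depth;   /* inflight cap; UINT_MAX = unthrottled */
            unsigned int queue_depth; /* device depth, cf. q->nr_requests (line 978) */
            unsigned int inflight;
    };

    /* Admission gate in the spirit of rq_wait_inc_below() at line 277. */
    static int may_issue(struct iolat_sketch *io)
    {
            if (io->inflight >= io->max_depth)
                    return 0;            /* caller would sleep on rq_wait */
            io->inflight++;
            return 1;
    }

    /* Latency target missed: halve the cap, floor of 1 (cf. line 386). */
    static void scale_down(struct iolat_sketch *io)
    {
            unsigned long old = io->max_depth;

            if (old > io->queue_depth)
                    old = io->queue_depth;  /* leave "unlimited" first */
            old >>= 1;
            io->max_depth = old > 1 ? old : 1;
    }

    int main(void)
    {
            struct iolat_sketch io = {
                    .max_depth = UINT_MAX,  /* start unthrottled, cf. line 979 */
                    .queue_depth = 64,
            };

            scale_down(&io);
            printf("max_depth=%u may_issue=%d\n", io.max_depth, may_issue(&io));
            return 0;
    }
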
rq_depth          116 block/blk-rq-qos.c bool rq_depth_calc_max_depth(struct rq_depth *rqd)
rq_depth          164 block/blk-rq-qos.c bool rq_depth_scale_up(struct rq_depth *rqd)
rq_depth          183 block/blk-rq-qos.c bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
rq_depth          130 block/blk-rq-qos.h bool rq_depth_scale_up(struct rq_depth *rqd);
rq_depth          131 block/blk-rq-qos.h bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
rq_depth          132 block/blk-rq-qos.h bool rq_depth_calc_max_depth(struct rq_depth *rqd);
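
block/blk-rq-qos.c and its header hold the rq_depth helpers shared by the queue-depth-scaling policies: rq_depth_calc_max_depth() derives max_depth from a signed scale step, and rq_depth_scale_up()/rq_depth_scale_down() move that step. The sketch below assumes a shift-by-scale_step scheme with a ceiling of three quarters of the device queue depth; both details are assumptions about the helper, not a guaranteed match for the kernel's implementation.

    #include <stdio.h>

    /* Assumed shape of the fields the helpers operate on. */
    struct rq_depth {
            unsigned int max_depth;
            int scale_step;           /* >0 throttled, <0 opened up */
            unsigned int queue_depth;
            unsigned int default_depth;
    };

    static void calc_max_depth_sketch(struct rq_depth *rqd)
    {
            unsigned int depth = rqd->default_depth < rqd->queue_depth ?
                                 rqd->default_depth : rqd->queue_depth;

            if (rqd->scale_step > 0) {
                    int step = rqd->scale_step > 31 ? 31 : rqd->scale_step;

                    depth = 1 + ((depth - 1) >> step);  /* shift the cap down */
            } else if (rqd->scale_step < 0) {
                    unsigned int ceiling = 3 * rqd->queue_depth / 4;

                    depth = 1 + ((depth - 1) << -rqd->scale_step);
                    if (depth > ceiling)    /* never hog the whole device */
                            depth = ceiling;
            }
            rqd->max_depth = depth;
    }

    int main(void)
    {
            struct rq_depth rqd = { .scale_step = 1, .queue_depth = 64,
                                    .default_depth = 64 };

            calc_max_depth_sketch(&rqd);
            printf("max_depth=%u\n", rqd.max_depth);  /* prints 32 */
            return 0;
    }
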
rq_depth          237 block/blk-wbt.c 	struct rq_depth *rqd = &rwb->rq_depth;
rq_depth          290 block/blk-wbt.c 	struct rq_depth *rqd = &rwb->rq_depth;
rq_depth          300 block/blk-wbt.c 	} else if (rwb->rq_depth.max_depth <= 2) {
rq_depth          301 block/blk-wbt.c 		rwb->wb_normal = rwb->rq_depth.max_depth;
rq_depth          304 block/blk-wbt.c 		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
rq_depth          305 block/blk-wbt.c 		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
rq_depth          311 block/blk-wbt.c 	if (!rq_depth_scale_up(&rwb->rq_depth))
rq_depth          321 block/blk-wbt.c 	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
rq_depth          330 block/blk-wbt.c 	struct rq_depth *rqd = &rwb->rq_depth;
rq_depth          355 block/blk-wbt.c 	struct rq_depth *rqd = &rwb->rq_depth;
rq_depth          410 block/blk-wbt.c 	struct rq_depth *rqd = &rwb->rq_depth;
rq_depth          481 block/blk-wbt.c 		limit = rwb->rq_depth.max_depth;
rq_depth          687 block/blk-wbt.c 	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
rq_depth          845 block/blk-wbt.c 	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
rq_depth           69 block/blk-wbt.h 	struct rq_depth rq_depth;
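
blk-wbt embeds a struct rq_depth (blk-wbt.h:69) and splits the resulting max_depth into tiers at lines 300-305: normal writeback is allowed roughly half the depth and background writeback roughly a quarter, so foreground reads keep headroom, and background writeback throttles earliest. A standalone sketch of that split follows; the wb_background value in the max_depth <= 2 branch is an assumption filled in from context, since the listing shows only line 301 of that branch.

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_depth = 24;  /* assumed current rq_depth.max_depth */
            unsigned int wb_normal, wb_background;

            if (max_depth <= 2) {
                    /* tiny depth: no room for tiers (fallback assumed) */
                    wb_normal = max_depth;      /* cf. line 301 */
                    wb_background = 1;
            } else {
                    wb_normal = (max_depth + 1) / 2;      /* cf. line 304 */
                    wb_background = (max_depth + 3) / 4;  /* cf. line 305 */
            }
            printf("normal=%u background=%u\n", wb_normal, wb_background);
            return 0;
    }
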
rq_depth           38 drivers/infiniband/hw/efa/efa_com_cmd.c 			params->rq_depth;
rq_depth           26 drivers/infiniband/hw/efa/efa_com_cmd.h 	u32 rq_depth;
rq_depth          712 drivers/infiniband/hw/efa/efa_verbs.c 	create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
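
In the EFA RDMA driver, rq_depth is simply the receive-queue depth the verbs consumer asked for: efa_verbs.c:712 copies init_attr->cap.max_recv_wr into the create-QP parameters, which efa_com_cmd.c then hands to the device. A trivial sketch of that plumbing; the struct names are illustrative stand-ins, not the driver's real definitions.

    #include <stdio.h>

    /* Stand-in types; only the copy mirrors the source. */
    struct qp_cap { unsigned int max_recv_wr; };
    struct create_qp_params { unsigned int rq_depth; };

    int main(void)
    {
            struct qp_cap cap = { .max_recv_wr = 256 };  /* consumer's request */
            struct create_qp_params params = {
                    .rq_depth = cap.max_recv_wr,  /* cf. efa_verbs.c:712 */
            };

            printf("device-visible rq_depth=%u\n", params.rq_depth);
            return 0;
    }
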
rq_depth          286 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
rq_depth          307 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c 	hw_ioctxt.rq_depth  = ilog2(rq_depth);
rq_depth          157 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h 	u16     rq_depth;
rq_depth          475 drivers/net/ethernet/huawei/hinic/hinic_port.c 	rq_num.rq_depth = ilog2(HINIC_SQ_DEPTH);
rq_depth          211 drivers/net/ethernet/huawei/hinic/hinic_port.h 	u32	rq_depth;
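
The hinic driver keeps rq_depth in two encodings: raw element counts in its own structures (hinic_hw_dev.h:157, hinic_port.h:211), but a power-of-two exponent, via ilog2(), when programming the hardware I/O context (lines 307 and 475). Note that line 475 feeds ilog2(HINIC_SQ_DEPTH) into an rq_depth field; presumably the driver's default SQ and RQ depths are equal, making the two interchangeable there. A userspace stand-in for the encoding:

    #include <stdio.h>

    /* Userspace stand-in for the kernel's ilog2(): floor(log2(v)). */
    static unsigned int ilog2_sketch(unsigned int v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned int rq_depth = 4096;  /* assumed power-of-two depth */

            /* cf. line 307: the hardware context takes the exponent. */
            printf("rq_depth %u encodes as %u\n",
                   rq_depth, ilog2_sketch(rq_depth));
            return 0;
    }
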
rq_depth           88 net/9p/trans_rdma.c 	int rq_depth;
rq_depth          126 net/9p/trans_rdma.c 	int rq_depth;
rq_depth          158 net/9p/trans_rdma.c 	if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
rq_depth          159 net/9p/trans_rdma.c 		seq_printf(m, ",rq=%u", rdma->rq_depth);
rq_depth          183 net/9p/trans_rdma.c 	opts->rq_depth = P9_RDMA_RQ_DEPTH;
rq_depth          220 net/9p/trans_rdma.c 			opts->rq_depth = option;
rq_depth          233 net/9p/trans_rdma.c 	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
rq_depth          573 net/9p/trans_rdma.c 	rdma->rq_depth = opts->rq_depth;
rq_depth          578 net/9p/trans_rdma.c 	sema_init(&rdma->rq_sem, rdma->rq_depth);
rq_depth          689 net/9p/trans_rdma.c 				   opts.sq_depth + opts.rq_depth + 1,
rq_depth          704 net/9p/trans_rdma.c 	qp_attr.cap.max_recv_wr = opts.rq_depth;
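
The 9p RDMA transport exposes rq_depth as a mount option (defaulting to P9_RDMA_RQ_DEPTH at line 183, printed at line 159 only when non-default), raises it to at least sq_depth at line 233 so every outstanding request has a receive buffer, then uses it to size the completion queue (line 689), the QP's max_recv_wr (line 704), and a counting semaphore that credits posted receives (line 578). A sketch of that credit pattern with POSIX semaphores; where the take and return happen is inferred from the semaphore's initial value, not quoted from the source.

    #include <semaphore.h>
    #include <stdio.h>

    #define RQ_DEPTH 32  /* stand-in for P9_RDMA_RQ_DEPTH */

    int main(void)
    {
            sem_t rq_sem;

            /* cf. line 578: the semaphore starts with rq_depth credits,
             * so at most rq_depth receive buffers are ever outstanding. */
            sem_init(&rq_sem, 0, RQ_DEPTH);

            sem_wait(&rq_sem);  /* take a credit before posting a receive */
            /* ... the kernel would ib_post_recv() here ... */
            sem_post(&rq_sem);  /* receive completion returns the credit */

            sem_destroy(&rq_sem);
            printf("credit cycle complete\n");
            return 0;
    }
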
rq_depth          394 net/sunrpc/xprtrdma/svc_rdma_transport.c 	unsigned int ctxts, rq_depth;
rq_depth          431 net/sunrpc/xprtrdma/svc_rdma_transport.c 	rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
rq_depth          432 net/sunrpc/xprtrdma/svc_rdma_transport.c 	if (rq_depth > dev->attrs.max_qp_wr) {
rq_depth          435 net/sunrpc/xprtrdma/svc_rdma_transport.c 		rq_depth = dev->attrs.max_qp_wr;
rq_depth          436 net/sunrpc/xprtrdma/svc_rdma_transport.c 		newxprt->sc_max_requests = rq_depth - 2;
rq_depth          442 net/sunrpc/xprtrdma/svc_rdma_transport.c 	newxprt->sc_sq_depth = rq_depth + ctxts;
rq_depth          462 net/sunrpc/xprtrdma/svc_rdma_transport.c 		ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
rq_depth          474 net/sunrpc/xprtrdma/svc_rdma_transport.c 	qp_attr.cap.max_recv_wr = rq_depth;
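
svc_rdma computes rq_depth as the sum of forward- and backchannel request slots (line 431), clamps it to the device's attrs.max_qp_wr and shrinks sc_max_requests to fit (lines 432-436), then derives the send-queue depth from it (line 442) and sizes the receive CQ and QP accordingly (lines 462, 474). A sketch of the clamp; the field names are illustrative, the arithmetic mirrors the lines above, and the reserve-two-slots detail is read off line 436, apparently to keep room for the backchannel.

    #include <stdio.h>

    struct dev_caps { unsigned int max_qp_wr; };

    int main(void)
    {
            struct dev_caps dev = { .max_qp_wr = 128 };  /* device limit */
            unsigned int max_requests = 256, max_bc_requests = 2, ctxts = 4;
            unsigned int rq_depth = max_requests + max_bc_requests;  /* line 431 */

            if (rq_depth > dev.max_qp_wr) {
                    rq_depth = dev.max_qp_wr;     /* device limit wins, line 435 */
                    max_requests = rq_depth - 2;  /* keep 2 backchannel slots */
            }

            /* line 442: the send side gets the receive depth plus contexts. */
            printf("rq_depth=%u sq_depth=%u max_requests=%u\n",
                   rq_depth, rq_depth + ctxts, max_requests);
            return 0;
    }
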