rds_ibdev          83 net/rds/ib.c   static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
rds_ibdev          88 net/rds/ib.c   	spin_lock_irqsave(&rds_ibdev->spinlock, flags);
rds_ibdev          89 net/rds/ib.c   	list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
rds_ibdev          91 net/rds/ib.c   	spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
rds_ibdev         101 net/rds/ib.c   	struct rds_ib_device *rds_ibdev = container_of(work,
rds_ibdev         104 net/rds/ib.c   	if (rds_ibdev->mr_8k_pool)
rds_ibdev         105 net/rds/ib.c   		rds_ib_destroy_mr_pool(rds_ibdev->mr_8k_pool);
rds_ibdev         106 net/rds/ib.c   	if (rds_ibdev->mr_1m_pool)
rds_ibdev         107 net/rds/ib.c   		rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool);
rds_ibdev         108 net/rds/ib.c   	if (rds_ibdev->pd)
rds_ibdev         109 net/rds/ib.c   		ib_dealloc_pd(rds_ibdev->pd);
rds_ibdev         111 net/rds/ib.c   	list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
rds_ibdev         116 net/rds/ib.c   	kfree(rds_ibdev->vector_load);
rds_ibdev         118 net/rds/ib.c   	kfree(rds_ibdev);
rds_ibdev         121 net/rds/ib.c   void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
rds_ibdev         123 net/rds/ib.c   	BUG_ON(refcount_read(&rds_ibdev->refcount) == 0);
rds_ibdev         124 net/rds/ib.c   	if (refcount_dec_and_test(&rds_ibdev->refcount))
rds_ibdev         125 net/rds/ib.c   		queue_work(rds_wq, &rds_ibdev->free_work);
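
The four hits above are the whole device put path: rds_ib_add_one() starts the refcount at 1, each longer-lived holder (the rds_ib_devices list, the IB client data, every connection, and lookups via rds_ib_get_device()) takes its own reference, and the last rds_ib_dev_put() pushes the actual teardown onto a workqueue because puts can arrive from contexts that cannot block. A minimal sketch of that lifecycle, using a simplified stand-in structure rather than the real struct rds_ib_device:

#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/* Simplified stand-in; the real struct rds_ib_device carries many more fields. */
struct example_dev {
	refcount_t		refcount;
	struct work_struct	free_work;
};

static void example_dev_free(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, free_work);

	/* The real rds_ib_dev_free() also destroys the MR pools, the PD and lists. */
	kfree(dev);
}

static void example_dev_put(struct example_dev *dev)
{
	/* Last put defers the free to a workqueue (rds_wq in the real code). */
	if (refcount_dec_and_test(&dev->refcount))
		schedule_work(&dev->free_work);
}

static struct example_dev *example_dev_alloc(void)
{
	struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	refcount_set(&dev->refcount, 1);	/* initial reference, owned by the caller */
	INIT_WORK(&dev->free_work, example_dev_free);
	return dev;
}
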
rds_ibdev         130 net/rds/ib.c   	struct rds_ib_device *rds_ibdev;
rds_ibdev         137 net/rds/ib.c   	rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
rds_ibdev         139 net/rds/ib.c   	if (!rds_ibdev)
rds_ibdev         142 net/rds/ib.c   	spin_lock_init(&rds_ibdev->spinlock);
rds_ibdev         143 net/rds/ib.c   	refcount_set(&rds_ibdev->refcount, 1);
rds_ibdev         144 net/rds/ib.c   	INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
rds_ibdev         146 net/rds/ib.c   	INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
rds_ibdev         147 net/rds/ib.c   	INIT_LIST_HEAD(&rds_ibdev->conn_list);
rds_ibdev         149 net/rds/ib.c   	rds_ibdev->max_wrs = device->attrs.max_qp_wr;
rds_ibdev         150 net/rds/ib.c   	rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
rds_ibdev         156 net/rds/ib.c   	rds_ibdev->use_fastreg = (has_fr && !has_fmr);
rds_ibdev         158 net/rds/ib.c   	rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr?: 32;
rds_ibdev         159 net/rds/ib.c   	rds_ibdev->max_1m_mrs = device->attrs.max_mr ?
rds_ibdev         163 net/rds/ib.c   	rds_ibdev->max_8k_mrs = device->attrs.max_mr ?
rds_ibdev         167 net/rds/ib.c   	rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom;
rds_ibdev         168 net/rds/ib.c   	rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;
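
The fmr_max_remaps assignment above relies on the GNU C "?:" shorthand: if the device reports max_map_per_fmr as 0, RDS falls back to 32 remaps. Written out, the same assignment is:

	/* equivalent expansion of "device->attrs.max_map_per_fmr ?: 32" */
	rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr ?
				    device->attrs.max_map_per_fmr : 32;
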
rds_ibdev         170 net/rds/ib.c   	rds_ibdev->vector_load = kcalloc(device->num_comp_vectors,
rds_ibdev         173 net/rds/ib.c   	if (!rds_ibdev->vector_load) {
rds_ibdev         179 net/rds/ib.c   	rds_ibdev->dev = device;
rds_ibdev         180 net/rds/ib.c   	rds_ibdev->pd = ib_alloc_pd(device, 0);
rds_ibdev         181 net/rds/ib.c   	if (IS_ERR(rds_ibdev->pd)) {
rds_ibdev         182 net/rds/ib.c   		rds_ibdev->pd = NULL;
rds_ibdev         186 net/rds/ib.c   	rds_ibdev->mr_1m_pool =
rds_ibdev         187 net/rds/ib.c   		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
rds_ibdev         188 net/rds/ib.c   	if (IS_ERR(rds_ibdev->mr_1m_pool)) {
rds_ibdev         189 net/rds/ib.c   		rds_ibdev->mr_1m_pool = NULL;
rds_ibdev         193 net/rds/ib.c   	rds_ibdev->mr_8k_pool =
rds_ibdev         194 net/rds/ib.c   		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL);
rds_ibdev         195 net/rds/ib.c   	if (IS_ERR(rds_ibdev->mr_8k_pool)) {
rds_ibdev         196 net/rds/ib.c   		rds_ibdev->mr_8k_pool = NULL;
rds_ibdev         201 net/rds/ib.c   		 device->attrs.max_fmr, rds_ibdev->max_wrs, rds_ibdev->max_sge,
rds_ibdev         202 net/rds/ib.c   		 rds_ibdev->fmr_max_remaps, rds_ibdev->max_1m_mrs,
rds_ibdev         203 net/rds/ib.c   		 rds_ibdev->max_8k_mrs);
rds_ibdev         207 net/rds/ib.c   		rds_ibdev->use_fastreg ? "FRMR" : "FMR");
rds_ibdev         210 net/rds/ib.c   	list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
rds_ibdev         212 net/rds/ib.c   	refcount_inc(&rds_ibdev->refcount);
rds_ibdev         214 net/rds/ib.c   	ib_set_client_data(device, &rds_ib_client, rds_ibdev);
rds_ibdev         215 net/rds/ib.c   	refcount_inc(&rds_ibdev->refcount);
rds_ibdev         220 net/rds/ib.c   	rds_ib_dev_put(rds_ibdev);
rds_ibdev         241 net/rds/ib.c   	struct rds_ib_device *rds_ibdev;
rds_ibdev         244 net/rds/ib.c   	rds_ibdev = ib_get_client_data(device, &rds_ib_client);
rds_ibdev         245 net/rds/ib.c   	if (rds_ibdev)
rds_ibdev         246 net/rds/ib.c   		refcount_inc(&rds_ibdev->refcount);
rds_ibdev         248 net/rds/ib.c   	return rds_ibdev;
rds_ibdev         260 net/rds/ib.c   	struct rds_ib_device *rds_ibdev = client_data;
rds_ibdev         262 net/rds/ib.c   	if (!rds_ibdev)
rds_ibdev         265 net/rds/ib.c   	rds_ib_dev_shutdown(rds_ibdev);
rds_ibdev         271 net/rds/ib.c   	list_del_rcu(&rds_ibdev->list);
rds_ibdev         280 net/rds/ib.c   	rds_ib_dev_put(rds_ibdev);
rds_ibdev         281 net/rds/ib.c   	rds_ib_dev_put(rds_ibdev);
rds_ibdev         312 net/rds/ib.c   		struct rds_ib_device *rds_ibdev;
rds_ibdev         317 net/rds/ib.c   		rds_ibdev = ic->rds_ibdev;
rds_ibdev         320 net/rds/ib.c   		iinfo->max_send_sge = rds_ibdev->max_sge;
rds_ibdev         321 net/rds/ib.c   		rds_ib_get_mr_info(rds_ibdev, iinfo);
rds_ibdev         350 net/rds/ib.c   		struct rds_ib_device *rds_ibdev;
rds_ibdev         354 net/rds/ib.c   		rds_ibdev = ic->rds_ibdev;
rds_ibdev         357 net/rds/ib.c   		iinfo6->max_send_sge = rds_ibdev->max_sge;
rds_ibdev         358 net/rds/ib.c   		rds6_ib_get_mr_info(rds_ibdev, iinfo6);
rds_ibdev         146 net/rds/ib.h   	struct rds_ib_device	*rds_ibdev;
rds_ibdev         361 net/rds/ib.h   void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
rds_ibdev         390 net/rds/ib.h   int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
rds_ibdev         392 net/rds/ib.h   void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
rds_ibdev         393 net/rds/ib.h   void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
rds_ibdev         179 net/rds/ib_cm.c 	err = rds_ib_update_ipaddr(ic->rds_ibdev, &conn->c_laddr);
rds_ibdev         205 net/rds/ib_cm.c 	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
rds_ibdev         210 net/rds/ib_cm.c 		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
rds_ibdev         212 net/rds/ib_cm.c 		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
rds_ibdev         359 net/rds/ib_cm.c 	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
rds_ibdev         362 net/rds/ib_cm.c 	if (!rds_ibdev)
rds_ibdev         420 net/rds/ib_cm.c static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
rds_ibdev         422 net/rds/ib_cm.c 	int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
rds_ibdev         423 net/rds/ib_cm.c 	int index = rds_ibdev->dev->num_comp_vectors - 1;
rds_ibdev         426 net/rds/ib_cm.c 	for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
rds_ibdev         427 net/rds/ib_cm.c 		if (rds_ibdev->vector_load[i] < min) {
rds_ibdev         429 net/rds/ib_cm.c 			min = rds_ibdev->vector_load[i];
rds_ibdev         433 net/rds/ib_cm.c 	rds_ibdev->vector_load[index]++;
rds_ibdev         437 net/rds/ib_cm.c static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
rds_ibdev         439 net/rds/ib_cm.c 	rds_ibdev->vector_load[index]--;
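
ibdev_get_unused_vector() and ibdev_put_vector() above keep a usage count per completion vector in rds_ibdev->vector_load[] (allocated in rds_ib_add_one() with kcalloc) and hand each new CQ the least-loaded vector. A sketch of the same selection with the array and vector count passed in explicitly (hypothetical helper names, not the kernel's):

/* Pick the completion vector with the lowest usage count, bump it, return it. */
static int pick_least_loaded_vector(int *vector_load, int num_comp_vectors)
{
	int index = num_comp_vectors - 1;
	int min = vector_load[index];
	int i;

	for (i = num_comp_vectors - 1; i >= 0; i--) {
		if (vector_load[i] < min) {
			min = vector_load[i];
			index = i;
		}
	}

	vector_load[index]++;
	return index;
}

/* Release a vector when its CQ goes away. */
static void put_vector(int *vector_load, int index)
{
	vector_load[index]--;
}
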
rds_ibdev         452 net/rds/ib_cm.c 	struct rds_ib_device *rds_ibdev;
rds_ibdev         460 net/rds/ib_cm.c 	rds_ibdev = rds_ib_get_client_data(dev);
rds_ibdev         461 net/rds/ib_cm.c 	if (!rds_ibdev)
rds_ibdev         468 net/rds/ib_cm.c 	fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0);
rds_ibdev         471 net/rds/ib_cm.c 	rds_ib_add_conn(rds_ibdev, conn);
rds_ibdev         473 net/rds/ib_cm.c 	max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_send_wr + 1 ?
rds_ibdev         474 net/rds/ib_cm.c 		rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_send_wr;
rds_ibdev         478 net/rds/ib_cm.c 	max_wrs = rds_ibdev->max_wrs < rds_ib_sysctl_max_recv_wr + 1 ?
rds_ibdev         479 net/rds/ib_cm.c 		rds_ibdev->max_wrs - 1 : rds_ib_sysctl_max_recv_wr;
rds_ibdev         484 net/rds/ib_cm.c 	ic->i_pd = rds_ibdev->pd;
rds_ibdev         486 net/rds/ib_cm.c 	ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
rds_ibdev         495 net/rds/ib_cm.c 		ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
rds_ibdev         500 net/rds/ib_cm.c 	ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
rds_ibdev         509 net/rds/ib_cm.c 		ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
rds_ibdev         533 net/rds/ib_cm.c 	attr.cap.max_send_sge = rds_ibdev->max_sge;
rds_ibdev         625 net/rds/ib_cm.c 	rds_ib_remove_conn(rds_ibdev, conn);
rds_ibdev         627 net/rds/ib_cm.c 	rds_ib_dev_put(rds_ibdev);
rds_ibdev        1033 net/rds/ib_cm.c 			if (ic->rds_ibdev)
rds_ibdev        1034 net/rds/ib_cm.c 				ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector);
rds_ibdev        1039 net/rds/ib_cm.c 			if (ic->rds_ibdev)
rds_ibdev        1040 net/rds/ib_cm.c 				ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector);
rds_ibdev        1073 net/rds/ib_cm.c 		if (ic->rds_ibdev)
rds_ibdev        1074 net/rds/ib_cm.c 			rds_ib_remove_conn(ic->rds_ibdev, conn);
rds_ibdev        1084 net/rds/ib_cm.c 	BUG_ON(ic->rds_ibdev);
rds_ibdev        1187 net/rds/ib_cm.c 	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;
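
The last ib_cm.c hit is the teardown side of the conn-list bookkeeping: a connection bound to a device sits on rds_ibdev->conn_list under rds_ibdev->spinlock, while a device-less connection sits on the global no-device list under ib_nodev_conns_lock, so the code picks whichever lock protects the list it is currently on. Reduced to a sketch (surrounding teardown omitted):

	spinlock_t *lock_ptr;

	/* choose the lock for whichever list this connection lives on */
	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->ib_node);		/* unhook from conn_list or ib_nodev_conns */
	spin_unlock_irq(lock_ptr);
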
rds_ibdev          35 net/rds/ib_fmr.c struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
rds_ibdev          43 net/rds/ib_fmr.c 		pool = rds_ibdev->mr_8k_pool;
rds_ibdev          45 net/rds/ib_fmr.c 		pool = rds_ibdev->mr_1m_pool;
rds_ibdev          53 net/rds/ib_fmr.c 			pool = rds_ibdev->mr_1m_pool;
rds_ibdev          55 net/rds/ib_fmr.c 			pool = rds_ibdev->mr_8k_pool;
rds_ibdev          63 net/rds/ib_fmr.c 			    rdsibdev_to_node(rds_ibdev));
rds_ibdev          70 net/rds/ib_fmr.c 	fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
rds_ibdev          98 net/rds/ib_fmr.c static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
rds_ibdev         102 net/rds/ib_fmr.c 	struct ib_device *dev = rds_ibdev->dev;
rds_ibdev         154 net/rds/ib_fmr.c 				       rdsibdev_to_node(rds_ibdev));
rds_ibdev         198 net/rds/ib_fmr.c struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *rds_ibdev,
rds_ibdev         207 net/rds/ib_fmr.c 	ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
rds_ibdev         211 net/rds/ib_fmr.c 	ibmr->device = rds_ibdev;
rds_ibdev         213 net/rds/ib_fmr.c 	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
rds_ibdev          53 net/rds/ib_frmr.c static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
rds_ibdev          62 net/rds/ib_frmr.c 		pool = rds_ibdev->mr_8k_pool;
rds_ibdev          64 net/rds/ib_frmr.c 		pool = rds_ibdev->mr_1m_pool;
rds_ibdev          71 net/rds/ib_frmr.c 			    rdsibdev_to_node(rds_ibdev));
rds_ibdev          78 net/rds/ib_frmr.c 	frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
rds_ibdev         187 net/rds/ib_frmr.c static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
rds_ibdev         192 net/rds/ib_frmr.c 	struct ib_device *dev = rds_ibdev->dev;
rds_ibdev         260 net/rds/ib_frmr.c 	ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
rds_ibdev         401 net/rds/ib_frmr.c struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
rds_ibdev         418 net/rds/ib_frmr.c 		ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
rds_ibdev         425 net/rds/ib_frmr.c 	ibmr->device = rds_ibdev;
rds_ibdev         426 net/rds/ib_frmr.c 	ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
rds_ibdev         118 net/rds/ib_mr.h void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
rds_ibdev         120 net/rds/ib_mr.h void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
rds_ibdev         143 net/rds/ib_mr.h struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
rds_ibdev          45 net/rds/ib_rdma.c 	struct rds_ib_device *rds_ibdev;
rds_ibdev          49 net/rds/ib_rdma.c 	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
rds_ibdev          50 net/rds/ib_rdma.c 		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
rds_ibdev          52 net/rds/ib_rdma.c 				refcount_inc(&rds_ibdev->refcount);
rds_ibdev          54 net/rds/ib_rdma.c 				return rds_ibdev;
rds_ibdev          63 net/rds/ib_rdma.c static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
rds_ibdev          73 net/rds/ib_rdma.c 	spin_lock_irq(&rds_ibdev->spinlock);
rds_ibdev          74 net/rds/ib_rdma.c 	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
rds_ibdev          75 net/rds/ib_rdma.c 	spin_unlock_irq(&rds_ibdev->spinlock);
rds_ibdev          80 net/rds/ib_rdma.c static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
rds_ibdev          86 net/rds/ib_rdma.c 	spin_lock_irq(&rds_ibdev->spinlock);
rds_ibdev          87 net/rds/ib_rdma.c 	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
rds_ibdev          94 net/rds/ib_rdma.c 	spin_unlock_irq(&rds_ibdev->spinlock);
rds_ibdev         100 net/rds/ib_rdma.c int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
rds_ibdev         107 net/rds/ib_rdma.c 		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
rds_ibdev         109 net/rds/ib_rdma.c 	if (rds_ibdev_old != rds_ibdev) {
rds_ibdev         112 net/rds/ib_rdma.c 		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
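
The ipaddr hits above follow the standard RCU list idiom: writers take rds_ibdev->spinlock and use the _rcu list helpers, and the lookup walks rds_ib_devices and each ipaddr_list under rcu_read_lock(), pinning the matching device with refcount_inc() before leaving the read-side critical section. A condensed sketch of that reader (hypothetical function name; IPv6 mapping and the removal path are omitted, and the ipaddr field name is assumed from context):

static struct rds_ib_device *lookup_dev_by_ipaddr(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				refcount_inc(&rds_ibdev->refcount); /* pin before dropping RCU */
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();
	return NULL;
}
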
rds_ibdev         119 net/rds/ib_rdma.c void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
rds_ibdev         129 net/rds/ib_rdma.c 	spin_lock(&rds_ibdev->spinlock);
rds_ibdev         130 net/rds/ib_rdma.c 	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
rds_ibdev         131 net/rds/ib_rdma.c 	spin_unlock(&rds_ibdev->spinlock);
rds_ibdev         134 net/rds/ib_rdma.c 	ic->rds_ibdev = rds_ibdev;
rds_ibdev         135 net/rds/ib_rdma.c 	refcount_inc(&rds_ibdev->refcount);
rds_ibdev         138 net/rds/ib_rdma.c void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
rds_ibdev         145 net/rds/ib_rdma.c 	spin_lock_irq(&rds_ibdev->spinlock);
rds_ibdev         148 net/rds/ib_rdma.c 	spin_unlock_irq(&rds_ibdev->spinlock);
rds_ibdev         154 net/rds/ib_rdma.c 	ic->rds_ibdev = NULL;
rds_ibdev         155 net/rds/ib_rdma.c 	rds_ib_dev_put(rds_ibdev);
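
rds_ib_add_conn() and rds_ib_remove_conn() pair the conn_list manipulation with the device refcount: hooking a connection onto a device takes a reference that rds_ib_remove_conn() later drops through rds_ib_dev_put(), so a device cannot be freed while connections still point at it. The core of that pairing (movement to and from the global ib_nodev_conns list omitted):

	/* add: hook the connection onto the device and pin the device */
	spin_lock(&rds_ibdev->spinlock);	/* real code nests this inside ib_nodev_conns_lock */
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	ic->rds_ibdev = rds_ibdev;
	refcount_inc(&rds_ibdev->refcount);

	/* remove: unhook and drop the reference taken on add */
	spin_lock_irq(&rds_ibdev->spinlock);
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);
	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
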
rds_ibdev         172 net/rds/ib_rdma.c void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
rds_ibdev         174 net/rds/ib_rdma.c 	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
rds_ibdev         181 net/rds/ib_rdma.c void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
rds_ibdev         184 net/rds/ib_rdma.c 	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
rds_ibdev         214 net/rds/ib_rdma.c 	struct rds_ib_device *rds_ibdev = ibmr->device;
rds_ibdev         218 net/rds/ib_rdma.c 		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
rds_ibdev         222 net/rds/ib_rdma.c 		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
rds_ibdev         230 net/rds/ib_rdma.c 	struct rds_ib_device *rds_ibdev = ibmr->device;
rds_ibdev         233 net/rds/ib_rdma.c 		ib_dma_unmap_sg(rds_ibdev->dev,
rds_ibdev         481 net/rds/ib_rdma.c 	struct rds_ib_device *rds_ibdev = ibmr->device;
rds_ibdev         486 net/rds/ib_rdma.c 	if (rds_ibdev->use_fastreg)
rds_ibdev         511 net/rds/ib_rdma.c 	rds_ib_dev_put(rds_ibdev);
rds_ibdev         516 net/rds/ib_rdma.c 	struct rds_ib_device *rds_ibdev;
rds_ibdev         519 net/rds/ib_rdma.c 	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
rds_ibdev         520 net/rds/ib_rdma.c 		if (rds_ibdev->mr_8k_pool)
rds_ibdev         521 net/rds/ib_rdma.c 			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);
rds_ibdev         523 net/rds/ib_rdma.c 		if (rds_ibdev->mr_1m_pool)
rds_ibdev         524 net/rds/ib_rdma.c 			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
rds_ibdev         533 net/rds/ib_rdma.c 	struct rds_ib_device *rds_ibdev;
rds_ibdev         538 net/rds/ib_rdma.c 	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
rds_ibdev         539 net/rds/ib_rdma.c 	if (!rds_ibdev) {
rds_ibdev         547 net/rds/ib_rdma.c 	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
rds_ibdev         552 net/rds/ib_rdma.c 	if (rds_ibdev->use_fastreg)
rds_ibdev         553 net/rds/ib_rdma.c 		ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
rds_ibdev         555 net/rds/ib_rdma.c 		ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
rds_ibdev         564 net/rds/ib_rdma.c 	if (rds_ibdev)
rds_ibdev         565 net/rds/ib_rdma.c 		rds_ib_dev_put(rds_ibdev);
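
rds_ib_get_mr() (the hits just above) pins a device via rds_ib_get_device() on the socket's bound address, checks that both MR pools exist, and then dispatches on use_fastreg; the FRMR path also needs the connection because the fast-registration work request is posted on its QP. Condensed, with error handling trimmed:

	if (rds_ibdev->use_fastreg)
		ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
	else
		ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);

	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);	/* drop the rds_ib_get_device() reference */
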
rds_ibdev         579 net/rds/ib_rdma.c struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
rds_ibdev         600 net/rds/ib_rdma.c 		pool->max_items = rds_ibdev->max_1m_mrs;
rds_ibdev         604 net/rds/ib_rdma.c 		pool->max_items = rds_ibdev->max_8k_mrs;
rds_ibdev         608 net/rds/ib_rdma.c 	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
rds_ibdev         610 net/rds/ib_rdma.c 	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
rds_ibdev         611 net/rds/ib_rdma.c 	pool->use_fastreg = rds_ibdev->use_fastreg;
rds_ibdev         849 net/rds/ib_send.c 	u32 max_sge = ic->rds_ibdev->max_sge;