Lines Matching refs:rnicp

84 static void disable_dbs(struct iwch_dev *rnicp)  in disable_dbs()  argument
86 spin_lock_irq(&rnicp->lock); in disable_dbs()
87 idr_for_each(&rnicp->qpidr, disable_qp_db, NULL); in disable_dbs()
88 spin_unlock_irq(&rnicp->lock); in disable_dbs()
91 static void enable_dbs(struct iwch_dev *rnicp, int ring_db) in enable_dbs() argument
93 spin_lock_irq(&rnicp->lock); in enable_dbs()
94 idr_for_each(&rnicp->qpidr, enable_qp_db, in enable_dbs()
96 spin_unlock_irq(&rnicp->lock); in enable_dbs()
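
The two idr_for_each() walks above (these fragments appear to come from the Chelsio cxgb3 iWARP driver, iwch.c) visit every QP stored in rnicp->qpidr while holding rnicp->lock. A minimal sketch of what the per-QP callbacks named there could look like follows; struct iwch_qp, its wq member, the cxio_disable_wq_db()/cxio_enable_wq_db() helpers and iwch_ring_wq_db() are assumptions for illustration, not taken from this listing.

static int disable_qp_db(int id, void *p, void *data)
{
	struct iwch_qp *qhp = p;		/* idr payload: one queue pair */

	cxio_disable_wq_db(&qhp->wq);		/* assumed helper: block doorbell writes for this WQ */
	return 0;				/* non-zero would stop the idr walk early */
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct iwch_qp *qhp = p;
	int ring_db = (int)(unsigned long)data;	/* ring_db flag forwarded by enable_dbs() */

	cxio_enable_wq_db(&qhp->wq);		/* assumed helper: allow doorbell writes again */
	if (ring_db)
		iwch_ring_wq_db(qhp);		/* hypothetical kick so queued work is not stranded */
	return 0;
}

The return value matters here: idr_for_each() stops walking as soon as a callback returns non-zero, so both sketches return 0 to make sure every QP is covered.
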
101 struct iwch_dev *rnicp = container_of(work, struct iwch_dev, in iwch_db_drop_task() local
103 enable_dbs(rnicp, 1); in iwch_db_drop_task()
106 static void rnic_init(struct iwch_dev *rnicp) in rnic_init() argument
108 PDBG("%s iwch_dev %p\n", __func__, rnicp); in rnic_init()
109 idr_init(&rnicp->cqidr); in rnic_init()
110 idr_init(&rnicp->qpidr); in rnic_init()
111 idr_init(&rnicp->mmidr); in rnic_init()
112 spin_lock_init(&rnicp->lock); in rnic_init()
113 INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task); in rnic_init()
115 rnicp->attr.max_qps = T3_MAX_NUM_QP - 32; in rnic_init()
116 rnicp->attr.max_wrs = T3_MAX_QP_DEPTH; in rnic_init()
117 rnicp->attr.max_sge_per_wr = T3_MAX_SGE; in rnic_init()
118 rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE; in rnic_init()
119 rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1; in rnic_init()
120 rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH; in rnic_init()
121 rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev); in rnic_init()
122 rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE; in rnic_init()
123 rnicp->attr.max_pds = T3_MAX_NUM_PD - 1; in rnic_init()
124 rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK; in rnic_init()
125 rnicp->attr.max_mr_size = T3_MAX_MR_SIZE; in rnic_init()
126 rnicp->attr.can_resize_wq = 0; in rnic_init()
127 rnicp->attr.max_rdma_reads_per_qp = 8; in rnic_init()
128 rnicp->attr.max_rdma_read_resources = in rnic_init()
129 rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps; in rnic_init()
130 rnicp->attr.max_rdma_read_qp_depth = 8; /* IRD */ in rnic_init()
131 rnicp->attr.max_rdma_read_depth = in rnic_init()
132 rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps; in rnic_init()
133 rnicp->attr.rq_overflow_handled = 0; in rnic_init()
134 rnicp->attr.can_modify_ird = 0; in rnic_init()
135 rnicp->attr.can_modify_ord = 0; in rnic_init()
136 rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1; in rnic_init()
137 rnicp->attr.stag0_value = 1; in rnic_init()
138 rnicp->attr.zbva_support = 1; in rnic_init()
139 rnicp->attr.local_invalidate_fence = 1; in rnic_init()
140 rnicp->attr.cq_overflow_detection = 1; in rnic_init()
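
The attr fields filled in by rnic_init() are the device capability limits that the driver later reports to verbs consumers. A hedged sketch of how such limits would typically surface through the query_device verb follows; the function name iwch_query_device, the to_iwch_dev() wrapper, and the exact choice of ib_device_attr fields are assumptions, not part of this listing.

static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
{
	struct iwch_dev *dev = to_iwch_dev(ibdev);	/* assumed container_of-style wrapper */

	memset(props, 0, sizeof(*props));
	props->max_qp         = dev->attr.max_qps;
	props->max_qp_wr      = dev->attr.max_wrs;
	props->max_sge        = dev->attr.max_sge_per_wr;
	props->max_cq         = dev->attr.max_cqs;
	props->max_cqe        = dev->attr.max_cqes_per_cq;
	props->max_mr         = dev->attr.max_mem_regs;
	props->max_mr_size    = dev->attr.max_mr_size;
	props->max_pd         = dev->attr.max_pds;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;	/* per-QP IRD/ORD limit */
	return 0;
}
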
146 struct iwch_dev *rnicp; in open_rnic_dev() local
151 rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp)); in open_rnic_dev()
152 if (!rnicp) { in open_rnic_dev()
156 rnicp->rdev.ulp = rnicp; in open_rnic_dev()
157 rnicp->rdev.t3cdev_p = tdev; in open_rnic_dev()
161 if (cxio_rdev_open(&rnicp->rdev)) { in open_rnic_dev()
164 ib_dealloc_device(&rnicp->ibdev); in open_rnic_dev()
168 rnic_init(rnicp); in open_rnic_dev()
170 list_add_tail(&rnicp->entry, &dev_list); in open_rnic_dev()
173 if (iwch_register_device(rnicp)) { in open_rnic_dev()
178 pci_name(rnicp->rdev.rnic_info.pdev)); in open_rnic_dev()
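
open_rnic_dev() above allocates the ib_device wrapper, links it to the underlying t3cdev, opens the low-level rdev, runs rnic_init() and registers the verbs device after adding the instance to dev_list. A sketch of the mirror-image teardown such a driver would pair with it is below; close_rnic_dev(), dev_mutex, iwch_unregister_device() and cxio_rdev_close() are assumptions inferred only from the open path shown here.

static void close_rnic_dev(struct t3cdev *tdev)
{
	struct iwch_dev *dev, *tmp;

	mutex_lock(&dev_mutex);				/* assumed lock guarding dev_list */
	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
		if (dev->rdev.t3cdev_p != tdev)
			continue;
		list_del(&dev->entry);
		iwch_unregister_device(dev);		/* undo iwch_register_device() */
		cxio_rdev_close(&dev->rdev);		/* undo cxio_rdev_open() */
		idr_destroy(&dev->cqidr);		/* undo the idr_init() calls in rnic_init() */
		idr_destroy(&dev->qpidr);
		idr_destroy(&dev->mmidr);
		ib_dealloc_device(&dev->ibdev);		/* undo ib_alloc_device() */
		break;
	}
	mutex_unlock(&dev_mutex);
}
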
208 struct iwch_dev *rnicp; in iwch_event_handler() local
215 rnicp = rdev_to_iwch_dev(rdev); in iwch_event_handler()
235 disable_dbs(rnicp); in iwch_event_handler()
239 enable_dbs(rnicp, 1); in iwch_event_handler()
246 disable_dbs(rnicp); in iwch_event_handler()
253 schedule_delayed_work(&rnicp->db_drop_task, in iwch_event_handler()
260 event.device = &rnicp->ibdev; in iwch_event_handler()
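
iwch_event_handler() reacts to low-level t3cdev status changes: it disables doorbells for some status codes, re-enables them for others (or defers that to db_drop_task via schedule_delayed_work), and finally builds an ib_event anchored on rnicp->ibdev. A hedged sketch of how such an event is typically completed and dispatched follows; the helper name and the specific event-code mapping are assumptions, while ib_dispatch_event() itself is the standard verbs notification call.

static void iwch_report_fatal(struct iwch_dev *rnicp)
{
	struct ib_event event = { };

	event.device = &rnicp->ibdev;		/* same assignment as in the handler above */
	event.event  = IB_EVENT_DEVICE_FATAL;	/* example mapping for an unrecoverable error */
	ib_dispatch_event(&event);		/* fan the event out to registered clients */
}
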