This source file includes the following definitions:
- get_wqe
- mlx4_ib_srq_event
- mlx4_ib_create_srq
- mlx4_ib_modify_srq
- mlx4_ib_query_srq
- mlx4_ib_destroy_srq
- mlx4_ib_free_srq_wqe
- mlx4_ib_post_srq_recv

#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>

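/* Return a pointer to the WQE at index n within the SRQ buffer. */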
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
	return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

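/*
 * Translate a hardware SRQ event (limit reached or catastrophic error)
 * into an IB event and deliver it to the consumer's event handler.
 */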
static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

int mlx4_ib_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ib_srq->device);
	struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	struct mlx4_ib_srq *srq = to_msrq(ib_srq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	u32 cqn;
	u16 xrcdn;
	int desc_size;
	int buf_size;
	int err;
	int i;

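	/* Sanity check SRQ size before proceeding */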
	if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

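	/*
	 * Each WQE holds a next-segment header followed by max_gs scatter
	 * entries; round the descriptor size up to a power of two, with a
	 * minimum of 32 bytes.
	 */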
	desc_size = max(32UL,
			roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof (struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

	if (udata) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
		if (IS_ERR(srq->umem))
			return PTR_ERR(srq->umem);

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
				    PAGE_SHIFT, &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
		if (err)
			return err;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
				   &srq->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head = 0;
		srq->tail = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

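		/*
		 * Chain every WQE to the next one on the free list and
		 * mark all scatter entries invalid.
		 */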
		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + desc_size;
			     ++scatter)
				scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
				    &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
		if (err)
			goto err_mtt;

		srq->wrid = kvmalloc_array(srq->msrq.max,
					   sizeof(u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

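	/*
	 * XRC SRQs take their CQ and XRC domain from the init attributes;
	 * basic SRQs use CQN 0 and the device's reserved XRC domain.
	 */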
	cqn = ib_srq_has_cq(init_attr->srq_type) ?
		to_mcq(init_attr->ext.cq)->mcq.cqn : 0;
	xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
		(u16) dev->dev->caps.reserved_xrcds;
	err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,
			     &srq->mtt, srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (udata)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return 0;

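/* Error unwinding: release resources in reverse order of allocation. */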
err_wrid:
	if (udata)
		mlx4_ib_db_unmap_user(ucontext, &srq->db);
	else
		kvfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (!srq->umem)
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);
	ib_umem_release(srq->umem);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &srq->db);

	return err;
}

int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;

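	/* We don't support resizing SRQs (yet?) */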
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	int limit_watermark;

	ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
	if (ret)
		return ret;

	srq_attr->srq_limit = limit_watermark;
	srq_attr->max_wr = srq->msrq.max - 1;
	srq_attr->max_sge = srq->msrq.max_gs;

	return 0;
}

void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);

	mlx4_srq_free(dev->dev, &msrq->msrq);
	mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

	if (udata) {
		mlx4_ib_db_unmap_user(
			rdma_udata_to_drv_context(
				udata,
				struct mlx4_ib_ucontext,
				ibucontext),
			&msrq->db);
	} else {
		kvfree(msrq->wrid);
		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}
	ib_umem_release(msrq->umem);
}

void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
{
	struct mlx4_wqe_srq_next_seg *next;

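	/* always called with interrupts disabled. */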
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);

	spin_lock_irqsave(&srq->lock, flags);
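	/* Fail all WRs if the device is in an internal error state. */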
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

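		/* head == tail means the free list is empty: SRQ is full. */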
		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

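		/* Pop the WQE at the head of the free list. */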
		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
		}

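		/* Terminate a short scatter list with an invalid-lkey sentinel. */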
		if (i < srq->msrq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}

out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}