This source file includes the following definitions:
- rxe_srq_chk_attr
- rxe_srq_from_init
- rxe_srq_from_attr
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

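/* Validate SRQ attributes against the device capabilities and the
 * current SRQ state; max_wr and max_sge are clamped up to the driver
 * minimums. Returns 0 on success or -EINVAL on a bad attribute.
 */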
int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
{
	if (srq && srq->error) {
		pr_warn("srq in error state\n");
		goto err1;
	}

	if (mask & IB_SRQ_MAX_WR) {
		if (attr->max_wr > rxe->attr.max_srq_wr) {
			pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
				attr->max_wr, rxe->attr.max_srq_wr);
			goto err1;
		}

		if (attr->max_wr <= 0) {
			pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
			goto err1;
		}

		if (srq && srq->limit && (attr->max_wr < srq->limit)) {
			pr_warn("max_wr (%d) < srq->limit (%d)\n",
				attr->max_wr, srq->limit);
			goto err1;
		}

		if (attr->max_wr < RXE_MIN_SRQ_WR)
			attr->max_wr = RXE_MIN_SRQ_WR;
	}

	if (mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit > rxe->attr.max_srq_wr) {
			pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
				attr->srq_limit, rxe->attr.max_srq_wr);
			goto err1;
		}

		if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
			pr_warn("srq_limit (%d) > cur limit(%d)\n",
				attr->srq_limit,
				srq->rq.queue->buf->index_mask);
			goto err1;
		}
	}

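	/* the full init mask is only passed on the create path, so
	 * max_sge is validated at SRQ creation time only
	 */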
	if (mask == IB_SRQ_INIT_MASK) {
		if (attr->max_sge > rxe->attr.max_srq_sge) {
			pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
				attr->max_sge, rxe->attr.max_srq_sge);
			goto err1;
		}

		if (attr->max_sge < RXE_MIN_SRQ_SGE)
			attr->max_sge = RXE_MIN_SRQ_SGE;
	}

	return 0;

err1:
	return -EINVAL;
}

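/* Build a new SRQ from the init attributes, allocate its receive
 * queue and, for userspace clients, return the mmap info and SRQ
 * number through uresp.
 */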
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init, struct ib_udata *udata,
		      struct rxe_create_srq_resp __user *uresp)
{
	int err;
	int srq_wqe_size;
	struct rxe_queue *q;

	srq->ibsrq.event_handler = init->event_handler;
	srq->ibsrq.srq_context = init->srq_context;
	srq->limit = init->attr.srq_limit;
	srq->srq_num = srq->pelem.index;
	srq->rq.max_wr = init->attr.max_wr;
	srq->rq.max_sge = init->attr.max_sge;

	srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);

	spin_lock_init(&srq->rq.producer_lock);
	spin_lock_init(&srq->rq.consumer_lock);

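	/* rxe_queue_init takes max_wr by reference and may round the
	 * requested depth up to the actual queue size
	 */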
	q = rxe_queue_init(rxe, &srq->rq.max_wr,
			   srq_wqe_size);
	if (!q) {
		pr_warn("unable to allocate queue for srq\n");
		return -ENOMEM;
	}

	srq->rq.queue = q;

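	/* publish mmap info for the queue buffer when a userspace
	 * response buffer was provided; kernel clients pass a NULL
	 * uresp and no mapping is set up
	 */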
	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
			   q->buf_size, &q->ip);
	if (err) {
		vfree(q->buf);
		kfree(q);
		return err;
	}

	if (uresp) {
		if (copy_to_user(&uresp->srq_num, &srq->srq_num,
				 sizeof(uresp->srq_num))) {
			rxe_queue_cleanup(q);
			return -EFAULT;
		}
	}

	return 0;
}

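/* Apply modified SRQ attributes that have already passed
 * rxe_srq_chk_attr; resizing the queue requires handing new mmap
 * info back to userspace through the address supplied in ucmd.
 */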
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
{
	int err;
	struct rxe_queue *q = srq->rq.queue;
	struct mminfo __user *mi = NULL;

	if (mask & IB_SRQ_MAX_WR) {
		/* for userspace clients the mmap info for the resized
		 * queue is written back through the user address
		 * carried in the modify command, not in the response
		 */
		mi = u64_to_user_ptr(ucmd->mmap_info_addr);

		err = rxe_queue_resize(q, &attr->max_wr,
				       rcv_wqe_size(srq->rq.max_sge), udata, mi,
				       &srq->rq.producer_lock,
				       &srq->rq.consumer_lock);
		if (err)
			goto err2;
	}

	if (mask & IB_SRQ_LIMIT)
		srq->limit = attr->srq_limit;

	return 0;

err2:
	rxe_queue_cleanup(q);
	srq->rq.queue = NULL;
	return err;
}