This source file includes the following definitions:
- set_local_comm_id
- get_local_comm_id
- set_remote_comm_id
- get_remote_comm_id
- gid_from_req_msg
- id_map_find_by_sl_id
- id_map_ent_timeout
- sl_id_map_add
- id_map_alloc
- id_map_get
- schedule_delayed
- mlx4_ib_multiplex_cm_handler
- mlx4_ib_demux_cm_handler
- mlx4_ib_cm_paravirt_init
- mlx4_ib_cm_paravirt_clean

#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT (30 * HZ)

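/*
 * One entry per paravirtualized CM connection: maps the CM ID chosen by a
 * slave (sl_cm_id) to a host-unique paravirtual CM ID (pv_cm_id).  Each
 * entry is indexed twice: by (sl_cm_id, slave_id) in the sl_id_map rb-tree
 * and by pv_cm_id in the pv_id_table xarray.  The delayed work frees the
 * entry CM_CLEANUP_CACHE_TIMEOUT after connection teardown, presumably so
 * that late retransmissions can still be translated.
 */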
struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

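/*
 * Minimal overlays of the CM MAD wire format: most CM messages carry the
 * local and remote communication IDs directly after the MAD header, while
 * SIDR messages carry a single request ID in that position.  cm_req_msg
 * covers only enough of a CM REQ to reach the primary path SGID at byte
 * offset 0x60.
 */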
struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
};

struct cm_sidr_generic_msg {
	struct ib_mad_hdr hdr;
	__be32 request_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};

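/*
 * The four helpers below get or set the communication ID in place,
 * special-casing SIDR REQ/REP, which use request_id instead of the usual
 * local/remote comm ID pair.
 */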
static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to set local_comm_id in SIDR_REP\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->local_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to get local_comm_id in SIDR_REP\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->local_comm_id);
	}
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->remote_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to get remote_comm_id in SIDR_REQ\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->remote_comm_id);
	}
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}

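/*
 * Look up a mapping by (slave_id, sl_cm_id).  The sl_id_map rb-tree is
 * sorted by sl_cm_id first and slave_id second.
 */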
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}

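/*
 * Delayed-work handler: drop an entry from both databases once
 * CM_CLEANUP_CACHE_TIMEOUT has elapsed since the teardown MAD was seen.
 */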
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;

	spin_lock(&sriov->id_map_lock);
	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

	/* Go to the bottom of the tree */
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id ||
		    (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}

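/*
 * Allocate a new mapping: pick the next free pv_cm_id cyclically from the
 * xarray, then publish the entry in the rb-tree and on cm_list.
 */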
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
			      xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
	if (ret >= 0) {
		spin_lock(&sriov->id_map_lock);
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}

	/* error flow */
	kfree(ent);
	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
	return ERR_PTR(ret);
}

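/*
 * Resolve a mapping either by pv_cm_id (when *pv_cm_id != -1) or by
 * (slave_id, sl_cm_id); in the latter case *pv_cm_id is filled in on
 * success.
 */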
static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

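/*
 * Arm the cleanup timer for an entry, unless the device is going down or
 * a delete is already pending.
 */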
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);

	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}

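/*
 * Outbound path (slave toward the wire): rewrite the slave-local comm ID
 * to the host-unique pv_cm_id.  REQ, REP and SIDR REQ allocate a mapping
 * if none exists yet; REJ and SIDR REP pass through untouched; DREQ arms
 * the delayed cleanup.
 */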
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
		if (id)
			goto cont;
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				     __func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return 0;
	} else {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL!\n",
			 slave_id, sl_cm_id);
		return -EINVAL;
	}

cont:
	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	return 0;
}

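/*
 * Inbound path (wire toward a slave): for REQ/SIDR REQ no mapping exists
 * on this side yet, so the target slave is resolved from the request's
 * primary path SGID.  Everything else is looked up by pv_cm_id and the
 * slave-local comm ID is restored; DREQ/REJ arm the delayed cleanup.
 */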
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	u32 pv_cm_id;
	struct id_map_entry *id;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		union ib_gid gid;

		if (!slave)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     be64_to_cpu(gid.global.interface_id));
			return -ENOENT;
		}
		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x\n", pv_cm_id);
		return -ENOENT;
	}

	if (slave)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
		schedule_delayed(ibdev, id);

	return 0;
}

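/*
 * Per-device setup/teardown of the CM paravirtualization state.  Cleaning
 * with slave < 0 drops every mapping; with a specific slave, only that
 * slave's entries are dropped, after cancelling any pending delayed frees.
 */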
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
}

void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_scheduled_work(); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}

		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		}

		/* add remaining nodes from cm_list */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}
}