This source file includes following definitions.
- put_driver_name_print_type
- _rdma_nl_put_driver_u32
- _rdma_nl_put_driver_u64
- rdma_nl_put_driver_u32
- rdma_nl_put_driver_u32_hex
- rdma_nl_put_driver_u64
- rdma_nl_put_driver_u64_hex
- fill_nldev_handle
- fill_dev_info
- fill_port_info
- fill_res_info_entry
- fill_res_info
- fill_res_name_pid
- fill_res_entry
- fill_res_qp_entry
- fill_res_cm_id_entry
- fill_res_cq_entry
- fill_res_mr_entry
- fill_res_pd_entry
- fill_stat_counter_mode
- fill_stat_counter_qp_entry
- fill_stat_counter_qps
- fill_stat_hwcounter_entry
- fill_stat_counter_hwcounters
- fill_res_counter_entry
- nldev_get_doit
- nldev_set_doit
- _nldev_get_dumpit
- nldev_get_dumpit
- nldev_port_get_doit
- nldev_port_get_dumpit
- nldev_res_get_doit
- _nldev_res_get_dumpit
- nldev_res_get_dumpit
- res_get_common_doit
- res_get_common_dumpit
- link_ops_get
- rdma_link_register
- rdma_link_unregister
- nldev_newlink
- nldev_dellink
- nldev_get_chardev
- nldev_sys_get_doit
- nldev_set_sys_set_doit
- nldev_stat_set_doit
- nldev_stat_del_doit
- stat_get_doit_default_counter
- stat_get_doit_qp
- nldev_stat_get_doit
- nldev_stat_get_dumpit
- nldev_init
- nldev_exit
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33 #include <linux/module.h>
34 #include <linux/pid.h>
35 #include <linux/pid_namespace.h>
36 #include <linux/mutex.h>
37 #include <net/netlink.h>
38 #include <rdma/rdma_cm.h>
39 #include <rdma/rdma_netlink.h>
40
41 #include "core_priv.h"
42 #include "cma_priv.h"
43 #include "restrack.h"
44
45
46
47
/*
 * Netlink attribute policy for the RDMA_NL_NLDEV family.  Indexed by
 * RDMA_NLDEV_ATTR_* value; nlmsg_parse() validates the type and maximum
 * length of every attribute against this table before the doit/dumpit
 * handlers look at them.
 */
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_CHARDEV]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_CHARDEV_ABI]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_CHARDEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_CHARDEV_TYPE]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE },
	[RDMA_NLDEV_ATTR_DEV_DIM]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = IB_DEVICE_NAME_MAX },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_PROTOCOL]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_DRIVER]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DRIVER_STRING]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_DRIVER_S32]		= { .type = NLA_S32 },
	[RDMA_NLDEV_ATTR_DRIVER_S64]		= { .type = NLA_S64 },
	[RDMA_NLDEV_ATTR_DRIVER_U32]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DRIVER_U64]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_FW_VERSION]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_LID]			= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LINK_TYPE]		= { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_LMC]			= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_NDEV_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_NDEV_NAME]		= { .type = NLA_NUL_STRING,
					.len = IFNAMSIZ },
	[RDMA_NLDEV_ATTR_NODE_GUID]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_PORT_INDEX]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_CM_ID]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_IDN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CTXN]		= { .type = NLA_U32 },
	/* Addresses carry a full sockaddr_storage; no .type means binary blob */
	[RDMA_NLDEV_ATTR_RES_DST_ADDR]		= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_IOVA]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME]		= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_RES_LKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MR]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MRLEN]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE]	= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PD]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PDN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PS]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_QP]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RKEY]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR]		= {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_STATE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
					.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
	[RDMA_NLDEV_ATTR_RES_TYPE]		= { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SM_LID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]		= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_MODE]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_RES]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_COUNTER]		= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY]	= { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING },
	[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_NET_NS_FD]			= { .type = NLA_U32 },
	[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]	= { .type = NLA_U8 },
};
145
146 static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
147 enum rdma_nldev_print_type print_type)
148 {
149 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name))
150 return -EMSGSIZE;
151 if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC &&
152 nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type))
153 return -EMSGSIZE;
154
155 return 0;
156 }
157
158 static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
159 enum rdma_nldev_print_type print_type,
160 u32 value)
161 {
162 if (put_driver_name_print_type(msg, name, print_type))
163 return -EMSGSIZE;
164 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value))
165 return -EMSGSIZE;
166
167 return 0;
168 }
169
170 static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
171 enum rdma_nldev_print_type print_type,
172 u64 value)
173 {
174 if (put_driver_name_print_type(msg, name, print_type))
175 return -EMSGSIZE;
176 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value,
177 RDMA_NLDEV_ATTR_PAD))
178 return -EMSGSIZE;
179
180 return 0;
181 }
182
183 int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
184 {
185 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
186 value);
187 }
188 EXPORT_SYMBOL(rdma_nl_put_driver_u32);
189
190 int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
191 u32 value)
192 {
193 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
194 value);
195 }
196 EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex);
197
198 int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value)
199 {
200 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
201 value);
202 }
203 EXPORT_SYMBOL(rdma_nl_put_driver_u64);
204
205 int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value)
206 {
207 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX,
208 value);
209 }
210 EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex);
211
212 static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
213 {
214 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
215 return -EMSGSIZE;
216 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
217 dev_name(&device->dev)))
218 return -EMSGSIZE;
219
220 return 0;
221 }
222
/*
 * Fill a netlink message with the device-wide attributes of @device:
 * handle (index + name), port count, capability flags, firmware version,
 * GUIDs, node type, DIM setting and a protocol string.
 * Returns 0 on success or -EMSGSIZE when the skb runs out of room.
 */
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];
	int ret = 0;
	u8 port;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	/* rdma_end_port() is the highest valid port number, i.e. port count */
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags,
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);

	/* Devices without firmware yield an empty string; skip the attr then */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid),
			      RDMA_NLDEV_ATTR_PAD))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim))
		return -EMSGSIZE;

	/*
	 * Only the first port is queried for the protocol string; presumably
	 * all ports of a device share one link protocol — confirm against
	 * rdma_cap_* semantics if this ever changes.
	 */
	port = rdma_start_port(device);
	if (rdma_cap_opa_mad(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
	else if (rdma_protocol_ib(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
	else if (rdma_protocol_iwarp(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
	else if (rdma_protocol_roce(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
	else if (rdma_protocol_usnic(device, port))
		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
				     "usnic");
	return ret;
}
278
/*
 * Fill a netlink message with the attributes of one port of @device:
 * handle, port index, queried port attributes (IB-only: cap flags,
 * subnet prefix, LIDs, LMC), state, phys state and — when the port's
 * netdev belongs to @net — the netdev index and name.
 * Returns 0 on success, -EMSGSIZE on lack of skb room, or the
 * ib_query_port() error.
 */
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port,
			  const struct net *net)
{
	struct net_device *netdev = NULL;
	struct ib_port_attr attr;
	int ret;
	u64 cap_flags = 0;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	if (rdma_protocol_ib(device, port)) {
		/* Both 32-bit cap flag words are packed into one u64 attr */
		BUILD_BUG_ON((sizeof(attr.port_cap_flags) +
				sizeof(attr.port_cap_flags2)) > sizeof(u64));
		cap_flags = attr.port_cap_flags |
			((u64)attr.port_cap_flags2 << 32);
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
				      cap_flags, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;

	/*
	 * Netdev details are reported only when the netdev is visible in the
	 * requester's net namespace.  If the check fails, control falls
	 * through to "out" with ret == 0 from ib_query_port() above.
	 */
	netdev = ib_device_get_netdev(device, port);
	if (netdev && net_eq(dev_net(netdev), net)) {
		ret = nla_put_u32(msg,
				  RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex);
		if (ret)
			goto out;
		ret = nla_put_string(msg,
				     RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name);
	}

out:
	/* Drop the reference taken by ib_device_get_netdev() */
	if (netdev)
		dev_put(netdev);
	return ret;
}
336
337 static int fill_res_info_entry(struct sk_buff *msg,
338 const char *name, u64 curr)
339 {
340 struct nlattr *entry_attr;
341
342 entry_attr = nla_nest_start_noflag(msg,
343 RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
344 if (!entry_attr)
345 return -EMSGSIZE;
346
347 if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
348 goto err;
349 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
350 RDMA_NLDEV_ATTR_PAD))
351 goto err;
352
353 nla_nest_end(msg, entry_attr);
354 return 0;
355
356 err:
357 nla_nest_cancel(msg, entry_attr);
358 return -EMSGSIZE;
359 }
360
361 static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
362 {
363 static const char * const names[RDMA_RESTRACK_MAX] = {
364 [RDMA_RESTRACK_PD] = "pd",
365 [RDMA_RESTRACK_CQ] = "cq",
366 [RDMA_RESTRACK_QP] = "qp",
367 [RDMA_RESTRACK_CM_ID] = "cm_id",
368 [RDMA_RESTRACK_MR] = "mr",
369 [RDMA_RESTRACK_CTX] = "ctx",
370 };
371
372 struct nlattr *table_attr;
373 int ret, i, curr;
374
375 if (fill_nldev_handle(msg, device))
376 return -EMSGSIZE;
377
378 table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
379 if (!table_attr)
380 return -EMSGSIZE;
381
382 for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
383 if (!names[i])
384 continue;
385 curr = rdma_restrack_count(device, i);
386 ret = fill_res_info_entry(msg, names[i], curr);
387 if (ret)
388 goto err;
389 }
390
391 nla_nest_end(msg, table_attr);
392 return 0;
393
394 err:
395 nla_nest_cancel(msg, table_attr);
396 return ret;
397 }
398
399 static int fill_res_name_pid(struct sk_buff *msg,
400 struct rdma_restrack_entry *res)
401 {
402
403
404
405
406 if (rdma_is_kernel_res(res)) {
407 if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
408 res->kern_name))
409 return -EMSGSIZE;
410 } else {
411 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
412 task_pid_vnr(res->task)))
413 return -EMSGSIZE;
414 }
415 return 0;
416 }
417
418 static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
419 struct rdma_restrack_entry *res)
420 {
421 if (!dev->ops.fill_res_entry)
422 return false;
423 return dev->ops.fill_res_entry(msg, res);
424 }
425
/*
 * Fill one QP entry for a RES_QP_GET dump.  @port == 0 means "all
 * ports"; a nonzero @port that does not match the QP yields -EAGAIN,
 * which the caller treats as "skip this entry".
 */
static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_device *dev = qp->device;
	struct ib_qp_init_attr qp_init_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return -EAGAIN;

	/* In create_qp() port is not set yet */
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	/* Remote QPN and RQ PSN only exist for connected QP types */
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;

	/* Path migration state applies to connected/XRC QP types only */
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	/* PD number is reported only for user-owned QPs */
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}
486
/*
 * Fill one CM ID entry for a RES_CM_ID_GET dump.  A port filter mismatch
 * returns 0 (silently skip), unlike the -EAGAIN convention used by
 * fill_res_qp_entry().
 */
static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct ib_device *dev = id_priv->id.device;
	struct rdma_cm_id *cm_id = &id_priv->id;

	if (port && port != cm_id->port_num)
		return 0;

	/* port_num may still be 0 before the CM ID is bound to a device */
	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;

	/* QPN/QP type are only meaningful once a QP is associated */
	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;

	/* Addresses are reported only once their family has been set */
	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}
539
/*
 * Fill one CQ entry for a RES_CQ_GET dump: CQE count, use count, poll
 * context (kernel CQs only), DIM usage, CQ number and — for user CQs —
 * the owning user context number.
 */
static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct ib_device *dev = cq->device;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	/* Poll context is a kernel-internal notion; skip for user CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL)))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id))
		goto err;
	/* cq->uobject is only valid for user-owned CQs */
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
			cq->uobject->context->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}
577
/*
 * Fill one MR entry for a RES_MR_GET dump.  The rkey/lkey values are
 * security sensitive and are reported only to CAP_NET_ADMIN callers.
 */
static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct ib_device *dev = mr->pd->device;

	if (has_cap_net_admin) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			goto err;
	}

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length,
			      RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
		goto err;

	/* PD number is reported only for user-owned MRs */
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}
612
/*
 * Fill one PD entry for a RES_PD_GET dump.  DMA lkey and the unsafe
 * global rkey are reported only to CAP_NET_ADMIN callers (and the rkey
 * only when the PD was created with IB_PD_UNSAFE_GLOBAL_RKEY).
 */
static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_pd *pd = container_of(res, struct ib_pd, res);
	struct ib_device *dev = pd->device;

	if (has_cap_net_admin) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
				pd->local_dma_lkey))
			goto err;
		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
				pd->unsafe_global_rkey))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
		goto err;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
		goto err;

	/* pd->uobject is only valid for user-owned PDs */
	if (!rdma_is_kernel_res(res) &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
			pd->uobject->context->res.id))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	if (fill_res_entry(dev, msg, res))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}
650
651 static int fill_stat_counter_mode(struct sk_buff *msg,
652 struct rdma_counter *counter)
653 {
654 struct rdma_counter_mode *m = &counter->mode;
655
656 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode))
657 return -EMSGSIZE;
658
659 if (m->mode == RDMA_COUNTER_MODE_AUTO)
660 if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) &&
661 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type))
662 return -EMSGSIZE;
663
664 return 0;
665 }
666
667 static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn)
668 {
669 struct nlattr *entry_attr;
670
671 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
672 if (!entry_attr)
673 return -EMSGSIZE;
674
675 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn))
676 goto err;
677
678 nla_nest_end(msg, entry_attr);
679 return 0;
680
681 err:
682 nla_nest_cancel(msg, entry_attr);
683 return -EMSGSIZE;
684 }
685
686 static int fill_stat_counter_qps(struct sk_buff *msg,
687 struct rdma_counter *counter)
688 {
689 struct rdma_restrack_entry *res;
690 struct rdma_restrack_root *rt;
691 struct nlattr *table_attr;
692 struct ib_qp *qp = NULL;
693 unsigned long id = 0;
694 int ret = 0;
695
696 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP);
697
698 rt = &counter->device->res[RDMA_RESTRACK_QP];
699 xa_lock(&rt->xa);
700 xa_for_each(&rt->xa, id, res) {
701 if (!rdma_is_visible_in_pid_ns(res))
702 continue;
703
704 qp = container_of(res, struct ib_qp, res);
705 if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
706 continue;
707
708 if (!qp->counter || (qp->counter->id != counter->id))
709 continue;
710
711 ret = fill_stat_counter_qp_entry(msg, qp->qp_num);
712 if (ret)
713 goto err;
714 }
715
716 xa_unlock(&rt->xa);
717 nla_nest_end(msg, table_attr);
718 return 0;
719
720 err:
721 xa_unlock(&rt->xa);
722 nla_nest_cancel(msg, table_attr);
723 return ret;
724 }
725
726 static int fill_stat_hwcounter_entry(struct sk_buff *msg,
727 const char *name, u64 value)
728 {
729 struct nlattr *entry_attr;
730
731 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
732 if (!entry_attr)
733 return -EMSGSIZE;
734
735 if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
736 name))
737 goto err;
738 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE,
739 value, RDMA_NLDEV_ATTR_PAD))
740 goto err;
741
742 nla_nest_end(msg, entry_attr);
743 return 0;
744
745 err:
746 nla_nest_cancel(msg, entry_attr);
747 return -EMSGSIZE;
748 }
749
750 static int fill_stat_counter_hwcounters(struct sk_buff *msg,
751 struct rdma_counter *counter)
752 {
753 struct rdma_hw_stats *st = counter->stats;
754 struct nlattr *table_attr;
755 int i;
756
757 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
758 if (!table_attr)
759 return -EMSGSIZE;
760
761 for (i = 0; i < st->num_counters; i++)
762 if (fill_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
763 goto err;
764
765 nla_nest_end(msg, table_attr);
766 return 0;
767
768 err:
769 nla_nest_cancel(msg, table_attr);
770 return -EMSGSIZE;
771 }
772
/*
 * Fill one statistics-counter entry: port, counter id, owner, binding
 * mode, bound QPs and hardware counter values.  A port filter mismatch
 * returns -EAGAIN ("skip this entry"), matching fill_res_qp_entry().
 */
static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
				  struct rdma_restrack_entry *res,
				  uint32_t port)
{
	struct rdma_counter *counter =
		container_of(res, struct rdma_counter, res);

	if (port && port != counter->port)
		return -EAGAIN;

	/* Refresh the hardware values before dumping them */
	rdma_counter_query_stats(counter);

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) ||
	    fill_res_name_pid(msg, &counter->res) ||
	    fill_stat_counter_mode(msg, counter) ||
	    fill_stat_counter_qps(msg, counter) ||
	    fill_stat_counter_hwcounters(msg, counter))
		return -EMSGSIZE;

	return 0;
}
796
/*
 * RDMA_NLDEV_CMD_GET handler: look up the device by the mandatory
 * DEV_INDEX attribute and unicast its device-wide attributes back to
 * the requester.
 */
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

	/* Takes a reference; released on every exit path below */
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	/*
	 * NOTE(review): nlmsg_put() result is not checked; on a freshly
	 * allocated default-size skb it presumably cannot fail — confirm.
	 */
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);

	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}
842
/*
 * RDMA_NLDEV_CMD_SET handler: rename the device, move it to another net
 * namespace, or toggle CQ dynamic interrupt moderation (DIM) — one
 * action per request.  With no recognized attribute present, control
 * falls through to "done" and returns 0 (a no-op).
 */
static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	u32 index;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
		char name[IB_DEVICE_NAME_MAX] = {};

		nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
			    IB_DEVICE_NAME_MAX);
		/* An empty rename target is rejected */
		if (strlen(name) == 0) {
			err = -EINVAL;
			goto done;
		}
		err = ib_device_rename(device, name);
		goto done;
	}

	if (tb[RDMA_NLDEV_NET_NS_FD]) {
		u32 ns_fd;

		ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
		/*
		 * ib_device_set_netns_put() consumes the device reference
		 * itself, so skip the ib_device_put() at "done".
		 */
		err = ib_device_set_netns_put(skb, device, ns_fd);
		goto put_done;
	}

	if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) {
		u8 use_dim;

		use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]);
		err = ib_device_set_dim(device,  use_dim);
		goto done;
	}

done:
	ib_device_put(device);
put_done:
	return err;
}
895
/*
 * Per-device callback for RDMA_NLDEV_CMD_GET dumps.  @idx is the
 * position of @device in the enumeration; devices below cb->args[0]
 * were emitted by a previous dump round and are skipped.
 */
static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	/* On overflow, drop the partial message; netlink will call us again */
	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:
	/* Remember how far we got for the next dump invocation */
	cb->args[0] = idx;
	return skb->len;
}
923
924 static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
925 {
926
927
928
929
930 return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
931 }
932
/*
 * RDMA_NLDEV_CMD_PORT_GET handler: both DEV_INDEX and PORT_INDEX are
 * mandatory; unicasts the attributes of that single port back to the
 * requester.
 */
static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/* Reject out-of-range port numbers before querying */
	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		err = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_port_info(msg, device, port, sock_net(skb->sk));
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);

	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return err;
}
986
/*
 * RDMA_NLDEV_CMD_PORT_GET dump handler: emit one message per port of
 * the device selected by the mandatory DEV_INDEX attribute.
 */
static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	unsigned int p;

	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
	if (!device)
		return -EINVAL;

	rdma_for_each_port (device, p) {
		/*
		 * The dumpit continuation counter cb->args[0] tracks how
		 * many ports were already emitted in earlier rounds of
		 * this dump, not the port number itself: skip the first
		 * "start" ports and resume from there.
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		/* Out of room: drop the partial entry and resume later */
		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	ib_device_put(device);
	cb->args[0] = idx;
	return skb->len;
}
1044
/*
 * RDMA_NLDEV_CMD_RES_GET handler: unicast the resource-usage summary
 * (per-type counts) for the device selected by DEV_INDEX.
 */
static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int ret;

	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, 0);

	ret = fill_res_info(msg, device);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
1088
/*
 * Per-device callback for RDMA_NLDEV_CMD_RES_GET dumps: emit the
 * resource summary of @device, resuming after cb->args[0] devices
 * already handled in earlier rounds.
 */
static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	/* On overflow, cancel this entry; dump continues on the next call */
	if (fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}
	nlmsg_end(skb, nlh);

	idx++;

out:
	cb->args[0] = idx;
	return skb->len;
}
1116
/* Dump the per-device resource summary for every visible IB device. */
static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}
1122
/*
 * Per-restrack-type description of how to expose one resource class over
 * nldev: the serialization callback plus the netlink command/attribute ids
 * used by res_get_common_doit()/res_get_common_dumpit().
 */
struct nldev_fill_res_entry {
	/* Serializes one tracked entry into @msg. */
	int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
			     struct rdma_restrack_entry *res, u32 port);
	enum rdma_nldev_attr nldev_attr;   /* nested table attribute for dumps */
	enum rdma_nldev_command nldev_cmd; /* netlink command id of the reply */
	u8 flags;                          /* enum nldev_res_flags bits */
	u32 entry;                         /* per-entry nested attribute id */
	u32 id;                            /* attribute carrying the object id */
};
1132
/* Resource type is tracked per device (no port attribute in doit queries). */
enum nldev_res_flags {
	NLDEV_PER_DEV = 1 << 0,
};
1136
/*
 * Fill descriptions for every restrack resource type, indexed by
 * enum rdma_restrack_type. Entries without NLDEV_PER_DEV require a
 * port attribute in doit queries (see res_get_common_doit()).
 */
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
		.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_LQPN,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
		.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_CQN,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_MRN,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
		.flags = NLDEV_PER_DEV,
		.entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY,
		.id = RDMA_NLDEV_ATTR_RES_PDN,
	},
	[RDMA_RESTRACK_COUNTER] = {
		.fill_res_func = fill_res_counter_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_STAT_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
		.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
		.id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID,
	},
};
1184
/*
 * Generic doit handler for fetching one tracked resource of @res_type,
 * identified by the device index, an optional port, and the per-type
 * id attribute described in fill_entries[].
 */
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack,
			       enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct ib_device *device;
	u32 index, id, port = 0;
	bool has_cap_net_admin;
	struct sk_buff *msg;
	int ret;

	/* Both the device index and the per-type resource id are mandatory. */
	ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err;
		}
	}

	/*
	 * Per-device resource types must not carry a port attribute, and
	 * per-port resource types must carry one.
	 */
	if ((port && fe->flags & NLDEV_PER_DEV) ||
	    (!port && ~fe->flags & NLDEV_PER_DEV)) {
		ret = -EINVAL;
		goto err;
	}

	id = nla_get_u32(tb[fe->id]);
	res = rdma_restrack_get_byid(device, res_type, id);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto err;
	}

	/* Hide resources that belong to other namespaces. */
	if (!rdma_is_visible_in_pid_ns(res)) {
		ret = -ENOENT;
		goto err_get;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err_get;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, 0);

	if (fill_nldev_handle(msg, device)) {
		ret = -EMSGSIZE;
		goto err_free;
	}

	/* Privileged callers may receive additional (sensitive) fields. */
	has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
	ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
	if (ret)
		goto err_free;

	rdma_restrack_put(res);
	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err_get:
	rdma_restrack_put(res);
err:
	ib_device_put(device);
	return ret;
}
1267
/*
 * Generic dumpit handler: walk the restrack xarray of @res_type for one
 * device and emit a nested table of entries. The xarray lock is dropped
 * while each entry is serialized and re-taken afterwards, so entries may
 * come and go between iterations; cb->args[0] keeps the resume index.
 */
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	struct rdma_restrack_root *rt;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct nlattr *entry_attr;
	struct ib_device *device;
	int start = cb->args[0];
	bool has_cap_net_admin;
	struct nlmsghdr *nlh;
	unsigned long id;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, NULL);
	/*
	 * The device index is required: this dump covers a single device
	 * only, there is no all-devices variant of this command.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/*
	 * Without a PORT_INDEX attribute port stays 0, i.e. an unfiltered
	 * view of the device's resources.
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN);

	rt = &device->res[res_type];
	xa_lock(&rt->xa);

	/*
	 * The lock is released around fe->fill_res_func() (the entry is
	 * pinned via rdma_restrack_get() first) and re-taken at "again".
	 */
	xa_for_each(&rt->xa, id, res) {
		if (!rdma_is_visible_in_pid_ns(res))
			continue;

		if (idx < start || !rdma_restrack_get(res))
			goto next;

		xa_unlock(&rt->xa);

		filled = true;

		entry_attr = nla_nest_start_noflag(skb, fe->entry);
		if (!entry_attr) {
			ret = -EMSGSIZE;
			rdma_restrack_put(res);
			goto msg_full;
		}

		ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
		rdma_restrack_put(res);

		if (ret) {
			nla_nest_cancel(skb, entry_attr);
			if (ret == -EMSGSIZE)
				goto msg_full;
			if (ret == -EAGAIN)
				goto again;
			goto res_err;
		}
		nla_nest_end(skb, entry_attr);
again:		xa_lock(&rt->xa);
next:		idx++;
	}
	xa_unlock(&rt->xa);

msg_full:
	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * Nothing was emitted this pass: cancel the (empty) message so the
	 * dump terminates instead of sending a contentless reply.
	 */
	if (!filled)
		goto err;

	ib_device_put(device);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);

err:
	nlmsg_cancel(skb, nlh);

err_index:
	ib_device_put(device);
	return ret;
}
1400
/*
 * Generate the trivial per-type doit/dumpit trampolines, all funneling
 * into res_get_common_doit()/res_get_common_dumpit().
 */
#define RES_GET_FUNCS(name, type)                                              \
	static int nldev_res_get_##name##_dumpit(struct sk_buff *skb,          \
						 struct netlink_callback *cb)  \
	{                                                                      \
		return res_get_common_dumpit(skb, cb, type);                   \
	}                                                                      \
	static int nldev_res_get_##name##_doit(struct sk_buff *skb,           \
					       struct nlmsghdr *nlh,           \
					       struct netlink_ext_ack *extack) \
	{                                                                      \
		return res_get_common_doit(skb, nlh, extack, type);            \
	}

RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID);
RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);
RES_GET_FUNCS(pd, RDMA_RESTRACK_PD);
RES_GET_FUNCS(mr, RDMA_RESTRACK_MR);
RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER);
1420
/* Registered "rdma link" type handlers, protected by link_ops_rwsem. */
static LIST_HEAD(link_ops);
static DECLARE_RWSEM(link_ops_rwsem);
1423
1424 static const struct rdma_link_ops *link_ops_get(const char *type)
1425 {
1426 const struct rdma_link_ops *ops;
1427
1428 list_for_each_entry(ops, &link_ops, list) {
1429 if (!strcmp(ops->type, type))
1430 goto out;
1431 }
1432 ops = NULL;
1433 out:
1434 return ops;
1435 }
1436
1437 void rdma_link_register(struct rdma_link_ops *ops)
1438 {
1439 down_write(&link_ops_rwsem);
1440 if (WARN_ON_ONCE(link_ops_get(ops->type)))
1441 goto out;
1442 list_add(&ops->list, &link_ops);
1443 out:
1444 up_write(&link_ops_rwsem);
1445 }
1446 EXPORT_SYMBOL(rdma_link_register);
1447
/* Remove a link type handler; pairs with rdma_link_register(). */
void rdma_link_unregister(struct rdma_link_ops *ops)
{
	down_write(&link_ops_rwsem);
	list_del(&ops->list);
	up_write(&link_ops_rwsem);
}
EXPORT_SYMBOL(rdma_link_unregister);
1455
/*
 * Create a new IB device on top of a net_device ("rdma link add"): look
 * up the link-type handler by name, loading its module on demand, then
 * hand over the requested ibdev name and the resolved net_device.
 */
static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char ibdev_name[IB_DEVICE_NAME_MAX];
	const struct rdma_link_ops *ops;
	char ndev_name[IFNAMSIZ];
	struct net_device *ndev;
	char type[IFNAMSIZ];
	int err;

	err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
				     nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
	    !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
		return -EINVAL;

	nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
		    sizeof(ibdev_name));
	/* NOTE(review): '%' looks reserved for name templating - rejected. */
	if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
		return -EINVAL;

	nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
	nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
		    sizeof(ndev_name));

	ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
	if (!ndev)
		return -ENODEV;

	down_read(&link_ops_rwsem);
	ops = link_ops_get(type);
#ifdef CONFIG_MODULES
	/* Drop the lock while loading; the module registers its own ops. */
	if (!ops) {
		up_read(&link_ops_rwsem);
		request_module("rdma-link-%s", type);
		down_read(&link_ops_rwsem);
		ops = link_ops_get(type);
	}
#endif
	err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL;
	up_read(&link_ops_rwsem);
	dev_put(ndev);

	return err;
}
1502
1503 static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
1504 struct netlink_ext_ack *extack)
1505 {
1506 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1507 struct ib_device *device;
1508 u32 index;
1509 int err;
1510
1511 err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1512 nldev_policy, extack);
1513 if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
1514 return -EINVAL;
1515
1516 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1517 device = ib_device_get_by_index(sock_net(skb->sk), index);
1518 if (!device)
1519 return -EINVAL;
1520
1521 if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
1522 ib_device_put(device);
1523 return -EINVAL;
1524 }
1525
1526 ib_unregister_device_and_put(device);
1527 return 0;
1528 }
1529
/*
 * Resolve the char device a named client exposes for a device/port and
 * report its dev_t, ABI version and name back to the caller.
 */
static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE];
	struct ib_client_nl_info data = {};
	struct ib_device *ibdev = NULL;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
			  extack);
	if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
		return -EINVAL;

	nla_strlcpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
		    sizeof(client_name));

	if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
		index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
		ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
		if (!ibdev)
			return -EINVAL;

		if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
			data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
			if (!rdma_is_port_valid(ibdev, data.port)) {
				err = -EINVAL;
				goto out_put;
			}
		} else {
			/* -1 signals "no specific port requested". */
			data.port = -1;
		}
	} else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		/* A port index without a device index is meaningless. */
		return -EINVAL;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out_put;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_GET_CHARDEV),
			0, 0);

	/* On success, data.cdev holds a device reference we must put. */
	data.nl_msg = msg;
	err = ib_get_client_nl_info(ibdev, client_name, &data);
	if (err)
		goto out_nlmsg;

	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV,
				huge_encode_dev(data.cdev->devt),
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi,
				RDMA_NLDEV_ATTR_PAD);
	if (err)
		goto out_data;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME,
			   dev_name(data.cdev))) {
		err = -EMSGSIZE;
		goto out_data;
	}

	nlmsg_end(msg, nlh);
	put_device(data.cdev);
	if (ibdev)
		ib_device_put(ibdev);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

out_data:
	put_device(data.cdev);
out_nlmsg:
	nlmsg_free(msg);
out_put:
	if (ibdev)
		ib_device_put(ibdev);
	return err;
}
1613
1614 static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1615 struct netlink_ext_ack *extack)
1616 {
1617 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1618 struct sk_buff *msg;
1619 int err;
1620
1621 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1622 nldev_policy, extack);
1623 if (err)
1624 return err;
1625
1626 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1627 if (!msg)
1628 return -ENOMEM;
1629
1630 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1631 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1632 RDMA_NLDEV_CMD_SYS_GET),
1633 0, 0);
1634
1635 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
1636 (u8)ib_devices_shared_netns);
1637 if (err) {
1638 nlmsg_free(msg);
1639 return err;
1640 }
1641 nlmsg_end(msg, nlh);
1642 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1643 }
1644
1645 static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1646 struct netlink_ext_ack *extack)
1647 {
1648 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1649 u8 enable;
1650 int err;
1651
1652 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1653 nldev_policy, extack);
1654 if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
1655 return -EINVAL;
1656
1657 enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
1658
1659 if (enable > 1)
1660 return -EINVAL;
1661
1662 err = rdma_compatdev_set(enable);
1663 return err;
1664 }
1665
1666 static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1667 struct netlink_ext_ack *extack)
1668 {
1669 u32 index, port, mode, mask = 0, qpn, cntn = 0;
1670 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1671 struct ib_device *device;
1672 struct sk_buff *msg;
1673 int ret;
1674
1675 ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1676 nldev_policy, extack);
1677
1678 if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
1679 !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
1680 !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
1681 return -EINVAL;
1682
1683 if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
1684 return -EINVAL;
1685
1686 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1687 device = ib_device_get_by_index(sock_net(skb->sk), index);
1688 if (!device)
1689 return -EINVAL;
1690
1691 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1692 if (!rdma_is_port_valid(device, port)) {
1693 ret = -EINVAL;
1694 goto err;
1695 }
1696
1697 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1698 if (!msg) {
1699 ret = -ENOMEM;
1700 goto err;
1701 }
1702 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1703 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1704 RDMA_NLDEV_CMD_STAT_SET),
1705 0, 0);
1706
1707 mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
1708 if (mode == RDMA_COUNTER_MODE_AUTO) {
1709 if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
1710 mask = nla_get_u32(
1711 tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
1712
1713 ret = rdma_counter_set_auto_mode(device, port,
1714 mask ? true : false, mask);
1715 if (ret)
1716 goto err_msg;
1717 } else {
1718 if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
1719 goto err_msg;
1720 qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
1721 if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
1722 cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
1723 ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
1724 } else {
1725 ret = rdma_counter_bind_qpn_alloc(device, port,
1726 qpn, &cntn);
1727 }
1728 if (ret)
1729 goto err_msg;
1730
1731 if (fill_nldev_handle(msg, device) ||
1732 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
1733 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
1734 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
1735 ret = -EMSGSIZE;
1736 goto err_fill;
1737 }
1738 }
1739
1740 nlmsg_end(msg, nlh);
1741 ib_device_put(device);
1742 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1743
1744 err_fill:
1745 rdma_counter_unbind_qpn(device, port, qpn, cntn);
1746 err_msg:
1747 nlmsg_free(msg);
1748 err:
1749 ib_device_put(device);
1750 return ret;
1751 }
1752
/*
 * Unbind a QP from a statistics counter. The reply message is filled
 * before the unbind is performed so that an -EMSGSIZE is detected while
 * the binding is still intact.
 */
static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index, port, qpn, cntn;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] ||
	    !tb[RDMA_NLDEV_ATTR_RES_LQPN])
		return -EINVAL;

	/* Only QP counters are supported. */
	if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_SET),
			0, 0);

	cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
	qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
		ret = -EMSGSIZE;
		goto err_fill;
	}

	ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
	if (ret)
		goto err_fill;

	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_fill:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
1818
/*
 * Report the device-wide (default) hardware counters of one port: each
 * value is the driver-maintained hw_stats entry plus the contribution
 * returned by rdma_counter_get_hwstat_value().
 */
static int stat_get_doit_default_counter(struct sk_buff *skb,
					 struct nlmsghdr *nlh,
					 struct netlink_ext_ack *extack,
					 struct nlattr *tb[])
{
	struct rdma_hw_stats *stats;
	struct nlattr *table_attr;
	struct ib_device *device;
	int ret, num_cnts, i;
	struct sk_buff *msg;
	u32 index, port;
	u64 v;

	if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(sock_net(skb->sk), index);
	if (!device)
		return -EINVAL;

	/* The driver must implement hardware statistics for this query. */
	if (!device->ops.alloc_hw_stats || !device->ops.get_hw_stats) {
		ret = -EINVAL;
		goto err;
	}

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		ret = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_STAT_GET),
			0, 0);

	if (fill_nldev_handle(msg, device) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
		ret = -EMSGSIZE;
		goto err_msg;
	}

	stats = device->port_data ? device->port_data[port].hw_stats : NULL;
	if (stats == NULL) {
		ret = -EINVAL;
		goto err_msg;
	}
	/* Hold stats->lock across refresh and serialization so the
	 * reported snapshot is consistent. */
	mutex_lock(&stats->lock);

	num_cnts = device->ops.get_hw_stats(device, stats, port, 0);
	if (num_cnts < 0) {
		ret = -EINVAL;
		goto err_stats;
	}

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err_stats;
	}
	for (i = 0; i < num_cnts; i++) {
		v = stats->value[i] +
			rdma_counter_get_hwstat_value(device, port, i);
		if (fill_stat_hwcounter_entry(msg, stats->names[i], v)) {
			ret = -EMSGSIZE;
			goto err_table;
		}
	}
	nla_nest_end(msg, table_attr);

	mutex_unlock(&stats->lock);
	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_table:
	nla_nest_cancel(msg, table_attr);
err_stats:
	mutex_unlock(&stats->lock);
err_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}
1911
1912 static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
1913 struct netlink_ext_ack *extack, struct nlattr *tb[])
1914
1915 {
1916 static enum rdma_nl_counter_mode mode;
1917 static enum rdma_nl_counter_mask mask;
1918 struct ib_device *device;
1919 struct sk_buff *msg;
1920 u32 index, port;
1921 int ret;
1922
1923 if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID])
1924 return nldev_res_get_counter_doit(skb, nlh, extack);
1925
1926 if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] ||
1927 !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
1928 return -EINVAL;
1929
1930 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
1931 device = ib_device_get_by_index(sock_net(skb->sk), index);
1932 if (!device)
1933 return -EINVAL;
1934
1935 port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
1936 if (!rdma_is_port_valid(device, port)) {
1937 ret = -EINVAL;
1938 goto err;
1939 }
1940
1941 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1942 if (!msg) {
1943 ret = -ENOMEM;
1944 goto err;
1945 }
1946
1947 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
1948 RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
1949 RDMA_NLDEV_CMD_STAT_GET),
1950 0, 0);
1951
1952 ret = rdma_counter_get_mode(device, port, &mode, &mask);
1953 if (ret)
1954 goto err_msg;
1955
1956 if (fill_nldev_handle(msg, device) ||
1957 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
1958 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
1959 ret = -EMSGSIZE;
1960 goto err_msg;
1961 }
1962
1963 if ((mode == RDMA_COUNTER_MODE_AUTO) &&
1964 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
1965 ret = -EMSGSIZE;
1966 goto err_msg;
1967 }
1968
1969 nlmsg_end(msg, nlh);
1970 ib_device_put(device);
1971 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
1972
1973 err_msg:
1974 nlmsg_free(msg);
1975 err:
1976 ib_device_put(device);
1977 return ret;
1978 }
1979
1980 static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1981 struct netlink_ext_ack *extack)
1982 {
1983 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
1984 int ret;
1985
1986 ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
1987 nldev_policy, extack);
1988 if (ret)
1989 return -EINVAL;
1990
1991 if (!tb[RDMA_NLDEV_ATTR_STAT_RES])
1992 return stat_get_doit_default_counter(skb, nlh, extack, tb);
1993
1994 switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
1995 case RDMA_NLDEV_ATTR_RES_QP:
1996 ret = stat_get_doit_qp(skb, nlh, extack, tb);
1997 break;
1998
1999 default:
2000 ret = -EINVAL;
2001 break;
2002 }
2003
2004 return ret;
2005 }
2006
2007 static int nldev_stat_get_dumpit(struct sk_buff *skb,
2008 struct netlink_callback *cb)
2009 {
2010 struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
2011 int ret;
2012
2013 ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
2014 nldev_policy, NULL);
2015 if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES])
2016 return -EINVAL;
2017
2018 switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) {
2019 case RDMA_NLDEV_ATTR_RES_QP:
2020 ret = nldev_res_get_counter_dumpit(skb, cb);
2021 break;
2022
2023 default:
2024 ret = -EINVAL;
2025 break;
2026 }
2027
2028 return ret;
2029 }
2030
/*
 * Operation dispatch table registered with the RDMA netlink core in
 * nldev_init(). Destructive commands carry RDMA_NL_ADMIN_PERM.
 */
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
		.doit = nldev_get_chardev,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_NEWLINK] = {
		.doit = nldev_newlink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_DELLINK] = {
		.doit = nldev_dellink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.doit = nldev_res_get_qp_doit,
		.dump = nldev_res_get_qp_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.doit = nldev_res_get_cm_id_doit,
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.doit = nldev_res_get_cq_doit,
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.doit = nldev_res_get_mr_doit,
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.doit = nldev_res_get_pd_doit,
		.dump = nldev_res_get_pd_dumpit,
	},
	[RDMA_NLDEV_CMD_SYS_GET] = {
		.doit = nldev_sys_get_doit,
	},
	[RDMA_NLDEV_CMD_SYS_SET] = {
		.doit = nldev_set_sys_set_doit,
	},
	[RDMA_NLDEV_CMD_STAT_SET] = {
		.doit = nldev_stat_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_GET] = {
		.doit = nldev_stat_get_doit,
		.dump = nldev_stat_get_dumpit,
	},
	[RDMA_NLDEV_CMD_STAT_DEL] = {
		.doit = nldev_stat_del_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};
2098
/* Register the nldev op table with the RDMA netlink subsystem. */
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}
2103
/* Unregister the nldev op table; pairs with nldev_init(). */
void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}
2108
2109 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);