This source file includes following definitions.
- __printf
- __printf
- ib_network_to_gid_type
- rdma_gid_attr_network_type
- ib_mtu_enum_to_int
- ib_mtu_int_to_enum
- ib_width_enum_to_int
- rdma_alloc_hw_stats_struct
- ib_srq_has_cq
- rdma_block_iter_dma_address
- ib_get_client_data
- rdma_user_mmap_io
- ib_copy_from_udata
- ib_copy_to_udata
- ib_is_buffer_cleared
- ib_is_udata_cleared
- ib_is_destroy_retryable
- ib_destroy_usecnt
- rdma_cap_ib_switch
- rdma_start_port
- rdma_end_port
- rdma_is_port_valid
- rdma_is_grh_required
- rdma_protocol_ib
- rdma_protocol_roce
- rdma_protocol_roce_udp_encap
- rdma_protocol_roce_eth_encap
- rdma_protocol_iwarp
- rdma_ib_or_roce
- rdma_protocol_raw_packet
- rdma_protocol_usnic
- rdma_cap_ib_mad
- rdma_cap_opa_mad
- rdma_cap_ib_smi
- rdma_cap_ib_cm
- rdma_cap_iw_cm
- rdma_cap_ib_sa
- rdma_cap_ib_mcast
- rdma_cap_af_ib
- rdma_cap_eth_ah
- rdma_cap_opa_ah
- rdma_max_mad_size
- rdma_cap_roce_gid_table
- rdma_cap_read_inv
- rdma_find_pg_bit
- ib_dealloc_pd
- rdma_destroy_ah
- ib_destroy_srq
- ib_post_srq_recv
- ib_create_qp
- ib_destroy_qp
- ib_post_send
- ib_post_recv
- ib_alloc_cq_user
- ib_alloc_cq
- ib_alloc_cq_any
- ib_free_cq
- ib_destroy_cq
- ib_poll_cq
- ib_req_notify_cq
- ib_req_ncomp_notif
- ib_dma_mapping_error
- ib_dma_map_single
- ib_dma_unmap_single
- ib_dma_map_page
- ib_dma_unmap_page
- ib_dma_map_sg
- ib_dma_unmap_sg
- ib_dma_map_sg_attrs
- ib_dma_unmap_sg_attrs
- ib_dma_max_seg_size
- ib_dma_sync_single_for_cpu
- ib_dma_sync_single_for_device
- ib_dma_alloc_coherent
- ib_dma_free_coherent
- ib_dereg_mr
- ib_alloc_mr
- ib_update_fast_reg_key
- ib_inc_rkey
- ib_map_phys_fmr
- ib_check_mr_access
- ib_access_writable
- ib_device_try_get
- ib_map_mr_sg_zbva
- rdma_ah_retrieve_dmac
- rdma_ah_set_dlid
- rdma_ah_get_dlid
- rdma_ah_set_sl
- rdma_ah_get_sl
- rdma_ah_set_path_bits
- rdma_ah_get_path_bits
- rdma_ah_set_make_grd
- rdma_ah_get_make_grd
- rdma_ah_set_port_num
- rdma_ah_get_port_num
- rdma_ah_set_static_rate
- rdma_ah_get_static_rate
- rdma_ah_set_ah_flags
- rdma_ah_get_ah_flags
- rdma_ah_read_grh
- rdma_ah_retrieve_grh
- rdma_ah_set_dgid_raw
- rdma_ah_set_subnet_prefix
- rdma_ah_set_interface_id
- rdma_ah_set_grh
- rdma_ah_find_type
- ib_lid_cpu16
- ib_lid_be16
- ib_get_vector_affinity
- rdma_set_device_sysfs_group
- rdma_device_to_ibdev
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39 #if !defined(IB_VERBS_H)
40 #define IB_VERBS_H
41
42 #include <linux/types.h>
43 #include <linux/device.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kref.h>
46 #include <linux/list.h>
47 #include <linux/rwsem.h>
48 #include <linux/workqueue.h>
49 #include <linux/irq_poll.h>
50 #include <uapi/linux/if_ether.h>
51 #include <net/ipv6.h>
52 #include <net/ip.h>
53 #include <linux/string.h>
54 #include <linux/slab.h>
55 #include <linux/netdevice.h>
56 #include <linux/refcount.h>
57 #include <linux/if_link.h>
58 #include <linux/atomic.h>
59 #include <linux/mmu_notifier.h>
60 #include <linux/uaccess.h>
61 #include <linux/cgroup_rdma.h>
62 #include <linux/irqflags.h>
63 #include <linux/preempt.h>
64 #include <linux/dim.h>
65 #include <uapi/rdma/ib_user_verbs.h>
66 #include <rdma/rdma_counter.h>
67 #include <rdma/restrack.h>
68 #include <rdma/signature.h>
69 #include <uapi/rdma/rdma_user_ioctl.h>
70 #include <uapi/rdma/ib_user_ioctl_verbs.h>
71
72 #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
73
74 struct ib_umem_odp;
75
76 extern struct workqueue_struct *ib_wq;
77 extern struct workqueue_struct *ib_comp_wq;
78 extern struct workqueue_struct *ib_comp_unbound_wq;
79
80 __printf(3, 4) __cold
81 void ibdev_printk(const char *level, const struct ib_device *ibdev,
82 const char *format, ...);
83 __printf(2, 3) __cold
84 void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
85 __printf(2, 3) __cold
86 void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
87 __printf(2, 3) __cold
88 void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
89 __printf(2, 3) __cold
90 void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
91 __printf(2, 3) __cold
92 void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
93 __printf(2, 3) __cold
94 void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
95 __printf(2, 3) __cold
96 void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
97
98 #if defined(CONFIG_DYNAMIC_DEBUG)
99 #define ibdev_dbg(__dev, format, args...) \
100 dynamic_ibdev_dbg(__dev, format, ##args)
101 #else
102 __printf(2, 3) __cold
103 static inline
104 void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
105 #endif
106
107 #define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \
108 do { \
109 static DEFINE_RATELIMIT_STATE(_rs, \
110 DEFAULT_RATELIMIT_INTERVAL, \
111 DEFAULT_RATELIMIT_BURST); \
112 if (__ratelimit(&_rs)) \
113 ibdev_level(ibdev, fmt, ##__VA_ARGS__); \
114 } while (0)
115
116 #define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
117 ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
118 #define ibdev_alert_ratelimited(ibdev, fmt, ...) \
119 ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
120 #define ibdev_crit_ratelimited(ibdev, fmt, ...) \
121 ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
122 #define ibdev_err_ratelimited(ibdev, fmt, ...) \
123 ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
124 #define ibdev_warn_ratelimited(ibdev, fmt, ...) \
125 ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
126 #define ibdev_notice_ratelimited(ibdev, fmt, ...) \
127 ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
128 #define ibdev_info_ratelimited(ibdev, fmt, ...) \
129 ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
130
131 #if defined(CONFIG_DYNAMIC_DEBUG)
132
133 #define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
134 do { \
135 static DEFINE_RATELIMIT_STATE(_rs, \
136 DEFAULT_RATELIMIT_INTERVAL, \
137 DEFAULT_RATELIMIT_BURST); \
138 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
139 if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \
140 __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \
141 ##__VA_ARGS__); \
142 } while (0)
143 #else
144 __printf(2, 3) __cold
145 static inline
146 void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
147 #endif
148
149 union ib_gid {
150 u8 raw[16];
151 struct {
152 __be64 subnet_prefix;
153 __be64 interface_id;
154 } global;
155 };
156
157 extern union ib_gid zgid;
158
159 enum ib_gid_type {
160
161 IB_GID_TYPE_IB = 0,
162 IB_GID_TYPE_ROCE = 0,
163 IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
164 IB_GID_TYPE_SIZE
165 };
166
167 #define ROCE_V2_UDP_DPORT 4791
168 struct ib_gid_attr {
169 struct net_device __rcu *ndev;
170 struct ib_device *device;
171 union ib_gid gid;
172 enum ib_gid_type gid_type;
173 u16 index;
174 u8 port_num;
175 };
176
177 enum {
178
179 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
180 };
181
182 enum rdma_transport_type {
183 RDMA_TRANSPORT_IB,
184 RDMA_TRANSPORT_IWARP,
185 RDMA_TRANSPORT_USNIC,
186 RDMA_TRANSPORT_USNIC_UDP,
187 RDMA_TRANSPORT_UNSPECIFIED,
188 };
189
190 enum rdma_protocol_type {
191 RDMA_PROTOCOL_IB,
192 RDMA_PROTOCOL_IBOE,
193 RDMA_PROTOCOL_IWARP,
194 RDMA_PROTOCOL_USNIC_UDP
195 };
196
197 __attribute_const__ enum rdma_transport_type
198 rdma_node_get_transport(unsigned int node_type);
199
200 enum rdma_network_type {
201 RDMA_NETWORK_IB,
202 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
203 RDMA_NETWORK_IPV4,
204 RDMA_NETWORK_IPV6
205 };
206
207 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
208 {
209 if (network_type == RDMA_NETWORK_IPV4 ||
210 network_type == RDMA_NETWORK_IPV6)
211 return IB_GID_TYPE_ROCE_UDP_ENCAP;
212
213
214 return IB_GID_TYPE_IB;
215 }
216
217 static inline enum rdma_network_type
218 rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
219 {
220 if (attr->gid_type == IB_GID_TYPE_IB)
221 return RDMA_NETWORK_IB;
222
223 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
224 return RDMA_NETWORK_IPV4;
225 else
226 return RDMA_NETWORK_IPV6;
227 }
228
229 enum rdma_link_layer {
230 IB_LINK_LAYER_UNSPECIFIED,
231 IB_LINK_LAYER_INFINIBAND,
232 IB_LINK_LAYER_ETHERNET,
233 };
234
235 enum ib_device_cap_flags {
236 IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
237 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
238 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
239 IB_DEVICE_RAW_MULTI = (1 << 3),
240 IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
241 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
242 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
243 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
244 IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
245
246 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
247 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
248 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
249 IB_DEVICE_SRQ_RESIZE = (1 << 13),
250 IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
251
252
253
254
255
256
257
258
259 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
260
261 IB_DEVICE_MEM_WINDOW = (1 << 17),
262
263
264
265
266
267
268
269 IB_DEVICE_UD_IP_CSUM = (1 << 18),
270 IB_DEVICE_UD_TSO = (1 << 19),
271 IB_DEVICE_XRC = (1 << 20),
272
273
274
275
276
277
278
279
280
281
282 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
283 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
284 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
285 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
286 IB_DEVICE_RC_IP_CSUM = (1 << 25),
287
288 IB_DEVICE_RAW_IP_CSUM = (1 << 26),
289
290
291
292
293
294
295 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
296 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
297 IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
298 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
299 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
300 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
301
302 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
303 IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
304
305 IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
306 IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
307 };
308
309 enum ib_atomic_cap {
310 IB_ATOMIC_NONE,
311 IB_ATOMIC_HCA,
312 IB_ATOMIC_GLOB
313 };
314
315 enum ib_odp_general_cap_bits {
316 IB_ODP_SUPPORT = 1 << 0,
317 IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
318 };
319
320 enum ib_odp_transport_cap_bits {
321 IB_ODP_SUPPORT_SEND = 1 << 0,
322 IB_ODP_SUPPORT_RECV = 1 << 1,
323 IB_ODP_SUPPORT_WRITE = 1 << 2,
324 IB_ODP_SUPPORT_READ = 1 << 3,
325 IB_ODP_SUPPORT_ATOMIC = 1 << 4,
326 IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
327 };
328
329 struct ib_odp_caps {
330 uint64_t general_caps;
331 struct {
332 uint32_t rc_odp_caps;
333 uint32_t uc_odp_caps;
334 uint32_t ud_odp_caps;
335 uint32_t xrc_odp_caps;
336 } per_transport_caps;
337 };
338
339 struct ib_rss_caps {
340
341
342
343
344 u32 supported_qpts;
345 u32 max_rwq_indirection_tables;
346 u32 max_rwq_indirection_table_size;
347 };
348
349 enum ib_tm_cap_flags {
350
351 IB_TM_CAP_RNDV_RC = 1 << 0,
352 };
353
354 struct ib_tm_caps {
355
356 u32 max_rndv_hdr_size;
357
358 u32 max_num_tags;
359
360 u32 flags;
361
362 u32 max_ops;
363
364 u32 max_sge;
365 };
366
367 struct ib_cq_init_attr {
368 unsigned int cqe;
369 u32 comp_vector;
370 u32 flags;
371 };
372
373 enum ib_cq_attr_mask {
374 IB_CQ_MODERATE = 1 << 0,
375 };
376
377 struct ib_cq_caps {
378 u16 max_cq_moderation_count;
379 u16 max_cq_moderation_period;
380 };
381
382 struct ib_dm_mr_attr {
383 u64 length;
384 u64 offset;
385 u32 access_flags;
386 };
387
388 struct ib_dm_alloc_attr {
389 u64 length;
390 u32 alignment;
391 u32 flags;
392 };
393
394 struct ib_device_attr {
395 u64 fw_ver;
396 __be64 sys_image_guid;
397 u64 max_mr_size;
398 u64 page_size_cap;
399 u32 vendor_id;
400 u32 vendor_part_id;
401 u32 hw_ver;
402 int max_qp;
403 int max_qp_wr;
404 u64 device_cap_flags;
405 int max_send_sge;
406 int max_recv_sge;
407 int max_sge_rd;
408 int max_cq;
409 int max_cqe;
410 int max_mr;
411 int max_pd;
412 int max_qp_rd_atom;
413 int max_ee_rd_atom;
414 int max_res_rd_atom;
415 int max_qp_init_rd_atom;
416 int max_ee_init_rd_atom;
417 enum ib_atomic_cap atomic_cap;
418 enum ib_atomic_cap masked_atomic_cap;
419 int max_ee;
420 int max_rdd;
421 int max_mw;
422 int max_raw_ipv6_qp;
423 int max_raw_ethy_qp;
424 int max_mcast_grp;
425 int max_mcast_qp_attach;
426 int max_total_mcast_qp_attach;
427 int max_ah;
428 int max_fmr;
429 int max_map_per_fmr;
430 int max_srq;
431 int max_srq_wr;
432 int max_srq_sge;
433 unsigned int max_fast_reg_page_list_len;
434 unsigned int max_pi_fast_reg_page_list_len;
435 u16 max_pkeys;
436 u8 local_ca_ack_delay;
437 int sig_prot_cap;
438 int sig_guard_cap;
439 struct ib_odp_caps odp_caps;
440 uint64_t timestamp_mask;
441 uint64_t hca_core_clock;
442 struct ib_rss_caps rss_caps;
443 u32 max_wq_type_rq;
444 u32 raw_packet_caps;
445 struct ib_tm_caps tm_caps;
446 struct ib_cq_caps cq_caps;
447 u64 max_dm_size;
448 };
449
450 enum ib_mtu {
451 IB_MTU_256 = 1,
452 IB_MTU_512 = 2,
453 IB_MTU_1024 = 3,
454 IB_MTU_2048 = 4,
455 IB_MTU_4096 = 5
456 };
457
458 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
459 {
460 switch (mtu) {
461 case IB_MTU_256: return 256;
462 case IB_MTU_512: return 512;
463 case IB_MTU_1024: return 1024;
464 case IB_MTU_2048: return 2048;
465 case IB_MTU_4096: return 4096;
466 default: return -1;
467 }
468 }
469
470 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
471 {
472 if (mtu >= 4096)
473 return IB_MTU_4096;
474 else if (mtu >= 2048)
475 return IB_MTU_2048;
476 else if (mtu >= 1024)
477 return IB_MTU_1024;
478 else if (mtu >= 512)
479 return IB_MTU_512;
480 else
481 return IB_MTU_256;
482 }
483
484 enum ib_port_state {
485 IB_PORT_NOP = 0,
486 IB_PORT_DOWN = 1,
487 IB_PORT_INIT = 2,
488 IB_PORT_ARMED = 3,
489 IB_PORT_ACTIVE = 4,
490 IB_PORT_ACTIVE_DEFER = 5
491 };
492
493 enum ib_port_phys_state {
494 IB_PORT_PHYS_STATE_SLEEP = 1,
495 IB_PORT_PHYS_STATE_POLLING = 2,
496 IB_PORT_PHYS_STATE_DISABLED = 3,
497 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
498 IB_PORT_PHYS_STATE_LINK_UP = 5,
499 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
500 IB_PORT_PHYS_STATE_PHY_TEST = 7,
501 };
502
503 enum ib_port_width {
504 IB_WIDTH_1X = 1,
505 IB_WIDTH_2X = 16,
506 IB_WIDTH_4X = 2,
507 IB_WIDTH_8X = 4,
508 IB_WIDTH_12X = 8
509 };
510
511 static inline int ib_width_enum_to_int(enum ib_port_width width)
512 {
513 switch (width) {
514 case IB_WIDTH_1X: return 1;
515 case IB_WIDTH_2X: return 2;
516 case IB_WIDTH_4X: return 4;
517 case IB_WIDTH_8X: return 8;
518 case IB_WIDTH_12X: return 12;
519 default: return -1;
520 }
521 }
522
523 enum ib_port_speed {
524 IB_SPEED_SDR = 1,
525 IB_SPEED_DDR = 2,
526 IB_SPEED_QDR = 4,
527 IB_SPEED_FDR10 = 8,
528 IB_SPEED_FDR = 16,
529 IB_SPEED_EDR = 32,
530 IB_SPEED_HDR = 64
531 };
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552 struct rdma_hw_stats {
553 struct mutex lock;
554 unsigned long timestamp;
555 unsigned long lifespan;
556 const char * const *names;
557 int num_counters;
558 u64 value[];
559 };
560
561 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
562
563
564
565
566
567
568
569 static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
570 const char * const *names, int num_counters,
571 unsigned long lifespan)
572 {
573 struct rdma_hw_stats *stats;
574
575 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
576 GFP_KERNEL);
577 if (!stats)
578 return NULL;
579 stats->names = names;
580 stats->num_counters = num_counters;
581 stats->lifespan = msecs_to_jiffies(lifespan);
582
583 return stats;
584 }
585
586
587
588
589
590
591 #define RDMA_CORE_CAP_IB_MAD 0x00000001
592 #define RDMA_CORE_CAP_IB_SMI 0x00000002
593 #define RDMA_CORE_CAP_IB_CM 0x00000004
594 #define RDMA_CORE_CAP_IW_CM 0x00000008
595 #define RDMA_CORE_CAP_IB_SA 0x00000010
596 #define RDMA_CORE_CAP_OPA_MAD 0x00000020
597
598
599 #define RDMA_CORE_CAP_AF_IB 0x00001000
600 #define RDMA_CORE_CAP_ETH_AH 0x00002000
601 #define RDMA_CORE_CAP_OPA_AH 0x00004000
602 #define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
603
604
605 #define RDMA_CORE_CAP_PROT_IB 0x00100000
606 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000
607 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000
608 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
609 #define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
610 #define RDMA_CORE_CAP_PROT_USNIC 0x02000000
611
612 #define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
613 | RDMA_CORE_CAP_PROT_ROCE \
614 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
615
616 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
617 | RDMA_CORE_CAP_IB_MAD \
618 | RDMA_CORE_CAP_IB_SMI \
619 | RDMA_CORE_CAP_IB_CM \
620 | RDMA_CORE_CAP_IB_SA \
621 | RDMA_CORE_CAP_AF_IB)
622 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
623 | RDMA_CORE_CAP_IB_MAD \
624 | RDMA_CORE_CAP_IB_CM \
625 | RDMA_CORE_CAP_AF_IB \
626 | RDMA_CORE_CAP_ETH_AH)
627 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
628 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
629 | RDMA_CORE_CAP_IB_MAD \
630 | RDMA_CORE_CAP_IB_CM \
631 | RDMA_CORE_CAP_AF_IB \
632 | RDMA_CORE_CAP_ETH_AH)
633 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
634 | RDMA_CORE_CAP_IW_CM)
635 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
636 | RDMA_CORE_CAP_OPA_MAD)
637
638 #define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
639
640 #define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
641
642 struct ib_port_attr {
643 u64 subnet_prefix;
644 enum ib_port_state state;
645 enum ib_mtu max_mtu;
646 enum ib_mtu active_mtu;
647 int gid_tbl_len;
648 unsigned int ip_gids:1;
649
650 u32 port_cap_flags;
651 u32 max_msg_sz;
652 u32 bad_pkey_cntr;
653 u32 qkey_viol_cntr;
654 u16 pkey_tbl_len;
655 u32 sm_lid;
656 u32 lid;
657 u8 lmc;
658 u8 max_vl_num;
659 u8 sm_sl;
660 u8 subnet_timeout;
661 u8 init_type_reply;
662 u8 active_width;
663 u8 active_speed;
664 u8 phys_state;
665 u16 port_cap_flags2;
666 };
667
668 enum ib_device_modify_flags {
669 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
670 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
671 };
672
673 #define IB_DEVICE_NODE_DESC_MAX 64
674
675 struct ib_device_modify {
676 u64 sys_image_guid;
677 char node_desc[IB_DEVICE_NODE_DESC_MAX];
678 };
679
680 enum ib_port_modify_flags {
681 IB_PORT_SHUTDOWN = 1,
682 IB_PORT_INIT_TYPE = (1<<2),
683 IB_PORT_RESET_QKEY_CNTR = (1<<3),
684 IB_PORT_OPA_MASK_CHG = (1<<4)
685 };
686
687 struct ib_port_modify {
688 u32 set_port_cap_mask;
689 u32 clr_port_cap_mask;
690 u8 init_type;
691 };
692
693 enum ib_event_type {
694 IB_EVENT_CQ_ERR,
695 IB_EVENT_QP_FATAL,
696 IB_EVENT_QP_REQ_ERR,
697 IB_EVENT_QP_ACCESS_ERR,
698 IB_EVENT_COMM_EST,
699 IB_EVENT_SQ_DRAINED,
700 IB_EVENT_PATH_MIG,
701 IB_EVENT_PATH_MIG_ERR,
702 IB_EVENT_DEVICE_FATAL,
703 IB_EVENT_PORT_ACTIVE,
704 IB_EVENT_PORT_ERR,
705 IB_EVENT_LID_CHANGE,
706 IB_EVENT_PKEY_CHANGE,
707 IB_EVENT_SM_CHANGE,
708 IB_EVENT_SRQ_ERR,
709 IB_EVENT_SRQ_LIMIT_REACHED,
710 IB_EVENT_QP_LAST_WQE_REACHED,
711 IB_EVENT_CLIENT_REREGISTER,
712 IB_EVENT_GID_CHANGE,
713 IB_EVENT_WQ_FATAL,
714 };
715
716 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
717
718 struct ib_event {
719 struct ib_device *device;
720 union {
721 struct ib_cq *cq;
722 struct ib_qp *qp;
723 struct ib_srq *srq;
724 struct ib_wq *wq;
725 u8 port_num;
726 } element;
727 enum ib_event_type event;
728 };
729
730 struct ib_event_handler {
731 struct ib_device *device;
732 void (*handler)(struct ib_event_handler *, struct ib_event *);
733 struct list_head list;
734 };
735
736 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
737 do { \
738 (_ptr)->device = _device; \
739 (_ptr)->handler = _handler; \
740 INIT_LIST_HEAD(&(_ptr)->list); \
741 } while (0)
742
743 struct ib_global_route {
744 const struct ib_gid_attr *sgid_attr;
745 union ib_gid dgid;
746 u32 flow_label;
747 u8 sgid_index;
748 u8 hop_limit;
749 u8 traffic_class;
750 };
751
752 struct ib_grh {
753 __be32 version_tclass_flow;
754 __be16 paylen;
755 u8 next_hdr;
756 u8 hop_limit;
757 union ib_gid sgid;
758 union ib_gid dgid;
759 };
760
761 union rdma_network_hdr {
762 struct ib_grh ibgrh;
763 struct {
764
765
766
767 u8 reserved[20];
768 struct iphdr roce4grh;
769 };
770 };
771
772 #define IB_QPN_MASK 0xFFFFFF
773
774 enum {
775 IB_MULTICAST_QPN = 0xffffff
776 };
777
778 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
779 #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
780
781 enum ib_ah_flags {
782 IB_AH_GRH = 1
783 };
784
785 enum ib_rate {
786 IB_RATE_PORT_CURRENT = 0,
787 IB_RATE_2_5_GBPS = 2,
788 IB_RATE_5_GBPS = 5,
789 IB_RATE_10_GBPS = 3,
790 IB_RATE_20_GBPS = 6,
791 IB_RATE_30_GBPS = 4,
792 IB_RATE_40_GBPS = 7,
793 IB_RATE_60_GBPS = 8,
794 IB_RATE_80_GBPS = 9,
795 IB_RATE_120_GBPS = 10,
796 IB_RATE_14_GBPS = 11,
797 IB_RATE_56_GBPS = 12,
798 IB_RATE_112_GBPS = 13,
799 IB_RATE_168_GBPS = 14,
800 IB_RATE_25_GBPS = 15,
801 IB_RATE_100_GBPS = 16,
802 IB_RATE_200_GBPS = 17,
803 IB_RATE_300_GBPS = 18,
804 IB_RATE_28_GBPS = 19,
805 IB_RATE_50_GBPS = 20,
806 IB_RATE_400_GBPS = 21,
807 IB_RATE_600_GBPS = 22,
808 };
809
810
811
812
813
814
815
816 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
817
818
819
820
821
822
823 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843 enum ib_mr_type {
844 IB_MR_TYPE_MEM_REG,
845 IB_MR_TYPE_SG_GAPS,
846 IB_MR_TYPE_DM,
847 IB_MR_TYPE_USER,
848 IB_MR_TYPE_DMA,
849 IB_MR_TYPE_INTEGRITY,
850 };
851
852 enum ib_mr_status_check {
853 IB_MR_CHECK_SIG_STATUS = 1,
854 };
855
856
857
858
859
860
861
862
863
864 struct ib_mr_status {
865 u32 fail_status;
866 struct ib_sig_err sig_err;
867 };
868
869
870
871
872
873
874 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
875
876 enum rdma_ah_attr_type {
877 RDMA_AH_ATTR_TYPE_UNDEFINED,
878 RDMA_AH_ATTR_TYPE_IB,
879 RDMA_AH_ATTR_TYPE_ROCE,
880 RDMA_AH_ATTR_TYPE_OPA,
881 };
882
883 struct ib_ah_attr {
884 u16 dlid;
885 u8 src_path_bits;
886 };
887
888 struct roce_ah_attr {
889 u8 dmac[ETH_ALEN];
890 };
891
892 struct opa_ah_attr {
893 u32 dlid;
894 u8 src_path_bits;
895 bool make_grd;
896 };
897
898 struct rdma_ah_attr {
899 struct ib_global_route grh;
900 u8 sl;
901 u8 static_rate;
902 u8 port_num;
903 u8 ah_flags;
904 enum rdma_ah_attr_type type;
905 union {
906 struct ib_ah_attr ib;
907 struct roce_ah_attr roce;
908 struct opa_ah_attr opa;
909 };
910 };
911
912 enum ib_wc_status {
913 IB_WC_SUCCESS,
914 IB_WC_LOC_LEN_ERR,
915 IB_WC_LOC_QP_OP_ERR,
916 IB_WC_LOC_EEC_OP_ERR,
917 IB_WC_LOC_PROT_ERR,
918 IB_WC_WR_FLUSH_ERR,
919 IB_WC_MW_BIND_ERR,
920 IB_WC_BAD_RESP_ERR,
921 IB_WC_LOC_ACCESS_ERR,
922 IB_WC_REM_INV_REQ_ERR,
923 IB_WC_REM_ACCESS_ERR,
924 IB_WC_REM_OP_ERR,
925 IB_WC_RETRY_EXC_ERR,
926 IB_WC_RNR_RETRY_EXC_ERR,
927 IB_WC_LOC_RDD_VIOL_ERR,
928 IB_WC_REM_INV_RD_REQ_ERR,
929 IB_WC_REM_ABORT_ERR,
930 IB_WC_INV_EECN_ERR,
931 IB_WC_INV_EEC_STATE_ERR,
932 IB_WC_FATAL_ERR,
933 IB_WC_RESP_TIMEOUT_ERR,
934 IB_WC_GENERAL_ERR
935 };
936
937 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
938
939 enum ib_wc_opcode {
940 IB_WC_SEND,
941 IB_WC_RDMA_WRITE,
942 IB_WC_RDMA_READ,
943 IB_WC_COMP_SWAP,
944 IB_WC_FETCH_ADD,
945 IB_WC_LSO,
946 IB_WC_LOCAL_INV,
947 IB_WC_REG_MR,
948 IB_WC_MASKED_COMP_SWAP,
949 IB_WC_MASKED_FETCH_ADD,
950
951
952
953
954 IB_WC_RECV = 1 << 7,
955 IB_WC_RECV_RDMA_WITH_IMM
956 };
957
958 enum ib_wc_flags {
959 IB_WC_GRH = 1,
960 IB_WC_WITH_IMM = (1<<1),
961 IB_WC_WITH_INVALIDATE = (1<<2),
962 IB_WC_IP_CSUM_OK = (1<<3),
963 IB_WC_WITH_SMAC = (1<<4),
964 IB_WC_WITH_VLAN = (1<<5),
965 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
966 };
967
968 struct ib_wc {
969 union {
970 u64 wr_id;
971 struct ib_cqe *wr_cqe;
972 };
973 enum ib_wc_status status;
974 enum ib_wc_opcode opcode;
975 u32 vendor_err;
976 u32 byte_len;
977 struct ib_qp *qp;
978 union {
979 __be32 imm_data;
980 u32 invalidate_rkey;
981 } ex;
982 u32 src_qp;
983 u32 slid;
984 int wc_flags;
985 u16 pkey_index;
986 u8 sl;
987 u8 dlid_path_bits;
988 u8 port_num;
989 u8 smac[ETH_ALEN];
990 u16 vlan_id;
991 u8 network_hdr_type;
992 };
993
994 enum ib_cq_notify_flags {
995 IB_CQ_SOLICITED = 1 << 0,
996 IB_CQ_NEXT_COMP = 1 << 1,
997 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
998 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
999 };
1000
1001 enum ib_srq_type {
1002 IB_SRQT_BASIC,
1003 IB_SRQT_XRC,
1004 IB_SRQT_TM,
1005 };
1006
1007 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1008 {
1009 return srq_type == IB_SRQT_XRC ||
1010 srq_type == IB_SRQT_TM;
1011 }
1012
1013 enum ib_srq_attr_mask {
1014 IB_SRQ_MAX_WR = 1 << 0,
1015 IB_SRQ_LIMIT = 1 << 1,
1016 };
1017
1018 struct ib_srq_attr {
1019 u32 max_wr;
1020 u32 max_sge;
1021 u32 srq_limit;
1022 };
1023
1024 struct ib_srq_init_attr {
1025 void (*event_handler)(struct ib_event *, void *);
1026 void *srq_context;
1027 struct ib_srq_attr attr;
1028 enum ib_srq_type srq_type;
1029
1030 struct {
1031 struct ib_cq *cq;
1032 union {
1033 struct {
1034 struct ib_xrcd *xrcd;
1035 } xrc;
1036
1037 struct {
1038 u32 max_num_tags;
1039 } tag_matching;
1040 };
1041 } ext;
1042 };
1043
1044 struct ib_qp_cap {
1045 u32 max_send_wr;
1046 u32 max_recv_wr;
1047 u32 max_send_sge;
1048 u32 max_recv_sge;
1049 u32 max_inline_data;
1050
1051
1052
1053
1054
1055
1056 u32 max_rdma_ctxs;
1057 };
1058
1059 enum ib_sig_type {
1060 IB_SIGNAL_ALL_WR,
1061 IB_SIGNAL_REQ_WR
1062 };
1063
1064 enum ib_qp_type {
1065
1066
1067
1068
1069
1070 IB_QPT_SMI,
1071 IB_QPT_GSI,
1072
1073 IB_QPT_RC,
1074 IB_QPT_UC,
1075 IB_QPT_UD,
1076 IB_QPT_RAW_IPV6,
1077 IB_QPT_RAW_ETHERTYPE,
1078 IB_QPT_RAW_PACKET = 8,
1079 IB_QPT_XRC_INI = 9,
1080 IB_QPT_XRC_TGT,
1081 IB_QPT_MAX,
1082 IB_QPT_DRIVER = 0xFF,
1083
1084
1085
1086
1087 IB_QPT_RESERVED1 = 0x1000,
1088 IB_QPT_RESERVED2,
1089 IB_QPT_RESERVED3,
1090 IB_QPT_RESERVED4,
1091 IB_QPT_RESERVED5,
1092 IB_QPT_RESERVED6,
1093 IB_QPT_RESERVED7,
1094 IB_QPT_RESERVED8,
1095 IB_QPT_RESERVED9,
1096 IB_QPT_RESERVED10,
1097 };
1098
1099 enum ib_qp_create_flags {
1100 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
1101 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
1102 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1103 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1104 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
1105 IB_QP_CREATE_NETIF_QP = 1 << 5,
1106 IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
1107
1108 IB_QP_CREATE_SCATTER_FCS = 1 << 8,
1109 IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
1110 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
1111 IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
1112
1113 IB_QP_CREATE_RESERVED_START = 1 << 26,
1114 IB_QP_CREATE_RESERVED_END = 1 << 31,
1115 };
1116
1117
1118
1119
1120
1121
1122 struct ib_qp_init_attr {
1123
1124 void (*event_handler)(struct ib_event *, void *);
1125
1126 void *qp_context;
1127 struct ib_cq *send_cq;
1128 struct ib_cq *recv_cq;
1129 struct ib_srq *srq;
1130 struct ib_xrcd *xrcd;
1131 struct ib_qp_cap cap;
1132 enum ib_sig_type sq_sig_type;
1133 enum ib_qp_type qp_type;
1134 u32 create_flags;
1135
1136
1137
1138
1139 u8 port_num;
1140 struct ib_rwq_ind_table *rwq_ind_tbl;
1141 u32 source_qpn;
1142 };
1143
1144 struct ib_qp_open_attr {
1145 void (*event_handler)(struct ib_event *, void *);
1146 void *qp_context;
1147 u32 qp_num;
1148 enum ib_qp_type qp_type;
1149 };
1150
1151 enum ib_rnr_timeout {
1152 IB_RNR_TIMER_655_36 = 0,
1153 IB_RNR_TIMER_000_01 = 1,
1154 IB_RNR_TIMER_000_02 = 2,
1155 IB_RNR_TIMER_000_03 = 3,
1156 IB_RNR_TIMER_000_04 = 4,
1157 IB_RNR_TIMER_000_06 = 5,
1158 IB_RNR_TIMER_000_08 = 6,
1159 IB_RNR_TIMER_000_12 = 7,
1160 IB_RNR_TIMER_000_16 = 8,
1161 IB_RNR_TIMER_000_24 = 9,
1162 IB_RNR_TIMER_000_32 = 10,
1163 IB_RNR_TIMER_000_48 = 11,
1164 IB_RNR_TIMER_000_64 = 12,
1165 IB_RNR_TIMER_000_96 = 13,
1166 IB_RNR_TIMER_001_28 = 14,
1167 IB_RNR_TIMER_001_92 = 15,
1168 IB_RNR_TIMER_002_56 = 16,
1169 IB_RNR_TIMER_003_84 = 17,
1170 IB_RNR_TIMER_005_12 = 18,
1171 IB_RNR_TIMER_007_68 = 19,
1172 IB_RNR_TIMER_010_24 = 20,
1173 IB_RNR_TIMER_015_36 = 21,
1174 IB_RNR_TIMER_020_48 = 22,
1175 IB_RNR_TIMER_030_72 = 23,
1176 IB_RNR_TIMER_040_96 = 24,
1177 IB_RNR_TIMER_061_44 = 25,
1178 IB_RNR_TIMER_081_92 = 26,
1179 IB_RNR_TIMER_122_88 = 27,
1180 IB_RNR_TIMER_163_84 = 28,
1181 IB_RNR_TIMER_245_76 = 29,
1182 IB_RNR_TIMER_327_68 = 30,
1183 IB_RNR_TIMER_491_52 = 31
1184 };
1185
1186 enum ib_qp_attr_mask {
1187 IB_QP_STATE = 1,
1188 IB_QP_CUR_STATE = (1<<1),
1189 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1190 IB_QP_ACCESS_FLAGS = (1<<3),
1191 IB_QP_PKEY_INDEX = (1<<4),
1192 IB_QP_PORT = (1<<5),
1193 IB_QP_QKEY = (1<<6),
1194 IB_QP_AV = (1<<7),
1195 IB_QP_PATH_MTU = (1<<8),
1196 IB_QP_TIMEOUT = (1<<9),
1197 IB_QP_RETRY_CNT = (1<<10),
1198 IB_QP_RNR_RETRY = (1<<11),
1199 IB_QP_RQ_PSN = (1<<12),
1200 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1201 IB_QP_ALT_PATH = (1<<14),
1202 IB_QP_MIN_RNR_TIMER = (1<<15),
1203 IB_QP_SQ_PSN = (1<<16),
1204 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1205 IB_QP_PATH_MIG_STATE = (1<<18),
1206 IB_QP_CAP = (1<<19),
1207 IB_QP_DEST_QPN = (1<<20),
1208 IB_QP_RESERVED1 = (1<<21),
1209 IB_QP_RESERVED2 = (1<<22),
1210 IB_QP_RESERVED3 = (1<<23),
1211 IB_QP_RESERVED4 = (1<<24),
1212 IB_QP_RATE_LIMIT = (1<<25),
1213 };
1214
1215 enum ib_qp_state {
1216 IB_QPS_RESET,
1217 IB_QPS_INIT,
1218 IB_QPS_RTR,
1219 IB_QPS_RTS,
1220 IB_QPS_SQD,
1221 IB_QPS_SQE,
1222 IB_QPS_ERR
1223 };
1224
1225 enum ib_mig_state {
1226 IB_MIG_MIGRATED,
1227 IB_MIG_REARM,
1228 IB_MIG_ARMED
1229 };
1230
1231 enum ib_mw_type {
1232 IB_MW_TYPE_1 = 1,
1233 IB_MW_TYPE_2 = 2
1234 };
1235
1236 struct ib_qp_attr {
1237 enum ib_qp_state qp_state;
1238 enum ib_qp_state cur_qp_state;
1239 enum ib_mtu path_mtu;
1240 enum ib_mig_state path_mig_state;
1241 u32 qkey;
1242 u32 rq_psn;
1243 u32 sq_psn;
1244 u32 dest_qp_num;
1245 int qp_access_flags;
1246 struct ib_qp_cap cap;
1247 struct rdma_ah_attr ah_attr;
1248 struct rdma_ah_attr alt_ah_attr;
1249 u16 pkey_index;
1250 u16 alt_pkey_index;
1251 u8 en_sqd_async_notify;
1252 u8 sq_draining;
1253 u8 max_rd_atomic;
1254 u8 max_dest_rd_atomic;
1255 u8 min_rnr_timer;
1256 u8 port_num;
1257 u8 timeout;
1258 u8 retry_cnt;
1259 u8 rnr_retry;
1260 u8 alt_port_num;
1261 u8 alt_timeout;
1262 u32 rate_limit;
1263 };
1264
1265 enum ib_wr_opcode {
1266
1267 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1268 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1269 IB_WR_SEND = IB_UVERBS_WR_SEND,
1270 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1271 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1272 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1273 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1274 IB_WR_LSO = IB_UVERBS_WR_TSO,
1275 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1276 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1277 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1278 IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1279 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1280 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1281 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1282
1283
1284 IB_WR_REG_MR = 0x20,
1285 IB_WR_REG_MR_INTEGRITY,
1286
1287
1288
1289
1290 IB_WR_RESERVED1 = 0xf0,
1291 IB_WR_RESERVED2,
1292 IB_WR_RESERVED3,
1293 IB_WR_RESERVED4,
1294 IB_WR_RESERVED5,
1295 IB_WR_RESERVED6,
1296 IB_WR_RESERVED7,
1297 IB_WR_RESERVED8,
1298 IB_WR_RESERVED9,
1299 IB_WR_RESERVED10,
1300 };
1301
1302 enum ib_send_flags {
1303 IB_SEND_FENCE = 1,
1304 IB_SEND_SIGNALED = (1<<1),
1305 IB_SEND_SOLICITED = (1<<2),
1306 IB_SEND_INLINE = (1<<3),
1307 IB_SEND_IP_CSUM = (1<<4),
1308
1309
1310 IB_SEND_RESERVED_START = (1 << 26),
1311 IB_SEND_RESERVED_END = (1 << 31),
1312 };
1313
1314 struct ib_sge {
1315 u64 addr;
1316 u32 length;
1317 u32 lkey;
1318 };
1319
1320 struct ib_cqe {
1321 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1322 };
1323
1324 struct ib_send_wr {
1325 struct ib_send_wr *next;
1326 union {
1327 u64 wr_id;
1328 struct ib_cqe *wr_cqe;
1329 };
1330 struct ib_sge *sg_list;
1331 int num_sge;
1332 enum ib_wr_opcode opcode;
1333 int send_flags;
1334 union {
1335 __be32 imm_data;
1336 u32 invalidate_rkey;
1337 } ex;
1338 };
1339
1340 struct ib_rdma_wr {
1341 struct ib_send_wr wr;
1342 u64 remote_addr;
1343 u32 rkey;
1344 };
1345
1346 static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1347 {
1348 return container_of(wr, struct ib_rdma_wr, wr);
1349 }
1350
1351 struct ib_atomic_wr {
1352 struct ib_send_wr wr;
1353 u64 remote_addr;
1354 u64 compare_add;
1355 u64 swap;
1356 u64 compare_add_mask;
1357 u64 swap_mask;
1358 u32 rkey;
1359 };
1360
1361 static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1362 {
1363 return container_of(wr, struct ib_atomic_wr, wr);
1364 }
1365
1366 struct ib_ud_wr {
1367 struct ib_send_wr wr;
1368 struct ib_ah *ah;
1369 void *header;
1370 int hlen;
1371 int mss;
1372 u32 remote_qpn;
1373 u32 remote_qkey;
1374 u16 pkey_index;
1375 u8 port_num;
1376 };
1377
1378 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1379 {
1380 return container_of(wr, struct ib_ud_wr, wr);
1381 }
1382
1383 struct ib_reg_wr {
1384 struct ib_send_wr wr;
1385 struct ib_mr *mr;
1386 u32 key;
1387 int access;
1388 };
1389
1390 static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1391 {
1392 return container_of(wr, struct ib_reg_wr, wr);
1393 }
1394
1395 struct ib_recv_wr {
1396 struct ib_recv_wr *next;
1397 union {
1398 u64 wr_id;
1399 struct ib_cqe *wr_cqe;
1400 };
1401 struct ib_sge *sg_list;
1402 int num_sge;
1403 };
1404
1405 enum ib_access_flags {
1406 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1407 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1408 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1409 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1410 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1411 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1412 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1413 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1414
1415 IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
1416 };
1417
1418
1419
1420
1421
1422 enum ib_mr_rereg_flags {
1423 IB_MR_REREG_TRANS = 1,
1424 IB_MR_REREG_PD = (1<<1),
1425 IB_MR_REREG_ACCESS = (1<<2),
1426 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1427 };
1428
1429 struct ib_fmr_attr {
1430 int max_pages;
1431 int max_maps;
1432 u8 page_shift;
1433 };
1434
1435 struct ib_umem;
1436
1437 enum rdma_remove_reason {
1438
1439
1440
1441
1442 RDMA_REMOVE_DESTROY,
1443
1444 RDMA_REMOVE_CLOSE,
1445
1446 RDMA_REMOVE_DRIVER_REMOVE,
1447
1448 RDMA_REMOVE_ABORT,
1449 };
1450
1451 struct ib_rdmacg_object {
1452 #ifdef CONFIG_CGROUP_RDMA
1453 struct rdma_cgroup *cg;
1454 #endif
1455 };
1456
1457 struct ib_ucontext {
1458 struct ib_device *device;
1459 struct ib_uverbs_file *ufile;
1460
1461
1462
1463
1464
1465 bool closing;
1466
1467 bool cleanup_retryable;
1468
1469 struct ib_rdmacg_object cg_obj;
1470
1471
1472
1473 struct rdma_restrack_entry res;
1474 };
1475
1476 struct ib_uobject {
1477 u64 user_handle;
1478
1479 struct ib_uverbs_file *ufile;
1480
1481 struct ib_ucontext *context;
1482 void *object;
1483 struct list_head list;
1484 struct ib_rdmacg_object cg_obj;
1485 int id;
1486 struct kref ref;
1487 atomic_t usecnt;
1488 struct rcu_head rcu;
1489
1490 const struct uverbs_api_object *uapi_object;
1491 };
1492
1493 struct ib_udata {
1494 const void __user *inbuf;
1495 void __user *outbuf;
1496 size_t inlen;
1497 size_t outlen;
1498 };
1499
1500 struct ib_pd {
1501 u32 local_dma_lkey;
1502 u32 flags;
1503 struct ib_device *device;
1504 struct ib_uobject *uobject;
1505 atomic_t usecnt;
1506
1507 u32 unsafe_global_rkey;
1508
1509
1510
1511
1512 struct ib_mr *__internal_mr;
1513 struct rdma_restrack_entry res;
1514 };
1515
1516 struct ib_xrcd {
1517 struct ib_device *device;
1518 atomic_t usecnt;
1519 struct inode *inode;
1520
1521 struct mutex tgt_qp_mutex;
1522 struct list_head tgt_qp_list;
1523 };
1524
1525 struct ib_ah {
1526 struct ib_device *device;
1527 struct ib_pd *pd;
1528 struct ib_uobject *uobject;
1529 const struct ib_gid_attr *sgid_attr;
1530 enum rdma_ah_attr_type type;
1531 };
1532
1533 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1534
1535 enum ib_poll_context {
1536 IB_POLL_DIRECT,
1537 IB_POLL_SOFTIRQ,
1538 IB_POLL_WORKQUEUE,
1539 IB_POLL_UNBOUND_WORKQUEUE,
1540 };
1541
1542 struct ib_cq {
1543 struct ib_device *device;
1544 struct ib_uobject *uobject;
1545 ib_comp_handler comp_handler;
1546 void (*event_handler)(struct ib_event *, void *);
1547 void *cq_context;
1548 int cqe;
1549 atomic_t usecnt;
1550 enum ib_poll_context poll_ctx;
1551 struct ib_wc *wc;
1552 union {
1553 struct irq_poll iop;
1554 struct work_struct work;
1555 };
1556 struct workqueue_struct *comp_wq;
1557 struct dim *dim;
1558
1559
1560
1561 struct rdma_restrack_entry res;
1562 };
1563
1564 struct ib_srq {
1565 struct ib_device *device;
1566 struct ib_pd *pd;
1567 struct ib_uobject *uobject;
1568 void (*event_handler)(struct ib_event *, void *);
1569 void *srq_context;
1570 enum ib_srq_type srq_type;
1571 atomic_t usecnt;
1572
1573 struct {
1574 struct ib_cq *cq;
1575 union {
1576 struct {
1577 struct ib_xrcd *xrcd;
1578 u32 srq_num;
1579 } xrc;
1580 };
1581 } ext;
1582 };
1583
1584 enum ib_raw_packet_caps {
1585
1586
1587
1588 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
1589
1590
1591 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
1592
1593 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
1594
1595
1596
1597 IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
1598 };
1599
1600 enum ib_wq_type {
1601 IB_WQT_RQ
1602 };
1603
1604 enum ib_wq_state {
1605 IB_WQS_RESET,
1606 IB_WQS_RDY,
1607 IB_WQS_ERR
1608 };
1609
1610 struct ib_wq {
1611 struct ib_device *device;
1612 struct ib_uobject *uobject;
1613 void *wq_context;
1614 void (*event_handler)(struct ib_event *, void *);
1615 struct ib_pd *pd;
1616 struct ib_cq *cq;
1617 u32 wq_num;
1618 enum ib_wq_state state;
1619 enum ib_wq_type wq_type;
1620 atomic_t usecnt;
1621 };
1622
1623 enum ib_wq_flags {
1624 IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
1625 IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
1626 IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
1627 IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
1628 };
1629
1630 struct ib_wq_init_attr {
1631 void *wq_context;
1632 enum ib_wq_type wq_type;
1633 u32 max_wr;
1634 u32 max_sge;
1635 struct ib_cq *cq;
1636 void (*event_handler)(struct ib_event *, void *);
1637 u32 create_flags;
1638 };
1639
1640 enum ib_wq_attr_mask {
1641 IB_WQ_STATE = 1 << 0,
1642 IB_WQ_CUR_STATE = 1 << 1,
1643 IB_WQ_FLAGS = 1 << 2,
1644 };
1645
1646 struct ib_wq_attr {
1647 enum ib_wq_state wq_state;
1648 enum ib_wq_state curr_wq_state;
1649 u32 flags;
1650 u32 flags_mask;
1651 };
1652
1653 struct ib_rwq_ind_table {
1654 struct ib_device *device;
1655 struct ib_uobject *uobject;
1656 atomic_t usecnt;
1657 u32 ind_tbl_num;
1658 u32 log_ind_tbl_size;
1659 struct ib_wq **ind_tbl;
1660 };
1661
1662 struct ib_rwq_ind_table_init_attr {
1663 u32 log_ind_tbl_size;
1664
1665 struct ib_wq **ind_tbl;
1666 };
1667
1668 enum port_pkey_state {
1669 IB_PORT_PKEY_NOT_VALID = 0,
1670 IB_PORT_PKEY_VALID = 1,
1671 IB_PORT_PKEY_LISTED = 2,
1672 };
1673
1674 struct ib_qp_security;
1675
1676 struct ib_port_pkey {
1677 enum port_pkey_state state;
1678 u16 pkey_index;
1679 u8 port_num;
1680 struct list_head qp_list;
1681 struct list_head to_error_list;
1682 struct ib_qp_security *sec;
1683 };
1684
1685 struct ib_ports_pkeys {
1686 struct ib_port_pkey main;
1687 struct ib_port_pkey alt;
1688 };
1689
1690 struct ib_qp_security {
1691 struct ib_qp *qp;
1692 struct ib_device *dev;
1693
1694 struct mutex mutex;
1695 struct ib_ports_pkeys *ports_pkeys;
1696
1697
1698
1699 struct list_head shared_qp_list;
1700 void *security;
1701 bool destroying;
1702 atomic_t error_list_count;
1703 struct completion error_complete;
1704 int error_comps_pending;
1705 };
1706
1707
1708
1709
1710
1711 struct ib_qp {
1712 struct ib_device *device;
1713 struct ib_pd *pd;
1714 struct ib_cq *send_cq;
1715 struct ib_cq *recv_cq;
1716 spinlock_t mr_lock;
1717 int mrs_used;
1718 struct list_head rdma_mrs;
1719 struct list_head sig_mrs;
1720 struct ib_srq *srq;
1721 struct ib_xrcd *xrcd;
1722 struct list_head xrcd_list;
1723
1724
1725 atomic_t usecnt;
1726 struct list_head open_list;
1727 struct ib_qp *real_qp;
1728 struct ib_uobject *uobject;
1729 void (*event_handler)(struct ib_event *, void *);
1730 void *qp_context;
1731
1732 const struct ib_gid_attr *av_sgid_attr;
1733 const struct ib_gid_attr *alt_path_sgid_attr;
1734 u32 qp_num;
1735 u32 max_write_sge;
1736 u32 max_read_sge;
1737 enum ib_qp_type qp_type;
1738 struct ib_rwq_ind_table *rwq_ind_tbl;
1739 struct ib_qp_security *qp_sec;
1740 u8 port;
1741
1742 bool integrity_en;
1743
1744
1745
1746 struct rdma_restrack_entry res;
1747
1748
1749 struct rdma_counter *counter;
1750 };
1751
1752 struct ib_dm {
1753 struct ib_device *device;
1754 u32 length;
1755 u32 flags;
1756 struct ib_uobject *uobject;
1757 atomic_t usecnt;
1758 };
1759
1760 struct ib_mr {
1761 struct ib_device *device;
1762 struct ib_pd *pd;
1763 u32 lkey;
1764 u32 rkey;
1765 u64 iova;
1766 u64 length;
1767 unsigned int page_size;
1768 enum ib_mr_type type;
1769 bool need_inval;
1770 union {
1771 struct ib_uobject *uobject;
1772 struct list_head qp_entry;
1773 };
1774
1775 struct ib_dm *dm;
1776 struct ib_sig_attrs *sig_attrs;
1777
1778
1779
1780 struct rdma_restrack_entry res;
1781 };
1782
1783 struct ib_mw {
1784 struct ib_device *device;
1785 struct ib_pd *pd;
1786 struct ib_uobject *uobject;
1787 u32 rkey;
1788 enum ib_mw_type type;
1789 };
1790
1791 struct ib_fmr {
1792 struct ib_device *device;
1793 struct ib_pd *pd;
1794 struct list_head list;
1795 u32 lkey;
1796 u32 rkey;
1797 };
1798
1799
1800 enum ib_flow_attr_type {
1801
1802 IB_FLOW_ATTR_NORMAL = 0x0,
1803
1804
1805
1806 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
1807
1808
1809
1810 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
1811
1812 IB_FLOW_ATTR_SNIFFER = 0x3
1813 };
1814
1815
1816 enum ib_flow_spec_type {
1817
1818 IB_FLOW_SPEC_ETH = 0x20,
1819 IB_FLOW_SPEC_IB = 0x22,
1820
1821 IB_FLOW_SPEC_IPV4 = 0x30,
1822 IB_FLOW_SPEC_IPV6 = 0x31,
1823 IB_FLOW_SPEC_ESP = 0x34,
1824
1825 IB_FLOW_SPEC_TCP = 0x40,
1826 IB_FLOW_SPEC_UDP = 0x41,
1827 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
1828 IB_FLOW_SPEC_GRE = 0x51,
1829 IB_FLOW_SPEC_MPLS = 0x60,
1830 IB_FLOW_SPEC_INNER = 0x100,
1831
1832 IB_FLOW_SPEC_ACTION_TAG = 0x1000,
1833 IB_FLOW_SPEC_ACTION_DROP = 0x1001,
1834 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
1835 IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
1836 };
1837 #define IB_FLOW_SPEC_LAYER_MASK 0xF0
1838 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10
1839
1840
1841
1842
1843 enum ib_flow_domain {
1844 IB_FLOW_DOMAIN_USER,
1845 IB_FLOW_DOMAIN_ETHTOOL,
1846 IB_FLOW_DOMAIN_RFS,
1847 IB_FLOW_DOMAIN_NIC,
1848 IB_FLOW_DOMAIN_NUM
1849 };
1850
1851 enum ib_flow_flags {
1852 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1,
1853 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2,
1854 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3
1855 };
1856
1857 struct ib_flow_eth_filter {
1858 u8 dst_mac[6];
1859 u8 src_mac[6];
1860 __be16 ether_type;
1861 __be16 vlan_tag;
1862
1863 u8 real_sz[0];
1864 };
1865
1866 struct ib_flow_spec_eth {
1867 u32 type;
1868 u16 size;
1869 struct ib_flow_eth_filter val;
1870 struct ib_flow_eth_filter mask;
1871 };
1872
1873 struct ib_flow_ib_filter {
1874 __be16 dlid;
1875 __u8 sl;
1876
1877 u8 real_sz[0];
1878 };
1879
1880 struct ib_flow_spec_ib {
1881 u32 type;
1882 u16 size;
1883 struct ib_flow_ib_filter val;
1884 struct ib_flow_ib_filter mask;
1885 };
1886
1887
1888 enum ib_ipv4_flags {
1889 IB_IPV4_DONT_FRAG = 0x2,
1890 IB_IPV4_MORE_FRAG = 0X4
1891
1892 };
1893
1894 struct ib_flow_ipv4_filter {
1895 __be32 src_ip;
1896 __be32 dst_ip;
1897 u8 proto;
1898 u8 tos;
1899 u8 ttl;
1900 u8 flags;
1901
1902 u8 real_sz[0];
1903 };
1904
1905 struct ib_flow_spec_ipv4 {
1906 u32 type;
1907 u16 size;
1908 struct ib_flow_ipv4_filter val;
1909 struct ib_flow_ipv4_filter mask;
1910 };
1911
1912 struct ib_flow_ipv6_filter {
1913 u8 src_ip[16];
1914 u8 dst_ip[16];
1915 __be32 flow_label;
1916 u8 next_hdr;
1917 u8 traffic_class;
1918 u8 hop_limit;
1919
1920 u8 real_sz[0];
1921 };
1922
1923 struct ib_flow_spec_ipv6 {
1924 u32 type;
1925 u16 size;
1926 struct ib_flow_ipv6_filter val;
1927 struct ib_flow_ipv6_filter mask;
1928 };
1929
1930 struct ib_flow_tcp_udp_filter {
1931 __be16 dst_port;
1932 __be16 src_port;
1933
1934 u8 real_sz[0];
1935 };
1936
1937 struct ib_flow_spec_tcp_udp {
1938 u32 type;
1939 u16 size;
1940 struct ib_flow_tcp_udp_filter val;
1941 struct ib_flow_tcp_udp_filter mask;
1942 };
1943
1944 struct ib_flow_tunnel_filter {
1945 __be32 tunnel_id;
1946 u8 real_sz[0];
1947 };
1948
1949
1950
1951
1952 struct ib_flow_spec_tunnel {
1953 u32 type;
1954 u16 size;
1955 struct ib_flow_tunnel_filter val;
1956 struct ib_flow_tunnel_filter mask;
1957 };
1958
1959 struct ib_flow_esp_filter {
1960 __be32 spi;
1961 __be32 seq;
1962
1963 u8 real_sz[0];
1964 };
1965
1966 struct ib_flow_spec_esp {
1967 u32 type;
1968 u16 size;
1969 struct ib_flow_esp_filter val;
1970 struct ib_flow_esp_filter mask;
1971 };
1972
1973 struct ib_flow_gre_filter {
1974 __be16 c_ks_res0_ver;
1975 __be16 protocol;
1976 __be32 key;
1977
1978 u8 real_sz[0];
1979 };
1980
1981 struct ib_flow_spec_gre {
1982 u32 type;
1983 u16 size;
1984 struct ib_flow_gre_filter val;
1985 struct ib_flow_gre_filter mask;
1986 };
1987
1988 struct ib_flow_mpls_filter {
1989 __be32 tag;
1990
1991 u8 real_sz[0];
1992 };
1993
1994 struct ib_flow_spec_mpls {
1995 u32 type;
1996 u16 size;
1997 struct ib_flow_mpls_filter val;
1998 struct ib_flow_mpls_filter mask;
1999 };
2000
2001 struct ib_flow_spec_action_tag {
2002 enum ib_flow_spec_type type;
2003 u16 size;
2004 u32 tag_id;
2005 };
2006
2007 struct ib_flow_spec_action_drop {
2008 enum ib_flow_spec_type type;
2009 u16 size;
2010 };
2011
2012 struct ib_flow_spec_action_handle {
2013 enum ib_flow_spec_type type;
2014 u16 size;
2015 struct ib_flow_action *act;
2016 };
2017
2018 enum ib_counters_description {
2019 IB_COUNTER_PACKETS,
2020 IB_COUNTER_BYTES,
2021 };
2022
2023 struct ib_flow_spec_action_count {
2024 enum ib_flow_spec_type type;
2025 u16 size;
2026 struct ib_counters *counters;
2027 };
2028
2029 union ib_flow_spec {
2030 struct {
2031 u32 type;
2032 u16 size;
2033 };
2034 struct ib_flow_spec_eth eth;
2035 struct ib_flow_spec_ib ib;
2036 struct ib_flow_spec_ipv4 ipv4;
2037 struct ib_flow_spec_tcp_udp tcp_udp;
2038 struct ib_flow_spec_ipv6 ipv6;
2039 struct ib_flow_spec_tunnel tunnel;
2040 struct ib_flow_spec_esp esp;
2041 struct ib_flow_spec_gre gre;
2042 struct ib_flow_spec_mpls mpls;
2043 struct ib_flow_spec_action_tag flow_tag;
2044 struct ib_flow_spec_action_drop drop;
2045 struct ib_flow_spec_action_handle action;
2046 struct ib_flow_spec_action_count flow_count;
2047 };
2048
2049 struct ib_flow_attr {
2050 enum ib_flow_attr_type type;
2051 u16 size;
2052 u16 priority;
2053 u32 flags;
2054 u8 num_of_specs;
2055 u8 port;
2056 union ib_flow_spec flows[];
2057 };
2058
2059 struct ib_flow {
2060 struct ib_qp *qp;
2061 struct ib_device *device;
2062 struct ib_uobject *uobject;
2063 };
2064
2065 enum ib_flow_action_type {
2066 IB_FLOW_ACTION_UNSPECIFIED,
2067 IB_FLOW_ACTION_ESP = 1,
2068 };
2069
2070 struct ib_flow_action_attrs_esp_keymats {
2071 enum ib_uverbs_flow_action_esp_keymat protocol;
2072 union {
2073 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2074 } keymat;
2075 };
2076
2077 struct ib_flow_action_attrs_esp_replays {
2078 enum ib_uverbs_flow_action_esp_replay protocol;
2079 union {
2080 struct ib_uverbs_flow_action_esp_replay_bmp bmp;
2081 } replay;
2082 };
2083
2084 enum ib_flow_action_attrs_esp_flags {
2085
2086
2087
2088
2089
2090
2091 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
2092 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
2093 };
2094
2095 struct ib_flow_spec_list {
2096 struct ib_flow_spec_list *next;
2097 union ib_flow_spec spec;
2098 };
2099
2100 struct ib_flow_action_attrs_esp {
2101 struct ib_flow_action_attrs_esp_keymats *keymat;
2102 struct ib_flow_action_attrs_esp_replays *replay;
2103 struct ib_flow_spec_list *encap;
2104
2105
2106
2107 u32 esn;
2108 u32 spi;
2109 u32 seq;
2110 u32 tfc_pad;
2111
2112 u64 flags;
2113 u64 hard_limit_pkts;
2114 };
2115
2116 struct ib_flow_action {
2117 struct ib_device *device;
2118 struct ib_uobject *uobject;
2119 enum ib_flow_action_type type;
2120 atomic_t usecnt;
2121 };
2122
2123 struct ib_mad_hdr;
2124 struct ib_grh;
2125
2126 enum ib_process_mad_flags {
2127 IB_MAD_IGNORE_MKEY = 1,
2128 IB_MAD_IGNORE_BKEY = 2,
2129 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2130 };
2131
2132 enum ib_mad_result {
2133 IB_MAD_RESULT_FAILURE = 0,
2134 IB_MAD_RESULT_SUCCESS = 1 << 0,
2135 IB_MAD_RESULT_REPLY = 1 << 1,
2136 IB_MAD_RESULT_CONSUMED = 1 << 2
2137 };
2138
2139 struct ib_port_cache {
2140 u64 subnet_prefix;
2141 struct ib_pkey_cache *pkey;
2142 struct ib_gid_table *gid;
2143 u8 lmc;
2144 enum ib_port_state port_state;
2145 };
2146
2147 struct ib_cache {
2148 rwlock_t lock;
2149 };
2150
2151 struct ib_port_immutable {
2152 int pkey_tbl_len;
2153 int gid_tbl_len;
2154 u32 core_cap_flags;
2155 u32 max_mad_size;
2156 };
2157
2158 struct ib_port_data {
2159 struct ib_device *ib_dev;
2160
2161 struct ib_port_immutable immutable;
2162
2163 spinlock_t pkey_list_lock;
2164 struct list_head pkey_list;
2165
2166 struct ib_port_cache cache;
2167
2168 spinlock_t netdev_lock;
2169 struct net_device __rcu *netdev;
2170 struct hlist_node ndev_hash_link;
2171 struct rdma_port_counter port_counter;
2172 struct rdma_hw_stats *hw_stats;
2173 };
2174
2175
2176 enum rdma_netdev_t {
2177 RDMA_NETDEV_OPA_VNIC,
2178 RDMA_NETDEV_IPOIB,
2179 };
2180
2181
2182
2183
2184
2185 struct rdma_netdev {
2186 void *clnt_priv;
2187 struct ib_device *hca;
2188 u8 port_num;
2189
2190
2191
2192
2193
2194
2195 void (*free_rdma_netdev)(struct net_device *netdev);
2196
2197
2198 void (*set_id)(struct net_device *netdev, int id);
2199
2200 int (*send)(struct net_device *dev, struct sk_buff *skb,
2201 struct ib_ah *address, u32 dqpn);
2202
2203 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2204 union ib_gid *gid, u16 mlid,
2205 int set_qkey, u32 qkey);
2206 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2207 union ib_gid *gid, u16 mlid);
2208 };
2209
2210 struct rdma_netdev_alloc_params {
2211 size_t sizeof_priv;
2212 unsigned int txqs;
2213 unsigned int rxqs;
2214 void *param;
2215
2216 int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
2217 struct net_device *netdev, void *param);
2218 };
2219
2220 struct ib_counters {
2221 struct ib_device *device;
2222 struct ib_uobject *uobject;
2223
2224 atomic_t usecnt;
2225 };
2226
2227 struct ib_counters_read_attr {
2228 u64 *counters_buff;
2229 u32 ncounters;
2230 u32 flags;
2231 };
2232
2233 struct uverbs_attr_bundle;
2234 struct iw_cm_id;
2235 struct iw_cm_conn_param;
2236
2237 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
2238 .size_##ib_struct = \
2239 (sizeof(struct drv_struct) + \
2240 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
2241 BUILD_BUG_ON_ZERO( \
2242 !__same_type(((struct drv_struct *)NULL)->member, \
2243 struct ib_struct)))
2244
2245 #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
2246 ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
2247
2248 #define rdma_zalloc_drv_obj(ib_dev, ib_type) \
2249 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2250
2251 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
2252
2253
2254
2255
2256
2257
2258 struct ib_device_ops {
2259 struct module *owner;
2260 enum rdma_driver_id driver_id;
2261 u32 uverbs_abi_ver;
2262 unsigned int uverbs_no_driver_id_binding:1;
2263
2264 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2265 const struct ib_send_wr **bad_send_wr);
2266 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2267 const struct ib_recv_wr **bad_recv_wr);
2268 void (*drain_rq)(struct ib_qp *qp);
2269 void (*drain_sq)(struct ib_qp *qp);
2270 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2271 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2272 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2273 int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
2274 int (*post_srq_recv)(struct ib_srq *srq,
2275 const struct ib_recv_wr *recv_wr,
2276 const struct ib_recv_wr **bad_recv_wr);
2277 int (*process_mad)(struct ib_device *device, int process_mad_flags,
2278 u8 port_num, const struct ib_wc *in_wc,
2279 const struct ib_grh *in_grh,
2280 const struct ib_mad_hdr *in_mad, size_t in_mad_size,
2281 struct ib_mad_hdr *out_mad, size_t *out_mad_size,
2282 u16 *out_mad_pkey_index);
2283 int (*query_device)(struct ib_device *device,
2284 struct ib_device_attr *device_attr,
2285 struct ib_udata *udata);
2286 int (*modify_device)(struct ib_device *device, int device_modify_mask,
2287 struct ib_device_modify *device_modify);
2288 void (*get_dev_fw_str)(struct ib_device *device, char *str);
2289 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
2290 int comp_vector);
2291 int (*query_port)(struct ib_device *device, u8 port_num,
2292 struct ib_port_attr *port_attr);
2293 int (*modify_port)(struct ib_device *device, u8 port_num,
2294 int port_modify_mask,
2295 struct ib_port_modify *port_modify);
2296
2297
2298
2299
2300
2301
2302 int (*get_port_immutable)(struct ib_device *device, u8 port_num,
2303 struct ib_port_immutable *immutable);
2304 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2305 u8 port_num);
2306
2307
2308
2309
2310
2311
2312
2313
2314 struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
2315
2316
2317
2318
2319
2320
2321 struct net_device *(*alloc_rdma_netdev)(
2322 struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
2323 const char *name, unsigned char name_assign_type,
2324 void (*setup)(struct net_device *));
2325
2326 int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
2327 enum rdma_netdev_t type,
2328 struct rdma_netdev_alloc_params *params);
2329
2330
2331
2332
2333
2334 int (*query_gid)(struct ib_device *device, u8 port_num, int index,
2335 union ib_gid *gid);
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349 int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2350
2351
2352
2353
2354
2355
2356
2357
2358 int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2359 int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
2360 u16 *pkey);
2361 int (*alloc_ucontext)(struct ib_ucontext *context,
2362 struct ib_udata *udata);
2363 void (*dealloc_ucontext)(struct ib_ucontext *context);
2364 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2365 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2366 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2367 void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2368 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
2369 u32 flags, struct ib_udata *udata);
2370 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2371 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2372 void (*destroy_ah)(struct ib_ah *ah, u32 flags);
2373 int (*create_srq)(struct ib_srq *srq,
2374 struct ib_srq_init_attr *srq_init_attr,
2375 struct ib_udata *udata);
2376 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2377 enum ib_srq_attr_mask srq_attr_mask,
2378 struct ib_udata *udata);
2379 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2380 void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2381 struct ib_qp *(*create_qp)(struct ib_pd *pd,
2382 struct ib_qp_init_attr *qp_init_attr,
2383 struct ib_udata *udata);
2384 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2385 int qp_attr_mask, struct ib_udata *udata);
2386 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2387 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2388 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2389 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2390 struct ib_udata *udata);
2391 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2392 void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2393 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
2394 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2395 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2396 u64 virt_addr, int mr_access_flags,
2397 struct ib_udata *udata);
2398 int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
2399 u64 virt_addr, int mr_access_flags,
2400 struct ib_pd *pd, struct ib_udata *udata);
2401 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2402 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2403 u32 max_num_sg, struct ib_udata *udata);
2404 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2405 u32 max_num_data_sg,
2406 u32 max_num_meta_sg);
2407 int (*advise_mr)(struct ib_pd *pd,
2408 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2409 struct ib_sge *sg_list, u32 num_sge,
2410 struct uverbs_attr_bundle *attrs);
2411 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2412 unsigned int *sg_offset);
2413 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2414 struct ib_mr_status *mr_status);
2415 struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
2416 struct ib_udata *udata);
2417 int (*dealloc_mw)(struct ib_mw *mw);
2418 struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
2419 struct ib_fmr_attr *fmr_attr);
2420 int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
2421 u64 iova);
2422 int (*unmap_fmr)(struct list_head *fmr_list);
2423 int (*dealloc_fmr)(struct ib_fmr *fmr);
2424 void (*invalidate_range)(struct ib_umem_odp *umem_odp,
2425 unsigned long start, unsigned long end);
2426 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2427 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2428 struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
2429 struct ib_udata *udata);
2430 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2431 struct ib_flow *(*create_flow)(struct ib_qp *qp,
2432 struct ib_flow_attr *flow_attr,
2433 int domain, struct ib_udata *udata);
2434 int (*destroy_flow)(struct ib_flow *flow_id);
2435 struct ib_flow_action *(*create_flow_action_esp)(
2436 struct ib_device *device,
2437 const struct ib_flow_action_attrs_esp *attr,
2438 struct uverbs_attr_bundle *attrs);
2439 int (*destroy_flow_action)(struct ib_flow_action *action);
2440 int (*modify_flow_action_esp)(
2441 struct ib_flow_action *action,
2442 const struct ib_flow_action_attrs_esp *attr,
2443 struct uverbs_attr_bundle *attrs);
2444 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2445 int state);
2446 int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2447 struct ifla_vf_info *ivf);
2448 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2449 struct ifla_vf_stats *stats);
2450 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2451 int type);
2452 struct ib_wq *(*create_wq)(struct ib_pd *pd,
2453 struct ib_wq_init_attr *init_attr,
2454 struct ib_udata *udata);
2455 void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2456 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2457 u32 wq_attr_mask, struct ib_udata *udata);
2458 struct ib_rwq_ind_table *(*create_rwq_ind_table)(
2459 struct ib_device *device,
2460 struct ib_rwq_ind_table_init_attr *init_attr,
2461 struct ib_udata *udata);
2462 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2463 struct ib_dm *(*alloc_dm)(struct ib_device *device,
2464 struct ib_ucontext *context,
2465 struct ib_dm_alloc_attr *attr,
2466 struct uverbs_attr_bundle *attrs);
2467 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2468 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2469 struct ib_dm_mr_attr *attr,
2470 struct uverbs_attr_bundle *attrs);
2471 struct ib_counters *(*create_counters)(
2472 struct ib_device *device, struct uverbs_attr_bundle *attrs);
2473 int (*destroy_counters)(struct ib_counters *counters);
2474 int (*read_counters)(struct ib_counters *counters,
2475 struct ib_counters_read_attr *counters_read_attr,
2476 struct uverbs_attr_bundle *attrs);
2477 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2478 int data_sg_nents, unsigned int *data_sg_offset,
2479 struct scatterlist *meta_sg, int meta_sg_nents,
2480 unsigned int *meta_sg_offset);
2481
2482
2483
2484
2485
2486
2487
2488 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
2489 u8 port_num);
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502 int (*get_hw_stats)(struct ib_device *device,
2503 struct rdma_hw_stats *stats, u8 port, int index);
2504
2505
2506
2507
2508 int (*init_port)(struct ib_device *device, u8 port_num,
2509 struct kobject *port_sysfs);
2510
2511
2512
2513 int (*fill_res_entry)(struct sk_buff *msg,
2514 struct rdma_restrack_entry *entry);
2515
2516
2517
2518
2519
2520
2521 int (*enable_driver)(struct ib_device *dev);
2522
2523
2524
2525 void (*dealloc_driver)(struct ib_device *dev);
2526
2527
2528 void (*iw_add_ref)(struct ib_qp *qp);
2529 void (*iw_rem_ref)(struct ib_qp *qp);
2530 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2531 int (*iw_connect)(struct iw_cm_id *cm_id,
2532 struct iw_cm_conn_param *conn_param);
2533 int (*iw_accept)(struct iw_cm_id *cm_id,
2534 struct iw_cm_conn_param *conn_param);
2535 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2536 u8 pdata_len);
2537 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2538 int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2539
2540
2541
2542
2543
2544 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
2545
2546
2547
2548
2549 int (*counter_unbind_qp)(struct ib_qp *qp);
2550
2551
2552
2553 int (*counter_dealloc)(struct rdma_counter *counter);
2554
2555
2556
2557
2558 struct rdma_hw_stats *(*counter_alloc_stats)(
2559 struct rdma_counter *counter);
2560
2561
2562
2563 int (*counter_update_stats)(struct rdma_counter *counter);
2564
2565 DECLARE_RDMA_OBJ_SIZE(ib_ah);
2566 DECLARE_RDMA_OBJ_SIZE(ib_cq);
2567 DECLARE_RDMA_OBJ_SIZE(ib_pd);
2568 DECLARE_RDMA_OBJ_SIZE(ib_srq);
2569 DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2570 };
2571
2572 struct ib_core_device {
2573
2574
2575
2576 struct device dev;
2577 possible_net_t rdma_net;
2578 struct kobject *ports_kobj;
2579 struct list_head port_list;
2580 struct ib_device *owner;
2581 };
2582
2583 struct rdma_restrack_root;
2584 struct ib_device {
2585
2586 struct device *dma_device;
2587 struct ib_device_ops ops;
2588 char name[IB_DEVICE_NAME_MAX];
2589 struct rcu_head rcu_head;
2590
2591 struct list_head event_handler_list;
2592
2593 struct rw_semaphore event_handler_rwsem;
2594
2595
2596 spinlock_t event_handler_lock;
2597
2598 struct rw_semaphore client_data_rwsem;
2599 struct xarray client_data;
2600 struct mutex unregistration_lock;
2601
2602 struct ib_cache cache;
2603
2604
2605
2606 struct ib_port_data *port_data;
2607
2608 int num_comp_vectors;
2609
2610 union {
2611 struct device dev;
2612 struct ib_core_device coredev;
2613 };
2614
2615
2616
2617
2618
2619 const struct attribute_group *groups[3];
2620
2621 u64 uverbs_cmd_mask;
2622 u64 uverbs_ex_cmd_mask;
2623
2624 char node_desc[IB_DEVICE_NODE_DESC_MAX];
2625 __be64 node_guid;
2626 u32 local_dma_lkey;
2627 u16 is_switch:1;
2628
2629 u16 kverbs_provider:1;
2630
2631 u16 use_cq_dim:1;
2632 u8 node_type;
2633 u8 phys_port_cnt;
2634 struct ib_device_attr attrs;
2635 struct attribute_group *hw_stats_ag;
2636 struct rdma_hw_stats *hw_stats;
2637
2638 #ifdef CONFIG_CGROUP_RDMA
2639 struct rdmacg_device cg_device;
2640 #endif
2641
2642 u32 index;
2643 struct rdma_restrack_root *res;
2644
2645 const struct uapi_definition *driver_def;
2646
2647
2648
2649
2650
2651 refcount_t refcount;
2652 struct completion unreg_completion;
2653 struct work_struct unregistration_work;
2654
2655 const struct rdma_link_ops *link_ops;
2656
2657
2658 struct mutex compat_devs_mutex;
2659
2660 struct xarray compat_devs;
2661
2662
2663 char iw_ifname[IFNAMSIZ];
2664 u32 iw_driver_flags;
2665 };
2666
2667 struct ib_client_nl_info;
2668 struct ib_client {
2669 const char *name;
2670 void (*add) (struct ib_device *);
2671 void (*remove)(struct ib_device *, void *client_data);
2672 void (*rename)(struct ib_device *dev, void *client_data);
2673 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
2674 struct ib_client_nl_info *res);
2675 int (*get_global_nl_info)(struct ib_client_nl_info *res);
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692 struct net_device *(*get_net_dev_by_params)(
2693 struct ib_device *dev,
2694 u8 port,
2695 u16 pkey,
2696 const union ib_gid *gid,
2697 const struct sockaddr *addr,
2698 void *client_data);
2699
2700 refcount_t uses;
2701 struct completion uses_zero;
2702 u32 client_id;
2703
2704
2705 u8 no_kverbs_req:1;
2706 };
2707
2708
2709
2710
2711
2712
2713
2714 struct ib_block_iter {
2715
2716 struct scatterlist *__sg;
2717 dma_addr_t __dma_addr;
2718 unsigned int __sg_nents;
2719 unsigned int __sg_advance;
2720 unsigned int __pg_bit;
2721 };
2722
2723 struct ib_device *_ib_alloc_device(size_t size);
2724 #define ib_alloc_device(drv_struct, member) \
2725 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
2726 BUILD_BUG_ON_ZERO(offsetof( \
2727 struct drv_struct, member))), \
2728 struct drv_struct, member)
2729
2730 void ib_dealloc_device(struct ib_device *device);
2731
2732 void ib_get_device_fw_str(struct ib_device *device, char *str);
2733
2734 int ib_register_device(struct ib_device *device, const char *name);
2735 void ib_unregister_device(struct ib_device *device);
2736 void ib_unregister_driver(enum rdma_driver_id driver_id);
2737 void ib_unregister_device_and_put(struct ib_device *device);
2738 void ib_unregister_device_queued(struct ib_device *ib_dev);
2739
2740 int ib_register_client (struct ib_client *client);
2741 void ib_unregister_client(struct ib_client *client);
2742
2743 void __rdma_block_iter_start(struct ib_block_iter *biter,
2744 struct scatterlist *sglist,
2745 unsigned int nents,
2746 unsigned long pgsz);
2747 bool __rdma_block_iter_next(struct ib_block_iter *biter);
2748
2749
2750
2751
2752
2753
2754 static inline dma_addr_t
2755 rdma_block_iter_dma_address(struct ib_block_iter *biter)
2756 {
2757 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
2758 }
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770 #define rdma_for_each_block(sglist, biter, nents, pgsz) \
2771 for (__rdma_block_iter_start(biter, sglist, nents, \
2772 pgsz); \
2773 __rdma_block_iter_next(biter);)
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785 static inline void *ib_get_client_data(struct ib_device *device,
2786 struct ib_client *client)
2787 {
2788 return xa_load(&device->client_data, client->client_id);
2789 }
2790 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2791 void *data);
2792 void ib_set_device_ops(struct ib_device *device,
2793 const struct ib_device_ops *ops);
2794
2795 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
2796 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
2797 unsigned long pfn, unsigned long size, pgprot_t prot);
2798 #else
2799 static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
2800 struct vm_area_struct *vma,
2801 unsigned long pfn, unsigned long size,
2802 pgprot_t prot)
2803 {
2804 return -EINVAL;
2805 }
2806 #endif
2807
2808 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2809 {
2810 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2811 }
2812
2813 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2814 {
2815 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2816 }
2817
2818 static inline bool ib_is_buffer_cleared(const void __user *p,
2819 size_t len)
2820 {
2821 bool ret;
2822 u8 *buf;
2823
2824 if (len > USHRT_MAX)
2825 return false;
2826
2827 buf = memdup_user(p, len);
2828 if (IS_ERR(buf))
2829 return false;
2830
2831 ret = !memchr_inv(buf, 0, len);
2832 kfree(buf);
2833 return ret;
2834 }
2835
2836 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2837 size_t offset,
2838 size_t len)
2839 {
2840 return ib_is_buffer_cleared(udata->inbuf + offset, len);
2841 }
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858 static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
2859 struct ib_uobject *uobj)
2860 {
2861 return ret && (why == RDMA_REMOVE_DESTROY ||
2862 uobj->context->cleanup_retryable);
2863 }
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874 static inline int ib_destroy_usecnt(atomic_t *usecnt,
2875 enum rdma_remove_reason why,
2876 struct ib_uobject *uobj)
2877 {
2878 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
2879 return -EBUSY;
2880 return 0;
2881 }
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2899 enum ib_qp_type type, enum ib_qp_attr_mask mask);
2900
2901 void ib_register_event_handler(struct ib_event_handler *event_handler);
2902 void ib_unregister_event_handler(struct ib_event_handler *event_handler);
2903 void ib_dispatch_event(const struct ib_event *event);
2904
2905 int ib_query_port(struct ib_device *device,
2906 u8 port_num, struct ib_port_attr *port_attr);
2907
2908 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2909 u8 port_num);
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2921 {
2922 return device->is_switch;
2923 }
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933 static inline u8 rdma_start_port(const struct ib_device *device)
2934 {
2935 return rdma_cap_ib_switch(device) ? 0 : 1;
2936 }
2937
2938
2939
2940
2941
2942
2943 #define rdma_for_each_port(device, iter) \
2944 for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \
2945 unsigned int, iter))); \
2946 iter <= rdma_end_port(device); (iter)++)
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956 static inline u8 rdma_end_port(const struct ib_device *device)
2957 {
2958 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2959 }
2960
2961 static inline int rdma_is_port_valid(const struct ib_device *device,
2962 unsigned int port)
2963 {
2964 return (port >= rdma_start_port(device) &&
2965 port <= rdma_end_port(device));
2966 }
2967
2968 static inline bool rdma_is_grh_required(const struct ib_device *device,
2969 u8 port_num)
2970 {
2971 return device->port_data[port_num].immutable.core_cap_flags &
2972 RDMA_CORE_PORT_IB_GRH_REQUIRED;
2973 }
2974
2975 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2976 {
2977 return device->port_data[port_num].immutable.core_cap_flags &
2978 RDMA_CORE_CAP_PROT_IB;
2979 }
2980
2981 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2982 {
2983 return device->port_data[port_num].immutable.core_cap_flags &
2984 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2985 }
2986
2987 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2988 {
2989 return device->port_data[port_num].immutable.core_cap_flags &
2990 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2991 }
2992
2993 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2994 {
2995 return device->port_data[port_num].immutable.core_cap_flags &
2996 RDMA_CORE_CAP_PROT_ROCE;
2997 }
2998
2999 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
3000 {
3001 return device->port_data[port_num].immutable.core_cap_flags &
3002 RDMA_CORE_CAP_PROT_IWARP;
3003 }
3004
3005 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
3006 {
3007 return rdma_protocol_ib(device, port_num) ||
3008 rdma_protocol_roce(device, port_num);
3009 }
3010
3011 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
3012 {
3013 return device->port_data[port_num].immutable.core_cap_flags &
3014 RDMA_CORE_CAP_PROT_RAW_PACKET;
3015 }
3016
3017 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
3018 {
3019 return device->port_data[port_num].immutable.core_cap_flags &
3020 RDMA_CORE_CAP_PROT_USNIC;
3021 }
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
3036 {
3037 return device->port_data[port_num].immutable.core_cap_flags &
3038 RDMA_CORE_CAP_IB_MAD;
3039 }
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
3061 {
3062 return device->port_data[port_num].immutable.core_cap_flags &
3063 RDMA_CORE_CAP_OPA_MAD;
3064 }
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
3087 {
3088 return device->port_data[port_num].immutable.core_cap_flags &
3089 RDMA_CORE_CAP_IB_SMI;
3090 }
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
3108 {
3109 return device->port_data[port_num].immutable.core_cap_flags &
3110 RDMA_CORE_CAP_IB_CM;
3111 }
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
3126 {
3127 return device->port_data[port_num].immutable.core_cap_flags &
3128 RDMA_CORE_CAP_IW_CM;
3129 }
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
3147 {
3148 return device->port_data[port_num].immutable.core_cap_flags &
3149 RDMA_CORE_CAP_IB_SA;
3150 }
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
3170 {
3171 return rdma_cap_ib_sa(device, port_num);
3172 }
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
3188 {
3189 return device->port_data[port_num].immutable.core_cap_flags &
3190 RDMA_CORE_CAP_AF_IB;
3191 }
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
3210 {
3211 return device->port_data[port_num].immutable.core_cap_flags &
3212 RDMA_CORE_CAP_ETH_AH;
3213 }
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
3225 {
3226 return (device->port_data[port_num].immutable.core_cap_flags &
3227 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3228 }
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
3243 {
3244 return device->port_data[port_num].immutable.max_mad_size;
3245 }
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3261 u8 port_num)
3262 {
3263 return rdma_protocol_roce(device, port_num) &&
3264 device->ops.add_gid && device->ops.del_gid;
3265 }
3266
3267
3268
3269
3270 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3271 {
3272
3273
3274
3275
3276 return rdma_protocol_iwarp(dev, port_num);
3277 }
3278
3279
3280
3281
3282
3283
3284
3285 static inline unsigned int rdma_find_pg_bit(unsigned long addr,
3286 unsigned long pgsz_bitmap)
3287 {
3288 unsigned long align;
3289 unsigned long pgsz;
3290
3291 align = addr & -addr;
3292
3293
3294
3295
3296 pgsz = pgsz_bitmap & ~(-align << 1);
3297 if (!pgsz)
3298 return __ffs(pgsz_bitmap);
3299
3300 return __fls(pgsz);
3301 }
3302
3303 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
3304 int state);
3305 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
3306 struct ifla_vf_info *info);
3307 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
3308 struct ifla_vf_stats *stats);
3309 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
3310 int type);
3311
3312 int ib_query_pkey(struct ib_device *device,
3313 u8 port_num, u16 index, u16 *pkey);
3314
3315 int ib_modify_device(struct ib_device *device,
3316 int device_modify_mask,
3317 struct ib_device_modify *device_modify);
3318
3319 int ib_modify_port(struct ib_device *device,
3320 u8 port_num, int port_modify_mask,
3321 struct ib_port_modify *port_modify);
3322
3323 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3324 u8 *port_num, u16 *index);
3325
3326 int ib_find_pkey(struct ib_device *device,
3327 u8 port_num, u16 pkey, u16 *index);
3328
3329 enum ib_pd_flags {
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
3340 };
3341
3342 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3343 const char *caller);
3344
3345 #define ib_alloc_pd(device, flags) \
3346 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3347
3348
3349
3350
3351
3352
3353 void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3354
3355
3356
3357
3358
3359
3360
3361 static inline void ib_dealloc_pd(struct ib_pd *pd)
3362 {
3363 ib_dealloc_pd_user(pd, NULL);
3364 }
3365
3366 enum rdma_create_ah_flags {
3367
3368 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3369 };
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3381 u32 flags);
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3396 struct rdma_ah_attr *ah_attr,
3397 struct ib_udata *udata);
3398
3399
3400
3401
3402
3403
3404
3405
3406 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3407 enum rdma_network_type net_type,
3408 union ib_gid *sgid, union ib_gid *dgid);
3409
3410
3411
3412
3413
3414 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
3435 const struct ib_wc *wc, const struct ib_grh *grh,
3436 struct rdma_ah_attr *ah_attr);
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3451 const struct ib_grh *grh, u8 port_num);
3452
3453
3454
3455
3456
3457
3458
3459
3460 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3461
3462
3463
3464
3465
3466
3467
3468
3469 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3470
3471 enum rdma_destroy_ah_flags {
3472
3473 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3474 };
3475
3476
3477
3478
3479
3480
3481
3482 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3483
3484
3485
3486
3487
3488
3489
3490
3491 static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3492 {
3493 return rdma_destroy_ah_user(ah, flags, NULL);
3494 }
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509 struct ib_srq *ib_create_srq(struct ib_pd *pd,
3510 struct ib_srq_init_attr *srq_init_attr);
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524 int ib_modify_srq(struct ib_srq *srq,
3525 struct ib_srq_attr *srq_attr,
3526 enum ib_srq_attr_mask srq_attr_mask);
3527
3528
3529
3530
3531
3532
3533
3534 int ib_query_srq(struct ib_srq *srq,
3535 struct ib_srq_attr *srq_attr);
3536
3537
3538
3539
3540
3541
3542 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3543
3544
3545
3546
3547
3548
3549
3550 static inline int ib_destroy_srq(struct ib_srq *srq)
3551 {
3552 return ib_destroy_srq_user(srq, NULL);
3553 }
3554
3555
3556
3557
3558
3559
3560
3561
3562 static inline int ib_post_srq_recv(struct ib_srq *srq,
3563 const struct ib_recv_wr *recv_wr,
3564 const struct ib_recv_wr **bad_recv_wr)
3565 {
3566 const struct ib_recv_wr *dummy;
3567
3568 return srq->device->ops.post_srq_recv(srq, recv_wr,
3569 bad_recv_wr ? : &dummy);
3570 }
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581 struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
3582 struct ib_qp_init_attr *qp_init_attr,
3583 struct ib_udata *udata);
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596 static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3597 struct ib_qp_init_attr *qp_init_attr)
3598 {
3599 return ib_create_qp_user(pd, qp_init_attr, NULL);
3600 }
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613 int ib_modify_qp_with_udata(struct ib_qp *qp,
3614 struct ib_qp_attr *attr,
3615 int attr_mask,
3616 struct ib_udata *udata);
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627 int ib_modify_qp(struct ib_qp *qp,
3628 struct ib_qp_attr *qp_attr,
3629 int qp_attr_mask);
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642 int ib_query_qp(struct ib_qp *qp,
3643 struct ib_qp_attr *qp_attr,
3644 int qp_attr_mask,
3645 struct ib_qp_init_attr *qp_init_attr);
3646
3647
3648
3649
3650
3651
3652 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3653
3654
3655
3656
3657
3658
3659
3660 static inline int ib_destroy_qp(struct ib_qp *qp)
3661 {
3662 return ib_destroy_qp_user(qp, NULL);
3663 }
3664
3665
3666
3667
3668
3669
3670
3671
3672 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3673 struct ib_qp_open_attr *qp_open_attr);
3674
3675
3676
3677
3678
3679
3680
3681
3682 int ib_close_qp(struct ib_qp *qp);
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697 static inline int ib_post_send(struct ib_qp *qp,
3698 const struct ib_send_wr *send_wr,
3699 const struct ib_send_wr **bad_send_wr)
3700 {
3701 const struct ib_send_wr *dummy;
3702
3703 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
3704 }
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714 static inline int ib_post_recv(struct ib_qp *qp,
3715 const struct ib_recv_wr *recv_wr,
3716 const struct ib_recv_wr **bad_recv_wr)
3717 {
3718 const struct ib_recv_wr *dummy;
3719
3720 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
3721 }
3722
3723 struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
3724 int nr_cqe, int comp_vector,
3725 enum ib_poll_context poll_ctx,
3726 const char *caller, struct ib_udata *udata);
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737 static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
3738 void *private, int nr_cqe,
3739 int comp_vector,
3740 enum ib_poll_context poll_ctx,
3741 struct ib_udata *udata)
3742 {
3743 return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3744 KBUILD_MODNAME, udata);
3745 }
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3758 int nr_cqe, int comp_vector,
3759 enum ib_poll_context poll_ctx)
3760 {
3761 return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
3762 NULL);
3763 }
3764
3765 struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
3766 int nr_cqe, enum ib_poll_context poll_ctx,
3767 const char *caller);
3768
3769
3770
3771
3772
3773
3774
3775
3776 static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
3777 void *private, int nr_cqe,
3778 enum ib_poll_context poll_ctx)
3779 {
3780 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
3781 KBUILD_MODNAME);
3782 }
3783
3784
3785
3786
3787
3788
3789 void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3790
3791
3792
3793
3794
3795
3796
3797 static inline void ib_free_cq(struct ib_cq *cq)
3798 {
3799 ib_free_cq_user(cq, NULL);
3800 }
3801
3802 int ib_process_cq_direct(struct ib_cq *cq, int budget);
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817 struct ib_cq *__ib_create_cq(struct ib_device *device,
3818 ib_comp_handler comp_handler,
3819 void (*event_handler)(struct ib_event *, void *),
3820 void *cq_context,
3821 const struct ib_cq_init_attr *cq_attr,
3822 const char *caller);
3823 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
3824 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
3825
3826
3827
3828
3829
3830
3831
3832
3833 int ib_resize_cq(struct ib_cq *cq, int cqe);
3834
3835
3836
3837
3838
3839
3840
3841
3842 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3843
3844
3845
3846
3847
3848
3849 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
3850
3851
3852
3853
3854
3855
3856
3857 static inline void ib_destroy_cq(struct ib_cq *cq)
3858 {
3859 ib_destroy_cq_user(cq, NULL);
3860 }
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3875 struct ib_wc *wc)
3876 {
3877 return cq->device->ops.poll_cq(cq, num_entries, wc);
3878 }
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907 static inline int ib_req_notify_cq(struct ib_cq *cq,
3908 enum ib_cq_notify_flags flags)
3909 {
3910 return cq->device->ops.req_notify_cq(cq, flags);
3911 }
3912
3913
3914
3915
3916
3917
3918
3919
3920 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3921 {
3922 return cq->device->ops.req_ncomp_notif ?
3923 cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
3924 -ENOSYS;
3925 }
3926
3927
3928
3929
3930
3931
3932 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3933 {
3934 return dma_mapping_error(dev->dma_device, dma_addr);
3935 }
3936
3937
3938
3939
3940
3941
3942
3943
3944 static inline u64 ib_dma_map_single(struct ib_device *dev,
3945 void *cpu_addr, size_t size,
3946 enum dma_data_direction direction)
3947 {
3948 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3949 }
3950
3951
3952
3953
3954
3955
3956
3957
3958 static inline void ib_dma_unmap_single(struct ib_device *dev,
3959 u64 addr, size_t size,
3960 enum dma_data_direction direction)
3961 {
3962 dma_unmap_single(dev->dma_device, addr, size, direction);
3963 }
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973 static inline u64 ib_dma_map_page(struct ib_device *dev,
3974 struct page *page,
3975 unsigned long offset,
3976 size_t size,
3977 enum dma_data_direction direction)
3978 {
3979 return dma_map_page(dev->dma_device, page, offset, size, direction);
3980 }
3981
3982
3983
3984
3985
3986
3987
3988
3989 static inline void ib_dma_unmap_page(struct ib_device *dev,
3990 u64 addr, size_t size,
3991 enum dma_data_direction direction)
3992 {
3993 dma_unmap_page(dev->dma_device, addr, size, direction);
3994 }
3995
3996
3997
3998
3999
4000
4001
4002
4003 static inline int ib_dma_map_sg(struct ib_device *dev,
4004 struct scatterlist *sg, int nents,
4005 enum dma_data_direction direction)
4006 {
4007 return dma_map_sg(dev->dma_device, sg, nents, direction);
4008 }
4009
4010
4011
4012
4013
4014
4015
4016
4017 static inline void ib_dma_unmap_sg(struct ib_device *dev,
4018 struct scatterlist *sg, int nents,
4019 enum dma_data_direction direction)
4020 {
4021 dma_unmap_sg(dev->dma_device, sg, nents, direction);
4022 }
4023
4024 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4025 struct scatterlist *sg, int nents,
4026 enum dma_data_direction direction,
4027 unsigned long dma_attrs)
4028 {
4029 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4030 dma_attrs);
4031 }
4032
4033 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4034 struct scatterlist *sg, int nents,
4035 enum dma_data_direction direction,
4036 unsigned long dma_attrs)
4037 {
4038 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
4039 }
4040
4041
4042
4043
4044
4045
4046
4047 static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4048 {
4049 return dma_get_max_seg_size(dev->dma_device);
4050 }
4051
4052
4053
4054
4055
4056
4057
4058
4059 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4060 u64 addr,
4061 size_t size,
4062 enum dma_data_direction dir)
4063 {
4064 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4065 }
4066
4067
4068
4069
4070
4071
4072
4073
4074 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4075 u64 addr,
4076 size_t size,
4077 enum dma_data_direction dir)
4078 {
4079 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4080 }
4081
4082
4083
4084
4085
4086
4087
4088
4089 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
4090 size_t size,
4091 dma_addr_t *dma_handle,
4092 gfp_t flag)
4093 {
4094 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
4095 }
4096
4097
4098
4099
4100
4101
4102
4103
4104 static inline void ib_dma_free_coherent(struct ib_device *dev,
4105 size_t size, void *cpu_addr,
4106 dma_addr_t dma_handle)
4107 {
4108 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
4109 }
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130 static inline int ib_dereg_mr(struct ib_mr *mr)
4131 {
4132 return ib_dereg_mr_user(mr, NULL);
4133 }
4134
4135 struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
4136 u32 max_num_sg, struct ib_udata *udata);
4137
4138 static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
4139 enum ib_mr_type mr_type, u32 max_num_sg)
4140 {
4141 return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
4142 }
4143
4144 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4145 u32 max_num_data_sg,
4146 u32 max_num_meta_sg);
4147
4148
4149
4150
4151
4152
4153
4154 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4155 {
4156 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4157 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4158 }
4159
4160
4161
4162
4163
4164
4165 static inline u32 ib_inc_rkey(u32 rkey)
4166 {
4167 const u32 mask = 0x000000ff;
4168 return ((rkey + 1) & mask) | (rkey & ~mask);
4169 }
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
4181 int mr_access_flags,
4182 struct ib_fmr_attr *fmr_attr);
4183
4184
4185
4186
4187
4188
4189
4190
4191 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
4192 u64 *page_list, int list_len,
4193 u64 iova)
4194 {
4195 return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
4196 }
4197
4198
4199
4200
4201
4202 int ib_unmap_fmr(struct list_head *fmr_list);
4203
4204
4205
4206
4207
4208 int ib_dealloc_fmr(struct ib_fmr *fmr);
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4223
4224
4225
4226
4227
4228
4229
4230 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4231
4232
4233
4234
4235
4236
4237 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
4238 #define ib_alloc_xrcd(device) \
4239 __ib_alloc_xrcd((device), KBUILD_MODNAME)
4240
4241
4242
4243
4244
4245
4246 int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
4247
4248 static inline int ib_check_mr_access(int flags)
4249 {
4250
4251
4252
4253
4254 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4255 !(flags & IB_ACCESS_LOCAL_WRITE))
4256 return -EINVAL;
4257
4258 if (flags & ~IB_ACCESS_SUPPORTED)
4259 return -EINVAL;
4260
4261 return 0;
4262 }
4263
4264 static inline bool ib_access_writable(int access_flags)
4265 {
4266
4267
4268
4269
4270
4271
4272
4273 return access_flags &
4274 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4275 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4276 }
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4291 struct ib_mr_status *mr_status);
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306 static inline bool ib_device_try_get(struct ib_device *dev)
4307 {
4308 return refcount_inc_not_zero(&dev->refcount);
4309 }
4310
4311 void ib_device_put(struct ib_device *device);
4312 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4313 enum rdma_driver_id driver_id);
4314 struct ib_device *ib_device_get_by_name(const char *name,
4315 enum rdma_driver_id driver_id);
4316 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
4317 u16 pkey, const union ib_gid *gid,
4318 const struct sockaddr *addr);
4319 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4320 unsigned int port);
4321 struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
4322
4323 struct ib_wq *ib_create_wq(struct ib_pd *pd,
4324 struct ib_wq_init_attr *init_attr);
4325 int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
4326 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
4327 u32 wq_attr_mask);
4328 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
4329 struct ib_rwq_ind_table_init_attr*
4330 wq_ind_table_init_attr);
4331 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
4332
4333 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4334 unsigned int *sg_offset, unsigned int page_size);
4335 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4336 int data_sg_nents, unsigned int *data_sg_offset,
4337 struct scatterlist *meta_sg, int meta_sg_nents,
4338 unsigned int *meta_sg_offset, unsigned int page_size);
4339
4340 static inline int
4341 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4342 unsigned int *sg_offset, unsigned int page_size)
4343 {
4344 int n;
4345
4346 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4347 mr->iova = 0;
4348
4349 return n;
4350 }
4351
4352 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4353 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4354
4355 void ib_drain_rq(struct ib_qp *qp);
4356 void ib_drain_sq(struct ib_qp *qp);
4357 void ib_drain_qp(struct ib_qp *qp);
4358
4359 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
4360
4361 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4362 {
4363 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4364 return attr->roce.dmac;
4365 return NULL;
4366 }
4367
4368 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4369 {
4370 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4371 attr->ib.dlid = (u16)dlid;
4372 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4373 attr->opa.dlid = dlid;
4374 }
4375
4376 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4377 {
4378 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4379 return attr->ib.dlid;
4380 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4381 return attr->opa.dlid;
4382 return 0;
4383 }
4384
4385 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4386 {
4387 attr->sl = sl;
4388 }
4389
4390 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4391 {
4392 return attr->sl;
4393 }
4394
4395 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4396 u8 src_path_bits)
4397 {
4398 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4399 attr->ib.src_path_bits = src_path_bits;
4400 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4401 attr->opa.src_path_bits = src_path_bits;
4402 }
4403
4404 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4405 {
4406 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4407 return attr->ib.src_path_bits;
4408 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4409 return attr->opa.src_path_bits;
4410 return 0;
4411 }
4412
4413 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4414 bool make_grd)
4415 {
4416 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4417 attr->opa.make_grd = make_grd;
4418 }
4419
4420 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4421 {
4422 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4423 return attr->opa.make_grd;
4424 return false;
4425 }
4426
4427 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
4428 {
4429 attr->port_num = port_num;
4430 }
4431
4432 static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4433 {
4434 return attr->port_num;
4435 }
4436
4437 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4438 u8 static_rate)
4439 {
4440 attr->static_rate = static_rate;
4441 }
4442
4443 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4444 {
4445 return attr->static_rate;
4446 }
4447
4448 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4449 enum ib_ah_flags flag)
4450 {
4451 attr->ah_flags = flag;
4452 }
4453
4454 static inline enum ib_ah_flags
4455 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4456 {
4457 return attr->ah_flags;
4458 }
4459
4460 static inline const struct ib_global_route
4461 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4462 {
4463 return &attr->grh;
4464 }
4465
4466
4467 static inline struct ib_global_route
4468 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4469 {
4470 return &attr->grh;
4471 }
4472
4473 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4474 {
4475 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4476
4477 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4478 }
4479
4480 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4481 __be64 prefix)
4482 {
4483 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4484
4485 grh->dgid.global.subnet_prefix = prefix;
4486 }
4487
4488 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4489 __be64 if_id)
4490 {
4491 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4492
4493 grh->dgid.global.interface_id = if_id;
4494 }
4495
4496 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4497 union ib_gid *dgid, u32 flow_label,
4498 u8 sgid_index, u8 hop_limit,
4499 u8 traffic_class)
4500 {
4501 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4502
4503 attr->ah_flags = IB_AH_GRH;
4504 if (dgid)
4505 grh->dgid = *dgid;
4506 grh->flow_label = flow_label;
4507 grh->sgid_index = sgid_index;
4508 grh->hop_limit = hop_limit;
4509 grh->traffic_class = traffic_class;
4510 grh->sgid_attr = NULL;
4511 }
4512
4513 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4514 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4515 u32 flow_label, u8 hop_limit, u8 traffic_class,
4516 const struct ib_gid_attr *sgid_attr);
4517 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4518 const struct rdma_ah_attr *src);
4519 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4520 const struct rdma_ah_attr *new);
4521 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4522
4523
4524
4525
4526
4527
4528
4529 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4530 u8 port_num)
4531 {
4532 if (rdma_protocol_roce(dev, port_num))
4533 return RDMA_AH_ATTR_TYPE_ROCE;
4534 if (rdma_protocol_ib(dev, port_num)) {
4535 if (rdma_cap_opa_ah(dev, port_num))
4536 return RDMA_AH_ATTR_TYPE_OPA;
4537 return RDMA_AH_ATTR_TYPE_IB;
4538 }
4539
4540 return RDMA_AH_ATTR_TYPE_UNDEFINED;
4541 }
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552 static inline u16 ib_lid_cpu16(u32 lid)
4553 {
4554 WARN_ON_ONCE(lid & 0xFFFF0000);
4555 return (u16)lid;
4556 }
4557
4558
4559
4560
4561
4562
4563 static inline __be16 ib_lid_be16(u32 lid)
4564 {
4565 WARN_ON_ONCE(lid & 0xFFFF0000);
4566 return cpu_to_be16((u16)lid);
4567 }
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579 static inline const struct cpumask *
4580 ib_get_vector_affinity(struct ib_device *device, int comp_vector)
4581 {
4582 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
4583 !device->ops.get_vector_affinity)
4584 return NULL;
4585
4586 return device->ops.get_vector_affinity(device, comp_vector);
4587
4588 }
4589
4590
4591
4592
4593
4594
4595
4596 void rdma_roce_rescan_device(struct ib_device *ibdev);
4597
4598 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4599
4600 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4601
4602 struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
4603 enum rdma_netdev_t type, const char *name,
4604 unsigned char name_assign_type,
4605 void (*setup)(struct net_device *));
4606
4607 int rdma_init_netdev(struct ib_device *device, u8 port_num,
4608 enum rdma_netdev_t type, const char *name,
4609 unsigned char name_assign_type,
4610 void (*setup)(struct net_device *),
4611 struct net_device *netdev);
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628 static inline void
4629 rdma_set_device_sysfs_group(struct ib_device *dev,
4630 const struct attribute_group *group)
4631 {
4632 dev->groups[1] = group;
4633 }
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643 static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4644 {
4645 struct ib_core_device *coredev =
4646 container_of(device, struct ib_core_device, dev);
4647
4648 return coredev->owner;
4649 }
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659 #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4660 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4661
4662 bool rdma_dev_access_netns(const struct ib_device *device,
4663 const struct net *net);
4664 #endif