This source file includes the following definitions:
- smbd_get_connection
- smbd_reconnect
- smbd_destroy
- smbd_recv
- smbd_send
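For orientation, the sketch below shows one way a caller could drive these entry points when RDMA is enabled. It is illustrative only: the example_smbd_connect_and_send() helper and the rdma, smbd_conn and dstaddr fields of struct TCP_Server_Info are assumptions about the surrounding CIFS code (cifsglob.h), not definitions made in this header.

/* Illustrative sketch, not part of this header. */
static int example_smbd_connect_and_send(struct TCP_Server_Info *server,
					  struct smb_rqst *rqst)
{
	/* cifs_rdma_enabled() is defined below; it tests the assumed rdma flag. */
	if (!cifs_rdma_enabled(server))
		return -EOPNOTSUPP;

	/* Establish the SMB Direct session; smbd_conn and dstaddr are assumed fields. */
	server->smbd_conn = smbd_get_connection(server,
			(struct sockaddr *)&server->dstaddr);
	if (!server->smbd_conn)
		return -ENOENT;

	/* Hand one assembled SMB request to the RDMA transport. */
	return smbd_send(server, 1, rqst);
}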
#ifndef _SMBDIRECT_H
#define _SMBDIRECT_H

#ifdef CONFIG_CIFS_SMB_DIRECT
#define cifs_rdma_enabled(server) ((server)->rdma)

#include "cifsglob.h"
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/mempool.h>

extern int rdma_readwrite_threshold;
extern int smbd_max_frmr_depth;
extern int smbd_keep_alive_interval;
extern int smbd_max_receive_size;
extern int smbd_max_fragmented_recv_size;
extern int smbd_max_send_size;
extern int smbd_send_credit_target;
extern int smbd_receive_credit_max;

enum keep_alive_status {
	KEEP_ALIVE_NONE,
	KEEP_ALIVE_PENDING,
	KEEP_ALIVE_SENT,
};

enum smbd_connection_status {
	SMBD_CREATED,
	SMBD_CONNECTING,
	SMBD_CONNECTED,
	SMBD_NEGOTIATE_FAILED,
	SMBD_DISCONNECTING,
	SMBD_DISCONNECTED,
	SMBD_DESTROYED
};

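/*
 * The context for an SMB Direct transport connection. It groups the RDMA
 * resources, the negotiated connection parameters, the memory registration
 * state, the receive and reassembly queues, and the packet mempools.
 */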
struct smbd_connection {
	enum smbd_connection_status transport_status;

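	/* RDMA connection resources */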
	struct rdma_cm_id *id;
	struct ib_qp_init_attr qp_attr;
	struct ib_pd *pd;
	struct ib_cq *send_cq, *recv_cq;
	struct ib_device_attr dev_attr;
	int ri_rc;
	struct completion ri_done;
	wait_queue_head_t conn_wait;
	wait_queue_head_t disconn_wait;

	struct completion negotiate_completion;
	bool negotiate_done;

	struct work_struct disconnect_work;
	struct work_struct recv_done_work;
	struct work_struct post_send_credits_work;

	spinlock_t lock_new_credits_offered;
	int new_credits_offered;

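	/* SMB Direct connection parameters exchanged during negotiation */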
	int receive_credit_max;
	int send_credit_target;
	int max_send_size;
	int max_fragmented_recv_size;
	int max_fragmented_send_size;
	int max_receive_size;
	int keep_alive_interval;
	int max_readwrite_size;
	enum keep_alive_status keep_alive_requested;
	int protocol;
	atomic_t send_credits;
	atomic_t receive_credits;
	int receive_credit_target;
	int fragment_reassembly_remaining;

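	/* Maximum number of RDMA read/write operations outstanding on this connection */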
	int responder_resources;

	int max_frmr_depth;

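	/*
	 * Payloads up to this size are sent inline with RDMA send/recv;
	 * larger payloads are transferred by RDMA read/write through a
	 * registered memory region.
	 */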
	int rdma_readwrite_threshold;
	enum ib_mr_type mr_type;
	struct list_head mr_list;
	spinlock_t mr_list_lock;

	atomic_t mr_ready_count;
	atomic_t mr_used_count;
	wait_queue_head_t wait_mr;
	struct work_struct mr_recovery_work;

	wait_queue_head_t wait_for_mr_cleanup;

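	/* Accounting for sends in flight */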
	atomic_t send_pending;
	wait_queue_head_t wait_send_pending;
	atomic_t send_payload_pending;
	wait_queue_head_t wait_send_payload_pending;

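	/* Queues of posted receive buffers */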
	struct list_head receive_queue;
	int count_receive_queue;
	spinlock_t receive_queue_lock;

	struct list_head empty_packet_queue;
	int count_empty_packet_queue;
	spinlock_t empty_packet_queue_lock;

	wait_queue_head_t wait_receive_queues;

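	/* Reassembly queue for fragmented SMB Direct payloads */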
	struct list_head reassembly_queue;
	spinlock_t reassembly_queue_lock;
	wait_queue_head_t wait_reassembly_queue;

	int reassembly_data_length;
	int reassembly_queue_length;

	int first_entry_offset;

	bool send_immediate;

	wait_queue_head_t wait_send_queue;

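	/*
	 * Set once a complete packet has been received; used to recognize
	 * the first SMB Direct fragment of the next reassembled payload.
	 */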
	bool full_packet_received;

	struct workqueue_struct *workqueue;
	struct delayed_work idle_timer_work;
	struct delayed_work send_immediate_work;

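	/* Mempools for pre-allocating send requests and receive responses */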
	struct kmem_cache *request_cache;
	mempool_t *request_mempool;

	struct kmem_cache *response_cache;
	mempool_t *response_mempool;

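	/* Counters kept for debugging */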
	unsigned int count_get_receive_buffer;
	unsigned int count_put_receive_buffer;
	unsigned int count_reassembly_queue;
	unsigned int count_enqueue_reassembly_queue;
	unsigned int count_dequeue_reassembly_queue;
	unsigned int count_send_empty;
};

enum smbd_message_type {
	SMBD_NEGOTIATE_RESP,
	SMBD_TRANSFER_DATA,
};

#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001

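/* SMB Direct negotiate request packet ([MS-SMBD] 2.2.1) */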
struct smbd_negotiate_req {
	__le16 min_version;
	__le16 max_version;
	__le16 reserved;
	__le16 credits_requested;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;

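/* SMB Direct negotiate response packet ([MS-SMBD] 2.2.2) */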
struct smbd_negotiate_resp {
	__le16 min_version;
	__le16 max_version;
	__le16 negotiated_version;
	__le16 reserved;
	__le16 credits_requested;
	__le16 credits_granted;
	__le32 status;
	__le32 max_readwrite_size;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;

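/* SMB Direct data transfer packet; an optional payload follows the header ([MS-SMBD] 2.2.3) */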
struct smbd_data_transfer {
	__le16 credits_requested;
	__le16 credits_granted;
	__le16 flags;
	__le16 reserved;
	__le32 remaining_data_length;
	__le32 data_offset;
	__le32 data_length;
	__le32 padding;
	__u8 buffer[];
} __packed;

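/* Describes a registered RDMA buffer: remote offset, steering token and length */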
struct smbd_buffer_descriptor_v1 {
	__le64 offset;
	__le32 token;
	__le32 length;
} __packed;

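/* Maximum number of SGEs used for a single SMB Direct send or receive */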
#define SMBDIRECT_MAX_SGE 16

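/* The context for an SMB Direct send request; the packet to send is built in packet[] */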
struct smbd_request {
	struct smbd_connection *info;
	struct ib_cqe cqe;

	bool has_payload;

	struct ib_sge sge[SMBDIRECT_MAX_SGE];
	int num_sge;

	u8 packet[];
};

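/* The context for an SMB Direct receive; the received packet lands in packet[] */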
struct smbd_response {
	struct smbd_connection *info;
	struct ib_cqe cqe;
	struct ib_sge sge;

	enum smbd_message_type type;

	struct list_head list;

	bool first_segment;

	u8 packet[];
};

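/* Create an SMB Direct session and connect to the server at dstaddr */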
struct smbd_connection *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr);

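/* Reconnect the SMB Direct session */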
int smbd_reconnect(struct TCP_Server_Info *server);
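/* Destroy the SMB Direct session and release its resources */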
void smbd_destroy(struct TCP_Server_Info *server);

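/* Carry upper-layer SMB traffic over the SMB Direct transport */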
int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
int smbd_send(struct TCP_Server_Info *server,
	int num_rqst, struct smb_rqst *rqst);

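/* States of a memory registration used for RDMA read/write */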
enum mr_state {
	MR_READY,
	MR_REGISTERED,
	MR_INVALIDATED,
	MR_ERROR
};

struct smbd_mr {
	struct smbd_connection *conn;
	struct list_head list;
	enum mr_state state;
	struct ib_mr *mr;
	struct scatterlist *sgl;
	int sgl_count;
	enum dma_data_direction dir;
	union {
		struct ib_reg_wr wr;
		struct ib_send_wr inv_wr;
	};
	struct ib_cqe cqe;
	bool need_invalidate;
	struct completion invalidate_done;
};

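/* Register and deregister memory regions for RDMA read/write of large payloads */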
struct smbd_mr *smbd_register_mr(
	struct smbd_connection *info, struct page *pages[], int num_pages,
	int offset, int tailsz, bool writing, bool need_invalidate);
int smbd_deregister_mr(struct smbd_mr *mr);

#else
#define cifs_rdma_enabled(server) 0
struct smbd_connection {};
static inline void *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
static inline void smbd_destroy(struct TCP_Server_Info *server) {}
static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
#endif

#endif