This source file includes the following definitions:
- qp_event_handler
- svc_rdma_create_xprt
- svc_rdma_parse_connect_private
- handle_connect_req
- rdma_listen_handler
- rdma_cma_handler
- svc_rdma_create
- svc_rdma_accept
- svc_rdma_detach
- __svc_rdma_free
- svc_rdma_free
- svc_rdma_has_wspace
- svc_rdma_secure_port
- svc_rdma_kill_temp_xprt
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/export.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
                                                 struct net *net);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct net *net,
                                        struct sockaddr *sa, int salen,
                                        int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void svc_rdma_secure_port(struct svc_rqst *);
static void svc_rdma_kill_temp_xprt(struct svc_xprt *);

static const struct svc_xprt_ops svc_rdma_ops = {
        .xpo_create = svc_rdma_create,
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_release_rqst = svc_rdma_release_rqst,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_has_wspace = svc_rdma_has_wspace,
        .xpo_accept = svc_rdma_accept,
        .xpo_secure_port = svc_rdma_secure_port,
        .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};

struct svc_xprt_class svc_rdma_class = {
        .xcl_name = "rdma",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
        .xcl_ident = XPRT_TRANSPORT_RDMA,
};
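
/* QP event handler. Benign QP events are ignored; any other event is
 * treated as fatal: mark the transport closed and wake a server
 * thread so the connection is torn down.
 */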
static void qp_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;

        trace_svcrdma_qp_error(event, (struct sockaddr *)&xprt->xpt_remote);
        switch (event->event) {
        /* These are considered benign events */
        case IB_EVENT_PATH_MIG:
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
        case IB_EVENT_QP_LAST_WQE_REACHED:
                break;

        /* These are considered fatal events */
        case IB_EVENT_PATH_MIG_ERR:
        case IB_EVENT_QP_FATAL:
        case IB_EVENT_QP_REQ_ERR:
        case IB_EVENT_QP_ACCESS_ERR:
        case IB_EVENT_DEVICE_FATAL:
        default:
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_xprt_enqueue(xprt);
                break;
        }
}
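
/* Allocate an svcxprt_rdma and initialize its lists, locks, and wait
 * queue. RDMA device resources (PD, CQs, QP) are not allocated here;
 * that happens later, in svc_rdma_accept().
 */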
static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
                                                 struct net *net)
{
        struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

        if (!cma_xprt) {
                dprintk("svcrdma: failed to create new transport\n");
                return NULL;
        }
        svc_xprt_init(net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
        INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
        INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
        INIT_LIST_HEAD(&cma_xprt->sc_send_ctxts);
        init_llist_head(&cma_xprt->sc_recv_ctxts);
        INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
        init_waitqueue_head(&cma_xprt->sc_send_wait);

        spin_lock_init(&cma_xprt->sc_lock);
        spin_lock_init(&cma_xprt->sc_rq_dto_lock);
        spin_lock_init(&cma_xprt->sc_send_lock);
        spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);

        /*
         * Note that this implies that the underlying transport support
         * has some form of congestion control (see RFC 7530 section 3.1
         * paragraph 2). For now, we assume that all supported RDMA
         * transports are suitable here.
         */
        set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);

        return cma_xprt;
}
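
/* Examine the RDMA-CM private data sent with the client's connect
 * request, if any. A valid message advertises the client's inline
 * send/receive buffer sizes and whether the client supports Send
 * With Invalidate.
 */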
static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
                               struct rdma_conn_param *param)
{
        const struct rpcrdma_connect_private *pmsg = param->private_data;

        if (pmsg &&
            pmsg->cp_magic == rpcrdma_cmp_magic &&
            pmsg->cp_version == RPCRDMA_CMP_VERSION) {
                newxprt->sc_snd_w_inv = pmsg->cp_flags &
                                        RPCRDMA_CMP_F_SND_W_INV_OK;

                dprintk("svcrdma: client send_size %u, recv_size %u "
                        "remote inv %ssupported\n",
                        rpcrdma_decode_buffer_size(pmsg->cp_send_size),
                        rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
                        newxprt->sc_snd_w_inv ? "" : "un");
        }
}
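
/*
 * Handle a CONNECT_REQUEST event on the listening endpoint. The cma_id
 * passed in is the one created for the _new_ connection; its context
 * initially points to the listening transport. Build a new
 * svcxprt_rdma for the connection, queue it on the listener's accept
 * queue, and wake the listener so svc_rdma_accept() can complete the
 * handshake.
 */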
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
                               struct rdma_conn_param *param)
{
        struct svcxprt_rdma *listen_xprt = new_cma_id->context;
        struct svcxprt_rdma *newxprt;
        struct sockaddr *sa;

        /* Create a new transport */
        newxprt = svc_rdma_create_xprt(listen_xprt->sc_xprt.xpt_server,
                                       listen_xprt->sc_xprt.xpt_net);
        if (!newxprt)
                return;
        newxprt->sc_cm_id = new_cma_id;
        new_cma_id->context = newxprt;
        svc_rdma_parse_connect_private(newxprt, param);

        /* Save client advertised inbound read limit for use later in accept. */
        newxprt->sc_ord = param->initiator_depth;

        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));

        /* The remote port is arbitrary and not under the control of the
         * client ULP. Set it to a fixed value so that the DRC continues
         * to be effective after a reconnect. */
        rpc_set_port((struct sockaddr *)&newxprt->sc_xprt.xpt_remote, 0);

        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

        /*
         * Enqueue the new transport on the accept queue of the listening
         * transport
         */
        spin_lock(&listen_xprt->sc_lock);
        list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
        spin_unlock(&listen_xprt->sc_lock);

        set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&listen_xprt->sc_xprt);
}
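
/*
 * Handles events generated on the listening endpoint. The only event
 * of interest is an incoming connect request; everything else is
 * logged and ignored.
 */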
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
                               struct rdma_cm_event *event)
{
        struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.src_addr;

        trace_svcrdma_cm_event(event, sap);

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
                        "event = %s (%d)\n", cma_id, cma_id->context,
                        rdma_event_msg(event->event), event->event);
                handle_connect_req(cma_id, &event->param.conn);
                break;
        default:
                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
                        "event = %s (%d)\n", cma_id,
                        rdma_event_msg(event->event), event->event);
                break;
        }

        return 0;
}
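
/*
 * Handles events generated on a connected (per-client) endpoint:
 * connection established, disconnect, and device removal.
 */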
static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                            struct rdma_cm_event *event)
{
        struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.dst_addr;
        struct svcxprt_rdma *rdma = cma_id->context;
        struct svc_xprt *xprt = &rdma->sc_xprt;

        trace_svcrdma_cm_event(event, sap);

        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                svc_xprt_get(xprt);
                dprintk("svcrdma: Connection completed on DTO xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
                svc_xprt_enqueue(xprt);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_xprt_enqueue(xprt);
                svc_xprt_put(xprt);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
                        "event = %s (%d)\n", cma_id, xprt,
                        rdma_event_msg(event->event), event->event);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_xprt_enqueue(xprt);
                svc_xprt_put(xprt);
                break;
        default:
                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
                        "event = %s (%d)\n", cma_id,
                        rdma_event_msg(event->event), event->event);
                break;
        }
        return 0;
}
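
/*
 * Create a listening RDMA service endpoint.
 */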
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct net *net,
                                        struct sockaddr *sa, int salen,
                                        int flags)
{
        struct rdma_cm_id *listen_id;
        struct svcxprt_rdma *cma_xprt;
        int ret;

        dprintk("svcrdma: Creating RDMA listener\n");
        if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
                dprintk("svcrdma: Address family %d is not supported.\n",
                        sa->sa_family);
                return ERR_PTR(-EAFNOSUPPORT);
        }
        cma_xprt = svc_rdma_create_xprt(serv, net);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);
        set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
        strcpy(cma_xprt->sc_xprt.xpt_remotebuf, "listener");

        listen_id = rdma_create_id(net, rdma_listen_handler, cma_xprt,
                                   RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
                dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
                goto err0;
        }

        /* Allow both IPv4 and IPv6 sockets to bind a single port
         * at the same time.
         */
#if IS_ENABLED(CONFIG_IPV6)
        ret = rdma_set_afonly(listen_id, 1);
        if (ret) {
                dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
                goto err1;
        }
#endif
        ret = rdma_bind_addr(listen_id, sa);
        if (ret) {
                dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
                goto err1;
        }
        cma_xprt->sc_cm_id = listen_id;

        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
        if (ret) {
                dprintk("svcrdma: rdma_listen failed = %d\n", ret);
                goto err1;
        }

        /*
         * We need to use the address from the cm_id in case the
         * caller specified 0 for the port number.
         */
        sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

        return &cma_xprt->sc_xprt;

 err1:
        rdma_destroy_id(listen_id);
 err0:
        kfree(cma_xprt);
        return ERR_PTR(ret);
}
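
/*
 * This is the xpo_accept function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * (handle_connect_req) has already created a new transport and
 * attached it to the new CMA ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */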
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *listen_rdma;
        struct svcxprt_rdma *newxprt = NULL;
        struct rdma_conn_param conn_param;
        struct rpcrdma_connect_private pmsg;
        struct ib_qp_init_attr qp_attr;
        unsigned int ctxts, rq_depth;
        struct ib_device *dev;
        int ret = 0;
        RPC_IFDEBUG(struct sockaddr *sap);

        listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
        clear_bit(XPT_CONN, &xprt->xpt_flags);
        /* Get the next entry off the accept list */
        spin_lock(&listen_rdma->sc_lock);
        if (!list_empty(&listen_rdma->sc_accept_q)) {
                newxprt = list_entry(listen_rdma->sc_accept_q.next,
                                     struct svcxprt_rdma, sc_accept_q);
                list_del_init(&newxprt->sc_accept_q);
        }
        if (!list_empty(&listen_rdma->sc_accept_q))
                set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
        spin_unlock(&listen_rdma->sc_lock);
        if (!newxprt)
                return NULL;

        dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
                newxprt, newxprt->sc_cm_id);

        dev = newxprt->sc_cm_id->device;
        newxprt->sc_port_num = newxprt->sc_cm_id->port_num;

        /* Qualify the transport resource defaults with the
         * capabilities of this particular device */
        /* Transport header, head iovec, tail iovec */
        newxprt->sc_max_send_sges = 3;
        /* Add one SGE per page list entry */
        newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
        if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
                newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
        newxprt->sc_max_req_size = svcrdma_max_req_size;
        newxprt->sc_max_requests = svcrdma_max_requests;
        newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
        rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
        if (rq_depth > dev->attrs.max_qp_wr) {
                pr_warn("svcrdma: reducing receive depth to %d\n",
                        dev->attrs.max_qp_wr);
                rq_depth = dev->attrs.max_qp_wr;
                newxprt->sc_max_requests = rq_depth - 2;
                newxprt->sc_max_bc_requests = 2;
        }
        newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
        ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
        ctxts *= newxprt->sc_max_requests;
        newxprt->sc_sq_depth = rq_depth + ctxts;
        if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) {
                pr_warn("svcrdma: reducing send depth to %d\n",
                        dev->attrs.max_qp_wr);
                newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
        }
        atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);

        newxprt->sc_pd = ib_alloc_pd(dev, 0);
        if (IS_ERR(newxprt->sc_pd)) {
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
        }
        newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
                                            IB_POLL_WORKQUEUE);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                dprintk("svcrdma: error creating SQ CQ for connect request\n");
                goto errout;
        }
        newxprt->sc_rq_cq =
                ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
        if (IS_ERR(newxprt->sc_rq_cq)) {
                dprintk("svcrdma: error creating RQ CQ for connect request\n");
                goto errout;
        }

        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.event_handler = qp_event_handler;
        qp_attr.qp_context = &newxprt->sc_xprt;
        qp_attr.port_num = newxprt->sc_port_num;
        qp_attr.cap.max_rdma_ctxs = ctxts;
        qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts;
        qp_attr.cap.max_recv_wr = rq_depth;
        qp_attr.cap.max_send_sge = newxprt->sc_max_send_sges;
        qp_attr.cap.max_recv_sge = 1;
        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        qp_attr.qp_type = IB_QPT_RC;
        qp_attr.send_cq = newxprt->sc_sq_cq;
        qp_attr.recv_cq = newxprt->sc_rq_cq;
        dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
                newxprt->sc_cm_id, newxprt->sc_pd);
        dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
                qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
        dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
                qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);

        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
        if (ret) {
                dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
                goto errout;
        }
        newxprt->sc_qp = newxprt->sc_cm_id->qp;

        if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
                newxprt->sc_snd_w_inv = false;
        if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
            !rdma_ib_or_roce(dev, newxprt->sc_port_num))
                goto errout;

        if (!svc_rdma_post_recvs(newxprt))
                goto errout;

        /* Swap out the handler */
        newxprt->sc_cm_id->event_handler = rdma_cma_handler;

        /* Construct RDMA-CM private message */
        pmsg.cp_magic = rpcrdma_cmp_magic;
        pmsg.cp_version = RPCRDMA_CMP_VERSION;
        pmsg.cp_flags = 0;
        pmsg.cp_send_size = pmsg.cp_recv_size =
                rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);

        /* Accept Connection */
        set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = 0;
        conn_param.initiator_depth = min_t(int, newxprt->sc_ord,
                                           dev->attrs.max_qp_init_rd_atom);
        if (!conn_param.initiator_depth) {
                dprintk("svcrdma: invalid ORD setting\n");
                ret = -EINVAL;
                goto errout;
        }
        conn_param.private_data = &pmsg;
        conn_param.private_data_len = sizeof(pmsg);
        ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
        if (ret)
                goto errout;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        dprintk("svcrdma: new connection %p accepted:\n", newxprt);
        sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
        sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
        dprintk("    max_sge         : %d\n", newxprt->sc_max_send_sges);
        dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
        dprintk("    rdma_rw_ctxs    : %d\n", ctxts);
        dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
        dprintk("    ord             : %d\n", conn_param.initiator_depth);
#endif

        trace_svcrdma_xprt_accept(&newxprt->sc_xprt);
        return &newxprt->sc_xprt;

 errout:
        dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
        trace_svcrdma_xprt_fail(&newxprt->sc_xprt);
        /* Take a reference in case the DTO handler runs */
        svc_xprt_get(&newxprt->sc_xprt);
        if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
                ib_destroy_qp(newxprt->sc_qp);
        rdma_destroy_id(newxprt->sc_cm_id);
        /* This call to put will destroy the transport */
        svc_xprt_put(&newxprt->sc_xprt);
        return NULL;
}
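
/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id's callback (taken when the
 *   RDMA_CM_EVENT_ESTABLISHED event is handled)
 * - A reference held by the svc_xprt itself
 *
 * Initiating a disconnect triggers an RDMA_CM_EVENT_DISCONNECTED
 * event, whose handler drops the callback's reference. The transport
 * is actually destroyed in __svc_rdma_free() once the final reference
 * is put.
 */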
static void svc_rdma_detach(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        /* Disconnect and flush posted WQE */
        rdma_disconnect(rdma->sc_cm_id);
}
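
/*
 * Tear down all of the transport's resources in process context:
 * drain the QP, flush pending receives, release send/recv/rw
 * contexts, then destroy the QP, CQs, PD, and CM ID.
 */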
static void __svc_rdma_free(struct work_struct *work)
{
        struct svcxprt_rdma *rdma =
                container_of(work, struct svcxprt_rdma, sc_work);
        struct svc_xprt *xprt = &rdma->sc_xprt;

        trace_svcrdma_xprt_free(xprt);

        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_drain_qp(rdma->sc_qp);

        svc_rdma_flush_recv_queues(rdma);

        /* Final put of backchannel client transport */
        if (xprt->xpt_bc_xprt) {
                xprt_put(xprt->xpt_bc_xprt);
                xprt->xpt_bc_xprt = NULL;
        }

        svc_rdma_destroy_rw_ctxts(rdma);
        svc_rdma_send_ctxts_destroy(rdma);
        svc_rdma_recv_ctxts_destroy(rdma);

        /* Destroy the QP if present (not a listener) */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_destroy_qp(rdma->sc_qp);

        if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
                ib_free_cq(rdma->sc_sq_cq);

        if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
                ib_free_cq(rdma->sc_rq_cq);

        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
                ib_dealloc_pd(rdma->sc_pd);

        /* Destroy the CM ID */
        rdma_destroy_id(rdma->sc_cm_id);

        kfree(rdma);
}
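
/* xpo_free may be invoked from a context that cannot sleep, but
 * teardown must be able to sleep (ib_drain_qp, for example), so the
 * real work is deferred to a workqueue.
 */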
static void svc_rdma_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        INIT_WORK(&rdma->sc_work, __svc_rdma_free);
        schedule_work(&rdma->sc_work);
}
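
/* Report whether the transport's Send Queue can accept more work:
 * if senders are already waiting for SQ space, report that there is
 * no write space available.
 */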
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        /*
         * If there are already waiters on the SQ,
         * return false.
         */
        if (waitqueue_active(&rdma->sc_send_wait))
                return 0;

        /* Otherwise return true. */
        return 1;
}
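
/* RPC requests arriving on an RDMA transport are always treated as
 * coming from a secure port.
 */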
static void svc_rdma_secure_port(struct svc_rqst *rqstp)
{
        set_bit(RQ_SECURE, &rqstp->rq_flags);
}

/* No transport-specific work is needed to kill a temporary transport;
 * this callback is intentionally a no-op.
 */
static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
{
}