This source file includes the following definitions:
- srpt_get_u64_x
- srpt_set_ch_state
- srpt_event_handler
- srpt_srq_event
- get_ch_state_name
- srpt_qp_event
- srpt_set_ioc
- srpt_get_class_port_info
- srpt_get_iou
- srpt_get_ioc
- srpt_get_svc_entries
- srpt_mgmt_method_get
- srpt_mad_send_handler
- srpt_mad_recv_handler
- srpt_format_guid
- srpt_refresh_port
- srpt_unregister_mad_agent
- srpt_alloc_ioctx
- srpt_free_ioctx
- srpt_alloc_ioctx_ring
- srpt_free_ioctx_ring
- srpt_set_cmd_state
- srpt_test_and_set_cmd_state
- srpt_post_recv
- srpt_zerolength_write
- srpt_zerolength_write_done
- srpt_alloc_rw_ctxs
- srpt_free_rw_ctxs
- srpt_get_desc_buf
- srpt_get_desc_tbl
- srpt_init_ch_qp
- srpt_ch_qp_rtr
- srpt_ch_qp_rts
- srpt_ch_qp_err
- srpt_get_send_ioctx
- srpt_abort_cmd
- srpt_rdma_read_done
- srpt_build_cmd_rsp
- srpt_build_tskmgmt_rsp
- srpt_check_stop_free
- srpt_handle_cmd
- srp_tmr_to_tcm
- srpt_handle_tsk_mgmt
- srpt_handle_new_iu
- srpt_recv_done
- srpt_process_wait_list
- srpt_send_done
- srpt_create_ch_ib
- srpt_destroy_ch_ib
- srpt_close_ch
- srpt_disconnect_ch
- srpt_ch_closed
- srpt_disconnect_ch_sync
- __srpt_close_all_ch
- srpt_get_nexus
- srpt_set_enabled
- srpt_free_ch
- srpt_release_channel_work
- srpt_cm_req_recv
- srpt_ib_cm_req_recv
- srpt_rdma_cm_req_recv
- srpt_cm_rej_recv
- srpt_cm_rtu_recv
- srpt_cm_handler
- srpt_rdma_cm_handler
- srpt_write_pending
- tcm_to_srp_tsk_mgmt_status
- srpt_queue_response
- srpt_queue_data_in
- srpt_queue_tm_rsp
- srpt_aborted_task
- srpt_queue_status
- srpt_refresh_port_work
- srpt_ch_list_empty
- srpt_release_sport
- __srpt_lookup_wwn
- srpt_lookup_wwn
- srpt_free_srq
- srpt_alloc_srq
- srpt_use_srq
- srpt_add_one
- srpt_remove_one
- srpt_check_true
- srpt_check_false
- srpt_tpg_to_sport
- srpt_get_fabric_wwn
- srpt_get_tag
- srpt_tpg_get_inst_index
- srpt_release_cmd
- srpt_close_session
- srpt_sess_get_index
- srpt_set_default_node_attrs
- srpt_get_tcm_cmd_state
- srpt_parse_guid
- srpt_parse_i_port_id
- srpt_init_nodeacl
- srpt_tpg_attrib_srp_max_rdma_size_show
- srpt_tpg_attrib_srp_max_rdma_size_store
- srpt_tpg_attrib_srp_max_rsp_size_show
- srpt_tpg_attrib_srp_max_rsp_size_store
- srpt_tpg_attrib_srp_sq_size_show
- srpt_tpg_attrib_srp_sq_size_store
- srpt_tpg_attrib_use_srq_show
- srpt_tpg_attrib_use_srq_store
- srpt_create_rdma_id
- srpt_rdma_cm_port_show
- srpt_rdma_cm_port_store
- srpt_tpg_enable_show
- srpt_tpg_enable_store
- srpt_make_tpg
- srpt_drop_tpg
- srpt_make_tport
- srpt_drop_tport
- srpt_wwn_version_show
- srpt_init_module
- srpt_cleanup_module
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/ctype.h>
40 #include <linux/kthread.h>
41 #include <linux/string.h>
42 #include <linux/delay.h>
43 #include <linux/atomic.h>
44 #include <linux/inet.h>
45 #include <rdma/ib_cache.h>
46 #include <scsi/scsi_proto.h>
47 #include <scsi/scsi_tcq.h>
48 #include <target/target_core_base.h>
49 #include <target/target_core_fabric.h>
50 #include "ib_srpt.h"
51
52
53 #define DRV_NAME "ib_srpt"
54
55 #define SRPT_ID_STRING "Linux SRP target"
56
57 #undef pr_fmt
58 #define pr_fmt(fmt) DRV_NAME " " fmt
59
60 MODULE_AUTHOR("Vu Pham and Bart Van Assche");
61 MODULE_DESCRIPTION("SCSI RDMA Protocol target driver");
62 MODULE_LICENSE("Dual BSD/GPL");
63
64
65
66
67
68 static u64 srpt_service_guid;
69 static DEFINE_SPINLOCK(srpt_dev_lock);
70 static LIST_HEAD(srpt_dev_list);
71
72 static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
73 module_param(srp_max_req_size, int, 0444);
74 MODULE_PARM_DESC(srp_max_req_size,
75 "Maximum size of SRP request messages in bytes.");
76
77 static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
78 module_param(srpt_srq_size, int, 0444);
79 MODULE_PARM_DESC(srpt_srq_size,
80 "Shared receive queue (SRQ) size.");
81
82 static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
83 {
84 return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
85 }
86 module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
87 0444);
88 MODULE_PARM_DESC(srpt_service_guid,
89 "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
90
91 static struct ib_client srpt_client;
92
93 static DEFINE_MUTEX(rdma_cm_mutex);
94
95 static u16 rdma_cm_port;
96 static struct rdma_cm_id *rdma_cm_id;
97 static void srpt_release_cmd(struct se_cmd *se_cmd);
98 static void srpt_free_ch(struct kref *kref);
99 static int srpt_queue_status(struct se_cmd *cmd);
100 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
101 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
102 static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
103
104
105
106
107
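/**
 * srpt_set_ch_state - modify the state of a channel
 * @ch: SRPT RDMA channel.
 * @new: New channel state.
 *
 * The state is only changed if the new state is later in the state sequence
 * than the current state. Returns true if and only if the state was changed.
 */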
108 static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
109 {
110 unsigned long flags;
111 enum rdma_ch_state prev;
112 bool changed = false;
113
114 spin_lock_irqsave(&ch->spinlock, flags);
115 prev = ch->state;
116 if (new > prev) {
117 ch->state = new;
118 changed = true;
119 }
120 spin_unlock_irqrestore(&ch->spinlock, flags);
121
122 return changed;
123 }
124
125
126
127
128
129
130
131
132
133
134
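/**
 * srpt_event_handler - asynchronous IB event callback function
 * @handler: IB event handler registered by ib_register_event_handler().
 * @event: Description of the event that occurred.
 *
 * Called by the InfiniBand core when an asynchronous IB event occurs. This
 * callback may occur in interrupt context.
 */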
135 static void srpt_event_handler(struct ib_event_handler *handler,
136 struct ib_event *event)
137 {
138 struct srpt_device *sdev;
139 struct srpt_port *sport;
140 u8 port_num;
141
142 sdev = ib_get_client_data(event->device, &srpt_client);
143 if (!sdev || sdev->device != event->device)
144 return;
145
146 pr_debug("ASYNC event= %d on device= %s\n", event->event,
147 dev_name(&sdev->device->dev));
148
149 switch (event->event) {
150 case IB_EVENT_PORT_ERR:
151 port_num = event->element.port_num - 1;
152 if (port_num < sdev->device->phys_port_cnt) {
153 sport = &sdev->port[port_num];
154 sport->lid = 0;
155 sport->sm_lid = 0;
156 } else {
157 WARN(true, "event %d: port_num %d out of range 1..%d\n",
158 event->event, port_num + 1,
159 sdev->device->phys_port_cnt);
160 }
161 break;
162 case IB_EVENT_PORT_ACTIVE:
163 case IB_EVENT_LID_CHANGE:
164 case IB_EVENT_PKEY_CHANGE:
165 case IB_EVENT_SM_CHANGE:
166 case IB_EVENT_CLIENT_REREGISTER:
167 case IB_EVENT_GID_CHANGE:
168
169 port_num = event->element.port_num - 1;
170 if (port_num < sdev->device->phys_port_cnt) {
171 sport = &sdev->port[port_num];
172 if (!sport->lid && !sport->sm_lid)
173 schedule_work(&sport->work);
174 } else {
175 WARN(true, "event %d: port_num %d out of range 1..%d\n",
176 event->event, port_num + 1,
177 sdev->device->phys_port_cnt);
178 }
179 break;
180 default:
181 pr_err("received unrecognized IB event %d\n", event->event);
182 break;
183 }
184 }
185
186
187
188
189
190
191 static void srpt_srq_event(struct ib_event *event, void *ctx)
192 {
193 pr_debug("SRQ event %d\n", event->event);
194 }
195
196 static const char *get_ch_state_name(enum rdma_ch_state s)
197 {
198 switch (s) {
199 case CH_CONNECTING:
200 return "connecting";
201 case CH_LIVE:
202 return "live";
203 case CH_DISCONNECTING:
204 return "disconnecting";
205 case CH_DRAINING:
206 return "draining";
207 case CH_DISCONNECTED:
208 return "disconnected";
209 }
210 return "???";
211 }
212
213
214
215
216
217
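/**
 * srpt_qp_event - QP event callback function
 * @event: Description of the event that occurred.
 * @ch: SRPT RDMA channel.
 */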
218 static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
219 {
220 pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
221 event->event, ch, ch->sess_name, ch->state);
222
223 switch (event->event) {
224 case IB_EVENT_COMM_EST:
225 if (ch->using_rdma_cm)
226 rdma_notify(ch->rdma_cm.cm_id, event->event);
227 else
228 ib_cm_notify(ch->ib_cm.cm_id, event->event);
229 break;
230 case IB_EVENT_QP_LAST_WQE_REACHED:
231 pr_debug("%s-%d, state %s: received Last WQE event.\n",
232 ch->sess_name, ch->qp->qp_num,
233 get_ch_state_name(ch->state));
234 break;
235 default:
236 pr_err("received unrecognized IB QP event %d\n", event->event);
237 break;
238 }
239 }
240
241
242
243
244
245
246
247
248
249
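/**
 * srpt_set_ioc - set an entry in the controller list of an IOUnitInfo structure
 * @c_list: controller list.
 * @slot: one-based slot number.
 * @value: four-bit value.
 *
 * Copies the lowest four bits of @value into the four-bit element of @c_list
 * that corresponds to the one-based slot number @slot.
 */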
250 static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
251 {
252 u16 id;
253 u8 tmp;
254
255 id = (slot - 1) / 2;
256 if (slot & 0x1) {
257 tmp = c_list[id] & 0xf;
258 c_list[id] = (value << 4) | tmp;
259 } else {
260 tmp = c_list[id] & 0xf0;
261 c_list[id] = (value & 0xf) | tmp;
262 }
263 }
264
265
266
267
268
269
270
271
272 static void srpt_get_class_port_info(struct ib_dm_mad *mad)
273 {
274 struct ib_class_port_info *cif;
275
276 cif = (struct ib_class_port_info *)mad->data;
277 memset(cif, 0, sizeof(*cif));
278 cif->base_version = 1;
279 cif->class_version = 1;
280
281 ib_set_cpi_resp_time(cif, 20);
282 mad->mad_hdr.status = 0;
283 }
284
285
286
287
288
289
290
291
292 static void srpt_get_iou(struct ib_dm_mad *mad)
293 {
294 struct ib_dm_iou_info *ioui;
295 u8 slot;
296 int i;
297
298 ioui = (struct ib_dm_iou_info *)mad->data;
299 ioui->change_id = cpu_to_be16(1);
300 ioui->max_controllers = 16;
301
302
303 srpt_set_ioc(ioui->controller_list, 1, 1);
304 for (i = 1, slot = 2; i < 16; i++, slot++)
305 srpt_set_ioc(ioui->controller_list, slot, 0);
306
307 mad->mad_hdr.status = 0;
308 }
309
310
311
312
313
314
315
316
317
318
319
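/**
 * srpt_get_ioc - write an IOControllerProfile to a management datagram
 * @sport: HCA port through which the MAD has been received.
 * @slot: Slot number specified in the DM_ATTR_IOC_PROFILE query.
 * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
 */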
320 static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
321 struct ib_dm_mad *mad)
322 {
323 struct srpt_device *sdev = sport->sdev;
324 struct ib_dm_ioc_profile *iocp;
325 int send_queue_depth;
326
327 iocp = (struct ib_dm_ioc_profile *)mad->data;
328
329 if (!slot || slot > 16) {
330 mad->mad_hdr.status
331 = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
332 return;
333 }
334
335 if (slot > 2) {
336 mad->mad_hdr.status
337 = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
338 return;
339 }
340
341 if (sdev->use_srq)
342 send_queue_depth = sdev->srq_size;
343 else
344 send_queue_depth = min(MAX_SRPT_RQ_SIZE,
345 sdev->device->attrs.max_qp_wr);
346
347 memset(iocp, 0, sizeof(*iocp));
348 strcpy(iocp->id_string, SRPT_ID_STRING);
349 iocp->guid = cpu_to_be64(srpt_service_guid);
350 iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
351 iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
352 iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
353 iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
354 iocp->subsys_device_id = 0x0;
355 iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
356 iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
357 iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
358 iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
359 iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
360 iocp->rdma_read_depth = 4;
361 iocp->send_size = cpu_to_be32(srp_max_req_size);
362 iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
363 1U << 24));
364 iocp->num_svc_entries = 1;
365 iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
366 SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
367
368 mad->mad_hdr.status = 0;
369 }
370
371
372
373
374
375
376
377
378
379
380
381
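/**
 * srpt_get_svc_entries - write ServiceEntries to a management datagram
 * @ioc_guid: I/O controller GUID to use in the reply.
 * @slot: I/O controller number.
 * @hi: End of the range of service entries to be reported.
 * @lo: Start of the range of service entries to be reported.
 * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
 */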
382 static void srpt_get_svc_entries(u64 ioc_guid,
383 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
384 {
385 struct ib_dm_svc_entries *svc_entries;
386
387 WARN_ON(!ioc_guid);
388
389 if (!slot || slot > 16) {
390 mad->mad_hdr.status
391 = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
392 return;
393 }
394
395 if (slot > 2 || lo > hi || hi > 1) {
396 mad->mad_hdr.status
397 = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
398 return;
399 }
400
401 svc_entries = (struct ib_dm_svc_entries *)mad->data;
402 memset(svc_entries, 0, sizeof(*svc_entries));
403 svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
404 snprintf(svc_entries->service_entries[0].name,
405 sizeof(svc_entries->service_entries[0].name),
406 "%s%016llx",
407 SRP_SERVICE_NAME_PREFIX,
408 ioc_guid);
409
410 mad->mad_hdr.status = 0;
411 }
412
413
414
415
416
417
418
419 static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
420 struct ib_dm_mad *rsp_mad)
421 {
422 u16 attr_id;
423 u32 slot;
424 u8 hi, lo;
425
426 attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
427 switch (attr_id) {
428 case DM_ATTR_CLASS_PORT_INFO:
429 srpt_get_class_port_info(rsp_mad);
430 break;
431 case DM_ATTR_IOU_INFO:
432 srpt_get_iou(rsp_mad);
433 break;
434 case DM_ATTR_IOC_PROFILE:
435 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
436 srpt_get_ioc(sp, slot, rsp_mad);
437 break;
438 case DM_ATTR_SVC_ENTRIES:
439 slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
440 hi = (u8) ((slot >> 8) & 0xff);
441 lo = (u8) (slot & 0xff);
442 slot = (u16) ((slot >> 16) & 0xffff);
443 srpt_get_svc_entries(srpt_service_guid,
444 slot, hi, lo, rsp_mad);
445 break;
446 default:
447 rsp_mad->mad_hdr.status =
448 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
449 break;
450 }
451 }
452
453
454
455
456
457
458 static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
459 struct ib_mad_send_wc *mad_wc)
460 {
461 rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
462 ib_free_send_mad(mad_wc->send_buf);
463 }
464
465
466
467
468
469
470
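/**
 * srpt_mad_recv_handler - MAD reception callback function
 * @mad_agent: Agent used to receive the MAD.
 * @send_buf: Not used.
 * @mad_wc: Work completion reporting the received MAD.
 */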
471 static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
472 struct ib_mad_send_buf *send_buf,
473 struct ib_mad_recv_wc *mad_wc)
474 {
475 struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
476 struct ib_ah *ah;
477 struct ib_mad_send_buf *rsp;
478 struct ib_dm_mad *dm_mad;
479
480 if (!mad_wc || !mad_wc->recv_buf.mad)
481 return;
482
483 ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
484 mad_wc->recv_buf.grh, mad_agent->port_num);
485 if (IS_ERR(ah))
486 goto err;
487
488 BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
489
490 rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
491 mad_wc->wc->pkey_index, 0,
492 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
493 GFP_KERNEL,
494 IB_MGMT_BASE_VERSION);
495 if (IS_ERR(rsp))
496 goto err_rsp;
497
498 rsp->ah = ah;
499
500 dm_mad = rsp->mad;
501 memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
502 dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
503 dm_mad->mad_hdr.status = 0;
504
505 switch (mad_wc->recv_buf.mad->mad_hdr.method) {
506 case IB_MGMT_METHOD_GET:
507 srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
508 break;
509 case IB_MGMT_METHOD_SET:
510 dm_mad->mad_hdr.status =
511 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
512 break;
513 default:
514 dm_mad->mad_hdr.status =
515 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
516 break;
517 }
518
519 if (!ib_post_send_mad(rsp, NULL)) {
520 ib_free_recv_mad(mad_wc);
521
522 return;
523 }
524
525 ib_free_send_mad(rsp);
526
527 err_rsp:
528 rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
529 err:
530 ib_free_recv_mad(mad_wc);
531 }
532
533 static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
534 {
535 const __be16 *g = (const __be16 *)guid;
536
537 return snprintf(buf, size, "%04x:%04x:%04x:%04x",
538 be16_to_cpu(g[0]), be16_to_cpu(g[1]),
539 be16_to_cpu(g[2]), be16_to_cpu(g[3]));
540 }
541
542
543
544
545
546
547
548
549
550
551
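/**
 * srpt_refresh_port - configure a HCA port
 * @sport: SRPT HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */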
552 static int srpt_refresh_port(struct srpt_port *sport)
553 {
554 struct ib_mad_reg_req reg_req;
555 struct ib_port_modify port_modify;
556 struct ib_port_attr port_attr;
557 int ret;
558
559 memset(&port_modify, 0, sizeof(port_modify));
560 port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
561 port_modify.clr_port_cap_mask = 0;
562
563 ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
564 if (ret)
565 goto err_mod_port;
566
567 ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
568 if (ret)
569 goto err_query_port;
570
571 sport->sm_lid = port_attr.sm_lid;
572 sport->lid = port_attr.lid;
573
574 ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
575 if (ret)
576 goto err_query_port;
577
578 sport->port_guid_wwn.priv = sport;
579 srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
580 &sport->gid.global.interface_id);
581 sport->port_gid_wwn.priv = sport;
582 snprintf(sport->port_gid, sizeof(sport->port_gid),
583 "0x%016llx%016llx",
584 be64_to_cpu(sport->gid.global.subnet_prefix),
585 be64_to_cpu(sport->gid.global.interface_id));
586
587 if (!sport->mad_agent) {
588 memset(&reg_req, 0, sizeof(reg_req));
589 reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
590 reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
591 set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
592 set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
593
594 sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
595 sport->port,
596 IB_QPT_GSI,
597 &reg_req, 0,
598 srpt_mad_send_handler,
599 srpt_mad_recv_handler,
600 sport, 0);
601 if (IS_ERR(sport->mad_agent)) {
602 ret = PTR_ERR(sport->mad_agent);
603 sport->mad_agent = NULL;
604 goto err_query_port;
605 }
606 }
607
608 return 0;
609
610 err_query_port:
611
612 port_modify.set_port_cap_mask = 0;
613 port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
614 ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
615
616 err_mod_port:
617
618 return ret;
619 }
620
621
622
623
624
625
626
627 static void srpt_unregister_mad_agent(struct srpt_device *sdev)
628 {
629 struct ib_port_modify port_modify = {
630 .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
631 };
632 struct srpt_port *sport;
633 int i;
634
635 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
636 sport = &sdev->port[i - 1];
637 WARN_ON(sport->port != i);
638 if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
639 pr_err("disabling MAD processing failed.\n");
640 if (sport->mad_agent) {
641 ib_unregister_mad_agent(sport->mad_agent);
642 sport->mad_agent = NULL;
643 }
644 }
645 }
646
647
648
649
650
651
652
653
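/**
 * srpt_alloc_ioctx - allocate a SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx_size: I/O context size.
 * @buf_cache: I/O buffer cache.
 * @dir: DMA data direction.
 */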
654 static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
655 int ioctx_size,
656 struct kmem_cache *buf_cache,
657 enum dma_data_direction dir)
658 {
659 struct srpt_ioctx *ioctx;
660
661 ioctx = kzalloc(ioctx_size, GFP_KERNEL);
662 if (!ioctx)
663 goto err;
664
665 ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
666 if (!ioctx->buf)
667 goto err_free_ioctx;
668
669 ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
670 kmem_cache_size(buf_cache), dir);
671 if (ib_dma_mapping_error(sdev->device, ioctx->dma))
672 goto err_free_buf;
673
674 return ioctx;
675
676 err_free_buf:
677 kmem_cache_free(buf_cache, ioctx->buf);
678 err_free_ioctx:
679 kfree(ioctx);
680 err:
681 return NULL;
682 }
683
684
685
686
687
688
689
690
691 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
692 struct kmem_cache *buf_cache,
693 enum dma_data_direction dir)
694 {
695 if (!ioctx)
696 return;
697
698 ib_dma_unmap_single(sdev->device, ioctx->dma,
699 kmem_cache_size(buf_cache), dir);
700 kmem_cache_free(buf_cache, ioctx->buf);
701 kfree(ioctx);
702 }
703
704
705
706
707
708
709
710
711
712
713
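/**
 * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
 * @sdev: Device to allocate the I/O context ring for.
 * @ring_size: Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @buf_cache: I/O buffer cache.
 * @alignment_offset: Offset in each ring buffer at which the SRP information
 *	unit starts.
 * @dir: DMA data direction.
 */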
714 static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
715 int ring_size, int ioctx_size,
716 struct kmem_cache *buf_cache,
717 int alignment_offset,
718 enum dma_data_direction dir)
719 {
720 struct srpt_ioctx **ring;
721 int i;
722
723 WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) &&
724 ioctx_size != sizeof(struct srpt_send_ioctx));
725
726 ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
727 if (!ring)
728 goto out;
729 for (i = 0; i < ring_size; ++i) {
730 ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
731 if (!ring[i])
732 goto err;
733 ring[i]->index = i;
734 ring[i]->offset = alignment_offset;
735 }
736 goto out;
737
738 err:
739 while (--i >= 0)
740 srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
741 kvfree(ring);
742 ring = NULL;
743 out:
744 return ring;
745 }
746
747
748
749
750
751
752
753
754
755 static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
756 struct srpt_device *sdev, int ring_size,
757 struct kmem_cache *buf_cache,
758 enum dma_data_direction dir)
759 {
760 int i;
761
762 if (!ioctx_ring)
763 return;
764
765 for (i = 0; i < ring_size; ++i)
766 srpt_free_ioctx(sdev, ioctx_ring[i], buf_cache, dir);
767 kvfree(ioctx_ring);
768 }
769
770
771
772
773
774
775
776
777
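/**
 * srpt_set_cmd_state - set the state of a SCSI command
 * @ioctx: Send I/O context.
 * @new: New I/O context state.
 *
 * Does not modify the state of commands that are already in the DONE state.
 * Returns the previous command state.
 */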
778 static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
779 enum srpt_command_state new)
780 {
781 enum srpt_command_state previous;
782
783 previous = ioctx->state;
784 if (previous != SRPT_STATE_DONE)
785 ioctx->state = new;
786
787 return previous;
788 }
789
790
791
792
793
794
795
796
797
798 static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
799 enum srpt_command_state old,
800 enum srpt_command_state new)
801 {
802 enum srpt_command_state previous;
803
804 WARN_ON(!ioctx);
805 WARN_ON(old == SRPT_STATE_DONE);
806 WARN_ON(new == SRPT_STATE_NEW);
807
808 previous = ioctx->state;
809 if (previous == old)
810 ioctx->state = new;
811
812 return previous == old;
813 }
814
815
816
817
818
819
820
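/**
 * srpt_post_recv - post an IB receive request
 * @sdev: SRPT HCA pointer.
 * @ch: SRPT RDMA channel.
 * @ioctx: Receive I/O context pointer.
 */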
821 static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
822 struct srpt_recv_ioctx *ioctx)
823 {
824 struct ib_sge list;
825 struct ib_recv_wr wr;
826
827 BUG_ON(!sdev);
828 list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
829 list.length = srp_max_req_size;
830 list.lkey = sdev->lkey;
831
832 ioctx->ioctx.cqe.done = srpt_recv_done;
833 wr.wr_cqe = &ioctx->ioctx.cqe;
834 wr.next = NULL;
835 wr.sg_list = &list;
836 wr.num_sge = 1;
837
838 if (sdev->use_srq)
839 return ib_post_srq_recv(sdev->srq, &wr, NULL);
840 else
841 return ib_post_recv(ch->qp, &wr, NULL);
842 }
843
844
845
846
847
848
849
850
851
852
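/**
 * srpt_zerolength_write - perform a zero-length RDMA write
 * @ch: SRPT RDMA channel.
 *
 * The completion of this work request is reported to
 * srpt_zerolength_write_done().
 */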
853 static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
854 {
855 struct ib_rdma_wr wr = {
856 .wr = {
857 .next = NULL,
858 { .wr_cqe = &ch->zw_cqe, },
859 .opcode = IB_WR_RDMA_WRITE,
860 .send_flags = IB_SEND_SIGNALED,
861 }
862 };
863
864 pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
865 ch->qp->qp_num);
866
867 return ib_post_send(ch->qp, &wr.wr, NULL);
868 }
869
870 static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
871 {
872 struct srpt_rdma_ch *ch = cq->cq_context;
873
874 pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
875 wc->status);
876
877 if (wc->status == IB_WC_SUCCESS) {
878 srpt_process_wait_list(ch);
879 } else {
880 if (srpt_set_ch_state(ch, CH_DISCONNECTED))
881 schedule_work(&ch->release_work);
882 else
883 pr_debug("%s-%d: already disconnected.\n",
884 ch->sess_name, ch->qp->qp_num);
885 }
886 }
887
888 static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
889 struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
890 unsigned *sg_cnt)
891 {
892 enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
893 struct srpt_rdma_ch *ch = ioctx->ch;
894 struct scatterlist *prev = NULL;
895 unsigned prev_nents;
896 int ret, i;
897
898 if (nbufs == 1) {
899 ioctx->rw_ctxs = &ioctx->s_rw_ctx;
900 } else {
901 ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
902 GFP_KERNEL);
903 if (!ioctx->rw_ctxs)
904 return -ENOMEM;
905 }
906
907 for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
908 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
909 u64 remote_addr = be64_to_cpu(db->va);
910 u32 size = be32_to_cpu(db->len);
911 u32 rkey = be32_to_cpu(db->key);
912
913 ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
914 i < nbufs - 1);
915 if (ret)
916 goto unwind;
917
918 ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
919 ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
920 if (ret < 0) {
921 target_free_sgl(ctx->sg, ctx->nents);
922 goto unwind;
923 }
924
925 ioctx->n_rdma += ret;
926 ioctx->n_rw_ctx++;
927
928 if (prev) {
929 sg_unmark_end(&prev[prev_nents - 1]);
930 sg_chain(prev, prev_nents + 1, ctx->sg);
931 } else {
932 *sg = ctx->sg;
933 }
934
935 prev = ctx->sg;
936 prev_nents = ctx->nents;
937
938 *sg_cnt += ctx->nents;
939 }
940
941 return 0;
942
943 unwind:
944 while (--i >= 0) {
945 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
946
947 rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
948 ctx->sg, ctx->nents, dir);
949 target_free_sgl(ctx->sg, ctx->nents);
950 }
951 if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
952 kfree(ioctx->rw_ctxs);
953 return ret;
954 }
955
956 static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
957 struct srpt_send_ioctx *ioctx)
958 {
959 enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
960 int i;
961
962 for (i = 0; i < ioctx->n_rw_ctx; i++) {
963 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
964
965 rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
966 ctx->sg, ctx->nents, dir);
967 target_free_sgl(ctx->sg, ctx->nents);
968 }
969
970 if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
971 kfree(ioctx->rw_ctxs);
972 }
973
974 static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
975 {
976
977
978
979
980
981 BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
982 !__same_type(srp_cmd->add_data[0], (u8)0));
983
984
985
986
987
988
989 return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
990 }
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
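/**
 * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
 * @recv_ioctx: I/O context associated with the received command @srp_cmd.
 * @ioctx: I/O context that will be used for responding to the initiator.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *	written.
 * @sg: [out] scatterlist for the parsed SRP_CMD.
 * @sg_cnt: [out] length of @sg.
 * @data_len: Pointer to the variable to which the total data length of all
 *	descriptors in the SRP_CMD request will be written.
 * @imm_data_offset: [in] Offset in SRP_CMD requests at which immediate data
 *	starts.
 *
 * Returns zero upon success or a negative value upon failure.
 */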
1011 static int srpt_get_desc_tbl(struct srpt_recv_ioctx *recv_ioctx,
1012 struct srpt_send_ioctx *ioctx,
1013 struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
1014 struct scatterlist **sg, unsigned int *sg_cnt, u64 *data_len,
1015 u16 imm_data_offset)
1016 {
1017 BUG_ON(!dir);
1018 BUG_ON(!data_len);
1019
1020
1021
1022
1023
1024
1025 if (srp_cmd->buf_fmt & 0xf)
1026
1027 *dir = DMA_FROM_DEVICE;
1028 else if (srp_cmd->buf_fmt >> 4)
1029
1030 *dir = DMA_TO_DEVICE;
1031 else
1032 *dir = DMA_NONE;
1033
1034
1035 ioctx->cmd.data_direction = *dir;
1036
1037 if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
1038 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
1039 struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
1040
1041 *data_len = be32_to_cpu(db->len);
1042 return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
1043 } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
1044 ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
1045 struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
1046 int nbufs = be32_to_cpu(idb->table_desc.len) /
1047 sizeof(struct srp_direct_buf);
1048
1049 if (nbufs >
1050 (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
1051 pr_err("received unsupported SRP_CMD request type (%u out + %u in != %u / %zu)\n",
1052 srp_cmd->data_out_desc_cnt,
1053 srp_cmd->data_in_desc_cnt,
1054 be32_to_cpu(idb->table_desc.len),
1055 sizeof(struct srp_direct_buf));
1056 return -EINVAL;
1057 }
1058
1059 *data_len = be32_to_cpu(idb->len);
1060 return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
1061 sg, sg_cnt);
1062 } else if ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_IMM) {
1063 struct srp_imm_buf *imm_buf = srpt_get_desc_buf(srp_cmd);
1064 void *data = (void *)srp_cmd + imm_data_offset;
1065 uint32_t len = be32_to_cpu(imm_buf->len);
1066 uint32_t req_size = imm_data_offset + len;
1067
1068 if (req_size > srp_max_req_size) {
1069 pr_err("Immediate data (length %d + %d) exceeds request size %d\n",
1070 imm_data_offset, len, srp_max_req_size);
1071 return -EINVAL;
1072 }
1073 if (recv_ioctx->byte_len < req_size) {
1074 pr_err("Received too few data - %d < %d\n",
1075 recv_ioctx->byte_len, req_size);
1076 return -EIO;
1077 }
1078
1079
1080
1081
1082 if ((void *)(imm_buf + 1) > (void *)data) {
1083 pr_err("Received invalid write request\n");
1084 return -EINVAL;
1085 }
1086 *data_len = len;
1087 ioctx->recv_ioctx = recv_ioctx;
1088 if ((uintptr_t)data & 511) {
1089 pr_warn_once("Internal error - the receive buffers are not aligned properly.\n");
1090 return -EINVAL;
1091 }
1092 sg_init_one(&ioctx->imm_sg, data, len);
1093 *sg = &ioctx->imm_sg;
1094 *sg_cnt = 1;
1095 return 0;
1096 } else {
1097 *data_len = 0;
1098 return 0;
1099 }
1100 }
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110 static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1111 {
1112 struct ib_qp_attr *attr;
1113 int ret;
1114
1115 WARN_ON_ONCE(ch->using_rdma_cm);
1116
1117 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
1118 if (!attr)
1119 return -ENOMEM;
1120
1121 attr->qp_state = IB_QPS_INIT;
1122 attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
1123 attr->port_num = ch->sport->port;
1124
1125 ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
1126 ch->pkey, &attr->pkey_index);
1127 if (ret < 0)
1128 pr_err("Translating pkey %#x failed (%d) - using index 0\n",
1129 ch->pkey, ret);
1130
1131 ret = ib_modify_qp(qp, attr,
1132 IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
1133 IB_QP_PKEY_INDEX);
1134
1135 kfree(attr);
1136 return ret;
1137 }
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
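/**
 * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 */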
1150 static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1151 {
1152 struct ib_qp_attr qp_attr;
1153 int attr_mask;
1154 int ret;
1155
1156 WARN_ON_ONCE(ch->using_rdma_cm);
1157
1158 qp_attr.qp_state = IB_QPS_RTR;
1159 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
1160 if (ret)
1161 goto out;
1162
1163 qp_attr.max_dest_rd_atomic = 4;
1164
1165 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1166
1167 out:
1168 return ret;
1169 }
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182 static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
1183 {
1184 struct ib_qp_attr qp_attr;
1185 int attr_mask;
1186 int ret;
1187
1188 qp_attr.qp_state = IB_QPS_RTS;
1189 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
1190 if (ret)
1191 goto out;
1192
1193 qp_attr.max_rd_atomic = 4;
1194
1195 ret = ib_modify_qp(qp, &qp_attr, attr_mask);
1196
1197 out:
1198 return ret;
1199 }
1200
1201
1202
1203
1204
1205 static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
1206 {
1207 struct ib_qp_attr qp_attr;
1208
1209 qp_attr.qp_state = IB_QPS_ERR;
1210 return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
1211 }
1212
1213
1214
1215
1216
1217 static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1218 {
1219 struct srpt_send_ioctx *ioctx;
1220 int tag, cpu;
1221
1222 BUG_ON(!ch);
1223
1224 tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
1225 if (tag < 0)
1226 return NULL;
1227
1228 ioctx = ch->ioctx_ring[tag];
1229 BUG_ON(ioctx->ch != ch);
1230 ioctx->state = SRPT_STATE_NEW;
1231 WARN_ON_ONCE(ioctx->recv_ioctx);
1232 ioctx->n_rdma = 0;
1233 ioctx->n_rw_ctx = 0;
1234 ioctx->queue_status_only = false;
1235
1236
1237
1238
1239 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1240 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1241 ioctx->cmd.map_tag = tag;
1242 ioctx->cmd.map_cpu = cpu;
1243
1244 return ioctx;
1245 }
1246
1247
1248
1249
1250
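/**
 * srpt_abort_cmd - abort a SCSI command
 * @ioctx: I/O context associated with the SCSI command.
 *
 * Returns the command state before the abort was processed.
 */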
1251 static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1252 {
1253 enum srpt_command_state state;
1254
1255 BUG_ON(!ioctx);
1256
1257
1258
1259
1260
1261
1262 state = ioctx->state;
1263 switch (state) {
1264 case SRPT_STATE_NEED_DATA:
1265 ioctx->state = SRPT_STATE_DATA_IN;
1266 break;
1267 case SRPT_STATE_CMD_RSP_SENT:
1268 case SRPT_STATE_MGMT_RSP_SENT:
1269 ioctx->state = SRPT_STATE_DONE;
1270 break;
1271 default:
1272 WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
1273 __func__, state);
1274 break;
1275 }
1276
1277 pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
1278 ioctx->state, ioctx->cmd.tag);
1279
1280 switch (state) {
1281 case SRPT_STATE_NEW:
1282 case SRPT_STATE_DATA_IN:
1283 case SRPT_STATE_MGMT:
1284 case SRPT_STATE_DONE:
1285
1286
1287
1288
1289 break;
1290 case SRPT_STATE_NEED_DATA:
1291 pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
1292 transport_generic_request_failure(&ioctx->cmd,
1293 TCM_CHECK_CONDITION_ABORT_CMD);
1294 break;
1295 case SRPT_STATE_CMD_RSP_SENT:
1296
1297
1298
1299
1300 transport_generic_free_cmd(&ioctx->cmd, 0);
1301 break;
1302 case SRPT_STATE_MGMT_RSP_SENT:
1303 transport_generic_free_cmd(&ioctx->cmd, 0);
1304 break;
1305 default:
1306 WARN(1, "Unexpected command state (%d)", state);
1307 break;
1308 }
1309
1310 return state;
1311 }
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
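/**
 * srpt_rdma_read_done - RDMA read completion callback
 * @cq: Completion queue.
 * @wc: Work completion.
 *
 * Invoked when an RDMA read from the initiator has finished; hands the
 * command over to the target core for execution.
 */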
1323 static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
1324 {
1325 struct srpt_rdma_ch *ch = cq->cq_context;
1326 struct srpt_send_ioctx *ioctx =
1327 container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
1328
1329 WARN_ON(ioctx->n_rdma <= 0);
1330 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1331 ioctx->n_rdma = 0;
1332
1333 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1334 pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
1335 ioctx, wc->status);
1336 srpt_abort_cmd(ioctx);
1337 return;
1338 }
1339
1340 if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
1341 SRPT_STATE_DATA_IN))
1342 target_execute_cmd(&ioctx->cmd);
1343 else
1344 pr_err("%s[%d]: wrong state = %d\n", __func__,
1345 __LINE__, ioctx->state);
1346 }
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
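/**
 * srpt_build_cmd_rsp - build a SRP_RSP response
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *	be built in the buffer ioctx->buf points at and hence this function
 *	will overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * An SRP_RSP response contains a SCSI status or service response.
 *
 * Returns the size in bytes of the SRP_RSP response.
 */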
1363 static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1364 struct srpt_send_ioctx *ioctx, u64 tag,
1365 int status)
1366 {
1367 struct se_cmd *cmd = &ioctx->cmd;
1368 struct srp_rsp *srp_rsp;
1369 const u8 *sense_data;
1370 int sense_data_len, max_sense_len;
1371 u32 resid = cmd->residual_count;
1372
1373
1374
1375
1376
1377 WARN_ON(status & 1);
1378
1379 srp_rsp = ioctx->ioctx.buf;
1380 BUG_ON(!srp_rsp);
1381
1382 sense_data = ioctx->sense_data;
1383 sense_data_len = ioctx->cmd.scsi_sense_length;
1384 WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
1385
1386 memset(srp_rsp, 0, sizeof(*srp_rsp));
1387 srp_rsp->opcode = SRP_RSP;
1388 srp_rsp->req_lim_delta =
1389 cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1390 srp_rsp->tag = tag;
1391 srp_rsp->status = status;
1392
1393 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1394 if (cmd->data_direction == DMA_TO_DEVICE) {
1395
1396 srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
1397 srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
1398 } else if (cmd->data_direction == DMA_FROM_DEVICE) {
1399
1400 srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
1401 srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
1402 }
1403 } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1404 if (cmd->data_direction == DMA_TO_DEVICE) {
1405
1406 srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
1407 srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
1408 } else if (cmd->data_direction == DMA_FROM_DEVICE) {
1409
1410 srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
1411 srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
1412 }
1413 }
1414
1415 if (sense_data_len) {
1416 BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
1417 max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
1418 if (sense_data_len > max_sense_len) {
1419 pr_warn("truncated sense data from %d to %d bytes\n",
1420 sense_data_len, max_sense_len);
1421 sense_data_len = max_sense_len;
1422 }
1423
1424 srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
1425 srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
1426 memcpy(srp_rsp + 1, sense_data, sense_data_len);
1427 }
1428
1429 return sizeof(*srp_rsp) + sense_data_len;
1430 }
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445 static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1446 struct srpt_send_ioctx *ioctx,
1447 u8 rsp_code, u64 tag)
1448 {
1449 struct srp_rsp *srp_rsp;
1450 int resp_data_len;
1451 int resp_len;
1452
1453 resp_data_len = 4;
1454 resp_len = sizeof(*srp_rsp) + resp_data_len;
1455
1456 srp_rsp = ioctx->ioctx.buf;
1457 BUG_ON(!srp_rsp);
1458 memset(srp_rsp, 0, sizeof(*srp_rsp));
1459
1460 srp_rsp->opcode = SRP_RSP;
1461 srp_rsp->req_lim_delta =
1462 cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1463 srp_rsp->tag = tag;
1464
1465 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
1466 srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
1467 srp_rsp->data[3] = rsp_code;
1468
1469 return resp_len;
1470 }
1471
1472 static int srpt_check_stop_free(struct se_cmd *cmd)
1473 {
1474 struct srpt_send_ioctx *ioctx = container_of(cmd,
1475 struct srpt_send_ioctx, cmd);
1476
1477 return target_put_sess_cmd(&ioctx->cmd);
1478 }
1479
1480
1481
1482
1483
1484
1485
1486 static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
1487 struct srpt_recv_ioctx *recv_ioctx,
1488 struct srpt_send_ioctx *send_ioctx)
1489 {
1490 struct se_cmd *cmd;
1491 struct srp_cmd *srp_cmd;
1492 struct scatterlist *sg = NULL;
1493 unsigned sg_cnt = 0;
1494 u64 data_len;
1495 enum dma_data_direction dir;
1496 int rc;
1497
1498 BUG_ON(!send_ioctx);
1499
1500 srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1501 cmd = &send_ioctx->cmd;
1502 cmd->tag = srp_cmd->tag;
1503
1504 switch (srp_cmd->task_attr) {
1505 case SRP_CMD_SIMPLE_Q:
1506 cmd->sam_task_attr = TCM_SIMPLE_TAG;
1507 break;
1508 case SRP_CMD_ORDERED_Q:
1509 default:
1510 cmd->sam_task_attr = TCM_ORDERED_TAG;
1511 break;
1512 case SRP_CMD_HEAD_OF_Q:
1513 cmd->sam_task_attr = TCM_HEAD_TAG;
1514 break;
1515 case SRP_CMD_ACA:
1516 cmd->sam_task_attr = TCM_ACA_TAG;
1517 break;
1518 }
1519
1520 rc = srpt_get_desc_tbl(recv_ioctx, send_ioctx, srp_cmd, &dir,
1521 &sg, &sg_cnt, &data_len, ch->imm_data_offset);
1522 if (rc) {
1523 if (rc != -EAGAIN) {
1524 pr_err("0x%llx: parsing SRP descriptor table failed.\n",
1525 srp_cmd->tag);
1526 }
1527 goto busy;
1528 }
1529
1530 rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
1531 &send_ioctx->sense_data[0],
1532 scsilun_to_int(&srp_cmd->lun), data_len,
1533 TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
1534 sg, sg_cnt, NULL, 0, NULL, 0);
1535 if (rc != 0) {
1536 pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
1537 srp_cmd->tag);
1538 goto busy;
1539 }
1540 return;
1541
1542 busy:
1543 target_send_busy(cmd);
1544 }
1545
1546 static int srp_tmr_to_tcm(int fn)
1547 {
1548 switch (fn) {
1549 case SRP_TSK_ABORT_TASK:
1550 return TMR_ABORT_TASK;
1551 case SRP_TSK_ABORT_TASK_SET:
1552 return TMR_ABORT_TASK_SET;
1553 case SRP_TSK_CLEAR_TASK_SET:
1554 return TMR_CLEAR_TASK_SET;
1555 case SRP_TSK_LUN_RESET:
1556 return TMR_LUN_RESET;
1557 case SRP_TSK_CLEAR_ACA:
1558 return TMR_CLEAR_ACA;
1559 default:
1560 return -1;
1561 }
1562 }
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575 static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
1576 struct srpt_recv_ioctx *recv_ioctx,
1577 struct srpt_send_ioctx *send_ioctx)
1578 {
1579 struct srp_tsk_mgmt *srp_tsk;
1580 struct se_cmd *cmd;
1581 struct se_session *sess = ch->sess;
1582 int tcm_tmr;
1583 int rc;
1584
1585 BUG_ON(!send_ioctx);
1586
1587 srp_tsk = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1588 cmd = &send_ioctx->cmd;
1589
1590 pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
1591 srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
1592 ch->sess);
1593
1594 srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
1595 send_ioctx->cmd.tag = srp_tsk->tag;
1596 tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
1597 rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
1598 scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
1599 GFP_KERNEL, srp_tsk->task_tag,
1600 TARGET_SCF_ACK_KREF);
1601 if (rc != 0) {
1602 send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
1603 cmd->se_tfo->queue_tm_rsp(cmd);
1604 }
1605 return;
1606 }
1607
1608
1609
1610
1611
1612
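/**
 * srpt_handle_new_iu - process a newly received information unit
 * @ch: RDMA channel through which the information unit has been received.
 * @recv_ioctx: Receive I/O context associated with the information unit.
 *
 * Returns true if the information unit has been processed and false if it
 * has been queued on the channel's wait list.
 */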
1613 static bool
1614 srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
1615 {
1616 struct srpt_send_ioctx *send_ioctx = NULL;
1617 struct srp_cmd *srp_cmd;
1618 bool res = false;
1619 u8 opcode;
1620
1621 BUG_ON(!ch);
1622 BUG_ON(!recv_ioctx);
1623
1624 if (unlikely(ch->state == CH_CONNECTING))
1625 goto push;
1626
1627 ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
1628 recv_ioctx->ioctx.dma,
1629 recv_ioctx->ioctx.offset + srp_max_req_size,
1630 DMA_FROM_DEVICE);
1631
1632 srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1633 opcode = srp_cmd->opcode;
1634 if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
1635 send_ioctx = srpt_get_send_ioctx(ch);
1636 if (unlikely(!send_ioctx))
1637 goto push;
1638 }
1639
1640 if (!list_empty(&recv_ioctx->wait_list)) {
1641 WARN_ON_ONCE(!ch->processing_wait_list);
1642 list_del_init(&recv_ioctx->wait_list);
1643 }
1644
1645 switch (opcode) {
1646 case SRP_CMD:
1647 srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
1648 break;
1649 case SRP_TSK_MGMT:
1650 srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
1651 break;
1652 case SRP_I_LOGOUT:
1653 pr_err("Not yet implemented: SRP_I_LOGOUT\n");
1654 break;
1655 case SRP_CRED_RSP:
1656 pr_debug("received SRP_CRED_RSP\n");
1657 break;
1658 case SRP_AER_RSP:
1659 pr_debug("received SRP_AER_RSP\n");
1660 break;
1661 case SRP_RSP:
1662 pr_err("Received SRP_RSP\n");
1663 break;
1664 default:
1665 pr_err("received IU with unknown opcode 0x%x\n", opcode);
1666 break;
1667 }
1668
1669 if (!send_ioctx || !send_ioctx->recv_ioctx)
1670 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
1671 res = true;
1672
1673 out:
1674 return res;
1675
1676 push:
1677 if (list_empty(&recv_ioctx->wait_list)) {
1678 WARN_ON_ONCE(ch->processing_wait_list);
1679 list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
1680 }
1681 goto out;
1682 }
1683
1684 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1685 {
1686 struct srpt_rdma_ch *ch = cq->cq_context;
1687 struct srpt_recv_ioctx *ioctx =
1688 container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
1689
1690 if (wc->status == IB_WC_SUCCESS) {
1691 int req_lim;
1692
1693 req_lim = atomic_dec_return(&ch->req_lim);
1694 if (unlikely(req_lim < 0))
1695 pr_err("req_lim = %d < 0\n", req_lim);
1696 ioctx->byte_len = wc->byte_len;
1697 srpt_handle_new_iu(ch, ioctx);
1698 } else {
1699 pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
1700 ioctx, wc->status);
1701 }
1702 }
1703
1704
1705
1706
1707
1708
1709 static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
1710 {
1711 struct srpt_recv_ioctx *recv_ioctx, *tmp;
1712
1713 WARN_ON_ONCE(ch->state == CH_CONNECTING);
1714
1715 if (list_empty(&ch->cmd_wait_list))
1716 return;
1717
1718 WARN_ON_ONCE(ch->processing_wait_list);
1719 ch->processing_wait_list = true;
1720 list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
1721 wait_list) {
1722 if (!srpt_handle_new_iu(ch, recv_ioctx))
1723 break;
1724 }
1725 ch->processing_wait_list = false;
1726 }
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
1746 {
1747 struct srpt_rdma_ch *ch = cq->cq_context;
1748 struct srpt_send_ioctx *ioctx =
1749 container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
1750 enum srpt_command_state state;
1751
1752 state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1753
1754 WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
1755 state != SRPT_STATE_MGMT_RSP_SENT);
1756
1757 atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
1758
1759 if (wc->status != IB_WC_SUCCESS)
1760 pr_info("sending response for ioctx 0x%p failed with status %d\n",
1761 ioctx, wc->status);
1762
1763 if (state != SRPT_STATE_DONE) {
1764 transport_generic_free_cmd(&ioctx->cmd, 0);
1765 } else {
1766 pr_err("IB completion has been received too late for wr_id = %u.\n",
1767 ioctx->ioctx.index);
1768 }
1769
1770 srpt_process_wait_list(ch);
1771 }
1772
1773
1774
1775
1776
1777 static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
1778 {
1779 struct ib_qp_init_attr *qp_init;
1780 struct srpt_port *sport = ch->sport;
1781 struct srpt_device *sdev = sport->sdev;
1782 const struct ib_device_attr *attrs = &sdev->device->attrs;
1783 int sq_size = sport->port_attrib.srp_sq_size;
1784 int i, ret;
1785
1786 WARN_ON(ch->rq_size < 1);
1787
1788 ret = -ENOMEM;
1789 qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
1790 if (!qp_init)
1791 goto out;
1792
1793 retry:
1794 ch->cq = ib_alloc_cq_any(sdev->device, ch, ch->rq_size + sq_size,
1795 IB_POLL_WORKQUEUE);
1796 if (IS_ERR(ch->cq)) {
1797 ret = PTR_ERR(ch->cq);
1798 pr_err("failed to create CQ cqe= %d ret= %d\n",
1799 ch->rq_size + sq_size, ret);
1800 goto out;
1801 }
1802
1803 qp_init->qp_context = (void *)ch;
1804 qp_init->event_handler
1805 = (void(*)(struct ib_event *, void*))srpt_qp_event;
1806 qp_init->send_cq = ch->cq;
1807 qp_init->recv_cq = ch->cq;
1808 qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
1809 qp_init->qp_type = IB_QPT_RC;
1810
1811
1812
1813
1814
1815
1816
1817 qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
1818 qp_init->cap.max_rdma_ctxs = sq_size / 2;
1819 qp_init->cap.max_send_sge = min(attrs->max_send_sge,
1820 SRPT_MAX_SG_PER_WQE);
1821 qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
1822 SRPT_MAX_SG_PER_WQE);
1823 qp_init->port_num = ch->sport->port;
1824 if (sdev->use_srq) {
1825 qp_init->srq = sdev->srq;
1826 } else {
1827 qp_init->cap.max_recv_wr = ch->rq_size;
1828 qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
1829 SRPT_MAX_SG_PER_WQE);
1830 }
1831
1832 if (ch->using_rdma_cm) {
1833 ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
1834 ch->qp = ch->rdma_cm.cm_id->qp;
1835 } else {
1836 ch->qp = ib_create_qp(sdev->pd, qp_init);
1837 if (!IS_ERR(ch->qp)) {
1838 ret = srpt_init_ch_qp(ch, ch->qp);
1839 if (ret)
1840 ib_destroy_qp(ch->qp);
1841 } else {
1842 ret = PTR_ERR(ch->qp);
1843 }
1844 }
1845 if (ret) {
1846 bool retry = sq_size > MIN_SRPT_SQ_SIZE;
1847
1848 if (retry) {
1849 pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
1850 sq_size, ret);
1851 ib_free_cq(ch->cq);
1852 sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
1853 goto retry;
1854 } else {
1855 pr_err("failed to create queue pair with sq_size = %d (%d)\n",
1856 sq_size, ret);
1857 goto err_destroy_cq;
1858 }
1859 }
1860
1861 atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
1862
1863 pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
1864 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
1865 qp_init->cap.max_send_wr, ch);
1866
1867 if (!sdev->use_srq)
1868 for (i = 0; i < ch->rq_size; i++)
1869 srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
1870
1871 out:
1872 kfree(qp_init);
1873 return ret;
1874
1875 err_destroy_cq:
1876 ch->qp = NULL;
1877 ib_free_cq(ch->cq);
1878 goto out;
1879 }
1880
1881 static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
1882 {
1883 ib_destroy_qp(ch->qp);
1884 ib_free_cq(ch->cq);
1885 }
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
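/**
 * srpt_close_ch - close a RDMA channel
 * @ch: Channel to close.
 *
 * Make sure all resources associated with the channel will be deallocated at
 * an appropriate time.
 *
 * Returns true if and only if the channel state has been changed into
 * CH_DRAINING.
 */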
1897 static bool srpt_close_ch(struct srpt_rdma_ch *ch)
1898 {
1899 int ret;
1900
1901 if (!srpt_set_ch_state(ch, CH_DRAINING)) {
1902 pr_debug("%s: already closed\n", ch->sess_name);
1903 return false;
1904 }
1905
1906 kref_get(&ch->kref);
1907
1908 ret = srpt_ch_qp_err(ch);
1909 if (ret < 0)
1910 pr_err("%s-%d: changing queue pair into error state failed: %d\n",
1911 ch->sess_name, ch->qp->qp_num, ret);
1912
1913 ret = srpt_zerolength_write(ch);
1914 if (ret < 0) {
1915 pr_err("%s-%d: queuing zero-length write failed: %d\n",
1916 ch->sess_name, ch->qp->qp_num, ret);
1917 if (srpt_set_ch_state(ch, CH_DISCONNECTED))
1918 schedule_work(&ch->release_work);
1919 else
1920 WARN_ON_ONCE(true);
1921 }
1922
1923 kref_put(&ch->kref, srpt_free_ch);
1924
1925 return true;
1926 }
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937 static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
1938 {
1939 int ret;
1940
1941 if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
1942 return -ENOTCONN;
1943
1944 if (ch->using_rdma_cm) {
1945 ret = rdma_disconnect(ch->rdma_cm.cm_id);
1946 } else {
1947 ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
1948 if (ret < 0)
1949 ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
1950 }
1951
1952 if (ret < 0 && srpt_close_ch(ch))
1953 ret = 0;
1954
1955 return ret;
1956 }
1957
1958 static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
1959 {
1960 struct srpt_nexus *nexus;
1961 struct srpt_rdma_ch *ch2;
1962 bool res = true;
1963
1964 rcu_read_lock();
1965 list_for_each_entry(nexus, &sport->nexus_list, entry) {
1966 list_for_each_entry(ch2, &nexus->ch_list, list) {
1967 if (ch2 == ch) {
1968 res = false;
1969 goto done;
1970 }
1971 }
1972 }
1973 done:
1974 rcu_read_unlock();
1975
1976 return res;
1977 }
1978
1979
1980 static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
1981 {
1982 struct srpt_port *sport = ch->sport;
1983
1984 pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
1985 ch->state);
1986
1987 mutex_lock(&sport->mutex);
1988 srpt_disconnect_ch(ch);
1989 mutex_unlock(&sport->mutex);
1990
1991 while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
1992 5 * HZ) == 0)
1993 pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
1994 ch->sess_name, ch->qp->qp_num, ch->state);
1995
1996 }
1997
1998 static void __srpt_close_all_ch(struct srpt_port *sport)
1999 {
2000 struct srpt_nexus *nexus;
2001 struct srpt_rdma_ch *ch;
2002
2003 lockdep_assert_held(&sport->mutex);
2004
2005 list_for_each_entry(nexus, &sport->nexus_list, entry) {
2006 list_for_each_entry(ch, &nexus->ch_list, list) {
2007 if (srpt_disconnect_ch(ch) >= 0)
2008 pr_info("Closing channel %s because target %s_%d has been disabled\n",
2009 ch->sess_name,
2010 dev_name(&sport->sdev->device->dev),
2011 sport->port);
2012 srpt_close_ch(ch);
2013 }
2014 }
2015 }
2016
2017
2018
2019
2020
2021 static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
2022 const u8 i_port_id[16],
2023 const u8 t_port_id[16])
2024 {
2025 struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;
2026
2027 for (;;) {
2028 mutex_lock(&sport->mutex);
2029 list_for_each_entry(n, &sport->nexus_list, entry) {
2030 if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
2031 memcmp(n->t_port_id, t_port_id, 16) == 0) {
2032 nexus = n;
2033 break;
2034 }
2035 }
2036 if (!nexus && tmp_nexus) {
2037 list_add_tail_rcu(&tmp_nexus->entry,
2038 &sport->nexus_list);
2039 swap(nexus, tmp_nexus);
2040 }
2041 mutex_unlock(&sport->mutex);
2042
2043 if (nexus)
2044 break;
2045 tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
2046 if (!tmp_nexus) {
2047 nexus = ERR_PTR(-ENOMEM);
2048 break;
2049 }
2050 INIT_LIST_HEAD(&tmp_nexus->ch_list);
2051 memcpy(tmp_nexus->i_port_id, i_port_id, 16);
2052 memcpy(tmp_nexus->t_port_id, t_port_id, 16);
2053 }
2054
2055 kfree(tmp_nexus);
2056
2057 return nexus;
2058 }
2059
2060 static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
2061 __must_hold(&sport->mutex)
2062 {
2063 lockdep_assert_held(&sport->mutex);
2064
2065 if (sport->enabled == enabled)
2066 return;
2067 sport->enabled = enabled;
2068 if (!enabled)
2069 __srpt_close_all_ch(sport);
2070 }
2071
2072 static void srpt_free_ch(struct kref *kref)
2073 {
2074 struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
2075
2076 kfree_rcu(ch, rcu);
2077 }
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087 static void srpt_release_channel_work(struct work_struct *w)
2088 {
2089 struct srpt_rdma_ch *ch;
2090 struct srpt_device *sdev;
2091 struct srpt_port *sport;
2092 struct se_session *se_sess;
2093
2094 ch = container_of(w, struct srpt_rdma_ch, release_work);
2095 pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);
2096
2097 sdev = ch->sport->sdev;
2098 BUG_ON(!sdev);
2099
2100 se_sess = ch->sess;
2101 BUG_ON(!se_sess);
2102
2103 target_sess_cmd_list_set_waiting(se_sess);
2104 target_wait_for_sess_cmds(se_sess);
2105
2106 target_remove_session(se_sess);
2107 ch->sess = NULL;
2108
2109 if (ch->using_rdma_cm)
2110 rdma_destroy_id(ch->rdma_cm.cm_id);
2111 else
2112 ib_destroy_cm_id(ch->ib_cm.cm_id);
2113
2114 sport = ch->sport;
2115 mutex_lock(&sport->mutex);
2116 list_del_rcu(&ch->list);
2117 mutex_unlock(&sport->mutex);
2118
2119 srpt_destroy_ch_ib(ch);
2120
2121 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2122 ch->sport->sdev, ch->rq_size,
2123 ch->rsp_buf_cache, DMA_TO_DEVICE);
2124
2125 kmem_cache_destroy(ch->rsp_buf_cache);
2126
2127 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
2128 sdev, ch->rq_size,
2129 ch->req_buf_cache, DMA_FROM_DEVICE);
2130
2131 kmem_cache_destroy(ch->req_buf_cache);
2132
2133 wake_up(&sport->ch_releaseQ);
2134
2135 kref_put(&ch->kref, srpt_free_ch);
2136 }
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
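/**
 * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
 * @sdev: HCA through which the login request was received.
 * @ib_cm_id: IB/CM connection identifier in case of IB/CM.
 * @rdma_cm_id: RDMA/CM connection identifier in case of RDMA/CM.
 * @port_num: Port through which the REQ message was received.
 * @pkey: P_Key of the incoming connection.
 * @req: SRP login request.
 * @src_addr: GID (IB/CM case) or IP address (RDMA/CM case) of the initiator.
 *
 * Ownership of the cm_id is transferred to the target session if this
 * function returns zero. Otherwise the caller remains the owner of cm_id.
 */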
2152 static int srpt_cm_req_recv(struct srpt_device *const sdev,
2153 struct ib_cm_id *ib_cm_id,
2154 struct rdma_cm_id *rdma_cm_id,
2155 u8 port_num, __be16 pkey,
2156 const struct srp_login_req *req,
2157 const char *src_addr)
2158 {
2159 struct srpt_port *sport = &sdev->port[port_num - 1];
2160 struct srpt_nexus *nexus;
2161 struct srp_login_rsp *rsp = NULL;
2162 struct srp_login_rej *rej = NULL;
2163 union {
2164 struct rdma_conn_param rdma_cm;
2165 struct ib_cm_rep_param ib_cm;
2166 } *rep_param = NULL;
2167 struct srpt_rdma_ch *ch = NULL;
2168 char i_port_id[36];
2169 u32 it_iu_len;
2170 int i, tag_num, tag_size, ret;
2171
2172 WARN_ON_ONCE(irqs_disabled());
2173
2174 if (WARN_ON(!sdev || !req))
2175 return -EINVAL;
2176
2177 it_iu_len = be32_to_cpu(req->req_it_iu_len);
2178
2179 pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
2180 req->initiator_port_id, req->target_port_id, it_iu_len,
2181 port_num, &sport->gid, be16_to_cpu(pkey));
2182
2183 nexus = srpt_get_nexus(sport, req->initiator_port_id,
2184 req->target_port_id);
2185 if (IS_ERR(nexus)) {
2186 ret = PTR_ERR(nexus);
2187 goto out;
2188 }
2189
2190 ret = -ENOMEM;
2191 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
2192 rej = kzalloc(sizeof(*rej), GFP_KERNEL);
2193 rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
2194 if (!rsp || !rej || !rep_param)
2195 goto out;
2196
2197 ret = -EINVAL;
2198 if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2199 rej->reason = cpu_to_be32(
2200 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2201 pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
2202 it_iu_len, 64, srp_max_req_size);
2203 goto reject;
2204 }
2205
2206 if (!sport->enabled) {
2207 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2208 pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
2209 dev_name(&sport->sdev->device->dev), port_num);
2210 goto reject;
2211 }
2212
2213 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2214 || *(__be64 *)(req->target_port_id + 8) !=
2215 cpu_to_be64(srpt_service_guid)) {
2216 rej->reason = cpu_to_be32(
2217 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2218 pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
2219 goto reject;
2220 }
2221
2222 ret = -ENOMEM;
2223 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
2224 if (!ch) {
2225 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2226 pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
2227 goto reject;
2228 }
2229
2230 kref_init(&ch->kref);
2231 ch->pkey = be16_to_cpu(pkey);
2232 ch->nexus = nexus;
2233 ch->zw_cqe.done = srpt_zerolength_write_done;
2234 INIT_WORK(&ch->release_work, srpt_release_channel_work);
2235 ch->sport = sport;
2236 if (ib_cm_id) {
2237 ch->ib_cm.cm_id = ib_cm_id;
2238 ib_cm_id->context = ch;
2239 } else {
2240 ch->using_rdma_cm = true;
2241 ch->rdma_cm.cm_id = rdma_cm_id;
2242 rdma_cm_id->context = ch;
2243 }
2244
2245
2246
2247
2248
2249 ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
2250 spin_lock_init(&ch->spinlock);
2251 ch->state = CH_CONNECTING;
2252 INIT_LIST_HEAD(&ch->cmd_wait_list);
2253 ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
2254
2255 ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
2256 512, 0, NULL);
2257 if (!ch->rsp_buf_cache)
2258 goto free_ch;
2259
2260 ch->ioctx_ring = (struct srpt_send_ioctx **)
2261 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2262 sizeof(*ch->ioctx_ring[0]),
2263 ch->rsp_buf_cache, 0, DMA_TO_DEVICE);
2264 if (!ch->ioctx_ring) {
2265 pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
2266 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2267 goto free_rsp_cache;
2268 }
2269
2270 for (i = 0; i < ch->rq_size; i++)
2271 ch->ioctx_ring[i]->ch = ch;
2272 if (!sdev->use_srq) {
2273 u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ?
2274 be16_to_cpu(req->imm_data_offset) : 0;
2275 u16 alignment_offset;
2276 u32 req_sz;
2277
2278 if (req->req_flags & SRP_IMMED_REQUESTED)
2279 pr_debug("imm_data_offset = %d\n",
2280 be16_to_cpu(req->imm_data_offset));
2281 if (imm_data_offset >= sizeof(struct srp_cmd)) {
2282 ch->imm_data_offset = imm_data_offset;
2283 rsp->rsp_flags |= SRP_LOGIN_RSP_IMMED_SUPP;
2284 } else {
2285 ch->imm_data_offset = 0;
2286 }
2287 alignment_offset = round_up(imm_data_offset, 512) -
2288 imm_data_offset;
2289 req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
2290 ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
2291 512, 0, NULL);
2292 if (!ch->req_buf_cache)
2293 goto free_rsp_ring;
2294
2295 ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
2296 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
2297 sizeof(*ch->ioctx_recv_ring[0]),
2298 ch->req_buf_cache,
2299 alignment_offset,
2300 DMA_FROM_DEVICE);
2301 if (!ch->ioctx_recv_ring) {
2302 pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
2303 rej->reason =
2304 cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2305 goto free_recv_cache;
2306 }
2307 for (i = 0; i < ch->rq_size; i++)
2308 INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
2309 }
2310
2311 ret = srpt_create_ch_ib(ch);
2312 if (ret) {
2313 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2314 pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
2315 goto free_recv_ring;
2316 }
2317
2318 strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
2319 snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
2320 be64_to_cpu(*(__be64 *)nexus->i_port_id),
2321 be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
2322
2323 pr_debug("registering session %s\n", ch->sess_name);
2324
2325 tag_num = ch->rq_size;
2326 tag_size = 1;
2327 if (sport->port_guid_tpg.se_tpg_wwn)
2328 ch->sess = target_setup_session(&sport->port_guid_tpg, tag_num,
2329 tag_size, TARGET_PROT_NORMAL,
2330 ch->sess_name, ch, NULL);
2331 if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
2332 ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
2333 tag_size, TARGET_PROT_NORMAL, i_port_id,
2334 ch, NULL);
2335 /* Retry without the leading "0x" prefix. */
2336 if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
2337 ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
2338 tag_size, TARGET_PROT_NORMAL,
2339 i_port_id + 2, ch, NULL);
2340 if (IS_ERR_OR_NULL(ch->sess)) {
2341 WARN_ON_ONCE(ch->sess == NULL);
2342 ret = PTR_ERR(ch->sess);
2343 ch->sess = NULL;
2344 pr_info("Rejected login for initiator %s: ret = %d.\n",
2345 ch->sess_name, ret);
2346 rej->reason = cpu_to_be32(ret == -ENOMEM ?
2347 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
2348 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2349 goto destroy_ib;
2350 }
2351
2352 mutex_lock(&sport->mutex);
2353
2354 if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
2355 struct srpt_rdma_ch *ch2;
2356
2357 list_for_each_entry(ch2, &nexus->ch_list, list) {
2358 if (srpt_disconnect_ch(ch2) < 0)
2359 continue;
2360 pr_info("Relogin - closed existing channel %s\n",
2361 ch2->sess_name);
2362 rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
2363 }
2364 } else {
2365 rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
2366 }
2367
2368 list_add_tail_rcu(&ch->list, &nexus->ch_list);
2369
2370 if (!sport->enabled) {
2371 rej->reason = cpu_to_be32(
2372 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2373 pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
2374 dev_name(&sdev->device->dev), port_num);
2375 mutex_unlock(&sport->mutex);
2376 goto reject;
2377 }
2378
2379 mutex_unlock(&sport->mutex);
2380
2381 ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp);
2382 if (ret) {
2383 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2384 pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
2385 ret);
2386 goto reject;
2387 }
2388
2389 pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
2390 ch->sess_name, ch);
2391
2392 /* Build the SRP_LOGIN_RSP response. */
2393 rsp->opcode = SRP_LOGIN_RSP;
2394 rsp->tag = req->tag;
2395 rsp->max_it_iu_len = cpu_to_be32(srp_max_req_size);
2396 rsp->max_ti_iu_len = req->req_it_iu_len;
2397 ch->max_ti_iu_len = it_iu_len;
2398 rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
2399 SRP_BUF_FORMAT_INDIRECT);
2400 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2401 atomic_set(&ch->req_lim, ch->rq_size);
2402 atomic_set(&ch->req_lim_delta, 0);
2403
2404 /* Build the connection manager (CM) reply. */
2405 if (ch->using_rdma_cm) {
2406 rep_param->rdma_cm.private_data = (void *)rsp;
2407 rep_param->rdma_cm.private_data_len = sizeof(*rsp);
2408 rep_param->rdma_cm.rnr_retry_count = 7;
2409 rep_param->rdma_cm.flow_control = 1;
2410 rep_param->rdma_cm.responder_resources = 4;
2411 rep_param->rdma_cm.initiator_depth = 4;
2412 } else {
2413 rep_param->ib_cm.qp_num = ch->qp->qp_num;
2414 rep_param->ib_cm.private_data = (void *)rsp;
2415 rep_param->ib_cm.private_data_len = sizeof(*rsp);
2416 rep_param->ib_cm.rnr_retry_count = 7;
2417 rep_param->ib_cm.flow_control = 1;
2418 rep_param->ib_cm.failover_accepted = 0;
2419 rep_param->ib_cm.srq = 1;
2420 rep_param->ib_cm.responder_resources = 4;
2421 rep_param->ib_cm.initiator_depth = 4;
2422 }
2423
2424 /*
2425  * Hold the sport mutex while accepting a connection to avoid that
2426  * srpt_disconnect_ch() is invoked concurrently with this code.
2427  */
2428 mutex_lock(&sport->mutex);
2429 if (sport->enabled && ch->state == CH_CONNECTING) {
2430 if (ch->using_rdma_cm)
2431 ret = rdma_accept(rdma_cm_id, &rep_param->rdma_cm);
2432 else
2433 ret = ib_send_cm_rep(ib_cm_id, &rep_param->ib_cm);
2434 } else {
2435 ret = -EINVAL;
2436 }
2437 mutex_unlock(&sport->mutex);
2438
2439 switch (ret) {
2440 case 0:
2441 break;
2442 case -EINVAL:
2443 goto reject;
2444 default:
2445 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2446 pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
2447 ret);
2448 goto reject;
2449 }
2450
2451 goto out;
2452
2453 destroy_ib:
2454 srpt_destroy_ch_ib(ch);
2455
2456 free_recv_ring:
2457 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
2458 ch->sport->sdev, ch->rq_size,
2459 ch->req_buf_cache, DMA_FROM_DEVICE);
2460
2461 free_recv_cache:
2462 kmem_cache_destroy(ch->req_buf_cache);
2463
2464 free_rsp_ring:
2465 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
2466 ch->sport->sdev, ch->rq_size,
2467 ch->rsp_buf_cache, DMA_TO_DEVICE);
2468
2469 free_rsp_cache:
2470 kmem_cache_destroy(ch->rsp_buf_cache);
2471
2472 free_ch:
2473 if (rdma_cm_id)
2474 rdma_cm_id->context = NULL;
2475 else
2476 ib_cm_id->context = NULL;
2477 kfree(ch);
2478 ch = NULL;
2479
2480 WARN_ON_ONCE(ret == 0);
2481
2482 reject:
2483 pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
2484 rej->opcode = SRP_LOGIN_REJ;
2485 rej->tag = req->tag;
2486 rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
2487 SRP_BUF_FORMAT_INDIRECT);
2488
2489 if (rdma_cm_id)
2490 rdma_reject(rdma_cm_id, rej, sizeof(*rej));
2491 else
2492 ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2493 rej, sizeof(*rej));
2494
2495 if (ch && ch->sess) {
2496 srpt_close_ch(ch);
2497 /*
2498  * Tell the caller not to free the cm_id since
2499  * srpt_release_channel_work() will do that.
2500  */
2501 ret = 0;
2502 }
2503
2504 out:
2505 kfree(rep_param);
2506 kfree(rsp);
2507 kfree(rej);
2508
2509 return ret;
2510 }
2511
2512 static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
2513 const struct ib_cm_req_event_param *param,
2514 void *private_data)
2515 {
2516 char sguid[40];
2517
2518 srpt_format_guid(sguid, sizeof(sguid),
2519 &param->primary_path->dgid.global.interface_id);
2520
2521 return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port,
2522 param->primary_path->pkey,
2523 private_data, sguid);
2524 }
2525
2526 static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
2527 struct rdma_cm_event *event)
2528 {
2529 struct srpt_device *sdev;
2530 struct srp_login_req req;
2531 const struct srp_login_req_rdma *req_rdma;
2532 char src_addr[40];
2533
2534 sdev = ib_get_client_data(cm_id->device, &srpt_client);
2535 if (!sdev)
2536 return -ECONNREFUSED;
2537
2538 if (event->param.conn.private_data_len < sizeof(*req_rdma))
2539 return -EINVAL;
2540
2541 /* Transform srp_login_req_rdma into srp_login_req. */
2542 req_rdma = event->param.conn.private_data;
2543 memset(&req, 0, sizeof(req));
2544 req.opcode = req_rdma->opcode;
2545 req.tag = req_rdma->tag;
2546 req.req_it_iu_len = req_rdma->req_it_iu_len;
2547 req.req_buf_fmt = req_rdma->req_buf_fmt;
2548 req.req_flags = req_rdma->req_flags;
2549 memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
2550 memcpy(req.target_port_id, req_rdma->target_port_id, 16);
2551 req.imm_data_offset = req_rdma->imm_data_offset;
2552
2553 snprintf(src_addr, sizeof(src_addr), "%pIS",
2554 &cm_id->route.addr.src_addr);
2555
2556 return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
2557 cm_id->route.path_rec->pkey, &req, src_addr);
2558 }
2559
2560 static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
2561 enum ib_cm_rej_reason reason,
2562 const u8 *private_data,
2563 u8 private_data_len)
2564 {
2565 char *priv = NULL;
2566 int i;
2567
2568 if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
2569 GFP_KERNEL))) {
2570 for (i = 0; i < private_data_len; i++)
2571 sprintf(priv + 3 * i, " %02x", private_data[i]);
2572 }
2573 pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
2574 ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
2575 "; private data" : "", priv ? priv : " (?)");
2576 kfree(priv);
2577 }
2578
2579 /**
2580  * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
2581  * @ch: SRPT RDMA channel.
2582  *
2583  * An RTU (ready to use) message indicates that the connection has been
2584  * established and that the channel may transition to the LIVE state.
2585  */
2586 static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
2587 {
2588 int ret;
2589
2590 ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp);
2591 if (ret < 0) {
2592 pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
2593 ch->qp->qp_num);
2594 srpt_close_ch(ch);
2595 return;
2596 }
2597
2598 /*
2599  * Calling srpt_close_ch() here if the transition to the LIVE state
2600  * fails is not necessary: a failed transition means another thread
2601  * has already changed the channel state, e.g. to start a disconnect.
2602  */
2603 if (!srpt_set_ch_state(ch, CH_LIVE)) {
2604 pr_err("%s-%d: channel transition to LIVE state failed\n",
2605 ch->sess_name, ch->qp->qp_num);
2606 return;
2607 }
2608
2609 /* Trigger wait list processing via a zero-length RDMA write. */
2610 ret = srpt_zerolength_write(ch);
2611 WARN_ONCE(ret < 0, "%d\n", ret);
2612 }
2613
2614 /**
2615  * srpt_cm_handler - IB connection manager callback function
2616  * @cm_id: IB/CM connection identifier.
2617  * @event: IB/CM event.
2618  *
2619  * A non-zero return value will cause the caller to destroy the CM ID.
2620  *
2621  * Note: srpt_cm_handler() must only return a non-zero value when transferring
2622  * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
2623  * a non-zero value in any other case will trigger a race with the
2624  * ib_destroy_cm_id() call in srpt_release_channel_work().
2625  */
2626 static int srpt_cm_handler(struct ib_cm_id *cm_id,
2627 const struct ib_cm_event *event)
2628 {
2629 struct srpt_rdma_ch *ch = cm_id->context;
2630 int ret;
2631
2632 ret = 0;
2633 switch (event->event) {
2634 case IB_CM_REQ_RECEIVED:
2635 ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
2636 event->private_data);
2637 break;
2638 case IB_CM_REJ_RECEIVED:
2639 srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
2640 event->private_data,
2641 IB_CM_REJ_PRIVATE_DATA_SIZE);
2642 break;
2643 case IB_CM_RTU_RECEIVED:
2644 case IB_CM_USER_ESTABLISHED:
2645 srpt_cm_rtu_recv(ch);
2646 break;
2647 case IB_CM_DREQ_RECEIVED:
2648 srpt_disconnect_ch(ch);
2649 break;
2650 case IB_CM_DREP_RECEIVED:
2651 pr_info("Received CM DREP message for ch %s-%d.\n",
2652 ch->sess_name, ch->qp->qp_num);
2653 srpt_close_ch(ch);
2654 break;
2655 case IB_CM_TIMEWAIT_EXIT:
2656 pr_info("Received CM TimeWait exit for ch %s-%d.\n",
2657 ch->sess_name, ch->qp->qp_num);
2658 srpt_close_ch(ch);
2659 break;
2660 case IB_CM_REP_ERROR:
2661 pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
2662 ch->qp->qp_num);
2663 break;
2664 case IB_CM_DREQ_ERROR:
2665 pr_info("Received CM DREQ ERROR event.\n");
2666 break;
2667 case IB_CM_MRA_RECEIVED:
2668 pr_info("Received CM MRA event\n");
2669 break;
2670 default:
2671 pr_err("received unrecognized CM event %d\n", event->event);
2672 break;
2673 }
2674
2675 return ret;
2676 }
2677
2678 static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id,
2679 struct rdma_cm_event *event)
2680 {
2681 struct srpt_rdma_ch *ch = cm_id->context;
2682 int ret = 0;
2683
2684 switch (event->event) {
2685 case RDMA_CM_EVENT_CONNECT_REQUEST:
2686 ret = srpt_rdma_cm_req_recv(cm_id, event);
2687 break;
2688 case RDMA_CM_EVENT_REJECTED:
2689 srpt_cm_rej_recv(ch, event->status,
2690 event->param.conn.private_data,
2691 event->param.conn.private_data_len);
2692 break;
2693 case RDMA_CM_EVENT_ESTABLISHED:
2694 srpt_cm_rtu_recv(ch);
2695 break;
2696 case RDMA_CM_EVENT_DISCONNECTED:
2697 if (ch->state < CH_DISCONNECTING)
2698 srpt_disconnect_ch(ch);
2699 else
2700 srpt_close_ch(ch);
2701 break;
2702 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2703 srpt_close_ch(ch);
2704 break;
2705 case RDMA_CM_EVENT_UNREACHABLE:
2706 pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
2707 ch->qp->qp_num);
2708 break;
2709 case RDMA_CM_EVENT_DEVICE_REMOVAL:
2710 case RDMA_CM_EVENT_ADDR_CHANGE:
2711 break;
2712 default:
2713 pr_err("received unrecognized RDMA CM event %d\n",
2714 event->event);
2715 break;
2716 }
2717
2718 return ret;
2719 }
2720
2721 /*
2722  * srpt_write_pending - Start data transfer from initiator to target (write).
2723  */
2724 static int srpt_write_pending(struct se_cmd *se_cmd)
2725 {
2726 struct srpt_send_ioctx *ioctx =
2727 container_of(se_cmd, struct srpt_send_ioctx, cmd);
2728 struct srpt_rdma_ch *ch = ioctx->ch;
2729 struct ib_send_wr *first_wr = NULL;
2730 struct ib_cqe *cqe = &ioctx->rdma_cqe;
2731 enum srpt_command_state new_state;
2732 int ret, i;
2733
2734 if (ioctx->recv_ioctx) {
2735 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2736 target_execute_cmd(&ioctx->cmd);
2737 return 0;
2738 }
2739
2740 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2741 WARN_ON(new_state == SRPT_STATE_DONE);
2742
2743 if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
2744 pr_warn("%s: IB send queue full (needed %d)\n",
2745 __func__, ioctx->n_rdma);
2746 ret = -ENOMEM;
2747 goto out_undo;
2748 }
2749
2750 cqe->done = srpt_rdma_read_done;
2751 for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2752 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2753
2754 first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
2755 cqe, first_wr);
2756 cqe = NULL;
2757 }
2758
2759 ret = ib_post_send(ch->qp, first_wr, NULL);
2760 if (ret) {
2761 pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
2762 __func__, ret, ioctx->n_rdma,
2763 atomic_read(&ch->sq_wr_avail));
2764 goto out_undo;
2765 }
2766
2767 return 0;
2768 out_undo:
2769 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
2770 return ret;
2771 }
2772
2773 static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
2774 {
2775 switch (tcm_mgmt_status) {
2776 case TMR_FUNCTION_COMPLETE:
2777 return SRP_TSK_MGMT_SUCCESS;
2778 case TMR_FUNCTION_REJECTED:
2779 return SRP_TSK_MGMT_FUNC_NOT_SUPP;
2780 }
2781 return SRP_TSK_MGMT_FAILED;
2782 }
2783
2784 /**
2785  * srpt_queue_response - transmit the response to a SCSI command
2786  * @cmd: SCSI target command.
2787  *
2788  * Callback function called by the TCM core. Must not block since it can be
2789  * invoked in the context of the IB completion handler.
2790  */
2791 static void srpt_queue_response(struct se_cmd *cmd)
2792 {
2793 struct srpt_send_ioctx *ioctx =
2794 container_of(cmd, struct srpt_send_ioctx, cmd);
2795 struct srpt_rdma_ch *ch = ioctx->ch;
2796 struct srpt_device *sdev = ch->sport->sdev;
2797 struct ib_send_wr send_wr, *first_wr = &send_wr;
2798 struct ib_sge sge;
2799 enum srpt_command_state state;
2800 int resp_len, ret, i;
2801 u8 srp_tm_status;
2802
2803 BUG_ON(!ch);
2804
2805 state = ioctx->state;
2806 switch (state) {
2807 case SRPT_STATE_NEW:
2808 case SRPT_STATE_DATA_IN:
2809 ioctx->state = SRPT_STATE_CMD_RSP_SENT;
2810 break;
2811 case SRPT_STATE_MGMT:
2812 ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
2813 break;
2814 default:
2815 WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
2816 ch, ioctx->ioctx.index, ioctx->state);
2817 break;
2818 }
2819
2820 if (WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))
2821 return;
2822
2823
2824 if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
2825 ioctx->cmd.data_length &&
2826 !ioctx->queue_status_only) {
2827 for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2828 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2829
2830 first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
2831 ch->sport->port, NULL, first_wr);
2832 }
2833 }
2834
2835 if (state != SRPT_STATE_MGMT)
2836 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
2837 cmd->scsi_status);
2838 else {
2839 srp_tm_status
2840 = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
2841 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
2842 ioctx->cmd.tag);
2843 }
2844
2845 atomic_inc(&ch->req_lim);
2846
2847 if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
2848 &ch->sq_wr_avail) < 0)) {
2849 pr_warn("%s: IB send queue full (needed %d)\n",
2850 __func__, ioctx->n_rdma);
2851 ret = -ENOMEM;
2852 goto out;
2853 }
2854
2855 ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
2856 DMA_TO_DEVICE);
2857
2858 sge.addr = ioctx->ioctx.dma;
2859 sge.length = resp_len;
2860 sge.lkey = sdev->lkey;
2861
2862 ioctx->ioctx.cqe.done = srpt_send_done;
2863 send_wr.next = NULL;
2864 send_wr.wr_cqe = &ioctx->ioctx.cqe;
2865 send_wr.sg_list = &sge;
2866 send_wr.num_sge = 1;
2867 send_wr.opcode = IB_WR_SEND;
2868 send_wr.send_flags = IB_SEND_SIGNALED;
2869
2870 ret = ib_post_send(ch->qp, first_wr, NULL);
2871 if (ret < 0) {
2872 pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
2873 __func__, ioctx->cmd.tag, ret);
2874 goto out;
2875 }
2876
2877 return;
2878
2879 out:
2880 atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
2881 atomic_dec(&ch->req_lim);
2882 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
2883 target_put_sess_cmd(&ioctx->cmd);
2884 }
2885
2886 static int srpt_queue_data_in(struct se_cmd *cmd)
2887 {
2888 srpt_queue_response(cmd);
2889 return 0;
2890 }
2891
2892 static void srpt_queue_tm_rsp(struct se_cmd *cmd)
2893 {
2894 srpt_queue_response(cmd);
2895 }
2896
2897 /*
2898  * This function is called for aborted commands if no response is sent to
2899  * the initiator. Make sure that the credits freed by aborting a command
2900  * are returned to the initiator the next time a response is sent, by
2901  * incrementing ch->req_lim_delta.
2902  */
2903 static void srpt_aborted_task(struct se_cmd *cmd)
2904 {
2905 struct srpt_send_ioctx *ioctx = container_of(cmd,
2906 struct srpt_send_ioctx, cmd);
2907 struct srpt_rdma_ch *ch = ioctx->ch;
2908
2909 atomic_inc(&ch->req_lim_delta);
2910 }
2911
2912 static int srpt_queue_status(struct se_cmd *cmd)
2913 {
2914 struct srpt_send_ioctx *ioctx;
2915
2916 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2917 BUG_ON(ioctx->sense_data != cmd->sense_buffer);
2918 if (cmd->se_cmd_flags &
2919 (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
2920 WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
2921 ioctx->queue_status_only = true;
2922 srpt_queue_response(cmd);
2923 return 0;
2924 }
2925
2926 static void srpt_refresh_port_work(struct work_struct *work)
2927 {
2928 struct srpt_port *sport = container_of(work, struct srpt_port, work);
2929
2930 srpt_refresh_port(sport);
2931 }
2932
2933 static bool srpt_ch_list_empty(struct srpt_port *sport)
2934 {
2935 struct srpt_nexus *nexus;
2936 bool res = true;
2937
2938 rcu_read_lock();
2939 list_for_each_entry(nexus, &sport->nexus_list, entry)
2940 if (!list_empty(&nexus->ch_list))
2941 res = false;
2942 rcu_read_unlock();
2943
2944 return res;
2945 }
2946
2947 /**
2948  * srpt_release_sport - disable login and wait for associated channels
2949  * @sport: SRPT HCA port.
2950  */
2951 static int srpt_release_sport(struct srpt_port *sport)
2952 {
2953 struct srpt_nexus *nexus, *next_n;
2954 struct srpt_rdma_ch *ch;
2955
2956 WARN_ON_ONCE(irqs_disabled());
2957
2958 mutex_lock(&sport->mutex);
2959 srpt_set_enabled(sport, false);
2960 mutex_unlock(&sport->mutex);
2961
2962 while (wait_event_timeout(sport->ch_releaseQ,
2963 srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
2964 pr_info("%s_%d: waiting for session unregistration ...\n",
2965 dev_name(&sport->sdev->device->dev), sport->port);
2966 rcu_read_lock();
2967 list_for_each_entry(nexus, &sport->nexus_list, entry) {
2968 list_for_each_entry(ch, &nexus->ch_list, list) {
2969 pr_info("%s-%d: state %s\n",
2970 ch->sess_name, ch->qp->qp_num,
2971 get_ch_state_name(ch->state));
2972 }
2973 }
2974 rcu_read_unlock();
2975 }
2976
2977 mutex_lock(&sport->mutex);
2978 list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
2979 list_del(&nexus->entry);
2980 kfree_rcu(nexus, rcu);
2981 }
2982 mutex_unlock(&sport->mutex);
2983
2984 return 0;
2985 }
2986
2987 static struct se_wwn *__srpt_lookup_wwn(const char *name)
2988 {
2989 struct ib_device *dev;
2990 struct srpt_device *sdev;
2991 struct srpt_port *sport;
2992 int i;
2993
2994 list_for_each_entry(sdev, &srpt_dev_list, list) {
2995 dev = sdev->device;
2996 if (!dev)
2997 continue;
2998
2999 for (i = 0; i < dev->phys_port_cnt; i++) {
3000 sport = &sdev->port[i];
3001
3002 if (strcmp(sport->port_guid, name) == 0)
3003 return &sport->port_guid_wwn;
3004 if (strcmp(sport->port_gid, name) == 0)
3005 return &sport->port_gid_wwn;
3006 }
3007 }
3008
3009 return NULL;
3010 }
3011
3012 static struct se_wwn *srpt_lookup_wwn(const char *name)
3013 {
3014 struct se_wwn *wwn;
3015
3016 spin_lock(&srpt_dev_lock);
3017 wwn = __srpt_lookup_wwn(name);
3018 spin_unlock(&srpt_dev_lock);
3019
3020 return wwn;
3021 }
3022
3023 static void srpt_free_srq(struct srpt_device *sdev)
3024 {
3025 if (!sdev->srq)
3026 return;
3027
3028 ib_destroy_srq(sdev->srq);
3029 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
3030 sdev->srq_size, sdev->req_buf_cache,
3031 DMA_FROM_DEVICE);
3032 kmem_cache_destroy(sdev->req_buf_cache);
3033 sdev->srq = NULL;
3034 }
3035
3036 static int srpt_alloc_srq(struct srpt_device *sdev)
3037 {
3038 struct ib_srq_init_attr srq_attr = {
3039 .event_handler = srpt_srq_event,
3040 .srq_context = (void *)sdev,
3041 .attr.max_wr = sdev->srq_size,
3042 .attr.max_sge = 1,
3043 .srq_type = IB_SRQT_BASIC,
3044 };
3045 struct ib_device *device = sdev->device;
3046 struct ib_srq *srq;
3047 int i;
3048
3049 WARN_ON_ONCE(sdev->srq);
3050 srq = ib_create_srq(sdev->pd, &srq_attr);
3051 if (IS_ERR(srq)) {
3052 pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
3053 return PTR_ERR(srq);
3054 }
3055
3056 pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
3057 sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
3058
3059 sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
3060 srp_max_req_size, 0, 0, NULL);
3061 if (!sdev->req_buf_cache)
3062 goto free_srq;
3063
3064 sdev->ioctx_ring = (struct srpt_recv_ioctx **)
3065 srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
3066 sizeof(*sdev->ioctx_ring[0]),
3067 sdev->req_buf_cache, 0, DMA_FROM_DEVICE);
3068 if (!sdev->ioctx_ring)
3069 goto free_cache;
3070
3071 sdev->use_srq = true;
3072 sdev->srq = srq;
3073
3074 for (i = 0; i < sdev->srq_size; ++i) {
3075 INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
3076 srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
3077 }
3078
3079 return 0;
3080
3081 free_cache:
3082 kmem_cache_destroy(sdev->req_buf_cache);
3083
3084 free_srq:
3085 ib_destroy_srq(srq);
3086 return -ENOMEM;
3087 }
3088
3089 static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
3090 {
3091 struct ib_device *device = sdev->device;
3092 int ret = 0;
3093
3094 if (!use_srq) {
3095 srpt_free_srq(sdev);
3096 sdev->use_srq = false;
3097 } else if (use_srq && !sdev->srq) {
3098 ret = srpt_alloc_srq(sdev);
3099 }
3100 pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
3101 dev_name(&device->dev), sdev->use_srq, ret);
3102 return ret;
3103 }
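/*
 * Behavioural note (added for clarity, not part of the original source):
 * toggling use_srq off tears down the shared receive queue and its ioctx
 * ring; each subsequently created channel then allocates its own per-channel
 * receive ring in srpt_cm_req_recv(). Toggling use_srq back on only
 * allocates a new SRQ if none exists yet.
 */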
3104
3105 /**
3106  * srpt_add_one - InfiniBand device addition callback function
3107  * @device: Describes a HCA.
3108  */
3109 static void srpt_add_one(struct ib_device *device)
3110 {
3111 struct srpt_device *sdev;
3112 struct srpt_port *sport;
3113 int i, ret;
3114
3115 pr_debug("device = %p\n", device);
3116
3117 sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
3118 GFP_KERNEL);
3119 if (!sdev)
3120 goto err;
3121
3122 sdev->device = device;
3123 mutex_init(&sdev->sdev_mutex);
3124
3125 sdev->pd = ib_alloc_pd(device, 0);
3126 if (IS_ERR(sdev->pd))
3127 goto free_dev;
3128
3129 sdev->lkey = sdev->pd->local_dma_lkey;
3130
3131 sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
3132
3133 srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);
3134
3135 if (!srpt_service_guid)
3136 srpt_service_guid = be64_to_cpu(device->node_guid);
3137
3138 if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
3139 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
3140 if (IS_ERR(sdev->cm_id)) {
3141 pr_info("ib_create_cm_id() failed: %ld\n",
3142 PTR_ERR(sdev->cm_id));
3143 sdev->cm_id = NULL;
3144 if (!rdma_cm_id)
3145 goto err_ring;
3146 }
3147
3148 /* Print out the target login information. */
3149 pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
3150 srpt_service_guid, srpt_service_guid, srpt_service_guid);
3151
3152 /*
3153  * There is no consistent service_id (i.e. id_ext of the target_id) that
3154  * identifies this target. The GUID of the first HCA in the system is
3155  * currently used as service_id, so the target_id will change if that
3156  * HCA goes bad and is replaced by a different one.
3157  */
3158 ret = sdev->cm_id ?
3159 ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) :
3160 0;
3161 if (ret < 0) {
3162 pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
3163 sdev->cm_id->state);
3164 goto err_cm;
3165 }
3166
3167 INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
3168 srpt_event_handler);
3169 ib_register_event_handler(&sdev->event_handler);
3170
3171 for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
3172 sport = &sdev->port[i - 1];
3173 INIT_LIST_HEAD(&sport->nexus_list);
3174 init_waitqueue_head(&sport->ch_releaseQ);
3175 mutex_init(&sport->mutex);
3176 sport->sdev = sdev;
3177 sport->port = i;
3178 sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
3179 sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
3180 sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
3181 sport->port_attrib.use_srq = false;
3182 INIT_WORK(&sport->work, srpt_refresh_port_work);
3183
3184 if (srpt_refresh_port(sport)) {
3185 pr_err("MAD registration failed for %s-%d.\n",
3186 dev_name(&sdev->device->dev), i);
3187 goto err_event;
3188 }
3189 }
3190
3191 spin_lock(&srpt_dev_lock);
3192 list_add_tail(&sdev->list, &srpt_dev_list);
3193 spin_unlock(&srpt_dev_lock);
3194
3195 out:
3196 ib_set_client_data(device, &srpt_client, sdev);
3197 pr_debug("added %s.\n", dev_name(&device->dev));
3198 return;
3199
3200 err_event:
3201 ib_unregister_event_handler(&sdev->event_handler);
3202 err_cm:
3203 if (sdev->cm_id)
3204 ib_destroy_cm_id(sdev->cm_id);
3205 err_ring:
3206 srpt_free_srq(sdev);
3207 ib_dealloc_pd(sdev->pd);
3208 free_dev:
3209 kfree(sdev);
3210 err:
3211 sdev = NULL;
3212 pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
3213 goto out;
3214 }
3215
3216 /**
3217  * srpt_remove_one - InfiniBand device removal callback function
3218  * @device: Describes a HCA.
3219  * @client_data: The value passed as third argument to ib_set_client_data().
3220  */
3221 static void srpt_remove_one(struct ib_device *device, void *client_data)
3222 {
3223 struct srpt_device *sdev = client_data;
3224 int i;
3225
3226 if (!sdev) {
3227 pr_info("%s(%s): nothing to do.\n", __func__,
3228 dev_name(&device->dev));
3229 return;
3230 }
3231
3232 srpt_unregister_mad_agent(sdev);
3233
3234 ib_unregister_event_handler(&sdev->event_handler);
3235
3236 /* Cancel any work queued by the just unregistered IB event handler. */
3237 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3238 cancel_work_sync(&sdev->port[i].work);
3239
3240 if (sdev->cm_id)
3241 ib_destroy_cm_id(sdev->cm_id);
3242
3243 ib_set_client_data(device, &srpt_client, NULL);
3244
3245 /*
3246  * Remove sdev from the global device list before releasing its ports so
3247  * that srpt_lookup_wwn() can no longer resolve a target port name to
3248  * this device while it is being torn down.
3249  */
3250 spin_lock(&srpt_dev_lock);
3251 list_del(&sdev->list);
3252 spin_unlock(&srpt_dev_lock);
3253
3254 for (i = 0; i < sdev->device->phys_port_cnt; i++)
3255 srpt_release_sport(&sdev->port[i]);
3256
3257 srpt_free_srq(sdev);
3258
3259 ib_dealloc_pd(sdev->pd);
3260
3261 kfree(sdev);
3262 }
3263
3264 static struct ib_client srpt_client = {
3265 .name = DRV_NAME,
3266 .add = srpt_add_one,
3267 .remove = srpt_remove_one
3268 };
3269
3270 static int srpt_check_true(struct se_portal_group *se_tpg)
3271 {
3272 return 1;
3273 }
3274
3275 static int srpt_check_false(struct se_portal_group *se_tpg)
3276 {
3277 return 0;
3278 }
3279
3280 static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
3281 {
3282 return tpg->se_tpg_wwn->priv;
3283 }
3284
3285 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
3286 {
3287 struct srpt_port *sport = srpt_tpg_to_sport(tpg);
3288
3289 WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
3290 tpg != &sport->port_gid_tpg);
3291 return tpg == &sport->port_guid_tpg ? sport->port_guid :
3292 sport->port_gid;
3293 }
3294
3295 static u16 srpt_get_tag(struct se_portal_group *tpg)
3296 {
3297 return 1;
3298 }
3299
3300 static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
3301 {
3302 return 1;
3303 }
3304
3305 static void srpt_release_cmd(struct se_cmd *se_cmd)
3306 {
3307 struct srpt_send_ioctx *ioctx = container_of(se_cmd,
3308 struct srpt_send_ioctx, cmd);
3309 struct srpt_rdma_ch *ch = ioctx->ch;
3310 struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;
3311
3312 WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
3313 !(ioctx->cmd.transport_state & CMD_T_ABORTED));
3314
3315 if (recv_ioctx) {
3316 WARN_ON_ONCE(!list_empty(&recv_ioctx->wait_list));
3317 ioctx->recv_ioctx = NULL;
3318 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
3319 }
3320
3321 if (ioctx->n_rw_ctx) {
3322 srpt_free_rw_ctxs(ch, ioctx);
3323 ioctx->n_rw_ctx = 0;
3324 }
3325
3326 target_free_tag(se_cmd->se_sess, se_cmd);
3327 }
3328
3329 /**
3330  * srpt_close_session - forcibly close a session
3331  * @se_sess: SCSI target session.
3332  *
3333  * Callback function invoked by the TCM core to clean up a session. Waits
3334  * until the RDMA channel associated with the session has been disconnected
3335  * so that the session is not freed while the channel is still using it.
3336  */
3337 static void srpt_close_session(struct se_session *se_sess)
3338 {
3339 struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
3340
3341 srpt_disconnect_ch_sync(ch);
3342 }
3343
3344 /**
3345  * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB)
3346  * @se_sess: SCSI target session.
3347  *
3348  * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
3349  * This object represents an arbitrary integer used to uniquely identify a
3350  * particular attached remote initiator port to a particular SCSI target port
3351  * within a particular SCSI target device within a particular SCSI instance.
3352  */
3353 static u32 srpt_sess_get_index(struct se_session *se_sess)
3354 {
3355 return 0;
3356 }
3357
3358 static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
3359 {
3360 }
3361
3362
3363 static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
3364 {
3365 struct srpt_send_ioctx *ioctx;
3366
3367 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3368 return ioctx->state;
3369 }
3370
3371 static int srpt_parse_guid(u64 *guid, const char *name)
3372 {
3373 u16 w[4];
3374 int ret = -EINVAL;
3375
3376 if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4)
3377 goto out;
3378 *guid = get_unaligned_be64(w);
3379 ret = 0;
3380 out:
3381 return ret;
3382 }
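/*
 * Illustrative example (not part of the original source): srpt_parse_guid()
 * expects four colon-separated 16-bit hexadecimal groups, e.g.
 * "5054:00ff:fe4a:0b7b". Any string that sscanf() cannot match as four such
 * groups makes the function return -EINVAL.
 */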
3383
3384 /*
3385  * srpt_parse_i_port_id - convert an ASCII initiator port ID into its
3386  * 128-bit binary representation.
3387  * Returns 0 on success and a negative value otherwise.
3388  */
3389 static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
3390 {
3391 const char *p;
3392 unsigned len, count, leading_zero_bytes;
3393 int ret;
3394
3395 p = name;
3396 if (strncasecmp(p, "0x", 2) == 0)
3397 p += 2;
3398 ret = -EINVAL;
3399 len = strlen(p);
3400 if (len % 2)
3401 goto out;
3402 count = min(len / 2, 16U);
3403 leading_zero_bytes = 16 - count;
3404 memset(i_port_id, 0, leading_zero_bytes);
3405 ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
3406
3407 out:
3408 return ret;
3409 }
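/*
 * Illustrative example (not part of the original source):
 * srpt_parse_i_port_id() converts an ASCII initiator port ID such as
 * "0x505400fffe4a0b7b505400fffe4a0b7b" (the "0x" prefix is optional) into a
 * 16-byte binary value. Shorter even-length strings are left-padded with
 * zero bytes; strings with an odd number of hex digits are rejected.
 */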
3410 /*
3411  * configfs callback function invoked for
3412  * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
3413  *
3414  * The supplied name must identify an initiator port in one of the formats
3415  * accepted by the parsers tried below, in order:
3416  * - an initiator port GUID (srpt_parse_guid()),
3417  * - a 128-bit initiator port ID in hexadecimal (srpt_parse_i_port_id()),
3418  * - an IPv4 or IPv6 address (inet_pton_with_scope()).
3419  *
3420  * If none of the parsers accepts the name, an "invalid initiator port ID"
3421  * error is logged and a negative error code is returned.
3422  */
3423 static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
3424 {
3425 struct sockaddr_storage sa;
3426 u64 guid;
3427 u8 i_port_id[16];
3428 int ret;
3429
3430 ret = srpt_parse_guid(&guid, name);
3431 if (ret < 0)
3432 ret = srpt_parse_i_port_id(i_port_id, name);
3433 if (ret < 0)
3434 ret = inet_pton_with_scope(&init_net, AF_UNSPEC, name, NULL,
3435 &sa);
3436 if (ret < 0)
3437 pr_err("invalid initiator port ID %s\n", name);
3438 return ret;
3439 }
3440
3441 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
3442 char *page)
3443 {
3444 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3445 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3446
3447 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
3448 }
3449
3450 static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
3451 const char *page, size_t count)
3452 {
3453 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3454 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3455 unsigned long val;
3456 int ret;
3457
3458 ret = kstrtoul(page, 0, &val);
3459 if (ret < 0) {
3460 pr_err("kstrtoul() failed with ret: %d\n", ret);
3461 return -EINVAL;
3462 }
3463 if (val > MAX_SRPT_RDMA_SIZE) {
3464 pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
3465 MAX_SRPT_RDMA_SIZE);
3466 return -EINVAL;
3467 }
3468 if (val < DEFAULT_MAX_RDMA_SIZE) {
3469 pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
3470 val, DEFAULT_MAX_RDMA_SIZE);
3471 return -EINVAL;
3472 }
3473 sport->port_attrib.srp_max_rdma_size = val;
3474
3475 return count;
3476 }
3477
3478 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
3479 char *page)
3480 {
3481 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3482 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3483
3484 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
3485 }
3486
3487 static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
3488 const char *page, size_t count)
3489 {
3490 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3491 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3492 unsigned long val;
3493 int ret;
3494
3495 ret = kstrtoul(page, 0, &val);
3496 if (ret < 0) {
3497 pr_err("kstrtoul() failed with ret: %d\n", ret);
3498 return -EINVAL;
3499 }
3500 if (val > MAX_SRPT_RSP_SIZE) {
3501 pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
3502 MAX_SRPT_RSP_SIZE);
3503 return -EINVAL;
3504 }
3505 if (val < MIN_MAX_RSP_SIZE) {
3506 pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
3507 MIN_MAX_RSP_SIZE);
3508 return -EINVAL;
3509 }
3510 sport->port_attrib.srp_max_rsp_size = val;
3511
3512 return count;
3513 }
3514
3515 static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
3516 char *page)
3517 {
3518 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3519 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3520
3521 return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
3522 }
3523
3524 static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
3525 const char *page, size_t count)
3526 {
3527 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3528 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3529 unsigned long val;
3530 int ret;
3531
3532 ret = kstrtoul(page, 0, &val);
3533 if (ret < 0) {
3534 pr_err("kstrtoul() failed with ret: %d\n", ret);
3535 return -EINVAL;
3536 }
3537 if (val > MAX_SRPT_SRQ_SIZE) {
3538 pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
3539 MAX_SRPT_SRQ_SIZE);
3540 return -EINVAL;
3541 }
3542 if (val < MIN_SRPT_SRQ_SIZE) {
3543 pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
3544 MIN_SRPT_SRQ_SIZE);
3545 return -EINVAL;
3546 }
3547 sport->port_attrib.srp_sq_size = val;
3548
3549 return count;
3550 }
3551
3552 static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
3553 char *page)
3554 {
3555 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3556 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3557
3558 return sprintf(page, "%d\n", sport->port_attrib.use_srq);
3559 }
3560
3561 static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
3562 const char *page, size_t count)
3563 {
3564 struct se_portal_group *se_tpg = attrib_to_tpg(item);
3565 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3566 struct srpt_device *sdev = sport->sdev;
3567 unsigned long val;
3568 bool enabled;
3569 int ret;
3570
3571 ret = kstrtoul(page, 0, &val);
3572 if (ret < 0)
3573 return ret;
3574 if (val != !!val)
3575 return -EINVAL;
3576
3577 ret = mutex_lock_interruptible(&sdev->sdev_mutex);
3578 if (ret < 0)
3579 return ret;
3580 ret = mutex_lock_interruptible(&sport->mutex);
3581 if (ret < 0)
3582 goto unlock_sdev;
3583 enabled = sport->enabled;
3584
3585 srpt_set_enabled(sport, false);
3586 sport->port_attrib.use_srq = val;
3587 srpt_use_srq(sdev, sport->port_attrib.use_srq);
3588 srpt_set_enabled(sport, enabled);
3589 ret = count;
3590 mutex_unlock(&sport->mutex);
3591 unlock_sdev:
3592 mutex_unlock(&sdev->sdev_mutex);
3593
3594 return ret;
3595 }
3596
3597 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
3598 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
3599 CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
3600 CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq);
3601
3602 static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
3603 &srpt_tpg_attrib_attr_srp_max_rdma_size,
3604 &srpt_tpg_attrib_attr_srp_max_rsp_size,
3605 &srpt_tpg_attrib_attr_srp_sq_size,
3606 &srpt_tpg_attrib_attr_use_srq,
3607 NULL,
3608 };
3609
3610 static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
3611 {
3612 struct rdma_cm_id *rdma_cm_id;
3613 int ret;
3614
3615 rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
3616 NULL, RDMA_PS_TCP, IB_QPT_RC);
3617 if (IS_ERR(rdma_cm_id)) {
3618 pr_err("RDMA/CM ID creation failed: %ld\n",
3619 PTR_ERR(rdma_cm_id));
3620 goto out;
3621 }
3622
3623 ret = rdma_bind_addr(rdma_cm_id, listen_addr);
3624 if (ret) {
3625 char addr_str[64];
3626
3627 snprintf(addr_str, sizeof(addr_str), "%pISp", listen_addr);
3628 pr_err("Binding RDMA/CM ID to address %s failed: %d\n",
3629 addr_str, ret);
3630 rdma_destroy_id(rdma_cm_id);
3631 rdma_cm_id = ERR_PTR(ret);
3632 goto out;
3633 }
3634
3635 ret = rdma_listen(rdma_cm_id, 128);
3636 if (ret) {
3637 pr_err("rdma_listen() failed: %d\n", ret);
3638 rdma_destroy_id(rdma_cm_id);
3639 rdma_cm_id = ERR_PTR(ret);
3640 }
3641
3642 out:
3643 return rdma_cm_id;
3644 }
3645
3646 static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
3647 {
3648 return sprintf(page, "%d\n", rdma_cm_port);
3649 }
3650
3651 static ssize_t srpt_rdma_cm_port_store(struct config_item *item,
3652 const char *page, size_t count)
3653 {
3654 struct sockaddr_in addr4 = { .sin_family = AF_INET };
3655 struct sockaddr_in6 addr6 = { .sin6_family = AF_INET6 };
3656 struct rdma_cm_id *new_id = NULL;
3657 u16 val;
3658 int ret;
3659
3660 ret = kstrtou16(page, 0, &val);
3661 if (ret < 0)
3662 return ret;
3663 ret = count;
3664 if (rdma_cm_port == val)
3665 goto out;
3666
3667 if (val) {
3668 addr6.sin6_port = cpu_to_be16(val);
3669 new_id = srpt_create_rdma_id((struct sockaddr *)&addr6);
3670 if (IS_ERR(new_id)) {
3671 addr4.sin_port = cpu_to_be16(val);
3672 new_id = srpt_create_rdma_id((struct sockaddr *)&addr4);
3673 if (IS_ERR(new_id)) {
3674 ret = PTR_ERR(new_id);
3675 goto out;
3676 }
3677 }
3678 }
3679
3680 mutex_lock(&rdma_cm_mutex);
3681 rdma_cm_port = val;
3682 swap(rdma_cm_id, new_id);
3683 mutex_unlock(&rdma_cm_mutex);
3684
3685 if (new_id)
3686 rdma_destroy_id(new_id);
3687 ret = count;
3688 out:
3689 return ret;
3690 }
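/*
 * Illustrative usage (the configfs path assumes the standard target layout
 * and is not taken from this source file):
 *   echo 5500 > /sys/kernel/config/target/srpt/discovery_auth/rdma_cm_port
 * Writing a non-zero port creates an RDMA/CM listener (IPv6 first, falling
 * back to IPv4); writing 0 destroys the current listener.
 */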
3691
3692 CONFIGFS_ATTR(srpt_, rdma_cm_port);
3693
3694 static struct configfs_attribute *srpt_da_attrs[] = {
3695 &srpt_attr_rdma_cm_port,
3696 NULL,
3697 };
3698
3699 static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
3700 {
3701 struct se_portal_group *se_tpg = to_tpg(item);
3702 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3703
3704 return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled);
3705 }
3706
3707 static ssize_t srpt_tpg_enable_store(struct config_item *item,
3708 const char *page, size_t count)
3709 {
3710 struct se_portal_group *se_tpg = to_tpg(item);
3711 struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
3712 unsigned long tmp;
3713 int ret;
3714
3715 ret = kstrtoul(page, 0, &tmp);
3716 if (ret < 0) {
3717 pr_err("Unable to extract srpt_tpg_store_enable\n");
3718 return -EINVAL;
3719 }
3720
3721 if ((tmp != 0) && (tmp != 1)) {
3722 pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
3723 return -EINVAL;
3724 }
3725
3726 mutex_lock(&sport->mutex);
3727 srpt_set_enabled(sport, tmp);
3728 mutex_unlock(&sport->mutex);
3729
3730 return count;
3731 }
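/*
 * Illustrative usage (the configfs path assumes the standard target layout
 * and is not taken from this source file):
 *   echo 1 > /sys/kernel/config/target/srpt/$port_id/$tpg/enable
 * Only the values 0 and 1 are accepted.
 */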
3732
3733 CONFIGFS_ATTR(srpt_tpg_, enable);
3734
3735 static struct configfs_attribute *srpt_tpg_attrs[] = {
3736 &srpt_tpg_attr_enable,
3737 NULL,
3738 };
3739 /*
3740  * configfs callback invoked for
3741  * mkdir /sys/kernel/config/target/$driver/$port/$tpg
3742  *
3743  * Returns the TPG that corresponds to the port GUID or port GID WWN.
3744  */
3745 static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
3746 const char *name)
3747 {
3748 struct srpt_port *sport = wwn->priv;
3749 struct se_portal_group *tpg;
3750 int res;
3751
3752 WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
3753 wwn != &sport->port_gid_wwn);
3754 tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
3755 &sport->port_gid_tpg;
3756 res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
3757 if (res)
3758 return ERR_PTR(res);
3759
3760 return tpg;
3761 }
3762 /*
3763  * configfs callback invoked for
3764  * rmdir /sys/kernel/config/target/$driver/$port/$tpg
3765  * Disables the target port and deregisters the TPG.
3766  */
3767 static void srpt_drop_tpg(struct se_portal_group *tpg)
3768 {
3769 struct srpt_port *sport = srpt_tpg_to_sport(tpg);
3770
3771 sport->enabled = false;
3772 core_tpg_deregister(tpg);
3773 }
3774 /*
3775  * configfs callback invoked for
3776  * mkdir /sys/kernel/config/target/$driver/$port
3777  *
3778  * Looks up the se_wwn that corresponds to an already discovered RDMA port;
3779  * creating ports that do not exist is rejected with -EINVAL.
3780  */
3781 static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
3782 struct config_group *group,
3783 const char *name)
3784 {
3785 return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
3786 }
3787 /*
3788  * configfs callback invoked for
3789  * rmdir /sys/kernel/config/target/$driver/$port
3790  * Nothing to free here; ports are owned by their srpt_device.
3791  */
3792 static void srpt_drop_tport(struct se_wwn *wwn)
3793 {
3794 }
3795
3796 static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
3797 {
3798 return scnprintf(buf, PAGE_SIZE, "\n");
3799 }
3800
3801 CONFIGFS_ATTR_RO(srpt_wwn_, version);
3802
3803 static struct configfs_attribute *srpt_wwn_attrs[] = {
3804 &srpt_wwn_attr_version,
3805 NULL,
3806 };
3807
3808 static const struct target_core_fabric_ops srpt_template = {
3809 .module = THIS_MODULE,
3810 .fabric_name = "srpt",
3811 .tpg_get_wwn = srpt_get_fabric_wwn,
3812 .tpg_get_tag = srpt_get_tag,
3813 .tpg_check_demo_mode = srpt_check_false,
3814 .tpg_check_demo_mode_cache = srpt_check_true,
3815 .tpg_check_demo_mode_write_protect = srpt_check_true,
3816 .tpg_check_prod_mode_write_protect = srpt_check_false,
3817 .tpg_get_inst_index = srpt_tpg_get_inst_index,
3818 .release_cmd = srpt_release_cmd,
3819 .check_stop_free = srpt_check_stop_free,
3820 .close_session = srpt_close_session,
3821 .sess_get_index = srpt_sess_get_index,
3822 .sess_get_initiator_sid = NULL,
3823 .write_pending = srpt_write_pending,
3824 .set_default_node_attributes = srpt_set_default_node_attrs,
3825 .get_cmd_state = srpt_get_tcm_cmd_state,
3826 .queue_data_in = srpt_queue_data_in,
3827 .queue_status = srpt_queue_status,
3828 .queue_tm_rsp = srpt_queue_tm_rsp,
3829 .aborted_task = srpt_aborted_task,
3830 /*
3831  * Setup function pointers and register the fabric for use within
3832  * TCM
3833  */
3834 .fabric_make_wwn = srpt_make_tport,
3835 .fabric_drop_wwn = srpt_drop_tport,
3836 .fabric_make_tpg = srpt_make_tpg,
3837 .fabric_drop_tpg = srpt_drop_tpg,
3838 .fabric_init_nodeacl = srpt_init_nodeacl,
3839
3840 .tfc_discovery_attrs = srpt_da_attrs,
3841 .tfc_wwn_attrs = srpt_wwn_attrs,
3842 .tfc_tpg_base_attrs = srpt_tpg_attrs,
3843 .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
3844 };
3845
3846 /**
3847  * srpt_init_module - kernel module initialization
3848  *
3849  * Note: Since ib_register_client() registers callback functions, and since at
3850  * least one of these callback functions (srpt_add_one()) calls target core
3851  * functions, this driver must be registered with the target core before
3852  * ib_register_client() is called.
3853  */
3854 static int __init srpt_init_module(void)
3855 {
3856 int ret;
3857
3858 ret = -EINVAL;
3859 if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
3860 pr_err("invalid value %d for kernel module parameter srp_max_req_size -- must be at least %d.\n",
3861 srp_max_req_size, MIN_MAX_REQ_SIZE);
3862 goto out;
3863 }
3864
3865 if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
3866 || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
3867 pr_err("invalid value %d for kernel module parameter srpt_srq_size -- must be in the range [%d..%d].\n",
3868 srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
3869 goto out;
3870 }
3871
3872 ret = target_register_template(&srpt_template);
3873 if (ret)
3874 goto out;
3875
3876 ret = ib_register_client(&srpt_client);
3877 if (ret) {
3878 pr_err("couldn't register IB client\n");
3879 goto out_unregister_target;
3880 }
3881
3882 return 0;
3883
3884 out_unregister_target:
3885 target_unregister_template(&srpt_template);
3886 out:
3887 return ret;
3888 }
3889
3890 static void __exit srpt_cleanup_module(void)
3891 {
3892 if (rdma_cm_id)
3893 rdma_destroy_id(rdma_cm_id);
3894 ib_unregister_client(&srpt_client);
3895 target_unregister_template(&srpt_template);
3896 }
3897
3898 module_init(srpt_init_module);
3899 module_exit(srpt_cleanup_module);