/*
 * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "ib_srpt.h"

/* Name of this kernel module. */
#define DRV_NAME	"ib_srpt"
#define DRV_VERSION	"2.0.0"
#define DRV_RELDATE	"2011-02-14"

#define SRPT_ID_STRING	"Linux SRP target"

#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * Global Variables
 */

static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */

static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
		 "Maximum size of SRP request messages in bytes.");

static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
		 "Shared receive queue (SRQ) size.");

static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
MODULE_PARM_DESC(srpt_service_guid,
		 "Use this value for ioc_guid, id_ext, and cm_listen_id"
		 " instead of the node_guid of the first HCA.");

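/*
 * Because srpt_service_guid is exported read-only (mode 0444) through
 * srpt_get_u64_x(), its current value can be inspected at run time, e.g.:
 *
 *   $ cat /sys/module/ib_srpt/parameters/srpt_service_guid
 *   0x0002c90300a06452          (example output only)
 */
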
static struct ib_client srpt_client;
static void srpt_release_channel(struct srpt_rdma_ch *ch);
static int srpt_queue_status(struct se_cmd *cmd);

/**
 * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
 */
static inline
enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:	return DMA_FROM_DEVICE;
	case DMA_FROM_DEVICE:	return DMA_TO_DEVICE;
	default:		return dir;
	}
}

/**
 * srpt_sdev_name() - Return the name associated with the HCA.
 *
 * Examples are ib0, ib1, ...
 */
static inline const char *srpt_sdev_name(struct srpt_device *sdev)
{
	return sdev->device->name;
}

static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
{
	unsigned long flags;
	enum rdma_ch_state state;

	spin_lock_irqsave(&ch->spinlock, flags);
	state = ch->state;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return state;
}

static enum rdma_ch_state
srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
{
	unsigned long flags;
	enum rdma_ch_state prev;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	ch->state = new_state;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return prev;
}

/**
 * srpt_test_and_set_ch_state() - Test and set the channel state.
 *
 * Returns true if and only if the channel state has been set to the new state.
 */
static bool
srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
			   enum rdma_ch_state new)
{
	unsigned long flags;
	enum rdma_ch_state prev;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (prev == old)
		ch->state = new;
	spin_unlock_irqrestore(&ch->spinlock, flags);
	return prev == old;
}

/**
 * srpt_event_handler() - Asynchronous IB event callback function.
 *
 * Callback function called by the InfiniBand core when an asynchronous IB
 * event occurs. This callback may occur in interrupt context. See also
 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
 * Architecture Specification.
 */
static void srpt_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;

	sdev = ib_get_client_data(event->device, &srpt_client);
	if (!sdev || sdev->device != event->device)
		return;

	pr_debug("ASYNC event= %d on device= %s\n", event->event,
		 srpt_sdev_name(sdev));

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			sport->lid = 0;
			sport->sm_lid = 0;
		}
		break;
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
	case IB_EVENT_GID_CHANGE:
		/* Refresh port data asynchronously. */
		if (event->element.port_num <= sdev->device->phys_port_cnt) {
			sport = &sdev->port[event->element.port_num - 1];
			if (!sport->lid && !sport->sm_lid)
				schedule_work(&sport->work);
		}
		break;
	default:
		pr_err("received unrecognized IB event %d\n",
		       event->event);
		break;
	}
}

/**
 * srpt_srq_event() - SRQ event callback function.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
	pr_info("SRQ event %d\n", event->event);
}

/**
 * srpt_qp_event() - QP event callback function.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
	pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
		 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		ib_cm_notify(ch->cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
					       CH_RELEASING))
			srpt_release_channel(ch);
		else
			pr_debug("%s: state %d - ignored LAST_WQE.\n",
				 ch->sess_name, srpt_get_ch_state(ch));
		break;
	default:
		pr_err("received unrecognized IB QP event %d\n", event->event);
		break;
	}
}

/**
 * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
 *
 * @slot: one-based slot number.
 * @value: four-bit value.
 *
 * Copies the lowest four bits of value into element slot of the array of
 * four-bit elements called c_list (controller list). The index slot is
 * one-based.
 */
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
	u16 id;
	u8 tmp;

	id = (slot - 1) / 2;
	if (slot & 0x1) {
		tmp = c_list[id] & 0xf;
		c_list[id] = (value << 4) | tmp;
	} else {
		tmp = c_list[id] & 0xf0;
		c_list[id] = (value & 0xf) | tmp;
	}
}

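/*
 * Example: starting from an all-zero controller list, srpt_set_ioc(c_list,
 * 1, 1) stores the value in the upper nibble of c_list[0] (yielding 0x10)
 * and srpt_set_ioc(c_list, 2, 1) in the lower nibble (yielding 0x01); two
 * consecutive one-based slots hence share each byte of c_list[].
 */
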
/**
 * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
	struct ib_class_port_info *cif;

	cif = (struct ib_class_port_info *)mad->data;
	memset(cif, 0, sizeof *cif);
	cif->base_version = 1;
	cif->class_version = 1;
	cif->resp_time_value = 20;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_iou() - Write IOUnitInfo to a management datagram.
 *
 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
 */
static void srpt_get_iou(struct ib_dm_mad *mad)
{
	struct ib_dm_iou_info *ioui;
	u8 slot;
	int i;

	ioui = (struct ib_dm_iou_info *)mad->data;
	ioui->change_id = cpu_to_be16(1);
	ioui->max_controllers = 16;

	/* set present for slot 1 and empty for the rest */
	srpt_set_ioc(ioui->controller_list, 1, 1);
	for (i = 1, slot = 2; i < 16; i++, slot++)
		srpt_set_ioc(ioui->controller_list, slot, 0);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
 *
 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
 * r16a document.
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
			 struct ib_dm_mad *mad)
{
	struct srpt_device *sdev = sport->sdev;
	struct ib_dm_ioc_profile *iocp;

	iocp = (struct ib_dm_ioc_profile *)mad->data;

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	memset(iocp, 0, sizeof *iocp);
	strcpy(iocp->id_string, SRPT_ID_STRING);
	iocp->guid = cpu_to_be64(srpt_service_guid);
	iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
	iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
	iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
	iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
	iocp->subsys_device_id = 0x0;
	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
	iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
	iocp->rdma_read_depth = 4;
	iocp->send_size = cpu_to_be32(srp_max_req_size);
	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
					  1U << 24));
	iocp->num_svc_entries = 1;
	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
 *
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
	struct ib_dm_svc_entries *svc_entries;

	WARN_ON(!ioc_guid);

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2 || lo > hi || hi > 1) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	svc_entries = (struct ib_dm_svc_entries *)mad->data;
	memset(svc_entries, 0, sizeof *svc_entries);
	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
	snprintf(svc_entries->service_entries[0].name,
		 sizeof(svc_entries->service_entries[0].name),
		 "%s%016llx",
		 SRP_SERVICE_NAME_PREFIX,
		 ioc_guid);

	mad->mad_hdr.status = 0;
}

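/*
 * For DM_ATTR_SVC_ENTRIES the 32-bit attribute modifier packs three fields,
 * as decoded in srpt_mgmt_method_get() below: bits 31..16 hold the slot
 * number, bits 15..8 the index of the last ('hi') and bits 7..0 the index of
 * the first ('lo') requested service entry. An attr_mod of 0x00010100, for
 * example, requests service entries 0..1 of slot 1.
 */
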
/**
 * srpt_mgmt_method_get() - Process a received management datagram.
 * @sp:      source port through which the MAD has been received.
 * @rq_mad:  received MAD.
 * @rsp_mad: response MAD.
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
				 struct ib_dm_mad *rsp_mad)
{
	u16 attr_id;
	u32 slot;
	u8 hi, lo;

	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
	switch (attr_id) {
	case DM_ATTR_CLASS_PORT_INFO:
		srpt_get_class_port_info(rsp_mad);
		break;
	case DM_ATTR_IOU_INFO:
		srpt_get_iou(rsp_mad);
		break;
	case DM_ATTR_IOC_PROFILE:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		srpt_get_ioc(sp, slot, rsp_mad);
		break;
	case DM_ATTR_SVC_ENTRIES:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		hi = (u8) ((slot >> 8) & 0xff);
		lo = (u8) (slot & 0xff);
		slot = (u16) ((slot >> 16) & 0xffff);
		srpt_get_svc_entries(srpt_service_guid,
				     slot, hi, lo, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	}
}

/**
 * srpt_mad_send_handler() - Post MAD-send callback function.
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
{
	ib_destroy_ah(mad_wc->send_buf->ah);
	ib_free_send_mad(mad_wc->send_buf);
}

/**
 * srpt_mad_recv_handler() - MAD reception callback function.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_recv_wc *mad_wc)
{
	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct ib_dm_mad *dm_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto err;

	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				 GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;

	dm_mad = rsp->mad;
	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	dm_mad->mad_hdr.status = 0;

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
		break;
	case IB_MGMT_METHOD_SET:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	default:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
		break;
	}

	if (!ib_post_send_mad(rsp, NULL)) {
		ib_free_recv_mad(mad_wc);
		/* will destroy_ah & free_send_mad in send completion */
		return;
	}

	ib_free_send_mad(rsp);

err_rsp:
	ib_destroy_ah(ah);
err:
	ib_free_recv_mad(mad_wc);
}

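/*
 * Ownership note for the two MAD callbacks above: after a successful
 * ib_post_send_mad() the address handle and the response buffer are released
 * by srpt_mad_send_handler() when the send completes; on any failure
 * srpt_mad_recv_handler() itself unwinds via ib_free_send_mad(),
 * ib_destroy_ah() and ib_free_recv_mad().
 */
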
/**
 * srpt_refresh_port() - Configure a HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */
static int srpt_refresh_port(struct srpt_port *sport)
{
	struct ib_mad_reg_req reg_req;
	struct ib_port_modify port_modify;
	struct ib_port_attr port_attr;
	int ret;

	memset(&port_modify, 0, sizeof port_modify);
	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	port_modify.clr_port_cap_mask = 0;

	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
	if (ret)
		goto err_mod_port;

	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
	if (ret)
		goto err_query_port;

	sport->sm_lid = port_attr.sm_lid;
	sport->lid = port_attr.lid;

	ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid,
			   NULL);
	if (ret)
		goto err_query_port;

	if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof reg_req);
		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
							 sport->port,
							 IB_QPT_GSI,
							 &reg_req, 0,
							 srpt_mad_send_handler,
							 srpt_mad_recv_handler,
							 sport, 0);
		if (IS_ERR(sport->mad_agent)) {
			ret = PTR_ERR(sport->mad_agent);
			sport->mad_agent = NULL;
			goto err_query_port;
		}
	}

	return 0;

err_query_port:

	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);

err_mod_port:

	return ret;
}

/**
 * srpt_unregister_mad_agent() - Unregister MAD callback functions.
 *
 * Note: It is safe to call this function more than once for the same device.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	};
	struct srpt_port *sport;
	int i;

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
		if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
			pr_err("disabling MAD processing failed.\n");
		if (sport->mad_agent) {
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
		}
	}
}

/**
 * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
					   int ioctx_size, int dma_size,
					   enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx;

	ioctx = kmalloc(ioctx_size, GFP_KERNEL);
	if (!ioctx)
		goto err;

	ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
	if (!ioctx->buf)
		goto err_free_ioctx;

	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;

	return ioctx;

err_free_buf:
	kfree(ioctx->buf);
err_free_ioctx:
	kfree(ioctx);
err:
	return NULL;
}

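/*
 * Note: srpt_alloc_ioctx() maps ioctx->buf for DMA with ib_dma_map_single(),
 * so srpt_free_ioctx() below must be invoked with the same dma_size and
 * direction that were used for the allocation.
 */
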
/**
 * srpt_free_ioctx() - Free an SRPT I/O context structure.
 */
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
			    int dma_size, enum dma_data_direction dir)
{
	if (!ioctx)
		return;

	ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
	kfree(ioctx->buf);
	kfree(ioctx);
}

/**
 * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
 * @sdev:       Device to allocate the I/O context ring for.
 * @ring_size:  Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @dma_size:   DMA buffer size.
 * @dir:        DMA data direction.
 */
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
				int dma_size, enum dma_data_direction dir)
{
	struct srpt_ioctx **ring;
	int i;

	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
		&& ioctx_size != sizeof(struct srpt_send_ioctx));

	ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
	if (!ring)
		goto out;
	for (i = 0; i < ring_size; ++i) {
		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
		if (!ring[i])
			goto err;
		ring[i]->index = i;
	}
	goto out;

err:
	while (--i >= 0)
		srpt_free_ioctx(sdev, ring[i], dma_size, dir);
	kfree(ring);
	ring = NULL;
out:
	return ring;
}

/**
 * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
 */
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
				 int dma_size, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < ring_size; ++i)
		srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
	kfree(ioctx_ring);
}

/**
 * srpt_get_cmd_state() - Get the state of a SCSI command.
 */
static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return state;
}

/**
 * srpt_set_cmd_state() - Set the state of a SCSI command.
 *
 * Does not modify the state of aborted commands. Returns the previous command
 * state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
						  enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	BUG_ON(!ioctx);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous != SRPT_STATE_DONE)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	return previous;
}

/**
 * srpt_test_and_set_cmd_state() - Test and set the state of a command.
 *
 * Returns true if and only if the previous command state was equal to 'old'.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
					enum srpt_command_state old,
					enum srpt_command_state new)
{
	enum srpt_command_state previous;
	unsigned long flags;

	WARN_ON(!ioctx);
	WARN_ON(old == SRPT_STATE_DONE);
	WARN_ON(new == SRPT_STATE_NEW);

	spin_lock_irqsave(&ioctx->spinlock, flags);
	previous = ioctx->state;
	if (previous == old)
		ioctx->state = new;
	spin_unlock_irqrestore(&ioctx->spinlock, flags);
	return previous == old;
}

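/*
 * A rough sketch of the srpt_command_state transitions driven by the helpers
 * above (the write path enters SRPT_STATE_NEED_DATA from the data-out
 * handling code):
 *
 *   SRPT_STATE_NEW --(write cmd)--> SRPT_STATE_NEED_DATA --(RDMA read done)
 *     --> SRPT_STATE_DATA_IN --> SRPT_STATE_CMD_RSP_SENT --> SRPT_STATE_DONE
 *
 * Task management requests instead go SRPT_STATE_MGMT -->
 * SRPT_STATE_MGMT_RSP_SENT --> SRPT_STATE_DONE. srpt_abort_cmd() may
 * short-circuit any of these paths to SRPT_STATE_DONE.
 */
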
/**
 * srpt_post_recv() - Post an IB receive request.
 */
static int srpt_post_recv(struct srpt_device *sdev,
			  struct srpt_recv_ioctx *ioctx)
{
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;

	BUG_ON(!sdev);
	wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);

	list.addr = ioctx->ioctx.dma;
	list.length = srp_max_req_size;
	list.lkey = sdev->pd->local_dma_lkey;

	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
}

/**
 * srpt_post_send() - Post an IB send request.
 *
 * Returns zero upon success and a non-zero value upon failure.
 */
static int srpt_post_send(struct srpt_rdma_ch *ch,
			  struct srpt_send_ioctx *ioctx, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct srpt_device *sdev = ch->sport->sdev;
	int ret;

	atomic_inc(&ch->req_lim);

	ret = -ENOMEM;
	if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
		pr_warn("IB send queue full (needed 1)\n");
		goto out;
	}

	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
				      DMA_TO_DEVICE);

	list.addr = ioctx->ioctx.dma;
	list.length = len;
	list.lkey = sdev->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ch->qp, &wr, &bad_wr);

out:
	if (ret < 0) {
		atomic_inc(&ch->sq_wr_avail);
		atomic_dec(&ch->req_lim);
	}
	return ret;
}

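/*
 * Example buf_fmt values for srpt_get_desc_tbl() below, assuming the
 * SRP_DATA_DESC_DIRECT (1) and SRP_DATA_DESC_INDIRECT (2) descriptor format
 * codes from <scsi/srp.h>: 0x01 denotes a direct DATA-IN descriptor
 * (DMA_FROM_DEVICE), 0x10 a direct DATA-OUT descriptor (DMA_TO_DEVICE) and
 * 0x20 an indirect DATA-OUT descriptor.
 */
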
/**
 * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
 * @ioctx:    Pointer to the I/O context associated with the request.
 * @srp_cmd:  Pointer to the SRP_CMD request data.
 * @dir:      Pointer to the variable to which the transfer direction will be
 *            written.
 * @data_len: Pointer to the variable to which the total data length of all
 *            descriptors in the SRP_CMD request will be written.
 *
 * This function initializes ioctx->n_rbuf and ioctx->rbufs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
 */
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
			     struct srp_cmd *srp_cmd,
			     enum dma_data_direction *dir, u64 *data_len)
{
	struct srp_indirect_buf *idb;
	struct srp_direct_buf *db;
	unsigned add_cdb_offset;
	int ret;

	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
		     && !__same_type(srp_cmd->add_data[0], (u8)0));

	BUG_ON(!dir);
	BUG_ON(!data_len);

	ret = 0;
	*data_len = 0;

	/*
	 * The lower four bits of the buffer format field contain the DATA-IN
	 * buffer descriptor format, and the highest four bits contain the
	 * DATA-OUT buffer descriptor format.
	 */
	*dir = DMA_NONE;
	if (srp_cmd->buf_fmt & 0xf)
		/* DATA-IN: transfer data from target to initiator (read). */
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		/* DATA-OUT: transfer data from initiator to target (write). */
		*dir = DMA_TO_DEVICE;

	/*
	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
	 * CDB LENGTH' field are reserved and the size in bytes of this field
	 * is four times the value specified in bits 3..7. Hence the "& ~3".
	 */
	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		ioctx->n_rbuf = 1;
		ioctx->rbufs = &ioctx->single_rbuf;

		db = (struct srp_direct_buf *)(srp_cmd->add_data
					       + add_cdb_offset);
		memcpy(ioctx->rbufs, db, sizeof *db);
		*data_len = be32_to_cpu(db->len);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
						  + add_cdb_offset);

		ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;

		if (ioctx->n_rbuf >
		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
			pr_err("received unsupported SRP_CMD request"
			       " type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
			       sizeof(*db));
			ioctx->n_rbuf = 0;
			ret = -EINVAL;
			goto out;
		}

		if (ioctx->n_rbuf == 1)
			ioctx->rbufs = &ioctx->single_rbuf;
		else {
			ioctx->rbufs =
				kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
			if (!ioctx->rbufs) {
				ioctx->n_rbuf = 0;
				ret = -ENOMEM;
				goto out;
			}
		}

		db = idb->desc_list;
		memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
		*data_len = be32_to_cpu(idb->len);
	}
out:
	return ret;
}

/**
 * srpt_init_ch_qp() - Initialize queue pair attributes.
 *
 * Initializes the attributes of queue pair 'qp' by allowing local write,
 * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kzalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
	    IB_ACCESS_REMOTE_WRITE;
	attr->port_num = ch->sport->port;
	attr->pkey_index = 0;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
			   IB_QP_PKEY_INDEX);

	kfree(attr);
	return ret;
}

/**
 * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_dest_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

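/*
 * Together with srpt_init_ch_qp() and srpt_ch_qp_rtr() above, the function
 * below completes the usual RC QP bring-up sequence for a new channel:
 * RESET -> INIT -> RTR -> RTS, with the RTR and RTS attributes filled in by
 * ib_cm_init_qp_attr() from the connection manager state.
 */
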
/**
 * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}

/**
 * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
 */
static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				    struct srpt_send_ioctx *ioctx)
{
	struct scatterlist *sg;
	enum dma_data_direction dir;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);

	while (ioctx->n_rdma)
		kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);

	kfree(ioctx->rdma_ius);
	ioctx->rdma_ius = NULL;

	if (ioctx->mapped_sg_count) {
		sg = ioctx->sg;
		WARN_ON(!sg);
		dir = ioctx->cmd.data_direction;
		BUG_ON(dir == DMA_NONE);
		ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
				opposite_dma_dir(dir));
		ioctx->mapped_sg_count = 0;
	}
}

/**
 * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
 */
static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
				 struct srpt_send_ioctx *ioctx)
{
	struct ib_device *dev = ch->sport->sdev->device;
	struct se_cmd *cmd;
	struct scatterlist *sg, *sg_orig;
	int sg_cnt;
	enum dma_data_direction dir;
	struct rdma_iu *riu;
	struct srp_direct_buf *db;
	dma_addr_t dma_addr;
	struct ib_sge *sge;
	u64 raddr;
	u32 rsize;
	u32 tsize;
	u32 dma_len;
	int count, nrdma;
	int i, j, k;

	BUG_ON(!ch);
	BUG_ON(!ioctx);
	cmd = &ioctx->cmd;
	dir = cmd->data_direction;
	BUG_ON(dir == DMA_NONE);

	ioctx->sg = sg = sg_orig = cmd->t_data_sg;
	ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;

	count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
			      opposite_dma_dir(dir));
	if (unlikely(!count))
		return -EAGAIN;

	ioctx->mapped_sg_count = count;

	if (ioctx->rdma_ius && ioctx->n_rdma_ius)
		nrdma = ioctx->n_rdma_ius;
	else {
		nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
			+ ioctx->n_rbuf;

		ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
		if (!ioctx->rdma_ius)
			goto free_mem;

		ioctx->n_rdma_ius = nrdma;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	dma_len = ib_sg_dma_len(dev, &sg[0]);
	riu = ioctx->rdma_ius;

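	/*
	 * Pass 1 of 2: compute, per remote buffer descriptor, how many ib_sge
	 * entries are needed and allocate the rdma_iu SGE arrays; pass 2
	 * further below fills those arrays with the mapped DMA addresses.
	 */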
	/*
	 * For each remote descriptor, compute the number of ib_sge entries
	 * needed. If for one RDMA operation that number stays below
	 * SRPT_DEF_SG_PER_WQE, a single rdma_iu (one RDMA work request) per
	 * remote descriptor suffices; otherwise extra rdma_iu entries are
	 * allocated to carry the additional ib_sge entries in further RDMA
	 * work requests.
	 */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		raddr = be64_to_cpu(db->va);
		riu->raddr = raddr;
		riu->rkey = be32_to_cpu(db->key);
		riu->sge_cnt = 0;

		/* calculate how many sge required for this remote_buf */
		while (rsize > 0 && tsize > 0) {

			if (rsize >= dma_len) {
				tsize -= dma_len;
				rsize -= dma_len;
				raddr += dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = ib_sg_dma_len(
								dev, sg);
					}
				}
			} else {
				tsize -= rsize;
				dma_len -= rsize;
				rsize = 0;
			}

			++riu->sge_cnt;

			if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
				++ioctx->n_rdma;
				riu->sge =
				    kmalloc(riu->sge_cnt * sizeof *riu->sge,
					    GFP_KERNEL);
				if (!riu->sge)
					goto free_mem;

				++riu;
				riu->sge_cnt = 0;
				riu->raddr = raddr;
				riu->rkey = be32_to_cpu(db->key);
			}
		}

		++ioctx->n_rdma;
		riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
				   GFP_KERNEL);
		if (!riu->sge)
			goto free_mem;
	}

	db = ioctx->rbufs;
	tsize = cmd->data_length;
	riu = ioctx->rdma_ius;
	sg = sg_orig;
	dma_len = ib_sg_dma_len(dev, &sg[0]);
	dma_addr = ib_sg_dma_address(dev, &sg[0]);

	/* This second loop maps the mapped SG addresses to rdma_iu->ib_sge. */
	for (i = 0, j = 0;
	     j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
		rsize = be32_to_cpu(db->len);
		sge = riu->sge;
		k = 0;

		while (rsize > 0 && tsize > 0) {
			sge->addr = dma_addr;
			sge->lkey = ch->sport->sdev->pd->local_dma_lkey;

			if (rsize >= dma_len) {
				sge->length =
					(tsize < dma_len) ? tsize : dma_len;
				tsize -= dma_len;
				rsize -= dma_len;

				if (tsize > 0) {
					++j;
					if (j < count) {
						sg = sg_next(sg);
						dma_len = ib_sg_dma_len(
								dev, sg);
						dma_addr = ib_sg_dma_address(
								dev, sg);
					}
				}
			} else {
				sge->length = (tsize < rsize) ? tsize : rsize;
				tsize -= rsize;
				dma_len -= rsize;
				dma_addr += rsize;
				rsize = 0;
			}

			++k;
			if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
				++riu;
				sge = riu->sge;
				k = 0;
			} else if (rsize > 0 && tsize > 0)
				++sge;
		}
	}

	return 0;

free_mem:
	srpt_unmap_sg_to_ib_sge(ch, ioctx);

	return -ENOMEM;
}

/**
 * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	unsigned long flags;

	BUG_ON(!ch);

	ioctx = NULL;
	spin_lock_irqsave(&ch->spinlock, flags);
	if (!list_empty(&ch->free_list)) {
		ioctx = list_first_entry(&ch->free_list,
					 struct srpt_send_ioctx, free_list);
		list_del(&ioctx->free_list);
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	if (!ioctx)
		return ioctx;

	BUG_ON(ioctx->ch != ch);
	spin_lock_init(&ioctx->spinlock);
	ioctx->state = SRPT_STATE_NEW;
	ioctx->n_rbuf = 0;
	ioctx->rbufs = NULL;
	ioctx->n_rdma = 0;
	ioctx->n_rdma_ius = 0;
	ioctx->rdma_ius = NULL;
	ioctx->mapped_sg_count = 0;
	init_completion(&ioctx->tx_done);
	ioctx->queue_status_only = false;
	/*
	 * transport_init_se_cmd() does not initialize all fields, so do it
	 * here.
	 */
	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));

	return ioctx;
}

/**
 * srpt_abort_cmd() - Abort a SCSI command.
 * @ioctx: I/O context associated with the SCSI command.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;
	unsigned long flags;

	BUG_ON(!ioctx);

	/*
	 * If the command is in a state where the target core is waiting for
	 * the ib_srpt driver, change the state to the next state. Changing
	 * the state of the command from SRPT_STATE_NEED_DATA to
	 * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
	 * function a second time.
	 */

	spin_lock_irqsave(&ioctx->spinlock, flags);
	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEED_DATA:
		ioctx->state = SRPT_STATE_DATA_IN;
		break;
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_CMD_RSP_SENT:
	case SRPT_STATE_MGMT_RSP_SENT:
		ioctx->state = SRPT_STATE_DONE;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&ioctx->spinlock, flags);

	if (state == SRPT_STATE_DONE) {
		struct srpt_rdma_ch *ch = ioctx->ch;

		BUG_ON(ch->sess == NULL);

		target_put_sess_cmd(&ioctx->cmd);
		goto out;
	}

	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
		 ioctx->cmd.tag);

	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_MGMT:
		/*
		 * Do nothing - defer abort processing until
		 * srpt_queue_response() is invoked.
		 */
		WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
		break;
	case SRPT_STATE_NEED_DATA:
		/* DMA_TO_DEVICE (write) - RDMA read error. */

		/* XXX(hch): this is a horrible layering violation.. */
		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
		ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
		break;
	case SRPT_STATE_CMD_RSP_SENT:
		/*
		 * SRP_RSP sending failed or the SRP_RSP send completion has
		 * not been received in time.
		 */
		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
		target_put_sess_cmd(&ioctx->cmd);
		break;
	case SRPT_STATE_MGMT_RSP_SENT:
		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
		target_put_sess_cmd(&ioctx->cmd);
		break;
	default:
		WARN(1, "Unexpected command state (%d)", state);
		break;
	}

out:
	return state;
}

/**
 * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
 */
static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
{
	struct srpt_send_ioctx *ioctx;
	enum srpt_command_state state;
	u32 index;

	atomic_inc(&ch->sq_wr_avail);

	index = idx_from_wr_id(wr_id);
	ioctx = ch->ioctx_ring[index];
	state = srpt_get_cmd_state(ioctx);

	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
		&& state != SRPT_STATE_MGMT_RSP_SENT
		&& state != SRPT_STATE_NEED_DATA
		&& state != SRPT_STATE_DONE);

	/* If SRP_RSP sending failed, undo the ch->req_lim change. */
	if (state == SRPT_STATE_CMD_RSP_SENT
	    || state == SRPT_STATE_MGMT_RSP_SENT)
		atomic_dec(&ch->req_lim);

	srpt_abort_cmd(ioctx);
}

/**
 * srpt_handle_send_comp() - Process an IB send completion notification.
 */
static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;

	atomic_inc(&ch->sq_wr_avail);

	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

	if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
		    && state != SRPT_STATE_MGMT_RSP_SENT
		    && state != SRPT_STATE_DONE))
		pr_debug("state = %d\n", state);

	if (state != SRPT_STATE_DONE) {
		srpt_unmap_sg_to_ib_sge(ch, ioctx);
		transport_generic_free_cmd(&ioctx->cmd, 0);
	} else {
		pr_err("IB completion has been received too late for"
		       " wr_id = %u.\n", ioctx->ioctx.index);
	}
}

/**
 * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
 *
 * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
 * the data that has been transferred via IB RDMA had to be postponed until the
 * check_stop_free() callback. None of this is necessary anymore and needs to
 * be cleaned up.
 */
static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  enum srpt_opcode opcode)
{
	WARN_ON(ioctx->n_rdma <= 0);
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);

	if (opcode == SRPT_RDMA_READ_LAST) {
		if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
						SRPT_STATE_DATA_IN))
			target_execute_cmd(&ioctx->cmd);
		else
			pr_err("%s[%d]: wrong state = %d\n", __func__,
			       __LINE__, srpt_get_cmd_state(ioctx));
	} else if (opcode == SRPT_RDMA_ABORT) {
		ioctx->rdma_aborted = true;
	} else {
		WARN(true, "unexpected opcode %d\n", opcode);
	}
}

/**
 * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
 */
static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
				      struct srpt_send_ioctx *ioctx,
				      enum srpt_opcode opcode)
{
	enum srpt_command_state state;

	state = srpt_get_cmd_state(ioctx);
	switch (opcode) {
	case SRPT_RDMA_READ_LAST:
		if (ioctx->n_rdma <= 0) {
			pr_err("Received invalid RDMA read"
			       " error completion with idx %d\n",
			       ioctx->ioctx.index);
			break;
		}
		atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
		if (state == SRPT_STATE_NEED_DATA)
			srpt_abort_cmd(ioctx);
		else
			pr_err("%s[%d]: wrong state = %d\n",
			       __func__, __LINE__, state);
		break;
	case SRPT_RDMA_WRITE_LAST:
		break;
	default:
		pr_err("%s[%d]: opcode = %u\n", __func__, __LINE__, opcode);
		break;
	}
}

/**
 * srpt_build_cmd_rsp() - Build an SRP_RSP response.
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *   be built in the buffer ioctx->buf points at and hence this function will
 *   overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx, u64 tag,
			      int status)
{
	struct srp_rsp *srp_rsp;
	const u8 *sense_data;
	int sense_data_len, max_sense_len;

	/*
	 * The lowest bit of all SAM-3 status codes is zero (see also
	 * paragraph 5.3 in SAM-3).
	 */
	WARN_ON(status & 1);

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);

	sense_data = ioctx->sense_data;
	sense_data_len = ioctx->cmd.scsi_sense_length;
	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

	memset(srp_rsp, 0, sizeof *srp_rsp);
	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;
	srp_rsp->status = status;

	if (sense_data_len) {
		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
		if (sense_data_len > max_sense_len) {
			pr_warn("truncated sense data from %d to %d"
				" bytes\n", sense_data_len, max_sense_len);
			sense_data_len = max_sense_len;
		}

		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
		memcpy(srp_rsp + 1, sense_data, sense_data_len);
	}

	return sizeof(*srp_rsp) + sense_data_len;
}

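/*
 * Credit handling note: srpt_build_cmd_rsp() above and
 * srpt_build_tskmgmt_rsp() below both store 1 + atomic_xchg(&ch->req_lim_delta,
 * 0) in req_lim_delta, i.e. every SRP_RSP hands the initiator one credit for
 * the request being answered plus any credits that accumulated while no
 * response was outstanding.
 */
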
/**
 * srpt_build_tskmgmt_rsp() - Build a task management response.
 * @ch:       RDMA channel through which the request has been received.
 * @ioctx:    I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag:      Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response.
 */
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  u8 rsp_code, u64 tag)
{
	struct srp_rsp *srp_rsp;
	int resp_data_len;
	int resp_len;

	resp_data_len = 4;
	resp_len = sizeof(*srp_rsp) + resp_data_len;

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);
	memset(srp_rsp, 0, sizeof *srp_rsp);

	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;

	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
	srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
	srp_rsp->data[3] = rsp_code;

	return resp_len;
}

#define NO_SUCH_LUN ((uint64_t)-1LL)

/*
 * SCSI LUN addressing method. See also SAM-2 and the section about
 * eight byte LUNs.
 */
enum scsi_lun_addr_method {
	SCSI_LUN_ADDR_METHOD_PERIPHERAL   = 0,
	SCSI_LUN_ADDR_METHOD_FLAT         = 1,
	SCSI_LUN_ADDR_METHOD_LUN          = 2,
	SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
};

/*
 * srpt_unpack_lun() - Convert from network LUN to linear LUN.
 *
 * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
 * order (big endian) to a linear LUN. Supports three LUN addressing methods:
 * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
 */
static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
{
	uint64_t res = NO_SUCH_LUN;
	int addressing_method;

	if (unlikely(len < 2)) {
		pr_err("Illegal LUN length %d, expected 2 bytes or more\n",
		       len);
		goto out;
	}

	switch (len) {
	case 8:
		if ((*((__be64 *)lun) &
		     cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
			goto out_err;
		break;
	case 4:
		if (*((__be16 *)&lun[2]) != 0)
			goto out_err;
		break;
	case 6:
		if (*((__be32 *)&lun[2]) != 0)
			goto out_err;
		break;
	case 2:
		break;
	default:
		goto out_err;
	}

	addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
	switch (addressing_method) {
	case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
	case SCSI_LUN_ADDR_METHOD_FLAT:
	case SCSI_LUN_ADDR_METHOD_LUN:
		res = *(lun + 1) | (((*lun) & 0x3f) << 8);
		break;

	case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
	default:
		pr_err("Unimplemented LUN addressing method %u\n",
		       addressing_method);
		break;
	}

out:
	return res;

out_err:
	pr_err("Support for multi-level LUNs has not yet been implemented\n");
	goto out;
}

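/*
 * Worked example for srpt_unpack_lun(): the eight-byte LUN
 * 00 05 00 00 00 00 00 00 uses the peripheral addressing method
 * (lun[0] >> 6 == 0) and unpacks to LUN 5; 40 05 00 ... (flat addressing)
 * also unpacks to ((0x40 & 0x3f) << 8) | 0x05 == 5.
 */
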
static int srpt_check_stop_free(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(cmd,
				struct srpt_send_ioctx, cmd);

	return target_put_sess_cmd(&ioctx->cmd);
}

/**
 * srpt_handle_cmd() - Process SRP_CMD.
 */
static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
			   struct srpt_recv_ioctx *recv_ioctx,
			   struct srpt_send_ioctx *send_ioctx)
{
	struct se_cmd *cmd;
	struct srp_cmd *srp_cmd;
	uint64_t unpacked_lun;
	u64 data_len;
	enum dma_data_direction dir;
	sense_reason_t ret;
	int rc;

	BUG_ON(!send_ioctx);

	srp_cmd = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;
	cmd->tag = srp_cmd->tag;

	switch (srp_cmd->task_attr) {
	case SRP_CMD_SIMPLE_Q:
		cmd->sam_task_attr = TCM_SIMPLE_TAG;
		break;
	case SRP_CMD_ORDERED_Q:
	default:
		cmd->sam_task_attr = TCM_ORDERED_TAG;
		break;
	case SRP_CMD_HEAD_OF_Q:
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case SRP_CMD_ACA:
		cmd->sam_task_attr = TCM_ACA_TAG;
		break;
	}

	if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
		pr_err("0x%llx: parsing SRP descriptor table failed.\n",
		       srp_cmd->tag);
		ret = TCM_INVALID_CDB_FIELD;
		goto send_sense;
	}

	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
				       sizeof(srp_cmd->lun));
	rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
			&send_ioctx->sense_data[0], unpacked_lun, data_len,
			cmd->sam_task_attr, dir, TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto send_sense;
	}
	return 0;

send_sense:
	transport_send_check_condition_and_sense(cmd, ret, 0);
	return -1;
}

static int srp_tmr_to_tcm(int fn)
{
	switch (fn) {
	case SRP_TSK_ABORT_TASK:
		return TMR_ABORT_TASK;
	case SRP_TSK_ABORT_TASK_SET:
		return TMR_ABORT_TASK_SET;
	case SRP_TSK_CLEAR_TASK_SET:
		return TMR_CLEAR_TASK_SET;
	case SRP_TSK_LUN_RESET:
		return TMR_LUN_RESET;
	case SRP_TSK_CLEAR_ACA:
		return TMR_CLEAR_ACA;
	default:
		return -1;
	}
}

/**
 * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
 *
 * Hands the task management request to the target core; if submitting the
 * request fails, a response with status TMR_FUNCTION_REJECTED is sent back
 * to the initiator directly.
 *
 * For more information about SRP_TSK_MGMT information units, see also section
 * 6.7 in the SRP r16a document.
 */
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
				 struct srpt_recv_ioctx *recv_ioctx,
				 struct srpt_send_ioctx *send_ioctx)
{
	struct srp_tsk_mgmt *srp_tsk;
	struct se_cmd *cmd;
	struct se_session *sess = ch->sess;
	uint64_t unpacked_lun;
	int tcm_tmr;
	int rc;

	BUG_ON(!send_ioctx);

	srp_tsk = recv_ioctx->ioctx.buf;
	cmd = &send_ioctx->cmd;

	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
		 " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
		 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);

	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
	send_ioctx->cmd.tag = srp_tsk->tag;
	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
				       sizeof(srp_tsk->lun));
	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
				srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
				TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
		goto fail;
	}
	return;
fail:
	transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
}

/**
 * srpt_handle_new_iu() - Process a newly received information unit.
 * @ch:         RDMA channel through which the information unit has been
 *              received.
 * @recv_ioctx: SRPT receive I/O context associated with the information unit.
 * @send_ioctx: SRPT send I/O context, if one has already been allocated.
 */
static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
			       struct srpt_recv_ioctx *recv_ioctx,
			       struct srpt_send_ioctx *send_ioctx)
{
	struct srp_cmd *srp_cmd;
	enum rdma_ch_state ch_state;

	BUG_ON(!ch);
	BUG_ON(!recv_ioctx);

	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
				   recv_ioctx->ioctx.dma, srp_max_req_size,
				   DMA_FROM_DEVICE);

	ch_state = srpt_get_ch_state(ch);
	if (unlikely(ch_state == CH_CONNECTING)) {
		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
		goto out;
	}

	if (unlikely(ch_state != CH_LIVE))
		goto out;

	srp_cmd = recv_ioctx->ioctx.buf;
	if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
		if (!send_ioctx)
			send_ioctx = srpt_get_send_ioctx(ch);
		if (unlikely(!send_ioctx)) {
			list_add_tail(&recv_ioctx->wait_list,
				      &ch->cmd_wait_list);
			goto out;
		}
	}

	switch (srp_cmd->opcode) {
	case SRP_CMD:
		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_TSK_MGMT:
		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_I_LOGOUT:
		pr_err("Not yet implemented: SRP_I_LOGOUT\n");
		break;
	case SRP_CRED_RSP:
		pr_debug("received SRP_CRED_RSP\n");
		break;
	case SRP_AER_RSP:
		pr_debug("received SRP_AER_RSP\n");
		break;
	case SRP_RSP:
		pr_err("Received SRP_RSP\n");
		break;
	default:
		pr_err("received IU with unknown opcode 0x%x\n",
		       srp_cmd->opcode);
		break;
	}

	srpt_post_recv(ch->sport->sdev, recv_ioctx);
out:
	return;
}

static void srpt_process_rcv_completion(struct ib_cq *cq,
					struct srpt_rdma_ch *ch,
					struct ib_wc *wc)
{
	struct srpt_device *sdev = ch->sport->sdev;
	struct srpt_recv_ioctx *ioctx;
	u32 index;

	index = idx_from_wr_id(wc->wr_id);
	if (wc->status == IB_WC_SUCCESS) {
		int req_lim;

		req_lim = atomic_dec_return(&ch->req_lim);
		if (unlikely(req_lim < 0))
			pr_err("req_lim = %d < 0\n", req_lim);
		ioctx = sdev->ioctx_ring[index];
		srpt_handle_new_iu(ch, ioctx, NULL);
	} else {
		pr_info("receiving failed for idx %u with status %d\n",
			index, wc->status);
	}
}

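/*
 * The work request IDs demultiplexed in the completion handlers below are
 * built by encode_wr_id() in ib_srpt.h, which packs the srpt_opcode into the
 * upper and the I/O context ring index into the lower 32 bits of the 64-bit
 * wr_id; idx_from_wr_id() and opcode_from_wr_id() extract the two halves
 * again.
 */
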
/**
 * srpt_process_send_completion() - Process an IB send completion.
 *
 * Note: Although this has not yet been observed during tests, at least in
 * theory it is possible that the srpt_get_send_ioctx() call invoked by
 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
 * value in each response is set to one, and it is possible that this response
 * makes the initiator send a new request before the send completion for that
 * response has been processed. This could e.g. happen if the call to
 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
 * if IB retransmission causes generation of the send completion to be
 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
 * are queued on cmd_wait_list. The code below processes these delayed
 * requests one at a time.
 */
static void srpt_process_send_completion(struct ib_cq *cq,
					 struct srpt_rdma_ch *ch,
					 struct ib_wc *wc)
{
	struct srpt_send_ioctx *send_ioctx;
	uint32_t index;
	enum srpt_opcode opcode;

	index = idx_from_wr_id(wc->wr_id);
	opcode = opcode_from_wr_id(wc->wr_id);
	send_ioctx = ch->ioctx_ring[index];
	if (wc->status == IB_WC_SUCCESS) {
		if (opcode == SRPT_SEND)
			srpt_handle_send_comp(ch, send_ioctx);
		else {
			WARN_ON(opcode != SRPT_RDMA_ABORT &&
				wc->opcode != IB_WC_RDMA_READ);
			srpt_handle_rdma_comp(ch, send_ioctx, opcode);
		}
	} else {
		if (opcode == SRPT_SEND) {
			pr_info("sending response for idx %u failed"
				" with status %d\n", index, wc->status);
			srpt_handle_send_err_comp(ch, wc->wr_id);
		} else if (opcode != SRPT_RDMA_MID) {
			pr_info("RDMA t %d for idx %u failed with"
				" status %d\n", opcode, index, wc->status);
			srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
		}
	}

	while (unlikely(opcode == SRPT_SEND
			&& !list_empty(&ch->cmd_wait_list)
			&& srpt_get_ch_state(ch) == CH_LIVE
			&& (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
		struct srpt_recv_ioctx *recv_ioctx;

		recv_ioctx = list_first_entry(&ch->cmd_wait_list,
					      struct srpt_recv_ioctx,
					      wait_list);
		list_del(&recv_ioctx->wait_list);
		srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
	}
}

static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
{
	struct ib_wc *const wc = ch->wc;
	int i, n;

	WARN_ON(cq != ch->cq);

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
				srpt_process_rcv_completion(cq, ch, &wc[i]);
			else
				srpt_process_send_completion(cq, ch, &wc[i]);
		}
	}
}

/**
 * srpt_completion() - IB completion queue callback function.
 *
 * Notes:
 * - It is guaranteed that a completion handler will never be invoked
 *   concurrently on two different CPUs for the same completion queue. See also
 *   Documentation/infiniband/core_locking.txt and the implementation of
 *   handle_edge_irq() in kernel/irq/chip.c.
 * - When threaded IRQs are enabled, completion handlers are invoked in thread
 *   context instead of interrupt context.
 */
static void srpt_completion(struct ib_cq *cq, void *ctx)
{
	struct srpt_rdma_ch *ch = ctx;

	wake_up_interruptible(&ch->wait_queue);
}

static int srpt_compl_thread(void *arg)
{
	struct srpt_rdma_ch *ch;

	/* Hibernation / freezing of the SRPT kernel thread is not supported. */
	current->flags |= PF_NOFREEZE;

	ch = arg;
	BUG_ON(!ch);
	pr_info("Session %s: kernel thread %s (PID %d) started\n",
		ch->sess_name, ch->thread->comm, current->pid);
	while (!kthread_should_stop()) {
		wait_event_interruptible(ch->wait_queue,
			(srpt_process_completion(ch->cq, ch),
			 kthread_should_stop()));
	}
	pr_info("Session %s: kernel thread %s (PID %d) stopped\n",
		ch->sess_name, ch->thread->comm, current->pid);
	return 0;
}

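/*
 * The function below sizes the completion queue for the worst case, namely
 * ch->rq_size receive completions plus srp_sq_size send completions. If QP
 * allocation fails with -ENOMEM, srp_sq_size is halved and the CQ and QP are
 * recreated, until MIN_SRPT_SQ_SIZE is reached.
 */
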
 */
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
	struct ib_qp_init_attr *qp_init;
	struct srpt_port *sport = ch->sport;
	struct srpt_device *sdev = sport->sdev;
	u32 srp_sq_size = sport->port_attrib.srp_sq_size;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	WARN_ON(ch->rq_size < 1);

	ret = -ENOMEM;
	qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
	if (!qp_init)
		goto out;

retry:
	cq_attr.cqe = ch->rq_size + srp_sq_size;
	ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
			      &cq_attr);
	if (IS_ERR(ch->cq)) {
		ret = PTR_ERR(ch->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       ch->rq_size + srp_sq_size, ret);
		goto out;
	}

	qp_init->qp_context = (void *)ch;
	qp_init->event_handler
		= (void(*)(struct ib_event *, void*))srpt_qp_event;
	qp_init->send_cq = ch->cq;
	qp_init->recv_cq = ch->cq;
	qp_init->srq = sdev->srq;
	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_init->qp_type = IB_QPT_RC;
	qp_init->cap.max_send_wr = srp_sq_size;
	qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;

	ch->qp = ib_create_qp(sdev->pd, qp_init);
	if (IS_ERR(ch->qp)) {
		ret = PTR_ERR(ch->qp);
		if (ret == -ENOMEM) {
			srp_sq_size /= 2;
			if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
				ib_destroy_cq(ch->cq);
				goto retry;
			}
		}
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}

	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
		 qp_init->cap.max_send_wr, ch->cm_id);

	ret = srpt_init_ch_qp(ch, ch->qp);
	if (ret)
		goto err_destroy_qp;

	init_waitqueue_head(&ch->wait_queue);

	pr_debug("creating thread for session %s\n", ch->sess_name);

	ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
	if (IS_ERR(ch->thread)) {
		ret = PTR_ERR(ch->thread);
		pr_err("failed to create kernel thread %ld\n",
		       PTR_ERR(ch->thread));
		ch->thread = NULL;
		goto err_destroy_qp;
	}

out:
	kfree(qp_init);
	return ret;

err_destroy_qp:
	ib_destroy_qp(ch->qp);
err_destroy_cq:
	ib_destroy_cq(ch->cq);
	goto out;
}

static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
{
	if (ch->thread)
		kthread_stop(ch->thread);

	ib_destroy_qp(ch->qp);
	ib_destroy_cq(ch->cq);
}

/**
 * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
 *
 * Reset the QP and make sure all resources associated with the channel will
 * be deallocated at an appropriate time.
 *
 * Note: The caller must hold ch->sport->sdev->spinlock.
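 *
 * A minimal sketch of the required calling pattern (srpt_close_ch() below
 * does exactly this):
 *
 *	spin_lock_irq(&sdev->spinlock);
 *	__srpt_close_ch(ch);
 *	spin_unlock_irq(&sdev->spinlock);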
2116 */ 2117static void __srpt_close_ch(struct srpt_rdma_ch *ch) 2118{ 2119 enum rdma_ch_state prev_state; 2120 unsigned long flags; 2121 2122 spin_lock_irqsave(&ch->spinlock, flags); 2123 prev_state = ch->state; 2124 switch (prev_state) { 2125 case CH_CONNECTING: 2126 case CH_LIVE: 2127 ch->state = CH_DISCONNECTING; 2128 break; 2129 default: 2130 break; 2131 } 2132 spin_unlock_irqrestore(&ch->spinlock, flags); 2133 2134 switch (prev_state) { 2135 case CH_CONNECTING: 2136 ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0, 2137 NULL, 0); 2138 /* fall through */ 2139 case CH_LIVE: 2140 if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0) 2141 pr_err("sending CM DREQ failed.\n"); 2142 break; 2143 case CH_DISCONNECTING: 2144 break; 2145 case CH_DRAINING: 2146 case CH_RELEASING: 2147 break; 2148 } 2149} 2150 2151/** 2152 * srpt_close_ch() - Close an RDMA channel. 2153 */ 2154static void srpt_close_ch(struct srpt_rdma_ch *ch) 2155{ 2156 struct srpt_device *sdev; 2157 2158 sdev = ch->sport->sdev; 2159 spin_lock_irq(&sdev->spinlock); 2160 __srpt_close_ch(ch); 2161 spin_unlock_irq(&sdev->spinlock); 2162} 2163 2164/** 2165 * srpt_shutdown_session() - Whether or not a session may be shut down. 2166 */ 2167static int srpt_shutdown_session(struct se_session *se_sess) 2168{ 2169 struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr; 2170 unsigned long flags; 2171 2172 spin_lock_irqsave(&ch->spinlock, flags); 2173 if (ch->in_shutdown) { 2174 spin_unlock_irqrestore(&ch->spinlock, flags); 2175 return true; 2176 } 2177 2178 ch->in_shutdown = true; 2179 target_sess_cmd_list_set_waiting(se_sess); 2180 spin_unlock_irqrestore(&ch->spinlock, flags); 2181 2182 return true; 2183} 2184 2185/** 2186 * srpt_drain_channel() - Drain a channel by resetting the IB queue pair. 2187 * @cm_id: Pointer to the CM ID of the channel to be drained. 2188 * 2189 * Note: Must be called from inside srpt_cm_handler to avoid a race between 2190 * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one() 2191 * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one() 2192 * waits until all target sessions for the associated IB device have been 2193 * unregistered and target session registration involves a call to 2194 * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until 2195 * this function has finished). 2196 */ 2197static void srpt_drain_channel(struct ib_cm_id *cm_id) 2198{ 2199 struct srpt_device *sdev; 2200 struct srpt_rdma_ch *ch; 2201 int ret; 2202 bool do_reset = false; 2203 2204 WARN_ON_ONCE(irqs_disabled()); 2205 2206 sdev = cm_id->context; 2207 BUG_ON(!sdev); 2208 spin_lock_irq(&sdev->spinlock); 2209 list_for_each_entry(ch, &sdev->rch_list, list) { 2210 if (ch->cm_id == cm_id) { 2211 do_reset = srpt_test_and_set_ch_state(ch, 2212 CH_CONNECTING, CH_DRAINING) || 2213 srpt_test_and_set_ch_state(ch, 2214 CH_LIVE, CH_DRAINING) || 2215 srpt_test_and_set_ch_state(ch, 2216 CH_DISCONNECTING, CH_DRAINING); 2217 break; 2218 } 2219 } 2220 spin_unlock_irq(&sdev->spinlock); 2221 2222 if (do_reset) { 2223 if (ch->sess) 2224 srpt_shutdown_session(ch->sess); 2225 2226 ret = srpt_ch_qp_err(ch); 2227 if (ret < 0) 2228 pr_err("Setting queue pair in error state" 2229 " failed: %d\n", ret); 2230 } 2231} 2232 2233/** 2234 * srpt_find_channel() - Look up an RDMA channel. 2235 * @cm_id: Pointer to the CM ID of the channel to be looked up. 2236 * 2237 * Return NULL if no matching RDMA channel has been found. 
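 *
 * Typical usage, as in the CM event handlers below (the BUG_ON encodes the
 * callers' assumption that a channel always exists for the given cm_id):
 *
 *	ch = srpt_find_channel(cm_id->context, cm_id);
 *	BUG_ON(!ch);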
 */
static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
					      struct ib_cm_id *cm_id)
{
	struct srpt_rdma_ch *ch;
	bool found;

	WARN_ON_ONCE(irqs_disabled());
	BUG_ON(!sdev);

	found = false;
	spin_lock_irq(&sdev->spinlock);
	list_for_each_entry(ch, &sdev->rch_list, list) {
		if (ch->cm_id == cm_id) {
			found = true;
			break;
		}
	}
	spin_unlock_irq(&sdev->spinlock);

	return found ? ch : NULL;
}

/**
 * srpt_release_channel() - Release channel resources.
 *
 * Schedules the actual release because:
 * - Calling ib_destroy_cm_id() from inside an IB CM callback would
 *   trigger a deadlock.
 * - It is not safe to call TCM transport_* functions from interrupt context.
 */
static void srpt_release_channel(struct srpt_rdma_ch *ch)
{
	schedule_work(&ch->release_work);
}

static void srpt_release_channel_work(struct work_struct *w)
{
	struct srpt_rdma_ch *ch;
	struct srpt_device *sdev;
	struct se_session *se_sess;

	ch = container_of(w, struct srpt_rdma_ch, release_work);
	pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
		 ch->release_done);

	sdev = ch->sport->sdev;
	BUG_ON(!sdev);

	se_sess = ch->sess;
	BUG_ON(!se_sess);

	target_wait_for_sess_cmds(se_sess);

	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
	ch->sess = NULL;

	ib_destroy_cm_id(ch->cm_id);

	srpt_destroy_ch_ib(ch);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->rsp_size, DMA_TO_DEVICE);

	spin_lock_irq(&sdev->spinlock);
	list_del(&ch->list);
	spin_unlock_irq(&sdev->spinlock);

	if (ch->release_done)
		complete(ch->release_done);

	wake_up(&sdev->ch_releaseQ);

	kfree(ch);
}

static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
					       u8 i_port_id[16])
{
	struct srpt_node_acl *nacl;

	list_for_each_entry(nacl, &sport->port_acl_list, list)
		if (memcmp(nacl->i_port_id, i_port_id,
			   sizeof(nacl->i_port_id)) == 0)
			return nacl;

	return NULL;
}

static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
					     u8 i_port_id[16])
{
	struct srpt_node_acl *nacl;

	spin_lock_irq(&sport->port_acl_lock);
	nacl = __srpt_lookup_acl(sport, i_port_id);
	spin_unlock_irq(&sport->port_acl_lock);

	return nacl;
}

/**
 * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
 *
 * Ownership of the cm_id is transferred to the target session if this
 * function returns zero. Otherwise the caller remains the owner of cm_id.
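 *
 * For example, when a login is rejected because no ACL has been configured
 * for the initiator, this function sends an SRP_LOGIN_REJ and returns a
 * negative value; the caller then destroys the cm_id (see the note above
 * srpt_cm_handler()).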
2346 */ 2347static int srpt_cm_req_recv(struct ib_cm_id *cm_id, 2348 struct ib_cm_req_event_param *param, 2349 void *private_data) 2350{ 2351 struct srpt_device *sdev = cm_id->context; 2352 struct srpt_port *sport = &sdev->port[param->port - 1]; 2353 struct srp_login_req *req; 2354 struct srp_login_rsp *rsp; 2355 struct srp_login_rej *rej; 2356 struct ib_cm_rep_param *rep_param; 2357 struct srpt_rdma_ch *ch, *tmp_ch; 2358 struct srpt_node_acl *nacl; 2359 u32 it_iu_len; 2360 int i; 2361 int ret = 0; 2362 2363 WARN_ON_ONCE(irqs_disabled()); 2364 2365 if (WARN_ON(!sdev || !private_data)) 2366 return -EINVAL; 2367 2368 req = (struct srp_login_req *)private_data; 2369 2370 it_iu_len = be32_to_cpu(req->req_it_iu_len); 2371 2372 pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx," 2373 " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d" 2374 " (guid=0x%llx:0x%llx)\n", 2375 be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]), 2376 be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]), 2377 be64_to_cpu(*(__be64 *)&req->target_port_id[0]), 2378 be64_to_cpu(*(__be64 *)&req->target_port_id[8]), 2379 it_iu_len, 2380 param->port, 2381 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]), 2382 be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8])); 2383 2384 rsp = kzalloc(sizeof *rsp, GFP_KERNEL); 2385 rej = kzalloc(sizeof *rej, GFP_KERNEL); 2386 rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL); 2387 2388 if (!rsp || !rej || !rep_param) { 2389 ret = -ENOMEM; 2390 goto out; 2391 } 2392 2393 if (it_iu_len > srp_max_req_size || it_iu_len < 64) { 2394 rej->reason = cpu_to_be32( 2395 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); 2396 ret = -EINVAL; 2397 pr_err("rejected SRP_LOGIN_REQ because its" 2398 " length (%d bytes) is out of range (%d .. 
%d)\n", 2399 it_iu_len, 64, srp_max_req_size); 2400 goto reject; 2401 } 2402 2403 if (!sport->enabled) { 2404 rej->reason = cpu_to_be32( 2405 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2406 ret = -EINVAL; 2407 pr_err("rejected SRP_LOGIN_REQ because the target port" 2408 " has not yet been enabled\n"); 2409 goto reject; 2410 } 2411 2412 if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) { 2413 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN; 2414 2415 spin_lock_irq(&sdev->spinlock); 2416 2417 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) { 2418 if (!memcmp(ch->i_port_id, req->initiator_port_id, 16) 2419 && !memcmp(ch->t_port_id, req->target_port_id, 16) 2420 && param->port == ch->sport->port 2421 && param->listen_id == ch->sport->sdev->cm_id 2422 && ch->cm_id) { 2423 enum rdma_ch_state ch_state; 2424 2425 ch_state = srpt_get_ch_state(ch); 2426 if (ch_state != CH_CONNECTING 2427 && ch_state != CH_LIVE) 2428 continue; 2429 2430 /* found an existing channel */ 2431 pr_debug("Found existing channel %s" 2432 " cm_id= %p state= %d\n", 2433 ch->sess_name, ch->cm_id, ch_state); 2434 2435 __srpt_close_ch(ch); 2436 2437 rsp->rsp_flags = 2438 SRP_LOGIN_RSP_MULTICHAN_TERMINATED; 2439 } 2440 } 2441 2442 spin_unlock_irq(&sdev->spinlock); 2443 2444 } else 2445 rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED; 2446 2447 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid) 2448 || *(__be64 *)(req->target_port_id + 8) != 2449 cpu_to_be64(srpt_service_guid)) { 2450 rej->reason = cpu_to_be32( 2451 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); 2452 ret = -ENOMEM; 2453 pr_err("rejected SRP_LOGIN_REQ because it" 2454 " has an invalid target port identifier.\n"); 2455 goto reject; 2456 } 2457 2458 ch = kzalloc(sizeof *ch, GFP_KERNEL); 2459 if (!ch) { 2460 rej->reason = cpu_to_be32( 2461 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2462 pr_err("rejected SRP_LOGIN_REQ because no memory.\n"); 2463 ret = -ENOMEM; 2464 goto reject; 2465 } 2466 2467 INIT_WORK(&ch->release_work, srpt_release_channel_work); 2468 memcpy(ch->i_port_id, req->initiator_port_id, 16); 2469 memcpy(ch->t_port_id, req->target_port_id, 16); 2470 ch->sport = &sdev->port[param->port - 1]; 2471 ch->cm_id = cm_id; 2472 /* 2473 * Avoid QUEUE_FULL conditions by limiting the number of buffers used 2474 * for the SRP protocol to the command queue size. 
 */
	ch->rq_size = SRPT_RQ_SIZE;
	spin_lock_init(&ch->spinlock);
	ch->state = CH_CONNECTING;
	INIT_LIST_HEAD(&ch->cmd_wait_list);
	ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;

	ch->ioctx_ring = (struct srpt_send_ioctx **)
		srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
				      sizeof(*ch->ioctx_ring[0]),
				      ch->rsp_size, DMA_TO_DEVICE);
	if (!ch->ioctx_ring) {
		ret = -ENOMEM;
		rej->reason = cpu_to_be32(
			      SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		goto free_ch;
	}

	INIT_LIST_HEAD(&ch->free_list);
	for (i = 0; i < ch->rq_size; i++) {
		ch->ioctx_ring[i]->ch = ch;
		list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
	}

	ret = srpt_create_ch_ib(ch);
	if (ret) {
		rej->reason = cpu_to_be32(
			      SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because creating"
		       " a new RDMA channel failed.\n");
		goto free_ring;
	}

	ret = srpt_ch_qp_rtr(ch, ch->qp);
	if (ret) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because enabling"
		       " RTR failed (error code = %d)\n", ret);
		goto destroy_ib;
	}
	/*
	 * Use the initiator port identifier as the session name.
	 */
	snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
		 be64_to_cpu(*(__be64 *)ch->i_port_id),
		 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));

	pr_debug("registering session %s\n", ch->sess_name);

	nacl = srpt_lookup_acl(sport, ch->i_port_id);
	if (!nacl) {
		pr_info("Rejected login because no ACL has been"
			" configured yet for initiator %s.\n", ch->sess_name);
		rej->reason = cpu_to_be32(
			      SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
		ret = -EINVAL;
		goto destroy_ib;
	}

	ch->sess = transport_init_session(TARGET_PROT_NORMAL);
	if (IS_ERR(ch->sess)) {
		rej->reason = cpu_to_be32(
			      SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_debug("Failed to create session\n");
		ret = PTR_ERR(ch->sess);
		goto destroy_ib;
	}
	ch->sess->se_node_acl = &nacl->nacl;
	transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);

	pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
		 ch->sess_name, ch->cm_id);

	/* create srp_login_response */
	rsp->opcode = SRP_LOGIN_RSP;
	rsp->tag = req->tag;
	rsp->max_it_iu_len = req->req_it_iu_len;
	rsp->max_ti_iu_len = req->req_it_iu_len;
	ch->max_ti_iu_len = it_iu_len;
	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
				   | SRP_BUF_FORMAT_INDIRECT);
	rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
	atomic_set(&ch->req_lim, ch->rq_size);
	atomic_set(&ch->req_lim_delta, 0);

	/* create cm reply */
	rep_param->qp_num = ch->qp->qp_num;
	rep_param->private_data = (void *)rsp;
	rep_param->private_data_len = sizeof *rsp;
	rep_param->rnr_retry_count = 7;
	rep_param->flow_control = 1;
	rep_param->failover_accepted = 0;
	rep_param->srq = 1;
	rep_param->responder_resources = 4;
	rep_param->initiator_depth = 4;

	ret = ib_send_cm_rep(cm_id, rep_param);
	if (ret) {
		pr_err("sending SRP_LOGIN_REQ response failed"
		       " (error code = %d)\n", ret);
		goto release_channel;
	}

	spin_lock_irq(&sdev->spinlock);
	list_add_tail(&ch->list, &sdev->rch_list);
	spin_unlock_irq(&sdev->spinlock);

	goto out;

release_channel:
	srpt_set_ch_state(ch, CH_RELEASING);
	transport_deregister_session_configfs(ch->sess);

deregister_session:
	transport_deregister_session(ch->sess);
	ch->sess = NULL;

destroy_ib:
	srpt_destroy_ch_ib(ch);

free_ring:
	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->rsp_size, DMA_TO_DEVICE);
free_ch:
	kfree(ch);

reject:
	rej->opcode = SRP_LOGIN_REJ;
	rej->tag = req->tag;
	rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
				   | SRP_BUF_FORMAT_INDIRECT);

	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
		       (void *)rej, sizeof *rej);

out:
	kfree(rep_param);
	kfree(rsp);
	kfree(rej);

	return ret;
}

static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
{
	pr_info("Received IB REJ for cm_id %p.\n", cm_id);
	srpt_drain_channel(cm_id);
}

/**
 * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
 *
 * An IB_CM_RTU_RECEIVED message indicates that the connection is established
 * and that the recipient may begin transmitting (RTU = ready to use).
 */
static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
{
	struct srpt_rdma_ch *ch;
	int ret;

	ch = srpt_find_channel(cm_id->context, cm_id);
	BUG_ON(!ch);

	if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
		struct srpt_recv_ioctx *ioctx, *ioctx_tmp;

		ret = srpt_ch_qp_rts(ch, ch->qp);

		list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
					 wait_list) {
			list_del(&ioctx->wait_list);
			srpt_handle_new_iu(ch, ioctx, NULL);
		}
		if (ret)
			srpt_close_ch(ch);
	}
}

static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
{
	pr_info("Received IB TimeWait exit for cm_id %p.\n", cm_id);
	srpt_drain_channel(cm_id);
}

static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
{
	pr_info("Received IB REP error for cm_id %p.\n", cm_id);
	srpt_drain_channel(cm_id);
}

/**
 * srpt_cm_dreq_recv() - Process reception of a DREQ message.
 */
static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
{
	struct srpt_rdma_ch *ch;
	unsigned long flags;
	bool send_drep = false;

	ch = srpt_find_channel(cm_id->context, cm_id);
	BUG_ON(!ch);

	pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));

	spin_lock_irqsave(&ch->spinlock, flags);
	switch (ch->state) {
	case CH_CONNECTING:
	case CH_LIVE:
		send_drep = true;
		ch->state = CH_DISCONNECTING;
		break;
	case CH_DISCONNECTING:
	case CH_DRAINING:
	case CH_RELEASING:
		WARN(true, "unexpected channel state %d\n", ch->state);
		break;
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	if (send_drep) {
		if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
			pr_err("Sending IB DREP failed.\n");
		pr_info("Received DREQ and sent DREP for session %s.\n",
			ch->sess_name);
	}
}

/**
 * srpt_cm_drep_recv() - Process reception of a DREP message.
 */
static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
{
	pr_info("Received InfiniBand DREP message for cm_id %p.\n", cm_id);
	srpt_drain_channel(cm_id);
}

/**
 * srpt_cm_handler() - IB connection manager callback function.
 *
 * A non-zero return value will cause the caller to destroy the CM ID.
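 * (For example, a negative value returned by srpt_cm_req_recv() for a
 * rejected login propagates through this handler and makes the IB CM core
 * free the cm_id on the driver's behalf.)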
2710 * 2711 * Note: srpt_cm_handler() must only return a non-zero value when transferring 2712 * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning 2713 * a non-zero value in any other case will trigger a race with the 2714 * ib_destroy_cm_id() call in srpt_release_channel(). 2715 */ 2716static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) 2717{ 2718 int ret; 2719 2720 ret = 0; 2721 switch (event->event) { 2722 case IB_CM_REQ_RECEIVED: 2723 ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd, 2724 event->private_data); 2725 break; 2726 case IB_CM_REJ_RECEIVED: 2727 srpt_cm_rej_recv(cm_id); 2728 break; 2729 case IB_CM_RTU_RECEIVED: 2730 case IB_CM_USER_ESTABLISHED: 2731 srpt_cm_rtu_recv(cm_id); 2732 break; 2733 case IB_CM_DREQ_RECEIVED: 2734 srpt_cm_dreq_recv(cm_id); 2735 break; 2736 case IB_CM_DREP_RECEIVED: 2737 srpt_cm_drep_recv(cm_id); 2738 break; 2739 case IB_CM_TIMEWAIT_EXIT: 2740 srpt_cm_timewait_exit(cm_id); 2741 break; 2742 case IB_CM_REP_ERROR: 2743 srpt_cm_rep_error(cm_id); 2744 break; 2745 case IB_CM_DREQ_ERROR: 2746 pr_info("Received IB DREQ ERROR event.\n"); 2747 break; 2748 case IB_CM_MRA_RECEIVED: 2749 pr_info("Received IB MRA event\n"); 2750 break; 2751 default: 2752 pr_err("received unrecognized IB CM event %d\n", event->event); 2753 break; 2754 } 2755 2756 return ret; 2757} 2758 2759/** 2760 * srpt_perform_rdmas() - Perform IB RDMA. 2761 * 2762 * Returns zero upon success or a negative number upon failure. 2763 */ 2764static int srpt_perform_rdmas(struct srpt_rdma_ch *ch, 2765 struct srpt_send_ioctx *ioctx) 2766{ 2767 struct ib_rdma_wr wr; 2768 struct ib_send_wr *bad_wr; 2769 struct rdma_iu *riu; 2770 int i; 2771 int ret; 2772 int sq_wr_avail; 2773 enum dma_data_direction dir; 2774 const int n_rdma = ioctx->n_rdma; 2775 2776 dir = ioctx->cmd.data_direction; 2777 if (dir == DMA_TO_DEVICE) { 2778 /* write */ 2779 ret = -ENOMEM; 2780 sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail); 2781 if (sq_wr_avail < 0) { 2782 pr_warn("IB send queue full (needed %d)\n", 2783 n_rdma); 2784 goto out; 2785 } 2786 } 2787 2788 ioctx->rdma_aborted = false; 2789 ret = 0; 2790 riu = ioctx->rdma_ius; 2791 memset(&wr, 0, sizeof wr); 2792 2793 for (i = 0; i < n_rdma; ++i, ++riu) { 2794 if (dir == DMA_FROM_DEVICE) { 2795 wr.wr.opcode = IB_WR_RDMA_WRITE; 2796 wr.wr.wr_id = encode_wr_id(i == n_rdma - 1 ? 2797 SRPT_RDMA_WRITE_LAST : 2798 SRPT_RDMA_MID, 2799 ioctx->ioctx.index); 2800 } else { 2801 wr.wr.opcode = IB_WR_RDMA_READ; 2802 wr.wr.wr_id = encode_wr_id(i == n_rdma - 1 ? 
2803 SRPT_RDMA_READ_LAST : 2804 SRPT_RDMA_MID, 2805 ioctx->ioctx.index); 2806 } 2807 wr.wr.next = NULL; 2808 wr.remote_addr = riu->raddr; 2809 wr.rkey = riu->rkey; 2810 wr.wr.num_sge = riu->sge_cnt; 2811 wr.wr.sg_list = riu->sge; 2812 2813 /* only get completion event for the last rdma write */ 2814 if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE) 2815 wr.wr.send_flags = IB_SEND_SIGNALED; 2816 2817 ret = ib_post_send(ch->qp, &wr.wr, &bad_wr); 2818 if (ret) 2819 break; 2820 } 2821 2822 if (ret) 2823 pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n", 2824 __func__, __LINE__, ret, i, n_rdma); 2825 if (ret && i > 0) { 2826 wr.wr.num_sge = 0; 2827 wr.wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index); 2828 wr.wr.send_flags = IB_SEND_SIGNALED; 2829 while (ch->state == CH_LIVE && 2830 ib_post_send(ch->qp, &wr.wr, &bad_wr) != 0) { 2831 pr_info("Trying to abort failed RDMA transfer [%d]\n", 2832 ioctx->ioctx.index); 2833 msleep(1000); 2834 } 2835 while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) { 2836 pr_info("Waiting until RDMA abort finished [%d]\n", 2837 ioctx->ioctx.index); 2838 msleep(1000); 2839 } 2840 } 2841out: 2842 if (unlikely(dir == DMA_TO_DEVICE && ret < 0)) 2843 atomic_add(n_rdma, &ch->sq_wr_avail); 2844 return ret; 2845} 2846 2847/** 2848 * srpt_xfer_data() - Start data transfer from initiator to target. 2849 */ 2850static int srpt_xfer_data(struct srpt_rdma_ch *ch, 2851 struct srpt_send_ioctx *ioctx) 2852{ 2853 int ret; 2854 2855 ret = srpt_map_sg_to_ib_sge(ch, ioctx); 2856 if (ret) { 2857 pr_err("%s[%d] ret=%d\n", __func__, __LINE__, ret); 2858 goto out; 2859 } 2860 2861 ret = srpt_perform_rdmas(ch, ioctx); 2862 if (ret) { 2863 if (ret == -EAGAIN || ret == -ENOMEM) 2864 pr_info("%s[%d] queue full -- ret=%d\n", 2865 __func__, __LINE__, ret); 2866 else 2867 pr_err("%s[%d] fatal error -- ret=%d\n", 2868 __func__, __LINE__, ret); 2869 goto out_unmap; 2870 } 2871 2872out: 2873 return ret; 2874out_unmap: 2875 srpt_unmap_sg_to_ib_sge(ch, ioctx); 2876 goto out; 2877} 2878 2879static int srpt_write_pending_status(struct se_cmd *se_cmd) 2880{ 2881 struct srpt_send_ioctx *ioctx; 2882 2883 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); 2884 return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA; 2885} 2886 2887/* 2888 * srpt_write_pending() - Start data transfer from initiator to target (write). 
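 * This is the TCM .write_pending callback. A sketch of the resulting
 * data-out flow, partly inferred from the completion handling above:
 * srpt_xfer_data() maps the data buffers and srpt_perform_rdmas() posts
 * IB_WR_RDMA_READ work requests that pull the data from the initiator;
 * when the SRPT_RDMA_READ_LAST completion is processed, execution of the
 * SCSI command resumes and the response is eventually sent via
 * srpt_queue_response().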
2889 */ 2890static int srpt_write_pending(struct se_cmd *se_cmd) 2891{ 2892 struct srpt_rdma_ch *ch; 2893 struct srpt_send_ioctx *ioctx; 2894 enum srpt_command_state new_state; 2895 enum rdma_ch_state ch_state; 2896 int ret; 2897 2898 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); 2899 2900 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA); 2901 WARN_ON(new_state == SRPT_STATE_DONE); 2902 2903 ch = ioctx->ch; 2904 BUG_ON(!ch); 2905 2906 ch_state = srpt_get_ch_state(ch); 2907 switch (ch_state) { 2908 case CH_CONNECTING: 2909 WARN(true, "unexpected channel state %d\n", ch_state); 2910 ret = -EINVAL; 2911 goto out; 2912 case CH_LIVE: 2913 break; 2914 case CH_DISCONNECTING: 2915 case CH_DRAINING: 2916 case CH_RELEASING: 2917 pr_debug("cmd with tag %lld: channel disconnecting\n", 2918 ioctx->cmd.tag); 2919 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN); 2920 ret = -EINVAL; 2921 goto out; 2922 } 2923 ret = srpt_xfer_data(ch, ioctx); 2924 2925out: 2926 return ret; 2927} 2928 2929static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status) 2930{ 2931 switch (tcm_mgmt_status) { 2932 case TMR_FUNCTION_COMPLETE: 2933 return SRP_TSK_MGMT_SUCCESS; 2934 case TMR_FUNCTION_REJECTED: 2935 return SRP_TSK_MGMT_FUNC_NOT_SUPP; 2936 } 2937 return SRP_TSK_MGMT_FAILED; 2938} 2939 2940/** 2941 * srpt_queue_response() - Transmits the response to a SCSI command. 2942 * 2943 * Callback function called by the TCM core. Must not block since it can be 2944 * invoked on the context of the IB completion handler. 2945 */ 2946static void srpt_queue_response(struct se_cmd *cmd) 2947{ 2948 struct srpt_rdma_ch *ch; 2949 struct srpt_send_ioctx *ioctx; 2950 enum srpt_command_state state; 2951 unsigned long flags; 2952 int ret; 2953 enum dma_data_direction dir; 2954 int resp_len; 2955 u8 srp_tm_status; 2956 2957 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd); 2958 ch = ioctx->ch; 2959 BUG_ON(!ch); 2960 2961 spin_lock_irqsave(&ioctx->spinlock, flags); 2962 state = ioctx->state; 2963 switch (state) { 2964 case SRPT_STATE_NEW: 2965 case SRPT_STATE_DATA_IN: 2966 ioctx->state = SRPT_STATE_CMD_RSP_SENT; 2967 break; 2968 case SRPT_STATE_MGMT: 2969 ioctx->state = SRPT_STATE_MGMT_RSP_SENT; 2970 break; 2971 default: 2972 WARN(true, "ch %p; cmd %d: unexpected command state %d\n", 2973 ch, ioctx->ioctx.index, ioctx->state); 2974 break; 2975 } 2976 spin_unlock_irqrestore(&ioctx->spinlock, flags); 2977 2978 if (unlikely(transport_check_aborted_status(&ioctx->cmd, false) 2979 || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) { 2980 atomic_inc(&ch->req_lim_delta); 2981 srpt_abort_cmd(ioctx); 2982 return; 2983 } 2984 2985 dir = ioctx->cmd.data_direction; 2986 2987 /* For read commands, transfer the data to the initiator. 
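 * "Read" is meant from the initiator's point of view: dir == DMA_FROM_DEVICE,
 * and srpt_perform_rdmas() implements the transfer with IB_WR_RDMA_WRITE
 * work requests that push the data into the initiator's buffers.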
*/ 2988 if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length && 2989 !ioctx->queue_status_only) { 2990 ret = srpt_xfer_data(ch, ioctx); 2991 if (ret) { 2992 pr_err("xfer_data failed for tag %llu\n", 2993 ioctx->cmd.tag); 2994 return; 2995 } 2996 } 2997 2998 if (state != SRPT_STATE_MGMT) 2999 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag, 3000 cmd->scsi_status); 3001 else { 3002 srp_tm_status 3003 = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response); 3004 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status, 3005 ioctx->cmd.tag); 3006 } 3007 ret = srpt_post_send(ch, ioctx, resp_len); 3008 if (ret) { 3009 pr_err("sending cmd response failed for tag %llu\n", 3010 ioctx->cmd.tag); 3011 srpt_unmap_sg_to_ib_sge(ch, ioctx); 3012 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE); 3013 target_put_sess_cmd(&ioctx->cmd); 3014 } 3015} 3016 3017static int srpt_queue_data_in(struct se_cmd *cmd) 3018{ 3019 srpt_queue_response(cmd); 3020 return 0; 3021} 3022 3023static void srpt_queue_tm_rsp(struct se_cmd *cmd) 3024{ 3025 srpt_queue_response(cmd); 3026} 3027 3028static void srpt_aborted_task(struct se_cmd *cmd) 3029{ 3030 struct srpt_send_ioctx *ioctx = container_of(cmd, 3031 struct srpt_send_ioctx, cmd); 3032 3033 srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx); 3034} 3035 3036static int srpt_queue_status(struct se_cmd *cmd) 3037{ 3038 struct srpt_send_ioctx *ioctx; 3039 3040 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd); 3041 BUG_ON(ioctx->sense_data != cmd->sense_buffer); 3042 if (cmd->se_cmd_flags & 3043 (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE)) 3044 WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION); 3045 ioctx->queue_status_only = true; 3046 srpt_queue_response(cmd); 3047 return 0; 3048} 3049 3050static void srpt_refresh_port_work(struct work_struct *work) 3051{ 3052 struct srpt_port *sport = container_of(work, struct srpt_port, work); 3053 3054 srpt_refresh_port(sport); 3055} 3056 3057static int srpt_ch_list_empty(struct srpt_device *sdev) 3058{ 3059 int res; 3060 3061 spin_lock_irq(&sdev->spinlock); 3062 res = list_empty(&sdev->rch_list); 3063 spin_unlock_irq(&sdev->spinlock); 3064 3065 return res; 3066} 3067 3068/** 3069 * srpt_release_sdev() - Free the channel resources associated with a target. 
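 *
 * The wait below pairs with the wake_up(&sdev->ch_releaseQ) at the end of
 * srpt_release_channel_work(): each channel closed by the loop eventually
 * runs its release work, removes itself from sdev->rch_list and wakes this
 * waiter, so srpt_ch_list_empty() eventually returns true.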
3070 */ 3071static int srpt_release_sdev(struct srpt_device *sdev) 3072{ 3073 struct srpt_rdma_ch *ch, *tmp_ch; 3074 int res; 3075 3076 WARN_ON_ONCE(irqs_disabled()); 3077 3078 BUG_ON(!sdev); 3079 3080 spin_lock_irq(&sdev->spinlock); 3081 list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) 3082 __srpt_close_ch(ch); 3083 spin_unlock_irq(&sdev->spinlock); 3084 3085 res = wait_event_interruptible(sdev->ch_releaseQ, 3086 srpt_ch_list_empty(sdev)); 3087 if (res) 3088 pr_err("%s: interrupted.\n", __func__); 3089 3090 return 0; 3091} 3092 3093static struct srpt_port *__srpt_lookup_port(const char *name) 3094{ 3095 struct ib_device *dev; 3096 struct srpt_device *sdev; 3097 struct srpt_port *sport; 3098 int i; 3099 3100 list_for_each_entry(sdev, &srpt_dev_list, list) { 3101 dev = sdev->device; 3102 if (!dev) 3103 continue; 3104 3105 for (i = 0; i < dev->phys_port_cnt; i++) { 3106 sport = &sdev->port[i]; 3107 3108 if (!strcmp(sport->port_guid, name)) 3109 return sport; 3110 } 3111 } 3112 3113 return NULL; 3114} 3115 3116static struct srpt_port *srpt_lookup_port(const char *name) 3117{ 3118 struct srpt_port *sport; 3119 3120 spin_lock(&srpt_dev_lock); 3121 sport = __srpt_lookup_port(name); 3122 spin_unlock(&srpt_dev_lock); 3123 3124 return sport; 3125} 3126 3127/** 3128 * srpt_add_one() - Infiniband device addition callback function. 3129 */ 3130static void srpt_add_one(struct ib_device *device) 3131{ 3132 struct srpt_device *sdev; 3133 struct srpt_port *sport; 3134 struct ib_srq_init_attr srq_attr; 3135 int i; 3136 3137 pr_debug("device = %p, device->dma_ops = %p\n", device, 3138 device->dma_ops); 3139 3140 sdev = kzalloc(sizeof *sdev, GFP_KERNEL); 3141 if (!sdev) 3142 goto err; 3143 3144 sdev->device = device; 3145 INIT_LIST_HEAD(&sdev->rch_list); 3146 init_waitqueue_head(&sdev->ch_releaseQ); 3147 spin_lock_init(&sdev->spinlock); 3148 3149 if (ib_query_device(device, &sdev->dev_attr)) 3150 goto free_dev; 3151 3152 sdev->pd = ib_alloc_pd(device); 3153 if (IS_ERR(sdev->pd)) 3154 goto free_dev; 3155 3156 sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr); 3157 3158 srq_attr.event_handler = srpt_srq_event; 3159 srq_attr.srq_context = (void *)sdev; 3160 srq_attr.attr.max_wr = sdev->srq_size; 3161 srq_attr.attr.max_sge = 1; 3162 srq_attr.attr.srq_limit = 0; 3163 srq_attr.srq_type = IB_SRQT_BASIC; 3164 3165 sdev->srq = ib_create_srq(sdev->pd, &srq_attr); 3166 if (IS_ERR(sdev->srq)) 3167 goto err_pd; 3168 3169 pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n", 3170 __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr, 3171 device->name); 3172 3173 if (!srpt_service_guid) 3174 srpt_service_guid = be64_to_cpu(device->node_guid); 3175 3176 sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev); 3177 if (IS_ERR(sdev->cm_id)) 3178 goto err_srq; 3179 3180 /* print out target login information */ 3181 pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx," 3182 "pkey=ffff,service_id=%016llx\n", srpt_service_guid, 3183 srpt_service_guid, srpt_service_guid); 3184 3185 /* 3186 * We do not have a consistent service_id (ie. also id_ext of target_id) 3187 * to identify this target. 
We currently use the GUID of the first HCA
 * in the system as the service_id; therefore, the target_id will change
 * if this HCA goes bad and is replaced by a different HCA.
 */
	if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0))
		goto err_cm;

	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
			      srpt_event_handler);
	if (ib_register_event_handler(&sdev->event_handler))
		goto err_cm;

	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
				      sizeof(*sdev->ioctx_ring[0]),
				      srp_max_req_size, DMA_FROM_DEVICE);
	if (!sdev->ioctx_ring)
		goto err_event;

	for (i = 0; i < sdev->srq_size; ++i)
		srpt_post_recv(sdev, sdev->ioctx_ring[i]);

	WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		sport->sdev = sdev;
		sport->port = i;
		sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
		sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
		sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
		INIT_WORK(&sport->work, srpt_refresh_port_work);
		INIT_LIST_HEAD(&sport->port_acl_list);
		spin_lock_init(&sport->port_acl_lock);

		if (srpt_refresh_port(sport)) {
			pr_err("MAD registration failed for %s-%d.\n",
			       srpt_sdev_name(sdev), i);
			goto err_ring;
		}
		snprintf(sport->port_guid, sizeof(sport->port_guid),
			 "0x%016llx%016llx",
			 be64_to_cpu(sport->gid.global.subnet_prefix),
			 be64_to_cpu(sport->gid.global.interface_id));
	}

	spin_lock(&srpt_dev_lock);
	list_add_tail(&sdev->list, &srpt_dev_list);
	spin_unlock(&srpt_dev_lock);

out:
	ib_set_client_data(device, &srpt_client, sdev);
	pr_debug("added %s.\n", device->name);
	return;

err_ring:
	srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
			     sdev->srq_size, srp_max_req_size,
			     DMA_FROM_DEVICE);
err_event:
	ib_unregister_event_handler(&sdev->event_handler);
err_cm:
	ib_destroy_cm_id(sdev->cm_id);
err_srq:
	ib_destroy_srq(sdev->srq);
err_pd:
	ib_dealloc_pd(sdev->pd);
free_dev:
	kfree(sdev);
err:
	sdev = NULL;
	pr_info("%s(%s) failed.\n", __func__, device->name);
	goto out;
}

/**
 * srpt_remove_one() - InfiniBand device removal callback function.
 */
static void srpt_remove_one(struct ib_device *device, void *client_data)
{
	struct srpt_device *sdev = client_data;
	int i;

	if (!sdev) {
		pr_info("%s(%s): nothing to do.\n", __func__, device->name);
		return;
	}

	srpt_unregister_mad_agent(sdev);

	ib_unregister_event_handler(&sdev->event_handler);

	/* Cancel any work queued by the just unregistered IB event handler. */
	for (i = 0; i < sdev->device->phys_port_cnt; i++)
		cancel_work_sync(&sdev->port[i].work);

	ib_destroy_cm_id(sdev->cm_id);

	/*
	 * Unregistering a target must happen after destroying sdev->cm_id
	 * such that no new SRP_LOGIN_REQ information units can arrive while
	 * destroying the target.
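	 * The remainder of the teardown below runs in roughly the reverse
	 * order of srpt_add_one(): the device is removed from srpt_dev_list,
	 * the remaining channels are closed and waited for in
	 * srpt_release_sdev(), and only then are the SRQ, the PD and the
	 * receive ring freed.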
3289 */ 3290 spin_lock(&srpt_dev_lock); 3291 list_del(&sdev->list); 3292 spin_unlock(&srpt_dev_lock); 3293 srpt_release_sdev(sdev); 3294 3295 ib_destroy_srq(sdev->srq); 3296 ib_dealloc_pd(sdev->pd); 3297 3298 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev, 3299 sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE); 3300 sdev->ioctx_ring = NULL; 3301 kfree(sdev); 3302} 3303 3304static struct ib_client srpt_client = { 3305 .name = DRV_NAME, 3306 .add = srpt_add_one, 3307 .remove = srpt_remove_one 3308}; 3309 3310static int srpt_check_true(struct se_portal_group *se_tpg) 3311{ 3312 return 1; 3313} 3314 3315static int srpt_check_false(struct se_portal_group *se_tpg) 3316{ 3317 return 0; 3318} 3319 3320static char *srpt_get_fabric_name(void) 3321{ 3322 return "srpt"; 3323} 3324 3325static char *srpt_get_fabric_wwn(struct se_portal_group *tpg) 3326{ 3327 struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1); 3328 3329 return sport->port_guid; 3330} 3331 3332static u16 srpt_get_tag(struct se_portal_group *tpg) 3333{ 3334 return 1; 3335} 3336 3337static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg) 3338{ 3339 return 1; 3340} 3341 3342static void srpt_release_cmd(struct se_cmd *se_cmd) 3343{ 3344 struct srpt_send_ioctx *ioctx = container_of(se_cmd, 3345 struct srpt_send_ioctx, cmd); 3346 struct srpt_rdma_ch *ch = ioctx->ch; 3347 unsigned long flags; 3348 3349 WARN_ON(ioctx->state != SRPT_STATE_DONE); 3350 WARN_ON(ioctx->mapped_sg_count != 0); 3351 3352 if (ioctx->n_rbuf > 1) { 3353 kfree(ioctx->rbufs); 3354 ioctx->rbufs = NULL; 3355 ioctx->n_rbuf = 0; 3356 } 3357 3358 spin_lock_irqsave(&ch->spinlock, flags); 3359 list_add(&ioctx->free_list, &ch->free_list); 3360 spin_unlock_irqrestore(&ch->spinlock, flags); 3361} 3362 3363/** 3364 * srpt_close_session() - Forcibly close a session. 3365 * 3366 * Callback function invoked by the TCM core to clean up sessions associated 3367 * with a node ACL when the user invokes 3368 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id 3369 */ 3370static void srpt_close_session(struct se_session *se_sess) 3371{ 3372 DECLARE_COMPLETION_ONSTACK(release_done); 3373 struct srpt_rdma_ch *ch; 3374 struct srpt_device *sdev; 3375 unsigned long res; 3376 3377 ch = se_sess->fabric_sess_ptr; 3378 WARN_ON(ch->sess != se_sess); 3379 3380 pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch)); 3381 3382 sdev = ch->sport->sdev; 3383 spin_lock_irq(&sdev->spinlock); 3384 BUG_ON(ch->release_done); 3385 ch->release_done = &release_done; 3386 __srpt_close_ch(ch); 3387 spin_unlock_irq(&sdev->spinlock); 3388 3389 res = wait_for_completion_timeout(&release_done, 60 * HZ); 3390 WARN_ON(res == 0); 3391} 3392 3393/** 3394 * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB). 3395 * 3396 * A quote from RFC 4455 (SCSI-MIB) about this MIB object: 3397 * This object represents an arbitrary integer used to uniquely identify a 3398 * particular attached remote initiator port to a particular SCSI target port 3399 * within a particular SCSI target device within a particular SCSI instance. 3400 */ 3401static u32 srpt_sess_get_index(struct se_session *se_sess) 3402{ 3403 return 0; 3404} 3405 3406static void srpt_set_default_node_attrs(struct se_node_acl *nacl) 3407{ 3408} 3409 3410/* Note: only used from inside debug printk's by the TCM core. 
*/ 3411static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd) 3412{ 3413 struct srpt_send_ioctx *ioctx; 3414 3415 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); 3416 return srpt_get_cmd_state(ioctx); 3417} 3418 3419/** 3420 * srpt_parse_i_port_id() - Parse an initiator port ID. 3421 * @name: ASCII representation of a 128-bit initiator port ID. 3422 * @i_port_id: Binary 128-bit port ID. 3423 */ 3424static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name) 3425{ 3426 const char *p; 3427 unsigned len, count, leading_zero_bytes; 3428 int ret, rc; 3429 3430 p = name; 3431 if (strncasecmp(p, "0x", 2) == 0) 3432 p += 2; 3433 ret = -EINVAL; 3434 len = strlen(p); 3435 if (len % 2) 3436 goto out; 3437 count = min(len / 2, 16U); 3438 leading_zero_bytes = 16 - count; 3439 memset(i_port_id, 0, leading_zero_bytes); 3440 rc = hex2bin(i_port_id + leading_zero_bytes, p, count); 3441 if (rc < 0) 3442 pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc); 3443 ret = 0; 3444out: 3445 return ret; 3446} 3447 3448/* 3449 * configfs callback function invoked for 3450 * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id 3451 */ 3452static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name) 3453{ 3454 struct srpt_port *sport = 3455 container_of(se_nacl->se_tpg, struct srpt_port, port_tpg_1); 3456 struct srpt_node_acl *nacl = 3457 container_of(se_nacl, struct srpt_node_acl, nacl); 3458 u8 i_port_id[16]; 3459 3460 if (srpt_parse_i_port_id(i_port_id, name) < 0) { 3461 pr_err("invalid initiator port ID %s\n", name); 3462 return -EINVAL; 3463 } 3464 3465 memcpy(&nacl->i_port_id[0], &i_port_id[0], 16); 3466 nacl->sport = sport; 3467 3468 spin_lock_irq(&sport->port_acl_lock); 3469 list_add_tail(&nacl->list, &sport->port_acl_list); 3470 spin_unlock_irq(&sport->port_acl_lock); 3471 3472 return 0; 3473} 3474 3475/* 3476 * configfs callback function invoked for 3477 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id 3478 */ 3479static void srpt_cleanup_nodeacl(struct se_node_acl *se_nacl) 3480{ 3481 struct srpt_node_acl *nacl = 3482 container_of(se_nacl, struct srpt_node_acl, nacl); 3483 struct srpt_port *sport = nacl->sport; 3484 3485 spin_lock_irq(&sport->port_acl_lock); 3486 list_del(&nacl->list); 3487 spin_unlock_irq(&sport->port_acl_lock); 3488} 3489 3490static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item, 3491 char *page) 3492{ 3493 struct se_portal_group *se_tpg = attrib_to_tpg(item); 3494 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3495 3496 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size); 3497} 3498 3499static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item, 3500 const char *page, size_t count) 3501{ 3502 struct se_portal_group *se_tpg = attrib_to_tpg(item); 3503 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3504 unsigned long val; 3505 int ret; 3506 3507 ret = kstrtoul(page, 0, &val); 3508 if (ret < 0) { 3509 pr_err("kstrtoul() failed with ret: %d\n", ret); 3510 return -EINVAL; 3511 } 3512 if (val > MAX_SRPT_RDMA_SIZE) { 3513 pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val, 3514 MAX_SRPT_RDMA_SIZE); 3515 return -EINVAL; 3516 } 3517 if (val < DEFAULT_MAX_RDMA_SIZE) { 3518 pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n", 3519 val, DEFAULT_MAX_RDMA_SIZE); 3520 return -EINVAL; 3521 } 3522 sport->port_attrib.srp_max_rdma_size = val; 3523 3524 return count; 3525} 3526 3527static ssize_t 
srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item, 3528 char *page) 3529{ 3530 struct se_portal_group *se_tpg = attrib_to_tpg(item); 3531 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3532 3533 return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size); 3534} 3535 3536static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item, 3537 const char *page, size_t count) 3538{ 3539 struct se_portal_group *se_tpg = attrib_to_tpg(item); 3540 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3541 unsigned long val; 3542 int ret; 3543 3544 ret = kstrtoul(page, 0, &val); 3545 if (ret < 0) { 3546 pr_err("kstrtoul() failed with ret: %d\n", ret); 3547 return -EINVAL; 3548 } 3549 if (val > MAX_SRPT_RSP_SIZE) { 3550 pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val, 3551 MAX_SRPT_RSP_SIZE); 3552 return -EINVAL; 3553 } 3554 if (val < MIN_MAX_RSP_SIZE) { 3555 pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val, 3556 MIN_MAX_RSP_SIZE); 3557 return -EINVAL; 3558 } 3559 sport->port_attrib.srp_max_rsp_size = val; 3560 3561 return count; 3562} 3563 3564static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item, 3565 char *page) 3566{ 3567 struct se_portal_group *se_tpg = attrib_to_tpg(item); 3568 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3569 3570 return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size); 3571} 3572 3573static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item, 3574 const char *page, size_t count) 3575{ 3576 struct se_portal_group *se_tpg = attrib_to_tpg(item); 3577 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3578 unsigned long val; 3579 int ret; 3580 3581 ret = kstrtoul(page, 0, &val); 3582 if (ret < 0) { 3583 pr_err("kstrtoul() failed with ret: %d\n", ret); 3584 return -EINVAL; 3585 } 3586 if (val > MAX_SRPT_SRQ_SIZE) { 3587 pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val, 3588 MAX_SRPT_SRQ_SIZE); 3589 return -EINVAL; 3590 } 3591 if (val < MIN_SRPT_SRQ_SIZE) { 3592 pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val, 3593 MIN_SRPT_SRQ_SIZE); 3594 return -EINVAL; 3595 } 3596 sport->port_attrib.srp_sq_size = val; 3597 3598 return count; 3599} 3600 3601CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size); 3602CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size); 3603CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size); 3604 3605static struct configfs_attribute *srpt_tpg_attrib_attrs[] = { 3606 &srpt_tpg_attrib_attr_srp_max_rdma_size, 3607 &srpt_tpg_attrib_attr_srp_max_rsp_size, 3608 &srpt_tpg_attrib_attr_srp_sq_size, 3609 NULL, 3610}; 3611 3612static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page) 3613{ 3614 struct se_portal_group *se_tpg = to_tpg(item); 3615 struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1); 3616 3617 return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 
1: 0);
}

static ssize_t srpt_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
		return -EINVAL;
	}
	if (tmp == 1)
		sport->enabled = true;
	else
		sport->enabled = false;

	return count;
}

CONFIGFS_ATTR(srpt_tpg_, enable);

static struct configfs_attribute *srpt_tpg_attrs[] = {
	&srpt_tpg_attr_enable,
	NULL,
};

/**
 * configfs callback invoked for
 * mkdir /sys/kernel/config/target/$driver/$port/$tpg
 */
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
					     struct config_group *group,
					     const char *name)
{
	struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
	int res;

	/* Initialize sport->port_wwn and sport->port_tpg_1 */
	res = core_tpg_register(&sport->port_wwn, &sport->port_tpg_1, SCSI_PROTOCOL_SRP);
	if (res)
		return ERR_PTR(res);

	return &sport->port_tpg_1;
}

/**
 * configfs callback invoked for
 * rmdir /sys/kernel/config/target/$driver/$port/$tpg
 */
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
	struct srpt_port *sport = container_of(tpg,
				struct srpt_port, port_tpg_1);

	sport->enabled = false;
	core_tpg_deregister(&sport->port_tpg_1);
}

/**
 * configfs callback invoked for
 * mkdir /sys/kernel/config/target/$driver/$port
 */
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
				      struct config_group *group,
				      const char *name)
{
	struct srpt_port *sport;
	int ret;

	sport = srpt_lookup_port(name);
	pr_debug("make_tport(%s)\n", name);
	ret = -EINVAL;
	if (!sport)
		goto err;

	return &sport->port_wwn;

err:
	return ERR_PTR(ret);
}

/**
 * configfs callback invoked for
 * rmdir /sys/kernel/config/target/$driver/$port
 */
static void srpt_drop_tport(struct se_wwn *wwn)
{
	struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);

	pr_debug("drop_tport(%s)\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
}

static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
}

CONFIGFS_ATTR_RO(srpt_wwn_, version);

static struct configfs_attribute *srpt_wwn_attrs[] = {
	&srpt_wwn_attr_version,
	NULL,
};

static const struct target_core_fabric_ops srpt_template = {
	.module = THIS_MODULE,
	.name = "srpt",
	.node_acl_size = sizeof(struct srpt_node_acl),
	.get_fabric_name = srpt_get_fabric_name,
	.tpg_get_wwn = srpt_get_fabric_wwn,
	.tpg_get_tag = srpt_get_tag,
	.tpg_check_demo_mode = srpt_check_false,
	.tpg_check_demo_mode_cache = srpt_check_true,
	.tpg_check_demo_mode_write_protect = srpt_check_true,
	.tpg_check_prod_mode_write_protect = srpt_check_false,
	.tpg_get_inst_index = srpt_tpg_get_inst_index,
	.release_cmd = srpt_release_cmd,
.check_stop_free = srpt_check_stop_free, 3745 .shutdown_session = srpt_shutdown_session, 3746 .close_session = srpt_close_session, 3747 .sess_get_index = srpt_sess_get_index, 3748 .sess_get_initiator_sid = NULL, 3749 .write_pending = srpt_write_pending, 3750 .write_pending_status = srpt_write_pending_status, 3751 .set_default_node_attributes = srpt_set_default_node_attrs, 3752 .get_cmd_state = srpt_get_tcm_cmd_state, 3753 .queue_data_in = srpt_queue_data_in, 3754 .queue_status = srpt_queue_status, 3755 .queue_tm_rsp = srpt_queue_tm_rsp, 3756 .aborted_task = srpt_aborted_task, 3757 /* 3758 * Setup function pointers for generic logic in 3759 * target_core_fabric_configfs.c 3760 */ 3761 .fabric_make_wwn = srpt_make_tport, 3762 .fabric_drop_wwn = srpt_drop_tport, 3763 .fabric_make_tpg = srpt_make_tpg, 3764 .fabric_drop_tpg = srpt_drop_tpg, 3765 .fabric_init_nodeacl = srpt_init_nodeacl, 3766 .fabric_cleanup_nodeacl = srpt_cleanup_nodeacl, 3767 3768 .tfc_wwn_attrs = srpt_wwn_attrs, 3769 .tfc_tpg_base_attrs = srpt_tpg_attrs, 3770 .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs, 3771}; 3772 3773/** 3774 * srpt_init_module() - Kernel module initialization. 3775 * 3776 * Note: Since ib_register_client() registers callback functions, and since at 3777 * least one of these callback functions (srpt_add_one()) calls target core 3778 * functions, this driver must be registered with the target core before 3779 * ib_register_client() is called. 3780 */ 3781static int __init srpt_init_module(void) 3782{ 3783 int ret; 3784 3785 ret = -EINVAL; 3786 if (srp_max_req_size < MIN_MAX_REQ_SIZE) { 3787 pr_err("invalid value %d for kernel module parameter" 3788 " srp_max_req_size -- must be at least %d.\n", 3789 srp_max_req_size, MIN_MAX_REQ_SIZE); 3790 goto out; 3791 } 3792 3793 if (srpt_srq_size < MIN_SRPT_SRQ_SIZE 3794 || srpt_srq_size > MAX_SRPT_SRQ_SIZE) { 3795 pr_err("invalid value %d for kernel module parameter" 3796 " srpt_srq_size -- must be in the range [%d..%d].\n", 3797 srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE); 3798 goto out; 3799 } 3800 3801 ret = target_register_template(&srpt_template); 3802 if (ret) 3803 goto out; 3804 3805 ret = ib_register_client(&srpt_client); 3806 if (ret) { 3807 pr_err("couldn't register IB client\n"); 3808 goto out_unregister_target; 3809 } 3810 3811 return 0; 3812 3813out_unregister_target: 3814 target_unregister_template(&srpt_template); 3815out: 3816 return ret; 3817} 3818 3819static void __exit srpt_cleanup_module(void) 3820{ 3821 ib_unregister_client(&srpt_client); 3822 target_unregister_template(&srpt_template); 3823} 3824 3825module_init(srpt_init_module); 3826module_exit(srpt_cleanup_module); 3827