drivers/infiniband/ulp/srp/ib_srp.h

/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>

enum {
        SRP_PATH_REC_TIMEOUT_MS = 1000,
        SRP_ABORT_TIMEOUT_MS    = 5000,

        SRP_PORT_REDIRECT       = 1,
        SRP_DLID_REDIRECT       = 2,
        SRP_STALE_CONN          = 3,

        SRP_DEF_SG_TABLESIZE    = 12,

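        /*
         * The send queue is shared between SRP_CMD, SRP_RSP and SRP_TSK_MGMT
         * information units, so one send queue slot is reserved for responses
         * and one for task management requests; the remainder is available
         * for SCSI commands.
         */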
        SRP_DEFAULT_QUEUE_SIZE  = 1 << 6,
        SRP_RSP_SQ_SIZE         = 1,
        SRP_TSK_MGMT_SQ_SIZE    = 1,
        SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
                                  SRP_TSK_MGMT_SQ_SIZE,

        SRP_TAG_NO_REQ          = ~0U,
        SRP_TAG_TSK_MGMT        = 1U << 31,

        SRP_MAX_PAGES_PER_MR    = 512,

        SRP_MAX_ADD_CDB_LEN     = 16,

        SRP_MAX_IMM_SGE         = 2,
        SRP_MAX_SGE             = SRP_MAX_IMM_SGE + 1,
        /*
         * Choose the immediate data offset such that a 32 byte CDB still fits.
         */
        SRP_IMM_DATA_OFFSET     = sizeof(struct srp_cmd) +
                                  SRP_MAX_ADD_CDB_LEN +
                                  sizeof(struct srp_imm_buf),
};

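/*
 * Lifetime states of a target port: being scanned during login, live and
 * visible to the SCSI midlayer, or removed.
 */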
enum srp_target_state {
        SRP_TARGET_SCANNING,
        SRP_TARGET_LIVE,
        SRP_TARGET_REMOVED,
};

enum srp_iu_type {
        SRP_IU_CMD,
        SRP_IU_TSK_MGMT,
        SRP_IU_RSP,
};

/*
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
 *   request.
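 * @has_fmr: Whether the HCA supports FMR memory registration.
 * @has_fr: Whether the HCA supports fast registration (FR) of memory regions.
 * @use_fmr: Whether FMR will be used to register data buffers.
 * @use_fast_reg: Whether fast registration will be used to register data
 *   buffers.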
 */
struct srp_device {
        struct list_head        dev_list;
        struct ib_device       *dev;
        struct ib_pd           *pd;
        u32                     global_rkey;
        u64                     mr_page_mask;
        int                     mr_page_size;
        int                     mr_max_size;
        int                     max_pages_per_mr;
        bool                    has_fmr;
        bool                    has_fr;
        bool                    use_fmr;
        bool                    use_fast_reg;
};

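/**
 * struct srp_host - RDMA device port on which SRP target ports can be created
 * @srp_dev: RDMA device this port belongs to.
 * @port: Port number.
 * @dev: Device that exposes the add_target sysfs attribute for this port.
 * @target_list: List of target ports logged in via this port.
 * @target_lock: Protects @target_list.
 * @released: Signalled once @dev has been released.
 * @list: Node in the srp_device.dev_list list.
 * @add_target_mutex: Serializes add_target sysfs writes.
 */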
struct srp_host {
        struct srp_device      *srp_dev;
        u8                      port;
        struct device           dev;
        struct list_head        target_list;
        spinlock_t              target_lock;
        struct completion       released;
        struct list_head        list;
        struct mutex            add_target_mutex;
};

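/**
 * struct srp_request - per-command initiator state
 * @scmnd: SCSI command this request belongs to.
 * @cmd: Information unit used to send the SRP_CMD request.
 * @fmr_list: FMRs registered for the data buffer of this command.
 * @fr_list: Fast registration descriptors registered for the data buffer.
 * @map_page: Scratch array of page DMA addresses used while mapping.
 * @indirect_desc: Indirect data buffer descriptor table.
 * @indirect_dma_addr: DMA address of @indirect_desc.
 * @nmdesc: Number of memory descriptors used for this command.
 * @reg_cqe: Completion queue entry for memory registration and invalidation
 *   work requests.
 */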
struct srp_request {
        struct scsi_cmnd       *scmnd;
        struct srp_iu          *cmd;
        union {
                struct ib_pool_fmr **fmr_list;
                struct srp_fr_desc **fr_list;
        };
        u64                    *map_page;
        struct srp_direct_buf  *indirect_desc;
        dma_addr_t              indirect_dma_addr;
        short                   nmdesc;
        struct ib_cqe           reg_cqe;
};

/**
 * struct srp_rdma_ch
 * @comp_vector: Completion vector used by this RDMA channel.
 * @max_it_iu_len: Maximum initiator-to-target information unit length.
 * @max_ti_iu_len: Maximum target-to-initiator information unit length.
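 * @free_tx: List of transmit information units that are available for sending.
 * @req_lim: Number of additional requests the target allows the initiator to
 *   send (SRP request limit credits).
 * @connected: Whether this RDMA channel is currently connected to the target.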
 */
struct srp_rdma_ch {
        /* These are RW in the hot path, and commonly used together */
        struct list_head        free_tx;
        spinlock_t              lock;
        s32                     req_lim;

        /* These are read-only in the hot path */
        struct srp_target_port *target ____cacheline_aligned_in_smp;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_qp           *qp;
        union {
                struct ib_fmr_pool     *fmr_pool;
                struct srp_fr_pool     *fr_pool;
        };
        uint32_t                max_it_iu_len;
        uint32_t                max_ti_iu_len;
        bool                    use_imm_data;

        /* Everything above this point is used in the hot path of
         * command processing. Try to keep them packed into cachelines.
         */

        struct completion       done;
        int                     status;

        union {
                struct ib_cm {
                        struct sa_path_rec      path;
                        struct ib_sa_query      *path_query;
                        int                     path_query_id;
                        struct ib_cm_id         *cm_id;
                } ib_cm;
                struct rdma_cm {
                        struct rdma_cm_id       *cm_id;
                } rdma_cm;
        };

        struct srp_iu         **tx_ring;
        struct srp_iu         **rx_ring;
        struct srp_request     *req_ring;
        int                     comp_vector;

        u64                     tsk_mgmt_tag;
        struct completion       tsk_mgmt_done;
        u8                      tsk_mgmt_status;
        bool                    connected;
};

/**
 * struct srp_target_port
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *   this target port.
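 * @ch: Array of RDMA channels used by this target port.
 * @ch_count: Number of elements in the @ch array.
 * @state: Target port state (scanning, live or removed).
 * @qp_in_error: Whether a transport error has been observed on one of the
 *   channel queue pairs.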
 */
struct srp_target_port {
        /* read and written in the hot path */
        spinlock_t              lock;

        /* read only in the hot path */
        u32                     global_rkey;
        struct srp_rdma_ch      *ch;
        struct net              *net;
        u32                     ch_count;
        u32                     lkey;
        enum srp_target_state   state;
        unsigned int            cmd_sg_cnt;
        unsigned int            indirect_size;
        bool                    allow_ext_sg;

        /* other member variables */
        union ib_gid            sgid;
        __be64                  id_ext;
        __be64                  ioc_guid;
        __be64                  initiator_ext;
        u16                     io_class;
        struct srp_host        *srp_host;
        struct Scsi_Host       *scsi_host;
        struct srp_rport       *rport;
        char                    target_name[32];
        unsigned int            scsi_id;
        unsigned int            sg_tablesize;
        unsigned int            target_can_queue;
        int                     mr_pool_size;
        int                     mr_per_cmd;
        int                     queue_size;
        int                     req_ring_size;
        int                     comp_vector;
        int                     tl_retry_count;

        bool                    using_rdma_cm;

        union {
                struct {
                        __be64                  service_id;
                        union ib_gid            orig_dgid;
                        __be16                  pkey;
                } ib_cm;
                struct {
                        union {
                                struct sockaddr_in      ip4;
                                struct sockaddr_in6     ip6;
                                struct sockaddr_storage ss;
                        } src;
                        union {
                                struct sockaddr_in      ip4;
                                struct sockaddr_in6     ip6;
                                struct sockaddr_storage ss;
                        } dst;
                        bool src_specified;
                } rdma_cm;
        };

        u32                     rq_tmo_jiffies;

        int                     zero_req_lim;

        struct work_struct      tl_err_work;
        struct work_struct      remove_work;

        struct list_head        list;
        bool                    qp_in_error;
};

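/**
 * struct srp_iu - information unit buffer
 * @list: Node in the srp_rdma_ch.free_tx list while this transmit IU is free.
 * @dma: DMA address of @buf.
 * @buf: Buffer holding the information unit payload.
 * @size: Size of @buf in bytes.
 * @direction: DMA transfer direction of @buf.
 * @num_sge: Number of entries of @sge that are in use.
 * @sge: Scatter/gather entries used when posting this IU.
 * @cqe: Completion queue entry associated with this IU.
 */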
struct srp_iu {
        struct list_head        list;
        u64                     dma;
        void                   *buf;
        size_t                  size;
        enum dma_data_direction direction;
        u32                     num_sge;
        struct ib_sge           sge[SRP_MAX_SGE];
        struct ib_cqe           cqe;
};

/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr:    Memory region.
 */
struct srp_fr_desc {
        struct list_head                entry;
        struct ib_mr                    *mr;
};

/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size:      Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock:      Protects free_list.
 * @free_list: List of free descriptors.
 * @desc:      Fast registration descriptor pool.
 */
struct srp_fr_pool {
        int                     size;
        int                     max_page_list_len;
        spinlock_t              lock;
        struct list_head        free_list;
        struct srp_fr_desc      desc[];
};

/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @desc:           Pointer to the element of the SRP buffer descriptor array
 *                  that is being filled in.
 * @pages:          Array with DMA addresses of pages being considered for
 *                  memory registration.
 * @base_dma_addr:  DMA address of the first page that has not yet been mapped.
 * @dma_len:        Number of bytes that will be registered with the next
 *                  FMR or FR memory registration call.
 * @total_len:      Total number of bytes in the sg-list being mapped.
 * @npages:         Number of page addresses in the pages[] array.
 * @nmdesc:         Number of FMR or FR memory descriptors used for mapping.
 * @ndesc:          Number of SRP buffer descriptors that have been filled in.
 */
struct srp_map_state {
        union {
                struct {
                        struct ib_pool_fmr **next;
                        struct ib_pool_fmr **end;
                } fmr;
                struct {
                        struct srp_fr_desc **next;
                        struct srp_fr_desc **end;
                } fr;
                struct {
                        void               **next;
                        void               **end;
                } gen;
        };
        struct srp_direct_buf  *desc;
        union {
                u64                     *pages;
                struct scatterlist      *sg;
        };
        dma_addr_t              base_dma_addr;
        u32                     dma_len;
        u32                     total_len;
        unsigned int            npages;
        unsigned int            nmdesc;
        unsigned int            ndesc;
};

#endif /* IB_SRP_H */
