root/drivers/target/iscsi/cxgbit/cxgbit_ddp.c


DEFINITIONS

This source file includes the following definitions:
  1. cxgbit_set_one_ppod
  2. cxgbit_ppod_init_idata
  3. cxgbit_ppod_write_idata
  4. cxgbit_ddp_set_map
  5. cxgbit_ddp_sgl_check
  6. cxgbit_ddp_reserve
  7. cxgbit_get_r2t_ttt
  8. cxgbit_unmap_cmd
  9. cxgbit_ddp_init

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#include "cxgbit.h"

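/*
 * Fill one page pod (ppod): copy the ppod header from @ttinfo, then store
 * up to PPOD_PAGES_MAX + 1 page addresses taken from the DMA-mapped
 * scatterlist. *sg_pp/*sg_off carry the scatterlist position across calls;
 * the last address of one ppod is repeated as the first address of the
 * next, so the position is deliberately not advanced past it.
 */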
static void
cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
		    struct cxgbi_task_tag_info *ttinfo,
		    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			if (offset == (len + sg->offset)) {
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * The fifth address needs to be repeated in the next ppod, so do
	 * not move sg here.
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	if (offset == len) {
		offset = 0;
		if (sg) {
			sg = sg_next(sg);
			if (sg)
				addr = sg_dma_address(sg);
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}

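/*
 * Allocate an skb holding a ULP_TX_MEM_WRITE work request that writes
 * @npods page pods into the adapter's ppod region as immediate data,
 * starting at ppod index @idx. The caller fills in the ppod payload that
 * follows the ulptx_idata header.
 */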
static struct sk_buff *
cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
		       unsigned int idx, unsigned int npods, unsigned int tid)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	struct sk_buff *skb;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	req = __skb_put(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, tid);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
		FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
		ULP_MEMIO_ORDER_V(0) |
		T5_ULP_MEMIO_IMM_V(1));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata = (struct ulptx_idata *)(req + 1);
	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);

	return skb;
}

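/*
 * Build one immediate-data work request for @npods page pods, encode the
 * ppods from the command's scatterlist and queue the skb on csk->ppodq
 * for later transmission.
 */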
static int
cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
			struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
			unsigned int npods, struct scatterlist **sg_pp,
			unsigned int *sg_off)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int i;

	skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->data;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	__skb_queue_tail(&csk->ppodq, skb);

	return 0;
}

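/*
 * Program all page pods for this task, at most ULPMEM_IDATA_MAX_NPPODS
 * ppods per work request.
 */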
static int
cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
		   struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;
	int ret = 0;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;

		ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					      &sg, &offset);
		if (ret < 0)
			break;
	}

	return ret;
}

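/*
 * DDP requires a scatterlist the hardware can treat as a page list: each
 * entry must start on a 4-byte boundary, only the first entry may have a
 * non-zero offset, and every entry but the last must end exactly on a
 * page boundary.
 */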
static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
				unsigned int nents)
{
	unsigned int last_sgidx = nents - 1;
	unsigned int i;

	for (i = 0; i < nents; i++, sg = sg_next(sg)) {
		unsigned int len = sg->length + sg->offset;

		if ((sg->offset & 0x3) || (i && sg->offset) ||
		    ((i != last_sgidx) && (len != PAGE_SIZE))) {
			return -EINVAL;
		}
	}

	return 0;
}

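/*
 * Set up DDP for one WRITE command: validate the scatterlist, reserve
 * page pods, DMA-map the buffers and queue the ppod work requests. On
 * success, ttinfo->tag holds the DDP tag under which the adapter will
 * place incoming solicited data directly into the command's buffers.
 */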
static int
cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
		   unsigned int xferlen)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = cdev2ppm(cdev);
	struct scatterlist *sgl = ttinfo->sgl;
	unsigned int sgcnt = ttinfo->nents;
	unsigned int sg_offset = sgl->offset;
	int ret;

	if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
		pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
			 ppm, ppm->tformat.pgsz_idx_dflt,
			 xferlen, ttinfo->nents);
		return -EINVAL;
	}

	if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
		return -EINVAL;

	ttinfo->nr_pages = (xferlen + sgl->offset +
			    (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

	/*
	 * The DDP tag will be used as the TTT in the outgoing R2T PDU.
	 */
	ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
				      &ttinfo->tag, 0);
	if (ret < 0)
		return ret;
	ttinfo->npods = ret;

	sgl->offset = 0;
	ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	sgl->offset = sg_offset;
	if (!ret) {
		pr_debug("%s: xfer %u, sgl %u dma mapping err.\n",
			 __func__, xferlen, sgcnt);
		goto rel_ppods;
	}

	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
				xferlen, &ttinfo->hdr);

	ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
	if (ret < 0) {
		__skb_queue_purge(&csk->ppodq);
		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
		goto rel_ppods;
	}

	return 0;

rel_ppods:
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
	return -EINVAL;
}

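/*
 * Called while building an R2T PDU. If DDP can be set up for this
 * command, the DDP tag is returned as the target transfer tag so that
 * solicited data is placed directly into the command's buffers;
 * otherwise the data is received through the normal copy path.
 */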
void
cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		   struct iscsi_r2t *r2t)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
	struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
	int ret = -EINVAL;

	if ((!ccmd->setup_ddp) ||
	    (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
		goto out;

	ccmd->setup_ddp = false;

	ttinfo->sgl = cmd->se_cmd.t_data_sg;
	ttinfo->nents = cmd->se_cmd.t_data_nents;

	ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
	if (ret < 0) {
		pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
			 csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);

		ttinfo->sgl = NULL;
		ttinfo->nents = 0;
	} else {
		ccmd->release = true;
	}
out:
	pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
	r2t->targ_xfer_tag = ttinfo->tag;
}

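/*
 * Undo cxgbit_get_r2t_ttt(): release the page pods and unmap the
 * scatterlist. If the write did not complete, abort the TCP connection
 * first so the adapter cannot place data into buffers that are about to
 * be freed. For commands without a DDP scatterlist, drop the reference
 * taken on the command's data page elsewhere in the driver.
 */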
void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);

	if (ccmd->release) {
		struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;

		if (ttinfo->sgl) {
			struct cxgbit_sock *csk = conn->context;
			struct cxgbit_device *cdev = csk->com.cdev;
			struct cxgbi_ppm *ppm = cdev2ppm(cdev);

			/* Abort the TCP conn if DDP is not complete to
			 * avoid any possibility of DDP after freeing
			 * the cmd.
			 */
			if (unlikely(cmd->write_data_done !=
				     cmd->se_cmd.data_length))
				cxgbit_abort_conn(csk);

			cxgbi_ppm_ppod_release(ppm, ttinfo->idx);

			dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
				     ttinfo->nents, DMA_FROM_DEVICE);
		} else {
			put_page(sg_page(&ccmd->sg));
		}

		ccmd->release = false;
	}
}

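/*
 * Initialize the page pod manager for this adapter from the iSCSI memory
 * region exported by the LLD; enable DDP on the device only if a usable
 * default page-size index exists and at least 1024 ppods are available.
 */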
int cxgbit_ddp_init(struct cxgbit_device *cdev)
{
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct net_device *ndev = cdev->lldi.ports[0];
	struct cxgbi_tag_format tformat;
	int ret, i;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					 & 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
			     cdev->lldi.pdev, &cdev->lldi, &tformat,
			     lldi->vr->iscsi.size, lldi->iscsi_llimit,
			     lldi->vr->iscsi.start, 2,
			     lldi->vr->ppod_edram.start,
			     lldi->vr->ppod_edram.size);
	if (ret >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);

		if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
		    (ppm->ppmax >= 1024))
			set_bit(CDEV_DDP_ENABLE, &cdev->flags);
		ret = 0;
	}

	return ret;
}
