This source file includes the following definitions:
- pvrdma_get_dma_mr
- pvrdma_reg_user_mr
- pvrdma_alloc_mr
- pvrdma_dereg_mr
- pvrdma_set_page
- pvrdma_map_mr_sg
#include <linux/list.h>
#include <linux/slab.h>

#include "pvrdma.h"

/**
 * pvrdma_get_dma_mr - get a DMA memory region
 * @pd: protection domain
 * @acc: access flags
 *
 * @return: ib_mr pointer on success, otherwise returns an error.
 */
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret;

	/* Support only LOCAL_WRITE flag for DMA MRs */
	if (acc & ~IB_ACCESS_LOCAL_WRITE) {
		dev_warn(&dev->pdev->dev,
			 "unsupported dma mr access flags %#x\n", acc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = acc;
	cmd->flags = PVRDMA_MR_FLAG_DMA;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not get DMA mem region, error: %d\n", ret);
		kfree(mr);
		return ERR_PTR(ret);
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;
}
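
/*
 * Usage sketch (hypothetical, not part of the driver): consumers never call
 * this op directly. When a device does not advertise
 * IB_DEVICE_LOCAL_DMA_LKEY, ib_alloc_pd() creates an internal DMA MR through
 * ops.get_dma_mr() and publishes its lkey as pd->local_dma_lkey. The helper
 * name below is made up for illustration.
 */
static int example_check_local_dma_lkey(struct ib_device *ibdev)
{
	struct ib_pd *pd = ib_alloc_pd(ibdev, 0);

	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* For pvrdma, this lkey comes from the MR pvrdma_get_dma_mr() made. */
	pr_info("local_dma_lkey = 0x%x\n", pd->local_dma_lkey);

	ib_dealloc_pd(pd);
	return 0;
}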

/**
 * pvrdma_reg_user_mr - register a userspace memory region
 * @pd: protection domain
 * @start: starting address
 * @length: length of region
 * @virt_addr: I/O virtual address
 * @access_flags: access flags for memory region
 * @udata: user data
 *
 * @return: ib_mr pointer on success, otherwise returns an error.
 */
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				 u64 virt_addr, int access_flags,
				 struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr = NULL;
	struct ib_umem *umem;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret, npages;

	if (length == 0 || length > dev->dsr->caps.max_mr_size) {
		dev_warn(&dev->pdev->dev, "invalid mem region length\n");
		return ERR_PTR(-EINVAL);
	}

	umem = ib_umem_get(udata, start, length, access_flags, 0);
	if (IS_ERR(umem)) {
		dev_warn(&dev->pdev->dev,
			 "could not get umem for mem region\n");
		return ERR_CAST(umem);
	}

	npages = ib_umem_num_pages(umem);
	if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
			 npages);
		ret = -EINVAL;
		goto err_umem;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto err_umem;
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = length;
	mr->umem = umem;

	ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
	if (ret)
		goto err_pdir;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->start = start;
	cmd->length = length;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = access_flags;
	cmd->nchunks = npages;
	cmd->pdir_dma = mr->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not register mem region, error: %d\n", ret);
		goto err_pdir;
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
err_umem:
	ib_umem_release(umem);
	kfree(mr);

	return ERR_PTR(ret);
}
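
/*
 * Usage sketch (hypothetical): this entry point is reached through the
 * uverbs REG_MR command rather than called from kernel code. From userspace
 * the trigger is plain libibverbs code along these lines (shown as a
 * comment, since userspace code does not belong in a kernel source file):
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *	...
 *	ibv_dereg_mr(mr);
 */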

/**
 * pvrdma_alloc_mr - allocate a memory region
 * @pd: protection domain
 * @mr_type: type of memory region
 * @max_num_sg: maximum number of pages
 * @udata: user data
 *
 * @return: ib_mr pointer on success, otherwise returns an error.
 */
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			      u32 max_num_sg, struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int size = max_num_sg * sizeof(u64);
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kzalloc(size, GFP_KERNEL);
	if (!mr->pages) {
		ret = -ENOMEM;
		goto freemr;
	}

	ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "failed to allocate page dir for mr\n");
		ret = -ENOMEM;
		goto freepages;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = 0;
	cmd->flags = PVRDMA_MR_FLAG_FRMR;
	cmd->nchunks = max_num_sg;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create FR mem region, error: %d\n", ret);
		goto freepdir;
	}

	mr->max_pages = max_num_sg;
	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;
	mr->page_shift = PAGE_SHIFT;
	mr->umem = NULL;

	return &mr->ibmr;

freepdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
freepages:
	kfree(mr->pages);
freemr:
	kfree(mr);
	return ERR_PTR(ret);
}
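
/*
 * Usage sketch (hypothetical): a kernel ULP obtains a fast-registration MR
 * with ib_alloc_mr(), which dispatches to pvrdma_alloc_mr() on this device.
 * The helper name and the page count of 16 are made up for illustration;
 * max_num_sg must not exceed PVRDMA_MAX_FAST_REG_PAGES or the call fails
 * with -EINVAL.
 */
static struct ib_mr *example_alloc_frmr(struct ib_pd *pd)
{
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
}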

/**
 * pvrdma_dereg_mr - deregister a memory region
 * @ibmr: memory region
 * @udata: pointer to user data
 *
 * @return: 0 on success.
 */
int pvrdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
	cmd->mr_handle = mr->mmr.mr_handle;
	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not deregister mem region, error: %d\n", ret);

	pvrdma_page_dir_cleanup(dev, &mr->pdir);
	ib_umem_release(mr->umem);

	kfree(mr->pages);
	kfree(mr);

	return 0;
}
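
/*
 * Usage sketch (hypothetical): teardown mirrors creation. Both user MRs and
 * fast-registration MRs are released with ib_dereg_mr(), which lands in
 * pvrdma_dereg_mr() above. The helper name is made up for illustration.
 */
static int example_teardown(struct ib_mr *mr)
{
	return ib_dereg_mr(mr);	/* destroys the device MR, frees the wrapper */
}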

static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	int ret;

	mr->npages = 0;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
	if (ret < 0)
		dev_warn(&dev->pdev->dev, "could not map sg to pages\n");

	return ret;
}
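
/*
 * Usage sketch (hypothetical): a ULP maps a scatterlist into the MR with
 * ib_map_mr_sg(), which calls pvrdma_map_mr_sg() and, page by page,
 * pvrdma_set_page(), then posts a fast-registration work request. The
 * helper name below is made up; error handling is abbreviated.
 */
static int example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
			    struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr wr = {};
	int n;

	/* Returns the number of sg entries mapped; fewer than sg_nents
	 * means the MR ran out of pages (see pvrdma_set_page()). */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;

	wr.wr.opcode = IB_WR_REG_MR;
	wr.mr = mr;
	wr.key = mr->rkey;
	wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;

	return ib_post_send(qp, &wr.wr, NULL);
}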