root/drivers/infiniband/sw/rdmavt/srq.c


DEFINITIONS

This source file includes the following definitions.
  1. rvt_driver_srq_init
  2. rvt_create_srq
  3. rvt_modify_srq
  4. rvt_query_srq
  5. rvt_destroy_srq

/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "srq.h"
#include "vt.h"
#include "qp.h"

/**
 * rvt_driver_srq_init - init srq resources on a per driver basis
 * @rdi: rvt dev structure
 *
 * Do any initialization needed when a driver registers with rdmavt.
 */
void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
        spin_lock_init(&rdi->n_srqs_lock);
        rdi->n_srqs_allocated = 0;
}

/**
 * rvt_create_srq - create a shared receive queue
 * @ibsrq: the SRQ to be created
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: 0 on success
 */
int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
                   struct ib_udata *udata)
{
        struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        u32 sz;
        int ret;

        if (srq_init_attr->srq_type != IB_SRQT_BASIC)
                return -EOPNOTSUPP;

        if (srq_init_attr->attr.max_sge == 0 ||
            srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
            srq_init_attr->attr.max_wr == 0 ||
            srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
                return -EINVAL;

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
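        /*
         * The ring is sized one slot larger than max_wr so that a full
         * ring can be told apart from an empty one (head == tail means
         * empty); rvt_query_srq() reports size - 1 for the same reason.
         * Each entry is a struct rvt_rwqe header followed by max_sge
         * scatter/gather entries, which is where the per-WQE stride
         * 'sz' below comes from.
         */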
        srq->rq.size = srq_init_attr->attr.max_wr + 1;
        srq->rq.max_sge = srq_init_attr->attr.max_sge;
        sz = sizeof(struct ib_sge) * srq->rq.max_sge +
                sizeof(struct rvt_rwqe);
        if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
                         dev->dparms.node, udata)) {
                ret = -ENOMEM;
                goto bail_srq;
        }

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

                srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
                if (IS_ERR(srq->ip)) {
                        ret = PTR_ERR(srq->ip);
                        goto bail_wq;
                }

                ret = ib_copy_to_udata(udata, &srq->ip->offset,
                                       sizeof(srq->ip->offset));
                if (ret)
                        goto bail_ip;
        }

        /*
         * ib_create_srq() will initialize srq->ibsrq.
         */
        spin_lock_init(&srq->rq.lock);
        srq->limit = srq_init_attr->attr.srq_limit;

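        /*
         * Charge this SRQ against the per-device limit.  The check and
         * the increment are done under n_srqs_lock so that concurrent
         * creators cannot oversubscribe max_srq.
         */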
        spin_lock(&dev->n_srqs_lock);
        if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
                spin_unlock(&dev->n_srqs_lock);
                ret = -ENOMEM;
                goto bail_ip;
        }

        dev->n_srqs_allocated++;
        spin_unlock(&dev->n_srqs_lock);

        if (srq->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        return 0;

bail_ip:
        kfree(srq->ip);
bail_wq:
        rvt_free_rq(&srq->rq);
bail_srq:
        return ret;
}
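
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * kernel verbs consumer holding a valid struct ib_pd *pd reaches
 * rvt_create_srq() through the core ib_create_srq() helper:
 *
 *      struct ib_srq_init_attr init_attr = {
 *              .srq_type = IB_SRQT_BASIC,
 *              .attr = {
 *                      .max_wr  = 128,  // ring of 128 receive WQEs
 *                      .max_sge = 4,    // up to 4 SGEs per WQE
 *              },
 *      };
 *      struct ib_srq *srq = ib_create_srq(pd, &init_attr);
 *
 *      if (IS_ERR(srq))
 *              return PTR_ERR(srq);
 *
 * The values above are arbitrary; real callers size the ring from their
 * workload and the device's reported max_srq_wr/max_srq_sge.
 */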

/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
        struct rvt_rq tmp_rq = {};
        int ret = 0;

        if (attr_mask & IB_SRQ_MAX_WR) {
                struct rvt_krwq *okwq = NULL;
                struct rvt_rwq *owq = NULL;
                struct rvt_rwqe *p;
                u32 sz, size, n, head, tail;

                /* Check that the requested sizes are within the limits. */
                if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
                    ((attr_mask & IB_SRQ_LIMIT) ?
                     attr->srq_limit : srq->limit) > attr->max_wr)
                        return -EINVAL;
                sz = sizeof(struct rvt_rwqe) +
                        srq->rq.max_sge * sizeof(struct ib_sge);
                size = attr->max_wr + 1;
                if (rvt_alloc_rq(&tmp_rq, size * sz, dev->dparms.node,
                                 udata))
                        return -ENOMEM;
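                /*
                 * For a user SRQ the resize ABI passes the address of a
                 * userspace __u64 in inbuf; outbuf is redirected to that
                 * address so the new mmap offset can be written back once
                 * the new ring is published (see the srq->ip block below).
                 * The write of 0 below probes that the destination is
                 * writable before any existing state is torn down.
                 */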
                /* Check that we can write the offset to mmap. */
                if (udata && udata->inlen >= sizeof(__u64)) {
                        __u64 offset_addr;
                        __u64 offset = 0;

                        ret = ib_copy_from_udata(&offset_addr, udata,
                                                 sizeof(offset_addr));
                        if (ret)
                                goto bail_free;
                        udata->outbuf = (void __user *)
                                        (unsigned long)offset_addr;
                        ret = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (ret)
                                goto bail_free;
                }

                spin_lock_irq(&srq->rq.kwq->c_lock);
                /*
                 * validate head and tail pointer values and compute
                 * the number of remaining WQEs.
                 */
                if (udata) {
                        owq = srq->rq.wq;
                        head = RDMA_READ_UAPI_ATOMIC(owq->head);
                        tail = RDMA_READ_UAPI_ATOMIC(owq->tail);
                } else {
                        okwq = srq->rq.kwq;
                        head = okwq->head;
                        tail = okwq->tail;
                }
                if (head >= srq->rq.size || tail >= srq->rq.size) {
                        ret = -EINVAL;
                        goto bail_unlock;
                }
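                /*
                 * Compute the ring occupancy: n is the number of WQEs
                 * posted but not yet consumed.  When head has wrapped
                 * past the end of the ring (head < tail), the count
                 * spans the wrap point.  The new ring must be strictly
                 * larger than the occupancy or the copy below would
                 * overflow it.
                 */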
                n = head;
                if (n < tail)
                        n += srq->rq.size - tail;
                else
                        n -= tail;
                if (size <= n) {
                        ret = -EINVAL;
                        goto bail_unlock;
                }
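                /*
                 * Copy the outstanding WQEs from the old ring into the
                 * new one, compacting them so they start at index 0.
                 * The stride is sz bytes because each WQE's size depends
                 * on the SRQ's max_sge.
                 */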
                n = 0;
                p = tmp_rq.kwq->curr_wq;
                while (tail != head) {
                        struct rvt_rwqe *wqe;
                        int i;

                        wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
                        p->wr_id = wqe->wr_id;
                        p->num_sge = wqe->num_sge;
                        for (i = 0; i < wqe->num_sge; i++)
                                p->sg_list[i] = wqe->sg_list[i];
                        n++;
                        p = (struct rvt_rwqe *)((char *)p + sz);
                        if (++tail >= srq->rq.size)
                                tail = 0;
                }
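                /*
                 * Publish the new ring: head becomes the number of
                 * copied WQEs and tail 0, matching the compacted layout
                 * built above.
                 */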
                srq->rq.kwq = tmp_rq.kwq;
                if (udata) {
                        srq->rq.wq = tmp_rq.wq;
                        RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
                        RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
                } else {
                        tmp_rq.kwq->head = n;
                        tmp_rq.kwq->tail = 0;
                }
                srq->rq.size = size;
                if (attr_mask & IB_SRQ_LIMIT)
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.kwq->c_lock);

                vfree(owq);
                kvfree(okwq);

                if (srq->ip) {
                        struct rvt_mmap_info *ip = srq->ip;
                        struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
                        u32 s = sizeof(struct rvt_rwq) + size * sz;

                        rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);

                        /*
                         * Return the offset to mmap.
                         * See rvt_mmap() for details.
                         */
                        if (udata && udata->inlen >= sizeof(__u64)) {
                                ret = ib_copy_to_udata(udata, &ip->offset,
                                                       sizeof(ip->offset));
                                if (ret)
                                        return ret;
                        }

                        /*
                         * Put user mapping info onto the pending list
                         * unless it already is on the list.
                         */
                        spin_lock_irq(&dev->pending_lock);
                        if (list_empty(&ip->pending_mmaps))
                                list_add(&ip->pending_mmaps,
                                         &dev->pending_mmaps);
                        spin_unlock_irq(&dev->pending_lock);
                }
        } else if (attr_mask & IB_SRQ_LIMIT) {
                spin_lock_irq(&srq->rq.kwq->c_lock);
                if (attr->srq_limit >= srq->rq.size)
                        ret = -EINVAL;
                else
                        srq->limit = attr->srq_limit;
                spin_unlock_irq(&srq->rq.kwq->c_lock);
        }
        return ret;

bail_unlock:
        spin_unlock_irq(&srq->rq.kwq->c_lock);
bail_free:
        rvt_free_rq(&tmp_rq);
        return ret;
}
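
/*
 * A minimal usage sketch (illustrative only): resizing an SRQ and arming
 * its limit event from a kernel consumer goes through ib_modify_srq(),
 * assuming 'srq' was returned by ib_create_srq() as above:
 *
 *      struct ib_srq_attr attr = {
 *              .max_wr    = 256,  // grow the ring to 256 WQEs
 *              .srq_limit = 16,   // fire IB_EVENT_SRQ_LIMIT_REACHED
 *      };                         // when occupancy drops below 16
 *      int ret = ib_modify_srq(srq, &attr,
 *                              IB_SRQ_MAX_WR | IB_SRQ_LIMIT);
 *
 * The numbers are arbitrary; max_wr must stay within the device's
 * max_srq_wr, and srq_limit must not exceed the requested max_wr.
 */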

/**
 * rvt_query_srq - query srq data
 * @ibsrq: srq to query
 * @attr: return info in attr
 *
 * Return: always 0
 */
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);

        attr->max_wr = srq->rq.size - 1;
        attr->max_sge = srq->rq.max_sge;
        attr->srq_limit = srq->limit;
        return 0;
}

/**
 * rvt_destroy_srq - destroy an srq
 * @ibsrq: srq object to destroy
 * @udata: user data for libibverbs.so
 */
void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

        spin_lock(&dev->n_srqs_lock);
        dev->n_srqs_allocated--;
        spin_unlock(&dev->n_srqs_lock);
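        /*
         * Drop this SRQ's reference on its user mapping info; the
         * mapping is freed by rvt_release_mmap_info() only after the
         * last userspace mmap of it goes away.
         */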
        if (srq->ip)
                kref_put(&srq->ip->ref, rvt_release_mmap_info);
        kvfree(srq->rq.kwq);
}
