drivers/infiniband/sw/rdmavt/cq.c


DEFINITIONS

This source file includes the following definitions:
  1. rvt_cq_enter
  2. send_complete
  3. rvt_create_cq
  4. rvt_destroy_cq
  5. rvt_req_notify_cq
  6. rvt_resize_cq
  7. rvt_poll_cq
  8. rvt_driver_cq_init
  9. rvt_cq_exit

/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "cq.h"
#include "vt.h"
#include "trace.h"

static struct workqueue_struct *comp_vector_wq;

/**
 * rvt_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This may be called with qp->s_lock held.
 *
 * Return: true on success, false if the CQ is full.
 */
bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
        struct ib_uverbs_wc *uqueue = NULL;
        struct ib_wc *kqueue = NULL;
        struct rvt_cq_wc *u_wc = NULL;
        struct rvt_k_cq_wc *k_wc = NULL;
        unsigned long flags;
        u32 head;
        u32 next;
        u32 tail;

        spin_lock_irqsave(&cq->lock, flags);

        if (cq->ip) {
                u_wc = cq->queue;
                uqueue = &u_wc->uqueue[0];
                head = RDMA_READ_UAPI_ATOMIC(u_wc->head);
                tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);
        } else {
                k_wc = cq->kqueue;
                kqueue = &k_wc->kqueue[0];
                head = k_wc->head;
                tail = k_wc->tail;
        }

        /*
         * Note that the head pointer might be writable by
         * user processes. Take care to verify it is a sane value.
         */
        if (head >= (unsigned)cq->ibcq.cqe) {
                head = cq->ibcq.cqe;
                next = 0;
        } else {
                next = head + 1;
        }

        if (unlikely(next == tail || cq->cq_full)) {
                struct rvt_dev_info *rdi = cq->rdi;

                if (!cq->cq_full)
                        rvt_pr_err_ratelimited(rdi, "CQ is full!\n");
                cq->cq_full = true;
                spin_unlock_irqrestore(&cq->lock, flags);
                if (cq->ibcq.event_handler) {
                        struct ib_event ev;

                        ev.device = cq->ibcq.device;
                        ev.element.cq = &cq->ibcq;
                        ev.event = IB_EVENT_CQ_ERR;
                        cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
                }
                return false;
        }
        trace_rvt_cq_enter(cq, entry, head);
        if (uqueue) {
                uqueue[head].wr_id = entry->wr_id;
                uqueue[head].status = entry->status;
                uqueue[head].opcode = entry->opcode;
                uqueue[head].vendor_err = entry->vendor_err;
                uqueue[head].byte_len = entry->byte_len;
                uqueue[head].ex.imm_data = entry->ex.imm_data;
                uqueue[head].qp_num = entry->qp->qp_num;
                uqueue[head].src_qp = entry->src_qp;
                uqueue[head].wc_flags = entry->wc_flags;
                uqueue[head].pkey_index = entry->pkey_index;
                uqueue[head].slid = ib_lid_cpu16(entry->slid);
                uqueue[head].sl = entry->sl;
                uqueue[head].dlid_path_bits = entry->dlid_path_bits;
                uqueue[head].port_num = entry->port_num;
                /* Make sure entry is written before the head index. */
                RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next);
        } else {
                kqueue[head] = *entry;
                k_wc->head = next;
        }

        if (cq->notify == IB_CQ_NEXT_COMP ||
            (cq->notify == IB_CQ_SOLICITED &&
             (solicited || entry->status != IB_WC_SUCCESS))) {
                /*
                 * This will cause send_complete() to be called in
                 * another thread.
                 */
                cq->notify = RVT_CQ_NONE;
                cq->triggered++;
                queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
                              &cq->comptask);
        }

        spin_unlock_irqrestore(&cq->lock, flags);
        return true;
}
EXPORT_SYMBOL(rvt_cq_enter);

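/*
 * Example (editor's sketch, not part of the original file): how a driver
 * built on rdmavt might report a completed send through rvt_cq_enter().
 * The helper name and the wr_id/byte_len values are hypothetical; only
 * rvt_cq_enter(), ibcq_to_rvtcq() and the ib_wc fields come from the
 * surrounding code.
 */
#if 0   /* illustrative only, not compiled */
static void example_report_send_done(struct rvt_qp *qp, u64 wr_id, u32 len)
{
        struct ib_wc wc = { };

        wc.wr_id = wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_SEND;
        wc.qp = &qp->ibqp;
        wc.byte_len = len;

        /* solicited == false: only IB_CQ_NEXT_COMP waiters get a callback */
        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, false);
}
#endif
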
static void send_complete(struct work_struct *work)
{
        struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);

        /*
         * The completion handler will most likely rearm the notification
         * and poll for all pending entries.  If a new completion entry
         * is added while we are in this routine, queue_work()
         * won't call us again until we return so we check triggered to
         * see if we need to call the handler again.
         */
        for (;;) {
                u8 triggered = cq->triggered;

                /*
                 * IPoIB connected mode assumes the callback is from a
                 * soft IRQ. We simulate this by blocking "bottom halves".
                 * See the implementation for ipoib_cm_handle_tx_wc(),
                 * netif_tx_lock_bh() and netif_tx_lock().
                 */
                local_bh_disable();
                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
                local_bh_enable();

                if (cq->triggered == triggered)
                        return;
        }
}

/**
 * rvt_create_cq - create a completion queue
 * @ibcq: Allocated CQ
 * @attr: creation attributes
 * @udata: user data for libibverbs.so
 *
 * Called by ib_create_cq() in the generic verbs code.
 *
 * Return: 0 on success
 */
int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                  struct ib_udata *udata)
{
        struct ib_device *ibdev = ibcq->device;
        struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        struct rvt_cq_wc *u_wc = NULL;
        struct rvt_k_cq_wc *k_wc = NULL;
        u32 sz;
        unsigned int entries = attr->cqe;
        int comp_vector = attr->comp_vector;
        int err;

        if (attr->flags)
                return -EINVAL;

        if (entries < 1 || entries > rdi->dparms.props.max_cqe)
                return -EINVAL;

        if (comp_vector < 0)
                comp_vector = 0;

        comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;

        /*
         * Allocate the completion queue entries and head/tail pointers.
         * This is allocated separately so that it can be resized and
         * also mapped into user space.
         * We need to use vmalloc() in order to support mmap and large
         * numbers of entries.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                sz = sizeof(struct ib_uverbs_wc) * (entries + 1);
                sz += sizeof(*u_wc);
                u_wc = vmalloc_user(sz);
                if (!u_wc)
                        return -ENOMEM;
        } else {
                sz = sizeof(struct ib_wc) * (entries + 1);
                sz += sizeof(*k_wc);
                k_wc = vzalloc_node(sz, rdi->dparms.node);
                if (!k_wc)
                        return -ENOMEM;
        }

        /*
         * Return the address of the WC as the offset to mmap.
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
                if (IS_ERR(cq->ip)) {
                        err = PTR_ERR(cq->ip);
                        goto bail_wc;
                }

                err = ib_copy_to_udata(udata, &cq->ip->offset,
                                       sizeof(cq->ip->offset));
                if (err)
                        goto bail_ip;
        }

        spin_lock_irq(&rdi->n_cqs_lock);
        if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
                spin_unlock_irq(&rdi->n_cqs_lock);
                err = -ENOMEM;
                goto bail_ip;
        }

        rdi->n_cqs_allocated++;
        spin_unlock_irq(&rdi->n_cqs_lock);

        if (cq->ip) {
                spin_lock_irq(&rdi->pending_lock);
                list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
                spin_unlock_irq(&rdi->pending_lock);
        }

        /*
         * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
         * The number of entries should be >= the number requested or return
         * an error.
         */
        cq->rdi = rdi;
        if (rdi->driver_f.comp_vect_cpu_lookup)
                cq->comp_vector_cpu =
                        rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
        else
                cq->comp_vector_cpu =
                        cpumask_first(cpumask_of_node(rdi->dparms.node));

        cq->ibcq.cqe = entries;
        cq->notify = RVT_CQ_NONE;
        spin_lock_init(&cq->lock);
        INIT_WORK(&cq->comptask, send_complete);
        if (u_wc)
                cq->queue = u_wc;
        else
                cq->kqueue = k_wc;

        trace_rvt_create_cq(cq, attr);
        return 0;

bail_ip:
        kfree(cq->ip);
bail_wc:
        vfree(u_wc);
        vfree(k_wc);
        return err;
}

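/*
 * Example (editor's sketch, not part of the original file): a kernel
 * consumer does not call rvt_create_cq() directly; it goes through the
 * verbs API, which allocates the ib_cq and then invokes this function.
 * The device pointer, context and CQ depth below are hypothetical.
 */
#if 0   /* illustrative only, not compiled */
static struct ib_cq *example_alloc_cq(struct ib_device *ibdev, void *ctx)
{
        struct ib_cq_init_attr attr = { .cqe = 256, .comp_vector = 0 };

        /* comp/event handlers may be NULL if the consumer only polls */
        return ib_create_cq(ibdev, NULL, NULL, ctx, &attr);
}
#endif
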
/**
 * rvt_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 * @udata: user data or NULL for kernel object
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        struct rvt_dev_info *rdi = cq->rdi;

        flush_work(&cq->comptask);
        spin_lock_irq(&rdi->n_cqs_lock);
        rdi->n_cqs_allocated--;
        spin_unlock_irq(&rdi->n_cqs_lock);
        if (cq->ip)
                kref_put(&cq->ip->ref, rvt_release_mmap_info);
        else
                vfree(cq->kqueue);
}

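/*
 * Example (editor's sketch, not part of the original file): teardown
 * mirrors creation and also goes through the verbs layer; ib_destroy_cq()
 * ends up calling rvt_destroy_cq() above.  The wrapper name is
 * hypothetical.
 */
#if 0   /* illustrative only, not compiled */
static void example_free_cq(struct ib_cq *ibcq)
{
        ib_destroy_cq(ibcq);
}
#endif
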
/**
 * rvt_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 *
 * Return: 0 for success, or 1 if IB_CQ_REPORT_MISSED_EVENTS was requested
 * and completion entries are pending.
 */
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cq->lock, flags);
        /*
         * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
         * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
         */
        if (cq->notify != IB_CQ_NEXT_COMP)
                cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
                if (cq->queue) {
                        if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
                                RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
                                ret = 1;
                } else {
                        if (cq->kqueue->head != cq->kqueue->tail)
                                ret = 1;
                }
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        return ret;
}

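/*
 * Example (editor's sketch, not part of the original file): the usual
 * consumer pattern that pairs ib_req_notify_cq() with ib_poll_cq() so
 * completions arriving between the final poll and the re-arm are not
 * lost.  The handler name and context argument are hypothetical.
 */
#if 0   /* illustrative only, not compiled */
static void example_comp_handler(struct ib_cq *ibcq, void *cq_context)
{
        struct ib_wc wc;

        do {
                while (ib_poll_cq(ibcq, 1, &wc) > 0) {
                        /* consume wc here */
                }
                /* a return value > 0 means events were missed; poll again */
        } while (ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP |
                                  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
#endif
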
/**
 * rvt_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new number of completion queue entries to support
 * @udata: user data for libibverbs.so
 *
 * Return: 0 for success.
 */
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        u32 head, tail, n;
        int ret;
        u32 sz;
        struct rvt_dev_info *rdi = cq->rdi;
        struct rvt_cq_wc *u_wc = NULL;
        struct rvt_cq_wc *old_u_wc = NULL;
        struct rvt_k_cq_wc *k_wc = NULL;
        struct rvt_k_cq_wc *old_k_wc = NULL;

        if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
                return -EINVAL;

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
                sz += sizeof(*u_wc);
                u_wc = vmalloc_user(sz);
                if (!u_wc)
                        return -ENOMEM;
        } else {
                sz = sizeof(struct ib_wc) * (cqe + 1);
                sz += sizeof(*k_wc);
                k_wc = vzalloc_node(sz, rdi->dparms.node);
                if (!k_wc)
                        return -ENOMEM;
        }
        /* Check that we can write the offset to mmap. */
        if (udata && udata->outlen >= sizeof(__u64)) {
                __u64 offset = 0;

                ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
                if (ret)
                        goto bail_free;
        }

        spin_lock_irq(&cq->lock);
        /*
         * Make sure head and tail are sane since they
         * might be user writable.
         */
        if (u_wc) {
                old_u_wc = cq->queue;
                head = RDMA_READ_UAPI_ATOMIC(old_u_wc->head);
                tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail);
        } else {
                old_k_wc = cq->kqueue;
                head = old_k_wc->head;
                tail = old_k_wc->tail;
        }

        if (head > (u32)cq->ibcq.cqe)
                head = (u32)cq->ibcq.cqe;
        if (tail > (u32)cq->ibcq.cqe)
                tail = (u32)cq->ibcq.cqe;
        if (head < tail)
                n = cq->ibcq.cqe + 1 + head - tail;
        else
                n = head - tail;
        if (unlikely((u32)cqe < n)) {
                ret = -EINVAL;
                goto bail_unlock;
        }
        for (n = 0; tail != head; n++) {
                if (u_wc)
                        u_wc->uqueue[n] = old_u_wc->uqueue[tail];
                else
                        k_wc->kqueue[n] = old_k_wc->kqueue[tail];
                if (tail == (u32)cq->ibcq.cqe)
                        tail = 0;
                else
                        tail++;
        }
        cq->ibcq.cqe = cqe;
        if (u_wc) {
                RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n);
                RDMA_WRITE_UAPI_ATOMIC(u_wc->tail, 0);
                cq->queue = u_wc;
        } else {
                k_wc->head = n;
                k_wc->tail = 0;
                cq->kqueue = k_wc;
        }
        spin_unlock_irq(&cq->lock);

        if (u_wc)
                vfree(old_u_wc);
        else
                vfree(old_k_wc);

        if (cq->ip) {
                struct rvt_mmap_info *ip = cq->ip;

                rvt_update_mmap_info(rdi, ip, sz, u_wc);

                /*
                 * Return the offset to mmap.
                 * See rvt_mmap() for details.
                 */
                if (udata && udata->outlen >= sizeof(__u64)) {
                        ret = ib_copy_to_udata(udata, &ip->offset,
                                               sizeof(ip->offset));
                        if (ret)
                                return ret;
                }

                spin_lock_irq(&rdi->pending_lock);
                if (list_empty(&ip->pending_mmaps))
                        list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
                spin_unlock_irq(&rdi->pending_lock);
        }

        return 0;

bail_unlock:
        spin_unlock_irq(&cq->lock);
bail_free:
        vfree(u_wc);
        vfree(k_wc);

        return ret;
}

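/*
 * Example (editor's sketch, not part of the original file): a kernel
 * consumer grows a CQ through the verbs wrapper, which ends up here.
 * The new depth of 512 is hypothetical; on success the old ring
 * contents are preserved by the copy loop above.
 */
#if 0   /* illustrative only, not compiled */
static int example_grow_cq(struct ib_cq *ibcq)
{
        return ib_resize_cq(ibcq, 512);
}
#endif
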
/**
 * rvt_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 *
 * Return: the number of completion entries polled.
 */
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        struct rvt_k_cq_wc *wc;
        unsigned long flags;
        int npolled;
        u32 tail;

        /* The kernel can only poll a kernel completion queue */
        if (cq->ip)
                return -EINVAL;

        spin_lock_irqsave(&cq->lock, flags);

        wc = cq->kqueue;
        tail = wc->tail;
        if (tail > (u32)cq->ibcq.cqe)
                tail = (u32)cq->ibcq.cqe;
        for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
                if (tail == wc->head)
                        break;
                /* The kernel doesn't need a RMB since it has the lock. */
                trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
                *entry = wc->kqueue[tail];
                if (tail >= cq->ibcq.cqe)
                        tail = 0;
                else
                        tail++;
        }
        wc->tail = tail;

        spin_unlock_irqrestore(&cq->lock, flags);

        return npolled;
}

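/*
 * Example (editor's sketch, not part of the original file): draining a
 * kernel CQ in batches through ib_poll_cq(), which dispatches to
 * rvt_poll_cq() for rdmavt devices.  The batch size of 8 is arbitrary.
 */
#if 0   /* illustrative only, not compiled */
static int example_drain_cq(struct ib_cq *ibcq)
{
        struct ib_wc wc[8];
        int n, total = 0;

        while ((n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc)) > 0)
                total += n;     /* process wc[0..n-1] here */

        return n < 0 ? n : total;
}
#endif
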
/**
 * rvt_driver_cq_init - Init cq resources on behalf of driver
 *
 * Creates the module-scoped workqueue used to run completion handlers.
 *
 * Return: 0 on success
 */
int rvt_driver_cq_init(void)
{
        comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
                                         0, "rdmavt_cq");
        if (!comp_vector_wq)
                return -ENOMEM;

        return 0;
}

/**
 * rvt_cq_exit - tear down cq resources
 */
void rvt_cq_exit(void)
{
        destroy_workqueue(comp_vector_wq);
        comp_vector_wq = NULL;
}
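
/*
 * Example (editor's sketch, not part of the original file):
 * rvt_driver_cq_init() and rvt_cq_exit() are module-scope setup and
 * teardown for the completion workqueue, so they are expected to be
 * paired once, roughly as below.  The surrounding function names are
 * hypothetical.
 */
#if 0   /* illustrative only, not compiled */
static int __init example_init(void)
{
        return rvt_driver_cq_init();
}

static void __exit example_exit(void)
{
        rvt_cq_exit();
}
#endif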
