root/drivers/infiniband/sw/rxe/rxe_cq.c


DEFINITIONS

This source file includes the following definitions; a brief usage sketch follows the list.
  1. rxe_cq_chk_attr
  2. rxe_send_complete
  3. rxe_cq_from_init
  4. rxe_cq_resize_queue
  5. rxe_cq_post
  6. rxe_cq_disable
  7. rxe_cq_cleanup
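
A brief, hypothetical sketch of how these helpers fit together (not part of
this file): a verbs-layer create-CQ path would first validate the requested
size with rxe_cq_chk_attr() and then build the queue with rxe_cq_from_init().
The wrapper name rxe_create_cq_sketch() below is invented for illustration;
the called functions and the rxe_dev/rxe_cq types are the real identifiers
defined in this file.

/* Illustrative only -- not part of rxe_cq.c */
static int rxe_create_cq_sketch(struct rxe_dev *rxe, struct rxe_cq *cq,
                                int cqe, int comp_vector,
                                struct ib_udata *udata,
                                struct rxe_create_cq_resp __user *uresp)
{
        int err;

        /* reject sizes outside the device limits before allocating */
        err = rxe_cq_chk_attr(rxe, NULL, cqe, comp_vector);
        if (err)
                return err;

        /* allocate the queue ring; for user CQs this also fills uresp->mi */
        return rxe_cq_from_init(rxe, cq, cqe, comp_vector, udata, uresp);
}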

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *         Redistribution and use in source and binary forms, with or
 *         without modification, are permitted provided that the following
 *         conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

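/* Validate a requested CQE count against the device limit; when an
 * existing CQ is passed in (resize), also check that the new size can
 * hold the completions already queued.
 */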
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
                    int cqe, int comp_vector)
{
        int count;

        if (cqe <= 0) {
                pr_warn("cqe(%d) <= 0\n", cqe);
                goto err1;
        }

        if (cqe > rxe->attr.max_cqe) {
                pr_warn("cqe(%d) > max_cqe(%d)\n",
                        cqe, rxe->attr.max_cqe);
                goto err1;
        }

        if (cq) {
                count = queue_count(cq->queue);
                if (cqe < count) {
                        pr_warn("cqe(%d) < current # elements in queue (%d)\n",
                                cqe, count);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

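/* Tasklet handler: deliver the completion notification to the consumer
 * unless the CQ has already been marked dying by rxe_cq_disable().
 */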
static void rxe_send_complete(unsigned long data)
{
        struct rxe_cq *cq = (struct rxe_cq *)data;
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);
        if (cq->is_dying) {
                spin_unlock_irqrestore(&cq->cq_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

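/* Allocate the completion queue ring and, for CQs created from user
 * space, fill in the mmap info in uresp so the queue buffer can be
 * mapped by the consumer.
 */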
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
                     int comp_vector, struct ib_udata *udata,
                     struct rxe_create_cq_resp __user *uresp)
{
        int err;

        cq->queue = rxe_queue_init(rxe, &cqe,
                                   sizeof(struct rxe_cqe));
        if (!cq->queue) {
                pr_warn("unable to create cq\n");
                return -ENOMEM;
        }

        err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
                           cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
        if (err) {
                vfree(cq->queue->buf);
                kfree(cq->queue);
                return err;
        }

        if (uresp)
                cq->is_user = 1;

        cq->is_dying = false;

        tasklet_init(&cq->comp_task, rxe_send_complete, (unsigned long)cq);

        spin_lock_init(&cq->cq_lock);
        cq->ibcq.cqe = cqe;
        return 0;
}

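/* Resize the underlying queue; on success advertise the new CQE count. */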
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
                        struct rxe_resize_cq_resp __user *uresp,
                        struct ib_udata *udata)
{
        int err;

        err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
                               sizeof(struct rxe_cqe), udata,
                               uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
        if (!err)
                cq->ibcq.cqe = cqe;

        return err;
}

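/* Post one completion. If the queue is full, report IB_EVENT_CQ_ERR to
 * the consumer's event handler (if any) and return -EBUSY; otherwise
 * write the CQE at the producer index and, when the CQ is armed for
 * this completion, schedule the completion tasklet.
 */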
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
        struct ib_event ev;
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);

        if (unlikely(queue_full(cq->queue))) {
                spin_unlock_irqrestore(&cq->cq_lock, flags);
                if (cq->ibcq.event_handler) {
                        ev.device = cq->ibcq.device;
                        ev.element.cq = &cq->ibcq;
                        ev.event = IB_EVENT_CQ_ERR;
                        cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
                }

                return -EBUSY;
        }

        memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));

        /* make sure all changes to the CQ are written before we update the
         * producer pointer
         */
        smp_wmb();

        advance_producer(cq->queue);
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        if ((cq->notify == IB_CQ_NEXT_COMP) ||
            (cq->notify == IB_CQ_SOLICITED && solicited)) {
                cq->notify = 0;
                tasklet_schedule(&cq->comp_task);
        }

        return 0;
}

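/* Mark the CQ as dying so a pending completion tasklet will not call
 * back into the consumer while the CQ is being destroyed.
 */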
void rxe_cq_disable(struct rxe_cq *cq)
{
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);
        cq->is_dying = true;
        spin_unlock_irqrestore(&cq->cq_lock, flags);
}

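/* Pool element cleanup: free the underlying queue, if one was allocated. */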
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
        struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

        if (cq->queue)
                rxe_queue_cleanup(cq->queue);
}
