drivers/infiniband/sw/rxe/rxe_queue.c


DEFINITIONS

This source file includes the following definitions:
  1. do_mmap_info
  2. rxe_queue_reset
  3. rxe_queue_init
  4. resize_finish
  5. rxe_queue_resize
  6. rxe_queue_cleanup

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
                 struct ib_udata *udata, struct rxe_queue_buf *buf,
                 size_t buf_size, struct rxe_mmap_info **ip_p)
{
        int err;
        struct rxe_mmap_info *ip = NULL;

        if (outbuf) {
                ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
                if (IS_ERR(ip)) {
                        err = PTR_ERR(ip);
                        goto err1;
                }

                if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
                        err = -EFAULT;
                        goto err2;
                }

                spin_lock_bh(&rxe->pending_lock);
                list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
                spin_unlock_bh(&rxe->pending_lock);
        }

        *ip_p = ip;

        return 0;

err2:
        kfree(ip);
err1:
        return err;
}
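
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * typical caller allocates the queue first, then publishes it with
 * do_mmap_info(), and on failure tears both down again, mirroring the
 * error path in rxe_queue_resize() below.
 */
static int example_share_queue(struct rxe_dev *rxe, struct rxe_queue *q,
                               struct mminfo __user *outbuf,
                               struct ib_udata *udata)
{
        int err;

        err = do_mmap_info(rxe, outbuf, udata, q->buf, q->buf_size, &q->ip);
        if (err) {
                vfree(q->buf);          /* buffer came from vmalloc_user() */
                kfree(q);
        }

        return err;
}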

inline void rxe_queue_reset(struct rxe_queue *q)
{
        /* the queue consists of a management header followed by the memory
         * of the actual queue; see struct rxe_queue_buf in rxe_queue.h.
         * Reset only the queue itself, not the header.
         */
        memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
}

struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
                                 int *num_elem,
                                 unsigned int elem_size)
{
        struct rxe_queue *q;
        size_t buf_size;
        unsigned int num_slots;

        /* num_elem == 0 is allowed, but uninteresting */
        if (*num_elem < 0)
                goto err1;

        q = kmalloc(sizeof(*q), GFP_KERNEL);
        if (!q)
                goto err1;

        q->rxe = rxe;

        /* used in resize, only need to copy used part of queue */
        q->elem_size = elem_size;

        /* pad element up to at least a cacheline and always a power of 2 */
        if (elem_size < cache_line_size())
                elem_size = cache_line_size();
        elem_size = roundup_pow_of_two(elem_size);

        q->log2_elem_size = order_base_2(elem_size);

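        /* one slot is always left empty so that a full ring (producer one
         * behind consumer, modulo the mask) can be distinguished from an
         * empty one (producer == consumer); hence the +1 before rounding
         * up to a power of two
         */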
        num_slots = *num_elem + 1;
        num_slots = roundup_pow_of_two(num_slots);
        q->index_mask = num_slots - 1;

        buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;

        q->buf = vmalloc_user(buf_size);
        if (!q->buf)
                goto err2;

        q->buf->log2_elem_size = q->log2_elem_size;
        q->buf->index_mask = q->index_mask;

        q->buf_size = buf_size;

        *num_elem = num_slots - 1;
        return q;

err2:
        kfree(q);
err1:
        return NULL;
}
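
/*
 * Minimal usage sketch (hypothetical): create a kernel-side queue and post
 * one element.  struct example_elem and its fields are made up.  Note the
 * rounding: asking for 64 elements gives 64 + 1 = 65 slots, rounded up to
 * 128, of which 127 are usable, so num_elem comes back as 127.
 */
static int example_create_and_post(struct rxe_dev *rxe)
{
        struct example_elem { u64 wr_id; u32 opcode; };
        int num_elem = 64;              /* updated to 127 on return */
        struct rxe_queue *q;

        q = rxe_queue_init(rxe, &num_elem, sizeof(struct example_elem));
        if (!q)
                return -ENOMEM;

        if (!queue_full(q)) {
                struct example_elem *elem = producer_addr(q);

                elem->wr_id = 1;
                elem->opcode = 0;
                advance_producer(q);    /* publish the element */
        }

        rxe_queue_cleanup(q);
        return 0;
}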

/* copies elements from the original q to the new q and then swaps the
 * contents of the two q headers, so that anyone still holding a pointer
 * to q continues to see a valid queue
 */
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
                         unsigned int num_elem)
{
        if (!queue_empty(q) && (num_elem < queue_count(q)))
                return -EINVAL;

        while (!queue_empty(q)) {
                memcpy(producer_addr(new_q), consumer_addr(q),
                       new_q->elem_size);
                advance_producer(new_q);
                advance_consumer(q);
        }

        swap(*q, *new_q);

        return 0;
}

int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
                     unsigned int elem_size, struct ib_udata *udata,
                     struct mminfo __user *outbuf, spinlock_t *producer_lock,
                     spinlock_t *consumer_lock)
{
        struct rxe_queue *new_q;
        unsigned int num_elem = *num_elem_p;
        int err;
        unsigned long flags = 0, flags1;

        new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
        if (!new_q)
                return -ENOMEM;

        err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
                           new_q->buf_size, &new_q->ip);
        if (err) {
                vfree(new_q->buf);
                kfree(new_q);
                goto err1;
        }

        spin_lock_irqsave(consumer_lock, flags1);

        if (producer_lock) {
                spin_lock_irqsave(producer_lock, flags);
                err = resize_finish(q, new_q, num_elem);
                spin_unlock_irqrestore(producer_lock, flags);
        } else {
                err = resize_finish(q, new_q, num_elem);
        }

        spin_unlock_irqrestore(consumer_lock, flags1);

        /* resize_finish() swapped the headers on success, so new_q now
         * holds the old queue; on failure it still holds the unused new
         * queue.  Either way it is the one to free.
         */
        rxe_queue_cleanup(new_q);
        if (err)
                goto err1;

        *num_elem_p = num_elem;
        return 0;

err1:
        return err;
}
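
/*
 * Illustrative caller-side sketch (hypothetical types and names): the
 * caller owns the spinlocks that serialize its producer and consumer
 * paths and passes them in; with a NULL outbuf, do_mmap_info() skips the
 * user mapping, so the resize stays kernel-only.
 */
struct example_obj {
        struct rxe_queue *queue;
        spinlock_t producer_lock;
        spinlock_t consumer_lock;
};

static int example_resize(struct example_obj *obj, unsigned int new_depth)
{
        return rxe_queue_resize(obj->queue, &new_depth,
                                obj->queue->elem_size, NULL, NULL,
                                &obj->producer_lock, &obj->consumer_lock);
}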

void rxe_queue_cleanup(struct rxe_queue *q)
{
        /* if the buffer was shared with user space, the final kref_put()
         * releases it through rxe_mmap_release(); otherwise free it here
         */
        if (q->ip)
                kref_put(&q->ip->ref, rxe_mmap_release);
        else
                vfree(q->buf);

        kfree(q);
}
