root/drivers/infiniband/hw/mthca/mthca_allocator.c


DEFINITIONS

This source file includes the following definitions.
  1. mthca_alloc
  2. mthca_free
  3. mthca_alloc_init
  4. mthca_alloc_cleanup
  5. mthca_array_get
  6. mthca_array_set
  7. mthca_array_clear
  8. mthca_array_init
  9. mthca_array_cleanup
  10. mthca_buf_alloc
  11. mthca_buf_free

/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "mthca_dev.h"

/* Trivial bitmap-based allocator */
u32 mthca_alloc(struct mthca_alloc *alloc)
{
        unsigned long flags;
        u32 obj;

        spin_lock_irqsave(&alloc->lock, flags);

        obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
        if (obj >= alloc->max) {
                alloc->top = (alloc->top + alloc->max) & alloc->mask;
                obj = find_first_zero_bit(alloc->table, alloc->max);
        }

        if (obj < alloc->max) {
                set_bit(obj, alloc->table);
                obj |= alloc->top;
        } else
                obj = -1;

        spin_unlock_irqrestore(&alloc->lock, flags);

        return obj;
}

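/*
 * Note that the object number returned by mthca_alloc() is the bitmap
 * index ORed with the current "top" component, which is rotated through
 * the bits covered by alloc->mask.  mthca_free() masks that component
 * off again (obj &= alloc->max - 1) before clearing the bit, and
 * advances "top" so that a recycled index does not immediately come
 * back with the same object number.
 */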
void mthca_free(struct mthca_alloc *alloc, u32 obj)
{
        unsigned long flags;

        obj &= alloc->max - 1;

        spin_lock_irqsave(&alloc->lock, flags);

        clear_bit(obj, alloc->table);
        alloc->last = min(alloc->last, obj);
        alloc->top = (alloc->top + alloc->max) & alloc->mask;

        spin_unlock_irqrestore(&alloc->lock, flags);
}

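/*
 * Illustrative sketch (not part of the original file): a typical
 * init/alloc/free/cleanup sequence.  The sizes and the 24-bit mask here
 * are hypothetical; real callers derive them from HCA limits and
 * reserved-object counts.
 *
 *      struct mthca_alloc table;
 *      u32 obj;
 *      int err;
 *
 *      err = mthca_alloc_init(&table, 256, (1 << 24) - 1, 2);
 *      if (err)
 *              return err;
 *
 *      obj = mthca_alloc(&table);
 *      if (obj == -1)
 *              goto out;
 *      // ... use obj as an object number ...
 *      mthca_free(&table, obj);
 * out:
 *      mthca_alloc_cleanup(&table);
 */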
int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
                     u32 reserved)
{
        int i;

        /* num must be a power of 2 */
        if (num != 1 << (ffs(num) - 1))
                return -EINVAL;

        alloc->last = 0;
        alloc->top  = 0;
        alloc->max  = num;
        alloc->mask = mask;
        spin_lock_init(&alloc->lock);
        alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long),
                                     GFP_KERNEL);
        if (!alloc->table)
                return -ENOMEM;

        bitmap_zero(alloc->table, num);
        for (i = 0; i < reserved; ++i)
                set_bit(i, alloc->table);

        return 0;
}

void mthca_alloc_cleanup(struct mthca_alloc *alloc)
{
        kfree(alloc->table);
}

/*
 * Array of pointers with lazy allocation of leaf pages.  Callers of
 * _get, _set and _clear methods must use a lock or otherwise
 * serialize access to the array.
 */

#define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1)

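/*
 * Illustrative sketch (not part of the original file): the array does no
 * locking of its own, so a caller would typically wrap accesses in its
 * own lock, e.g. with a hypothetical my_table_lock:
 *
 *      spin_lock_irq(&my_table_lock);
 *      err = mthca_array_set(&my_array, index, ptr);
 *      spin_unlock_irq(&my_table_lock);
 */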
void *mthca_array_get(struct mthca_array *array, int index)
{
        int p = (index * sizeof (void *)) >> PAGE_SHIFT;

        if (array->page_list[p].page)
                return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
        else
                return NULL;
}

int mthca_array_set(struct mthca_array *array, int index, void *value)
{
        int p = (index * sizeof (void *)) >> PAGE_SHIFT;

        /* Allocate with GFP_ATOMIC because we'll be called with locks held. */
        if (!array->page_list[p].page)
                array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);

        if (!array->page_list[p].page)
                return -ENOMEM;

        array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
        ++array->page_list[p].used;

        return 0;
}

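/*
 * Each leaf page carries a "used" count of live entries: mthca_array_clear()
 * drops the count and frees the whole page once it reaches zero, and only
 * NULLs the individual slot while other entries on the page remain.
 */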
void mthca_array_clear(struct mthca_array *array, int index)
{
        int p = (index * sizeof (void *)) >> PAGE_SHIFT;

        if (--array->page_list[p].used == 0) {
                free_page((unsigned long) array->page_list[p].page);
                array->page_list[p].page = NULL;
        } else
                array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;

        if (array->page_list[p].used < 0)
                pr_debug("Array %p index %d page %d with ref count %d < 0\n",
                         array, index, p, array->page_list[p].used);
}

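/*
 * mthca_array_init() only allocates the table of page pointers; the leaf
 * pages themselves are allocated lazily by mthca_array_set() and released
 * either by mthca_array_clear() or, for anything still populated, by
 * mthca_array_cleanup().
 */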
int mthca_array_init(struct mthca_array *array, int nent)
{
        int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
        int i;

        array->page_list = kmalloc_array(npage, sizeof(*array->page_list),
                                         GFP_KERNEL);
        if (!array->page_list)
                return -ENOMEM;

        for (i = 0; i < npage; ++i) {
                array->page_list[i].page = NULL;
                array->page_list[i].used = 0;
        }

        return 0;
}

void mthca_array_cleanup(struct mthca_array *array, int nent)
{
        int i;

        for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
                free_page((unsigned long) array->page_list[i].page);

        kfree(array->page_list);
}

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

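/*
 * On success, mthca_buf_alloc() sets *is_direct to 1 for a single
 * contiguous ("direct") allocation or 0 for a page-list allocation, fills
 * in the matching member of the mthca_buf union, and registers the memory
 * in @mr; mthca_buf_free() undoes both.
 *
 * Illustrative sketch of a caller (not part of the original file; the
 * size, threshold and PD shown are hypothetical):
 *
 *      union mthca_buf buf;
 *      struct mthca_mr mr;
 *      int is_direct, err;
 *
 *      err = mthca_buf_alloc(dev, queue_size, max_direct_size,
 *                            &buf, &is_direct, pd, 1, &mr);
 *      if (err)
 *              return err;
 *      // ... use buf.direct.buf or buf.page_list[i].buf ...
 *      mthca_buf_free(dev, queue_size, &buf, is_direct, &mr);
 */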
int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
                    union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
                    int hca_write, struct mthca_mr *mr)
{
        int err = -ENOMEM;
        int npages, shift;
        u64 *dma_list = NULL;
        dma_addr_t t;
        int i;

        if (size <= max_direct) {
                *is_direct = 1;
                npages     = 1;
                shift      = get_order(size) + PAGE_SHIFT;

                buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
                                                     size, &t, GFP_KERNEL);
                if (!buf->direct.buf)
                        return -ENOMEM;

                dma_unmap_addr_set(&buf->direct, mapping, t);

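                /*
                 * If the DMA address is not aligned to the order we
                 * requested, keep halving the chunk size (and doubling the
                 * chunk count) until every chunk reported below is
                 * naturally aligned.
                 */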
                while (t & ((1 << shift) - 1)) {
                        --shift;
                        npages *= 2;
                }

                dma_list = kmalloc_array(npages, sizeof(*dma_list),
                                         GFP_KERNEL);
                if (!dma_list)
                        goto err_free;

                for (i = 0; i < npages; ++i)
                        dma_list[i] = t + i * (1 << shift);
        } else {
                *is_direct = 0;
                npages     = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                shift      = PAGE_SHIFT;

                dma_list = kmalloc_array(npages, sizeof(*dma_list),
                                         GFP_KERNEL);
                if (!dma_list)
                        return -ENOMEM;

                buf->page_list = kmalloc_array(npages,
                                               sizeof(*buf->page_list),
                                               GFP_KERNEL);
                if (!buf->page_list)
                        goto err_out;

                for (i = 0; i < npages; ++i)
                        buf->page_list[i].buf = NULL;

                for (i = 0; i < npages; ++i) {
                        buf->page_list[i].buf =
                                dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                   &t, GFP_KERNEL);
                        if (!buf->page_list[i].buf)
                                goto err_free;

                        dma_list[i] = t;
                        dma_unmap_addr_set(&buf->page_list[i], mapping, t);

                        clear_page(buf->page_list[i].buf);
                }
        }

        err = mthca_mr_alloc_phys(dev, pd->pd_num,
                                  dma_list, shift, npages,
                                  0, size,
                                  MTHCA_MPT_FLAG_LOCAL_READ |
                                  (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
                                  mr);
        if (err)
                goto err_free;

        kfree(dma_list);

        return 0;

err_free:
        mthca_buf_free(dev, size, buf, *is_direct, NULL);

err_out:
        kfree(dma_list);

        return err;
}

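/*
 * Free a buffer allocated by mthca_buf_alloc().  @mr may be NULL, as on
 * mthca_buf_alloc()'s own error path, to release just the memory without
 * destroying a memory region.
 */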
void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
                    int is_direct, struct mthca_mr *mr)
{
        int i;

        if (mr)
                mthca_free_mr(dev, mr);

        if (is_direct)
                dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
                                  dma_unmap_addr(&buf->direct, mapping));
        else {
                for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          buf->page_list[i].buf,
                                          dma_unmap_addr(&buf->page_list[i],
                                                         mapping));
                kfree(buf->page_list);
        }
}
