root/drivers/net/ethernet/mellanox/mlx5/core/alloc.c


DEFINITIONS

This source file includes the following definitions.
  1. mlx5_dma_zalloc_coherent_node
  2. mlx5_buf_alloc_node
  3. mlx5_buf_alloc
  4. mlx5_buf_free
  5. mlx5_frag_buf_alloc_node
  6. mlx5_frag_buf_free
  7. mlx5_alloc_db_pgdir
  8. mlx5_alloc_db_from_pgdir
  9. mlx5_db_alloc_node
  10. mlx5_db_alloc
  11. mlx5_db_free
  12. mlx5_fill_page_array
  13. mlx5_fill_page_frag_array

/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"

struct mlx5_db_pgdir {
        struct list_head        list;
        unsigned long          *bitmap;
        __be32                 *db_page;
        dma_addr_t              db_dma;
};

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.
 */

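/*
 * A minimal usage sketch of these buffer helpers (illustrative only,
 * not taken from this file): a queue owner typically allocates the
 * buffer, copies its page addresses into the PAS array of a firmware
 * command, and frees it when the queue is destroyed:
 *
 *      struct mlx5_frag_buf buf;
 *      int err;
 *
 *      err = mlx5_buf_alloc(dev, size, &buf);
 *      if (err)
 *              return err;
 *      mlx5_fill_page_array(&buf, pas);
 *      ...
 *      mlx5_buf_free(dev, &buf);
 */

/* NUMA-aware wrapper around dma_alloc_coherent(): the device's NUMA node
 * is temporarily overridden so that the coherent memory lands close to
 * @node, then restored.  alloc_mutex serializes callers, so the temporary
 * node change is never visible to a concurrent allocation.
 */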
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
                                           size_t size, dma_addr_t *dma_handle,
                                           int node)
{
        struct mlx5_priv *priv = &dev->priv;
        struct device *device = dev->device;
        int original_node;
        void *cpu_handle;

        mutex_lock(&priv->alloc_mutex);
        original_node = dev_to_node(device);
        set_dev_node(device, node);
        cpu_handle = dma_alloc_coherent(device, size, dma_handle,
                                        GFP_KERNEL);
        set_dev_node(device, original_node);
        mutex_unlock(&priv->alloc_mutex);
        return cpu_handle;
}

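/* Allocate @size bytes as one physically contiguous, DMA-coherent chunk
 * on @node.  page_shift starts at the chunk's allocation order and is
 * lowered (doubling npages each step) until the DMA address is aligned
 * to 1 << page_shift, so the chunk can always be described as npages
 * equal-sized, naturally aligned pages.
 */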
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                        struct mlx5_frag_buf *buf, int node)
{
        dma_addr_t t;

        buf->size = size;
        buf->npages       = 1;
        buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;

        buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
        if (!buf->frags)
                return -ENOMEM;

        buf->frags->buf   = mlx5_dma_zalloc_coherent_node(dev, size,
                                                          &t, node);
        if (!buf->frags->buf)
                goto err_out;

        buf->frags->map = t;

        while (t & ((1 << buf->page_shift) - 1)) {
                --buf->page_shift;
                buf->npages *= 2;
        }

        return 0;
err_out:
        kfree(buf->frags);
        return -ENOMEM;
}

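/* As mlx5_buf_alloc_node(), but placed on the device's home NUMA node.
 * See the usage sketch above mlx5_dma_zalloc_coherent_node() for the
 * typical alloc/fill/free sequence.
 */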
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
                   int size, struct mlx5_frag_buf *buf)
{
        return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
}
EXPORT_SYMBOL(mlx5_buf_alloc);

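/* Free a buffer obtained from mlx5_buf_alloc()/mlx5_buf_alloc_node():
 * release the single coherent chunk and the frags descriptor.
 */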
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
        dma_free_coherent(dev->device, buf->size, buf->frags->buf,
                          buf->frags->map);

        kfree(buf->frags);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);

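/* Allocate @size bytes as separate PAGE_SIZE coherent fragments on @node,
 * so the buffer need not be physically contiguous.  Each fragment's DMA
 * address must be page aligned; a misaligned mapping fails the whole
 * allocation.  Usage mirrors the mlx5_buf_alloc() sketch above, except
 * that the PAS array is filled with mlx5_fill_page_frag_array().
 */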
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                             struct mlx5_frag_buf *buf, int node)
{
        int i;

        buf->size = size;
        buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
        buf->page_shift = PAGE_SHIFT;
        buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
                             GFP_KERNEL);
        if (!buf->frags)
                goto err_out;

        for (i = 0; i < buf->npages; i++) {
                struct mlx5_buf_list *frag = &buf->frags[i];
                int frag_sz = min_t(int, size, PAGE_SIZE);

                frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
                                                          &frag->map, node);
                if (!frag->buf)
                        goto err_free_buf;
                if (frag->map & ((1 << buf->page_shift) - 1)) {
                        dma_free_coherent(dev->device, frag_sz,
                                          buf->frags[i].buf, buf->frags[i].map);
                        mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
                                       &frag->map, buf->page_shift);
                        goto err_free_buf;
                }
                size -= frag_sz;
        }

        return 0;

err_free_buf:
        while (i--)
                dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
                                  buf->frags[i].map);
        kfree(buf->frags);
err_out:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node);

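/* Free every fragment of a buffer obtained from
 * mlx5_frag_buf_alloc_node(), then the frags descriptor itself.
 */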
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
        int size = buf->size;
        int i;

        for (i = 0; i < buf->npages; i++) {
                int frag_sz = min_t(int, size, PAGE_SIZE);

                dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
                                  buf->frags[i].map);
                size -= frag_sz;
        }
        kfree(buf->frags);
}
EXPORT_SYMBOL_GPL(mlx5_frag_buf_free);

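/* Doorbell records are one cache line each, so they are carved out of
 * shared DMA-coherent pages.  Allocate one such page directory: a page
 * of doorbell memory plus a bitmap with one bit per cache-line slot,
 * initially all set (a set bit means the slot is free).
 */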
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
                                                 int node)
{
        u32 db_per_page = PAGE_SIZE / cache_line_size();
        struct mlx5_db_pgdir *pgdir;

        pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
        if (!pgdir)
                return NULL;

        pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL);
        if (!pgdir->bitmap) {
                kfree(pgdir);
                return NULL;
        }

        bitmap_fill(pgdir->bitmap, db_per_page);

        pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
                                                       &pgdir->db_dma, node);
        if (!pgdir->db_page) {
                bitmap_free(pgdir->bitmap);
                kfree(pgdir);
                return NULL;
        }

        return pgdir;
}

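/* Claim the first free slot in @pgdir, point @db at its CPU and DMA
 * addresses and zero the two doorbell words.  Returns -ENOMEM when the
 * page has no free slot left.
 */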
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
                                    struct mlx5_db *db)
{
        u32 db_per_page = PAGE_SIZE / cache_line_size();
        int offset;
        int i;

        i = find_first_bit(pgdir->bitmap, db_per_page);
        if (i >= db_per_page)
                return -ENOMEM;

        __clear_bit(i, pgdir->bitmap);

        db->u.pgdir = pgdir;
        db->index   = i;
        offset = db->index * cache_line_size();
        db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
        db->dma     = pgdir->db_dma  + offset;

        db->db[0] = 0;
        db->db[1] = 0;

        return 0;
}

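/* Allocate a doorbell record near @node: reuse a slot from an existing
 * page directory when one has room, otherwise allocate a fresh pgdir and
 * take its first slot.  pgdir_mutex protects the pgdir list and bitmaps.
 *
 * A minimal usage sketch (illustrative only, not taken from this file),
 * where db.db is the CPU pointer and db.dma the address given to hardware:
 *
 *      struct mlx5_db db;
 *      int err;
 *
 *      err = mlx5_db_alloc(dev, &db);
 *      if (err)
 *              return err;
 *      ...
 *      mlx5_db_free(dev, &db);
 */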
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
        struct mlx5_db_pgdir *pgdir;
        int ret = 0;

        mutex_lock(&dev->priv.pgdir_mutex);

        list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
                if (!mlx5_alloc_db_from_pgdir(pgdir, db))
                        goto out;

        pgdir = mlx5_alloc_db_pgdir(dev, node);
        if (!pgdir) {
                ret = -ENOMEM;
                goto out;
        }

        list_add(&pgdir->list, &dev->priv.pgdir_list);

        /* This should never fail -- we just allocated an empty page: */
        WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
        mutex_unlock(&dev->priv.pgdir_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);

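/* mlx5_db_alloc_node() on the device's default NUMA node. */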
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
        return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);

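/* Return @db's slot to its page directory; once every slot in that page
 * is free again, the page and its bookkeeping are released.
 */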
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
        u32 db_per_page = PAGE_SIZE / cache_line_size();

        mutex_lock(&dev->priv.pgdir_mutex);

        __set_bit(db->index, db->u.pgdir->bitmap);

        if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
                dma_free_coherent(dev->device, PAGE_SIZE,
                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
                list_del(&db->u.pgdir->list);
                bitmap_free(db->u.pgdir->bitmap);
                kfree(db->u.pgdir);
        }

        mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);

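/* Fill the big-endian page address (PAS) array for a contiguous buffer:
 * npages addresses spaced 1 << page_shift apart within the single DMA
 * mapping.  @pas must have room for buf->npages entries.
 */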
void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
        u64 addr;
        int i;

        for (i = 0; i < buf->npages; i++) {
                addr = buf->frags->map + (i << buf->page_shift);

                pas[i] = cpu_to_be64(addr);
        }
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);

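/* Fill the big-endian page address array for a fragmented buffer: one
 * entry per PAGE_SIZE fragment, again assuming room for buf->npages
 * entries in @pas.
 */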
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
        int i;

        for (i = 0; i < buf->npages; i++)
                pas[i] = cpu_to_be64(buf->frags[i].map);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);
