root/drivers/infiniband/hw/mlx5/mem.c

DEFINITIONS

This source file includes the following definitions:
  1. mlx5_ib_cont_pages
  2. umem_dma_to_mtt
  3. __mlx5_ib_populate_pas
  4. mlx5_ib_populate_pas
  5. mlx5_ib_get_buf_offset

/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"

/* @umem: umem object to scan
 * @addr: IB virtual address requested by the user
 * @max_page_shift: high limit for page_shift - 0 means no limit
 * @count: number of PAGE_SIZE pages covered by umem
 * @shift: page shift for the compound pages found in the region
 * @ncont: number of compound pages
 * @order: log2 of the number of compound pages
 */
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
                        unsigned long max_page_shift,
                        int *count, int *shift,
                        int *ncont, int *order)
{
        unsigned long tmp;
        unsigned long m;
        u64 base = ~0, p = 0;
        u64 len, pfn;
        int i = 0;
        struct scatterlist *sg;
        int entry;

        addr = addr >> PAGE_SHIFT;
        tmp = (unsigned long)addr;
        m = find_first_bit(&tmp, BITS_PER_LONG);
        if (max_page_shift)
                m = min_t(unsigned long, max_page_shift - PAGE_SHIFT, m);

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                pfn = sg_dma_address(sg) >> PAGE_SHIFT;
                if (base + p != pfn) {
                        /* If either the offset or the new
                         * base is unaligned, update m
                         */
                        tmp = (unsigned long)(pfn | p);
                        if (!IS_ALIGNED(tmp, 1 << m))
                                m = find_first_bit(&tmp, BITS_PER_LONG);

                        base = pfn;
                        p = 0;
                }

                p += len;
                i += len;
        }

        if (i) {
                m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);

                if (order)
                        *order = ilog2(roundup_pow_of_two(i) >> m);

                *ncont = DIV_ROUND_UP(i, (1 << m));
        } else {
                m  = 0;

                if (order)
                        *order = 0;

                *ncont = 0;
        }
        *shift = PAGE_SHIFT + m;
        *count = i;
}
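
/*
 * Worked example (illustrative only, not taken from any caller): assume
 * PAGE_SHIFT is 12 (4 KiB pages), max_page_shift is 0, and the umem maps a
 * single physically contiguous 2 MiB region whose DMA address and IB
 * virtual address are both 2 MiB aligned.  The scan then sees i = 512
 * PAGE_SIZE pages with an alignment of m = 9, so mlx5_ib_cont_pages()
 * returns *count = 512, *shift = 12 + 9 = 21, *ncont = 1 and *order = 0,
 * i.e. the whole region can be described by one 2 MiB MTT entry.
 */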
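/*
 * Translate one entry of an ODP umem's dma_list into an MTT entry: keep the
 * DMA address bits and convert the ODP read/write permission bits into the
 * MLX5_IB_MTT_READ/MLX5_IB_MTT_WRITE flags carried in the MTT entry.
 */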
static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
{
        u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;

        if (umem_dma & ODP_READ_ALLOWED_BIT)
                mtt_entry |= MLX5_IB_MTT_READ;
        if (umem_dma & ODP_WRITE_ALLOWED_BIT)
                mtt_entry |= MLX5_IB_MTT_WRITE;

        return mtt_entry;
}

/*
 * Populate the given array with bus addresses from the umem.
 *
 * dev - mlx5_ib device
 * umem - umem to use to fill the pages
 * page_shift - determines the page size used in the resulting array
 * offset - offset into the umem to start from,
 *          only implemented for ODP umems
 * num_pages - total number of pages to fill
 * pas - bus address array to fill
 * access_flags - access flags to set on all present pages.
 *                Use enum mlx5_ib_mtt_access_flags for this.
 */
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
                            int page_shift, size_t offset, size_t num_pages,
                            __be64 *pas, int access_flags)
{
        int shift = page_shift - PAGE_SHIFT;
        int mask = (1 << shift) - 1;
        int i, k, idx;
        u64 cur = 0;
        u64 base;
        int len;
        struct scatterlist *sg;
        int entry;

        if (umem->is_odp) {
                WARN_ON(shift != 0);
                WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));

                for (i = 0; i < num_pages; ++i) {
                        dma_addr_t pa =
                                to_ib_umem_odp(umem)->dma_list[offset + i];

                        pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
                }
                return;
        }

        i = 0;
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                base = sg_dma_address(sg);

                /* Skip elements below offset */
                if (i + len < offset << shift) {
                        i += len;
                        continue;
                }

                /* Skip pages below offset */
                if (i < offset << shift) {
                        k = (offset << shift) - i;
                        i = offset << shift;
                } else {
                        k = 0;
                }

                for (; k < len; k++) {
                        if (!(i & mask)) {
                                cur = base + (k << PAGE_SHIFT);
                                cur |= access_flags;
                                idx = (i >> shift) - offset;

                                pas[idx] = cpu_to_be64(cur);
                                mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
                                            i >> shift, be64_to_cpu(pas[idx]));
                        }
                        i++;

                        /* Stop after num_pages reached */
                        if (i >> shift >= offset + num_pages)
                                return;
                }
        }
}
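
/*
 * Example (illustrative only): with PAGE_SHIFT = 12 and page_shift = 21
 * (2 MiB MTT entries), shift is 9 and mask is 0x1ff, so the loop above
 * writes an entry only when i is a multiple of 512, i.e. one pas[] entry
 * per 2 MiB of the umem, with access_flags OR-ed into its low bits.
 */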

void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
                          int page_shift, __be64 *pas, int access_flags)
{
        return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
                                      ib_umem_num_pages(umem), pas,
                                      access_flags);
}
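
/*
 * Typical pairing of the helpers above (a minimal illustrative sketch, not
 * copied from any caller in this driver; "umem", "virt_addr" and "dev" are
 * assumed to exist in the caller):
 *
 *      int npages, page_shift, ncont, order;
 *      __be64 *pas;
 *
 *      mlx5_ib_cont_pages(umem, virt_addr, 0, &npages, &page_shift,
 *                         &ncont, &order);
 *      pas = kcalloc(ncont, sizeof(*pas), GFP_KERNEL);
 *      if (!pas)
 *              return -ENOMEM;
 *      mlx5_ib_populate_pas(dev, umem, page_shift, pas, 0);
 *      ...
 *      kfree(pas);
 */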

int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
{
        u64 page_size;
        u64 page_mask;
        u64 off_size;
        u64 off_mask;
        u64 buf_off;

        page_size = (u64)1 << page_shift;
        page_mask = page_size - 1;
        buf_off = addr & page_mask;
        off_size = page_size >> 6;
        off_mask = off_size - 1;

        if (buf_off & off_mask)
                return -EINVAL;

        *offset = buf_off >> ilog2(off_size);
        return 0;
}
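
/*
 * Worked example (illustrative only): with page_shift = 12 the page size is
 * 4096 bytes and off_size is 4096 >> 6 = 64, so the buffer must start on a
 * 64-byte boundary within its page.  For addr = 0x10040 the in-page offset
 * is 0x40, a multiple of 64, and *offset becomes 0x40 / 64 = 1; for
 * addr = 0x10030 the in-page offset is 0x30 and the function returns
 * -EINVAL.
 */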
