root/include/rdma/ib_umem_odp.h

DEFINITIONS

This source file includes the following definitions:
  1. to_ib_umem_odp
  2. ib_umem_start
  3. ib_umem_end
  4. ib_umem_odp_num_pages
  5. rbt_ib_umem_lookup
  6. ib_umem_mmu_notifier_retry
  7. ib_umem_odp_get
  8. ib_umem_odp_release

/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <linux/interval_tree.h>

struct ib_umem_odp {
	struct ib_umem umem;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page		**page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate access
	 * permissions. See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t		*dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex		umem_mutex;
	void			*private; /* for the HW driver to use. */

	int notifiers_seq;
	int notifiers_count;
	int npages;

	/* Tree tracking */
	struct interval_tree_node interval_tree;

	/*
	 * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
	 * only as an anchor for the driver to hold onto the per_mm. FIXME:
	 * This should be removed and drivers should work with the per_mm
	 * directly.
	 */
	bool is_implicit_odp;

	struct completion	notifier_completion;
	int			dying;
	unsigned int		page_shift;
	struct work_struct	work;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}

/* Returns the address of the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
	return umem_odp->interval_tree.start;
}

/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
	return umem_odp->interval_tree.last + 1;
}

static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}
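
/*
 * Example (illustrative sketch): converting a user virtual address into the
 * index of its entry in page_list/dma_list. The helper name is hypothetical;
 * "addr" is assumed to lie inside [ib_umem_start(), ib_umem_end()).
 */
static inline size_t ib_umem_odp_page_index(struct ib_umem_odp *umem_odp,
					    unsigned long addr)
{
	return (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
}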

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in a change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT (1ULL << 1)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
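
/*
 * Example (illustrative sketch): splitting a dma_list entry into the DMA
 * address and its permission bits. The helper names are hypothetical; only
 * the bit layout itself is defined by the macros above.
 */
static inline dma_addr_t ib_umem_odp_entry_addr(dma_addr_t entry)
{
	return entry & ODP_DMA_ADDR_MASK;
}

static inline bool ib_umem_odp_entry_writable(dma_addr_t entry)
{
	return entry & ODP_WRITE_ALLOWED_BIT;
}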

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_ucontext_per_mm {
	struct mmu_notifier mn;
	struct pid *tgid;

	struct rb_root_cached umem_tree;
	/* Protects umem_tree */
	struct rw_semaphore umem_rwsem;
};

struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
				    size_t size, int access);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
					       int access);
struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem,
					    unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
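
/*
 * Example (illustrative sketch): the basic lifetime of an explicit ODP umem
 * in a driver's registration path. The function name and access flags are
 * assumptions for illustration; error handling is abbreviated.
 */
static inline int example_reg_odp_mr(struct ib_udata *udata,
				     unsigned long addr, size_t size)
{
	struct ib_umem_odp *odp;

	odp = ib_umem_odp_get(udata, addr, size,
			      IB_ACCESS_LOCAL_WRITE | IB_ACCESS_ON_DEMAND);
	if (IS_ERR(odp))
		return PTR_ERR(odp);

	/* ... stash driver state in odp->private, program the HW ... */

	ib_umem_odp_release(odp);
	return 0;
}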

int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);

typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
			      void *cookie);
/*
 * Call the callback on each ib_umem in the range. Returns the logical OR of
 * the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 end,
				  umem_call_back cb,
				  bool blockable, void *cookie);
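
/*
 * Example (illustrative sketch): an invalidation callback matching the
 * umem_call_back signature, as a driver's mmu notifier might pass to
 * rbt_ib_umem_for_each_in_range(). The function name is hypothetical.
 */
static inline int example_invalidate_range(struct ib_umem_odp *item, u64 start,
					   u64 end, void *cookie)
{
	/* Drop the DMA mappings that overlap the invalidated range. */
	ib_umem_odp_unmap_dma_pages(item, start, end);
	return 0;
}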

/*
 * Find the first region intersecting with the address range.
 * Returns NULL if not found.
 */
static inline struct ib_umem_odp *
rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_first(root, addr, addr + length - 1);
	if (!node)
		return NULL;
	return container_of(node, struct ib_umem_odp, interval_tree);
}
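
/*
 * Example (illustrative sketch): finding the umem that backs a faulting
 * address. The function name is hypothetical; the caller is assumed to hold
 * per_mm->umem_rwsem at least for read so the tree cannot change underneath.
 */
static inline struct ib_umem_odp *
example_odp_for_addr(struct ib_ucontext_per_mm *per_mm, u64 fault_addr)
{
	lockdep_assert_held(&per_mm->umem_rwsem);
	return rbt_ib_umem_lookup(&per_mm->umem_tree, fault_addr, 1);
}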

static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
					     unsigned long mmu_seq)
{
	/*
	 * This code is strongly based on the KVM code from
	 * mmu_notifier_retry. Should be called with the relevant locks taken
	 * (umem_odp->umem_mutex and the per_mm umem_rwsem held for read).
	 */

	if (unlikely(umem_odp->notifiers_count))
		return 1;
	if (umem_odp->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}
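
/*
 * Example (illustrative sketch): the sequence/retry pattern a page-fault
 * handler might follow, mirroring how KVM pairs mmu_notifier_retry with a
 * sampled sequence number. The function name and the HW-update step are
 * assumptions for illustration.
 */
static inline int example_fault_pages(struct ib_umem_odp *odp, u64 off,
				      u64 bcnt, u64 access_mask)
{
	unsigned long seq;
	int ret;

retry:
	/* Sample the notifier sequence before mapping any pages. */
	seq = READ_ONCE(odp->notifiers_seq);
	ret = ib_umem_odp_map_dma_pages(odp, off, bcnt, access_mask, seq);
	if (ret < 0)
		return ret;

	mutex_lock(&odp->umem_mutex);
	if (ib_umem_mmu_notifier_retry(odp, seq)) {
		/* An invalidation raced with the fault; start over. */
		mutex_unlock(&odp->umem_mutex);
		goto retry;
	}
	/* ... update device page tables while holding umem_mutex ... */
	mutex_unlock(&odp->umem_mutex);
	return 0;
}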

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata,
						  unsigned long addr,
						  size_t size, int access)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */
