This source file includes the following definitions:
- to_ib_umem_odp
- ib_umem_start
- ib_umem_end
- ib_umem_odp_num_pages
- rbt_ib_umem_lookup
- ib_umem_mmu_notifier_retry
- ib_umem_odp_get
- ib_umem_odp_release
#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <linux/interval_tree.h>

struct ib_umem_odp {
	struct ib_umem umem;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page **page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate access
	 * permissions. See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t *dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex umem_mutex;
	void *private; /* for the HW driver to use. */

	int notifiers_seq;
	int notifiers_count;
	int npages;

	/* Tree tracking */
	struct interval_tree_node interval_tree;

	/*
	 * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
	 * only as an anchor for the driver to hold onto the per_mm. FIXME:
	 * This should be removed and drivers should work with the per_mm
	 * directly.
	 */
	bool is_implicit_odp;

	struct completion notifier_completion;
	int dying;
	unsigned int page_shift;
	struct work_struct work;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}
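
/*
 * Example (editor's sketch, not part of this header): a driver that only
 * holds the generic struct ib_umem pointer can recover the ODP wrapper
 * with to_ib_umem_odp(); "mr" here is a hypothetical driver MR structure:
 *
 *	if (mr->umem->is_odp) {
 *		struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
 *
 *		pr_debug("odp umem: %zu pages\n", ib_umem_odp_num_pages(odp));
 *	}
 */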

/* Returns the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
	return umem_odp->interval_tree.start;
}

/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
	return umem_odp->interval_tree.last + 1;
}

static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}
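
/*
 * Worked example (editor's note): for an ODP umem whose interval tree node
 * covers start = 0x201000 and last = 0x203fff with page_shift = 12,
 * ib_umem_end() returns 0x204000, so ib_umem_odp_num_pages() computes
 * (0x204000 - 0x201000) >> 12 = 3 pages.
 */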

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT (1ULL << 1)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
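
/*
 * Example (editor's sketch): a driver can split a dma_list entry into the
 * DMA address and its permission bits:
 *
 *	dma_addr_t entry = umem_odp->dma_list[idx];
 *	dma_addr_t dma   = entry & ODP_DMA_ADDR_MASK;
 *	bool writable    = entry & ODP_WRITE_ALLOWED_BIT;
 */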

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_ucontext_per_mm {
	struct mmu_notifier mn;
	struct pid *tgid;

	/* Interval tree of the ODP umems registered against this mm. */
	struct rb_root_cached umem_tree;
	/* Protects umem_tree. */
	struct rw_semaphore umem_rwsem;
};

struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
				    size_t size, int access);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
					       int access);
struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem,
					    unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);

typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
			      void *cookie);

/*
 * Call the callback on each ib_umem in the range. Returns the logical or of
 * the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 end,
				  umem_call_back cb,
				  bool blockable, void *cookie);
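
/*
 * Example (editor's sketch): an invalidation path might unmap every ODP
 * umem overlapping [start, end) under the per_mm rwsem. The callback below
 * is hypothetical, and the real notifier also updates the notifier
 * counters under umem_mutex:
 *
 *	static int invalidate_range_cb(struct ib_umem_odp *item, u64 start,
 *				       u64 end, void *cookie)
 *	{
 *		ib_umem_odp_unmap_dma_pages(item, start, end);
 *		return 0;
 *	}
 *
 *	down_read(&per_mm->umem_rwsem);
 *	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
 *				      invalidate_range_cb, true, NULL);
 *	up_read(&per_mm->umem_rwsem);
 */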

/*
 * Find the first region intersecting with the address range.
 * Return NULL if not found.
 */
static inline struct ib_umem_odp *
rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_first(root, addr, addr + length - 1);
	if (!node)
		return NULL;
	return container_of(node, struct ib_umem_odp, interval_tree);
}
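
/*
 * Example (editor's sketch): resolving a faulting address to the ODP umem
 * covering it, with the tree held stable by umem_rwsem:
 *
 *	down_read(&per_mm->umem_rwsem);
 *	umem_odp = rbt_ib_umem_lookup(&per_mm->umem_tree, fault_addr, 1);
 *	up_read(&per_mm->umem_rwsem);
 *	if (!umem_odp)
 *		return -EFAULT;
 */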

static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
					     unsigned long mmu_seq)
{
	/*
	 * This code is strongly based on the KVM code from
	 * kvm_main.c kvm_mmu_notifier_retry. Should be called with the
	 * relevant locks taken (umem_odp->umem_mutex and the per_mm
	 * umem_rwsem locked for read).
	 */
	if (unlikely(umem_odp->notifiers_count))
		return 1;
	if (umem_odp->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}
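
/*
 * Example (editor's sketch): the intended fault-handling pattern mirrors
 * KVM's kvm_mmu_notifier_retry. Sample notifiers_seq first, map the pages,
 * then re-check under umem_mutex before committing the translation to
 * hardware; error handling is elided and "again" is a hypothetical label:
 *
 *	unsigned long seq;
 *
 * again:
 *	seq = umem_odp->notifiers_seq;
 *	ret = ib_umem_odp_map_dma_pages(umem_odp, off, bcnt, access, seq);
 *	if (ret < 0)
 *		return ret;
 *
 *	mutex_lock(&umem_odp->umem_mutex);
 *	if (ib_umem_mmu_notifier_retry(umem_odp, seq)) {
 *		mutex_unlock(&umem_odp->umem_mutex);
 *		goto again;
 *	}
 *	... program the device page table from umem_odp->dma_list ...
 *	mutex_unlock(&umem_odp->umem_mutex);
 */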

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata,
						  unsigned long addr,
						  size_t size, int access)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */