This source file includes the following definitions:
- ib_fmr_hash
- ib_fmr_cache_lookup
- ib_fmr_batch_release
- ib_fmr_cleanup_func
- ib_create_fmr_pool
- ib_destroy_fmr_pool
- ib_flush_fmr_pool
- ib_fmr_pool_map_phys
- ib_fmr_pool_unmap
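These functions implement a pool of InfiniBand FMRs (fast memory regions) whose mappings are cached in hash buckets and unmapped lazily in batches by a kthread worker. As a rough orientation before the source, here is a minimal, hypothetical creation sketch; the protection domain pd, the sizes, and the watermark are placeholder values, the access flags are the standard ib_verbs ones, and error handling is abbreviated:

#include <linux/err.h>
#include <rdma/ib_fmr_pool.h>

/* Hypothetical helper: create a small, cached FMR pool on an existing PD. */
static struct ib_fmr_pool *example_create_pool(struct ib_pd *pd)
{
        struct ib_fmr_pool_param params = {
                .max_pages_per_fmr = 64,         /* placeholder upper bound per mapping */
                .page_shift        = PAGE_SHIFT,
                .access            = IB_ACCESS_LOCAL_WRITE |
                                     IB_ACCESS_REMOTE_READ |
                                     IB_ACCESS_REMOTE_WRITE,
                .pool_size         = 32,         /* FMRs preallocated by ib_create_fmr_pool() */
                .dirty_watermark   = 8,          /* batch-unmap once this many FMRs are dirty */
                .cache             = 1,          /* enable the hash-bucket reuse cache */
        };
        struct ib_fmr_pool *pool;

        pool = ib_create_fmr_pool(pd, &params);
        if (IS_ERR(pool))                        /* e.g. -ENOSYS if the device lacks FMR support */
                return NULL;

        return pool;
}

ib_destroy_fmr_pool() tears the pool down again; both routines, together with the map, unmap, and flush entry points, appear in full below.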
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "

enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

struct ib_fmr_pool {
        spinlock_t pool_lock;

        int pool_size;
        int max_pages;
        int max_remaps;
        int dirty_watermark;
        int dirty_len;
        struct list_head free_list;
        struct list_head dirty_list;
        struct hlist_head *cache_bucket;

        void (*flush_function)(struct ib_fmr_pool *pool,
                               void *arg);
        void *flush_arg;

        struct kthread_worker *worker;
        struct kthread_work work;

        atomic_t req_ser;
        atomic_t flush_ser;

        wait_queue_head_t force_wait;
};

static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                (IB_FMR_HASH_SIZE - 1);
}

static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int page_list_len,
                                                      u64 io_virtual_address)
{
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len == fmr->page_list_len &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof *page_list))
                        return fmr;

        return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);

        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);
        }

        list_splice_init(&pool->dirty_list, &unmap_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list)) {
                return;
        }

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}

static void ib_fmr_cleanup_func(struct kthread_work *work)
{
        struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work);

        ib_fmr_batch_release(pool);
        atomic_inc(&pool->flush_ser);
        wake_up_interruptible(&pool->force_wait);

        if (pool->flush_function)
                pool->flush_function(pool, pool->flush_arg);

        if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0)
                kthread_queue_work(pool->worker, &pool->work);
}

struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device *device;
        struct ib_fmr_pool *pool;
        int i;
        int ret;
        int max_remaps;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->ops.alloc_fmr || !device->ops.dealloc_fmr ||
            !device->ops.map_phys_fmr || !device->ops.unmap_fmr) {
                dev_info(&device->dev, "Device does not support FMRs\n");
                return ERR_PTR(-ENOSYS);
        }

        if (!device->attrs.max_map_per_fmr)
                max_remaps = IB_FMR_MAX_REMAPS;
        else
                max_remaps = device->attrs.max_map_per_fmr;

        pool = kmalloc(sizeof *pool, GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        pool->cache_bucket = NULL;
        pool->flush_function = params->flush_function;
        pool->flush_arg = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc_array(IB_FMR_HASH_SIZE,
                                      sizeof(*pool->cache_bucket),
                                      GFP_KERNEL);
                if (!pool->cache_bucket) {
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size = 0;
        pool->max_pages = params->max_pages_per_fmr;
        pool->max_remaps = max_remaps;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser, 0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->worker =
                kthread_create_worker(0, "ib_fmr(%s)", dev_name(&device->dev));
        if (IS_ERR(pool->worker)) {
                pr_warn(PFX "couldn't start cleanup kthread worker\n");
                ret = PTR_ERR(pool->worker);
                goto out_free_pool;
        }
        kthread_init_work(&pool->work, ib_fmr_cleanup_func);

        {
                struct ib_pool_fmr *fmr;
                struct ib_fmr_attr fmr_attr = {
                        .max_pages  = params->max_pages_per_fmr,
                        .max_maps   = pool->max_remaps,
                        .page_shift = params->page_shift
                };
                int bytes_per_fmr = sizeof *fmr;

                if (pool->cache_bucket)
                        bytes_per_fmr += params->max_pages_per_fmr * sizeof(u64);

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
                        if (!fmr)
                                goto out_fail;

                        fmr->pool = pool;
                        fmr->remap_count = 0;
                        fmr->ref_count = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                        if (IS_ERR(fmr->fmr)) {
                                pr_warn(PFX "fmr_create failed for FMR %d\n",
                                        i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);

void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        LIST_HEAD(fmr_list);
        int i;

        kthread_destroy_worker(pool->worker);
        ib_fmr_batch_release(pool);

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                if (fmr->remap_count) {
                        INIT_LIST_HEAD(&fmr_list);
                        list_add_tail(&fmr->fmr->list, &fmr_list);
                        ib_unmap_fmr(&fmr_list);
                }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                pr_warn(PFX "pool still has %d regions registered\n",
                        pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        int serial;
        struct ib_pool_fmr *fmr, *next;

        spin_lock_irq(&pool->pool_lock);
        list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
                if (fmr->remap_count > 0)
                        list_move(&fmr->list, &pool->dirty_list);
        }
        spin_unlock_irq(&pool->pool_lock);

        serial = atomic_inc_return(&pool->req_ser);
        kthread_queue_work(pool->worker, &pool->work);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) - serial >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
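ib_flush_fmr_pool() above lets a consumer force the batch release instead of waiting for the dirty watermark: it bumps req_ser, kicks the worker, and sleeps until flush_ser catches up. A hedged caller-side fragment (pool is assumed to exist; the surrounding function is hypothetical):

        /* Make sure no stale pool mappings still cover buffers we are about to reuse. */
        int ret = ib_flush_fmr_pool(pool);

        if (ret)        /* -EINTR: the wait was interrupted by a signal */
                return ret;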

struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64 *page_list,
                                         int list_len,
                                         u64 io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long flags;
        int result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  io_virtual_address);
        if (fmr) {
                ++fmr->ref_count;
                if (fmr->ref_count == 1) {
                        list_del(&fmr->list);
                }

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                pr_warn(PFX "fmr_map returns %d\n", result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = io_virtual_address;
                fmr->page_list_len = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
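A typical consumer pairs ib_fmr_pool_map_phys() with ib_fmr_pool_unmap() (below). In this sketch dma_pages, npages, and iova are hypothetical caller-supplied values, and the lkey/rkey used for work requests come from the underlying struct ib_fmr:

        struct ib_pool_fmr *pfmr;

        pfmr = ib_fmr_pool_map_phys(pool, dma_pages, npages, iova);
        if (IS_ERR(pfmr))               /* -EAGAIN when the free list is empty */
                return PTR_ERR(pfmr);

        /* ... post work requests using pfmr->fmr->lkey / pfmr->fmr->rkey ... */

        ib_fmr_pool_unmap(pfmr);        /* back to the free or dirty list once ref_count hits 0 */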

void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < pool->max_remaps) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        if (++pool->dirty_len >= pool->dirty_watermark) {
                                atomic_inc(&pool->req_ser);
                                kthread_queue_work(pool->worker, &pool->work);
                        }
                }
        }

        spin_unlock_irqrestore(&pool->pool_lock, flags);
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);