Lines Matching refs:pool

46 	struct rds_iw_mr_pool	*pool;  member
78 static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
80 static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
81 static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
84 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
85 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
89 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
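
These hits come from the RDS iWARP memory-registration pool (net/rds/iw_rdma.c in older kernels). Taken together, the references above and below imply roughly the following pool structure. This is a reconstruction from usage only, so exact field types and ordering are assumptions:

struct rds_iw_mr_pool {
	struct rds_iw_device	*device;	/* owning device (line 344) */

	struct mutex		flush_lock;	/* serializes pool flushes (347) */
	struct work_struct	flush_worker;	/* deferred flush (349) */

	spinlock_t		list_lock;	/* protects both lists (348) */
	struct list_head	dirty_list;	/* MRs waiting to be invalidated */
	struct list_head	clean_list;	/* invalidated MRs ready for reuse */

	atomic_t		item_count;	/* total MRs in the pool (378) */
	atomic_t		dirty_count;	/* MRs on dirty_list (537) */
	atomic_t		free_pinned;	/* pages pinned by unused MRs (379) */

	unsigned long		max_items;	/* hard cap (352) */
	unsigned long		max_items_soft;	/* soft flush threshold (361) */
	unsigned long		max_free_pinned; /* pinned-page threshold (353) */
	unsigned int		max_pages;	/* per-MR page cap (354) */
	unsigned int		max_message_size; /* per-MR size cap (351) */
};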
336 struct rds_iw_mr_pool *pool; local
338 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
339 if (!pool) {
344 pool->device = rds_iwdev;
345 INIT_LIST_HEAD(&pool->dirty_list);
346 INIT_LIST_HEAD(&pool->clean_list);
347 mutex_init(&pool->flush_lock);
348 spin_lock_init(&pool->list_lock);
349 INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);
351 pool->max_message_size = fastreg_message_size;
352 pool->max_items = fastreg_pool_size;
353 pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
354 pool->max_pages = fastreg_message_size;
361 pool->max_items_soft = pool->max_items * 3 / 4;
363 return pool;
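
Lines 336-363 sketch the pool constructor (presumably rds_iw_create_mr_pool(), the counterpart of rds_iw_destroy_mr_pool() at line 374). A condensed reconstruction; the error path at line 339, whose body is not shown, is filled in as an assumption:

struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);	/* assumed; body of line 339 not shown */

	pool->device = rds_iwdev;
	INIT_LIST_HEAD(&pool->dirty_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);

	pool->max_message_size = fastreg_message_size;
	pool->max_items = fastreg_pool_size;
	/* Keep at most a quarter of the theoretical maximum pinned. */
	pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
	pool->max_pages = fastreg_message_size;
	/* Start reclaiming before the hard limit is hit. */
	pool->max_items_soft = pool->max_items * 3 / 4;

	return pool;
}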
368 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool; local
370 iinfo->rdma_mr_max = pool->max_items;
371 iinfo->rdma_mr_size = pool->max_pages;
374 void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool) argument
377 rds_iw_flush_mr_pool(pool, 1);
378 BUG_ON(atomic_read(&pool->item_count));
379 BUG_ON(atomic_read(&pool->free_pinned));
380 kfree(pool);
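
Lines 374-380 show teardown: the flush at line 377 passes free_all = 1, which (per lines 497-499) also splices clean_list onto the kill list, so every MR is destroyed before the BUG_ON sanity checks and the final kfree(). By contrast, the routine flushes at lines 430, 548, 570 and 587 pass 0 and only reap dirty_list.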
383 static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool) argument
388 spin_lock_irqsave(&pool->list_lock, flags);
389 if (!list_empty(&pool->clean_list)) {
390 ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
393 spin_unlock_irqrestore(&pool->list_lock, flags);
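
Lines 383-393 are the allocator's fast path: take the first clean MR, if any, under list_lock. The list_del that must accompany the list_entry at line 390 is not a refs:pool hit, so its placement below is an assumption:

static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
{
	struct rds_iw_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr,
				  mapping.m_list);
		list_del_init(&ibmr->mapping.m_list);	/* assumed: removal elided */
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}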
400 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool; local
405 ibmr = rds_iw_reuse_fmr(pool);
418 if (atomic_inc_return(&pool->item_count) <= pool->max_items)
421 atomic_dec(&pool->item_count);
430 rds_iw_flush_mr_pool(pool, 0);
443 err = rds_iw_init_fastreg(pool, ibmr);
452 rds_iw_destroy_fastreg(pool, ibmr);
455 atomic_dec(&pool->item_count);
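
Lines 400-455 outline the slow path when clean_list is empty: reserve a slot with atomic_inc_return() against max_items (line 418), back the reservation out on failure (lines 421 and 455), flush dirty MRs to make room (line 430), and tear down a partially built MR with rds_iw_destroy_fastreg() (line 452). A sketch of that control flow; the enclosing function's name and the code between the shown lines are guesses:

static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
	struct rds_iw_mr *ibmr;
	int err;

	/* Fast path: recycle an already-registered MR (lines 383-393). */
	ibmr = rds_iw_reuse_fmr(pool);
	if (ibmr)
		return ibmr;

	/* Reserve a slot; flush and retry while over the cap
	 * (the real code likely bounds these retries). */
	while (atomic_inc_return(&pool->item_count) > pool->max_items) {
		atomic_dec(&pool->item_count);
		rds_iw_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);	/* assumed allocation */
	if (!ibmr) {
		err = -ENOMEM;
		goto out_dec;
	}

	err = rds_iw_init_fastreg(pool, ibmr);
	if (err)
		goto out_destroy;

	return ibmr;

out_destroy:
	rds_iw_destroy_fastreg(pool, ibmr);	/* line 452 */
	kfree(ibmr);
out_dec:
	atomic_dec(&pool->item_count);		/* line 455 */
	return ERR_PTR(err);
}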
482 static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) argument
493 mutex_lock(&pool->flush_lock);
495 spin_lock_irqsave(&pool->list_lock, flags);
497 list_splice_init(&pool->dirty_list, &unmap_list);
499 list_splice_init(&pool->clean_list, &kill_list);
500 spin_unlock_irqrestore(&pool->list_lock, flags);
511 ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
523 rds_iw_destroy_fastreg(pool, ibmr);
531 spin_lock_irqsave(&pool->list_lock, flags);
532 list_splice(&unmap_list, &pool->clean_list);
533 spin_unlock_irqrestore(&pool->list_lock, flags);
536 atomic_sub(unpinned, &pool->free_pinned);
537 atomic_sub(ncleaned, &pool->dirty_count);
538 atomic_sub(nfreed, &pool->item_count);
540 mutex_unlock(&pool->flush_lock);
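
Lines 482-540 are the heart of the pool: the flush splices work off the shared lists while holding the spinlock, performs the expensive invalidation outside it, and only then settles the counters. The full parameter list of rds_iw_unmap_fastreg_list() is cut off at line 85, so the extra arguments below are assumptions:

static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
{
	struct rds_iw_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(kill_list);
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned, unpinned = 0;

	mutex_lock(&pool->flush_lock);

	/* Grab both lists under the lock, then work on private copies. */
	spin_lock_irqsave(&pool->list_lock, flags);
	list_splice_init(&pool->dirty_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &kill_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* Invalidate dirty MRs; assumed extra parameters. */
	ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
					     &kill_list, &unpinned);

	list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
		list_del(&ibmr->mapping.m_list);
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
		nfreed++;
	}

	/* Whatever survived unmapping is clean and reusable. */
	spin_lock_irqsave(&pool->list_lock, flags);
	list_splice(&unmap_list, &pool->clean_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

	mutex_unlock(&pool->flush_lock);
	return 0;
}

The worker at line 546 simply resolves its pool with container_of() and calls this function with free_all = 0.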
546 struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker); local
548 rds_iw_flush_mr_pool(pool, 0);
554 struct rds_iw_mr_pool *pool = ibmr->device->mr_pool; local
557 if (!pool)
561 rds_iw_free_fastreg(pool, ibmr);
564 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
565 atomic_read(&pool->dirty_count) >= pool->max_items / 10)
566 queue_work(rds_wq, &pool->flush_worker);
570 rds_iw_flush_mr_pool(pool, 0);
574 queue_work(rds_wq, &pool->flush_worker);
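
Lines 554-574 show the release path: freeing an MR merely parks it on dirty_list (via rds_iw_free_fastreg() at line 561) and lets thresholds decide when to flush. A sketch; the function signature and the branch structure around lines 570 and 574 are inferred:

void rds_iw_free_mr(struct rds_iw_mr *ibmr, int invalidate)
{
	struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

	if (!pool)
		return;

	/* Queue on dirty_list; actual invalidation is deferred. */
	rds_iw_free_fastreg(pool, ibmr);

	/* Flush once too many pages stay pinned or dirty_list grows. */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt()))
			rds_iw_flush_mr_pool(pool, 0);	/* line 570 */
		else
			queue_work(rds_wq, &pool->flush_worker); /* line 574 */
	}
}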
584 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool; local
586 if (pool)
587 rds_iw_flush_mr_pool(pool, 0);
662 static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, argument
665 struct rds_iw_device *rds_iwdev = pool->device;
670 mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
681 page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
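
Lines 662-681 allocate the fast-registration resources from the device's protection domain, using the old-style fastreg API (ib_alloc_fast_reg_mr() / ib_alloc_fast_reg_page_list(), since removed from the kernel). Both return ERR_PTR values, so a plausible completion is; the ibmr field names are assumptions:

static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
			       struct rds_iw_mr *ibmr)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct ib_fast_reg_page_list *page_list;
	struct ib_mr *mr;

	mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev,
						pool->max_message_size);
	if (IS_ERR(page_list)) {
		ib_dereg_mr(mr);
		return PTR_ERR(page_list);
	}

	ibmr->page_list = page_list;	/* assumed field names */
	ibmr->mr = mr;
	return 0;
}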
758 static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool, argument
763 struct rds_iw_device *rds_iwdev = pool->device;
777 if (mapping->m_sg.dma_len > pool->max_message_size) {
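
Line 777 rejects a mapping whose DMA length exceeds the per-pool cap before any registration work is done. The listing shows only the test; the error value is an assumption:

	/* inside rds_iw_map_fastreg(), around line 777 */
	if (mapping->m_sg.dma_len > pool->max_message_size) {
		ret = -EMSGSIZE;	/* assumed errno */
		goto out;
	}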
800 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, argument
814 spin_lock_irqsave(&pool->list_lock, flags);
816 list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
817 atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
818 atomic_inc(&pool->dirty_count);
820 spin_unlock_irqrestore(&pool->list_lock, flags);
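
Lines 800-820 do the dirty-list bookkeeping under list_lock. Lines 815 and 819 are elided from the hits, so the guard below is a guess at what surrounds the shown statements:

static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
				struct rds_iw_mr *ibmr)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (ibmr->mapping.m_sg.len) {	/* assumed guard (lines 815/819 elided) */
		list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
		atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
		atomic_inc(&pool->dirty_count);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);
}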
823 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, argument
851 spin_lock_irqsave(&pool->list_lock, flags);
857 spin_unlock_irqrestore(&pool->list_lock, flags);
868 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, argument