Lines matching refs:pool — identifier cross-reference over the RDS iWARP fast-registration MR pool code. Each entry gives the source line number, the line's text, and how "pool" is used on that line (struct member, local variable, or function argument).
46 struct rds_iw_mr_pool *pool; member
77 static void rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
79 static int rds_iw_init_reg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
80 static int rds_iw_map_reg(struct rds_iw_mr_pool *pool,
83 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
84 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
88 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
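Taken together, the field accesses in this listing imply the shape of struct rds_iw_mr_pool. The sketch below is reconstructed only from members that actually appear above; field order and the exact integer types are assumptions, not taken from the source.

    struct rds_iw_mr_pool {
            struct rds_iw_device *device;      /* owning device (line 325) */

            struct mutex flush_lock;           /* serializes pool flushes (line 473) */
            struct work_struct flush_worker;   /* deferred flush (line 330) */

            spinlock_t list_lock;              /* protects the two lists below */
            struct list_head dirty_list;       /* freed MRs awaiting invalidation */
            struct list_head clean_list;       /* invalidated MRs ready for reuse */

            atomic_t item_count;               /* total MRs allocated (line 399) */
            atomic_t dirty_count;              /* MRs on dirty_list (line 517) */
            atomic_t free_pinned;              /* pages pinned by freed MRs (line 516) */

            unsigned long max_message_size;    /* per-MR size cap, in sg pages (lines 649, 744) */
            unsigned long max_items;           /* hard cap on allocated MRs (line 333) */
            unsigned long max_items_soft;      /* soft threshold, 3/4 of max (line 342) */
            unsigned long max_free_pinned;     /* pinned-page flush threshold (line 334) */
            unsigned int max_pages;            /* reported via get_mr_info (line 352) */
    };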
317 struct rds_iw_mr_pool *pool; local
319 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
320 if (!pool) {
325 pool->device = rds_iwdev;
326 INIT_LIST_HEAD(&pool->dirty_list);
327 INIT_LIST_HEAD(&pool->clean_list);
328 mutex_init(&pool->flush_lock);
329 spin_lock_init(&pool->list_lock);
330 INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);
332 pool->max_message_size = fastreg_message_size;
333 pool->max_items = fastreg_pool_size;
334 pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
335 pool->max_pages = fastreg_message_size;
342 pool->max_items_soft = pool->max_items * 3 / 4;
344 return pool;
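Lines 317-344 cover nearly the whole pool constructor, so it can be reassembled with little guesswork. The function name rds_iw_create_mr_pool and the error path are assumptions (the listing omits them); the body statements are verbatim from the fragments above.

    struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
    {
            struct rds_iw_mr_pool *pool;

            pool = kzalloc(sizeof(*pool), GFP_KERNEL);
            if (!pool)                              /* error path assumed */
                    return ERR_PTR(-ENOMEM);

            pool->device = rds_iwdev;
            INIT_LIST_HEAD(&pool->dirty_list);
            INIT_LIST_HEAD(&pool->clean_list);
            mutex_init(&pool->flush_lock);
            spin_lock_init(&pool->list_lock);
            INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);

            pool->max_message_size = fastreg_message_size;
            pool->max_items = fastreg_pool_size;
            pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
            pool->max_pages = fastreg_message_size;

            /* start laundering before the hard limit is reached */
            pool->max_items_soft = pool->max_items * 3 / 4;

            return pool;
    }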
349 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool; local
351 iinfo->rdma_mr_max = pool->max_items;
352 iinfo->rdma_mr_size = pool->max_pages;
355 void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool) argument
358 rds_iw_flush_mr_pool(pool, 1);
359 BUG_ON(atomic_read(&pool->item_count));
360 BUG_ON(atomic_read(&pool->free_pinned));
361 kfree(pool);
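The destructor at lines 355-361 is shown almost in full: a final flush with free_all=1 tears down both lists, and the BUG_ON checks assert that no MRs or pinned pages survived the flush before the pool itself is freed. Only the layout below is assumed.

    void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
    {
            /* free_all=1: the clean list is condemned along with the dirty list */
            rds_iw_flush_mr_pool(pool, 1);

            /* the flush must have destroyed every MR and unpinned every page */
            BUG_ON(atomic_read(&pool->item_count));
            BUG_ON(atomic_read(&pool->free_pinned));

            kfree(pool);
    }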
364 static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool) argument
369 spin_lock_irqsave(&pool->list_lock, flags);
370 if (!list_empty(&pool->clean_list)) {
371 ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
374 spin_unlock_irqrestore(&pool->list_lock, flags);
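rds_iw_reuse_fmr (lines 364-374) pops the oldest invalidated MR off the clean list under list_lock. The list_del_init between the lookup and the unlock does not appear in the listing and is an assumption; without some removal step the same MR could be handed out twice.

    static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
    {
            struct rds_iw_mr *ibmr = NULL;
            unsigned long flags;

            spin_lock_irqsave(&pool->list_lock, flags);
            if (!list_empty(&pool->clean_list)) {
                    ibmr = list_entry(pool->clean_list.next,
                                      struct rds_iw_mr, mapping.m_list);
                    list_del_init(&ibmr->mapping.m_list);   /* assumed; see above */
            }
            spin_unlock_irqrestore(&pool->list_lock, flags);

            return ibmr;
    }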
381 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool; local
386 ibmr = rds_iw_reuse_fmr(pool);
399 if (atomic_inc_return(&pool->item_count) <= pool->max_items)
402 atomic_dec(&pool->item_count);
411 rds_iw_flush_mr_pool(pool, 0);
424 err = rds_iw_init_reg(pool, ibmr);
433 rds_iw_destroy_fastreg(pool, ibmr);
436 atomic_dec(&pool->item_count);
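The allocation path (lines 381-436) first tries to reuse a clean MR, then races atomic_inc_return against max_items to claim a fresh slot; on overshoot it decrements again and, per line 411, synchronously flushes the pool to launder dirty MRs before retrying. A sketch of that control flow, with the retry loop, the kzalloc of the MR, and the error labels assumed where the listing is silent:

    static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
    {
            struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
            struct rds_iw_mr *ibmr = NULL;
            int err = 0;

            for (;;) {                              /* real code presumably bounds this loop */
                    ibmr = rds_iw_reuse_fmr(pool);          /* line 386 */
                    if (ibmr)
                            return ibmr;

                    /* claim a fresh slot unless the pool is full (line 399) */
                    if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                            break;
                    atomic_dec(&pool->item_count);          /* line 402 */

                    /* pool full: launder dirty MRs, then retry (line 411) */
                    rds_iw_flush_mr_pool(pool, 0);
            }

            ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);      /* assumed */
            if (!ibmr) {
                    err = -ENOMEM;
                    goto out_fail;
            }

            err = rds_iw_init_reg(pool, ibmr);              /* line 424 */
            if (err)
                    goto out_fail;

            return ibmr;

    out_fail:
            if (ibmr) {
                    rds_iw_destroy_fastreg(pool, ibmr);     /* line 433 */
                    kfree(ibmr);                            /* assumed */
            }
            atomic_dec(&pool->item_count);                  /* line 436 */
            return ERR_PTR(err);
    }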
463 static void rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) argument
473 mutex_lock(&pool->flush_lock);
475 spin_lock_irqsave(&pool->list_lock, flags);
477 list_splice_init(&pool->dirty_list, &unmap_list);
479 list_splice_init(&pool->clean_list, &kill_list);
480 spin_unlock_irqrestore(&pool->list_lock, flags);
491 ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
503 rds_iw_destroy_fastreg(pool, ibmr);
511 spin_lock_irqsave(&pool->list_lock, flags);
512 list_splice(&unmap_list, &pool->clean_list);
513 spin_unlock_irqrestore(&pool->list_lock, flags);
516 atomic_sub(unpinned, &pool->free_pinned);
517 atomic_sub(ncleaned, &pool->dirty_count);
518 atomic_sub(nfreed, &pool->item_count);
520 mutex_unlock(&pool->flush_lock);
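rds_iw_flush_mr_pool (lines 463-520) is the heart of the pool: under flush_lock it splices the dirty list (and, when free_all is set, the clean list) onto private lists, batch-invalidates the dirty mappings via rds_iw_unmap_fastreg_list, destroys anything condemned, and splices the laundered remainder back onto the clean list before adjusting the three counters. The local list heads, the kill-list walk, and the trailing arguments of the unmap call (line 491 shows only the first two) are assumptions filling gaps between the listed fragments:

    static void rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
    {
            struct rds_iw_mr *ibmr, *next;
            LIST_HEAD(unmap_list);
            LIST_HEAD(kill_list);
            unsigned long flags;
            unsigned int nfreed = 0, ncleaned = 0, unpinned = 0;

            mutex_lock(&pool->flush_lock);          /* one flusher at a time */

            spin_lock_irqsave(&pool->list_lock, flags);
            list_splice_init(&pool->dirty_list, &unmap_list);
            if (free_all)                           /* destroy clean MRs too */
                    list_splice_init(&pool->clean_list, &kill_list);
            spin_unlock_irqrestore(&pool->list_lock, flags);

            /* batch-invalidate dirty mappings; victims move to kill_list */
            if (!list_empty(&unmap_list))
                    ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
                                                         &kill_list, &unpinned);

            /* tear down everything condemned above (walk assumed) */
            list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
                    list_del(&ibmr->mapping.m_list);
                    rds_iw_destroy_fastreg(pool, ibmr);
                    kfree(ibmr);
                    nfreed++;
            }

            /* laundered MRs become reusable again */
            spin_lock_irqsave(&pool->list_lock, flags);
            list_splice(&unmap_list, &pool->clean_list);
            spin_unlock_irqrestore(&pool->list_lock, flags);

            atomic_sub(unpinned, &pool->free_pinned);
            atomic_sub(ncleaned, &pool->dirty_count);
            atomic_sub(nfreed, &pool->item_count);

            mutex_unlock(&pool->flush_lock);
    }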
525 struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker); local
527 rds_iw_flush_mr_pool(pool, 0);
533 struct rds_iw_mr_pool *pool = ibmr->device->mr_pool; local
536 if (!pool)
540 rds_iw_free_fastreg(pool, ibmr);
543 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
544 atomic_read(&pool->dirty_count) >= pool->max_items / 10)
545 queue_work(rds_wq, &pool->flush_worker);
549 rds_iw_flush_mr_pool(pool, 0);
553 queue_work(rds_wq, &pool->flush_worker);
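The free path (lines 533-553) never destroys an MR directly: it parks the mapping on the dirty list via rds_iw_free_fastreg, then decides how urgently to launder. Crossing either threshold at lines 543-544 queues the flush worker; line 549 points to a synchronous flush on some path (presumably when the caller requests invalidation in process context), with line 553 falling back to the worker otherwise. The function name, its arguments, and the invalidate/in_interrupt split below are assumptions:

    void rds_iw_free_mr(void *trans_private, int invalidate)   /* name/args assumed */
    {
            struct rds_iw_mr *ibmr = trans_private;
            struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

            if (!pool)
                    return;

            /* park the mapping on the dirty list (line 540) */
            rds_iw_free_fastreg(pool, ibmr);

            /* too many pinned pages or dirty MRs: launder in the background */
            if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
                atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                    queue_work(rds_wq, &pool->flush_worker);

            if (invalidate) {
                    if (!in_interrupt())
                            rds_iw_flush_mr_pool(pool, 0);           /* line 549 */
                    else
                            queue_work(rds_wq, &pool->flush_worker); /* line 553 */
            }
    }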
563 struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool; local
565 if (pool)
566 rds_iw_flush_mr_pool(pool, 0);
641 static int rds_iw_init_reg(struct rds_iw_mr_pool *pool, argument
644 struct rds_iw_device *rds_iwdev = pool->device;
649 pool->max_message_size);
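rds_iw_init_reg (lines 641-649) reaches back through pool->device for the device and sizes the registration by pool->max_message_size. The fragment at line 649 is consistent with an ib_alloc_mr() call taking that value as its scatter-gather limit; the protection-domain field name (pd), the MR field name, and the error handling are assumptions:

    static int rds_iw_init_reg(struct rds_iw_mr_pool *pool,
                               struct rds_iw_mr *ibmr)
    {
            struct rds_iw_device *rds_iwdev = pool->device;
            struct ib_mr *mr;

            /* one fast-registration MR, big enough for a full message */
            mr = ib_alloc_mr(rds_iwdev->pd, IB_MR_TYPE_MEM_REG,
                             pool->max_message_size);
            if (IS_ERR(mr))
                    return PTR_ERR(mr);

            ibmr->mr = mr;          /* field name assumed */
            return 0;
    }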
726 static int rds_iw_map_reg(struct rds_iw_mr_pool *pool, argument
731 struct rds_iw_device *rds_iwdev = pool->device;
744 if (mapping->m_sg.dma_len > pool->max_message_size) {
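In rds_iw_map_reg (lines 726-744), the only pool interaction the listing shows is the guard at line 744: after DMA-mapping the scatterlist, a mapping longer than max_message_size is rejected rather than registered. A minimal rendering of that guard, with the error code assumed:

    /* refuse to register more pages than one fast-reg MR can cover */
    if (mapping->m_sg.dma_len > pool->max_message_size) {
            ret = -EMSGSIZE;        /* error code assumed */
            goto out;
    }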
764 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, argument
778 spin_lock_irqsave(&pool->list_lock, flags);
780 list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
781 atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
782 atomic_inc(&pool->dirty_count);
784 spin_unlock_irqrestore(&pool->list_lock, flags);
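rds_iw_free_fastreg (lines 764-784) is the producer side of the dirty list: under list_lock the mapping is queued tail-first (so flushes launder oldest-first), the pinned-page counter grows by the mapping's scatterlist length, and dirty_count feeds the flush heuristic seen at line 544. The early-out for an unmapped MR is an assumption:

    static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
                                    struct rds_iw_mr *ibmr)
    {
            unsigned long flags;

            if (!ibmr->mapping.m_sg.dma_len)        /* nothing mapped: assumed */
                    return;

            spin_lock_irqsave(&pool->list_lock, flags);

            list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
            atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
            atomic_inc(&pool->dirty_count);

            spin_unlock_irqrestore(&pool->list_lock, flags);
    }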
787 static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, argument
815 spin_lock_irqsave(&pool->list_lock, flags);
821 spin_unlock_irqrestore(&pool->list_lock, flags);
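Only the signature and one lock/unlock pair (lines 815, 821) of rds_iw_unmap_fastreg_list survive in the listing, so most of the body below is conjecture: the flush path above implies it walks unmap_list, accumulates unpinned page counts, moves condemned MRs to kill_list, and returns how many mappings it cleaned. Treat this as a shape, not the source:

    static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
                                                  struct list_head *unmap_list,
                                                  struct list_head *kill_list,
                                                  unsigned int *unpinned)
    {
            struct rds_iw_mapping *mapping, *next;
            unsigned int ncleaned = 0;
            unsigned long flags;

            /* invalidate each dirty mapping and tally the laundering */
            spin_lock_irqsave(&pool->list_lock, flags);
            list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
                    *unpinned += mapping->m_sg.len;
                    ncleaned++;
                    /* MRs that cannot be reused would move to kill_list here */
            }
            spin_unlock_irqrestore(&pool->list_lock, flags);

            return ncleaned;
    }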
832 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, argument
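Finally, rds_iw_destroy_fastreg (line 832) releases the verbs resources behind a single MR. Only its signature appears in the listing, so the body is a guess that the ib_mr handle allocated in rds_iw_init_reg is deregistered here:

    static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
                                       struct rds_iw_mr *ibmr)
    {
            if (ibmr->mr)                   /* field name assumed, as above */
                    ib_dereg_mr(ibmr->mr);
    }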