Lines Matching refs:ent
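All of the matches below come from the mlx5 memory-region (MR) cache in drivers/infiniband/hw/mlx5/mr.c. For orientation, here is a sketch of the per-order cache bucket the hits keep touching; the field names are taken from the matches themselves, while the exact types and the full definition (which lives in mlx5_ib.h) are assumptions.

struct mlx5_cache_ent {			/* sketch only; real definition is in mlx5_ib.h */
	struct list_head	head;	/* free, pre-created MRs of this order */
	spinlock_t		lock;	/* protects head, cur, size, pending */

	char			name[4];	/* debugfs directory name */
	u32			order;		/* log2 of pages covered per MR */
	u32			size;		/* MRs created for this bucket in total */
	u32			cur;		/* MRs currently sitting on head */
	u32			miss;		/* allocations the cache could not serve */
	u32			limit;		/* low watermark the worker refills toward */
	u32			pending;	/* async create_mkey commands in flight */

	struct dentry		*dir, *fsize, *flimit, *fcur, *fmiss;	/* debugfs handles */

	struct mlx5_ib_dev	*dev;
	struct work_struct	work;	/* immediate fill/shrink */
	struct delayed_work	dwork;	/* deferred fill/shrink */
};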
74 if (order < cache->ent[0].order) in order2idx()
77 return order - cache->ent[0].order; in order2idx()
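The two hits in order2idx() suggest a bucket index is simply the order's offset from the smallest cached order. A minimal reconstruction; what the function does for orders below the smallest bucket is not visible in the matches, so clamping to bucket 0 is an assumption.

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;	/* assumed: clamp small requests to the first bucket */

	return order - cache->ent[0].order;
}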
86 struct mlx5_cache_ent *ent = &cache->ent[c]; in reg_mr_callback() local
92 spin_lock_irqsave(&ent->lock, flags); in reg_mr_callback()
93 ent->pending--; in reg_mr_callback()
94 spin_unlock_irqrestore(&ent->lock, flags); in reg_mr_callback()
120 spin_lock_irqsave(&ent->lock, flags); in reg_mr_callback()
121 list_add_tail(&mr->list, &ent->head); in reg_mr_callback()
122 ent->cur++; in reg_mr_callback()
123 ent->size++; in reg_mr_callback()
124 spin_unlock_irqrestore(&ent->lock, flags); in reg_mr_callback()
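The reg_mr_callback() hits show the completion side of the asynchronous mkey creation: drop the in-flight count, then park the fresh MR on its bucket. A condensed sketch of just that bookkeeping; reg_mr_bookkeeping() is a hypothetical helper, the real callback also assigns the mkey and handles command errors, and irqsave locking is presumably used because the callback can run from the command-completion context.

static void reg_mr_bookkeeping(struct mlx5_cache_ent *ent, struct mlx5_ib_mr *mr)
{
	unsigned long flags;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;				/* one fewer create_mkey in flight */
	spin_unlock_irqrestore(&ent->lock, flags);

	/* mkey setup / error handling elided in this sketch */

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;				/* now available to alloc_cached_mr() */
	ent->size++;				/* created overall */
	spin_unlock_irqrestore(&ent->lock, flags);
}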
137 struct mlx5_cache_ent *ent = &cache->ent[c]; in add_keys() local
140 int npages = 1 << ent->order; in add_keys()
149 if (ent->pending >= MAX_PENDING_REG_MR) { in add_keys()
159 mr->order = ent->order; in add_keys()
168 spin_lock_irq(&ent->lock); in add_keys()
169 ent->pending++; in add_keys()
170 spin_unlock_irq(&ent->lock); in add_keys()
175 spin_lock_irq(&ent->lock); in add_keys()
176 ent->pending--; in add_keys()
177 spin_unlock_irq(&ent->lock); in add_keys()
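add_keys() is the producer: it refuses to queue more than MAX_PENDING_REG_MR asynchronous creations per bucket, bumps pending before posting each command, and undoes the bump if posting fails. A sketch under those assumptions; issue_create_mkey() is a placeholder for the real asynchronous mlx5_core call, and MR allocation is elided.

static int add_keys_sketch(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent,
			   int num)
{
	int npages = 1 << ent->order;	/* each cached MR covers 2^order pages */
	int err, i;

	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR)
			return -EAGAIN;	/* throttle; the worker retries later */

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);

		err = issue_create_mkey(dev, ent, npages);	/* hypothetical helper */
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			return err;
		}
	}

	return 0;
}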
191 struct mlx5_cache_ent *ent = &cache->ent[c]; in remove_keys() local
197 spin_lock_irq(&ent->lock); in remove_keys()
198 if (list_empty(&ent->head)) { in remove_keys()
199 spin_unlock_irq(&ent->lock); in remove_keys()
202 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in remove_keys()
204 ent->cur--; in remove_keys()
205 ent->size--; in remove_keys()
206 spin_unlock_irq(&ent->lock); in remove_keys()
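remove_keys() is the matching consumer for shrinking: it pops one MR at a time under the bucket lock, fixes the counters, and destroys the mkey only after dropping the lock. A sketch of one iteration; destroy_cached_mr() is a stand-in for the real teardown.

static void remove_one_key(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_mr *mr;

	spin_lock_irq(&ent->lock);
	if (list_empty(&ent->head)) {
		spin_unlock_irq(&ent->lock);
		return;
	}
	mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
	list_del(&mr->list);
	ent->cur--;
	ent->size--;
	spin_unlock_irq(&ent->lock);

	destroy_cached_mr(dev, mr);	/* hypothetical: destroy the mkey, free mr */
}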
218 struct mlx5_cache_ent *ent = filp->private_data; in size_write() local
219 struct mlx5_ib_dev *dev = ent->dev; in size_write()
228 c = order2idx(dev, ent->order); in size_write()
234 if (var < ent->limit) in size_write()
237 if (var > ent->size) { in size_write()
239 err = add_keys(dev, c, var - ent->size); in size_write()
245 } else if (var < ent->size) { in size_write()
246 remove_keys(dev, c, ent->size - var); in size_write()
255 struct mlx5_cache_ent *ent = filp->private_data; in size_read() local
262 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size); in size_read()
284 struct mlx5_cache_ent *ent = filp->private_data; in limit_write() local
285 struct mlx5_ib_dev *dev = ent->dev; in limit_write()
294 c = order2idx(dev, ent->order); in limit_write()
300 if (var > ent->size) in limit_write()
303 ent->limit = var; in limit_write()
305 if (ent->cur < ent->limit) { in limit_write()
306 err = add_keys(dev, c, 2 * ent->limit - ent->cur); in limit_write()
317 struct mlx5_cache_ent *ent = filp->private_data; in limit_read() local
324 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
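The four debugfs handlers (size_write/size_read, limit_write/limit_read) expose size and limit per bucket. The writers parse a number and resize through add_keys()/remove_keys(); roughly what the hits imply, with copy_from_user parsing and error reporting elided.

/* "size": set an absolute MR count for the bucket, never below its limit */
static int apply_size(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent, u32 var)
{
	int c = order2idx(dev, ent->order);

	if (var < ent->limit)
		return -EINVAL;
	if (var > ent->size)
		return add_keys(dev, c, var - ent->size);
	if (var < ent->size)
		remove_keys(dev, c, ent->size - var);
	return 0;
}

/* "limit": move the watermark and top the bucket up toward 2 * limit */
static int apply_limit(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent, u32 var)
{
	int c = order2idx(dev, ent->order);

	if (var > ent->size)
		return -EINVAL;
	ent->limit = var;
	if (ent->cur < ent->limit)
		return add_keys(dev, c, 2 * ent->limit - ent->cur);
	return 0;
}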
348 if (cache->ent[i].cur < cache->ent[i].limit) in someone_adding()
355 static void __cache_work_func(struct mlx5_cache_ent *ent) in __cache_work_func() argument
357 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
359 int i = order2idx(dev, ent->order); in __cache_work_func()
365 ent = &dev->cache.ent[i]; in __cache_work_func()
366 if (ent->cur < 2 * ent->limit && !dev->fill_delay) { in __cache_work_func()
368 if (ent->cur < 2 * ent->limit) { in __cache_work_func()
372 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
377 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
380 queue_work(cache->wq, &ent->work); in __cache_work_func()
383 } else if (ent->cur > 2 * ent->limit) { in __cache_work_func()
399 if (ent->cur > ent->limit) in __cache_work_func()
400 queue_work(cache->wq, &ent->work); in __cache_work_func()
402 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
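__cache_work_func() is the policy core: it tries to keep each bucket between limit and 2 * limit. Below the band it adds keys one at a time, re-queuing itself immediately on success and with a delay on failure; above the band it trims one MR while nobody else is filling, re-queues while still over limit, and otherwise defers for the 300 * HZ visible above. A sketch under those assumptions; the retry delay, the stopped-cache check, and any additional guards in the real function are assumptions or omissions.

static void cache_work_sketch(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err)	/* -EAGAIN or command failure: back off */
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));	/* assumed delay */
			else		/* still under the band: keep filling */
				queue_work(cache->wq, &ent->work);
		}
	} else if (ent->cur > 2 * ent->limit) {
		if (!someone_adding(cache)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}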
409 struct mlx5_cache_ent *ent; in delayed_cache_work_func() local
411 ent = container_of(work, struct mlx5_cache_ent, dwork.work); in delayed_cache_work_func()
412 __cache_work_func(ent); in delayed_cache_work_func()
417 struct mlx5_cache_ent *ent; in cache_work_func() local
419 ent = container_of(work, struct mlx5_cache_ent, work); in cache_work_func()
420 __cache_work_func(ent); in cache_work_func()
427 struct mlx5_cache_ent *ent; in alloc_cached_mr() local
438 ent = &cache->ent[i]; in alloc_cached_mr()
440 mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); in alloc_cached_mr()
442 spin_lock_irq(&ent->lock); in alloc_cached_mr()
443 if (!list_empty(&ent->head)) { in alloc_cached_mr()
444 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, in alloc_cached_mr()
447 ent->cur--; in alloc_cached_mr()
448 spin_unlock_irq(&ent->lock); in alloc_cached_mr()
449 if (ent->cur < ent->limit) in alloc_cached_mr()
450 queue_work(cache->wq, &ent->work); in alloc_cached_mr()
453 spin_unlock_irq(&ent->lock); in alloc_cached_mr()
455 queue_work(cache->wq, &ent->work); in alloc_cached_mr()
459 cache->ent[c].miss++; in alloc_cached_mr()
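alloc_cached_mr() walks from the requested bucket upward: take the first free MR it finds, kick the worker whenever a bucket is empty or drops under its limit, and count a miss against the original bucket if nothing could serve the request. A sketch, assuming the walk is bounded by the number of cache entries; MAX_MR_CACHE_ENTRIES comes from the driver but is not visible in the matches, and the index sanity checks are elided.

static struct mlx5_ib_mr *alloc_cached_mr_sketch(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c = order2idx(dev, order);
	int i;

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);	/* empty bucket: ask for a refill */
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}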
467 struct mlx5_cache_ent *ent; in free_cached_mr() local
476 ent = &cache->ent[c]; in free_cached_mr()
477 spin_lock_irq(&ent->lock); in free_cached_mr()
478 list_add_tail(&mr->list, &ent->head); in free_cached_mr()
479 ent->cur++; in free_cached_mr()
480 if (ent->cur > 2 * ent->limit) in free_cached_mr()
482 spin_unlock_irq(&ent->lock); in free_cached_mr()
485 queue_work(cache->wq, &ent->work); in free_cached_mr()
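free_cached_mr() is the return path: the MR goes back on the tail of its original bucket, and if that pushes the bucket above 2 * limit the worker is queued to shrink it. The shrink decision is taken under the lock, the queue_work() after it; a sketch with the index sanity check elided.

static void free_cached_mr_sketch(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[order2idx(dev, mr->order)];
	int shrink = 0;

	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}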
491 struct mlx5_cache_ent *ent = &cache->ent[c]; in clean_keys() local
495 cancel_delayed_work(&ent->dwork); in clean_keys()
497 spin_lock_irq(&ent->lock); in clean_keys()
498 if (list_empty(&ent->head)) { in clean_keys()
499 spin_unlock_irq(&ent->lock); in clean_keys()
502 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in clean_keys()
504 ent->cur--; in clean_keys()
505 ent->size--; in clean_keys()
506 spin_unlock_irq(&ent->lock); in clean_keys()
518 struct mlx5_cache_ent *ent; in mlx5_mr_cache_debugfs_init() local
529 ent = &cache->ent[i]; in mlx5_mr_cache_debugfs_init()
530 sprintf(ent->name, "%d", ent->order); in mlx5_mr_cache_debugfs_init()
531 ent->dir = debugfs_create_dir(ent->name, cache->root); in mlx5_mr_cache_debugfs_init()
532 if (!ent->dir) in mlx5_mr_cache_debugfs_init()
535 ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, in mlx5_mr_cache_debugfs_init()
537 if (!ent->fsize) in mlx5_mr_cache_debugfs_init()
540 ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, in mlx5_mr_cache_debugfs_init()
542 if (!ent->flimit) in mlx5_mr_cache_debugfs_init()
545 ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, in mlx5_mr_cache_debugfs_init()
546 &ent->cur); in mlx5_mr_cache_debugfs_init()
547 if (!ent->fcur) in mlx5_mr_cache_debugfs_init()
550 ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, in mlx5_mr_cache_debugfs_init()
551 &ent->miss); in mlx5_mr_cache_debugfs_init()
552 if (!ent->fmiss) in mlx5_mr_cache_debugfs_init()
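mlx5_mr_cache_debugfs_init() gives every bucket its own directory, named after the order, with "size" and "limit" backed by the write/read handlers above and "cur"/"miss" exposed as plain u32 files. A sketch of the per-bucket part; size_fops and limit_fops are assumed names for the file_operations wired to those handlers, and the NULL checks follow the old debugfs API visible in the matches.

static int ent_debugfs_sketch(struct mlx5_cache_ent *ent, struct dentry *root)
{
	sprintf(ent->name, "%d", ent->order);
	ent->dir = debugfs_create_dir(ent->name, root);
	if (!ent->dir)
		return -ENOMEM;

	ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, &size_fops);	/* assumed fops name */
	ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, &limit_fops);	/* assumed fops name */
	ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, &ent->cur);
	ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, &ent->miss);

	if (!ent->fsize || !ent->flimit || !ent->fcur || !ent->fmiss)
		return -ENOMEM;

	return 0;
}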
577 struct mlx5_cache_ent *ent; in mlx5_mr_cache_init() local
590 INIT_LIST_HEAD(&cache->ent[i].head); in mlx5_mr_cache_init()
591 spin_lock_init(&cache->ent[i].lock); in mlx5_mr_cache_init()
593 ent = &cache->ent[i]; in mlx5_mr_cache_init()
594 INIT_LIST_HEAD(&ent->head); in mlx5_mr_cache_init()
595 spin_lock_init(&ent->lock); in mlx5_mr_cache_init()
596 ent->order = i + 2; in mlx5_mr_cache_init()
597 ent->dev = dev; in mlx5_mr_cache_init()
604 INIT_WORK(&ent->work, cache_work_func); in mlx5_mr_cache_init()
605 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5_mr_cache_init()
606 ent->limit = limit; in mlx5_mr_cache_init()
607 queue_work(cache->wq, &ent->work); in mlx5_mr_cache_init()
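Finally, the init loop in mlx5_mr_cache_init() wires each bucket up: bucket i caches MRs of order i + 2, gets its list, lock and (delayed) work items, takes a limit derived from the device profile (that computation is not visible in the matches), and is queued once so the worker pre-fills it. A per-bucket sketch with the limit passed in as a parameter.

static void init_cache_ent_sketch(struct mlx5_ib_dev *dev,
				  struct mlx5_mr_cache *cache, int i, int limit)
{
	struct mlx5_cache_ent *ent = &cache->ent[i];

	INIT_LIST_HEAD(&ent->head);
	spin_lock_init(&ent->lock);
	ent->order = i + 2;		/* smallest bucket holds 4-page MRs */
	ent->dev = dev;
	ent->limit = limit;		/* assumed: taken from the device profile */
	INIT_WORK(&ent->work, cache_work_func);
	INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

	queue_work(cache->wq, &ent->work);	/* kick the initial fill */
}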