Cross-reference listing: lines matching references to the identifier ac, a struct array_cache, in the SLAB allocator (mm/slab.c). Each entry below gives the source line number, the matching line, and the enclosing function; the array_cache is the per-CPU (and per-node shared/alien) array of cached free-object pointers.

207 	struct array_cache ac;  member
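
For orientation, a sketch of the structure these references revolve around. The field names follow the fragments below; the exact layout is an assumption, not a verbatim copy of any one kernel tree:

struct array_cache {
    unsigned int avail;       /* objects currently cached in entry[] */
    unsigned int limit;       /* capacity: a flush is forced at this point */
    unsigned int batchcount;  /* objects moved per refill or flush */
    unsigned int touched;     /* set on use; makes the reaper back off */
    void *entry[];            /* LIFO stack of free object pointers */
};

Line 207 itself is a member of struct alien_cache: a remote-node (alien) cache embeds one array_cache next to the alc->lock that reap_alien() takes at line 955.
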
661 static void init_arraycache(struct array_cache *ac, int limit, int batch) in init_arraycache() argument
670 kmemleak_no_scan(ac); in init_arraycache()
671 if (ac) { in init_arraycache()
672 ac->avail = 0; in init_arraycache()
673 ac->limit = limit; in init_arraycache()
674 ac->batchcount = batch; in init_arraycache()
675 ac->touched = 0; in init_arraycache()
683 struct array_cache *ac = NULL; in alloc_arraycache() local
685 ac = kmalloc_node(memsize, gfp, node); in alloc_arraycache()
686 init_arraycache(ac, entries, batchcount); in alloc_arraycache()
687 return ac; in alloc_arraycache()
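
The two helpers above, modeled in user space so the flow is testable: malloc() stands in for kmalloc_node() (the NUMA node argument is dropped) and the kmemleak call is elided. Note the quirk visible at lines 670-671: kmemleak_no_scan(ac) runs before the NULL check on ac.

#include <stdlib.h>

struct array_cache {
    unsigned int avail, limit, batchcount, touched;
    void *entry[];
};

/* Model of init_arraycache() (lines 661-675): reset the bookkeeping.
 * avail starts at 0, i.e. a fresh cache holds no objects. */
static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
    if (ac) {
        ac->avail = 0;
        ac->limit = limit;
        ac->batchcount = batch;
        ac->touched = 0;
    }
}

/* Model of alloc_arraycache() (lines 683-687): the header and the
 * 'entries' pointer slots come from a single allocation, and the
 * NULL check for a failed allocation lives in init_arraycache(). */
static struct array_cache *alloc_arraycache(int entries, int batchcount)
{
    size_t memsize = sizeof(struct array_cache) + sizeof(void *) * entries;
    struct array_cache *ac = malloc(memsize);

    init_arraycache(ac, entries, batchcount);
    return ac;
}
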
697 struct array_cache *ac) in recheck_pfmemalloc_active() argument
724 static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, in __ac_get_obj() argument
728 void *objp = ac->entry[--ac->avail]; in __ac_get_obj()
740 for (i = 0; i < ac->avail; i++) { in __ac_get_obj()
742 if (!is_obj_pfmemalloc(ac->entry[i])) { in __ac_get_obj()
743 objp = ac->entry[i]; in __ac_get_obj()
744 ac->entry[i] = ac->entry[ac->avail]; in __ac_get_obj()
745 ac->entry[ac->avail] = objp; in __ac_get_obj()
759 recheck_pfmemalloc_active(cachep, ac); in __ac_get_obj()
764 ac->avail++; in __ac_get_obj()
772 struct array_cache *ac, gfp_t flags, bool force_refill) in ac_get_obj() argument
777 objp = __ac_get_obj(cachep, ac, flags, force_refill); in ac_get_obj()
779 objp = ac->entry[--ac->avail]; in ac_get_obj()
785 struct array_cache *ac, void *objp) in __ac_put_obj() argument
797 static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac, in ac_put_obj() argument
801 objp = __ac_put_obj(cachep, ac, objp); in ac_put_obj()
803 ac->entry[ac->avail++] = objp; in ac_put_obj()
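
Lines 724-803 are the pfmemalloc-aware pop and push. Objects are handed out LIFO from entry[--avail]; if the popped pointer is tagged as coming from an emergency-reserve (pfmemalloc) slab and the caller may not dip into reserves, the remaining entries are scanned for a clean object, which is swapped into the just-vacated slot. A user-space model; the bit-0 tag mirrors the kernel's SLAB_OBJ_PFMEMALLOC trick, and recheck_pfmemalloc_active() (line 759) plus the force_refill fallback are elided:

#include <stdbool.h>
#include <stdint.h>

struct array_cache {
    unsigned int avail, limit, batchcount, touched;
    void *entry[];
};

/* The kernel tags an object from a pfmemalloc slab by setting bit 0
 * of its pointer. */
static bool is_obj_pfmemalloc(void *objp)
{
    return (uintptr_t)objp & 1u;
}

static void *clear_obj_pfmemalloc(void *objp)
{
    return (void *)((uintptr_t)objp & ~(uintptr_t)1);
}

/* Model of __ac_get_obj()/ac_get_obj() (lines 724-779). */
static void *ac_get_obj_model(struct array_cache *ac, bool pfmemalloc_allowed)
{
    void *objp = ac->entry[--ac->avail];
    unsigned int i;

    if (!is_obj_pfmemalloc(objp) || pfmemalloc_allowed)
        return clear_obj_pfmemalloc(objp);

    for (i = 0; i < ac->avail; i++) {
        if (!is_obj_pfmemalloc(ac->entry[i])) {
            /* lines 743-745: keep the tagged object cached in
             * slot i, hand the clean one to the caller */
            objp = ac->entry[i];
            ac->entry[i] = ac->entry[ac->avail];
            ac->entry[ac->avail] = objp;
            return clear_obj_pfmemalloc(objp);
        }
    }

    ac->avail++;    /* line 764: undo the pop, nothing was usable */
    return NULL;
}

/* Model of ac_put_obj() (lines 797-803): a plain LIFO push; the kernel
 * first tags the pointer in __ac_put_obj() if its slab page is
 * pfmemalloc. */
static void ac_put_obj_model(struct array_cache *ac, void *objp)
{
    ac->entry[ac->avail++] = objp;
}

Pushing and popping at the same end keeps the most recently freed object first in line for reuse, which is the cache-warmth argument for the LIFO discipline.
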
878 init_arraycache(&alc->ac, entries, batch); in __alloc_alien_cache()
921 struct array_cache *ac, int node, in __drain_alien_cache() argument
926 if (ac->avail) { in __drain_alien_cache()
934 transfer_objects(n->shared, ac, ac->limit); in __drain_alien_cache()
936 free_block(cachep, ac->entry, ac->avail, node, list); in __drain_alien_cache()
937 ac->avail = 0; in __drain_alien_cache()
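
Lines 921-937 drain an alien cache back to its home node: objects are first transferred into the remote node's shared array (cheaper than freeing them and fetching them back later), and whatever remains goes to free_block(). A sketch of that order of operations; transfer_objects() is modeled from its two call sites here (lines 934 and 2788, destination first), the kernel holds n->list_lock across all of this, and free_block() is reduced to a callback:

#include <string.h>

struct array_cache {
    unsigned int avail, limit, batchcount, touched;
    void *entry[];
};

/* Model of transfer_objects(): bulk-move up to 'max' pointers from the
 * top of 'from' into 'to', bounded by the free room in 'to'. */
static unsigned int transfer_objects(struct array_cache *to,
                                     struct array_cache *from,
                                     unsigned int max)
{
    unsigned int nr = from->avail;

    if (nr > max)
        nr = max;
    if (nr > to->limit - to->avail)
        nr = to->limit - to->avail;
    if (!nr)
        return 0;

    memcpy(to->entry + to->avail, from->entry + from->avail - nr,
           sizeof(void *) * nr);
    from->avail -= nr;
    to->avail += nr;
    return nr;
}

/* Model of __drain_alien_cache() (lines 921-937): prefer stuffing the
 * remote node's shared array, then free whatever did not fit. */
static void drain_alien_model(struct array_cache *ac,
                              struct array_cache *shared,
                              void (*free_block)(void **entries, unsigned int nr))
{
    if (!ac->avail)
        return;
    if (shared)
        transfer_objects(shared, ac, ac->limit);
    free_block(ac->entry, ac->avail);   /* line 936 */
    ac->avail = 0;                      /* line 937 */
}
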
951 struct array_cache *ac; in reap_alien() local
954 ac = &alc->ac; in reap_alien()
955 if (ac->avail && spin_trylock_irq(&alc->lock)) { in reap_alien()
958 __drain_alien_cache(cachep, ac, node, &list); in reap_alien()
971 struct array_cache *ac; in drain_alien_cache() local
979 ac = &alc->ac; in drain_alien_cache()
981 __drain_alien_cache(cachep, ac, i, &list); in drain_alien_cache()
993 struct array_cache *ac; in __cache_free_alien() local
1000 ac = &alien->ac; in __cache_free_alien()
1002 if (unlikely(ac->avail == ac->limit)) { in __cache_free_alien()
1004 __drain_alien_cache(cachep, ac, page_node, &list); in __cache_free_alien()
1006 ac_put_obj(cachep, ac, objp); in __cache_free_alien()
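
Lines 951-1006 are the alien cache's three entry points: the periodic reaper (which at line 955 only drains when spin_trylock_irq() succeeds, so it never waits on a contended lock), the unconditional drain, and the remote free. A minimal model of the remote-free tail; freeing to another node goes through that node's alien cache, and a full cache is drained home first (under alien->lock in the kernel):

struct array_cache {
    unsigned int avail, limit, batchcount, touched;
    void *entry[];
};

/* Model of the __cache_free_alien() tail (lines 1000-1006). */
static void cache_free_alien_model(struct array_cache *ac, void *objp,
                                   void (*drain_home)(struct array_cache *))
{
    if (ac->avail == ac->limit)     /* line 1002: cache is full */
        drain_home(ac);             /* line 1004: __drain_alien_cache() */
    ac->entry[ac->avail++] = objp;  /* line 1006: ac_put_obj() */
}
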
2320 struct array_cache *ac,
2326 struct array_cache *ac; in do_drain() local
2332 ac = cpu_cache_get(cachep); in do_drain()
2335 free_block(cachep, ac->entry, ac->avail, node, &list); in do_drain()
2338 ac->avail = 0; in do_drain()
2764 struct array_cache *ac; in cache_alloc_refill() local
2772 ac = cpu_cache_get(cachep); in cache_alloc_refill()
2773 batchcount = ac->batchcount; in cache_alloc_refill()
2774 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { in cache_alloc_refill()
2784 BUG_ON(ac->avail > 0 || !n); in cache_alloc_refill()
2788 if (n->shared && transfer_objects(ac, n->shared, batchcount)) { in cache_alloc_refill()
2820 ac_put_obj(cachep, ac, slab_get_obj(cachep, page, in cache_alloc_refill()
2833 n->free_objects -= ac->avail; in cache_alloc_refill()
2837 if (unlikely(!ac->avail)) { in cache_alloc_refill()
2843 ac = cpu_cache_get(cachep); in cache_alloc_refill()
2847 if (!x && (ac->avail == 0 || force_refill)) in cache_alloc_refill()
2850 if (!ac->avail) /* objects refilled by interrupt? */ in cache_alloc_refill()
2853 ac->touched = 1; in cache_alloc_refill()
2855 return ac_get_obj(cachep, ac, flags, force_refill); in cache_alloc_refill()
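
cache_alloc_refill() (lines 2764-2855) is the allocation slow path, entered when the per-CPU array is empty. A control-flow skeleton with the slab-page walking, list_lock and statistics reduced to callbacks; BATCHREFILL_LIMIT mirrors the constant defined in mm/slab.c of this era:

#include <stdbool.h>
#include <stddef.h>

struct array_cache {
    unsigned int avail, limit, batchcount, touched;
    void *entry[];
};

#define BATCHREFILL_LIMIT 16    /* mm/slab.c's damping bound */

static void *refill_model(struct array_cache *ac,
        unsigned int (*from_shared)(struct array_cache *, unsigned int),
        unsigned int (*from_slabs)(struct array_cache *, unsigned int),
        bool (*grow)(void))
{
    unsigned int batchcount = ac->batchcount;

    /* lines 2773-2774: an untouched (idle) cache gets only a partial
     * refill, avoiding large batches bouncing between CPUs */
    if (!ac->touched && batchcount > BATCHREFILL_LIMIT)
        batchcount = BATCHREFILL_LIMIT;

retry:
    /* line 2788: the per-node shared array is the cheapest source ... */
    if (!from_shared(ac, batchcount))
        /* ... line 2820: else pull objects off partial/free slab pages */
        from_slabs(ac, batchcount);

    if (ac->avail == 0) {
        bool grown = grow();    /* allocate fresh slab pages */

        if (!grown && ac->avail == 0)
            return NULL;        /* line 2847: nothing anywhere, give up */
        if (ac->avail == 0)
            goto retry;         /* line 2850: pull from the new pages */
    }

    ac->touched = 1;                    /* line 2853 */
    return ac->entry[--ac->avail];      /* line 2855: ac_get_obj() */
}

The re-check after grow() reflects lines 2843-2850: cache_grow() can re-enable interrupts, so an interrupt may have refilled or replaced the per-CPU array in the meantime; the kernel re-fetches ac with cpu_cache_get() at line 2843 for the same reason.
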
2931 struct array_cache *ac; in ____cache_alloc() local
2936 ac = cpu_cache_get(cachep); in ____cache_alloc()
2937 if (likely(ac->avail)) { in ____cache_alloc()
2938 ac->touched = 1; in ____cache_alloc()
2939 objp = ac_get_obj(cachep, ac, flags, false); in ____cache_alloc()
2958 ac = cpu_cache_get(cachep); in ____cache_alloc()
2967 kmemleak_erase(&ac->entry[ac->avail]); in ____cache_alloc()
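
____cache_alloc() (lines 2931-2967) is the per-CPU fast path that the refill above backs. A sketch; in the kernel the pop at line 2939 is ac_get_obj(), which can fail even with avail > 0 when every cached object is pfmemalloc-tagged, and that failure is what sets force_refill:

#include <stdbool.h>
#include <stddef.h>

struct array_cache {
    unsigned int avail, limit, batchcount, touched;
    void *entry[];
};

static void *alloc_fastpath_model(struct array_cache *ac,
        void *(*get_obj)(struct array_cache *),
        void *(*refill)(bool force_refill))
{
    void *objp = NULL;
    bool force_refill = false;

    if (ac->avail) {                /* line 2937: allocation hit */
        ac->touched = 1;            /* line 2938 */
        objp = get_obj(ac);         /* line 2939 */
        if (objp)
            goto out;
        force_refill = true;        /* only reserve-tagged objects left */
    }
    objp = refill(force_refill);    /* miss: cache_alloc_refill() */
    /* line 2958: refill may run with interrupts on, so the kernel
     * re-fetches ac here before touching entry[] again */
out:
    /* line 2967: kmemleak_erase() wipes the vacated slot, so the array
     * is not mistaken for a live reference to the returned object */
    if (objp)
        ac->entry[ac->avail] = NULL;
    return objp;
}
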
3309 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) in cache_flusharray() argument
3316 batchcount = ac->batchcount; in cache_flusharray()
3318 BUG_ON(!batchcount || batchcount > ac->avail); in cache_flusharray()
3330 ac->entry, sizeof(void *) * batchcount); in cache_flusharray()
3336 free_block(cachep, ac->entry, batchcount, node, &list); in cache_flusharray()
3358 ac->avail -= batchcount; in cache_flusharray()
3359 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); in cache_flusharray()
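
cache_flusharray() (lines 3309-3359) is the mirror image of the refill: when __cache_free() finds the array full, one batch is pushed back toward the node. The batch is taken from entry[0], the cold end (allocation pops from the hot end), and the survivors slide down with the memmove at line 3359. A model; the shared-array branch follows the memcpy visible at line 3330, with locking and statistics elided:

#include <assert.h>
#include <string.h>

struct array_cache {
    unsigned int avail, limit, batchcount, touched;
    void *entry[];
};

static void flusharray_model(struct array_cache *ac,
                             struct array_cache *shared,
                             void (*free_block)(void **, unsigned int))
{
    unsigned int batchcount = ac->batchcount;

    assert(batchcount && batchcount <= ac->avail);  /* line 3318 */

    if (shared) {
        /* line 3330: park as much of the batch as fits in the per-node
         * shared array; those objects skip free_block() entirely */
        unsigned int room = shared->limit - shared->avail;

        if (room) {
            if (batchcount > room)
                batchcount = room;
            memcpy(&shared->entry[shared->avail], ac->entry,
                   sizeof(void *) * batchcount);
            shared->avail += batchcount;
            goto done;
        }
    }
    free_block(ac->entry, batchcount);              /* line 3336 */
done:
    ac->avail -= batchcount;                        /* line 3358 */
    memmove(ac->entry, &ac->entry[batchcount],      /* line 3359 */
            sizeof(void *) * ac->avail);
}
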
3369 struct array_cache *ac = cpu_cache_get(cachep); in __cache_free() local
3387 if (ac->avail < ac->limit) { in __cache_free()
3391 cache_flusharray(cachep, ac); in __cache_free()
3394 ac_put_obj(cachep, ac, objp); in __cache_free()
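
Putting the free path together: a self-contained toy you can compile and run. The names mirror mm/slab.c but everything is simplified; free_block() just reports what would return to the slab lists. Freeing 20 objects into a limit-8, batch-4 cache triggers three flushes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct array_cache {
    unsigned int avail, limit, batchcount, touched;
    void *entry[];
};

static struct array_cache *alloc_arraycache(unsigned int entries,
                                            unsigned int batch)
{
    struct array_cache *ac = malloc(sizeof(*ac) + sizeof(void *) * entries);

    ac->avail = 0;
    ac->limit = entries;
    ac->batchcount = batch;
    ac->touched = 0;
    return ac;
}

/* stand-in for free_block(): report what would go back to the slabs */
static void free_block(void **entry, unsigned int nr)
{
    printf("free_block: %u objects\n", nr);
}

/* cache_flusharray() reduced to its tail: drop the oldest batch */
static void cache_flusharray(struct array_cache *ac)
{
    free_block(ac->entry, ac->batchcount);
    ac->avail -= ac->batchcount;
    memmove(ac->entry, &ac->entry[ac->batchcount],
            sizeof(void *) * ac->avail);
}

/* __cache_free() tail (lines 3387-3394): flush if full, then cache */
static void cache_free(struct array_cache *ac, void *objp)
{
    if (ac->avail >= ac->limit)
        cache_flusharray(ac);
    ac->entry[ac->avail++] = objp;
}

int main(void)
{
    struct array_cache *ac = alloc_arraycache(8, 4);
    long i;

    for (i = 0; i < 20; i++)
        cache_free(ac, (void *)(i + 1));
    printf("avail=%u of limit=%u\n", ac->avail, ac->limit);
    free(ac);
    return 0;
}

Each flush removes the four oldest pointers and keeps the most recently freed ones cached, so a subsequent allocation burst is served from the warm top of the array.
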
3717 struct array_cache *ac = per_cpu_ptr(prev, cpu); in __do_tune_cpucache() local
3722 free_block(cachep, ac->entry, ac->avail, node, &list); in __do_tune_cpucache()
3828 struct array_cache *ac, int force, int node) in drain_array() argument
3833 if (!ac || !ac->avail) in drain_array()
3835 if (ac->touched && !force) { in drain_array()
3836 ac->touched = 0; in drain_array()
3839 if (ac->avail) { in drain_array()
3840 tofree = force ? ac->avail : (ac->limit + 4) / 5; in drain_array()
3841 if (tofree > ac->avail) in drain_array()
3842 tofree = (ac->avail + 1) / 2; in drain_array()
3843 free_block(cachep, ac->entry, tofree, node, &list); in drain_array()
3844 ac->avail -= tofree; in drain_array()
3845 memmove(ac->entry, &(ac->entry[tofree]), in drain_array()
3846 sizeof(void *) * ac->avail); in drain_array()
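
drain_array() (lines 3828-3846) is what the cache reaper runs periodically over the per-CPU, shared and alien arrays. The touched flag buys one grace period at line 3836; an idle array then shrinks by roughly a fifth of its limit per pass (everything, under force), again trimmed from the cold end. A model with free_block() as a callback (the kernel does this under n->list_lock):

#include <stdbool.h>
#include <string.h>

struct array_cache {
    unsigned int avail, limit, batchcount, touched;
    void *entry[];
};

static void drain_array_model(struct array_cache *ac, bool force,
                              void (*free_block)(void **, unsigned int))
{
    unsigned int tofree;

    if (!ac || !ac->avail)
        return;
    if (ac->touched && !force) {
        ac->touched = 0;    /* line 3836: spare a recently used cache */
        return;
    }
    tofree = force ? ac->avail : (ac->limit + 4) / 5;   /* line 3840 */
    if (tofree > ac->avail)
        tofree = (ac->avail + 1) / 2;                   /* line 3842 */
    free_block(ac->entry, tofree);                      /* line 3843 */
    ac->avail -= tofree;                                /* line 3844 */
    memmove(ac->entry, &ac->entry[tofree],              /* lines 3845-3846 */
            sizeof(void *) * ac->avail);
}
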