Lines Matching refs:shared

262 parent->shared = NULL; in kmem_cache_node_init()
464 .shared = 1,
933 if (n->shared) in __drain_alien_cache()
934 transfer_objects(n->shared, ac, ac->limit); in __drain_alien_cache()
1105 struct array_cache *shared; in cpuup_canceled() local
1130 shared = n->shared; in cpuup_canceled()
1131 if (shared) { in cpuup_canceled()
1132 free_block(cachep, shared->entry, in cpuup_canceled()
1133 shared->avail, node, &list); in cpuup_canceled()
1134 n->shared = NULL; in cpuup_canceled()
1142 kfree(shared); in cpuup_canceled()
1186 struct array_cache *shared = NULL; in cpuup_prepare() local
1189 if (cachep->shared) { in cpuup_prepare()
1190 shared = alloc_arraycache(node, in cpuup_prepare()
1191 cachep->shared * cachep->batchcount, in cpuup_prepare()
1193 if (!shared) in cpuup_prepare()
1199 kfree(shared); in cpuup_prepare()
1207 if (!n->shared) { in cpuup_prepare()
1212 n->shared = shared; in cpuup_prepare()
1213 shared = NULL; in cpuup_prepare()
1222 kfree(shared); in cpuup_prepare()
2353 drain_array(cachep, n, n->shared, 1, node); in drain_cpu_caches()
2428 kfree(n->shared); in __kmem_cache_shutdown()
2788 if (n->shared && transfer_objects(ac, n->shared, batchcount)) { in cache_alloc_refill()
2789 n->shared->touched = 1; in cache_alloc_refill()
3323 if (n->shared) { in cache_flusharray()
3324 struct array_cache *shared_array = n->shared; in cache_flusharray()
3620 if (cachep->shared) { in alloc_kmem_cache_node()
3622 cachep->shared*cachep->batchcount, in alloc_kmem_cache_node()
3632 struct array_cache *shared = n->shared; in alloc_kmem_cache_node() local
3637 if (shared) in alloc_kmem_cache_node()
3638 free_block(cachep, shared->entry, in alloc_kmem_cache_node()
3639 shared->avail, node, &list); in alloc_kmem_cache_node()
3641 n->shared = new_shared; in alloc_kmem_cache_node()
3650 kfree(shared); in alloc_kmem_cache_node()
3664 n->shared = new_shared; in alloc_kmem_cache_node()
3679 kfree(n->shared); in alloc_kmem_cache_node()
3692 int batchcount, int shared, gfp_t gfp) in __do_tune_cpucache() argument
3708 cachep->shared = shared; in __do_tune_cpucache()
3733 int batchcount, int shared, gfp_t gfp) in do_tune_cpucache() argument
3738 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); in do_tune_cpucache()
3749 __do_tune_cpucache(c, limit, batchcount, shared, gfp); in do_tune_cpucache()
3760 int shared = 0; in enable_cpucache() local
3766 shared = root->shared; in enable_cpucache()
3770 if (limit && shared && batchcount) in enable_cpucache()
3801 shared = 0; in enable_cpucache()
3803 shared = 8; in enable_cpucache()
3815 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); in enable_cpucache()
3899 drain_array(searchp, n, n->shared, 0, node); in cache_reap()
3961 if (n->shared) in get_slabinfo()
3962 shared_avail += n->shared->avail; in get_slabinfo()
3982 sinfo->shared = cachep->shared; in get_slabinfo()
4032 int limit, batchcount, shared, res; in slabinfo_write() local
4046 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) in slabinfo_write()
4055 batchcount > limit || shared < 0) { in slabinfo_write()
4059 batchcount, shared, in slabinfo_write()
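
Context for the references above: they appear to come from the kernel's SLAB allocator (mm/slab.c in this vintage), where each per-node kmem_cache_node carries an optional "shared" array_cache sized cachep->shared * cachep->batchcount (see cpuup_prepare() and alloc_kmem_cache_node()). cache_alloc_refill() pulls objects from it into the per-CPU cache via transfer_objects() and marks it touched, while __drain_alien_cache() and cache_flusharray() move objects back toward it. The following is a minimal userspace sketch of that transfer step, modelled only on the fields the listing references (avail, limit, batchcount, touched, entry[]); it is an illustration under those assumptions, not the kernel's implementation.

/* Userspace model of the SLAB "shared" array-cache transfer.
 * Field names mirror those referenced in the listing above; the layout,
 * sizes, and transfer policy here are simplified assumptions. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct array_cache {
	unsigned int avail;      /* objects currently cached */
	unsigned int limit;      /* capacity of entry[] */
	unsigned int batchcount; /* objects moved per refill/flush */
	unsigned int touched;    /* "recently used" flag, as set in cache_alloc_refill() */
	void *entry[];           /* cached object pointers */
};

static struct array_cache *alloc_arraycache(unsigned int limit, unsigned int batch)
{
	struct array_cache *ac = calloc(1, sizeof(*ac) + limit * sizeof(void *));

	if (ac) {
		ac->limit = limit;
		ac->batchcount = batch;
	}
	return ac;
}

/* Move up to 'max' objects from 'from' to 'to'; returns how many moved. */
static unsigned int transfer_objects(struct array_cache *to,
				     struct array_cache *from, unsigned int max)
{
	unsigned int nr = from->avail;

	if (nr > max)
		nr = max;
	if (nr > to->limit - to->avail)
		nr = to->limit - to->avail;
	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
	       nr * sizeof(void *));
	from->avail -= nr;
	to->avail += nr;
	return nr;
}

int main(void)
{
	/* Per-node shared cache sized shared_factor * batchcount, as in cpuup_prepare(). */
	unsigned int batchcount = 16, shared_factor = 8;
	struct array_cache *shared = alloc_arraycache(shared_factor * batchcount, batchcount);
	struct array_cache *ac = alloc_arraycache(32, batchcount);
	unsigned int moved;

	if (!shared || !ac)
		return EXIT_FAILURE;

	/* Pretend the shared cache already holds some freed objects. */
	for (unsigned int i = 0; i < 40; i++)
		shared->entry[shared->avail++] = (void *)(uintptr_t)(i + 1);

	/* Refill path: pull up to a batch into the per-CPU cache, mark shared touched. */
	moved = transfer_objects(ac, shared, ac->batchcount);
	if (moved)
		shared->touched = 1;

	printf("moved %u objects; per-CPU avail=%u, shared avail=%u\n",
	       moved, ac->avail, shared->avail);
	free(shared);
	free(ac);
	return 0;
}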
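The same three tunables also surface at runtime: slabinfo_write() parses a cache name followed by "limit batchcount shared", rejects at least batchcount > limit and a negative shared factor, and hands the values to do_tune_cpucache(), while enable_cpucache() picks the defaults (a shared factor of 0 or 8 depending on the cache). Below is a small, hedged userspace example of writing to that interface; it assumes a kernel built with the SLAB allocator, a writable /proc/slabinfo, and root privileges, and "dentry 120 60 8" is only an illustrative cache name and set of values.

/* Sketch: set limit/batchcount/shared for one cache via /proc/slabinfo,
 * the interface parsed by slabinfo_write() in the listing above.
 * The cache name and numbers are placeholders, not recommendations. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f) {
		perror("fopen /proc/slabinfo");
		return EXIT_FAILURE;
	}
	/* Format: "<cache-name> <limit> <batchcount> <shared>". */
	if (fprintf(f, "dentry 120 60 8\n") < 0) {
		perror("write /proc/slabinfo");
		fclose(f);
		return EXIT_FAILURE;
	}
	if (fclose(f) != 0) {
		perror("close /proc/slabinfo");
		return EXIT_FAILURE;
	}
	return 0;
}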