Searched refs:SLAB_DESTROY_BY_RCU (Results 1 - 27 of 27) sorted by relevance

/linux-4.1.27/net/ipv4/
udplite.c:58  .slab_flags = SLAB_DESTROY_BY_RCU,
tcp_ipv4.c:2373  .slab_flags = SLAB_DESTROY_BY_RCU,
udp.c:2274  .slab_flags = SLAB_DESTROY_BY_RCU,
/linux-4.1.27/net/ipv6/
udplite.c:53  .slab_flags = SLAB_DESTROY_BY_RCU,
tcp_ipv6.c:1864  .slab_flags = SLAB_DESTROY_BY_RCU,
udp.c:1543  .slab_flags = SLAB_DESTROY_BY_RCU,
/linux-4.1.27/mm/
kmemcheck.c:96  if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU)) kmemcheck_slab_free()
slob.c:129  * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
527 if (flags & SLAB_DESTROY_BY_RCU) { __kmem_cache_create()
601 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { kmem_cache_free()
slab.h:117  SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
slab_common.c:37  SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
439 if (s->flags & SLAB_DESTROY_BY_RCU) do_kmem_cache_shutdown()
rmap.c:426  0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor); anon_vma_init()
475 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero() page_get_anon_vma()
slub.c:303  if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) slab_ksize()
1503 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { free_slab()
3028 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) && calculate_sizes()
3050 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) || calculate_sizes()
3124 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) kmem_cache_open()
4577 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); destroy_by_rcu_show()
slab.c:1890  if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { slab_destroy()
2128 if (!(flags & SLAB_DESTROY_BY_RCU)) __kmem_cache_create()
2131 if (flags & SLAB_DESTROY_BY_RCU) __kmem_cache_create()
/linux-4.1.27/drivers/staging/lustre/lustre/include/linux/
lustre_compat25.h:94  #ifndef SLAB_DESTROY_BY_RCU
95 #define SLAB_DESTROY_BY_RCU 0 macro
/linux-4.1.27/fs/
signalfd.c:41  * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return. signalfd_cleanup()
/linux-4.1.27/include/linux/
slab.h:31  * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
65 #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ macro
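The two hits above are the flag's definition and the warning comment attached to it. As a quick illustration (a sketch, not code from the tree above), a cache is typically created with the flag roughly as follows; struct foo, foo_cachep and foo_cache_init() are made-up names. Note that the flag only defers freeing of whole slab pages to RCU: an individual object can be recycled for a new object of the same type at any time, which is exactly what the warning at slab.h:31 is about.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical object type, reused by the lookup sketches further down. */
struct foo {
	struct hlist_node node;	/* chained into a hypothetical hash table */
	atomic_t refcnt;	/* 0 means the object is being freed */
	int key;
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	/* Slab pages of this cache go back to the page allocator only after
	 * an RCU grace period; the objects themselves may be reused
	 * immediately. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				       SLAB_DESTROY_BY_RCU, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}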
/linux-4.1.27/mm/kasan/
kasan.c:333  if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU)) kasan_slab_free()
/linux-4.1.27/net/netfilter/
nf_conntrack_core.c:818  * SLAB_DESTROY_BY_RCU. __nf_conntrack_alloc()
876 * the golden rule for SLAB_DESTROY_BY_RCU nf_conntrack_free()
1769 SLAB_DESTROY_BY_RCU, NULL);
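The "golden rule" mentioned at nf_conntrack_core.c:876 (and relied on by rmap.c:475 and the sighand hits elsewhere in this list) is that an object found under rcu_read_lock() in a SLAB_DESTROY_BY_RCU cache may already have been freed and reused for another object of the same type: only the memory's type is stable, not its contents. A lookup therefore takes a reference with atomic_inc_not_zero() and then re-checks that the object is still the one it was searching for, restarting if not. A hedged sketch reusing the hypothetical struct foo from above; foo_table, foo_hash(), foo_match() and foo_put() are made up as well:

#include <linux/rculist.h>

static struct hlist_head foo_table[256];	/* hypothetical hash table */

static struct foo *foo_lookup(int key)
{
	struct foo *f;

	rcu_read_lock();
begin:
	hlist_for_each_entry_rcu(f, &foo_table[foo_hash(key)], node) {
		if (!foo_match(f, key))
			continue;
		/* refcnt already 0: the object is on its way out, skip it. */
		if (!atomic_inc_not_zero(&f->refcnt))
			continue;
		/* The object may have been freed and recycled between the two
		 * checks above; if so, drop the reference and start over. */
		if (unlikely(!foo_match(f, key))) {
			foo_put(f);
			goto begin;
		}
		rcu_read_unlock();
		return f;	/* reference held for the caller */
	}
	rcu_read_unlock();
	return NULL;
}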
/linux-4.1.27/net/llc/
llc_sap.c:328  /* Extra checks required by SLAB_DESTROY_BY_RCU */ sk_nulls_for_each_rcu()
llc_conn.c:509  /* Extra checks required by SLAB_DESTROY_BY_RCU */ sk_nulls_for_each_rcu()
568 /* Extra checks required by SLAB_DESTROY_BY_RCU */ sk_nulls_for_each_rcu()
af_llc.c:143  .slab_flags = SLAB_DESTROY_BY_RCU,
/linux-4.1.27/kernel/
fork.c:1080  * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it __cleanup_sighand()
1820 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU| proc_caches_init()
signal.c:1280  * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which __lock_task_sighand()
/linux-4.1.27/drivers/staging/lustre/lustre/ldlm/
ldlm_lockd.c:1160  SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL); ldlm_init()
/linux-4.1.27/net/dccp/
ipv4.c:973  .slab_flags = SLAB_DESTROY_BY_RCU,
ipv6.c:1041  .slab_flags = SLAB_DESTROY_BY_RCU,
/linux-4.1.27/include/net/
sock.h:925  * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
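The sock.h comment above, and the "Extra checks required by SLAB_DESTROY_BY_RCU" hits in net/llc, deal with a second consequence of object recycling: while walking an RCU hash chain, following ->next out of a recycled object can silently move the reader onto a different chain. The socket caches therefore use nulls-terminated lists, and the lookup restarts whenever the walk ends on a nulls value that does not belong to the bucket it started in. Roughly (a sketch, not the actual udp/tcp lookup; sk_key_match() is a stand-in for the real comparison, and each bucket's nulls value is assumed to be initialised to its slot index):

#include <net/sock.h>

/* Caller holds rcu_read_lock(). */
static struct sock *foo_sk_lookup(struct hlist_nulls_head *head,
				  unsigned int slot, int key)
{
	struct sock *sk;
	struct hlist_nulls_node *node;

begin:
	sk_nulls_for_each_rcu(sk, node, head) {
		if (!sk_key_match(sk, key))
			continue;
		if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
			continue;	/* being freed, skip */
		if (unlikely(!sk_key_match(sk, key))) {
			sock_put(sk);	/* recycled under us, retry */
			goto begin;
		}
		return sk;
	}
	/* The walk ended on a nulls marker; if it is not the marker of the
	 * bucket we started in, a recycled socket carried us onto another
	 * chain and the whole lookup must be restarted. */
	if (get_nulls_value(node) != slot)
		goto begin;
	return NULL;
}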

Completed in 1757 milliseconds