slab_cache   26 drivers/gpu/drm/i915/i915_active.c struct kmem_cache *slab_cache;
slab_cache  148 drivers/gpu/drm/i915/i915_active.c kmem_cache_free(global.slab_cache, it);
slab_cache  200 drivers/gpu/drm/i915/i915_active.c prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
slab_cache  214 drivers/gpu/drm/i915/i915_active.c kmem_cache_free(global.slab_cache, prealloc);
slab_cache  607 drivers/gpu/drm/i915/i915_active.c node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
slab_cache  652 drivers/gpu/drm/i915/i915_active.c kmem_cache_free(global.slab_cache, node);
slab_cache  747 drivers/gpu/drm/i915/i915_active.c kmem_cache_shrink(global.slab_cache);
slab_cache  752 drivers/gpu/drm/i915/i915_active.c kmem_cache_destroy(global.slab_cache);
slab_cache  762 drivers/gpu/drm/i915/i915_active.c global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
slab_cache  763 drivers/gpu/drm/i915/i915_active.c if (!global.slab_cache)
slab_cache   95 drivers/md/dm-bufio.c struct kmem_cache *slab_cache;
slab_cache  378 drivers/md/dm-bufio.c if (unlikely(c->slab_cache != NULL)) {
slab_cache  380 drivers/md/dm-bufio.c return kmem_cache_alloc(c->slab_cache, gfp_mask);
slab_cache  420 drivers/md/dm-bufio.c kmem_cache_free(c->slab_cache, data);
slab_cache 1662 drivers/md/dm-bufio.c c->slab_cache = kmem_cache_create(slab_name, block_size, align,
slab_cache 1664 drivers/md/dm-bufio.c if (!c->slab_cache) {
slab_cache 1713 drivers/md/dm-bufio.c kmem_cache_destroy(c->slab_cache);
slab_cache 1761 drivers/md/dm-bufio.c kmem_cache_destroy(c->slab_cache);
slab_cache 2169 drivers/md/raid5.c sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf);
slab_cache 2175 drivers/md/raid5.c free_stripe(conf->slab_cache, sh);
slab_cache 2208 drivers/md/raid5.c conf->slab_cache = sc;
slab_cache 2368 drivers/md/raid5.c free_stripe(conf->slab_cache, osh);
slab_cache 2376 drivers/md/raid5.c kmem_cache_destroy(conf->slab_cache);
slab_cache 2408 drivers/md/raid5.c conf->slab_cache = sc;
slab_cache 2445 drivers/md/raid5.c free_stripe(conf->slab_cache, sh);
slab_cache 2457 drivers/md/raid5.c kmem_cache_destroy(conf->slab_cache);
slab_cache 2458 drivers/md/raid5.c conf->slab_cache = NULL;
slab_cache  624 drivers/md/raid5.h struct kmem_cache *slab_cache; /* for allocating stripes */
slab_cache  117 include/linux/mm_types.h struct kmem_cache *slab_cache; /* not slob */
slab_cache  565 mm/kasan/common.c return __kasan_kmalloc(page->slab_cache, object, size,
slab_cache  582 mm/kasan/common.c __kasan_slab_free(page->slab_cache, ptr, ip, false);
slab_cache  128 mm/kasan/quarantine.c return virt_to_head_page(qlink)->slab_cache;
slab_cache  378 mm/kasan/report.c struct kmem_cache *cache = page->slab_cache;
slab_cache   52 mm/kasan/tags_report.c cache = page->slab_cache;
slab_cache 1410 mm/slab.c cachep = page->slab_cache;
slab_cache 2559 mm/slab.c page->slab_cache = cache;
slab_cache 4177 mm/slab.c cachep = page->slab_cache;
slab_cache  336 mm/slab.h s = READ_ONCE(page->slab_cache);
slab_cache  476 mm/slab.h return page->slab_cache;
slab_cache 1151 mm/slub.c if (unlikely(s != page->slab_cache)) {
slab_cache 1155 mm/slub.c } else if (!page->slab_cache) {
slab_cache 1656 mm/slub.c page->slab_cache = s;
slab_cache 1738 mm/slub.c __free_slab(page->slab_cache, page);
slab_cache 3086 mm/slub.c df->s = page->slab_cache;
slab_cache 3895 mm/slub.c s = page->slab_cache;
slab_cache 3950 mm/slub.c return slab_ksize(page->slab_cache);
slab_cache 3975 mm/slub.c slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
slab_cache 4229 mm/slub.c p->slab_cache = s;
slab_cache 4233 mm/slub.c p->slab_cache = s;
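Read together, the hits above trace the standard slab-cache lifecycle: an owner creates a cache with KMEM_CACHE() or kmem_cache_create(), allocates and frees objects with kmem_cache_alloc()/kmem_cache_free(), optionally trims it with kmem_cache_shrink(), and tears it down with kmem_cache_destroy(); the mm/ and mm/kasan/ hits go the other way, reading the owning cache back out of a struct page through the page->slab_cache back-pointer declared in include/linux/mm_types.h. Below is a minimal sketch of that lifecycle; the module, the demo_node struct, and the function names are hypothetical, but the slab API calls match the usage shown above.

#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical payload struct; stands in for active_node, stripe_head, etc. */
struct demo_node {
	u64 value;
};

static struct kmem_cache *slab_cache;

static int __init demo_init(void)
{
	struct demo_node *node;

	/* KMEM_CACHE() derives the cache name, size and alignment from the struct */
	slab_cache = KMEM_CACHE(demo_node, SLAB_HWCACHE_ALIGN);
	if (!slab_cache)
		return -ENOMEM;

	/* per-object alloc/free, as in the i915_active.c and dm-bufio.c hits */
	node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
	if (!node) {
		kmem_cache_destroy(slab_cache);
		return -ENOMEM;
	}
	kmem_cache_free(slab_cache, node);

	/* give empty slabs back to the page allocator */
	kmem_cache_shrink(slab_cache);
	return 0;
}

static void __exit demo_exit(void)
{
	/* every object must already be freed before the cache is destroyed */
	kmem_cache_destroy(slab_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

As in i915_active.c, the cache must outlive every object allocated from it: the kmem_cache_destroy() in the exit path assumes all nodes have already been returned with kmem_cache_free().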