cachep 48 arch/powerpc/mm/hugetlbpage.c struct kmem_cache *cachep;
cachep 54 arch/powerpc/mm/hugetlbpage.c cachep = PGT_CACHE(PTE_T_ORDER);
cachep 58 arch/powerpc/mm/hugetlbpage.c cachep = NULL;
cachep 62 arch/powerpc/mm/hugetlbpage.c cachep = PGT_CACHE(pdshift - pshift);
cachep 67 arch/powerpc/mm/hugetlbpage.c if (!cachep && !new) {
cachep 72 arch/powerpc/mm/hugetlbpage.c if (cachep)
cachep 73 arch/powerpc/mm/hugetlbpage.c new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
cachep 104 arch/powerpc/mm/hugetlbpage.c if (cachep)
cachep 105 arch/powerpc/mm/hugetlbpage.c kmem_cache_free(cachep, new);
cachep 833 drivers/scsi/snic/snic_main.c struct kmem_cache *cachep;
cachep 866 drivers/scsi/snic/snic_main.c cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
cachep 868 drivers/scsi/snic/snic_main.c if (!cachep) {
cachep 874 drivers/scsi/snic/snic_main.c snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;
cachep 879 drivers/scsi/snic/snic_main.c cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
cachep 881 drivers/scsi/snic/snic_main.c if (!cachep) {
cachep 887 drivers/scsi/snic/snic_main.c snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
cachep 890 drivers/scsi/snic/snic_main.c cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
cachep 892 drivers/scsi/snic/snic_main.c if (!cachep) {
cachep 898 drivers/scsi/snic/snic_main.c snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;
cachep 2347 fs/ext4/mballoc.c struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
cachep 2349 fs/ext4/mballoc.c BUG_ON(!cachep);
cachep 2350 fs/ext4/mballoc.c return cachep;
cachep 2398 fs/ext4/mballoc.c struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
cachep 2422 fs/ext4/mballoc.c meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
cachep 2487 fs/ext4/mballoc.c struct kmem_cache *cachep;
cachep 2518 fs/ext4/mballoc.c cachep = get_groupinfo_cache(sb->s_blocksize_bits);
cachep 2520 fs/ext4/mballoc.c kmem_cache_free(cachep, ext4_get_group_info(sb, i));
cachep 2551 fs/ext4/mballoc.c struct kmem_cache *cachep;
cachep 2568 fs/ext4/mballoc.c cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
cachep 2572 fs/ext4/mballoc.c ext4_groupinfo_caches[cache_index] = cachep;
cachep 2575 fs/ext4/mballoc.c if (!cachep) {
cachep 2722 fs/ext4/mballoc.c struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
cachep 2734 fs/ext4/mballoc.c kmem_cache_free(cachep, grinfo);
cachep 2204 fs/f2fs/f2fs.h static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
cachep 2209 fs/f2fs/f2fs.h entry = kmem_cache_alloc(cachep, flags);
cachep 2211 fs/f2fs/f2fs.h entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
cachep 801 fs/gfs2/glock.c struct kmem_cache *cachep;
cachep 813 fs/gfs2/glock.c cachep = gfs2_glock_aspace_cachep;
cachep 815 fs/gfs2/glock.c cachep = gfs2_glock_cachep;
cachep 816 fs/gfs2/glock.c gl = kmem_cache_alloc(cachep, GFP_NOFS);
cachep 825 fs/gfs2/glock.c kmem_cache_free(cachep, gl);
cachep 875 fs/gfs2/glock.c kmem_cache_free(cachep, gl);
cachep 1373 include/linux/memcontrol.h struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
cachep 1374 include/linux/memcontrol.h void memcg_kmem_put_cache(struct kmem_cache *cachep);
cachep 212 include/net/inet_hashtables.h inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
cachep 215 include/net/inet_hashtables.h void inet_bind_bucket_destroy(struct kmem_cache *cachep,
cachep 2869 mm/memcontrol.c struct kmem_cache *cachep;
cachep 2878 mm/memcontrol.c struct kmem_cache *cachep = cw->cachep;
cachep 2880 mm/memcontrol.c memcg_create_kmem_cache(memcg, cachep);
cachep 2890 mm/memcontrol.c struct kmem_cache *cachep)
cachep 2902 mm/memcontrol.c cw->cachep = cachep;
cachep 2931 mm/memcontrol.c struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
cachep 2938 mm/memcontrol.c VM_BUG_ON(!is_root_cache(cachep));
cachep 2941 mm/memcontrol.c return cachep;
cachep 2957 mm/memcontrol.c arr = rcu_dereference(cachep->memcg_params.memcg_caches);
cachep 2986 mm/memcontrol.c memcg_schedule_kmem_cache_create(memcg, cachep);
cachep 2988 mm/memcontrol.c cachep = memcg_cachep;
cachep 2991 mm/memcontrol.c return cachep;
cachep 2998 mm/memcontrol.c void memcg_kmem_put_cache(struct kmem_cache *cachep)
cachep 3000 mm/memcontrol.c if (!is_root_cache(cachep))
cachep 3001 mm/memcontrol.c percpu_ref_put(&cachep->memcg_params.refcnt);
cachep 211 mm/slab.c static void free_block(struct kmem_cache *cachep, void **objpp, int len,
cachep 213 mm/slab.c static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
cachep 214 mm/slab.c static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
cachep 217 mm/slab.c static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
cachep 219 mm/slab.c static inline void fixup_slab_list(struct kmem_cache *cachep,
cachep 241 mm/slab.c #define MAKE_LIST(cachep, listp, slab, nodeid) \
cachep 244 mm/slab.c list_splice(&get_node(cachep, nodeid)->slab, listp); \
cachep 247 mm/slab.c #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
cachep 249 mm/slab.c MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
cachep 250 mm/slab.c MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
cachep 251 mm/slab.c MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
cachep 327 mm/slab.c static int obj_offset(struct kmem_cache *cachep)
cachep 329 mm/slab.c return cachep->obj_offset;
cachep 332 mm/slab.c static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
cachep 334 mm/slab.c BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
cachep 335 mm/slab.c return (unsigned long long*) (objp + obj_offset(cachep) -
cachep 339 mm/slab.c static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
cachep 341 mm/slab.c BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
cachep 342 mm/slab.c if (cachep->flags & SLAB_STORE_USER)
cachep 343 mm/slab.c return (unsigned long long *)(objp + cachep->size -
cachep 346 mm/slab.c return (unsigned long long *) (objp + cachep->size -
cachep 350 mm/slab.c static void **dbg_userword(struct kmem_cache *cachep, void *objp)
cachep 352 mm/slab.c BUG_ON(!(cachep->flags & SLAB_STORE_USER));
cachep 353 mm/slab.c return (void **)(objp + cachep->size - BYTES_PER_WORD);
cachep 359 mm/slab.c #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
cachep 360 mm/slab.c #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
cachep 361 mm/slab.c #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
cachep 392 mm/slab.c static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
cachep 394 mm/slab.c return this_cpu_ptr(cachep->cpu_cache);
cachep 436 mm/slab.c #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
cachep 438 mm/slab.c static void __slab_error(const char *function, struct kmem_cache *cachep,
cachep 442 mm/slab.c function, cachep->name, msg);
cachep 551 mm/slab.c static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
cachep 559 mm/slab.c n = get_node(cachep, page_node);
cachep 562 mm/slab.c free_block(cachep, &objp, 1, page_node, &list);
cachep 565 mm/slab.c slabs_destroy(cachep, &list);
cachep 593 mm/slab.c #define drain_alien_cache(cachep, alien) do { } while (0)
cachep 594 mm/slab.c #define reap_alien(cachep, n) do { } while (0)
cachep 606 mm/slab.c static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
cachep 611 mm/slab.c static inline void *alternate_node_alloc(struct kmem_cache *cachep,
cachep 617 mm/slab.c static inline void *____cache_alloc_node(struct kmem_cache *cachep,
cachep 684 mm/slab.c static void __drain_alien_cache(struct kmem_cache *cachep,
cachep 688 mm/slab.c struct kmem_cache_node *n = get_node(cachep, node);
cachep 700 mm/slab.c free_block(cachep, ac->entry, ac->avail, node, list);
cachep 709 mm/slab.c static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
cachep 722 mm/slab.c __drain_alien_cache(cachep, ac, node, &list);
cachep 724 mm/slab.c slabs_destroy(cachep, &list);
cachep 730 mm/slab.c static void drain_alien_cache(struct kmem_cache *cachep,
cachep 745 mm/slab.c __drain_alien_cache(cachep, ac, i, &list);
cachep 747 mm/slab.c slabs_destroy(cachep, &list);
cachep 752 mm/slab.c static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
cachep 760 mm/slab.c n = get_node(cachep, node);
cachep 761 mm/slab.c STATS_INC_NODEFREES(cachep);
cachep 767 mm/slab.c STATS_INC_ACOVERFLOW(cachep);
cachep 768 mm/slab.c __drain_alien_cache(cachep, ac, page_node, &list);
cachep 772 mm/slab.c slabs_destroy(cachep, &list);
cachep 774 mm/slab.c n = get_node(cachep, page_node);
cachep 776 mm/slab.c free_block(cachep, &objp, 1, page_node, &list);
cachep 778 mm/slab.c slabs_destroy(cachep, &list);
cachep 783 mm/slab.c static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
cachep 794 mm/slab.c return __cache_free_alien(cachep, objp, node, page_node);
cachep 807 mm/slab.c static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
cachep 816 mm/slab.c n = get_node(cachep, node);
cachep 819 mm/slab.c n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
cachep 820 mm/slab.c cachep->num;
cachep 832 mm/slab.c ((unsigned long)cachep) % REAPTIMEOUT_NODE;
cachep 835 mm/slab.c (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
cachep 842 mm/slab.c cachep->node[node] = n;
cachep 860 mm/slab.c struct kmem_cache *cachep;
cachep 862 mm/slab.c list_for_each_entry(cachep, &slab_caches, list) {
cachep 863 mm/slab.c ret = init_cache_node(cachep, node, GFP_KERNEL);
cachep 872 mm/slab.c static int setup_kmem_cache_node(struct kmem_cache *cachep,
cachep 883 mm/slab.c new_alien = alloc_alien_cache(node, cachep->limit, gfp);
cachep 888 mm/slab.c if (cachep->shared) {
cachep 890 mm/slab.c cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
cachep 895 mm/slab.c ret = init_cache_node(cachep, node, gfp);
cachep 899 mm/slab.c n = get_node(cachep, node);
cachep 902 mm/slab.c free_block(cachep, n->shared->entry,
cachep 919 mm/slab.c slabs_destroy(cachep, &list);
cachep 942 mm/slab.c struct kmem_cache *cachep;
cachep 947 mm/slab.c list_for_each_entry(cachep, &slab_caches, list) {
cachep 953 mm/slab.c n = get_node(cachep, node);
cachep 960 mm/slab.c n->free_limit -= cachep->batchcount;
cachep 963 mm/slab.c nc = per_cpu_ptr(cachep->cpu_cache, cpu);
cachep 964 mm/slab.c free_block(cachep, nc->entry, nc->avail, node, &list);
cachep 974 mm/slab.c free_block(cachep, shared->entry,
cachep 986 mm/slab.c drain_alien_cache(cachep, alien);
cachep 991 mm/slab.c slabs_destroy(cachep, &list);
cachep 998 mm/slab.c list_for_each_entry(cachep, &slab_caches, list) {
cachep 999 mm/slab.c n = get_node(cachep, node);
cachep 1002 mm/slab.c drain_freelist(cachep, n, INT_MAX);
cachep 1008 mm/slab.c struct kmem_cache *cachep;
cachep 1026 mm/slab.c list_for_each_entry(cachep, &slab_caches, list) {
cachep 1027 mm/slab.c err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
cachep 1097 mm/slab.c struct kmem_cache *cachep;
cachep 1100 mm/slab.c list_for_each_entry(cachep, &slab_caches, list) {
cachep 1103 mm/slab.c n = get_node(cachep, node);
cachep 1107 mm/slab.c drain_freelist(cachep, n, INT_MAX);
cachep 1154 mm/slab.c static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
cachep 1168 mm/slab.c MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep 1169 mm/slab.c cachep->node[nodeid] = ptr;
cachep 1176 mm/slab.c static void __init set_up_node(struct kmem_cache *cachep, int index)
cachep 1181 mm/slab.c cachep->node[node] = &init_kmem_cache_node[index + node];
cachep 1182 mm/slab.c cachep->node[node]->next_reap = jiffies +
cachep 1184 mm/slab.c ((unsigned long)cachep) % REAPTIMEOUT_NODE;
cachep 1275 mm/slab.c struct kmem_cache *cachep;
cachep 1279 mm/slab.c list_for_each_entry(cachep, &slab_caches, list)
cachep 1280 mm/slab.c if (enable_cpucache(cachep, GFP_NOWAIT))
cachep 1317 mm/slab.c slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
cachep 1332 mm/slab.c cachep->name, cachep->size, cachep->gfporder);
cachep 1334 mm/slab.c for_each_kmem_cache_node(cachep, node, n) {
cachep 1345 mm/slab.c (total_slabs * cachep->num) - free_objs,
cachep 1346 mm/slab.c total_slabs * cachep->num);
cachep 1359 mm/slab.c static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
cachep 1364 mm/slab.c flags |= cachep->allocflags;
cachep 1366 mm/slab.c page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
cachep 1368 mm/slab.c slab_out_of_memory(cachep, flags, nodeid);
cachep 1372 mm/slab.c if (charge_slab_page(page, flags, cachep->gfporder, cachep)) {
cachep 1373 mm/slab.c __free_pages(page, cachep->gfporder);
cachep 1388 mm/slab.c static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
cachep 1390 mm/slab.c int order = cachep->gfporder;
cachep 1400 mm/slab.c uncharge_slab_page(page, order, cachep);
cachep 1406 mm/slab.c struct kmem_cache *cachep;
cachep 1410 mm/slab.c cachep = page->slab_cache;
cachep 1412 mm/slab.c kmem_freepages(cachep, page);
cachep 1416 mm/slab.c static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
cachep 1418 mm/slab.c if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
cachep 1419 mm/slab.c (cachep->size % PAGE_SIZE) == 0)
cachep 1426 mm/slab.c static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
cachep 1428 mm/slab.c if (!is_debug_pagealloc_cache(cachep))
cachep 1431 mm/slab.c kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
cachep 1435 mm/slab.c static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
cachep 1440 mm/slab.c static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
cachep 1442 mm/slab.c int size = cachep->object_size;
cachep 1443 mm/slab.c addr = &((char *)addr)[obj_offset(cachep)];
cachep 1481 mm/slab.c static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
cachep 1486 mm/slab.c if (cachep->flags & SLAB_RED_ZONE) {
cachep 1488 mm/slab.c *dbg_redzone1(cachep, objp),
cachep 1489 mm/slab.c *dbg_redzone2(cachep, objp));
cachep 1492 mm/slab.c if (cachep->flags & SLAB_STORE_USER)
cachep 1493 mm/slab.c pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
cachep 1494 mm/slab.c realobj = (char *)objp + obj_offset(cachep);
cachep 1495 mm/slab.c size = cachep->object_size;
cachep 1505 mm/slab.c static void check_poison_obj(struct kmem_cache *cachep, void *objp)
cachep 1511 mm/slab.c if (is_debug_pagealloc_cache(cachep))
cachep 1514 mm/slab.c realobj = (char *)objp + obj_offset(cachep);
cachep 1515 mm/slab.c size = cachep->object_size;
cachep 1527 mm/slab.c print_tainted(), cachep->name,
cachep 1529 mm/slab.c print_objinfo(cachep, objp, 0);
cachep 1551 mm/slab.c objnr = obj_to_index(cachep, page, objp);
cachep 1553 mm/slab.c objp = index_to_obj(cachep, page, objnr - 1);
cachep 1554 mm/slab.c realobj = (char *)objp + obj_offset(cachep);
cachep 1556 mm/slab.c print_objinfo(cachep, objp, 2);
cachep 1558 mm/slab.c if (objnr + 1 < cachep->num) {
cachep 1559 mm/slab.c objp = index_to_obj(cachep, page, objnr + 1);
cachep 1560 mm/slab.c realobj = (char *)objp + obj_offset(cachep);
cachep 1562 mm/slab.c print_objinfo(cachep, objp, 2);
cachep 1569 mm/slab.c static void slab_destroy_debugcheck(struct kmem_cache *cachep,
cachep 1574 mm/slab.c if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
cachep 1575 mm/slab.c poison_obj(cachep, page->freelist - obj_offset(cachep),
cachep 1579 mm/slab.c for (i = 0; i < cachep->num; i++) {
cachep 1580 mm/slab.c void *objp = index_to_obj(cachep, page, i);
cachep 1582 mm/slab.c if (cachep->flags & SLAB_POISON) {
cachep 1583 mm/slab.c check_poison_obj(cachep, objp);
cachep 1584 mm/slab.c slab_kernel_map(cachep, objp, 1);
cachep 1586 mm/slab.c if (cachep->flags & SLAB_RED_ZONE) {
cachep 1587 mm/slab.c if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
cachep 1588 mm/slab.c slab_error(cachep, "start of a freed object was overwritten");
cachep 1589 mm/slab.c if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
cachep 1590 mm/slab.c slab_error(cachep, "end of a freed object was overwritten");
cachep 1595 mm/slab.c static void slab_destroy_debugcheck(struct kmem_cache *cachep,
cachep 1610 mm/slab.c static void slab_destroy(struct kmem_cache *cachep, struct page *page)
cachep 1615 mm/slab.c slab_destroy_debugcheck(cachep, page);
cachep 1616 mm/slab.c if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
cachep 1619 mm/slab.c kmem_freepages(cachep, page);
cachep 1625 mm/slab.c if (OFF_SLAB(cachep))
cachep 1626 mm/slab.c kmem_cache_free(cachep->freelist_cache, freelist);
cachep 1629 mm/slab.c static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
cachep 1635 mm/slab.c slab_destroy(cachep, page);
cachep 1653 mm/slab.c static size_t calculate_slab_order(struct kmem_cache *cachep,
cachep 1688 mm/slab.c if (freelist_cache->size > cachep->size / 2)
cachep 1693 mm/slab.c cachep->num = num;
cachep 1694 mm/slab.c cachep->gfporder = gfporder;
cachep 1722 mm/slab.c struct kmem_cache *cachep, int entries, int batchcount)
cachep 1742 mm/slab.c static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
cachep 1745 mm/slab.c return enable_cpucache(cachep, gfp);
cachep 1747 mm/slab.c cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
cachep 1748 mm/slab.c if (!cachep->cpu_cache)
cachep 1756 mm/slab.c set_up_node(cachep, SIZE_NODE);
cachep 1761 mm/slab.c cachep->node[node] = kmalloc_node(
cachep 1763 mm/slab.c BUG_ON(!cachep->node[node]);
cachep 1764 mm/slab.c kmem_cache_node_init(cachep->node[node]);
cachep 1768 mm/slab.c cachep->node[numa_mem_id()]->next_reap =
cachep 1770 mm/slab.c ((unsigned long)cachep) % REAPTIMEOUT_NODE;
cachep 1772 mm/slab.c cpu_cache_get(cachep)->avail = 0;
cachep 1773 mm/slab.c cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
cachep 1774 mm/slab.c cpu_cache_get(cachep)->batchcount = 1;
cachep 1775 mm/slab.c cpu_cache_get(cachep)->touched = 0;
cachep 1776 mm/slab.c cachep->batchcount = 1;
cachep 1777 mm/slab.c cachep->limit = BOOT_CPUCACHE_ENTRIES;
cachep 1792 mm/slab.c struct kmem_cache *cachep;
cachep 1794 mm/slab.c cachep = find_mergeable(size, align, flags, name, ctor);
cachep 1795 mm/slab.c if (cachep) {
cachep 1796 mm/slab.c cachep->refcount++;
cachep 1802 mm/slab.c cachep->object_size = max_t(int, cachep->object_size, size);
cachep 1804 mm/slab.c return cachep;
cachep 1807 mm/slab.c static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
cachep 1812 mm/slab.c cachep->num = 0;
cachep 1819 mm/slab.c if (unlikely(slab_want_init_on_free(cachep)))
cachep 1822 mm/slab.c if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
cachep 1825 mm/slab.c left = calculate_slab_order(cachep, size,
cachep 1827 mm/slab.c if (!cachep->num)
cachep 1830 mm/slab.c if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
cachep 1833 mm/slab.c cachep->colour = left / cachep->colour_off;
cachep 1838 mm/slab.c static bool set_off_slab_cache(struct kmem_cache *cachep,
cachep 1843 mm/slab.c cachep->num = 0;
cachep 1856 mm/slab.c left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
cachep 1857 mm/slab.c if (!cachep->num)
cachep 1864 mm/slab.c if (left >= cachep->num * sizeof(freelist_idx_t))
cachep 1867 mm/slab.c cachep->colour = left / cachep->colour_off;
cachep 1872 mm/slab.c static bool set_on_slab_cache(struct kmem_cache *cachep,
cachep 1877 mm/slab.c cachep->num = 0;
cachep 1879 mm/slab.c left = calculate_slab_order(cachep, size, flags);
cachep 1880 mm/slab.c if (!cachep->num)
cachep 1883 mm/slab.c cachep->colour = left / cachep->colour_off;
cachep 1911 mm/slab.c int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
cachep 1916 mm/slab.c unsigned int size = cachep->size;
cachep 1949 mm/slab.c if (ralign < cachep->align) {
cachep 1950 mm/slab.c ralign = cachep->align;
cachep 1958 mm/slab.c cachep->align = ralign;
cachep 1959 mm/slab.c cachep->colour_off = cache_line_size();
cachep 1961 mm/slab.c if (cachep->colour_off < cachep->align)
cachep 1962 mm/slab.c cachep->colour_off = cachep->align;
cachep 1977 mm/slab.c cachep->obj_offset += sizeof(unsigned long long);
cachep 1992 mm/slab.c kasan_cache_create(cachep, &size, &flags);
cachep 1994 mm/slab.c size = ALIGN(size, cachep->align);
cachep 2000 mm/slab.c size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
cachep 2011 mm/slab.c size >= 256 && cachep->object_size > cache_line_size()) {
cachep 2015 mm/slab.c if (set_off_slab_cache(cachep, tmp_size, flags)) {
cachep 2017 mm/slab.c cachep->obj_offset += tmp_size - size;
cachep 2025 mm/slab.c if (set_objfreelist_slab_cache(cachep, size, flags)) {
cachep 2030 mm/slab.c if (set_off_slab_cache(cachep, size, flags)) {
cachep 2035 mm/slab.c if (set_on_slab_cache(cachep, size, flags))
cachep 2041 mm/slab.c cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
cachep 2042 mm/slab.c cachep->flags = flags;
cachep 2043 mm/slab.c cachep->allocflags = __GFP_COMP;
cachep 2045 mm/slab.c cachep->allocflags |= GFP_DMA;
cachep 2047 mm/slab.c cachep->allocflags |= GFP_DMA32;
cachep 2049 mm/slab.c cachep->allocflags |= __GFP_RECLAIMABLE;
cachep 2050 mm/slab.c cachep->size = size;
cachep 2051 mm/slab.c cachep->reciprocal_buffer_size = reciprocal_value(size);
cachep 2060 mm/slab.c (cachep->flags & SLAB_POISON) &&
cachep 2061 mm/slab.c is_debug_pagealloc_cache(cachep))
cachep 2062 mm/slab.c cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
cachep 2065 mm/slab.c if (OFF_SLAB(cachep)) {
cachep 2066 mm/slab.c cachep->freelist_cache =
cachep 2067 mm/slab.c kmalloc_slab(cachep->freelist_size, 0u);
cachep 2070 mm/slab.c err = setup_cpu_cache(cachep, gfp);
cachep 2072 mm/slab.c __kmem_cache_release(cachep);
cachep 2095 mm/slab.c static void check_spinlock_acquired(struct kmem_cache *cachep)
cachep 2099 mm/slab.c assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
cachep 2103 mm/slab.c static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
cachep 2107 mm/slab.c assert_spin_locked(&get_node(cachep, node)->list_lock);
cachep 2119 mm/slab.c static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
cachep 2131 mm/slab.c free_block(cachep, ac->entry, tofree, node, list);
cachep 2138 mm/slab.c struct kmem_cache *cachep = arg;
cachep 2145 mm/slab.c ac = cpu_cache_get(cachep);
cachep 2146 mm/slab.c n = get_node(cachep, node);
cachep 2148 mm/slab.c free_block(cachep, ac->entry, ac->avail, node, &list);
cachep 2150 mm/slab.c slabs_destroy(cachep, &list);
cachep 2154 mm/slab.c static void drain_cpu_caches(struct kmem_cache *cachep)
cachep 2160 mm/slab.c on_each_cpu(do_drain, cachep, 1);
cachep 2162 mm/slab.c for_each_kmem_cache_node(cachep, node, n)
cachep 2164 mm/slab.c drain_alien_cache(cachep, n->alien);
cachep 2166 mm/slab.c for_each_kmem_cache_node(cachep, node, n) {
cachep 2168 mm/slab.c drain_array_locked(cachep, n->shared, node, true, &list);
cachep 2171 mm/slab.c slabs_destroy(cachep, &list);
cachep 2227 mm/slab.c int __kmem_cache_shrink(struct kmem_cache *cachep)
cachep 2233 mm/slab.c drain_cpu_caches(cachep);
cachep 2236 mm/slab.c for_each_kmem_cache_node(cachep, node, n) {
cachep 2237 mm/slab.c drain_freelist(cachep, n, INT_MAX);
cachep 2246 mm/slab.c void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
cachep 2248 mm/slab.c __kmem_cache_shrink(cachep);
cachep 2256 mm/slab.c int __kmem_cache_shutdown(struct kmem_cache *cachep)
cachep 2258 mm/slab.c return __kmem_cache_shrink(cachep);
cachep 2261 mm/slab.c void __kmem_cache_release(struct kmem_cache *cachep)
cachep 2266 mm/slab.c cache_random_seq_destroy(cachep);
cachep 2268 mm/slab.c free_percpu(cachep->cpu_cache);
cachep 2271 mm/slab.c for_each_kmem_cache_node(cachep, i, n) {
cachep 2275 mm/slab.c cachep->node[i] = NULL;
cachep 2293 mm/slab.c static void *alloc_slabmgmt(struct kmem_cache *cachep,
cachep 2303 mm/slab.c if (OBJFREELIST_SLAB(cachep))
cachep 2305 mm/slab.c else if (OFF_SLAB(cachep)) {
cachep 2307 mm/slab.c freelist = kmem_cache_alloc_node(cachep->freelist_cache,
cachep 2313 mm/slab.c freelist = addr + (PAGE_SIZE << cachep->gfporder) -
cachep 2314 mm/slab.c cachep->freelist_size;
cachep 2331 mm/slab.c static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
cachep 2336 mm/slab.c for (i = 0; i < cachep->num; i++) {
cachep 2337 mm/slab.c void *objp = index_to_obj(cachep, page, i);
cachep 2339 mm/slab.c if (cachep->flags & SLAB_STORE_USER)
cachep 2340 mm/slab.c *dbg_userword(cachep, objp) = NULL;
cachep 2342 mm/slab.c if (cachep->flags & SLAB_RED_ZONE) {
cachep 2343 mm/slab.c *dbg_redzone1(cachep, objp) = RED_INACTIVE;
cachep 2344 mm/slab.c *dbg_redzone2(cachep, objp) = RED_INACTIVE;
cachep 2351 mm/slab.c if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
cachep 2352 mm/slab.c kasan_unpoison_object_data(cachep,
cachep 2353 mm/slab.c objp + obj_offset(cachep));
cachep 2354 mm/slab.c cachep->ctor(objp + obj_offset(cachep));
cachep 2356 mm/slab.c cachep, objp + obj_offset(cachep));
cachep 2359 mm/slab.c if (cachep->flags & SLAB_RED_ZONE) {
cachep 2360 mm/slab.c if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
cachep 2361 mm/slab.c slab_error(cachep, "constructor overwrote the end of an object");
cachep 2362 mm/slab.c if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
cachep 2363 mm/slab.c slab_error(cachep, "constructor overwrote the start of an object");
cachep 2366 mm/slab.c if (cachep->flags & SLAB_POISON) {
cachep 2367 mm/slab.c poison_obj(cachep, objp, POISON_FREE);
cachep 2368 mm/slab.c slab_kernel_map(cachep, objp, 0);
cachep 2390 mm/slab.c struct kmem_cache *cachep,
cachep 2400 mm/slab.c if (!cachep->random_seq) {
cachep 2404 mm/slab.c state->list = cachep->random_seq;
cachep 2431 mm/slab.c static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
cachep 2433 mm/slab.c unsigned int objfreelist = 0, i, rand, count = cachep->num;
cachep 2440 mm/slab.c precomputed = freelist_state_initialize(&state, cachep, count);
cachep 2443 mm/slab.c if (OBJFREELIST_SLAB(cachep)) {
cachep 2448 mm/slab.c page->freelist = index_to_obj(cachep, page, objfreelist) +
cachep 2449 mm/slab.c obj_offset(cachep);
cachep 2472 mm/slab.c if (OBJFREELIST_SLAB(cachep))
cachep 2473 mm/slab.c set_free_obj(page, cachep->num - 1, objfreelist);
cachep 2478 mm/slab.c static inline bool shuffle_freelist(struct kmem_cache *cachep,
cachep 2485 mm/slab.c static void cache_init_objs(struct kmem_cache *cachep,
cachep 2492 mm/slab.c cache_init_objs_debug(cachep, page);
cachep 2495 mm/slab.c shuffled = shuffle_freelist(cachep, page);
cachep 2497 mm/slab.c if (!shuffled && OBJFREELIST_SLAB(cachep)) {
cachep 2498 mm/slab.c page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
cachep 2499 mm/slab.c obj_offset(cachep);
cachep 2502 mm/slab.c for (i = 0; i < cachep->num; i++) {
cachep 2503 mm/slab.c objp = index_to_obj(cachep, page, i);
cachep 2504 mm/slab.c objp = kasan_init_slab_obj(cachep, objp);
cachep 2507 mm/slab.c if (DEBUG == 0 && cachep->ctor) {
cachep 2508 mm/slab.c kasan_unpoison_object_data(cachep, objp);
cachep 2509 mm/slab.c cachep->ctor(objp);
cachep 2510 mm/slab.c kasan_poison_object_data(cachep, objp);
cachep 2518 mm/slab.c static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
cachep 2522 mm/slab.c objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
cachep 2528 mm/slab.c static void slab_put_obj(struct kmem_cache *cachep,
cachep 2531 mm/slab.c unsigned int objnr = obj_to_index(cachep, page, objp);
cachep 2536 mm/slab.c for (i = page->active; i < cachep->num; i++) {
cachep 2539 mm/slab.c cachep->name, objp);
cachep 2546 mm/slab.c page->freelist = objp + obj_offset(cachep);
cachep 2567 mm/slab.c static struct page *cache_grow_begin(struct kmem_cache *cachep,
cachep 2588 mm/slab.c WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
cachep 2599 mm/slab.c page = kmem_getpages(cachep, local_flags, nodeid);
cachep 2604 mm/slab.c n = get_node(cachep, page_node);
cachep 2608 mm/slab.c if (n->colour_next >= cachep->colour)
cachep 2612 mm/slab.c if (offset >= cachep->colour)
cachep 2615 mm/slab.c offset *= cachep->colour_off;
cachep 2625 mm/slab.c freelist = alloc_slabmgmt(cachep, page, offset,
cachep 2627 mm/slab.c if (OFF_SLAB(cachep) && !freelist)
cachep 2630 mm/slab.c slab_map_pages(cachep, page, freelist);
cachep 2632 mm/slab.c cache_init_objs(cachep, page);
cachep 2640 mm/slab.c kmem_freepages(cachep, page);
cachep 2647 mm/slab.c static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
cachep 2658 mm/slab.c n = get_node(cachep, page_to_nid(page));
cachep 2666 mm/slab.c fixup_slab_list(cachep, n, page, &list);
cachep 2668 mm/slab.c STATS_INC_GROWN(cachep);
cachep 2669 mm/slab.c n->free_objects += cachep->num - page->active;
cachep 2672 mm/slab.c fixup_objfreelist_debug(cachep, &list);
cachep 2713 mm/slab.c static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
cachep 2719 mm/slab.c BUG_ON(virt_to_cache(objp) != cachep);
cachep 2721 mm/slab.c objp -= obj_offset(cachep);
cachep 2725 mm/slab.c if (cachep->flags & SLAB_RED_ZONE) {
cachep 2726 mm/slab.c verify_redzone_free(cachep, objp);
cachep 2727 mm/slab.c *dbg_redzone1(cachep, objp) = RED_INACTIVE;
cachep 2728 mm/slab.c *dbg_redzone2(cachep, objp) = RED_INACTIVE;
cachep 2730 mm/slab.c if (cachep->flags & SLAB_STORE_USER)
cachep 2731 mm/slab.c *dbg_userword(cachep, objp) = (void *)caller;
cachep 2733 mm/slab.c objnr = obj_to_index(cachep, page, objp);
cachep 2735 mm/slab.c BUG_ON(objnr >= cachep->num);
cachep 2736 mm/slab.c BUG_ON(objp != index_to_obj(cachep, page, objnr));
cachep 2738 mm/slab.c if (cachep->flags & SLAB_POISON) {
cachep 2739 mm/slab.c poison_obj(cachep, objp, POISON_FREE);
cachep 2740 mm/slab.c slab_kernel_map(cachep, objp, 0);
cachep 2750 mm/slab.c static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
cachep 2758 mm/slab.c objp = next - obj_offset(cachep);
cachep 2760 mm/slab.c poison_obj(cachep, objp, POISON_FREE);
cachep 2765 mm/slab.c static inline void fixup_slab_list(struct kmem_cache *cachep,
cachep 2771 mm/slab.c if (page->active == cachep->num) {
cachep 2773 mm/slab.c if (OBJFREELIST_SLAB(cachep)) {
cachep 2776 mm/slab.c if (cachep->flags & SLAB_POISON) {
cachep 2853 mm/slab.c static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
cachep 2870 mm/slab.c obj = slab_get_obj(cachep, page);
cachep 2873 mm/slab.c fixup_slab_list(cachep, n, page, &list);
cachep 2876 mm/slab.c fixup_objfreelist_debug(cachep, &list);
cachep 2885 mm/slab.c static __always_inline int alloc_block(struct kmem_cache *cachep,
cachep 2892 mm/slab.c BUG_ON(page->active >= cachep->num);
cachep 2894 mm/slab.c while (page->active < cachep->num && batchcount--) {
cachep 2895 mm/slab.c STATS_INC_ALLOCED(cachep);
cachep 2896 mm/slab.c STATS_INC_ACTIVE(cachep);
cachep 2897 mm/slab.c STATS_SET_HIGH(cachep);
cachep 2899 mm/slab.c ac->entry[ac->avail++] = slab_get_obj(cachep, page);
cachep 2905 mm/slab.c static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
cachep 2917 mm/slab.c ac = cpu_cache_get(cachep);
cachep 2927 mm/slab.c n = get_node(cachep, node);
cachep 2949 mm/slab.c check_spinlock_acquired(cachep);
cachep 2951 mm/slab.c batchcount = alloc_block(cachep, ac, page, batchcount);
cachep 2952 mm/slab.c fixup_slab_list(cachep, n, page, &list);
cachep 2959 mm/slab.c fixup_objfreelist_debug(cachep, &list);
cachep 2965 mm/slab.c void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
cachep 2971 mm/slab.c page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
cachep 2977 mm/slab.c ac = cpu_cache_get(cachep);
cachep 2979 mm/slab.c alloc_block(cachep, ac, page, batchcount);
cachep 2980 mm/slab.c cache_grow_end(cachep, page);
cachep 2990 mm/slab.c static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
cachep 2997 mm/slab.c static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
cachep 3000 mm/slab.c WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
cachep 3003 mm/slab.c if (cachep->flags & SLAB_POISON) {
cachep 3004 mm/slab.c check_poison_obj(cachep, objp);
cachep 3005 mm/slab.c slab_kernel_map(cachep, objp, 1);
cachep 3006 mm/slab.c poison_obj(cachep, objp, POISON_INUSE);
cachep 3008 mm/slab.c if (cachep->flags & SLAB_STORE_USER)
cachep 3009 mm/slab.c *dbg_userword(cachep, objp) = (void *)caller;
cachep 3011 mm/slab.c if (cachep->flags & SLAB_RED_ZONE) {
cachep 3012 mm/slab.c if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
cachep 3013 mm/slab.c *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
cachep 3014 mm/slab.c slab_error(cachep, "double free, or memory outside object was overwritten");
cachep 3016 mm/slab.c objp, *dbg_redzone1(cachep, objp),
cachep 3017 mm/slab.c *dbg_redzone2(cachep, objp));
cachep 3019 mm/slab.c *dbg_redzone1(cachep, objp) = RED_ACTIVE;
cachep 3020 mm/slab.c *dbg_redzone2(cachep, objp) = RED_ACTIVE;
cachep 3023 mm/slab.c objp += obj_offset(cachep);
cachep 3024 mm/slab.c if (cachep->ctor && cachep->flags & SLAB_POISON)
cachep 3025 mm/slab.c cachep->ctor(objp);
cachep 3037 mm/slab.c static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
cachep 3044 mm/slab.c ac = cpu_cache_get(cachep);
cachep 3049 mm/slab.c STATS_INC_ALLOCHIT(cachep);
cachep 3053 mm/slab.c STATS_INC_ALLOCMISS(cachep);
cachep 3054 mm/slab.c objp = cache_alloc_refill(cachep, flags);
cachep 3059 mm/slab.c ac = cpu_cache_get(cachep);
cachep 3079 mm/slab.c static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
cachep 3086 mm/slab.c if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
cachep 3091 mm/slab.c return ____cache_alloc_node(cachep, flags, nid_alloc);
cachep 3170 mm/slab.c static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
cachep 3179 mm/slab.c n = get_node(cachep, nodeid);
cachep 3188 mm/slab.c check_spinlock_acquired_node(cachep, nodeid);
cachep 3190 mm/slab.c STATS_INC_NODEALLOCS(cachep);
cachep 3191 mm/slab.c STATS_INC_ACTIVE(cachep);
cachep 3192 mm/slab.c STATS_SET_HIGH(cachep);
cachep 3194 mm/slab.c BUG_ON(page->active == cachep->num);
cachep 3196 mm/slab.c obj = slab_get_obj(cachep, page);
cachep 3199 mm/slab.c fixup_slab_list(cachep, n, page, &list);
cachep 3202 mm/slab.c fixup_objfreelist_debug(cachep, &list);
cachep 3207 mm/slab.c page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
cachep 3210 mm/slab.c obj = slab_get_obj(cachep, page);
cachep 3212 mm/slab.c cache_grow_end(cachep, page);
cachep 3214 mm/slab.c return obj ? obj : fallback_alloc(cachep, flags);
cachep 3218 mm/slab.c slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
cachep 3226 mm/slab.c cachep = slab_pre_alloc_hook(cachep, flags);
cachep 3227 mm/slab.c if (unlikely(!cachep))
cachep 3230 mm/slab.c cache_alloc_debugcheck_before(cachep, flags);
cachep 3236 mm/slab.c if (unlikely(!get_node(cachep, nodeid))) {
cachep 3238 mm/slab.c ptr = fallback_alloc(cachep, flags);
cachep 3249 mm/slab.c ptr = ____cache_alloc(cachep, flags);
cachep 3254 mm/slab.c ptr = ____cache_alloc_node(cachep, flags, nodeid);
cachep 3257 mm/slab.c ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
cachep 3259 mm/slab.c if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
cachep 3260 mm/slab.c memset(ptr, 0, cachep->object_size);
cachep 3262 mm/slab.c slab_post_alloc_hook(cachep, flags, 1, &ptr);
cachep 3291 mm/slab.c __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
cachep 3293 mm/slab.c return ____cache_alloc(cachep, flags);
cachep 3299 mm/slab.c slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
cachep 3305 mm/slab.c cachep = slab_pre_alloc_hook(cachep, flags);
cachep 3306 mm/slab.c if (unlikely(!cachep))
cachep 3309 mm/slab.c cache_alloc_debugcheck_before(cachep, flags);
cachep 3311 mm/slab.c objp = __do_cache_alloc(cachep, flags);
cachep 3313 mm/slab.c objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
cachep 3316 mm/slab.c if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
cachep 3317 mm/slab.c memset(objp, 0, cachep->object_size);
cachep 3319 mm/slab.c slab_post_alloc_hook(cachep, flags, 1, &objp);
cachep 3327 mm/slab.c static void free_block(struct kmem_cache *cachep, void **objpp,
cachep 3331 mm/slab.c struct kmem_cache_node *n = get_node(cachep, node);
cachep 3344 mm/slab.c check_spinlock_acquired_node(cachep, node);
cachep 3345 mm/slab.c slab_put_obj(cachep, page, objp);
cachep 3346 mm/slab.c STATS_DEC_ACTIVE(cachep);
cachep 3362 mm/slab.c n->free_objects -= cachep->num;
cachep 3371 mm/slab.c static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
cachep 3381 mm/slab.c n = get_node(cachep, node);
cachep 3396 mm/slab.c free_block(cachep, ac->entry, batchcount, node, &list);
cachep 3408 mm/slab.c STATS_SET_FREEABLE(cachep, i);
cachep 3412 mm/slab.c slabs_destroy(cachep, &list);
cachep 3421 mm/slab.c static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
cachep 3425 mm/slab.c if (kasan_slab_free(cachep, objp, _RET_IP_))
cachep 3428 mm/slab.c ___cache_free(cachep, objp, caller);
cachep 3431 mm/slab.c void ___cache_free(struct kmem_cache *cachep, void *objp,
cachep 3434 mm/slab.c struct array_cache *ac = cpu_cache_get(cachep);
cachep 3437 mm/slab.c if (unlikely(slab_want_init_on_free(cachep)))
cachep 3438 mm/slab.c memset(objp, 0, cachep->object_size);
cachep 3439 mm/slab.c kmemleak_free_recursive(objp, cachep->flags);
cachep 3440 mm/slab.c objp = cache_free_debugcheck(cachep, objp, caller);
cachep 3449 mm/slab.c if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
cachep 3453 mm/slab.c STATS_INC_FREEHIT(cachep);
cachep 3455 mm/slab.c STATS_INC_FREEMISS(cachep);
cachep 3456 mm/slab.c cache_flusharray(cachep, ac);
cachep 3463 mm/slab.c cache_free_pfmemalloc(cachep, page, objp);
cachep 3481 mm/slab.c void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
cachep 3483 mm/slab.c void *ret = slab_alloc(cachep, flags, _RET_IP_);
cachep 3486 mm/slab.c cachep->object_size, cachep->size, flags);
cachep 3544 mm/slab.c kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
cachep 3548 mm/slab.c ret = slab_alloc(cachep, flags, _RET_IP_);
cachep 3550 mm/slab.c ret = kasan_kmalloc(cachep, ret, size, flags);
cachep 3552 mm/slab.c size, cachep->size, flags);
cachep 3572 mm/slab.c void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
cachep 3574 mm/slab.c void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
cachep 3577 mm/slab.c cachep->object_size, cachep->size,
cachep 3585 mm/slab.c void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
cachep 3592 mm/slab.c ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
cachep 3594 mm/slab.c ret = kasan_kmalloc(cachep, ret, size, flags);
cachep 3596 mm/slab.c size, cachep->size,
cachep 3606 mm/slab.c struct kmem_cache *cachep;
cachep 3611 mm/slab.c cachep = kmalloc_slab(size, flags);
cachep 3612 mm/slab.c if (unlikely(ZERO_OR_NULL_PTR(cachep)))
cachep 3613 mm/slab.c return cachep;
cachep 3614 mm/slab.c ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
cachep 3615 mm/slab.c ret = kasan_kmalloc(cachep, ret, size, flags);
cachep 3645 mm/slab.c struct kmem_cache *cachep;
cachep 3650 mm/slab.c cachep = kmalloc_slab(size, flags);
cachep 3651 mm/slab.c if (unlikely(ZERO_OR_NULL_PTR(cachep)))
cachep 3652 mm/slab.c return cachep;
cachep 3653 mm/slab.c ret = slab_alloc(cachep, flags, caller);
cachep 3655 mm/slab.c ret = kasan_kmalloc(cachep, ret, size, flags);
cachep 3657 mm/slab.c size, cachep->size, flags);
cachep 3682 mm/slab.c void kmem_cache_free(struct kmem_cache *cachep, void *objp)
cachep 3685 mm/slab.c cachep = cache_from_obj(cachep, objp);
cachep 3686 mm/slab.c if (!cachep)
cachep 3690 mm/slab.c debug_check_no_locks_freed(objp, cachep->object_size);
cachep 3691 mm/slab.c if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
cachep 3692 mm/slab.c debug_check_no_obj_freed(objp, cachep->object_size);
cachep 3693 mm/slab.c __cache_free(cachep, objp, _RET_IP_);
cachep 3764 mm/slab.c static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
cachep 3771 mm/slab.c ret = setup_kmem_cache_node(cachep, node, gfp, true);
cachep 3780 mm/slab.c if (!cachep->list.next) {
cachep 3784 mm/slab.c n = get_node(cachep, node);
cachep 3789 mm/slab.c cachep->node[node] = NULL;
cachep 3798 mm/slab.c static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
cachep 3804 mm/slab.c cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
cachep 3808 mm/slab.c prev = cachep->cpu_cache;
cachep 3809 mm/slab.c cachep->cpu_cache = cpu_cache;
cachep 3818 mm/slab.c cachep->batchcount = batchcount;
cachep 3819 mm/slab.c cachep->limit = limit;
cachep 3820 mm/slab.c cachep->shared = shared;
cachep 3832 mm/slab.c n = get_node(cachep, node);
cachep 3834 mm/slab.c free_block(cachep, ac->entry, ac->avail, node, &list);
cachep 3836 mm/slab.c slabs_destroy(cachep, &list);
cachep 3841 mm/slab.c return setup_kmem_cache_nodes(cachep, gfp);
cachep 3844 mm/slab.c static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
cachep 3850 mm/slab.c ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
cachep 3855 mm/slab.c if ((ret < 0) || !is_root_cache(cachep))
cachep 3859 mm/slab.c for_each_memcg_cache(c, cachep) {
cachep 3868 mm/slab.c static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
cachep 3875 mm/slab.c err = cache_random_seq_create(cachep, cachep->num, gfp);
cachep 3879 mm/slab.c if (!is_root_cache(cachep)) {
cachep 3880 mm/slab.c struct kmem_cache *root = memcg_root_cache(cachep);
cachep 3897 mm/slab.c if (cachep->size > 131072)
cachep 3899 mm/slab.c else if (cachep->size > PAGE_SIZE)
cachep 3901 mm/slab.c else if (cachep->size > 1024)
cachep 3903 mm/slab.c else if (cachep->size > 256)
cachep 3918 mm/slab.c if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
cachep 3931 mm/slab.c err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
cachep 3935 mm/slab.c cachep->name, -err);
cachep 3944 mm/slab.c static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
cachep 3961 mm/slab.c drain_array_locked(cachep, ac, node, false, &list);
cachep 3964 mm/slab.c slabs_destroy(cachep, &list);
cachep 4036 mm/slab.c void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
cachep 4044 mm/slab.c for_each_kmem_cache_node(cachep, node, n) {
cachep 4057 mm/slab.c num_objs = total_slabs * cachep->num;
cachep 4066 mm/slab.c sinfo->limit = cachep->limit;
cachep 4067 mm/slab.c sinfo->batchcount = cachep->batchcount;
cachep 4068 mm/slab.c sinfo->shared = cachep->shared;
cachep 4069 mm/slab.c sinfo->objects_per_slab = cachep->num;
cachep 4070 mm/slab.c sinfo->cache_order = cachep->gfporder;
cachep 4073 mm/slab.c void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
cachep 4077 mm/slab.c unsigned long high = cachep->high_mark;
cachep 4078 mm/slab.c unsigned long allocs = cachep->num_allocations;
cachep 4079 mm/slab.c unsigned long grown = cachep->grown;
cachep 4080 mm/slab.c unsigned long reaped = cachep->reaped;
cachep 4081 mm/slab.c unsigned long errors = cachep->errors;
cachep 4082 mm/slab.c unsigned long max_freeable = cachep->max_freeable;
cachep 4083 mm/slab.c unsigned long node_allocs = cachep->node_allocs;
cachep 4084 mm/slab.c unsigned long node_frees = cachep->node_frees;
cachep 4085 mm/slab.c unsigned long overflows = cachep->node_overflow;
cachep 4094 mm/slab.c unsigned long allochit = atomic_read(&cachep->allochit);
cachep 4095 mm/slab.c unsigned long allocmiss = atomic_read(&cachep->allocmiss);
cachep 4096 mm/slab.c unsigned long freehit = atomic_read(&cachep->freehit);
cachep 4097 mm/slab.c unsigned long freemiss = atomic_read(&cachep->freemiss);
cachep 4120 mm/slab.c struct kmem_cache *cachep;
cachep 4139 mm/slab.c list_for_each_entry(cachep, &slab_caches, list) {
cachep 4140 mm/slab.c if (!strcmp(cachep->name, kbuf)) {
cachep 4145 mm/slab.c res = do_tune_cpucache(cachep, limit,
cachep 4170 mm/slab.c struct kmem_cache *cachep;
cachep 4177 mm/slab.c cachep = page->slab_cache;
cachep 4178 mm/slab.c objnr = obj_to_index(cachep, page, (void *)ptr);
cachep 4179 mm/slab.c BUG_ON(objnr >= cachep->num);
cachep 4182 mm/slab.c offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
cachep 4185 mm/slab.c if (offset >= cachep->useroffset &&
cachep 4186 mm/slab.c offset - cachep->useroffset <= cachep->usersize &&
cachep 4187 mm/slab.c n <= cachep->useroffset - offset + cachep->usersize)
cachep 4197 mm/slab.c offset <= cachep->object_size &&
cachep 4198 mm/slab.c n <= cachep->object_size - offset) {
cachep 4199 mm/slab.c usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
cachep 4203 mm/slab.c usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
cachep 506 mm/slab.h struct kmem_cache *cachep;
cachep 520 mm/slab.h cachep = virt_to_cache(x);
cachep 521 mm/slab.h WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
cachep 523 mm/slab.h __func__, s->name, cachep->name);
cachep 524 mm/slab.h return cachep;
cachep 662 mm/slab.h int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
cachep 664 mm/slab.h void cache_random_seq_destroy(struct kmem_cache *cachep);
cachep 666 mm/slab.h static inline int cache_random_seq_create(struct kmem_cache *cachep,
cachep 671 mm/slab.h static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
cachep 984 mm/slab_common.c int kmem_cache_shrink(struct kmem_cache *cachep)
cachep 990 mm/slab_common.c kasan_cache_shrink(cachep);
cachep 991 mm/slab_common.c ret = __kmem_cache_shrink(cachep);
cachep 1360 mm/slab_common.c int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
cachep 1365 mm/slab_common.c if (count < 2 || cachep->random_seq)
cachep 1368 mm/slab_common.c cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
cachep 1369 mm/slab_common.c if (!cachep->random_seq)
cachep 1375 mm/slab_common.c freelist_randomize(&state, cachep->random_seq, count);
cachep 1380 mm/slab_common.c void cache_random_seq_destroy(struct kmem_cache *cachep)
cachep 1382 mm/slab_common.c kfree(cachep->random_seq);
cachep 1383 mm/slab_common.c cachep->random_seq = NULL;
cachep 622 mm/slob.c void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
cachep 624 mm/slob.c return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
cachep 635 mm/slob.c void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
cachep 637 mm/slob.c return slob_alloc_node(cachep, gfp, node);
cachep 61 net/ipv4/inet_hashtables.c struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
cachep 67 net/ipv4/inet_hashtables.c struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
cachep 84 net/ipv4/inet_hashtables.c void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
cachep 88 net/ipv4/inet_hashtables.c kmem_cache_free(cachep, tb);
cachep 28 tools/testing/radix-tree/linux.c void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
cachep 35 tools/testing/radix-tree/linux.c pthread_mutex_lock(&cachep->lock);
cachep 36 tools/testing/radix-tree/linux.c if (cachep->nr_objs) {
cachep 37 tools/testing/radix-tree/linux.c cachep->nr_objs--;
cachep 38 tools/testing/radix-tree/linux.c node = cachep->objs;
cachep 39 tools/testing/radix-tree/linux.c cachep->objs = node->parent;
cachep 40 tools/testing/radix-tree/linux.c pthread_mutex_unlock(&cachep->lock);
cachep 43 tools/testing/radix-tree/linux.c pthread_mutex_unlock(&cachep->lock);
cachep 44 tools/testing/radix-tree/linux.c node = malloc(cachep->size);
cachep 45 tools/testing/radix-tree/linux.c if (cachep->ctor)
cachep 46 tools/testing/radix-tree/linux.c cachep->ctor(node);
cachep 55 tools/testing/radix-tree/linux.c void kmem_cache_free(struct kmem_cache *cachep, void *objp)
cachep 61 tools/testing/radix-tree/linux.c pthread_mutex_lock(&cachep->lock);
cachep 62 tools/testing/radix-tree/linux.c if (cachep->nr_objs > 10) {
cachep 63 tools/testing/radix-tree/linux.c memset(objp, POISON_FREE, cachep->size);
cachep 67 tools/testing/radix-tree/linux.c cachep->nr_objs++;
cachep 68 tools/testing/radix-tree/linux.c node->parent = cachep->objs;
cachep 69 tools/testing/radix-tree/linux.c cachep->objs = node;
cachep 71 tools/testing/radix-tree/linux.c pthread_mutex_unlock(&cachep->lock);
cachep 20 tools/testing/radix-tree/linux/slab.h void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
cachep 21 tools/testing/radix-tree/linux/slab.h void kmem_cache_free(struct kmem_cache *cachep, void *objp);
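
Every call site indexed above follows the same slab-cache lifecycle: create a dedicated kmem_cache for one object type, allocate and free objects from it, and destroy the cache on teardown. The sketch below is illustrative only and is not taken from any of the files listed; the cache name "foo_cache", struct foo, and the foo_* helpers are hypothetical.

/* Illustrative sketch, assuming a hypothetical object type "struct foo". */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	int id;
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	/* One dedicated cache per object type; kmem_cache_create() returns NULL on failure. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

static struct foo *foo_alloc(gfp_t gfp)
{
	/* Hand back a zeroed object from the per-type cache. */
	return kmem_cache_zalloc(foo_cachep, gfp);
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cachep, f);
}

static void __exit foo_cache_exit(void)
{
	/* All objects must have been freed before the cache itself is destroyed. */
	kmem_cache_destroy(foo_cachep);
}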