Lines matching refs:cachep: an identifier cross-reference into the SLAB allocator (mm/slab.c). Each entry gives the source line number, the matching line, the enclosing function, and whether cachep is used there as an argument or a local.
247 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
249 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
250 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
270 #define MAKE_LIST(cachep, listp, slab, nodeid) \ argument
273 list_splice(&get_node(cachep, nodeid)->slab, listp); \
276 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ argument
278 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
279 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
280 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
354 static int obj_offset(struct kmem_cache *cachep) in obj_offset() argument
356 return cachep->obj_offset; in obj_offset()
359 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) in dbg_redzone1() argument
361 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone1()
362 return (unsigned long long*) (objp + obj_offset(cachep) - in dbg_redzone1()
366 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) in dbg_redzone2() argument
368 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone2()
369 if (cachep->flags & SLAB_STORE_USER) in dbg_redzone2()
370 return (unsigned long long *)(objp + cachep->size - in dbg_redzone2()
373 return (unsigned long long *) (objp + cachep->size - in dbg_redzone2()
377 static void **dbg_userword(struct kmem_cache *cachep, void *objp) in dbg_userword() argument
379 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); in dbg_userword()
380 return (void **)(objp + cachep->size - BYTES_PER_WORD); in dbg_userword()
386 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) argument
387 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) argument
388 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) argument
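The dbg_redzone1()/dbg_redzone2()/dbg_userword() helpers above implement slab's object-debugging layout: with SLAB_RED_ZONE a guard word sits on each side of the payload, and with SLAB_STORE_USER a caller-address word follows it. A minimal user-space sketch of the red-zone part only (the struct layout, guard values, and single-object pool here are illustrative assumptions, not the kernel's layout):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Guard patterns bracketing every object; the kernel uses dedicated
     * poison values, these are just recognisable placeholders. */
    #define RED_INACTIVE 0xbadcab1ebadcab1eULL   /* object is free */
    #define RED_ACTIVE   0xfeedfacefeedfaceULL   /* object is live */

    /* One object laid out in the spirit of SLAB_RED_ZONE debugging: a guard
     * word on each side of the payload.  (The SLAB_STORE_USER caller-address
     * word that would follow redzone2 is omitted.) */
    struct dbg_obj {
        uint64_t redzone1;
        unsigned char payload[32];
        uint64_t redzone2;
    };

    static struct dbg_obj pool = { RED_INACTIVE, { 0 }, RED_INACTIVE };

    static void *dbg_alloc(void)
    {
        /* an active pattern here would mean the object is already in use */
        assert(pool.redzone1 == RED_INACTIVE && pool.redzone2 == RED_INACTIVE);
        pool.redzone1 = RED_ACTIVE;
        pool.redzone2 = RED_ACTIVE;
        return pool.payload;
    }

    static void dbg_free(void *objp)
    {
        struct dbg_obj *o = (struct dbg_obj *)
            ((char *)objp - offsetof(struct dbg_obj, payload));

        /* an overrun of payload[] corrupts redzone2 and trips this check;
         * an inactive pattern would mean a double free */
        assert(o->redzone1 == RED_ACTIVE && o->redzone2 == RED_ACTIVE);
        o->redzone1 = RED_INACTIVE;
        o->redzone2 = RED_INACTIVE;
    }

    int main(void)
    {
        void *p = dbg_alloc();

        memset(p, 0x5a, 32);   /* stays inside the payload, guards untouched */
        dbg_free(p);
        puts("redzones intact");
        return 0;
    }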
401 struct kmem_cache *cachep = page->slab_cache; in set_obj_status() local
403 freelist_size = cachep->num * sizeof(freelist_idx_t); in set_obj_status()
412 struct kmem_cache *cachep = page->slab_cache; in get_obj_status() local
414 freelist_size = cachep->num * sizeof(freelist_idx_t); in get_obj_status()
472 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) in cpu_cache_get() argument
474 return this_cpu_ptr(cachep->cpu_cache); in cpu_cache_get()
562 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) argument
564 static void __slab_error(const char *function, struct kmem_cache *cachep, in __slab_error() argument
568 function, cachep->name, msg); in __slab_error()
695 static void recheck_pfmemalloc_active(struct kmem_cache *cachep, in recheck_pfmemalloc_active() argument
698 struct kmem_cache_node *n = get_node(cachep, numa_mem_id()); in recheck_pfmemalloc_active()
723 static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, in __ac_get_obj() argument
753 n = get_node(cachep, numa_mem_id()); in __ac_get_obj()
758 recheck_pfmemalloc_active(cachep, ac); in __ac_get_obj()
770 static inline void *ac_get_obj(struct kmem_cache *cachep, in ac_get_obj() argument
776 objp = __ac_get_obj(cachep, ac, flags, force_refill); in ac_get_obj()
783 static noinline void *__ac_put_obj(struct kmem_cache *cachep, in __ac_put_obj() argument
796 static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac, in ac_put_obj() argument
800 objp = __ac_put_obj(cachep, ac, objp); in ac_put_obj()
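ac_get_obj()/ac_put_obj() work on the per-CPU array_cache returned by cpu_cache_get(): a bounded LIFO of object pointers with avail/limit/batchcount bookkeeping, so the common alloc/free case never touches a node list_lock. A stand-alone sketch of that structure (the field set is trimmed and the sizes in main() are made up):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified model of the per-CPU array_cache; the real one is reached
     * via this_cpu_ptr(cachep->cpu_cache). */
    struct array_cache {
        unsigned int avail;       /* objects currently cached */
        unsigned int limit;       /* capacity before a flush is needed */
        unsigned int batchcount;  /* objects moved per refill/flush */
        bool touched;             /* recently used; consulted by the reaper */
        void *entry[];            /* the cached objects, LIFO order */
    };

    /* Allocation fast path: pop the most recently freed (cache-hot) object. */
    static void *ac_get(struct array_cache *ac)
    {
        if (ac->avail == 0)
            return NULL;          /* caller refills from the node lists */
        ac->touched = true;
        return ac->entry[--ac->avail];
    }

    /* Free fast path: push the object; a full array means the caller must
     * flush a batch back to the node lists first. */
    static bool ac_put(struct array_cache *ac, void *objp)
    {
        if (ac->avail >= ac->limit)
            return false;
        ac->entry[ac->avail++] = objp;
        return true;
    }

    int main(void)
    {
        unsigned int limit = 4;
        struct array_cache *ac = malloc(sizeof(*ac) + limit * sizeof(void *));
        int obj;

        ac->avail = 0;
        ac->limit = limit;
        ac->batchcount = 2;
        ac->touched = false;

        ac_put(ac, &obj);
        printf("fast-path alloc returns %p (same object)\n", ac_get(ac));
        free(ac);
        return 0;
    }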
830 #define drain_alien_cache(cachep, alien) do { } while (0) argument
831 #define reap_alien(cachep, n) do { } while (0) argument
843 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) in cache_free_alien() argument
848 static inline void *alternate_node_alloc(struct kmem_cache *cachep, in alternate_node_alloc() argument
854 static inline void *____cache_alloc_node(struct kmem_cache *cachep, in ____cache_alloc_node() argument
919 static void __drain_alien_cache(struct kmem_cache *cachep, in __drain_alien_cache() argument
923 struct kmem_cache_node *n = get_node(cachep, node); in __drain_alien_cache()
935 free_block(cachep, ac->entry, ac->avail, node, list); in __drain_alien_cache()
944 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) in reap_alien() argument
957 __drain_alien_cache(cachep, ac, node, &list); in reap_alien()
959 slabs_destroy(cachep, &list); in reap_alien()
965 static void drain_alien_cache(struct kmem_cache *cachep, in drain_alien_cache() argument
980 __drain_alien_cache(cachep, ac, i, &list); in drain_alien_cache()
982 slabs_destroy(cachep, &list); in drain_alien_cache()
987 static int __cache_free_alien(struct kmem_cache *cachep, void *objp, in __cache_free_alien() argument
995 n = get_node(cachep, node); in __cache_free_alien()
996 STATS_INC_NODEFREES(cachep); in __cache_free_alien()
1002 STATS_INC_ACOVERFLOW(cachep); in __cache_free_alien()
1003 __drain_alien_cache(cachep, ac, page_node, &list); in __cache_free_alien()
1005 ac_put_obj(cachep, ac, objp); in __cache_free_alien()
1007 slabs_destroy(cachep, &list); in __cache_free_alien()
1009 n = get_node(cachep, page_node); in __cache_free_alien()
1011 free_block(cachep, &objp, 1, page_node, &list); in __cache_free_alien()
1013 slabs_destroy(cachep, &list); in __cache_free_alien()
1018 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) in cache_free_alien() argument
1029 return __cache_free_alien(cachep, objp, node, page_node); in cache_free_alien()
1053 struct kmem_cache *cachep; in init_cache_node_node() local
1057 list_for_each_entry(cachep, &slab_caches, list) { in init_cache_node_node()
1063 n = get_node(cachep, node); in init_cache_node_node()
1070 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in init_cache_node_node()
1077 cachep->node[node] = n; in init_cache_node_node()
1083 cachep->batchcount + cachep->num; in init_cache_node_node()
1089 static inline int slabs_tofree(struct kmem_cache *cachep, in slabs_tofree() argument
1092 return (n->free_objects + cachep->num - 1) / cachep->num; in slabs_tofree()
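slabs_tofree() above is plain round-up division: with, say, 30 objects per slab and 61 free objects it returns (61 + 30 - 1) / 30 = 3, an upper bound on how many entirely free slabs those objects could occupy, which is what drain_freelist() is then asked to scan for.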
1097 struct kmem_cache *cachep; in cpuup_canceled() local
1102 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_canceled()
1108 n = get_node(cachep, node); in cpuup_canceled()
1115 n->free_limit -= cachep->batchcount; in cpuup_canceled()
1118 nc = per_cpu_ptr(cachep->cpu_cache, cpu); in cpuup_canceled()
1120 free_block(cachep, nc->entry, nc->avail, node, &list); in cpuup_canceled()
1131 free_block(cachep, shared->entry, in cpuup_canceled()
1143 drain_alien_cache(cachep, alien); in cpuup_canceled()
1148 slabs_destroy(cachep, &list); in cpuup_canceled()
1155 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_canceled()
1156 n = get_node(cachep, node); in cpuup_canceled()
1159 drain_freelist(cachep, n, slabs_tofree(cachep, n)); in cpuup_canceled()
1165 struct kmem_cache *cachep; in cpuup_prepare() local
1184 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_prepare()
1188 if (cachep->shared) { in cpuup_prepare()
1190 cachep->shared * cachep->batchcount, in cpuup_prepare()
1196 alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL); in cpuup_prepare()
1202 n = get_node(cachep, node); in cpuup_prepare()
1301 struct kmem_cache *cachep; in drain_cache_node_node() local
1304 list_for_each_entry(cachep, &slab_caches, list) { in drain_cache_node_node()
1307 n = get_node(cachep, node); in drain_cache_node_node()
1311 drain_freelist(cachep, n, slabs_tofree(cachep, n)); in drain_cache_node_node()
1358 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, in init_list() argument
1372 MAKE_ALL_LISTS(cachep, ptr, nodeid); in init_list()
1373 cachep->node[nodeid] = ptr; in init_list()
1380 static void __init set_up_node(struct kmem_cache *cachep, int index) in set_up_node() argument
1385 cachep->node[node] = &init_kmem_cache_node[index + node]; in set_up_node()
1386 cachep->node[node]->next_reap = jiffies + in set_up_node()
1388 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in set_up_node()
1477 struct kmem_cache *cachep; in kmem_cache_init_late() local
1483 list_for_each_entry(cachep, &slab_caches, list) in kmem_cache_init_late()
1484 if (enable_cpucache(cachep, GFP_NOWAIT)) in kmem_cache_init_late()
1528 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) in slab_out_of_memory() argument
1545 cachep->name, cachep->size, cachep->gfporder); in slab_out_of_memory()
1547 for_each_kmem_cache_node(cachep, node, n) { in slab_out_of_memory()
1553 active_objs += cachep->num; in slab_out_of_memory()
1567 num_objs = num_slabs * cachep->num; in slab_out_of_memory()
1584 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, in kmem_getpages() argument
1590 flags |= cachep->allocflags; in kmem_getpages()
1591 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) in kmem_getpages()
1594 if (memcg_charge_slab(cachep, flags, cachep->gfporder)) in kmem_getpages()
1597 page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); in kmem_getpages()
1599 memcg_uncharge_slab(cachep, cachep->gfporder); in kmem_getpages()
1600 slab_out_of_memory(cachep, flags, nodeid); in kmem_getpages()
1608 nr_pages = (1 << cachep->gfporder); in kmem_getpages()
1609 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) in kmem_getpages()
1619 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { in kmem_getpages()
1620 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); in kmem_getpages()
1622 if (cachep->ctor) in kmem_getpages()
1634 static void kmem_freepages(struct kmem_cache *cachep, struct page *page) in kmem_freepages() argument
1636 const unsigned long nr_freed = (1 << cachep->gfporder); in kmem_freepages()
1638 kmemcheck_free_shadow(page, cachep->gfporder); in kmem_freepages()
1640 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) in kmem_freepages()
1655 __free_pages(page, cachep->gfporder); in kmem_freepages()
1656 memcg_uncharge_slab(cachep, cachep->gfporder); in kmem_freepages()
1661 struct kmem_cache *cachep; in kmem_rcu_free() local
1665 cachep = page->slab_cache; in kmem_rcu_free()
1667 kmem_freepages(cachep, page); in kmem_rcu_free()
1673 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr, in store_stackinfo() argument
1676 int size = cachep->object_size; in store_stackinfo()
1678 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; in store_stackinfo()
1706 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) in poison_obj() argument
1708 int size = cachep->object_size; in poison_obj()
1709 addr = &((char *)addr)[obj_offset(cachep)]; in poison_obj()
1749 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) in print_objinfo() argument
1754 if (cachep->flags & SLAB_RED_ZONE) { in print_objinfo()
1756 *dbg_redzone1(cachep, objp), in print_objinfo()
1757 *dbg_redzone2(cachep, objp)); in print_objinfo()
1760 if (cachep->flags & SLAB_STORE_USER) { in print_objinfo()
1762 *dbg_userword(cachep, objp), in print_objinfo()
1763 *dbg_userword(cachep, objp)); in print_objinfo()
1765 realobj = (char *)objp + obj_offset(cachep); in print_objinfo()
1766 size = cachep->object_size; in print_objinfo()
1776 static void check_poison_obj(struct kmem_cache *cachep, void *objp) in check_poison_obj() argument
1782 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1783 size = cachep->object_size; in check_poison_obj()
1796 print_tainted(), cachep->name, realobj, size); in check_poison_obj()
1797 print_objinfo(cachep, objp, 0); in check_poison_obj()
1819 objnr = obj_to_index(cachep, page, objp); in check_poison_obj()
1821 objp = index_to_obj(cachep, page, objnr - 1); in check_poison_obj()
1822 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1825 print_objinfo(cachep, objp, 2); in check_poison_obj()
1827 if (objnr + 1 < cachep->num) { in check_poison_obj()
1828 objp = index_to_obj(cachep, page, objnr + 1); in check_poison_obj()
1829 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1832 print_objinfo(cachep, objp, 2); in check_poison_obj()
1839 static void slab_destroy_debugcheck(struct kmem_cache *cachep, in slab_destroy_debugcheck() argument
1843 for (i = 0; i < cachep->num; i++) { in slab_destroy_debugcheck()
1844 void *objp = index_to_obj(cachep, page, i); in slab_destroy_debugcheck()
1846 if (cachep->flags & SLAB_POISON) { in slab_destroy_debugcheck()
1848 if (cachep->size % PAGE_SIZE == 0 && in slab_destroy_debugcheck()
1849 OFF_SLAB(cachep)) in slab_destroy_debugcheck()
1851 cachep->size / PAGE_SIZE, 1); in slab_destroy_debugcheck()
1853 check_poison_obj(cachep, objp); in slab_destroy_debugcheck()
1855 check_poison_obj(cachep, objp); in slab_destroy_debugcheck()
1858 if (cachep->flags & SLAB_RED_ZONE) { in slab_destroy_debugcheck()
1859 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) in slab_destroy_debugcheck()
1860 slab_error(cachep, "start of a freed object " in slab_destroy_debugcheck()
1862 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) in slab_destroy_debugcheck()
1863 slab_error(cachep, "end of a freed object " in slab_destroy_debugcheck()
1869 static void slab_destroy_debugcheck(struct kmem_cache *cachep, in slab_destroy_debugcheck() argument
1884 static void slab_destroy(struct kmem_cache *cachep, struct page *page) in slab_destroy() argument
1889 slab_destroy_debugcheck(cachep, page); in slab_destroy()
1890 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { in slab_destroy()
1903 kmem_freepages(cachep, page); in slab_destroy()
1910 if (OFF_SLAB(cachep)) in slab_destroy()
1911 kmem_cache_free(cachep->freelist_cache, freelist); in slab_destroy()
1914 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list) in slabs_destroy() argument
1920 slab_destroy(cachep, page); in slabs_destroy()
1937 static size_t calculate_slab_order(struct kmem_cache *cachep, in calculate_slab_order() argument
1973 cachep->num = num; in calculate_slab_order()
1974 cachep->gfporder = gfporder; in calculate_slab_order()
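calculate_slab_order() above searches page orders for the first one that packs an acceptable number of objects with tolerable waste, then records the winner in cachep->num and cachep->gfporder. A rough stand-alone estimate of that trade-off; it ignores the freelist, colouring and alignment overhead the kernel accounts for, and assumes a 4 KiB page:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL   /* assumed; the real value is architecture-dependent */

    /* Estimate objects per slab and leftover bytes for a given page order. */
    static void estimate(unsigned long obj_size, unsigned int order,
                         unsigned long *num, unsigned long *left_over)
    {
        unsigned long slab_size = PAGE_SIZE << order;

        *num = slab_size / obj_size;
        *left_over = slab_size - *num * obj_size;
    }

    int main(void)
    {
        unsigned long num, left;

        for (unsigned int order = 0; order <= 3; order++) {
            estimate(700, order, &num, &left);   /* 700-byte objects, for example */
            printf("order %u: %lu objects per slab, %lu bytes left over\n",
                   order, num, left);
        }
        return 0;
    }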
2002 struct kmem_cache *cachep, int entries, int batchcount) in alloc_kmem_cache_cpus() argument
2022 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) in setup_cpu_cache() argument
2025 return enable_cpucache(cachep, gfp); in setup_cpu_cache()
2027 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); in setup_cpu_cache()
2028 if (!cachep->cpu_cache) in setup_cpu_cache()
2036 set_up_node(cachep, SIZE_NODE); in setup_cpu_cache()
2041 cachep->node[node] = kmalloc_node( in setup_cpu_cache()
2043 BUG_ON(!cachep->node[node]); in setup_cpu_cache()
2044 kmem_cache_node_init(cachep->node[node]); in setup_cpu_cache()
2048 cachep->node[numa_mem_id()]->next_reap = in setup_cpu_cache()
2050 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in setup_cpu_cache()
2052 cpu_cache_get(cachep)->avail = 0; in setup_cpu_cache()
2053 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; in setup_cpu_cache()
2054 cpu_cache_get(cachep)->batchcount = 1; in setup_cpu_cache()
2055 cpu_cache_get(cachep)->touched = 0; in setup_cpu_cache()
2056 cachep->batchcount = 1; in setup_cpu_cache()
2057 cachep->limit = BOOT_CPUCACHE_ENTRIES; in setup_cpu_cache()
2072 struct kmem_cache *cachep; in __kmem_cache_alias() local
2074 cachep = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
2075 if (cachep) { in __kmem_cache_alias()
2076 cachep->refcount++; in __kmem_cache_alias()
2082 cachep->object_size = max_t(int, cachep->object_size, size); in __kmem_cache_alias()
2084 return cachep; in __kmem_cache_alias()
2109 __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) in __kmem_cache_create() argument
2115 size_t size = cachep->size; in __kmem_cache_create()
2154 if (ralign < cachep->align) { in __kmem_cache_create()
2155 ralign = cachep->align; in __kmem_cache_create()
2163 cachep->align = ralign; in __kmem_cache_create()
2178 cachep->obj_offset += sizeof(unsigned long long); in __kmem_cache_create()
2200 size >= 256 && cachep->object_size > cache_line_size() && in __kmem_cache_create()
2201 ALIGN(size, cachep->align) < PAGE_SIZE) { in __kmem_cache_create()
2202 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); in __kmem_cache_create()
2222 size = ALIGN(size, cachep->align); in __kmem_cache_create()
2228 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); in __kmem_cache_create()
2230 left_over = calculate_slab_order(cachep, size, cachep->align, flags); in __kmem_cache_create()
2232 if (!cachep->num) in __kmem_cache_create()
2235 freelist_size = calculate_freelist_size(cachep->num, cachep->align); in __kmem_cache_create()
2248 freelist_size = calculate_freelist_size(cachep->num, 0); in __kmem_cache_create()
2260 cachep->colour_off = cache_line_size(); in __kmem_cache_create()
2262 if (cachep->colour_off < cachep->align) in __kmem_cache_create()
2263 cachep->colour_off = cachep->align; in __kmem_cache_create()
2264 cachep->colour = left_over / cachep->colour_off; in __kmem_cache_create()
2265 cachep->freelist_size = freelist_size; in __kmem_cache_create()
2266 cachep->flags = flags; in __kmem_cache_create()
2267 cachep->allocflags = __GFP_COMP; in __kmem_cache_create()
2269 cachep->allocflags |= GFP_DMA; in __kmem_cache_create()
2270 cachep->size = size; in __kmem_cache_create()
2271 cachep->reciprocal_buffer_size = reciprocal_value(size); in __kmem_cache_create()
2274 cachep->freelist_cache = kmalloc_slab(freelist_size, 0u); in __kmem_cache_create()
2282 BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache)); in __kmem_cache_create()
2285 err = setup_cpu_cache(cachep, gfp); in __kmem_cache_create()
2287 __kmem_cache_shutdown(cachep); in __kmem_cache_create()
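One detail from the __kmem_cache_create() entries above: caching reciprocal_value(size) in cachep->reciprocal_buffer_size lets obj_to_index() turn "offset within the slab divided by object size" into a multiply-and-shift. A sketch of that reciprocal-divide trick; the exact formula below is an assumption (the kernel's reciprocal_divide() is more careful), but it is exact for the slab case, where the dividend is always a whole number of objects:

    #include <stdint.h>
    #include <stdio.h>

    /* Precompute an approximate reciprocal of the object size once per cache
     * so the hot path replaces a division by a multiply-and-shift.  This
     * simple form is exact whenever the dividend is a multiple of the
     * divisor, which is the obj_to_index() case. */
    static uint32_t reciprocal_value(uint32_t size)
    {
        return (uint32_t)(0xffffffffULL / size + 1);
    }

    static uint32_t reciprocal_divide(uint32_t offset, uint32_t reciprocal)
    {
        return (uint32_t)(((uint64_t)offset * reciprocal) >> 32);
    }

    int main(void)
    {
        uint32_t size = 192;                 /* hypothetical object size */
        uint32_t r = reciprocal_value(size);

        for (uint32_t idx = 0; idx < 5; idx++) {
            uint32_t offset = idx * size;    /* byte offset of object idx */
            printf("offset %u -> index %u\n", offset,
                   reciprocal_divide(offset, r));
        }
        return 0;
    }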
2305 static void check_spinlock_acquired(struct kmem_cache *cachep) in check_spinlock_acquired() argument
2309 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); in check_spinlock_acquired()
2313 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) in check_spinlock_acquired_node() argument
2317 assert_spin_locked(&get_node(cachep, node)->list_lock); in check_spinlock_acquired_node()
2328 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2334 struct kmem_cache *cachep = arg; in do_drain() local
2341 ac = cpu_cache_get(cachep); in do_drain()
2342 n = get_node(cachep, node); in do_drain()
2344 free_block(cachep, ac->entry, ac->avail, node, &list); in do_drain()
2346 slabs_destroy(cachep, &list); in do_drain()
2350 static void drain_cpu_caches(struct kmem_cache *cachep) in drain_cpu_caches() argument
2355 on_each_cpu(do_drain, cachep, 1); in drain_cpu_caches()
2357 for_each_kmem_cache_node(cachep, node, n) in drain_cpu_caches()
2359 drain_alien_cache(cachep, n->alien); in drain_cpu_caches()
2361 for_each_kmem_cache_node(cachep, node, n) in drain_cpu_caches()
2362 drain_array(cachep, n, n->shared, 1, node); in drain_cpu_caches()
2406 int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate) in __kmem_cache_shrink() argument
2412 drain_cpu_caches(cachep); in __kmem_cache_shrink()
2415 for_each_kmem_cache_node(cachep, node, n) { in __kmem_cache_shrink()
2416 drain_freelist(cachep, n, slabs_tofree(cachep, n)); in __kmem_cache_shrink()
2424 int __kmem_cache_shutdown(struct kmem_cache *cachep) in __kmem_cache_shutdown() argument
2428 int rc = __kmem_cache_shrink(cachep, false); in __kmem_cache_shutdown()
2433 free_percpu(cachep->cpu_cache); in __kmem_cache_shutdown()
2436 for_each_kmem_cache_node(cachep, i, n) { in __kmem_cache_shutdown()
2440 cachep->node[i] = NULL; in __kmem_cache_shutdown()
2459 static void *alloc_slabmgmt(struct kmem_cache *cachep, in alloc_slabmgmt() argument
2466 if (OFF_SLAB(cachep)) { in alloc_slabmgmt()
2468 freelist = kmem_cache_alloc_node(cachep->freelist_cache, in alloc_slabmgmt()
2474 colour_off += cachep->freelist_size; in alloc_slabmgmt()
2492 static void cache_init_objs(struct kmem_cache *cachep, in cache_init_objs() argument
2497 for (i = 0; i < cachep->num; i++) { in cache_init_objs()
2498 void *objp = index_to_obj(cachep, page, i); in cache_init_objs()
2501 if (cachep->flags & SLAB_POISON) in cache_init_objs()
2502 poison_obj(cachep, objp, POISON_FREE); in cache_init_objs()
2503 if (cachep->flags & SLAB_STORE_USER) in cache_init_objs()
2504 *dbg_userword(cachep, objp) = NULL; in cache_init_objs()
2506 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs()
2507 *dbg_redzone1(cachep, objp) = RED_INACTIVE; in cache_init_objs()
2508 *dbg_redzone2(cachep, objp) = RED_INACTIVE; in cache_init_objs()
2515 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) in cache_init_objs()
2516 cachep->ctor(objp + obj_offset(cachep)); in cache_init_objs()
2518 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs()
2519 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) in cache_init_objs()
2520 slab_error(cachep, "constructor overwrote the" in cache_init_objs()
2522 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) in cache_init_objs()
2523 slab_error(cachep, "constructor overwrote the" in cache_init_objs()
2526 if ((cachep->size % PAGE_SIZE) == 0 && in cache_init_objs()
2527 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) in cache_init_objs()
2529 cachep->size / PAGE_SIZE, 0); in cache_init_objs()
2531 if (cachep->ctor) in cache_init_objs()
2532 cachep->ctor(objp); in cache_init_objs()
2539 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) in kmem_flagcheck() argument
2543 BUG_ON(!(cachep->allocflags & GFP_DMA)); in kmem_flagcheck()
2545 BUG_ON(cachep->allocflags & GFP_DMA); in kmem_flagcheck()
2549 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page, in slab_get_obj() argument
2554 objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); in slab_get_obj()
2563 static void slab_put_obj(struct kmem_cache *cachep, struct page *page, in slab_put_obj() argument
2566 unsigned int objnr = obj_to_index(cachep, page, objp); in slab_put_obj()
2574 for (i = page->active; i < cachep->num; i++) { in slab_put_obj()
2577 "'%s', objp %p\n", cachep->name, objp); in slab_put_obj()
2602 static int cache_grow(struct kmem_cache *cachep, in cache_grow() argument
2622 n = get_node(cachep, nodeid); in cache_grow()
2628 if (n->colour_next >= cachep->colour) in cache_grow()
2632 offset *= cachep->colour_off; in cache_grow()
2643 kmem_flagcheck(cachep, flags); in cache_grow()
2650 page = kmem_getpages(cachep, local_flags, nodeid); in cache_grow()
2655 freelist = alloc_slabmgmt(cachep, page, offset, in cache_grow()
2660 slab_map_pages(cachep, page, freelist); in cache_grow()
2662 cache_init_objs(cachep, page); in cache_grow()
2671 STATS_INC_GROWN(cachep); in cache_grow()
2672 n->free_objects += cachep->num; in cache_grow()
2676 kmem_freepages(cachep, page); in cache_grow()
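The colour_next/colour_off arithmetic in the cache_grow() entries above staggers where the first object of each new slab starts, so hot fields of objects in different slabs do not all map to the same cache lines. A toy illustration of the cycling; the colour count and step are made-up values (in the kernel the step is typically the cache line size and the count comes from the slab's leftover space):

    #include <stdio.h>

    int main(void)
    {
        unsigned int colour = 4;        /* distinct offsets that fit the leftover space */
        unsigned int colour_off = 64;   /* step between offsets */
        unsigned int colour_next = 0;

        for (int slab = 0; slab < 6; slab++) {
            unsigned int offset = colour_next * colour_off;

            printf("slab %d places its first object at offset %u\n", slab, offset);
            if (++colour_next >= colour)
                colour_next = 0;        /* wrap around, as cache_grow() does */
        }
        return 0;
    }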
2721 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, in cache_free_debugcheck() argument
2727 BUG_ON(virt_to_cache(objp) != cachep); in cache_free_debugcheck()
2729 objp -= obj_offset(cachep); in cache_free_debugcheck()
2733 if (cachep->flags & SLAB_RED_ZONE) { in cache_free_debugcheck()
2734 verify_redzone_free(cachep, objp); in cache_free_debugcheck()
2735 *dbg_redzone1(cachep, objp) = RED_INACTIVE; in cache_free_debugcheck()
2736 *dbg_redzone2(cachep, objp) = RED_INACTIVE; in cache_free_debugcheck()
2738 if (cachep->flags & SLAB_STORE_USER) in cache_free_debugcheck()
2739 *dbg_userword(cachep, objp) = (void *)caller; in cache_free_debugcheck()
2741 objnr = obj_to_index(cachep, page, objp); in cache_free_debugcheck()
2743 BUG_ON(objnr >= cachep->num); in cache_free_debugcheck()
2744 BUG_ON(objp != index_to_obj(cachep, page, objnr)); in cache_free_debugcheck()
2747 if (cachep->flags & SLAB_POISON) { in cache_free_debugcheck()
2749 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { in cache_free_debugcheck()
2750 store_stackinfo(cachep, objp, caller); in cache_free_debugcheck()
2752 cachep->size / PAGE_SIZE, 0); in cache_free_debugcheck()
2754 poison_obj(cachep, objp, POISON_FREE); in cache_free_debugcheck()
2757 poison_obj(cachep, objp, POISON_FREE); in cache_free_debugcheck()
2768 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, in cache_alloc_refill() argument
2781 ac = cpu_cache_get(cachep); in cache_alloc_refill()
2791 n = get_node(cachep, node); in cache_alloc_refill()
2815 check_spinlock_acquired(cachep); in cache_alloc_refill()
2822 BUG_ON(page->active >= cachep->num); in cache_alloc_refill()
2824 while (page->active < cachep->num && batchcount--) { in cache_alloc_refill()
2825 STATS_INC_ALLOCED(cachep); in cache_alloc_refill()
2826 STATS_INC_ACTIVE(cachep); in cache_alloc_refill()
2827 STATS_SET_HIGH(cachep); in cache_alloc_refill()
2829 ac_put_obj(cachep, ac, slab_get_obj(cachep, page, in cache_alloc_refill()
2835 if (page->active == cachep->num) in cache_alloc_refill()
2849 x = cache_grow(cachep, gfp_exact_node(flags), node, NULL); in cache_alloc_refill()
2852 ac = cpu_cache_get(cachep); in cache_alloc_refill()
2864 return ac_get_obj(cachep, ac, flags, force_refill); in cache_alloc_refill()
2867 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, in cache_alloc_debugcheck_before() argument
2872 kmem_flagcheck(cachep, flags); in cache_alloc_debugcheck_before()
2877 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, in cache_alloc_debugcheck_after() argument
2884 if (cachep->flags & SLAB_POISON) { in cache_alloc_debugcheck_after()
2886 if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) in cache_alloc_debugcheck_after()
2888 cachep->size / PAGE_SIZE, 1); in cache_alloc_debugcheck_after()
2890 check_poison_obj(cachep, objp); in cache_alloc_debugcheck_after()
2892 check_poison_obj(cachep, objp); in cache_alloc_debugcheck_after()
2894 poison_obj(cachep, objp, POISON_INUSE); in cache_alloc_debugcheck_after()
2896 if (cachep->flags & SLAB_STORE_USER) in cache_alloc_debugcheck_after()
2897 *dbg_userword(cachep, objp) = (void *)caller; in cache_alloc_debugcheck_after()
2899 if (cachep->flags & SLAB_RED_ZONE) { in cache_alloc_debugcheck_after()
2900 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || in cache_alloc_debugcheck_after()
2901 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { in cache_alloc_debugcheck_after()
2902 slab_error(cachep, "double free, or memory outside" in cache_alloc_debugcheck_after()
2906 objp, *dbg_redzone1(cachep, objp), in cache_alloc_debugcheck_after()
2907 *dbg_redzone2(cachep, objp)); in cache_alloc_debugcheck_after()
2909 *dbg_redzone1(cachep, objp) = RED_ACTIVE; in cache_alloc_debugcheck_after()
2910 *dbg_redzone2(cachep, objp) = RED_ACTIVE; in cache_alloc_debugcheck_after()
2914 set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE); in cache_alloc_debugcheck_after()
2915 objp += obj_offset(cachep); in cache_alloc_debugcheck_after()
2916 if (cachep->ctor && cachep->flags & SLAB_POISON) in cache_alloc_debugcheck_after()
2917 cachep->ctor(objp); in cache_alloc_debugcheck_after()
2929 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) in slab_should_failslab() argument
2931 if (unlikely(cachep == kmem_cache)) in slab_should_failslab()
2934 return should_failslab(cachep->object_size, flags, cachep->flags); in slab_should_failslab()
2937 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) in ____cache_alloc() argument
2945 ac = cpu_cache_get(cachep); in ____cache_alloc()
2948 objp = ac_get_obj(cachep, ac, flags, false); in ____cache_alloc()
2955 STATS_INC_ALLOCHIT(cachep); in ____cache_alloc()
2961 STATS_INC_ALLOCMISS(cachep); in ____cache_alloc()
2962 objp = cache_alloc_refill(cachep, flags, force_refill); in ____cache_alloc()
2967 ac = cpu_cache_get(cachep); in ____cache_alloc()
2987 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) in alternate_node_alloc() argument
2994 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) in alternate_node_alloc()
2999 return ____cache_alloc_node(cachep, flags, nid_alloc); in alternate_node_alloc()
3094 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, in ____cache_alloc_node() argument
3104 n = get_node(cachep, nodeid); in ____cache_alloc_node()
3119 check_spinlock_acquired_node(cachep, nodeid); in ____cache_alloc_node()
3121 STATS_INC_NODEALLOCS(cachep); in ____cache_alloc_node()
3122 STATS_INC_ACTIVE(cachep); in ____cache_alloc_node()
3123 STATS_SET_HIGH(cachep); in ____cache_alloc_node()
3125 BUG_ON(page->active == cachep->num); in ____cache_alloc_node()
3127 obj = slab_get_obj(cachep, page, nodeid); in ____cache_alloc_node()
3132 if (page->active == cachep->num) in ____cache_alloc_node()
3142 x = cache_grow(cachep, gfp_exact_node(flags), nodeid, NULL); in ____cache_alloc_node()
3146 return fallback_alloc(cachep, flags); in ____cache_alloc_node()
3153 slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, in slab_alloc_node() argument
3164 if (slab_should_failslab(cachep, flags)) in slab_alloc_node()
3167 cachep = memcg_kmem_get_cache(cachep, flags); in slab_alloc_node()
3169 cache_alloc_debugcheck_before(cachep, flags); in slab_alloc_node()
3175 if (unlikely(!get_node(cachep, nodeid))) { in slab_alloc_node()
3177 ptr = fallback_alloc(cachep, flags); in slab_alloc_node()
3188 ptr = ____cache_alloc(cachep, flags); in slab_alloc_node()
3193 ptr = ____cache_alloc_node(cachep, flags, nodeid); in slab_alloc_node()
3196 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); in slab_alloc_node()
3197 kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, in slab_alloc_node()
3201 kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size); in slab_alloc_node()
3203 memset(ptr, 0, cachep->object_size); in slab_alloc_node()
3206 memcg_kmem_put_cache(cachep); in slab_alloc_node()
3235 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) in __do_cache_alloc() argument
3237 return ____cache_alloc(cachep, flags); in __do_cache_alloc()
3243 slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) in slab_alloc() argument
3252 if (slab_should_failslab(cachep, flags)) in slab_alloc()
3255 cachep = memcg_kmem_get_cache(cachep, flags); in slab_alloc()
3257 cache_alloc_debugcheck_before(cachep, flags); in slab_alloc()
3259 objp = __do_cache_alloc(cachep, flags); in slab_alloc()
3261 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); in slab_alloc()
3262 kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags, in slab_alloc()
3267 kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size); in slab_alloc()
3269 memset(objp, 0, cachep->object_size); in slab_alloc()
3272 memcg_kmem_put_cache(cachep); in slab_alloc()
3280 static void free_block(struct kmem_cache *cachep, void **objpp, in free_block() argument
3284 struct kmem_cache_node *n = get_node(cachep, node); in free_block()
3295 check_spinlock_acquired_node(cachep, node); in free_block()
3296 slab_put_obj(cachep, page, objp, node); in free_block()
3297 STATS_DEC_ACTIVE(cachep); in free_block()
3303 n->free_objects -= cachep->num; in free_block()
3318 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) in cache_flusharray() argument
3330 n = get_node(cachep, node); in cache_flusharray()
3345 free_block(cachep, ac->entry, batchcount, node, &list); in cache_flusharray()
3362 STATS_SET_FREEABLE(cachep, i); in cache_flusharray()
3366 slabs_destroy(cachep, &list); in cache_flusharray()
3375 static inline void __cache_free(struct kmem_cache *cachep, void *objp, in __cache_free() argument
3378 struct array_cache *ac = cpu_cache_get(cachep); in __cache_free()
3381 kmemleak_free_recursive(objp, cachep->flags); in __cache_free()
3382 objp = cache_free_debugcheck(cachep, objp, caller); in __cache_free()
3384 kmemcheck_slab_free(cachep, objp, cachep->object_size); in __cache_free()
3393 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) in __cache_free()
3397 STATS_INC_FREEHIT(cachep); in __cache_free()
3399 STATS_INC_FREEMISS(cachep); in __cache_free()
3400 cache_flusharray(cachep, ac); in __cache_free()
3403 ac_put_obj(cachep, ac, objp); in __cache_free()
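To put illustrative numbers on the free path above: assuming, say, limit 120 and batchcount 60, __cache_free() absorbs 120 consecutive frees into the per-CPU array; the 121st finds it full, so cache_flusharray() passes the oldest 60 pointers to free_block() for their home node, shifts the survivors down, and the fast path resumes. The node's list_lock is then taken roughly once per 60 frees instead of once per free.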
3414 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) in kmem_cache_alloc() argument
3416 void *ret = slab_alloc(cachep, flags, _RET_IP_); in kmem_cache_alloc()
3419 cachep->object_size, cachep->size, flags); in kmem_cache_alloc()
3427 kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) in kmem_cache_alloc_trace() argument
3431 ret = slab_alloc(cachep, flags, _RET_IP_); in kmem_cache_alloc_trace()
3434 size, cachep->size, flags); in kmem_cache_alloc_trace()
3452 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) in kmem_cache_alloc_node() argument
3454 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); in kmem_cache_alloc_node()
3457 cachep->object_size, cachep->size, in kmem_cache_alloc_node()
3465 void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, in kmem_cache_alloc_node_trace() argument
3472 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); in kmem_cache_alloc_node_trace()
3475 size, cachep->size, in kmem_cache_alloc_node_trace()
3485 struct kmem_cache *cachep; in __do_kmalloc_node() local
3487 cachep = kmalloc_slab(size, flags); in __do_kmalloc_node()
3488 if (unlikely(ZERO_OR_NULL_PTR(cachep))) in __do_kmalloc_node()
3489 return cachep; in __do_kmalloc_node()
3490 return kmem_cache_alloc_node_trace(cachep, flags, node, size); in __do_kmalloc_node()
3516 struct kmem_cache *cachep; in __do_kmalloc() local
3519 cachep = kmalloc_slab(size, flags); in __do_kmalloc()
3520 if (unlikely(ZERO_OR_NULL_PTR(cachep))) in __do_kmalloc()
3521 return cachep; in __do_kmalloc()
3522 ret = slab_alloc(cachep, flags, caller); in __do_kmalloc()
3525 size, cachep->size, flags); in __do_kmalloc()
3550 void kmem_cache_free(struct kmem_cache *cachep, void *objp) in kmem_cache_free() argument
3553 cachep = cache_from_obj(cachep, objp); in kmem_cache_free()
3554 if (!cachep) in kmem_cache_free()
3558 debug_check_no_locks_freed(objp, cachep->object_size); in kmem_cache_free()
3559 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) in kmem_cache_free()
3560 debug_check_no_obj_freed(objp, cachep->object_size); in kmem_cache_free()
3561 __cache_free(cachep, objp, _RET_IP_); in kmem_cache_free()
3600 static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp) in alloc_kmem_cache_node() argument
3610 new_alien = alloc_alien_cache(node, cachep->limit, gfp); in alloc_kmem_cache_node()
3616 if (cachep->shared) { in alloc_kmem_cache_node()
3618 cachep->shared*cachep->batchcount, in alloc_kmem_cache_node()
3626 n = get_node(cachep, node); in alloc_kmem_cache_node()
3634 free_block(cachep, shared->entry, in alloc_kmem_cache_node()
3643 cachep->batchcount + cachep->num; in alloc_kmem_cache_node()
3645 slabs_destroy(cachep, &list); in alloc_kmem_cache_node()
3659 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in alloc_kmem_cache_node()
3663 cachep->batchcount + cachep->num; in alloc_kmem_cache_node()
3664 cachep->node[node] = n; in alloc_kmem_cache_node()
3669 if (!cachep->list.next) { in alloc_kmem_cache_node()
3673 n = get_node(cachep, node); in alloc_kmem_cache_node()
3678 cachep->node[node] = NULL; in alloc_kmem_cache_node()
3687 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, in __do_tune_cpucache() argument
3693 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); in __do_tune_cpucache()
3697 prev = cachep->cpu_cache; in __do_tune_cpucache()
3698 cachep->cpu_cache = cpu_cache; in __do_tune_cpucache()
3702 cachep->batchcount = batchcount; in __do_tune_cpucache()
3703 cachep->limit = limit; in __do_tune_cpucache()
3704 cachep->shared = shared; in __do_tune_cpucache()
3716 n = get_node(cachep, node); in __do_tune_cpucache()
3718 free_block(cachep, ac->entry, ac->avail, node, &list); in __do_tune_cpucache()
3720 slabs_destroy(cachep, &list); in __do_tune_cpucache()
3725 return alloc_kmem_cache_node(cachep, gfp); in __do_tune_cpucache()
3728 static int do_tune_cpucache(struct kmem_cache *cachep, int limit, in do_tune_cpucache() argument
3734 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); in do_tune_cpucache()
3739 if ((ret < 0) || !is_root_cache(cachep)) in do_tune_cpucache()
3743 for_each_memcg_cache(c, cachep) { in do_tune_cpucache()
3752 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) in enable_cpucache() argument
3759 if (!is_root_cache(cachep)) { in enable_cpucache()
3760 struct kmem_cache *root = memcg_root_cache(cachep); in enable_cpucache()
3777 if (cachep->size > 131072) in enable_cpucache()
3779 else if (cachep->size > PAGE_SIZE) in enable_cpucache()
3781 else if (cachep->size > 1024) in enable_cpucache()
3783 else if (cachep->size > 256) in enable_cpucache()
3798 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) in enable_cpucache()
3811 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); in enable_cpucache()
3814 cachep->name, -err); in enable_cpucache()
3823 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, in drain_array() argument
3839 free_block(cachep, ac->entry, tofree, node, &list); in drain_array()
3845 slabs_destroy(cachep, &list); in drain_array()
3918 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) in get_slabinfo() argument
3932 for_each_kmem_cache_node(cachep, node, n) { in get_slabinfo()
3938 if (page->active != cachep->num && !error) in get_slabinfo()
3940 active_objs += cachep->num; in get_slabinfo()
3944 if (page->active == cachep->num && !error) in get_slabinfo()
3963 num_objs = num_slabs * cachep->num; in get_slabinfo()
3967 name = cachep->name; in get_slabinfo()
3976 sinfo->limit = cachep->limit; in get_slabinfo()
3977 sinfo->batchcount = cachep->batchcount; in get_slabinfo()
3978 sinfo->shared = cachep->shared; in get_slabinfo()
3979 sinfo->objects_per_slab = cachep->num; in get_slabinfo()
3980 sinfo->cache_order = cachep->gfporder; in get_slabinfo()
3983 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) in slabinfo_show_stats() argument
3987 unsigned long high = cachep->high_mark; in slabinfo_show_stats()
3988 unsigned long allocs = cachep->num_allocations; in slabinfo_show_stats()
3989 unsigned long grown = cachep->grown; in slabinfo_show_stats()
3990 unsigned long reaped = cachep->reaped; in slabinfo_show_stats()
3991 unsigned long errors = cachep->errors; in slabinfo_show_stats()
3992 unsigned long max_freeable = cachep->max_freeable; in slabinfo_show_stats()
3993 unsigned long node_allocs = cachep->node_allocs; in slabinfo_show_stats()
3994 unsigned long node_frees = cachep->node_frees; in slabinfo_show_stats()
3995 unsigned long overflows = cachep->node_overflow; in slabinfo_show_stats()
4005 unsigned long allochit = atomic_read(&cachep->allochit); in slabinfo_show_stats()
4006 unsigned long allocmiss = atomic_read(&cachep->allocmiss); in slabinfo_show_stats()
4007 unsigned long freehit = atomic_read(&cachep->freehit); in slabinfo_show_stats()
4008 unsigned long freemiss = atomic_read(&cachep->freemiss); in slabinfo_show_stats()
4029 struct kmem_cache *cachep; in slabinfo_write() local
4048 list_for_each_entry(cachep, &slab_caches, list) { in slabinfo_write()
4049 if (!strcmp(cachep->name, kbuf)) { in slabinfo_write()
4054 res = do_tune_cpucache(cachep, limit, in slabinfo_write()
4134 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); in leaks_show() local
4142 if (!(cachep->flags & SLAB_STORE_USER)) in leaks_show()
4144 if (!(cachep->flags & SLAB_RED_ZONE)) in leaks_show()
4151 for_each_kmem_cache_node(cachep, node, n) { in leaks_show()
4157 handle_slab(x, cachep, page); in leaks_show()
4159 handle_slab(x, cachep, page); in leaks_show()
4162 name = cachep->name; in leaks_show()