Lines matching refs: s

118 static inline int kmem_cache_debug(struct kmem_cache *s)  in kmem_cache_debug()  argument
121 return unlikely(s->flags & SLAB_DEBUG_FLAGS); in kmem_cache_debug()
127 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) in kmem_cache_has_cpu_partial() argument
130 return !kmem_cache_debug(s); in kmem_cache_has_cpu_partial()
204 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
206 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } in sysfs_slab_add() argument
207 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) in sysfs_slab_alias() argument
209 static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { } in memcg_propagate_slab_attrs() argument
212 static inline void stat(const struct kmem_cache *s, enum stat_item si) in stat() argument
219 raw_cpu_inc(s->cpu_slab->stat[si]); in stat()
228 static inline int check_valid_pointer(struct kmem_cache *s, in check_valid_pointer() argument
237 if (object < base || object >= base + page->objects * s->size || in check_valid_pointer()
238 (object - base) % s->size) { in check_valid_pointer()
245 static inline void *get_freepointer(struct kmem_cache *s, void *object) in get_freepointer() argument
247 return *(void **)(object + s->offset); in get_freepointer()
250 static void prefetch_freepointer(const struct kmem_cache *s, void *object) in prefetch_freepointer() argument
252 prefetch(object + s->offset); in prefetch_freepointer()
255 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) in get_freepointer_safe() argument
260 probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p)); in get_freepointer_safe()
262 p = get_freepointer(s, object); in get_freepointer_safe()
267 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) in set_freepointer() argument
269 *(void **)(object + s->offset) = fp; in set_freepointer()
282 static inline int slab_index(void *p, struct kmem_cache *s, void *addr) in slab_index() argument
284 return (p - addr) / s->size; in slab_index()
287 static inline size_t slab_ksize(const struct kmem_cache *s) in slab_ksize() argument
294 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) in slab_ksize()
295 return s->object_size; in slab_ksize()
303 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) in slab_ksize()
304 return s->inuse; in slab_ksize()
308 return s->size; in slab_ksize()
365 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in __cmpxchg_double_slab() argument
373 if (s->flags & __CMPXCHG_DOUBLE) { in __cmpxchg_double_slab()
393 stat(s, CMPXCHG_DOUBLE_FAIL); in __cmpxchg_double_slab()
396 pr_info("%s %s: cmpxchg double redo ", n, s->name); in __cmpxchg_double_slab()
402 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in cmpxchg_double_slab() argument
409 if (s->flags & __CMPXCHG_DOUBLE) { in cmpxchg_double_slab()
434 stat(s, CMPXCHG_DOUBLE_FAIL); in cmpxchg_double_slab()
437 pr_info("%s %s: cmpxchg double redo ", n, s->name); in cmpxchg_double_slab()
450 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) in get_map() argument
455 for (p = page->freelist; p; p = get_freepointer(s, p)) in get_map()
456 set_bit(slab_index(p, s, addr), map); in get_map()
498 static struct track *get_track(struct kmem_cache *s, void *object, in get_track() argument
503 if (s->offset) in get_track()
504 p = object + s->offset + sizeof(void *); in get_track()
506 p = object + s->inuse; in get_track()
511 static void set_track(struct kmem_cache *s, void *object, in set_track() argument
514 struct track *p = get_track(s, object, alloc); in set_track()
545 static void init_tracking(struct kmem_cache *s, void *object) in init_tracking() argument
547 if (!(s->flags & SLAB_STORE_USER)) in init_tracking()
550 set_track(s, object, TRACK_FREE, 0UL); in init_tracking()
551 set_track(s, object, TRACK_ALLOC, 0UL); in init_tracking()
554 static void print_track(const char *s, struct track *t) in print_track() argument
560 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); in print_track()
573 static void print_tracking(struct kmem_cache *s, void *object) in print_tracking() argument
575 if (!(s->flags & SLAB_STORE_USER)) in print_tracking()
578 print_track("Allocated", get_track(s, object, TRACK_ALLOC)); in print_tracking()
579 print_track("Freed", get_track(s, object, TRACK_FREE)); in print_tracking()
589 static void slab_bug(struct kmem_cache *s, char *fmt, ...) in slab_bug() argument
598 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); in slab_bug()
605 static void slab_fix(struct kmem_cache *s, char *fmt, ...) in slab_fix() argument
613 pr_err("FIX %s: %pV\n", s->name, &vaf); in slab_fix()
617 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) in print_trailer() argument
622 print_tracking(s, p); in print_trailer()
627 p, p - addr, get_freepointer(s, p)); in print_trailer()
632 print_section("Object ", p, min_t(unsigned long, s->object_size, in print_trailer()
634 if (s->flags & SLAB_RED_ZONE) in print_trailer()
635 print_section("Redzone ", p + s->object_size, in print_trailer()
636 s->inuse - s->object_size); in print_trailer()
638 if (s->offset) in print_trailer()
639 off = s->offset + sizeof(void *); in print_trailer()
641 off = s->inuse; in print_trailer()
643 if (s->flags & SLAB_STORE_USER) in print_trailer()
646 if (off != s->size) in print_trailer()
648 print_section("Padding ", p + off, s->size - off); in print_trailer()
653 void object_err(struct kmem_cache *s, struct page *page, in object_err() argument
656 slab_bug(s, "%s", reason); in object_err()
657 print_trailer(s, page, object); in object_err()
660 static void slab_err(struct kmem_cache *s, struct page *page, in slab_err() argument
669 slab_bug(s, "%s", buf); in slab_err()
674 static void init_object(struct kmem_cache *s, void *object, u8 val) in init_object() argument
678 if (s->flags & __OBJECT_POISON) { in init_object()
679 memset(p, POISON_FREE, s->object_size - 1); in init_object()
680 p[s->object_size - 1] = POISON_END; in init_object()
683 if (s->flags & SLAB_RED_ZONE) in init_object()
684 memset(p + s->object_size, val, s->inuse - s->object_size); in init_object()
687 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, in restore_bytes() argument
690 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); in restore_bytes()
694 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, in check_bytes_and_report() argument
711 slab_bug(s, "%s overwritten", what); in check_bytes_and_report()
714 print_trailer(s, page, object); in check_bytes_and_report()
716 restore_bytes(s, what, value, fault, end); in check_bytes_and_report()
758 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) in check_pad_bytes() argument
760 unsigned long off = s->inuse; /* The end of info */ in check_pad_bytes()
762 if (s->offset) in check_pad_bytes()
766 if (s->flags & SLAB_STORE_USER) in check_pad_bytes()
770 if (s->size == off) in check_pad_bytes()
773 return check_bytes_and_report(s, page, p, "Object padding", in check_pad_bytes()
774 p + off, POISON_INUSE, s->size - off); in check_pad_bytes()
778 static int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
786 if (!(s->flags & SLAB_POISON)) in slab_pad_check()
790 length = (PAGE_SIZE << compound_order(page)) - s->reserved; in slab_pad_check()
792 remainder = length % s->size; in slab_pad_check()
804 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); in slab_pad_check()
807 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end); in slab_pad_check()
811 static int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
815 u8 *endobject = object + s->object_size; in check_object()
817 if (s->flags & SLAB_RED_ZONE) { in check_object()
818 if (!check_bytes_and_report(s, page, object, "Redzone", in check_object()
819 endobject, val, s->inuse - s->object_size)) in check_object()
822 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { in check_object()
823 check_bytes_and_report(s, page, p, "Alignment padding", in check_object()
825 s->inuse - s->object_size); in check_object()
829 if (s->flags & SLAB_POISON) { in check_object()
830 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && in check_object()
831 (!check_bytes_and_report(s, page, p, "Poison", p, in check_object()
832 POISON_FREE, s->object_size - 1) || in check_object()
833 !check_bytes_and_report(s, page, p, "Poison", in check_object()
834 p + s->object_size - 1, POISON_END, 1))) in check_object()
839 check_pad_bytes(s, page, p); in check_object()
842 if (!s->offset && val == SLUB_RED_ACTIVE) in check_object()
850 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { in check_object()
851 object_err(s, page, p, "Freepointer corrupt"); in check_object()
857 set_freepointer(s, p, NULL); in check_object()
863 static int check_slab(struct kmem_cache *s, struct page *page) in check_slab() argument
870 slab_err(s, page, "Not a valid slab page"); in check_slab()
874 maxobj = order_objects(compound_order(page), s->size, s->reserved); in check_slab()
876 slab_err(s, page, "objects %u > max %u", in check_slab()
881 slab_err(s, page, "inuse %u > max %u", in check_slab()
886 slab_pad_check(s, page); in check_slab()
894 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) in on_freelist() argument
905 if (!check_valid_pointer(s, page, fp)) { in on_freelist()
907 object_err(s, page, object, in on_freelist()
909 set_freepointer(s, object, NULL); in on_freelist()
911 slab_err(s, page, "Freepointer corrupt"); in on_freelist()
914 slab_fix(s, "Freelist cleared"); in on_freelist()
920 fp = get_freepointer(s, object); in on_freelist()
924 max_objects = order_objects(compound_order(page), s->size, s->reserved); in on_freelist()
929 slab_err(s, page, "Wrong number of objects. Found %d but " in on_freelist()
932 slab_fix(s, "Number of objects adjusted."); in on_freelist()
935 slab_err(s, page, "Wrong object count. Counter is %d but " in on_freelist()
938 slab_fix(s, "Object count adjusted."); in on_freelist()
943 static void trace(struct kmem_cache *s, struct page *page, void *object, in trace() argument
946 if (s->flags & SLAB_TRACE) { in trace()
948 s->name, in trace()
955 s->object_size); in trace()
964 static void add_full(struct kmem_cache *s, in add_full() argument
967 if (!(s->flags & SLAB_STORE_USER)) in add_full()
974 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) in remove_full() argument
976 if (!(s->flags & SLAB_STORE_USER)) in remove_full()
984 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
986 struct kmem_cache_node *n = get_node(s, node); in slabs_node()
996 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) in inc_slabs_node() argument
998 struct kmem_cache_node *n = get_node(s, node); in inc_slabs_node()
1011 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) in dec_slabs_node() argument
1013 struct kmem_cache_node *n = get_node(s, node); in dec_slabs_node()
1020 static void setup_object_debug(struct kmem_cache *s, struct page *page, in setup_object_debug() argument
1023 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) in setup_object_debug()
1026 init_object(s, object, SLUB_RED_INACTIVE); in setup_object_debug()
1027 init_tracking(s, object); in setup_object_debug()
1030 static noinline int alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1034 if (!check_slab(s, page)) in alloc_debug_processing()
1037 if (!check_valid_pointer(s, page, object)) { in alloc_debug_processing()
1038 object_err(s, page, object, "Freelist Pointer check fails"); in alloc_debug_processing()
1042 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) in alloc_debug_processing()
1046 if (s->flags & SLAB_STORE_USER) in alloc_debug_processing()
1047 set_track(s, object, TRACK_ALLOC, addr); in alloc_debug_processing()
1048 trace(s, page, object, 1); in alloc_debug_processing()
1049 init_object(s, object, SLUB_RED_ACTIVE); in alloc_debug_processing()
1059 slab_fix(s, "Marking all objects used"); in alloc_debug_processing()
1067 struct kmem_cache *s, struct page *page, void *object, in free_debug_processing() argument
1070 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
1075 if (!check_slab(s, page)) in free_debug_processing()
1078 if (!check_valid_pointer(s, page, object)) { in free_debug_processing()
1079 slab_err(s, page, "Invalid object pointer 0x%p", object); in free_debug_processing()
1083 if (on_freelist(s, page, object)) { in free_debug_processing()
1084 object_err(s, page, object, "Object already free"); in free_debug_processing()
1088 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) in free_debug_processing()
1091 if (unlikely(s != page->slab_cache)) { in free_debug_processing()
1093 slab_err(s, page, "Attempt to free object(0x%p) " in free_debug_processing()
1100 object_err(s, page, object, in free_debug_processing()
1105 if (s->flags & SLAB_STORE_USER) in free_debug_processing()
1106 set_track(s, object, TRACK_FREE, addr); in free_debug_processing()
1107 trace(s, page, object, 0); in free_debug_processing()
1108 init_object(s, object, SLUB_RED_INACTIVE); in free_debug_processing()
1120 slab_fix(s, "Object at 0x%p not freed", object); in free_debug_processing()
1206 static inline void setup_object_debug(struct kmem_cache *s, in setup_object_debug() argument
1209 static inline int alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1213 struct kmem_cache *s, struct page *page, void *object, in free_debug_processing() argument
1216 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
1218 static inline int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
1220 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, in add_full() argument
1222 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, in remove_full() argument
1234 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1238 static inline void inc_slabs_node(struct kmem_cache *s, int node, in inc_slabs_node() argument
1240 static inline void dec_slabs_node(struct kmem_cache *s, int node, in dec_slabs_node() argument
1261 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, in slab_pre_alloc_hook() argument
1268 if (should_failslab(s->object_size, flags, s->flags)) in slab_pre_alloc_hook()
1271 return memcg_kmem_get_cache(s, flags); in slab_pre_alloc_hook()
1274 static inline void slab_post_alloc_hook(struct kmem_cache *s, in slab_post_alloc_hook() argument
1278 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); in slab_post_alloc_hook()
1279 kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags); in slab_post_alloc_hook()
1280 memcg_kmem_put_cache(s); in slab_post_alloc_hook()
1281 kasan_slab_alloc(s, object); in slab_post_alloc_hook()
1284 static inline void slab_free_hook(struct kmem_cache *s, void *x) in slab_free_hook() argument
1286 kmemleak_free_recursive(x, s->flags); in slab_free_hook()
1298 kmemcheck_slab_free(s, x, s->object_size); in slab_free_hook()
1299 debug_check_no_locks_freed(x, s->object_size); in slab_free_hook()
1303 if (!(s->flags & SLAB_DEBUG_OBJECTS)) in slab_free_hook()
1304 debug_check_no_obj_freed(x, s->object_size); in slab_free_hook()
1306 kasan_slab_free(s, x); in slab_free_hook()
1312 static inline struct page *alloc_slab_page(struct kmem_cache *s, in alloc_slab_page() argument
1320 if (memcg_charge_slab(s, flags, order)) in alloc_slab_page()
1329 memcg_uncharge_slab(s, order); in alloc_slab_page()
1334 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab() argument
1337 struct kmem_cache_order_objects oo = s->oo; in allocate_slab()
1345 flags |= s->allocflags; in allocate_slab()
1353 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1355 oo = s->min; in allocate_slab()
1361 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1364 stat(s, ORDER_FALLBACK); in allocate_slab()
1368 && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { in allocate_slab()
1377 if (s->ctor) in allocate_slab()
1390 (s->flags & SLAB_RECLAIM_ACCOUNT) ? in allocate_slab()
1397 static void setup_object(struct kmem_cache *s, struct page *page, in setup_object() argument
1400 setup_object_debug(s, page, object); in setup_object()
1401 if (unlikely(s->ctor)) { in setup_object()
1402 kasan_unpoison_object_data(s, object); in setup_object()
1403 s->ctor(object); in setup_object()
1404 kasan_poison_object_data(s, object); in setup_object()
1408 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab() argument
1421 page = allocate_slab(s, in new_slab()
1427 inc_slabs_node(s, page_to_nid(page), page->objects); in new_slab()
1428 page->slab_cache = s; in new_slab()
1435 if (unlikely(s->flags & SLAB_POISON)) in new_slab()
1440 for_each_object_idx(p, idx, s, start, page->objects) { in new_slab()
1441 setup_object(s, page, p); in new_slab()
1443 set_freepointer(s, p, p + s->size); in new_slab()
1445 set_freepointer(s, p, NULL); in new_slab()
1455 static void __free_slab(struct kmem_cache *s, struct page *page) in __free_slab() argument
1460 if (kmem_cache_debug(s)) { in __free_slab()
1463 slab_pad_check(s, page); in __free_slab()
1464 for_each_object(p, s, page_address(page), in __free_slab()
1466 check_object(s, page, p, SLUB_RED_INACTIVE); in __free_slab()
1472 (s->flags & SLAB_RECLAIM_ACCOUNT) ? in __free_slab()
1483 memcg_uncharge_slab(s, order); in __free_slab()
1501 static void free_slab(struct kmem_cache *s, struct page *page) in free_slab() argument
1503 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { in free_slab()
1508 int offset = (PAGE_SIZE << order) - s->reserved; in free_slab()
1510 VM_BUG_ON(s->reserved != sizeof(*head)); in free_slab()
1521 __free_slab(s, page); in free_slab()
1524 static void discard_slab(struct kmem_cache *s, struct page *page) in discard_slab() argument
1526 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
1527 free_slab(s, page); in discard_slab()
1570 static inline void *acquire_slab(struct kmem_cache *s, in acquire_slab() argument
1599 if (!__cmpxchg_double_slab(s, page, in acquire_slab()
1610 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1616 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, in get_partial_node() argument
1640 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
1647 stat(s, ALLOC_FROM_PARTIAL); in get_partial_node()
1650 put_cpu_partial(s, page, 0); in get_partial_node()
1651 stat(s, CPU_PARTIAL_NODE); in get_partial_node()
1653 if (!kmem_cache_has_cpu_partial(s) in get_partial_node()
1654 || available > s->cpu_partial / 2) in get_partial_node()
1665 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, in get_any_partial() argument
1694 if (!s->remote_node_defrag_ratio || in get_any_partial()
1695 get_cycles() % 1024 > s->remote_node_defrag_ratio) in get_any_partial()
1704 n = get_node(s, zone_to_nid(zone)); in get_any_partial()
1707 n->nr_partial > s->min_partial) { in get_any_partial()
1708 object = get_partial_node(s, n, c, flags); in get_any_partial()
1729 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, in get_partial() argument
1740 object = get_partial_node(s, get_node(s, searchnode), c, flags); in get_partial()
1744 return get_any_partial(s, flags, c); in get_partial()
1783 const struct kmem_cache *s, unsigned long tid) in note_cmpxchg_failure() argument
1786 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); in note_cmpxchg_failure()
1788 pr_info("%s %s: cmpxchg redo ", n, s->name); in note_cmpxchg_failure()
1803 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); in note_cmpxchg_failure()
1806 static void init_kmem_cache_cpus(struct kmem_cache *s) in init_kmem_cache_cpus() argument
1811 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); in init_kmem_cache_cpus()
1817 static void deactivate_slab(struct kmem_cache *s, struct page *page, in deactivate_slab() argument
1821 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
1830 stat(s, DEACTIVATE_REMOTE_FREES); in deactivate_slab()
1842 while (freelist && (nextfree = get_freepointer(s, freelist))) { in deactivate_slab()
1849 set_freepointer(s, freelist, prior); in deactivate_slab()
1854 } while (!__cmpxchg_double_slab(s, page, in deactivate_slab()
1886 set_freepointer(s, freelist, old.freelist); in deactivate_slab()
1893 if (!new.inuse && n->nr_partial >= s->min_partial) in deactivate_slab()
1908 if (kmem_cache_debug(s) && !lock) { in deactivate_slab()
1927 remove_full(s, n, page); in deactivate_slab()
1932 stat(s, tail); in deactivate_slab()
1936 stat(s, DEACTIVATE_FULL); in deactivate_slab()
1937 add_full(s, n, page); in deactivate_slab()
1943 if (!__cmpxchg_double_slab(s, page, in deactivate_slab()
1953 stat(s, DEACTIVATE_EMPTY); in deactivate_slab()
1954 discard_slab(s, page); in deactivate_slab()
1955 stat(s, FREE_SLAB); in deactivate_slab()
1966 static void unfreeze_partials(struct kmem_cache *s, in unfreeze_partials() argument
1979 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
1999 } while (!__cmpxchg_double_slab(s, page, in unfreeze_partials()
2004 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { in unfreeze_partials()
2009 stat(s, FREE_ADD_PARTIAL); in unfreeze_partials()
2020 stat(s, DEACTIVATE_EMPTY); in unfreeze_partials()
2021 discard_slab(s, page); in unfreeze_partials()
2022 stat(s, FREE_SLAB); in unfreeze_partials()
2036 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) in put_cpu_partial() argument
2047 oldpage = this_cpu_read(s->cpu_slab->partial); in put_cpu_partial()
2052 if (drain && pobjects > s->cpu_partial) { in put_cpu_partial()
2059 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2064 stat(s, CPU_PARTIAL_DRAIN); in put_cpu_partial()
2075 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) in put_cpu_partial()
2077 if (unlikely(!s->cpu_partial)) { in put_cpu_partial()
2081 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2088 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) in flush_slab() argument
2090 stat(s, CPUSLAB_FLUSH); in flush_slab()
2091 deactivate_slab(s, c->page, c->freelist); in flush_slab()
2103 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) in __flush_cpu_slab() argument
2105 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
2109 flush_slab(s, c); in __flush_cpu_slab()
2111 unfreeze_partials(s, c); in __flush_cpu_slab()
2117 struct kmem_cache *s = d; in flush_cpu_slab() local
2119 __flush_cpu_slab(s, smp_processor_id()); in flush_cpu_slab()
2124 struct kmem_cache *s = info; in has_cpu_slab() local
2125 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab()
2130 static void flush_all(struct kmem_cache *s) in flush_all() argument
2132 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); in flush_all()
2177 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) in slab_out_of_memory() argument
2191 s->name, s->object_size, s->size, oo_order(s->oo), in slab_out_of_memory()
2192 oo_order(s->min)); in slab_out_of_memory()
2194 if (oo_order(s->min) > get_order(s->object_size)) in slab_out_of_memory()
2196 s->name); in slab_out_of_memory()
2198 for_each_kmem_cache_node(s, node, n) { in slab_out_of_memory()
2213 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags, in new_slab_objects() argument
2220 freelist = get_partial(s, flags, node, c); in new_slab_objects()
2225 page = new_slab(s, flags, node); in new_slab_objects()
2227 c = raw_cpu_ptr(s->cpu_slab); in new_slab_objects()
2229 flush_slab(s, c); in new_slab_objects()
2238 stat(s, ALLOC_SLAB); in new_slab_objects()
2265 static inline void *get_freelist(struct kmem_cache *s, struct page *page) in get_freelist() argument
2281 } while (!__cmpxchg_double_slab(s, page, in get_freelist()
2305 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in __slab_alloc() argument
2319 c = this_cpu_ptr(s->cpu_slab); in __slab_alloc()
2334 stat(s, ALLOC_NODE_MISMATCH); in __slab_alloc()
2335 deactivate_slab(s, page, c->freelist); in __slab_alloc()
2348 deactivate_slab(s, page, c->freelist); in __slab_alloc()
2359 freelist = get_freelist(s, page); in __slab_alloc()
2363 stat(s, DEACTIVATE_BYPASS); in __slab_alloc()
2367 stat(s, ALLOC_REFILL); in __slab_alloc()
2376 c->freelist = get_freepointer(s, freelist); in __slab_alloc()
2386 stat(s, CPU_PARTIAL_ALLOC); in __slab_alloc()
2391 freelist = new_slab_objects(s, gfpflags, node, &c); in __slab_alloc()
2394 slab_out_of_memory(s, gfpflags, node); in __slab_alloc()
2400 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) in __slab_alloc()
2404 if (kmem_cache_debug(s) && in __slab_alloc()
2405 !alloc_debug_processing(s, page, freelist, addr)) in __slab_alloc()
2408 deactivate_slab(s, page, get_freepointer(s, freelist)); in __slab_alloc()
2425 static __always_inline void *slab_alloc_node(struct kmem_cache *s, in slab_alloc_node() argument
2433 s = slab_pre_alloc_hook(s, gfpflags); in slab_alloc_node()
2434 if (!s) in slab_alloc_node()
2448 tid = this_cpu_read(s->cpu_slab->tid); in slab_alloc_node()
2449 c = raw_cpu_ptr(s->cpu_slab); in slab_alloc_node()
2473 object = __slab_alloc(s, gfpflags, node, addr, c); in slab_alloc_node()
2474 stat(s, ALLOC_SLOWPATH); in slab_alloc_node()
2476 void *next_object = get_freepointer_safe(s, object); in slab_alloc_node()
2493 s->cpu_slab->freelist, s->cpu_slab->tid, in slab_alloc_node()
2497 note_cmpxchg_failure("slab_alloc", s, tid); in slab_alloc_node()
2500 prefetch_freepointer(s, next_object); in slab_alloc_node()
2501 stat(s, ALLOC_FASTPATH); in slab_alloc_node()
2505 memset(object, 0, s->object_size); in slab_alloc_node()
2507 slab_post_alloc_hook(s, gfpflags, object); in slab_alloc_node()
2512 static __always_inline void *slab_alloc(struct kmem_cache *s, in slab_alloc() argument
2515 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr); in slab_alloc()
2518 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) in kmem_cache_alloc() argument
2520 void *ret = slab_alloc(s, gfpflags, _RET_IP_); in kmem_cache_alloc()
2522 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, in kmem_cache_alloc()
2523 s->size, gfpflags); in kmem_cache_alloc()
2530 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) in kmem_cache_alloc_trace() argument
2532 void *ret = slab_alloc(s, gfpflags, _RET_IP_); in kmem_cache_alloc_trace()
2533 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); in kmem_cache_alloc_trace()
2534 kasan_kmalloc(s, ret, size); in kmem_cache_alloc_trace()
2541 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) in kmem_cache_alloc_node() argument
2543 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); in kmem_cache_alloc_node()
2546 s->object_size, s->size, gfpflags, node); in kmem_cache_alloc_node()
2553 void *kmem_cache_alloc_node_trace(struct kmem_cache *s, in kmem_cache_alloc_node_trace() argument
2557 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); in kmem_cache_alloc_node_trace()
2560 size, s->size, gfpflags, node); in kmem_cache_alloc_node_trace()
2562 kasan_kmalloc(s, ret, size); in kmem_cache_alloc_node_trace()
2577 static void __slab_free(struct kmem_cache *s, struct page *page, in __slab_free() argument
2588 stat(s, FREE_SLOWPATH); in __slab_free()
2590 if (kmem_cache_debug(s) && in __slab_free()
2591 !(n = free_debug_processing(s, page, x, addr, &flags))) in __slab_free()
2601 set_freepointer(s, object, prior); in __slab_free()
2607 if (kmem_cache_has_cpu_partial(s) && !prior) { in __slab_free()
2619 n = get_node(s, page_to_nid(page)); in __slab_free()
2633 } while (!cmpxchg_double_slab(s, page, in __slab_free()
2645 put_cpu_partial(s, page, 1); in __slab_free()
2646 stat(s, CPU_PARTIAL_FREE); in __slab_free()
2653 stat(s, FREE_FROZEN); in __slab_free()
2657 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) in __slab_free()
2664 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { in __slab_free()
2665 if (kmem_cache_debug(s)) in __slab_free()
2666 remove_full(s, n, page); in __slab_free()
2668 stat(s, FREE_ADD_PARTIAL); in __slab_free()
2679 stat(s, FREE_REMOVE_PARTIAL); in __slab_free()
2682 remove_full(s, n, page); in __slab_free()
2686 stat(s, FREE_SLAB); in __slab_free()
2687 discard_slab(s, page); in __slab_free()
2701 static __always_inline void slab_free(struct kmem_cache *s, in slab_free() argument
2708 slab_free_hook(s, x); in slab_free()
2718 tid = this_cpu_read(s->cpu_slab->tid); in slab_free()
2719 c = raw_cpu_ptr(s->cpu_slab); in slab_free()
2727 set_freepointer(s, object, c->freelist); in slab_free()
2730 s->cpu_slab->freelist, s->cpu_slab->tid, in slab_free()
2734 note_cmpxchg_failure("slab_free", s, tid); in slab_free()
2737 stat(s, FREE_FASTPATH); in slab_free()
2739 __slab_free(s, page, x, addr); in slab_free()
2743 void kmem_cache_free(struct kmem_cache *s, void *x) in kmem_cache_free() argument
2745 s = cache_from_obj(s, x); in kmem_cache_free()
2746 if (!s) in kmem_cache_free()
2748 slab_free(s, virt_to_head_page(x), x, _RET_IP_); in kmem_cache_free()
2893 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) in alloc_kmem_cache_cpus() argument
2902 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), in alloc_kmem_cache_cpus()
2905 if (!s->cpu_slab) in alloc_kmem_cache_cpus()
2908 init_kmem_cache_cpus(s); in alloc_kmem_cache_cpus()
2960 static void free_kmem_cache_nodes(struct kmem_cache *s) in free_kmem_cache_nodes() argument
2965 for_each_kmem_cache_node(s, node, n) { in free_kmem_cache_nodes()
2967 s->node[node] = NULL; in free_kmem_cache_nodes()
2971 static int init_kmem_cache_nodes(struct kmem_cache *s) in init_kmem_cache_nodes() argument
2986 free_kmem_cache_nodes(s); in init_kmem_cache_nodes()
2990 s->node[node] = n; in init_kmem_cache_nodes()
2996 static void set_min_partial(struct kmem_cache *s, unsigned long min) in set_min_partial() argument
3002 s->min_partial = min; in set_min_partial()
3009 static int calculate_sizes(struct kmem_cache *s, int forced_order) in calculate_sizes() argument
3011 unsigned long flags = s->flags; in calculate_sizes()
3012 unsigned long size = s->object_size; in calculate_sizes()
3029 !s->ctor) in calculate_sizes()
3030 s->flags |= __OBJECT_POISON; in calculate_sizes()
3032 s->flags &= ~__OBJECT_POISON; in calculate_sizes()
3040 if ((flags & SLAB_RED_ZONE) && size == s->object_size) in calculate_sizes()
3048 s->inuse = size; in calculate_sizes()
3051 s->ctor)) { in calculate_sizes()
3060 s->offset = size; in calculate_sizes()
3088 size = ALIGN(size, s->align); in calculate_sizes()
3089 s->size = size; in calculate_sizes()
3093 order = calculate_order(size, s->reserved); in calculate_sizes()
3098 s->allocflags = 0; in calculate_sizes()
3100 s->allocflags |= __GFP_COMP; in calculate_sizes()
3102 if (s->flags & SLAB_CACHE_DMA) in calculate_sizes()
3103 s->allocflags |= GFP_DMA; in calculate_sizes()
3105 if (s->flags & SLAB_RECLAIM_ACCOUNT) in calculate_sizes()
3106 s->allocflags |= __GFP_RECLAIMABLE; in calculate_sizes()
3111 s->oo = oo_make(order, size, s->reserved); in calculate_sizes()
3112 s->min = oo_make(get_order(size), size, s->reserved); in calculate_sizes()
3113 if (oo_objects(s->oo) > oo_objects(s->max)) in calculate_sizes()
3114 s->max = s->oo; in calculate_sizes()
3116 return !!oo_objects(s->oo); in calculate_sizes()
3119 static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) in kmem_cache_open() argument
3121 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); in kmem_cache_open()
3122 s->reserved = 0; in kmem_cache_open()
3124 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) in kmem_cache_open()
3125 s->reserved = sizeof(struct rcu_head); in kmem_cache_open()
3127 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3134 if (get_order(s->size) > get_order(s->object_size)) { in kmem_cache_open()
3135 s->flags &= ~DEBUG_METADATA_FLAGS; in kmem_cache_open()
3136 s->offset = 0; in kmem_cache_open()
3137 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3144 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0) in kmem_cache_open()
3146 s->flags |= __CMPXCHG_DOUBLE; in kmem_cache_open()
3153 set_min_partial(s, ilog2(s->size) / 2); in kmem_cache_open()
3172 if (!kmem_cache_has_cpu_partial(s)) in kmem_cache_open()
3173 s->cpu_partial = 0; in kmem_cache_open()
3174 else if (s->size >= PAGE_SIZE) in kmem_cache_open()
3175 s->cpu_partial = 2; in kmem_cache_open()
3176 else if (s->size >= 1024) in kmem_cache_open()
3177 s->cpu_partial = 6; in kmem_cache_open()
3178 else if (s->size >= 256) in kmem_cache_open()
3179 s->cpu_partial = 13; in kmem_cache_open()
3181 s->cpu_partial = 30; in kmem_cache_open()
3184 s->remote_node_defrag_ratio = 1000; in kmem_cache_open()
3186 if (!init_kmem_cache_nodes(s)) in kmem_cache_open()
3189 if (alloc_kmem_cache_cpus(s)) in kmem_cache_open()
3192 free_kmem_cache_nodes(s); in kmem_cache_open()
3197 s->name, (unsigned long)s->size, s->size, in kmem_cache_open()
3198 oo_order(s->oo), s->offset, flags); in kmem_cache_open()
3202 static void list_slab_objects(struct kmem_cache *s, struct page *page, in list_slab_objects() argument
3212 slab_err(s, page, text, s->name); in list_slab_objects()
3215 get_map(s, page, map); in list_slab_objects()
3216 for_each_object(p, s, addr, page->objects) { in list_slab_objects()
3218 if (!test_bit(slab_index(p, s, addr), map)) { in list_slab_objects()
3220 print_tracking(s, p); in list_slab_objects()
3233 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) in free_partial() argument
3240 discard_slab(s, page); in free_partial()
3242 list_slab_objects(s, page, in free_partial()
3251 static inline int kmem_cache_close(struct kmem_cache *s) in kmem_cache_close() argument
3256 flush_all(s); in kmem_cache_close()
3258 for_each_kmem_cache_node(s, node, n) { in kmem_cache_close()
3259 free_partial(s, n); in kmem_cache_close()
3260 if (n->nr_partial || slabs_node(s, node)) in kmem_cache_close()
3263 free_percpu(s->cpu_slab); in kmem_cache_close()
3264 free_kmem_cache_nodes(s); in kmem_cache_close()
3268 int __kmem_cache_shutdown(struct kmem_cache *s) in __kmem_cache_shutdown() argument
3270 return kmem_cache_close(s); in __kmem_cache_shutdown()
3307 struct kmem_cache *s; in __kmalloc() local
3313 s = kmalloc_slab(size, flags); in __kmalloc()
3315 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc()
3316 return s; in __kmalloc()
3318 ret = slab_alloc(s, flags, _RET_IP_); in __kmalloc()
3320 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); in __kmalloc()
3322 kasan_kmalloc(s, ret, size); in __kmalloc()
3345 struct kmem_cache *s; in __kmalloc_node() local
3358 s = kmalloc_slab(size, flags); in __kmalloc_node()
3360 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_node()
3361 return s; in __kmalloc_node()
3363 ret = slab_alloc_node(s, flags, node, _RET_IP_); in __kmalloc_node()
3365 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); in __kmalloc_node()
3367 kasan_kmalloc(s, ret, size); in __kmalloc_node()
3433 int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate) in __kmem_cache_shrink() argument
3450 s->cpu_partial = 0; in __kmem_cache_shrink()
3451 s->min_partial = 0; in __kmem_cache_shrink()
3460 flush_all(s); in __kmem_cache_shrink()
3461 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shrink()
3501 discard_slab(s, page); in __kmem_cache_shrink()
3503 if (slabs_node(s, node)) in __kmem_cache_shrink()
3512 struct kmem_cache *s; in slab_mem_going_offline_callback() local
3515 list_for_each_entry(s, &slab_caches, list) in slab_mem_going_offline_callback()
3516 __kmem_cache_shrink(s, false); in slab_mem_going_offline_callback()
3525 struct kmem_cache *s; in slab_mem_offline_callback() local
3539 list_for_each_entry(s, &slab_caches, list) { in slab_mem_offline_callback()
3540 n = get_node(s, offline_node); in slab_mem_offline_callback()
3548 BUG_ON(slabs_node(s, offline_node)); in slab_mem_offline_callback()
3550 s->node[offline_node] = NULL; in slab_mem_offline_callback()
3560 struct kmem_cache *s; in slab_mem_going_online_callback() local
3578 list_for_each_entry(s, &slab_caches, list) { in slab_mem_going_online_callback()
3590 s->node[nid] = n; in slab_mem_going_online_callback()
3642 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); in bootstrap() local
3645 memcpy(s, static_cache, kmem_cache->object_size); in bootstrap()
3652 __flush_cpu_slab(s, smp_processor_id()); in bootstrap()
3653 for_each_kmem_cache_node(s, node, n) { in bootstrap()
3657 p->slab_cache = s; in bootstrap()
3661 p->slab_cache = s; in bootstrap()
3664 slab_init_memcg_params(s); in bootstrap()
3665 list_add(&s->list, &slab_caches); in bootstrap()
3666 return s; in bootstrap()
3723 struct kmem_cache *s, *c; in __kmem_cache_alias() local
3725 s = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
3726 if (s) { in __kmem_cache_alias()
3727 s->refcount++; in __kmem_cache_alias()
3733 s->object_size = max(s->object_size, (int)size); in __kmem_cache_alias()
3734 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); in __kmem_cache_alias()
3736 for_each_memcg_cache(c, s) { in __kmem_cache_alias()
3737 c->object_size = s->object_size; in __kmem_cache_alias()
3742 if (sysfs_slab_alias(s, name)) { in __kmem_cache_alias()
3743 s->refcount--; in __kmem_cache_alias()
3744 s = NULL; in __kmem_cache_alias()
3748 return s; in __kmem_cache_alias()
3751 int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) in __kmem_cache_create() argument
3755 err = kmem_cache_open(s, flags); in __kmem_cache_create()
3763 memcg_propagate_slab_attrs(s); in __kmem_cache_create()
3764 err = sysfs_slab_add(s); in __kmem_cache_create()
3766 kmem_cache_close(s); in __kmem_cache_create()
3780 struct kmem_cache *s; in slab_cpuup_callback() local
3789 list_for_each_entry(s, &slab_caches, list) { in slab_cpuup_callback()
3791 __flush_cpu_slab(s, cpu); in slab_cpuup_callback()
3810 struct kmem_cache *s; in __kmalloc_track_caller() local
3816 s = kmalloc_slab(size, gfpflags); in __kmalloc_track_caller()
3818 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_track_caller()
3819 return s; in __kmalloc_track_caller()
3821 ret = slab_alloc(s, gfpflags, caller); in __kmalloc_track_caller()
3824 trace_kmalloc(caller, ret, size, s->size, gfpflags); in __kmalloc_track_caller()
3833 struct kmem_cache *s; in __kmalloc_node_track_caller() local
3846 s = kmalloc_slab(size, gfpflags); in __kmalloc_node_track_caller()
3848 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_node_track_caller()
3849 return s; in __kmalloc_node_track_caller()
3851 ret = slab_alloc_node(s, gfpflags, node, caller); in __kmalloc_node_track_caller()
3854 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); in __kmalloc_node_track_caller()
3873 static int validate_slab(struct kmem_cache *s, struct page *page, in validate_slab() argument
3879 if (!check_slab(s, page) || in validate_slab()
3880 !on_freelist(s, page, NULL)) in validate_slab()
3886 get_map(s, page, map); in validate_slab()
3887 for_each_object(p, s, addr, page->objects) { in validate_slab()
3888 if (test_bit(slab_index(p, s, addr), map)) in validate_slab()
3889 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) in validate_slab()
3893 for_each_object(p, s, addr, page->objects) in validate_slab()
3894 if (!test_bit(slab_index(p, s, addr), map)) in validate_slab()
3895 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) in validate_slab()
3900 static void validate_slab_slab(struct kmem_cache *s, struct page *page, in validate_slab_slab() argument
3904 validate_slab(s, page, map); in validate_slab_slab()
3908 static int validate_slab_node(struct kmem_cache *s, in validate_slab_node() argument
3918 validate_slab_slab(s, page, map); in validate_slab_node()
3923 s->name, count, n->nr_partial); in validate_slab_node()
3925 if (!(s->flags & SLAB_STORE_USER)) in validate_slab_node()
3929 validate_slab_slab(s, page, map); in validate_slab_node()
3934 s->name, count, atomic_long_read(&n->nr_slabs)); in validate_slab_node()
3941 static long validate_slab_cache(struct kmem_cache *s) in validate_slab_cache() argument
3945 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * in validate_slab_cache()
3952 flush_all(s); in validate_slab_cache()
3953 for_each_kmem_cache_node(s, node, n) in validate_slab_cache()
3954 count += validate_slab_node(s, n, map); in validate_slab_cache()
4008 static int add_location(struct loc_track *t, struct kmem_cache *s, in add_location() argument
4084 static void process_slab(struct loc_track *t, struct kmem_cache *s, in process_slab() argument
4092 get_map(s, page, map); in process_slab()
4094 for_each_object(p, s, addr, page->objects) in process_slab()
4095 if (!test_bit(slab_index(p, s, addr), map)) in process_slab()
4096 add_location(t, s, get_track(s, p, alloc)); in process_slab()
4099 static int list_locations(struct kmem_cache *s, char *buf, in list_locations() argument
4106 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * in list_locations()
4116 flush_all(s); in list_locations()
4118 for_each_kmem_cache_node(s, node, n) { in list_locations()
4127 process_slab(&t, s, page, alloc, map); in list_locations()
4129 process_slab(&t, s, page, alloc, map); in list_locations()
4259 static ssize_t show_slab_objects(struct kmem_cache *s, in show_slab_objects() argument
4275 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects()
4315 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4333 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4357 static int any_slab_objects(struct kmem_cache *s) in any_slab_objects() argument
4362 for_each_kmem_cache_node(s, node, n) in any_slab_objects()
4375 ssize_t (*show)(struct kmem_cache *s, char *buf);
4376 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4387 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) in slab_size_show() argument
4389 return sprintf(buf, "%d\n", s->size); in slab_size_show()
4393 static ssize_t align_show(struct kmem_cache *s, char *buf) in align_show() argument
4395 return sprintf(buf, "%d\n", s->align); in align_show()
4399 static ssize_t object_size_show(struct kmem_cache *s, char *buf) in object_size_show() argument
4401 return sprintf(buf, "%d\n", s->object_size); in object_size_show()
4405 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) in objs_per_slab_show() argument
4407 return sprintf(buf, "%d\n", oo_objects(s->oo)); in objs_per_slab_show()
4411 static ssize_t order_store(struct kmem_cache *s, in order_store() argument
4424 calculate_sizes(s, order); in order_store()
4428 static ssize_t order_show(struct kmem_cache *s, char *buf) in order_show() argument
4430 return sprintf(buf, "%d\n", oo_order(s->oo)); in order_show()
4434 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) in min_partial_show() argument
4436 return sprintf(buf, "%lu\n", s->min_partial); in min_partial_show()
4439 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, in min_partial_store() argument
4449 set_min_partial(s, min); in min_partial_store()
4454 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) in cpu_partial_show() argument
4456 return sprintf(buf, "%u\n", s->cpu_partial); in cpu_partial_show()
4459 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, in cpu_partial_store() argument
4468 if (objects && !kmem_cache_has_cpu_partial(s)) in cpu_partial_store()
4471 s->cpu_partial = objects; in cpu_partial_store()
4472 flush_all(s); in cpu_partial_store()
4477 static ssize_t ctor_show(struct kmem_cache *s, char *buf) in ctor_show() argument
4479 if (!s->ctor) in ctor_show()
4481 return sprintf(buf, "%pS\n", s->ctor); in ctor_show()
4485 static ssize_t aliases_show(struct kmem_cache *s, char *buf) in aliases_show() argument
4487 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); in aliases_show()
4491 static ssize_t partial_show(struct kmem_cache *s, char *buf) in partial_show() argument
4493 return show_slab_objects(s, buf, SO_PARTIAL); in partial_show()
4497 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) in cpu_slabs_show() argument
4499 return show_slab_objects(s, buf, SO_CPU); in cpu_slabs_show()
4503 static ssize_t objects_show(struct kmem_cache *s, char *buf) in objects_show() argument
4505 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); in objects_show()
4509 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) in objects_partial_show() argument
4511 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); in objects_partial_show()
4515 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) in slabs_cpu_partial_show() argument
4523 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial; in slabs_cpu_partial_show()
4535 struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial; in slabs_cpu_partial_show()
4546 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) in reclaim_account_show() argument
4548 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); in reclaim_account_show()
4551 static ssize_t reclaim_account_store(struct kmem_cache *s, in reclaim_account_store() argument
4554 s->flags &= ~SLAB_RECLAIM_ACCOUNT; in reclaim_account_store()
4556 s->flags |= SLAB_RECLAIM_ACCOUNT; in reclaim_account_store()
4561 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) in hwcache_align_show() argument
4563 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); in hwcache_align_show()
4568 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) in cache_dma_show() argument
4570 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); in cache_dma_show()
4575 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) in destroy_by_rcu_show() argument
4577 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); in destroy_by_rcu_show()
4581 static ssize_t reserved_show(struct kmem_cache *s, char *buf) in reserved_show() argument
4583 return sprintf(buf, "%d\n", s->reserved); in reserved_show()
4588 static ssize_t slabs_show(struct kmem_cache *s, char *buf) in slabs_show() argument
4590 return show_slab_objects(s, buf, SO_ALL); in slabs_show()
4594 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) in total_objects_show() argument
4596 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); in total_objects_show()
4600 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) in sanity_checks_show() argument
4602 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); in sanity_checks_show()
4605 static ssize_t sanity_checks_store(struct kmem_cache *s, in sanity_checks_store() argument
4608 s->flags &= ~SLAB_DEBUG_FREE; in sanity_checks_store()
4610 s->flags &= ~__CMPXCHG_DOUBLE; in sanity_checks_store()
4611 s->flags |= SLAB_DEBUG_FREE; in sanity_checks_store()
4617 static ssize_t trace_show(struct kmem_cache *s, char *buf) in trace_show() argument
4619 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); in trace_show()
4622 static ssize_t trace_store(struct kmem_cache *s, const char *buf, in trace_store() argument
4630 if (s->refcount > 1) in trace_store()
4633 s->flags &= ~SLAB_TRACE; in trace_store()
4635 s->flags &= ~__CMPXCHG_DOUBLE; in trace_store()
4636 s->flags |= SLAB_TRACE; in trace_store()
4642 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) in red_zone_show() argument
4644 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); in red_zone_show()
4647 static ssize_t red_zone_store(struct kmem_cache *s, in red_zone_store() argument
4650 if (any_slab_objects(s)) in red_zone_store()
4653 s->flags &= ~SLAB_RED_ZONE; in red_zone_store()
4655 s->flags &= ~__CMPXCHG_DOUBLE; in red_zone_store()
4656 s->flags |= SLAB_RED_ZONE; in red_zone_store()
4658 calculate_sizes(s, -1); in red_zone_store()
4663 static ssize_t poison_show(struct kmem_cache *s, char *buf) in poison_show() argument
4665 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); in poison_show()
4668 static ssize_t poison_store(struct kmem_cache *s, in poison_store() argument
4671 if (any_slab_objects(s)) in poison_store()
4674 s->flags &= ~SLAB_POISON; in poison_store()
4676 s->flags &= ~__CMPXCHG_DOUBLE; in poison_store()
4677 s->flags |= SLAB_POISON; in poison_store()
4679 calculate_sizes(s, -1); in poison_store()
4684 static ssize_t store_user_show(struct kmem_cache *s, char *buf) in store_user_show() argument
4686 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); in store_user_show()
4689 static ssize_t store_user_store(struct kmem_cache *s, in store_user_store() argument
4692 if (any_slab_objects(s)) in store_user_store()
4695 s->flags &= ~SLAB_STORE_USER; in store_user_store()
4697 s->flags &= ~__CMPXCHG_DOUBLE; in store_user_store()
4698 s->flags |= SLAB_STORE_USER; in store_user_store()
4700 calculate_sizes(s, -1); in store_user_store()
4705 static ssize_t validate_show(struct kmem_cache *s, char *buf) in validate_show() argument
4710 static ssize_t validate_store(struct kmem_cache *s, in validate_store() argument
4716 ret = validate_slab_cache(s); in validate_store()
4724 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) in alloc_calls_show() argument
4726 if (!(s->flags & SLAB_STORE_USER)) in alloc_calls_show()
4728 return list_locations(s, buf, TRACK_ALLOC); in alloc_calls_show()
4732 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) in free_calls_show() argument
4734 if (!(s->flags & SLAB_STORE_USER)) in free_calls_show()
4736 return list_locations(s, buf, TRACK_FREE); in free_calls_show()
4742 static ssize_t failslab_show(struct kmem_cache *s, char *buf) in failslab_show() argument
4744 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); in failslab_show()
4747 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, in failslab_store() argument
4750 if (s->refcount > 1) in failslab_store()
4753 s->flags &= ~SLAB_FAILSLAB; in failslab_store()
4755 s->flags |= SLAB_FAILSLAB; in failslab_store()
4761 static ssize_t shrink_show(struct kmem_cache *s, char *buf) in shrink_show() argument
4766 static ssize_t shrink_store(struct kmem_cache *s, in shrink_store() argument
4770 kmem_cache_shrink(s); in shrink_store()
4778 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) in remote_node_defrag_ratio_show() argument
4780 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); in remote_node_defrag_ratio_show()
4783 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, in remote_node_defrag_ratio_store() argument
4794 s->remote_node_defrag_ratio = ratio * 10; in remote_node_defrag_ratio_store()
4802 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) in show_stat() argument
4813 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; in show_stat()
4831 static void clear_stat(struct kmem_cache *s, enum stat_item si) in clear_stat() argument
4836 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; in clear_stat()
4840 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
4842 return show_stat(s, buf, si); \
4844 static ssize_t text##_store(struct kmem_cache *s, \
4849 clear_stat(s, si); \
4964 struct kmem_cache *s; in slab_attr_show() local
4968 s = to_slab(kobj); in slab_attr_show()
4973 err = attribute->show(s, buf); in slab_attr_show()
4983 struct kmem_cache *s; in slab_attr_store() local
4987 s = to_slab(kobj); in slab_attr_store()
4992 err = attribute->store(s, buf, len); in slab_attr_store()
4994 if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { in slab_attr_store()
4998 if (s->max_attr_size < len) in slab_attr_store()
4999 s->max_attr_size = len; in slab_attr_store()
5018 for_each_memcg_cache(c, s) in slab_attr_store()
5026 static void memcg_propagate_slab_attrs(struct kmem_cache *s) in memcg_propagate_slab_attrs() argument
5033 if (is_root_cache(s)) in memcg_propagate_slab_attrs()
5036 root_cache = s->memcg_params.root_cache; in memcg_propagate_slab_attrs()
5074 attr->store(s, buf, strlen(buf)); in memcg_propagate_slab_attrs()
5112 static inline struct kset *cache_kset(struct kmem_cache *s) in cache_kset() argument
5115 if (!is_root_cache(s)) in cache_kset()
5116 return s->memcg_params.root_cache->memcg_kset; in cache_kset()
5127 static char *create_unique_id(struct kmem_cache *s) in create_unique_id() argument
5142 if (s->flags & SLAB_CACHE_DMA) in create_unique_id()
5144 if (s->flags & SLAB_RECLAIM_ACCOUNT) in create_unique_id()
5146 if (s->flags & SLAB_DEBUG_FREE) in create_unique_id()
5148 if (!(s->flags & SLAB_NOTRACK)) in create_unique_id()
5152 p += sprintf(p, "%07d", s->size); in create_unique_id()
5158 static int sysfs_slab_add(struct kmem_cache *s) in sysfs_slab_add() argument
5162 int unmergeable = slab_unmergeable(s); in sysfs_slab_add()
5170 sysfs_remove_link(&slab_kset->kobj, s->name); in sysfs_slab_add()
5171 name = s->name; in sysfs_slab_add()
5177 name = create_unique_id(s); in sysfs_slab_add()
5180 s->kobj.kset = cache_kset(s); in sysfs_slab_add()
5181 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); in sysfs_slab_add()
5185 err = sysfs_create_group(&s->kobj, &slab_attr_group); in sysfs_slab_add()
5190 if (is_root_cache(s)) { in sysfs_slab_add()
5191 s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj); in sysfs_slab_add()
5192 if (!s->memcg_kset) { in sysfs_slab_add()
5199 kobject_uevent(&s->kobj, KOBJ_ADD); in sysfs_slab_add()
5202 sysfs_slab_alias(s, s->name); in sysfs_slab_add()
5209 kobject_del(&s->kobj); in sysfs_slab_add()
5211 kobject_put(&s->kobj); in sysfs_slab_add()
5215 void sysfs_slab_remove(struct kmem_cache *s) in sysfs_slab_remove() argument
5225 kset_unregister(s->memcg_kset); in sysfs_slab_remove()
5227 kobject_uevent(&s->kobj, KOBJ_REMOVE); in sysfs_slab_remove()
5228 kobject_del(&s->kobj); in sysfs_slab_remove()
5229 kobject_put(&s->kobj); in sysfs_slab_remove()
5237 struct kmem_cache *s; member
5244 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) in sysfs_slab_alias() argument
5253 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); in sysfs_slab_alias()
5260 al->s = s; in sysfs_slab_alias()
5269 struct kmem_cache *s; in slab_sysfs_init() local
5283 list_for_each_entry(s, &slab_caches, list) { in slab_sysfs_init()
5284 err = sysfs_slab_add(s); in slab_sysfs_init()
5287 s->name); in slab_sysfs_init()
5294 err = sysfs_slab_alias(al->s, al->name); in slab_sysfs_init()
5313 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) in get_slabinfo() argument
5321 for_each_kmem_cache_node(s, node, n) { in get_slabinfo()
5331 sinfo->objects_per_slab = oo_objects(s->oo); in get_slabinfo()
5332 sinfo->cache_order = oo_order(s->oo); in get_slabinfo()
5335 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) in slabinfo_show_stats() argument
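
For orientation only: the lines above come from the SLUB allocator's internals, and the identifier s being referenced is always the struct kmem_cache under operation. Below is a minimal, illustrative sketch of the public API those internals back; kmem_cache_alloc() and kmem_cache_free() appear in the listing itself, while the demo_obj type, the "demo_obj" cache name, and the module wiring are assumptions invented for this example, not taken from the listing.

/*
 * Minimal usage sketch (an assumption-laden example, not kernel code):
 * a hypothetical module that creates a cache, allocates one object,
 * frees it, and destroys the cache.
 */
#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {
        int id;
        void *payload;
};

/* This pointer is the "s" referenced throughout the listing. */
static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
        struct demo_obj *obj;

        /* Cache creation ends up in kmem_cache_open()/calculate_sizes() above. */
        demo_cache = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
                                       0, SLAB_HWCACHE_ALIGN, NULL);
        if (!demo_cache)
                return -ENOMEM;

        /* Allocation runs through slab_alloc_node() and, on a miss, __slab_alloc(). */
        obj = kmem_cache_alloc(demo_cache, GFP_KERNEL);
        if (!obj) {
                kmem_cache_destroy(demo_cache);
                return -ENOMEM;
        }

        /* Freeing runs through slab_free() and, on the slow path, __slab_free(). */
        kmem_cache_free(demo_cache, obj);
        return 0;
}

static void __exit demo_exit(void)
{
        /* Teardown reaches __kmem_cache_shutdown()/kmem_cache_close() above. */
        kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");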