Lines matching refs: s (references to the identifier s, a struct kmem_cache *, in the SLUB allocator source; each entry shows the source line number, the code, and the containing function)
118 static inline int kmem_cache_debug(struct kmem_cache *s) in kmem_cache_debug() argument
121 return unlikely(s->flags & SLAB_DEBUG_FLAGS); in kmem_cache_debug()
127 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) in kmem_cache_has_cpu_partial() argument
130 return !kmem_cache_debug(s); in kmem_cache_has_cpu_partial()
204 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
206 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } in sysfs_slab_add() argument
207 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) in sysfs_slab_alias() argument
209 static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { } in memcg_propagate_slab_attrs() argument
212 static inline void stat(const struct kmem_cache *s, enum stat_item si) in stat() argument
219 raw_cpu_inc(s->cpu_slab->stat[si]); in stat()
228 static inline int check_valid_pointer(struct kmem_cache *s, in check_valid_pointer() argument
237 if (object < base || object >= base + page->objects * s->size || in check_valid_pointer()
238 (object - base) % s->size) { in check_valid_pointer()
245 static inline void *get_freepointer(struct kmem_cache *s, void *object) in get_freepointer() argument
247 return *(void **)(object + s->offset); in get_freepointer()
250 static void prefetch_freepointer(const struct kmem_cache *s, void *object) in prefetch_freepointer() argument
252 prefetch(object + s->offset); in prefetch_freepointer()
255 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) in get_freepointer_safe() argument
260 probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p)); in get_freepointer_safe()
262 p = get_freepointer(s, object); in get_freepointer_safe()
267 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) in set_freepointer() argument
269 *(void **)(object + s->offset) = fp; in set_freepointer()
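The get_freepointer()/set_freepointer() helpers above work because SLUB keeps the freelist link inside each free object, at byte offset s->offset. A minimal user-space sketch of that idea follows; the toy_cache structure, names and sizes are illustrative stand-ins, not the kernel's.

	#include <stdio.h>
	#include <stdlib.h>

	/* Toy cache: the link to the next free object is kept inside the free
	 * object itself at a fixed byte offset, like SLUB's s->offset. */
	struct toy_cache {
		size_t size;        /* full object size */
		size_t offset;      /* where the free pointer lives inside the object */
		void *freelist;     /* first free object, or NULL */
	};

	static void *toy_get_freepointer(struct toy_cache *s, void *object)
	{
		return *(void **)((char *)object + s->offset);
	}

	static void toy_set_freepointer(struct toy_cache *s, void *object, void *fp)
	{
		*(void **)((char *)object + s->offset) = fp;
	}

	static void *toy_alloc(struct toy_cache *s)
	{
		void *object = s->freelist;

		if (object)
			s->freelist = toy_get_freepointer(s, object);
		return object;
	}

	static void toy_free(struct toy_cache *s, void *object)
	{
		toy_set_freepointer(s, object, s->freelist);
		s->freelist = object;
	}

	int main(void)
	{
		struct toy_cache s = { .size = 64, .offset = 0, .freelist = NULL };
		char *slab = malloc(4 * s.size);    /* one "slab" worth of objects */

		if (!slab)
			return 1;

		for (int i = 0; i < 4; i++)         /* seed the freelist */
			toy_free(&s, slab + i * s.size);

		void *a = toy_alloc(&s), *b = toy_alloc(&s);
		printf("allocated %p and %p\n", a, b);
		toy_free(&s, b);
		toy_free(&s, a);
		free(slab);
		return 0;
	}

When poisoning, a constructor or RCU freeing is active, the real allocator moves the link past the payload (a non-zero s->offset, set in calculate_sizes() further down) so the link does not overlap data those features still care about; the sketch keeps it at offset 0 for brevity.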
282 static inline int slab_index(void *p, struct kmem_cache *s, void *addr) in slab_index() argument
284 return (p - addr) / s->size; in slab_index()
287 static inline size_t slab_ksize(const struct kmem_cache *s) in slab_ksize() argument
294 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON)) in slab_ksize()
295 return s->object_size; in slab_ksize()
303 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER)) in slab_ksize()
304 return s->inuse; in slab_ksize()
308 return s->size; in slab_ksize()
365 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in __cmpxchg_double_slab() argument
373 if (s->flags & __CMPXCHG_DOUBLE) { in __cmpxchg_double_slab()
393 stat(s, CMPXCHG_DOUBLE_FAIL); in __cmpxchg_double_slab()
396 pr_info("%s %s: cmpxchg double redo ", n, s->name); in __cmpxchg_double_slab()
402 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in cmpxchg_double_slab() argument
409 if (s->flags & __CMPXCHG_DOUBLE) { in cmpxchg_double_slab()
434 stat(s, CMPXCHG_DOUBLE_FAIL); in cmpxchg_double_slab()
437 pr_info("%s %s: cmpxchg double redo ", n, s->name); in cmpxchg_double_slab()
450 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) in get_map() argument
455 for (p = page->freelist; p; p = get_freepointer(s, p)) in get_map()
456 set_bit(slab_index(p, s, addr), map); in get_map()
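slab_index() and get_map() above convert the linked freelist into a bitmap: each free object's index, (p - addr) / s->size, gets its bit set, so later passes can tell free objects from allocated ones. A hedged user-space sketch of that marking pass; the object size, count and single-word bitmap are illustrative choices.

	#include <stdio.h>
	#include <stdlib.h>

	#define OBJ_SIZE 64
	#define NR_OBJS  8

	/* Index of an object inside the slab, like slab_index(): (p - addr) / s->size. */
	static size_t obj_index(char *base, size_t size, void *p)
	{
		return (size_t)((char *)p - base) / size;
	}

	int main(void)
	{
		char *slab = malloc(NR_OBJS * OBJ_SIZE);
		unsigned long map = 0;              /* one bit per object */
		void *freelist = NULL;

		if (!slab)
			return 1;

		/* Put the even-numbered objects on a freelist (link kept in the object). */
		for (int i = 0; i < NR_OBJS; i += 2) {
			void *obj = slab + i * OBJ_SIZE;
			*(void **)obj = freelist;
			freelist = obj;
		}

		/* The get_map()-style pass: walk the freelist, set one bit per free object. */
		for (void *p = freelist; p; p = *(void **)p)
			map |= 1UL << obj_index(slab, OBJ_SIZE, p);

		for (int i = 0; i < NR_OBJS; i++)
			printf("object %d: %s\n", i, (map >> i) & 1 ? "free" : "in use");
		free(slab);
		return 0;
	}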
500 static struct track *get_track(struct kmem_cache *s, void *object, in get_track() argument
505 if (s->offset) in get_track()
506 p = object + s->offset + sizeof(void *); in get_track()
508 p = object + s->inuse; in get_track()
513 static void set_track(struct kmem_cache *s, void *object, in set_track() argument
516 struct track *p = get_track(s, object, alloc); in set_track()
547 static void init_tracking(struct kmem_cache *s, void *object) in init_tracking() argument
549 if (!(s->flags & SLAB_STORE_USER)) in init_tracking()
552 set_track(s, object, TRACK_FREE, 0UL); in init_tracking()
553 set_track(s, object, TRACK_ALLOC, 0UL); in init_tracking()
556 static void print_track(const char *s, struct track *t) in print_track() argument
562 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid); in print_track()
575 static void print_tracking(struct kmem_cache *s, void *object) in print_tracking() argument
577 if (!(s->flags & SLAB_STORE_USER)) in print_tracking()
580 print_track("Allocated", get_track(s, object, TRACK_ALLOC)); in print_tracking()
581 print_track("Freed", get_track(s, object, TRACK_FREE)); in print_tracking()
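get_track() above places two track records (allocation and free) right after the object's metadata: just past the external free pointer when s->offset is set, otherwise at s->inuse. A small user-space sketch of that address arithmetic; toy_track and toy_cache are simplified stand-ins for the kernel structures.

	#include <stdio.h>

	enum { TRACK_ALLOC, TRACK_FREE };

	/* Simplified stand-ins for struct track and struct kmem_cache. */
	struct toy_track { unsigned long addr, when; int cpu, pid; };
	struct toy_cache { size_t offset; size_t inuse; };

	static struct toy_track *toy_get_track(struct toy_cache *s, void *object, int which)
	{
		char *p;

		if (s->offset)
			p = (char *)object + s->offset + sizeof(void *);
		else
			p = (char *)object + s->inuse;

		return (struct toy_track *)p + which;   /* [0] = alloc, [1] = free */
	}

	int main(void)
	{
		char object[128];
		struct toy_cache s = { .offset = 64, .inuse = 64 };

		printf("alloc track at +%td, free track at +%td\n",
		       (char *)toy_get_track(&s, object, TRACK_ALLOC) - object,
		       (char *)toy_get_track(&s, object, TRACK_FREE) - object);
		return 0;
	}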
591 static void slab_bug(struct kmem_cache *s, char *fmt, ...) in slab_bug() argument
600 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); in slab_bug()
607 static void slab_fix(struct kmem_cache *s, char *fmt, ...) in slab_fix() argument
615 pr_err("FIX %s: %pV\n", s->name, &vaf); in slab_fix()
619 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) in print_trailer() argument
624 print_tracking(s, p); in print_trailer()
629 p, p - addr, get_freepointer(s, p)); in print_trailer()
634 print_section("Object ", p, min_t(unsigned long, s->object_size, in print_trailer()
636 if (s->flags & SLAB_RED_ZONE) in print_trailer()
637 print_section("Redzone ", p + s->object_size, in print_trailer()
638 s->inuse - s->object_size); in print_trailer()
640 if (s->offset) in print_trailer()
641 off = s->offset + sizeof(void *); in print_trailer()
643 off = s->inuse; in print_trailer()
645 if (s->flags & SLAB_STORE_USER) in print_trailer()
648 if (off != s->size) in print_trailer()
650 print_section("Padding ", p + off, s->size - off); in print_trailer()
655 void object_err(struct kmem_cache *s, struct page *page, in object_err() argument
658 slab_bug(s, "%s", reason); in object_err()
659 print_trailer(s, page, object); in object_err()
662 static void slab_err(struct kmem_cache *s, struct page *page, in slab_err() argument
671 slab_bug(s, "%s", buf); in slab_err()
676 static void init_object(struct kmem_cache *s, void *object, u8 val) in init_object() argument
680 if (s->flags & __OBJECT_POISON) { in init_object()
681 memset(p, POISON_FREE, s->object_size - 1); in init_object()
682 p[s->object_size - 1] = POISON_END; in init_object()
685 if (s->flags & SLAB_RED_ZONE) in init_object()
686 memset(p + s->object_size, val, s->inuse - s->object_size); in init_object()
689 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, in restore_bytes() argument
692 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); in restore_bytes()
696 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, in check_bytes_and_report() argument
713 slab_bug(s, "%s overwritten", what); in check_bytes_and_report()
716 print_trailer(s, page, object); in check_bytes_and_report()
718 restore_bytes(s, what, value, fault, end); in check_bytes_and_report()
760 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) in check_pad_bytes() argument
762 unsigned long off = s->inuse; /* The end of info */ in check_pad_bytes()
764 if (s->offset) in check_pad_bytes()
768 if (s->flags & SLAB_STORE_USER) in check_pad_bytes()
772 if (s->size == off) in check_pad_bytes()
775 return check_bytes_and_report(s, page, p, "Object padding", in check_pad_bytes()
776 p + off, POISON_INUSE, s->size - off); in check_pad_bytes()
780 static int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
788 if (!(s->flags & SLAB_POISON)) in slab_pad_check()
792 length = (PAGE_SIZE << compound_order(page)) - s->reserved; in slab_pad_check()
794 remainder = length % s->size; in slab_pad_check()
806 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); in slab_pad_check()
809 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end); in slab_pad_check()
813 static int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
817 u8 *endobject = object + s->object_size; in check_object()
819 if (s->flags & SLAB_RED_ZONE) { in check_object()
820 if (!check_bytes_and_report(s, page, object, "Redzone", in check_object()
821 endobject, val, s->inuse - s->object_size)) in check_object()
824 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { in check_object()
825 check_bytes_and_report(s, page, p, "Alignment padding", in check_object()
827 s->inuse - s->object_size); in check_object()
831 if (s->flags & SLAB_POISON) { in check_object()
832 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && in check_object()
833 (!check_bytes_and_report(s, page, p, "Poison", p, in check_object()
834 POISON_FREE, s->object_size - 1) || in check_object()
835 !check_bytes_and_report(s, page, p, "Poison", in check_object()
836 p + s->object_size - 1, POISON_END, 1))) in check_object()
841 check_pad_bytes(s, page, p); in check_object()
844 if (!s->offset && val == SLUB_RED_ACTIVE) in check_object()
852 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { in check_object()
853 object_err(s, page, p, "Freepointer corrupt"); in check_object()
859 set_freepointer(s, p, NULL); in check_object()
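init_object() and check_object()/check_bytes_and_report() above implement poisoning and red zoning: a free object is filled with a poison pattern ending in a distinct last byte, the gap up to s->inuse is filled with a red-zone value, and the checks scan for the first byte that no longer matches. A self-contained sketch of that fill-and-verify cycle; the TOY_* constants are invented and differ from the kernel's poison values.

	#include <stdio.h>
	#include <string.h>

	#define TOY_POISON_FREE 0x6b
	#define TOY_POISON_END  0xa5
	#define TOY_REDZONE     0xbb

	#define OBJ_SIZE 32
	#define INUSE    40   /* object plus red zone, like s->inuse */

	/* check_bytes_and_report()-style scan: verify that a byte range still
	 * holds the expected fill value and report the first corrupted byte. */
	static int check_bytes(const char *what, unsigned char *start,
			       unsigned char value, size_t bytes)
	{
		for (size_t i = 0; i < bytes; i++) {
			if (start[i] != value) {
				printf("%s overwritten at +%zu: 0x%02x instead of 0x%02x\n",
				       what, i, start[i], value);
				return 0;
			}
		}
		return 1;
	}

	int main(void)
	{
		unsigned char object[INUSE];

		/* init_object()-style fill for a free, red-zoned object. */
		memset(object, TOY_POISON_FREE, OBJ_SIZE - 1);
		object[OBJ_SIZE - 1] = TOY_POISON_END;
		memset(object + OBJ_SIZE, TOY_REDZONE, INUSE - OBJ_SIZE);

		object[OBJ_SIZE + 2] = 0;     /* simulate a buffer overrun */

		check_bytes("Poison", object, TOY_POISON_FREE, OBJ_SIZE - 1);
		check_bytes("Redzone", object + OBJ_SIZE, TOY_REDZONE, INUSE - OBJ_SIZE);
		return 0;
	}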
865 static int check_slab(struct kmem_cache *s, struct page *page) in check_slab() argument
872 slab_err(s, page, "Not a valid slab page"); in check_slab()
876 maxobj = order_objects(compound_order(page), s->size, s->reserved); in check_slab()
878 slab_err(s, page, "objects %u > max %u", in check_slab()
883 slab_err(s, page, "inuse %u > max %u", in check_slab()
888 slab_pad_check(s, page); in check_slab()
896 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) in on_freelist() argument
907 if (!check_valid_pointer(s, page, fp)) { in on_freelist()
909 object_err(s, page, object, in on_freelist()
911 set_freepointer(s, object, NULL); in on_freelist()
913 slab_err(s, page, "Freepointer corrupt"); in on_freelist()
916 slab_fix(s, "Freelist cleared"); in on_freelist()
922 fp = get_freepointer(s, object); in on_freelist()
926 max_objects = order_objects(compound_order(page), s->size, s->reserved); in on_freelist()
931 slab_err(s, page, "Wrong number of objects. Found %d but " in on_freelist()
934 slab_fix(s, "Number of objects adjusted."); in on_freelist()
937 slab_err(s, page, "Wrong object count. Counter is %d but " in on_freelist()
940 slab_fix(s, "Object count adjusted."); in on_freelist()
945 static void trace(struct kmem_cache *s, struct page *page, void *object, in trace() argument
948 if (s->flags & SLAB_TRACE) { in trace()
950 s->name, in trace()
957 s->object_size); in trace()
966 static void add_full(struct kmem_cache *s, in add_full() argument
969 if (!(s->flags & SLAB_STORE_USER)) in add_full()
976 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) in remove_full() argument
978 if (!(s->flags & SLAB_STORE_USER)) in remove_full()
986 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
988 struct kmem_cache_node *n = get_node(s, node); in slabs_node()
998 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) in inc_slabs_node() argument
1000 struct kmem_cache_node *n = get_node(s, node); in inc_slabs_node()
1013 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) in dec_slabs_node() argument
1015 struct kmem_cache_node *n = get_node(s, node); in dec_slabs_node()
1022 static void setup_object_debug(struct kmem_cache *s, struct page *page, in setup_object_debug() argument
1025 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))) in setup_object_debug()
1028 init_object(s, object, SLUB_RED_INACTIVE); in setup_object_debug()
1029 init_tracking(s, object); in setup_object_debug()
1032 static noinline int alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1036 if (!check_slab(s, page)) in alloc_debug_processing()
1039 if (!check_valid_pointer(s, page, object)) { in alloc_debug_processing()
1040 object_err(s, page, object, "Freelist Pointer check fails"); in alloc_debug_processing()
1044 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) in alloc_debug_processing()
1048 if (s->flags & SLAB_STORE_USER) in alloc_debug_processing()
1049 set_track(s, object, TRACK_ALLOC, addr); in alloc_debug_processing()
1050 trace(s, page, object, 1); in alloc_debug_processing()
1051 init_object(s, object, SLUB_RED_ACTIVE); in alloc_debug_processing()
1061 slab_fix(s, "Marking all objects used"); in alloc_debug_processing()
1070 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1074 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
1081 if (!check_slab(s, page)) in free_debug_processing()
1087 if (!check_valid_pointer(s, page, object)) { in free_debug_processing()
1088 slab_err(s, page, "Invalid object pointer 0x%p", object); in free_debug_processing()
1092 if (on_freelist(s, page, object)) { in free_debug_processing()
1093 object_err(s, page, object, "Object already free"); in free_debug_processing()
1097 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) in free_debug_processing()
1100 if (unlikely(s != page->slab_cache)) { in free_debug_processing()
1102 slab_err(s, page, "Attempt to free object(0x%p) " in free_debug_processing()
1109 object_err(s, page, object, in free_debug_processing()
1114 if (s->flags & SLAB_STORE_USER) in free_debug_processing()
1115 set_track(s, object, TRACK_FREE, addr); in free_debug_processing()
1116 trace(s, page, object, 0); in free_debug_processing()
1118 init_object(s, object, SLUB_RED_INACTIVE); in free_debug_processing()
1122 object = get_freepointer(s, object); in free_debug_processing()
1127 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", in free_debug_processing()
1140 slab_fix(s, "Object at 0x%p not freed", object); in free_debug_processing()
1226 static inline void setup_object_debug(struct kmem_cache *s, in setup_object_debug() argument
1229 static inline int alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1233 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1237 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
1239 static inline int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
1241 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, in add_full() argument
1243 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, in remove_full() argument
1255 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1259 static inline void inc_slabs_node(struct kmem_cache *s, int node, in inc_slabs_node() argument
1261 static inline void dec_slabs_node(struct kmem_cache *s, int node, in dec_slabs_node() argument
1282 static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, in slab_pre_alloc_hook() argument
1289 if (should_failslab(s->object_size, flags, s->flags)) in slab_pre_alloc_hook()
1292 return memcg_kmem_get_cache(s, flags); in slab_pre_alloc_hook()
1295 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, in slab_post_alloc_hook() argument
1304 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); in slab_post_alloc_hook()
1305 kmemleak_alloc_recursive(object, s->object_size, 1, in slab_post_alloc_hook()
1306 s->flags, flags); in slab_post_alloc_hook()
1307 kasan_slab_alloc(s, object); in slab_post_alloc_hook()
1309 memcg_kmem_put_cache(s); in slab_post_alloc_hook()
1312 static inline void slab_free_hook(struct kmem_cache *s, void *x) in slab_free_hook() argument
1314 kmemleak_free_recursive(x, s->flags); in slab_free_hook()
1326 kmemcheck_slab_free(s, x, s->object_size); in slab_free_hook()
1327 debug_check_no_locks_freed(x, s->object_size); in slab_free_hook()
1331 if (!(s->flags & SLAB_DEBUG_OBJECTS)) in slab_free_hook()
1332 debug_check_no_obj_freed(x, s->object_size); in slab_free_hook()
1334 kasan_slab_free(s, x); in slab_free_hook()
1337 static inline void slab_free_freelist_hook(struct kmem_cache *s, in slab_free_freelist_hook() argument
1354 slab_free_hook(s, object); in slab_free_freelist_hook()
1356 (object = get_freepointer(s, object))); in slab_free_freelist_hook()
1360 static void setup_object(struct kmem_cache *s, struct page *page, in setup_object() argument
1363 setup_object_debug(s, page, object); in setup_object()
1364 if (unlikely(s->ctor)) { in setup_object()
1365 kasan_unpoison_object_data(s, object); in setup_object()
1366 s->ctor(object); in setup_object()
1367 kasan_poison_object_data(s, object); in setup_object()
1374 static inline struct page *alloc_slab_page(struct kmem_cache *s, in alloc_slab_page() argument
1387 if (page && memcg_charge_slab(page, flags, order, s)) { in alloc_slab_page()
1395 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab() argument
1398 struct kmem_cache_order_objects oo = s->oo; in allocate_slab()
1408 flags |= s->allocflags; in allocate_slab()
1415 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) in allocate_slab()
1418 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1420 oo = s->min; in allocate_slab()
1426 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1429 stat(s, ORDER_FALLBACK); in allocate_slab()
1433 !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { in allocate_slab()
1442 if (s->ctor) in allocate_slab()
1451 page->slab_cache = s; in allocate_slab()
1458 if (unlikely(s->flags & SLAB_POISON)) in allocate_slab()
1463 for_each_object_idx(p, idx, s, start, page->objects) { in allocate_slab()
1464 setup_object(s, page, p); in allocate_slab()
1466 set_freepointer(s, p, p + s->size); in allocate_slab()
1468 set_freepointer(s, p, NULL); in allocate_slab()
1482 (s->flags & SLAB_RECLAIM_ACCOUNT) ? in allocate_slab()
1486 inc_slabs_node(s, page_to_nid(page), page->objects); in allocate_slab()
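allocate_slab() above threads every object of a new slab onto one freelist by storing p + s->size into each object and NULL into the last one. A small sketch of that chain construction; the slab here is just a static buffer and the sizes are arbitrary.

	#include <stdio.h>

	#define OBJ_SIZE 64
	#define NR_OBJS  8

	int main(void)
	{
		/* Backing store for the toy slab, kept pointer-aligned. */
		static void *storage[NR_OBJS * OBJ_SIZE / sizeof(void *)];
		char *start = (char *)storage;

		/* allocate_slab()-style chaining: each object points at the next,
		 * the last one terminates the list with NULL. */
		for (int i = 0; i < NR_OBJS; i++) {
			char *p = start + i * OBJ_SIZE;

			if (i < NR_OBJS - 1)
				*(void **)p = p + OBJ_SIZE;
			else
				*(void **)p = NULL;
		}

		int n = 0;
		for (void *p = start; p; p = *(void **)p)
			n++;
		printf("initial freelist holds %d objects\n", n);
		return 0;
	}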
1491 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab() argument
1498 return allocate_slab(s, in new_slab()
1502 static void __free_slab(struct kmem_cache *s, struct page *page) in __free_slab() argument
1507 if (kmem_cache_debug(s)) { in __free_slab()
1510 slab_pad_check(s, page); in __free_slab()
1511 for_each_object(p, s, page_address(page), in __free_slab()
1513 check_object(s, page, p, SLUB_RED_INACTIVE); in __free_slab()
1519 (s->flags & SLAB_RECLAIM_ACCOUNT) ? in __free_slab()
1547 static void free_slab(struct kmem_cache *s, struct page *page) in free_slab() argument
1549 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { in free_slab()
1554 int offset = (PAGE_SIZE << order) - s->reserved; in free_slab()
1556 VM_BUG_ON(s->reserved != sizeof(*head)); in free_slab()
1564 __free_slab(s, page); in free_slab()
1567 static void discard_slab(struct kmem_cache *s, struct page *page) in discard_slab() argument
1569 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
1570 free_slab(s, page); in discard_slab()
1613 static inline void *acquire_slab(struct kmem_cache *s, in acquire_slab() argument
1642 if (!__cmpxchg_double_slab(s, page, in acquire_slab()
1653 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1659 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, in get_partial_node() argument
1683 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
1690 stat(s, ALLOC_FROM_PARTIAL); in get_partial_node()
1693 put_cpu_partial(s, page, 0); in get_partial_node()
1694 stat(s, CPU_PARTIAL_NODE); in get_partial_node()
1696 if (!kmem_cache_has_cpu_partial(s) in get_partial_node()
1697 || available > s->cpu_partial / 2) in get_partial_node()
1708 static void *get_any_partial(struct kmem_cache *s, gfp_t flags, in get_any_partial() argument
1737 if (!s->remote_node_defrag_ratio || in get_any_partial()
1738 get_cycles() % 1024 > s->remote_node_defrag_ratio) in get_any_partial()
1747 n = get_node(s, zone_to_nid(zone)); in get_any_partial()
1750 n->nr_partial > s->min_partial) { in get_any_partial()
1751 object = get_partial_node(s, n, c, flags); in get_any_partial()
1772 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, in get_partial() argument
1783 object = get_partial_node(s, get_node(s, searchnode), c, flags); in get_partial()
1787 return get_any_partial(s, flags, c); in get_partial()
1826 const struct kmem_cache *s, unsigned long tid) in note_cmpxchg_failure() argument
1829 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); in note_cmpxchg_failure()
1831 pr_info("%s %s: cmpxchg redo ", n, s->name); in note_cmpxchg_failure()
1846 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); in note_cmpxchg_failure()
1849 static void init_kmem_cache_cpus(struct kmem_cache *s) in init_kmem_cache_cpus() argument
1854 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); in init_kmem_cache_cpus()
1860 static void deactivate_slab(struct kmem_cache *s, struct page *page, in deactivate_slab() argument
1864 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
1873 stat(s, DEACTIVATE_REMOTE_FREES); in deactivate_slab()
1885 while (freelist && (nextfree = get_freepointer(s, freelist))) { in deactivate_slab()
1892 set_freepointer(s, freelist, prior); in deactivate_slab()
1897 } while (!__cmpxchg_double_slab(s, page, in deactivate_slab()
1929 set_freepointer(s, freelist, old.freelist); in deactivate_slab()
1936 if (!new.inuse && n->nr_partial >= s->min_partial) in deactivate_slab()
1951 if (kmem_cache_debug(s) && !lock) { in deactivate_slab()
1970 remove_full(s, n, page); in deactivate_slab()
1975 stat(s, tail); in deactivate_slab()
1979 stat(s, DEACTIVATE_FULL); in deactivate_slab()
1980 add_full(s, n, page); in deactivate_slab()
1986 if (!__cmpxchg_double_slab(s, page, in deactivate_slab()
1996 stat(s, DEACTIVATE_EMPTY); in deactivate_slab()
1997 discard_slab(s, page); in deactivate_slab()
1998 stat(s, FREE_SLAB); in deactivate_slab()
2009 static void unfreeze_partials(struct kmem_cache *s, in unfreeze_partials() argument
2022 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
2042 } while (!__cmpxchg_double_slab(s, page, in unfreeze_partials()
2047 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { in unfreeze_partials()
2052 stat(s, FREE_ADD_PARTIAL); in unfreeze_partials()
2063 stat(s, DEACTIVATE_EMPTY); in unfreeze_partials()
2064 discard_slab(s, page); in unfreeze_partials()
2065 stat(s, FREE_SLAB); in unfreeze_partials()
2079 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) in put_cpu_partial() argument
2090 oldpage = this_cpu_read(s->cpu_slab->partial); in put_cpu_partial()
2095 if (drain && pobjects > s->cpu_partial) { in put_cpu_partial()
2102 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2107 stat(s, CPU_PARTIAL_DRAIN); in put_cpu_partial()
2118 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) in put_cpu_partial()
2120 if (unlikely(!s->cpu_partial)) { in put_cpu_partial()
2124 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); in put_cpu_partial()
2131 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) in flush_slab() argument
2133 stat(s, CPUSLAB_FLUSH); in flush_slab()
2134 deactivate_slab(s, c->page, c->freelist); in flush_slab()
2146 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) in __flush_cpu_slab() argument
2148 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
2152 flush_slab(s, c); in __flush_cpu_slab()
2154 unfreeze_partials(s, c); in __flush_cpu_slab()
2160 struct kmem_cache *s = d; in flush_cpu_slab() local
2162 __flush_cpu_slab(s, smp_processor_id()); in flush_cpu_slab()
2167 struct kmem_cache *s = info; in has_cpu_slab() local
2168 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab()
2173 static void flush_all(struct kmem_cache *s) in flush_all() argument
2175 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); in flush_all()
2220 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) in slab_out_of_memory() argument
2234 s->name, s->object_size, s->size, oo_order(s->oo), in slab_out_of_memory()
2235 oo_order(s->min)); in slab_out_of_memory()
2237 if (oo_order(s->min) > get_order(s->object_size)) in slab_out_of_memory()
2239 s->name); in slab_out_of_memory()
2241 for_each_kmem_cache_node(s, node, n) { in slab_out_of_memory()
2256 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags, in new_slab_objects() argument
2263 freelist = get_partial(s, flags, node, c); in new_slab_objects()
2268 page = new_slab(s, flags, node); in new_slab_objects()
2270 c = raw_cpu_ptr(s->cpu_slab); in new_slab_objects()
2272 flush_slab(s, c); in new_slab_objects()
2281 stat(s, ALLOC_SLAB); in new_slab_objects()
2308 static inline void *get_freelist(struct kmem_cache *s, struct page *page) in get_freelist() argument
2324 } while (!__cmpxchg_double_slab(s, page, in get_freelist()
2351 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in ___slab_alloc() argument
2369 stat(s, ALLOC_NODE_MISMATCH); in ___slab_alloc()
2370 deactivate_slab(s, page, c->freelist); in ___slab_alloc()
2383 deactivate_slab(s, page, c->freelist); in ___slab_alloc()
2394 freelist = get_freelist(s, page); in ___slab_alloc()
2398 stat(s, DEACTIVATE_BYPASS); in ___slab_alloc()
2402 stat(s, ALLOC_REFILL); in ___slab_alloc()
2411 c->freelist = get_freepointer(s, freelist); in ___slab_alloc()
2420 stat(s, CPU_PARTIAL_ALLOC); in ___slab_alloc()
2425 freelist = new_slab_objects(s, gfpflags, node, &c); in ___slab_alloc()
2428 slab_out_of_memory(s, gfpflags, node); in ___slab_alloc()
2433 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) in ___slab_alloc()
2437 if (kmem_cache_debug(s) && in ___slab_alloc()
2438 !alloc_debug_processing(s, page, freelist, addr)) in ___slab_alloc()
2441 deactivate_slab(s, page, get_freepointer(s, freelist)); in ___slab_alloc()
2451 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in __slab_alloc() argument
2464 c = this_cpu_ptr(s->cpu_slab); in __slab_alloc()
2467 p = ___slab_alloc(s, gfpflags, node, addr, c); in __slab_alloc()
2482 static __always_inline void *slab_alloc_node(struct kmem_cache *s, in slab_alloc_node() argument
2490 s = slab_pre_alloc_hook(s, gfpflags); in slab_alloc_node()
2491 if (!s) in slab_alloc_node()
2505 tid = this_cpu_read(s->cpu_slab->tid); in slab_alloc_node()
2506 c = raw_cpu_ptr(s->cpu_slab); in slab_alloc_node()
2530 object = __slab_alloc(s, gfpflags, node, addr, c); in slab_alloc_node()
2531 stat(s, ALLOC_SLOWPATH); in slab_alloc_node()
2533 void *next_object = get_freepointer_safe(s, object); in slab_alloc_node()
2550 s->cpu_slab->freelist, s->cpu_slab->tid, in slab_alloc_node()
2554 note_cmpxchg_failure("slab_alloc", s, tid); in slab_alloc_node()
2557 prefetch_freepointer(s, next_object); in slab_alloc_node()
2558 stat(s, ALLOC_FASTPATH); in slab_alloc_node()
2562 memset(object, 0, s->object_size); in slab_alloc_node()
2564 slab_post_alloc_hook(s, gfpflags, 1, &object); in slab_alloc_node()
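slab_alloc_node() above is the lockless fastpath: the per-cpu freelist head and a transaction id (tid) are read together and replaced with one cmpxchg_double; if an interrupt, another allocation or a cpu migration intervened, the tid no longer matches and the operation retries via note_cmpxchg_failure(). The following user-space model shows only the pairing idea, packing a small object index and a tid into one 64-bit atomic instead of a real double-word cmpxchg; names and the tid increment are illustrative.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NO_OBJECT 0xffffffffu

	/* Freelist head and transaction id packed into one 64-bit word so a
	 * plain compare_exchange can update both at once; the kernel instead
	 * uses this_cpu_cmpxchg_double() on the real (pointer, tid) pair. */
	static _Atomic uint64_t slot;
	static uint32_t next_of[16];        /* toy freelist links, by object index */

	static uint64_t pack(uint32_t tid, uint32_t obj) { return (uint64_t)tid << 32 | obj; }
	static uint32_t unpack_obj(uint64_t v) { return (uint32_t)v; }
	static uint32_t unpack_tid(uint64_t v) { return (uint32_t)(v >> 32); }

	static int toy_alloc(void)
	{
		for (;;) {
			uint64_t old = atomic_load(&slot);
			uint32_t obj = unpack_obj(old);

			if (obj == NO_OBJECT)
				return -1;              /* would take the slow path */

			uint64_t new = pack(unpack_tid(old) + 1, next_of[obj]);

			/* Fails and retries if freelist or tid changed under us. */
			if (atomic_compare_exchange_weak(&slot, &old, new))
				return (int)obj;
		}
	}

	int main(void)
	{
		/* Freelist: object 0 -> 1 -> 2 -> end. */
		next_of[0] = 1;
		next_of[1] = 2;
		next_of[2] = NO_OBJECT;
		atomic_store(&slot, pack(100, 0));

		for (int i = 0; i < 4; i++)
			printf("allocation %d -> object %d\n", i, toy_alloc());
		return 0;
	}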
2569 static __always_inline void *slab_alloc(struct kmem_cache *s, in slab_alloc() argument
2572 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr); in slab_alloc()
2575 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) in kmem_cache_alloc() argument
2577 void *ret = slab_alloc(s, gfpflags, _RET_IP_); in kmem_cache_alloc()
2579 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, in kmem_cache_alloc()
2580 s->size, gfpflags); in kmem_cache_alloc()
2587 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) in kmem_cache_alloc_trace() argument
2589 void *ret = slab_alloc(s, gfpflags, _RET_IP_); in kmem_cache_alloc_trace()
2590 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); in kmem_cache_alloc_trace()
2591 kasan_kmalloc(s, ret, size); in kmem_cache_alloc_trace()
2598 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) in kmem_cache_alloc_node() argument
2600 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); in kmem_cache_alloc_node()
2603 s->object_size, s->size, gfpflags, node); in kmem_cache_alloc_node()
2610 void *kmem_cache_alloc_node_trace(struct kmem_cache *s, in kmem_cache_alloc_node_trace() argument
2614 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); in kmem_cache_alloc_node_trace()
2617 size, s->size, gfpflags, node); in kmem_cache_alloc_node_trace()
2619 kasan_kmalloc(s, ret, size); in kmem_cache_alloc_node_trace()
2634 static void __slab_free(struct kmem_cache *s, struct page *page, in __slab_free() argument
2646 stat(s, FREE_SLOWPATH); in __slab_free()
2648 if (kmem_cache_debug(s) && in __slab_free()
2649 !(n = free_debug_processing(s, page, head, tail, cnt, in __slab_free()
2660 set_freepointer(s, tail, prior); in __slab_free()
2666 if (kmem_cache_has_cpu_partial(s) && !prior) { in __slab_free()
2678 n = get_node(s, page_to_nid(page)); in __slab_free()
2692 } while (!cmpxchg_double_slab(s, page, in __slab_free()
2704 put_cpu_partial(s, page, 1); in __slab_free()
2705 stat(s, CPU_PARTIAL_FREE); in __slab_free()
2712 stat(s, FREE_FROZEN); in __slab_free()
2716 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) in __slab_free()
2723 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { in __slab_free()
2724 if (kmem_cache_debug(s)) in __slab_free()
2725 remove_full(s, n, page); in __slab_free()
2727 stat(s, FREE_ADD_PARTIAL); in __slab_free()
2738 stat(s, FREE_REMOVE_PARTIAL); in __slab_free()
2741 remove_full(s, n, page); in __slab_free()
2745 stat(s, FREE_SLAB); in __slab_free()
2746 discard_slab(s, page); in __slab_free()
2764 static __always_inline void slab_free(struct kmem_cache *s, struct page *page, in slab_free() argument
2772 slab_free_freelist_hook(s, head, tail); in slab_free()
2782 tid = this_cpu_read(s->cpu_slab->tid); in slab_free()
2783 c = raw_cpu_ptr(s->cpu_slab); in slab_free()
2791 set_freepointer(s, tail_obj, c->freelist); in slab_free()
2794 s->cpu_slab->freelist, s->cpu_slab->tid, in slab_free()
2798 note_cmpxchg_failure("slab_free", s, tid); in slab_free()
2801 stat(s, FREE_FASTPATH); in slab_free()
2803 __slab_free(s, page, head, tail_obj, cnt, addr); in slab_free()
2807 void kmem_cache_free(struct kmem_cache *s, void *x) in kmem_cache_free() argument
2809 s = cache_from_obj(s, x); in kmem_cache_free()
2810 if (!s) in kmem_cache_free()
2812 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); in kmem_cache_free()
2822 struct kmem_cache *s; member
2838 int build_detached_freelist(struct kmem_cache *s, size_t size, in build_detached_freelist() argument
2856 df->s = cache_from_obj(s, object); in build_detached_freelist()
2859 set_freepointer(df->s, object, NULL); in build_detached_freelist()
2874 set_freepointer(df->s, object, df->freelist); in build_detached_freelist()
2894 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
2902 size = build_detached_freelist(s, size, p, &df); in kmem_cache_free_bulk()
2906 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_); in kmem_cache_free_bulk()
2912 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
2919 s = slab_pre_alloc_hook(s, flags); in kmem_cache_alloc_bulk()
2920 if (unlikely(!s)) in kmem_cache_alloc_bulk()
2928 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
2938 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, in kmem_cache_alloc_bulk()
2943 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
2946 c->freelist = get_freepointer(s, object); in kmem_cache_alloc_bulk()
2957 memset(p[j], 0, s->object_size); in kmem_cache_alloc_bulk()
2961 slab_post_alloc_hook(s, flags, size, p); in kmem_cache_alloc_bulk()
2965 slab_post_alloc_hook(s, flags, i, p); in kmem_cache_alloc_bulk()
2966 __kmem_cache_free_bulk(s, i, p); in kmem_cache_alloc_bulk()
3107 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) in alloc_kmem_cache_cpus() argument
3116 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), in alloc_kmem_cache_cpus()
3119 if (!s->cpu_slab) in alloc_kmem_cache_cpus()
3122 init_kmem_cache_cpus(s); in alloc_kmem_cache_cpus()
3174 static void free_kmem_cache_nodes(struct kmem_cache *s) in free_kmem_cache_nodes() argument
3179 for_each_kmem_cache_node(s, node, n) { in free_kmem_cache_nodes()
3181 s->node[node] = NULL; in free_kmem_cache_nodes()
3185 static int init_kmem_cache_nodes(struct kmem_cache *s) in init_kmem_cache_nodes() argument
3200 free_kmem_cache_nodes(s); in init_kmem_cache_nodes()
3204 s->node[node] = n; in init_kmem_cache_nodes()
3210 static void set_min_partial(struct kmem_cache *s, unsigned long min) in set_min_partial() argument
3216 s->min_partial = min; in set_min_partial()
3223 static int calculate_sizes(struct kmem_cache *s, int forced_order) in calculate_sizes() argument
3225 unsigned long flags = s->flags; in calculate_sizes()
3226 unsigned long size = s->object_size; in calculate_sizes()
3243 !s->ctor) in calculate_sizes()
3244 s->flags |= __OBJECT_POISON; in calculate_sizes()
3246 s->flags &= ~__OBJECT_POISON; in calculate_sizes()
3254 if ((flags & SLAB_RED_ZONE) && size == s->object_size) in calculate_sizes()
3262 s->inuse = size; in calculate_sizes()
3265 s->ctor)) { in calculate_sizes()
3274 s->offset = size; in calculate_sizes()
3302 size = ALIGN(size, s->align); in calculate_sizes()
3303 s->size = size; in calculate_sizes()
3307 order = calculate_order(size, s->reserved); in calculate_sizes()
3312 s->allocflags = 0; in calculate_sizes()
3314 s->allocflags |= __GFP_COMP; in calculate_sizes()
3316 if (s->flags & SLAB_CACHE_DMA) in calculate_sizes()
3317 s->allocflags |= GFP_DMA; in calculate_sizes()
3319 if (s->flags & SLAB_RECLAIM_ACCOUNT) in calculate_sizes()
3320 s->allocflags |= __GFP_RECLAIMABLE; in calculate_sizes()
3325 s->oo = oo_make(order, size, s->reserved); in calculate_sizes()
3326 s->min = oo_make(get_order(size), size, s->reserved); in calculate_sizes()
3327 if (oo_objects(s->oo) > oo_objects(s->max)) in calculate_sizes()
3328 s->max = s->oo; in calculate_sizes()
3330 return !!oo_objects(s->oo); in calculate_sizes()
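calculate_sizes() above lays out each object: the payload, an optional red zone, an externally placed free pointer when poisoning, a constructor or RCU is in play, optional alloc/free track records, then alignment. The sketch below models that accumulation in a deliberately simplified form; the ordering and padding rules of the real function are more involved, so treat the numbers as illustrative only.

	#include <stdio.h>

	#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

	struct toy_track { unsigned long addr, when; int cpu, pid; };

	int main(void)
	{
		size_t object_size = 52;
		size_t align = sizeof(void *);
		int red_zone = 1, store_user = 1, needs_offset = 1; /* poison/ctor/RCU */

		size_t size = ALIGN_UP(object_size, sizeof(void *));

		if (red_zone)
			size += sizeof(void *);          /* red zone after the object */

		size_t inuse = size;                     /* like s->inuse */
		size_t offset = 0;

		if (needs_offset) {
			offset = size;                   /* like s->offset */
			size += sizeof(void *);          /* free pointer outside the object */
		}
		if (store_user)
			size += 2 * sizeof(struct toy_track); /* alloc + free tracks */

		size = ALIGN_UP(size, align);

		printf("object_size=%zu inuse=%zu offset=%zu size=%zu\n",
		       object_size, inuse, offset, size);
		return 0;
	}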
3333 static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) in kmem_cache_open() argument
3335 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); in kmem_cache_open()
3336 s->reserved = 0; in kmem_cache_open()
3338 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU)) in kmem_cache_open()
3339 s->reserved = sizeof(struct rcu_head); in kmem_cache_open()
3341 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3348 if (get_order(s->size) > get_order(s->object_size)) { in kmem_cache_open()
3349 s->flags &= ~DEBUG_METADATA_FLAGS; in kmem_cache_open()
3350 s->offset = 0; in kmem_cache_open()
3351 if (!calculate_sizes(s, -1)) in kmem_cache_open()
3358 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0) in kmem_cache_open()
3360 s->flags |= __CMPXCHG_DOUBLE; in kmem_cache_open()
3367 set_min_partial(s, ilog2(s->size) / 2); in kmem_cache_open()
3386 if (!kmem_cache_has_cpu_partial(s)) in kmem_cache_open()
3387 s->cpu_partial = 0; in kmem_cache_open()
3388 else if (s->size >= PAGE_SIZE) in kmem_cache_open()
3389 s->cpu_partial = 2; in kmem_cache_open()
3390 else if (s->size >= 1024) in kmem_cache_open()
3391 s->cpu_partial = 6; in kmem_cache_open()
3392 else if (s->size >= 256) in kmem_cache_open()
3393 s->cpu_partial = 13; in kmem_cache_open()
3395 s->cpu_partial = 30; in kmem_cache_open()
3398 s->remote_node_defrag_ratio = 1000; in kmem_cache_open()
3400 if (!init_kmem_cache_nodes(s)) in kmem_cache_open()
3403 if (alloc_kmem_cache_cpus(s)) in kmem_cache_open()
3406 free_kmem_cache_nodes(s); in kmem_cache_open()
3411 s->name, (unsigned long)s->size, s->size, in kmem_cache_open()
3412 oo_order(s->oo), s->offset, flags); in kmem_cache_open()
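The cpu_partial assignments in kmem_cache_open() above scale the per-cpu partial-slab cache inversely with object size. A tiny sketch reproducing the thresholds shown in the listing; the wrapper function and PAGE_SIZE constant are stand-ins.

	#include <stdio.h>

	#define TOY_PAGE_SIZE 4096

	/* Larger objects get fewer cached partial slabs per cpu. */
	static unsigned int cpu_partial_for(size_t size, int has_cpu_partial)
	{
		if (!has_cpu_partial)
			return 0;
		if (size >= TOY_PAGE_SIZE)
			return 2;
		if (size >= 1024)
			return 6;
		if (size >= 256)
			return 13;
		return 30;
	}

	int main(void)
	{
		size_t sizes[] = { 64, 256, 1024, 8192 };

		for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("size %5zu -> cpu_partial %u\n",
			       sizes[i], cpu_partial_for(sizes[i], 1));
		return 0;
	}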
3416 static void list_slab_objects(struct kmem_cache *s, struct page *page, in list_slab_objects() argument
3426 slab_err(s, page, text, s->name); in list_slab_objects()
3429 get_map(s, page, map); in list_slab_objects()
3430 for_each_object(p, s, addr, page->objects) { in list_slab_objects()
3432 if (!test_bit(slab_index(p, s, addr), map)) { in list_slab_objects()
3434 print_tracking(s, p); in list_slab_objects()
3447 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) in free_partial() argument
3454 discard_slab(s, page); in free_partial()
3456 list_slab_objects(s, page, in free_partial()
3465 static inline int kmem_cache_close(struct kmem_cache *s) in kmem_cache_close() argument
3470 flush_all(s); in kmem_cache_close()
3472 for_each_kmem_cache_node(s, node, n) { in kmem_cache_close()
3473 free_partial(s, n); in kmem_cache_close()
3474 if (n->nr_partial || slabs_node(s, node)) in kmem_cache_close()
3477 free_percpu(s->cpu_slab); in kmem_cache_close()
3478 free_kmem_cache_nodes(s); in kmem_cache_close()
3482 int __kmem_cache_shutdown(struct kmem_cache *s) in __kmem_cache_shutdown() argument
3484 return kmem_cache_close(s); in __kmem_cache_shutdown()
3521 struct kmem_cache *s; in __kmalloc() local
3527 s = kmalloc_slab(size, flags); in __kmalloc()
3529 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc()
3530 return s; in __kmalloc()
3532 ret = slab_alloc(s, flags, _RET_IP_); in __kmalloc()
3534 trace_kmalloc(_RET_IP_, ret, size, s->size, flags); in __kmalloc()
3536 kasan_kmalloc(s, ret, size); in __kmalloc()
3559 struct kmem_cache *s; in __kmalloc_node() local
3572 s = kmalloc_slab(size, flags); in __kmalloc_node()
3574 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_node()
3575 return s; in __kmalloc_node()
3577 ret = slab_alloc_node(s, flags, node, _RET_IP_); in __kmalloc_node()
3579 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); in __kmalloc_node()
3581 kasan_kmalloc(s, ret, size); in __kmalloc_node()
3647 int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate) in __kmem_cache_shrink() argument
3664 s->cpu_partial = 0; in __kmem_cache_shrink()
3665 s->min_partial = 0; in __kmem_cache_shrink()
3674 flush_all(s); in __kmem_cache_shrink()
3675 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shrink()
3715 discard_slab(s, page); in __kmem_cache_shrink()
3717 if (slabs_node(s, node)) in __kmem_cache_shrink()
3726 struct kmem_cache *s; in slab_mem_going_offline_callback() local
3729 list_for_each_entry(s, &slab_caches, list) in slab_mem_going_offline_callback()
3730 __kmem_cache_shrink(s, false); in slab_mem_going_offline_callback()
3739 struct kmem_cache *s; in slab_mem_offline_callback() local
3753 list_for_each_entry(s, &slab_caches, list) { in slab_mem_offline_callback()
3754 n = get_node(s, offline_node); in slab_mem_offline_callback()
3762 BUG_ON(slabs_node(s, offline_node)); in slab_mem_offline_callback()
3764 s->node[offline_node] = NULL; in slab_mem_offline_callback()
3774 struct kmem_cache *s; in slab_mem_going_online_callback() local
3792 list_for_each_entry(s, &slab_caches, list) { in slab_mem_going_online_callback()
3804 s->node[nid] = n; in slab_mem_going_online_callback()
3856 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); in bootstrap() local
3859 memcpy(s, static_cache, kmem_cache->object_size); in bootstrap()
3866 __flush_cpu_slab(s, smp_processor_id()); in bootstrap()
3867 for_each_kmem_cache_node(s, node, n) { in bootstrap()
3871 p->slab_cache = s; in bootstrap()
3875 p->slab_cache = s; in bootstrap()
3878 slab_init_memcg_params(s); in bootstrap()
3879 list_add(&s->list, &slab_caches); in bootstrap()
3880 return s; in bootstrap()
3938 struct kmem_cache *s, *c; in __kmem_cache_alias() local
3940 s = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
3941 if (s) { in __kmem_cache_alias()
3942 s->refcount++; in __kmem_cache_alias()
3948 s->object_size = max(s->object_size, (int)size); in __kmem_cache_alias()
3949 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); in __kmem_cache_alias()
3951 for_each_memcg_cache(c, s) { in __kmem_cache_alias()
3952 c->object_size = s->object_size; in __kmem_cache_alias()
3957 if (sysfs_slab_alias(s, name)) { in __kmem_cache_alias()
3958 s->refcount--; in __kmem_cache_alias()
3959 s = NULL; in __kmem_cache_alias()
3963 return s; in __kmem_cache_alias()
3966 int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) in __kmem_cache_create() argument
3970 err = kmem_cache_open(s, flags); in __kmem_cache_create()
3978 memcg_propagate_slab_attrs(s); in __kmem_cache_create()
3979 err = sysfs_slab_add(s); in __kmem_cache_create()
3981 kmem_cache_close(s); in __kmem_cache_create()
3995 struct kmem_cache *s; in slab_cpuup_callback() local
4004 list_for_each_entry(s, &slab_caches, list) { in slab_cpuup_callback()
4006 __flush_cpu_slab(s, cpu); in slab_cpuup_callback()
4025 struct kmem_cache *s; in __kmalloc_track_caller() local
4031 s = kmalloc_slab(size, gfpflags); in __kmalloc_track_caller()
4033 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_track_caller()
4034 return s; in __kmalloc_track_caller()
4036 ret = slab_alloc(s, gfpflags, caller); in __kmalloc_track_caller()
4039 trace_kmalloc(caller, ret, size, s->size, gfpflags); in __kmalloc_track_caller()
4048 struct kmem_cache *s; in __kmalloc_node_track_caller() local
4061 s = kmalloc_slab(size, gfpflags); in __kmalloc_node_track_caller()
4063 if (unlikely(ZERO_OR_NULL_PTR(s))) in __kmalloc_node_track_caller()
4064 return s; in __kmalloc_node_track_caller()
4066 ret = slab_alloc_node(s, gfpflags, node, caller); in __kmalloc_node_track_caller()
4069 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); in __kmalloc_node_track_caller()
4088 static int validate_slab(struct kmem_cache *s, struct page *page, in validate_slab() argument
4094 if (!check_slab(s, page) || in validate_slab()
4095 !on_freelist(s, page, NULL)) in validate_slab()
4101 get_map(s, page, map); in validate_slab()
4102 for_each_object(p, s, addr, page->objects) { in validate_slab()
4103 if (test_bit(slab_index(p, s, addr), map)) in validate_slab()
4104 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) in validate_slab()
4108 for_each_object(p, s, addr, page->objects) in validate_slab()
4109 if (!test_bit(slab_index(p, s, addr), map)) in validate_slab()
4110 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) in validate_slab()
4115 static void validate_slab_slab(struct kmem_cache *s, struct page *page, in validate_slab_slab() argument
4119 validate_slab(s, page, map); in validate_slab_slab()
4123 static int validate_slab_node(struct kmem_cache *s, in validate_slab_node() argument
4133 validate_slab_slab(s, page, map); in validate_slab_node()
4138 s->name, count, n->nr_partial); in validate_slab_node()
4140 if (!(s->flags & SLAB_STORE_USER)) in validate_slab_node()
4144 validate_slab_slab(s, page, map); in validate_slab_node()
4149 s->name, count, atomic_long_read(&n->nr_slabs)); in validate_slab_node()
4156 static long validate_slab_cache(struct kmem_cache *s) in validate_slab_cache() argument
4160 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * in validate_slab_cache()
4167 flush_all(s); in validate_slab_cache()
4168 for_each_kmem_cache_node(s, node, n) in validate_slab_cache()
4169 count += validate_slab_node(s, n, map); in validate_slab_cache()
4223 static int add_location(struct loc_track *t, struct kmem_cache *s, in add_location() argument
4299 static void process_slab(struct loc_track *t, struct kmem_cache *s, in process_slab() argument
4307 get_map(s, page, map); in process_slab()
4309 for_each_object(p, s, addr, page->objects) in process_slab()
4310 if (!test_bit(slab_index(p, s, addr), map)) in process_slab()
4311 add_location(t, s, get_track(s, p, alloc)); in process_slab()
4314 static int list_locations(struct kmem_cache *s, char *buf, in list_locations() argument
4321 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * in list_locations()
4331 flush_all(s); in list_locations()
4333 for_each_kmem_cache_node(s, node, n) { in list_locations()
4342 process_slab(&t, s, page, alloc, map); in list_locations()
4344 process_slab(&t, s, page, alloc, map); in list_locations()
4474 static ssize_t show_slab_objects(struct kmem_cache *s, in show_slab_objects() argument
4490 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects()
4530 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4548 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
4572 static int any_slab_objects(struct kmem_cache *s) in any_slab_objects() argument
4577 for_each_kmem_cache_node(s, node, n) in any_slab_objects()
4590 ssize_t (*show)(struct kmem_cache *s, char *buf);
4591 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
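The show/store members above are the heart of the sysfs plumbing: every attribute file is a named pair of callbacks, and slab_attr_show()/slab_attr_store() further down dispatch to them. A user-space sketch of that table-of-callbacks pattern; toy_cache, the two attribute names kept here and the buffer handling are simplified stand-ins.

	#include <stdio.h>

	/* Stand-ins for struct kmem_cache and struct slab_attribute. */
	struct toy_cache {
		size_t size;
		unsigned long min_partial;
	};

	struct toy_attribute {
		const char *name;
		int (*show)(struct toy_cache *s, char *buf, size_t len);
		int (*store)(struct toy_cache *s, const char *buf);
	};

	static int slab_size_show(struct toy_cache *s, char *buf, size_t len)
	{
		return snprintf(buf, len, "%zu\n", s->size);
	}

	static int min_partial_show(struct toy_cache *s, char *buf, size_t len)
	{
		return snprintf(buf, len, "%lu\n", s->min_partial);
	}

	static int min_partial_store(struct toy_cache *s, const char *buf)
	{
		return sscanf(buf, "%lu", &s->min_partial) == 1 ? 0 : -1;
	}

	static const struct toy_attribute attrs[] = {
		{ "slab_size",   slab_size_show,   NULL },              /* read-only */
		{ "min_partial", min_partial_show, min_partial_store },
	};

	int main(void)
	{
		struct toy_cache s = { .size = 192, .min_partial = 5 };
		char buf[64];

		/* Generic dispatch, as slab_attr_show() does via the attribute table. */
		for (unsigned i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
			attrs[i].show(&s, buf, sizeof(buf));
			printf("%s: %s", attrs[i].name, buf);
		}

		attrs[1].store(&s, "8\n");          /* like writing the sysfs file */
		min_partial_show(&s, buf, sizeof(buf));
		printf("min_partial after store: %s", buf);
		return 0;
	}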
4602 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) in slab_size_show() argument
4604 return sprintf(buf, "%d\n", s->size); in slab_size_show()
4608 static ssize_t align_show(struct kmem_cache *s, char *buf) in align_show() argument
4610 return sprintf(buf, "%d\n", s->align); in align_show()
4614 static ssize_t object_size_show(struct kmem_cache *s, char *buf) in object_size_show() argument
4616 return sprintf(buf, "%d\n", s->object_size); in object_size_show()
4620 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) in objs_per_slab_show() argument
4622 return sprintf(buf, "%d\n", oo_objects(s->oo)); in objs_per_slab_show()
4626 static ssize_t order_store(struct kmem_cache *s, in order_store() argument
4639 calculate_sizes(s, order); in order_store()
4643 static ssize_t order_show(struct kmem_cache *s, char *buf) in order_show() argument
4645 return sprintf(buf, "%d\n", oo_order(s->oo)); in order_show()
4649 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) in min_partial_show() argument
4651 return sprintf(buf, "%lu\n", s->min_partial); in min_partial_show()
4654 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, in min_partial_store() argument
4664 set_min_partial(s, min); in min_partial_store()
4669 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) in cpu_partial_show() argument
4671 return sprintf(buf, "%u\n", s->cpu_partial); in cpu_partial_show()
4674 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, in cpu_partial_store() argument
4683 if (objects && !kmem_cache_has_cpu_partial(s)) in cpu_partial_store()
4686 s->cpu_partial = objects; in cpu_partial_store()
4687 flush_all(s); in cpu_partial_store()
4692 static ssize_t ctor_show(struct kmem_cache *s, char *buf) in ctor_show() argument
4694 if (!s->ctor) in ctor_show()
4696 return sprintf(buf, "%pS\n", s->ctor); in ctor_show()
4700 static ssize_t aliases_show(struct kmem_cache *s, char *buf) in aliases_show() argument
4702 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); in aliases_show()
4706 static ssize_t partial_show(struct kmem_cache *s, char *buf) in partial_show() argument
4708 return show_slab_objects(s, buf, SO_PARTIAL); in partial_show()
4712 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) in cpu_slabs_show() argument
4714 return show_slab_objects(s, buf, SO_CPU); in cpu_slabs_show()
4718 static ssize_t objects_show(struct kmem_cache *s, char *buf) in objects_show() argument
4720 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); in objects_show()
4724 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) in objects_partial_show() argument
4726 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); in objects_partial_show()
4730 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) in slabs_cpu_partial_show() argument
4738 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial; in slabs_cpu_partial_show()
4750 struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial; in slabs_cpu_partial_show()
4761 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) in reclaim_account_show() argument
4763 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); in reclaim_account_show()
4766 static ssize_t reclaim_account_store(struct kmem_cache *s, in reclaim_account_store() argument
4769 s->flags &= ~SLAB_RECLAIM_ACCOUNT; in reclaim_account_store()
4771 s->flags |= SLAB_RECLAIM_ACCOUNT; in reclaim_account_store()
4776 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) in hwcache_align_show() argument
4778 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); in hwcache_align_show()
4783 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) in cache_dma_show() argument
4785 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); in cache_dma_show()
4790 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) in destroy_by_rcu_show() argument
4792 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU)); in destroy_by_rcu_show()
4796 static ssize_t reserved_show(struct kmem_cache *s, char *buf) in reserved_show() argument
4798 return sprintf(buf, "%d\n", s->reserved); in reserved_show()
4803 static ssize_t slabs_show(struct kmem_cache *s, char *buf) in slabs_show() argument
4805 return show_slab_objects(s, buf, SO_ALL); in slabs_show()
4809 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) in total_objects_show() argument
4811 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); in total_objects_show()
4815 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) in sanity_checks_show() argument
4817 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE)); in sanity_checks_show()
4820 static ssize_t sanity_checks_store(struct kmem_cache *s, in sanity_checks_store() argument
4823 s->flags &= ~SLAB_DEBUG_FREE; in sanity_checks_store()
4825 s->flags &= ~__CMPXCHG_DOUBLE; in sanity_checks_store()
4826 s->flags |= SLAB_DEBUG_FREE; in sanity_checks_store()
4832 static ssize_t trace_show(struct kmem_cache *s, char *buf) in trace_show() argument
4834 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE)); in trace_show()
4837 static ssize_t trace_store(struct kmem_cache *s, const char *buf, in trace_store() argument
4845 if (s->refcount > 1) in trace_store()
4848 s->flags &= ~SLAB_TRACE; in trace_store()
4850 s->flags &= ~__CMPXCHG_DOUBLE; in trace_store()
4851 s->flags |= SLAB_TRACE; in trace_store()
4857 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) in red_zone_show() argument
4859 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); in red_zone_show()
4862 static ssize_t red_zone_store(struct kmem_cache *s, in red_zone_store() argument
4865 if (any_slab_objects(s)) in red_zone_store()
4868 s->flags &= ~SLAB_RED_ZONE; in red_zone_store()
4870 s->flags &= ~__CMPXCHG_DOUBLE; in red_zone_store()
4871 s->flags |= SLAB_RED_ZONE; in red_zone_store()
4873 calculate_sizes(s, -1); in red_zone_store()
4878 static ssize_t poison_show(struct kmem_cache *s, char *buf) in poison_show() argument
4880 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON)); in poison_show()
4883 static ssize_t poison_store(struct kmem_cache *s, in poison_store() argument
4886 if (any_slab_objects(s)) in poison_store()
4889 s->flags &= ~SLAB_POISON; in poison_store()
4891 s->flags &= ~__CMPXCHG_DOUBLE; in poison_store()
4892 s->flags |= SLAB_POISON; in poison_store()
4894 calculate_sizes(s, -1); in poison_store()
4899 static ssize_t store_user_show(struct kmem_cache *s, char *buf) in store_user_show() argument
4901 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); in store_user_show()
4904 static ssize_t store_user_store(struct kmem_cache *s, in store_user_store() argument
4907 if (any_slab_objects(s)) in store_user_store()
4910 s->flags &= ~SLAB_STORE_USER; in store_user_store()
4912 s->flags &= ~__CMPXCHG_DOUBLE; in store_user_store()
4913 s->flags |= SLAB_STORE_USER; in store_user_store()
4915 calculate_sizes(s, -1); in store_user_store()
4920 static ssize_t validate_show(struct kmem_cache *s, char *buf) in validate_show() argument
4925 static ssize_t validate_store(struct kmem_cache *s, in validate_store() argument
4931 ret = validate_slab_cache(s); in validate_store()
4939 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf) in alloc_calls_show() argument
4941 if (!(s->flags & SLAB_STORE_USER)) in alloc_calls_show()
4943 return list_locations(s, buf, TRACK_ALLOC); in alloc_calls_show()
4947 static ssize_t free_calls_show(struct kmem_cache *s, char *buf) in free_calls_show() argument
4949 if (!(s->flags & SLAB_STORE_USER)) in free_calls_show()
4951 return list_locations(s, buf, TRACK_FREE); in free_calls_show()
4957 static ssize_t failslab_show(struct kmem_cache *s, char *buf) in failslab_show() argument
4959 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); in failslab_show()
4962 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, in failslab_store() argument
4965 if (s->refcount > 1) in failslab_store()
4968 s->flags &= ~SLAB_FAILSLAB; in failslab_store()
4970 s->flags |= SLAB_FAILSLAB; in failslab_store()
4976 static ssize_t shrink_show(struct kmem_cache *s, char *buf) in shrink_show() argument
4981 static ssize_t shrink_store(struct kmem_cache *s, in shrink_store() argument
4985 kmem_cache_shrink(s); in shrink_store()
4993 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) in remote_node_defrag_ratio_show() argument
4995 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); in remote_node_defrag_ratio_show()
4998 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, in remote_node_defrag_ratio_store() argument
5009 s->remote_node_defrag_ratio = ratio * 10; in remote_node_defrag_ratio_store()
5017 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) in show_stat() argument
5028 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; in show_stat()
5046 static void clear_stat(struct kmem_cache *s, enum stat_item si) in clear_stat() argument
5051 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; in clear_stat()
5055 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5057 return show_stat(s, buf, si); \
5059 static ssize_t text##_store(struct kmem_cache *s, \
5064 clear_stat(s, si); \
5179 struct kmem_cache *s; in slab_attr_show() local
5183 s = to_slab(kobj); in slab_attr_show()
5188 err = attribute->show(s, buf); in slab_attr_show()
5198 struct kmem_cache *s; in slab_attr_store() local
5202 s = to_slab(kobj); in slab_attr_store()
5207 err = attribute->store(s, buf, len); in slab_attr_store()
5209 if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { in slab_attr_store()
5213 if (s->max_attr_size < len) in slab_attr_store()
5214 s->max_attr_size = len; in slab_attr_store()
5233 for_each_memcg_cache(c, s) in slab_attr_store()
5241 static void memcg_propagate_slab_attrs(struct kmem_cache *s) in memcg_propagate_slab_attrs() argument
5248 if (is_root_cache(s)) in memcg_propagate_slab_attrs()
5251 root_cache = s->memcg_params.root_cache; in memcg_propagate_slab_attrs()
5289 attr->store(s, buf, strlen(buf)); in memcg_propagate_slab_attrs()
5327 static inline struct kset *cache_kset(struct kmem_cache *s) in cache_kset() argument
5330 if (!is_root_cache(s)) in cache_kset()
5331 return s->memcg_params.root_cache->memcg_kset; in cache_kset()
5342 static char *create_unique_id(struct kmem_cache *s) in create_unique_id() argument
5357 if (s->flags & SLAB_CACHE_DMA) in create_unique_id()
5359 if (s->flags & SLAB_RECLAIM_ACCOUNT) in create_unique_id()
5361 if (s->flags & SLAB_DEBUG_FREE) in create_unique_id()
5363 if (!(s->flags & SLAB_NOTRACK)) in create_unique_id()
5367 p += sprintf(p, "%07d", s->size); in create_unique_id()
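create_unique_id() above derives a sysfs name for a merged cache from one character per relevant flag followed by the zero-padded object size. A simplified sketch of that encoding; the TOY_* flag bits are invented, and the real function emits additional separators and covers more flags.

	#include <stdio.h>

	/* Invented flag bits; only the character-per-flag-then-size encoding
	 * is the point here. */
	#define TOY_CACHE_DMA        0x1u
	#define TOY_RECLAIM_ACCOUNT  0x2u
	#define TOY_DEBUG_FREE       0x4u
	#define TOY_NOTRACK          0x8u

	static void unique_id(char *buf, size_t len, unsigned flags, unsigned size)
	{
		char *p = buf;

		if (flags & TOY_CACHE_DMA)
			*p++ = 'd';
		if (flags & TOY_RECLAIM_ACCOUNT)
			*p++ = 'a';
		if (flags & TOY_DEBUG_FREE)
			*p++ = 'F';
		if (!(flags & TOY_NOTRACK))
			*p++ = 't';
		snprintf(p, len - (size_t)(p - buf), "%07u", size);
	}

	int main(void)
	{
		char id[32];

		unique_id(id, sizeof(id), TOY_CACHE_DMA | TOY_DEBUG_FREE, 192);
		printf("%s\n", id);     /* prints "dFt0000192" */
		return 0;
	}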
5373 static int sysfs_slab_add(struct kmem_cache *s) in sysfs_slab_add() argument
5377 int unmergeable = slab_unmergeable(s); in sysfs_slab_add()
5385 sysfs_remove_link(&slab_kset->kobj, s->name); in sysfs_slab_add()
5386 name = s->name; in sysfs_slab_add()
5392 name = create_unique_id(s); in sysfs_slab_add()
5395 s->kobj.kset = cache_kset(s); in sysfs_slab_add()
5396 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); in sysfs_slab_add()
5400 err = sysfs_create_group(&s->kobj, &slab_attr_group); in sysfs_slab_add()
5405 if (is_root_cache(s)) { in sysfs_slab_add()
5406 s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj); in sysfs_slab_add()
5407 if (!s->memcg_kset) { in sysfs_slab_add()
5414 kobject_uevent(&s->kobj, KOBJ_ADD); in sysfs_slab_add()
5417 sysfs_slab_alias(s, s->name); in sysfs_slab_add()
5424 kobject_del(&s->kobj); in sysfs_slab_add()
5428 void sysfs_slab_remove(struct kmem_cache *s) in sysfs_slab_remove() argument
5438 kset_unregister(s->memcg_kset); in sysfs_slab_remove()
5440 kobject_uevent(&s->kobj, KOBJ_REMOVE); in sysfs_slab_remove()
5441 kobject_del(&s->kobj); in sysfs_slab_remove()
5442 kobject_put(&s->kobj); in sysfs_slab_remove()
5450 struct kmem_cache *s; member
5457 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) in sysfs_slab_alias() argument
5466 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); in sysfs_slab_alias()
5473 al->s = s; in sysfs_slab_alias()
5482 struct kmem_cache *s; in slab_sysfs_init() local
5496 list_for_each_entry(s, &slab_caches, list) { in slab_sysfs_init()
5497 err = sysfs_slab_add(s); in slab_sysfs_init()
5500 s->name); in slab_sysfs_init()
5507 err = sysfs_slab_alias(al->s, al->name); in slab_sysfs_init()
5526 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) in get_slabinfo() argument
5534 for_each_kmem_cache_node(s, node, n) { in get_slabinfo()
5544 sinfo->objects_per_slab = oo_objects(s->oo); in get_slabinfo()
5545 sinfo->cache_order = oo_order(s->oo); in get_slabinfo()
5548 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) in slabinfo_show_stats() argument