Lines Matching refs:page (mm/slub.c)
229 struct page *page, const void *object) in check_valid_pointer() argument
236 base = page_address(page); in check_valid_pointer()
237 if (object < base || object >= base + page->objects * s->size || in check_valid_pointer()
339 static __always_inline void slab_lock(struct page *page) in slab_lock() argument
341 bit_spin_lock(PG_locked, &page->flags); in slab_lock()
344 static __always_inline void slab_unlock(struct page *page) in slab_unlock() argument
346 __bit_spin_unlock(PG_locked, &page->flags); in slab_unlock()
349 static inline void set_page_slub_counters(struct page *page, unsigned long counters_new) in set_page_slub_counters() argument
351 struct page tmp; in set_page_slub_counters()
359 page->frozen = tmp.frozen; in set_page_slub_counters()
360 page->inuse = tmp.inuse; in set_page_slub_counters()
361 page->objects = tmp.objects; in set_page_slub_counters()
365 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in __cmpxchg_double_slab() argument
374 if (cmpxchg_double(&page->freelist, &page->counters, in __cmpxchg_double_slab()
381 slab_lock(page); in __cmpxchg_double_slab()
382 if (page->freelist == freelist_old && in __cmpxchg_double_slab()
383 page->counters == counters_old) { in __cmpxchg_double_slab()
384 page->freelist = freelist_new; in __cmpxchg_double_slab()
385 set_page_slub_counters(page, counters_new); in __cmpxchg_double_slab()
386 slab_unlock(page); in __cmpxchg_double_slab()
389 slab_unlock(page); in __cmpxchg_double_slab()
402 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in cmpxchg_double_slab() argument
410 if (cmpxchg_double(&page->freelist, &page->counters, in cmpxchg_double_slab()
420 slab_lock(page); in cmpxchg_double_slab()
421 if (page->freelist == freelist_old && in cmpxchg_double_slab()
422 page->counters == counters_old) { in cmpxchg_double_slab()
423 page->freelist = freelist_new; in cmpxchg_double_slab()
424 set_page_slub_counters(page, counters_new); in cmpxchg_double_slab()
425 slab_unlock(page); in cmpxchg_double_slab()
429 slab_unlock(page); in cmpxchg_double_slab()
450 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) in get_map() argument
453 void *addr = page_address(page); in get_map()
455 for (p = page->freelist; p; p = get_freepointer(s, p)) in get_map()
584 static void print_page_info(struct page *page) in print_page_info() argument
587 page, page->objects, page->inuse, page->freelist, page->flags); in print_page_info()
619 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) in print_trailer() argument
622 u8 *addr = page_address(page); in print_trailer()
626 print_page_info(page); in print_trailer()
655 void object_err(struct kmem_cache *s, struct page *page, in object_err() argument
659 print_trailer(s, page, object); in object_err()
662 static void slab_err(struct kmem_cache *s, struct page *page, in slab_err() argument
672 print_page_info(page); in slab_err()
696 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, in check_bytes_and_report() argument
716 print_trailer(s, page, object); in check_bytes_and_report()
760 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) in check_pad_bytes() argument
775 return check_bytes_and_report(s, page, p, "Object padding", in check_pad_bytes()
780 static int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
791 start = page_address(page); in slab_pad_check()
792 length = (PAGE_SIZE << compound_order(page)) - s->reserved; in slab_pad_check()
806 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); in slab_pad_check()
813 static int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
820 if (!check_bytes_and_report(s, page, object, "Redzone", in check_object()
825 check_bytes_and_report(s, page, p, "Alignment padding", in check_object()
833 (!check_bytes_and_report(s, page, p, "Poison", p, in check_object()
835 !check_bytes_and_report(s, page, p, "Poison", in check_object()
841 check_pad_bytes(s, page, p); in check_object()
852 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { in check_object()
853 object_err(s, page, p, "Freepointer corrupt"); in check_object()
865 static int check_slab(struct kmem_cache *s, struct page *page) in check_slab() argument
871 if (!PageSlab(page)) { in check_slab()
872 slab_err(s, page, "Not a valid slab page"); in check_slab()
876 maxobj = order_objects(compound_order(page), s->size, s->reserved); in check_slab()
877 if (page->objects > maxobj) { in check_slab()
878 slab_err(s, page, "objects %u > max %u", in check_slab()
879 page->objects, maxobj); in check_slab()
882 if (page->inuse > page->objects) { in check_slab()
883 slab_err(s, page, "inuse %u > max %u", in check_slab()
884 page->inuse, page->objects); in check_slab()
888 slab_pad_check(s, page); in check_slab()
896 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) in on_freelist() argument
903 fp = page->freelist; in on_freelist()
904 while (fp && nr <= page->objects) { in on_freelist()
907 if (!check_valid_pointer(s, page, fp)) { in on_freelist()
909 object_err(s, page, object, in on_freelist()
913 slab_err(s, page, "Freepointer corrupt"); in on_freelist()
914 page->freelist = NULL; in on_freelist()
915 page->inuse = page->objects; in on_freelist()
926 max_objects = order_objects(compound_order(page), s->size, s->reserved); in on_freelist()
930 if (page->objects != max_objects) { in on_freelist()
931 slab_err(s, page, "Wrong number of objects. Found %d but " in on_freelist()
932 "should be %d", page->objects, max_objects); in on_freelist()
933 page->objects = max_objects; in on_freelist()
936 if (page->inuse != page->objects - nr) { in on_freelist()
937 slab_err(s, page, "Wrong object count. Counter is %d but " in on_freelist()
938 "counted were %d", page->inuse, page->objects - nr); in on_freelist()
939 page->inuse = page->objects - nr; in on_freelist()
945 static void trace(struct kmem_cache *s, struct page *page, void *object, in trace() argument
952 object, page->inuse, in trace()
953 page->freelist); in trace()
967 struct kmem_cache_node *n, struct page *page) in add_full() argument
973 list_add(&page->lru, &n->full); in add_full()
976 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) in remove_full() argument
982 list_del(&page->lru); in remove_full()
1022 static void setup_object_debug(struct kmem_cache *s, struct page *page, in setup_object_debug() argument
1033 struct page *page, in alloc_debug_processing() argument
1036 if (!check_slab(s, page)) in alloc_debug_processing()
1039 if (!check_valid_pointer(s, page, object)) { in alloc_debug_processing()
1040 object_err(s, page, object, "Freelist Pointer check fails"); in alloc_debug_processing()
1044 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) in alloc_debug_processing()
1050 trace(s, page, object, 1); in alloc_debug_processing()
1055 if (PageSlab(page)) { in alloc_debug_processing()
1062 page->inuse = page->objects; in alloc_debug_processing()
1063 page->freelist = NULL; in alloc_debug_processing()
1070 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1074 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
1079 slab_lock(page); in free_debug_processing()
1081 if (!check_slab(s, page)) in free_debug_processing()
1087 if (!check_valid_pointer(s, page, object)) { in free_debug_processing()
1088 slab_err(s, page, "Invalid object pointer 0x%p", object); in free_debug_processing()
1092 if (on_freelist(s, page, object)) { in free_debug_processing()
1093 object_err(s, page, object, "Object already free"); in free_debug_processing()
1097 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) in free_debug_processing()
1100 if (unlikely(s != page->slab_cache)) { in free_debug_processing()
1101 if (!PageSlab(page)) { in free_debug_processing()
1102 slab_err(s, page, "Attempt to free object(0x%p) " in free_debug_processing()
1104 } else if (!page->slab_cache) { in free_debug_processing()
1109 object_err(s, page, object, in free_debug_processing()
1116 trace(s, page, object, 0); in free_debug_processing()
1127 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", in free_debug_processing()
1130 slab_unlock(page); in free_debug_processing()
1138 slab_unlock(page); in free_debug_processing()
1227 struct page *page, void *object) {} in setup_object_debug() argument
1230 struct page *page, void *object, unsigned long addr) { return 0; } in alloc_debug_processing() argument
1233 struct kmem_cache *s, struct page *page, in free_debug_processing() argument
1237 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
1239 static inline int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
1242 struct page *page) {} in add_full() argument
1244 struct page *page) {} in remove_full() argument
1360 static void setup_object(struct kmem_cache *s, struct page *page, in setup_object() argument
1363 setup_object_debug(s, page, object); in setup_object()
1374 static inline struct page *alloc_slab_page(struct kmem_cache *s, in alloc_slab_page()
1377 struct page *page; in alloc_slab_page() local
1383 page = alloc_pages(flags, order); in alloc_slab_page()
1385 page = __alloc_pages_node(node, flags, order); in alloc_slab_page()
1387 if (page && memcg_charge_slab(page, flags, order, s)) { in alloc_slab_page()
1388 __free_pages(page, order); in alloc_slab_page()
1389 page = NULL; in alloc_slab_page()
1392 return page; in alloc_slab_page()
1395 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab()
1397 struct page *page; in allocate_slab() local
1418 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1419 if (unlikely(!page)) { in allocate_slab()
1426 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1427 if (unlikely(!page)) in allocate_slab()
1436 kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node); in allocate_slab()
1443 kmemcheck_mark_uninitialized_pages(page, pages); in allocate_slab()
1445 kmemcheck_mark_unallocated_pages(page, pages); in allocate_slab()
1448 page->objects = oo_objects(oo); in allocate_slab()
1450 order = compound_order(page); in allocate_slab()
1451 page->slab_cache = s; in allocate_slab()
1452 __SetPageSlab(page); in allocate_slab()
1453 if (page_is_pfmemalloc(page)) in allocate_slab()
1454 SetPageSlabPfmemalloc(page); in allocate_slab()
1456 start = page_address(page); in allocate_slab()
1461 kasan_poison_slab(page); in allocate_slab()
1463 for_each_object_idx(p, idx, s, start, page->objects) { in allocate_slab()
1464 setup_object(s, page, p); in allocate_slab()
1465 if (likely(idx < page->objects)) in allocate_slab()
1471 page->freelist = start; in allocate_slab()
1472 page->inuse = page->objects; in allocate_slab()
1473 page->frozen = 1; in allocate_slab()
1478 if (!page) in allocate_slab()
1481 mod_zone_page_state(page_zone(page), in allocate_slab()
1486 inc_slabs_node(s, page_to_nid(page), page->objects); in allocate_slab()
1488 return page; in allocate_slab()
1491 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab()
1502 static void __free_slab(struct kmem_cache *s, struct page *page) in __free_slab() argument
1504 int order = compound_order(page); in __free_slab()
1510 slab_pad_check(s, page); in __free_slab()
1511 for_each_object(p, s, page_address(page), in __free_slab()
1512 page->objects) in __free_slab()
1513 check_object(s, page, p, SLUB_RED_INACTIVE); in __free_slab()
1516 kmemcheck_free_shadow(page, compound_order(page)); in __free_slab()
1518 mod_zone_page_state(page_zone(page), in __free_slab()
1523 __ClearPageSlabPfmemalloc(page); in __free_slab()
1524 __ClearPageSlab(page); in __free_slab()
1526 page_mapcount_reset(page); in __free_slab()
1529 __free_kmem_pages(page, order); in __free_slab()
1533 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1537 struct page *page; in rcu_free_slab() local
1540 page = virt_to_head_page(h); in rcu_free_slab()
1542 page = container_of((struct list_head *)h, struct page, lru); in rcu_free_slab()
1544 __free_slab(page->slab_cache, page); in rcu_free_slab()
1547 static void free_slab(struct kmem_cache *s, struct page *page) in free_slab() argument
1553 int order = compound_order(page); in free_slab()
1557 head = page_address(page) + offset; in free_slab()
1559 head = &page->rcu_head; in free_slab()
1564 __free_slab(s, page); in free_slab()
1567 static void discard_slab(struct kmem_cache *s, struct page *page) in discard_slab() argument
1569 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
1570 free_slab(s, page); in discard_slab()
1577 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) in __add_partial() argument
1581 list_add_tail(&page->lru, &n->partial); in __add_partial()
1583 list_add(&page->lru, &n->partial); in __add_partial()
1587 struct page *page, int tail) in add_partial() argument
1590 __add_partial(n, page, tail); in add_partial()
1594 __remove_partial(struct kmem_cache_node *n, struct page *page) in __remove_partial() argument
1596 list_del(&page->lru); in __remove_partial()
1601 struct page *page) in remove_partial() argument
1604 __remove_partial(n, page); in remove_partial()
1614 struct kmem_cache_node *n, struct page *page, in acquire_slab() argument
1619 struct page new; in acquire_slab()
1628 freelist = page->freelist; in acquire_slab()
1629 counters = page->counters; in acquire_slab()
1633 new.inuse = page->objects; in acquire_slab()
1642 if (!__cmpxchg_double_slab(s, page, in acquire_slab()
1648 remove_partial(n, page); in acquire_slab()
1653 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1654 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1662 struct page *page, *page2; in get_partial_node() local
1677 list_for_each_entry_safe(page, page2, &n->partial, lru) { in get_partial_node()
1680 if (!pfmemalloc_match(page, flags)) in get_partial_node()
1683 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
1689 c->page = page; in get_partial_node()
1693 put_cpu_partial(s, page, 0); in get_partial_node()
1860 static void deactivate_slab(struct kmem_cache *s, struct page *page, in deactivate_slab() argument
1864 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
1869 struct page new; in deactivate_slab()
1870 struct page old; in deactivate_slab()
1872 if (page->freelist) { in deactivate_slab()
1890 prior = page->freelist; in deactivate_slab()
1891 counters = page->counters; in deactivate_slab()
1897 } while (!__cmpxchg_double_slab(s, page, in deactivate_slab()
1921 old.freelist = page->freelist; in deactivate_slab()
1922 old.counters = page->counters; in deactivate_slab()
1966 remove_partial(n, page); in deactivate_slab()
1970 remove_full(s, n, page); in deactivate_slab()
1974 add_partial(n, page, tail); in deactivate_slab()
1980 add_full(s, n, page); in deactivate_slab()
1986 if (!__cmpxchg_double_slab(s, page, in deactivate_slab()
1997 discard_slab(s, page); in deactivate_slab()
2014 struct page *page, *discard_page = NULL; in unfreeze_partials() local
2016 while ((page = c->partial)) { in unfreeze_partials()
2017 struct page new; in unfreeze_partials()
2018 struct page old; in unfreeze_partials()
2020 c->partial = page->next; in unfreeze_partials()
2022 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
2033 old.freelist = page->freelist; in unfreeze_partials()
2034 old.counters = page->counters; in unfreeze_partials()
2042 } while (!__cmpxchg_double_slab(s, page, in unfreeze_partials()
2048 page->next = discard_page; in unfreeze_partials()
2049 discard_page = page; in unfreeze_partials()
2051 add_partial(n, page, DEACTIVATE_TO_TAIL); in unfreeze_partials()
2060 page = discard_page; in unfreeze_partials()
2064 discard_slab(s, page); in unfreeze_partials()
2079 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) in put_cpu_partial() argument
2082 struct page *oldpage; in put_cpu_partial()
2112 pobjects += page->objects - page->inuse; in put_cpu_partial()
2114 page->pages = pages; in put_cpu_partial()
2115 page->pobjects = pobjects; in put_cpu_partial()
2116 page->next = oldpage; in put_cpu_partial()
2118 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) in put_cpu_partial()
2134 deactivate_slab(s, c->page, c->freelist); in flush_slab()
2137 c->page = NULL; in flush_slab()
2151 if (c->page) in __flush_cpu_slab()
2170 return c->page || c->partial; in has_cpu_slab()
2182 static inline int node_match(struct page *page, int node) in node_match() argument
2185 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) in node_match()
2192 static int count_free(struct page *page) in count_free() argument
2194 return page->objects - page->inuse; in count_free()
2205 int (*get_count)(struct page *)) in count_partial() argument
2209 struct page *page; in count_partial() local
2212 list_for_each_entry(page, &n->partial, lru) in count_partial()
2213 x += get_count(page); in count_partial()
2261 struct page *page; in new_slab_objects() local
2268 page = new_slab(s, flags, node); in new_slab_objects()
2269 if (page) { in new_slab_objects()
2271 if (c->page) in new_slab_objects()
2278 freelist = page->freelist; in new_slab_objects()
2279 page->freelist = NULL; in new_slab_objects()
2282 c->page = page; in new_slab_objects()
2290 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) in pfmemalloc_match() argument
2292 if (unlikely(PageSlabPfmemalloc(page))) in pfmemalloc_match()
2308 static inline void *get_freelist(struct kmem_cache *s, struct page *page) in get_freelist() argument
2310 struct page new; in get_freelist()
2315 freelist = page->freelist; in get_freelist()
2316 counters = page->counters; in get_freelist()
2321 new.inuse = page->objects; in get_freelist()
2324 } while (!__cmpxchg_double_slab(s, page, in get_freelist()
2355 struct page *page; in ___slab_alloc() local
2357 page = c->page; in ___slab_alloc()
2358 if (!page) in ___slab_alloc()
2362 if (unlikely(!node_match(page, node))) { in ___slab_alloc()
2368 if (unlikely(!node_match(page, searchnode))) { in ___slab_alloc()
2370 deactivate_slab(s, page, c->freelist); in ___slab_alloc()
2371 c->page = NULL; in ___slab_alloc()
2382 if (unlikely(!pfmemalloc_match(page, gfpflags))) { in ___slab_alloc()
2383 deactivate_slab(s, page, c->freelist); in ___slab_alloc()
2384 c->page = NULL; in ___slab_alloc()
2394 freelist = get_freelist(s, page); in ___slab_alloc()
2397 c->page = NULL; in ___slab_alloc()
2410 VM_BUG_ON(!c->page->frozen); in ___slab_alloc()
2418 page = c->page = c->partial; in ___slab_alloc()
2419 c->partial = page->next; in ___slab_alloc()
2432 page = c->page; in ___slab_alloc()
2433 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) in ___slab_alloc()
2438 !alloc_debug_processing(s, page, freelist, addr)) in ___slab_alloc()
2441 deactivate_slab(s, page, get_freepointer(s, freelist)); in ___slab_alloc()
2442 c->page = NULL; in ___slab_alloc()
2487 struct page *page; in slab_alloc_node() local
2528 page = c->page; in slab_alloc_node()
2529 if (unlikely(!object || !node_match(page, node))) { in slab_alloc_node()
2634 static void __slab_free(struct kmem_cache *s, struct page *page, in __slab_free() argument
2641 struct page new; in __slab_free()
2649 !(n = free_debug_processing(s, page, head, tail, cnt, in __slab_free()
2658 prior = page->freelist; in __slab_free()
2659 counters = page->counters; in __slab_free()
2678 n = get_node(s, page_to_nid(page)); in __slab_free()
2692 } while (!cmpxchg_double_slab(s, page, in __slab_free()
2704 put_cpu_partial(s, page, 1); in __slab_free()
2725 remove_full(s, n, page); in __slab_free()
2726 add_partial(n, page, DEACTIVATE_TO_TAIL); in __slab_free()
2737 remove_partial(n, page); in __slab_free()
2741 remove_full(s, n, page); in __slab_free()
2746 discard_slab(s, page); in __slab_free()
2764 static __always_inline void slab_free(struct kmem_cache *s, struct page *page, in slab_free() argument
2790 if (likely(page == c->page)) { in slab_free()
2803 __slab_free(s, page, head, tail_obj, cnt, addr); in slab_free()
2818 struct page *page; member
2846 df->page = NULL; in build_detached_freelist()
2860 df->page = virt_to_head_page(object); in build_detached_freelist()
2872 if (df->page == virt_to_head_page(object)) { in build_detached_freelist()
2903 if (unlikely(!df.page)) in kmem_cache_free_bulk()
2906 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); in kmem_cache_free_bulk()
3140 struct page *page; in early_kmem_cache_node_alloc() local
3145 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); in early_kmem_cache_node_alloc()
3147 BUG_ON(!page); in early_kmem_cache_node_alloc()
3148 if (page_to_nid(page) != node) { in early_kmem_cache_node_alloc()
3153 n = page->freelist; in early_kmem_cache_node_alloc()
3155 page->freelist = get_freepointer(kmem_cache_node, n); in early_kmem_cache_node_alloc()
3156 page->inuse = 1; in early_kmem_cache_node_alloc()
3157 page->frozen = 0; in early_kmem_cache_node_alloc()
3165 inc_slabs_node(kmem_cache_node, node, page->objects); in early_kmem_cache_node_alloc()
3171 __add_partial(n, page, DEACTIVATE_TO_HEAD); in early_kmem_cache_node_alloc()
3416 static void list_slab_objects(struct kmem_cache *s, struct page *page, in list_slab_objects() argument
3420 void *addr = page_address(page); in list_slab_objects()
3422 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * in list_slab_objects()
3426 slab_err(s, page, text, s->name); in list_slab_objects()
3427 slab_lock(page); in list_slab_objects()
3429 get_map(s, page, map); in list_slab_objects()
3430 for_each_object(p, s, addr, page->objects) { in list_slab_objects()
3437 slab_unlock(page); in list_slab_objects()
3449 struct page *page, *h; in free_partial() local
3451 list_for_each_entry_safe(page, h, &n->partial, lru) { in free_partial()
3452 if (!page->inuse) { in free_partial()
3453 __remove_partial(n, page); in free_partial()
3454 discard_slab(s, page); in free_partial()
3456 list_slab_objects(s, page, in free_partial()
3545 struct page *page; in kmalloc_large_node() local
3549 page = alloc_kmem_pages_node(node, flags, get_order(size)); in kmalloc_large_node()
3550 if (page) in kmalloc_large_node()
3551 ptr = page_address(page); in kmalloc_large_node()
3590 struct page *page; in __ksize() local
3595 page = virt_to_head_page(object); in __ksize()
3597 if (unlikely(!PageSlab(page))) { in __ksize()
3598 WARN_ON(!PageCompound(page)); in __ksize()
3599 return PAGE_SIZE << compound_order(page); in __ksize()
3602 return slab_ksize(page->slab_cache); in __ksize()
3617 struct page *page; in kfree() local
3625 page = virt_to_head_page(x); in kfree()
3626 if (unlikely(!PageSlab(page))) { in kfree()
3627 BUG_ON(!PageCompound(page)); in kfree()
3629 __free_kmem_pages(page, compound_order(page)); in kfree()
3632 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); in kfree()
3652 struct page *page; in __kmem_cache_shrink() local
3653 struct page *t; in __kmem_cache_shrink()
3688 list_for_each_entry_safe(page, t, &n->partial, lru) { in __kmem_cache_shrink()
3689 int free = page->objects - page->inuse; in __kmem_cache_shrink()
3697 if (free == page->objects) { in __kmem_cache_shrink()
3698 list_move(&page->lru, &discard); in __kmem_cache_shrink()
3701 list_move(&page->lru, promote + free - 1); in __kmem_cache_shrink()
3714 list_for_each_entry_safe(page, t, &discard, lru) in __kmem_cache_shrink()
3715 discard_slab(s, page); in __kmem_cache_shrink()
3868 struct page *p; in bootstrap()
4076 static int count_inuse(struct page *page) in count_inuse() argument
4078 return page->inuse; in count_inuse()
4081 static int count_total(struct page *page) in count_total() argument
4083 return page->objects; in count_total()
4088 static int validate_slab(struct kmem_cache *s, struct page *page, in validate_slab() argument
4092 void *addr = page_address(page); in validate_slab()
4094 if (!check_slab(s, page) || in validate_slab()
4095 !on_freelist(s, page, NULL)) in validate_slab()
4099 bitmap_zero(map, page->objects); in validate_slab()
4101 get_map(s, page, map); in validate_slab()
4102 for_each_object(p, s, addr, page->objects) { in validate_slab()
4104 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) in validate_slab()
4108 for_each_object(p, s, addr, page->objects) in validate_slab()
4110 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) in validate_slab()
4115 static void validate_slab_slab(struct kmem_cache *s, struct page *page, in validate_slab_slab() argument
4118 slab_lock(page); in validate_slab_slab()
4119 validate_slab(s, page, map); in validate_slab_slab()
4120 slab_unlock(page); in validate_slab_slab()
4127 struct page *page; in validate_slab_node() local
4132 list_for_each_entry(page, &n->partial, lru) { in validate_slab_node()
4133 validate_slab_slab(s, page, map); in validate_slab_node()
4143 list_for_each_entry(page, &n->full, lru) { in validate_slab_node()
4144 validate_slab_slab(s, page, map); in validate_slab_node()
4300 struct page *page, enum track_item alloc, in process_slab() argument
4303 void *addr = page_address(page); in process_slab()
4306 bitmap_zero(map, page->objects); in process_slab()
4307 get_map(s, page, map); in process_slab()
4309 for_each_object(p, s, addr, page->objects) in process_slab()
4335 struct page *page; in list_locations() local
4341 list_for_each_entry(page, &n->partial, lru) in list_locations()
4342 process_slab(&t, s, page, alloc, map); in list_locations()
4343 list_for_each_entry(page, &n->full, lru) in list_locations()
4344 process_slab(&t, s, page, alloc, map); in list_locations()
4493 struct page *page; in show_slab_objects() local
4495 page = READ_ONCE(c->page); in show_slab_objects()
4496 if (!page) in show_slab_objects()
4499 node = page_to_nid(page); in show_slab_objects()
4501 x = page->objects; in show_slab_objects()
4503 x = page->inuse; in show_slab_objects()
4510 page = READ_ONCE(c->partial); in show_slab_objects()
4511 if (page) { in show_slab_objects()
4512 node = page_to_nid(page); in show_slab_objects()
4518 x = page->pages; in show_slab_objects()
4738 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial; in slabs_cpu_partial_show() local
4740 if (page) { in slabs_cpu_partial_show()
4741 pages += page->pages; in slabs_cpu_partial_show()
4742 objects += page->pobjects; in slabs_cpu_partial_show()
4750 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial; in slabs_cpu_partial_show() local
4752 if (page && len < PAGE_SIZE - 20) in slabs_cpu_partial_show()
4754 page->pobjects, page->pages); in slabs_cpu_partial_show()