Lines matching references to 'page' (SLUB allocator, mm/slub.c). Each entry shows the line number in that file, the matching source line, and the enclosing function; the trailing 'argument' or 'local' notes how 'page' is used there.
229 struct page *page, const void *object) in check_valid_pointer() argument
236 base = page_address(page); in check_valid_pointer()
237 if (object < base || object >= base + page->objects * s->size || in check_valid_pointer()
339 static __always_inline void slab_lock(struct page *page) in slab_lock() argument
341 bit_spin_lock(PG_locked, &page->flags); in slab_lock()
344 static __always_inline void slab_unlock(struct page *page) in slab_unlock() argument
346 __bit_spin_unlock(PG_locked, &page->flags); in slab_unlock()
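The two entries above, slab_lock()/slab_unlock(), serialize access to a slab's metadata by spinning on the PG_locked bit inside page->flags. A minimal userspace sketch of that bit-spinlock pattern using C11 atomics; fake_page, PG_LOCKED_BIT and the *_model names are illustrative stand-ins, not kernel API:

    #include <stdatomic.h>
    #include <sched.h>

    #define PG_LOCKED_BIT 0            /* illustrative stand-in for PG_locked */

    struct fake_page {
        atomic_ulong flags;            /* stand-in for page->flags */
    };

    /* Spin until we are the one who flipped the lock bit from 0 to 1. */
    static void slab_lock_model(struct fake_page *page)
    {
        while (atomic_fetch_or_explicit(&page->flags, 1UL << PG_LOCKED_BIT,
                                        memory_order_acquire) & (1UL << PG_LOCKED_BIT))
            sched_yield();             /* the kernel spins with cpu_relax() instead */
    }

    static void slab_unlock_model(struct fake_page *page)
    {
        atomic_fetch_and_explicit(&page->flags, ~(1UL << PG_LOCKED_BIT),
                                  memory_order_release);
    }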
349 static inline void set_page_slub_counters(struct page *page, unsigned long counters_new) in set_page_slub_counters() argument
351 struct page tmp; in set_page_slub_counters()
359 page->frozen = tmp.frozen; in set_page_slub_counters()
360 page->inuse = tmp.inuse; in set_page_slub_counters()
361 page->objects = tmp.objects; in set_page_slub_counters()
365 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in __cmpxchg_double_slab() argument
374 if (cmpxchg_double(&page->freelist, &page->counters, in __cmpxchg_double_slab()
381 slab_lock(page); in __cmpxchg_double_slab()
382 if (page->freelist == freelist_old && in __cmpxchg_double_slab()
383 page->counters == counters_old) { in __cmpxchg_double_slab()
384 page->freelist = freelist_new; in __cmpxchg_double_slab()
385 set_page_slub_counters(page, counters_new); in __cmpxchg_double_slab()
386 slab_unlock(page); in __cmpxchg_double_slab()
389 slab_unlock(page); in __cmpxchg_double_slab()
402 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, in cmpxchg_double_slab() argument
410 if (cmpxchg_double(&page->freelist, &page->counters, in cmpxchg_double_slab()
420 slab_lock(page); in cmpxchg_double_slab()
421 if (page->freelist == freelist_old && in cmpxchg_double_slab()
422 page->counters == counters_old) { in cmpxchg_double_slab()
423 page->freelist = freelist_new; in cmpxchg_double_slab()
424 set_page_slub_counters(page, counters_new); in cmpxchg_double_slab()
425 slab_unlock(page); in cmpxchg_double_slab()
429 slab_unlock(page); in cmpxchg_double_slab()
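__cmpxchg_double_slab()/cmpxchg_double_slab() update the slab's freelist pointer and its packed counters word as one unit: try a lockless double-word cmpxchg where the hardware supports it, otherwise take the slab bit-lock, compare both old values, and only then install the new pair (set_page_slub_counters() then copies the frozen/inuse/objects fields out of the new counters). Below is a sketch of the locked fallback path only, under the assumption that the pair lives in an illustrative slab_meta struct guarded by an atomic_flag rather than the real page bit-lock:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct slab_meta {
        void *freelist;                /* first free object */
        unsigned long counters;        /* packed inuse/objects/frozen */
        atomic_flag lock;              /* stand-in for the PG_locked bit */
    };

    /* Install (free_new, cnt_new) only if both words still hold the
     * expected old values; report whether the swap happened. */
    static bool cmpxchg_double_slab_model(struct slab_meta *m,
                                          void *free_old, unsigned long cnt_old,
                                          void *free_new, unsigned long cnt_new)
    {
        bool ok = false;

        while (atomic_flag_test_and_set_explicit(&m->lock, memory_order_acquire))
            ;                          /* spin: the fallback "slab_lock" */
        if (m->freelist == free_old && m->counters == cnt_old) {
            m->freelist = free_new;
            m->counters = cnt_new;
            ok = true;
        }
        atomic_flag_clear_explicit(&m->lock, memory_order_release);
        return ok;
    }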
450 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) in get_map() argument
453 void *addr = page_address(page); in get_map()
455 for (p = page->freelist; p; p = get_freepointer(s, p)) in get_map()
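get_map() walks the slab's freelist and marks every free object in a caller-supplied bitmap indexed by object number; the validation and reporting code further down scans that bitmap to tell allocated objects from free ones. A simplified model, assuming objects are laid out every 'size' bytes from 'addr', that a free object stores the next-free pointer at its start (the real code reads it at s->offset via get_freepointer()), and that the caller has already zeroed 'map' as bitmap_zero() does in the callers listed later:

    #include <stddef.h>

    #define BITS_PER_LONG_MODEL (8 * sizeof(unsigned long))

    static void get_map_model(void *addr, size_t size, unsigned long nobjects,
                              void *freelist, unsigned long *map)
    {
        for (void *p = freelist; p; p = *(void **)p) {
            size_t idx = (size_t)((char *)p - (char *)addr) / size;

            if (idx < nobjects)        /* ignore pointers outside the slab */
                map[idx / BITS_PER_LONG_MODEL] |= 1UL << (idx % BITS_PER_LONG_MODEL);
        }
    }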
582 static void print_page_info(struct page *page) in print_page_info() argument
585 page, page->objects, page->inuse, page->freelist, page->flags); in print_page_info()
617 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p) in print_trailer() argument
620 u8 *addr = page_address(page); in print_trailer()
624 print_page_info(page); in print_trailer()
653 void object_err(struct kmem_cache *s, struct page *page, in object_err() argument
657 print_trailer(s, page, object); in object_err()
660 static void slab_err(struct kmem_cache *s, struct page *page, in slab_err() argument
670 print_page_info(page); in slab_err()
694 static int check_bytes_and_report(struct kmem_cache *s, struct page *page, in check_bytes_and_report() argument
714 print_trailer(s, page, object); in check_bytes_and_report()
758 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) in check_pad_bytes() argument
773 return check_bytes_and_report(s, page, p, "Object padding", in check_pad_bytes()
778 static int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
789 start = page_address(page); in slab_pad_check()
790 length = (PAGE_SIZE << compound_order(page)) - s->reserved; in slab_pad_check()
804 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); in slab_pad_check()
811 static int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
818 if (!check_bytes_and_report(s, page, object, "Redzone", in check_object()
823 check_bytes_and_report(s, page, p, "Alignment padding", in check_object()
831 (!check_bytes_and_report(s, page, p, "Poison", p, in check_object()
833 !check_bytes_and_report(s, page, p, "Poison", in check_object()
839 check_pad_bytes(s, page, p); in check_object()
850 if (!check_valid_pointer(s, page, get_freepointer(s, p))) { in check_object()
851 object_err(s, page, p, "Freepointer corrupt"); in check_object()
863 static int check_slab(struct kmem_cache *s, struct page *page) in check_slab() argument
869 if (!PageSlab(page)) { in check_slab()
870 slab_err(s, page, "Not a valid slab page"); in check_slab()
874 maxobj = order_objects(compound_order(page), s->size, s->reserved); in check_slab()
875 if (page->objects > maxobj) { in check_slab()
876 slab_err(s, page, "objects %u > max %u", in check_slab()
877 page->objects, maxobj); in check_slab()
880 if (page->inuse > page->objects) { in check_slab()
881 slab_err(s, page, "inuse %u > max %u", in check_slab()
882 page->inuse, page->objects); in check_slab()
886 slab_pad_check(s, page); in check_slab()
894 static int on_freelist(struct kmem_cache *s, struct page *page, void *search) in on_freelist() argument
901 fp = page->freelist; in on_freelist()
902 while (fp && nr <= page->objects) { in on_freelist()
905 if (!check_valid_pointer(s, page, fp)) { in on_freelist()
907 object_err(s, page, object, in on_freelist()
911 slab_err(s, page, "Freepointer corrupt"); in on_freelist()
912 page->freelist = NULL; in on_freelist()
913 page->inuse = page->objects; in on_freelist()
924 max_objects = order_objects(compound_order(page), s->size, s->reserved); in on_freelist()
928 if (page->objects != max_objects) { in on_freelist()
929 slab_err(s, page, "Wrong number of objects. Found %d but " in on_freelist()
930 "should be %d", page->objects, max_objects); in on_freelist()
931 page->objects = max_objects; in on_freelist()
934 if (page->inuse != page->objects - nr) { in on_freelist()
935 slab_err(s, page, "Wrong object count. Counter is %d but " in on_freelist()
936 "counted were %d", page->inuse, page->objects - nr); in on_freelist()
937 page->inuse = page->objects - nr; in on_freelist()
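on_freelist() is a consistency check: it walks the freelist, validates each pointer, counts the free objects, and repairs page->objects/page->inuse when they disagree with what it counted. A sketch of the counting part only, with the same layout assumptions as the get_map sketch above (the kernel version repairs the counters and truncates a corrupt list instead of merely reporting failure):

    #include <stdbool.h>
    #include <stddef.h>

    static bool freelist_consistent_model(void *addr, size_t size,
                                          unsigned long objects,
                                          unsigned long inuse,
                                          void *freelist)
    {
        unsigned long nr = 0;
        char *base = addr;

        for (void *fp = freelist; fp && nr <= objects; fp = *(void **)fp) {
            if ((char *)fp < base || (char *)fp >= base + objects * size)
                return false;          /* corrupt free pointer */
            nr++;
        }
        /* every object is either in use or on the freelist */
        return nr <= objects && inuse == objects - nr;
    }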
943 static void trace(struct kmem_cache *s, struct page *page, void *object, in trace() argument
950 object, page->inuse, in trace()
951 page->freelist); in trace()
965 struct kmem_cache_node *n, struct page *page) in add_full() argument
971 list_add(&page->lru, &n->full); in add_full()
974 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page) in remove_full() argument
980 list_del(&page->lru); in remove_full()
1020 static void setup_object_debug(struct kmem_cache *s, struct page *page, in setup_object_debug() argument
1031 struct page *page, in alloc_debug_processing() argument
1034 if (!check_slab(s, page)) in alloc_debug_processing()
1037 if (!check_valid_pointer(s, page, object)) { in alloc_debug_processing()
1038 object_err(s, page, object, "Freelist Pointer check fails"); in alloc_debug_processing()
1042 if (!check_object(s, page, object, SLUB_RED_INACTIVE)) in alloc_debug_processing()
1048 trace(s, page, object, 1); in alloc_debug_processing()
1053 if (PageSlab(page)) { in alloc_debug_processing()
1060 page->inuse = page->objects; in alloc_debug_processing()
1061 page->freelist = NULL; in alloc_debug_processing()
1067 struct kmem_cache *s, struct page *page, void *object, in free_debug_processing() argument
1070 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
1073 slab_lock(page); in free_debug_processing()
1075 if (!check_slab(s, page)) in free_debug_processing()
1078 if (!check_valid_pointer(s, page, object)) { in free_debug_processing()
1079 slab_err(s, page, "Invalid object pointer 0x%p", object); in free_debug_processing()
1083 if (on_freelist(s, page, object)) { in free_debug_processing()
1084 object_err(s, page, object, "Object already free"); in free_debug_processing()
1088 if (!check_object(s, page, object, SLUB_RED_ACTIVE)) in free_debug_processing()
1091 if (unlikely(s != page->slab_cache)) { in free_debug_processing()
1092 if (!PageSlab(page)) { in free_debug_processing()
1093 slab_err(s, page, "Attempt to free object(0x%p) " in free_debug_processing()
1095 } else if (!page->slab_cache) { in free_debug_processing()
1100 object_err(s, page, object, in free_debug_processing()
1107 trace(s, page, object, 0); in free_debug_processing()
1110 slab_unlock(page); in free_debug_processing()
1118 slab_unlock(page); in free_debug_processing()
1207 struct page *page, void *object) {} in setup_object_debug() argument
1210 struct page *page, void *object, unsigned long addr) { return 0; } in alloc_debug_processing() argument
1213 struct kmem_cache *s, struct page *page, void *object, in free_debug_processing() argument
1216 static inline int slab_pad_check(struct kmem_cache *s, struct page *page) in slab_pad_check() argument
1218 static inline int check_object(struct kmem_cache *s, struct page *page, in check_object() argument
1221 struct page *page) {} in add_full() argument
1223 struct page *page) {} in remove_full() argument
1312 static inline struct page *alloc_slab_page(struct kmem_cache *s, in alloc_slab_page()
1315 struct page *page; in alloc_slab_page() local
1324 page = alloc_pages(flags, order); in alloc_slab_page()
1326 page = alloc_pages_exact_node(node, flags, order); in alloc_slab_page()
1328 if (!page) in alloc_slab_page()
1331 return page; in alloc_slab_page()
1334 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab()
1336 struct page *page; in allocate_slab() local
1353 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1354 if (unlikely(!page)) { in allocate_slab()
1361 page = alloc_slab_page(s, alloc_gfp, node, oo); in allocate_slab()
1363 if (page) in allocate_slab()
1367 if (kmemcheck_enabled && page in allocate_slab()
1371 kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node); in allocate_slab()
1378 kmemcheck_mark_uninitialized_pages(page, pages); in allocate_slab()
1380 kmemcheck_mark_unallocated_pages(page, pages); in allocate_slab()
1385 if (!page) in allocate_slab()
1388 page->objects = oo_objects(oo); in allocate_slab()
1389 mod_zone_page_state(page_zone(page), in allocate_slab()
1394 return page; in allocate_slab()
1397 static void setup_object(struct kmem_cache *s, struct page *page, in setup_object() argument
1400 setup_object_debug(s, page, object); in setup_object()
1408 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab()
1410 struct page *page; in new_slab() local
1421 page = allocate_slab(s, in new_slab()
1423 if (!page) in new_slab()
1426 order = compound_order(page); in new_slab()
1427 inc_slabs_node(s, page_to_nid(page), page->objects); in new_slab()
1428 page->slab_cache = s; in new_slab()
1429 __SetPageSlab(page); in new_slab()
1430 if (page_is_pfmemalloc(page)) in new_slab()
1431 SetPageSlabPfmemalloc(page); in new_slab()
1433 start = page_address(page); in new_slab()
1438 kasan_poison_slab(page); in new_slab()
1440 for_each_object_idx(p, idx, s, start, page->objects) { in new_slab()
1441 setup_object(s, page, p); in new_slab()
1442 if (likely(idx < page->objects)) in new_slab()
1448 page->freelist = start; in new_slab()
1449 page->inuse = page->objects; in new_slab()
1450 page->frozen = 1; in new_slab()
1452 return page; in new_slab()
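new_slab() initializes a freshly allocated slab by threading every object onto an intrusive freelist (object i points at object i+1, the last one at NULL), then sets page->freelist to the first object, page->inuse to the object count and page->frozen to 1. A minimal model of the freelist construction, assuming at least one object and a free pointer at offset 0 (the real code honours s->offset and also runs setup_object() on each object for debugging/poisoning):

    #include <stddef.h>

    /* Returns the head of the chain, i.e. what becomes page->freelist. */
    static void *build_freelist_model(void *start, size_t size, unsigned long objects)
    {
        char *p = start;               /* assumes objects >= 1 */

        for (unsigned long i = 0; i + 1 < objects; i++, p += size)
            *(void **)p = p + size;    /* link object i to object i + 1 */
        *(void **)p = NULL;            /* terminate the list */
        return start;
    }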
1455 static void __free_slab(struct kmem_cache *s, struct page *page) in __free_slab() argument
1457 int order = compound_order(page); in __free_slab()
1463 slab_pad_check(s, page); in __free_slab()
1464 for_each_object(p, s, page_address(page), in __free_slab()
1465 page->objects) in __free_slab()
1466 check_object(s, page, p, SLUB_RED_INACTIVE); in __free_slab()
1469 kmemcheck_free_shadow(page, compound_order(page)); in __free_slab()
1471 mod_zone_page_state(page_zone(page), in __free_slab()
1476 __ClearPageSlabPfmemalloc(page); in __free_slab()
1477 __ClearPageSlab(page); in __free_slab()
1479 page_mapcount_reset(page); in __free_slab()
1482 __free_pages(page, order); in __free_slab()
1487 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1491 struct page *page; in rcu_free_slab() local
1494 page = virt_to_head_page(h); in rcu_free_slab()
1496 page = container_of((struct list_head *)h, struct page, lru); in rcu_free_slab()
1498 __free_slab(page->slab_cache, page); in rcu_free_slab()
1501 static void free_slab(struct kmem_cache *s, struct page *page) in free_slab() argument
1507 int order = compound_order(page); in free_slab()
1511 head = page_address(page) + offset; in free_slab()
1516 head = (void *)&page->lru; in free_slab()
1521 __free_slab(s, page); in free_slab()
1524 static void discard_slab(struct kmem_cache *s, struct page *page) in discard_slab() argument
1526 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
1527 free_slab(s, page); in discard_slab()
1534 __add_partial(struct kmem_cache_node *n, struct page *page, int tail) in __add_partial() argument
1538 list_add_tail(&page->lru, &n->partial); in __add_partial()
1540 list_add(&page->lru, &n->partial); in __add_partial()
1544 struct page *page, int tail) in add_partial() argument
1547 __add_partial(n, page, tail); in add_partial()
1551 __remove_partial(struct kmem_cache_node *n, struct page *page) in __remove_partial() argument
1553 list_del(&page->lru); in __remove_partial()
1558 struct page *page) in remove_partial() argument
1561 __remove_partial(n, page); in remove_partial()
1571 struct kmem_cache_node *n, struct page *page, in acquire_slab() argument
1576 struct page new; in acquire_slab()
1585 freelist = page->freelist; in acquire_slab()
1586 counters = page->counters; in acquire_slab()
1590 new.inuse = page->objects; in acquire_slab()
1599 if (!__cmpxchg_double_slab(s, page, in acquire_slab()
1605 remove_partial(n, page); in acquire_slab()
1610 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1611 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1619 struct page *page, *page2; in get_partial_node() local
1634 list_for_each_entry_safe(page, page2, &n->partial, lru) { in get_partial_node()
1637 if (!pfmemalloc_match(page, flags)) in get_partial_node()
1640 t = acquire_slab(s, n, page, object == NULL, &objects); in get_partial_node()
1646 c->page = page; in get_partial_node()
1650 put_cpu_partial(s, page, 0); in get_partial_node()
1817 static void deactivate_slab(struct kmem_cache *s, struct page *page, in deactivate_slab() argument
1821 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
1826 struct page new; in deactivate_slab()
1827 struct page old; in deactivate_slab()
1829 if (page->freelist) { in deactivate_slab()
1847 prior = page->freelist; in deactivate_slab()
1848 counters = page->counters; in deactivate_slab()
1854 } while (!__cmpxchg_double_slab(s, page, in deactivate_slab()
1878 old.freelist = page->freelist; in deactivate_slab()
1879 old.counters = page->counters; in deactivate_slab()
1923 remove_partial(n, page); in deactivate_slab()
1927 remove_full(s, n, page); in deactivate_slab()
1931 add_partial(n, page, tail); in deactivate_slab()
1937 add_full(s, n, page); in deactivate_slab()
1943 if (!__cmpxchg_double_slab(s, page, in deactivate_slab()
1954 discard_slab(s, page); in deactivate_slab()
1971 struct page *page, *discard_page = NULL; in unfreeze_partials() local
1973 while ((page = c->partial)) { in unfreeze_partials()
1974 struct page new; in unfreeze_partials()
1975 struct page old; in unfreeze_partials()
1977 c->partial = page->next; in unfreeze_partials()
1979 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
1990 old.freelist = page->freelist; in unfreeze_partials()
1991 old.counters = page->counters; in unfreeze_partials()
1999 } while (!__cmpxchg_double_slab(s, page, in unfreeze_partials()
2005 page->next = discard_page; in unfreeze_partials()
2006 discard_page = page; in unfreeze_partials()
2008 add_partial(n, page, DEACTIVATE_TO_TAIL); in unfreeze_partials()
2017 page = discard_page; in unfreeze_partials()
2021 discard_slab(s, page); in unfreeze_partials()
2036 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) in put_cpu_partial() argument
2039 struct page *oldpage; in put_cpu_partial()
2069 pobjects += page->objects - page->inuse; in put_cpu_partial()
2071 page->pages = pages; in put_cpu_partial()
2072 page->pobjects = pobjects; in put_cpu_partial()
2073 page->next = oldpage; in put_cpu_partial()
2075 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) in put_cpu_partial()
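put_cpu_partial() pushes a frozen slab onto the per-cpu partial list with a this_cpu_cmpxchg() loop: read the current head, link the new slab in front of it, and retry if another context changed the head in the meantime (it also accumulates the pages/pobjects totals shown above). A generic model of that lock-free push, with C11 atomics on a plain list head standing in for the per-cpu cmpxchg:

    #include <stdatomic.h>

    struct partial_node {
        struct partial_node *next;     /* stand-in for page->next */
    };

    static void push_partial_model(_Atomic(struct partial_node *) *head,
                                   struct partial_node *node)
    {
        struct partial_node *old = atomic_load(head);

        do {
            node->next = old;          /* link in front of the current head */
        } while (!atomic_compare_exchange_weak(head, &old, node));
    }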
2091 deactivate_slab(s, c->page, c->freelist); in flush_slab()
2094 c->page = NULL; in flush_slab()
2108 if (c->page) in __flush_cpu_slab()
2127 return c->page || c->partial; in has_cpu_slab()
2139 static inline int node_match(struct page *page, int node) in node_match() argument
2142 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node)) in node_match()
2149 static int count_free(struct page *page) in count_free() argument
2151 return page->objects - page->inuse; in count_free()
2162 int (*get_count)(struct page *)) in count_partial() argument
2166 struct page *page; in count_partial() local
2169 list_for_each_entry(page, &n->partial, lru) in count_partial()
2170 x += get_count(page); in count_partial()
2218 struct page *page; in new_slab_objects() local
2225 page = new_slab(s, flags, node); in new_slab_objects()
2226 if (page) { in new_slab_objects()
2228 if (c->page) in new_slab_objects()
2235 freelist = page->freelist; in new_slab_objects()
2236 page->freelist = NULL; in new_slab_objects()
2239 c->page = page; in new_slab_objects()
2247 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) in pfmemalloc_match() argument
2249 if (unlikely(PageSlabPfmemalloc(page))) in pfmemalloc_match()
2265 static inline void *get_freelist(struct kmem_cache *s, struct page *page) in get_freelist() argument
2267 struct page new; in get_freelist()
2272 freelist = page->freelist; in get_freelist()
2273 counters = page->counters; in get_freelist()
2278 new.inuse = page->objects; in get_freelist()
2281 } while (!__cmpxchg_double_slab(s, page, in get_freelist()
2309 struct page *page; in __slab_alloc() local
2322 page = c->page; in __slab_alloc()
2323 if (!page) in __slab_alloc()
2327 if (unlikely(!node_match(page, node))) { in __slab_alloc()
2333 if (unlikely(!node_match(page, searchnode))) { in __slab_alloc()
2335 deactivate_slab(s, page, c->freelist); in __slab_alloc()
2336 c->page = NULL; in __slab_alloc()
2347 if (unlikely(!pfmemalloc_match(page, gfpflags))) { in __slab_alloc()
2348 deactivate_slab(s, page, c->freelist); in __slab_alloc()
2349 c->page = NULL; in __slab_alloc()
2359 freelist = get_freelist(s, page); in __slab_alloc()
2362 c->page = NULL; in __slab_alloc()
2375 VM_BUG_ON(!c->page->frozen); in __slab_alloc()
2384 page = c->page = c->partial; in __slab_alloc()
2385 c->partial = page->next; in __slab_alloc()
2399 page = c->page; in __slab_alloc()
2400 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) in __slab_alloc()
2405 !alloc_debug_processing(s, page, freelist, addr)) in __slab_alloc()
2408 deactivate_slab(s, page, get_freepointer(s, freelist)); in __slab_alloc()
2409 c->page = NULL; in __slab_alloc()
2430 struct page *page; in slab_alloc_node() local
2471 page = c->page; in slab_alloc_node()
2472 if (unlikely(!object || !node_match(page, node))) { in slab_alloc_node()
2577 static void __slab_free(struct kmem_cache *s, struct page *page, in __slab_free() argument
2583 struct page new; in __slab_free()
2591 !(n = free_debug_processing(s, page, x, addr, &flags))) in __slab_free()
2599 prior = page->freelist; in __slab_free()
2600 counters = page->counters; in __slab_free()
2619 n = get_node(s, page_to_nid(page)); in __slab_free()
2633 } while (!cmpxchg_double_slab(s, page, in __slab_free()
2645 put_cpu_partial(s, page, 1); in __slab_free()
2666 remove_full(s, n, page); in __slab_free()
2667 add_partial(n, page, DEACTIVATE_TO_TAIL); in __slab_free()
2678 remove_partial(n, page); in __slab_free()
2682 remove_full(s, n, page); in __slab_free()
2687 discard_slab(s, page); in __slab_free()
2702 struct page *page, void *x, unsigned long addr) in slab_free() argument
2726 if (likely(page == c->page)) { in slab_free()
2739 __slab_free(s, page, x, addr); in slab_free()
2926 struct page *page; in early_kmem_cache_node_alloc() local
2931 page = new_slab(kmem_cache_node, GFP_NOWAIT, node); in early_kmem_cache_node_alloc()
2933 BUG_ON(!page); in early_kmem_cache_node_alloc()
2934 if (page_to_nid(page) != node) { in early_kmem_cache_node_alloc()
2939 n = page->freelist; in early_kmem_cache_node_alloc()
2941 page->freelist = get_freepointer(kmem_cache_node, n); in early_kmem_cache_node_alloc()
2942 page->inuse = 1; in early_kmem_cache_node_alloc()
2943 page->frozen = 0; in early_kmem_cache_node_alloc()
2951 inc_slabs_node(kmem_cache_node, node, page->objects); in early_kmem_cache_node_alloc()
2957 __add_partial(n, page, DEACTIVATE_TO_HEAD); in early_kmem_cache_node_alloc()
3202 static void list_slab_objects(struct kmem_cache *s, struct page *page, in list_slab_objects() argument
3206 void *addr = page_address(page); in list_slab_objects()
3208 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * in list_slab_objects()
3212 slab_err(s, page, text, s->name); in list_slab_objects()
3213 slab_lock(page); in list_slab_objects()
3215 get_map(s, page, map); in list_slab_objects()
3216 for_each_object(p, s, addr, page->objects) { in list_slab_objects()
3223 slab_unlock(page); in list_slab_objects()
3235 struct page *page, *h; in free_partial() local
3237 list_for_each_entry_safe(page, h, &n->partial, lru) { in free_partial()
3238 if (!page->inuse) { in free_partial()
3239 __remove_partial(n, page); in free_partial()
3240 discard_slab(s, page); in free_partial()
3242 list_slab_objects(s, page, in free_partial()
3331 struct page *page; in kmalloc_large_node() local
3335 page = alloc_kmem_pages_node(node, flags, get_order(size)); in kmalloc_large_node()
3336 if (page) in kmalloc_large_node()
3337 ptr = page_address(page); in kmalloc_large_node()
3376 struct page *page; in __ksize() local
3381 page = virt_to_head_page(object); in __ksize()
3383 if (unlikely(!PageSlab(page))) { in __ksize()
3384 WARN_ON(!PageCompound(page)); in __ksize()
3385 return PAGE_SIZE << compound_order(page); in __ksize()
3388 return slab_ksize(page->slab_cache); in __ksize()
3403 struct page *page; in kfree() local
3411 page = virt_to_head_page(x); in kfree()
3412 if (unlikely(!PageSlab(page))) { in kfree()
3413 BUG_ON(!PageCompound(page)); in kfree()
3415 __free_kmem_pages(page, compound_order(page)); in kfree()
3418 slab_free(page->slab_cache, page, object, _RET_IP_); in kfree()
3438 struct page *page; in __kmem_cache_shrink() local
3439 struct page *t; in __kmem_cache_shrink()
3474 list_for_each_entry_safe(page, t, &n->partial, lru) { in __kmem_cache_shrink()
3475 int free = page->objects - page->inuse; in __kmem_cache_shrink()
3483 if (free == page->objects) { in __kmem_cache_shrink()
3484 list_move(&page->lru, &discard); in __kmem_cache_shrink()
3487 list_move(&page->lru, promote + free - 1); in __kmem_cache_shrink()
3500 list_for_each_entry_safe(page, t, &discard, lru) in __kmem_cache_shrink()
3501 discard_slab(s, page); in __kmem_cache_shrink()
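__kmem_cache_shrink() walks each node's partial list, queues completely free slabs for discard, and re-sorts the rest so the fullest slabs end up at the head of the partial list, where they will be picked first and emptied soonest. A tiny model of that classification step, where SHRINK_BUCKETS stands in for the kernel's SHRINK_PROMOTE_MAX and the return value says where a slab with 'free' free objects out of 'objects' would go:

    /* -1 means "discard", 0..SHRINK_BUCKETS-1 is a promote bucket (lower
     * bucket == fuller slab, rebuilt closer to the head of the partial
     * list), SHRINK_BUCKETS means "leave it where it is". */
    #define SHRINK_BUCKETS 32

    static int shrink_bucket_model(unsigned int free, unsigned int objects)
    {
        if (free == objects)
            return -1;                 /* completely free slab: discard it */
        if (free <= SHRINK_BUCKETS)
            return (int)free - 1;      /* fuller slabs get lower buckets */
        return SHRINK_BUCKETS;         /* very sparse slabs stay in place */
    }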
3654 struct page *p; in bootstrap()
3861 static int count_inuse(struct page *page) in count_inuse() argument
3863 return page->inuse; in count_inuse()
3866 static int count_total(struct page *page) in count_total() argument
3868 return page->objects; in count_total()
3873 static int validate_slab(struct kmem_cache *s, struct page *page, in validate_slab() argument
3877 void *addr = page_address(page); in validate_slab()
3879 if (!check_slab(s, page) || in validate_slab()
3880 !on_freelist(s, page, NULL)) in validate_slab()
3884 bitmap_zero(map, page->objects); in validate_slab()
3886 get_map(s, page, map); in validate_slab()
3887 for_each_object(p, s, addr, page->objects) { in validate_slab()
3889 if (!check_object(s, page, p, SLUB_RED_INACTIVE)) in validate_slab()
3893 for_each_object(p, s, addr, page->objects) in validate_slab()
3895 if (!check_object(s, page, p, SLUB_RED_ACTIVE)) in validate_slab()
3900 static void validate_slab_slab(struct kmem_cache *s, struct page *page, in validate_slab_slab() argument
3903 slab_lock(page); in validate_slab_slab()
3904 validate_slab(s, page, map); in validate_slab_slab()
3905 slab_unlock(page); in validate_slab_slab()
3912 struct page *page; in validate_slab_node() local
3917 list_for_each_entry(page, &n->partial, lru) { in validate_slab_node()
3918 validate_slab_slab(s, page, map); in validate_slab_node()
3928 list_for_each_entry(page, &n->full, lru) { in validate_slab_node()
3929 validate_slab_slab(s, page, map); in validate_slab_node()
4085 struct page *page, enum track_item alloc, in process_slab() argument
4088 void *addr = page_address(page); in process_slab()
4091 bitmap_zero(map, page->objects); in process_slab()
4092 get_map(s, page, map); in process_slab()
4094 for_each_object(p, s, addr, page->objects) in process_slab()
4120 struct page *page; in list_locations() local
4126 list_for_each_entry(page, &n->partial, lru) in list_locations()
4127 process_slab(&t, s, page, alloc, map); in list_locations()
4128 list_for_each_entry(page, &n->full, lru) in list_locations()
4129 process_slab(&t, s, page, alloc, map); in list_locations()
4278 struct page *page; in show_slab_objects() local
4280 page = READ_ONCE(c->page); in show_slab_objects()
4281 if (!page) in show_slab_objects()
4284 node = page_to_nid(page); in show_slab_objects()
4286 x = page->objects; in show_slab_objects()
4288 x = page->inuse; in show_slab_objects()
4295 page = READ_ONCE(c->partial); in show_slab_objects()
4296 if (page) { in show_slab_objects()
4297 node = page_to_nid(page); in show_slab_objects()
4303 x = page->pages; in show_slab_objects()
4523 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial; in slabs_cpu_partial_show() local
4525 if (page) { in slabs_cpu_partial_show()
4526 pages += page->pages; in slabs_cpu_partial_show()
4527 objects += page->pobjects; in slabs_cpu_partial_show()
4535 struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial; in slabs_cpu_partial_show() local
4537 if (page && len < PAGE_SIZE - 20) in slabs_cpu_partial_show()
4539 page->pobjects, page->pages); in slabs_cpu_partial_show()