Lines matching refs:page — every reference to struct page in what appears to be mm/slab.c (the SLAB allocator; the freelist_idx_t and alloc_pages_exact_node usage dates it around Linux v3.16-v3.17). Each entry reads: <source line number> <source text> in <enclosing function>, with "argument"/"local" flagging where the page pointer is declared.
397 static void set_obj_status(struct page *page, int idx, int val) in set_obj_status() argument
401 struct kmem_cache *cachep = page->slab_cache; in set_obj_status()
404 status = (char *)page->freelist + freelist_size; in set_obj_status()
408 static inline unsigned int get_obj_status(struct page *page, int idx) in get_obj_status() argument
412 struct kmem_cache *cachep = page->slab_cache; in get_obj_status()
415 status = (char *)page->freelist + freelist_size; in get_obj_status()
421 static inline void set_obj_status(struct page *page, int idx, int val) {} in set_obj_status() argument
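Lines 397-421 are the CONFIG_DEBUG_SLAB_LEAK status helpers: each slab keeps one status byte per object, stored directly behind the freelist index array, and the debug hooks (lines 2746 and 2914 below) flip it between OBJECT_FREE and OBJECT_ACTIVE. Filling in the elided lines — a reconstruction consistent with the fragments above, not a verbatim quote from this tree:

static void set_obj_status(struct page *page, int idx, int val)
{
        int freelist_size;
        char *status;
        struct kmem_cache *cachep = page->slab_cache;

        /* the status bytes live right behind the freelist index array */
        freelist_size = cachep->num * sizeof(freelist_idx_t);
        status = (char *)page->freelist + freelist_size;
        status[idx] = val;
}

static inline unsigned int get_obj_status(struct page *page, int idx)
{
        int freelist_size;
        char *status;
        struct kmem_cache *cachep = page->slab_cache;

        freelist_size = cachep->num * sizeof(freelist_idx_t);
        status = (char *)page->freelist + freelist_size;

        return status[idx];
}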
436 struct page *page = virt_to_head_page(obj); in virt_to_cache() local
437 return page->slab_cache; in virt_to_cache()
440 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, in index_to_obj() argument
443 return page->s_mem + cache->size * idx; in index_to_obj()
453 const struct page *page, void *obj) in obj_to_index() argument
455 u32 offset = (obj - page->s_mem); in obj_to_index()
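virt_to_cache, index_to_obj and obj_to_index are the address arithmetic the rest of the file leans on: objects sit at fixed strides from page->s_mem, so index and address convert both ways. The division in obj_to_index is done with reciprocal_divide() rather than a real divide; a sketch reconstructed from the fragments:

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct page *page = virt_to_head_page(obj);
        return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
                                 unsigned int idx)
{
        return page->s_mem + cache->size * idx;
}

/* avoid (offset / cache->size): size is constant per cache, so use the
 * precomputed reciprocal instead of a divide */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
                                        const struct page *page, void *obj)
{
        u32 offset = (obj - page->s_mem);
        return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}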
689 static inline bool is_slab_pfmemalloc(struct page *page) in is_slab_pfmemalloc() argument
691 return PageSlabPfmemalloc(page); in is_slab_pfmemalloc()
699 struct page *page; in recheck_pfmemalloc_active() local
706 list_for_each_entry(page, &n->slabs_full, lru) in recheck_pfmemalloc_active()
707 if (is_slab_pfmemalloc(page)) in recheck_pfmemalloc_active()
710 list_for_each_entry(page, &n->slabs_partial, lru) in recheck_pfmemalloc_active()
711 if (is_slab_pfmemalloc(page)) in recheck_pfmemalloc_active()
714 list_for_each_entry(page, &n->slabs_free, lru) in recheck_pfmemalloc_active()
715 if (is_slab_pfmemalloc(page)) in recheck_pfmemalloc_active()
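recheck_pfmemalloc_active clears the global pfmemalloc_active flag once no slab on any of the node's three lists still carries PG_SlabPfmemalloc. A sketch of the whole function, assuming the v3.17-era get_node() helper (older trees index cachep->node[] directly):

static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
                                      struct array_cache *ac)
{
        struct kmem_cache_node *n = get_node(cachep, numa_mem_id());
        struct page *page;
        unsigned long flags;

        if (!pfmemalloc_active)
                return;

        spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry(page, &n->slabs_full, lru)
                if (is_slab_pfmemalloc(page))
                        goto out;

        list_for_each_entry(page, &n->slabs_partial, lru)
                if (is_slab_pfmemalloc(page))
                        goto out;

        list_for_each_entry(page, &n->slabs_free, lru)
                if (is_slab_pfmemalloc(page))
                        goto out;

        pfmemalloc_active = false;
out:
        spin_unlock_irqrestore(&n->list_lock, flags);
}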
755 struct page *page = virt_to_head_page(objp); in __ac_get_obj() local
756 ClearPageSlabPfmemalloc(page); in __ac_get_obj()
788 struct page *page = virt_to_head_page(objp); in __ac_put_obj() local
789 if (PageSlabPfmemalloc(page)) in __ac_put_obj()
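__ac_put_obj is the write side of that flag: while pfmemalloc slabs exist, every object returned to a per-CPU array_cache gets tagged (via the pointer's low bit, set_obj_pfmemalloc) if its slab page is PG_SlabPfmemalloc. The fragment at lines 755-756 is the read side, __ac_get_obj, which may clear the page flag again when the caller is allowed to consume reserve objects. The put side in full, reconstructed:

static inline void *__ac_put_obj(struct kmem_cache *cachep,
                                 struct array_cache *ac, void *objp)
{
        if (unlikely(pfmemalloc_active)) {
                /* Some pfmemalloc slabs exist; check if this is one */
                struct page *page = virt_to_head_page(objp);
                if (PageSlabPfmemalloc(page))
                        set_obj_pfmemalloc(&objp);
        }

        return objp;
}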
1400 BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) < in kmem_cache_init()
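The BUILD_BUG_ON at line 1400 (its continuation is truncated in the listing, almost certainly a comparison against sizeof(struct rcu_head)) asserts that an rcu_head fits inside page->lru: slab_destroy (line 1899 below) overlays an rcu_head on the then-unused lru field for SLAB_DESTROY_BY_RCU caches, and kmem_rcu_free recovers the page from it with container_of.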
1532 struct page *page; in slab_out_of_memory() local
1552 list_for_each_entry(page, &n->slabs_full, lru) { in slab_out_of_memory()
1556 list_for_each_entry(page, &n->slabs_partial, lru) { in slab_out_of_memory()
1557 active_objs += page->active; in slab_out_of_memory()
1560 list_for_each_entry(page, &n->slabs_free, lru) in slab_out_of_memory()
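slab_out_of_memory walks the same three per-node lists to print occupancy when an allocation fails: full slabs contribute cachep->num objects each, partial slabs contribute page->active, and the free list is only counted. The accounting loop, reconstructed around the listed lines:

        list_for_each_entry(page, &n->slabs_full, lru) {
                active_objs += cachep->num;
                active_slabs++;
        }
        list_for_each_entry(page, &n->slabs_partial, lru) {
                active_objs += page->active;
                active_slabs++;
        }
        list_for_each_entry(page, &n->slabs_free, lru)
                num_slabs++;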
1584 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, in kmem_getpages()
1587 struct page *page; in kmem_getpages() local
1597 page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); in kmem_getpages()
1598 if (!page) { in kmem_getpages()
1605 if (page_is_pfmemalloc(page)) in kmem_getpages()
1610 add_zone_page_state(page_zone(page), in kmem_getpages()
1613 add_zone_page_state(page_zone(page), in kmem_getpages()
1615 __SetPageSlab(page); in kmem_getpages()
1616 if (page_is_pfmemalloc(page)) in kmem_getpages()
1617 SetPageSlabPfmemalloc(page); in kmem_getpages()
1620 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); in kmem_getpages()
1623 kmemcheck_mark_uninitialized_pages(page, nr_pages); in kmem_getpages()
1625 kmemcheck_mark_unallocated_pages(page, nr_pages); in kmem_getpages()
1628 return page; in kmem_getpages()
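kmem_getpages is where struct page enters the allocator: one alloc_pages_exact_node() call per slab, after which the page is branded PG_slab (plus PG_SlabPfmemalloc for emergency-reserve pages) and charged to the zone's slab vmstat counters. A trimmed reconstruction; memcg charging and the exact OOM-warning condition vary between nearby kernel versions and are elided here:

static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
                                  int nodeid)
{
        struct page *page;
        int nr_pages;

        flags |= cachep->allocflags;
        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
                flags |= __GFP_RECLAIMABLE;

        /* (memcg kmem charge elided in this sketch) */

        page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK,
                                      cachep->gfporder);
        if (!page) {
                slab_out_of_memory(cachep, flags, nodeid);
                return NULL;
        }

        /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
        if (page_is_pfmemalloc(page))
                pfmemalloc_active = true;

        nr_pages = (1 << cachep->gfporder);
        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
                add_zone_page_state(page_zone(page),
                        NR_SLAB_RECLAIMABLE, nr_pages);
        else
                add_zone_page_state(page_zone(page),
                        NR_SLAB_UNRECLAIMABLE, nr_pages);
        __SetPageSlab(page);
        if (page_is_pfmemalloc(page))
                SetPageSlabPfmemalloc(page);

        if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
                kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

                if (cachep->ctor)
                        kmemcheck_mark_uninitialized_pages(page, nr_pages);
                else
                        kmemcheck_mark_unallocated_pages(page, nr_pages);
        }

        return page;
}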
1634 static void kmem_freepages(struct kmem_cache *cachep, struct page *page) in kmem_freepages() argument
1638 kmemcheck_free_shadow(page, cachep->gfporder); in kmem_freepages()
1641 sub_zone_page_state(page_zone(page), in kmem_freepages()
1644 sub_zone_page_state(page_zone(page), in kmem_freepages()
1647 BUG_ON(!PageSlab(page)); in kmem_freepages()
1648 __ClearPageSlabPfmemalloc(page); in kmem_freepages()
1649 __ClearPageSlab(page); in kmem_freepages()
1650 page_mapcount_reset(page); in kmem_freepages()
1651 page->mapping = NULL; in kmem_freepages()
1655 __free_pages(page, cachep->gfporder); in kmem_freepages()
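kmem_freepages undoes all of that in reverse: zone counters, page flags, mapcount and mapping are reset before __free_pages. Reconstruction, with the reclaim_state crediting as in ~v3.17 and memcg uncharge elided:

static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
        const unsigned long nr_freed = (1 << cachep->gfporder);

        kmemcheck_free_shadow(page, cachep->gfporder);

        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
                sub_zone_page_state(page_zone(page),
                        NR_SLAB_RECLAIMABLE, nr_freed);
        else
                sub_zone_page_state(page_zone(page),
                        NR_SLAB_UNRECLAIMABLE, nr_freed);

        BUG_ON(!PageSlab(page));
        __ClearPageSlabPfmemalloc(page);
        __ClearPageSlab(page);
        page_mapcount_reset(page);
        page->mapping = NULL;

        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += nr_freed;
        __free_pages(page, cachep->gfporder);
        /* (memcg uncharge elided) */
}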
1662 struct page *page; in kmem_rcu_free() local
1664 page = container_of(head, struct page, rcu_head); in kmem_rcu_free()
1665 cachep = page->slab_cache; in kmem_rcu_free()
1667 kmem_freepages(cachep, page); in kmem_rcu_free()
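kmem_rcu_free is the RCU callback that slab_destroy schedules for SLAB_DESTROY_BY_RCU caches; because the rcu_head is overlaid on page->lru, container_of gets the page back and the owning cache comes from page->slab_cache. Short enough to give whole, reconstructed:

static void kmem_rcu_free(struct rcu_head *head)
{
        struct kmem_cache *cachep;
        struct page *page;

        page = container_of(head, struct page, rcu_head);
        cachep = page->slab_cache;

        kmem_freepages(cachep, page);
}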
1816 struct page *page = virt_to_head_page(objp); in check_poison_obj() local
1819 objnr = obj_to_index(cachep, page, objp); in check_poison_obj()
1821 objp = index_to_obj(cachep, page, objnr - 1); in check_poison_obj()
1828 objp = index_to_obj(cachep, page, objnr + 1); in check_poison_obj()
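On a poison-check failure, check_poison_obj uses obj_to_index/index_to_obj to also dump the two neighbouring objects (objnr - 1 and objnr + 1), which usually shows whether the corruption was an overrun from the previous object. Roughly as follows; the printk format and print_objinfo details are from memory and hedged:

        struct page *page = virt_to_head_page(objp);
        unsigned int objnr = obj_to_index(cachep, page, objp);

        if (objnr) {
                objp = index_to_obj(cachep, page, objnr - 1);
                realobj = (char *)objp + obj_offset(cachep);
                printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
                       realobj, size);
                print_objinfo(cachep, objp, 2);
        }
        if (objnr + 1 < cachep->num) {
                objp = index_to_obj(cachep, page, objnr + 1);
                realobj = (char *)objp + obj_offset(cachep);
                printk(KERN_ERR "Next obj: start=%p, len=%d\n",
                       realobj, size);
                print_objinfo(cachep, objp, 2);
        }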
1840 struct page *page) in slab_destroy_debugcheck() argument
1844 void *objp = index_to_obj(cachep, page, i); in slab_destroy_debugcheck()
1870 struct page *page) in slab_destroy_debugcheck() argument
1884 static void slab_destroy(struct kmem_cache *cachep, struct page *page) in slab_destroy() argument
1888 freelist = page->freelist; in slab_destroy()
1889 slab_destroy_debugcheck(cachep, page); in slab_destroy()
1899 head = (void *)&page->rcu_head; in slab_destroy()
1903 kmem_freepages(cachep, page); in slab_destroy()
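slab_destroy ties the RCU pieces together: grab page->freelist first (for OFF_SLAB caches it must outlive the page and be freed separately), then either free the pages immediately or defer via call_rcu on the overlaid rcu_head. Reconstruction:

static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
        void *freelist;

        freelist = page->freelist;
        slab_destroy_debugcheck(cachep, page);
        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
                struct rcu_head *head;

                /*
                 * RCU free overloads the RCU head over the LRU;
                 * the lru field is unused from here on, so this is safe.
                 */
                head = (void *)&page->rcu_head;
                call_rcu(head, kmem_rcu_free);

        } else {
                kmem_freepages(cachep, page);
        }

        /* the off-slab freelist is freed last; the page itself may
         * still be pending in an RCU grace period */
        if (OFF_SLAB(cachep))
                kmem_cache_free(cachep->freelist_cache, freelist);
}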
1916 struct page *page, *n; in slabs_destroy() local
1918 list_for_each_entry_safe(page, n, list, lru) { in slabs_destroy()
1919 list_del(&page->lru); in slabs_destroy()
1920 slab_destroy(cachep, page); in slabs_destroy()
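slabs_destroy simply drains a private list of slabs that free_block (line 3304 below) collected under the node lock, destroying them once the lock has been dropped:

static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
        struct page *page, *n;

        list_for_each_entry_safe(page, n, list, lru) {
                list_del(&page->lru);
                slab_destroy(cachep, page);
        }
}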
2376 struct page *page; in drain_freelist() local
2388 page = list_entry(p, struct page, lru); in drain_freelist()
2390 BUG_ON(page->active); in drain_freelist()
2392 list_del(&page->lru); in drain_freelist()
2399 slab_destroy(cache, page); in drain_freelist()
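drain_freelist peels completely free slabs off the tail of slabs_free, one lock hold per slab, until tofree is reached or the list empties. A sketch consistent with the listed fragments:

static int drain_freelist(struct kmem_cache *cache,
                          struct kmem_cache_node *n, int tofree)
{
        struct list_head *p;
        int nr_freed = 0;
        struct page *page;

        while (nr_freed < tofree && !list_empty(&n->slabs_free)) {

                spin_lock_irq(&n->list_lock);
                p = n->slabs_free.prev;
                if (p == &n->slabs_free) {
                        spin_unlock_irq(&n->list_lock);
                        goto out;
                }

                page = list_entry(p, struct page, lru);
#if DEBUG
                BUG_ON(page->active);
#endif
                list_del(&page->lru);
                /* safe to drop the lock: the slab is no longer linked
                 * to the cache */
                n->free_objects -= cache->num;
                spin_unlock_irq(&n->list_lock);
                slab_destroy(cache, page);
                nr_freed++;
        }
out:
        return nr_freed;
}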
2460 struct page *page, int colour_off, in alloc_slabmgmt() argument
2464 void *addr = page_address(page); in alloc_slabmgmt()
2476 page->active = 0; in alloc_slabmgmt()
2477 page->s_mem = addr + colour_off; in alloc_slabmgmt()
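alloc_slabmgmt places the freelist either inside the slab (just past the colour offset) or in a separate freelist_cache for OFF_SLAB layouts, then initialises page->active and page->s_mem. Sketch:

static void *alloc_slabmgmt(struct kmem_cache *cachep,
                            struct page *page, int colour_off,
                            gfp_t local_flags, int nodeid)
{
        void *freelist;
        void *addr = page_address(page);

        if (OFF_SLAB(cachep)) {
                /* slab management object lives off-slab */
                freelist = kmem_cache_alloc_node(cachep->freelist_cache,
                                                 local_flags, nodeid);
                if (!freelist)
                        return NULL;
        } else {
                freelist = addr + colour_off;
                colour_off += cachep->freelist_size;
        }
        page->active = 0;
        page->s_mem = addr + colour_off;
        return freelist;
}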
2481 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx) in get_free_obj() argument
2483 return ((freelist_idx_t *)page->freelist)[idx]; in get_free_obj()
2486 static inline void set_free_obj(struct page *page, in set_free_obj() argument
2489 ((freelist_idx_t *)(page->freelist))[idx] = val; in set_free_obj()
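The freelist itself is nothing more than an array of freelist_idx_t object indices; these two one-line accessors are all the structure it has:

static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
{
        return ((freelist_idx_t *)page->freelist)[idx];
}

static inline void set_free_obj(struct page *page,
                                unsigned int idx, freelist_idx_t val)
{
        ((freelist_idx_t *)(page->freelist))[idx] = val;
}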
2493 struct page *page) in cache_init_objs() argument
2498 void *objp = index_to_obj(cachep, page, i); in cache_init_objs()
2534 set_obj_status(page, i, OBJECT_FREE); in cache_init_objs()
2535 set_free_obj(page, i, i); in cache_init_objs()
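cache_init_objs runs the constructor over every object and seeds the identity freelist (slot i holds index i) plus the OBJECT_FREE status byte. With the DEBUG poisoning and red-zoning stripped out, the shape is:

static void cache_init_objs(struct kmem_cache *cachep,
                            struct page *page)
{
        int i;

        for (i = 0; i < cachep->num; i++) {
                void *objp = index_to_obj(cachep, page, i);

                /* (DEBUG poison/red-zone handling elided) */
                if (cachep->ctor)
                        cachep->ctor(objp);

                set_obj_status(page, i, OBJECT_FREE);
                set_free_obj(page, i, i);
        }
}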
2549 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page, in slab_get_obj() argument
2554 objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); in slab_get_obj()
2555 page->active++; in slab_get_obj()
2563 static void slab_put_obj(struct kmem_cache *cachep, struct page *page, in slab_put_obj() argument
2566 unsigned int objnr = obj_to_index(cachep, page, objp); in slab_put_obj()
2574 for (i = page->active; i < cachep->num; i++) { in slab_put_obj()
2575 if (get_free_obj(page, i) == objnr) { in slab_put_obj()
2582 page->active--; in slab_put_obj()
2583 set_free_obj(page, page->active, objnr); in slab_put_obj()
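Allocation and free are O(1) against that array: page->active is a stack pointer into the freelist, slab_get_obj consumes the index at the top, slab_put_obj pushes the freed object's index back. The DEBUG double-free scan (lines 2574-2575) walks the not-yet-allocated tail looking for the index being pushed. Reconstructed:

static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
                          int nodeid)
{
        void *objp;

        objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
        page->active++;
#if DEBUG
        WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
#endif
        return objp;
}

static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
                         void *objp, int nodeid)
{
        unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG
        unsigned int i;

        /* verify that the slab belongs to the intended node */
        WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);

        /* verify double free: the freed index must not already be
         * on the free stack */
        for (i = page->active; i < cachep->num; i++) {
                if (get_free_obj(page, i) == objnr) {
                        printk(KERN_ERR "slab: double free detected in cache "
                               "'%s', objp %p\n", cachep->name, objp);
                        BUG();
                }
        }
#endif
        page->active--;
        set_free_obj(page, page->active, objnr);
}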
2591 static void slab_map_pages(struct kmem_cache *cache, struct page *page, in slab_map_pages() argument
2594 page->slab_cache = cache; in slab_map_pages()
2595 page->freelist = freelist; in slab_map_pages()
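slab_map_pages is the two-line binding that makes virt_to_cache and the debug lookups work, stashing the owning cache and the freelist in the struct page itself:

static void slab_map_pages(struct kmem_cache *cache, struct page *page,
                           void *freelist)
{
        page->slab_cache = cache;
        page->freelist = freelist;
}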
2603 gfp_t flags, int nodeid, struct page *page) in cache_grow() argument
2649 if (!page) in cache_grow()
2650 page = kmem_getpages(cachep, local_flags, nodeid); in cache_grow()
2651 if (!page) in cache_grow()
2655 freelist = alloc_slabmgmt(cachep, page, offset, in cache_grow()
2660 slab_map_pages(cachep, page, freelist); in cache_grow()
2662 cache_init_objs(cachep, page); in cache_grow()
2670 list_add_tail(&page->lru, &(n->slabs_free)); in cache_grow()
2676 kmem_freepages(cachep, page); in cache_grow()
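cache_grow assembles the pieces above into a new slab: take pages (or accept pre-allocated ones from fallback_alloc), attach management, initialise objects, and park the slab on slabs_free. A skeleton with the colour-offset, GFP-check and irq/lock bracketing elided:

static int cache_grow(struct kmem_cache *cachep,
                      gfp_t flags, int nodeid, struct page *page)
{
        void *freelist;
        size_t offset;
        gfp_t local_flags;
        struct kmem_cache_node *n;

        /* ... flag checks, n lookup, colour offset computation elided ... */

        if (!page)
                page = kmem_getpages(cachep, local_flags, nodeid);
        if (!page)
                goto failed;

        /* get slab management */
        freelist = alloc_slabmgmt(cachep, page, offset,
                                  local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
        if (!freelist)
                goto opps1;

        slab_map_pages(cachep, page, freelist);

        cache_init_objs(cachep, page);

        spin_lock(&n->list_lock);
        /* make slab active */
        list_add_tail(&page->lru, &(n->slabs_free));
        STATS_INC_GROWN(cachep);
        n->free_objects += cachep->num;
        spin_unlock(&n->list_lock);
        return 1;
opps1:
        kmem_freepages(cachep, page);
failed:
        return 0;
}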
2725 struct page *page; in cache_free_debugcheck() local
2731 page = virt_to_head_page(objp); in cache_free_debugcheck()
2741 objnr = obj_to_index(cachep, page, objp); in cache_free_debugcheck()
2744 BUG_ON(objp != index_to_obj(cachep, page, objnr)); in cache_free_debugcheck()
2746 set_obj_status(page, objnr, OBJECT_FREE); in cache_free_debugcheck()
2804 struct page *page; in cache_alloc_refill() local
2814 page = list_entry(entry, struct page, lru); in cache_alloc_refill()
2822 BUG_ON(page->active >= cachep->num); in cache_alloc_refill()
2824 while (page->active < cachep->num && batchcount--) { in cache_alloc_refill()
2829 ac_put_obj(cachep, ac, slab_get_obj(cachep, page, in cache_alloc_refill()
2834 list_del(&page->lru); in cache_alloc_refill()
2835 if (page->active == cachep->num) in cache_alloc_refill()
2836 list_add(&page->lru, &n->slabs_full); in cache_alloc_refill()
2838 list_add(&page->lru, &n->slabs_partial); in cache_alloc_refill()
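cache_alloc_refill pulls batchcount objects from partial (preferred) then free slabs into the per-CPU array, re-filing each slab as it fills up. The central loop, reconstructed around the listed lines:

        while (batchcount > 0) {
                struct list_head *entry;
                struct page *page;

                /* pick the slab the allocations will come from */
                entry = n->slabs_partial.next;
                if (entry == &n->slabs_partial) {
                        n->free_touched = 1;
                        entry = n->slabs_free.next;
                        if (entry == &n->slabs_free)
                                goto must_grow;
                }

                page = list_entry(entry, struct page, lru);
                check_spinlock_acquired(cachep);

                BUG_ON(page->active >= cachep->num);

                while (page->active < cachep->num && batchcount--) {
                        STATS_INC_ALLOCED(cachep);
                        STATS_INC_ACTIVE(cachep);
                        STATS_SET_HIGH(cachep);

                        ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
                                                            node));
                }

                /* move slab to the correct list */
                list_del(&page->lru);
                if (page->active == cachep->num)
                        list_add(&page->lru, &n->slabs_full);
                else
                        list_add(&page->lru, &n->slabs_partial);
        }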
2880 struct page *page; in cache_alloc_debugcheck_after() local
2913 page = virt_to_head_page(objp); in cache_alloc_debugcheck_after()
2914 set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE); in cache_alloc_debugcheck_after()
3056 struct page *page; in fallback_alloc() local
3061 page = kmem_getpages(cache, local_flags, numa_mem_id()); in fallback_alloc()
3064 if (page) { in fallback_alloc()
3068 nid = page_to_nid(page); in fallback_alloc()
3069 if (cache_grow(cache, flags, nid, page)) { in fallback_alloc()
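fallback_alloc's last resort allocates pages itself and hands them to cache_grow for whatever node they landed on; note the retry, since no locks are held while growing. Reconstructed tail (the irq-enable bracketing around the page allocation is elided):

        page = kmem_getpages(cache, local_flags, numa_mem_id());
        if (page) {
                /* insert into the appropriate per-node queues */
                nid = page_to_nid(page);
                if (cache_grow(cache, flags, nid, page)) {
                        obj = ____cache_alloc_node(cache,
                                flags | GFP_THISNODE, nid);
                        if (!obj)
                                /*
                                 * Another processor may allocate the
                                 * objects in the slab since we are
                                 * not holding any locks.
                                 */
                                goto retry;
                } else {
                        /* cache_grow already freed the page */
                        obj = NULL;
                }
        }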
3098 struct page *page; in ____cache_alloc_node() local
3118 page = list_entry(entry, struct page, lru); in ____cache_alloc_node()
3125 BUG_ON(page->active == cachep->num); in ____cache_alloc_node()
3127 obj = slab_get_obj(cachep, page, nodeid); in ____cache_alloc_node()
3130 list_del(&page->lru); in ____cache_alloc_node()
3132 if (page->active == cachep->num) in ____cache_alloc_node()
3133 list_add(&page->lru, &n->slabs_full); in ____cache_alloc_node()
3135 list_add(&page->lru, &n->slabs_partial); in ____cache_alloc_node()
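____cache_alloc_node is the single-object, per-node mirror of the refill loop: one slab_get_obj, then the same re-filing between slabs_full and slabs_partial. Its core, reconstructed:

        page = list_entry(entry, struct page, lru);
        check_spinlock_acquired_node(cachep, nodeid);

        STATS_INC_NODEALLOCS(cachep);
        STATS_INC_ACTIVE(cachep);
        STATS_SET_HIGH(cachep);

        BUG_ON(page->active == cachep->num);

        obj = slab_get_obj(cachep, page, nodeid);
        n->free_objects--;
        /* move slab to the correct list */
        list_del(&page->lru);

        if (page->active == cachep->num)
                list_add(&page->lru, &n->slabs_full);
        else
                list_add(&page->lru, &n->slabs_partial);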
3288 struct page *page; in free_block() local
3293 page = virt_to_head_page(objp); in free_block()
3294 list_del(&page->lru); in free_block()
3296 slab_put_obj(cachep, page, objp, node); in free_block()
3301 if (page->active == 0) { in free_block()
3304 list_add_tail(&page->lru, list); in free_block()
3306 list_add(&page->lru, &n->slabs_free); in free_block()
3313 list_add_tail(&page->lru, &n->slabs_partial); in free_block()
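free_block is the inverse path: each object's slab is found through virt_to_head_page, the object is pushed back with slab_put_obj, and the slab is re-filed. Completely free slabs either stay on slabs_free or, once the node is over its free_limit, move to the caller's private list for slabs_destroy. A reconstruction assuming the v3.17 signature with the list parameter (matching line 3304 above):

static void free_block(struct kmem_cache *cachep, void **objpp,
                       int nr_objects, int node, struct list_head *list)
{
        int i;
        struct kmem_cache_node *n = get_node(cachep, node);

        for (i = 0; i < nr_objects; i++) {
                void *objp;
                struct page *page;

                clear_obj_pfmemalloc(&objpp[i]);
                objp = objpp[i];

                page = virt_to_head_page(objp);
                list_del(&page->lru);
                check_spinlock_acquired_node(cachep, node);
                slab_put_obj(cachep, page, objp, node);
                STATS_DEC_ACTIVE(cachep);
                n->free_objects++;

                /* fix up the slab chains */
                if (page->active == 0) {
                        if (n->free_objects > n->free_limit) {
                                n->free_objects -= cachep->num;
                                list_add_tail(&page->lru, list);
                        } else {
                                list_add(&page->lru, &n->slabs_free);
                        }
                } else {
                        /* move to the tail of the partial list, giving
                         * the remaining objects maximum time to be freed */
                        list_add_tail(&page->lru, &n->slabs_partial);
                }
        }
}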
3354 struct page *page; in cache_flusharray() local
3356 page = list_entry(p, struct page, lru); in cache_flusharray()
3357 BUG_ON(page->active); in cache_flusharray()
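The cache_flusharray fragment at lines 3354-3357 is a STATS-only consistency walk, asserting that every slab on slabs_free really has page->active == 0 while counting them. Approximately:

#if STATS
        {
                int i = 0;
                struct list_head *p;

                p = n->slabs_free.next;
                while (p != &(n->slabs_free)) {
                        struct page *page;

                        page = list_entry(p, struct page, lru);
                        BUG_ON(page->active);

                        i++;
                        p = p->next;
                }
                STATS_SET_FREEABLE(cachep, i);
        }
#endif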
3920 struct page *page; in get_slabinfo() local
3937 list_for_each_entry(page, &n->slabs_full, lru) { in get_slabinfo()
3938 if (page->active != cachep->num && !error) in get_slabinfo()
3943 list_for_each_entry(page, &n->slabs_partial, lru) { in get_slabinfo()
3944 if (page->active == cachep->num && !error) in get_slabinfo()
3946 if (!page->active && !error) in get_slabinfo()
3948 active_objs += page->active; in get_slabinfo()
3951 list_for_each_entry(page, &n->slabs_free, lru) { in get_slabinfo()
3952 if (page->active && !error) in get_slabinfo()
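get_slabinfo does the same three-list walk as slab_out_of_memory but cross-checks the invariants: full slabs must have active == num, partial slabs must be neither empty nor full, free slabs must be empty. Sketch; the error strings are paraphrased:

        list_for_each_entry(page, &n->slabs_full, lru) {
                if (page->active != cachep->num && !error)
                        error = "slabs_full accounting error";
                active_objs += cachep->num;
                active_slabs++;
        }
        list_for_each_entry(page, &n->slabs_partial, lru) {
                if (page->active == cachep->num && !error)
                        error = "slabs_partial accounting error";
                if (!page->active && !error)
                        error = "slabs_partial accounting error";
                active_objs += page->active;
                active_slabs++;
        }
        list_for_each_entry(page, &n->slabs_free, lru) {
                if (page->active && !error)
                        error = "slabs_free accounting error";
                num_slabs++;
        }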
4100 struct page *page) in handle_slab() argument
4107 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) { in handle_slab()
4108 if (get_obj_status(page, i) != OBJECT_ACTIVE) in handle_slab()
4135 struct page *page; in leaks_show() local
4156 list_for_each_entry(page, &n->slabs_full, lru) in leaks_show()
4157 handle_slab(x, cachep, page); in leaks_show()
4158 list_for_each_entry(page, &n->slabs_partial, lru) in leaks_show()
4159 handle_slab(x, cachep, page); in leaks_show()
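Finally, the leak reporter: leaks_show feeds every full and partial slab to handle_slab, which walks the objects at the usual s_mem + i * size stride and records the allocation caller (dbg_userword) for each OBJECT_ACTIVE slot; add_caller is the counting/dedup helper that fills the n[] array. handle_slab reconstructed in full:

static void handle_slab(unsigned long *n, struct kmem_cache *c,
                        struct page *page)
{
        void *p;
        int i;

        if (n[0] == n[1])
                return;
        for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
                if (get_obj_status(page, i) != OBJECT_ACTIVE)
                        continue;

                if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
                        return;
        }
}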