Lines matching refs:page — identifier cross-reference for uses of struct page in the SLAB allocator (mm/slab.c). Each entry gives the source line, the code, and the enclosing function; "argument" and "local" mark the kind of reference.

398 static void set_obj_status(struct page *page, int idx, int val)  in set_obj_status()  argument
402 struct kmem_cache *cachep = page->slab_cache; in set_obj_status()
405 status = (char *)page->freelist + freelist_size; in set_obj_status()
409 static inline unsigned int get_obj_status(struct page *page, int idx) in get_obj_status() argument
413 struct kmem_cache *cachep = page->slab_cache; in get_obj_status()
416 status = (char *)page->freelist + freelist_size; in get_obj_status()
422 static inline void set_obj_status(struct page *page, int idx, int val) {} in set_obj_status() argument
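The accessors above (398-422) reveal the debug layout: with the leak tracker enabled, one status byte per object is stored directly behind the on-slab freelist, which is why both compute "(char *)page->freelist + freelist_size" before indexing. A minimal userspace sketch of that layout; struct slab_mgmt and its field names are my stand-ins, only the pointer arithmetic follows the listing:

#include <stddef.h>

#define OBJECT_FREE	0
#define OBJECT_ACTIVE	1

/* Model: the freelist indexes come first in the management buffer,
 * then one status byte per object, which is why both accessors start
 * from freelist + freelist_size. */
struct slab_mgmt {
	void	*freelist;
	size_t	freelist_size;
};

static void set_obj_status(struct slab_mgmt *s, int idx, int val)
{
	char *status = (char *)s->freelist + s->freelist_size;

	status[idx] = val;
}

static int get_obj_status(const struct slab_mgmt *s, int idx)
{
	const char *status = (const char *)s->freelist + s->freelist_size;

	return status[idx];
}
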
437 struct page *page = virt_to_head_page(obj); in virt_to_cache() local
438 return page->slab_cache; in virt_to_cache()
441 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, in index_to_obj() argument
444 return page->s_mem + cache->size * idx; in index_to_obj()
454 const struct page *page, void *obj) in obj_to_index() argument
456 u32 offset = (obj - page->s_mem); in obj_to_index()
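Lines 437-456 show the two directions of object addressing: virt_to_cache() finds the owning cache through page->slab_cache, index_to_obj() computes an object's address as s_mem + size * idx, and obj_to_index() inverts that from the byte offset. A small self-contained sketch; the struct names are mine, and I use plain division where mm/slab.c uses a precomputed reciprocal:

#include <assert.h>
#include <stddef.h>

struct cache_model { size_t size; };	/* object size, cache-wide */
struct slab_model  { char *s_mem; };	/* address of object 0     */

static void *index_to_obj(const struct cache_model *c,
			  const struct slab_model *s, unsigned int idx)
{
	return s->s_mem + c->size * idx;	/* page->s_mem + size * idx */
}

static unsigned int obj_to_index(const struct cache_model *c,
				 const struct slab_model *s, const void *obj)
{
	/* mm/slab.c does this division with a cached reciprocal */
	return ((const char *)obj - s->s_mem) / c->size;
}

int main(void)
{
	char storage[256];
	struct cache_model c = { .size = 32 };
	struct slab_model  s = { .s_mem = storage };

	assert(obj_to_index(&c, &s, index_to_obj(&c, &s, 5)) == 5);
	return 0;
}

The BUG_ON at line 2735 checks exactly this round trip on every debug free.
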
690 static inline bool is_slab_pfmemalloc(struct page *page) in is_slab_pfmemalloc() argument
692 return PageSlabPfmemalloc(page); in is_slab_pfmemalloc()
700 struct page *page; in recheck_pfmemalloc_active() local
707 list_for_each_entry(page, &n->slabs_full, lru) in recheck_pfmemalloc_active()
708 if (is_slab_pfmemalloc(page)) in recheck_pfmemalloc_active()
711 list_for_each_entry(page, &n->slabs_partial, lru) in recheck_pfmemalloc_active()
712 if (is_slab_pfmemalloc(page)) in recheck_pfmemalloc_active()
715 list_for_each_entry(page, &n->slabs_free, lru) in recheck_pfmemalloc_active()
716 if (is_slab_pfmemalloc(page)) in recheck_pfmemalloc_active()
756 struct page *page = virt_to_head_page(objp); in __ac_get_obj() local
757 ClearPageSlabPfmemalloc(page); in __ac_get_obj()
789 struct page *page = virt_to_head_page(objp); in __ac_put_obj() local
790 if (PageSlabPfmemalloc(page)) in __ac_put_obj()
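Lines 690-790 track slabs whose pages were granted from the emergency reserves (pfmemalloc): recheck_pfmemalloc_active() rescans all three per-node slab lists and bails out as soon as one flagged page is found, while __ac_get_obj()/__ac_put_obj() clear or test the flag on an individual object's head page. A toy version of the rescan, assuming a simple singly linked list in place of the kernel's list_head walks:

#include <stdbool.h>

/* Toy singly linked slab list; the kernel walks struct list_head
 * chains with list_for_each_entry() instead. */
struct slab { bool pfmemalloc; struct slab *next; };

struct node {
	struct slab *slabs_full, *slabs_partial, *slabs_free;
};

static bool any_pfmemalloc(const struct slab *s)
{
	for (; s; s = s->next)
		if (s->pfmemalloc)	/* PageSlabPfmemalloc() */
			return true;
	return false;
}

/* recheck_pfmemalloc_active() in spirit: the node stays marked while
 * any slab on any of its three lists came from the reserves. */
static bool node_pfmemalloc_active(const struct node *n)
{
	return any_pfmemalloc(n->slabs_full) ||
	       any_pfmemalloc(n->slabs_partial) ||
	       any_pfmemalloc(n->slabs_free);
}
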
1401 BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) < in kmem_cache_init()
1534 struct page *page; in slab_out_of_memory() local
1554 list_for_each_entry(page, &n->slabs_full, lru) { in slab_out_of_memory()
1558 list_for_each_entry(page, &n->slabs_partial, lru) { in slab_out_of_memory()
1559 active_objs += page->active; in slab_out_of_memory()
1562 list_for_each_entry(page, &n->slabs_free, lru) in slab_out_of_memory()
1586 static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, in kmem_getpages()
1589 struct page *page; in kmem_getpages() local
1596 page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); in kmem_getpages()
1597 if (!page) { in kmem_getpages()
1602 if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) { in kmem_getpages()
1603 __free_pages(page, cachep->gfporder); in kmem_getpages()
1608 if (page_is_pfmemalloc(page)) in kmem_getpages()
1613 add_zone_page_state(page_zone(page), in kmem_getpages()
1616 add_zone_page_state(page_zone(page), in kmem_getpages()
1618 __SetPageSlab(page); in kmem_getpages()
1619 if (page_is_pfmemalloc(page)) in kmem_getpages()
1620 SetPageSlabPfmemalloc(page); in kmem_getpages()
1623 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); in kmem_getpages()
1626 kmemcheck_mark_uninitialized_pages(page, nr_pages); in kmem_getpages()
1628 kmemcheck_mark_unallocated_pages(page, nr_pages); in kmem_getpages()
1631 return page; in kmem_getpages()
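kmem_getpages() (1586-1631) is the slab's page-allocation front end: grab an order-gfporder page, charge it to the memcg and undo the allocation if that fails, bump the NR_SLAB_* zone counters, set the PG_slab bit, and remember whether the page came from the reserves. A loose userspace outline of that ordering; every helper here is a named stand-in, and the zone-stat and kmemcheck steps are elided:

#include <stdbool.h>
#include <stdlib.h>

struct page_alloc { bool slab; bool pfmemalloc; };

/* Stand-ins (assumptions) for the allocator and memcg hooks. */
static struct page_alloc *alloc_order_pages(void)
{
	return calloc(1, sizeof(struct page_alloc));	/* __alloc_pages_node() */
}

static bool charge_slab_to_memcg(struct page_alloc *p) { (void)p; return true; }
static bool came_from_reserves(struct page_alloc *p)  { (void)p; return false; }

static struct page_alloc *getpages_model(void)
{
	struct page_alloc *page = alloc_order_pages();

	if (!page)
		return NULL;
	if (!charge_slab_to_memcg(page)) {	/* memcg_charge_slab() failed */
		free(page);			/* __free_pages()              */
		return NULL;
	}
	page->slab = true;			/* __SetPageSlab()             */
	if (came_from_reserves(page))
		page->pfmemalloc = true;	/* SetPageSlabPfmemalloc()     */
	return page;
}
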
1637 static void kmem_freepages(struct kmem_cache *cachep, struct page *page) in kmem_freepages() argument
1641 kmemcheck_free_shadow(page, cachep->gfporder); in kmem_freepages()
1644 sub_zone_page_state(page_zone(page), in kmem_freepages()
1647 sub_zone_page_state(page_zone(page), in kmem_freepages()
1650 BUG_ON(!PageSlab(page)); in kmem_freepages()
1651 __ClearPageSlabPfmemalloc(page); in kmem_freepages()
1652 __ClearPageSlab(page); in kmem_freepages()
1653 page_mapcount_reset(page); in kmem_freepages()
1654 page->mapping = NULL; in kmem_freepages()
1658 __free_kmem_pages(page, cachep->gfporder); in kmem_freepages()
1664 struct page *page; in kmem_rcu_free() local
1666 page = container_of(head, struct page, rcu_head); in kmem_rcu_free()
1667 cachep = page->slab_cache; in kmem_rcu_free()
1669 kmem_freepages(cachep, page); in kmem_rcu_free()
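kmem_rcu_free() (1664-1669) shows the classic RCU teardown idiom: call_rcu() was handed the rcu_head embedded in struct page, and the callback recovers the page with container_of(), then reads page->slab_cache to learn which cache the page belongs to. The sketch below demonstrates just that pointer recovery with a local container_of; the struct names are mine:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_model { struct rcu_head_model *next; };

struct page_rcu {
	void *slab_cache;		/* owning cache, read by the callback */
	struct rcu_head_model rcu_head;
};

/* kmem_rcu_free() in miniature: from the rcu_head back to its page. */
static void rcu_free_model(struct rcu_head_model *head)
{
	struct page_rcu *page =
		container_of(head, struct page_rcu, rcu_head);

	(void)page->slab_cache;		/* cachep = page->slab_cache */
	/* kmem_freepages(cachep, page) would run here */
}

int main(void)
{
	struct page_rcu p = { 0 };

	rcu_free_model(&p.rcu_head);	/* callback recovers &p */
	return 0;
}
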
1818 struct page *page = virt_to_head_page(objp); in check_poison_obj() local
1821 objnr = obj_to_index(cachep, page, objp); in check_poison_obj()
1823 objp = index_to_obj(cachep, page, objnr - 1); in check_poison_obj()
1830 objp = index_to_obj(cachep, page, objnr + 1); in check_poison_obj()
1842 struct page *page) in slab_destroy_debugcheck() argument
1846 void *objp = index_to_obj(cachep, page, i); in slab_destroy_debugcheck()
1872 struct page *page) in slab_destroy_debugcheck() argument
1886 static void slab_destroy(struct kmem_cache *cachep, struct page *page) in slab_destroy() argument
1890 freelist = page->freelist; in slab_destroy()
1891 slab_destroy_debugcheck(cachep, page); in slab_destroy()
1893 call_rcu(&page->rcu_head, kmem_rcu_free); in slab_destroy()
1895 kmem_freepages(cachep, page); in slab_destroy()
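slab_destroy() (1886-1895) snapshots page->freelist, runs the debug sweep over every object, and then picks one of two release paths: caches created with SLAB_DESTROY_BY_RCU defer the page free through call_rcu() so lockless readers can still examine it, everything else frees immediately. A shape-only sketch, with the two paths passed in as callbacks since the real ones need the kernel:

#include <stdbool.h>

struct page_d { void *freelist; };

static void destroy_model(bool destroy_by_rcu, struct page_d *page,
			  void (*defer)(struct page_d *),
			  void (*free_now)(struct page_d *))
{
	void *freelist = page->freelist;  /* snapshot, as at line 1890 */

	(void)freelist;	/* an off-slab freelist would be freed separately */
	if (destroy_by_rcu)
		defer(page);		/* call_rcu(&page->rcu_head, ...) */
	else
		free_now(page);		/* kmem_freepages()               */
}
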
1907 struct page *page, *n; in slabs_destroy() local
1909 list_for_each_entry_safe(page, n, list, lru) { in slabs_destroy()
1910 list_del(&page->lru); in slabs_destroy()
1911 slab_destroy(cachep, page); in slabs_destroy()
2367 struct page *page; in drain_freelist() local
2379 page = list_entry(p, struct page, lru); in drain_freelist()
2381 BUG_ON(page->active); in drain_freelist()
2383 list_del(&page->lru); in drain_freelist()
2390 slab_destroy(cache, page); in drain_freelist()
2451 struct page *page, int colour_off, in alloc_slabmgmt() argument
2455 void *addr = page_address(page); in alloc_slabmgmt()
2467 page->active = 0; in alloc_slabmgmt()
2468 page->s_mem = addr + colour_off; in alloc_slabmgmt()
2472 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx) in get_free_obj() argument
2474 return ((freelist_idx_t *)page->freelist)[idx]; in get_free_obj()
2477 static inline void set_free_obj(struct page *page, in set_free_obj() argument
2480 ((freelist_idx_t *)(page->freelist))[idx] = val; in set_free_obj()
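alloc_slabmgmt() and the accessors at 2451-2480 show what the freelist actually is: an array of small integers, one per object, stored in the management area, with page->active acting as a cursor into it. A direct transliteration; the kernel picks the index width at build time from the largest possible object count, here I fix it at u8:

#include <stdint.h>

typedef uint8_t freelist_idx_t;	/* fixed at u8 for this sketch */

struct slab_idx {
	void *freelist;		/* array of freelist_idx_t      */
	unsigned int active;	/* objects currently handed out */
};

static freelist_idx_t get_free_obj(const struct slab_idx *s, unsigned int i)
{
	return ((const freelist_idx_t *)s->freelist)[i];
}

static void set_free_obj(struct slab_idx *s, unsigned int i, freelist_idx_t v)
{
	((freelist_idx_t *)s->freelist)[i] = v;
}
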
2484 struct page *page) in cache_init_objs() argument
2489 void *objp = index_to_obj(cachep, page, i); in cache_init_objs()
2525 set_obj_status(page, i, OBJECT_FREE); in cache_init_objs()
2526 set_free_obj(page, i, i); in cache_init_objs()
2540 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page, in slab_get_obj() argument
2545 objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); in slab_get_obj()
2546 page->active++; in slab_get_obj()
2554 static void slab_put_obj(struct kmem_cache *cachep, struct page *page, in slab_put_obj() argument
2557 unsigned int objnr = obj_to_index(cachep, page, objp); in slab_put_obj()
2565 for (i = page->active; i < cachep->num; i++) { in slab_put_obj()
2566 if (get_free_obj(page, i) == objnr) { in slab_put_obj()
2573 page->active--; in slab_put_obj()
2574 set_free_obj(page, page->active, objnr); in slab_put_obj()
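slab_get_obj()/slab_put_obj() (2540-2574) use that array as a stack of free object indexes: allocation reads the slot at page->active and advances the cursor, free retreats it and writes the freed index back, and the debug loop at 2565 scans the free region to catch double frees. A runnable model of the discipline; the names are mine, and the identity initialization mirrors set_free_obj(page, i, i) at line 2526:

#include <assert.h>
#include <stdint.h>

struct slab_stack {
	uint8_t freelist[8];	/* free-object indexes, stack layout */
	unsigned int active;	/* cursor: first still-free slot     */
	unsigned int num;	/* capacity, cachep->num             */
};

/* slab_get_obj(): read the index at "active", then advance it. */
static unsigned int fl_get(struct slab_stack *s)
{
	assert(s->active < s->num);	/* BUG_ON(page->active == cachep->num) */
	return s->freelist[s->active++];
}

/* slab_put_obj(): retreat "active" and store the freed index there. */
static void fl_put(struct slab_stack *s, unsigned int objnr)
{
	assert(s->active > 0);
	s->freelist[--s->active] = (uint8_t)objnr;
}

int main(void)
{
	struct slab_stack s = {
		.freelist = { 0, 1, 2, 3, 4, 5, 6, 7 },	/* identity init */
		.num = 8,
	};
	unsigned int a = fl_get(&s);
	unsigned int b = fl_get(&s);

	fl_put(&s, a);
	assert(fl_get(&s) == a && b == 1);	/* LIFO: last freed, first reused */
	return 0;
}

The LIFO effect in main() is deliberate: the most recently freed object is handed out next, which keeps hot objects cache-warm.
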
2582 static void slab_map_pages(struct kmem_cache *cache, struct page *page, in slab_map_pages() argument
2585 page->slab_cache = cache; in slab_map_pages()
2586 page->freelist = freelist; in slab_map_pages()
2594 gfp_t flags, int nodeid, struct page *page) in cache_grow() argument
2640 if (!page) in cache_grow()
2641 page = kmem_getpages(cachep, local_flags, nodeid); in cache_grow()
2642 if (!page) in cache_grow()
2646 freelist = alloc_slabmgmt(cachep, page, offset, in cache_grow()
2651 slab_map_pages(cachep, page, freelist); in cache_grow()
2653 cache_init_objs(cachep, page); in cache_grow()
2661 list_add_tail(&page->lru, &(n->slabs_free)); in cache_grow()
2667 kmem_freepages(cachep, page); in cache_grow()
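cache_grow() (2594-2667) strings the pieces together: take the caller's page or allocate one, carve the freelist management area at the current colour offset, publish page->slab_cache and page->freelist via slab_map_pages(), initialize the objects, and park the new slab on slabs_free; if the management allocation fails, the page goes straight back. A condensed sketch of that ordering with trivial stand-in helpers (all assumptions):

#include <stdlib.h>

struct page_m { void *slab_cache; void *freelist; int on_free_list; };

static struct page_m *getpages(void)
{
	return calloc(1, sizeof(struct page_m));	/* kmem_getpages()  */
}

static void *alloc_mgmt(struct page_m *pg, int colour_off)
{
	(void)pg; (void)colour_off;
	return malloc(16);				/* alloc_slabmgmt() */
}

static void freepages(struct page_m *pg)
{
	free(pg);					/* kmem_freepages() */
}

/* The ordering of cache_grow() per lines 2594-2667. */
static int grow(struct page_m *page, void *cache, int colour_off)
{
	void *freelist;

	if (!page)
		page = getpages();
	if (!page)
		return 0;

	freelist = alloc_mgmt(page, colour_off);
	if (!freelist) {
		freepages(page);	/* failure path: hand the page back */
		return 0;
	}
	page->slab_cache = cache;	/* slab_map_pages()                  */
	page->freelist = freelist;
	/* cache_init_objs() would build the identity freelist here        */
	page->on_free_list = 1;		/* list_add_tail(..., slabs_free)    */
	return 1;
}
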
2716 struct page *page; in cache_free_debugcheck() local
2722 page = virt_to_head_page(objp); in cache_free_debugcheck()
2732 objnr = obj_to_index(cachep, page, objp); in cache_free_debugcheck()
2735 BUG_ON(objp != index_to_obj(cachep, page, objnr)); in cache_free_debugcheck()
2737 set_obj_status(page, objnr, OBJECT_FREE); in cache_free_debugcheck()
2795 struct page *page; in cache_alloc_refill() local
2805 page = list_entry(entry, struct page, lru); in cache_alloc_refill()
2813 BUG_ON(page->active >= cachep->num); in cache_alloc_refill()
2815 while (page->active < cachep->num && batchcount--) { in cache_alloc_refill()
2820 ac_put_obj(cachep, ac, slab_get_obj(cachep, page, in cache_alloc_refill()
2825 list_del(&page->lru); in cache_alloc_refill()
2826 if (page->active == cachep->num) in cache_alloc_refill()
2827 list_add(&page->lru, &n->slabs_full); in cache_alloc_refill()
2829 list_add(&page->lru, &n->slabs_partial); in cache_alloc_refill()
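The refill loop (2795-2829) pulls up to batchcount objects off one slab from the node lists into the per-CPU array, then re-files the slab by its new fill level. The same rule reappears in ____cache_alloc_node() at 3123-3126, so the sketch below covers both; the list identities are reduced to an enum:

struct slab_level { unsigned int active, num; };

enum slab_list { LIST_PARTIAL, LIST_FULL };

/* Re-filing rule shared by cache_alloc_refill() (2826-2829) and
 * ____cache_alloc_node() (3123-3126). */
static enum slab_list refile_after_alloc(const struct slab_level *s)
{
	return s->active == s->num ? LIST_FULL : LIST_PARTIAL;
}

/* The refill loop: pop at most "batch" objects off one slab, only
 * while the slab still has free objects, as at line 2815. */
static unsigned int take_batch(struct slab_level *s, unsigned int batch)
{
	unsigned int taken = 0;

	while (s->active < s->num && batch--) {
		s->active++;		/* slab_get_obj() per object */
		taken++;
	}
	return taken;
}
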
2871 struct page *page; in cache_alloc_debugcheck_after() local
2904 page = virt_to_head_page(objp); in cache_alloc_debugcheck_after()
2905 set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE); in cache_alloc_debugcheck_after()
3047 struct page *page; in fallback_alloc() local
3052 page = kmem_getpages(cache, local_flags, numa_mem_id()); in fallback_alloc()
3055 if (page) { in fallback_alloc()
3059 nid = page_to_nid(page); in fallback_alloc()
3060 if (cache_grow(cache, flags, nid, page)) { in fallback_alloc()
3089 struct page *page; in ____cache_alloc_node() local
3109 page = list_entry(entry, struct page, lru); in ____cache_alloc_node()
3116 BUG_ON(page->active == cachep->num); in ____cache_alloc_node()
3118 obj = slab_get_obj(cachep, page, nodeid); in ____cache_alloc_node()
3121 list_del(&page->lru); in ____cache_alloc_node()
3123 if (page->active == cachep->num) in ____cache_alloc_node()
3124 list_add(&page->lru, &n->slabs_full); in ____cache_alloc_node()
3126 list_add(&page->lru, &n->slabs_partial); in ____cache_alloc_node()
3279 struct page *page; in free_block() local
3284 page = virt_to_head_page(objp); in free_block()
3285 list_del(&page->lru); in free_block()
3287 slab_put_obj(cachep, page, objp, node); in free_block()
3292 if (page->active == 0) { in free_block()
3295 list_add_tail(&page->lru, list); in free_block()
3297 list_add(&page->lru, &n->slabs_free); in free_block()
3304 list_add_tail(&page->lru, &n->slabs_partial); in free_block()
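free_block() (3279-3304) is the mirror image: each object is pushed back with slab_put_obj(), and the slab is re-filed from its new active count. A fully emptied slab is either queued for destruction (when the node already caches more free objects than its limit allows) or kept ready on slabs_free; anything still in use goes to the tail of slabs_partial. The decision reduced to a pure function, with the field names abbreviated:

struct slab_level2 { unsigned int active; };

enum free_dest { DEST_DESTROY, DEST_SLABS_FREE, DEST_SLABS_PARTIAL };

/* free_block()'s re-filing (3292-3304). */
static enum free_dest refile_after_free(const struct slab_level2 *s,
					unsigned long node_free_objects,
					unsigned long free_limit)
{
	if (s->active == 0)
		return node_free_objects > free_limit ? DEST_DESTROY
						      : DEST_SLABS_FREE;
	return DEST_SLABS_PARTIAL;
}
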
3345 struct page *page; in cache_flusharray() local
3347 page = list_entry(p, struct page, lru); in cache_flusharray()
3348 BUG_ON(page->active); in cache_flusharray()
3924 struct page *page; in get_slabinfo() local
3941 list_for_each_entry(page, &n->slabs_full, lru) { in get_slabinfo()
3942 if (page->active != cachep->num && !error) in get_slabinfo()
3947 list_for_each_entry(page, &n->slabs_partial, lru) { in get_slabinfo()
3948 if (page->active == cachep->num && !error) in get_slabinfo()
3950 if (!page->active && !error) in get_slabinfo()
3952 active_objs += page->active; in get_slabinfo()
3955 list_for_each_entry(page, &n->slabs_free, lru) { in get_slabinfo()
3956 if (page->active && !error) in get_slabinfo()
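get_slabinfo() (3924-3956) audits the invariants the movers above maintain: every slab on slabs_full must have active == num, a slab on slabs_partial must be strictly between empty and full, and slabs_free may only hold empty slabs; active_objs accumulates num per full slab plus active per partial slab. The three per-list predicates, spelled out:

#include <stdbool.h>

struct slab_level3 { unsigned int active; };

/* The per-list invariants get_slabinfo() checks (3941-3956). */
static bool full_ok(const struct slab_level3 *s, unsigned int num)
{
	return s->active == num;	/* a full slab has every object out */
}

static bool partial_ok(const struct slab_level3 *s, unsigned int num)
{
	return s->active > 0 && s->active < num;
}

static bool free_ok(const struct slab_level3 *s)
{
	return s->active == 0;		/* also BUG_ON(page->active) at 3348 */
}
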
4104 struct page *page) in handle_slab() argument
4111 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) { in handle_slab()
4112 if (get_obj_status(page, i) != OBJECT_ACTIVE) in handle_slab()
4139 struct page *page; in leaks_show() local
4160 list_for_each_entry(page, &n->slabs_full, lru) in leaks_show()
4161 handle_slab(x, cachep, page); in leaks_show()
4162 list_for_each_entry(page, &n->slabs_partial, lru) in leaks_show()
4163 handle_slab(x, cachep, page); in leaks_show()
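The leak detector (4104-4163) closes the loop with the status bytes from the top of the listing: leaks_show() walks the full and partial lists, and handle_slab() steps a pointer from page->s_mem in strides of c->size, recording an allocation backtrace only for slots whose status byte reads OBJECT_ACTIVE. A compact model; the record callback stands in for the kernel's backtrace capture:

#include <stddef.h>

#define OBJECT_ACTIVE 1

struct cache_scan { size_t size; unsigned int num; };
struct slab_scan  { char *s_mem; const char *status; };

/* handle_slab() in miniature: visit every in-use object on one slab. */
static void scan_slab(const struct cache_scan *c, const struct slab_scan *s,
		      void (*record)(void *obj))
{
	unsigned int i;
	char *p;

	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
		if (s->status[i] != OBJECT_ACTIVE)	/* get_obj_status() */
			continue;
		record(p);	/* the kernel stores the allocation caller */
	}
}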