Lines Matching refs:n

246 			struct kmem_cache_node *n, int tofree);
699 struct kmem_cache_node *n = get_node(cachep, numa_mem_id()); in recheck_pfmemalloc_active() local
706 spin_lock_irqsave(&n->list_lock, flags); in recheck_pfmemalloc_active()
707 list_for_each_entry(page, &n->slabs_full, lru) in recheck_pfmemalloc_active()
711 list_for_each_entry(page, &n->slabs_partial, lru) in recheck_pfmemalloc_active()
715 list_for_each_entry(page, &n->slabs_free, lru) in recheck_pfmemalloc_active()
721 spin_unlock_irqrestore(&n->list_lock, flags); in recheck_pfmemalloc_active()
732 struct kmem_cache_node *n; in __ac_get_obj() local
754 n = get_node(cachep, numa_mem_id()); in __ac_get_obj()
755 if (!list_empty(&n->slabs_free) && force_refill) { in __ac_get_obj()
832 #define reap_alien(cachep, n) do { } while (0) argument
924 struct kmem_cache_node *n = get_node(cachep, node); in __drain_alien_cache() local
927 spin_lock(&n->list_lock); in __drain_alien_cache()
933 if (n->shared) in __drain_alien_cache()
934 transfer_objects(n->shared, ac, ac->limit); in __drain_alien_cache()
938 spin_unlock(&n->list_lock); in __drain_alien_cache()
945 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) in reap_alien() argument
949 if (n->alien) { in reap_alien()
950 struct alien_cache *alc = n->alien[node]; in reap_alien()
991 struct kmem_cache_node *n; in __cache_free_alien() local
996 n = get_node(cachep, node); in __cache_free_alien()
998 if (n->alien && n->alien[page_node]) { in __cache_free_alien()
999 alien = n->alien[page_node]; in __cache_free_alien()
1010 n = get_node(cachep, page_node); in __cache_free_alien()
1011 spin_lock(&n->list_lock); in __cache_free_alien()
1013 spin_unlock(&n->list_lock); in __cache_free_alien()
1055 struct kmem_cache_node *n; in init_cache_node_node() local
1064 n = get_node(cachep, node); in init_cache_node_node()
1065 if (!n) { in init_cache_node_node()
1066 n = kmalloc_node(memsize, GFP_KERNEL, node); in init_cache_node_node()
1067 if (!n) in init_cache_node_node()
1069 kmem_cache_node_init(n); in init_cache_node_node()
1070 n->next_reap = jiffies + REAPTIMEOUT_NODE + in init_cache_node_node()
1078 cachep->node[node] = n; in init_cache_node_node()
1081 spin_lock_irq(&n->list_lock); in init_cache_node_node()
1082 n->free_limit = in init_cache_node_node()
1085 spin_unlock_irq(&n->list_lock); in init_cache_node_node()
1091 struct kmem_cache_node *n) in slabs_tofree() argument
1093 return (n->free_objects + cachep->num - 1) / cachep->num; in slabs_tofree()
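
The slabs_tofree() expression at file line 1093 above is a rounding-up division: it converts the node's count of free objects into the number of whole slabs that count could occupy, at cachep->num objects per slab. A minimal standalone sketch of the same idiom, with hypothetical names and example values, assuming nothing beyond what the line itself shows:

#include <stdio.h>

/* Ceiling division: how many slabs of objs_per_slab objects cover free_objects. */
static unsigned long slabs_needed(unsigned long free_objects, unsigned long objs_per_slab)
{
	return (free_objects + objs_per_slab - 1) / objs_per_slab;
}

int main(void)
{
	/* e.g. 10 free objects at 4 objects per slab round up to 3 slabs */
	printf("%lu\n", slabs_needed(10, 4));
	return 0;
}
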
1099 struct kmem_cache_node *n = NULL; in cpuup_canceled() local
1109 n = get_node(cachep, node); in cpuup_canceled()
1110 if (!n) in cpuup_canceled()
1113 spin_lock_irq(&n->list_lock); in cpuup_canceled()
1116 n->free_limit -= cachep->batchcount; in cpuup_canceled()
1126 spin_unlock_irq(&n->list_lock); in cpuup_canceled()
1130 shared = n->shared; in cpuup_canceled()
1134 n->shared = NULL; in cpuup_canceled()
1137 alien = n->alien; in cpuup_canceled()
1138 n->alien = NULL; in cpuup_canceled()
1140 spin_unlock_irq(&n->list_lock); in cpuup_canceled()
1157 n = get_node(cachep, node); in cpuup_canceled()
1158 if (!n) in cpuup_canceled()
1160 drain_freelist(cachep, n, slabs_tofree(cachep, n)); in cpuup_canceled()
1167 struct kmem_cache_node *n = NULL; in cpuup_prepare() local
1203 n = get_node(cachep, node); in cpuup_prepare()
1204 BUG_ON(!n); in cpuup_prepare()
1206 spin_lock_irq(&n->list_lock); in cpuup_prepare()
1207 if (!n->shared) { in cpuup_prepare()
1212 n->shared = shared; in cpuup_prepare()
1216 if (!n->alien) { in cpuup_prepare()
1217 n->alien = alien; in cpuup_prepare()
1221 spin_unlock_irq(&n->list_lock); in cpuup_prepare()
1306 struct kmem_cache_node *n; in drain_cache_node_node() local
1308 n = get_node(cachep, node); in drain_cache_node_node()
1309 if (!n) in drain_cache_node_node()
1312 drain_freelist(cachep, n, slabs_tofree(cachep, n)); in drain_cache_node_node()
1314 if (!list_empty(&n->slabs_full) || in drain_cache_node_node()
1315 !list_empty(&n->slabs_partial)) { in drain_cache_node_node()
1533 struct kmem_cache_node *n; in slab_out_of_memory() local
1549 for_each_kmem_cache_node(cachep, node, n) { in slab_out_of_memory()
1553 spin_lock_irqsave(&n->list_lock, flags); in slab_out_of_memory()
1554 list_for_each_entry(page, &n->slabs_full, lru) { in slab_out_of_memory()
1558 list_for_each_entry(page, &n->slabs_partial, lru) { in slab_out_of_memory()
1562 list_for_each_entry(page, &n->slabs_free, lru) in slab_out_of_memory()
1565 free_objects += n->free_objects; in slab_out_of_memory()
1566 spin_unlock_irqrestore(&n->list_lock, flags); in slab_out_of_memory()
1907 struct page *page, *n; in slabs_destroy() local
1909 list_for_each_entry_safe(page, n, list, lru) { in slabs_destroy()
2319 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
2328 struct kmem_cache_node *n; in do_drain() local
2333 n = get_node(cachep, node); in do_drain()
2334 spin_lock(&n->list_lock); in do_drain()
2336 spin_unlock(&n->list_lock); in do_drain()
2343 struct kmem_cache_node *n; in drain_cpu_caches() local
2348 for_each_kmem_cache_node(cachep, node, n) in drain_cpu_caches()
2349 if (n->alien) in drain_cpu_caches()
2350 drain_alien_cache(cachep, n->alien); in drain_cpu_caches()
2352 for_each_kmem_cache_node(cachep, node, n) in drain_cpu_caches()
2353 drain_array(cachep, n, n->shared, 1, node); in drain_cpu_caches()
2363 struct kmem_cache_node *n, int tofree) in drain_freelist() argument
2370 while (nr_freed < tofree && !list_empty(&n->slabs_free)) { in drain_freelist()
2372 spin_lock_irq(&n->list_lock); in drain_freelist()
2373 p = n->slabs_free.prev; in drain_freelist()
2374 if (p == &n->slabs_free) { in drain_freelist()
2375 spin_unlock_irq(&n->list_lock); in drain_freelist()
2388 n->free_objects -= cache->num; in drain_freelist()
2389 spin_unlock_irq(&n->list_lock); in drain_freelist()
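
The drain_freelist() references above (file lines 2363-2389) show a drain loop that re-takes the node's list_lock on every iteration, detaches one slab from the tail of slabs_free, drops the lock, and only then destroys the slab. A simplified userspace sketch of that pattern follows; the list, the lock, and the names are hypothetical stand-ins, not the kernel's types:

#include <pthread.h>
#include <stdlib.h>

struct slab {                                /* stand-in for struct page */
	struct slab *prev;
};

struct node {                                /* stand-in for kmem_cache_node */
	pthread_mutex_t list_lock;
	struct slab *free_tail;              /* tail of the free-slab list */
	unsigned long free_objects;
};

/* Free up to tofree completely free slabs; returns how many were freed. */
static int drain_freelist_sketch(struct node *n, unsigned int objs_per_slab,
				 int tofree)
{
	int nr_freed = 0;

	while (nr_freed < tofree) {
		struct slab *s;

		pthread_mutex_lock(&n->list_lock);   /* lock held per iteration only */
		s = n->free_tail;
		if (!s) {                            /* free list empty: stop draining */
			pthread_mutex_unlock(&n->list_lock);
			break;
		}
		n->free_tail = s->prev;
		n->free_objects -= objs_per_slab;    /* mirrors free_objects -= cache->num */
		pthread_mutex_unlock(&n->list_lock);

		free(s);                             /* destroy the slab outside the lock */
		nr_freed++;
	}
	return nr_freed;
}
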
2401 struct kmem_cache_node *n; in __kmem_cache_shrink() local
2406 for_each_kmem_cache_node(cachep, node, n) { in __kmem_cache_shrink()
2407 drain_freelist(cachep, n, slabs_tofree(cachep, n)); in __kmem_cache_shrink()
2409 ret += !list_empty(&n->slabs_full) || in __kmem_cache_shrink()
2410 !list_empty(&n->slabs_partial); in __kmem_cache_shrink()
2418 struct kmem_cache_node *n; in __kmem_cache_shutdown() local
2427 for_each_kmem_cache_node(cachep, i, n) { in __kmem_cache_shutdown()
2428 kfree(n->shared); in __kmem_cache_shutdown()
2429 free_alien_cache(n->alien); in __kmem_cache_shutdown()
2430 kfree(n); in __kmem_cache_shutdown()
2599 struct kmem_cache_node *n; in cache_grow() local
2613 n = get_node(cachep, nodeid); in cache_grow()
2614 spin_lock(&n->list_lock); in cache_grow()
2617 offset = n->colour_next; in cache_grow()
2618 n->colour_next++; in cache_grow()
2619 if (n->colour_next >= cachep->colour) in cache_grow()
2620 n->colour_next = 0; in cache_grow()
2621 spin_unlock(&n->list_lock); in cache_grow()
2658 spin_lock(&n->list_lock); in cache_grow()
2661 list_add_tail(&page->lru, &(n->slabs_free)); in cache_grow()
2663 n->free_objects += cachep->num; in cache_grow()
2664 spin_unlock(&n->list_lock); in cache_grow()
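
The cache_grow() references at file lines 2617-2620 above advance a per-node colour counter round-robin under the list_lock: each new slab takes the current offset, and the counter wraps back to zero once it reaches cachep->colour. A small sketch of that wrap-around counter, with hypothetical names:

struct colour_state {                    /* stand-in for the per-node counter */
	unsigned int colour_next;
};

/* Hand out the next colour offset, cycling through 0 .. colour_range - 1. */
static unsigned int next_colour(struct colour_state *n, unsigned int colour_range)
{
	unsigned int offset = n->colour_next;

	n->colour_next++;
	if (n->colour_next >= colour_range)
		n->colour_next = 0;              /* wrap: round-robin colouring */
	return offset;
}
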
2763 struct kmem_cache_node *n; in cache_alloc_refill() local
2782 n = get_node(cachep, node); in cache_alloc_refill()
2784 BUG_ON(ac->avail > 0 || !n); in cache_alloc_refill()
2785 spin_lock(&n->list_lock); in cache_alloc_refill()
2788 if (n->shared && transfer_objects(ac, n->shared, batchcount)) { in cache_alloc_refill()
2789 n->shared->touched = 1; in cache_alloc_refill()
2797 entry = n->slabs_partial.next; in cache_alloc_refill()
2798 if (entry == &n->slabs_partial) { in cache_alloc_refill()
2799 n->free_touched = 1; in cache_alloc_refill()
2800 entry = n->slabs_free.next; in cache_alloc_refill()
2801 if (entry == &n->slabs_free) in cache_alloc_refill()
2827 list_add(&page->lru, &n->slabs_full); in cache_alloc_refill()
2829 list_add(&page->lru, &n->slabs_partial); in cache_alloc_refill()
2833 n->free_objects -= ac->avail; in cache_alloc_refill()
2835 spin_unlock(&n->list_lock); in cache_alloc_refill()
3090 struct kmem_cache_node *n; in ____cache_alloc_node() local
3095 n = get_node(cachep, nodeid); in ____cache_alloc_node()
3096 BUG_ON(!n); in ____cache_alloc_node()
3100 spin_lock(&n->list_lock); in ____cache_alloc_node()
3101 entry = n->slabs_partial.next; in ____cache_alloc_node()
3102 if (entry == &n->slabs_partial) { in ____cache_alloc_node()
3103 n->free_touched = 1; in ____cache_alloc_node()
3104 entry = n->slabs_free.next; in ____cache_alloc_node()
3105 if (entry == &n->slabs_free) in ____cache_alloc_node()
3119 n->free_objects--; in ____cache_alloc_node()
3124 list_add(&page->lru, &n->slabs_full); in ____cache_alloc_node()
3126 list_add(&page->lru, &n->slabs_partial); in ____cache_alloc_node()
3128 spin_unlock(&n->list_lock); in ____cache_alloc_node()
3132 spin_unlock(&n->list_lock); in ____cache_alloc_node()
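
The cache_alloc_refill() and ____cache_alloc_node() references above share one selection pattern: try the node's slabs_partial list first, and only when it is empty set free_touched and fall back to slabs_free (growing the cache if that is empty too). A compressed sketch of that decision, with hypothetical stand-in types:

struct list_counts {                     /* stand-in for the node's slab lists */
	int nr_partial;                  /* slabs with some objects in use */
	int nr_free;                     /* completely free slabs */
	int free_touched;                /* set when the free list was consulted */
};

/* 'p': allocate from a partial slab, 'f': from a free slab, 0: must grow. */
static char pick_slab_list(struct list_counts *n)
{
	if (n->nr_partial > 0)
		return 'p';
	n->free_touched = 1;             /* partial list empty: note the fallback */
	if (n->nr_free > 0)
		return 'f';
	return 0;
}
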
3275 struct kmem_cache_node *n = get_node(cachep, node); in free_block() local
3289 n->free_objects++; in free_block()
3293 if (n->free_objects > n->free_limit) { in free_block()
3294 n->free_objects -= cachep->num; in free_block()
3297 list_add(&page->lru, &n->slabs_free); in free_block()
3304 list_add_tail(&page->lru, &n->slabs_partial); in free_block()
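
In the free_block() references above, each object returned to a node bumps free_objects; when a slab becomes completely free, the node either keeps it on slabs_free or, if free_objects already exceeds free_limit, gives the whole slab back and subtracts cachep->num. A sketch of that bookkeeping decision, with hypothetical names:

struct node_counters {                   /* stand-in for kmem_cache_node fields */
	unsigned long free_objects;      /* objects sitting in free/partial slabs */
	unsigned long free_limit;        /* cap on cached free objects per node */
};

/* Returns 1 if a now-empty slab should be destroyed, 0 if it should be cached. */
static int empty_slab_over_limit(struct node_counters *n, unsigned int objs_per_slab)
{
	if (n->free_objects > n->free_limit) {
		n->free_objects -= objs_per_slab;    /* slab leaves the cache entirely */
		return 1;
	}
	return 0;                                    /* keep it on the free list */
}
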
3312 struct kmem_cache_node *n; in cache_flusharray() local
3321 n = get_node(cachep, node); in cache_flusharray()
3322 spin_lock(&n->list_lock); in cache_flusharray()
3323 if (n->shared) { in cache_flusharray()
3324 struct array_cache *shared_array = n->shared; in cache_flusharray()
3343 p = n->slabs_free.next; in cache_flusharray()
3344 while (p != &(n->slabs_free)) { in cache_flusharray()
3356 spin_unlock(&n->list_lock); in cache_flusharray()
3607 struct kmem_cache_node *n; in alloc_kmem_cache_node() local
3630 n = get_node(cachep, node); in alloc_kmem_cache_node()
3631 if (n) { in alloc_kmem_cache_node()
3632 struct array_cache *shared = n->shared; in alloc_kmem_cache_node()
3635 spin_lock_irq(&n->list_lock); in alloc_kmem_cache_node()
3641 n->shared = new_shared; in alloc_kmem_cache_node()
3642 if (!n->alien) { in alloc_kmem_cache_node()
3643 n->alien = new_alien; in alloc_kmem_cache_node()
3646 n->free_limit = (1 + nr_cpus_node(node)) * in alloc_kmem_cache_node()
3648 spin_unlock_irq(&n->list_lock); in alloc_kmem_cache_node()
3654 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); in alloc_kmem_cache_node()
3655 if (!n) { in alloc_kmem_cache_node()
3661 kmem_cache_node_init(n); in alloc_kmem_cache_node()
3662 n->next_reap = jiffies + REAPTIMEOUT_NODE + in alloc_kmem_cache_node()
3664 n->shared = new_shared; in alloc_kmem_cache_node()
3665 n->alien = new_alien; in alloc_kmem_cache_node()
3666 n->free_limit = (1 + nr_cpus_node(node)) * in alloc_kmem_cache_node()
3668 cachep->node[node] = n; in alloc_kmem_cache_node()
3677 n = get_node(cachep, node); in alloc_kmem_cache_node()
3678 if (n) { in alloc_kmem_cache_node()
3679 kfree(n->shared); in alloc_kmem_cache_node()
3680 free_alien_cache(n->alien); in alloc_kmem_cache_node()
3681 kfree(n); in alloc_kmem_cache_node()
3716 struct kmem_cache_node *n; in __do_tune_cpucache() local
3720 n = get_node(cachep, node); in __do_tune_cpucache()
3721 spin_lock_irq(&n->list_lock); in __do_tune_cpucache()
3723 spin_unlock_irq(&n->list_lock); in __do_tune_cpucache()
3827 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, in drain_array() argument
3838 spin_lock_irq(&n->list_lock); in drain_array()
3848 spin_unlock_irq(&n->list_lock); in drain_array()
3868 struct kmem_cache_node *n; in cache_reap() local
3884 n = get_node(searchp, node); in cache_reap()
3886 reap_alien(searchp, n); in cache_reap()
3888 drain_array(searchp, n, cpu_cache_get(searchp), 0, node); in cache_reap()
3894 if (time_after(n->next_reap, jiffies)) in cache_reap()
3897 n->next_reap = jiffies + REAPTIMEOUT_NODE; in cache_reap()
3899 drain_array(searchp, n, n->shared, 0, node); in cache_reap()
3901 if (n->free_touched) in cache_reap()
3902 n->free_touched = 0; in cache_reap()
3906 freed = drain_freelist(searchp, n, (n->free_limit + in cache_reap()
3932 struct kmem_cache_node *n; in get_slabinfo() local
3936 for_each_kmem_cache_node(cachep, node, n) { in get_slabinfo()
3939 spin_lock_irq(&n->list_lock); in get_slabinfo()
3941 list_for_each_entry(page, &n->slabs_full, lru) { in get_slabinfo()
3947 list_for_each_entry(page, &n->slabs_partial, lru) { in get_slabinfo()
3955 list_for_each_entry(page, &n->slabs_free, lru) { in get_slabinfo()
3960 free_objects += n->free_objects; in get_slabinfo()
3961 if (n->shared) in get_slabinfo()
3962 shared_avail += n->shared->avail; in get_slabinfo()
3964 spin_unlock_irq(&n->list_lock); in get_slabinfo()
4073 static inline int add_caller(unsigned long *n, unsigned long v) in add_caller() argument
4079 l = n[1]; in add_caller()
4080 p = n + 2; in add_caller()
4095 if (++n[1] == n[0]) in add_caller()
4097 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n)); in add_caller()
4103 static void handle_slab(unsigned long *n, struct kmem_cache *c, in handle_slab() argument
4109 if (n[0] == n[1]) in handle_slab()
4115 if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) in handle_slab()
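
The add_caller()/handle_slab() references above imply a flat buffer of unsigned longs: n[0] is the capacity in entries, n[1] the number of entries in use (n[0] == n[1] means full), and two-word entries follow from n + 2. The sketch below reproduces only that layout with a simple linear scan; the real code's search and the memmove-based insert at file line 4097 are not reproduced, and the (value, count) ordering within an entry is an assumption:

/* Record one occurrence of v in the flat table n; returns 0 when the table is full. */
static int add_caller_sketch(unsigned long *n, unsigned long v)
{
	unsigned long *p = n + 2;                /* first two-word entry */
	unsigned long i;

	if (!v)
		return 1;
	for (i = 0; i < n[1]; i++, p += 2) {
		if (p[0] == v) {                 /* already recorded: bump its count */
			p[1]++;
			return 1;
		}
	}
	if (++n[1] == n[0])                      /* same overflow check as file line 4095 */
		return 0;
	p[0] = v;                                /* append a new (value, count) entry */
	p[1] = 1;
	return 1;
}
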
4140 struct kmem_cache_node *n; in leaks_show() local
4155 for_each_kmem_cache_node(cachep, node, n) { in leaks_show()
4158 spin_lock_irq(&n->list_lock); in leaks_show()
4160 list_for_each_entry(page, &n->slabs_full, lru) in leaks_show()
4162 list_for_each_entry(page, &n->slabs_partial, lru) in leaks_show()
4164 spin_unlock_irq(&n->list_lock); in leaks_show()
4202 unsigned long *n; in slabstats_open() local
4204 n = __seq_open_private(file, &slabstats_op, PAGE_SIZE); in slabstats_open()
4205 if (!n) in slabstats_open()
4208 *n = PAGE_SIZE / (2 * sizeof(unsigned long)); in slabstats_open()
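
The slabstats_open() references above size that table from one page: __seq_open_private() allocates PAGE_SIZE bytes of private data, and *n = PAGE_SIZE / (2 * sizeof(unsigned long)) records how many two-word entries fit. As a worked example under the assumption of a common 64-bit configuration (4096-byte pages, 8-byte unsigned long), that is 4096 / 16 = 256 entries:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;       /* assumed PAGE_SIZE */
	unsigned long word = 8;               /* assumed sizeof(unsigned long) */

	printf("%lu entries\n", page_size / (2 * word));   /* prints "256 entries" */
	return 0;
}
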