Lines matching refs: size (identifier references in mm/slob.c, the SLOB allocator)

125 #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)  argument
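SLOB_UNITS() converts a byte count into whole allocation units by rounding up; in the kernel SLOB_UNIT is sizeof(slob_t). A minimal sketch of the arithmetic, assuming the common 2-byte unit:

        #define SLOB_UNIT 2                                    /* sizeof(slob_t), assumed here */
        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
        #define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)

        /* SLOB_UNITS(1) == 1, SLOB_UNITS(2) == 1, SLOB_UNITS(3) == 2 */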
134 int size; member
145 static void set_slob(slob_t *s, slobidx_t size, slob_t *next) in set_slob() argument
150 if (size > 1) { in set_slob()
151 s[0].units = size; in set_slob()
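set_slob() stores a free block's size and the in-page offset of the next free block inside the block itself. A sketch of that encoding, reconstructed from mm/slob.c with slobidx_t simplified to a 16-bit integer:

        typedef short slobidx_t;
        struct slob_block { slobidx_t units; };
        typedef struct slob_block slob_t;

        static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
        {
                slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
                slobidx_t offset = next - base;

                if (size > 1) {         /* multi-unit block: [size][next offset] header */
                        s[0].units = size;
                        s[1].units = offset;
                } else                  /* single-unit block: negated offset marks it */
                        s[0].units = -offset;
        }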
217 static void *slob_page_alloc(struct page *sp, size_t size, int align) in slob_page_alloc() argument
220 int delta = 0, units = SLOB_UNITS(size); in slob_page_alloc()
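Inside slob_page_alloc(), delta counts the units a candidate free block would waste to reach the requested alignment; the block is usable only if its available units cover units + delta. A fragment-level sketch (cur walks the page's free list, avail is slob_units(cur)):

        if (align) {
                aligned = (slob_t *)ALIGN((unsigned long)cur, align);
                delta = aligned - cur;          /* units skipped for alignment */
        }
        if (avail >= units + delta) {
                /* room enough: split off the aligned piece and return it */
        }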
268 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) in slob_alloc() argument
276 if (size < SLOB_BREAK1) in slob_alloc()
278 else if (size < SLOB_BREAK2) in slob_alloc()
295 if (sp->units < SLOB_UNITS(size)) in slob_alloc()
300 b = slob_page_alloc(sp, size, align); in slob_alloc()
328 b = slob_page_alloc(sp, size, align); in slob_alloc()
333 memset(b, 0, size); in slob_alloc()
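slob_alloc() keeps three global free lists and picks one by request size; in mm/slob.c the break points are 256 and 1024 bytes. A hedged sketch (slob_list_for() is a hypothetical helper; the real code open-codes the branches):

        #define SLOB_BREAK1 256
        #define SLOB_BREAK2 1024

        static struct list_head *slob_list_for(size_t size) /* hypothetical helper */
        {
                if (size < SLOB_BREAK1)
                        return &free_slob_small;        /* < 256 bytes  */
                else if (size < SLOB_BREAK2)
                        return &free_slob_medium;       /* < 1024 bytes */
                return &free_slob_large;
        }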
340 static void slob_free(void *block, int size) in slob_free() argument
350 BUG_ON(!size); in slob_free()
353 units = SLOB_UNITS(size); in slob_free()
375 if (size < SLOB_BREAK1) in slob_free()
377 else if (size < SLOB_BREAK2) in slob_free()
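On the free side the same break points route the block back to the matching list, but slob_free() first checks whether returning these units would leave the whole page empty; if so, the page bypasses the SLOB lists and goes straight back to the page allocator. A simplified sketch (locking omitted):

        units = SLOB_UNITS(size);
        if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
                /* last live allocation on this page: release the whole page */
                if (slob_page_free(sp))
                        clear_slob_page_free(sp);
                slob_free_pages(b, 0);
                return;
        }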
427 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) in __do_kmalloc_node() argument
437 if (size < PAGE_SIZE - align) { in __do_kmalloc_node()
438 if (!size) in __do_kmalloc_node()
441 m = slob_alloc(size + align, gfp, align, node); in __do_kmalloc_node()
445 *m = size; in __do_kmalloc_node()
449 size, size + align, gfp, node); in __do_kmalloc_node()
451 unsigned int order = get_order(size); in __do_kmalloc_node()
458 size, PAGE_SIZE << order, gfp, node); in __do_kmalloc_node()
461 kmemleak_alloc(ret, size, 1, gfp); in __do_kmalloc_node()
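__do_kmalloc_node() serves small requests from SLOB pages and hides the size in a header just before the returned pointer, which is how kfree() later knows how much to release; anything that will not fit under a page falls through to whole pages. A sketch of the small path (align is the arch minimum kmalloc alignment):

        if (size < PAGE_SIZE - align) {
                unsigned int *m;

                if (!size)
                        return ZERO_SIZE_PTR;
                m = slob_alloc(size + align, gfp, align, node); /* room for header */
                if (!m)
                        return NULL;
                *m = size;                      /* stashed for kfree() */
                ret = (void *)m + align;        /* caller sees the aligned payload */
        }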
465 void *__kmalloc(size_t size, gfp_t gfp) in __kmalloc() argument
467 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_); in __kmalloc()
471 void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) in __kmalloc_track_caller() argument
473 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller); in __kmalloc_track_caller()
477 void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, in __kmalloc_node_track_caller() argument
480 return __do_kmalloc_node(size, gfp, node, caller); in __kmalloc_node_track_caller()
529 c->size += sizeof(struct slob_rcu); in __kmem_cache_create()
543 if (c->size < PAGE_SIZE) { in slob_alloc_node()
544 b = slob_alloc(c->size, flags, c->align, node); in slob_alloc_node()
546 SLOB_UNITS(c->size) * SLOB_UNIT, in slob_alloc_node()
549 b = slob_new_pages(flags, get_order(c->size), node); in slob_alloc_node()
551 PAGE_SIZE << get_order(c->size), in slob_alloc_node()
558 kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); in slob_alloc_node()
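kmem_cache allocations follow the same split: objects smaller than a page are carved out via slob_alloc() with the cache's alignment, larger ones take whole pages, and each path reports its true allocated footprint to the tracepoint. A condensed sketch of slob_alloc_node():

        if (c->size < PAGE_SIZE) {
                b = slob_alloc(c->size, flags, c->align, node);
                trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
                                            SLOB_UNITS(c->size) * SLOB_UNIT,
                                            flags, node);
        } else {
                b = slob_new_pages(flags, get_order(c->size), node);
                trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
                                            PAGE_SIZE << get_order(c->size),
                                            flags, node);
        }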
569 void *__kmalloc_node(size_t size, gfp_t gfp, int node) in __kmalloc_node() argument
571 return __do_kmalloc_node(size, gfp, node, _RET_IP_); in __kmalloc_node()
582 static void __kmem_cache_free(void *b, int size) in __kmem_cache_free() argument
584 if (size < PAGE_SIZE) in __kmem_cache_free()
585 slob_free(b, size); in __kmem_cache_free()
587 slob_free_pages(b, get_order(size)); in __kmem_cache_free()
593 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu)); in kmem_rcu_free()
595 __kmem_cache_free(b, slob_rcu->size); in kmem_rcu_free()
603 slob_rcu = b + (c->size - sizeof(struct slob_rcu)); in kmem_cache_free()
604 slob_rcu->size = c->size; in kmem_cache_free()
607 __kmem_cache_free(b, c->size); in kmem_cache_free()
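For SLAB_TYPESAFE_BY_RCU caches, __kmem_cache_create() grows c->size by sizeof(struct slob_rcu) so every object carries an RCU footer; kmem_cache_free() fills the footer in and defers the real free, and kmem_rcu_free() walks back from the footer to the object base after the grace period. Reconstructed from the lines above:

        struct slob_rcu {
                struct rcu_head head;
                int size;
        };

        static void kmem_rcu_free(struct rcu_head *head)
        {
                struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
                void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

                __kmem_cache_free(b, slob_rcu->size);
        }

        /* in kmem_cache_free(), for SLAB_TYPESAFE_BY_RCU caches: */
        struct slob_rcu *slob_rcu = b + (c->size - sizeof(struct slob_rcu));
        slob_rcu->size = c->size;
        call_rcu(&slob_rcu->head, kmem_rcu_free);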
627 .size = sizeof(struct kmem_cache),
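The final hit is the statically initialized boot cache that bootstraps kmem_cache_create() before the allocator can allocate its own struct kmem_cache. Reconstructed from mm/slob.c:

        struct kmem_cache kmem_cache_boot = {
                .name = "kmem_cache",
                .size = sizeof(struct kmem_cache),
                .flags = SLAB_PANIC,
                .align = ARCH_KMALLOC_MINALIGN,
        };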