Lines Matching refs:class (symbol search for "class" in the Linux kernel's mm/zsmalloc.c; each hit shows the in-file line number, the matching source line, the enclosing function, and the usage kind)
454 static inline void zs_stat_inc(struct size_class *class, in zs_stat_inc() argument
457 class->stats.objs[type] += cnt; in zs_stat_inc()
460 static inline void zs_stat_dec(struct size_class *class, in zs_stat_dec() argument
463 class->stats.objs[type] -= cnt; in zs_stat_dec()
466 static inline unsigned long zs_stat_get(struct size_class *class, in zs_stat_get() argument
469 return class->stats.objs[type]; in zs_stat_get()
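A minimal sketch of the three stat helpers in hits 454-469, assuming the CONFIG_ZSMALLOC_STAT build of this era; the enum zs_stat_type parameter and the stats field layout are inferred from the hits, not verbatim source:

	struct zs_size_stat {
		unsigned long objs[NR_ZS_STAT_TYPE];	/* assumed array name/size */
	};

	static inline void zs_stat_inc(struct size_class *class,
					enum zs_stat_type type, unsigned long cnt)
	{
		class->stats.objs[type] += cnt;	/* caller holds class->lock */
	}

	static inline void zs_stat_dec(struct size_class *class,
					enum zs_stat_type type, unsigned long cnt)
	{
		class->stats.objs[type] -= cnt;
	}

	static inline unsigned long zs_stat_get(struct size_class *class,
					enum zs_stat_type type)
	{
		return class->stats.objs[type];
	}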
493 struct size_class *class; in zs_stats_size_show() local
506 class = pool->size_class[i]; in zs_stats_size_show()
508 if (class->index != i) in zs_stats_size_show()
511 spin_lock(&class->lock); in zs_stats_size_show()
512 class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL); in zs_stats_size_show()
513 class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY); in zs_stats_size_show()
514 obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); in zs_stats_size_show()
515 obj_used = zs_stat_get(class, OBJ_USED); in zs_stats_size_show()
516 spin_unlock(&class->lock); in zs_stats_size_show()
518 objs_per_zspage = get_maxobj_per_zspage(class->size, in zs_stats_size_show()
519 class->pages_per_zspage); in zs_stats_size_show()
521 class->pages_per_zspage; in zs_stats_size_show()
524 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
526 class->pages_per_zspage); in zs_stats_size_show()
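Line 520 contains no occurrence of "class" and is therefore absent from the hits; the computation that hits 518-521 belong to reads, as a hedged reconstruction (the pages_used line is an assumption):

	objs_per_zspage = get_maxobj_per_zspage(class->size,
			class->pages_per_zspage);
	pages_used = obj_allocated / objs_per_zspage *
			class->pages_per_zspage;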
588 static inline void zs_stat_inc(struct size_class *class, in zs_stat_inc() argument
593 static inline void zs_stat_dec(struct size_class *class, in zs_stat_dec() argument
598 static inline unsigned long zs_stat_get(struct size_class *class, in zs_stat_get() argument
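The duplicate definitions at 588-598 are the !CONFIG_ZSMALLOC_STAT variants; each body compiles to a no-op stub, e.g.:

	static inline void zs_stat_inc(struct size_class *class,
					enum zs_stat_type type, unsigned long cnt)
	{
	}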
659 static void insert_zspage(struct page *page, struct size_class *class, in insert_zspage() argument
669 head = &class->fullness_list[fullness]; in insert_zspage()
674 zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ? in insert_zspage()
682 static void remove_zspage(struct page *page, struct size_class *class, in remove_zspage() argument
692 head = &class->fullness_list[fullness]; in remove_zspage()
701 zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ? in remove_zspage()
714 static enum fullness_group fix_fullness_group(struct size_class *class, in fix_fullness_group() argument
727 remove_zspage(page, class, currfg); in fix_fullness_group()
728 insert_zspage(page, class, newfg); in fix_fullness_group()
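fix_fullness_group() re-files a zspage when its in-use count moves it to a different fullness group; a minimal reconstruction around hits 714-728 (helpers not present in the hits, such as get_zspage_mapping() and get_fullness_group(), are assumptions):

	static enum fullness_group fix_fullness_group(struct size_class *class,
							struct page *page)
	{
		int class_idx;
		enum fullness_group currfg, newfg;

		get_zspage_mapping(page, &class_idx, &currfg);
		newfg = get_fullness_group(page);	/* recompute from inuse count */
		if (newfg == currfg)
			goto out;

		remove_zspage(page, class, currfg);
		insert_zspage(page, class, newfg);
		set_zspage_mapping(page, class_idx, newfg);
	out:
		return newfg;
	}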
836 static unsigned long obj_to_head(struct size_class *class, struct page *page, in obj_to_head() argument
839 if (class->huge) { in obj_to_head()
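For huge classes the handle is kept in page->private rather than at the start of the object, which is what the class->huge test at 839 selects; a sketch of obj_to_head():

	static unsigned long obj_to_head(struct size_class *class, struct page *page,
				void *obj)
	{
		if (class->huge) {
			/* handle is stored in page->private for huge classes */
			return *(unsigned long *)page_private(page);
		} else
			return *(unsigned long *)obj;
	}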
912 static void init_zspage(struct page *first_page, struct size_class *class) in init_zspage() argument
936 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
938 link += class->size / sizeof(*link); in init_zspage()
957 static struct page *alloc_zspage(struct size_class *class, gfp_t flags) in alloc_zspage() argument
974 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
994 if (i == class->pages_per_zspage - 1) /* last page */ in alloc_zspage()
999 init_zspage(first_page, class); in alloc_zspage()
1003 first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size; in alloc_zspage()
1016 static struct page *find_get_zspage(struct size_class *class) in find_get_zspage() argument
1022 page = class->fullness_list[i]; in find_get_zspage()
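find_get_zspage() walks the fullness lists in order, so an almost-full zspage is preferred and a fresh zspage allocation is the last resort; a sketch around hits 1016-1022:

	static struct page *find_get_zspage(struct size_class *class)
	{
		int i;
		struct page *page;

		for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
			page = class->fullness_list[i];
			if (page)
				break;
		}

		return page;
	}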
1269 struct size_class *class; in zs_map_object() local
1289 class = pool->size_class[class_idx]; in zs_map_object()
1290 off = obj_idx_to_offset(page, obj_idx, class->size); in zs_map_object()
1294 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1306 ret = __zs_map_object(area, pages, off, class->size); in zs_map_object()
1308 if (!class->huge) in zs_map_object()
1322 struct size_class *class; in zs_unmap_object() local
1330 class = pool->size_class[class_idx]; in zs_unmap_object()
1331 off = obj_idx_to_offset(page, obj_idx, class->size); in zs_unmap_object()
1334 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1343 __zs_unmap_object(area, pages, off, class->size); in zs_unmap_object()
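The off + class->size <= PAGE_SIZE test shared by zs_map_object() and zs_unmap_object() (hits 1294, 1334) splits the fast and slow paths: objects contained in one page are kmapped in place, objects straddling a page boundary go through a per-cpu copy buffer. A fragment sketch of the map side (pages is a local struct page *pages[2]; get_next_page() is assumed):

	if (off + class->size <= PAGE_SIZE) {
		/* object is contained entirely within one page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
	} else {
		/* object spans two pages: copy via per-cpu buffer */
		pages[0] = page;
		pages[1] = get_next_page(page);
		ret = __zs_map_object(area, pages, off, class->size);
	}
	if (!class->huge)
		ret += ZS_HANDLE_SIZE;	/* skip the handle stored at the front */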
1351 struct size_class *class, unsigned long handle) in obj_malloc() argument
1363 m_offset = obj_idx_to_offset(m_page, m_objidx, class->size); in obj_malloc()
1368 if (!class->huge) in obj_malloc()
1376 zs_stat_inc(class, OBJ_USED, 1); in obj_malloc()
1394 struct size_class *class; in zs_malloc() local
1406 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1408 spin_lock(&class->lock); in zs_malloc()
1409 first_page = find_get_zspage(class); in zs_malloc()
1412 spin_unlock(&class->lock); in zs_malloc()
1413 first_page = alloc_zspage(class, pool->flags); in zs_malloc()
1419 set_zspage_mapping(first_page, class->index, ZS_EMPTY); in zs_malloc()
1420 atomic_long_add(class->pages_per_zspage, in zs_malloc()
1423 spin_lock(&class->lock); in zs_malloc()
1424 zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage( in zs_malloc()
1425 class->size, class->pages_per_zspage)); in zs_malloc()
1428 obj = obj_malloc(first_page, class, handle); in zs_malloc()
1430 fix_fullness_group(class, first_page); in zs_malloc()
1432 spin_unlock(&class->lock); in zs_malloc()
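A reconstruction of the zs_malloc() flow that hits 1394-1432 trace: find a partially used zspage under class->lock, or drop the lock to allocate a new one; alloc_handle()/free_handle() and record_obj() are assumptions from the same era:

	unsigned long zs_malloc(struct zs_pool *pool, size_t size)
	{
		unsigned long handle, obj;
		struct size_class *class;
		struct page *first_page;

		handle = alloc_handle(pool);	/* assumed helper */
		if (!handle)
			return 0;

		/* extra space in the chunk to keep the handle */
		size += ZS_HANDLE_SIZE;
		class = pool->size_class[get_size_class_index(size)];

		spin_lock(&class->lock);
		first_page = find_get_zspage(class);

		if (!first_page) {
			/* no usable zspage: allocate one outside the lock */
			spin_unlock(&class->lock);
			first_page = alloc_zspage(class, pool->flags);
			if (unlikely(!first_page)) {
				free_handle(pool, handle);	/* assumed helper */
				return 0;
			}

			set_zspage_mapping(first_page, class->index, ZS_EMPTY);
			atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);

			spin_lock(&class->lock);
			zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
					class->size, class->pages_per_zspage));
		}

		obj = obj_malloc(first_page, class, handle);
		/* the zspage may now belong to a different fullness group */
		fix_fullness_group(class, first_page);
		record_obj(handle, obj);	/* assumed helper */
		spin_unlock(&class->lock);

		return handle;
	}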
1438 static void obj_free(struct zs_pool *pool, struct size_class *class, in obj_free() argument
1455 f_offset = obj_idx_to_offset(f_page, f_objidx, class->size); in obj_free()
1462 if (class->huge) in obj_free()
1467 zs_stat_dec(class, OBJ_USED, 1); in obj_free()
1475 struct size_class *class; in zs_free() local
1487 class = pool->size_class[class_idx]; in zs_free()
1489 spin_lock(&class->lock); in zs_free()
1490 obj_free(pool, class, obj); in zs_free()
1491 fullness = fix_fullness_group(class, first_page); in zs_free()
1493 zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage( in zs_free()
1494 class->size, class->pages_per_zspage)); in zs_free()
1495 atomic_long_sub(class->pages_per_zspage, in zs_free()
1499 spin_unlock(&class->lock); in zs_free()
1507 struct size_class *class) in zs_object_copy() argument
1516 s_size = d_size = class->size; in zs_object_copy()
1521 s_off = obj_idx_to_offset(s_page, s_objidx, class->size); in zs_object_copy()
1522 d_off = obj_idx_to_offset(d_page, d_objidx, class->size); in zs_object_copy()
1524 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1527 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1538 if (written == class->size) in zs_object_copy()
1553 s_size = class->size - written; in zs_object_copy()
1562 d_size = class->size - written; in zs_object_copy()
1576 struct size_class *class) in find_alloced_obj() argument
1585 offset += class->size * index; in find_alloced_obj()
1588 head = obj_to_head(class, page, addr + offset); in find_alloced_obj()
1596 offset += class->size; in find_alloced_obj()
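find_alloced_obj() scans a page in class->size strides for the next allocated object and pins its handle; a sketch around hits 1576-1596 (trypin_tag() and OBJ_ALLOCATED_TAG are assumptions):

	static unsigned long find_alloced_obj(struct page *page, int index,
						struct size_class *class)
	{
		unsigned long head;
		int offset = 0;
		unsigned long handle = 0;
		void *addr = kmap_atomic(page);

		if (!is_first_page(page))
			offset = page->index;
		offset += class->size * index;

		while (offset < PAGE_SIZE) {
			head = obj_to_head(class, page, addr + offset);
			if (head & OBJ_ALLOCATED_TAG) {
				handle = head & ~OBJ_ALLOCATED_TAG;
				if (trypin_tag(handle))
					break;
				handle = 0;
			}

			offset += class->size;
			index++;
		}

		kunmap_atomic(addr);
		return handle;
	}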
1617 static int migrate_zspage(struct zs_pool *pool, struct size_class *class, in migrate_zspage() argument
1629 handle = find_alloced_obj(s_page, index, class); in migrate_zspage()
1646 free_obj = obj_malloc(d_page, class, handle); in migrate_zspage()
1647 zs_object_copy(used_obj, free_obj, class); in migrate_zspage()
1658 obj_free(pool, class, used_obj); in migrate_zspage()
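The core loop that hits 1617-1658 belong to: migrate_zspage() pulls each allocated object out of the source zspage and re-homes it in the destination. A reconstruction sketch; struct zs_compact_control and helpers like zspage_full() and handle_to_obj() are assumptions from the same era:

	static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
					struct zs_compact_control *cc)
	{
		unsigned long used_obj, free_obj;
		unsigned long handle;
		struct page *s_page = cc->s_page;
		struct page *d_page = cc->d_page;
		unsigned long index = cc->index;
		int ret = 0;

		while (1) {
			handle = find_alloced_obj(s_page, index, class);
			if (!handle) {
				s_page = get_next_page(s_page);
				if (!s_page)
					break;
				index = 0;
				continue;
			}

			/* stop when the destination zspage has no more room */
			if (zspage_full(d_page)) {
				unpin_tag(handle);
				ret = -ENOMEM;
				break;
			}

			used_obj = handle_to_obj(handle);
			free_obj = obj_malloc(d_page, class, handle);
			zs_object_copy(used_obj, free_obj, class);
			index++;
			record_obj(handle, free_obj);
			unpin_tag(handle);
			obj_free(pool, class, used_obj);
		}

		/* remember the position so the caller can resume */
		cc->s_page = s_page;
		cc->index = index;

		return ret;
	}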
1670 static struct page *alloc_target_page(struct size_class *class) in alloc_target_page() argument
1676 page = class->fullness_list[i]; in alloc_target_page()
1678 remove_zspage(page, class, i); in alloc_target_page()
1686 static void putback_zspage(struct zs_pool *pool, struct size_class *class, in putback_zspage() argument
1694 insert_zspage(first_page, class, fullness); in putback_zspage()
1695 set_zspage_mapping(first_page, class->index, fullness); in putback_zspage()
1698 zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage( in putback_zspage()
1699 class->size, class->pages_per_zspage)); in putback_zspage()
1700 atomic_long_sub(class->pages_per_zspage, in putback_zspage()
1707 static struct page *isolate_source_page(struct size_class *class) in isolate_source_page() argument
1711 page = class->fullness_list[ZS_ALMOST_EMPTY]; in isolate_source_page()
1713 remove_zspage(page, class, ZS_ALMOST_EMPTY); in isolate_source_page()
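alloc_target_page() and isolate_source_page() are mirror helpers for compaction: the source comes from ZS_ALMOST_EMPTY, the target from the fullest available group. Sketches around hits 1670-1713:

	static struct page *alloc_target_page(struct size_class *class)
	{
		int i;
		struct page *page;

		for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
			page = class->fullness_list[i];
			if (page) {
				remove_zspage(page, class, i);
				break;
			}
		}

		return page;
	}

	static struct page *isolate_source_page(struct size_class *class)
	{
		struct page *page;

		page = class->fullness_list[ZS_ALMOST_EMPTY];
		if (page)
			remove_zspage(page, class, ZS_ALMOST_EMPTY);

		return page;
	}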
1719 struct size_class *class) in __zs_compact() argument
1727 spin_lock(&class->lock); in __zs_compact()
1728 while ((src_page = isolate_source_page(class))) { in __zs_compact()
1737 while ((dst_page = alloc_target_page(class))) { in __zs_compact()
1743 if (!migrate_zspage(pool, class, &cc)) in __zs_compact()
1746 putback_zspage(pool, class, dst_page); in __zs_compact()
1755 putback_zspage(pool, class, dst_page); in __zs_compact()
1756 putback_zspage(pool, class, src_page); in __zs_compact()
1757 spin_unlock(&class->lock); in __zs_compact()
1760 spin_lock(&class->lock); in __zs_compact()
1764 putback_zspage(pool, class, src_page); in __zs_compact()
1766 spin_unlock(&class->lock); in __zs_compact()
1775 struct size_class *class; in zs_compact() local
1778 class = pool->size_class[i]; in zs_compact()
1779 if (!class) in zs_compact()
1781 if (class->index != i) in zs_compact()
1783 nr_migrated += __zs_compact(pool, class); in zs_compact()
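zs_compact() walks the size classes from the highest index down, skipping empty slots and merged classes (those whose index does not match their slot); a sketch around hits 1775-1783, with zs_size_classes assumed to be the era's global class count:

	unsigned long zs_compact(struct zs_pool *pool)
	{
		int i;
		unsigned long nr_migrated = 0;
		struct size_class *class;

		for (i = zs_size_classes - 1; i >= 0; i--) {
			class = pool->size_class[i];
			if (!class)
				continue;
			if (class->index != i)	/* skip merged classes */
				continue;
			nr_migrated += __zs_compact(pool, class);
		}

		return nr_migrated;
	}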
1831 struct size_class *class; in zs_create_pool() local
1854 class = kzalloc(sizeof(struct size_class), GFP_KERNEL); in zs_create_pool()
1855 if (!class) in zs_create_pool()
1858 class->size = size; in zs_create_pool()
1859 class->index = i; in zs_create_pool()
1860 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
1863 class->huge = true; in zs_create_pool()
1864 spin_lock_init(&class->lock); in zs_create_pool()
1865 pool->size_class[i] = class; in zs_create_pool()
1867 prev_class = class; in zs_create_pool()
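prev_class at 1867 supports size-class merging: adjacent sizes with identical geometry share one size_class, which is why several slots in pool->size_class[] can point at the same object (and why the class->index != i checks above skip them). A sketch of the check that precedes the kzalloc at 1854; can_merge() is an assumption from the same era:

	if (prev_class && can_merge(prev_class, size, pages_per_zspage)) {
		pool->size_class[i] = prev_class;	/* reuse merged class */
		continue;
	}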
1891 struct size_class *class = pool->size_class[i]; in zs_destroy_pool() local
1893 if (!class) in zs_destroy_pool()
1896 if (class->index != i) in zs_destroy_pool()
1900 if (class->fullness_list[fg]) { in zs_destroy_pool()
1902 class->size, fg); in zs_destroy_pool()
1905 kfree(class); in zs_destroy_pool()