zspage 308 mm/zsmalloc.c static void migrate_lock_init(struct zspage *zspage);
zspage 309 mm/zsmalloc.c static void migrate_read_lock(struct zspage *zspage);
zspage 310 mm/zsmalloc.c static void migrate_read_unlock(struct zspage *zspage);
zspage 313 mm/zsmalloc.c static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
zspage 319 mm/zsmalloc.c static void migrate_lock_init(struct zspage *zspage) {}
zspage 320 mm/zsmalloc.c static void migrate_read_lock(struct zspage *zspage) {}
zspage 321 mm/zsmalloc.c static void migrate_read_unlock(struct zspage *zspage) {}
zspage 324 mm/zsmalloc.c static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
zspage 334 mm/zsmalloc.c pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
zspage 362 mm/zsmalloc.c static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
zspage 368 mm/zsmalloc.c static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
zspage 370 mm/zsmalloc.c kmem_cache_free(pool->zspage_cachep, zspage);
zspage 464 mm/zsmalloc.c static bool is_zspage_isolated(struct zspage *zspage)
zspage 466 mm/zsmalloc.c return zspage->isolated;
zspage 475 mm/zsmalloc.c static inline int get_zspage_inuse(struct zspage *zspage)
zspage 477 mm/zsmalloc.c return zspage->inuse;
zspage 481 mm/zsmalloc.c static inline void mod_zspage_inuse(struct zspage *zspage, int val)
zspage 483 mm/zsmalloc.c zspage->inuse += val;
zspage 486 mm/zsmalloc.c static inline struct page *get_first_page(struct zspage *zspage)
zspage 488 mm/zsmalloc.c struct page *first_page = zspage->first_page;
zspage 504 mm/zsmalloc.c static inline unsigned int get_freeobj(struct zspage *zspage)
zspage 506 mm/zsmalloc.c return zspage->freeobj;
zspage 509 mm/zsmalloc.c static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
zspage 511 mm/zsmalloc.c zspage->freeobj = obj;
zspage 514 mm/zsmalloc.c static void get_zspage_mapping(struct zspage *zspage,
zspage 518 mm/zsmalloc.c BUG_ON(zspage->magic != ZSPAGE_MAGIC);
zspage 520 mm/zsmalloc.c *fullness = zspage->fullness;
zspage 521 mm/zsmalloc.c *class_idx = zspage->class;
zspage 524 mm/zsmalloc.c static void set_zspage_mapping(struct zspage *zspage,
zspage 528 mm/zsmalloc.c zspage->class = class_idx;
zspage 529 mm/zsmalloc.c zspage->fullness = fullness;
zspage 694 mm/zsmalloc.c struct zspage *zspage)
zspage 699 mm/zsmalloc.c inuse = get_zspage_inuse(zspage);
zspage 721 mm/zsmalloc.c struct zspage *zspage,
zspage 724 mm/zsmalloc.c struct zspage *head;
zspage 728 mm/zsmalloc.c struct zspage, list);
zspage 734 mm/zsmalloc.c if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {
zspage 735 mm/zsmalloc.c list_add(&zspage->list, &head->list);
zspage 739 mm/zsmalloc.c list_add(&zspage->list, &class->fullness_list[fullness]);
zspage 747 mm/zsmalloc.c struct zspage *zspage,
zspage 751 mm/zsmalloc.c VM_BUG_ON(is_zspage_isolated(zspage));
zspage 753 mm/zsmalloc.c list_del_init(&zspage->list);
zspage 767 mm/zsmalloc.c struct zspage *zspage)
zspage 772 mm/zsmalloc.c get_zspage_mapping(zspage, &class_idx, &currfg);
zspage 773 mm/zsmalloc.c newfg = get_fullness_group(class, zspage);
zspage 777 mm/zsmalloc.c if (!is_zspage_isolated(zspage)) {
zspage 778 mm/zsmalloc.c remove_zspage(class, zspage, currfg);
zspage 779 mm/zsmalloc.c insert_zspage(class, zspage, newfg);
zspage 782 mm/zsmalloc.c set_zspage_mapping(zspage, class_idx, newfg);
zspage 824 mm/zsmalloc.c static struct zspage *get_zspage(struct page *page)
zspage 826 mm/zsmalloc.c struct zspage *zspage = (struct zspage *)page->private;
zspage 828 mm/zsmalloc.c BUG_ON(zspage->magic != ZSPAGE_MAGIC);
zspage 829 mm/zsmalloc.c return zspage;
zspage 914 mm/zsmalloc.c static int trylock_zspage(struct zspage *zspage)
zspage 918 mm/zsmalloc.c for (cursor = get_first_page(zspage); cursor != NULL; cursor =
zspage 928 mm/zsmalloc.c for (cursor = get_first_page(zspage); cursor != fail; cursor =
zspage 936 mm/zsmalloc.c struct zspage *zspage)
zspage 942 mm/zsmalloc.c get_zspage_mapping(zspage, &class_idx, &fg);
zspage 946 mm/zsmalloc.c VM_BUG_ON(get_zspage_inuse(zspage));
zspage 949 mm/zsmalloc.c next = page = get_first_page(zspage);
zspage 960 mm/zsmalloc.c cache_free_zspage(pool, zspage);
zspage 968 mm/zsmalloc.c struct zspage *zspage)
zspage 970 mm/zsmalloc.c VM_BUG_ON(get_zspage_inuse(zspage));
zspage 971 mm/zsmalloc.c VM_BUG_ON(list_empty(&zspage->list));
zspage 973 mm/zsmalloc.c if (!trylock_zspage(zspage)) {
zspage 978 mm/zsmalloc.c remove_zspage(class, zspage, ZS_EMPTY);
zspage 979 mm/zsmalloc.c __free_zspage(pool, class, zspage);
zspage 983 mm/zsmalloc.c static void init_zspage(struct size_class *class, struct zspage *zspage)
zspage 987 mm/zsmalloc.c struct page *page = get_first_page(zspage);
zspage 1024 mm/zsmalloc.c set_freeobj(zspage, 0);
zspage 1027 mm/zsmalloc.c static void create_page_chain(struct size_class *class, struct zspage *zspage,
zspage 1045 mm/zsmalloc.c set_page_private(page, (unsigned long)zspage);
zspage 1048 mm/zsmalloc.c zspage->first_page = page;
zspage 1063 mm/zsmalloc.c static struct zspage *alloc_zspage(struct zs_pool *pool,
zspage 1069 mm/zsmalloc.c struct zspage *zspage = cache_alloc_zspage(pool, gfp);
zspage 1071 mm/zsmalloc.c if (!zspage)
zspage 1074 mm/zsmalloc.c memset(zspage, 0, sizeof(struct zspage));
zspage 1075 mm/zsmalloc.c zspage->magic = ZSPAGE_MAGIC;
zspage 1076 mm/zsmalloc.c migrate_lock_init(zspage);
zspage 1087 mm/zsmalloc.c cache_free_zspage(pool, zspage);
zspage 1095 mm/zsmalloc.c create_page_chain(class, zspage, pages);
zspage 1096 mm/zsmalloc.c init_zspage(class, zspage);
zspage 1098 mm/zsmalloc.c return zspage;
zspage 1101 mm/zsmalloc.c static struct zspage *find_get_zspage(struct size_class *class)
zspage 1104 mm/zsmalloc.c struct zspage *zspage;
zspage 1107 mm/zsmalloc.c zspage = list_first_entry_or_null(&class->fullness_list[i],
zspage 1108 mm/zsmalloc.c struct zspage, list);
zspage 1109 mm/zsmalloc.c if (zspage)
zspage 1113 mm/zsmalloc.c return zspage;
zspage 1265 mm/zsmalloc.c static bool zspage_full(struct size_class *class, struct zspage *zspage)
zspage 1267 mm/zsmalloc.c return get_zspage_inuse(zspage) == class->objs_per_zspage;
zspage 1294 mm/zsmalloc.c struct zspage *zspage;
zspage 1318 mm/zsmalloc.c zspage = get_zspage(page);
zspage 1321 mm/zsmalloc.c migrate_read_lock(zspage);
zspage 1323 mm/zsmalloc.c get_zspage_mapping(zspage, &class_idx, &fg);
zspage 1352 mm/zsmalloc.c struct zspage *zspage;
zspage 1364 mm/zsmalloc.c zspage = get_zspage(page);
zspage 1365 mm/zsmalloc.c get_zspage_mapping(zspage, &class_idx, &fg);
zspage 1383 mm/zsmalloc.c migrate_read_unlock(zspage);
zspage 1408 mm/zsmalloc.c struct zspage *zspage, unsigned long handle)
zspage 1419 mm/zsmalloc.c obj = get_freeobj(zspage);
zspage 1424 mm/zsmalloc.c m_page = get_first_page(zspage);
zspage 1431 mm/zsmalloc.c set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
zspage 1437 mm/zsmalloc.c zspage->first_page->index = handle;
zspage 1440 mm/zsmalloc.c mod_zspage_inuse(zspage, 1);
zspage 1464 mm/zsmalloc.c struct zspage *zspage;
zspage 1478 mm/zsmalloc.c zspage = find_get_zspage(class);
zspage 1479 mm/zsmalloc.c if (likely(zspage)) {
zspage 1480 mm/zsmalloc.c obj = obj_malloc(class, zspage, handle);
zspage 1482 mm/zsmalloc.c fix_fullness_group(class, zspage);
zspage 1491 mm/zsmalloc.c zspage = alloc_zspage(pool, class, gfp);
zspage 1492 mm/zsmalloc.c if (!zspage) {
zspage 1498 mm/zsmalloc.c obj = obj_malloc(class, zspage, handle);
zspage 1499 mm/zsmalloc.c newfg = get_fullness_group(class, zspage);
zspage 1500 mm/zsmalloc.c insert_zspage(class, zspage, newfg);
zspage 1501 mm/zsmalloc.c set_zspage_mapping(zspage, class->index, newfg);
zspage 1508 mm/zsmalloc.c SetZsPageMovable(pool, zspage);
zspage 1518 mm/zsmalloc.c struct zspage *zspage;
zspage 1527 mm/zsmalloc.c zspage = get_zspage(f_page);
zspage 1533 mm/zsmalloc.c link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
zspage 1535 mm/zsmalloc.c set_freeobj(zspage, f_objidx);
zspage 1536 mm/zsmalloc.c mod_zspage_inuse(zspage, -1);
zspage 1542 mm/zsmalloc.c struct zspage *zspage;
zspage 1557 mm/zsmalloc.c zspage = get_zspage(f_page);
zspage 1559 mm/zsmalloc.c migrate_read_lock(zspage);
zspage 1561 mm/zsmalloc.c get_zspage_mapping(zspage, &class_idx, &fullness);
zspage 1566 mm/zsmalloc.c fullness = fix_fullness_group(class, zspage);
zspage 1568 mm/zsmalloc.c migrate_read_unlock(zspage);
zspage 1572 mm/zsmalloc.c isolated = is_zspage_isolated(zspage);
zspage 1573 mm/zsmalloc.c migrate_read_unlock(zspage);
zspage 1576 mm/zsmalloc.c free_zspage(pool, class, zspage);
zspage 1745 mm/zsmalloc.c static struct zspage *isolate_zspage(struct size_class *class, bool source)
zspage 1748 mm/zsmalloc.c struct zspage *zspage;
zspage 1757 mm/zsmalloc.c zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
zspage 1758 mm/zsmalloc.c struct zspage, list);
zspage 1759 mm/zsmalloc.c if (zspage) {
zspage 1760 mm/zsmalloc.c VM_BUG_ON(is_zspage_isolated(zspage));
zspage 1761 mm/zsmalloc.c remove_zspage(class, zspage, fg[i]);
zspage 1762 mm/zsmalloc.c return zspage;
zspage 1766 mm/zsmalloc.c return zspage;
zspage 1777 mm/zsmalloc.c struct zspage *zspage)
zspage 1781 mm/zsmalloc.c VM_BUG_ON(is_zspage_isolated(zspage));
zspage 1783 mm/zsmalloc.c fullness = get_fullness_group(class, zspage);
zspage 1784 mm/zsmalloc.c insert_zspage(class, zspage, fullness);
zspage 1785 mm/zsmalloc.c set_zspage_mapping(zspage, class->index, fullness);
zspage 1795 mm/zsmalloc.c static void lock_zspage(struct zspage *zspage)
zspage 1797 mm/zsmalloc.c struct page *page = get_first_page(zspage);
zspage 1831 mm/zsmalloc.c static void migrate_lock_init(struct zspage *zspage)
zspage 1833 mm/zsmalloc.c rwlock_init(&zspage->lock);
zspage 1836 mm/zsmalloc.c static void migrate_read_lock(struct zspage *zspage)
zspage 1838 mm/zsmalloc.c read_lock(&zspage->lock);
zspage 1841 mm/zsmalloc.c static void migrate_read_unlock(struct zspage *zspage)
zspage 1843 mm/zsmalloc.c read_unlock(&zspage->lock);
zspage 1846 mm/zsmalloc.c static void migrate_write_lock(struct zspage *zspage)
zspage 1848 mm/zsmalloc.c write_lock(&zspage->lock);
zspage 1851 mm/zsmalloc.c static void migrate_write_unlock(struct zspage *zspage)
zspage 1853 mm/zsmalloc.c write_unlock(&zspage->lock);
zspage 1857 mm/zsmalloc.c static void inc_zspage_isolation(struct zspage *zspage)
zspage 1859 mm/zsmalloc.c zspage->isolated++;
zspage 1862 mm/zsmalloc.c static void dec_zspage_isolation(struct zspage *zspage)
zspage 1864 mm/zsmalloc.c zspage->isolated--;
zspage 1869 mm/zsmalloc.c struct zspage *zspage)
zspage 1873 mm/zsmalloc.c fg = putback_zspage(class, zspage);
zspage 1892 mm/zsmalloc.c static void replace_sub_page(struct size_class *class, struct zspage *zspage,
zspage 1899 mm/zsmalloc.c page = get_first_page(zspage);
zspage 1908 mm/zsmalloc.c create_page_chain(class, zspage, pages);
zspage 1921 mm/zsmalloc.c struct zspage *zspage;
zspage 1931 mm/zsmalloc.c zspage = get_zspage(page);
zspage 1938 mm/zsmalloc.c get_zspage_mapping(zspage, &class_idx, &fullness);
zspage 1944 mm/zsmalloc.c if (get_zspage_inuse(zspage) == 0) {
zspage 1950 mm/zsmalloc.c if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
zspage 1959 mm/zsmalloc.c if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
zspage 1960 mm/zsmalloc.c get_zspage_mapping(zspage, &class_idx, &fullness);
zspage 1962 mm/zsmalloc.c remove_zspage(class, zspage, fullness);
zspage 1965 mm/zsmalloc.c inc_zspage_isolation(zspage);
zspage 1978 mm/zsmalloc.c struct zspage *zspage;
zspage 1998 mm/zsmalloc.c zspage = get_zspage(page);
zspage 2001 mm/zsmalloc.c migrate_write_lock(zspage);
zspage 2002 mm/zsmalloc.c get_zspage_mapping(zspage, &class_idx, &fullness);
zspage 2008 mm/zsmalloc.c if (!get_zspage_inuse(zspage)) {
zspage 2052 mm/zsmalloc.c replace_sub_page(class, zspage, newpage, page);
zspage 2055 mm/zsmalloc.c dec_zspage_isolation(zspage);
zspage 2061 mm/zsmalloc.c if (!is_zspage_isolated(zspage)) {
zspage 2068 mm/zsmalloc.c putback_zspage_deferred(pool, class, zspage);
zspage 2095 mm/zsmalloc.c migrate_write_unlock(zspage);
zspage 2107 mm/zsmalloc.c struct zspage *zspage;
zspage 2112 mm/zsmalloc.c zspage = get_zspage(page);
zspage 2113 mm/zsmalloc.c get_zspage_mapping(zspage, &class_idx, &fg);
zspage 2119 mm/zsmalloc.c dec_zspage_isolation(zspage);
zspage 2120 mm/zsmalloc.c if (!is_zspage_isolated(zspage)) {
zspage 2125 mm/zsmalloc.c putback_zspage_deferred(pool, class, zspage);
zspage 2194 mm/zsmalloc.c struct zspage *zspage, *tmp;
zspage 2210 mm/zsmalloc.c list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
zspage 2211 mm/zsmalloc.c list_del(&zspage->list);
zspage 2212 mm/zsmalloc.c lock_zspage(zspage);
zspage 2214 mm/zsmalloc.c get_zspage_mapping(zspage, &class_idx, &fullness);
zspage 2218 mm/zsmalloc.c __free_zspage(pool, pool->size_class[class_idx], zspage);
zspage 2233 mm/zsmalloc.c static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
zspage 2235 mm/zsmalloc.c struct page *page = get_first_page(zspage);
zspage 2268 mm/zsmalloc.c struct zspage *src_zspage;
zspage 2269 mm/zsmalloc.c struct zspage *dst_zspage = NULL;
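
For orientation: every helper indexed above operates on struct zspage. The sketch below reconstructs its layout from the accessors in this listing; the field names (isolated, inuse, freeobj, first_page, list, magic, class, fullness, lock) all appear verbatim in the lines above, while the bit widths and the CONFIG_COMPACTION guard around the migration lock are assumptions about this kernel vintage, not facts taken from the listing.

/*
 * Reconstruction sketch of struct zspage, inferred from the accessors
 * indexed above. Bit widths are assumed; they are not shown in the listing.
 */
#define FULLNESS_BITS	2
#define CLASS_BITS	8
#define ISOLATED_BITS	3
#define MAGIC_VAL_BITS	8

struct zspage {
	struct {
		unsigned int fullness:FULLNESS_BITS;	/* ZS_EMPTY .. ZS_FULL group */
		unsigned int class:CLASS_BITS + 1;	/* size-class index */
		unsigned int isolated:ISOLATED_BITS;	/* see {inc,dec}_zspage_isolation() */
		unsigned int magic:MAGIC_VAL_BITS;	/* checked against ZSPAGE_MAGIC */
	};
	unsigned int inuse;		/* live objects; get/mod_zspage_inuse() */
	unsigned int freeobj;		/* free-list head; get/set_freeobj() */
	struct page *first_page;	/* head of the page chain */
	struct list_head list;		/* link in class->fullness_list[] */
#ifdef CONFIG_COMPACTION
	rwlock_t lock;			/* migrate_{read,write}_{un}lock() */
#endif
};

As the get_zspage() fragment at lines 824-829 and the set_page_private() call at line 1045 show, each page in the chain stores the owning zspage pointer in page->private, which is how the allocator recovers the zspage from any of its sub-pages.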
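
The obj_malloc()/obj_free() fragments above (lines 1419-1440 and 1527-1536) also show how the free list is threaded through the free object slots themselves: a free slot stores the index of the next free slot shifted left by OBJ_TAG_BITS, and zspage->freeobj holds the index of the current head. What follows is a minimal userspace sketch of that encoding; the names obj_malloc_sketch/obj_free_sketch and the OBJ_TAG_BITS value are illustrative assumptions, not the kernel's code.

#include <stdio.h>

#define OBJ_TAG_BITS	1	/* low bit reserved to tag allocated handles */
#define NR_OBJS		4

struct link_free { unsigned long next; };

static struct link_free slots[NR_OBJS];
static unsigned int freeobj;	/* stands in for zspage->freeobj */

/* Pop the head free slot, as obj_malloc() does via get/set_freeobj(). */
static unsigned int obj_malloc_sketch(void)
{
	unsigned int obj = freeobj;

	freeobj = slots[obj].next >> OBJ_TAG_BITS;	/* cf. line 1431 */
	return obj;
}

/* Push a slot back onto the list head, as obj_free() does. */
static void obj_free_sketch(unsigned int obj)
{
	slots[obj].next = (unsigned long)freeobj << OBJ_TAG_BITS;	/* cf. line 1533 */
	freeobj = obj;						/* cf. line 1535 */
}

int main(void)
{
	unsigned int i, a, b;

	/* init_zspage() analogue: chain slots 0 -> 1 -> 2 -> 3. */
	for (i = 0; i < NR_OBJS - 1; i++)
		slots[i].next = (unsigned long)(i + 1) << OBJ_TAG_BITS;
	freeobj = 0;			/* cf. set_freeobj(zspage, 0) at line 1024 */

	a = obj_malloc_sketch();	/* takes slot 0 */
	b = obj_malloc_sketch();	/* takes slot 1 */
	obj_free_sketch(a);		/* slot 0 becomes the head again */
	printf("a=%u b=%u head=%u\n", a, b, freeobj);	/* prints a=0 b=1 head=0 */
	return 0;
}

The mod_zspage_inuse(zspage, 1) and mod_zspage_inuse(zspage, -1) calls at lines 1440 and 1536 then maintain the allocation count that get_fullness_group()/fix_fullness_group() use to file the zspage into class->fullness_list[].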