lru 32 arch/m68k/mm/memory.c #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru)) lru 33 arch/m68k/mm/memory.c #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru)) lru 326 arch/mips/mm/cerr-sb1.c uint8_t lru; lru 348 arch/mips/mm/cerr-sb1.c lru = (taghi >> 14) & 0xff; lru 352 arch/mips/mm/cerr-sb1.c (lru & 0x3), lru 353 arch/mips/mm/cerr-sb1.c ((lru >> 2) & 0x3), lru 354 arch/mips/mm/cerr-sb1.c ((lru >> 4) & 0x3), lru 355 arch/mips/mm/cerr-sb1.c ((lru >> 6) & 0x3)); lru 481 arch/mips/mm/cerr-sb1.c uint8_t ecc, lru; lru 503 arch/mips/mm/cerr-sb1.c lru = (taghi >> 14) & 0xff; lru 507 arch/mips/mm/cerr-sb1.c (lru & 0x3), lru 508 arch/mips/mm/cerr-sb1.c ((lru >> 2) & 0x3), lru 509 arch/mips/mm/cerr-sb1.c ((lru >> 4) & 0x3), lru 510 arch/mips/mm/cerr-sb1.c ((lru >> 6) & 0x3)); lru 75 arch/s390/mm/gmap.c list_add(&page->lru, &gmap->crst_list); lru 191 arch/s390/mm/gmap.c list_for_each_entry_safe(page, next, &gmap->crst_list, lru) lru 199 arch/s390/mm/gmap.c list_for_each_entry_safe(page, next, &gmap->pt_list, lru) lru 319 arch/s390/mm/gmap.c list_add(&page->lru, &gmap->crst_list); lru 1340 arch/s390/mm/gmap.c list_del(&page->lru); lru 1368 arch/s390/mm/gmap.c list_del(&page->lru); lru 1397 arch/s390/mm/gmap.c list_del(&page->lru); lru 1425 arch/s390/mm/gmap.c list_del(&page->lru); lru 1454 arch/s390/mm/gmap.c list_del(&page->lru); lru 1482 arch/s390/mm/gmap.c list_del(&page->lru); lru 1511 arch/s390/mm/gmap.c list_del(&page->lru); lru 1542 arch/s390/mm/gmap.c list_del(&page->lru); lru 1771 arch/s390/mm/gmap.c list_add(&page->lru, &sg->crst_list); lru 1855 arch/s390/mm/gmap.c list_add(&page->lru, &sg->crst_list); lru 1939 arch/s390/mm/gmap.c list_add(&page->lru, &sg->crst_list); lru 2060 arch/s390/mm/gmap.c list_add(&page->lru, &sg->pt_list); lru 203 arch/s390/mm/page-states.c if (!list_empty(&page->lru)) lru 266 arch/s390/mm/page-states.c page = list_entry(l, struct page, lru); lru 204 arch/s390/mm/pgalloc.c struct page, lru); lru 214 arch/s390/mm/pgalloc.c list_del(&page->lru); lru 242 arch/s390/mm/pgalloc.c list_add(&page->lru, &mm->context.pgtable_list); lru 261 arch/s390/mm/pgalloc.c list_add(&page->lru, &mm->context.pgtable_list); lru 263 arch/s390/mm/pgalloc.c list_del(&page->lru); lru 295 arch/s390/mm/pgalloc.c list_add_tail(&page->lru, &mm->context.pgtable_list); lru 297 arch/s390/mm/pgalloc.c list_del(&page->lru); lru 205 arch/x86/mm/fault.c list_for_each_entry(page, &pgd_list, lru) { lru 143 arch/x86/mm/init_64.c list_for_each_entry(page, &pgd_list, lru) { lru 184 arch/x86/mm/init_64.c list_for_each_entry(page, &pgd_list, lru) { lru 713 arch/x86/mm/pageattr.c list_for_each_entry(page, &pgd_list, lru) { lru 90 arch/x86/mm/pgtable.c list_add(&page->lru, &pgd_list); lru 97 arch/x86/mm/pgtable.c list_del(&page->lru); lru 847 arch/x86/xen/mmu_pv.c list_for_each_entry(page, &pgd_list, lru) { lru 966 arch/x86/xen/mmu_pv.c list_for_each_entry(page, &pgd_list, lru) { lru 2079 block/blk-mq.c page = list_first_entry(&tags->page_list, struct page, lru); lru 2080 block/blk-mq.c list_del_init(&page->lru); lru 2203 block/blk-mq.c list_add_tail(&page->lru, &tags->page_list); lru 237 drivers/android/binder_alloc.c on_lru = list_lru_del(&binder_alloc_lru, &page->lru); lru 257 drivers/android/binder_alloc.c INIT_LIST_HEAD(&page->lru); lru 289 drivers/android/binder_alloc.c ret = list_lru_add(&binder_alloc_lru, &page->lru); lru 782 drivers/android/binder_alloc.c &alloc->pages[i].lru); lru 843 drivers/android/binder_alloc.c int lru = 0; lru 856 drivers/android/binder_alloc.c else 
if (list_empty(&page->lru)) lru 859 drivers/android/binder_alloc.c lru++; lru 863 drivers/android/binder_alloc.c seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); lru 909 drivers/android/binder_alloc.c struct list_lru_one *lru, lru 917 drivers/android/binder_alloc.c lru); lru 940 drivers/android/binder_alloc.c list_lru_isolate(lru, item); lru 63 drivers/android/binder_alloc.h struct list_head lru; lru 114 drivers/android/binder_alloc.h struct list_lru_one *lru, lru 105 drivers/android/binder_alloc_selftest.c !list_empty(&alloc->pages[page_index].lru)) { lru 147 drivers/android/binder_alloc_selftest.c if (list_empty(&alloc->pages[i].lru)) { lru 169 drivers/android/binder_alloc_selftest.c list_empty(&alloc->pages[i].lru) ? lru 156 drivers/block/xen-blkback/blkback.c page[0] = list_first_entry(&ring->free_pages, struct page, lru); lru 157 drivers/block/xen-blkback/blkback.c list_del(&page[0]->lru); lru 172 drivers/block/xen-blkback/blkback.c list_add(&page[i]->lru, &ring->free_pages); lru 188 drivers/block/xen-blkback/blkback.c struct page, lru); lru 189 drivers/block/xen-blkback/blkback.c list_del(&page[num_pages]->lru); lru 397 drivers/block/xen-blkfront.c struct page, lru); lru 398 drivers/block/xen-blkfront.c list_del(&indirect_page->lru); lru 1257 drivers/block/xen-blkfront.c list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { lru 1258 drivers/block/xen-blkfront.c list_del(&indirect_page->lru); lru 1537 drivers/block/xen-blkfront.c list_add(&indirect_page->lru, &rinfo->indirect_pages); lru 2232 drivers/block/xen-blkfront.c list_add(&indirect_page->lru, &rinfo->indirect_pages); lru 2272 drivers/block/xen-blkfront.c list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) { lru 2273 drivers/block/xen-blkfront.c list_del(&indirect_page->lru); lru 665 drivers/gpu/drm/drm_fb_helper.c list_for_each_entry(page, pagelist, lru) { lru 37 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c struct list_head lru; lru 52 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c struct list_head lru; lru 142 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru); lru 148 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c list_del_init(&eobj->lru); lru 204 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c if (likely(iobj->lru.next) && iobj->map) { lru 205 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c BUG_ON(!list_empty(&iobj->lru)); lru 206 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c list_add_tail(&iobj->lru, &imem->lru); lru 245 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c if (likely(iobj->lru.next)) lru 246 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c list_del_init(&iobj->lru); lru 269 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c if (likely(iobj->lru.next)) { lru 270 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c list_del_init(&iobj->lru); lru 271 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c iobj->lru.next = NULL; lru 297 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c iobj->lru.next = NULL; /* Exclude from eviction. 
*/ lru 319 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c if (likely(iobj->lru.next)) lru 320 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c list_del(&iobj->lru); lru 366 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c INIT_LIST_HEAD(&iobj->lru); lru 397 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c INIT_LIST_HEAD(&imem->lru); lru 158 drivers/gpu/drm/ttm/ttm_bo.c BUG_ON(!list_empty(&bo->lru)); lru 178 drivers/gpu/drm/ttm/ttm_bo.c if (!list_empty(&bo->lru)) lru 185 drivers/gpu/drm/ttm/ttm_bo.c list_add_tail(&bo->lru, &man->lru[bo->priority]); lru 217 drivers/gpu/drm/ttm/ttm_bo.c if (!list_empty(&bo->lru)) { lru 218 drivers/gpu/drm/ttm/ttm_bo.c list_del_init(&bo->lru); lru 285 drivers/gpu/drm/ttm/ttm_bo.c list_bulk_move_tail(&man->lru[i], &pos->first->lru, lru 286 drivers/gpu/drm/ttm/ttm_bo.c &pos->last->lru); lru 300 drivers/gpu/drm/ttm/ttm_bo.c list_bulk_move_tail(&man->lru[i], &pos->first->lru, lru 301 drivers/gpu/drm/ttm/ttm_bo.c &pos->last->lru); lru 306 drivers/gpu/drm/ttm/ttm_bo.c struct list_head *lru; lru 314 drivers/gpu/drm/ttm/ttm_bo.c lru = &pos->first->bdev->glob->swap_lru[i]; lru 315 drivers/gpu/drm/ttm/ttm_bo.c list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); lru 855 drivers/gpu/drm/ttm/ttm_bo.c list_for_each_entry(bo, &man->lru[i], lru) { lru 876 drivers/gpu/drm/ttm/ttm_bo.c if (&bo->lru != &man->lru[i]) lru 1077 drivers/gpu/drm/ttm/ttm_bo.c if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) { lru 1172 drivers/gpu/drm/ttm/ttm_bo.c if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) { lru 1329 drivers/gpu/drm/ttm/ttm_bo.c INIT_LIST_HEAD(&bo->lru); lru 1505 drivers/gpu/drm/ttm/ttm_bo.c while (!list_empty(&man->lru[i])) { lru 1617 drivers/gpu/drm/ttm/ttm_bo.c INIT_LIST_HEAD(&man->lru[i]); lru 1717 drivers/gpu/drm/ttm/ttm_bo.c if (list_empty(&bdev->man[0].lru[0])) lru 508 drivers/gpu/drm/ttm/ttm_bo_util.c INIT_LIST_HEAD(&fbo->base.lru); lru 72 drivers/gpu/drm/ttm/ttm_execbuf_util.c if (list_empty(&bo->lru)) lru 207 drivers/gpu/drm/ttm/ttm_execbuf_util.c if (list_empty(&bo->lru)) lru 310 drivers/gpu/drm/ttm/ttm_page_alloc.c list_for_each_entry_reverse(p, &pool->list, lru) { lru 318 drivers/gpu/drm/ttm/ttm_page_alloc.c __list_del(p->lru.prev, &pool->list); lru 353 drivers/gpu/drm/ttm/ttm_page_alloc.c __list_del(&p->lru, &pool->list); lru 476 drivers/gpu/drm/ttm/ttm_page_alloc.c list_del(&failed_pages[i]->lru); lru 527 drivers/gpu/drm/ttm/ttm_page_alloc.c list_add(&p->lru, pages); lru 613 drivers/gpu/drm/ttm/ttm_page_alloc.c list_for_each_entry(p, &new_pages, lru) { lru 678 drivers/gpu/drm/ttm/ttm_page_alloc.c list_for_each_entry(page, pages, lru) { lru 777 drivers/gpu/drm/ttm/ttm_page_alloc.c list_add_tail(&pages[i]->lru, &huge->list); lru 802 drivers/gpu/drm/ttm/ttm_page_alloc.c list_add_tail(&pages[i]->lru, &pool->list); lru 907 drivers/gpu/drm/ttm/ttm_page_alloc.c list_for_each_entry(p, &plist, lru) { lru 921 drivers/gpu/drm/ttm/ttm_page_alloc.c list_for_each_entry(p, &plist, lru) { lru 332 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c list_for_each_entry(page, pagelist, lru) { lru 128 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_add_tail(&page->lru, &ctx->page_list); lru 151 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry_safe(entry, next, &ctx->page_list, lru) { lru 152 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_del_init(&entry->lru); lru 285 drivers/md/bcache/bcache.h struct list_head lru; lru 433 drivers/md/bcache/request.c i = list_first_entry(&dc->io_lru, struct io, lru); lru 447 drivers/md/bcache/request.c 
list_move_tail(&i->lru, &dc->io_lru); lru 1323 drivers/md/bcache/super.c list_add(&io->lru, &dc->io_lru); lru 85 drivers/md/dm-bufio.c struct list_head lru[LIST_SIZE]; lru 484 drivers/md/dm-bufio.c list_add(&b->lru_list, &c->lru[dirty]); lru 521 drivers/md/dm-bufio.c list_move(&b->lru_list, &c->lru[dirty]); lru 767 drivers/md/dm-bufio.c list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { lru 779 drivers/md/dm-bufio.c list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { lru 917 drivers/md/dm-bufio.c list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { lru 1247 drivers/md/dm-bufio.c list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { lru 1492 drivers/md/dm-bufio.c list_for_each_entry(b, &c->lru[i], lru_list) { lru 1510 drivers/md/dm-bufio.c BUG_ON(!list_empty(&c->lru[i])); lru 1563 drivers/md/dm-bufio.c list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { lru 1639 drivers/md/dm-bufio.c INIT_LIST_HEAD(&c->lru[i]); lru 1807 drivers/md/dm-bufio.c list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { lru 79 drivers/md/dm-writecache.c struct list_head lru; lru 108 drivers/md/dm-writecache.c struct list_head lru; lru 598 drivers/md/dm-writecache.c list_add(&ins->lru, &wc->lru); lru 603 drivers/md/dm-writecache.c list_del(&e->lru); lru 623 drivers/md/dm-writecache.c list_add_tail(&e->lru, &wc->freelist); lru 651 drivers/md/dm-writecache.c e = container_of(wc->freelist.next, struct wc_entry, lru); lru 652 drivers/md/dm-writecache.c list_del(&e->lru); lru 688 drivers/md/dm-writecache.c wc->lru.next = LIST_POISON1; lru 689 drivers/md/dm-writecache.c wc->lru.prev = LIST_POISON2; lru 714 drivers/md/dm-writecache.c if (list_empty(&wc->lru)) lru 717 drivers/md/dm-writecache.c e = container_of(wc->lru.next, struct wc_entry, lru); lru 728 drivers/md/dm-writecache.c if (unlikely(e->lru.next == &wc->lru)) lru 730 drivers/md/dm-writecache.c e2 = container_of(e->lru.next, struct wc_entry, lru); lru 758 drivers/md/dm-writecache.c if (unlikely(e->lru.prev == &wc->lru)) lru 760 drivers/md/dm-writecache.c e = container_of(e->lru.prev, struct wc_entry, lru); lru 923 drivers/md/dm-writecache.c INIT_LIST_HEAD(&wc->lru); lru 1376 drivers/md/dm-writecache.c INIT_LIST_HEAD(&e->lru); lru 1412 drivers/md/dm-writecache.c INIT_LIST_HEAD(&e->lru); lru 1510 drivers/md/dm-writecache.c e = container_of(wbl->list.prev, struct wc_entry, lru); lru 1511 drivers/md/dm-writecache.c list_del(&e->lru); lru 1535 drivers/md/dm-writecache.c f = container_of(wbl->list.prev, struct wc_entry, lru); lru 1542 drivers/md/dm-writecache.c list_del(&f->lru); lru 1568 drivers/md/dm-writecache.c e = container_of(wbl->list.prev, struct wc_entry, lru); lru 1569 drivers/md/dm-writecache.c list_del(&e->lru); lru 1587 drivers/md/dm-writecache.c f = container_of(wbl->list.prev, struct wc_entry, lru); lru 1589 drivers/md/dm-writecache.c list_del(&f->lru); lru 1629 drivers/md/dm-writecache.c while (!list_empty(&wc->lru) && lru 1647 drivers/md/dm-writecache.c e = container_of(wc->lru.prev, struct wc_entry, lru); lru 1658 drivers/md/dm-writecache.c list_del(&e->lru); lru 1659 drivers/md/dm-writecache.c list_add(&e->lru, &skipped); lru 1665 drivers/md/dm-writecache.c list_del(&e->lru); lru 1666 drivers/md/dm-writecache.c list_add(&e->lru, &wbl.list); lru 1701 drivers/md/dm-writecache.c list_del(&g->lru); lru 1702 drivers/md/dm-writecache.c list_add(&g->lru, &wbl.list); lru 1721 drivers/md/dm-writecache.c list_splice_tail(&skipped, &wc->lru); lru 1368 
drivers/md/raid5-cache.c BUG_ON(list_empty(&sh->lru)); lru 1379 drivers/md/raid5-cache.c list_del_init(&sh->lru); lru 1409 drivers/md/raid5-cache.c list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) { lru 1417 drivers/md/raid5-cache.c &conf->r5c_partial_stripe_list, lru) { lru 1481 drivers/md/raid5-cache.c if (!list_empty(&sh->lru) && lru 1950 drivers/md/raid5-cache.c list_for_each_entry(sh, list, lru) lru 1962 drivers/md/raid5-cache.c list_for_each_entry_safe(sh, next, cached_stripe_list, lru) { lru 1964 drivers/md/raid5-cache.c list_del_init(&sh->lru); lru 1975 drivers/md/raid5-cache.c list_for_each_entry_safe(sh, next, cached_stripe_list, lru) lru 1978 drivers/md/raid5-cache.c list_del_init(&sh->lru); lru 2125 drivers/md/raid5-cache.c list_del_init(&sh->lru); lru 2180 drivers/md/raid5-cache.c list_add_tail(&sh->lru, cached_stripe_list); lru 2187 drivers/md/raid5-cache.c list_move_tail(&sh->lru, cached_stripe_list); lru 2275 drivers/md/raid5-cache.c list_for_each_entry(sh, &ctx->cached_list, lru) { lru 2371 drivers/md/raid5-cache.c list_for_each_entry(sh, &ctx->cached_list, lru) { lru 2439 drivers/md/raid5-cache.c list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { lru 2442 drivers/md/raid5-cache.c list_del_init(&sh->lru); lru 178 drivers/md/raid5.c if (list_empty(&sh->lru)) { lru 182 drivers/md/raid5.c list_add_tail(&sh->lru, &group->loprio_list); lru 184 drivers/md/raid5.c list_add_tail(&sh->lru, &group->handle_list); lru 218 drivers/md/raid5.c BUG_ON(!list_empty(&sh->lru)); lru 243 drivers/md/raid5.c list_add_tail(&sh->lru, &conf->delayed_list); lru 246 drivers/md/raid5.c list_add_tail(&sh->lru, &conf->bitmap_list); lru 252 drivers/md/raid5.c list_add_tail(&sh->lru, lru 255 drivers/md/raid5.c list_add_tail(&sh->lru, lru 272 drivers/md/raid5.c list_add_tail(&sh->lru, temp_inactive_list); lru 276 drivers/md/raid5.c list_add_tail(&sh->lru, temp_inactive_list); lru 283 drivers/md/raid5.c list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); lru 291 drivers/md/raid5.c list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); lru 440 drivers/md/raid5.c sh = list_entry(first, struct stripe_head, lru); lru 677 drivers/md/raid5.c BUG_ON(list_empty(&sh->lru) && lru 682 drivers/md/raid5.c list_del_init(&sh->lru); lru 761 drivers/md/raid5.c BUG_ON(list_empty(&head->lru) && lru 766 drivers/md/raid5.c list_del_init(&head->lru); lru 2142 drivers/md/raid5.c INIT_LIST_HEAD(&sh->lru); lru 2335 drivers/md/raid5.c list_add(&nsh->lru, &newstripes); lru 2340 drivers/md/raid5.c nsh = list_entry(newstripes.next, struct stripe_head, lru); lru 2341 drivers/md/raid5.c list_del(&nsh->lru); lru 2354 drivers/md/raid5.c list_for_each_entry(nsh, &newstripes, lru) { lru 2413 drivers/md/raid5.c nsh = list_entry(newstripes.next, struct stripe_head, lru); lru 2414 drivers/md/raid5.c list_del_init(&nsh->lru); lru 5067 drivers/md/raid5.c sh = list_entry(l, struct stripe_head, lru); lru 5072 drivers/md/raid5.c list_add_tail(&sh->lru, &conf->hold_list); lru 5086 drivers/md/raid5.c struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); lru 5088 drivers/md/raid5.c list_del_init(&sh->lru); lru 5363 drivers/md/raid5.c sh = list_entry(handle_list->next, typeof(*sh), lru); lru 5382 drivers/md/raid5.c list_for_each_entry(tmp, &conf->hold_list, lru) { lru 5412 drivers/md/raid5.c list_del_init(&sh->lru); lru 5436 drivers/md/raid5.c sh = list_first_entry(&cb->list, struct stripe_head, lru); lru 5437 drivers/md/raid5.c list_del_init(&sh->lru); lru 5485 drivers/md/raid5.c 
list_add_tail(&sh->lru, &cb->list); lru 5942 drivers/md/raid5.c list_add(&sh->lru, &stripes); lru 5975 drivers/md/raid5.c sh = list_entry(stripes.next, struct stripe_head, lru); lru 5976 drivers/md/raid5.c list_del_init(&sh->lru); lru 200 drivers/md/raid5.h struct list_head lru; /* inactive_list or handle_list */ lru 680 drivers/misc/vmw_balloon.c struct page, lru); lru 681 drivers/misc/vmw_balloon.c list_del(&page->lru); lru 695 drivers/misc/vmw_balloon.c list_add(&page->lru, &ctl->pages); lru 873 drivers/misc/vmw_balloon.c list_for_each_entry(page, &ctl->pages, lru) lru 905 drivers/misc/vmw_balloon.c list_move(&page->lru, &ctl->refused_pages); lru 930 drivers/misc/vmw_balloon.c list_for_each_entry_safe(page, tmp, page_list, lru) { lru 931 drivers/misc/vmw_balloon.c list_del(&page->lru); lru 1017 drivers/misc/vmw_balloon.c list_for_each_entry(page, pages, lru) { lru 1062 drivers/misc/vmw_balloon.c list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) { lru 1065 drivers/misc/vmw_balloon.c list_move(&page->lru, pages); lru 1093 drivers/misc/vmw_balloon.c list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) { lru 1094 drivers/misc/vmw_balloon.c list_del(&page->lru); lru 1097 drivers/misc/vmw_balloon.c list_add(&page[i].lru, &ctl->prealloc_pages); lru 67 drivers/staging/android/ashmem.c struct list_head lru; lru 149 drivers/staging/android/ashmem.c list_add_tail(&range->lru, &ashmem_lru_list); lru 162 drivers/staging/android/ashmem.c list_del(&range->lru); lru 478 drivers/staging/android/ashmem.c list_first_entry(&ashmem_lru_list, typeof(*range), lru); lru 32 drivers/staging/android/ion/ion_page_pool.c list_add_tail(&page->lru, &pool->high_items); lru 35 drivers/staging/android/ion/ion_page_pool.c list_add_tail(&page->lru, &pool->low_items); lru 50 drivers/staging/android/ion/ion_page_pool.c page = list_first_entry(&pool->high_items, struct page, lru); lru 54 drivers/staging/android/ion/ion_page_pool.c page = list_first_entry(&pool->low_items, struct page, lru); lru 58 drivers/staging/android/ion/ion_page_pool.c list_del(&page->lru); lru 122 drivers/staging/android/ion/ion_system_heap.c list_add_tail(&page->lru, &pages); lru 135 drivers/staging/android/ion/ion_system_heap.c list_for_each_entry_safe(page, tmp_page, &pages, lru) { lru 138 drivers/staging/android/ion/ion_system_heap.c list_del(&page->lru); lru 147 drivers/staging/android/ion/ion_system_heap.c list_for_each_entry_safe(page, tmp_page, &pages, lru) lru 353 drivers/staging/fbtft/fbtft-core.c list_for_each_entry(page, pagelist, lru) { lru 947 drivers/video/fbdev/broadsheetfb.c list_for_each_entry(cur, &fbdefio->pagelist, lru) { lru 127 drivers/video/fbdev/core/fb_defio.c list_for_each_entry(cur, &fbdefio->pagelist, lru) { lru 139 drivers/video/fbdev/core/fb_defio.c list_add_tail(&page->lru, &cur->lru); lru 187 drivers/video/fbdev/core/fb_defio.c list_for_each_entry(cur, &fbdefio->pagelist, lru) { lru 477 drivers/video/fbdev/metronomefb.c list_for_each_entry(cur, &fbdefio->pagelist, lru) { lru 451 drivers/video/fbdev/sh_mobile_lcdcfb.c list_for_each_entry(page, pagelist, lru) lru 969 drivers/video/fbdev/smscufx.c list_for_each_entry(cur, &fbdefio->pagelist, lru) { lru 811 drivers/video/fbdev/udlfb.c list_for_each_entry(cur, &fbdefio->pagelist, lru) { lru 194 drivers/video/fbdev/xen-fbfront.c list_for_each_entry(page, pagelist, lru) { lru 226 drivers/virtio/virtio_balloon.c list_for_each_entry_safe(page, next, pages, lru) { lru 230 drivers/virtio/virtio_balloon.c list_del(&page->lru); lru 254 drivers/virtio/virtio_balloon.c 
list_add(&page->lru, &pages); lru 165 drivers/xen/balloon.c list_add_tail(&page->lru, &ballooned_pages); lru 168 drivers/xen/balloon.c list_add(&page->lru, &ballooned_pages); lru 182 drivers/xen/balloon.c page = list_entry(ballooned_pages.next, struct page, lru); lru 185 drivers/xen/balloon.c list_del(&page->lru); lru 198 drivers/xen/balloon.c struct list_head *next = page->lru.next; lru 201 drivers/xen/balloon.c return list_entry(next, struct page, lru); lru 423 drivers/xen/balloon.c page = list_first_entry_or_null(&ballooned_pages, struct page, lru); lru 473 drivers/xen/balloon.c list_add(&page->lru, &pages); lru 490 drivers/xen/balloon.c list_for_each_entry_safe(page, tmp, &pages, lru) { lru 495 drivers/xen/balloon.c list_del(&page->lru); lru 97 drivers/xen/privcmd.c list_for_each_entry_safe(p, n, pages, lru) lru 132 drivers/xen/privcmd.c list_add_tail(&page->lru, pagelist); lru 172 drivers/xen/privcmd.c page = list_entry(pos, struct page, lru); lru 206 drivers/xen/privcmd.c page = list_entry(pos, struct page, lru); lru 285 drivers/xen/privcmd.c struct page, lru); lru 225 drivers/xen/xen-scsiback.c list_add(&page[i]->lru, &scsiback_free_pages); lru 239 drivers/xen/xen-scsiback.c page[0] = list_first_entry(&scsiback_free_pages, struct page, lru); lru 240 drivers/xen/xen-scsiback.c list_del(&page[0]->lru); lru 457 fs/afs/file.c for (p = first->lru.prev; p != pages; p = p->prev) { lru 458 fs/afs/file.c page = list_entry(p, struct page, lru); lru 487 fs/afs/file.c list_del(&page->lru); lru 4350 fs/btrfs/extent_io.c list_del(&page->lru); lru 476 fs/cachefiles/rdwr.c list_for_each_entry_safe(netpage, _n, list, lru) { lru 477 fs/cachefiles/rdwr.c list_del(&netpage->lru); lru 652 fs/cachefiles/rdwr.c list_for_each_entry_safe(netpage, _n, list, lru) { lru 653 fs/cachefiles/rdwr.c list_del(&netpage->lru); lru 730 fs/cachefiles/rdwr.c list_for_each_entry_safe(page, _n, pages, lru) { lru 751 fs/cachefiles/rdwr.c list_move(&page->lru, &backpages); lru 856 fs/cachefiles/rdwr.c list_for_each_entry(page, pages, lru) { lru 342 fs/ceph/addr.c list_del(&page->lru); lru 353 fs/ceph/addr.c list_for_each_entry_reverse(page, page_list, lru) { lru 383 fs/ceph/addr.c page = list_entry(page_list->prev, struct page, lru); lru 385 fs/ceph/addr.c list_del(&page->lru); lru 3296 fs/ceph/mds_client.c page = list_first_entry(&recon_state->pagelist->head, struct page, lru); lru 3699 fs/ceph/mds_client.c struct page, lru); lru 324 fs/ceph/mds_client.h struct list_head lru; lru 1028 fs/ceph/snap.c list_del_init(&exist->lru); lru 1049 fs/ceph/snap.c INIT_LIST_HEAD(&sm->lru); lru 1070 fs/ceph/snap.c list_del_init(&exist->lru); lru 1095 fs/ceph/snap.c list_add_tail(&sm->lru, &mdsc->snapid_map_lru); lru 1117 fs/ceph/snap.c struct ceph_snapid_map, lru); lru 1122 fs/ceph/snap.c list_move(&sm->lru, &to_free); lru 1127 fs/ceph/snap.c sm = list_first_entry(&to_free, struct ceph_snapid_map, lru); lru 1128 fs/ceph/snap.c list_del(&sm->lru); lru 1146 fs/ceph/snap.c list_move(&sm->lru, &to_free); lru 1151 fs/ceph/snap.c sm = list_first_entry(&to_free, struct ceph_snapid_map, lru); lru 1152 fs/ceph/snap.c list_del(&sm->lru); lru 1248 fs/ceph/xattr.c struct page, lru); lru 4258 fs/cifs/file.c list_move_tail(&page->lru, tmplist); lru 4262 fs/cifs/file.c list_for_each_entry_safe_reverse(page, tpage, page_list, lru) { lru 4276 fs/cifs/file.c list_move_tail(&page->lru, tmplist); lru 4375 fs/cifs/file.c list_for_each_entry_safe(page, tpage, &tmplist, lru) { lru 4376 fs/cifs/file.c list_del(&page->lru); lru 4397 fs/cifs/file.c 
list_for_each_entry_safe(page, tpage, &tmplist, lru) { lru 4398 fs/cifs/file.c list_del(&page->lru); lru 439 fs/dcache.c static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry) lru 446 fs/dcache.c list_lru_isolate(lru, &dentry->d_lru); lru 449 fs/dcache.c static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry, lru 456 fs/dcache.c list_lru_isolate_move(lru, &dentry->d_lru, list); lru 1125 fs/dcache.c struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) lru 1145 fs/dcache.c d_lru_isolate(lru, dentry); lru 1176 fs/dcache.c d_lru_shrink_move(lru, dentry, freeable); lru 1206 fs/dcache.c struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) lru 1219 fs/dcache.c d_lru_shrink_move(lru, dentry, freeable); lru 52 fs/erofs/compress.h list_add(&page->lru, pagepool); lru 291 fs/erofs/data.c struct page *page = list_last_entry(pages, struct page, lru); lru 296 fs/erofs/data.c page = list_entry(pages->prev, struct page, lru); lru 299 fs/erofs/data.c list_del(&page->lru); lru 17 fs/erofs/utils.c list_del(&page->lru); lru 1098 fs/erofs/zdata.c list_add(&page->lru, pagepool); lru 1390 fs/erofs/zdata.c list_del(&page->lru); lru 1400 fs/erofs/zdata.c list_add(&page->lru, &pagepool); lru 259 fs/ext4/readpage.c list_del(&page->lru); lru 1786 fs/f2fs/data.c page = list_last_entry(pages, struct page, lru); lru 1789 fs/f2fs/data.c list_del(&page->lru); lru 1834 fs/f2fs/data.c struct page *page = list_last_entry(pages, struct page, lru); lru 770 fs/fscache/page.c list_for_each_entry(page, pages, lru) { lru 146 fs/gfs2/quota.c struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) lru 156 fs/gfs2/quota.c list_lru_isolate_move(lru, &qd->qd_lru, dispose); lru 733 fs/inode.c struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) lru 751 fs/inode.c list_lru_isolate(lru, &inode->i_lru); lru 785 fs/inode.c list_lru_isolate_move(lru, &inode->i_lru, freeable); lru 334 fs/iomap/buffered-io.c list_del(&page->lru); lru 388 fs/iomap/buffered-io.c loff_t pos = page_offset(list_entry(pages->prev, struct page, lru)); lru 389 fs/iomap/buffered-io.c loff_t last = page_offset(list_entry(pages->next, struct page, lru)); lru 398 fs/mpage.c list_del(&page->lru); lru 2185 fs/nfs/dir.c cache = list_entry(head->next, struct nfs_access_entry, lru); lru 2186 fs/nfs/dir.c list_del(&cache->lru); lru 2210 fs/nfs/dir.c struct nfs_access_entry, lru); lru 2211 fs/nfs/dir.c list_move(&cache->lru, &head); lru 2275 fs/nfs/dir.c list_move(&entry->lru, head); lru 2350 fs/nfs/dir.c list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru); lru 2375 fs/nfs/dir.c cache = list_entry(lh, struct nfs_access_entry, lru); lru 2415 fs/nfs/dir.c list_add_tail(&set->lru, &nfsi->access_cache_entry_lru); lru 2420 fs/nfs/dir.c list_add_tail(&set->lru, &nfsi->access_cache_entry_lru); lru 2421 fs/nfs/dir.c list_del(&entry->lru); lru 360 fs/nfsd/filecache.c nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru, lru 394 fs/nfsd/filecache.c list_lru_isolate_move(lru, &nf->nf_lru, head); lru 41 fs/proc/meminfo.c int lru; lru 52 fs/proc/meminfo.c for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) lru 53 fs/proc/meminfo.c pages[lru] = global_node_page_state(NR_LRU_BASE + lru); lru 1559 fs/xfs/xfs_buf.c struct list_lru_one *lru, lru 1581 fs/xfs/xfs_buf.c list_lru_isolate_move(lru, item, dispose); lru 1635 fs/xfs/xfs_buf.c struct list_lru_one *lru, lru 1659 fs/xfs/xfs_buf.c list_lru_isolate_move(lru, item, dispose); lru 413 fs/xfs/xfs_qm.c struct list_lru_one *lru, lru 434 fs/xfs/xfs_qm.c 
list_lru_isolate(lru, &dqp->q_lru); lru 475 fs/xfs/xfs_qm.c list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose); lru 211 include/drm/ttm/ttm_bo_api.h struct list_head lru; lru 201 include/drm/ttm/ttm_bo_driver.h struct list_head lru[TTM_MAX_BO_PRIORITY]; lru 781 include/drm/ttm/ttm_bo_driver.h if (list_empty(&bo->lru)) lru 105 include/linux/balloon_compaction.h list_add(&page->lru, &balloon->pages); lru 126 include/linux/balloon_compaction.h list_del(&page->lru); lru 149 include/linux/balloon_compaction.h list_add(&page->lru, &balloon->pages); lru 155 include/linux/balloon_compaction.h list_del(&page->lru); lru 190 include/linux/balloon_compaction.h list_add(&page->lru, pages); lru 202 include/linux/balloon_compaction.h struct page *page = list_first_entry_or_null(pages, struct page, lru); lru 207 include/linux/balloon_compaction.h list_del(&page->lru); lru 37 include/linux/list_lru.h struct list_lru_one *lru[0]; lru 44 include/linux/list_lru.h struct list_lru_one lru; lru 61 include/linux/list_lru.h void list_lru_destroy(struct list_lru *lru); lru 62 include/linux/list_lru.h int __list_lru_init(struct list_lru *lru, bool memcg_aware, lru 65 include/linux/list_lru.h #define list_lru_init(lru) \ lru 66 include/linux/list_lru.h __list_lru_init((lru), false, NULL, NULL) lru 67 include/linux/list_lru.h #define list_lru_init_key(lru, key) \ lru 68 include/linux/list_lru.h __list_lru_init((lru), false, (key), NULL) lru 69 include/linux/list_lru.h #define list_lru_init_memcg(lru, shrinker) \ lru 70 include/linux/list_lru.h __list_lru_init((lru), true, NULL, shrinker) lru 91 include/linux/list_lru.h bool list_lru_add(struct list_lru *lru, struct list_head *item); lru 104 include/linux/list_lru.h bool list_lru_del(struct list_lru *lru, struct list_head *item); lru 116 include/linux/list_lru.h unsigned long list_lru_count_one(struct list_lru *lru, lru 118 include/linux/list_lru.h unsigned long list_lru_count_node(struct list_lru *lru, int nid); lru 120 include/linux/list_lru.h static inline unsigned long list_lru_shrink_count(struct list_lru *lru, lru 123 include/linux/list_lru.h return list_lru_count_one(lru, sc->nid, sc->memcg); lru 126 include/linux/list_lru.h static inline unsigned long list_lru_count(struct list_lru *lru) lru 132 include/linux/list_lru.h count += list_lru_count_node(lru, nid); lru 166 include/linux/lru_cache.h struct list_head lru; lru 536 include/linux/memcontrol.h void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, lru 541 include/linux/memcontrol.h enum lru_list lru, int zone_idx) lru 546 include/linux/memcontrol.h return mz->lru_zone_size[zone_idx][lru]; lru 985 include/linux/memcontrol.h enum lru_list lru, int zone_idx) lru 221 include/linux/mm.h #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) lru 27 include/linux/mm_inline.h enum lru_list lru, enum zone_type zid, lru 32 include/linux/mm_inline.h __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); lru 34 include/linux/mm_inline.h NR_ZONE_LRU_BASE + lru, nr_pages); lru 38 include/linux/mm_inline.h enum lru_list lru, enum zone_type zid, lru 41 include/linux/mm_inline.h __update_lru_size(lruvec, lru, zid, nr_pages); lru 43 include/linux/mm_inline.h mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); lru 48 include/linux/mm_inline.h struct lruvec *lruvec, enum lru_list lru) lru 50 include/linux/mm_inline.h update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); lru 51 include/linux/mm_inline.h list_add(&page->lru, &lruvec->lists[lru]); lru 55 
include/linux/mm_inline.h struct lruvec *lruvec, enum lru_list lru) lru 57 include/linux/mm_inline.h update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); lru 58 include/linux/mm_inline.h list_add_tail(&page->lru, &lruvec->lists[lru]); lru 62 include/linux/mm_inline.h struct lruvec *lruvec, enum lru_list lru) lru 64 include/linux/mm_inline.h list_del(&page->lru); lru 65 include/linux/mm_inline.h update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page)); lru 92 include/linux/mm_inline.h enum lru_list lru; lru 96 include/linux/mm_inline.h lru = LRU_UNEVICTABLE; lru 98 include/linux/mm_inline.h lru = page_lru_base_type(page); lru 101 include/linux/mm_inline.h lru += LRU_ACTIVE; lru 104 include/linux/mm_inline.h return lru; lru 116 include/linux/mm_inline.h enum lru_list lru; lru 119 include/linux/mm_inline.h lru = LRU_UNEVICTABLE; lru 121 include/linux/mm_inline.h lru = page_lru_base_type(page); lru 123 include/linux/mm_inline.h lru += LRU_ACTIVE; lru 125 include/linux/mm_inline.h return lru; lru 84 include/linux/mm_types.h struct list_head lru; lru 107 include/linux/mmzone.h list_add(&page->lru, &area->free_list[migratetype]); lru 115 include/linux/mmzone.h list_add_tail(&page->lru, &area->free_list[migratetype]); lru 135 include/linux/mmzone.h list_move(&page->lru, &area->free_list[migratetype]); lru 142 include/linux/mmzone.h struct page, lru); lru 148 include/linux/mmzone.h list_del(&page->lru); lru 271 include/linux/mmzone.h #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) lru 273 include/linux/mmzone.h #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) lru 275 include/linux/mmzone.h static inline int is_file_lru(enum lru_list lru) lru 277 include/linux/mmzone.h return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); lru 280 include/linux/mmzone.h static inline int is_active_lru(enum lru_list lru) lru 282 include/linux/mmzone.h return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); lru 841 include/linux/mmzone.h extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); lru 53 include/linux/nfs_fs.h struct list_head lru; lru 320 include/linux/page-flags.h PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD) lru 32 include/trace/events/pagemap.h int lru lru 35 include/trace/events/pagemap.h TP_ARGS(page, lru), lru 40 include/trace/events/pagemap.h __field(int, lru ) lru 47 include/trace/events/pagemap.h __entry->lru = lru; lru 55 include/trace/events/pagemap.h __entry->lru, lru 275 include/trace/events/vmscan.h int lru), lru 277 include/trace/events/vmscan.h TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru), lru 287 include/trace/events/vmscan.h __field(int, lru) lru 298 include/trace/events/vmscan.h __entry->lru = lru; lru 309 include/trace/events/vmscan.h __print_symbolic(__entry->lru, LRU_NAMES)) lru 139 kernel/bpf/bpf_lru_list.c static void __bpf_lru_list_rotate_active(struct bpf_lru *lru, lru 153 kernel/bpf/bpf_lru_list.c if (++i == lru->nr_scans || node == first_node) lru 166 kernel/bpf/bpf_lru_list.c static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru, lru 182 kernel/bpf/bpf_lru_list.c while (i < lru->nr_scans) { lru 206 kernel/bpf/bpf_lru_list.c __bpf_lru_list_shrink_inactive(struct bpf_lru *lru, lru 220 kernel/bpf/bpf_lru_list.c } else if (lru->del_from_htab(lru->del_arg, node)) { lru 227 kernel/bpf/bpf_lru_list.c if (++i == lru->nr_scans) lru 237 kernel/bpf/bpf_lru_list.c static void 
__bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l) lru 240 kernel/bpf/bpf_lru_list.c __bpf_lru_list_rotate_active(lru, l); lru 242 kernel/bpf/bpf_lru_list.c __bpf_lru_list_rotate_inactive(lru, l); lru 255 kernel/bpf/bpf_lru_list.c static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru, lru 266 kernel/bpf/bpf_lru_list.c nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink, lru 279 kernel/bpf/bpf_lru_list.c if (lru->del_from_htab(lru->del_arg, node)) { lru 318 kernel/bpf/bpf_lru_list.c static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru, lru 321 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l = &lru->common_lru.lru_list; lru 329 kernel/bpf/bpf_lru_list.c __bpf_lru_list_rotate(lru, l); lru 340 kernel/bpf/bpf_lru_list.c __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree, lru 347 kernel/bpf/bpf_lru_list.c static void __local_list_add_pending(struct bpf_lru *lru, lru 353 kernel/bpf/bpf_lru_list.c *(u32 *)((void *)node + lru->hash_offset) = hash; lru 375 kernel/bpf/bpf_lru_list.c __local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l) lru 385 kernel/bpf/bpf_lru_list.c lru->del_from_htab(lru->del_arg, node)) { lru 399 kernel/bpf/bpf_lru_list.c static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru, lru 408 kernel/bpf/bpf_lru_list.c l = per_cpu_ptr(lru->percpu_lru, cpu); lru 412 kernel/bpf/bpf_lru_list.c __bpf_lru_list_rotate(lru, l); lru 416 kernel/bpf/bpf_lru_list.c __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list, lru 421 kernel/bpf/bpf_lru_list.c *(u32 *)((void *)node + lru->hash_offset) = hash; lru 431 kernel/bpf/bpf_lru_list.c static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru, lru 435 kernel/bpf/bpf_lru_list.c struct bpf_common_lru *clru = &lru->common_lru; lru 447 kernel/bpf/bpf_lru_list.c bpf_lru_list_pop_free_to_local(lru, loc_l); lru 452 kernel/bpf/bpf_lru_list.c __local_list_add_pending(lru, loc_l, cpu, node, hash); lru 476 kernel/bpf/bpf_lru_list.c node = __local_list_pop_pending(lru, steal_loc_l); lru 487 kernel/bpf/bpf_lru_list.c __local_list_add_pending(lru, loc_l, cpu, node, hash); lru 494 kernel/bpf/bpf_lru_list.c struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash) lru 496 kernel/bpf/bpf_lru_list.c if (lru->percpu) lru 497 kernel/bpf/bpf_lru_list.c return bpf_percpu_lru_pop_free(lru, hash); lru 499 kernel/bpf/bpf_lru_list.c return bpf_common_lru_pop_free(lru, hash); lru 502 kernel/bpf/bpf_lru_list.c static void bpf_common_lru_push_free(struct bpf_lru *lru, lru 514 kernel/bpf/bpf_lru_list.c loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu); lru 532 kernel/bpf/bpf_lru_list.c bpf_lru_list_push_free(&lru->common_lru.lru_list, node); lru 535 kernel/bpf/bpf_lru_list.c static void bpf_percpu_lru_push_free(struct bpf_lru *lru, lru 541 kernel/bpf/bpf_lru_list.c l = per_cpu_ptr(lru->percpu_lru, node->cpu); lru 550 kernel/bpf/bpf_lru_list.c void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node) lru 552 kernel/bpf/bpf_lru_list.c if (lru->percpu) lru 553 kernel/bpf/bpf_lru_list.c bpf_percpu_lru_push_free(lru, node); lru 555 kernel/bpf/bpf_lru_list.c bpf_common_lru_push_free(lru, node); lru 558 kernel/bpf/bpf_lru_list.c static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, lru 562 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l = &lru->common_lru.lru_list; lru 576 kernel/bpf/bpf_lru_list.c static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf, lru 591 kernel/bpf/bpf_lru_list.c l = 
per_cpu_ptr(lru->percpu_lru, cpu); lru 607 kernel/bpf/bpf_lru_list.c void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset, lru 610 kernel/bpf/bpf_lru_list.c if (lru->percpu) lru 611 kernel/bpf/bpf_lru_list.c bpf_percpu_lru_populate(lru, buf, node_offset, elem_size, lru 614 kernel/bpf/bpf_lru_list.c bpf_common_lru_populate(lru, buf, node_offset, elem_size, lru 645 kernel/bpf/bpf_lru_list.c int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset, lru 651 kernel/bpf/bpf_lru_list.c lru->percpu_lru = alloc_percpu(struct bpf_lru_list); lru 652 kernel/bpf/bpf_lru_list.c if (!lru->percpu_lru) lru 658 kernel/bpf/bpf_lru_list.c l = per_cpu_ptr(lru->percpu_lru, cpu); lru 661 kernel/bpf/bpf_lru_list.c lru->nr_scans = PERCPU_NR_SCANS; lru 663 kernel/bpf/bpf_lru_list.c struct bpf_common_lru *clru = &lru->common_lru; lru 677 kernel/bpf/bpf_lru_list.c lru->nr_scans = LOCAL_NR_SCANS; lru 680 kernel/bpf/bpf_lru_list.c lru->percpu = percpu; lru 681 kernel/bpf/bpf_lru_list.c lru->del_from_htab = del_from_htab; lru 682 kernel/bpf/bpf_lru_list.c lru->del_arg = del_arg; lru 683 kernel/bpf/bpf_lru_list.c lru->hash_offset = hash_offset; lru 688 kernel/bpf/bpf_lru_list.c void bpf_lru_destroy(struct bpf_lru *lru) lru 690 kernel/bpf/bpf_lru_list.c if (lru->percpu) lru 691 kernel/bpf/bpf_lru_list.c free_percpu(lru->percpu_lru); lru 693 kernel/bpf/bpf_lru_list.c free_percpu(lru->common_lru.local_list); lru 73 kernel/bpf/bpf_lru_list.h int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset, lru 75 kernel/bpf/bpf_lru_list.h void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset, lru 77 kernel/bpf/bpf_lru_list.h void bpf_lru_destroy(struct bpf_lru *lru); lru 78 kernel/bpf/bpf_lru_list.h struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash); lru 79 kernel/bpf/bpf_lru_list.h void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node); lru 80 kernel/bpf/bpf_lru_list.h void bpf_lru_promote(struct bpf_lru *lru, struct bpf_lru_node *node); lru 31 kernel/bpf/hashtab.c struct bpf_lru lru; lru 122 kernel/bpf/hashtab.c struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); lru 164 kernel/bpf/hashtab.c err = bpf_lru_init(&htab->lru, lru 177 kernel/bpf/hashtab.c bpf_lru_populate(&htab->lru, htab->elems, lru 197 kernel/bpf/hashtab.c bpf_lru_destroy(&htab->lru); lru 230 kernel/bpf/hashtab.c bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || lru 247 kernel/bpf/hashtab.c if (lru && !capable(CAP_SYS_ADMIN)) lru 261 kernel/bpf/hashtab.c if (!lru && percpu_lru) lru 264 kernel/bpf/hashtab.c if (lru && !prealloc) lru 299 kernel/bpf/hashtab.c bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || lru 382 kernel/bpf/hashtab.c if (!percpu && !lru) { lru 968 kernel/bpf/hashtab.c bpf_lru_push_free(&htab->lru, &l_new->lru_node); lru 970 kernel/bpf/hashtab.c bpf_lru_push_free(&htab->lru, &l_old->lru_node); lru 1089 kernel/bpf/hashtab.c bpf_lru_push_free(&htab->lru, &l_new->lru_node); lru 1168 kernel/bpf/hashtab.c bpf_lru_push_free(&htab->lru, &l->lru_node); lru 426 kernel/crash_core.c VMCOREINFO_OFFSET(page, lru); lru 344 kernel/kexec_core.c list_for_each_entry_safe(page, next, list, lru) { lru 345 kernel/kexec_core.c list_del(&page->lru); lru 388 kernel/kexec_core.c list_add(&pages->lru, &extra_pages); lru 395 kernel/kexec_core.c list_add(&pages->lru, &image->control_pages); lru 707 kernel/kexec_core.c list_for_each_entry(page, &image->dest_pages, lru) { lru 710 kernel/kexec_core.c list_del(&page->lru); lru 725 kernel/kexec_core.c list_add(&page->lru, 
&image->unusable_pages); lru 769 kernel/kexec_core.c list_add(&page->lru, &image->dest_pages); lru 32 kernel/power/wakelock.c struct list_head lru; lru 95 kernel/power/wakelock.c list_add(&wl->lru, &wakelocks_lru_list); lru 100 kernel/power/wakelock.c list_move(&wl->lru, &wakelocks_lru_list); lru 111 kernel/power/wakelock.c list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) { lru 126 kernel/power/wakelock.c list_del(&wl->lru); lru 119 lib/lru_cache.c INIT_LIST_HEAD(&lc->lru); lru 198 lib/lru_cache.c INIT_LIST_HEAD(&lc->lru); lru 328 lib/lru_cache.c else if (!list_empty(&lc->lru)) lru 329 lib/lru_cache.c n = lc->lru.prev; lru 349 lib/lru_cache.c if (!list_empty(&lc->lru)) lru 567 lib/lru_cache.c list_move(&e->list, &lc->lru); lru 624 lib/lru_cache.c lh = &lc->lru; lru 48 mm/balloon_compaction.c list_for_each_entry_safe(page, tmp, pages, lru) { lru 49 mm/balloon_compaction.c list_del(&page->lru); lru 84 mm/balloon_compaction.c list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { lru 104 mm/balloon_compaction.c list_add(&page->lru, pages); lru 200 mm/balloon_compaction.c return list_first_entry(&pages, struct page, lru); lru 213 mm/balloon_compaction.c list_del(&page->lru); lru 226 mm/balloon_compaction.c list_add(&page->lru, &b_dev_info->pages); lru 58 mm/compaction.c list_for_each_entry_safe(page, next, freelist, lru) { lru 60 mm/compaction.c list_del(&page->lru); lru 75 mm/compaction.c list_for_each_entry_safe(page, next, list, lru) { lru 76 mm/compaction.c list_del(&page->lru); lru 86 mm/compaction.c list_add(&page->lru, &tmp_list); lru 618 mm/compaction.c list_add_tail(&page->lru, freelist); lru 992 mm/compaction.c list_add(&page->lru, &cc->migratepages); lru 1196 mm/compaction.c if (!list_is_last(freelist, &freepage->lru)) { lru 1197 mm/compaction.c list_cut_before(&sublist, freelist, &freepage->lru); lru 1214 mm/compaction.c if (!list_is_first(freelist, &freepage->lru)) { lru 1215 mm/compaction.c list_cut_position(&sublist, freelist, &freepage->lru); lru 1330 mm/compaction.c list_for_each_entry_reverse(freepage, freelist, lru) { lru 1375 mm/compaction.c list_add_tail(&page->lru, &cc->freepages); lru 1562 mm/compaction.c freepage = list_entry(cc->freepages.next, struct page, lru); lru 1563 mm/compaction.c list_del(&freepage->lru); lru 1578 mm/compaction.c list_add(&page->lru, &cc->freepages); lru 1686 mm/compaction.c list_for_each_entry(freepage, freelist, lru) { lru 1699 mm/compaction.c if (list_is_last(freelist, &freepage->lru)) lru 1487 mm/gup.c list_add_tail(&head->lru, &cma_page_list); lru 869 mm/hugetlb.c list_move(&page->lru, &h->hugepage_freelists[nid]); lru 878 mm/hugetlb.c list_for_each_entry(page, &h->hugepage_freelists[nid], lru) lru 885 mm/hugetlb.c if (&h->hugepage_freelists[nid] == &page->lru) lru 887 mm/hugetlb.c list_move(&page->lru, &h->hugepage_activelist); lru 1306 mm/hugetlb.c list_del(&page->lru); lru 1311 mm/hugetlb.c list_del(&page->lru); lru 1372 mm/hugetlb.c INIT_LIST_HEAD(&page->lru); lru 1574 mm/hugetlb.c struct page, lru); lru 1575 mm/hugetlb.c list_del(&page->lru); lru 1629 mm/hugetlb.c list_del(&head->lru); lru 1843 mm/hugetlb.c list_add(&page->lru, &surplus_list); lru 1878 mm/hugetlb.c list_for_each_entry_safe(page, tmp, &surplus_list, lru) { lru 1893 mm/hugetlb.c list_for_each_entry_safe(page, tmp, &surplus_list, lru) lru 2182 mm/hugetlb.c list_move(&page->lru, &h->hugepage_activelist); lru 2372 mm/hugetlb.c list_for_each_entry_safe(page, next, freel, lru) { lru 2377 mm/hugetlb.c list_del(&page->lru); lru 5136 mm/hugetlb.c 
list_move_tail(&page->lru, list); lru 5147 mm/hugetlb.c list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); lru 171 mm/hugetlb_cgroup.c list_for_each_entry(page, &h->hugepage_activelist, lru) lru 431 mm/hugetlb_cgroup.c list_move(&newhpage->lru, &h->hugepage_activelist); lru 1688 mm/khugepaged.c list_add_tail(&page->lru, &pagelist); lru 1727 mm/khugepaged.c list_for_each_entry_safe(page, tmp, &pagelist, lru) { lru 1734 mm/khugepaged.c list_del(&page->lru); lru 1780 mm/khugepaged.c struct page, lru); lru 1793 mm/khugepaged.c list_del(&page->lru); lru 21 mm/list_lru.c static void list_lru_register(struct list_lru *lru) lru 24 mm/list_lru.c list_add(&lru->list, &list_lrus); lru 28 mm/list_lru.c static void list_lru_unregister(struct list_lru *lru) lru 31 mm/list_lru.c list_del(&lru->list); lru 35 mm/list_lru.c static int lru_shrinker_id(struct list_lru *lru) lru 37 mm/list_lru.c return lru->shrinker_id; lru 40 mm/list_lru.c static inline bool list_lru_memcg_aware(struct list_lru *lru) lru 42 mm/list_lru.c return lru->memcg_aware; lru 56 mm/list_lru.c return memcg_lrus->lru[idx]; lru 57 mm/list_lru.c return &nlru->lru; lru 74 mm/list_lru.c struct list_lru_one *l = &nlru->lru; lru 91 mm/list_lru.c static void list_lru_register(struct list_lru *lru) lru 95 mm/list_lru.c static void list_lru_unregister(struct list_lru *lru) lru 99 mm/list_lru.c static int lru_shrinker_id(struct list_lru *lru) lru 104 mm/list_lru.c static inline bool list_lru_memcg_aware(struct list_lru *lru) lru 112 mm/list_lru.c return &nlru->lru; lru 121 mm/list_lru.c return &nlru->lru; lru 125 mm/list_lru.c bool list_lru_add(struct list_lru *lru, struct list_head *item) lru 128 mm/list_lru.c struct list_lru_node *nlru = &lru->node[nid]; lru 139 mm/list_lru.c lru_shrinker_id(lru)); lru 149 mm/list_lru.c bool list_lru_del(struct list_lru *lru, struct list_head *item) lru 152 mm/list_lru.c struct list_lru_node *nlru = &lru->node[nid]; lru 184 mm/list_lru.c unsigned long list_lru_count_one(struct list_lru *lru, lru 187 mm/list_lru.c struct list_lru_node *nlru = &lru->node[nid]; lru 200 mm/list_lru.c unsigned long list_lru_count_node(struct list_lru *lru, int nid) lru 204 mm/list_lru.c nlru = &lru->node[nid]; lru 268 mm/list_lru.c list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, lru 272 mm/list_lru.c struct list_lru_node *nlru = &lru->node[nid]; lru 284 mm/list_lru.c list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg, lru 288 mm/list_lru.c struct list_lru_node *nlru = &lru->node[nid]; lru 298 mm/list_lru.c unsigned long list_lru_walk_node(struct list_lru *lru, int nid, lru 305 mm/list_lru.c isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg, lru 307 mm/list_lru.c if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) { lru 309 mm/list_lru.c struct list_lru_node *nlru = &lru->node[nid]; lru 338 mm/list_lru.c kfree(memcg_lrus->lru[i]); lru 354 mm/list_lru.c memcg_lrus->lru[i] = l; lru 419 mm/list_lru.c memcpy(&new->lru, &old->lru, old_size * sizeof(void *)); lru 448 mm/list_lru.c static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) lru 452 mm/list_lru.c lru->memcg_aware = memcg_aware; lru 458 mm/list_lru.c if (memcg_init_list_lru_node(&lru->node[i])) lru 464 mm/list_lru.c if (!lru->node[i].memcg_lrus) lru 466 mm/list_lru.c memcg_destroy_list_lru_node(&lru->node[i]); lru 471 mm/list_lru.c static void memcg_destroy_list_lru(struct list_lru *lru) lru 475 mm/list_lru.c if (!list_lru_memcg_aware(lru)) lru 479 mm/list_lru.c 
memcg_destroy_list_lru_node(&lru->node[i]); lru 482 mm/list_lru.c static int memcg_update_list_lru(struct list_lru *lru, lru 487 mm/list_lru.c if (!list_lru_memcg_aware(lru)) lru 491 mm/list_lru.c if (memcg_update_list_lru_node(&lru->node[i], lru 498 mm/list_lru.c if (!lru->node[i].memcg_lrus) lru 501 mm/list_lru.c memcg_cancel_update_list_lru_node(&lru->node[i], lru 507 mm/list_lru.c static void memcg_cancel_update_list_lru(struct list_lru *lru, lru 512 mm/list_lru.c if (!list_lru_memcg_aware(lru)) lru 516 mm/list_lru.c memcg_cancel_update_list_lru_node(&lru->node[i], lru 523 mm/list_lru.c struct list_lru *lru; lru 527 mm/list_lru.c list_for_each_entry(lru, &list_lrus, list) { lru 528 mm/list_lru.c ret = memcg_update_list_lru(lru, old_size, new_size); lru 536 mm/list_lru.c list_for_each_entry_continue_reverse(lru, &list_lrus, list) lru 537 mm/list_lru.c memcg_cancel_update_list_lru(lru, old_size, new_size); lru 541 mm/list_lru.c static void memcg_drain_list_lru_node(struct list_lru *lru, int nid, lru 544 mm/list_lru.c struct list_lru_node *nlru = &lru->node[nid]; lru 562 mm/list_lru.c memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru)); lru 568 mm/list_lru.c static void memcg_drain_list_lru(struct list_lru *lru, lru 573 mm/list_lru.c if (!list_lru_memcg_aware(lru)) lru 577 mm/list_lru.c memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg); lru 582 mm/list_lru.c struct list_lru *lru; lru 585 mm/list_lru.c list_for_each_entry(lru, &list_lrus, list) lru 586 mm/list_lru.c memcg_drain_list_lru(lru, src_idx, dst_memcg); lru 590 mm/list_lru.c static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) lru 595 mm/list_lru.c static void memcg_destroy_list_lru(struct list_lru *lru) lru 600 mm/list_lru.c int __list_lru_init(struct list_lru *lru, bool memcg_aware, lru 608 mm/list_lru.c lru->shrinker_id = shrinker->id; lru 610 mm/list_lru.c lru->shrinker_id = -1; lru 614 mm/list_lru.c lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL); lru 615 mm/list_lru.c if (!lru->node) lru 619 mm/list_lru.c spin_lock_init(&lru->node[i].lock); lru 621 mm/list_lru.c lockdep_set_class(&lru->node[i].lock, key); lru 622 mm/list_lru.c init_one_lru(&lru->node[i].lru); lru 625 mm/list_lru.c err = memcg_init_list_lru(lru, memcg_aware); lru 627 mm/list_lru.c kfree(lru->node); lru 629 mm/list_lru.c lru->node = NULL; lru 633 mm/list_lru.c list_lru_register(lru); lru 640 mm/list_lru.c void list_lru_destroy(struct list_lru *lru) lru 643 mm/list_lru.c if (!lru->node) lru 648 mm/list_lru.c list_lru_unregister(lru); lru 650 mm/list_lru.c memcg_destroy_list_lru(lru); lru 651 mm/list_lru.c kfree(lru->node); lru 652 mm/list_lru.c lru->node = NULL; lru 655 mm/list_lru.c lru->shrinker_id = -1; lru 372 mm/madvise.c list_add(&page->lru, &page_list); lru 458 mm/madvise.c list_add(&page->lru, &page_list); lru 1290 mm/memcontrol.c void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, lru 1301 mm/memcontrol.c lru_size = &mz->lru_zone_size[zid][lru]; lru 1309 mm/memcontrol.c __func__, lruvec, lru, nr_pages, size)) { lru 3802 mm/memcontrol.c enum lru_list lru; lru 3806 mm/memcontrol.c for_each_lru(lru) { lru 3807 mm/memcontrol.c if (!(BIT(lru) & lru_mask)) lru 3809 mm/memcontrol.c nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru); lru 3818 mm/memcontrol.c enum lru_list lru; lru 3820 mm/memcontrol.c for_each_lru(lru) { lru 3821 mm/memcontrol.c if (!(BIT(lru) & lru_mask)) lru 3823 mm/memcontrol.c nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); lru 6757 mm/memcontrol.c page = 
list_entry(next, struct page, lru); lru 6758 mm/memcontrol.c next = page->lru.next; lru 865 mm/memory-failure.c { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty }, lru 866 mm/memory-failure.c { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean }, lru 1803 mm/memory-failure.c list_add(&page->lru, &pagelist); lru 159 mm/memory_hotplug.c INIT_LIST_HEAD(&page->lru); lru 1358 mm/memory_hotplug.c list_add_tail(&page->lru, &source); lru 1374 mm/memory_hotplug.c list_for_each_entry(page, &source, lru) { lru 980 mm/mempolicy.c list_add_tail(&head->lru, pagelist); lru 173 mm/migrate.c list_for_each_entry_safe(page, page2, l, lru) { lru 178 mm/migrate.c list_del(&page->lru); lru 1209 mm/migrate.c list_del(&page->lru); lru 1418 mm/migrate.c list_for_each_entry_safe(page, page2, from, lru) { lru 1449 mm/migrate.c list_safe_reset_next(page, page2, lru); lru 1573 mm/migrate.c list_add_tail(&head->lru, pagelist); lru 1996 mm/migrate.c list_add(&page->lru, &migratepages); lru 2002 mm/migrate.c list_del(&page->lru); lru 91 mm/mmzone.c enum lru_list lru; lru 95 mm/mmzone.c for_each_lru(lru) lru 96 mm/mmzone.c INIT_LIST_HEAD(&lruvec->lists[lru]); lru 748 mm/page_alloc.c INIT_LIST_HEAD(&page->lru); lru 1281 mm/page_alloc.c page = list_last_entry(list, struct page, lru); lru 1283 mm/page_alloc.c list_del(&page->lru); lru 1289 mm/page_alloc.c list_add_tail(&page->lru, &head); lru 1312 mm/page_alloc.c list_for_each_entry_safe(page, tmp, &head, lru) { lru 1350 mm/page_alloc.c INIT_LIST_HEAD(&page->lru); lru 1402 mm/page_alloc.c INIT_LIST_HEAD(&page->lru); lru 2769 mm/page_alloc.c list_add_tail(&page->lru, list); lru 2999 mm/page_alloc.c &zone->free_area[order].free_list[t], lru) { lru 3053 mm/page_alloc.c list_add(&page->lru, &pcp->lists[migratetype]); lru 3087 mm/page_alloc.c list_for_each_entry_safe(page, next, list, lru) { lru 3090 mm/page_alloc.c list_del(&page->lru); lru 3095 mm/page_alloc.c list_for_each_entry_safe(page, next, list, lru) { lru 3229 mm/page_alloc.c page = list_first_entry(list, struct page, lru); lru 3230 mm/page_alloc.c list_del(&page->lru); lru 5106 mm/page_alloc.c int lru; lru 5108 mm/page_alloc.c for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) lru 5109 mm/page_alloc.c pages[lru] = global_node_page_state(NR_LRU_BASE + lru); lru 159 mm/pgtable-generic.c INIT_LIST_HEAD(&pgtable->lru); lru 161 mm/pgtable-generic.c list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru); lru 176 mm/pgtable-generic.c pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru, lru 177 mm/pgtable-generic.c struct page, lru); lru 179 mm/pgtable-generic.c list_del(&pgtable->lru); lru 71 mm/readahead.c list_del(&victim->lru); lru 96 mm/readahead.c list_del(&page->lru); lru 134 mm/readahead.c list_del(&page->lru); lru 200 mm/readahead.c list_add(&page->lru, &page_pool); lru 160 mm/shuffle.c list_swap(&page_i->lru, &page_j->lru); lru 132 mm/swap.c list_del(&victim->lru); lru 280 mm/swap.c int lru = page_lru_base_type(page); lru 282 mm/swap.c del_page_from_lru_list(page, lruvec, lru); lru 284 mm/swap.c lru += LRU_ACTIVE; lru 285 mm/swap.c add_page_to_lru_list(page, lruvec, lru); lru 499 mm/swap.c int lru, file; lru 514 mm/swap.c lru = page_lru_base_type(page); lru 516 mm/swap.c del_page_from_lru_list(page, lruvec, lru + active); lru 526 mm/swap.c add_page_to_lru_list(page, lruvec, lru); lru 533 mm/swap.c add_page_to_lru_list_tail(page, lruvec, lru); lru 547 mm/swap.c int lru = page_lru_base_type(page); lru 549 mm/swap.c del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE); lru 552 mm/swap.c 
add_page_to_lru_list(page, lruvec, lru);
lru 836 mm/swap.c list_add(&page->lru, &pages_to_free);
lru 883 mm/swap.c list_add_tail(&page_tail->lru, &page->lru);
lru 887 mm/swap.c list_add_tail(&page_tail->lru, list);
lru 908 mm/swap.c enum lru_list lru;
lru 943 mm/swap.c lru = page_lru(page);
lru 949 mm/swap.c lru = LRU_UNEVICTABLE;
lru 956 mm/swap.c add_page_to_lru_list(page, lruvec, lru);
lru 957 mm/swap.c trace_mm_lru_insertion(page, lru);
lru 1525 mm/swapfile.c page = list_next_entry(page, lru);
lru 3583 mm/swapfile.c INIT_LIST_HEAD(&head->lru);
lru 3588 mm/swapfile.c list_for_each_entry(list_page, &head->lru, lru) {
lru 3610 mm/swapfile.c list_add_tail(&page->lru, &head->lru);
lru 3649 mm/swapfile.c page = list_entry(head->lru.next, struct page, lru);
lru 3661 mm/swapfile.c page = list_entry(page->lru.next, struct page, lru);
lru 3667 mm/swapfile.c page = list_entry(page->lru.next, struct page, lru);
lru 3677 mm/swapfile.c page = list_entry(page->lru.prev, struct page, lru);
lru 3682 mm/swapfile.c page = list_entry(page->lru.prev, struct page, lru);
lru 3693 mm/swapfile.c page = list_entry(page->lru.next, struct page, lru);
lru 3702 mm/swapfile.c page = list_entry(page->lru.prev, struct page, lru);
lru 3708 mm/swapfile.c page = list_entry(page->lru.prev, struct page, lru);
lru 3731 mm/swapfile.c list_for_each_entry_safe(page, next, &head->lru, lru) {
lru 3732 mm/swapfile.c list_del(&page->lru);
lru 139 mm/vmscan.c if ((_page)->lru.prev != _base) { \
lru 142 mm/vmscan.c prev = lru_to_page(&(_page->lru)); \
lru 153 mm/vmscan.c if ((_page)->lru.prev != _base) { \
lru 156 mm/vmscan.c prev = lru_to_page(&(_page->lru)); \
lru 352 mm/vmscan.c unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
lru 359 mm/vmscan.c lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
lru 361 mm/vmscan.c lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
lru 371 mm/vmscan.c size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
lru 374 mm/vmscan.c NR_ZONE_LRU_BASE + lru);
lru 1145 mm/vmscan.c list_del(&page->lru);
lru 1264 mm/vmscan.c list_add_tail(&page->lru, page_list);
lru 1493 mm/vmscan.c list_add(&page->lru, &free_pages);
lru 1520 mm/vmscan.c list_add(&page->lru, &ret_pages);
lru 1549 mm/vmscan.c list_for_each_entry_safe(page, next, page_list, lru) {
lru 1553 mm/vmscan.c list_move(&page->lru, &clean_pages);
lru 1647 mm/vmscan.c enum lru_list lru, unsigned long *nr_zone_taken)
lru 1655 mm/vmscan.c __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
lru 1657 mm/vmscan.c mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
lru 1686 mm/vmscan.c enum lru_list lru)
lru 1688 mm/vmscan.c struct list_head *src = &lruvec->lists[lru];
lru 1711 mm/vmscan.c list_move(&page->lru, &pages_skipped);
lru 1731 mm/vmscan.c list_move(&page->lru, dst);
lru 1736 mm/vmscan.c list_move(&page->lru, src);
lru 1765 mm/vmscan.c total_scan, skipped, nr_taken, mode, lru);
lru 1766 mm/vmscan.c update_lru_sizes(lruvec, lru, nr_zone_taken);
lru 1810 mm/vmscan.c int lru = page_lru(page);
lru 1813 mm/vmscan.c del_page_from_lru_list(page, lruvec, lru);
lru 1885 mm/vmscan.c enum lru_list lru;
lru 1891 mm/vmscan.c list_del(&page->lru);
lru 1900 mm/vmscan.c lru = page_lru(page);
lru 1903 mm/vmscan.c update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
lru 1904 mm/vmscan.c list_move(&page->lru, &lruvec->lists[lru]);
lru 1909 mm/vmscan.c del_page_from_lru_list(page, lruvec, lru);
lru 1916 mm/vmscan.c list_add(&page->lru, &pages_to_free);
lru 1949 mm/vmscan.c struct scan_control *sc, enum lru_list lru)
lru 1956 mm/vmscan.c int file = is_file_lru(lru);
lru 1980 mm/vmscan.c &nr_scanned, sc, lru);
lru 2046 mm/vmscan.c enum lru_list lru)
lru 2058 mm/vmscan.c int file = is_file_lru(lru);
lru 2066 mm/vmscan.c &nr_scanned, sc, lru);
lru 2079 mm/vmscan.c list_del(&page->lru);
lru 2107 mm/vmscan.c list_add(&page->lru, &l_active);
lru 2114 mm/vmscan.c list_add(&page->lru, &l_inactive);
lru 2170 mm/vmscan.c list_move(&page->lru, &node_page_list);
lru 2180 mm/vmscan.c list_del(&page->lru);
lru 2194 mm/vmscan.c list_del(&page->lru);
lru 2276 mm/vmscan.c static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
lru 2279 mm/vmscan.c if (is_active_lru(lru)) {
lru 2280 mm/vmscan.c if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
lru 2281 mm/vmscan.c shrink_active_list(nr_to_scan, lruvec, sc, lru);
lru 2285 mm/vmscan.c return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
lru 2317 mm/vmscan.c enum lru_list lru;
lru 2458 mm/vmscan.c for_each_evictable_lru(lru) {
lru 2459 mm/vmscan.c int file = is_file_lru(lru);
lru 2464 mm/vmscan.c lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
lru 2556 mm/vmscan.c nr[lru] = scan;
lru 2570 mm/vmscan.c enum lru_list lru;
lru 2601 mm/vmscan.c for_each_evictable_lru(lru) {
lru 2602 mm/vmscan.c if (nr[lru]) {
lru 2603 mm/vmscan.c nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
lru 2604 mm/vmscan.c nr[lru] -= nr_to_scan;
lru 2606 mm/vmscan.c nr_reclaimed += shrink_list(lru, nr_to_scan,
lru 2638 mm/vmscan.c lru = LRU_BASE;
lru 2643 mm/vmscan.c lru = LRU_FILE;
lru 2648 mm/vmscan.c nr[lru] = 0;
lru 2649 mm/vmscan.c nr[lru + LRU_ACTIVE] = 0;
lru 2655 mm/vmscan.c lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;
lru 2656 mm/vmscan.c nr_scanned = targets[lru] - nr[lru];
lru 2657 mm/vmscan.c nr[lru] = targets[lru] * (100 - percentage) / 100;
lru 2658 mm/vmscan.c nr[lru] -= min(nr[lru], nr_scanned);
lru 2660 mm/vmscan.c lru += LRU_ACTIVE;
lru 2661 mm/vmscan.c nr_scanned = targets[lru] - nr[lru];
lru 2662 mm/vmscan.c nr[lru] = targets[lru] * (100 - percentage) / 100;
lru 2663 mm/vmscan.c nr[lru] -= min(nr[lru], nr_scanned);
lru 4363 mm/vmscan.c enum lru_list lru = page_lru_base_type(page);
lru 4368 mm/vmscan.c add_page_to_lru_list(page, lruvec, lru);
lru 450 mm/workingset.c struct list_lru_one *lru,
lru 480 mm/workingset.c list_lru_isolate(lru, item);
lru 157 mm/z3fold.c struct list_head lru;
lru 304 mm/z3fold.c INIT_LIST_HEAD(&page->lru);
lru 450 mm/z3fold.c if (!list_empty(&page->lru))
lru 451 mm/z3fold.c list_del_init(&page->lru);
lru 790 mm/z3fold.c INIT_LIST_HEAD(&pool->lru);
lru 973 mm/z3fold.c if (!list_empty(&page->lru))
lru 974 mm/z3fold.c list_del(&page->lru);
lru 976 mm/z3fold.c list_add(&page->lru, &pool->lru);
lru 1015 mm/z3fold.c list_del(&page->lru);
lru 1127 mm/z3fold.c if (list_empty(&pool->lru)) {
lru 1131 mm/z3fold.c list_for_each_prev(pos, &pool->lru) {
lru 1132 mm/z3fold.c page = list_entry(pos, struct page, lru);
lru 1165 mm/z3fold.c list_del_init(&page->lru);
lru 1221 mm/z3fold.c list_add(&page->lru, &pool->lru);
lru 1237 mm/z3fold.c list_add(&page->lru, &pool->lru);
lru 1362 mm/z3fold.c if (!list_empty(&page->lru))
lru 1363 mm/z3fold.c list_del(&page->lru);
lru 1425 mm/z3fold.c list_add(&newpage->lru, &pool->lru);
lru 1448 mm/z3fold.c INIT_LIST_HEAD(&page->lru);
lru 1454 mm/z3fold.c list_add(&page->lru, &pool->lru);
lru 98 mm/zbud.c struct list_head lru;
lru 117 mm/zbud.c struct list_head lru;
lru 245 mm/zbud.c INIT_LIST_HEAD(&zhdr->lru);
lru 318 mm/zbud.c INIT_LIST_HEAD(&pool->lru);
lru 410 mm/zbud.c if (!list_empty(&zhdr->lru))
lru 411 mm/zbud.c list_del(&zhdr->lru);
lru 412 mm/zbud.c list_add(&zhdr->lru, &pool->lru);
lru 455 mm/zbud.c list_del(&zhdr->lru);
lru 509 mm/zbud.c if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) ||
lru 515 mm/zbud.c zhdr = list_last_entry(&pool->lru, struct zbud_header, lru);
lru 516 mm/zbud.c list_del(&zhdr->lru);
lru 566 mm/zbud.c list_add(&zhdr->lru, &pool->lru);
lru 1021 net/ceph/messenger.c page = list_first_entry(&pagelist->head, struct page, lru);
lru 1081 net/ceph/messenger.c BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
lru 1082 net/ceph/messenger.c cursor->page = list_next_entry(cursor->page, lru);
lru 32 net/ceph/pagelist.c struct page *page = list_entry(pl->head.prev, struct page, lru);
lru 45 net/ceph/pagelist.c lru);
lru 46 net/ceph/pagelist.c list_del(&page->lru);
lru 61 net/ceph/pagelist.c page = list_first_entry(&pl->free_list, struct page, lru);
lru 62 net/ceph/pagelist.c list_del(&page->lru);
lru 69 net/ceph/pagelist.c list_add_tail(&page->lru, &pl->head);
lru 113 net/ceph/pagelist.c list_add_tail(&page->lru, &pl->free_list);
lru 125 net/ceph/pagelist.c struct page, lru);
lru 126 net/ceph/pagelist.c list_del(&page->lru);
lru 159 net/ceph/pagelist.c page = list_entry(pl->head.prev, struct page, lru);
lru 161 net/ceph/pagelist.c list_move_tail(&page->lru, &pl->free_list);
lru 166 net/ceph/pagelist.c page = list_entry(pl->head.prev, struct page, lru);
lru 105 samples/bpf/test_lru_dist.c static void pfect_lru_init(struct pfect_lru *lru, unsigned int lru_size,
lru 108 samples/bpf/test_lru_dist.c lru->map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
lru 112 samples/bpf/test_lru_dist.c assert(lru->map_fd != -1);
lru 114 samples/bpf/test_lru_dist.c lru->free_nodes = malloc(lru_size * sizeof(struct pfect_lru_node));
lru 115 samples/bpf/test_lru_dist.c assert(lru->free_nodes);
lru 117 samples/bpf/test_lru_dist.c INIT_LIST_HEAD(&lru->list);
lru 118 samples/bpf/test_lru_dist.c lru->cur_size = 0;
lru 119 samples/bpf/test_lru_dist.c lru->lru_size = lru_size;
lru 120 samples/bpf/test_lru_dist.c lru->nr_unique = lru->nr_misses = lru->total = 0;
lru 123 samples/bpf/test_lru_dist.c static void pfect_lru_destroy(struct pfect_lru *lru)
lru 125 samples/bpf/test_lru_dist.c close(lru->map_fd);
lru 126 samples/bpf/test_lru_dist.c free(lru->free_nodes);
lru 129 samples/bpf/test_lru_dist.c static int pfect_lru_lookup_or_insert(struct pfect_lru *lru,
lru 135 samples/bpf/test_lru_dist.c lru->total++;
lru 136 samples/bpf/test_lru_dist.c if (!bpf_map_lookup_elem(lru->map_fd, &key, &node)) {
lru 138 samples/bpf/test_lru_dist.c list_move(&node->list, &lru->list);
lru 144 samples/bpf/test_lru_dist.c if (lru->cur_size < lru->lru_size) {
lru 145 samples/bpf/test_lru_dist.c node = &lru->free_nodes[lru->cur_size++];
lru 150 samples/bpf/test_lru_dist.c node = list_last_entry(&lru->list,
lru 153 samples/bpf/test_lru_dist.c bpf_map_update_elem(lru->map_fd, &node->key, &null_node, BPF_EXIST);
lru 157 samples/bpf/test_lru_dist.c list_move(&node->list, &lru->list);
lru 159 samples/bpf/test_lru_dist.c lru->nr_misses++;
lru 161 samples/bpf/test_lru_dist.c assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_EXIST));
lru 163 samples/bpf/test_lru_dist.c lru->nr_unique++;
lru 164 samples/bpf/test_lru_dist.c assert(!bpf_map_update_elem(lru->map_fd, &key, &node, BPF_NOEXIST));
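
The hits above repeat one idiom: an object embeds a struct list_head named lru, callers link it onto a private list with list_add()/list_add_tail(), unlink it with list_del(), and recover the containing object with list_entry()/list_first_entry(). The sketch below is only an illustration of that intrusive-list pattern, not kernel code: struct item, the pool list, and the helper bodies are invented userspace stand-ins for the <linux/list.h> calls that appear in the listing.

/*
 * Minimal userspace sketch of the intrusive-list idiom behind the
 * "lru" list_head fields above.  struct item and the pool list are
 * hypothetical; only the API shapes mirror the kernel helpers seen
 * in the listing (INIT_LIST_HEAD, list_add, list_del, list_entry).
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	/* insert right after head, so the head end is most recently used */
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = entry;
}

/* Hypothetical object embedding its own linkage, like the lru fields above. */
struct item {
	int id;
	struct list_head lru;
};

int main(void)
{
	struct list_head pool = LIST_HEAD_INIT(pool);
	struct item a = { .id = 1 }, b = { .id = 2 };

	INIT_LIST_HEAD(&a.lru);
	INIT_LIST_HEAD(&b.lru);

	/* a first, then b: b becomes the most recently added entry */
	list_add(&a.lru, &pool);
	list_add(&b.lru, &pool);

	/* evict from the tail and recover the object from its lru member */
	struct item *victim = list_entry(pool.prev, struct item, lru);
	list_del(&victim->lru);
	printf("evicted item %d\n", victim->id);	/* prints: evicted item 1 */
	return 0;
}

The value of the idiom, and the reason it recurs in every subsystem listed here, is that the linkage lives inside the object itself: the same structure can be threaded onto whichever list its current owner needs (a pool->lru eviction list, a pages_to_free batch, a pl->head page chain) without allocating any separate list node.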