/linux-4.4.14/mm/ |
D | list_lru.c | 19 static void list_lru_register(struct list_lru *lru) in list_lru_register() argument 22 list_add(&lru->list, &list_lrus); in list_lru_register() 26 static void list_lru_unregister(struct list_lru *lru) in list_lru_unregister() argument 29 list_del(&lru->list); in list_lru_unregister() 33 static void list_lru_register(struct list_lru *lru) in list_lru_register() argument 37 static void list_lru_unregister(struct list_lru *lru) in list_lru_unregister() argument 43 static inline bool list_lru_memcg_aware(struct list_lru *lru) in list_lru_memcg_aware() argument 49 return !!lru->node[0].memcg_lrus; in list_lru_memcg_aware() 61 return nlru->memcg_lrus->lru[idx]; in list_lru_from_memcg_idx() 63 return &nlru->lru; in list_lru_from_memcg_idx() [all …]
|
D | vmscan.c | 109 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 114 if ((_page)->lru.prev != _base) { \ 117 prev = lru_to_page(&(_page->lru)); \ 128 if ((_page)->lru.prev != _base) { \ 131 prev = lru_to_page(&(_page->lru)); \ 215 static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru) in get_lru_size() argument 218 return mem_cgroup_get_lru_size(lruvec, lru); in get_lru_size() 220 return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru); in get_lru_size() 913 list_del(&page->lru); in shrink_page_list() 1025 list_add_tail(&page->lru, page_list); in shrink_page_list() [all …]
|
D | zbud.c | 97 struct list_head lru; member 116 struct list_head lru; member 244 INIT_LIST_HEAD(&zhdr->lru); in init_zbud_page() 317 INIT_LIST_HEAD(&pool->lru); in zbud_create_pool() 409 if (!list_empty(&zhdr->lru)) in zbud_alloc() 410 list_del(&zhdr->lru); in zbud_alloc() 411 list_add(&zhdr->lru, &pool->lru); in zbud_alloc() 454 list_del(&zhdr->lru); in zbud_free() 511 if (!pool->ops || !pool->ops->evict || list_empty(&pool->lru) || in zbud_reclaim_page() 517 zhdr = list_tail_entry(&pool->lru, struct zbud_header, lru); in zbud_reclaim_page() [all …]
|
D | swap.c | 359 victim = list_entry(pages->prev, struct page, lru); in put_pages_list() 360 list_del(&victim->lru); in put_pages_list() 453 enum lru_list lru = page_lru_base_type(page); in pagevec_move_tail_fn() local 454 list_move_tail(&page->lru, &lruvec->lists[lru]); in pagevec_move_tail_fn() 507 int lru = page_lru_base_type(page); in __activate_page() local 509 del_page_from_lru_list(page, lruvec, lru); in __activate_page() 511 lru += LRU_ACTIVE; in __activate_page() 512 add_page_to_lru_list(page, lruvec, lru); in __activate_page() 759 int lru, file; in lru_deactivate_file_fn() local 774 lru = page_lru_base_type(page); in lru_deactivate_file_fn() [all …]
|
D | pgtable-generic.c | 162 INIT_LIST_HEAD(&pgtable->lru); in pgtable_trans_huge_deposit() 164 list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru); in pgtable_trans_huge_deposit() 179 if (list_empty(&pgtable->lru)) in pgtable_trans_huge_withdraw() 182 pmd_huge_pte(mm, pmdp) = list_entry(pgtable->lru.next, in pgtable_trans_huge_withdraw() 183 struct page, lru); in pgtable_trans_huge_withdraw() 184 list_del(&pgtable->lru); in pgtable_trans_huge_withdraw()
|
D | mmzone.c | 90 enum lru_list lru; in lruvec_init() local 94 for_each_lru(lru) in lruvec_init() 95 INIT_LIST_HEAD(&lruvec->lists[lru]); in lruvec_init()
|
D | balloon_compaction.c | 65 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { in balloon_page_dequeue() 116 list_del(&page->lru); in __isolate_balloon_page() 128 list_add(&page->lru, &b_dev_info->pages); in __putback_balloon_page()
|
D | readahead.c | 35 #define list_to_page(head) (list_entry((head)->prev, struct page, lru)) 68 list_del(&victim->lru); in read_cache_pages_invalidate_pages() 91 list_del(&page->lru); in read_cache_pages() 129 list_del(&page->lru); in read_pages() 188 list_add(&page->lru, &page_pool); in __do_page_cache_readahead()
|
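The readahead entry above builds a private list of pages chained through page->lru and hands it to the filesystem, which unlinks each page and inserts it into the page cache (compare read_pages() here with mpage_readpages() and ext4_mpage_readpages() further down this listing). Below is a hedged, simplified sketch of that consumption loop; my_readpages() and the elided I/O submission step are hypothetical, and the real ->readpages() hook also takes a struct file argument.

/*
 * Hedged sketch: consuming a readahead page list threaded through page->lru.
 * Only add_to_page_cache_lru()/page_cache_release() are real helpers from
 * this tree; everything named my_* is illustrative.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/list.h>

static int my_readpages(struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		/* Pages arrive in reverse order; take them from the tail. */
		struct page *page = list_entry(pages->prev, struct page, lru);

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping, page->index,
					   GFP_KERNEL)) {
			/* ... submit the actual read for this page ... */
		}
		page_cache_release(page);	/* drop the readahead reference */
	}
	BUG_ON(!list_empty(pages));
	return 0;
}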
D | swapfile.c | 906 page = list_entry(page->lru.next, struct page, lru); in swp_swapcount() 2828 INIT_LIST_HEAD(&head->lru); in add_swap_count_continuation() 2833 list_for_each_entry(list_page, &head->lru, lru) { in add_swap_count_continuation() 2855 list_add_tail(&page->lru, &head->lru); in add_swap_count_continuation() 2887 page = list_entry(head->lru.next, struct page, lru); in swap_count_continued() 2899 page = list_entry(page->lru.next, struct page, lru); in swap_count_continued() 2905 page = list_entry(page->lru.next, struct page, lru); in swap_count_continued() 2913 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued() 2918 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued() 2929 page = list_entry(page->lru.next, struct page, lru); in swap_count_continued() [all …]
|
D | zsmalloc.c | 673 list_add_tail(&page->lru, &(*head)->lru); in insert_zspage() 694 if (list_empty(&(*head)->lru)) in remove_zspage() 697 *head = (struct page *)list_entry((*head)->lru.next, in remove_zspage() 698 struct page, lru); in remove_zspage() 700 list_del_init(&page->lru); in remove_zspage() 793 next = list_entry(page->lru.next, struct page, lru); in get_next_page() 902 list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) { in free_zspage() 903 list_del(&nextp->lru); in free_zspage() 981 INIT_LIST_HEAD(&page->lru); in alloc_zspage() 993 list_add(&page->lru, &prev_page->lru); in alloc_zspage()
|
D | slab.c | 707 list_for_each_entry(page, &n->slabs_full, lru) in recheck_pfmemalloc_active() 711 list_for_each_entry(page, &n->slabs_partial, lru) in recheck_pfmemalloc_active() 715 list_for_each_entry(page, &n->slabs_free, lru) in recheck_pfmemalloc_active() 1401 BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) < in kmem_cache_init() 1554 list_for_each_entry(page, &n->slabs_full, lru) { in slab_out_of_memory() 1558 list_for_each_entry(page, &n->slabs_partial, lru) { in slab_out_of_memory() 1562 list_for_each_entry(page, &n->slabs_free, lru) in slab_out_of_memory() 1909 list_for_each_entry_safe(page, n, list, lru) { in slabs_destroy() 1910 list_del(&page->lru); in slabs_destroy() 2379 page = list_entry(p, struct page, lru); in drain_freelist() [all …]
|
D | compaction.c | 47 list_for_each_entry_safe(page, next, freelist, lru) { in release_freepages() 49 list_del(&page->lru); in release_freepages() 62 list_for_each_entry(page, list, lru) { in map_pages() 480 list_add(&page->lru, freelist); in isolate_freepages_block() 623 list_for_each_entry(page, &cc->migratepages, lru) in acct_isolated() 814 list_add(&page->lru, migratelist); in isolate_migratepages_block() 1057 freepage = list_entry(cc->freepages.next, struct page, lru); in compaction_alloc() 1058 list_del(&freepage->lru); in compaction_alloc() 1073 list_add(&page->lru, &cc->freepages); in compaction_free()
|
D | slob.c | 114 list_add(&sp->lru, list); in set_slob_page_free() 120 list_del(&sp->lru); in clear_slob_page_free() 285 list_for_each_entry(sp, slob_list, lru) { in slob_alloc() 299 prev = sp->lru.prev; in slob_alloc() 325 INIT_LIST_HEAD(&sp->lru); in slob_alloc()
|
D | workingset.c | 305 struct list_lru_one *lru, in shadow_lru_isolate() argument 336 list_lru_isolate(lru, item); in shadow_lru_isolate()
|
D | memory-failure.c | 776 #define lru (1UL << PG_lru) macro 812 { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty }, 813 { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean }, 826 #undef lru 1696 list_add(&page->lru, &pagelist); in __soft_offline_page() 1701 list_del(&page->lru); in __soft_offline_page()
|
D | page_alloc.c | 544 INIT_LIST_HEAD(&page->lru); in set_page_guard() 694 list_del(&buddy->lru); in __free_one_page() 746 list_add_tail(&page->lru, in __free_one_page() 752 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page() 835 page = list_entry(list->prev, struct page, lru); in free_pcppages_bulk() 837 list_del(&page->lru); in free_pcppages_bulk() 908 INIT_LIST_HEAD(&page->lru); in __init_single_page() 966 INIT_LIST_HEAD(&page->lru); in reserve_bootmem_region() 1345 list_add(&page[size].lru, &area->free_list[migratetype]); in expand() 1444 struct page, lru); in __rmqueue_smallest() [all …]
|
D | hugetlb.c | 841 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page() 850 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) in dequeue_huge_page_node() 857 if (&h->hugepage_freelists[nid] == &page->lru) in dequeue_huge_page_node() 859 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node() 1240 list_del(&page->lru); in free_huge_page() 1253 INIT_LIST_HEAD(&page->lru); in prep_new_huge_page() 1394 struct page, lru); in free_pool_huge_page() 1395 list_del(&page->lru); in free_pool_huge_page() 1421 list_del(&page->lru); in dissolve_free_huge_page() 1575 INIT_LIST_HEAD(&page->lru); in __alloc_buddy_huge_page() [all …]
|
D | migrate.c | 88 list_for_each_entry_safe(page, page2, l, lru) { in putback_movable_pages() 93 list_del(&page->lru); in putback_movable_pages() 962 list_del(&page->lru); in unmap_and_move() 1147 list_for_each_entry_safe(page, page2, from, lru) { in migrate_pages() 1289 list_add_tail(&page->lru, &pagelist); in do_move_page_to_node_array() 1709 list_add(&page->lru, &migratepages); in migrate_misplaced_page() 1715 list_del(&page->lru); in migrate_misplaced_page()
|
D | hugetlb_cgroup.c | 155 list_for_each_entry(page, &h->hugepage_activelist, lru) in hugetlb_cgroup_css_offline() 414 list_move(&newhpage->lru, &h->hugepage_activelist); in hugetlb_cgroup_migrate()
|
D | slub.c | 973 list_add(&page->lru, &n->full); in add_full() 982 list_del(&page->lru); in remove_full() 1533 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) 1542 page = container_of((struct list_head *)h, struct page, lru); in rcu_free_slab() 1581 list_add_tail(&page->lru, &n->partial); in __add_partial() 1583 list_add(&page->lru, &n->partial); in __add_partial() 1596 list_del(&page->lru); in __remove_partial() 1677 list_for_each_entry_safe(page, page2, &n->partial, lru) { in get_partial_node() 2212 list_for_each_entry(page, &n->partial, lru) in count_partial() 3451 list_for_each_entry_safe(page, h, &n->partial, lru) { in free_partial() [all …]
|
D | memory_hotplug.c | 161 page->lru.next = (struct list_head *) type; in get_page_bootmem() 171 type = (unsigned long) page->lru.next; in put_page_bootmem() 178 INIT_LIST_HEAD(&page->lru); in put_page_bootmem() 1470 list_add_tail(&page->lru, &source); in do_migrate_range()
|
D | memcontrol.c | 739 enum lru_list lru; in mem_cgroup_node_nr_lru_pages() local 741 for_each_lru(lru) { in mem_cgroup_node_nr_lru_pages() 742 if (!(BIT(lru) & lru_mask)) in mem_cgroup_node_nr_lru_pages() 745 nr += mz->lru_size[lru]; in mem_cgroup_node_nr_lru_pages() 1125 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, in mem_cgroup_update_lru_size() argument 1135 lru_size = mz->lru_size + lru; in mem_cgroup_update_lru_size() 5503 page = list_entry(next, struct page, lru); in uncharge_list() 5504 next = page->lru.next; in uncharge_list() 5564 INIT_LIST_HEAD(&page->lru); in mem_cgroup_uncharge() 5565 uncharge_list(&page->lru); in mem_cgroup_uncharge()
|
D | sparse.c | 666 magic = (unsigned long) page->lru.next; in free_map_bootmem()
|
D | mempolicy.c | 932 list_add_tail(&page->lru, pagelist); in migrate_page_add()
|
/linux-4.4.14/include/linux/ |
D | list_lru.h | 35 struct list_lru_one *lru[0]; member 42 struct list_lru_one lru; member 56 void list_lru_destroy(struct list_lru *lru); 57 int __list_lru_init(struct list_lru *lru, bool memcg_aware, 60 #define list_lru_init(lru) __list_lru_init((lru), false, NULL) argument 61 #define list_lru_init_key(lru, key) __list_lru_init((lru), false, (key)) argument 62 #define list_lru_init_memcg(lru) __list_lru_init((lru), true, NULL) argument 83 bool list_lru_add(struct list_lru *lru, struct list_head *item); 96 bool list_lru_del(struct list_lru *lru, struct list_head *item); 108 unsigned long list_lru_count_one(struct list_lru *lru, [all …]
|
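The list_lru declarations above (list_lru_init(), list_lru_add(), list_lru_del(), list_lru_destroy()) are the API that the dcache, inode, XFS and GFS2 entries later in this listing build their shrinker-visible LRUs on. A minimal, hedged sketch of a consumer follows; "my_object", "my_cache_lru" and the init/teardown helpers are hypothetical names, only the list_lru_* calls come from this header.

/*
 * Hedged sketch: minimal use of the list_lru API declared above.
 */
#include <linux/list_lru.h>
#include <linux/list.h>

struct my_object {
	struct list_head lru_link;	/* linked onto the list_lru */
	/* ... object payload ... */
};

static struct list_lru my_cache_lru;

static int my_cache_init(void)
{
	/* list_lru_init() is the non-memcg-aware variant (see the macro above). */
	return list_lru_init(&my_cache_lru);
}

static void my_object_make_reclaimable(struct my_object *obj)
{
	/* Returns true if the item was not already on the LRU. */
	list_lru_add(&my_cache_lru, &obj->lru_link);
}

static void my_object_reuse(struct my_object *obj)
{
	/* Take the object back off the LRU before handing it out again. */
	list_lru_del(&my_cache_lru, &obj->lru_link);
}

static void my_cache_exit(void)
{
	list_lru_destroy(&my_cache_lru);
}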
D | mm_inline.h | 26 struct lruvec *lruvec, enum lru_list lru) in add_page_to_lru_list() argument 29 mem_cgroup_update_lru_size(lruvec, lru, nr_pages); in add_page_to_lru_list() 30 list_add(&page->lru, &lruvec->lists[lru]); in add_page_to_lru_list() 31 __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages); in add_page_to_lru_list() 35 struct lruvec *lruvec, enum lru_list lru) in del_page_from_lru_list() argument 38 mem_cgroup_update_lru_size(lruvec, lru, -nr_pages); in del_page_from_lru_list() 39 list_del(&page->lru); in del_page_from_lru_list() 40 __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages); in del_page_from_lru_list() 67 enum lru_list lru; in page_off_lru() local 71 lru = LRU_UNEVICTABLE; in page_off_lru() [all …]
|
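add_page_to_lru_list()/del_page_from_lru_list() above are the building blocks of the page-activation sequence visible in the mm/swap.c entry earlier (__activate_page). Below is a hedged sketch of that move, assuming the caller already holds the zone's lru_lock and has looked up the lruvec; activate_one_page() is a hypothetical wrapper, while the helpers and flag tests are from this tree.

/*
 * Hedged sketch: move a page from an inactive LRU list to the matching
 * active one, mirroring the __activate_page() pattern in mm/swap.c.
 */
#include <linux/mm.h>
#include <linux/mm_inline.h>

static void activate_one_page(struct page *page, struct lruvec *lruvec)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page); /* anon or file base list */

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;	/* e.g. LRU_INACTIVE_FILE -> LRU_ACTIVE_FILE */
		add_page_to_lru_list(page, lruvec, lru);
	}
}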
D | mmzone.h | 184 #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) argument 186 #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) argument 188 static inline int is_file_lru(enum lru_list lru) in is_file_lru() argument 190 return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); in is_file_lru() 193 static inline int is_active_lru(enum lru_list lru) in is_active_lru() argument 195 return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); in is_active_lru() 198 static inline int is_unevictable_lru(enum lru_list lru) in is_unevictable_lru() argument 200 return (lru == LRU_UNEVICTABLE); in is_unevictable_lru()
|
D | balloon_compaction.h | 129 list_add(&page->lru, &balloon->pages); in balloon_page_insert() 146 list_del(&page->lru); in balloon_page_delete() 170 list_add(&page->lru, &balloon->pages); in balloon_page_insert() 176 list_del(&page->lru); in balloon_page_delete()
|
D | memcontrol.h | 360 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 378 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) in mem_cgroup_get_lru_size() argument 383 return mz->lru_size[lru]; in mem_cgroup_get_lru_size() 599 mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) in mem_cgroup_get_lru_size() argument 605 mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, in mem_cgroup_update_lru_size() argument
|
D | lru_cache.h | 178 struct list_head lru; member
|
D | page-flags.h | 214 PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
|
D | mm_types.h | 122 struct list_head lru; /* Pageout list, eg. active_list member
|
D | nfs_fs.h | 51 struct list_head lru; member
|
/linux-4.4.14/net/ceph/ |
D | pagelist.c | 11 struct page *page = list_entry(pl->head.prev, struct page, lru); in ceph_pagelist_unmap_tail() 24 lru); in ceph_pagelist_release() 25 list_del(&page->lru); in ceph_pagelist_release() 40 page = list_first_entry(&pl->free_list, struct page, lru); in ceph_pagelist_addpage() 41 list_del(&page->lru); in ceph_pagelist_addpage() 48 list_add_tail(&page->lru, &pl->head); in ceph_pagelist_addpage() 92 list_add_tail(&page->lru, &pl->free_list); in ceph_pagelist_reserve() 104 struct page, lru); in ceph_pagelist_free_reserve() 105 list_del(&page->lru); in ceph_pagelist_free_reserve() 138 page = list_entry(pl->head.prev, struct page, lru); in ceph_pagelist_truncate() [all …]
|
D | messenger.c | 1009 page = list_first_entry(&pagelist->head, struct page, lru); in ceph_msg_data_pagelist_cursor_init() 1069 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head)); in ceph_msg_data_pagelist_advance() 1070 cursor->page = list_entry_next(cursor->page, lru); in ceph_msg_data_pagelist_advance()
|
/linux-4.4.14/include/trace/events/ |
D | pagemap.h | 31 int lru 34 TP_ARGS(page, lru), 39 __field(int, lru ) 46 __entry->lru = lru; 54 __entry->lru,
|
/linux-4.4.14/arch/mips/mm/ |
D | cerr-sb1.c | 339 uint8_t lru; in extract_ic() local 361 lru = (taghi >> 14) & 0xff; in extract_ic() 365 (lru & 0x3), in extract_ic() 366 ((lru >> 2) & 0x3), in extract_ic() 367 ((lru >> 4) & 0x3), in extract_ic() 368 ((lru >> 6) & 0x3)); in extract_ic() 494 uint8_t ecc, lru; in extract_dc() local 516 lru = (taghi >> 14) & 0xff; in extract_dc() 520 (lru & 0x3), in extract_dc() 521 ((lru >> 2) & 0x3), in extract_dc() [all …]
|
/linux-4.4.14/fs/proc/ |
D | meminfo.c | 36 int lru; in meminfo_proc_show() local 51 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) in meminfo_proc_show() 52 pages[lru] = global_page_state(NR_LRU_BASE + lru); in meminfo_proc_show()
|
/linux-4.4.14/drivers/staging/android/ion/ |
D | ion_page_pool.c | 48 list_add_tail(&page->lru, &pool->high_items); in ion_page_pool_add() 51 list_add_tail(&page->lru, &pool->low_items); in ion_page_pool_add() 64 page = list_first_entry(&pool->high_items, struct page, lru); in ion_page_pool_remove() 68 page = list_first_entry(&pool->low_items, struct page, lru); in ion_page_pool_remove() 72 list_del(&page->lru); in ion_page_pool_remove()
|
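ion_page_pool.c above, like the Xen balloon and blkback entries later in this listing, reuses the otherwise idle page->lru field to thread free pages onto a driver-private pool. A hedged sketch of that idiom follows; the my_pool names and the locking granularity are hypothetical.

/*
 * Hedged sketch: driver-private free-page pool linked through page->lru.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(my_pool);
static DEFINE_SPINLOCK(my_pool_lock);

static void my_pool_put(struct page *page)
{
	spin_lock(&my_pool_lock);
	list_add_tail(&page->lru, &my_pool);	/* page->lru is free for driver use here */
	spin_unlock(&my_pool_lock);
}

static struct page *my_pool_get(void)
{
	struct page *page = NULL;

	spin_lock(&my_pool_lock);
	if (!list_empty(&my_pool)) {
		page = list_first_entry(&my_pool, struct page, lru);
		list_del(&page->lru);
	}
	spin_unlock(&my_pool_lock);

	/* Fall back to the allocator when the pool is empty. */
	return page ? page : alloc_page(GFP_KERNEL);
}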
D | ion_system_heap.c | 148 list_add_tail(&page->lru, &pages); in ion_system_heap_allocate() 161 list_for_each_entry_safe(page, tmp_page, &pages, lru) { in ion_system_heap_allocate() 164 list_del(&page->lru); in ion_system_heap_allocate() 173 list_for_each_entry_safe(page, tmp_page, &pages, lru) in ion_system_heap_allocate()
|
/linux-4.4.14/drivers/xen/ |
D | balloon.c | 174 list_add_tail(&page->lru, &ballooned_pages); in __balloon_append() 177 list_add(&page->lru, &ballooned_pages); in __balloon_append() 197 page = list_entry(ballooned_pages.next, struct page, lru); in balloon_retrieve() 200 list_del(&page->lru); in balloon_retrieve() 214 struct list_head *next = page->lru.next; in balloon_next_page() 217 return list_entry(next, struct page, lru); in balloon_next_page() 426 page = list_first_entry_or_null(&ballooned_pages, struct page, lru); in increase_reservation() 507 list_add(&page->lru, &pages); in decrease_reservation() 524 list_for_each_entry_safe(page, tmp, &pages, lru) { in decrease_reservation() 547 list_del(&page->lru); in decrease_reservation()
|
D | privcmd.c | 73 list_for_each_entry_safe(p, n, pages, lru) in free_page_list() 108 list_add_tail(&page->lru, pagelist); in gather_array() 148 page = list_entry(pos, struct page, lru); in traverse_pages() 185 page = list_entry(pos, struct page, lru); in traverse_pages_block() 259 struct page, lru); in privcmd_ioctl_mmap()
|
D | xen-scsiback.c | 227 list_add(&page[i]->lru, &scsiback_free_pages); in put_free_pages() 241 page[0] = list_first_entry(&scsiback_free_pages, struct page, lru); in get_free_page() 242 list_del(&page[0]->lru); in get_free_page()
|
/linux-4.4.14/kernel/power/ |
D | wakelock.c | 31 struct list_head lru; member 94 list_add(&wl->lru, &wakelocks_lru_list); in wakelocks_lru_add() 99 list_move(&wl->lru, &wakelocks_lru_list); in wakelocks_lru_most_recent() 110 list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) { in __wakelocks_gc() 125 list_del(&wl->lru); in __wakelocks_gc()
|
/linux-4.4.14/drivers/gpu/drm/ttm/ |
D | ttm_page_alloc.c | 328 list_for_each_entry_reverse(p, &pool->list, lru) { in ttm_page_pool_free() 336 __list_del(p->lru.prev, &pool->list); in ttm_page_pool_free() 371 __list_del(&p->lru, &pool->list); in ttm_page_pool_free() 482 list_del(&failed_pages[i]->lru); in ttm_handle_caching_state_failure() 553 list_add(&p->lru, pages); in ttm_alloc_new_pages() 615 list_for_each_entry(p, &pool->list, lru) { in ttm_page_pool_fill_locked() 701 list_add_tail(&pages[i]->lru, &pool->list); in ttm_put_pages() 765 list_for_each_entry(p, &plist, lru) { in ttm_get_pages() 771 list_for_each_entry(p, &plist, lru) { in ttm_get_pages() 786 list_for_each_entry(p, &plist, lru) { in ttm_get_pages()
|
D | ttm_bo.c | 147 BUG_ON(!list_empty(&bo->lru)); in ttm_bo_release_list() 173 BUG_ON(!list_empty(&bo->lru)); in ttm_bo_add_to_lru() 176 list_add_tail(&bo->lru, &man->lru); in ttm_bo_add_to_lru() 195 if (!list_empty(&bo->lru)) { in ttm_bo_del_from_lru() 196 list_del_init(&bo->lru); in ttm_bo_del_from_lru() 722 list_for_each_entry(bo, &man->lru, lru) { in ttm_mem_evict_first() 1122 INIT_LIST_HEAD(&bo->lru); in ttm_bo_init() 1252 while (!list_empty(&man->lru)) { in ttm_bo_force_list_clean() 1346 INIT_LIST_HEAD(&man->lru); in ttm_bo_init_mm() 1447 if (list_empty(&bdev->man[0].lru)) in ttm_bo_device_release()
|
D | ttm_bo_util.c | 462 INIT_LIST_HEAD(&fbo->lru); in ttm_buffer_object_transfer()
|
/linux-4.4.14/drivers/video/fbdev/core/ |
D | fb_defio.c | 129 list_for_each_entry(cur, &fbdefio->pagelist, lru) { in fb_deferred_io_mkwrite() 141 list_add_tail(&page->lru, &cur->lru); in fb_deferred_io_mkwrite() 188 list_for_each_entry(cur, &fbdefio->pagelist, lru) { in fb_deferred_io_work()
|
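fb_defio.c above collects written framebuffer pages on fbdefio->pagelist, again linked via page->lru, and the driver's deferred_io callback walks that list (see the xen-fbfront, udlfb and fbtft entries below). A hedged sketch of such a callback follows; the flush step is only indicated by a debug print, and my_deferred_io/my_defio are hypothetical.

/*
 * Hedged sketch: a fb deferred-io handler walking the dirty-page list.
 */
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/list.h>

static void my_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	struct page *page;

	list_for_each_entry(page, pagelist, lru) {
		unsigned long offset = page->index << PAGE_SHIFT;

		/* Push this dirty framebuffer page out to the device here. */
		pr_debug("flush fb page at offset %#lx\n", offset);
	}
}

static struct fb_deferred_io my_defio = {
	.delay		= HZ / 10,
	.deferred_io	= my_deferred_io,
};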
/linux-4.4.14/drivers/misc/ |
D | vmw_balloon.c | 570 list_for_each_entry_safe(page, next, &page_size->pages, lru) { in vmballoon_pop() 571 list_del(&page->lru); in vmballoon_pop() 622 list_add(&page->lru, &page_size->refused_pages); in vmballoon_lock_page() 630 list_add(&page->lru, &page_size->pages); in vmballoon_lock_page() 667 list_add(&p->lru, &page_size->pages); in vmballoon_lock_batched_page() 674 list_add(&p->lru, &page_size->refused_pages); in vmballoon_lock_batched_page() 706 list_add(&page->lru, &page_size->pages); in vmballoon_unlock_page() 746 list_add(&p->lru, &page_size->pages); in vmballoon_unlock_batched_page() 771 list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) { in vmballoon_release_refused_pages() 772 list_del(&page->lru); in vmballoon_release_refused_pages() [all …]
|
/linux-4.4.14/lib/ |
D | lru_cache.c | 131 INIT_LIST_HEAD(&lc->lru); in lc_create() 210 INIT_LIST_HEAD(&lc->lru); in lc_reset() 342 else if (!list_empty(&lc->lru)) in lc_prepare_for_change() 343 n = lc->lru.prev; in lc_prepare_for_change() 363 if (!list_empty(&lc->lru)) in lc_unused_element_available() 581 list_move(&e->list, &lc->lru); in lc_put() 638 lh = &lc->lru; in lc_set()
|
/linux-4.4.14/kernel/ |
D | kexec_core.c | 318 page = list_entry(pos, struct page, lru); in kimage_free_page_list() 319 list_del(&page->lru); in kimage_free_page_list() 362 list_add(&pages->lru, &extra_pages); in kimage_alloc_normal_control_pages() 369 list_add(&pages->lru, &image->control_pages); in kimage_alloc_normal_control_pages() 636 list_for_each_entry(page, &image->dest_pages, lru) { in kimage_alloc_page() 639 list_del(&page->lru); in kimage_alloc_page() 654 list_add(&page->lru, &image->unusable_pages); in kimage_alloc_page() 698 list_add(&page->lru, &image->dest_pages); in kimage_alloc_page() 1388 VMCOREINFO_OFFSET(page, lru); in crash_save_vmcoreinfo_init()
|
/linux-4.4.14/drivers/md/ |
D | dm-bufio.c | 89 struct list_head lru[LIST_SIZE]; member 483 list_add(&b->lru_list, &c->lru[dirty]); in __link_buffer() 514 list_move(&b->lru_list, &c->lru[dirty]); in __relink_lru() 752 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) { in __get_unclaimed_buffer() 764 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) { in __get_unclaimed_buffer() 891 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in __write_dirty_buffers_async() 1242 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) { in dm_bufio_write_dirty_buffers() 1479 list_for_each_entry(b, &c->lru[i], lru_list) in drop_buffers() 1484 BUG_ON(!list_empty(&c->lru[i])); in drop_buffers() 1532 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { in __scan() [all …]
|
D | raid5.c | 258 if (list_empty(&sh->lru)) { in raid5_wakeup_stripe_thread() 261 list_add_tail(&sh->lru, &group->handle_list); in raid5_wakeup_stripe_thread() 292 BUG_ON(!list_empty(&sh->lru)); in do_release_stripe() 297 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe() 300 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe() 305 list_add_tail(&sh->lru, &conf->handle_list); in do_release_stripe() 320 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe() 471 sh = list_entry(first, struct stripe_head, lru); in get_free_stripe() 704 BUG_ON(list_empty(&sh->lru) && in raid5_get_active_stripe() 706 list_del_init(&sh->lru); in raid5_get_active_stripe() [all …]
|
D | raid5.h | 199 struct list_head lru; /* inactive_list or handle_list */ member
|
/linux-4.4.14/fs/cachefiles/ |
D | rdwr.c | 474 list_for_each_entry_safe(netpage, _n, list, lru) { in cachefiles_read_backing_file() 475 list_del(&netpage->lru); in cachefiles_read_backing_file() 645 list_for_each_entry_safe(netpage, _n, list, lru) { in cachefiles_read_backing_file() 646 list_del(&netpage->lru); in cachefiles_read_backing_file() 723 list_for_each_entry_safe(page, _n, pages, lru) { in cachefiles_read_or_alloc_pages() 744 list_move(&page->lru, &backpages); in cachefiles_read_or_alloc_pages() 849 list_for_each_entry(page, pages, lru) { in cachefiles_allocate_pages()
|
/linux-4.4.14/arch/m68k/mm/ |
D | memory.c | 31 #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru)) 32 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
|
/linux-4.4.14/arch/s390/mm/ |
D | pgtable.c | 147 list_add(&page->lru, &gmap->crst_list); in gmap_alloc() 213 list_for_each_entry_safe(page, next, &gmap->crst_list, lru) in gmap_free() 261 list_add(&page->lru, &gmap->crst_list); in gmap_alloc_table() 923 struct page, lru); in page_table_alloc() 932 list_del(&page->lru); in page_table_alloc() 959 list_add(&page->lru, &mm->context.pgtable_list); in page_table_alloc() 977 list_add(&page->lru, &mm->context.pgtable_list); in page_table_free() 979 list_del(&page->lru); in page_table_free() 1009 list_add_tail(&page->lru, &mm->context.pgtable_list); in page_table_free_rcu() 1011 list_del(&page->lru); in page_table_free_rcu()
|
D | page-states.c | 105 page = list_entry(l, struct page, lru); in arch_set_page_states()
|
/linux-4.4.14/drivers/staging/android/ |
D | ashmem.c | 75 struct list_head lru; member 136 list_add_tail(&range->lru, &ashmem_lru_list); in lru_add() 149 list_del(&range->lru); in lru_del() 445 list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { in ashmem_shrink_scan()
|
/linux-4.4.14/fs/ext4/ |
D | readpage.c | 166 page = list_entry(pages->prev, struct page, lru); in ext4_mpage_readpages() 167 list_del(&page->lru); in ext4_mpage_readpages()
|
/linux-4.4.14/fs/nfs/ |
D | dir.c | 2094 cache = list_entry(head->next, struct nfs_access_entry, lru); in nfs_access_free_list() 2095 list_del(&cache->lru); in nfs_access_free_list() 2119 struct nfs_access_entry, lru); in nfs_do_access_cache_scan() 2120 list_move(&cache->lru, &head); in nfs_do_access_cache_scan() 2184 list_move(&entry->lru, head); in __nfs_access_zap_cache() 2244 list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru); in nfs_access_get_cached() 2251 list_del(&cache->lru); in nfs_access_get_cached() 2275 cache = list_entry(lh, struct nfs_access_entry, lru); in nfs_access_get_cached_rcu() 2315 list_add_tail(&set->lru, &nfsi->access_cache_entry_lru); in nfs_access_add_rbtree() 2320 list_add_tail(&set->lru, &nfsi->access_cache_entry_lru); in nfs_access_add_rbtree() [all …]
|
/linux-4.4.14/Documentation/vm/ |
D | 00-INDEX | 39 unevictable-lru.txt
|
D | unevictable-lru.txt | 259 Nick resolved this by putting mlocked pages back on the lru list before 583 active/inactive lru lists. Note that these pages do not have PageUnevictable
|
/linux-4.4.14/arch/x86/mm/ |
D | pgtable.c | 91 list_add(&page->lru, &pgd_list); in pgd_list_add() 98 list_del(&page->lru); in pgd_list_del()
|
D | init_64.c | 183 list_for_each_entry(page, &pgd_list, lru) { in sync_global_pgds() 722 magic = (unsigned long)page->lru.next; in free_pagetable()
|
D | fault.c | 243 list_for_each_entry(page, &pgd_list, lru) { in vmalloc_sync_all()
|
D | pageattr.c | 459 list_for_each_entry(page, &pgd_list, lru) { in __set_pmd_pte()
|
/linux-4.4.14/include/drm/ttm/ |
D | ttm_bo_api.h | 233 struct list_head lru; member
|
D | ttm_bo_driver.h | 300 struct list_head lru; member
|
/linux-4.4.14/drivers/block/ |
D | xen-blkfront.c | 331 struct page, lru); in get_indirect_grant() 332 list_del(&indirect_page->lru); in get_indirect_grant() 1115 list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { in blkif_free() 1116 list_del(&indirect_page->lru); in blkif_free() 1287 list_add(&indirect_page->lru, &info->indirect_pages); in blkif_completion() 1883 list_add(&indirect_page->lru, &info->indirect_pages); in blkfront_setup_indirect() 1919 list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) { in blkfront_setup_indirect() 1920 list_del(&indirect_page->lru); in blkfront_setup_indirect()
|
/linux-4.4.14/fs/ |
D | dcache.c | 428 static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry) in d_lru_isolate() argument 433 list_lru_isolate(lru, &dentry->d_lru); in d_lru_isolate() 436 static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry, in d_lru_shrink_move() argument 441 list_lru_isolate_move(lru, &dentry->d_lru, list); in d_lru_shrink_move() 1012 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) in dentry_lru_isolate() argument 1032 d_lru_isolate(lru, dentry); in dentry_lru_isolate() 1063 d_lru_shrink_move(lru, dentry, freeable); in dentry_lru_isolate() 1093 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) in dentry_lru_isolate_shrink() argument 1106 d_lru_shrink_move(lru, dentry, freeable); in dentry_lru_isolate_shrink()
|
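dentry_lru_isolate() above (and the inode, XFS and GFS2 callbacks elsewhere in this listing) follow the list_lru walk protocol: the callback runs with the per-node lru_lock held, detaches the item with list_lru_isolate() or list_lru_isolate_move(), and returns an lru_status code. A hedged sketch of that shape follows; my_isolate/my_prune and the busy flag are hypothetical, and the batch size passed to list_lru_walk() is arbitrary.

/*
 * Hedged sketch: a list_lru walk callback and a prune path that collects
 * victims onto a dispose list, in the style of dentry_lru_isolate().
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/spinlock.h>

struct my_object {
	struct list_head lru_link;
	int busy;
};

static enum lru_status my_isolate(struct list_head *item,
				  struct list_lru_one *lru,
				  spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct my_object *obj = container_of(item, struct my_object, lru_link);

	if (obj->busy)
		return LRU_SKIP;		/* leave it where it is */

	/* Detach from the per-node/per-memcg list while lru_lock is held. */
	list_lru_isolate_move(lru, item, freeable);
	return LRU_REMOVED;
}

/* Typical invocation from a shrinker or teardown path: */
static unsigned long my_prune(struct list_lru *lru)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	freed = list_lru_walk(lru, my_isolate, &dispose, 128);
	/* ... free everything now sitting on "dispose" ... */
	return freed;
}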
D | mpage.c | 369 struct page *page = list_entry(pages->prev, struct page, lru); in mpage_readpages() 372 list_del(&page->lru); in mpage_readpages()
|
D | inode.c | 692 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) in inode_lru_isolate() argument 710 list_lru_isolate(lru, &inode->i_lru); in inode_lru_isolate() 744 list_lru_isolate_move(lru, &inode->i_lru, freeable); in inode_lru_isolate()
|
/linux-4.4.14/drivers/block/xen-blkback/ |
D | blkback.c | 127 page[0] = list_first_entry(&blkif->free_pages, struct page, lru); in get_free_page() 128 list_del(&page[0]->lru); in get_free_page() 143 list_add(&page[i]->lru, &blkif->free_pages); in put_free_pages() 159 struct page, lru); in shrink_free_pagepool() 160 list_del(&page[num_pages]->lru); in shrink_free_pagepool()
|
/linux-4.4.14/fs/xfs/ |
D | xfs_buf.c | 1496 struct list_lru_one *lru, in xfs_buftarg_wait_rele() argument 1518 list_lru_isolate_move(lru, item, dispose); in xfs_buftarg_wait_rele() 1566 struct list_lru_one *lru, in xfs_buftarg_isolate() argument 1590 list_lru_isolate_move(lru, item, dispose); in xfs_buftarg_isolate()
|
D | xfs_qm.c | 433 struct list_lru_one *lru, in xfs_qm_dquot_isolate() argument 454 list_lru_isolate(lru, &dqp->q_lru); in xfs_qm_dquot_isolate() 498 list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose); in xfs_qm_dquot_isolate()
|
/linux-4.4.14/fs/ceph/ |
D | debugfs.c | 149 list_for_each_entry(di, &mdsc->dentry_lru, lru) { in dentry_lru_show()
|
D | addr.c | 316 struct page *page = list_entry(page_list->prev, struct page, lru); in start_read() 331 list_for_each_entry_reverse(page, page_list, lru) { in start_read() 358 page = list_entry(page_list->prev, struct page, lru); in start_read() 360 list_del(&page->lru); in start_read()
|
D | dir.c | 1248 list_add_tail(&di->lru, &mdsc->dentry_lru); in ceph_dentry_lru_add() 1262 list_move_tail(&di->lru, &mdsc->dentry_lru); in ceph_dentry_lru_touch() 1274 list_del_init(&di->lru); in ceph_dentry_lru_del()
|
D | super.h | 247 struct list_head lru; member
|
D | mds_client.c | 3010 struct page, lru); in send_mds_reconnect()
|
/linux-4.4.14/fs/cifs/ |
D | file.c | 3387 page = list_entry(page_list->prev, struct page, lru); in readpages_get_pages() 3408 list_move_tail(&page->lru, tmplist); in readpages_get_pages() 3412 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) { in readpages_get_pages() 3426 list_move_tail(&page->lru, tmplist); in readpages_get_pages() 3511 list_for_each_entry_safe(page, tpage, &tmplist, lru) { in cifs_readpages() 3512 list_del(&page->lru); in cifs_readpages() 3531 list_for_each_entry_safe(page, tpage, &tmplist, lru) { in cifs_readpages() 3532 list_del(&page->lru); in cifs_readpages()
|
/linux-4.4.14/fs/f2fs/ |
D | data.c | 899 page = list_entry(pages->prev, struct page, lru); in f2fs_mpage_readpages() 900 list_del(&page->lru); in f2fs_mpage_readpages() 1034 struct page *page = list_entry(pages->prev, struct page, lru); in f2fs_read_data_pages()
|
/linux-4.4.14/drivers/md/bcache/ |
D | bcache.h | 283 struct list_head lru; member
|
D | request.c | 418 i = list_first_entry(&dc->io_lru, struct io, lru); in check_should_bypass() 432 list_move_tail(&i->lru, &dc->io_lru); in check_should_bypass()
|
D | super.c | 1112 list_add(&io->lru, &dc->io_lru); in cached_dev_init()
|
/linux-4.4.14/drivers/staging/lustre/lustre/llite/ |
D | vvp_page.c | 393 list_empty(&vmpage->lru) ? "not-" : ""); in vvp_page_print()
|
/linux-4.4.14/drivers/gpu/drm/qxl/ |
D | qxl_fb.c | 179 list_for_each_entry(page, pagelist, lru) { in qxl_deferred_io()
|
/linux-4.4.14/drivers/base/ |
D | node.c | 360 #define page_initialized(page) (page->lru.next)
|
/linux-4.4.14/block/ |
D | blk-mq.c | 1438 page = list_first_entry(&tags->page_list, struct page, lru); in blk_mq_free_rq_map() 1439 list_del_init(&page->lru); in blk_mq_free_rq_map() 1514 list_add_tail(&page->lru, &tags->page_list); in blk_mq_init_rq_map()
|
/linux-4.4.14/drivers/gpu/drm/udl/ |
D | udl_fb.c | 114 list_for_each_entry(cur, &fbdefio->pagelist, lru) { in udlfb_dpy_deferred_io()
|
/linux-4.4.14/drivers/video/fbdev/ |
D | metronomefb.c | 475 list_for_each_entry(cur, &fbdefio->pagelist, lru) { in metronomefb_dpy_deferred_io()
|
D | xen-fbfront.c | 192 list_for_each_entry(page, pagelist, lru) { in xenfb_deferred_io()
|
D | broadsheetfb.c | 947 list_for_each_entry(cur, &fbdefio->pagelist, lru) { in broadsheetfb_dpy_deferred_io()
|
D | auo_k190x.c | 330 list_for_each_entry(cur, &fbdefio->pagelist, lru) { in auok190xfb_dpy_deferred_io()
|
D | smscufx.c | 972 list_for_each_entry(cur, &fbdefio->pagelist, lru) { in ufx_dpy_deferred_io()
|
D | udlfb.c | 733 list_for_each_entry(cur, &fbdefio->pagelist, lru) { in dlfb_dpy_deferred_io()
|
D | sh_mobile_lcdcfb.c | 457 list_for_each_entry(page, pagelist, lru) in sh_mobile_lcdc_sginit()
|
/linux-4.4.14/fs/gfs2/ |
D | quota.c | 149 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) in gfs2_qd_isolate() argument 159 list_lru_isolate_move(lru, &qd->qd_lru, dispose); in gfs2_qd_isolate()
|
/linux-4.4.14/drivers/gpu/drm/vmwgfx/ |
D | vmwgfx_fb.c | 313 list_for_each_entry(page, pagelist, lru) { in vmw_deferred_io()
|
/linux-4.4.14/fs/fscache/ |
D | page.c | 748 list_for_each_entry(page, pages, lru) { in __fscache_readpages_cancel()
|
/linux-4.4.14/arch/x86/xen/ |
D | mmu.c | 861 list_for_each_entry(page, &pgd_list, lru) { in xen_mm_pin_all() 975 list_for_each_entry(page, &pgd_list, lru) { in xen_mm_unpin_all()
|
/linux-4.4.14/drivers/staging/fbtft/ |
D | fbtft-core.c | 454 list_for_each_entry(page, pagelist, lru) { in fbtft_deferred_io()
|
/linux-4.4.14/Documentation/ |
D | bcache.txt | 414 One of either lru, fifo or random.
|
/linux-4.4.14/fs/ocfs2/ |
D | aops.c | 387 last = list_entry(pages->prev, struct page, lru); in ocfs2_readpages()
|
/linux-4.4.14/Documentation/sysctl/ |
D | vm.txt | 113 allowed to examine the unevictable lru (mlocked pages) for pages to compact.
|
/linux-4.4.14/fs/btrfs/ |
D | extent_io.c | 4282 page = list_entry(pages->prev, struct page, lru); in extent_readpages() 4285 list_del(&page->lru); in extent_readpages()
|
/linux-4.4.14/init/ |
D | Kconfig | 1068 control group is tracked in the third page lru pointer. This means
|
/linux-4.4.14/Documentation/virtual/uml/ |
D | UserModeLinux-HOWTO.txt | 3691 index = 0, next_hash = 0x0, count = {counter = 1}, flags = 132, lru = {
|