Lines matching refs:page — identifier cross-reference for struct page usage in mm/shmem.c (tmpfs). Each hit shows the file line number, the matching source line, and the enclosing function; "argument" and "local" mark hits that are a function parameter or a local variable declaration.

121 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
122 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
125 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
128 struct page **pagep, enum sgp_type sgp, int *fault_type) in shmem_getpage()
294 static int shmem_add_to_page_cache(struct page *page, in shmem_add_to_page_cache() argument
300 VM_BUG_ON_PAGE(!PageLocked(page), page); in shmem_add_to_page_cache()
301 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in shmem_add_to_page_cache()
303 page_cache_get(page); in shmem_add_to_page_cache()
304 page->mapping = mapping; in shmem_add_to_page_cache()
305 page->index = index; in shmem_add_to_page_cache()
309 error = radix_tree_insert(&mapping->page_tree, index, page); in shmem_add_to_page_cache()
312 page); in shmem_add_to_page_cache()
315 __inc_zone_page_state(page, NR_FILE_PAGES); in shmem_add_to_page_cache()
316 __inc_zone_page_state(page, NR_SHMEM); in shmem_add_to_page_cache()
319 page->mapping = NULL; in shmem_add_to_page_cache()
321 page_cache_release(page); in shmem_add_to_page_cache()
329 static void shmem_delete_from_page_cache(struct page *page, void *radswap) in shmem_delete_from_page_cache() argument
331 struct address_space *mapping = page->mapping; in shmem_delete_from_page_cache()
335 error = shmem_radix_tree_replace(mapping, page->index, page, radswap); in shmem_delete_from_page_cache()
336 page->mapping = NULL; in shmem_delete_from_page_cache()
338 __dec_zone_page_state(page, NR_FILE_PAGES); in shmem_delete_from_page_cache()
339 __dec_zone_page_state(page, NR_SHMEM); in shmem_delete_from_page_cache()
341 page_cache_release(page); in shmem_delete_from_page_cache()
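
The two helpers above are a matched pair: shmem_add_to_page_cache() pins the page, points page->mapping/page->index at the file, inserts it into the mapping's radix tree, and bumps the NR_FILE_PAGES and NR_SHMEM counters; shmem_delete_from_page_cache() reverses each step, optionally leaving a swap entry (radswap) in the slot. A sketch of the insertion half, stitched together from the fragments above; the tree_lock locking and the expected-entry replacement path (taken when a swap entry already occupies the slot) are filled in by assumption:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>

static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, void *expected)
{
        int error;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_cache_get(page);           /* reference held by the page cache */
        page->mapping = mapping;
        page->index = index;

        spin_lock_irq(&mapping->tree_lock);
        error = radix_tree_insert(&mapping->page_tree, index, page);
        if (!error) {
                mapping->nrpages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                __inc_zone_page_state(page, NR_SHMEM);
                spin_unlock_irq(&mapping->tree_lock);
        } else {
                page->mapping = NULL;   /* undo on failure */
                spin_unlock_irq(&mapping->tree_lock);
                page_cache_release(page);
        }
        return error;
}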
423 struct page *page = pvec.pages[i]; in shmem_undo_range() local
429 if (radix_tree_exceptional_entry(page)) { in shmem_undo_range()
433 index, page); in shmem_undo_range()
437 if (!trylock_page(page)) in shmem_undo_range()
439 if (!unfalloc || !PageUptodate(page)) { in shmem_undo_range()
440 if (page->mapping == mapping) { in shmem_undo_range()
441 VM_BUG_ON_PAGE(PageWriteback(page), page); in shmem_undo_range()
442 truncate_inode_page(mapping, page); in shmem_undo_range()
445 unlock_page(page); in shmem_undo_range()
454 struct page *page = NULL; in shmem_undo_range() local
455 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL); in shmem_undo_range()
456 if (page) { in shmem_undo_range()
462 zero_user_segment(page, partial_start, top); in shmem_undo_range()
463 set_page_dirty(page); in shmem_undo_range()
464 unlock_page(page); in shmem_undo_range()
465 page_cache_release(page); in shmem_undo_range()
469 struct page *page = NULL; in shmem_undo_range() local
470 shmem_getpage(inode, end, &page, SGP_READ, NULL); in shmem_undo_range()
471 if (page) { in shmem_undo_range()
472 zero_user_segment(page, 0, partial_end); in shmem_undo_range()
473 set_page_dirty(page); in shmem_undo_range()
474 unlock_page(page); in shmem_undo_range()
475 page_cache_release(page); in shmem_undo_range()
497 struct page *page = pvec.pages[i]; in shmem_undo_range() local
503 if (radix_tree_exceptional_entry(page)) { in shmem_undo_range()
506 if (shmem_free_swap(mapping, index, page)) { in shmem_undo_range()
515 lock_page(page); in shmem_undo_range()
516 if (!unfalloc || !PageUptodate(page)) { in shmem_undo_range()
517 if (page->mapping == mapping) { in shmem_undo_range()
518 VM_BUG_ON_PAGE(PageWriteback(page), page); in shmem_undo_range()
519 truncate_inode_page(mapping, page); in shmem_undo_range()
522 unlock_page(page); in shmem_undo_range()
527 unlock_page(page); in shmem_undo_range()
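
shmem_undo_range() walks the range with a pagevec, truncating whole pages and zeroing the partial first/last pages in place rather than dropping them. The two near-identical stanzas at 454-475 above look the page up with SGP_READ, zero only the affected byte range, and mark the page dirty so the zeroes stick. A condensed sketch of that step; the helper name shmem_zero_partial is hypothetical (the real code inlines this twice), and from/to are the in-page byte offsets:

#include <linux/highmem.h>
#include <linux/pagemap.h>

static void shmem_zero_partial(struct inode *inode, pgoff_t index,
                               unsigned int from, unsigned int to)
{
        struct page *page = NULL;

        shmem_getpage(inode, index, &page, SGP_READ, NULL);
        if (page) {
                zero_user_segment(page, from, to);
                set_page_dirty(page);   /* keep the zeroes */
                unlock_page(page);      /* shmem_getpage returns it locked */
                page_cache_release(page);
        }
}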
635 swp_entry_t swap, struct page **pagep) in shmem_unuse_inode()
712 int shmem_unuse(swp_entry_t swap, struct page *page) in shmem_unuse() argument
723 if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) in shmem_unuse()
731 error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg); in shmem_unuse()
741 error = shmem_unuse_inode(info, swap, &page); in shmem_unuse()
754 mem_cgroup_cancel_charge(page, memcg); in shmem_unuse()
756 mem_cgroup_commit_charge(page, memcg, true); in shmem_unuse()
758 unlock_page(page); in shmem_unuse()
759 page_cache_release(page); in shmem_unuse()
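
shmem_unuse() is called from swapoff to pull a swapped-out shmem page back into the page cache. The memcg calls around shmem_unuse_inode() show the try/commit/cancel charging protocol: charge up front with GFP_KERNEL while sleeping is still allowed, then commit once the page is back in some inode's radix tree, or cancel if no owning inode was found. A sketch of that protocol with the shmem_swaplist walk elided; the -EAGAIN convention is an assumption about the elided loop:

#include <linux/memcontrol.h>
#include <linux/swap.h>

int shmem_unuse(swp_entry_t swap, struct page *page)
{
        struct mem_cgroup *memcg;
        int error;

        /* Caller hands us a locked swap-cache page; bail if it was replaced. */
        if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
                goto out;

        error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg);
        if (error)
                goto out;

        error = -EAGAIN;
        /* ... walk shmem_swaplist, shmem_unuse_inode(info, swap, &page)
         *     returns something other than -EAGAIN once it finds the owner ... */

        if (error)
                mem_cgroup_cancel_charge(page, memcg);
        else
                mem_cgroup_commit_charge(page, memcg, true);
out:
        unlock_page(page);
        page_cache_release(page);
        return error;
}

The commit passes lrucare=true because a swap-cache page may already be on the LRU; contrast the freshly allocated page in shmem_getpage_gfp() (line 1215 below), which commits with lrucare=false.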
766 static int shmem_writepage(struct page *page, struct writeback_control *wbc) in shmem_writepage() argument
774 BUG_ON(!PageLocked(page)); in shmem_writepage()
775 mapping = page->mapping; in shmem_writepage()
776 index = page->index; in shmem_writepage()
807 if (!PageUptodate(page)) { in shmem_writepage()
823 clear_highpage(page); in shmem_writepage()
824 flush_dcache_page(page); in shmem_writepage()
825 SetPageUptodate(page); in shmem_writepage()
844 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { in shmem_writepage()
851 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); in shmem_writepage()
854 BUG_ON(page_mapped(page)); in shmem_writepage()
855 swap_writepage(page, wbc); in shmem_writepage()
862 set_page_dirty(page); in shmem_writepage()
865 unlock_page(page); in shmem_writepage()
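
shmem_writepage() moves a dirty shmem page from the page cache to swap. The fragments show the key transition: add the page to the swap cache, then replace its page-cache slot with a radix-tree swap entry (swp_to_radix_entry) so later lookups find the swap location, and finally hand the page to swap_writepage(). The path at 862-865 redirties the page when no swap slot is available. A trimmed sketch of this flow under a hypothetical helper name; the shmem_swaplist bookkeeping and accounting are elided:

#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/writeback.h>

static int shmem_page_out(struct page *page, struct writeback_control *wbc)
{
        swp_entry_t swap = get_swap_page();

        if (!swap.val)
                goto redirty;

        if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                /* The swap entry takes over the page-cache slot. */
                shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
                BUG_ON(page_mapped(page));
                return swap_writepage(page, wbc);       /* unlocks the page */
        }

        swapcache_free(swap);           /* lost the race; give the slot back */
redirty:
        set_page_dirty(page);
        unlock_page(page);
        return 0;
}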
896 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, in shmem_swapin()
900 struct page *page; in shmem_swapin() local
909 page = swapin_readahead(swap, gfp, &pvma, 0); in shmem_swapin()
914 return page; in shmem_swapin()
917 static struct page *shmem_alloc_page(gfp_t gfp, in shmem_alloc_page()
921 struct page *page; in shmem_alloc_page() local
930 page = alloc_page_vma(gfp, &pvma, 0); in shmem_alloc_page()
935 return page; in shmem_alloc_page()
944 static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, in shmem_swapin()
950 static inline struct page *shmem_alloc_page(gfp_t gfp, in shmem_alloc_page()
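
shmem_swapin() and shmem_alloc_page() both build a pseudo-vma (the pvma locals at 900/921) so alloc_page_vma() and swapin_readahead() honour the file's shared NUMA mempolicy even though no real VMA is at hand; with CONFIG_NUMA off, the inline stubs at 944/950 fall back to plain allocation. A sketch of the pseudo-vma trick for the allocation side, reconstructed from the fragments with the policy plumbing approximated:

#include <linux/mempolicy.h>
#include <linux/mm.h>

static struct page *shmem_alloc_page(gfp_t gfp,
                                     struct shmem_inode_info *info,
                                     pgoff_t index)
{
        struct vm_area_struct pvma;
        struct page *page;

        /* A pseudo-vma that exists only to carry the policy. */
        pvma.vm_start = 0;
        pvma.vm_pgoff = index + info->vfs_inode.i_ino; /* spread interleave */
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

        page = alloc_page_vma(gfp, &pvma, 0);

        /* Drop the reference taken by mpol_shared_policy_lookup(). */
        mpol_cond_put(pvma.vm_policy);
        return page;
}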
976 static bool shmem_should_replace_page(struct page *page, gfp_t gfp) in shmem_should_replace_page() argument
978 return page_zonenum(page) > gfp_zone(gfp); in shmem_should_replace_page()
981 static int shmem_replace_page(struct page **pagep, gfp_t gfp, in shmem_replace_page()
984 struct page *oldpage, *newpage; in shmem_replace_page()
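
shmem_should_replace_page() (976-978 above) guards against a subtle mismatch: the swap layer may have read the page into any zone, but a mapping created with a restrictive gfp mask (e.g. via shmem_read_mapping_page_gfp()) may require a lower one, so the data must be copied into a conforming page. A heavily trimmed sketch of what shmem_replace_page() then does; the swap-cache radix-tree swap and memcg transfer are elided, and the "_sketch" name marks it as illustrative:

#include <linux/highmem.h>

static int shmem_replace_page_sketch(struct page **pagep, gfp_t gfp,
                                     struct shmem_inode_info *info,
                                     pgoff_t index)
{
        struct page *oldpage = *pagep, *newpage;

        newpage = shmem_alloc_page(gfp, info, index);   /* conforming zone */
        if (!newpage)
                return -ENOMEM;

        copy_highpage(newpage, oldpage);        /* move the data over */
        flush_dcache_page(newpage);
        SetPageUptodate(newpage);
        /* ... replace oldpage with newpage in the swap cache, then
         *     release oldpage's references ... */
        *pagep = newpage;
        return 0;
}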
1055 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type) in shmem_getpage_gfp()
1061 struct page *page; in shmem_getpage_gfp() local
1071 page = find_lock_entry(mapping, index); in shmem_getpage_gfp()
1072 if (radix_tree_exceptional_entry(page)) { in shmem_getpage_gfp()
1073 swap = radix_to_swp_entry(page); in shmem_getpage_gfp()
1074 page = NULL; in shmem_getpage_gfp()
1083 if (page && sgp == SGP_WRITE) in shmem_getpage_gfp()
1084 mark_page_accessed(page); in shmem_getpage_gfp()
1087 if (page && !PageUptodate(page)) { in shmem_getpage_gfp()
1090 unlock_page(page); in shmem_getpage_gfp()
1091 page_cache_release(page); in shmem_getpage_gfp()
1092 page = NULL; in shmem_getpage_gfp()
1094 if (page || (sgp == SGP_READ && !swap.val)) { in shmem_getpage_gfp()
1095 *pagep = page; in shmem_getpage_gfp()
1108 page = lookup_swap_cache(swap); in shmem_getpage_gfp()
1109 if (!page) { in shmem_getpage_gfp()
1113 page = shmem_swapin(swap, gfp, info, index); in shmem_getpage_gfp()
1114 if (!page) { in shmem_getpage_gfp()
1121 lock_page(page); in shmem_getpage_gfp()
1122 if (!PageSwapCache(page) || page_private(page) != swap.val || in shmem_getpage_gfp()
1127 if (!PageUptodate(page)) { in shmem_getpage_gfp()
1131 wait_on_page_writeback(page); in shmem_getpage_gfp()
1133 if (shmem_should_replace_page(page, gfp)) { in shmem_getpage_gfp()
1134 error = shmem_replace_page(&page, gfp, info, index); in shmem_getpage_gfp()
1139 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg); in shmem_getpage_gfp()
1141 error = shmem_add_to_page_cache(page, mapping, index, in shmem_getpage_gfp()
1156 mem_cgroup_cancel_charge(page, memcg); in shmem_getpage_gfp()
1157 delete_from_swap_cache(page); in shmem_getpage_gfp()
1163 mem_cgroup_commit_charge(page, memcg, true); in shmem_getpage_gfp()
1171 mark_page_accessed(page); in shmem_getpage_gfp()
1173 delete_from_swap_cache(page); in shmem_getpage_gfp()
1174 set_page_dirty(page); in shmem_getpage_gfp()
1191 page = shmem_alloc_page(gfp, info, index); in shmem_getpage_gfp()
1192 if (!page) { in shmem_getpage_gfp()
1197 __SetPageSwapBacked(page); in shmem_getpage_gfp()
1198 __set_page_locked(page); in shmem_getpage_gfp()
1200 __SetPageReferenced(page); in shmem_getpage_gfp()
1202 error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg); in shmem_getpage_gfp()
1207 error = shmem_add_to_page_cache(page, mapping, index, in shmem_getpage_gfp()
1212 mem_cgroup_cancel_charge(page, memcg); in shmem_getpage_gfp()
1215 mem_cgroup_commit_charge(page, memcg, false); in shmem_getpage_gfp()
1216 lru_cache_add_anon(page); in shmem_getpage_gfp()
1237 clear_highpage(page); in shmem_getpage_gfp()
1238 flush_dcache_page(page); in shmem_getpage_gfp()
1239 SetPageUptodate(page); in shmem_getpage_gfp()
1242 set_page_dirty(page); in shmem_getpage_gfp()
1249 ClearPageDirty(page); in shmem_getpage_gfp()
1250 delete_from_page_cache(page); in shmem_getpage_gfp()
1258 *pagep = page; in shmem_getpage_gfp()
1273 if (page) { in shmem_getpage_gfp()
1274 unlock_page(page); in shmem_getpage_gfp()
1275 page_cache_release(page); in shmem_getpage_gfp()
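
shmem_getpage_gfp() is the heart of the file: find_lock_entry() at 1071 can yield (a) a resident page, (b) an exceptional radix-tree entry naming a swap slot, or (c) nothing. The fragments trace all three: the fast path returns at 1094-1095, the swap-in branch (1108-1174) revalidates the page and may copy it to a conforming zone via shmem_replace_page(), and the allocation branch starts at 1191. A compressed sketch of the allocation branch under a hypothetical helper name, with radix-tree preloading, block accounting, and the SGP-type special cases elided:

#include <linux/memcontrol.h>
#include <linux/swap.h>

static int shmem_alloc_and_add(struct inode *inode, pgoff_t index,
                               gfp_t gfp, struct page **pagep)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct mem_cgroup *memcg;
        struct page *page;
        int error;

        page = shmem_alloc_page(gfp, info, index);
        if (!page)
                return -ENOMEM;

        __SetPageSwapBacked(page);      /* shmem pages age on the anon LRU */
        __set_page_locked(page);

        error = mem_cgroup_try_charge(page, current->mm, gfp, &memcg);
        if (error)
                goto out_release;
        error = shmem_add_to_page_cache(page, mapping, index, NULL);
        if (error) {
                mem_cgroup_cancel_charge(page, memcg);
                goto out_release;
        }
        mem_cgroup_commit_charge(page, memcg, false);   /* not on LRU yet */
        lru_cache_add_anon(page);

        clear_highpage(page);           /* fresh pages read as zeroes */
        flush_dcache_page(page);
        SetPageUptodate(page);
        *pagep = page;                  /* returned locked */
        return 0;

out_release:
        unlock_page(page);
        page_cache_release(page);
        return error;
}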
1353 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); in shmem_fault()
1493 struct page **pagep, void **fsdata) in shmem_write_begin()
1513 struct page *page, void *fsdata) in shmem_write_end() argument
1520 if (!PageUptodate(page)) { in shmem_write_end()
1523 zero_user_segments(page, 0, from, in shmem_write_end()
1526 SetPageUptodate(page); in shmem_write_end()
1528 set_page_dirty(page); in shmem_write_end()
1529 unlock_page(page); in shmem_write_end()
1530 page_cache_release(page); in shmem_write_end()
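
shmem_write_begin() never reads anything in (there is no backing store to read from), so a page handed to shmem_write_end() may be !Uptodate. The fragments at 1520-1526 show the consequence: on a short copy, the uncopied head and tail of the page must be zeroed before SetPageUptodate(), or stale data would become readable. A sketch close to the real function, with the i_size update assumed:

#include <linux/highmem.h>
#include <linux/pagemap.h>

static int shmem_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;

        if (pos + copied > inode->i_size)
                i_size_write(inode, pos + copied);

        if (!PageUptodate(page)) {
                if (copied < PAGE_CACHE_SIZE) {
                        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
                        /* zero everything the copy did not cover */
                        zero_user_segments(page, 0, from,
                                           from + copied, PAGE_CACHE_SIZE);
                }
                SetPageUptodate(page);
        }
        set_page_dirty(page);
        unlock_page(page);
        page_cache_release(page);
        return copied;
}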
1559 struct page *page = NULL; in shmem_file_read_iter() local
1573 error = shmem_getpage(inode, index, &page, sgp, NULL); in shmem_file_read_iter()
1579 if (page) in shmem_file_read_iter()
1580 unlock_page(page); in shmem_file_read_iter()
1592 if (page) in shmem_file_read_iter()
1593 page_cache_release(page); in shmem_file_read_iter()
1599 if (page) { in shmem_file_read_iter()
1606 flush_dcache_page(page); in shmem_file_read_iter()
1611 mark_page_accessed(page); in shmem_file_read_iter()
1613 page = ZERO_PAGE(0); in shmem_file_read_iter()
1614 page_cache_get(page); in shmem_file_read_iter()
1621 ret = copy_page_to_iter(page, offset, nr, to); in shmem_file_read_iter()
1627 page_cache_release(page); in shmem_file_read_iter()
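
shmem_file_read_iter() is a sparse-file-aware read loop: with SGP_READ, shmem_getpage() returns a NULL page (and no error) for a hole, and the fragments at 1613-1614 show the trick of substituting the shared ZERO_PAGE so copy_page_to_iter() can run unconditionally. A sketch of one iteration under a hypothetical helper name, assuming the caller computed offset and nr from the file position:

#include <linux/pagemap.h>
#include <linux/uio.h>

static ssize_t shmem_read_one(struct inode *inode, pgoff_t index,
                              unsigned long offset, size_t nr,
                              struct iov_iter *to)
{
        struct page *page = NULL;
        ssize_t ret;
        int error;

        error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
        if (error)
                return error;
        if (page) {
                unlock_page(page);      /* returned locked */
                flush_dcache_page(page);
                mark_page_accessed(page);
        } else {
                page = ZERO_PAGE(0);    /* hole: reads as zeroes */
                page_cache_get(page);
        }

        ret = copy_page_to_iter(page, offset, nr, to);
        page_cache_release(page);
        return ret;
}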
1649 struct page *pages[PIPE_DEF_BUFFERS]; in shmem_file_splice_read()
1651 struct page *page; in shmem_file_splice_read() local
1686 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL); in shmem_file_splice_read()
1689 unlock_page(page); in shmem_file_splice_read()
1690 spd.pages[spd.nr_pages++] = page; in shmem_file_splice_read()
1705 page = spd.pages[page_nr]; in shmem_file_splice_read()
1707 if (!PageUptodate(page) || page->mapping != mapping) { in shmem_file_splice_read()
1708 error = shmem_getpage(inode, index, &page, in shmem_file_splice_read()
1712 unlock_page(page); in shmem_file_splice_read()
1714 spd.pages[page_nr] = page; in shmem_file_splice_read()
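
shmem_file_splice_read() fills a splice_pipe_desc straight from shmem_getpage() rather than going through ->readpage, but each page is unlocked between lookup and use, so the second loop revalidates it (line 1707) before splicing. That revalidation step, condensed from the fragments:

/* Page may have been truncated or invalidated since lookup. */
page = spd.pages[page_nr];
if (!PageUptodate(page) || page->mapping != mapping) {
        error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
        if (error)
                break;
        unlock_page(page);
        page_cache_release(spd.pages[page_nr]); /* drop the stale ref */
        spd.pages[page_nr] = page;
}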
1762 struct page *page; in shmem_seek_hole_data() local
1786 page = pvec.pages[i]; in shmem_seek_hole_data()
1787 if (page && !radix_tree_exceptional_entry(page)) { in shmem_seek_hole_data()
1788 if (!PageUptodate(page)) in shmem_seek_hole_data()
1789 page = NULL; in shmem_seek_hole_data()
 1792  			    (page && whence == SEEK_DATA) ||			   in shmem_seek_hole_data()
 1793  			    (!page && whence == SEEK_HOLE)) {			   in shmem_seek_hole_data()
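
shmem_seek_hole_data() implements lseek(SEEK_HOLE/SEEK_DATA) by scanning the mapping with a pagevec: a present uptodate page, or an exceptional (swap) entry, counts as data; an empty or !Uptodate slot counts as hole. The per-slot test at 1787-1793 condenses to the hypothetical helper below:

#include <linux/fs.h>
#include <linux/radix-tree.h>

/* Swap entries count as data; !Uptodate pages count as holes. */
static bool shmem_slot_matches(struct page *page, int whence)
{
        if (page && !radix_tree_exceptional_entry(page) &&
            !PageUptodate(page))
                page = NULL;
        return (page && whence == SEEK_DATA) ||
               (!page && whence == SEEK_HOLE);
}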
1856 struct page *page; in shmem_tag_pins() local
1864 page = radix_tree_deref_slot(slot); in shmem_tag_pins()
1865 if (!page || radix_tree_exception(page)) { in shmem_tag_pins()
1866 if (radix_tree_deref_retry(page)) in shmem_tag_pins()
1868 } else if (page_count(page) - page_mapcount(page) > 1) { in shmem_tag_pins()
1898 struct page *page; in shmem_wait_for_pins() local
1919 page = radix_tree_deref_slot(slot); in shmem_wait_for_pins()
1920 if (radix_tree_exception(page)) { in shmem_wait_for_pins()
1921 if (radix_tree_deref_retry(page)) in shmem_wait_for_pins()
1924 page = NULL; in shmem_wait_for_pins()
1927 if (page && in shmem_wait_for_pins()
1928 page_count(page) - page_mapcount(page) != 1) { in shmem_wait_for_pins()
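
shmem_tag_pins() and shmem_wait_for_pins() support memfd write sealing: a page whose refcount exceeds its mapcount by more than the page cache's single reference is pinned by something outside the page tables (an O_DIRECT read, RDMA, and so on), and sealing must wait for that reference to drop. The tag pass uses the "> 1" form (line 1868) to mark suspects; the wait pass insists on exact equality (line 1928). The shared test, as a hypothetical inline the real code open-codes:

/* One page-cache ref plus one ref per PTE mapping is the baseline;
 * anything beyond that is an external pin. */
static inline bool shmem_page_is_pinned(struct page *page)
{
        return page_count(page) - page_mapcount(page) > 1;
}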
2141 struct page *page; in shmem_fallocate() local
2152 error = shmem_getpage(inode, index, &page, SGP_FALLOC, in shmem_fallocate()
2167 if (!PageUptodate(page)) in shmem_fallocate()
2177 set_page_dirty(page); in shmem_fallocate()
2178 unlock_page(page); in shmem_fallocate()
2179 page_cache_release(page); in shmem_fallocate()
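
The fallocate path instantiates pages one at a time with SGP_FALLOC and marks each dirty so it stays resident; the !PageUptodate check at 2167 lets a later failure distinguish pages this call created (and must undo) from pages that already held data. A condensed sketch of the preallocation loop under a hypothetical helper name, with the inflight-falloc bookkeeping and undo path elided:

#include <linux/falloc.h>

static int shmem_prealloc_range(struct inode *inode, pgoff_t start, pgoff_t end)
{
        pgoff_t index;
        int error;

        for (index = start; index < end; index++) {
                struct page *page;

                error = shmem_getpage(inode, index, &page, SGP_FALLOC, NULL);
                if (error)
                        return error;   /* real code undoes prior pages */

                set_page_dirty(page);   /* keep it resident */
                unlock_page(page);
                page_cache_release(page);
        }
        return 0;
}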
2439 struct page *page; in shmem_symlink() local
2471 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); in shmem_symlink()
2478 kaddr = kmap_atomic(page); in shmem_symlink()
2481 SetPageUptodate(page); in shmem_symlink()
2482 set_page_dirty(page); in shmem_symlink()
2483 unlock_page(page); in shmem_symlink()
2484 page_cache_release(page); in shmem_symlink()
2495 struct page *page = NULL; in shmem_follow_link() local
2496 int error = shmem_getpage(d_inode(dentry), 0, &page, SGP_READ, NULL); in shmem_follow_link()
2499 unlock_page(page); in shmem_follow_link()
2500 *cookie = page; in shmem_follow_link()
2501 return kmap(page); in shmem_follow_link()
2506 struct page *page = cookie; in shmem_put_link() local
2507 kunmap(page); in shmem_put_link()
2508 mark_page_accessed(page); in shmem_put_link()
2509 page_cache_release(page); in shmem_put_link()
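
Symlink targets too long for the inode are stored in page 0 of the inode's own page cache: shmem_symlink() writes the target through kmap_atomic(), and shmem_follow_link() hands the VFS a kmap()ed pointer, stashing the page in *cookie so shmem_put_link() can kunmap and release it later. The follow/put pair, reconstructed almost line for line from the fragments above (this is the cookie-based ->follow_link API visible at 2495-2509):

#include <linux/fs.h>
#include <linux/highmem.h>

static const char *shmem_follow_link(struct dentry *dentry, void **cookie)
{
        struct page *page = NULL;
        int error = shmem_getpage(d_inode(dentry), 0, &page, SGP_READ, NULL);

        if (error)
                return ERR_PTR(error);
        unlock_page(page);
        *cookie = page;                 /* released in ->put_link() */
        return kmap(page);              /* target string starts at offset 0 */
}

static void shmem_put_link(struct inode *unused, void *cookie)
{
        struct page *page = cookie;

        kunmap(page);
        mark_page_accessed(page);
        page_cache_release(page);
}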
3289 int shmem_unuse(swp_entry_t swap, struct page *page) in shmem_unuse() argument
3447 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, in shmem_read_mapping_page_gfp()
3452 struct page *page; in shmem_read_mapping_page_gfp() local
3456 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL); in shmem_read_mapping_page_gfp()
3458 page = ERR_PTR(error); in shmem_read_mapping_page_gfp()
3460 unlock_page(page); in shmem_read_mapping_page_gfp()
3461 return page; in shmem_read_mapping_page_gfp()
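
shmem_read_mapping_page_gfp() is the exported entry point that lets drivers (GPU drivers with tmpfs-backed objects, for instance) fault in a shmem page with a caller-chosen gfp mask; it returns the page unlocked and referenced, or an ERR_PTR. A hedged usage sketch; the caller name and the DMA32 constraint are hypothetical:

#include <linux/err.h>
#include <linux/shmem_fs.h>

/* Pull page `n` of a tmpfs-backed object into memory below 4G. */
static struct page *grab_dma32_page(struct address_space *mapping, pgoff_t n)
{
        struct page *page;

        page = shmem_read_mapping_page_gfp(mapping, n,
                                           GFP_KERNEL | __GFP_DMA32);
        if (IS_ERR(page))
                return page;            /* propagate -ENOMEM etc. */

        /* Caller owns one reference; drop it with page_cache_release(). */
        return page;
}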