Lines matching refs: page (from mm/truncate.c)
81 void do_invalidatepage(struct page *page, unsigned int offset, in do_invalidatepage() argument
84 void (*invalidatepage)(struct page *, unsigned int, unsigned int); in do_invalidatepage()
86 invalidatepage = page->mapping->a_ops->invalidatepage; in do_invalidatepage()
92 (*invalidatepage)(page, offset, length); in do_invalidatepage()
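do_invalidatepage() only forwards to the owning mapping's ->invalidatepage hook (falling back to block_invalidatepage() under CONFIG_BLOCK when the filesystem sets none). A minimal sketch of how a buffer-head based filesystem might wire that hook; the examplefs_* names are hypothetical:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical examplefs: drop the buffer heads covering the invalidated
 * byte range; block_invalidatepage() is the stock helper for buffer-head
 * based filesystems. */
static void examplefs_invalidatepage(struct page *page, unsigned int offset,
                                     unsigned int length)
{
        block_invalidatepage(page, offset, length);
}

static const struct address_space_operations examplefs_aops = {
        .invalidatepage = examplefs_invalidatepage,
        /* ... remaining operations omitted ... */
};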
106 truncate_complete_page(struct address_space *mapping, struct page *page) in truncate_complete_page() argument
108 if (page->mapping != mapping) in truncate_complete_page()
111 if (page_has_private(page)) in truncate_complete_page()
112 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); in truncate_complete_page()
119 cancel_dirty_page(page); in truncate_complete_page()
120 ClearPageMappedToDisk(page); in truncate_complete_page()
121 delete_from_page_cache(page); in truncate_complete_page()
134 invalidate_complete_page(struct address_space *mapping, struct page *page) in invalidate_complete_page() argument
138 if (page->mapping != mapping) in invalidate_complete_page()
141 if (page_has_private(page) && !try_to_release_page(page, 0)) in invalidate_complete_page()
144 ret = remove_mapping(mapping, page); in invalidate_complete_page()
149 int truncate_inode_page(struct address_space *mapping, struct page *page) in truncate_inode_page() argument
151 if (page_mapped(page)) { in truncate_inode_page()
153 (loff_t)page->index << PAGE_CACHE_SHIFT, in truncate_inode_page()
156 return truncate_complete_page(mapping, page); in truncate_inode_page()
162 int generic_error_remove_page(struct address_space *mapping, struct page *page) in generic_error_remove_page() argument
172 return truncate_inode_page(mapping, page); in generic_error_remove_page()
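generic_error_remove_page() is exported so a filesystem can plug it straight into its address_space_operations as the memory-failure handler; it drops the poisoned page from the page cache via truncate_inode_page(). A wiring sketch (the examplefs_ prefix is hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

static const struct address_space_operations examplefs_aops = {
        /* On a hardware memory error, let the generic helper truncate the
         * poisoned page out of the page cache. */
        .error_remove_page = generic_error_remove_page,
        /* ... other operations omitted ... */
};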
182 int invalidate_inode_page(struct page *page) in invalidate_inode_page() argument
184 struct address_space *mapping = page_mapping(page); in invalidate_inode_page()
187 if (PageDirty(page) || PageWriteback(page)) in invalidate_inode_page()
189 if (page_mapped(page)) in invalidate_inode_page()
191 return invalidate_complete_page(mapping, page); in invalidate_inode_page()
261 struct page *page = pvec.pages[i]; in truncate_inode_pages_range() local
268 if (radix_tree_exceptional_entry(page)) { in truncate_inode_pages_range()
269 clear_exceptional_entry(mapping, index, page); in truncate_inode_pages_range()
273 if (!trylock_page(page)) in truncate_inode_pages_range()
275 WARN_ON(page->index != index); in truncate_inode_pages_range()
276 if (PageWriteback(page)) { in truncate_inode_pages_range()
277 unlock_page(page); in truncate_inode_pages_range()
280 truncate_inode_page(mapping, page); in truncate_inode_pages_range()
281 unlock_page(page); in truncate_inode_pages_range()
290 struct page *page = find_lock_page(mapping, start - 1); in truncate_inode_pages_range() local
291 if (page) { in truncate_inode_pages_range()
298 wait_on_page_writeback(page); in truncate_inode_pages_range()
299 zero_user_segment(page, partial_start, top); in truncate_inode_pages_range()
300 cleancache_invalidate_page(mapping, page); in truncate_inode_pages_range()
301 if (page_has_private(page)) in truncate_inode_pages_range()
302 do_invalidatepage(page, partial_start, in truncate_inode_pages_range()
304 unlock_page(page); in truncate_inode_pages_range()
305 page_cache_release(page); in truncate_inode_pages_range()
309 struct page *page = find_lock_page(mapping, end); in truncate_inode_pages_range() local
310 if (page) { in truncate_inode_pages_range()
311 wait_on_page_writeback(page); in truncate_inode_pages_range()
312 zero_user_segment(page, 0, partial_end); in truncate_inode_pages_range()
313 cleancache_invalidate_page(mapping, page); in truncate_inode_pages_range()
314 if (page_has_private(page)) in truncate_inode_pages_range()
315 do_invalidatepage(page, 0, in truncate_inode_pages_range()
317 unlock_page(page); in truncate_inode_pages_range()
318 page_cache_release(page); in truncate_inode_pages_range()
347 struct page *page = pvec.pages[i]; in truncate_inode_pages_range() local
357 if (radix_tree_exceptional_entry(page)) { in truncate_inode_pages_range()
358 clear_exceptional_entry(mapping, index, page); in truncate_inode_pages_range()
362 lock_page(page); in truncate_inode_pages_range()
363 WARN_ON(page->index != index); in truncate_inode_pages_range()
364 wait_on_page_writeback(page); in truncate_inode_pages_range()
365 truncate_inode_page(mapping, page); in truncate_inode_pages_range()
366 unlock_page(page); in truncate_inode_pages_range()
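Filesystems rarely call truncate_inode_pages_range() directly; the usual entry point for a shrinking truncate is truncate_setsize()/truncate_pagecache(), which funnel into it with an end offset of -1. A sketch, assuming a hypothetical examplefs:

#include <linux/fs.h>
#include <linux/mm.h>

static void examplefs_shrink(struct inode *inode, loff_t newsize)
{
        /* Updates i_size, then truncate_pagecache() ends up in
         * truncate_inode_pages_range(mapping, newsize, -1): whole pages
         * past the new EOF are dropped and the page straddling it is
         * zeroed from the new size onward. */
        truncate_setsize(inode, newsize);

        /* Filesystem-specific block freeing would follow here. */
}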
469 struct page *page = pvec.pages[i]; in invalidate_mapping_pages() local
476 if (radix_tree_exceptional_entry(page)) { in invalidate_mapping_pages()
477 clear_exceptional_entry(mapping, index, page); in invalidate_mapping_pages()
481 if (!trylock_page(page)) in invalidate_mapping_pages()
483 WARN_ON(page->index != index); in invalidate_mapping_pages()
484 ret = invalidate_inode_page(page); in invalidate_mapping_pages()
485 unlock_page(page); in invalidate_mapping_pages()
491 deactivate_file_page(page); in invalidate_mapping_pages()
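invalidate_mapping_pages() is the best-effort variant: only clean, unmapped pages not under writeback get dropped, and pages it cannot take are merely deactivated via deactivate_file_page(). A caller sketch (the examplefs_ name is hypothetical):

#include <linux/fs.h>
#include <linux/pagemap.h>

static void examplefs_drop_clean_cache(struct inode *inode)
{
        unsigned long nr;

        /* Returns how many pages were actually released; dirty, mapped
         * or locked pages simply stay in the cache. */
        nr = invalidate_mapping_pages(inode->i_mapping, 0, -1);
        pr_debug("examplefs: released %lu clean pages\n", nr);
}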
511 invalidate_complete_page2(struct address_space *mapping, struct page *page) in invalidate_complete_page2() argument
516 if (page->mapping != mapping) in invalidate_complete_page2()
519 if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) in invalidate_complete_page2()
522 memcg = mem_cgroup_begin_page_stat(page); in invalidate_complete_page2()
524 if (PageDirty(page)) in invalidate_complete_page2()
527 BUG_ON(page_has_private(page)); in invalidate_complete_page2()
528 __delete_from_page_cache(page, NULL, memcg); in invalidate_complete_page2()
533 mapping->a_ops->freepage(page); in invalidate_complete_page2()
535 page_cache_release(page); /* pagecache ref */ in invalidate_complete_page2()
543 static int do_launder_page(struct address_space *mapping, struct page *page) in do_launder_page() argument
545 if (!PageDirty(page)) in do_launder_page()
547 if (page->mapping != mapping || mapping->a_ops->launder_page == NULL) in do_launder_page()
549 return mapping->a_ops->launder_page(page); in do_launder_page()
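do_launder_page() gives the filesystem one last chance to clean a dirty page before invalidate_inode_pages2_range() would otherwise fail on it. A ->launder_page sketch; examplefs_wb_page() stands in for the filesystem's own synchronous single-page writeback helper and is purely hypothetical:

#include <linux/fs.h>

/* Hypothetical: examplefs's synchronous single-page writeback helper. */
extern int examplefs_wb_page(struct inode *inode, struct page *page);

static int examplefs_launder_page(struct page *page)
{
        /* The page arrives locked and dirty; write it back synchronously
         * so the caller can go on to invalidate it (compare NFS's
         * nfs_launder_page(), which calls nfs_wb_page()). */
        return examplefs_wb_page(page->mapping->host, page);
}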
581 struct page *page = pvec.pages[i]; in invalidate_inode_pages2_range() local
588 if (radix_tree_exceptional_entry(page)) { in invalidate_inode_pages2_range()
589 clear_exceptional_entry(mapping, index, page); in invalidate_inode_pages2_range()
593 lock_page(page); in invalidate_inode_pages2_range()
594 WARN_ON(page->index != index); in invalidate_inode_pages2_range()
595 if (page->mapping != mapping) { in invalidate_inode_pages2_range()
596 unlock_page(page); in invalidate_inode_pages2_range()
599 wait_on_page_writeback(page); in invalidate_inode_pages2_range()
600 if (page_mapped(page)) { in invalidate_inode_pages2_range()
620 BUG_ON(page_mapped(page)); in invalidate_inode_pages2_range()
621 ret2 = do_launder_page(mapping, page); in invalidate_inode_pages2_range()
623 if (!invalidate_complete_page2(mapping, page)) in invalidate_inode_pages2_range()
628 unlock_page(page); in invalidate_inode_pages2_range()
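invalidate_inode_pages2_range() is the strict variant used, for example, around direct I/O: dirty pages are laundered, mapped ones are unmapped, and -EBUSY is returned if a page still cannot be removed. A caller sketch (the examplefs_ helper is hypothetical):

#include <linux/fs.h>
#include <linux/pagemap.h>

static int examplefs_invalidate_for_dio(struct inode *inode, loff_t pos,
                                        size_t len)
{
        struct address_space *mapping = inode->i_mapping;
        pgoff_t first = pos >> PAGE_CACHE_SHIFT;
        pgoff_t last = (pos + len - 1) >> PAGE_CACHE_SHIFT;

        /* -EBUSY means some page was redirtied or otherwise could not be
         * invalidated; direct-I/O callers typically fall back to buffered
         * I/O in that case. */
        return invalidate_inode_pages2_range(mapping, first, last);
}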
737 struct page *page; in pagecache_isize_extended() local
750 page = find_lock_page(inode->i_mapping, index); in pagecache_isize_extended()
752 if (!page) in pagecache_isize_extended()
758 if (page_mkclean(page)) in pagecache_isize_extended()
759 set_page_dirty(page); in pagecache_isize_extended()
760 unlock_page(page); in pagecache_isize_extended()
761 page_cache_release(page); in pagecache_isize_extended()
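pagecache_isize_extended() is meant for filesystems whose block size is smaller than the page size: after i_size has been raised, the page that straddled the old EOF must be write-protected again so ->page_mkwrite runs before it is next dirtied (that is what the page_mkclean()/set_page_dirty() pair in the listing above does). A placement sketch with hypothetical examplefs_ naming, assuming the caller holds i_mutex:

#include <linux/fs.h>
#include <linux/mm.h>

static int examplefs_grow(struct inode *inode, loff_t newsize)
{
        loff_t oldsize = i_size_read(inode);

        i_size_write(inode, newsize);
        /* Clean the page straddling the old EOF in all mappings so the
         * next write fault goes through ->page_mkwrite again. */
        pagecache_isize_extended(inode, oldsize, newsize);

        return 0;
}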