Lines matching refs:page: references to struct page in fs/xfs/xfs_aops.c, listed as source line number, matched code, and enclosing function (argument and local declarations are marked as such).

41 struct page *page, in xfs_count_page_state() argument
49 bh = head = page_buffers(page); in xfs_count_page_state()
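The two hits above (lines 41 and 49) are the page argument and the start of a circular buffer_head walk. A minimal sketch of how such a walk classifies a page, assuming the conventional b_this_page loop; the helper name and loop body are reconstructions, not text copied from the file:

#include <linux/mm_types.h>
#include <linux/buffer_head.h>

/*
 * Sketch: classify a buffer_head-backed page by walking the circular list
 * returned by page_buffers() (the walk that line 49 begins).
 */
static void count_page_state(struct page *page, int *delalloc, int *unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			*unwritten = 1;		/* unwritten extent behind this buffer */
		else if (buffer_delay(bh))
			*delalloc = 1;		/* delayed allocation not yet converted */
	} while ((bh = bh->b_this_page) != head);
}
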
411 struct page *page, in xfs_start_page_writeback() argument
415 ASSERT(PageLocked(page)); in xfs_start_page_writeback()
416 ASSERT(!PageWriteback(page)); in xfs_start_page_writeback()
426 clear_page_dirty_for_io(page); in xfs_start_page_writeback()
427 set_page_writeback(page); in xfs_start_page_writeback()
429 set_page_writeback_keepwrite(page); in xfs_start_page_writeback()
431 unlock_page(page); in xfs_start_page_writeback()
435 end_page_writeback(page); in xfs_start_page_writeback()
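Lines 411-435 trace the transition of a locked, dirty page into writeback. A hedged sketch of the pattern those calls form; the clear_dirty and buffers parameters and the branch structure are inferred from which helpers appear, and BUG_ON stands in for XFS's debug-only ASSERT:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>

/* Sketch: start writeback on a page that the caller holds locked. */
static void start_page_writeback(struct page *page, int clear_dirty,
				 int buffers)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else {
		/* keep PAGECACHE_TAG_TOWRITE so sync writeback retries the page */
		set_page_writeback_keepwrite(page);
	}

	unlock_page(page);

	/* no buffers were submitted for I/O: writeback is already complete */
	if (!buffers)
		end_page_writeback(page);
}
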
641 struct page *page, in xfs_check_page_type() argument
648 if (PageWriteback(page)) in xfs_check_page_type()
650 if (!page->mapping) in xfs_check_page_type()
652 if (!page_has_buffers(page)) in xfs_check_page_type()
655 bh = head = page_buffers(page); in xfs_check_page_type()
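Lines 641-655 are the guard checks that decide whether a page can join the I/O being built: skip pages already under writeback, pages truncated away (no ->mapping), and pages without buffers, then walk the buffers to see whether their state matches the I/O type in progress. A sketch of that sequence, simplified to delayed-allocation buffers only (an assumption made for brevity):

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

/* Sketch: can this page be written out as part of a delalloc I/O? */
static bool page_matches_delalloc_io(struct page *page)
{
	struct buffer_head	*bh, *head;

	if (PageWriteback(page))
		return false;		/* already being written back */
	if (!page->mapping)
		return false;		/* truncated while unlocked */
	if (!page_has_buffers(page))
		return false;		/* no block state attached */

	bh = head = page_buffers(page);
	do {
		if (buffer_delay(bh))
			return true;	/* at least one delalloc buffer */
	} while ((bh = bh->b_this_page) != head);

	return false;
}
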
685 struct page *page, in xfs_convert_page() argument
697 xfs_off_t offset = page_offset(page); in xfs_convert_page()
699 if (page->index != tindex) in xfs_convert_page()
701 if (!trylock_page(page)) in xfs_convert_page()
703 if (PageWriteback(page)) in xfs_convert_page()
705 if (page->mapping != inode->i_mapping) in xfs_convert_page()
707 if (!xfs_check_page_type(page, (*ioendp)->io_type, false)) in xfs_convert_page()
724 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, in xfs_convert_page()
761 bh = head = page_buffers(page); in xfs_convert_page()
767 if (!(PageUptodate(page) || buffer_uptodate(bh))) { in xfs_convert_page()
802 SetPageUptodate(page); in xfs_convert_page()
809 xfs_start_page_writeback(page, !page_dirty, count); in xfs_convert_page()
813 unlock_page(page); in xfs_convert_page()
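The hits at lines 685-813 belong to the clustering path that opportunistically pulls neighbouring pages into the I/O already being built. Lines 699-707 are the revalidation sequence after the page came out of a pagevec lookup; a sketch of that non-blocking claim, with a hypothetical helper name:

#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Sketch: decide whether a neighbouring page may be added to the current
 * writeback cluster.  Every check must be non-blocking, because giving up
 * on the extra page is always safe.
 */
static bool try_to_claim_cluster_page(struct page *page, pgoff_t tindex,
				      struct inode *inode)
{
	if (page->index != tindex)
		return false;		/* lookup raced with reclaim or truncate */
	if (!trylock_page(page))
		return false;		/* never wait for a page lock here */
	if (PageWriteback(page) ||
	    page->mapping != inode->i_mapping) {
		unlock_page(page);	/* already in flight, or truncated and reused */
		return false;
	}
	/* the caller still verifies the buffer type (xfs_check_page_type) */
	return true;
}
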
855 struct page *page, in xfs_vm_invalidatepage() argument
859 trace_xfs_invalidatepage(page->mapping->host, page, offset, in xfs_vm_invalidatepage()
861 block_invalidatepage(page, offset, length); in xfs_vm_invalidatepage()
882 struct page *page) in xfs_aops_discard_page() argument
884 struct inode *inode = page->mapping->host; in xfs_aops_discard_page()
887 loff_t offset = page_offset(page); in xfs_aops_discard_page()
889 if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true)) in xfs_aops_discard_page()
897 page, ip->i_ino, offset); in xfs_aops_discard_page()
900 bh = head = page_buffers(page); in xfs_aops_discard_page()
925 xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE); in xfs_aops_discard_page()
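Lines 882-925 cover the failure path for a delalloc page: when writeback cannot proceed, the delayed allocations backing the page are torn down and the page is invalidated rather than left dirty forever. A sketch of that shape; the XFS-internal block punching is only hinted at in a comment, and block_invalidatepage() stands in for the xfs_vm_invalidatepage() wrapper at line 925:

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

/* Sketch: throw away a delalloc page whose writeback has failed. */
static void discard_delalloc_page(struct page *page)
{
	struct buffer_head	*bh, *head;

	if (!page_has_buffers(page))
		goto out_invalidate;

	bh = head = page_buffers(page);
	do {
		if (buffer_delay(bh)) {
			/*
			 * Release the delayed allocation backing this buffer;
			 * in XFS this is xfs_bmap_punch_delalloc_range(),
			 * keyed off the buffer's offset within the file.
			 */
		}
	} while ((bh = bh->b_this_page) != head);

out_invalidate:
	/* line 925: drop the whole page from the address space */
	block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
}
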
939 struct page *page, in xfs_vm_writepage() argument
942 struct inode *inode = page->mapping->host; in xfs_vm_writepage()
955 trace_xfs_writepage(inode, page, 0, 0); in xfs_vm_writepage()
957 ASSERT(page_has_buffers(page)); in xfs_vm_writepage()
996 if (page->index < end_index) in xfs_vm_writepage()
997 end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT; in xfs_vm_writepage()
1029 if (page->index > end_index || in xfs_vm_writepage()
1030 (page->index == end_index && offset_into_page == 0)) in xfs_vm_writepage()
1041 zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE); in xfs_vm_writepage()
1049 bh = head = page_buffers(page); in xfs_vm_writepage()
1050 offset = page_offset(page); in xfs_vm_writepage()
1091 if (PageUptodate(page)) in xfs_vm_writepage()
1136 SetPageUptodate(page); in xfs_vm_writepage()
1138 xfs_start_page_writeback(page, 1, count); in xfs_vm_writepage()
1166 xfs_cluster_write(inode, page->index + 1, &imap, &ioend, in xfs_vm_writepage()
1189 xfs_aops_discard_page(page); in xfs_vm_writepage()
1190 ClearPageUptodate(page); in xfs_vm_writepage()
1191 unlock_page(page); in xfs_vm_writepage()
1195 redirty_page_for_writepage(wbc, page); in xfs_vm_writepage()
1196 unlock_page(page); in xfs_vm_writepage()
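Most of xfs_vm_writepage's hits (lines 939-1196) deal with where the page sits relative to EOF: pages fully inside the file get a full-page range, pages wholly beyond EOF are skipped, and the page straddling EOF has its tail zeroed so stale bytes never reach disk (lines 996-1041). A worked sketch of just that arithmetic, with assumed local names; the redirty and unlock handling at lines 1195-1196 is left to the caller:

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/*
 * Sketch: return the file offset at which writeback of @page must stop,
 * or 0 if the page lies entirely beyond EOF and should be redirtied.
 */
static loff_t writepage_end_offset(struct inode *inode, struct page *page)
{
	loff_t		isize = i_size_read(inode);
	pgoff_t		end_index = isize >> PAGE_CACHE_SHIFT;
	unsigned int	offset_into_page = isize & (PAGE_CACHE_SIZE - 1);

	/* page fully below EOF: write the whole page */
	if (page->index < end_index)
		return (loff_t)(page->index + 1) << PAGE_CACHE_SHIFT;

	/* page fully beyond EOF, or EOF sits exactly on its first byte */
	if (page->index > end_index ||
	    (page->index == end_index && offset_into_page == 0))
		return 0;

	/* page straddles EOF: zero the tail so stale data is never written */
	zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
	return isize;
}
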
1218 struct page *page, in xfs_vm_releasepage() argument
1223 trace_xfs_releasepage(page->mapping->host, page, 0, 0); in xfs_vm_releasepage()
1225 xfs_count_page_state(page, &delalloc, &unwritten); in xfs_vm_releasepage()
1232 return try_to_free_buffers(page); in xfs_vm_releasepage()
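Lines 1218-1232 show ->releasepage refusing to free buffers from a page that still carries delalloc or unwritten state, since dropping them would lose pending I/O work. A short sketch, reusing the count_page_state() helper sketched above:

#include <linux/bug.h>
#include <linux/buffer_head.h>

/* Sketch: only let the VM strip buffers from pages that owe no further I/O. */
static int release_page(struct page *page, gfp_t gfp_mask)
{
	int	delalloc, unwritten;

	count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;	/* delayed allocation not yet converted */
	if (WARN_ON_ONCE(unwritten))
		return 0;	/* unwritten extent conversion still pending */

	return try_to_free_buffers(page);
}
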
1668 struct page *page, in xfs_vm_write_failed() argument
1694 head = page_buffers(page); in xfs_vm_write_failed()
1744 struct page **pagep, in xfs_vm_write_begin()
1748 struct page *page; in xfs_vm_write_begin() local
1753 page = grab_cache_page_write_begin(mapping, index, flags); in xfs_vm_write_begin()
1754 if (!page) in xfs_vm_write_begin()
1757 status = __block_write_begin(page, pos, len, xfs_get_blocks); in xfs_vm_write_begin()
1762 xfs_vm_write_failed(inode, page, pos, len); in xfs_vm_write_begin()
1763 unlock_page(page); in xfs_vm_write_begin()
1776 page_cache_release(page); in xfs_vm_write_begin()
1777 page = NULL; in xfs_vm_write_begin()
1780 *pagep = page; in xfs_vm_write_begin()
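Lines 1744-1780 outline the buffered-write preparation path: grab (or create) the page at the target index, let __block_write_begin() map and, if needed, read in the affected blocks, and unwind on failure. A trimmed sketch of that pattern; the get_block callback is passed in rather than naming xfs_get_blocks, and the EOF cleanup done by xfs_vm_write_failed (lines 1668-1694, 1762) is only referenced:

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

/* Sketch: a ->write_begin built on the generic block helpers. */
static int write_begin(struct file *file, struct address_space *mapping,
		       loff_t pos, unsigned int len, unsigned int flags,
		       struct page **pagep, void **fsdata,
		       get_block_t *get_block)
{
	pgoff_t		index = pos >> PAGE_CACHE_SHIFT;
	struct page	*page;
	int		status;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, get_block);
	if (unlikely(status)) {
		/* the real failure path also trims blocks allocated past EOF */
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}
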
1799 struct page *page, in xfs_vm_write_end() argument
1806 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); in xfs_vm_write_end()
1841 struct page *page) in xfs_vm_readpage() argument
1843 return mpage_readpage(page, xfs_get_blocks); in xfs_vm_readpage()
1870 struct page *page) in xfs_vm_set_page_dirty() argument
1872 struct address_space *mapping = page->mapping; in xfs_vm_set_page_dirty()
1879 return !TestSetPageDirty(page); in xfs_vm_set_page_dirty()
1882 offset = page_offset(page); in xfs_vm_set_page_dirty()
1885 if (page_has_buffers(page)) { in xfs_vm_set_page_dirty()
1886 struct buffer_head *head = page_buffers(page); in xfs_vm_set_page_dirty()
1896 newly_dirty = !TestSetPageDirty(page); in xfs_vm_set_page_dirty()
1904 if (page->mapping) { /* Race with truncate? */ in xfs_vm_set_page_dirty()
1905 WARN_ON_ONCE(!PageUptodate(page)); in xfs_vm_set_page_dirty()
1906 account_page_dirtied(page, mapping); in xfs_vm_set_page_dirty()
1908 page_index(page), PAGECACHE_TAG_DIRTY); in xfs_vm_set_page_dirty()
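The final group (lines 1870-1908) is the custom ->set_page_dirty: it dirties only the buffers that sit below EOF, then dirties and accounts the page itself while guarding against a racing truncate, following the shape of __set_page_dirty_buffers(). A sketch under those assumptions, with an assumed function name and the locale of each lock taken from the generic helper:

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>

/* Sketch: dirty a page, but mark only the buffers inside EOF dirty. */
static int set_page_dirty_within_eof(struct page *page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode;
	loff_t			offset, end_offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	inode = mapping->host;
	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);	/* buffer is inside EOF */
			bh = bh->b_this_page;
			offset += 1 << inode->i_blkbits;
		} while (bh != head);
	}
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		spin_lock_irq(&mapping->tree_lock);
		if (page->mapping) {	/* race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					   page_index(page),
					   PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
	return newly_dirty;
}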