Lines matching refs:page in fs/buffer.c
Each entry shows the source line number, the matching code, and the enclosing function; "argument" and "local" mark whether page is a parameter or a local variable of that function.

83 void buffer_check_dirty_writeback(struct page *page,  in buffer_check_dirty_writeback()  argument
90 BUG_ON(!PageLocked(page)); in buffer_check_dirty_writeback()
92 if (!page_has_buffers(page)) in buffer_check_dirty_writeback()
95 if (PageWriteback(page)) in buffer_check_dirty_writeback()
98 head = page_buffers(page); in buffer_check_dirty_writeback()
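
buffer_check_dirty_writeback() exists to be plugged into an address_space_operations as the ->is_dirty_writeback hook, so reclaim can see per-buffer dirty/writeback state. A minimal sketch, assuming a hypothetical foofs:

    #include <linux/buffer_head.h>
    #include <linux/fs.h>

    /* Reclaim consults ->is_dirty_writeback to decide whether to stall on a
     * page; the generic helper reports the page dirty or under writeback if
     * any of its buffer_heads are. */
    static const struct address_space_operations foofs_aops = {
            .is_dirty_writeback = buffer_check_dirty_writeback,
            /* other hooks elided */
    };
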
124 __clear_page_buffers(struct page *page) in __clear_page_buffers() argument
126 ClearPagePrivate(page); in __clear_page_buffers()
127 set_page_private(page, 0); in __clear_page_buffers()
128 page_cache_release(page); in __clear_page_buffers()
206 struct page *page; in __find_get_block_slow() local
210 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); in __find_get_block_slow()
211 if (!page) in __find_get_block_slow()
215 if (!page_has_buffers(page)) in __find_get_block_slow()
217 head = page_buffers(page); in __find_get_block_slow()
249 page_cache_release(page); in __find_get_block_slow()
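
__find_get_block_slow() looks the page up in the block device's mapping and scans its buffer ring for the requested block. The scan it performs looks roughly like this sketch (foofs_scan_page() is hypothetical; the real code also takes the mapping's private_lock):

    #include <linux/buffer_head.h>

    /* Walk the page's circular buffer list for a given disk block; return
     * the buffer with an elevated refcount, or NULL if it is not here. */
    static struct buffer_head *foofs_scan_page(struct page *page, sector_t block)
    {
            struct buffer_head *bh, *head;

            if (!page_has_buffers(page))
                    return NULL;
            bh = head = page_buffers(page);
            do {
                    if (buffer_mapped(bh) && bh->b_blocknr == block) {
                            get_bh(bh);
                            return bh;
                    }
                    bh = bh->b_this_page;
            } while (bh != head);
            return NULL;
    }
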
284 struct page *page; in end_buffer_async_read() local
289 page = bh->b_page; in end_buffer_async_read()
295 SetPageError(page); in end_buffer_async_read()
303 first = page_buffers(page); in end_buffer_async_read()
325 if (page_uptodate && !PageError(page)) in end_buffer_async_read()
326 SetPageUptodate(page); in end_buffer_async_read()
327 unlock_page(page); in end_buffer_async_read()
345 struct page *page; in end_buffer_async_write() local
349 page = bh->b_page; in end_buffer_async_write()
354 set_bit(AS_EIO, &page->mapping->flags); in end_buffer_async_write()
357 SetPageError(page); in end_buffer_async_write()
360 first = page_buffers(page); in end_buffer_async_write()
376 end_page_writeback(page); in end_buffer_async_write()
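
end_buffer_async_write() is armed as b_end_io by mark_buffer_async_write(); the last async buffer on the page to complete ends page writeback. A simplified sketch of the submission side, modelled on __block_write_full_page() (foofs_write_buffers() is hypothetical and assumes at least one dirty buffer):

    #include <linux/buffer_head.h>

    static void foofs_write_buffers(struct page *page, struct buffer_head *head)
    {
            struct buffer_head *bh = head;

            /* Pass 1: with the page still locked, mark every dirty buffer
             * async-write so an early completion cannot end page writeback
             * before all submissions are visible. */
            do {
                    lock_buffer(bh);
                    if (test_clear_buffer_dirty(bh))
                            mark_buffer_async_write(bh); /* b_end_io = end_buffer_async_write */
                    else
                            unlock_buffer(bh);
                    bh = bh->b_this_page;
            } while (bh != head);

            set_page_writeback(page);
            unlock_page(page);

            /* Pass 2: submit the marked buffers. */
            do {
                    struct buffer_head *next = bh->b_this_page;

                    if (buffer_async_write(bh))
                            submit_bh(WRITE, bh);
                    bh = next;
            } while (bh != head);
    }
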
627 static void __set_page_dirty(struct page *page, in __set_page_dirty() argument
633 if (page->mapping) { /* Race with truncate? */ in __set_page_dirty()
634 WARN_ON_ONCE(warn && !PageUptodate(page)); in __set_page_dirty()
635 account_page_dirtied(page, mapping); in __set_page_dirty()
637 page_index(page), PAGECACHE_TAG_DIRTY); in __set_page_dirty()
668 int __set_page_dirty_buffers(struct page *page) in __set_page_dirty_buffers() argument
671 struct address_space *mapping = page_mapping(page); in __set_page_dirty_buffers()
674 return !TestSetPageDirty(page); in __set_page_dirty_buffers()
677 if (page_has_buffers(page)) { in __set_page_dirty_buffers()
678 struct buffer_head *head = page_buffers(page); in __set_page_dirty_buffers()
686 newly_dirty = !TestSetPageDirty(page); in __set_page_dirty_buffers()
690 __set_page_dirty(page, mapping, 1); in __set_page_dirty_buffers()
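
__set_page_dirty_buffers() is the stock ->set_page_dirty for buffer-head pages; set_page_dirty() even falls back to it when an a_ops leaves the hook NULL. Explicit wiring, as a hedged sketch for a hypothetical foofs:

    #include <linux/buffer_head.h>
    #include <linux/fs.h>

    /* Dirtying the page also dirties every attached buffer, keeping
     * per-block and per-page dirty state in sync. */
    static const struct address_space_operations foofs_aops = {
            .set_page_dirty = __set_page_dirty_buffers,
            /* other hooks elided */
    };
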
855 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, in alloc_page_buffers() argument
876 set_bh_page(bh, page, offset); in alloc_page_buffers()
912 link_dev_buffers(struct page *page, struct buffer_head *head) in link_dev_buffers() argument
922 attach_page_buffers(page, head); in link_dev_buffers()
941 init_page_buffers(struct page *page, struct block_device *bdev, in init_page_buffers() argument
944 struct buffer_head *head = page_buffers(page); in init_page_buffers()
946 int uptodate = PageUptodate(page); in init_page_buffers()
979 struct page *page; in grow_dev_page() local
995 page = find_or_create_page(inode->i_mapping, index, gfp_mask); in grow_dev_page()
996 if (!page) in grow_dev_page()
999 BUG_ON(!PageLocked(page)); in grow_dev_page()
1001 if (page_has_buffers(page)) { in grow_dev_page()
1002 bh = page_buffers(page); in grow_dev_page()
1004 end_block = init_page_buffers(page, bdev, in grow_dev_page()
1009 if (!try_to_free_buffers(page)) in grow_dev_page()
1016 bh = alloc_page_buffers(page, size, 0); in grow_dev_page()
1026 link_dev_buffers(page, bh); in grow_dev_page()
1027 end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits, in grow_dev_page()
1033 unlock_page(page); in grow_dev_page()
1034 page_cache_release(page); in grow_dev_page()
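
grow_dev_page() finds the page that should hold a given device block using sizebits = PAGE_CACHE_SHIFT - blkbits, visible in the index shifts above. The same math as a hypothetical helper:

    #include <linux/pagemap.h>

    /* With 1K blocks on 4K pages, sizebits == 2: blocks 4..7 all live on
     * page index 1, whose first block is index << sizebits == 4. */
    static pgoff_t foofs_block_to_index(sector_t block, unsigned int sizebits)
    {
            return (pgoff_t)(block >> sizebits);
    }
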
1160 struct page *page = bh->b_page; in mark_buffer_dirty() local
1161 if (!TestSetPageDirty(page)) { in mark_buffer_dirty()
1162 struct address_space *mapping = page_mapping(page); in mark_buffer_dirty()
1164 __set_page_dirty(page, mapping, 0); in mark_buffer_dirty()
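
mark_buffer_dirty() is the usual way metadata changes reach writeback: it dirties the buffer, the page (via the __set_page_dirty() call above), and tags the mapping. A typical read-modify-dirty cycle; the foofs names and on-disk layout are hypothetical:

    #include <linux/buffer_head.h>
    #include <linux/fs.h>

    struct foofs_super_block { u32 s_free_count; }; /* hypothetical layout */

    static int foofs_dec_free_count(struct super_block *sb, sector_t block)
    {
            struct buffer_head *bh = sb_bread(sb, block);

            if (!bh)
                    return -EIO;
            ((struct foofs_super_block *)bh->b_data)->s_free_count--;
            mark_buffer_dirty(bh);  /* schedules the block for writeback */
            brelse(bh);
            return 0;
    }
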
1452 struct page *page, unsigned long offset) in set_bh_page() argument
1454 bh->b_page = page; in set_bh_page()
1456 if (PageHighMem(page)) in set_bh_page()
1462 bh->b_data = page_address(page) + offset; in set_bh_page()
1509 void block_invalidatepage(struct page *page, unsigned int offset, in block_invalidatepage() argument
1516 BUG_ON(!PageLocked(page)); in block_invalidatepage()
1517 if (!page_has_buffers(page)) in block_invalidatepage()
1525 head = page_buffers(page); in block_invalidatepage()
1552 try_to_release_page(page, 0); in block_invalidatepage()
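
block_invalidatepage() is the matching ->invalidatepage hook for buffer-head pages, discarding buffers in the invalidated range and, as the line above shows, releasing the page once everything is gone. Straight wiring, sketched:

    #include <linux/buffer_head.h>
    #include <linux/fs.h>

    static const struct address_space_operations foofs_aops = {
            .invalidatepage = block_invalidatepage,
            /* other hooks elided */
    };
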
1564 void create_empty_buffers(struct page *page, in create_empty_buffers() argument
1569 head = alloc_page_buffers(page, blocksize, 1); in create_empty_buffers()
1578 spin_lock(&page->mapping->private_lock); in create_empty_buffers()
1579 if (PageUptodate(page) || PageDirty(page)) { in create_empty_buffers()
1582 if (PageDirty(page)) in create_empty_buffers()
1584 if (PageUptodate(page)) in create_empty_buffers()
1589 attach_page_buffers(page, head); in create_empty_buffers()
1590 spin_unlock(&page->mapping->private_lock); in create_empty_buffers()
1639 static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int… in create_page_buffers() argument
1641 BUG_ON(!PageLocked(page)); in create_page_buffers()
1643 if (!page_has_buffers(page)) in create_page_buffers()
1644 create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state); in create_page_buffers()
1645 return page_buffers(page); in create_page_buffers()
1677 static int __block_write_full_page(struct inode *inode, struct page *page, in __block_write_full_page() argument
1690 head = create_page_buffers(page, inode, in __block_write_full_page()
1707 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); in __block_write_full_page()
1757 redirty_page_for_writepage(wbc, page); in __block_write_full_page()
1771 BUG_ON(PageWriteback(page)); in __block_write_full_page()
1772 set_page_writeback(page); in __block_write_full_page()
1782 unlock_page(page); in __block_write_full_page()
1792 end_page_writeback(page); in __block_write_full_page()
1823 SetPageError(page); in __block_write_full_page()
1824 BUG_ON(PageWriteback(page)); in __block_write_full_page()
1825 mapping_set_error(page->mapping, err); in __block_write_full_page()
1826 set_page_writeback(page); in __block_write_full_page()
1836 unlock_page(page); in __block_write_full_page()
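
__block_write_full_page(), like most helpers below, drives the filesystem through its get_block_t callback. The contract, sketched for a hypothetical foofs (foofs_map_block() stands in for the per-fs block mapper):

    #include <linux/buffer_head.h>
    #include <linux/fs.h>

    /* hypothetical mapper: resolve (and, with create, allocate) iblock */
    int foofs_map_block(struct inode *inode, sector_t iblock, int create,
                        sector_t *phys, bool *new);

    static int foofs_get_block(struct inode *inode, sector_t iblock,
                               struct buffer_head *bh_result, int create)
    {
            sector_t phys;
            bool new = false;
            int err;

            err = foofs_map_block(inode, iblock, create, &phys, &new);
            if (err)
                    return err;
            map_bh(bh_result, inode->i_sb, phys); /* sets mapped, b_bdev, b_blocknr */
            if (new)
                    set_buffer_new(bh_result); /* callers must zero/unmap stale data */
            return 0;
    }
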
1845 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to) in page_zero_new_buffers() argument
1850 BUG_ON(!PageLocked(page)); in page_zero_new_buffers()
1851 if (!page_has_buffers(page)) in page_zero_new_buffers()
1854 bh = head = page_buffers(page); in page_zero_new_buffers()
1861 if (!PageUptodate(page)) { in page_zero_new_buffers()
1867 zero_user(page, start, size); in page_zero_new_buffers()
1882 int __block_write_begin(struct page *page, loff_t pos, unsigned len, in __block_write_begin() argument
1887 struct inode *inode = page->mapping->host; in __block_write_begin()
1894 BUG_ON(!PageLocked(page)); in __block_write_begin()
1899 head = create_page_buffers(page, inode, 0); in __block_write_begin()
1903 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); in __block_write_begin()
1909 if (PageUptodate(page)) { in __block_write_begin()
1925 if (PageUptodate(page)) { in __block_write_begin()
1932 zero_user_segments(page, in __block_write_begin()
1938 if (PageUptodate(page)) { in __block_write_begin()
1959 page_zero_new_buffers(page, from, to); in __block_write_begin()
1964 static int __block_commit_write(struct inode *inode, struct page *page, in __block_commit_write() argument
1972 bh = head = page_buffers(page); in __block_commit_write()
1998 SetPageUptodate(page); in __block_commit_write()
2009 unsigned flags, struct page **pagep, get_block_t *get_block) in block_write_begin()
2012 struct page *page; in block_write_begin() local
2015 page = grab_cache_page_write_begin(mapping, index, flags); in block_write_begin()
2016 if (!page) in block_write_begin()
2019 status = __block_write_begin(page, pos, len, get_block); in block_write_begin()
2021 unlock_page(page); in block_write_begin()
2022 page_cache_release(page); in block_write_begin()
2023 page = NULL; in block_write_begin()
2026 *pagep = page; in block_write_begin()
2033 struct page *page, void *fsdata) in block_write_end() argument
2053 if (!PageUptodate(page)) in block_write_end()
2056 page_zero_new_buffers(page, start+copied, start+len); in block_write_end()
2058 flush_dcache_page(page); in block_write_end()
2061 __block_commit_write(inode, page, start, start+copied); in block_write_end()
2069 struct page *page, void *fsdata) in generic_write_end() argument
2075 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); in generic_write_end()
2089 unlock_page(page); in generic_write_end()
2090 page_cache_release(page); in generic_write_end()
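
block_write_begin() and generic_write_end() pair up into the classic buffered-write path: begin maps and reads in the affected blocks, the caller copies data, end commits the buffers and advances i_size. Minimal wiring for a hypothetical foofs (foofs_get_block as sketched earlier):

    #include <linux/buffer_head.h>
    #include <linux/fs.h>

    int foofs_get_block(struct inode *, sector_t, struct buffer_head *, int);

    static int foofs_write_begin(struct file *file, struct address_space *mapping,
                                 loff_t pos, unsigned len, unsigned flags,
                                 struct page **pagep, void **fsdata)
    {
            return block_write_begin(mapping, pos, len, flags, pagep,
                                     foofs_get_block);
    }

    static const struct address_space_operations foofs_aops = {
            .write_begin = foofs_write_begin,
            .write_end   = generic_write_end,
            /* other hooks elided */
    };
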
2114 int block_is_partially_uptodate(struct page *page, unsigned long from, in block_is_partially_uptodate() argument
2122 if (!page_has_buffers(page)) in block_is_partially_uptodate()
2125 head = page_buffers(page); in block_is_partially_uptodate()
2159 int block_read_full_page(struct page *page, get_block_t *get_block) in block_read_full_page() argument
2161 struct inode *inode = page->mapping->host; in block_read_full_page()
2168 head = create_page_buffers(page, inode, 0); in block_read_full_page()
2172 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); in block_read_full_page()
2190 SetPageError(page); in block_read_full_page()
2193 zero_user(page, i * blocksize, blocksize); in block_read_full_page()
2209 SetPageMappedToDisk(page); in block_read_full_page()
2216 if (!PageError(page)) in block_read_full_page()
2217 SetPageUptodate(page); in block_read_full_page()
2218 unlock_page(page); in block_read_full_page()
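
The read side wires up the same way: block_read_full_page() as the ->readpage workhorse, with block_is_partially_uptodate() letting sub-page reads complete from pages whose relevant buffers are already up to date. A hedged sketch:

    #include <linux/buffer_head.h>
    #include <linux/fs.h>

    int foofs_get_block(struct inode *, sector_t, struct buffer_head *, int);

    static int foofs_readpage(struct file *file, struct page *page)
    {
            return block_read_full_page(page, foofs_get_block);
    }

    static const struct address_space_operations foofs_aops = {
            .readpage              = foofs_readpage,
            .is_partially_uptodate = block_is_partially_uptodate,
            /* other hooks elided */
    };
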
2252 struct page *page; in generic_cont_expand_simple() local
2262 &page, &fsdata); in generic_cont_expand_simple()
2266 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); in generic_cont_expand_simple()
2279 struct page *page; in cont_expand_zero() local
2299 &page, &fsdata); in cont_expand_zero()
2302 zero_user(page, zerofrom, len); in cont_expand_zero()
2304 page, fsdata); in cont_expand_zero()
2333 &page, &fsdata); in cont_expand_zero()
2336 zero_user(page, zerofrom, len); in cont_expand_zero()
2338 page, fsdata); in cont_expand_zero()
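
generic_cont_expand_simple() grows a file by issuing a zero-length write at the new size, which lets cont_write_begin()/cont_expand_zero() zero-fill everything between the old and new EOF (the FAT-style path). Hypothetical usage from a setattr-like path:

    #include <linux/buffer_head.h>
    #include <linux/fs.h>

    static int foofs_extend(struct inode *inode, loff_t newsize)
    {
            if (newsize <= inode->i_size)
                    return 0;
            /* zero-fills the gap through the pagecache before data lands */
            return generic_cont_expand_simple(inode, newsize);
    }
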
2354 struct page **pagep, void **fsdata, in cont_write_begin()
2376 int block_commit_write(struct page *page, unsigned from, unsigned to) in block_commit_write() argument
2378 struct inode *inode = page->mapping->host; in block_commit_write()
2379 __block_commit_write(inode,page,from,to); in block_commit_write()
2405 struct page *page = vmf->page; in __block_page_mkwrite() local
2411 lock_page(page); in __block_page_mkwrite()
2413 if ((page->mapping != inode->i_mapping) || in __block_page_mkwrite()
2414 (page_offset(page) > size)) { in __block_page_mkwrite()
2421 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) in __block_page_mkwrite()
2426 ret = __block_write_begin(page, 0, end, get_block); in __block_page_mkwrite()
2428 ret = block_commit_write(page, 0, end); in __block_page_mkwrite()
2432 set_page_dirty(page); in __block_page_mkwrite()
2433 wait_for_stable_page(page); in __block_page_mkwrite()
2436 unlock_page(page); in __block_page_mkwrite()
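
__block_page_mkwrite() underlies the exported block_page_mkwrite(), which holds a writable mmap fault until the page's blocks are allocated and its buffers committed. Typical wiring for a hypothetical foofs:

    #include <linux/buffer_head.h>
    #include <linux/mm.h>

    int foofs_get_block(struct inode *, sector_t, struct buffer_head *, int);

    static int foofs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            return block_page_mkwrite(vma, vmf, foofs_get_block);
    }

    static const struct vm_operations_struct foofs_vm_ops = {
            .fault        = filemap_fault,
            .page_mkwrite = foofs_page_mkwrite,
    };
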
2476 static void attach_nobh_buffers(struct page *page, struct buffer_head *head) in attach_nobh_buffers() argument
2480 BUG_ON(!PageLocked(page)); in attach_nobh_buffers()
2482 spin_lock(&page->mapping->private_lock); in attach_nobh_buffers()
2485 if (PageDirty(page)) in attach_nobh_buffers()
2491 attach_page_buffers(page, head); in attach_nobh_buffers()
2492 spin_unlock(&page->mapping->private_lock); in attach_nobh_buffers()
2502 struct page **pagep, void **fsdata, in nobh_write_begin()
2509 struct page *page; in nobh_write_begin() local
2523 page = grab_cache_page_write_begin(mapping, index, flags); in nobh_write_begin()
2524 if (!page) in nobh_write_begin()
2526 *pagep = page; in nobh_write_begin()
2529 if (page_has_buffers(page)) { in nobh_write_begin()
2530 ret = __block_write_begin(page, pos, len, get_block); in nobh_write_begin()
2536 if (PageMappedToDisk(page)) in nobh_write_begin()
2548 head = alloc_page_buffers(page, blocksize, 0); in nobh_write_begin()
2554 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); in nobh_write_begin()
2579 if (PageUptodate(page)) { in nobh_write_begin()
2584 zero_user_segments(page, block_start, from, in nobh_write_begin()
2614 SetPageMappedToDisk(page); in nobh_write_begin()
2629 attach_nobh_buffers(page, head); in nobh_write_begin()
2630 page_zero_new_buffers(page, from, to); in nobh_write_begin()
2633 unlock_page(page); in nobh_write_begin()
2634 page_cache_release(page); in nobh_write_begin()
2643 struct page *page, void *fsdata) in nobh_write_end() argument
2645 struct inode *inode = page->mapping->host; in nobh_write_end()
2648 BUG_ON(fsdata != NULL && page_has_buffers(page)); in nobh_write_end()
2651 attach_nobh_buffers(page, head); in nobh_write_end()
2652 if (page_has_buffers(page)) in nobh_write_end()
2654 copied, page, fsdata); in nobh_write_end()
2656 SetPageUptodate(page); in nobh_write_end()
2657 set_page_dirty(page); in nobh_write_end()
2663 unlock_page(page); in nobh_write_end()
2664 page_cache_release(page); in nobh_write_end()
2681 int nobh_writepage(struct page *page, get_block_t *get_block, in nobh_writepage() argument
2684 struct inode * const inode = page->mapping->host; in nobh_writepage()
2691 if (page->index < end_index) in nobh_writepage()
2696 if (page->index >= end_index+1 || !offset) { in nobh_writepage()
2704 if (page->mapping->a_ops->invalidatepage) in nobh_writepage()
2705 page->mapping->a_ops->invalidatepage(page, offset); in nobh_writepage()
2707 unlock_page(page); in nobh_writepage()
2718 zero_user_segment(page, offset, PAGE_CACHE_SIZE); in nobh_writepage()
2720 ret = mpage_writepage(page, get_block, wbc); in nobh_writepage()
2722 ret = __block_write_full_page(inode, page, get_block, wbc, in nobh_writepage()
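
The nobh_* trio mirrors the buffered helpers while keeping buffer_heads off the common path; buffers are attached only when a write fails partway or blocks stay unmapped. Opting in takes ext2-style wiring, sketched for a hypothetical foofs:

    #include <linux/buffer_head.h>
    #include <linux/fs.h>
    #include <linux/writeback.h>

    int foofs_get_block(struct inode *, sector_t, struct buffer_head *, int);

    static int foofs_nobh_write_begin(struct file *file,
                                      struct address_space *mapping,
                                      loff_t pos, unsigned len, unsigned flags,
                                      struct page **pagep, void **fsdata)
    {
            return nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
                                    foofs_get_block);
    }

    static int foofs_nobh_writepage(struct page *page,
                                    struct writeback_control *wbc)
    {
            return nobh_writepage(page, foofs_get_block, wbc);
    }

    static const struct address_space_operations foofs_nobh_aops = {
            .write_begin = foofs_nobh_write_begin,
            .write_end   = nobh_write_end,
            .writepage   = foofs_nobh_writepage,
            /* other hooks elided */
    };
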
2737 struct page *page; in nobh_truncate_page() local
2751 page = grab_cache_page(mapping, index); in nobh_truncate_page()
2753 if (!page) in nobh_truncate_page()
2756 if (page_has_buffers(page)) { in nobh_truncate_page()
2758 unlock_page(page); in nobh_truncate_page()
2759 page_cache_release(page); in nobh_truncate_page()
2780 if (!PageUptodate(page)) { in nobh_truncate_page()
2781 err = mapping->a_ops->readpage(NULL, page); in nobh_truncate_page()
2783 page_cache_release(page); in nobh_truncate_page()
2786 lock_page(page); in nobh_truncate_page()
2787 if (!PageUptodate(page)) { in nobh_truncate_page()
2791 if (page_has_buffers(page)) in nobh_truncate_page()
2794 zero_user(page, offset, length); in nobh_truncate_page()
2795 set_page_dirty(page); in nobh_truncate_page()
2799 unlock_page(page); in nobh_truncate_page()
2800 page_cache_release(page); in nobh_truncate_page()
2815 struct page *page; in block_truncate_page() local
2829 page = grab_cache_page(mapping, index); in block_truncate_page()
2831 if (!page) in block_truncate_page()
2834 if (!page_has_buffers(page)) in block_truncate_page()
2835 create_empty_buffers(page, blocksize, 0); in block_truncate_page()
2838 bh = page_buffers(page); in block_truncate_page()
2858 if (PageUptodate(page)) in block_truncate_page()
2870 zero_user(page, offset, length); in block_truncate_page()
2875 unlock_page(page); in block_truncate_page()
2876 page_cache_release(page); in block_truncate_page()
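
block_truncate_page() zeroes the tail of the final partial block so stale data cannot leak past the new EOF. A hypothetical truncate path around it, in the ext2-style order (zero the tail, shrink i_size, then free blocks):

    #include <linux/buffer_head.h>
    #include <linux/mm.h>

    int foofs_get_block(struct inode *, sector_t, struct buffer_head *, int);
    void foofs_free_blocks(struct inode *inode); /* hypothetical */

    static int foofs_truncate(struct inode *inode, loff_t newsize)
    {
            int err = block_truncate_page(inode->i_mapping, newsize,
                                          foofs_get_block);
            if (err)
                    return err;
            truncate_setsize(inode, newsize); /* i_size + pagecache truncation */
            foofs_free_blocks(inode);         /* release blocks past newsize */
            return 0;
    }
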
2885 int block_write_full_page(struct page *page, get_block_t *get_block, in block_write_full_page() argument
2888 struct inode * const inode = page->mapping->host; in block_write_full_page()
2894 if (page->index < end_index) in block_write_full_page()
2895 return __block_write_full_page(inode, page, get_block, wbc, in block_write_full_page()
2900 if (page->index >= end_index+1 || !offset) { in block_write_full_page()
2906 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); in block_write_full_page()
2907 unlock_page(page); in block_write_full_page()
2918 zero_user_segment(page, offset, PAGE_CACHE_SIZE); in block_write_full_page()
2919 return __block_write_full_page(inode, page, get_block, wbc, in block_write_full_page()
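
block_write_full_page() is the stock ->writepage for get_block-based filesystems, with the EOF page handled as shown above (pages fully past EOF are invalidated, the straddling page is tail-zeroed). Wiring, sketched:

    #include <linux/buffer_head.h>
    #include <linux/writeback.h>

    int foofs_get_block(struct inode *, sector_t, struct buffer_head *, int);

    static int foofs_writepage(struct page *page, struct writeback_control *wbc)
    {
            return block_write_full_page(page, foofs_get_block, wbc);
    }
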
3186 drop_buffers(struct page *page, struct buffer_head **buffers_to_free) in drop_buffers() argument
3188 struct buffer_head *head = page_buffers(page); in drop_buffers()
3193 if (buffer_write_io_error(bh) && page->mapping) in drop_buffers()
3194 set_bit(AS_EIO, &page->mapping->flags); in drop_buffers()
3208 __clear_page_buffers(page); in drop_buffers()
3214 int try_to_free_buffers(struct page *page) in try_to_free_buffers() argument
3216 struct address_space * const mapping = page->mapping; in try_to_free_buffers()
3220 BUG_ON(!PageLocked(page)); in try_to_free_buffers()
3221 if (PageWriteback(page)) in try_to_free_buffers()
3225 ret = drop_buffers(page, &buffers_to_free); in try_to_free_buffers()
3230 ret = drop_buffers(page, &buffers_to_free); in try_to_free_buffers()
3246 if (ret && TestClearPageDirty(page)) in try_to_free_buffers()
3247 account_page_cleaned(page, mapping); in try_to_free_buffers()
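
try_to_free_buffers() backs try_to_release_page(): when an a_ops leaves ->releasepage NULL, the VM calls it directly, and it succeeds only once every buffer is clean, unlocked, and unpinned. A filesystem with extra page-private state wraps it instead; a hedged sketch (the PageChecked() test is a stand-in for whatever private state foofs tracks):

    #include <linux/buffer_head.h>

    static int foofs_releasepage(struct page *page, gfp_t gfp_mask)
    {
            if (PageChecked(page))  /* still referenced by foofs */
                    return 0;       /* refuse: page cannot be released */
            return try_to_free_buffers(page);
    }
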