Lines matching refs:page (all hits below are in fs/buffer.c; the leading number on each line is that file's own line number)

87 void buffer_check_dirty_writeback(struct page *page,  in buffer_check_dirty_writeback()  argument
94 BUG_ON(!PageLocked(page)); in buffer_check_dirty_writeback()
96 if (!page_has_buffers(page)) in buffer_check_dirty_writeback()
99 if (PageWriteback(page)) in buffer_check_dirty_writeback()
102 head = page_buffers(page); in buffer_check_dirty_writeback()
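
buffer_check_dirty_writeback() is how page reclaim asks, for a locked page that carries buffer heads, whether any buffer is dirty or under writeback. A filesystem can export it through the is_dirty_writeback address_space operation; a minimal sketch (myfs_aops is hypothetical, not part of this listing):

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    static const struct address_space_operations myfs_aops = {
            /* reclaim probes buffer state through this hook */
            .is_dirty_writeback = buffer_check_dirty_writeback,
    };
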
128 __clear_page_buffers(struct page *page) in __clear_page_buffers() argument
130 ClearPagePrivate(page); in __clear_page_buffers()
131 set_page_private(page, 0); in __clear_page_buffers()
132 page_cache_release(page); in __clear_page_buffers()
210 struct page *page; in __find_get_block_slow() local
214 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); in __find_get_block_slow()
215 if (!page) in __find_get_block_slow()
219 if (!page_has_buffers(page)) in __find_get_block_slow()
221 head = page_buffers(page); in __find_get_block_slow()
253 page_cache_release(page); in __find_get_block_slow()
288 struct page *page; in end_buffer_async_read() local
293 page = bh->b_page; in end_buffer_async_read()
299 SetPageError(page); in end_buffer_async_read()
307 first = page_buffers(page); in end_buffer_async_read()
329 if (page_uptodate && !PageError(page)) in end_buffer_async_read()
330 SetPageUptodate(page); in end_buffer_async_read()
331 unlock_page(page); in end_buffer_async_read()
349 struct page *page; in end_buffer_async_write() local
353 page = bh->b_page; in end_buffer_async_write()
358 set_bit(AS_EIO, &page->mapping->flags); in end_buffer_async_write()
361 SetPageError(page); in end_buffer_async_write()
364 first = page_buffers(page); in end_buffer_async_write()
380 end_page_writeback(page); in end_buffer_async_write()
633 static void __set_page_dirty(struct page *page, struct address_space *mapping, in __set_page_dirty() argument
639 if (page->mapping) { /* Race with truncate? */ in __set_page_dirty()
640 WARN_ON_ONCE(warn && !PageUptodate(page)); in __set_page_dirty()
641 account_page_dirtied(page, mapping, memcg); in __set_page_dirty()
643 page_index(page), PAGECACHE_TAG_DIRTY); in __set_page_dirty()
673 int __set_page_dirty_buffers(struct page *page) in __set_page_dirty_buffers() argument
677 struct address_space *mapping = page_mapping(page); in __set_page_dirty_buffers()
680 return !TestSetPageDirty(page); in __set_page_dirty_buffers()
683 if (page_has_buffers(page)) { in __set_page_dirty_buffers()
684 struct buffer_head *head = page_buffers(page); in __set_page_dirty_buffers()
696 memcg = mem_cgroup_begin_page_stat(page); in __set_page_dirty_buffers()
697 newly_dirty = !TestSetPageDirty(page); in __set_page_dirty_buffers()
701 __set_page_dirty(page, mapping, memcg, 1); in __set_page_dirty_buffers()
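
__set_page_dirty_buffers() dirties every buffer attached to the page under mapping->private_lock before setting the page's own dirty bit (note the mem_cgroup_begin_page_stat() bracket around TestSetPageDirty at lines 696-697). Buffer-based filesystems typically install it verbatim as their set_page_dirty method; a sketch:

    static const struct address_space_operations myfs_aops = {
            .set_page_dirty = __set_page_dirty_buffers,
    };
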
872 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, in alloc_page_buffers() argument
893 set_bh_page(bh, page, offset); in alloc_page_buffers()
929 link_dev_buffers(struct page *page, struct buffer_head *head) in link_dev_buffers() argument
939 attach_page_buffers(page, head); in link_dev_buffers()
958 init_page_buffers(struct page *page, struct block_device *bdev, in init_page_buffers() argument
961 struct buffer_head *head = page_buffers(page); in init_page_buffers()
963 int uptodate = PageUptodate(page); in init_page_buffers()
996 struct page *page; in grow_dev_page() local
1012 page = find_or_create_page(inode->i_mapping, index, gfp_mask); in grow_dev_page()
1013 if (!page) in grow_dev_page()
1016 BUG_ON(!PageLocked(page)); in grow_dev_page()
1018 if (page_has_buffers(page)) { in grow_dev_page()
1019 bh = page_buffers(page); in grow_dev_page()
1021 end_block = init_page_buffers(page, bdev, in grow_dev_page()
1026 if (!try_to_free_buffers(page)) in grow_dev_page()
1033 bh = alloc_page_buffers(page, size, 0); in grow_dev_page()
1043 link_dev_buffers(page, bh); in grow_dev_page()
1044 end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits, in grow_dev_page()
1050 unlock_page(page); in grow_dev_page()
1051 page_cache_release(page); in grow_dev_page()
1177 struct page *page = bh->b_page; in mark_buffer_dirty() local
1181 memcg = mem_cgroup_begin_page_stat(page); in mark_buffer_dirty()
1182 if (!TestSetPageDirty(page)) { in mark_buffer_dirty()
1183 mapping = page_mapping(page); in mark_buffer_dirty()
1185 __set_page_dirty(page, mapping, memcg, 0); in mark_buffer_dirty()
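
mark_buffer_dirty() is the normal way to dirty a metadata block: it sets the buffer's dirty bit and, through __set_page_dirty() above, propagates that to the buffer's page and its mapping. A minimal sketch of the common pattern, where struct myfs_super and MYFS_SB_BLOCK are assumptions:

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    struct myfs_super { __le32 s_state; };  /* hypothetical disk layout */
    #define MYFS_SB_BLOCK 1                 /* hypothetical block number */

    static int myfs_update_sb_state(struct super_block *sb, u32 state)
    {
            struct buffer_head *bh = sb_bread(sb, MYFS_SB_BLOCK);
            int err;

            if (!bh)
                    return -EIO;
            lock_buffer(bh);
            ((struct myfs_super *)bh->b_data)->s_state = cpu_to_le32(state);
            unlock_buffer(bh);
            mark_buffer_dirty(bh);          /* dirties bh, page, mapping */
            err = sync_dirty_buffer(bh);    /* optional: write synchronously */
            brelse(bh);
            return err;
    }
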
1476 struct page *page, unsigned long offset) in set_bh_page() argument
1478 bh->b_page = page; in set_bh_page()
1480 if (PageHighMem(page)) in set_bh_page()
1486 bh->b_data = page_address(page) + offset; in set_bh_page()
1533 void block_invalidatepage(struct page *page, unsigned int offset, in block_invalidatepage() argument
1540 BUG_ON(!PageLocked(page)); in block_invalidatepage()
1541 if (!page_has_buffers(page)) in block_invalidatepage()
1549 head = page_buffers(page); in block_invalidatepage()
1576 try_to_release_page(page, 0); in block_invalidatepage()
1588 void create_empty_buffers(struct page *page, in create_empty_buffers() argument
1593 head = alloc_page_buffers(page, blocksize, 1); in create_empty_buffers()
1602 spin_lock(&page->mapping->private_lock); in create_empty_buffers()
1603 if (PageUptodate(page) || PageDirty(page)) { in create_empty_buffers()
1606 if (PageDirty(page)) in create_empty_buffers()
1608 if (PageUptodate(page)) in create_empty_buffers()
1613 attach_page_buffers(page, head); in create_empty_buffers()
1614 spin_unlock(&page->mapping->private_lock); in create_empty_buffers()
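
create_empty_buffers() gives a page its ring of buffer heads and, under mapping->private_lock, copies the page's uptodate/dirty state into each new buffer. Callers pair it with a page_has_buffers() check, exactly as create_page_buffers() does just below; the idiom, as a sketch:

    /* ensure a locked page has buffers of the inode's block size */
    static struct buffer_head *myfs_page_buffers(struct page *page,
                                                 struct inode *inode)
    {
            BUG_ON(!PageLocked(page));
            if (!page_has_buffers(page))
                    create_empty_buffers(page, 1 << inode->i_blkbits, 0);
            return page_buffers(page);
    }
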
1663 static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state) in create_page_buffers() argument
1665 BUG_ON(!PageLocked(page)); in create_page_buffers()
1667 if (!page_has_buffers(page)) in create_page_buffers()
1668 create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state); in create_page_buffers()
1669 return page_buffers(page); in create_page_buffers()
1701 static int __block_write_full_page(struct inode *inode, struct page *page, in __block_write_full_page() argument
1713 head = create_page_buffers(page, inode, in __block_write_full_page()
1730 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); in __block_write_full_page()
1780 redirty_page_for_writepage(wbc, page); in __block_write_full_page()
1794 BUG_ON(PageWriteback(page)); in __block_write_full_page()
1795 set_page_writeback(page); in __block_write_full_page()
1805 unlock_page(page); in __block_write_full_page()
1815 end_page_writeback(page); in __block_write_full_page()
1846 SetPageError(page); in __block_write_full_page()
1847 BUG_ON(PageWriteback(page)); in __block_write_full_page()
1848 mapping_set_error(page->mapping, err); in __block_write_full_page()
1849 set_page_writeback(page); in __block_write_full_page()
1859 unlock_page(page); in __block_write_full_page()
1868 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to) in page_zero_new_buffers() argument
1873 BUG_ON(!PageLocked(page)); in page_zero_new_buffers()
1874 if (!page_has_buffers(page)) in page_zero_new_buffers()
1877 bh = head = page_buffers(page); in page_zero_new_buffers()
1884 if (!PageUptodate(page)) { in page_zero_new_buffers()
1890 zero_user(page, start, size); in page_zero_new_buffers()
1905 int __block_write_begin(struct page *page, loff_t pos, unsigned len, in __block_write_begin() argument
1910 struct inode *inode = page->mapping->host; in __block_write_begin()
1917 BUG_ON(!PageLocked(page)); in __block_write_begin()
1922 head = create_page_buffers(page, inode, 0); in __block_write_begin()
1926 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); in __block_write_begin()
1932 if (PageUptodate(page)) { in __block_write_begin()
1948 if (PageUptodate(page)) { in __block_write_begin()
1955 zero_user_segments(page, in __block_write_begin()
1961 if (PageUptodate(page)) { in __block_write_begin()
1982 page_zero_new_buffers(page, from, to); in __block_write_begin()
1987 static int __block_commit_write(struct inode *inode, struct page *page, in __block_commit_write() argument
1995 bh = head = page_buffers(page); in __block_commit_write()
2021 SetPageUptodate(page); in __block_commit_write()
2032 unsigned flags, struct page **pagep, get_block_t *get_block) in block_write_begin()
2035 struct page *page; in block_write_begin() local
2038 page = grab_cache_page_write_begin(mapping, index, flags); in block_write_begin()
2039 if (!page) in block_write_begin()
2042 status = __block_write_begin(page, pos, len, get_block); in block_write_begin()
2044 unlock_page(page); in block_write_begin()
2045 page_cache_release(page); in block_write_begin()
2046 page = NULL; in block_write_begin()
2049 *pagep = page; in block_write_begin()
2056 struct page *page, void *fsdata) in block_write_end() argument
2076 if (!PageUptodate(page)) in block_write_end()
2079 page_zero_new_buffers(page, start+copied, start+len); in block_write_end()
2081 flush_dcache_page(page); in block_write_end()
2084 __block_commit_write(inode, page, start, start+copied); in block_write_end()
2092 struct page *page, void *fsdata) in generic_write_end() argument
2098 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); in generic_write_end()
2112 unlock_page(page); in generic_write_end()
2113 page_cache_release(page); in generic_write_end()
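
block_write_begin(), block_write_end() and generic_write_end() are the stock buffered-write path: write_begin grabs the page and maps/reads the affected buffers, write_end commits the copied bytes, and generic_write_end additionally updates i_size and unlocks. A simple block filesystem can adopt them wholesale; a sketch, with myfs_get_block an assumed block-mapping callback:

    static int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create);

    static int myfs_write_begin(struct file *file,
                                struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
    {
            return block_write_begin(mapping, pos, len, flags, pagep,
                                     myfs_get_block);
    }

    static const struct address_space_operations myfs_aops = {
            .write_begin = myfs_write_begin,
            .write_end   = generic_write_end,  /* updates i_size, unlocks */
    };
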
2137 int block_is_partially_uptodate(struct page *page, unsigned long from, in block_is_partially_uptodate() argument
2145 if (!page_has_buffers(page)) in block_is_partially_uptodate()
2148 head = page_buffers(page); in block_is_partially_uptodate()
2182 int block_read_full_page(struct page *page, get_block_t *get_block) in block_read_full_page() argument
2184 struct inode *inode = page->mapping->host; in block_read_full_page()
2191 head = create_page_buffers(page, inode, 0); in block_read_full_page()
2195 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); in block_read_full_page()
2213 SetPageError(page); in block_read_full_page()
2216 zero_user(page, i * blocksize, blocksize); in block_read_full_page()
2232 SetPageMappedToDisk(page); in block_read_full_page()
2239 if (!PageError(page)) in block_read_full_page()
2240 SetPageUptodate(page); in block_read_full_page()
2241 unlock_page(page); in block_read_full_page()
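
block_read_full_page() implements ->readpage on top of get_block: it creates buffers, maps each block, zeroes holes, submits reads for the rest, and lets end_buffer_async_read() above unlock the page when the last buffer completes. Wiring it up is a one-liner; a sketch:

    static int myfs_readpage(struct file *file, struct page *page)
    {
            return block_read_full_page(page, myfs_get_block);
    }
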
2275 struct page *page; in generic_cont_expand_simple() local
2285 &page, &fsdata); in generic_cont_expand_simple()
2289 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); in generic_cont_expand_simple()
2302 struct page *page; in cont_expand_zero() local
2322 &page, &fsdata); in cont_expand_zero()
2325 zero_user(page, zerofrom, len); in cont_expand_zero()
2327 page, fsdata); in cont_expand_zero()
2356 &page, &fsdata); in cont_expand_zero()
2359 zero_user(page, zerofrom, len); in cont_expand_zero()
2361 page, fsdata); in cont_expand_zero()
2377 struct page **pagep, void **fsdata, in cont_write_begin()
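
generic_cont_expand_simple() and cont_expand_zero() serve filesystems that cannot represent holes (FAT is the classic case): before data is written past the current end of file, every byte in between is zero-filled page by page through pagecache_write_begin()/pagecache_write_end(). cont_write_begin() packages this for ->write_begin; its last argument points at the filesystem's per-inode count of bytes initialised so far. A sketch with an assumed myfs_inode_info:

    struct myfs_inode_info {
            loff_t       i_filled;      /* bytes zero-filled so far (assumed) */
            struct inode vfs_inode;
    };

    static inline struct myfs_inode_info *MYFS_I(struct inode *inode)
    {
            return container_of(inode, struct myfs_inode_info, vfs_inode);
    }

    static int myfs_cont_write_begin(struct file *file,
                                     struct address_space *mapping,
                                     loff_t pos, unsigned len, unsigned flags,
                                     struct page **pagep, void **fsdata)
    {
            return cont_write_begin(file, mapping, pos, len, flags, pagep,
                                    fsdata, myfs_get_block,
                                    &MYFS_I(mapping->host)->i_filled);
    }
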
2399 int block_commit_write(struct page *page, unsigned from, unsigned to) in block_commit_write() argument
2401 struct inode *inode = page->mapping->host; in block_commit_write()
2402 __block_commit_write(inode,page,from,to); in block_commit_write()
2428 struct page *page = vmf->page; in block_page_mkwrite() local
2434 lock_page(page); in block_page_mkwrite()
2436 if ((page->mapping != inode->i_mapping) || in block_page_mkwrite()
2437 (page_offset(page) > size)) { in block_page_mkwrite()
2444 if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) in block_page_mkwrite()
2449 ret = __block_write_begin(page, 0, end, get_block); in block_page_mkwrite()
2451 ret = block_commit_write(page, 0, end); in block_page_mkwrite()
2455 set_page_dirty(page); in block_page_mkwrite()
2456 wait_for_stable_page(page); in block_page_mkwrite()
2459 unlock_page(page); in block_page_mkwrite()
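
block_page_mkwrite() handles a write fault on a shared mapping: it locks the page, revalidates it against a racing truncate, allocates and maps blocks via __block_write_begin(), dirties the page and waits for it to become write-stable. In the kernel vintage of this listing it returns 0 or -errno with the page left locked on success, so the caller supplies the freeze protection and the VM_FAULT conversion; a sketch:

    static int myfs_page_mkwrite(struct vm_area_struct *vma,
                                 struct vm_fault *vmf)
    {
            struct super_block *sb = file_inode(vma->vm_file)->i_sb;
            int err;

            sb_start_pagefault(sb);         /* hold off fs freezing */
            err = block_page_mkwrite(vma, vmf, myfs_get_block);
            sb_end_pagefault(sb);
            return block_page_mkwrite_return(err);  /* 0 -> VM_FAULT_LOCKED */
    }
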
2479 static void attach_nobh_buffers(struct page *page, struct buffer_head *head) in attach_nobh_buffers() argument
2483 BUG_ON(!PageLocked(page)); in attach_nobh_buffers()
2485 spin_lock(&page->mapping->private_lock); in attach_nobh_buffers()
2488 if (PageDirty(page)) in attach_nobh_buffers()
2494 attach_page_buffers(page, head); in attach_nobh_buffers()
2495 spin_unlock(&page->mapping->private_lock); in attach_nobh_buffers()
2505 struct page **pagep, void **fsdata, in nobh_write_begin()
2512 struct page *page; in nobh_write_begin() local
2526 page = grab_cache_page_write_begin(mapping, index, flags); in nobh_write_begin()
2527 if (!page) in nobh_write_begin()
2529 *pagep = page; in nobh_write_begin()
2532 if (page_has_buffers(page)) { in nobh_write_begin()
2533 ret = __block_write_begin(page, pos, len, get_block); in nobh_write_begin()
2539 if (PageMappedToDisk(page)) in nobh_write_begin()
2551 head = alloc_page_buffers(page, blocksize, 0); in nobh_write_begin()
2557 block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); in nobh_write_begin()
2582 if (PageUptodate(page)) { in nobh_write_begin()
2587 zero_user_segments(page, block_start, from, in nobh_write_begin()
2617 SetPageMappedToDisk(page); in nobh_write_begin()
2632 attach_nobh_buffers(page, head); in nobh_write_begin()
2633 page_zero_new_buffers(page, from, to); in nobh_write_begin()
2636 unlock_page(page); in nobh_write_begin()
2637 page_cache_release(page); in nobh_write_begin()
2646 struct page *page, void *fsdata) in nobh_write_end() argument
2648 struct inode *inode = page->mapping->host; in nobh_write_end()
2651 BUG_ON(fsdata != NULL && page_has_buffers(page)); in nobh_write_end()
2654 attach_nobh_buffers(page, head); in nobh_write_end()
2655 if (page_has_buffers(page)) in nobh_write_end()
2657 copied, page, fsdata); in nobh_write_end()
2659 SetPageUptodate(page); in nobh_write_end()
2660 set_page_dirty(page); in nobh_write_end()
2666 unlock_page(page); in nobh_write_end()
2667 page_cache_release(page); in nobh_write_end()
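
The nobh_* variants implement the same write path while avoiding buffer heads where possible (ext2's "nobh" mode was the classic user); attach_nobh_buffers() only attaches them when a short copy leaves newly allocated blocks that still need zeroing. A sketch of the pairing:

    static int myfs_nobh_write_begin(struct file *file,
                                     struct address_space *mapping,
                                     loff_t pos, unsigned len, unsigned flags,
                                     struct page **pagep, void **fsdata)
    {
            return nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
                                    myfs_get_block);
    }

    static const struct address_space_operations myfs_nobh_aops = {
            .write_begin = myfs_nobh_write_begin,
            .write_end   = nobh_write_end,
    };
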
2684 int nobh_writepage(struct page *page, get_block_t *get_block, in nobh_writepage() argument
2687 struct inode * const inode = page->mapping->host; in nobh_writepage()
2694 if (page->index < end_index) in nobh_writepage()
2699 if (page->index >= end_index+1 || !offset) { in nobh_writepage()
2707 if (page->mapping->a_ops->invalidatepage) in nobh_writepage()
2708 page->mapping->a_ops->invalidatepage(page, offset); in nobh_writepage()
2710 unlock_page(page); in nobh_writepage()
2721 zero_user_segment(page, offset, PAGE_CACHE_SIZE); in nobh_writepage()
2723 ret = mpage_writepage(page, get_block, wbc); in nobh_writepage()
2725 ret = __block_write_full_page(inode, page, get_block, wbc, in nobh_writepage()
2740 struct page *page; in nobh_truncate_page() local
2754 page = grab_cache_page(mapping, index); in nobh_truncate_page()
2756 if (!page) in nobh_truncate_page()
2759 if (page_has_buffers(page)) { in nobh_truncate_page()
2761 unlock_page(page); in nobh_truncate_page()
2762 page_cache_release(page); in nobh_truncate_page()
2783 if (!PageUptodate(page)) { in nobh_truncate_page()
2784 err = mapping->a_ops->readpage(NULL, page); in nobh_truncate_page()
2786 page_cache_release(page); in nobh_truncate_page()
2789 lock_page(page); in nobh_truncate_page()
2790 if (!PageUptodate(page)) { in nobh_truncate_page()
2794 if (page_has_buffers(page)) in nobh_truncate_page()
2797 zero_user(page, offset, length); in nobh_truncate_page()
2798 set_page_dirty(page); in nobh_truncate_page()
2802 unlock_page(page); in nobh_truncate_page()
2803 page_cache_release(page); in nobh_truncate_page()
2818 struct page *page; in block_truncate_page() local
2832 page = grab_cache_page(mapping, index); in block_truncate_page()
2834 if (!page) in block_truncate_page()
2837 if (!page_has_buffers(page)) in block_truncate_page()
2838 create_empty_buffers(page, blocksize, 0); in block_truncate_page()
2841 bh = page_buffers(page); in block_truncate_page()
2861 if (PageUptodate(page)) in block_truncate_page()
2873 zero_user(page, offset, length); in block_truncate_page()
2878 unlock_page(page); in block_truncate_page()
2879 page_cache_release(page); in block_truncate_page()
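
block_truncate_page() zeroes the tail of the block that straddles the new end of file, reading it first if it is not uptodate, so stale bytes cannot resurface if the file is later extended. Typical use in a truncate path (a sketch; myfs_free_blocks is assumed):

    static void myfs_free_blocks(struct inode *inode, loff_t newsize);

    static int myfs_truncate(struct inode *inode, loff_t newsize)
    {
            int err = block_truncate_page(inode->i_mapping, newsize,
                                          myfs_get_block);
            if (err)
                    return err;
            truncate_setsize(inode, newsize);
            myfs_free_blocks(inode, newsize);  /* fs-specific block freeing */
            return 0;
    }
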
2888 int block_write_full_page(struct page *page, get_block_t *get_block, in block_write_full_page() argument
2891 struct inode * const inode = page->mapping->host; in block_write_full_page()
2897 if (page->index < end_index) in block_write_full_page()
2898 return __block_write_full_page(inode, page, get_block, wbc, in block_write_full_page()
2903 if (page->index >= end_index+1 || !offset) { in block_write_full_page()
2909 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); in block_write_full_page()
2910 unlock_page(page); in block_write_full_page()
2921 zero_user_segment(page, offset, PAGE_CACHE_SIZE); in block_write_full_page()
2922 return __block_write_full_page(inode, page, get_block, wbc, in block_write_full_page()
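
block_write_full_page() is the stock ->writepage: pages fully inside i_size go straight to __block_write_full_page(), the page straddling EOF has its tail zeroed first, and pages wholly beyond EOF are invalidated and skipped. A sketch:

    static int myfs_writepage(struct page *page,
                              struct writeback_control *wbc)
    {
            return block_write_full_page(page, myfs_get_block, wbc);
    }
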
3186 drop_buffers(struct page *page, struct buffer_head **buffers_to_free) in drop_buffers() argument
3188 struct buffer_head *head = page_buffers(page); in drop_buffers()
3193 if (buffer_write_io_error(bh) && page->mapping) in drop_buffers()
3194 set_bit(AS_EIO, &page->mapping->flags); in drop_buffers()
3208 __clear_page_buffers(page); in drop_buffers()
3214 int try_to_free_buffers(struct page *page) in try_to_free_buffers() argument
3216 struct address_space * const mapping = page->mapping; in try_to_free_buffers()
3220 BUG_ON(!PageLocked(page)); in try_to_free_buffers()
3221 if (PageWriteback(page)) in try_to_free_buffers()
3225 ret = drop_buffers(page, &buffers_to_free); in try_to_free_buffers()
3230 ret = drop_buffers(page, &buffers_to_free); in try_to_free_buffers()
3247 cancel_dirty_page(page); in try_to_free_buffers()
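
try_to_free_buffers() strips clean, idle buffers from a page so the page itself can be reclaimed; when a mapping defines no ->releasepage, try_to_release_page() (line 1576 above) falls back to it. Tying the sketches together, a hypothetical buffer-head based operations table built only from helpers in this listing might read:

    static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
    {
            return try_to_free_buffers(page);
    }

    static const struct address_space_operations myfs_aops = {
            .readpage              = myfs_readpage,
            .writepage             = myfs_writepage,
            .write_begin           = myfs_write_begin,
            .write_end             = generic_write_end,
            .set_page_dirty        = __set_page_dirty_buffers,
            .invalidatepage        = block_invalidatepage,
            .releasepage           = myfs_releasepage,
            .is_partially_uptodate = block_is_partially_uptodate,
            .is_dirty_writeback    = buffer_check_dirty_writeback,
    };
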