Lines matching refs: page (fs/btrfs/extent_io.c)

1458 struct page *page; in extent_range_clear_dirty_for_io() local
1461 page = find_get_page(inode->i_mapping, index); in extent_range_clear_dirty_for_io()
1462 BUG_ON(!page); /* Pages should be in the extent_io_tree */ in extent_range_clear_dirty_for_io()
1463 clear_page_dirty_for_io(page); in extent_range_clear_dirty_for_io()
1464 page_cache_release(page); in extent_range_clear_dirty_for_io()
1474 struct page *page; in extent_range_redirty_for_io() local
1477 page = find_get_page(inode->i_mapping, index); in extent_range_redirty_for_io()
1478 BUG_ON(!page); /* Pages should be in the extent_io_tree */ in extent_range_redirty_for_io()
1479 __set_page_dirty_nobuffers(page); in extent_range_redirty_for_io()
1480 account_page_redirty(page); in extent_range_redirty_for_io()
1481 page_cache_release(page); in extent_range_redirty_for_io()
1494 struct page *page; in set_range_writeback() local
1497 page = find_get_page(tree->mapping, index); in set_range_writeback()
1498 BUG_ON(!page); /* Pages should be in the extent_io_tree */ in set_range_writeback()
1499 set_page_writeback(page); in set_range_writeback()
1500 page_cache_release(page); in set_range_writeback()
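The three helpers above (extent_range_clear_dirty_for_io, extent_range_redirty_for_io, set_range_writeback) share one idiom: walk every page-cache index covering a byte range, look each page up, apply a flag operation, and drop the lookup reference. A minimal sketch of the first one, reconstructed around the listed lines; the PAGE_CACHE_SHIFT loop bounds are an assumption, and this is the pre-4.6 page-cache API (page_cache_release rather than put_page):

void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
        unsigned long index = start >> PAGE_CACHE_SHIFT;     /* assumed */
        unsigned long end_index = end >> PAGE_CACHE_SHIFT;   /* assumed */
        struct page *page;

        while (index <= end_index) {
                page = find_get_page(inode->i_mapping, index);
                BUG_ON(!page); /* Pages should be in the extent_io_tree */
                clear_page_dirty_for_io(page);
                page_cache_release(page);       /* drop find_get_page() ref */
                index++;
        }
}

extent_range_redirty_for_io() substitutes __set_page_dirty_nobuffers() plus account_page_redirty(), and set_range_writeback() substitutes set_page_writeback(); the walk is otherwise identical.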
1647 struct page *locked_page, in __unlock_for_delalloc()
1651 struct page *pages[16]; in __unlock_for_delalloc()
1676 struct page *locked_page, in lock_delalloc_pages()
1684 struct page *pages[16]; in lock_delalloc_pages()
1745 struct page *locked_page, u64 *start, in find_lock_delalloc_range()
1824 struct page *locked_page, in extent_clear_unlock_delalloc()
1830 struct page *pages[16]; in extent_clear_unlock_delalloc()
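The delalloc helpers above each declare an on-stack struct page *pages[16], which points to batched lookups rather than one find_get_page() per index. A hedged sketch of that batching shape; walk_range_batched is a hypothetical name, and the use of find_get_pages_contig() is an assumption inferred from the array, not confirmed by the listing:

/* Sketch: apply a per-page operation across a contiguous range, 16 at a time. */
static void walk_range_batched(struct address_space *mapping,
                               unsigned long index, unsigned long end_index)
{
        struct page *pages[16];
        unsigned long nr_pages = end_index - index + 1;
        int ret, i;

        while (nr_pages > 0) {
                ret = find_get_pages_contig(mapping, index,
                                            min_t(unsigned long, nr_pages,
                                                  ARRAY_SIZE(pages)), pages);
                if (!ret)
                        break;  /* delalloc pages are expected to be present */
                for (i = 0; i < ret; i++) {
                        unlock_page(pages[i]);          /* the per-page op */
                        page_cache_release(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
                cond_resched();
        }
}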
2053 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) in check_page_uptodate() argument
2055 u64 start = page_offset(page); in check_page_uptodate()
2058 SetPageUptodate(page); in check_page_uptodate()
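check_page_uptodate() flips PageUptodate only once the extent_io_tree says the page's entire byte range is up to date, so a partially filled page never looks valid. A minimal sketch; the end computation and the test_range_bit() call are assumptions built around the listed start = page_offset(page):

static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
{
        u64 start = page_offset(page);
        u64 end = start + PAGE_CACHE_SIZE - 1;  /* assumed: whole page */

        /* Only set the page flag when every byte of the range is uptodate. */
        if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
                SetPageUptodate(page);
}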
2095 struct page *page, unsigned int pg_offset, int mirror_num) in repair_io_failure() argument
2135 bio_add_page(bio, page, length, pg_offset); in repair_io_failure()
2163 struct page *p = eb->pages[i]; in repair_eb_io_failure()
2180 int clean_io_failure(struct inode *inode, u64 start, struct page *page, in clean_io_failure() argument
2226 failrec->logical, page, in clean_io_failure()
2423 struct page *page, int pg_offset, int icsum, in btrfs_create_repair_bio() argument
2452 bio_add_page(bio, page, failrec->len, pg_offset); in btrfs_create_repair_bio()
2466 struct page *page, u64 start, u64 end, in bio_readpage_error() argument
2470 struct inode *inode = page->mapping->host; in bio_readpage_error()
2494 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, in bio_readpage_error()
2495 start - page_offset(page), in bio_readpage_error()
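The read-repair path builds a fresh single-page bio aimed at the failed range so it can be resubmitted, typically against a different mirror. A heavily hedged sketch of the construction step only: build_repair_bio is a hypothetical name, and the allocator and field setup are assumptions about this kernel era, not confirmed by the listing.

static struct bio *build_repair_bio(struct bio *failed_bio,
                                    struct io_failure_record *failrec,
                                    struct page *page, int pg_offset)
{
        struct bio *bio;

        bio = btrfs_io_bio_alloc(GFP_NOFS, 1);          /* assumed allocator */
        if (!bio)
                return NULL;

        bio->bi_end_io = failed_bio->bi_end_io;         /* reuse completion */
        bio->bi_iter.bi_sector = failrec->logical >> 9; /* assumed mapping */
        bio_add_page(bio, page, failrec->len, pg_offset);

        return bio;
}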
2519 int end_extent_writepage(struct page *page, int err, u64 start, u64 end) in end_extent_writepage() argument
2525 tree = &BTRFS_I(page->mapping->host)->io_tree; in end_extent_writepage()
2528 ret = tree->ops->writepage_end_io_hook(page, start, in end_extent_writepage()
2535 ClearPageUptodate(page); in end_extent_writepage()
2536 SetPageError(page); in end_extent_writepage()
2538 mapping_set_error(page->mapping, ret); in end_extent_writepage()
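end_extent_writepage() first lets the filesystem's writepage_end_io_hook veto the result, then marks both the page and the whole mapping on failure so fsync() can see the error. A sketch assembled from the listed lines; the hook signature and the error propagation between them are assumptions:

int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
{
        int uptodate = (err == 0);
        struct extent_io_tree *tree;
        int ret = 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        if (tree->ops && tree->ops->writepage_end_io_hook) {
                ret = tree->ops->writepage_end_io_hook(page, start, end,
                                                       NULL, uptodate);
                if (ret)
                        uptodate = 0;
        }

        if (!uptodate) {
                ClearPageUptodate(page);
                SetPageError(page);
                ret = ret < 0 ? ret : -EIO;
                mapping_set_error(page->mapping, ret);  /* surface to fsync */
        }
        return 0;
}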
2560 struct page *page = bvec->bv_page; in end_bio_extent_writepage() local
2569 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, in end_bio_extent_writepage()
2573 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info, in end_bio_extent_writepage()
2579 start = page_offset(page); in end_bio_extent_writepage()
2582 if (end_extent_writepage(page, bio->bi_error, start, end)) in end_bio_extent_writepage()
2585 end_page_writeback(page); in end_bio_extent_writepage()
2631 struct page *page = bvec->bv_page; in end_bio_extent_readpage() local
2632 struct inode *inode = page->mapping->host; in end_bio_extent_readpage()
2646 btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, in end_bio_extent_readpage()
2650 btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info, in end_bio_extent_readpage()
2656 start = page_offset(page); in end_bio_extent_readpage()
2664 page, start, end, in end_bio_extent_readpage()
2669 clean_io_failure(inode, start, page, 0); in end_bio_extent_readpage()
2676 ret = tree->ops->readpage_io_failed_hook(page, mirror); in end_bio_extent_readpage()
2690 ret = bio_readpage_error(bio, offset, page, start, end, in end_bio_extent_readpage()
2706 if (page->index == end_index && off) in end_bio_extent_readpage()
2707 zero_user_segment(page, off, PAGE_CACHE_SIZE); in end_bio_extent_readpage()
2708 SetPageUptodate(page); in end_bio_extent_readpage()
2710 ClearPageUptodate(page); in end_bio_extent_readpage()
2711 SetPageError(page); in end_bio_extent_readpage()
2713 unlock_page(page); in end_bio_extent_readpage()
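For the page that straddles i_size, a successful read must zero the tail beyond EOF before the page is published as up to date; a failed read flips PageError instead, and either way the page is unlocked. A sketch of that per-page epilogue, assuming off is the offset of i_size within its final page:

/* Sketch: per-page completion epilogue of the read endio handler. */
if (likely(uptodate)) {
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned off = i_size & (PAGE_CACHE_SIZE - 1);

        /* Zero the slack past EOF so stale data never leaks to readers. */
        if (page->index == end_index && off)
                zero_user_segment(page, off, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
} else {
        ClearPageUptodate(page);
        SetPageError(page);
}
unlock_page(page);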
2821 struct page *page = bvec->bv_page; in submit_one_bio() local
2825 start = page_offset(page) + bvec->bv_offset; in submit_one_bio()
2832 ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, in submit_one_bio()
2841 static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page, in merge_bio() argument
2847 ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio, in merge_bio()
2856 struct page *page, sector_t sector, in submit_extent_page() argument
2882 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || in submit_extent_page()
2883 bio_add_page(bio, page, page_size, offset) < page_size) { in submit_extent_page()
2893 wbc_account_io(wbc, page, page_size); in submit_extent_page()
2903 bio_add_page(bio, page, page_size, offset); in submit_extent_page()
2908 wbc_account_io(wbc, page, page_size); in submit_extent_page()
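submit_extent_page() tries to append the page to the bio already being assembled; if the merge hook objects or bio_add_page() cannot take the full page_size, the in-flight bio is submitted and a new one is started for this page. A hedged sketch of that decision (contig_ok and prev_bio_flags are stand-in names; the actual contiguity test is not shown in the listing):

/* Sketch: append to the current bio, or flush it and start over. */
if (bio) {
        if (contig_ok &&
            !merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) &&
            bio_add_page(bio, page, page_size, offset) == page_size) {
                if (wbc)
                        wbc_account_io(wbc, page, page_size);
                return 0;       /* merged into the existing bio */
        }
        /* Cannot merge: submit what we have, then build a fresh bio. */
        ret = submit_one_bio(rw, bio, mirror_num, prev_bio_flags);
        bio = NULL;
}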
2920 struct page *page) in attach_extent_buffer_page() argument
2922 if (!PagePrivate(page)) { in attach_extent_buffer_page()
2923 SetPagePrivate(page); in attach_extent_buffer_page()
2924 page_cache_get(page); in attach_extent_buffer_page()
2925 set_page_private(page, (unsigned long)eb); in attach_extent_buffer_page()
2927 WARN_ON(page->private != (unsigned long)eb); in attach_extent_buffer_page()
2931 void set_page_extent_mapped(struct page *page) in set_page_extent_mapped() argument
2933 if (!PagePrivate(page)) { in set_page_extent_mapped()
2934 SetPagePrivate(page); in set_page_extent_mapped()
2935 page_cache_get(page); in set_page_extent_mapped()
2936 set_page_private(page, EXTENT_PAGE_PRIVATE); in set_page_extent_mapped()
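attach_extent_buffer_page() and set_page_extent_mapped() use the same page->private handshake: the first caller sets PagePrivate, takes an extra page reference to pin the page, and stores its token in page->private. This is essentially verbatim from the listed lines, with only the surrounding braces filled in:

/* Pin the page and tag it as belonging to this extent buffer. */
if (!PagePrivate(page)) {
        SetPagePrivate(page);
        page_cache_get(page);   /* the private pointer holds a reference */
        set_page_private(page, (unsigned long)eb);
} else {
        /* Already attached: it had better be ours. */
        WARN_ON(page->private != (unsigned long)eb);
}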
2941 __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, in __get_extent_map() argument
2959 em = get_extent(inode, page, pg_offset, start, len, 0); in __get_extent_map()
2974 struct page *page, in __do_readpage() argument
2981 struct inode *inode = page->mapping->host; in __do_readpage()
2982 u64 start = page_offset(page); in __do_readpage()
3002 set_page_extent_mapped(page); in __do_readpage()
3005 if (!PageUptodate(page)) { in __do_readpage()
3006 if (cleancache_get_page(page) == 0) { in __do_readpage()
3013 if (page->index == last_byte >> PAGE_CACHE_SHIFT) { in __do_readpage()
3019 userpage = kmap_atomic(page); in __do_readpage()
3021 flush_dcache_page(page); in __do_readpage()
3034 userpage = kmap_atomic(page); in __do_readpage()
3036 flush_dcache_page(page); in __do_readpage()
3046 em = __get_extent_map(inode, page, pg_offset, cur, in __do_readpage()
3049 SetPageError(page); in __do_readpage()
3129 userpage = kmap_atomic(page); in __do_readpage()
3131 flush_dcache_page(page); in __do_readpage()
3149 check_page_uptodate(tree, page); in __do_readpage()
3160 SetPageError(page); in __do_readpage()
3168 pnr -= page->index; in __do_readpage()
3169 ret = submit_extent_page(rw, tree, NULL, page, in __do_readpage()
3180 SetPageError(page); in __do_readpage()
3189 if (!PageError(page)) in __do_readpage()
3190 SetPageUptodate(page); in __do_readpage()
3191 unlock_page(page); in __do_readpage()
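__do_readpage() zero-fills any part of the page that maps to a hole or the region past the last byte, using a short-lived atomic mapping. The kmap_atomic() and flush_dcache_page() calls come straight from the listing; the memset() and kunmap_atomic() between them are the assumed standard pattern:

/* Zero a sub-range of the page without a sleeping kmap. */
char *userpage = kmap_atomic(page);
memset(userpage + pg_offset, 0, iosize);        /* iosize: bytes to zero */
flush_dcache_page(page);
kunmap_atomic(userpage);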
3197 struct page *pages[], int nr_pages, in __do_contiguous_readpages()
3229 struct page *pages[], in __extent_readpages()
3271 struct page *page, in __extent_read_full_page() argument
3276 struct inode *inode = page->mapping->host; in __extent_read_full_page()
3278 u64 start = page_offset(page); in __extent_read_full_page()
3292 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num, in __extent_read_full_page()
3297 int extent_read_full_page(struct extent_io_tree *tree, struct page *page, in extent_read_full_page() argument
3304 ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num, in extent_read_full_page()
3311 int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page, in extent_read_full_page_nolock() argument
3318 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num, in extent_read_full_page_nolock()
3325 static noinline void update_nr_written(struct page *page, in update_nr_written() argument
3332 page->mapping->writeback_index = page->index + nr_written; in update_nr_written()
3346 struct page *page, struct writeback_control *wbc, in writepage_delalloc() argument
3364 page, in writepage_delalloc()
3372 ret = tree->ops->fill_delalloc(inode, page, in writepage_delalloc()
3379 SetPageError(page); in writepage_delalloc()
3435 struct page *page, in __extent_writepage_io() argument
3443 u64 start = page_offset(page); in __extent_writepage_io()
3461 ret = tree->ops->writepage_start_hook(page, start, in __extent_writepage_io()
3468 redirty_page_for_writepage(wbc, page); in __extent_writepage_io()
3470 update_nr_written(page, wbc, nr_written); in __extent_writepage_io()
3471 unlock_page(page); in __extent_writepage_io()
3481 update_nr_written(page, wbc, nr_written + 1); in __extent_writepage_io()
3486 tree->ops->writepage_end_io_hook(page, start, in __extent_writepage_io()
3497 tree->ops->writepage_end_io_hook(page, cur, in __extent_writepage_io()
3501 em = epd->get_extent(inode, page, pg_offset, cur, in __extent_writepage_io()
3504 SetPageError(page); in __extent_writepage_io()
3534 tree->ops->writepage_end_io_hook(page, cur, in __extent_writepage_io()
3551 ret = tree->ops->writepage_io_hook(page, cur, in __extent_writepage_io()
3557 SetPageError(page); in __extent_writepage_io()
3562 if (!PageWriteback(page)) { in __extent_writepage_io()
3565 page->index, cur, end); in __extent_writepage_io()
3568 ret = submit_extent_page(write_flags, tree, wbc, page, in __extent_writepage_io()
3574 SetPageError(page); in __extent_writepage_io()
3596 static int __extent_writepage(struct page *page, struct writeback_control *wbc, in __extent_writepage() argument
3599 struct inode *inode = page->mapping->host; in __extent_writepage()
3601 u64 start = page_offset(page); in __extent_writepage()
3616 trace___extent_writepage(page, inode, wbc); in __extent_writepage()
3618 WARN_ON(!PageLocked(page)); in __extent_writepage()
3620 ClearPageError(page); in __extent_writepage()
3623 if (page->index > end_index || in __extent_writepage()
3624 (page->index == end_index && !pg_offset)) { in __extent_writepage()
3625 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); in __extent_writepage()
3626 unlock_page(page); in __extent_writepage()
3630 if (page->index == end_index) { in __extent_writepage()
3633 userpage = kmap_atomic(page); in __extent_writepage()
3637 flush_dcache_page(page); in __extent_writepage()
3642 set_page_extent_mapped(page); in __extent_writepage()
3644 ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written); in __extent_writepage()
3650 ret = __extent_writepage_io(inode, page, wbc, epd, in __extent_writepage()
3658 set_page_writeback(page); in __extent_writepage()
3659 end_page_writeback(page); in __extent_writepage()
3661 if (PageError(page)) { in __extent_writepage()
3663 end_extent_writepage(page, ret, start, page_end); in __extent_writepage()
3665 unlock_page(page); in __extent_writepage()
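__extent_writepage() discards pages wholly past EOF via invalidatepage(), and for the page containing i_size zeroes everything after the EOF offset so uninitialized bytes never reach disk. A sketch of the EOF-page branch, reconstructed around the listed kmap_atomic/flush_dcache_page lines; the offset arithmetic is an assumption:

/* Sketch: scrub the tail of the page that contains i_size. */
if (page->index == end_index) {
        size_t pg_offset = i_size & (PAGE_CACHE_SIZE - 1);      /* assumed */
        char *userpage = kmap_atomic(page);

        memset(userpage + pg_offset, 0, PAGE_CACHE_SIZE - pg_offset);
        kunmap_atomic(userpage);
        flush_dcache_page(page);
}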
3735 struct page *p = eb->pages[i]; in lock_extent_buffer_for_io()
3756 static void set_btree_ioerr(struct page *page) in set_btree_ioerr() argument
3758 struct extent_buffer *eb = (struct extent_buffer *)page->private; in set_btree_ioerr()
3761 SetPageError(page); in set_btree_ioerr()
3825 struct page *page = bvec->bv_page; in end_bio_extent_buffer_writepage() local
3827 eb = (struct extent_buffer *)page->private; in end_bio_extent_buffer_writepage()
3833 ClearPageUptodate(page); in end_bio_extent_buffer_writepage()
3834 set_btree_ioerr(page); in end_bio_extent_buffer_writepage()
3837 end_page_writeback(page); in end_bio_extent_buffer_writepage()
3868 struct page *p = eb->pages[i]; in write_one_eb()
3892 struct page *p = eb->pages[i]; in write_one_eb()
3947 struct page *page = pvec.pages[i]; in btree_write_cache_pages() local
3949 if (!PagePrivate(page)) in btree_write_cache_pages()
3952 if (!wbc->range_cyclic && page->index > end) { in btree_write_cache_pages()
3958 if (!PagePrivate(page)) { in btree_write_cache_pages()
3963 eb = (struct extent_buffer *)page->private; in btree_write_cache_pages()
4091 struct page *page = pvec.pages[i]; in extent_write_cache_pages() local
4100 if (!trylock_page(page)) { in extent_write_cache_pages()
4102 lock_page(page); in extent_write_cache_pages()
4105 if (unlikely(page->mapping != mapping)) { in extent_write_cache_pages()
4106 unlock_page(page); in extent_write_cache_pages()
4110 if (!wbc->range_cyclic && page->index > end) { in extent_write_cache_pages()
4112 unlock_page(page); in extent_write_cache_pages()
4117 if (PageWriteback(page)) in extent_write_cache_pages()
4119 wait_on_page_writeback(page); in extent_write_cache_pages()
4122 if (PageWriteback(page) || in extent_write_cache_pages()
4123 !clear_page_dirty_for_io(page)) { in extent_write_cache_pages()
4124 unlock_page(page); in extent_write_cache_pages()
4128 ret = (*writepage)(page, wbc, data); in extent_write_cache_pages()
4131 unlock_page(page); in extent_write_cache_pages()
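extent_write_cache_pages() follows the classic write_cache_pages() shape: opportunistic trylock, revalidation after a blocking lock (the page may have been truncated meanwhile), range and writeback checks, and a call to the writepage callback only once clear_page_dirty_for_io() succeeds. A condensed sketch of the per-page loop body built from the listed lines; the flush-before-blocking step and the sync-mode handling are simplified assumptions:

/* Sketch: per-page body of the write-cache walk. */
if (!trylock_page(page)) {
        flush_write_bio(epd);   /* assumed: flush before sleeping on lock */
        lock_page(page);
}
if (unlikely(page->mapping != mapping)) {       /* truncated or migrated */
        unlock_page(page);
        continue;
}
if (!wbc->range_cyclic && page->index > end) {
        done = 1;
        unlock_page(page);
        continue;
}
if (wbc->sync_mode != WB_SYNC_NONE)
        wait_on_page_writeback(page);
if (PageWriteback(page) || !clear_page_dirty_for_io(page)) {
        unlock_page(page);
        continue;
}
ret = (*writepage)(page, wbc, data);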
4181 int extent_write_full_page(struct extent_io_tree *tree, struct page *page, in extent_write_full_page() argument
4195 ret = __extent_writepage(page, wbc, &epd); in extent_write_full_page()
4207 struct page *page; in extent_write_locked_range() local
4227 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); in extent_write_locked_range()
4228 if (clear_page_dirty_for_io(page)) in extent_write_locked_range()
4229 ret = __extent_writepage(page, &wbc_writepages, &epd); in extent_write_locked_range()
4232 tree->ops->writepage_end_io_hook(page, start, in extent_write_locked_range()
4235 unlock_page(page); in extent_write_locked_range()
4237 page_cache_release(page); in extent_write_locked_range()
4275 struct page *pagepool[16]; in extent_readpages()
4276 struct page *page; in extent_readpages() local
4282 page = list_entry(pages->prev, struct page, lru); in extent_readpages()
4284 prefetchw(&page->flags); in extent_readpages()
4285 list_del(&page->lru); in extent_readpages()
4286 if (add_to_page_cache_lru(page, mapping, in extent_readpages()
4287 page->index, GFP_NOFS)) { in extent_readpages()
4288 page_cache_release(page); in extent_readpages()
4292 pagepool[nr++] = page; in extent_readpages()
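extent_readpages() drains the readahead list back to front, inserts each page into the page cache, and collects survivors in a 16-slot pool so contiguous runs can be issued together. A sketch of the collection loop from the listed lines; flush_batch is a hypothetical stand-in for the real submission via __extent_readpages():

/* Sketch: batch readahead pages into a 16-slot pool. */
struct page *pagepool[16];
int nr = 0;

while (!list_empty(pages)) {
        page = list_entry(pages->prev, struct page, lru);

        prefetchw(&page->flags);        /* we are about to dirty the flags */
        list_del(&page->lru);
        if (add_to_page_cache_lru(page, mapping, page->index, GFP_NOFS)) {
                page_cache_release(page);       /* already cached: skip it */
                continue;
        }

        pagepool[nr++] = page;
        if (nr == ARRAY_SIZE(pagepool)) {
                flush_batch(pagepool, nr);      /* hypothetical helper */
                nr = 0;
        }
}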
4318 struct page *page, unsigned long offset) in extent_invalidatepage() argument
4321 u64 start = page_offset(page); in extent_invalidatepage()
4323 size_t blocksize = page->mapping->host->i_sb->s_blocksize; in extent_invalidatepage()
4330 wait_on_page_writeback(page); in extent_invalidatepage()
4345 struct page *page, gfp_t mask) in try_release_extent_state() argument
4347 u64 start = page_offset(page); in try_release_extent_state()
4382 struct extent_io_tree *tree, struct page *page, in try_release_extent_mapping() argument
4386 u64 start = page_offset(page); in try_release_extent_mapping()
4390 page->mapping->host->i_size > 16 * 1024 * 1024) { in try_release_extent_mapping()
4421 return try_release_extent_state(map, tree, page, mask); in try_release_extent_mapping()
4677 struct page *page; in btrfs_release_extent_buffer_page() local
4688 page = eb->pages[index]; in btrfs_release_extent_buffer_page()
4689 if (!page) in btrfs_release_extent_buffer_page()
4692 spin_lock(&page->mapping->private_lock); in btrfs_release_extent_buffer_page()
4700 if (PagePrivate(page) && in btrfs_release_extent_buffer_page()
4701 page->private == (unsigned long)eb) { in btrfs_release_extent_buffer_page()
4703 BUG_ON(PageDirty(page)); in btrfs_release_extent_buffer_page()
4704 BUG_ON(PageWriteback(page)); in btrfs_release_extent_buffer_page()
4709 ClearPagePrivate(page); in btrfs_release_extent_buffer_page()
4710 set_page_private(page, 0); in btrfs_release_extent_buffer_page()
4712 page_cache_release(page); in btrfs_release_extent_buffer_page()
4716 spin_unlock(&page->mapping->private_lock); in btrfs_release_extent_buffer_page()
4719 page_cache_release(page); in btrfs_release_extent_buffer_page()
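When an extent buffer is torn down, each page's private pointer is detached under mapping->private_lock, and the page reference that the attach step pinned is dropped; a second release then drops the lookup reference. A sketch of the detach step from the listed lines; the comments are interpretation, not source:

spin_lock(&page->mapping->private_lock);
if (PagePrivate(page) && page->private == (unsigned long)eb) {
        BUG_ON(PageDirty(page));        /* must be clean before release */
        BUG_ON(PageWriteback(page));

        ClearPagePrivate(page);
        set_page_private(page, 0);
        page_cache_release(page);       /* ref taken when private was set */
}
spin_unlock(&page->mapping->private_lock);

page_cache_release(page);               /* drop our own lookup reference */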
4773 struct page *p; in btrfs_clone_extent_buffer()
4874 struct page *accessed) in mark_extent_buffer_accessed()
4882 struct page *p = eb->pages[i]; in mark_extent_buffer_accessed()
4982 struct page *p; in alloc_extent_buffer()
5185 struct page *page; in clear_extent_buffer_dirty() local
5190 page = eb->pages[i]; in clear_extent_buffer_dirty()
5191 if (!PageDirty(page)) in clear_extent_buffer_dirty()
5194 lock_page(page); in clear_extent_buffer_dirty()
5195 WARN_ON(!PagePrivate(page)); in clear_extent_buffer_dirty()
5197 clear_page_dirty_for_io(page); in clear_extent_buffer_dirty()
5198 spin_lock_irq(&page->mapping->tree_lock); in clear_extent_buffer_dirty()
5199 if (!PageDirty(page)) { in clear_extent_buffer_dirty()
5200 radix_tree_tag_clear(&page->mapping->page_tree, in clear_extent_buffer_dirty()
5201 page_index(page), in clear_extent_buffer_dirty()
5204 spin_unlock_irq(&page->mapping->tree_lock); in clear_extent_buffer_dirty()
5205 ClearPageError(page); in clear_extent_buffer_dirty()
5206 unlock_page(page); in clear_extent_buffer_dirty()
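clear_extent_buffer_dirty() cannot rely on clear_page_dirty_for_io() alone: once the page is clean, the dirty tag in the mapping's radix tree must also be cleared under tree_lock, or writeback would keep revisiting the page. A sketch of the per-page body from the listed lines; PAGECACHE_TAG_DIRTY is the standard tag and is assumed here:

lock_page(page);
WARN_ON(!PagePrivate(page));

clear_page_dirty_for_io(page);
spin_lock_irq(&page->mapping->tree_lock);
if (!PageDirty(page)) {
        /* keep the radix-tree tag in sync with the page flag */
        radix_tree_tag_clear(&page->mapping->page_tree,
                             page_index(page),
                             PAGECACHE_TAG_DIRTY);      /* assumed tag */
}
spin_unlock_irq(&page->mapping->tree_lock);
ClearPageError(page);
unlock_page(page);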
5233 struct page *page; in clear_extent_buffer_uptodate() local
5239 page = eb->pages[i]; in clear_extent_buffer_uptodate()
5240 if (page) in clear_extent_buffer_uptodate()
5241 ClearPageUptodate(page); in clear_extent_buffer_uptodate()
5249 struct page *page; in set_extent_buffer_uptodate() local
5255 page = eb->pages[i]; in set_extent_buffer_uptodate()
5256 SetPageUptodate(page); in set_extent_buffer_uptodate()
5272 struct page *page; in read_extent_buffer_pages() local
5295 page = eb->pages[i]; in read_extent_buffer_pages()
5297 if (!trylock_page(page)) in read_extent_buffer_pages()
5300 lock_page(page); in read_extent_buffer_pages()
5303 if (!PageUptodate(page)) { in read_extent_buffer_pages()
5318 page = eb->pages[i]; in read_extent_buffer_pages()
5319 if (!PageUptodate(page)) { in read_extent_buffer_pages()
5320 ClearPageError(page); in read_extent_buffer_pages()
5321 err = __extent_read_full_page(tree, page, in read_extent_buffer_pages()
5328 unlock_page(page); in read_extent_buffer_pages()
5343 page = eb->pages[i]; in read_extent_buffer_pages()
5344 wait_on_page_locked(page); in read_extent_buffer_pages()
5345 if (!PageUptodate(page)) in read_extent_buffer_pages()
5354 page = eb->pages[i]; in read_extent_buffer_pages()
5356 unlock_page(page); in read_extent_buffer_pages()
5368 struct page *page; in read_extent_buffer() local
5380 page = eb->pages[i]; in read_extent_buffer()
5383 kaddr = page_address(page); in read_extent_buffer()
5399 struct page *page; in read_extent_buffer_to_user() local
5412 page = eb->pages[i]; in read_extent_buffer_to_user()
5415 kaddr = page_address(page); in read_extent_buffer_to_user()
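read_extent_buffer() and read_extent_buffer_to_user() walk the buffer's pages and copy out one page-bounded chunk at a time via page_address(). A sketch of that loop with the chunking arithmetic filled in as an assumption (start_offset stands for the buffer's offset within its first page):

/* Sketch: copy len bytes starting at start out of an extent buffer. */
size_t offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
char *dst = dstv;

while (len > 0) {
        struct page *page = eb->pages[i];
        size_t cur = min(len, PAGE_CACHE_SIZE - offset);
        char *kaddr = page_address(page);

        memcpy(dst, kaddr + offset, cur);  /* _to_user uses copy_to_user() */
        dst += cur;
        len -= cur;
        offset = 0;     /* subsequent pages are read from their start */
        i++;
}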
5437 struct page *p; in map_private_extent_buffer()
5474 struct page *page; in memcmp_extent_buffer() local
5487 page = eb->pages[i]; in memcmp_extent_buffer()
5491 kaddr = page_address(page); in memcmp_extent_buffer()
5509 struct page *page; in write_extent_buffer() local
5521 page = eb->pages[i]; in write_extent_buffer()
5522 WARN_ON(!PageUptodate(page)); in write_extent_buffer()
5525 kaddr = page_address(page); in write_extent_buffer()
5540 struct page *page; in memset_extent_buffer() local
5551 page = eb->pages[i]; in memset_extent_buffer()
5552 WARN_ON(!PageUptodate(page)); in memset_extent_buffer()
5555 kaddr = page_address(page); in memset_extent_buffer()
5571 struct page *page; in copy_extent_buffer() local
5582 page = dst->pages[i]; in copy_extent_buffer()
5583 WARN_ON(!PageUptodate(page)); in copy_extent_buffer()
5587 kaddr = page_address(page); in copy_extent_buffer()
5603 static void copy_pages(struct page *dst_page, struct page *src_page, in copy_pages()
5718 int try_release_extent_buffer(struct page *page) in try_release_extent_buffer() argument
5726 spin_lock(&page->mapping->private_lock); in try_release_extent_buffer()
5727 if (!PagePrivate(page)) { in try_release_extent_buffer()
5728 spin_unlock(&page->mapping->private_lock); in try_release_extent_buffer()
5732 eb = (struct extent_buffer *)page->private; in try_release_extent_buffer()
5743 spin_unlock(&page->mapping->private_lock); in try_release_extent_buffer()
5746 spin_unlock(&page->mapping->private_lock); in try_release_extent_buffer()