Lines matching refs: page (cross-reference listing for the UBIFS file I/O code; each entry shows the source line number, the matching source line, and the enclosing function)

103 static int do_readpage(struct page *page)  in do_readpage()  argument
109 struct inode *inode = page->mapping->host; in do_readpage()
113 inode->i_ino, page->index, i_size, page->flags); in do_readpage()
114 ubifs_assert(!PageChecked(page)); in do_readpage()
115 ubifs_assert(!PagePrivate(page)); in do_readpage()
117 addr = kmap(page); in do_readpage()
119 block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; in do_readpage()
123 SetPageChecked(page); in do_readpage()
165 SetPageChecked(page); in do_readpage()
170 page->index, inode->i_ino, err); in do_readpage()
177 SetPageUptodate(page); in do_readpage()
178 ClearPageError(page); in do_readpage()
179 flush_dcache_page(page); in do_readpage()
180 kunmap(page); in do_readpage()
185 ClearPageUptodate(page); in do_readpage()
186 SetPageError(page); in do_readpage()
187 flush_dcache_page(page); in do_readpage()
188 kunmap(page); in do_readpage()
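
Taken together, the do_readpage() matches above trace the page-flag choreography of the UBIFS read path. Below is a minimal sketch reconstructed from those matched lines only; the data-node lookup and decompression between kmap() and the exit paths are elided, and the local variable declarations are assumptions:

    static int do_readpage(struct page *page)
    {
            void *addr;
            int err = 0;
            unsigned int block;
            struct inode *inode = page->mapping->host;

            /* A page entering the read path is neither budgeted nor private yet */
            ubifs_assert(!PageChecked(page));
            ubifs_assert(!PagePrivate(page));

            addr = kmap(page);
            block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;

            /*
             * ... look up and decompress the data nodes backing this page
             * starting at 'block', filling 'addr' and setting 'err' on
             * failure; a hole (or a page beyond i_size) is zero-filled and
             * marked Checked so that a later write_begin budgets it as a
             * new page ...
             */

            if (err) {
                    ClearPageUptodate(page);
                    SetPageError(page);
                    flush_dcache_page(page);
                    kunmap(page);
                    return err;
            }

            SetPageUptodate(page);
            ClearPageError(page);
            flush_dcache_page(page);
            kunmap(page);
            return 0;
    }
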
221 loff_t pos, unsigned len, struct page **pagep, in write_begin_slow()
229 struct page *page; in write_begin_slow() local
250 page = grab_cache_page_write_begin(mapping, index, flags); in write_begin_slow()
251 if (unlikely(!page)) { in write_begin_slow()
256 if (!PageUptodate(page)) { in write_begin_slow()
258 SetPageChecked(page); in write_begin_slow()
260 err = do_readpage(page); in write_begin_slow()
262 unlock_page(page); in write_begin_slow()
263 page_cache_release(page); in write_begin_slow()
269 SetPageUptodate(page); in write_begin_slow()
270 ClearPageError(page); in write_begin_slow()
273 if (PagePrivate(page)) in write_begin_slow()
285 else if (!PageChecked(page)) in write_begin_slow()
311 *pagep = page; in write_begin_slow()
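
write_begin_slow() shows how a page grabbed for writing is brought up to date: when the write covers the whole page the read is skipped and the page is only marked Checked (so budgeting later treats it as a new page), otherwise do_readpage() fills it first. A sketch of that decision as a fragment, reconstructed from the matches; the whole-page-overwrite condition and the error label are assumptions:

    page = grab_cache_page_write_begin(mapping, index, flags);
    if (unlikely(!page)) {
            err = -ENOMEM;
            goto out_release;               /* hypothetical error label */
    }

    if (!PageUptodate(page)) {
            /* A whole-page overwrite makes reading the old contents pointless */
            if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
                    SetPageChecked(page);
            else {
                    err = do_readpage(page);
                    if (err) {
                            unlock_page(page);
                            page_cache_release(page);
                            goto out_release;
                    }
            }

            SetPageUptodate(page);
            ClearPageError(page);
    }

    /* ... budget bookkeeping based on PagePrivate()/PageChecked() (elided) ... */

    *pagep = page;
    return 0;
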
328 static int allocate_budget(struct ubifs_info *c, struct page *page, in allocate_budget() argument
333 if (PagePrivate(page)) { in allocate_budget()
360 if (PageChecked(page)) in allocate_budget()
426 struct page **pagep, void **fsdata) in ubifs_write_begin()
434 struct page *page; in ubifs_write_begin() local
443 page = grab_cache_page_write_begin(mapping, index, flags); in ubifs_write_begin()
444 if (unlikely(!page)) in ubifs_write_begin()
447 if (!PageUptodate(page)) { in ubifs_write_begin()
459 SetPageChecked(page); in ubifs_write_begin()
462 err = do_readpage(page); in ubifs_write_begin()
464 unlock_page(page); in ubifs_write_begin()
465 page_cache_release(page); in ubifs_write_begin()
470 SetPageUptodate(page); in ubifs_write_begin()
471 ClearPageError(page); in ubifs_write_begin()
474 err = allocate_budget(c, page, ui, appending); in ubifs_write_begin()
482 ClearPageChecked(page); in ubifs_write_begin()
483 ClearPageUptodate(page); in ubifs_write_begin()
496 unlock_page(page); in ubifs_write_begin()
497 page_cache_release(page); in ubifs_write_begin()
508 *pagep = page; in ubifs_write_begin()
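
ubifs_write_begin() repeats that pattern on the fast path and ties it to budgeting: allocate_budget() charges for a new page only when the page is not yet PagePrivate() (not yet dirty and accounted), and a PageChecked() page, set by do_readpage() for holes or by the overwrite shortcut above, counts as a new page. If budgeting would have to force write-back, the page state is rolled back and the slow path takes over. A sketch reconstructed from the matches; the struct lookups, the exact roll-back conditions and the slow-path call signature are assumptions:

    static int ubifs_write_begin(struct file *file, struct address_space *mapping,
                                 loff_t pos, unsigned len, unsigned flags,
                                 struct page **pagep, void **fsdata)
    {
            struct inode *inode = mapping->host;
            struct ubifs_info *c = inode->i_sb->s_fs_info;
            struct ubifs_inode *ui = ubifs_inode(inode);
            pgoff_t index = pos >> PAGE_CACHE_SHIFT;
            int appending = !!(pos + len > inode->i_size);
            struct page *page;
            int err;

            page = grab_cache_page_write_begin(mapping, index, flags);
            if (unlikely(!page))
                    return -ENOMEM;

            if (!PageUptodate(page)) {
                    /*
                     * ... same decision as in write_begin_slow(): mark the
                     * page Checked when it will be fully overwritten,
                     * otherwise read it in with do_readpage() ...
                     */
                    SetPageUptodate(page);
                    ClearPageError(page);
            }

            /* Try to budget for the write without forcing write-back */
            err = allocate_budget(c, page, ui, appending);
            if (unlikely(err)) {
                    /* Roll back the page state and retry via the slow path,
                     * which budgets up front */
                    ClearPageChecked(page);
                    ClearPageUptodate(page);
                    unlock_page(page);
                    page_cache_release(page);
                    return write_begin_slow(mapping, pos, len, pagep, flags);
            }

            *pagep = page;
            return 0;
    }
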
523 static void cancel_budget(struct ubifs_info *c, struct page *page, in cancel_budget() argument
531 if (!PagePrivate(page)) { in cancel_budget()
532 if (PageChecked(page)) in cancel_budget()
541 struct page *page, void *fsdata) in ubifs_write_end() argument
550 inode->i_ino, pos, page->index, len, copied, inode->i_size); in ubifs_write_end()
564 cancel_budget(c, page, ui, appending); in ubifs_write_end()
565 ClearPageChecked(page); in ubifs_write_end()
571 copied = do_readpage(page); in ubifs_write_end()
575 if (!PagePrivate(page)) { in ubifs_write_end()
576 SetPagePrivate(page); in ubifs_write_end()
578 __set_page_dirty_nobuffers(page); in ubifs_write_end()
595 unlock_page(page); in ubifs_write_end()
596 page_cache_release(page); in ubifs_write_end()
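
ubifs_write_end() is the other half: a short copy into a page whose read was skipped means the page is not actually up to date, so cancel_budget() gives the budget back (a Checked page returns a new-page budget, as the cancel_budget() matches show) and do_readpage() is run after all; otherwise the page is made Private and dirtied the first time it is written. A sketch from the matches; the short-copy condition is an assumption and the i_size update is elided:

    static int ubifs_write_end(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned copied,
                               struct page *page, void *fsdata)
    {
            struct inode *inode = mapping->host;
            struct ubifs_info *c = inode->i_sb->s_fs_info;
            struct ubifs_inode *ui = ubifs_inode(inode);
            int appending = !!(pos + len > inode->i_size);

            if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
                    /*
                     * Short copy into a page whose read was skipped: the
                     * page is not really up to date, so return the budget,
                     * drop the Checked mark and read the old contents in
                     * after all (errors come back through 'copied').
                     */
                    cancel_budget(c, page, ui, appending);
                    ClearPageChecked(page);
                    copied = do_readpage(page);
                    goto out;
            }

            if (!PagePrivate(page)) {
                    /* First time this page is dirtied: account for it */
                    SetPagePrivate(page);
                    __set_page_dirty_nobuffers(page);
            }

            /* ... update i_size when appending (elided) ... */

    out:
            unlock_page(page);
            page_cache_release(page);
            return copied;
    }
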
609 static int populate_page(struct ubifs_info *c, struct page *page, in populate_page() argument
613 struct inode *inode = page->mapping->host; in populate_page()
620 inode->i_ino, page->index, i_size, page->flags); in populate_page()
622 addr = zaddr = kmap(page); in populate_page()
625 if (!i_size || page->index > end_index) { in populate_page()
631 page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; in populate_page()
675 if (end_index == page->index) { in populate_page()
684 SetPageChecked(page); in populate_page()
688 SetPageUptodate(page); in populate_page()
689 ClearPageError(page); in populate_page()
690 flush_dcache_page(page); in populate_page()
691 kunmap(page); in populate_page()
696 ClearPageUptodate(page); in populate_page()
697 SetPageError(page); in populate_page()
698 flush_dcache_page(page); in populate_page()
699 kunmap(page); in populate_page()
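
populate_page() is the bulk-read counterpart of do_readpage(): it fills one page from a batch of data nodes the bulk-read machinery has already read, and ends with the same uptodate/error flag handling, marking hole pages Checked. A sketch reconstructed from the matches; the bu/n parameters and the node-copy loop are assumptions or elided:

    static int populate_page(struct ubifs_info *c, struct page *page,
                             struct bu_info *bu, int *n)
    {
            struct inode *inode = page->mapping->host;
            loff_t i_size = i_size_read(inode);
            pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
            unsigned int page_block;
            void *addr, *zaddr;
            int err = 0, hole = 0;

            addr = zaddr = kmap(page);

            if (!i_size || page->index > end_index) {
                    /* The page lies entirely past i_size: it is a hole */
                    hole = 1;
                    memset(addr, 0, PAGE_CACHE_SIZE);
            } else {
                    page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
                    /*
                     * ... copy the already-read data nodes from 'bu' into
                     * the page, zero-filling missing blocks (which also
                     * sets 'hole'); when end_index == page->index the copy
                     * stops at i_size ...
                     */
            }

            if (err) {
                    ClearPageUptodate(page);
                    SetPageError(page);
                    flush_dcache_page(page);
                    kunmap(page);
                    return err;
            }

            if (hole)
                    /* Like do_readpage(): a hole page is marked Checked */
                    SetPageChecked(page);
            SetPageUptodate(page);
            ClearPageError(page);
            flush_dcache_page(page);
            kunmap(page);
            return 0;
    }
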
714 struct page *page1) in ubifs_do_bulk_read()
780 struct page *page; in ubifs_do_bulk_read() local
784 page = find_or_create_page(mapping, page_offset, in ubifs_do_bulk_read()
786 if (!page) in ubifs_do_bulk_read()
788 if (!PageUptodate(page)) in ubifs_do_bulk_read()
789 err = populate_page(c, page, bu, &n); in ubifs_do_bulk_read()
790 unlock_page(page); in ubifs_do_bulk_read()
791 page_cache_release(page); in ubifs_do_bulk_read()
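
ubifs_do_bulk_read() then walks the neighbouring page-cache slots and populates every page in the batch that is not already up to date. The per-page loop body, reconstructed from the matches; the loop bounds, gfp mask and surrounding variables are assumptions:

    for (page_idx = 1; page_idx < page_cnt; page_idx++) {
            pgoff_t page_offset = offset + page_idx;
            struct page *page;

            if (page_offset > end_index)
                    break;
            page = find_or_create_page(mapping, page_offset, GFP_NOFS);
            if (!page)
                    break;
            if (!PageUptodate(page))
                    err = populate_page(c, page, bu, &n);
            unlock_page(page);
            page_cache_release(page);
            if (err)
                    break;
    }
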
821 static int ubifs_bulk_read(struct page *page) in ubifs_bulk_read() argument
823 struct inode *inode = page->mapping->host; in ubifs_bulk_read()
826 pgoff_t index = page->index, last_page_read = ui->last_page_read; in ubifs_bulk_read()
874 page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT); in ubifs_bulk_read()
875 err = ubifs_do_bulk_read(c, bu, page); in ubifs_bulk_read()
887 static int ubifs_readpage(struct file *file, struct page *page) in ubifs_readpage() argument
889 if (ubifs_bulk_read(page)) in ubifs_readpage()
891 do_readpage(page); in ubifs_readpage()
892 unlock_page(page); in ubifs_readpage()
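
ubifs_readpage() itself is small: it first offers the page to the bulk-read heuristic (ubifs_bulk_read() compares page->index with the last page read to detect sequential access) and only falls back to the single-page do_readpage() when bulk read was not done; do_readpage()'s return value is ignored here because errors travel through the page flags. Reconstructed from the matches:

    static int ubifs_readpage(struct file *file, struct page *page)
    {
            /* The bulk-read path took care of this page */
            if (ubifs_bulk_read(page))
                    return 0;

            /* Single-page read; errors are reported via the page flags */
            do_readpage(page);
            unlock_page(page);
            return 0;
    }
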
896 static int do_writepage(struct page *page, int len) in do_writepage() argument
902 struct inode *inode = page->mapping->host; in do_writepage()
908 ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT); in do_writepage()
913 set_page_writeback(page); in do_writepage()
915 addr = kmap(page); in do_writepage()
916 block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; in do_writepage()
931 SetPageError(page); in do_writepage()
933 page->index, inode->i_ino, err); in do_writepage()
937 ubifs_assert(PagePrivate(page)); in do_writepage()
938 if (PageChecked(page)) in do_writepage()
944 ClearPagePrivate(page); in do_writepage()
945 ClearPageChecked(page); in do_writepage()
947 kunmap(page); in do_writepage()
948 unlock_page(page); in do_writepage()
949 end_page_writeback(page); in do_writepage()
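
do_writepage() is the mirror image on the write-back side: the page must already be Private (budgeted), it is put under write-back, up to len bytes are written out block by block, and the budget is returned, with PageChecked() again deciding whether a new-page budget is released. A sketch from the matches; the block-write loop, error logging and budget-release calls are elided:

    static int do_writepage(struct page *page, int len)
    {
            int err = 0;
            unsigned int block;
            void *addr;
            struct inode *inode = page->mapping->host;
            struct ubifs_inode *ui = ubifs_inode(inode);

            /* Pages beyond the synchronized i_size must never reach write-back */
            ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT);

            set_page_writeback(page);

            addr = kmap(page);
            block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
            /*
             * ... write up to 'len' bytes of the page as UBIFS data nodes,
             * one block at a time, setting 'err' on failure ...
             */
            if (err) {
                    SetPageError(page);
                    /* ... log the failed page/inode and switch the file
                     * system to read-only mode (elided) ... */
            }

            ubifs_assert(PagePrivate(page));
            /*
             * Return the budget taken at write_begin/page_mkwrite time; a
             * Checked page additionally releases a new-page budget (elided).
             */
            ClearPagePrivate(page);
            ClearPageChecked(page);

            kunmap(page);
            unlock_page(page);
            end_page_writeback(page);
            return err;
    }
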
999 static int ubifs_writepage(struct page *page, struct writeback_control *wbc) in ubifs_writepage() argument
1001 struct inode *inode = page->mapping->host; in ubifs_writepage()
1009 inode->i_ino, page->index, page->flags); in ubifs_writepage()
1010 ubifs_assert(PagePrivate(page)); in ubifs_writepage()
1013 if (page->index > end_index || (page->index == end_index && !len)) { in ubifs_writepage()
1023 if (page->index < end_index) { in ubifs_writepage()
1024 if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) { in ubifs_writepage()
1037 return do_writepage(page, PAGE_CACHE_SIZE); in ubifs_writepage()
1047 kaddr = kmap_atomic(page); in ubifs_writepage()
1049 flush_dcache_page(page); in ubifs_writepage()
1058 return do_writepage(page, len); in ubifs_writepage()
1061 unlock_page(page); in ubifs_writepage()
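
ubifs_writepage() decides how much of the page do_writepage() should write: pages wholly beyond i_size are skipped (a truncation is in flight), pages wholly inside it are written in full, and the page straddling i_size is zeroed past the end and written only up to len; when the page lies beyond the synced size, the inode is written out first so that the on-flash i_size stays recovery-safe. A sketch from the matches; the len computation, the inode write-out and the locking around synced_i_size are assumptions or elided:

    static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
    {
            struct inode *inode = page->mapping->host;
            loff_t i_size = i_size_read(inode);
            /* read under the inode's lock in the real code */
            loff_t synced_i_size = ubifs_inode(inode)->synced_i_size;
            pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
            int err = 0, len = i_size & (PAGE_CACHE_SIZE - 1);
            void *kaddr;

            ubifs_assert(PagePrivate(page));

            /* Page lies fully beyond i_size: a truncation is in progress */
            if (page->index > end_index || (page->index == end_index && !len))
                    goto out_unlock;

            /* Page lies fully inside i_size: write all of it */
            if (page->index < end_index) {
                    if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
                            /* ... write the inode out first so the on-flash
                             * i_size covers this page (elided) ... */
                    }
                    return do_writepage(page, PAGE_CACHE_SIZE);
            }

            /* Page straddles i_size: zero the tail and write only 'len' bytes */
            kaddr = kmap_atomic(page);
            memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
            flush_dcache_page(page);
            kunmap_atomic(kaddr);

            return do_writepage(page, len);

    out_unlock:
            unlock_page(page);
            return err;
    }
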
1142 struct page *page; in do_truncation() local
1144 page = find_lock_page(inode->i_mapping, index); in do_truncation()
1145 if (page) { in do_truncation()
1146 if (PageDirty(page)) { in do_truncation()
1155 ubifs_assert(PagePrivate(page)); in do_truncation()
1157 clear_page_dirty_for_io(page); in do_truncation()
1161 err = do_writepage(page, offset); in do_truncation()
1162 page_cache_release(page); in do_truncation()
1175 unlock_page(page); in do_truncation()
1176 page_cache_release(page); in do_truncation()
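
In do_truncation(), the page containing the new end of file gets special treatment: if it is dirty it is written out immediately, truncated to offset bytes, so the journal later truncates an up-to-date last data node (do_writepage() unlocks the page); a clean page is simply unlocked and released. A sketch from the matches; the enclosing conditions and the error label are assumptions:

    if (offset) {
            pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
            struct page *page;

            page = find_lock_page(inode->i_mapping, index);
            if (page) {
                    if (PageDirty(page)) {
                            /*
                             * Write the partial page now so the journal
                             * truncates an up-to-date last data node.
                             */
                            ubifs_assert(PagePrivate(page));
                            clear_page_dirty_for_io(page);
                            err = do_writepage(page, offset);
                            /* do_writepage() has unlocked the page */
                            page_cache_release(page);
                            if (err)
                                    goto out_budg;  /* hypothetical error label */
                    } else {
                            /* Clean page: nothing to flush */
                            unlock_page(page);
                            page_cache_release(page);
                    }
            }
    }
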
1281 static void ubifs_invalidatepage(struct page *page, unsigned int offset, in ubifs_invalidatepage() argument
1284 struct inode *inode = page->mapping->host; in ubifs_invalidatepage()
1287 ubifs_assert(PagePrivate(page)); in ubifs_invalidatepage()
1292 if (PageChecked(page)) in ubifs_invalidatepage()
1298 ClearPagePrivate(page); in ubifs_invalidatepage()
1299 ClearPageChecked(page); in ubifs_invalidatepage()
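
ubifs_invalidatepage() undoes the per-page accounting when a dirty page is thrown away: a full-page invalidation returns the budget (again keyed on PageChecked()) and clears the Private and Checked state. A sketch from the matches; the partial-invalidation early return and the budget-release helpers are assumptions or elided:

    static void ubifs_invalidatepage(struct page *page, unsigned int offset,
                                     unsigned int length)
    {
            ubifs_assert(PagePrivate(page));

            /* Partial invalidation: the page stays dirty and keeps its budget */
            if (offset || length < PAGE_CACHE_SIZE)
                    return;

            /*
             * The whole page is going away: return its budget (a new-page
             * budget if it is Checked, a dirty-page budget otherwise) and
             * drop the accounting state (helpers elided).
             */
            ClearPagePrivate(page);
            ClearPageChecked(page);
    }
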
1442 static int ubifs_set_page_dirty(struct page *page) in ubifs_set_page_dirty() argument
1446 ret = __set_page_dirty_nobuffers(page); in ubifs_set_page_dirty()
1455 static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags) in ubifs_releasepage() argument
1461 if (PageWriteback(page)) in ubifs_releasepage()
1463 ubifs_assert(PagePrivate(page)); in ubifs_releasepage()
1465 ClearPagePrivate(page); in ubifs_releasepage()
1466 ClearPageChecked(page); in ubifs_releasepage()
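
ubifs_set_page_dirty() and ubifs_releasepage() are the remaining small hooks: UBIFS keeps no buffer heads, so dirtying is the plain page-cache variant, and a clean page may be handed back to the VM by dropping its Private and Checked state, unless it is still under write-back. Reconstructed from the matches:

    static int ubifs_set_page_dirty(struct page *page)
    {
            int ret;

            ret = __set_page_dirty_nobuffers(page);
            /*
             * UBIFS budgets pages before dirtying them (write_begin /
             * page_mkwrite), so this is not expected to newly dirty a page.
             */
            return ret;
    }

    static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
    {
            if (PageWriteback(page))
                    return 0;       /* cannot release while under write-back */
            ubifs_assert(PagePrivate(page));
            ClearPagePrivate(page);
            ClearPageChecked(page);
            return 1;
    }
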
1477 struct page *page = vmf->page; in ubifs_vm_page_mkwrite() local
1484 dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index, in ubifs_vm_page_mkwrite()
1525 lock_page(page); in ubifs_vm_page_mkwrite()
1526 if (unlikely(page->mapping != inode->i_mapping || in ubifs_vm_page_mkwrite()
1527 page_offset(page) > i_size_read(inode))) { in ubifs_vm_page_mkwrite()
1533 if (PagePrivate(page)) in ubifs_vm_page_mkwrite()
1536 if (!PageChecked(page)) in ubifs_vm_page_mkwrite()
1538 SetPagePrivate(page); in ubifs_vm_page_mkwrite()
1540 __set_page_dirty_nobuffers(page); in ubifs_vm_page_mkwrite()
1556 wait_for_stable_page(page); in ubifs_vm_page_mkwrite()
1560 unlock_page(page); in ubifs_vm_page_mkwrite()
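
Finally, ubifs_vm_page_mkwrite() is how an mmap'ed page becomes writable: space is budgeted before the page is locked, the page is re-checked against a concurrent truncate, and it is made Private and dirty just as in write_end; the fault returns with the page still locked after waiting for any write-back to it to finish. A sketch from the matches; the budgeting calls and the exact fault return codes are assumptions or elided:

    static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
                                     struct vm_fault *vmf)
    {
            struct page *page = vmf->page;
            struct inode *inode = file_inode(vma->vm_file);

            /* ... budget for a possibly new dirty page before taking the
             * page lock (elided) ... */

            lock_page(page);
            if (unlikely(page->mapping != inode->i_mapping ||
                         page_offset(page) > i_size_read(inode))) {
                    /* The page was truncated away while we were budgeting */
                    goto out_unlock;
            }

            if (PagePrivate(page)) {
                    /* Already dirty and budgeted: the extra budget is
                     * released again (elided) */
            } else {
                    if (!PageChecked(page)) {
                            /* The page holds existing data: convert the
                             * budget accordingly (elided) */
                    }
                    SetPagePrivate(page);
                    __set_page_dirty_nobuffers(page);
            }

            /* Let any write-back of this page finish before returning it
             * locked and writable */
            wait_for_stable_page(page);
            return VM_FAULT_LOCKED;

    out_unlock:
            unlock_page(page);
            /* ... release the budget (elided) ... */
            return VM_FAULT_SIGBUS;         /* assumption: failures become SIGBUS */
    }
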