Lines matching refs: page — identifier references to 'page' in the UBIFS address-space code (fs/ubifs/file.c)
104 static int do_readpage(struct page *page) in do_readpage() argument
110 struct inode *inode = page->mapping->host; in do_readpage()
114 inode->i_ino, page->index, i_size, page->flags); in do_readpage()
115 ubifs_assert(!PageChecked(page)); in do_readpage()
116 ubifs_assert(!PagePrivate(page)); in do_readpage()
118 addr = kmap(page); in do_readpage()
120 block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; in do_readpage()
124 SetPageChecked(page); in do_readpage()
166 SetPageChecked(page); in do_readpage()
171 page->index, inode->i_ino, err); in do_readpage()
178 SetPageUptodate(page); in do_readpage()
179 ClearPageError(page); in do_readpage()
180 flush_dcache_page(page); in do_readpage()
181 kunmap(page); in do_readpage()
186 ClearPageUptodate(page); in do_readpage()
187 SetPageError(page); in do_readpage()
188 flush_dcache_page(page); in do_readpage()
189 kunmap(page); in do_readpage()
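Taken together, the do_readpage() references above trace the UBIFS single-page read path: map the page, read it block by block from the media, then publish the result through the page flags before unmapping. Below is a minimal sketch of that flow; it, like the later sketches, assumes the usual fs/ubifs/file.c context ("ubifs.h", <linux/pagemap.h>), shows only the first block of the page, and uses read_block() as an assumed per-block data-node helper rather than the file's actual code.

static int do_readpage_sketch(struct page *page)
{
	void *addr;
	int err = 0;
	unsigned int block, beyond;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading a hole beyond i_size: zero-fill, nothing on flash. */
		SetPageChecked(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
	} else {
		/* read_block() stands in for the data-node lookup; the real
		 * function loops over every UBIFS block in the page. */
		err = read_block(inode, addr, block);	/* assumed helper */
	}

	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	flush_dcache_page(page);
	kunmap(page);
	return err;
}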
222 loff_t pos, unsigned len, struct page **pagep, in write_begin_slow()
230 struct page *page; in write_begin_slow() local
251 page = grab_cache_page_write_begin(mapping, index, flags); in write_begin_slow()
252 if (unlikely(!page)) { in write_begin_slow()
257 if (!PageUptodate(page)) { in write_begin_slow()
259 SetPageChecked(page); in write_begin_slow()
261 err = do_readpage(page); in write_begin_slow()
263 unlock_page(page); in write_begin_slow()
264 page_cache_release(page); in write_begin_slow()
270 SetPageUptodate(page); in write_begin_slow()
271 ClearPageError(page); in write_begin_slow()
274 if (PagePrivate(page)) in write_begin_slow()
286 else if (!PageChecked(page)) in write_begin_slow()
312 *pagep = page; in write_begin_slow()
329 static int allocate_budget(struct ubifs_info *c, struct page *page, in allocate_budget() argument
334 if (PagePrivate(page)) { in allocate_budget()
361 if (PageChecked(page)) in allocate_budget()
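The allocate_budget() references show the decision that drives UBIFS write budgeting: a page with PG_private set is already dirty and already budgeted, while PG_checked marks a page whose data does not yet exist on the flash. A simplified sketch of that decision follows, assuming the usual struct ubifs_budget_req fields; the locking the real function does around ui->ui_mutex for appending writes is omitted.

static int allocate_budget_sketch(struct ubifs_info *c, struct page *page,
				  struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		/* Page is already dirty and budgeted; only an i_size change
		 * (appending write) may need additional budget. */
		if (!appending)
			return 0;
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/* First write to this page: budget a new data node. */
			req.new_page = 1;
		else
			/* Overwriting data that already exists on the media. */
			req.dirtied_page = 1;
		if (appending)
			req.dirtied_ino = 1;
	}

	return ubifs_budget_space(c, &req);
}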
427 struct page **pagep, void **fsdata) in ubifs_write_begin()
435 struct page *page; in ubifs_write_begin() local
444 page = grab_cache_page_write_begin(mapping, index, flags); in ubifs_write_begin()
445 if (unlikely(!page)) in ubifs_write_begin()
448 if (!PageUptodate(page)) { in ubifs_write_begin()
460 SetPageChecked(page); in ubifs_write_begin()
463 err = do_readpage(page); in ubifs_write_begin()
465 unlock_page(page); in ubifs_write_begin()
466 page_cache_release(page); in ubifs_write_begin()
471 SetPageUptodate(page); in ubifs_write_begin()
472 ClearPageError(page); in ubifs_write_begin()
475 err = allocate_budget(c, page, ui, appending); in ubifs_write_begin()
483 ClearPageChecked(page); in ubifs_write_begin()
484 ClearPageUptodate(page); in ubifs_write_begin()
497 unlock_page(page); in ubifs_write_begin()
498 page_cache_release(page); in ubifs_write_begin()
509 *pagep = page; in ubifs_write_begin()
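The ubifs_write_begin() references outline the fast path of ->write_begin(): grab the page, bring it up to date (or mark it PG_checked when the write will cover it entirely), then try a fast budget; if budgeting fails, the page is dropped and write_begin_slow() takes over with a full budget. A sketch of that flow, with the "whole page overwritten" test and the error handling simplified:

static int ubifs_write_begin_sketch(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned flags,
				    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	int appending = !!(pos + len > inode->i_size);
	int err, skipped_read = 0;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page))
		return -ENOMEM;

	if (!PageUptodate(page)) {
		if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
			/* The whole page is overwritten: skip reading it. */
			SetPageChecked(page);
			skipped_read = 1;
		} else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				page_cache_release(page);
				return err;
			}
		}
		SetPageUptodate(page);
		ClearPageError(page);
	}

	err = allocate_budget(c, page, ui, appending);
	if (unlikely(err)) {
		/* No fast budget: undo, drop the page, retry via slow path. */
		if (skipped_read) {
			ClearPageChecked(page);
			ClearPageUptodate(page);
		}
		unlock_page(page);
		page_cache_release(page);
		return write_begin_slow(mapping, pos, len, pagep, flags);
	}

	*pagep = page;
	return 0;
}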
524 static void cancel_budget(struct ubifs_info *c, struct page *page, in cancel_budget() argument
532 if (!PagePrivate(page)) { in cancel_budget()
533 if (PageChecked(page)) in cancel_budget()
542 struct page *page, void *fsdata) in ubifs_write_end() argument
551 inode->i_ino, pos, page->index, len, copied, inode->i_size); in ubifs_write_end()
565 cancel_budget(c, page, ui, appending); in ubifs_write_end()
566 ClearPageChecked(page); in ubifs_write_end()
572 copied = do_readpage(page); in ubifs_write_end()
576 if (!PagePrivate(page)) { in ubifs_write_end()
577 SetPagePrivate(page); in ubifs_write_end()
579 __set_page_dirty_nobuffers(page); in ubifs_write_end()
596 unlock_page(page); in ubifs_write_end()
597 page_cache_release(page); in ubifs_write_end()
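The ubifs_write_end() references show the other half of the pair: if the copy from user space came up short on a page that was never read, the budget taken in ->write_begin() is cancelled and the page is read back instead; otherwise the page is marked PG_private (it now owns a budget) and dirtied. A sketch follows; the i_size update done for appending writes is omitted.

static int ubifs_write_end_sketch(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int appending = !!(pos + len > inode->i_size);

	if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
		/* Short copy into a page we never read: the budget no longer
		 * matches what will be written, so give it back and make the
		 * page consistent by reading it; the caller will retry. */
		cancel_budget(c, page, ui, appending);
		ClearPageChecked(page);
		copied = do_readpage(page);
		goto out;
	}

	if (!PagePrivate(page)) {
		/* The page becomes dirty for the first time and from now on
		 * carries the budget that was allocated for it. */
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

out:
	unlock_page(page);
	page_cache_release(page);
	return copied;
}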
610 static int populate_page(struct ubifs_info *c, struct page *page, in populate_page() argument
614 struct inode *inode = page->mapping->host; in populate_page()
621 inode->i_ino, page->index, i_size, page->flags); in populate_page()
623 addr = zaddr = kmap(page); in populate_page()
626 if (!i_size || page->index > end_index) { in populate_page()
632 page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; in populate_page()
676 if (end_index == page->index) { in populate_page()
685 SetPageChecked(page); in populate_page()
689 SetPageUptodate(page); in populate_page()
690 ClearPageError(page); in populate_page()
691 flush_dcache_page(page); in populate_page()
692 kunmap(page); in populate_page()
697 ClearPageUptodate(page); in populate_page()
698 SetPageError(page); in populate_page()
699 flush_dcache_page(page); in populate_page()
700 kunmap(page); in populate_page()
715 struct page *page1) in ubifs_do_bulk_read()
781 struct page *page; in ubifs_do_bulk_read() local
785 page = find_or_create_page(mapping, page_offset, in ubifs_do_bulk_read()
787 if (!page) in ubifs_do_bulk_read()
789 if (!PageUptodate(page)) in ubifs_do_bulk_read()
790 err = populate_page(c, page, bu, &n); in ubifs_do_bulk_read()
791 unlock_page(page); in ubifs_do_bulk_read()
792 page_cache_release(page); in ubifs_do_bulk_read()
822 static int ubifs_bulk_read(struct page *page) in ubifs_bulk_read() argument
824 struct inode *inode = page->mapping->host; in ubifs_bulk_read()
827 pgoff_t index = page->index, last_page_read = ui->last_page_read; in ubifs_bulk_read()
875 page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT); in ubifs_bulk_read()
876 err = ubifs_do_bulk_read(c, bu, page); in ubifs_bulk_read()
888 static int ubifs_readpage(struct file *file, struct page *page) in ubifs_readpage() argument
890 if (ubifs_bulk_read(page)) in ubifs_readpage()
892 do_readpage(page); in ubifs_readpage()
893 unlock_page(page); in ubifs_readpage()
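ubifs_readpage() itself is only a dispatcher: it first tries the bulk-read optimization (ubifs_bulk_read() reads several consecutive pages in one pass through ubifs_do_bulk_read() and populate_page()) and falls back to the plain single-page do_readpage() otherwise. The shape of the dispatcher, reconstructed from the fragments above:

static int ubifs_readpage_sketch(struct file *file, struct page *page)
{
	/* Bulk read handles (and unlocks) the page itself on success. */
	if (ubifs_bulk_read(page))
		return 0;

	do_readpage(page);
	unlock_page(page);
	return 0;
}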
897 static int do_writepage(struct page *page, int len) in do_writepage() argument
903 struct inode *inode = page->mapping->host; in do_writepage()
909 ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT); in do_writepage()
914 set_page_writeback(page); in do_writepage()
916 addr = kmap(page); in do_writepage()
917 block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; in do_writepage()
932 SetPageError(page); in do_writepage()
934 page->index, inode->i_ino, err); in do_writepage()
938 ubifs_assert(PagePrivate(page)); in do_writepage()
939 if (PageChecked(page)) in do_writepage()
945 ClearPagePrivate(page); in do_writepage()
946 ClearPageChecked(page); in do_writepage()
948 kunmap(page); in do_writepage()
949 unlock_page(page); in do_writepage()
950 end_page_writeback(page); in do_writepage()
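do_writepage() is the common write-out routine used by ->writepage() and by truncation: the page is put under writeback, mapped, written to the journal block by block, and its budget is released once the data is journalled. A sketch; write_block() stands in for journalling one data node, and the budget-release helper names are assumptions here.

static int do_writepage_sketch(struct page *page, int len)
{
	int err = 0, blen;
	unsigned int block;
	void *addr;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	/* Keep the page from being reclaimed or redirtied while it is written. */
	set_page_writeback(page);

	addr = kmap(page);
	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (len && !err) {
		blen = min_t(int, len, UBIFS_BLOCK_SIZE);
		/* write_block() stands in for journalling one data node. */
		err = write_block(c, inode, block, addr, blen);	/* assumed helper */
		block += 1;
		addr += blen;
		len -= blen;
	}
	if (err) {
		SetPageError(page);
		ubifs_err("cannot write page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
	}

	/* The budget attached to this dirty page has now been consumed. */
	ubifs_assert(PagePrivate(page));
	if (PageChecked(page))
		release_new_page_budget(c);		/* assumed helper name */
	else
		release_existing_page_budget(c);	/* assumed helper name */
	ClearPagePrivate(page);
	ClearPageChecked(page);

	kunmap(page);
	unlock_page(page);
	end_page_writeback(page);
	return err;
}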
1000 static int ubifs_writepage(struct page *page, struct writeback_control *wbc) in ubifs_writepage() argument
1002 struct inode *inode = page->mapping->host; in ubifs_writepage()
1010 inode->i_ino, page->index, page->flags); in ubifs_writepage()
1011 ubifs_assert(PagePrivate(page)); in ubifs_writepage()
1014 if (page->index > end_index || (page->index == end_index && !len)) { in ubifs_writepage()
1024 if (page->index < end_index) { in ubifs_writepage()
1025 if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) { in ubifs_writepage()
1038 return do_writepage(page, PAGE_CACHE_SIZE); in ubifs_writepage()
1048 kaddr = kmap_atomic(page); in ubifs_writepage()
1050 flush_dcache_page(page); in ubifs_writepage()
1059 return do_writepage(page, len); in ubifs_writepage()
1062 unlock_page(page); in ubifs_writepage()
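The ubifs_writepage() references are mostly about the last page of the file: pages past i_size are skipped, full pages are written out whole, and the final partial page has its tail zeroed so no stale data ever reaches the flash. A sketch of that logic; the synced_i_size handling (writing the inode out first when the page lies beyond the last synchronized size) is omitted.

static int ubifs_writepage_sketch(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	int len = i_size & (PAGE_CACHE_SIZE - 1);
	void *kaddr;

	/* The page lies fully beyond i_size: nothing to write back. */
	if (page->index > end_index || (page->index == end_index && !len)) {
		unlock_page(page);
		return 0;
	}

	/* Pages strictly before the last one are written out in full. */
	if (page->index < end_index)
		return do_writepage(page, PAGE_CACHE_SIZE);

	/* Last page: zero the tail beyond i_size, then write only 'len' bytes. */
	kaddr = kmap_atomic(page);
	memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	return do_writepage(page, len);
}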
1143 struct page *page; in do_truncation() local
1145 page = find_lock_page(inode->i_mapping, index); in do_truncation()
1146 if (page) { in do_truncation()
1147 if (PageDirty(page)) { in do_truncation()
1156 ubifs_assert(PagePrivate(page)); in do_truncation()
1158 clear_page_dirty_for_io(page); in do_truncation()
1162 err = do_writepage(page, offset); in do_truncation()
1163 page_cache_release(page); in do_truncation()
1176 unlock_page(page); in do_truncation()
1177 page_cache_release(page); in do_truncation()
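During truncation, do_truncation() looks up the new last page and, if it is dirty, writes the surviving part out immediately via do_writepage(), so the shrunken data and the truncation land in the same commit. A sketch of just that step, wrapped in an assumed helper; note that do_writepage() unlocks the page itself, which is why the dirty branch only releases the reference.

static int truncate_last_page_sketch(struct inode *inode, loff_t new_size)
{
	pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
	int offset = new_size & (PAGE_CACHE_SIZE - 1);
	struct page *page;
	int err = 0;

	if (!offset)
		return 0;	/* new size is page-aligned: nothing to flush */

	page = find_lock_page(inode->i_mapping, index);
	if (!page)
		return 0;

	if (PageDirty(page)) {
		/* Write the shrunken last page now so its data nodes are
		 * journalled together with the truncation itself. */
		ubifs_assert(PagePrivate(page));
		clear_page_dirty_for_io(page);
		err = do_writepage(page, offset);
		page_cache_release(page);
	} else {
		unlock_page(page);
		page_cache_release(page);
	}
	return err;
}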
1282 static void ubifs_invalidatepage(struct page *page, unsigned int offset, in ubifs_invalidatepage() argument
1285 struct inode *inode = page->mapping->host; in ubifs_invalidatepage()
1288 ubifs_assert(PagePrivate(page)); in ubifs_invalidatepage()
1293 if (PageChecked(page)) in ubifs_invalidatepage()
1299 ClearPagePrivate(page); in ubifs_invalidatepage()
1300 ClearPageChecked(page); in ubifs_invalidatepage()
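ubifs_invalidatepage() mirrors the budgeting done in the write path: when a whole dirty page is invalidated its budget has to be given back, and PG_checked decides whether it was budgeted as brand-new data or as an overwrite. A sketch; the partial-invalidation test and the budget-release helper names are assumptions here.

static void ubifs_invalidatepage_sketch(struct page *page, unsigned int offset,
					unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(PagePrivate(page));
	if (offset || length < PAGE_CACHE_SIZE)
		/* Partial page remains dirty: keep its budget. */
		return;

	if (PageChecked(page))
		release_new_page_budget(c);		/* assumed helper name */
	else
		release_existing_page_budget(c);	/* assumed helper name */

	ClearPagePrivate(page);
	ClearPageChecked(page);
}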
1410 static int ubifs_set_page_dirty(struct page *page) in ubifs_set_page_dirty() argument
1414 ret = __set_page_dirty_nobuffers(page); in ubifs_set_page_dirty()
1423 static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags) in ubifs_releasepage() argument
1429 if (PageWriteback(page)) in ubifs_releasepage()
1431 ubifs_assert(PagePrivate(page)); in ubifs_releasepage()
1433 ClearPagePrivate(page); in ubifs_releasepage()
1434 ClearPageChecked(page); in ubifs_releasepage()
1445 struct page *page = vmf->page; in ubifs_vm_page_mkwrite() local
1452 dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index, in ubifs_vm_page_mkwrite()
1493 lock_page(page); in ubifs_vm_page_mkwrite()
1494 if (unlikely(page->mapping != inode->i_mapping || in ubifs_vm_page_mkwrite()
1495 page_offset(page) > i_size_read(inode))) { in ubifs_vm_page_mkwrite()
1501 if (PagePrivate(page)) in ubifs_vm_page_mkwrite()
1504 if (!PageChecked(page)) in ubifs_vm_page_mkwrite()
1506 SetPagePrivate(page); in ubifs_vm_page_mkwrite()
1508 __set_page_dirty_nobuffers(page); in ubifs_vm_page_mkwrite()
1524 wait_for_stable_page(page); in ubifs_vm_page_mkwrite()
1528 unlock_page(page); in ubifs_vm_page_mkwrite()
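Finally, ubifs_vm_page_mkwrite() has to reserve budget before an mmap'ed page is allowed to become writable, because the fault path has no way to return ENOSPC later. A simplified sketch; the budget conversion the real function performs for not-yet-budgeted pages, the dirty-page accounting, and the exact fault return codes are glossed over here.

static int ubifs_vm_page_mkwrite_sketch(struct vm_area_struct *vma,
					struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_budget_req req = { .new_page = 1 };
	int err;

	/* Budget the worst case up front; the fault handler cannot fail later. */
	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return VM_FAULT_SIGBUS;

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* The page was truncated out from underneath us. */
		ubifs_release_budget(c, &req);
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	if (PagePrivate(page)) {
		/* Already dirty and budgeted: return the extra budget. */
		ubifs_release_budget(c, &req);
	} else {
		SetPagePrivate(page);
		__set_page_dirty_nobuffers(page);
	}

	/* Stable pages: do not let the store proceed during writeback. */
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
}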