Lines matching refs:page (all hits below are from the f2fs data path, fs/f2fs/data.c)

45 struct page *page = bvec->bv_page; in f2fs_read_end_io() local
48 SetPageUptodate(page); in f2fs_read_end_io()
50 ClearPageUptodate(page); in f2fs_read_end_io()
51 SetPageError(page); in f2fs_read_end_io()
53 unlock_page(page); in f2fs_read_end_io()
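The hits at lines 45-53 are the per-page loop of the read completion callback. A minimal sketch of how they fit together (the callback signature and the bio->bi_error field are version-dependent assumptions, and the decryption work the real handler also queues is omitted):

	static void f2fs_read_end_io(struct bio *bio)
	{
		struct bio_vec *bvec;
		int i;

		/* visit every page attached to the completed read bio */
		bio_for_each_segment_all(bvec, bio, i) {
			struct page *page = bvec->bv_page;

			if (!bio->bi_error) {
				SetPageUptodate(page);		/* data arrived intact */
			} else {
				ClearPageUptodate(page);	/* read failed */
				SetPageError(page);
			}
			unlock_page(page);	/* wakes waiters in find_data_page() etc. */
		}
		bio_put(bio);
	}
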
65 struct page *page = bvec->bv_page; in f2fs_write_end_io() local
67 f2fs_restore_and_release_control_page(&page); in f2fs_write_end_io()
70 set_page_dirty(page); in f2fs_write_end_io()
71 set_bit(AS_EIO, &page->mapping->flags); in f2fs_write_end_io()
74 end_page_writeback(page); in f2fs_write_end_io()
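The write completion hits (lines 65-74) show the error path: a failed write re-dirties the page and sets AS_EIO on the mapping so the error surfaces at the next fsync. A hedged sketch of that loop body (bvec, i, and the surrounding declarations are assumed; the real function also stops checkpointing and drops a writeback counter):

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		/* swap the crypto bounce page back for the original, if any */
		f2fs_restore_and_release_control_page(&page);

		if (unlikely(bio->bi_error)) {
			set_page_dirty(page);			/* keep the data for a later retry */
			set_bit(AS_EIO, &page->mapping->flags);	/* next fsync reports -EIO */
		}
		end_page_writeback(page);	/* clear PG_writeback and wake waiters */
	}
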
148 struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page; in f2fs_submit_page_bio() local
150 trace_f2fs_submit_page_bio(page, fio); in f2fs_submit_page_bio()
156 if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { in f2fs_submit_page_bio()
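Line 156 is the standard single-page submission check: a freshly allocated bio that cannot take one whole page is thrown away and the caller sees an error. Sketch, assuming the bio and fio setup shown around lines 148-150:

	/* use the encrypted bounce page when encryption is enabled */
	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	submit_bio(fio->rw, bio);
	return 0;
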
171 struct page *bio_page; in f2fs_submit_page_mbio()
193 bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; in f2fs_submit_page_mbio()
205 trace_f2fs_submit_page_mbio(fio->page, fio); in f2fs_submit_page_mbio()
218 struct page *node_page = dn->node_page; in set_data_blkaddr()
278 struct page *get_read_data_page(struct inode *inode, pgoff_t index, in get_read_data_page()
283 struct page *page; in get_read_data_page() local
296 page = f2fs_grab_cache_page(mapping, index, for_write); in get_read_data_page()
297 if (!page) in get_read_data_page()
316 if (PageUptodate(page)) { in get_read_data_page()
317 unlock_page(page); in get_read_data_page()
318 return page; in get_read_data_page()
328 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in get_read_data_page()
329 SetPageUptodate(page); in get_read_data_page()
330 unlock_page(page); in get_read_data_page()
331 return page; in get_read_data_page()
335 fio.page = page; in get_read_data_page()
339 return page; in get_read_data_page()
342 f2fs_put_page(page, 1); in get_read_data_page()
346 struct page *find_data_page(struct inode *inode, pgoff_t index) in find_data_page()
349 struct page *page; in find_data_page() local
351 page = find_get_page(mapping, index); in find_data_page()
352 if (page && PageUptodate(page)) in find_data_page()
353 return page; in find_data_page()
354 f2fs_put_page(page, 0); in find_data_page()
356 page = get_read_data_page(inode, index, READ_SYNC, false); in find_data_page()
357 if (IS_ERR(page)) in find_data_page()
358 return page; in find_data_page()
360 if (PageUptodate(page)) in find_data_page()
361 return page; in find_data_page()
363 wait_on_page_locked(page); in find_data_page()
364 if (unlikely(!PageUptodate(page))) { in find_data_page()
365 f2fs_put_page(page, 0); in find_data_page()
368 return page; in find_data_page()
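Lines 346-368 cover find_data_page() almost end to end; below is a reconstruction that only fills in the two elided error returns (assumed to be ERR_PTR(-EIO)) and the mapping declaration:

	struct page *find_data_page(struct inode *inode, pgoff_t index)
	{
		struct address_space *mapping = inode->i_mapping;
		struct page *page;

		/* fast path: already cached and valid */
		page = find_get_page(mapping, index);
		if (page && PageUptodate(page))
			return page;
		f2fs_put_page(page, 0);

		/* slow path: issue a read, but hand back an unlocked page */
		page = get_read_data_page(inode, index, READ_SYNC, false);
		if (IS_ERR(page))
			return page;

		if (PageUptodate(page))
			return page;

		/* the read bio unlocks the page on completion; wait for that */
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);	/* assumed error code */
		}
		return page;
	}
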
376 struct page *get_lock_data_page(struct inode *inode, pgoff_t index, in get_lock_data_page()
380 struct page *page; in get_lock_data_page() local
382 page = get_read_data_page(inode, index, READ_SYNC, for_write); in get_lock_data_page()
383 if (IS_ERR(page)) in get_lock_data_page()
384 return page; in get_lock_data_page()
387 lock_page(page); in get_lock_data_page()
388 if (unlikely(!PageUptodate(page))) { in get_lock_data_page()
389 f2fs_put_page(page, 1); in get_lock_data_page()
392 if (unlikely(page->mapping != mapping)) { in get_lock_data_page()
393 f2fs_put_page(page, 1); in get_lock_data_page()
396 return page; in get_lock_data_page()
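get_lock_data_page() (lines 376-396) is the locked variant of the same lookup: after re-taking the page lock it re-checks page->mapping, since truncation can detach the page while the read is in flight. A sketch of the retry loop, with the goto label and error code assumed:

	repeat:
		page = get_read_data_page(inode, index, READ_SYNC, for_write);
		if (IS_ERR(page))
			return page;

		/* wait for the read to finish, then revalidate under the lock */
		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);		/* truncated or migrated: start over */
			goto repeat;
		}
		return page;
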
408 struct page *get_new_data_page(struct inode *inode, in get_new_data_page()
409 struct page *ipage, pgoff_t index, bool new_i_size) in get_new_data_page()
412 struct page *page; in get_new_data_page() local
416 page = f2fs_grab_cache_page(mapping, index, true); in get_new_data_page()
417 if (!page) { in get_new_data_page()
429 f2fs_put_page(page, 1); in get_new_data_page()
435 if (PageUptodate(page)) in get_new_data_page()
439 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in get_new_data_page()
440 SetPageUptodate(page); in get_new_data_page()
442 f2fs_put_page(page, 1); in get_new_data_page()
444 page = get_read_data_page(inode, index, READ_SYNC, true); in get_new_data_page()
445 if (IS_ERR(page)) in get_new_data_page()
449 lock_page(page); in get_new_data_page()
458 return page; in get_new_data_page()
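get_new_data_page() (lines 408-458) distinguishes a block that has just been reserved from one that already exists on disk: a new block is simply zeroed and marked uptodate, an existing one is read back through get_read_data_page(). A sketch of that branch; dn.data_blkaddr, the NEW_ADDR test, and the put_err label are assumptions based on the usual f2fs dnode conventions:

	if (dn.data_blkaddr == NEW_ADDR) {
		/* freshly reserved block: no I/O needed, just hand out zeroes */
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* block exists on disk: read it and relock the page */
		page = get_read_data_page(inode, index, READ_SYNC, true);
		if (IS_ERR(page))
			goto put_err;

		/* wait for read completion */
		lock_page(page);
	}
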
874 struct list_head *pages, struct page *page, in f2fs_mpage_readpages() argument
897 prefetchw(&page->flags); in f2fs_mpage_readpages()
899 page = list_entry(pages->prev, struct page, lru); in f2fs_mpage_readpages()
900 list_del(&page->lru); in f2fs_mpage_readpages()
901 if (add_to_page_cache_lru(page, mapping, in f2fs_mpage_readpages()
902 page->index, GFP_KERNEL)) in f2fs_mpage_readpages()
906 block_in_file = (sector_t)page->index; in f2fs_mpage_readpages()
938 SetPageMappedToDisk(page); in f2fs_mpage_readpages()
940 if (!PageUptodate(page) && !cleancache_get_page(page)) { in f2fs_mpage_readpages()
941 SetPageUptodate(page); in f2fs_mpage_readpages()
945 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in f2fs_mpage_readpages()
946 SetPageUptodate(page); in f2fs_mpage_readpages()
947 unlock_page(page); in f2fs_mpage_readpages()
988 if (bio_add_page(bio, page, blocksize, 0) < blocksize) in f2fs_mpage_readpages()
994 SetPageError(page); in f2fs_mpage_readpages()
995 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in f2fs_mpage_readpages()
996 unlock_page(page); in f2fs_mpage_readpages()
1003 unlock_page(page); in f2fs_mpage_readpages()
1006 page_cache_release(page); in f2fs_mpage_readpages()
1014 static int f2fs_read_data_page(struct file *file, struct page *page) in f2fs_read_data_page() argument
1016 struct inode *inode = page->mapping->host; in f2fs_read_data_page()
1019 trace_f2fs_readpage(page, DATA); in f2fs_read_data_page()
1023 ret = f2fs_read_inline_data(inode, page); in f2fs_read_data_page()
1025 ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1); in f2fs_read_data_page()
1034 struct page *page = list_entry(pages->prev, struct page, lru); in f2fs_read_data_pages() local
1036 trace_f2fs_readpages(inode, page, nr_pages); in f2fs_read_data_pages()
1047 struct page *page = fio->page; in do_write_data_page() local
1048 struct inode *inode = page->mapping->host; in do_write_data_page()
1053 err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE); in do_write_data_page()
1061 ClearPageUptodate(page); in do_write_data_page()
1071 fio->encrypted_page = f2fs_encrypt(inode, fio->page); in do_write_data_page()
1078 set_page_writeback(page); in do_write_data_page()
1085 !is_cold_data(page) && in do_write_data_page()
1089 trace_f2fs_do_write_data_page(page, IPU); in do_write_data_page()
1094 trace_f2fs_do_write_data_page(page, OPU); in do_write_data_page()
1096 if (page->index == 0) in do_write_data_page()
1104 static int f2fs_write_data_page(struct page *page, in f2fs_write_data_page() argument
1107 struct inode *inode = page->mapping->host; in f2fs_write_data_page()
1119 .page = page, in f2fs_write_data_page()
1123 trace_f2fs_writepage(page, DATA); in f2fs_write_data_page()
1125 if (page->index < end_index) in f2fs_write_data_page()
1133 if ((page->index >= end_index + 1) || !offset) in f2fs_write_data_page()
1136 zero_user_segment(page, offset, PAGE_CACHE_SIZE); in f2fs_write_data_page()
1156 SetPageError(page); in f2fs_write_data_page()
1168 err = f2fs_write_inline_data(inode, page); in f2fs_write_data_page()
1176 clear_cold_data(page); in f2fs_write_data_page()
1180 ClearPageUptodate(page); in f2fs_write_data_page()
1181 unlock_page(page); in f2fs_write_data_page()
1189 redirty_page_for_writepage(wbc, page); in f2fs_write_data_page()
1193 static int __f2fs_writepage(struct page *page, struct writeback_control *wbc, in __f2fs_writepage() argument
1197 int ret = mapping->a_ops->writepage(page, wbc); in __f2fs_writepage()
1258 struct page *page = pvec.pages[i]; in f2fs_write_cache_pages() local
1260 if (page->index > end) { in f2fs_write_cache_pages()
1265 done_index = page->index; in f2fs_write_cache_pages()
1267 lock_page(page); in f2fs_write_cache_pages()
1269 if (unlikely(page->mapping != mapping)) { in f2fs_write_cache_pages()
1271 unlock_page(page); in f2fs_write_cache_pages()
1275 if (!PageDirty(page)) { in f2fs_write_cache_pages()
1280 if (step == is_cold_data(page)) in f2fs_write_cache_pages()
1283 if (PageWriteback(page)) { in f2fs_write_cache_pages()
1285 f2fs_wait_on_page_writeback(page, DATA); in f2fs_write_cache_pages()
1290 BUG_ON(PageWriteback(page)); in f2fs_write_cache_pages()
1291 if (!clear_page_dirty_for_io(page)) in f2fs_write_cache_pages()
1294 ret = (*writepage)(page, wbc, data); in f2fs_write_cache_pages()
1297 unlock_page(page); in f2fs_write_cache_pages()
1300 done_index = page->index + 1; in f2fs_write_cache_pages()
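The f2fs_write_cache_pages() hits (lines 1258-1300) follow the kernel's generic write_cache_pages() pattern: lock the page, re-verify it still belongs to this mapping and is still dirty, optionally wait for in-flight writeback, then clear the dirty bit and hand the page to the writepage callback. A condensed sketch of the per-page body (the hot/cold "step" filter and the cycled/done bookkeeping of the real loop are left out, and the continue_unlock label is assumed):

		done_index = page->index;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
	continue_unlock:
			unlock_page(page);
			continue;
		}
		if (!PageDirty(page))
			goto continue_unlock;		/* cleaned by someone else meanwhile */
		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				f2fs_wait_on_page_writeback(page, DATA);
			else
				goto continue_unlock;
		}
		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		ret = (*writepage)(page, wbc, data);
		done_index = page->index + 1;	/* resume point for the next batch */
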
1394 struct page **pagep, void **fsdata) in f2fs_write_begin()
1398 struct page *page = NULL; in f2fs_write_begin() local
1399 struct page *ipage; in f2fs_write_begin()
1419 page = grab_cache_page_write_begin(mapping, index, flags); in f2fs_write_begin()
1420 if (!page) { in f2fs_write_begin()
1425 *pagep = page; in f2fs_write_begin()
1440 read_inline_data(page, ipage); in f2fs_write_begin()
1445 err = f2fs_convert_inline_page(&dn, page); in f2fs_write_begin()
1457 f2fs_wait_on_page_writeback(page, DATA); in f2fs_write_begin()
1465 if (PageUptodate(page)) in f2fs_write_begin()
1473 zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); in f2fs_write_begin()
1478 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in f2fs_write_begin()
1485 .page = page, in f2fs_write_begin()
1492 lock_page(page); in f2fs_write_begin()
1493 if (unlikely(!PageUptodate(page))) { in f2fs_write_begin()
1497 if (unlikely(page->mapping != mapping)) { in f2fs_write_begin()
1498 f2fs_put_page(page, 1); in f2fs_write_begin()
1504 err = f2fs_decrypt_one(inode, page); in f2fs_write_begin()
1510 SetPageUptodate(page); in f2fs_write_begin()
1512 clear_cold_data(page); in f2fs_write_begin()
1520 f2fs_put_page(page, 1); in f2fs_write_begin()
1528 struct page *page, void *fsdata) in f2fs_write_end() argument
1530 struct inode *inode = page->mapping->host; in f2fs_write_end()
1534 set_page_dirty(page); in f2fs_write_end()
1542 f2fs_put_page(page, 1); in f2fs_write_end()
1603 void f2fs_invalidate_page(struct page *page, unsigned int offset, in f2fs_invalidate_page() argument
1606 struct inode *inode = page->mapping->host; in f2fs_invalidate_page()
1613 if (PageDirty(page)) { in f2fs_invalidate_page()
1623 if (IS_ATOMIC_WRITTEN_PAGE(page)) in f2fs_invalidate_page()
1626 ClearPagePrivate(page); in f2fs_invalidate_page()
1629 int f2fs_release_page(struct page *page, gfp_t wait) in f2fs_release_page() argument
1632 if (PageDirty(page)) in f2fs_release_page()
1636 if (IS_ATOMIC_WRITTEN_PAGE(page)) in f2fs_release_page()
1639 ClearPagePrivate(page); in f2fs_release_page()
1643 static int f2fs_set_data_page_dirty(struct page *page) in f2fs_set_data_page_dirty() argument
1645 struct address_space *mapping = page->mapping; in f2fs_set_data_page_dirty()
1648 trace_f2fs_set_page_dirty(page, DATA); in f2fs_set_data_page_dirty()
1650 SetPageUptodate(page); in f2fs_set_data_page_dirty()
1653 if (!IS_ATOMIC_WRITTEN_PAGE(page)) { in f2fs_set_data_page_dirty()
1654 register_inmem_page(inode, page); in f2fs_set_data_page_dirty()
1664 if (!PageDirty(page)) { in f2fs_set_data_page_dirty()
1665 __set_page_dirty_nobuffers(page); in f2fs_set_data_page_dirty()
1666 update_dirty_page(inode, page); in f2fs_set_data_page_dirty()
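Finally, f2fs_set_data_page_dirty() (lines 1643-1666): pages belonging to an atomic-write file are diverted to an in-memory list instead of the normal dirty tracking. A sketch, assuming the atomic-file check is f2fs_is_atomic_file():

	static int f2fs_set_data_page_dirty(struct page *page)
	{
		struct address_space *mapping = page->mapping;
		struct inode *inode = mapping->host;

		trace_f2fs_set_page_dirty(page, DATA);

		SetPageUptodate(page);

		if (f2fs_is_atomic_file(inode)) {
			if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
				/* queue the page privately; committed at transaction end */
				register_inmem_page(inode, page);
				return 1;
			}
			/* already tracked as an atomic write: nothing more to do */
			return 0;
		}

		if (!PageDirty(page)) {
			__set_page_dirty_nobuffers(page);
			update_dirty_page(inode, page);	/* account the inode's dirty data */
			return 1;
		}
		return 0;
	}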