Lines matching refs: mapping (mm/filemap.c)

Each entry below gives the source line number, the matching line, and the enclosing function; "argument" and "local" note whether mapping is a parameter of that function or a local variable declared in it.

111 static void page_cache_tree_delete(struct address_space *mapping,  in page_cache_tree_delete()  argument
122 __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot); in page_cache_tree_delete()
125 mapping->nrshadows++; in page_cache_tree_delete()
134 mapping->nrpages--; in page_cache_tree_delete()
138 mapping->page_tree.gfp_mask &= __GFP_BITS_MASK; in page_cache_tree_delete()
148 radix_tree_tag_clear(&mapping->page_tree, index, tag); in page_cache_tree_delete()
157 if (__radix_tree_delete_node(&mapping->page_tree, node)) in page_cache_tree_delete()
169 node->private_data = mapping; in page_cache_tree_delete()
181 struct address_space *mapping = page->mapping; in __delete_from_page_cache() local
192 cleancache_invalidate_page(mapping, page); in __delete_from_page_cache()
194 page_cache_tree_delete(mapping, page, shadow); in __delete_from_page_cache()
196 page->mapping = NULL; in __delete_from_page_cache()
213 account_page_cleaned(page, mapping); in __delete_from_page_cache()
226 struct address_space *mapping = page->mapping; in delete_from_page_cache() local
231 freepage = mapping->a_ops->freepage; in delete_from_page_cache()
232 spin_lock_irq(&mapping->tree_lock); in delete_from_page_cache()
234 spin_unlock_irq(&mapping->tree_lock); in delete_from_page_cache()
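
For context, a hedged caller-side sketch (not part of the listing; example_drop_page() is a hypothetical name): delete_from_page_cache() expects a locked page, takes mapping->tree_lock internally as the lines above show, and invokes the mapping's ->freepage hook after removal.

#include <linux/pagemap.h>

/* Hypothetical helper: drop the page at 'index' from 'mapping'.
 * delete_from_page_cache() must be called with the page locked. */
static void example_drop_page(struct address_space *mapping, pgoff_t index)
{
        struct page *page = find_lock_page(mapping, index);

        if (!page)
                return;

        /* Recheck ownership: the page may have been truncated meanwhile. */
        if (page->mapping == mapping)
                delete_from_page_cache(page);

        unlock_page(page);
        page_cache_release(page);       /* drop find_lock_page()'s reference */
}
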
242 static int filemap_check_errors(struct address_space *mapping) in filemap_check_errors() argument
246 if (test_bit(AS_ENOSPC, &mapping->flags) && in filemap_check_errors()
247 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_errors()
249 if (test_bit(AS_EIO, &mapping->flags) && in filemap_check_errors()
250 test_and_clear_bit(AS_EIO, &mapping->flags)) in filemap_check_errors()
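
A hedged sketch of how these flags get set in the first place (example_end_write() is a hypothetical name): writeback completion paths call mapping_set_error(), which latches AS_ENOSPC or AS_EIO on the mapping, and the filemap_check_errors() lines above later test-and-clear them.

#include <linux/pagemap.h>

/* Hypothetical write-completion handler: record an I/O error on the
 * mapping so a later fsync()/filemap_check_errors() can report it. */
static void example_end_write(struct page *page, int err)
{
        if (err) {
                SetPageError(page);
                mapping_set_error(page->mapping, err);  /* sets AS_EIO or AS_ENOSPC */
        }
        end_page_writeback(page);
}
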
270 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in __filemap_fdatawrite_range() argument
281 if (!mapping_cap_writeback_dirty(mapping)) in __filemap_fdatawrite_range()
284 ret = do_writepages(mapping, &wbc); in __filemap_fdatawrite_range()
288 static inline int __filemap_fdatawrite(struct address_space *mapping, in __filemap_fdatawrite() argument
291 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); in __filemap_fdatawrite()
294 int filemap_fdatawrite(struct address_space *mapping) in filemap_fdatawrite() argument
296 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); in filemap_fdatawrite()
300 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in filemap_fdatawrite_range() argument
303 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); in filemap_fdatawrite_range()
314 int filemap_flush(struct address_space *mapping) in filemap_flush() argument
316 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); in filemap_flush()
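
For reference, a hedged restatement of the shape of __filemap_fdatawrite_range() seen above (example_fdatawrite_range() is a hypothetical name): build a writeback_control covering the byte range and hand it to do_writepages(); WB_SYNC_ALL is the data-integrity mode used by filemap_fdatawrite(), WB_SYNC_NONE the best-effort mode used by filemap_flush().

#include <linux/writeback.h>
#include <linux/backing-dev.h>

/* Hypothetical restatement of the __filemap_fdatawrite_range() pattern. */
static int example_fdatawrite_range(struct address_space *mapping,
                                    loff_t start, loff_t end, int sync_mode)
{
        struct writeback_control wbc = {
                .sync_mode      = sync_mode,    /* WB_SYNC_ALL or WB_SYNC_NONE */
                .nr_to_write    = LONG_MAX,
                .range_start    = start,
                .range_end      = end,
        };

        if (!mapping_cap_writeback_dirty(mapping))
                return 0;

        return do_writepages(mapping, &wbc);
}
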
329 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, in filemap_fdatawait_range() argument
343 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, in filemap_fdatawait_range()
363 ret2 = filemap_check_errors(mapping); in filemap_fdatawait_range()
378 int filemap_fdatawait(struct address_space *mapping) in filemap_fdatawait() argument
380 loff_t i_size = i_size_read(mapping->host); in filemap_fdatawait()
385 return filemap_fdatawait_range(mapping, 0, i_size - 1); in filemap_fdatawait()
389 int filemap_write_and_wait(struct address_space *mapping) in filemap_write_and_wait() argument
393 if (mapping->nrpages) { in filemap_write_and_wait()
394 err = filemap_fdatawrite(mapping); in filemap_write_and_wait()
402 int err2 = filemap_fdatawait(mapping); in filemap_write_and_wait()
407 err = filemap_check_errors(mapping); in filemap_write_and_wait()
424 int filemap_write_and_wait_range(struct address_space *mapping, in filemap_write_and_wait_range() argument
429 if (mapping->nrpages) { in filemap_write_and_wait_range()
430 err = __filemap_fdatawrite_range(mapping, lstart, lend, in filemap_write_and_wait_range()
434 int err2 = filemap_fdatawait_range(mapping, in filemap_write_and_wait_range()
440 err = filemap_check_errors(mapping); in filemap_write_and_wait_range()
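
A hedged sketch of the usual consumer of filemap_write_and_wait_range(): an ->fsync() implementation that flushes the data range and then syncs its metadata (example_fsync() is a hypothetical name, real filesystems do more here; the i_mutex locking assumes the pre-4.7 inode locking that matches this source).

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical ->fsync(): write back and wait on [start, end], then
 * update metadata under the inode mutex. */
static int example_fsync(struct file *file, loff_t start, loff_t end,
                         int datasync)
{
        struct inode *inode = file->f_mapping->host;
        int err;

        err = filemap_write_and_wait_range(file->f_mapping, start, end);
        if (err)
                return err;

        mutex_lock(&inode->i_mutex);
        /* ... write out inode/metadata changes here ... */
        mutex_unlock(&inode->i_mutex);

        return 0;
}
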
467 VM_BUG_ON_PAGE(new->mapping, new); in replace_page_cache_page()
471 struct address_space *mapping = old->mapping; in replace_page_cache_page() local
475 freepage = mapping->a_ops->freepage; in replace_page_cache_page()
478 new->mapping = mapping; in replace_page_cache_page()
481 spin_lock_irq(&mapping->tree_lock); in replace_page_cache_page()
483 error = radix_tree_insert(&mapping->page_tree, offset, new); in replace_page_cache_page()
485 mapping->nrpages++; in replace_page_cache_page()
489 spin_unlock_irq(&mapping->tree_lock); in replace_page_cache_page()
501 static int page_cache_tree_insert(struct address_space *mapping, in page_cache_tree_insert() argument
508 error = __radix_tree_create(&mapping->page_tree, page->index, in page_cache_tree_insert()
515 p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); in page_cache_tree_insert()
520 mapping->nrshadows--; in page_cache_tree_insert()
525 mapping->nrpages++; in page_cache_tree_insert()
544 struct address_space *mapping, in __add_to_page_cache_locked() argument
570 page->mapping = mapping; in __add_to_page_cache_locked()
573 spin_lock_irq(&mapping->tree_lock); in __add_to_page_cache_locked()
574 error = page_cache_tree_insert(mapping, page, shadowp); in __add_to_page_cache_locked()
579 spin_unlock_irq(&mapping->tree_lock); in __add_to_page_cache_locked()
585 page->mapping = NULL; in __add_to_page_cache_locked()
587 spin_unlock_irq(&mapping->tree_lock); in __add_to_page_cache_locked()
604 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, in add_to_page_cache_locked() argument
607 return __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_locked()
612 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, in add_to_page_cache_lru() argument
619 ret = __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_lru()
795 if (page->mapping) in page_endio()
796 mapping_set_error(page->mapping, err); in page_endio()
889 pgoff_t page_cache_next_hole(struct address_space *mapping, in page_cache_next_hole() argument
897 page = radix_tree_lookup(&mapping->page_tree, index); in page_cache_next_hole()
930 pgoff_t page_cache_prev_hole(struct address_space *mapping, in page_cache_prev_hole() argument
938 page = radix_tree_lookup(&mapping->page_tree, index); in page_cache_prev_hole()
963 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) in find_get_entry() argument
971 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); in find_get_entry()
1022 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) in find_lock_entry() argument
1027 page = find_get_entry(mapping, offset); in find_lock_entry()
1031 if (unlikely(page->mapping != mapping)) { in find_lock_entry()
1065 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, in pagecache_get_page() argument
1071 page = find_get_entry(mapping, offset); in pagecache_get_page()
1088 if (unlikely(page->mapping != mapping)) { in pagecache_get_page()
1102 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) in pagecache_get_page()
1118 err = add_to_page_cache_lru(page, mapping, offset, in pagecache_get_page()
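
The fgp_flags argument drives pagecache_get_page()'s behaviour. A hedged one-liner (example_find_or_create() is hypothetical, the same shape as the find_or_create_page() wrapper): look up the page at 'index', allocating, inserting and locking it if it is not cached.

#include <linux/pagemap.h>

/* Hypothetical wrapper: return the page at 'index' locked, creating it
 * in the page cache if it is not present. */
static struct page *example_find_or_create(struct address_space *mapping,
                                           pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                  mapping_gfp_mask(mapping));
}

FGP_LOCK returns the page locked, FGP_CREAT allocates and inserts it on a miss, and FGP_ACCESSED marks it referenced for LRU purposes.
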
1155 unsigned find_get_entries(struct address_space *mapping, in find_get_entries() argument
1168 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { in find_get_entries()
1218 unsigned find_get_pages(struct address_space *mapping, pgoff_t start, in find_get_pages() argument
1230 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { in find_get_pages()
1285 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, in find_get_pages_contig() argument
1297 radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) { in find_get_pages_contig()
1336 if (page->mapping == NULL || page->index != iter.index) { in find_get_pages_contig()
1361 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, in find_get_pages_tag() argument
1373 radix_tree_for_each_tagged(slot, &mapping->page_tree, in find_get_pages_tag()
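
A hedged sketch of the tag-walk pattern built on find_get_pages_tag() (pagevec_lookup_tag(), already visible above in filemap_fdatawait_range(), is a thin wrapper around it); example_walk_dirty() is a hypothetical name and the per-page work is elided.

#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

/* Hypothetical walk over pages tagged dirty, writepages-style. */
static void example_walk_dirty(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t index = 0;
        unsigned i, nr;

        pagevec_init(&pvec, 0);
        while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
                                        PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
                for (i = 0; i < nr; i++) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        /* Recheck under the lock: the page may have moved on. */
                        if (page->mapping == mapping && PageDirty(page)) {
                                /* ... clear_page_dirty_for_io() and write it ... */
                        }
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}
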
1464 struct address_space *mapping = filp->f_mapping; in do_generic_file_read() local
1465 struct inode *inode = mapping->host; in do_generic_file_read()
1488 page = find_get_page(mapping, index); in do_generic_file_read()
1490 page_cache_sync_readahead(mapping, in do_generic_file_read()
1493 page = find_get_page(mapping, index); in do_generic_file_read()
1498 page_cache_async_readahead(mapping, in do_generic_file_read()
1504 !mapping->a_ops->is_partially_uptodate) in do_generic_file_read()
1509 if (!page->mapping) in do_generic_file_read()
1511 if (!mapping->a_ops->is_partially_uptodate(page, in do_generic_file_read()
1548 if (mapping_writably_mapped(mapping)) in do_generic_file_read()
1588 if (!page->mapping) { in do_generic_file_read()
1608 error = mapping->a_ops->readpage(filp, page); in do_generic_file_read()
1624 if (page->mapping == NULL) { in do_generic_file_read()
1652 page = page_cache_alloc_cold(mapping); in do_generic_file_read()
1657 error = add_to_page_cache_lru(page, mapping, in do_generic_file_read()
1697 struct address_space *mapping = file->f_mapping; in generic_file_read_iter() local
1698 struct inode *inode = mapping->host; in generic_file_read_iter()
1705 retval = filemap_write_and_wait_range(mapping, pos, in generic_file_read_iter()
1709 retval = mapping->a_ops->direct_IO(iocb, &data, pos); in generic_file_read_iter()
1750 struct address_space *mapping = file->f_mapping; in page_cache_read() local
1755 page = page_cache_alloc_cold(mapping); in page_cache_read()
1759 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL); in page_cache_read()
1761 ret = mapping->a_ops->readpage(file, page); in page_cache_read()
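
A hedged restatement of the pattern these page_cache_read() lines come from (example_read_in() is a hypothetical name; the real function also retries on AOP_TRUNCATED_PAGE): allocate a cache-cold page, try to insert it, and read it in through ->readpage on success; losing the insertion race (-EEXIST) is not an error.

#include <linux/pagemap.h>

/* Hypothetical restatement of the page_cache_read() shape. */
static int example_read_in(struct file *file, struct address_space *mapping,
                           pgoff_t offset)
{
        struct page *page = page_cache_alloc_cold(mapping);
        int ret;

        if (!page)
                return -ENOMEM;

        ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
        if (ret == 0)
                ret = mapping->a_ops->readpage(file, page);
        else if (ret == -EEXIST)
                ret = 0;        /* another task added it first; that's fine */

        page_cache_release(page);
        return ret;
}
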
1784 struct address_space *mapping = file->f_mapping; in do_sync_mmap_readahead() local
1793 page_cache_sync_readahead(mapping, ra, file, offset, in do_sync_mmap_readahead()
1816 ra_submit(ra, mapping, file); in do_sync_mmap_readahead()
1829 struct address_space *mapping = file->f_mapping; in do_async_mmap_readahead() local
1837 page_cache_async_readahead(mapping, ra, file, in do_async_mmap_readahead()
1869 struct address_space *mapping = file->f_mapping; in filemap_fault() local
1871 struct inode *inode = mapping->host; in filemap_fault()
1884 page = find_get_page(mapping, offset); in filemap_fault()
1898 page = find_get_page(mapping, offset); in filemap_fault()
1909 if (unlikely(page->mapping != mapping)) { in filemap_fault()
1969 error = mapping->a_ops->readpage(file, page); in filemap_fault()
1991 struct address_space *mapping = file->f_mapping; in filemap_map_pages() local
1999 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) { in filemap_map_pages()
2029 if (page->mapping != mapping || !PageUptodate(page)) in filemap_map_pages()
2032 size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE); in filemap_map_pages()
2067 if (page->mapping != inode->i_mapping) { in filemap_page_mkwrite()
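
These three handlers are what the page cache exposes for file-backed mmaps; a hedged sketch of the vm_operations_struct they form (the same shape as filemap.c's generic_file_vm_ops; example_vm_ops is a hypothetical name):

#include <linux/mm.h>

/* Hypothetical vm_ops wiring the fault handlers listed above. */
static const struct vm_operations_struct example_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = filemap_page_mkwrite,
};
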
2095 struct address_space *mapping = file->f_mapping; in generic_file_mmap() local
2097 if (!mapping->a_ops->readpage) in generic_file_mmap()
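
One level up, a hedged sketch of the file_operations a simple filesystem might assemble from the generic helpers referenced throughout this listing (example_file_ops is a hypothetical name; the filesystem still needs its own ->readpage and friends in its address_space_operations):

#include <linux/fs.h>

/* Hypothetical file_operations built from generic page-cache helpers. */
static const struct file_operations example_file_ops = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .splice_read    = generic_file_splice_read,
};
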
2139 static struct page *__read_cache_page(struct address_space *mapping, in __read_cache_page() argument
2148 page = find_get_page(mapping, index); in __read_cache_page()
2153 err = add_to_page_cache_lru(page, mapping, index, gfp); in __read_cache_page()
2172 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page() argument
2183 page = __read_cache_page(mapping, index, filler, data, gfp); in do_read_cache_page()
2190 if (!page->mapping) { in do_read_cache_page()
2225 struct page *read_cache_page(struct address_space *mapping, in read_cache_page() argument
2230 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); in read_cache_page()
2245 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp() argument
2249 filler_t *filler = (filler_t *)mapping->a_ops->readpage; in read_cache_page_gfp()
2251 return do_read_cache_page(mapping, index, filler, NULL, gfp); in read_cache_page_gfp()
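
A hedged usage sketch (example_peek_byte() is a hypothetical name): read_cache_page_gfp() returns an uptodate but unlocked page, using the mapping's own ->readpage as the filler, or an ERR_PTR() on failure; the caller drops the reference when done.

#include <linux/pagemap.h>
#include <linux/highmem.h>

/* Hypothetical helper: read one byte at 'pos' through the page cache. */
static int example_peek_byte(struct address_space *mapping, loff_t pos, u8 *out)
{
        struct page *page;
        void *kaddr;

        page = read_cache_page_gfp(mapping, pos >> PAGE_CACHE_SHIFT, GFP_KERNEL);
        if (IS_ERR(page))
                return PTR_ERR(page);

        kaddr = kmap_atomic(page);
        *out = *((u8 *)kaddr + (pos & (PAGE_CACHE_SIZE - 1)));
        kunmap_atomic(kaddr);

        page_cache_release(page);
        return 0;
}
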
2311 int pagecache_write_begin(struct file *file, struct address_space *mapping, in pagecache_write_begin() argument
2315 const struct address_space_operations *aops = mapping->a_ops; in pagecache_write_begin()
2317 return aops->write_begin(file, mapping, pos, len, flags, in pagecache_write_begin()
2322 int pagecache_write_end(struct file *file, struct address_space *mapping, in pagecache_write_end() argument
2326 const struct address_space_operations *aops = mapping->a_ops; in pagecache_write_end()
2328 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
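
A hedged sketch of the begin/copy/end sequence these wrappers provide (example_write_small() is a hypothetical name; it assumes the buffer fits inside one page and that the mapping's a_ops implement write_begin/write_end):

#include <linux/pagemap.h>
#include <linux/highmem.h>

/* Hypothetical small write into the page cache at 'pos'. */
static int example_write_small(struct file *file, struct address_space *mapping,
                               loff_t pos, const char *buf, unsigned len)
{
        struct page *page;
        void *fsdata;
        int err;

        err = pagecache_write_begin(file, mapping, pos, len, 0, &page, &fsdata);
        if (err)
                return err;

        memcpy(kmap(page) + (pos & (PAGE_CACHE_SIZE - 1)), buf, len);
        kunmap(page);
        flush_dcache_page(page);

        /* A typical ->write_end() marks the page dirty, unlocks and releases it. */
        err = pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
        return err < 0 ? err : 0;
}
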
2336 struct address_space *mapping = file->f_mapping; in generic_file_direct_write() local
2337 struct inode *inode = mapping->host; in generic_file_direct_write()
2346 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); in generic_file_direct_write()
2356 if (mapping->nrpages) { in generic_file_direct_write()
2357 written = invalidate_inode_pages2_range(mapping, in generic_file_direct_write()
2371 written = mapping->a_ops->direct_IO(iocb, &data, pos); in generic_file_direct_write()
2381 if (mapping->nrpages) { in generic_file_direct_write()
2382 invalidate_inode_pages2_range(mapping, in generic_file_direct_write()
2404 struct page *grab_cache_page_write_begin(struct address_space *mapping, in grab_cache_page_write_begin() argument
2413 page = pagecache_get_page(mapping, index, fgp_flags, in grab_cache_page_write_begin()
2414 mapping_gfp_mask(mapping)); in grab_cache_page_write_begin()
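
A hedged sketch of a minimal ->write_begin() built on grab_cache_page_write_begin(), in the same shape as simple_write_begin() (example_write_begin() is a hypothetical name):

#include <linux/pagemap.h>
#include <linux/highmem.h>

/* Hypothetical ->write_begin(): pin and lock the target page, zeroing
 * the parts of a not-yet-uptodate page that the write will not cover. */
static int example_write_begin(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        struct page *page;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;

        *pagep = page;

        if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
                unsigned from = pos & (PAGE_CACHE_SIZE - 1);

                zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
        }
        return 0;
}
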
2425 struct address_space *mapping = file->f_mapping; in generic_perform_write() local
2426 const struct address_space_operations *a_ops = mapping->a_ops; in generic_perform_write()
2469 status = a_ops->write_begin(file, mapping, pos, bytes, flags, in generic_perform_write()
2474 if (mapping_writably_mapped(mapping)) in generic_perform_write()
2480 status = a_ops->write_end(file, mapping, pos, bytes, copied, in generic_perform_write()
2505 balance_dirty_pages_ratelimited(mapping); in generic_perform_write()
2532 struct address_space * mapping = file->f_mapping; in __generic_file_write_iter() local
2533 struct inode *inode = mapping->host; in __generic_file_write_iter()
2580 err = filemap_write_and_wait_range(mapping, pos, endbyte); in __generic_file_write_iter()
2584 invalidate_mapping_pages(mapping, in __generic_file_write_iter()
2655 struct address_space * const mapping = page->mapping; in try_to_release_page() local
2661 if (mapping && mapping->a_ops->releasepage) in try_to_release_page()
2662 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()
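
Finally, a hedged caller-side sketch (example_can_free() is a hypothetical name): reclaim and invalidation paths use try_to_release_page() to ask the filesystem's ->releasepage() to detach private data; the page must be locked, and a zero return means it is still pinned.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical check: can this locked page's private data be dropped? */
static bool example_can_free(struct page *page, gfp_t gfp_mask)
{
        if (page_has_private(page) && !try_to_release_page(page, gfp_mask))
                return false;   /* filesystem refused; page stays pinned */
        return true;
}
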