Lines matching refs:mapping (fs/buffer.c). One entry per source line; a trailing "argument" or "local" marks the line where `mapping` is declared as a parameter or as a local variable of the enclosing function. Illustrative usage sketches for the main helpers follow the listing.
358 set_bit(AS_EIO, &page->mapping->flags); in end_buffer_async_write()
574 int sync_mapping_buffers(struct address_space *mapping) in sync_mapping_buffers() argument
576 struct address_space *buffer_mapping = mapping->private_data; in sync_mapping_buffers()
578 if (buffer_mapping == NULL || list_empty(&mapping->private_list)) in sync_mapping_buffers()
582 &mapping->private_list); in sync_mapping_buffers()
605 struct address_space *mapping = inode->i_mapping; in mark_buffer_dirty_inode() local
606 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode()
609 if (!mapping->private_data) { in mark_buffer_dirty_inode()
610 mapping->private_data = buffer_mapping; in mark_buffer_dirty_inode()
612 BUG_ON(mapping->private_data != buffer_mapping); in mark_buffer_dirty_inode()
617 &mapping->private_list); in mark_buffer_dirty_inode()
618 bh->b_assoc_map = mapping; in mark_buffer_dirty_inode()
633 static void __set_page_dirty(struct page *page, struct address_space *mapping, in __set_page_dirty() argument
638 spin_lock_irqsave(&mapping->tree_lock, flags); in __set_page_dirty()
639 if (page->mapping) { /* Race with truncate? */ in __set_page_dirty()
641 account_page_dirtied(page, mapping, memcg); in __set_page_dirty()
642 radix_tree_tag_set(&mapping->page_tree, in __set_page_dirty()
645 spin_unlock_irqrestore(&mapping->tree_lock, flags); in __set_page_dirty()
677 struct address_space *mapping = page_mapping(page); in __set_page_dirty_buffers() local
679 if (unlikely(!mapping)) in __set_page_dirty_buffers()
682 spin_lock(&mapping->private_lock); in __set_page_dirty_buffers()
698 spin_unlock(&mapping->private_lock); in __set_page_dirty_buffers()
701 __set_page_dirty(page, mapping, memcg, 1); in __set_page_dirty_buffers()
706 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in __set_page_dirty_buffers()
735 struct address_space *mapping; in fsync_buffers_list() local
745 mapping = bh->b_assoc_map; in fsync_buffers_list()
752 bh->b_assoc_map = mapping; in fsync_buffers_list()
784 mapping = bh->b_assoc_map; in fsync_buffers_list()
791 &mapping->private_list); in fsync_buffers_list()
792 bh->b_assoc_map = mapping; in fsync_buffers_list()
822 struct address_space *mapping = &inode->i_data; in invalidate_inode_buffers() local
823 struct list_head *list = &mapping->private_list; in invalidate_inode_buffers()
824 struct address_space *buffer_mapping = mapping->private_data; in invalidate_inode_buffers()
845 struct address_space *mapping = &inode->i_data; in remove_inode_buffers() local
846 struct list_head *list = &mapping->private_list; in remove_inode_buffers()
847 struct address_space *buffer_mapping = mapping->private_data; in remove_inode_buffers()
1178 struct address_space *mapping = NULL; in mark_buffer_dirty() local
1183 mapping = page_mapping(page); in mark_buffer_dirty()
1184 if (mapping) in mark_buffer_dirty()
1185 __set_page_dirty(page, mapping, memcg, 0); in mark_buffer_dirty()
1188 if (mapping) in mark_buffer_dirty()
1189 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in mark_buffer_dirty()
1219 struct address_space *buffer_mapping = bh->b_page->mapping; in __bforget()
1602 spin_lock(&page->mapping->private_lock); in create_empty_buffers()
1614 spin_unlock(&page->mapping->private_lock); in create_empty_buffers()
1848 mapping_set_error(page->mapping, err); in __block_write_full_page()
1910 struct inode *inode = page->mapping->host; in __block_write_begin()
2031 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, in block_write_begin() argument
2038 page = grab_cache_page_write_begin(mapping, index, flags); in block_write_begin()
2054 int block_write_end(struct file *file, struct address_space *mapping, in block_write_end() argument
2058 struct inode *inode = mapping->host; in block_write_end()
2090 int generic_write_end(struct file *file, struct address_space *mapping, in generic_write_end() argument
2094 struct inode *inode = mapping->host; in generic_write_end()
2098 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); in generic_write_end()
2184 struct inode *inode = page->mapping->host; in block_read_full_page()
2274 struct address_space *mapping = inode->i_mapping; in generic_cont_expand_simple() local
2283 err = pagecache_write_begin(NULL, mapping, size, 0, in generic_cont_expand_simple()
2289 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); in generic_cont_expand_simple()
2297 static int cont_expand_zero(struct file *file, struct address_space *mapping, in cont_expand_zero() argument
2300 struct inode *inode = mapping->host; in cont_expand_zero()
2320 err = pagecache_write_begin(file, mapping, curpos, len, in cont_expand_zero()
2326 err = pagecache_write_end(file, mapping, curpos, len, len, in cont_expand_zero()
2333 balance_dirty_pages_ratelimited(mapping); in cont_expand_zero()
2354 err = pagecache_write_begin(file, mapping, curpos, len, in cont_expand_zero()
2360 err = pagecache_write_end(file, mapping, curpos, len, len, in cont_expand_zero()
2375 int cont_write_begin(struct file *file, struct address_space *mapping, in cont_write_begin() argument
2380 struct inode *inode = mapping->host; in cont_write_begin()
2385 err = cont_expand_zero(file, mapping, pos, bytes); in cont_write_begin()
2395 return block_write_begin(mapping, pos, len, flags, pagep, get_block); in cont_write_begin()
2401 struct inode *inode = page->mapping->host; in block_commit_write()
2436 if ((page->mapping != inode->i_mapping) || in block_page_mkwrite()
2485 spin_lock(&page->mapping->private_lock); in attach_nobh_buffers()
2495 spin_unlock(&page->mapping->private_lock); in attach_nobh_buffers()
2503 int nobh_write_begin(struct address_space *mapping, in nobh_write_begin() argument
2508 struct inode *inode = mapping->host; in nobh_write_begin()
2526 page = grab_cache_page_write_begin(mapping, index, flags); in nobh_write_begin()
2644 int nobh_write_end(struct file *file, struct address_space *mapping, in nobh_write_end() argument
2648 struct inode *inode = page->mapping->host; in nobh_write_end()
2656 return generic_write_end(file, mapping, pos, len, in nobh_write_end()
2687 struct inode * const inode = page->mapping->host; in nobh_writepage()
2707 if (page->mapping->a_ops->invalidatepage) in nobh_writepage()
2708 page->mapping->a_ops->invalidatepage(page, offset); in nobh_writepage()
2731 int nobh_truncate_page(struct address_space *mapping, in nobh_truncate_page() argument
2739 struct inode *inode = mapping->host; in nobh_truncate_page()
2754 page = grab_cache_page(mapping, index); in nobh_truncate_page()
2763 return block_truncate_page(mapping, from, get_block); in nobh_truncate_page()
2784 err = mapping->a_ops->readpage(NULL, page); in nobh_truncate_page()
2809 int block_truncate_page(struct address_space *mapping, in block_truncate_page() argument
2817 struct inode *inode = mapping->host; in block_truncate_page()
2832 page = grab_cache_page(mapping, index); in block_truncate_page()
2891 struct inode * const inode = page->mapping->host; in block_write_full_page()
2927 sector_t generic_block_bmap(struct address_space *mapping, sector_t block, in generic_block_bmap() argument
2931 struct inode *inode = mapping->host; in generic_block_bmap()
3193 if (buffer_write_io_error(bh) && page->mapping) in drop_buffers()
3194 set_bit(AS_EIO, &page->mapping->flags); in drop_buffers()
3216 struct address_space * const mapping = page->mapping; in try_to_free_buffers() local
3224 if (mapping == NULL) { /* can this still happen? */ in try_to_free_buffers()
3229 spin_lock(&mapping->private_lock); in try_to_free_buffers()
3248 spin_unlock(&mapping->private_lock); in try_to_free_buffers()
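
The mark_buffer_dirty_inode() / sync_mapping_buffers() / fsync_buffers_list() / invalidate_inode_buffers() entries above all revolve around the per-inode association list: dirty metadata buffers are chained onto mapping->private_list, with mapping->private_data remembering the backing address_space they actually live in. Below is a minimal sketch of how a simple ext2-style filesystem would drive that machinery; the foo_* names are hypothetical placeholders, not functions from the listing.

#include <linux/buffer_head.h>
#include <linux/fs.h>

/* Called after modifying an indirect/metadata block on behalf of @inode. */
static void foo_update_metadata(struct inode *inode, struct buffer_head *bh)
{
	/*
	 * Dirties the buffer and links it onto inode->i_mapping->private_list;
	 * mapping->private_data is set to the buffer's backing address_space.
	 */
	mark_buffer_dirty_inode(bh, inode);
}

/* fsync path: flush the metadata buffers associated with @inode. */
static int foo_sync_metadata(struct inode *inode)
{
	/*
	 * Walks mapping->private_list (via fsync_buffers_list()), writing the
	 * listed buffers out and waiting for the writes to complete.
	 */
	return sync_mapping_buffers(inode->i_mapping);
}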
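
Most of the remaining entries (block_read_full_page, block_write_full_page, block_write_begin, block_write_end, generic_write_end, cont_write_begin, create_empty_buffers) are the buffer-head based address_space operation helpers. A hedged sketch of wiring them into a filesystem's aops follows; foo_get_block and the other foo_* names are hypothetical, and the trivial 1:1 block mapping exists only to keep the example self-contained.

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Illustration only: map file block N straight to disk block N. */
static int foo_get_block(struct inode *inode, sector_t iblock,
			 struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}

static int foo_readpage(struct file *file, struct page *page)
{
	/* Attaches buffers to the page and reads the mapped blocks. */
	return block_read_full_page(page, foo_get_block);
}

static int foo_writepage(struct page *page, struct writeback_control *wbc)
{
	/* __block_write_full_page() does the work under the covers. */
	return block_write_full_page(page, foo_get_block, wbc);
}

static int foo_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned flags,
			   struct page **pagep, void **fsdata)
{
	/* Locks the page and maps/reads the blocks touched by [pos, pos+len). */
	return block_write_begin(mapping, pos, len, flags, pagep, foo_get_block);
}

static int foo_write_end(struct file *file, struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct page *page, void *fsdata)
{
	/* Commits the copy, dirties the page, updates i_size, unlocks. */
	return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
}

static const struct address_space_operations foo_aops = {
	.readpage	= foo_readpage,
	.writepage	= foo_writepage,
	.write_begin	= foo_write_begin,
	.write_end	= foo_write_end,
};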
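
The tail of the listing (block_truncate_page, nobh_truncate_page, generic_block_bmap) covers partial-block zeroing on truncate and the legacy bmap interface. Another hedged sketch, reusing the hypothetical foo_get_block callback from the previous example:

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mm.h>

static int foo_get_block(struct inode *inode, sector_t iblock,
			 struct buffer_head *bh_result, int create);

/* Shrinking a file: zero the tail of the block that now contains EOF. */
static int foo_setsize(struct inode *inode, loff_t newsize)
{
	int err;

	err = block_truncate_page(inode->i_mapping, newsize, foo_get_block);
	if (err)
		return err;

	truncate_setsize(inode, newsize);
	/* ... free the data blocks beyond newsize ... */
	return 0;
}

/* ->bmap: translate a file block number to a device block number. */
static sector_t foo_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, foo_get_block);
}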