Searched refs:private_lock (Results 1 - 9 of 9) sorted by relevance

/linux-4.1.27/fs/
buffer.c
190 * private_lock.
194 * succeeds, there is no need to take private_lock. (But if
195 * private_lock is contended then so is mapping->tree_lock).
214 spin_lock(&bd_mapping->private_lock); __find_get_block_slow()
248 spin_unlock(&bd_mapping->private_lock); __find_get_block_slow()
442 * So the locking for private_list is via the private_lock in the address_space
445 * mapping->private_lock does *not* protect mapping->private_list! In fact,
447 * ->private_lock.
453 * utility functions are free to use private_lock and private_list for
477 * The buffer's backing address_space's private_lock must be held
577 return fsync_buffers_list(&buffer_mapping->private_lock, sync_mapping_buffers()
611 spin_lock(&buffer_mapping->private_lock); mark_buffer_dirty_inode()
615 spin_unlock(&buffer_mapping->private_lock); mark_buffer_dirty_inode()
661 * We use private_lock to lock against try_to_free_buffers while using the
676 spin_lock(&mapping->private_lock); __set_page_dirty_buffers()
687 spin_unlock(&mapping->private_lock); __set_page_dirty_buffers()
798 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
809 spin_lock(&buffer_mapping->private_lock); invalidate_inode_buffers()
812 spin_unlock(&buffer_mapping->private_lock); invalidate_inode_buffers()
832 spin_lock(&buffer_mapping->private_lock); remove_inode_buffers()
841 spin_unlock(&buffer_mapping->private_lock); remove_inode_buffers()
1025 spin_lock(&inode->i_mapping->private_lock); grow_dev_page()
1029 spin_unlock(&inode->i_mapping->private_lock); grow_dev_page()
1138 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1197 spin_lock(&buffer_mapping->private_lock); __bforget()
1200 spin_unlock(&buffer_mapping->private_lock); __bforget()
1561 * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
1578 spin_lock(&page->mapping->private_lock); create_empty_buffers()
1590 spin_unlock(&page->mapping->private_lock); create_empty_buffers()
2482 spin_lock(&page->mapping->private_lock); attach_nobh_buffers()
2492 spin_unlock(&page->mapping->private_lock); attach_nobh_buffers()
3164 * locking the page or by holding its mapping's private_lock.
3175 * private_lock.
3229 spin_lock(&mapping->private_lock); try_to_free_buffers()
3242 * private_lock must be held over this entire operation in order try_to_free_buffers()
3248 spin_unlock(&mapping->private_lock); try_to_free_buffers()
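
Read together, the buffer.c hits above trace one pattern: every path that walks or edits a page's circular buffer ring brackets the walk with mapping->private_lock, so try_to_free_buffers() (lines 3229-3248) cannot detach the ring mid-walk. A condensed sketch of that pattern, modelled on the __set_page_dirty_buffers() hits at lines 676-687 (not the verbatim kernel code; the radix-tree dirty tagging the real function does afterwards under tree_lock is omitted, and the function name is mine):

#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Condensed sketch, not the kernel's code. */
static int sketch_set_page_dirty_buffers(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int newly_dirty;

        /* Hold private_lock so try_to_free_buffers() cannot detach
         * the buffer ring while we walk it. */
        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
        newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);

        return newly_dirty;
}
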
aio.c
276 spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock); put_aio_ring_file()
279 spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock); put_aio_ring_file()
361 /* mapping->private_lock here protects against the kioctx teardown. */ aio_migratepage()
362 spin_lock(&mapping->private_lock); aio_migratepage()
415 spin_unlock(&mapping->private_lock); aio_migratepage()
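
The two aio.c call sites pair up: put_aio_ring_file() (lines 276-279) retracts the kioctx that the ring file's mapping publishes in ->private_data, and aio_migratepage() (lines 361-415) holds the same private_lock across the page migration so the context cannot be torn down underneath it. A sketch of that shape (the function name and reduced signature are mine; the migration work itself is elided):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/spinlock.h>

struct kioctx;                  /* opaque here; defined inside fs/aio.c */

/* Sketch of aio_migratepage()'s locking shape, not the kernel's code. */
static int sketch_aio_migratepage(struct address_space *mapping)
{
        struct kioctx *ctx;
        int rc = 0;

        /* mapping->private_lock here protects against the kioctx teardown. */
        spin_lock(&mapping->private_lock);
        ctx = mapping->private_data;
        if (!ctx) {
                /* put_aio_ring_file() already ran; nothing to migrate. */
                rc = -EINVAL;
        } else {
                /* ... swap the old ring page for the new one while the
                 * lock pins ctx ... */
        }
        spin_unlock(&mapping->private_lock);
        return rc;
}
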
inode.c
345 spin_lock_init(&mapping->private_lock); address_space_init_once()
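
This single inode.c hit is where the lock is born: address_space_init_once() runs once per address_space, before any of the users above can. A sketch of the relevant slice (the real function also sets up the radix tree, tree_lock, i_mmap and friends; the function name here is mine):

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch of the relevant slice of address_space_init_once(). */
static void sketch_address_space_init(struct address_space *mapping)
{
        spin_lock_init(&mapping->private_lock);
        INIT_LIST_HEAD(&mapping->private_list);
}
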
/linux-4.1.27/fs/btrfs/
extent_io.c
3885 spin_lock(&mapping->private_lock); btree_write_cache_pages()
3887 spin_unlock(&mapping->private_lock); btree_write_cache_pages()
3899 spin_unlock(&mapping->private_lock); btree_write_cache_pages()
3904 spin_unlock(&mapping->private_lock); btree_write_cache_pages()
3909 spin_unlock(&mapping->private_lock); btree_write_cache_pages()
4618 spin_lock(&page->mapping->private_lock); btrfs_release_extent_buffer_page()
4642 spin_unlock(&page->mapping->private_lock); btrfs_release_extent_buffer_page()
4928 spin_lock(&mapping->private_lock); alloc_extent_buffer()
4939 spin_unlock(&mapping->private_lock); alloc_extent_buffer()
4956 spin_unlock(&mapping->private_lock); alloc_extent_buffer()
5652 spin_lock(&page->mapping->private_lock); try_release_extent_buffer()
5654 spin_unlock(&page->mapping->private_lock); try_release_extent_buffer()
5669 spin_unlock(&page->mapping->private_lock); try_release_extent_buffer()
5672 spin_unlock(&page->mapping->private_lock); try_release_extent_buffer()
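
btrfs repurposes private_lock for its metadata pages: alloc_extent_buffer() (lines 4928-4956) parks an extent_buffer pointer in page->private under the lock, and try_release_extent_buffer() (lines 5652-5672) must hold it while reading that pointer back. A sketch of the release-side check (the function name is mine; the refcount handling the real function does under the lock is elided):

#include <linux/mm.h>
#include <linux/pagemap.h>

struct extent_buffer;           /* opaque here; defined in fs/btrfs */

/* Sketch of try_release_extent_buffer()'s entry check, not the
 * kernel's code. */
static int sketch_try_release_eb(struct page *page)
{
        struct extent_buffer *eb;

        spin_lock(&page->mapping->private_lock);
        if (!PagePrivate(page)) {
                /* No extent_buffer attached; the page is releasable. */
                spin_unlock(&page->mapping->private_lock);
                return 1;
        }
        eb = (struct extent_buffer *)page->private;
        if (!eb) {
                spin_unlock(&page->mapping->private_lock);
                return 1;
        }
        /* ... still under private_lock: check eb->refs and take a
         * reference before dropping the lock (elided) ... */
        spin_unlock(&page->mapping->private_lock);
        return 0;
}
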
/linux-4.1.27/fs/ntfs/
aops.c
1726 * the mapping->private_lock. Once the buffers are marked dirty we no longer
1738 spin_lock(&mapping->private_lock); mark_ntfs_record_dirty()
1740 spin_unlock(&mapping->private_lock); mark_ntfs_record_dirty()
1742 spin_lock(&mapping->private_lock); mark_ntfs_record_dirty()
1766 spin_unlock(&mapping->private_lock); mark_ntfs_record_dirty()
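
The ntfs hits show a drop-and-retake shape: the lock taken at 1738 is released at 1740 so buffers can be allocated without holding a spinlock, then retaken at 1742 and held until 1766 while the buffers are attached and dirtied. A sketch of mark_ntfs_record_dirty()'s locking skeleton (simplified: the real function dirties only the buffers covering the record, and frees a losing allocation after unlocking; the function name here is mine):

#include <linux/buffer_head.h>
#include <linux/mm.h>

/* Locking skeleton of mark_ntfs_record_dirty(); a sketch, not the
 * verbatim kernel code. */
static void sketch_mark_record_dirty(struct page *page, unsigned long bh_size)
{
        struct address_space *mapping = page->mapping;
        struct buffer_head *bh, *head;

        spin_lock(&mapping->private_lock);
        if (!page_has_buffers(page)) {
                /* alloc_page_buffers() may sleep: drop the spinlock. */
                spin_unlock(&mapping->private_lock);
                head = alloc_page_buffers(page, bh_size, 1);
                spin_lock(&mapping->private_lock);
                if (!page_has_buffers(page))
                        attach_page_buffers(page, head);
                /* else: lost the race; the real code frees head after
                 * unlocking. */
        }
        bh = head = page_buffers(page);
        do {
                /* The real code dirties only buffers inside the record. */
                set_buffer_dirty(bh);
        } while ((bh = bh->b_this_page) != head);
        spin_unlock(&mapping->private_lock);
}
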
/linux-4.1.27/mm/
filemap.c
65 * ->private_lock (__free_pte->__set_page_dirty_buffers)
95 * ->private_lock (try_to_unmap_one)
99 * ->private_lock (page_remove_rmap->set_page_dirty)
105 * ->private_lock (zap_pte_range->__set_page_dirty_buffers)
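
These filemap.c hits are slices of the lock-ordering chart at the top of mm/filemap.c; the rule they encode is that ->private_lock ranks above mapping->tree_lock. A sketch of the only legal nesting (simplified: the real __set_page_dirty_buffers() uses spin_lock_irqsave() for tree_lock, and the function name here is mine):

#include <linux/fs.h>
#include <linux/spinlock.h>

/* Sketch of the documented nesting order, not the kernel's code. */
static void sketch_lock_nesting(struct address_space *mapping)
{
        spin_lock(&mapping->private_lock);      /* outer: buffer ring */
        spin_lock_irq(&mapping->tree_lock);     /* inner: radix tree */
        /* ... tag the page dirty in the radix tree ... */
        spin_unlock_irq(&mapping->tree_lock);
        spin_unlock(&mapping->private_lock);
}
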
rmap.c
32 * mapping->private_lock (in __set_page_dirty_buffers)
/linux-4.1.27/fs/xfs/
xfs_aops.c
1884 spin_lock(&mapping->private_lock); xfs_vm_set_page_dirty()
1897 spin_unlock(&mapping->private_lock); xfs_vm_set_page_dirty()
/linux-4.1.27/include/linux/
fs.h
438 spinlock_t private_lock; /* for use by the address_space */ member in struct:address_space
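
For context, the hit sits next to the two fields the lock exists to guard. A trimmed excerpt of struct address_space around line 438 of include/linux/fs.h in 4.1.27 (illustrative only; every other member is elided, so do not compile this next to the real fs.h):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Trimmed excerpt; host, page_tree, tree_lock, i_mmap, a_ops and the
 * rest of the struct are elided. */
struct address_space {
        spinlock_t              private_lock;   /* for use by the address_space */
        struct list_head        private_list;   /* ditto */
        void                    *private_data;  /* ditto */
};
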
