/linux-4.1.27/fs/nilfs2/

  btnode.c
    201  spin_lock_irq(&btnc->tree_lock);  in nilfs_btnode_prepare_change_key()
    203  spin_unlock_irq(&btnc->tree_lock);  in nilfs_btnode_prepare_change_key()
    259  spin_lock_irq(&btnc->tree_lock);  in nilfs_btnode_commit_change_key()
    263  spin_unlock_irq(&btnc->tree_lock);  in nilfs_btnode_commit_change_key()
    291  spin_lock_irq(&btnc->tree_lock);  in nilfs_btnode_abort_change_key()
    293  spin_unlock_irq(&btnc->tree_lock);  in nilfs_btnode_abort_change_key()

  page.c
    341  spin_lock_irq(&smap->tree_lock);  in nilfs_copy_back_pages()
    346  spin_unlock_irq(&smap->tree_lock);  in nilfs_copy_back_pages()
    348  spin_lock_irq(&dmap->tree_lock);  in nilfs_copy_back_pages()
    362  spin_unlock_irq(&dmap->tree_lock);  in nilfs_copy_back_pages()
    485  spin_lock_irq(&mapping->tree_lock);  in __nilfs_clear_page_dirty()
    490  spin_unlock_irq(&mapping->tree_lock);  in __nilfs_clear_page_dirty()
    493  spin_unlock_irq(&mapping->tree_lock);  in __nilfs_clear_page_dirty()

/linux-4.1.27/fs/btrfs/

  free-space-cache.c
    647   spin_lock(&ctl->tree_lock);  in merge_space_tree()
    661   spin_unlock(&ctl->tree_lock);  in merge_space_tree()
    667   spin_unlock(&ctl->tree_lock);  in merge_space_tree()
    768   spin_lock(&ctl->tree_lock);  in __load_free_space_cache()
    770   spin_unlock(&ctl->tree_lock);  in __load_free_space_cache()
    786   spin_lock(&ctl->tree_lock);  in __load_free_space_cache()
    790   spin_unlock(&ctl->tree_lock);  in __load_free_space_cache()
    877   spin_lock(&ctl->tree_lock);  in load_free_space_cache()
    880   spin_unlock(&ctl->tree_lock);  in load_free_space_cache()
    1272  spin_lock(&ctl->tree_lock);  in __btrfs_write_out_cache()
    [all …]

  inode-map.c
    249  spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;  in btrfs_unpin_free_ino()
    369  spin_lock_init(&ctl->tree_lock);  in btrfs_init_free_ino_ctl()
    384  spin_lock_init(&pinned->tree_lock);  in btrfs_init_free_ino_ctl()
    482  spin_lock(&ctl->tree_lock);  in btrfs_save_ino_cache()
    486  spin_unlock(&ctl->tree_lock);  in btrfs_save_ino_cache()

  free-space-cache.h
    31  spinlock_t tree_lock;  member

  extent_io.c
    5126  spin_lock_irq(&page->mapping->tree_lock);  in clear_extent_buffer_dirty()
    5132  spin_unlock_irq(&page->mapping->tree_lock);  in clear_extent_buffer_dirty()

  extent-tree.c
    6992  spin_lock(&block_group->free_space_ctl->tree_lock);  in find_free_extent()
    7000  spin_unlock(&block_group->free_space_ctl->tree_lock);  in find_free_extent()
    7003  spin_unlock(&block_group->free_space_ctl->tree_lock);  in find_free_extent()

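All of the btrfs hits guard the in-memory free-space accounting: ctl->tree_lock is the spinlock declared in free-space-cache.h above, protecting the rbtree of free-space entries inside a btrfs_free_space_ctl. A stripped-down sketch of that locking shape, using hypothetical names (my_space_ctl, my_free_space, space_at_offset) rather than the real btrfs types:

    #include <linux/types.h>
    #include <linux/spinlock.h>
    #include <linux/rbtree.h>

    struct my_free_space {
            struct rb_node offset_index;
            u64 offset;
            u64 bytes;
    };

    struct my_space_ctl {
            spinlock_t tree_lock;             /* protects free_space_offset */
            struct rb_root free_space_offset; /* entries sorted by offset */
    };

    /*
     * Return how many bytes are free at @offset, or 0 if no entry matches.
     * The rbtree may only be walked with tree_lock held, which is what the
     * spin_lock(&ctl->tree_lock) calls listed above are doing.
     */
    static u64 space_at_offset(struct my_space_ctl *ctl, u64 offset)
    {
            struct rb_node *n;
            u64 bytes = 0;

            spin_lock(&ctl->tree_lock);
            for (n = ctl->free_space_offset.rb_node; n; ) {
                    struct my_free_space *e =
                            rb_entry(n, struct my_free_space, offset_index);

                    if (offset < e->offset)
                            n = n->rb_left;
                    else if (offset > e->offset)
                            n = n->rb_right;
                    else {
                            bytes = e->bytes;
                            break;
                    }
            }
            spin_unlock(&ctl->tree_lock);

            return bytes;
    }
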
/linux-4.1.27/mm/

  truncate.c
    36   spin_lock_irq(&mapping->tree_lock);  in clear_exceptional_entry()
    63   spin_unlock_irq(&mapping->tree_lock);  in clear_exceptional_entry()
    435  spin_lock_irq(&mapping->tree_lock);  in truncate_inode_pages_final()
    436  spin_unlock_irq(&mapping->tree_lock);  in truncate_inode_pages_final()
    521  spin_lock_irq(&mapping->tree_lock);  in invalidate_complete_page2()
    527  spin_unlock_irq(&mapping->tree_lock);  in invalidate_complete_page2()
    535  spin_unlock_irq(&mapping->tree_lock);  in invalidate_complete_page2()

  swap_state.c
    93   spin_lock_irq(&address_space->tree_lock);  in __add_to_swap_cache()
    101  spin_unlock_irq(&address_space->tree_lock);  in __add_to_swap_cache()
    220  spin_lock_irq(&address_space->tree_lock);  in delete_from_swap_cache()
    222  spin_unlock_irq(&address_space->tree_lock);  in delete_from_swap_cache()

  migrate.c
    323  spin_lock_irq(&mapping->tree_lock);  in migrate_page_move_mapping()
    330  radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {  in migrate_page_move_mapping()
    331  spin_unlock_irq(&mapping->tree_lock);  in migrate_page_move_mapping()
    336  spin_unlock_irq(&mapping->tree_lock);  in migrate_page_move_mapping()
    350  spin_unlock_irq(&mapping->tree_lock);  in migrate_page_move_mapping()
    388  spin_unlock_irq(&mapping->tree_lock);  in migrate_page_move_mapping()
    409  spin_lock_irq(&mapping->tree_lock);  in migrate_huge_page_move_mapping()
    416  radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {  in migrate_huge_page_move_mapping()
    417  spin_unlock_irq(&mapping->tree_lock);  in migrate_huge_page_move_mapping()
    422  spin_unlock_irq(&mapping->tree_lock);  in migrate_huge_page_move_mapping()
    [all …]

  workingset.c
    330  if (!spin_trylock(&mapping->tree_lock)) {  in shadow_lru_isolate()
    363  spin_unlock(&mapping->tree_lock);  in shadow_lru_isolate()

  page-writeback.c
    1800  spin_lock_irq(&mapping->tree_lock);  in tag_pages_for_writeback()
    1804  spin_unlock_irq(&mapping->tree_lock);  in tag_pages_for_writeback()
    2153  spin_lock_irqsave(&mapping->tree_lock, flags);  in __set_page_dirty_nobuffers()
    2159  spin_unlock_irqrestore(&mapping->tree_lock, flags);  in __set_page_dirty_nobuffers()
    2347  spin_lock_irqsave(&mapping->tree_lock, flags);  in test_clear_page_writeback()
    2358  spin_unlock_irqrestore(&mapping->tree_lock, flags);  in test_clear_page_writeback()
    2382  spin_lock_irqsave(&mapping->tree_lock, flags);  in __test_set_page_writeback()
    2399  spin_unlock_irqrestore(&mapping->tree_lock, flags);  in __test_set_page_writeback()

  filemap.c
    232  spin_lock_irq(&mapping->tree_lock);  in delete_from_page_cache()
    234  spin_unlock_irq(&mapping->tree_lock);  in delete_from_page_cache()
    481  spin_lock_irq(&mapping->tree_lock);  in replace_page_cache_page()
    489  spin_unlock_irq(&mapping->tree_lock);  in replace_page_cache_page()
    515  p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);  in page_cache_tree_insert()
    573  spin_lock_irq(&mapping->tree_lock);  in __add_to_page_cache_locked()
    579  spin_unlock_irq(&mapping->tree_lock);  in __add_to_page_cache_locked()
    587  spin_unlock_irq(&mapping->tree_lock);  in __add_to_page_cache_locked()

  shmem.c
    264   item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);  in shmem_radix_tree_replace()
    305   spin_lock_irq(&mapping->tree_lock);  in shmem_add_to_page_cache()
    315   spin_unlock_irq(&mapping->tree_lock);  in shmem_add_to_page_cache()
    318   spin_unlock_irq(&mapping->tree_lock);  in shmem_add_to_page_cache()
    332   spin_lock_irq(&mapping->tree_lock);  in shmem_delete_from_page_cache()
    338   spin_unlock_irq(&mapping->tree_lock);  in shmem_delete_from_page_cache()
    351   spin_lock_irq(&mapping->tree_lock);  in shmem_free_swap()
    353   spin_unlock_irq(&mapping->tree_lock);  in shmem_free_swap()
    994   spin_lock_irq(&swap_mapping->tree_lock);  in shmem_replace_page()
    1001  spin_unlock_irq(&swap_mapping->tree_lock);  in shmem_replace_page()
    [all …]

  vmscan.c
    585  spin_lock_irq(&mapping->tree_lock);  in __remove_mapping()
    623  spin_unlock_irq(&mapping->tree_lock);  in __remove_mapping()
    644  spin_unlock_irq(&mapping->tree_lock);  in __remove_mapping()
    653  spin_unlock_irq(&mapping->tree_lock);  in __remove_mapping()

  swap.c
    1150  spin_lock_init(&swapper_spaces[i].tree_lock);  in swap_setup()

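Nearly all of the mm/ hits share one pattern: the page-cache radix tree hanging off an address_space may only be modified with mapping->tree_lock held, and the lock is taken with the IRQ-disabling variants because writeback completion can touch the tree from interrupt context. A minimal kernel-style sketch of that shape (drop_page_slot is a made-up helper; the real __delete_from_page_cache() also clears page->mapping, updates the VM counters and may leave a shadow entry behind):

    #include <linux/fs.h>
    #include <linux/pagemap.h>
    #include <linux/radix-tree.h>

    /* Illustrative only: remove whatever the page cache holds at @index. */
    static void drop_page_slot(struct address_space *mapping, pgoff_t index)
    {
            void *old;

            spin_lock_irq(&mapping->tree_lock);
            old = radix_tree_delete(&mapping->page_tree, index);
            if (old)
                    mapping->nrpages--;
            spin_unlock_irq(&mapping->tree_lock);
    }

At their core, vmscan.c's __remove_mapping() and shmem_delete_from_page_cache() are elaborations of this skeleton with reference-count freezing and accounting added.
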
/linux-4.1.27/arch/arm64/include/asm/

  cacheflush.h
    130  spin_lock_irq(&(mapping)->tree_lock)
    132  spin_unlock_irq(&(mapping)->tree_lock)

/linux-4.1.27/arch/unicore32/include/asm/

  cacheflush.h
    183  spin_lock_irq(&(mapping)->tree_lock)
    185  spin_unlock_irq(&(mapping)->tree_lock)

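The arch hits (arm64 and unicore32 here, parisc and arm further down) are the same two-line pair in each cacheflush.h: the helpers the arch cache-maintenance code wraps around mapping->tree_lock, in mainline the flush_dcache_mmap_lock()/flush_dcache_mmap_unlock() macros. Treat the macro names as an assumption, since only the locking lines appear in the search output; their likely shape:

    /* Reconstruction (assumption): only the two lock calls are confirmed by
     * the listing above; the surrounding macro names are inferred from the
     * mainline definitions of the same era. */
    #define flush_dcache_mmap_lock(mapping) \
            spin_lock_irq(&(mapping)->tree_lock)
    #define flush_dcache_mmap_unlock(mapping) \
            spin_unlock_irq(&(mapping)->tree_lock)
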
/linux-4.1.27/fs/hfsplus/

  bfind.c
    29  mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);  in hfs_find_init()
    32  mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);  in hfs_find_init()
    35  mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);  in hfs_find_init()
    49  mutex_unlock(&fd->tree->tree_lock);  in hfs_find_exit()

  btree.c
    145  mutex_init(&tree->tree_lock);  in hfs_btree_open()

  hfsplus_fs.h
    92  struct mutex tree_lock;  member

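In hfsplus (and hfs below), tree_lock is not a spinlock at all but a per-B-tree mutex held from hfs_find_init() to hfs_find_exit(), i.e. across an entire B-tree search. The hfsplus variant takes it with mutex_lock_nested() and one lockdep subclass per tree, so nesting the catalog, extents and attributes trees does not trigger false lockdep reports. A sketch of that shape with hypothetical names (my_btree, my_find_init, my_find_exit; the subclass constants are the ones visible in the listing):

    #include <linux/mutex.h>

    /* One lockdep subclass per on-disk B-tree, as in the bfind.c hits above. */
    enum { CATALOG_BTREE_MUTEX, EXTENTS_BTREE_MUTEX, ATTR_BTREE_MUTEX };

    enum my_tree_kind { MY_TREE_CATALOG, MY_TREE_EXTENTS, MY_TREE_ATTR };

    struct my_btree {
            enum my_tree_kind kind;
            struct mutex tree_lock;   /* serialises all searches in this tree */
    };

    /* Taken in my_find_init(), released in my_find_exit(): the mutex is
     * held across the whole search, not just around a single node. */
    static void my_find_init(struct my_btree *tree)
    {
            switch (tree->kind) {
            case MY_TREE_CATALOG:
                    mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
                    break;
            case MY_TREE_EXTENTS:
                    mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
                    break;
            default:
                    mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
                    break;
            }
    }

    static void my_find_exit(struct my_btree *tree)
    {
            mutex_unlock(&tree->tree_lock);
    }
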
/linux-4.1.27/arch/parisc/include/asm/

  cacheflush.h
    78  spin_lock_irq(&(mapping)->tree_lock)
    80  spin_unlock_irq(&(mapping)->tree_lock)

/linux-4.1.27/arch/arm/include/asm/

  cacheflush.h
    326  spin_lock_irq(&(mapping)->tree_lock)
    328  spin_unlock_irq(&(mapping)->tree_lock)

/linux-4.1.27/fs/hfs/

  bfind.c
    27  mutex_lock(&tree->tree_lock);  in hfs_find_init()
    37  mutex_unlock(&fd->tree->tree_lock);  in hfs_find_exit()

  btree.h
    36  struct mutex tree_lock;  member

  btree.c
    30  mutex_init(&tree->tree_lock);  in hfs_btree_open()

/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/

  cl_page.c
    168  int tree_lock = 1;  in cl_page_gang_lookup()  local
    221  tree_lock = 0;  in cl_page_gang_lookup()
    240  tree_lock = 1;  in cl_page_gang_lookup()
    242  if (tree_lock)  in cl_page_gang_lookup()

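Unlike every other hit, the cl_page.c tree_lock is only a local int: cl_page_gang_lookup() appears to drop and retake its page-tree spinlock in the middle of the scan, and the flag records whether the lock is currently held so the common exit path knows whether it still has to unlock. A rough illustration of that idiom; everything here (scan_object, fetch_batch_locked, process_one_may_sleep) is hypothetical and not the Lustre code:

    #include <linux/spinlock.h>

    /* Placeholders standing in for the real per-object scan state. */
    extern int fetch_batch_locked(void);    /* called with the guard held */
    extern int process_one_may_sleep(void); /* must be called unlocked */

    static void scan_object(spinlock_t *page_tree_guard)
    {
            int tree_lock = 1;      /* do we currently hold the guard? */

            spin_lock(page_tree_guard);
            while (fetch_batch_locked() > 0) {
                    /* Drop the spinlock before doing anything that can sleep. */
                    spin_unlock(page_tree_guard);
                    tree_lock = 0;

                    if (process_one_may_sleep() < 0)
                            break;  /* bail out with the lock already dropped */

                    spin_lock(page_tree_guard);
                    tree_lock = 1;
            }
            if (tree_lock)
                    spin_unlock(page_tree_guard);
    }
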
/linux-4.1.27/fs/

  inode.c
    342  spin_lock_init(&mapping->tree_lock);  in address_space_init_once()
    496  spin_lock_irq(&inode->i_data.tree_lock);  in clear_inode()
    499  spin_unlock_irq(&inode->i_data.tree_lock);  in clear_inode()

  buffer.c
    632  spin_lock_irqsave(&mapping->tree_lock, flags);  in __set_page_dirty()
    639  spin_unlock_irqrestore(&mapping->tree_lock, flags);  in __set_page_dirty()

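buffer.c's __set_page_dirty(), like the page-writeback.c, xfs and f2fs hits, uses the irqsave/irqrestore variants: they save and restore whatever interrupt state the caller had instead of unconditionally re-enabling IRQs, so the helper stays correct when reached with interrupts already disabled. A minimal sketch of that variant; tag_slot_dirty is a made-up name and the real functions do considerably more than set the radix-tree tag:

    #include <linux/fs.h>
    #include <linux/pagemap.h>
    #include <linux/radix-tree.h>

    /* Illustrative: tag an already-present page-cache entry as dirty. */
    static void tag_slot_dirty(struct address_space *mapping, pgoff_t index)
    {
            unsigned long flags;

            spin_lock_irqsave(&mapping->tree_lock, flags);
            if (radix_tree_lookup(&mapping->page_tree, index))
                    radix_tree_tag_set(&mapping->page_tree, index,
                                       PAGECACHE_TAG_DIRTY);
            spin_unlock_irqrestore(&mapping->tree_lock, flags);
    }
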
/linux-4.1.27/drivers/staging/lustre/lustre/llite/

  dir.c
    281  spin_lock_irq(&mapping->tree_lock);  in ll_dir_page_locate()
    288  spin_unlock_irq(&mapping->tree_lock);  in ll_dir_page_locate()
    332  spin_unlock_irq(&mapping->tree_lock);  in ll_dir_page_locate()

  llite_lib.c
    1804  spin_lock_irq(&inode->i_data.tree_lock);  in ll_delete_inode()
    1805  spin_unlock_irq(&inode->i_data.tree_lock);  in ll_delete_inode()

/linux-4.1.27/Documentation/vm/

  page_migration
    125  become possible again. Processes will move from spinning on the tree_lock

/linux-4.1.27/fs/xfs/

  xfs_aops.c
    1903  spin_lock_irqsave(&mapping->tree_lock, flags);  in xfs_vm_set_page_dirty()
    1910  spin_unlock_irqrestore(&mapping->tree_lock, flags);  in xfs_vm_set_page_dirty()

/linux-4.1.27/fs/f2fs/

  node.c
    85  spin_lock_irqsave(&mapping->tree_lock, flags);  in clear_node_page_dirty()
    89  spin_unlock_irqrestore(&mapping->tree_lock, flags);  in clear_node_page_dirty()

/linux-4.1.27/include/linux/

  fs.h
    428  spinlock_t tree_lock;  /* and lock protecting it */  member

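That single fs.h line is the declaration the rest of this listing revolves around: tree_lock sits directly next to the radix tree it protects inside struct address_space. Roughly, abridged from the v4.1-era header with the remaining fields omitted:

    struct address_space {
            struct inode            *host;          /* owner: inode, block_device */
            struct radix_tree_root  page_tree;      /* radix tree of all pages */
            spinlock_t              tree_lock;      /* and lock protecting it */
            /* ... i_mmap, nrpages, a_ops and the rest omitted ... */
    };
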
/linux-4.1.27/Documentation/cgroups/

  memory.txt
    265  mapping->tree_lock.