Searched refs:tree_lock (Results 1 - 39 of 39) sorted by relevance

/linux-4.1.27/fs/nilfs2/
btnode.c 201 spin_lock_irq(&btnc->tree_lock); nilfs_btnode_prepare_change_key()
203 spin_unlock_irq(&btnc->tree_lock); nilfs_btnode_prepare_change_key()
259 spin_lock_irq(&btnc->tree_lock); nilfs_btnode_commit_change_key()
263 spin_unlock_irq(&btnc->tree_lock); nilfs_btnode_commit_change_key()
291 spin_lock_irq(&btnc->tree_lock); nilfs_btnode_abort_change_key()
293 spin_unlock_irq(&btnc->tree_lock); nilfs_btnode_abort_change_key()
page.c 341 spin_lock_irq(&smap->tree_lock); nilfs_copy_back_pages()
346 spin_unlock_irq(&smap->tree_lock); nilfs_copy_back_pages()
348 spin_lock_irq(&dmap->tree_lock); nilfs_copy_back_pages()
362 spin_unlock_irq(&dmap->tree_lock); nilfs_copy_back_pages()
485 spin_lock_irq(&mapping->tree_lock); __nilfs_clear_page_dirty()
490 spin_unlock_irq(&mapping->tree_lock); __nilfs_clear_page_dirty()
493 spin_unlock_irq(&mapping->tree_lock); __nilfs_clear_page_dirty()
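
All six nilfs2 hits are the same idiom: nilfs2's caches embed an address_space, and in 4.1 mapping->tree_lock is an IRQ-safe spinlock (writeback completion can take it from interrupt context), so every radix-tree update is bracketed by spin_lock_irq()/spin_unlock_irq(). A minimal sketch of the pattern behind __nilfs_clear_page_dirty(), simplified rather than verbatim:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>

/* Simplified sketch: clear a page's dirty tag in the radix tree.
 * tree_lock must disable IRQs because writeback completion paths
 * take it from interrupt context. */
static int sketch_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (!mapping)
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page)) {
		radix_tree_tag_clear(&mapping->page_tree,
				     page_index(page),
				     PAGECACHE_TAG_DIRTY);
		spin_unlock_irq(&mapping->tree_lock);
		return clear_page_dirty_for_io(page);
	}
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}
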
/linux-4.1.27/fs/btrfs/
free-space-cache.c 647 spin_lock(&ctl->tree_lock); merge_space_tree()
661 spin_unlock(&ctl->tree_lock); merge_space_tree()
667 spin_unlock(&ctl->tree_lock); merge_space_tree()
768 spin_lock(&ctl->tree_lock); __load_free_space_cache()
770 spin_unlock(&ctl->tree_lock); __load_free_space_cache()
786 spin_lock(&ctl->tree_lock); __load_free_space_cache()
790 spin_unlock(&ctl->tree_lock); __load_free_space_cache()
877 spin_lock(&ctl->tree_lock); load_free_space_cache()
880 spin_unlock(&ctl->tree_lock); load_free_space_cache()
1272 spin_lock(&ctl->tree_lock); __btrfs_write_out_cache()
1297 spin_unlock(&ctl->tree_lock); __btrfs_write_out_cache()
1350 spin_unlock(&ctl->tree_lock); __btrfs_write_out_cache()
2079 spin_unlock(&ctl->tree_lock); insert_into_bitmap()
2086 spin_lock(&ctl->tree_lock); insert_into_bitmap()
2094 spin_lock(&ctl->tree_lock); insert_into_bitmap()
2294 spin_lock(&ctl->tree_lock); __btrfs_add_free_space()
2324 spin_unlock(&ctl->tree_lock); __btrfs_add_free_space()
2342 spin_lock(&ctl->tree_lock); btrfs_remove_free_space()
2404 spin_unlock(&ctl->tree_lock); btrfs_remove_free_space()
2419 spin_unlock(&ctl->tree_lock); btrfs_remove_free_space()
2451 spin_lock_init(&ctl->tree_lock); btrfs_init_free_space_ctl()
2531 cond_resched_lock(&ctl->tree_lock); __btrfs_remove_free_space_cache_locked()
2537 spin_lock(&ctl->tree_lock); __btrfs_remove_free_space_cache()
2539 spin_unlock(&ctl->tree_lock); __btrfs_remove_free_space_cache()
2548 spin_lock(&ctl->tree_lock); btrfs_remove_free_space_cache()
2557 cond_resched_lock(&ctl->tree_lock); btrfs_remove_free_space_cache()
2560 spin_unlock(&ctl->tree_lock); btrfs_remove_free_space_cache()
2575 spin_lock(&ctl->tree_lock); btrfs_find_space_for_alloc()
2601 spin_unlock(&ctl->tree_lock); btrfs_find_space_for_alloc()
2642 spin_lock(&ctl->tree_lock); btrfs_return_cluster_to_free_space()
2644 spin_unlock(&ctl->tree_lock); btrfs_return_cluster_to_free_space()
2750 spin_lock(&ctl->tree_lock); btrfs_alloc_from_cluster()
2763 spin_unlock(&ctl->tree_lock); btrfs_alloc_from_cluster()
3003 spin_lock(&ctl->tree_lock); btrfs_find_space_cluster()
3010 spin_unlock(&ctl->tree_lock); btrfs_find_space_cluster()
3047 spin_unlock(&ctl->tree_lock); btrfs_find_space_cluster()
3126 spin_lock(&ctl->tree_lock); trim_no_bitmap()
3129 spin_unlock(&ctl->tree_lock); trim_no_bitmap()
3136 spin_unlock(&ctl->tree_lock); trim_no_bitmap()
3145 spin_unlock(&ctl->tree_lock); trim_no_bitmap()
3154 spin_unlock(&ctl->tree_lock); trim_no_bitmap()
3164 spin_unlock(&ctl->tree_lock); trim_no_bitmap()
3172 spin_unlock(&ctl->tree_lock); trim_no_bitmap()
3211 spin_lock(&ctl->tree_lock); trim_bitmaps()
3214 spin_unlock(&ctl->tree_lock); trim_bitmaps()
3221 spin_unlock(&ctl->tree_lock); trim_bitmaps()
3230 spin_unlock(&ctl->tree_lock); trim_bitmaps()
3238 spin_unlock(&ctl->tree_lock); trim_bitmaps()
3247 spin_unlock(&ctl->tree_lock); trim_bitmaps()
3349 spin_lock(&ctl->tree_lock); btrfs_find_ino_for_alloc()
3382 spin_unlock(&ctl->tree_lock); btrfs_find_ino_for_alloc()
3525 spin_lock(&ctl->tree_lock); test_add_free_space_entry()
3529 spin_unlock(&ctl->tree_lock); test_add_free_space_entry()
3543 spin_lock(&ctl->tree_lock); test_add_free_space_entry()
3557 spin_unlock(&ctl->tree_lock); test_add_free_space_entry()
3581 spin_lock(&ctl->tree_lock); test_check_exists()
3650 spin_unlock(&ctl->tree_lock); test_check_exists()
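
Unlike mapping->tree_lock, the btrfs free-space ctl->tree_lock is a plain non-IRQ spinlock, and long drains yield it periodically with cond_resched_lock() (see __btrfs_remove_free_space_cache_locked() and btrfs_remove_free_space_cache() above, at 2531 and 2557). A rough sketch of that drain shape, with the bitmap handling omitted and kfree() standing in for the btrfs_free_space kmem_cache:

#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "free-space-cache.h"

/* Rough sketch only: the real code also frees bitmap pages and uses
 * a dedicated kmem_cache rather than kfree(). */
static void sketch_drain_free_space(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	spin_lock(&ctl->tree_lock);
	while ((node = rb_first(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		rb_erase(&info->offset_index, &ctl->free_space_offset);
		kfree(info);
		/* drop and retake tree_lock if a reschedule is due */
		cond_resched_lock(&ctl->tree_lock);
	}
	spin_unlock(&ctl->tree_lock);
}
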
inode-map.c 249 spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock; btrfs_unpin_free_ino()
369 spin_lock_init(&ctl->tree_lock); btrfs_init_free_ino_ctl()
384 spin_lock_init(&pinned->tree_lock); btrfs_init_free_ino_ctl()
482 spin_lock(&ctl->tree_lock); btrfs_save_ino_cache()
486 spin_unlock(&ctl->tree_lock); btrfs_save_ino_cache()
free-space-cache.h 31 spinlock_t tree_lock; member in struct:btrfs_free_space_ctl
extent_io.c 4022 * At this point we hold neither mapping->tree_lock nor extent_write_cache_pages()
5126 spin_lock_irq(&page->mapping->tree_lock); clear_extent_buffer_dirty()
5132 spin_unlock_irq(&page->mapping->tree_lock); clear_extent_buffer_dirty()
extent-tree.c 6992 spin_lock(&block_group->free_space_ctl->tree_lock); find_free_extent()
7000 spin_unlock(&block_group->free_space_ctl->tree_lock); find_free_extent()
7003 spin_unlock(&block_group->free_space_ctl->tree_lock); find_free_extent()
/linux-4.1.27/fs/hfsplus/
bfind.c 29 mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX); hfs_find_init()
32 mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX); hfs_find_init()
35 mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX); hfs_find_init()
49 mutex_unlock(&fd->tree->tree_lock); hfs_find_exit()
btree.c 145 mutex_init(&tree->tree_lock); hfs_btree_open()
hfsplus_fs.h 92 struct mutex tree_lock; member in struct:hfs_btree
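
In HFS+ the tree_lock is not a spinlock at all but a per-B-tree mutex, and hfs_find_init() takes it with mutex_lock_nested() so lockdep can tell the catalog, extents and attributes trees apart (code legitimately holds one while taking another). The shape of that switch, close to the source:

#include <linux/mutex.h>
#include "hfsplus_fs.h"

/* Near-verbatim shape of hfs_find_init(): each B-tree gets its own
 * lockdep subclass so nested acquisition across trees is not flagged. */
static void sketch_lock_btree(struct hfs_btree *tree)
{
	switch (tree->cnid) {
	case HFSPLUS_CAT_CNID:
		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
		break;
	case HFSPLUS_EXT_CNID:
		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
		break;
	case HFSPLUS_ATTR_CNID:
		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
		break;
	}
}
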
/linux-4.1.27/mm/
workingset.c 276 /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ count_shadow_nodes()
316 * the shadow node LRU under the mapping->tree_lock and the shadow_lru_isolate()
321 * We can then safely transition to the mapping->tree_lock to shadow_lru_isolate()
330 if (!spin_trylock(&mapping->tree_lock)) { shadow_lru_isolate()
363 spin_unlock(&mapping->tree_lock); shadow_lru_isolate()
378 /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ scan_shadow_nodes()
395 * mapping->tree_lock.
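
mm/workingset.c documents the one place where tree_lock is taken conditionally: the shadow-node shrinker walks a list_lru whose lock nests inside the IRQ-safe tree_lock, so with the LRU lock already held it may only trylock the tree and must back off on contention. A sketch of that shape (assumed helper name, not the verbatim shadow_lru_isolate()):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/spinlock.h>

/* Sketch: lru_lock is already held with IRQs disabled (it nests
 * inside the IRQ-safe tree_lock), so a plain trylock suffices. */
static int sketch_prune_shadow_node(struct address_space *mapping,
				    spinlock_t *lru_lock)
{
	if (!spin_trylock(&mapping->tree_lock)) {
		/* lock order forbids spinning here: drop the inner
		 * lock and let the caller retry the item later */
		spin_unlock(lru_lock);
		return -EAGAIN;
	}

	/* ... prune empty shadow entries under tree_lock ... */

	spin_unlock(&mapping->tree_lock);
	spin_unlock(lru_lock);
	return 0;
}
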
swap_state.c 93 spin_lock_irq(&address_space->tree_lock); __add_to_swap_cache()
101 spin_unlock_irq(&address_space->tree_lock); __add_to_swap_cache()
220 spin_lock_irq(&address_space->tree_lock); delete_from_swap_cache()
222 spin_unlock_irq(&address_space->tree_lock); delete_from_swap_cache()
truncate.c 36 spin_lock_irq(&mapping->tree_lock); clear_exceptional_entry()
56 * protected by mapping->tree_lock. clear_exceptional_entry()
63 spin_unlock_irq(&mapping->tree_lock); clear_exceptional_entry()
435 spin_lock_irq(&mapping->tree_lock); truncate_inode_pages_final()
436 spin_unlock_irq(&mapping->tree_lock); truncate_inode_pages_final()
521 spin_lock_irq(&mapping->tree_lock); invalidate_complete_page2()
527 spin_unlock_irq(&mapping->tree_lock); invalidate_complete_page2()
535 spin_unlock_irq(&mapping->tree_lock); invalidate_complete_page2()
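
clear_exceptional_entry() shows the unlocked-lookup-then-locked-recheck pattern: shadow entries are found without the lock, so before deleting one the slot must be re-verified under tree_lock. A condensed sketch (the real function also maintains workingset shadow-node accounting):

#include <linux/fs.h>
#include <linux/radix-tree.h>

/* Condensed sketch: delete a shadow (exceptional) entry only if the
 * slot still holds exactly the entry seen during the unlocked scan. */
static void sketch_clear_shadow_entry(struct address_space *mapping,
				      pgoff_t index, void *entry)
{
	spin_lock_irq(&mapping->tree_lock);
	/* radix_tree_delete_item() is a compare-and-delete: it only
	 * removes the slot if it still contains 'entry' */
	radix_tree_delete_item(&mapping->page_tree, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
}
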
filemap.c 67 * ->mapping->tree_lock
75 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
85 * ->mapping->tree_lock (__sync_single_inode)
96 * ->tree_lock (try_to_unmap_one)
100 * ->tree_lock (page_remove_rmap->set_page_dirty)
165 * protected by mapping->tree_lock. page_cache_tree_delete()
177 * is safe. The caller must hold the mapping's tree_lock.
232 spin_lock_irq(&mapping->tree_lock); delete_from_page_cache()
234 spin_unlock_irq(&mapping->tree_lock); delete_from_page_cache()
481 spin_lock_irq(&mapping->tree_lock); replace_page_cache_page()
489 spin_unlock_irq(&mapping->tree_lock); replace_page_cache_page()
515 p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); page_cache_tree_insert()
534 * mapping->tree_lock. page_cache_tree_insert()
573 spin_lock_irq(&mapping->tree_lock); __add_to_page_cache_locked()
579 spin_unlock_irq(&mapping->tree_lock); __add_to_page_cache_locked()
587 spin_unlock_irq(&mapping->tree_lock); __add_to_page_cache_locked()
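
The hit at filemap.c 515 uses radix_tree_deref_slot_protected(), which takes the spinlock argument purely so it can feed lockdep_is_held(); it must only be called with tree_lock held. A small sketch of a locked slot lookup:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>

/* Sketch: dereference a radix-tree slot under tree_lock. The lock
 * argument documents/asserts the protection via lockdep_is_held(). */
static struct page *sketch_lookup_locked(struct address_space *mapping,
					 pgoff_t index)
{
	void **slot;
	struct page *page = NULL;

	spin_lock_irq(&mapping->tree_lock);
	slot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (slot)
		page = radix_tree_deref_slot_protected(slot,
						       &mapping->tree_lock);
	spin_unlock_irq(&mapping->tree_lock);
	/* illustrative only: a real caller must take a page reference
	 * before dropping tree_lock */
	return page;
}
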
migrate.c 323 spin_lock_irq(&mapping->tree_lock); migrate_page_move_mapping()
330 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { migrate_page_move_mapping()
331 spin_unlock_irq(&mapping->tree_lock); migrate_page_move_mapping()
336 spin_unlock_irq(&mapping->tree_lock); migrate_page_move_mapping()
350 spin_unlock_irq(&mapping->tree_lock); migrate_page_move_mapping()
388 spin_unlock_irq(&mapping->tree_lock); migrate_page_move_mapping()
409 spin_lock_irq(&mapping->tree_lock); migrate_huge_page_move_mapping()
416 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { migrate_huge_page_move_mapping()
417 spin_unlock_irq(&mapping->tree_lock); migrate_huge_page_move_mapping()
422 spin_unlock_irq(&mapping->tree_lock); migrate_huge_page_move_mapping()
432 spin_unlock_irq(&mapping->tree_lock); migrate_huge_page_move_mapping()
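
migrate_page_move_mapping() combines the two previous idioms: re-verify the slot under tree_lock, bail with -EAGAIN if the page moved, then point the slot at the new page. A skeleton with the refcount freezing and dirty/writeback/statistics transfer omitted:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>

/* Skeleton only: the real function also freezes the page refcount
 * and transfers page state and memcg statistics. */
static int sketch_replace_page_slot(struct address_space *mapping,
				    struct page *page, struct page *newpage)
{
	void **pslot;

	spin_lock_irq(&mapping->tree_lock);
	pslot = radix_tree_lookup_slot(&mapping->page_tree,
				       page_index(page));
	if (!pslot ||
	    radix_tree_deref_slot_protected(pslot,
					    &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;		/* raced with truncation etc. */
	}
	radix_tree_replace_slot(pslot, newpage);
	spin_unlock_irq(&mapping->tree_lock);
	return 0;		/* MIGRATEPAGE_SUCCESS in the real code */
}
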
shmem.c 264 item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock); shmem_radix_tree_replace()
305 spin_lock_irq(&mapping->tree_lock); shmem_add_to_page_cache()
315 spin_unlock_irq(&mapping->tree_lock); shmem_add_to_page_cache()
318 spin_unlock_irq(&mapping->tree_lock); shmem_add_to_page_cache()
332 spin_lock_irq(&mapping->tree_lock); shmem_delete_from_page_cache()
338 spin_unlock_irq(&mapping->tree_lock); shmem_delete_from_page_cache()
351 spin_lock_irq(&mapping->tree_lock); shmem_free_swap()
353 spin_unlock_irq(&mapping->tree_lock); shmem_free_swap()
994 spin_lock_irq(&swap_mapping->tree_lock); shmem_replace_page()
1001 spin_unlock_irq(&swap_mapping->tree_lock); shmem_replace_page()
1853 spin_lock_irq(&mapping->tree_lock); shmem_tag_pins()
1856 spin_unlock_irq(&mapping->tree_lock); shmem_tag_pins()
1924 spin_lock_irq(&mapping->tree_lock); shmem_wait_for_pins()
1927 spin_unlock_irq(&mapping->tree_lock); shmem_wait_for_pins()
page-writeback.c 1791 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
1800 spin_lock_irq(&mapping->tree_lock); tag_pages_for_writeback()
1804 spin_unlock_irq(&mapping->tree_lock); tag_pages_for_writeback()
2153 spin_lock_irqsave(&mapping->tree_lock, flags); __set_page_dirty_nobuffers()
2159 spin_unlock_irqrestore(&mapping->tree_lock, flags); __set_page_dirty_nobuffers()
2347 spin_lock_irqsave(&mapping->tree_lock, flags); test_clear_page_writeback()
2358 spin_unlock_irqrestore(&mapping->tree_lock, flags); test_clear_page_writeback()
2382 spin_lock_irqsave(&mapping->tree_lock, flags); __test_set_page_writeback()
2399 spin_unlock_irqrestore(&mapping->tree_lock, flags); __test_set_page_writeback()
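
page-writeback.c is also where the _irqsave variants appear: __set_page_dirty_nobuffers() and the writeback accounting helpers can run with interrupts already disabled (see the rmap.c hierarchy above, ->tree_lock via page_remove_rmap->set_page_dirty), so the previous IRQ state must be saved and restored. A sketch of the dirty-tagging step:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>

/* Sketch: tag a page dirty in the radix tree from a context whose
 * IRQ state is unknown, hence irqsave/irqrestore. */
static void sketch_tag_page_dirty(struct address_space *mapping,
				  struct page *page)
{
	unsigned long flags;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	/* recheck: the page may have been truncated meanwhile */
	if (page->mapping == mapping)
		radix_tree_tag_set(&mapping->page_tree, page_index(page),
				   PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
}
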
vmscan.c 585 spin_lock_irq(&mapping->tree_lock); __remove_mapping()
609 * and thus under tree_lock, then this ordering is not required. __remove_mapping()
623 spin_unlock_irq(&mapping->tree_lock); __remove_mapping()
644 spin_unlock_irq(&mapping->tree_lock); __remove_mapping()
653 spin_unlock_irq(&mapping->tree_lock); __remove_mapping()
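
__remove_mapping() is the reclaim-side counterpart: the page's reference count is frozen under tree_lock so nobody can regain a reference while the page is unlinked from the tree. A skeleton under assumed simplifications (no swap cache, no shadow entry, no dirty recheck):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Skeleton with the swap-cache, shadow-entry and PageDirty branches
 * omitted; the real code unfreezes the refcount on those races. */
static int sketch_remove_mapping(struct address_space *mapping,
				 struct page *page)
{
	spin_lock_irq(&mapping->tree_lock);
	/* expect exactly 2 refs: page cache + the isolating caller */
	if (!page_freeze_refs(page, 2)) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;		/* still in use, keep the page */
	}
	__delete_from_page_cache(page, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	return 1;
}
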
swap.c 1150 spin_lock_init(&swapper_spaces[i].tree_lock); swap_setup()
rmap.c 36 * mapping->tree_lock (widely used, in set_page_dirty,
memcontrol.c 5846 /* Caller disabled preemption with mapping->tree_lock */ mem_cgroup_swapout()
/linux-4.1.27/fs/hfs/
bfind.c 27 mutex_lock(&tree->tree_lock); hfs_find_init()
37 mutex_unlock(&fd->tree->tree_lock); hfs_find_exit()
btree.h 36 struct mutex tree_lock; member in struct:hfs_btree
btree.c 30 mutex_init(&tree->tree_lock); hfs_btree_open()
/linux-4.1.27/arch/parisc/include/asm/
cacheflush.h 78 spin_lock_irq(&(mapping)->tree_lock)
80 spin_unlock_irq(&(mapping)->tree_lock)
/linux-4.1.27/arch/arm64/include/asm/
cacheflush.h 130 spin_lock_irq(&(mapping)->tree_lock)
132 spin_unlock_irq(&(mapping)->tree_lock)
/linux-4.1.27/arch/unicore32/include/asm/
cacheflush.h 183 spin_lock_irq(&(mapping)->tree_lock)
185 spin_unlock_irq(&(mapping)->tree_lock)
/linux-4.1.27/include/linux/
pagemap.h 123 * following (with tree_lock held for write):
136 * old find_get_page using tree_lock could equally have run before or after
fs.h 428 spinlock_t tree_lock; /* and lock protecting it */ member in struct:address_space
432 /* Protected by tree_lock together with the radix tree */
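
The fs.h hit is the definition site. In 4.1 the lock sits directly next to the radix tree it protects; abbreviated from include/linux/fs.h:

struct address_space {
	struct inode		*host;		/* owner: inode, block_device */
	struct radix_tree_root	page_tree;	/* radix tree of all pages */
	spinlock_t		tree_lock;	/* and lock protecting it */
	/* ... remaining members unchanged ... */
};
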
/linux-4.1.27/arch/arm/include/asm/
cacheflush.h 326 spin_lock_irq(&(mapping)->tree_lock)
328 spin_unlock_irq(&(mapping)->tree_lock)
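
The four arch hits (parisc, arm64, unicore32, arm) are all the same two macros: flush_dcache_mmap_lock()/unlock() are defined on top of tree_lock, giving flush_dcache_page() a non-sleeping, IRQ-safe lock to hold while it walks a file's shared mappings. As in those cacheflush.h headers:

#define flush_dcache_mmap_lock(mapping)		\
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping)	\
	spin_unlock_irq(&(mapping)->tree_lock)
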
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
cl_page.c 168 int tree_lock = 1; cl_page_gang_lookup() local
221 tree_lock = 0; cl_page_gang_lookup()
240 tree_lock = 1; cl_page_gang_lookup()
242 if (tree_lock) cl_page_gang_lookup()
/linux-4.1.27/fs/
inode.c 342 spin_lock_init(&mapping->tree_lock); address_space_init_once()
492 * We have to cycle tree_lock here because reclaim can be still in the clear_inode()
496 spin_lock_irq(&inode->i_data.tree_lock); clear_inode()
499 spin_unlock_irq(&inode->i_data.tree_lock); clear_inode()
buffer.c 195 * private_lock is contended then so is mapping->tree_lock).
632 spin_lock_irqsave(&mapping->tree_lock, flags); __set_page_dirty()
639 spin_unlock_irqrestore(&mapping->tree_lock, flags); __set_page_dirty()
1139 * mapping->tree_lock and mapping->host->i_lock.
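
The inode.c hits at 492-499 are a pure barrier: clear_inode() acquires and immediately releases tree_lock so that any reclaim thread still inside __remove_mapping() drains out before the inode is torn down; nothing is modified under the lock. Close to the source:

#include <linux/fs.h>

/* Cycle tree_lock as a barrier against concurrent reclaim; the page
 * cache must already be empty at this point. */
static void sketch_wait_out_reclaim(struct inode *inode)
{
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	spin_unlock_irq(&inode->i_data.tree_lock);
}
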
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
dir.c 281 spin_lock_irq(&mapping->tree_lock); ll_dir_page_locate()
288 spin_unlock_irq(&mapping->tree_lock); ll_dir_page_locate()
332 spin_unlock_irq(&mapping->tree_lock); ll_dir_page_locate()
llite_lib.c 1804 spin_lock_irq(&inode->i_data.tree_lock); ll_delete_inode()
1805 spin_unlock_irq(&inode->i_data.tree_lock); ll_delete_inode()
/linux-4.1.27/fs/afs/
write.c 488 /* at this point we hold neither mapping->tree_lock nor lock on afs_writepages_region()
/linux-4.1.27/fs/f2fs/
node.c 85 spin_lock_irqsave(&mapping->tree_lock, flags); clear_node_page_dirty()
89 spin_unlock_irqrestore(&mapping->tree_lock, flags); clear_node_page_dirty()
/linux-4.1.27/fs/xfs/
xfs_aops.c 1903 spin_lock_irqsave(&mapping->tree_lock, flags); xfs_vm_set_page_dirty()
1910 spin_unlock_irqrestore(&mapping->tree_lock, flags); xfs_vm_set_page_dirty()
/linux-4.1.27/fs/cifs/
file.c 1933 * At this point we hold neither mapping->tree_lock nor wdata_prepare_pages()

Completed in 766 milliseconds