Lines matching refs:delayed_refs (each hit: source line number, code, enclosing function)
810 struct btrfs_delayed_ref_root *delayed_refs; in btrfs_lookup_extent_info() local
894 delayed_refs = &trans->transaction->delayed_refs; in btrfs_lookup_extent_info()
895 spin_lock(&delayed_refs->lock); in btrfs_lookup_extent_info()
900 spin_unlock(&delayed_refs->lock); in btrfs_lookup_extent_info()
923 spin_unlock(&delayed_refs->lock); in btrfs_lookup_extent_info()
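The btrfs_lookup_extent_info() hits above are the read side of the pattern: the delayed ref head for the extent is looked up under delayed_refs->lock so it cannot be freed while its pending modifications are folded into the refcount read from disk. A minimal sketch of that pattern, assuming the btrfs_find_delayed_ref_head() helper and head layout of this kernel generation (contention and error paths trimmed):

    delayed_refs = &trans->transaction->delayed_refs;
    spin_lock(&delayed_refs->lock);
    head = btrfs_find_delayed_ref_head(trans, bytenr);
    if (head) {
            spin_lock(&head->lock);
            /* fold not-yet-run delayed modifications into the answer */
            if (head->extent_op && head->extent_op->update_flags)
                    extent_flags |= head->extent_op->flags_to_set;
            num_refs += head->node.ref_mod;
            spin_unlock(&head->lock);
    }
    spin_unlock(&delayed_refs->lock);

In the real function the head's mutex is also taken, and contention sends the caller back to search again, which is why spin_unlock appears at two different source lines (900 and 923).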
2457 struct btrfs_delayed_ref_root *delayed_refs; in __btrfs_run_delayed_refs() local
2468 delayed_refs = &trans->transaction->delayed_refs; in __btrfs_run_delayed_refs()
2474 spin_lock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2477 spin_unlock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2484 spin_unlock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2511 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs, in __btrfs_run_delayed_refs()
2521 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) { in __btrfs_run_delayed_refs()
2524 spin_lock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2526 delayed_refs->num_heads_ready++; in __btrfs_run_delayed_refs()
2527 spin_unlock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2587 spin_lock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2592 spin_unlock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2596 delayed_refs->num_heads--; in __btrfs_run_delayed_refs()
2598 &delayed_refs->href_root); in __btrfs_run_delayed_refs()
2599 spin_unlock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2605 atomic_dec(&delayed_refs->num_entries); in __btrfs_run_delayed_refs()
2647 spin_lock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2648 delayed_refs->pending_csums -= ref->num_bytes; in __btrfs_run_delayed_refs()
2649 spin_unlock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2672 spin_lock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
2675 spin_unlock(&delayed_refs->lock); in __btrfs_run_delayed_refs()
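The __btrfs_run_delayed_refs() cluster is the core consumer. The shape, visible from the lock/unlock pairs above: take delayed_refs->lock only to pick a head and to update the shared bookkeeping (num_heads, num_heads_ready, num_entries, pending_csums, the href_root rbtree); do the actual merging and running of refs with the spinlock dropped. A condensed, hedged sketch of one loop iteration, using the helper names from the hits (not the verbatim function):

    spin_lock(&delayed_refs->lock);
    locked_ref = btrfs_select_ref_head(trans);
    if (!locked_ref) {
            spin_unlock(&delayed_refs->lock);
            break;                          /* nothing ready to run */
    }
    spin_unlock(&delayed_refs->lock);

    /* collapse duplicate/cancelling refs queued on this head */
    btrfs_merge_delayed_refs(trans, fs_info, delayed_refs, locked_ref);

    ref = select_delayed_ref(locked_ref);
    if (ref && ref->seq &&
        btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
            /* a tree-mod-log reader still needs this seq: requeue */
            spin_lock(&delayed_refs->lock);
            locked_ref->processing = 0;
            delayed_refs->num_heads_ready++;
            spin_unlock(&delayed_refs->lock);
            continue;
    }

    if (!ref) {
            /* only the head itself is left: unlink it for good */
            spin_lock(&delayed_refs->lock);
            delayed_refs->num_heads--;
            rb_erase(&locked_ref->href_node,
                     &delayed_refs->href_root);
            spin_unlock(&delayed_refs->lock);
    }
    atomic_dec(&delayed_refs->num_entries);

The pending_csums adjustment at line 2648 belongs to the same bookkeeping rule: when the head of a data extent is finally run, the csum bytes it promised are subtracted under the same spinlock.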
2762 u64 num_heads = trans->transaction->delayed_refs.num_heads_ready; in btrfs_check_space_for_delayed_refs()
2763 u64 csum_bytes = trans->transaction->delayed_refs.pending_csums; in btrfs_check_space_for_delayed_refs()
2799 atomic_read(&trans->transaction->delayed_refs.num_entries); in btrfs_should_throttle_delayed_refs()
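By contrast, btrfs_check_space_for_delayed_refs() and btrfs_should_throttle_delayed_refs() read the counters with no lock at all: a slightly stale num_heads_ready, pending_csums, or num_entries only skews a heuristic, never correctness. A hedged sketch of the kind of estimate these callers build (the cost formula below is illustrative, not the kernel's reservation math):

    u64 num_heads  = trans->transaction->delayed_refs.num_heads_ready;
    u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
    unsigned long num_entries =
            atomic_read(&trans->transaction->delayed_refs.num_entries);

    /* hypothetical: worst-case metadata per ready head, plus
     * whatever the pending csum bytes will cost to delete */
    u64 to_reserve = num_heads * per_head_reserve +
                     csum_reserve(csum_bytes);

per_head_reserve and csum_reserve() stand in for the real reservation helpers.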
2903 struct btrfs_delayed_ref_root *delayed_refs; in btrfs_run_delayed_refs() local
2916 delayed_refs = &trans->transaction->delayed_refs; in btrfs_run_delayed_refs()
2918 count = atomic_read(&delayed_refs->num_entries) * 2; in btrfs_run_delayed_refs()
2922 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); in btrfs_run_delayed_refs()
2935 spin_lock(&delayed_refs->lock); in btrfs_run_delayed_refs()
2936 node = rb_first(&delayed_refs->href_root); in btrfs_run_delayed_refs()
2938 spin_unlock(&delayed_refs->lock); in btrfs_run_delayed_refs()
2952 spin_unlock(&delayed_refs->lock); in btrfs_run_delayed_refs()
2968 spin_unlock(&delayed_refs->lock); in btrfs_run_delayed_refs()
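btrfs_run_delayed_refs() is the driver around the loop above. It sizes a default run at twice num_entries (each entry can expand while being run), and the find_middle() call at line 2922 is a compile-time debug aid (it sits under a SCRAMBLE_DELAYED_REFS #ifdef in the kernel) that starts processing mid-tree to flush out ordering assumptions. A hedged sketch of the driver's retry shape:

    delayed_refs = &trans->transaction->delayed_refs;
    if (count == 0)
            count = atomic_read(&delayed_refs->num_entries) * 2;

    again:
    ret = __btrfs_run_delayed_refs(trans, root, count);
    if (ret)
            return ret;

    if (run_all) {
            /* anything re-queued or newly added while we ran? */
            spin_lock(&delayed_refs->lock);
            node = rb_first(&delayed_refs->href_root);
            if (!node) {
                    spin_unlock(&delayed_refs->lock);
                    goto out;               /* fully drained */
            }
            spin_unlock(&delayed_refs->lock);
            count = (unsigned long)-1;      /* drain everything */
            goto again;
    }
    out:
    return 0;

The real function does more between the rb_first() check and the retry (it walks the remaining heads first), which accounts for the extra unlocks at lines 2952 and 2968.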
3011 struct btrfs_delayed_ref_root *delayed_refs; in check_delayed_ref() local
3014 delayed_refs = &trans->transaction->delayed_refs; in check_delayed_ref()
3015 spin_lock(&delayed_refs->lock); in check_delayed_ref()
3018 spin_unlock(&delayed_refs->lock); in check_delayed_ref()
3024 spin_unlock(&delayed_refs->lock); in check_delayed_ref()
3037 spin_unlock(&delayed_refs->lock); in check_delayed_ref()
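check_delayed_ref() shows the lock-handoff idiom used whenever a caller needs a head's mutex but only holds the spinlock: take a reference on the head, drop delayed_refs->lock, block on the mutex, then report -EAGAIN so the caller retries from scratch. Hedged sketch with this era's field names:

    spin_lock(&delayed_refs->lock);
    head = btrfs_find_delayed_ref_head(trans, bytenr);
    if (!head) {
            spin_unlock(&delayed_refs->lock);
            return 0;                       /* no pending refs at all */
    }

    if (!mutex_trylock(&head->mutex)) {
            atomic_inc(&head->node.refs);   /* keep the head alive */
            spin_unlock(&delayed_refs->lock);

            /* whoever holds the mutex will release it; then retry */
            mutex_lock(&head->mutex);
            mutex_unlock(&head->mutex);
            btrfs_put_delayed_ref(&head->node);
            return -EAGAIN;
    }
    spin_unlock(&delayed_refs->lock);

With the mutex held, the function can walk the head's queued refs (under head->lock) and answer whether anyone else references the extent.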
6690 struct btrfs_delayed_ref_root *delayed_refs; in check_ref_cleanup() local
6693 delayed_refs = &trans->transaction->delayed_refs; in check_ref_cleanup()
6694 spin_lock(&delayed_refs->lock); in check_ref_cleanup()
6722 rb_erase(&head->href_node, &delayed_refs->href_root); in check_ref_cleanup()
6724 atomic_dec(&delayed_refs->num_entries); in check_ref_cleanup()
6730 delayed_refs->num_heads--; in check_ref_cleanup()
6732 delayed_refs->num_heads_ready--; in check_ref_cleanup()
6735 spin_unlock(&delayed_refs->lock); in check_ref_cleanup()
6748 spin_unlock(&delayed_refs->lock); in check_ref_cleanup()
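check_ref_cleanup() is the cancellation path: if the last reference to a tree block is dropped in the same transaction that allocated it, the queued head can be thrown away instead of run. All four pieces of bookkeeping move together under the one spinlock, mirroring the updates made by the run path above. Hedged sketch of the unlink:

    spin_lock(&delayed_refs->lock);
    head = btrfs_find_delayed_ref_head(trans, bytenr);
    if (!head)
            goto out;                       /* nothing queued; out drops the lock */

    /* the real function bails out here if the head still has
     * queued refs, a pending extent_op, or a contended mutex */

    rb_erase(&head->href_node, &delayed_refs->href_root);
    atomic_dec(&delayed_refs->num_entries);
    delayed_refs->num_heads--;
    if (head->processing == 0)
            delayed_refs->num_heads_ready--;
    spin_unlock(&delayed_refs->lock);

The processing check matters: a head already selected by __btrfs_run_delayed_refs() was removed from the ready count when it was picked, so it must not be decremented twice.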
8122 struct btrfs_delayed_ref_root *delayed_refs; in record_one_subtree_extent() local
8132 delayed_refs = &trans->transaction->delayed_refs; in record_one_subtree_extent()
8133 spin_lock(&delayed_refs->lock); in record_one_subtree_extent()
8134 if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord)) in record_one_subtree_extent()
8136 spin_unlock(&delayed_refs->lock); in record_one_subtree_extent()
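record_one_subtree_extent() is the qgroup hook: a dirty-extent record is inserted into the tree owned by delayed_refs under the same spinlock, so qgroup accounting stays in step with the ref queue. The hits above cover almost the whole function; a sketch reconstructed around them, with field initialization per this kernel generation's btrfs_qgroup_extent_record:

    static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root,
                                         u64 bytenr, u64 num_bytes)
    {
            struct btrfs_qgroup_extent_record *qrecord;
            struct btrfs_delayed_ref_root *delayed_refs;

            qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
            if (!qrecord)
                    return -ENOMEM;

            qrecord->bytenr = bytenr;
            qrecord->num_bytes = num_bytes;
            qrecord->old_roots = NULL;

            delayed_refs = &trans->transaction->delayed_refs;
            spin_lock(&delayed_refs->lock);
            /* non-NULL return means the extent was already recorded */
            if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
                    kfree(qrecord);
            spin_unlock(&delayed_refs->lock);

            return 0;
    }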