Lines matching refs:em in the btrfs extent map code (fs/btrfs/extent_map.c); each entry shows the source file's own line number, the matching line, and the function in which the reference occurs.
50 struct extent_map *em; in alloc_extent_map() local
51 em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS); in alloc_extent_map()
52 if (!em) in alloc_extent_map()
54 RB_CLEAR_NODE(&em->rb_node); in alloc_extent_map()
55 em->flags = 0; in alloc_extent_map()
56 em->compress_type = BTRFS_COMPRESS_NONE; in alloc_extent_map()
57 em->generation = 0; in alloc_extent_map()
58 atomic_set(&em->refs, 1); in alloc_extent_map()
59 INIT_LIST_HEAD(&em->list); in alloc_extent_map()
60 return em; in alloc_extent_map()
70 void free_extent_map(struct extent_map *em) in free_extent_map() argument
72 if (!em) in free_extent_map()
74 WARN_ON(atomic_read(&em->refs) == 0); in free_extent_map()
75 if (atomic_dec_and_test(&em->refs)) { in free_extent_map()
76 WARN_ON(extent_map_in_tree(em)); in free_extent_map()
77 WARN_ON(!list_empty(&em->list)); in free_extent_map()
78 if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) in free_extent_map()
79 kfree(em->bdev); in free_extent_map()
80 kmem_cache_free(extent_map_cache, em); in free_extent_map()
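The fragments above, from alloc_extent_map() and free_extent_map(), show the reference-counting contract: the allocator returns a zeroed object from the extent_map_cache slab with refs set to 1, and free_extent_map() drops one reference and only returns the object to the slab once the count reaches zero. Below is a minimal standalone sketch of that contract in plain C11, with calloc() and stdatomic standing in for kmem_cache_zalloc() and atomic_t; the struct is reduced to a bare refcount and the names are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdlib.h>

/* Reduced stand-in for struct extent_map: just a refcount for the sketch. */
struct em_model {
	atomic_int refs;			/* stands in for atomic_t em->refs */
};

/* Sketch of alloc_extent_map(): zeroed allocation, one reference for the caller. */
static struct em_model *em_alloc(void)
{
	struct em_model *em = calloc(1, sizeof(*em));	/* ~ kmem_cache_zalloc() */

	if (!em)
		return NULL;
	atomic_init(&em->refs, 1);
	return em;
}

/* Sketch of free_extent_map(): drop one reference, free on the last one. */
static void em_put(struct em_model *em)
{
	if (!em)
		return;
	if (atomic_fetch_sub(&em->refs, 1) == 1)	/* ~ atomic_dec_and_test() */
		free(em);				/* ~ kmem_cache_free() */
}

int main(void)
{
	struct em_model *em = em_alloc();

	if (!em)
		return 1;
	atomic_fetch_add(&em->refs, 1);		/* a second holder, as a lookup would take */
	em_put(em);				/* object survives: one reference left */
	em_put(em);				/* last reference: object is freed */
	return 0;
}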
92 static int tree_insert(struct rb_root *root, struct extent_map *em) in tree_insert() argument
98 u64 end = range_end(em->start, em->len); in tree_insert()
104 if (em->start < entry->start) in tree_insert()
106 else if (em->start >= extent_map_end(entry)) in tree_insert()
113 while (parent && em->start >= extent_map_end(entry)) { in tree_insert()
118 if (end > entry->start && em->start < extent_map_end(entry)) in tree_insert()
123 while (parent && em->start < entry->start) { in tree_insert()
128 if (end > entry->start && em->start < extent_map_end(entry)) in tree_insert()
131 rb_link_node(&em->rb_node, orig_parent, p); in tree_insert()
132 rb_insert_color(&em->rb_node, root); in tree_insert()
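tree_insert() keys the red-black tree by em->start, but refuses the insert (-EEXIST) if the new extent's byte range overlaps the entry it collides with or its nearest neighbours, using the half-open test end > entry->start && em->start < extent_map_end(entry). The helper range_end() is not shown in the listing; the standalone sketch below assumes it saturates at the maximum u64 on overflow instead of wrapping, and demonstrates only the interval test itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour of range_end(): saturate instead of wrapping past u64. */
static uint64_t range_end(uint64_t start, uint64_t len)
{
	if (start + len < start)
		return UINT64_MAX;
	return start + len;
}

/* The overlap test tree_insert() applies against the colliding entry and its neighbours. */
static bool ranges_overlap(uint64_t a_start, uint64_t a_len,
			   uint64_t b_start, uint64_t b_len)
{
	return range_end(a_start, a_len) > b_start &&
	       a_start < range_end(b_start, b_len);
}

int main(void)
{
	/* [0, 4096) and [4096, 8192) touch but do not overlap: the insert goes in. */
	printf("%d\n", ranges_overlap(0, 4096, 4096, 4096));	/* prints 0 */
	/* [0, 8192) covers [4096, 8192): tree_insert() would return -EEXIST. */
	printf("%d\n", ranges_overlap(0, 8192, 4096, 4096));	/* prints 1 */
	return 0;
}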
225 static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em) in try_merge_map() argument
230 if (em->start != 0) { in try_merge_map()
231 rb = rb_prev(&em->rb_node); in try_merge_map()
234 if (rb && mergable_maps(merge, em)) { in try_merge_map()
235 em->start = merge->start; in try_merge_map()
236 em->orig_start = merge->orig_start; in try_merge_map()
237 em->len += merge->len; in try_merge_map()
238 em->block_len += merge->block_len; in try_merge_map()
239 em->block_start = merge->block_start; in try_merge_map()
240 em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start; in try_merge_map()
241 em->mod_start = merge->mod_start; in try_merge_map()
242 em->generation = max(em->generation, merge->generation); in try_merge_map()
250 rb = rb_next(&em->rb_node); in try_merge_map()
253 if (rb && mergable_maps(em, merge)) { in try_merge_map()
254 em->len += merge->len; in try_merge_map()
255 em->block_len += merge->block_len; in try_merge_map()
258 em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start; in try_merge_map()
259 em->generation = max(em->generation, merge->generation); in try_merge_map()
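try_merge_map() tries to absorb a mergeable previous neighbour (the first block above) and then a mergeable next one (the second block) into em, growing the logical range, the on-disk range, and the "modified" window, and keeping the larger generation. The backward case is the less obvious one: em takes over the neighbour's start, orig_start and block_start, and the modified window is re-based so that it still ends where em's old window ended. A standalone sketch of that bookkeeping on plain structs follows; the field names mirror the listing, everything else is a userspace stand-in.

#include <inttypes.h>
#include <stdio.h>

/* Only the fields the backward-merge branch of try_merge_map() updates. */
struct em_model {
	uint64_t start, orig_start, len;
	uint64_t block_start, block_len;
	uint64_t mod_start, mod_len;
	uint64_t generation;
};

/* Absorb the previous, adjacent extent "merge" into "em", as the listing does. */
static void merge_prev(struct em_model *em, const struct em_model *merge)
{
	em->start = merge->start;
	em->orig_start = merge->orig_start;
	em->len += merge->len;
	em->block_len += merge->block_len;
	em->block_start = merge->block_start;
	/*
	 * New modified window: starts at merge->mod_start and still ends at
	 * the old window's end, so its length is (old mod_start + old mod_len)
	 * minus the new, smaller mod_start.
	 */
	em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
	em->mod_start = merge->mod_start;
	em->generation = em->generation > merge->generation ?
			 em->generation : merge->generation;
}

int main(void)
{
	struct em_model prev = {
		.start = 0, .len = 4096, .block_start = 1048576, .block_len = 4096,
		.mod_start = 0, .mod_len = 4096, .generation = 7,
	};
	struct em_model em = {
		.start = 4096, .len = 4096, .block_start = 1048576 + 4096, .block_len = 4096,
		.mod_start = 4096, .mod_len = 4096, .generation = 9,
	};

	merge_prev(&em, &prev);
	/* Prints: start=0 len=8192 mod_start=0 mod_len=8192 gen=9 */
	printf("start=%" PRIu64 " len=%" PRIu64 " mod_start=%" PRIu64
	       " mod_len=%" PRIu64 " gen=%" PRIu64 "\n",
	       em.start, em.len, em.mod_start, em.mod_len, em.generation);
	return 0;
}

In the kernel code the absorbed neighbour is then unlinked from the tree and its reference dropped; the sketch shows only the arithmetic.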
279 struct extent_map *em; in unpin_extent_cache() local
283 em = lookup_extent_mapping(tree, start, len); in unpin_extent_cache()
285 WARN_ON(!em || em->start != start); in unpin_extent_cache()
287 if (!em) in unpin_extent_cache()
290 em->generation = gen; in unpin_extent_cache()
291 clear_bit(EXTENT_FLAG_PINNED, &em->flags); in unpin_extent_cache()
292 em->mod_start = em->start; in unpin_extent_cache()
293 em->mod_len = em->len; in unpin_extent_cache()
295 if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) { in unpin_extent_cache()
297 clear_bit(EXTENT_FLAG_FILLING, &em->flags); in unpin_extent_cache()
300 try_merge_map(tree, em); in unpin_extent_cache()
303 em->mod_start = em->start; in unpin_extent_cache()
304 em->mod_len = em->len; in unpin_extent_cache()
307 free_extent_map(em); in unpin_extent_cache()
314 void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em) in clear_em_logging() argument
316 clear_bit(EXTENT_FLAG_LOGGING, &em->flags); in clear_em_logging()
317 if (extent_map_in_tree(em)) in clear_em_logging()
318 try_merge_map(tree, em); in clear_em_logging()
322 struct extent_map *em, in setup_extent_mapping() argument
325 atomic_inc(&em->refs); in setup_extent_mapping()
326 em->mod_start = em->start; in setup_extent_mapping()
327 em->mod_len = em->len; in setup_extent_mapping()
330 list_move(&em->list, &tree->modified_extents); in setup_extent_mapping()
332 try_merge_map(tree, em); in setup_extent_mapping()
346 struct extent_map *em, int modified) in add_extent_mapping() argument
350 ret = tree_insert(&tree->map, em); in add_extent_mapping()
354 setup_extent_mapping(tree, em, modified); in add_extent_mapping()
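add_extent_mapping() is tree_insert() plus setup_extent_mapping(): only on a successful insert does the tree take its own reference (the atomic_inc() above), reset the modified window to cover the whole extent, and either move the em onto the tree's modified_extents list (when modified is set) or attempt a merge with its neighbours. The caller-side sketch below is hedged: it assumes kernel context inside fs/btrfs/ (the rwlock in struct extent_map_tree is not part of the listing), the wrapper name is made up for illustration, and it assumes em came from alloc_extent_map() and is not needed by the caller afterwards.

/* In-kernel sketch, assuming it sits in fs/btrfs/ so these headers resolve;
 * the helper name is hypothetical, only the called APIs come from the listing. */
#include <linux/spinlock.h>
#include "extent_map.h"

static int sketch_insert_mapping(struct extent_map_tree *em_tree,
				 struct extent_map *em, int modified)
{
	int ret;

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, modified);	/* -EEXIST on overlap */
	write_unlock(&em_tree->lock);

	/*
	 * Drop the caller's allocation reference.  On success the tree keeps
	 * the reference setup_extent_mapping() took; on failure this was the
	 * last reference and the em is freed.
	 */
	free_extent_map(em);
	return ret;
}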
363 struct extent_map *em; in __lookup_extent_mapping() local
379 em = rb_entry(rb_node, struct extent_map, rb_node); in __lookup_extent_mapping()
381 if (strict && !(end > em->start && start < extent_map_end(em))) in __lookup_extent_mapping()
384 atomic_inc(&em->refs); in __lookup_extent_mapping()
385 return em; in __lookup_extent_mapping()
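__lookup_extent_mapping() takes a reference (the atomic_inc() above) on every extent it hands back, so each successful lookup must be paired with a free_extent_map(), exactly as the unpin_extent_cache() fragments earlier in the listing do. A caller-side sketch of that pairing follows, again assuming kernel context and the tree's rwlock, with a made-up helper name.

/* In-kernel sketch, assuming fs/btrfs/ context; the helper name is hypothetical. */
#include <linux/spinlock.h>
#include "extent_map.h"

static u64 sketch_lookup_block_start(struct extent_map_tree *em_tree,
				     u64 start, u64 len)
{
	struct extent_map *em;
	u64 block_start = (u64)-1;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (em) {
		block_start = em->block_start;
		free_extent_map(em);	/* pair the reference the lookup took */
	}
	return block_start;
}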
430 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) in remove_extent_mapping() argument
434 WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags)); in remove_extent_mapping()
435 rb_erase(&em->rb_node, &tree->map); in remove_extent_mapping()
436 if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags)) in remove_extent_mapping()
437 list_del_init(&em->list); in remove_extent_mapping()
438 RB_CLEAR_NODE(&em->rb_node); in remove_extent_mapping()
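remove_extent_mapping() only unlinks the extent: a pinned extent should have been unpinned first (the WARN_ON above), the em is erased from the rb-tree and taken off the modified list unless it is still being logged, but the reference the tree has held since setup_extent_mapping() is not dropped here. A caller that looked the extent up and then removes it therefore puts it twice, once for its lookup reference and once for the tree's. A caller-side sketch under the same assumptions as above (kernel context, hypothetical helper name):

/* In-kernel sketch, assuming fs/btrfs/ context; the helper name is hypothetical. */
#include <linux/spinlock.h>
#include "extent_map.h"

/* "em" was obtained via lookup_extent_mapping(), so two references are live:
 * the lookup's and the one the tree took when the mapping was inserted. */
static void sketch_remove_mapping(struct extent_map_tree *em_tree,
				  struct extent_map *em)
{
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);	/* unlinks only, drops no reference */
	write_unlock(&em_tree->lock);

	free_extent_map(em);	/* once for the lookup reference */
	free_extent_map(em);	/* once for the reference the tree held */
}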