#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"


static struct kmem_cache *extent_map_cache;

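/* create the slab cache used to allocate struct extent_map objects */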
int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

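/* destroy the extent_map slab cache on module unload */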
void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:		tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	atomic_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->bdev);
		kmem_cache_free(extent_map_cache, em);
	}
}

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

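/*
 * insert @em into the rb-tree @root, keyed by em->start.  Returns -EEXIST
 * if the new extent would overlap a mapping already in the tree.
 */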
static int tree_insert(struct rb_root *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start)
			p = &(*p)->rb_left;
		else if (em->start >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color(&em->rb_node, root);
	return 0;
}

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

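/*
 * try to merge @em with the mappings immediately before and after it in
 * the tree.  A neighbour that mergable_maps() accepts is absorbed into
 * @em, removed from the tree and has its reference dropped.
 */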
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);

			rb_erase(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		free_extent_map(merge);
	}
}

/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Sets the
 * generation to the one that actually added the file item to the inode,
 * so we know we need to sync this extent when we call fsync().
 */
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

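/*
 * clear the logging flag on @em and retry merging with its neighbours,
 * which is skipped while an extent is being logged
 */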
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

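/*
 * take a reference on @em, reset its mod range and either queue it on the
 * tree's modified list (when @modified is set) or try to merge it with its
 * neighbours
 */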
static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	atomic_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether the new @em should be added to the list of
 *		modified extents
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken.  Returns
 * -EEXIST if @em overlaps an existing mapping.
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
out:
	return ret;
}

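/*
 * core of the lookup helpers: find an extent_map that intersects
 * [start, start + len).  With @strict set only an intersecting mapping is
 * returned; otherwise the nearest neighbour is used as a fallback.  The
 * returned map has its reference count incremented.
 */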
static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}

/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	RB_CLEAR_NODE(&em->rb_node);
	return ret;
}

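/*
 * replace @cur with @new in @tree without dropping @cur's reference; @new
 * is then set up via setup_extent_mapping()
 */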
void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}