This source file includes following definitions.
- bset_tree_last
- bset_written
- bkey_written
- bset_byte_offset
- bset_sector_offset
- bch_btree_keys_u64s_remaining
- bset_next_set
- bch_bset_search
- bch_btree_sort
- bset_bkey_idx
- bkey_init
- bkey_cmp
- bch_cut_front
- bch_cut_back
- preceding_key
- bch_ptr_invalid
- bch_ptr_bad
- bch_bkey_to_text
- bch_bkey_equal_header
- bch_keylist_init
- bch_keylist_init_single
- bch_keylist_push
- bch_keylist_add
- bch_keylist_empty
- bch_keylist_reset
- bch_keylist_free
- bch_keylist_nkeys
- bch_keylist_bytes
- __printf
- __printf
- bch_dump_bucket
- btree_keys_expensive_checks
- bch_count_data
1
2 #ifndef _BCACHE_BSET_H
3 #define _BCACHE_BSET_H
4
5 #include <linux/bcache.h>
6 #include <linux/kernel.h>
7 #include <linux/types.h>
8
9 #include "util.h"
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150 struct btree_keys;
151 struct btree_iter;
152 struct btree_iter_set;
153 struct bkey_float;
154
155 #define MAX_BSETS 4U
156
/*
 * One sorted set of keys ("bset") within a btree node, plus the
 * auxiliary binary search tree built over it (see
 * bch_bset_build_written_tree() and __bch_bset_search()).
 */
struct bset_tree {
	/* Number of entries in @tree and @prev */
	unsigned int size;

	/* Precomputed function of @size used by the tree lookup code */
	unsigned int extra;

	/* NOTE(review): presumably a copy of the set's last key — confirm
	 * against the construction code in bset.c */
	struct bkey end;
	/* The auxiliary search tree, stored as an array */
	struct bkey_float *tree;

	/*
	 * Per tree node byte sizes of the previous key — conceptually part
	 * of struct bkey_float but kept in a separate array (assumption:
	 * to keep bkey_float compact for the lookup fast path — verify in
	 * bset.c).
	 */
	uint8_t *prev;

	/* The keys themselves, as laid out in the btree node */
	struct bset *data;
};
188
/*
 * Per-node-type key operations; different implementations are supplied
 * for extent and non-extent btree nodes.
 */
struct btree_keys_ops {
	/* Ordering predicate for merge-sorting iterator sets */
	bool (*sort_cmp)(struct btree_iter_set l,
			 struct btree_iter_set r);
	/* Resolve overlapping keys while sorting; @tmp is scratch space */
	struct bkey *(*sort_fixup)(struct btree_iter *iter,
				   struct bkey *tmp);
	/* Fix up existing keys when inserting @insert (see
	 * bch_btree_insert_key()) */
	bool (*insert_fixup)(struct btree_keys *b,
			     struct bkey *insert,
			     struct btree_iter *iter,
			     struct bkey *replace_key);
	/* Backend for bch_ptr_invalid() */
	bool (*key_invalid)(struct btree_keys *bk,
			    const struct bkey *k);
	/* Backend for bch_ptr_bad() */
	bool (*key_bad)(struct btree_keys *bk,
			const struct bkey *k);
	/* Backend for bch_bkey_try_merge() */
	bool (*key_merge)(struct btree_keys *bk,
			  struct bkey *l, struct bkey *r);
	/* Format @k into @buf (at most @size bytes) for debugging */
	void (*key_to_text)(char *buf,
			    size_t size,
			    const struct bkey *k);
	/* Dump @k for debugging */
	void (*key_dump)(struct btree_keys *keys,
			 const struct bkey *k);

	/*
	 * True when keys describe extents (ranges) rather than single
	 * points, so the start of the keyed range matters.
	 */
	bool is_extents;
};
216
/*
 * In-memory key state of one btree node: up to MAX_BSETS sorted sets
 * plus their auxiliary search trees.
 */
struct btree_keys {
	const struct btree_keys_ops *ops;
	/* log2 of the pages backing this node's keys (see
	 * bch_btree_keys_alloc() / bch_btree_keys_u64s_remaining()) */
	uint8_t page_order;
	/* Index of the last valid entry in set[]; sets are set[0..nsets] */
	uint8_t nsets;
	/* 1 while the last set is still being appended to (not on disk) */
	unsigned int last_set_unwritten:1;
	/* Shared flag read by btree_keys_expensive_checks() */
	bool *expensive_debug_checks;

	/*
	 * The sorted sets themselves; set[0..nsets] are valid and
	 * bset_tree_last() returns &set[nsets].
	 */
	struct bset_tree set[MAX_BSETS];
};
233
234 static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
235 {
236 return b->set + b->nsets;
237 }
238
/*
 * Returns true if set @t has already been written out.  Only the last
 * set can be unwritten; last_set_unwritten is a 0/1 bitfield, so
 * subtracting it excludes the final set from the "written" range when
 * it is still dirty.
 */
static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
{
	return t <= b->set + b->nsets - b->last_set_unwritten;
}
243
/*
 * Returns true if key @k lives in an already-written set: either no
 * set is unwritten at all, or @k precedes the start of the last
 * (unwritten) set.
 */
static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
{
	return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
}
248
249 static inline unsigned int bset_byte_offset(struct btree_keys *b,
250 struct bset *i)
251 {
252 return ((size_t) i) - ((size_t) b->set->data);
253 }
254
/* Offset of bset @i within the node, in 512-byte sectors */
static inline unsigned int bset_sector_offset(struct btree_keys *b,
					      struct bset *i)
{
	unsigned int bytes = bset_byte_offset(b, i);

	return bytes >> 9;
}
260
/* Bytes used by a struct bset header plus @k u64s of key data */
#define __set_bytes(i, k) (sizeof(*(i)) + (k) * sizeof(uint64_t))
/* Bytes currently used by bset @i */
#define set_bytes(i) __set_bytes(i, i->keys)

/* Same quantities expressed in @block_bytes-sized blocks, rounded up */
#define __set_blocks(i, k, block_bytes) \
	DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
#define set_blocks(i, block_bytes) \
	__set_blocks(i, (i)->keys, block_bytes)
268
/*
 * Space remaining for new keys in @b's last set, in u64s.  Returns 0
 * when the last set has already been written out (nothing may be
 * appended to it).  BUG()s if the sets already overrun the node's
 * buffer of PAGE_SIZE << page_order bytes.
 */
static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON((PAGE_SIZE << b->page_order) <
	       (bset_byte_offset(b, t->data) + set_bytes(t->data)));

	if (!b->last_set_unwritten)
		return 0;

	return ((PAGE_SIZE << b->page_order) -
		(bset_byte_offset(b, t->data) + set_bytes(t->data))) /
		sizeof(u64);
}
283
/*
 * Address at which the next bset would start: the end of the last set,
 * rounded up to the on-disk block size @block_bytes.
 */
static inline struct bset *bset_next_set(struct btree_keys *b,
					 unsigned int block_bytes)
{
	struct bset *i = bset_tree_last(b)->data;

	return ((void *) i) + roundup(set_bytes(i), block_bytes);
}
291
292 void bch_btree_keys_free(struct btree_keys *b);
293 int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
294 gfp_t gfp);
295 void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
296 bool *expensive_debug_checks);
297
298 void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
299 void bch_bset_build_written_tree(struct btree_keys *b);
300 void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
301 bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
302 void bch_bset_insert(struct btree_keys *b, struct bkey *where,
303 struct bkey *insert);
304 unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
305 struct bkey *replace_key);
306
/* Return values of bch_btree_insert_key() */
enum {
	BTREE_INSERT_STATUS_NO_INSERT = 0,
	BTREE_INSERT_STATUS_INSERT,
	BTREE_INSERT_STATUS_BACK_MERGE,
	BTREE_INSERT_STATUS_OVERWROTE,
	BTREE_INSERT_STATUS_FRONT_MERGE,
};
314
315
316
/*
 * Iterator that merges the keys of several sorted bsets into one
 * sorted stream (see bch_btree_iter_init()/bch_btree_iter_next()).
 */
struct btree_iter {
	/* Capacity of data[] and number of sets currently pushed */
	size_t size, used;
#ifdef CONFIG_BCACHE_DEBUG
	/* Node being iterated — kept only for debug checks */
	struct btree_keys *b;
#endif
	/* One cursor per set: next key to visit, and end of that set */
	struct btree_iter_set {
		struct bkey *k, *end;
	} data[MAX_BSETS];
};
326
327 typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
328
329 struct bkey *bch_btree_iter_next(struct btree_iter *iter);
330 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
331 struct btree_keys *b,
332 ptr_filter_fn fn);
333
334 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
335 struct bkey *end);
336 struct bkey *bch_btree_iter_init(struct btree_keys *b,
337 struct btree_iter *iter,
338 struct bkey *search);
339
340 struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
341 const struct bkey *search);
342
343
344
345
346 static inline struct bkey *bch_bset_search(struct btree_keys *b,
347 struct bset_tree *t,
348 const struct bkey *search)
349 {
350 return search ? __bch_bset_search(b, t, search) : t->data->start;
351 }
352
/*
 * Iterate over every key of node @b in sorted order, skipping keys
 * rejected by @filter (see bch_btree_iter_next_filter()).  @iter is a
 * caller-supplied struct btree_iter; @k is the cursor variable.
 */
#define for_each_key_filter(b, k, iter, filter) \
	for (bch_btree_iter_init((b), (iter), NULL); \
	     ((k) = bch_btree_iter_next_filter((iter), (b), filter));)

/* As above, without filtering */
#define for_each_key(b, k, iter) \
	for (bch_btree_iter_init((b), (iter), NULL); \
	     ((k) = bch_btree_iter_next(iter));)
360
361
362
/* Shared state for the btree sorting routines below */
struct bset_sort_state {
	/* Pool of temporary buffers used while sorting */
	mempool_t pool;

	/* Size (log2 pages) of the buffers in @pool */
	unsigned int page_order;
	/* NOTE(review): appears to be a threshold factor for deciding when
	 * a sort becomes mandatory — confirm against bset.c usage */
	unsigned int crit_factor;

	/* Timing statistics for sort operations */
	struct time_stats time;
};
371
372 void bch_bset_sort_state_free(struct bset_sort_state *state);
373 int bch_bset_sort_state_init(struct bset_sort_state *state,
374 unsigned int page_order);
375 void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
376 void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
377 struct bset_sort_state *state);
378 void bch_btree_sort_and_fix_extents(struct btree_keys *b,
379 struct btree_iter *iter,
380 struct bset_sort_state *state);
381 void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
382 struct bset_sort_state *state);
383
/* Sort all of @b's sets into one: a partial sort starting at set 0 */
static inline void bch_btree_sort(struct btree_keys *b,
				  struct bset_sort_state *state)
{
	bch_btree_sort_partial(b, 0, state);
}
389
/* Counters filled in by bch_btree_keys_stats() */
struct bset_stats {
	/* Sets (and their bytes) split by written/unwritten state */
	size_t sets_written, sets_unwritten;
	size_t bytes_written, bytes_unwritten;
	/* NOTE(review): presumably auxiliary-search-tree entries built vs.
	 * entries that could not be represented — confirm in bset.c */
	size_t floats, failed;
};
395
396 void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
397
398
399
/* Pointer one past the last key in bset @i */
#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, \
				   (unsigned int)(i)->keys)
402
/* Key at u64 index @idx within bset @i (keys are variable length) */
static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
{
	return bkey_idx(i->start, idx);
}
407
/* Initialize @k to the all-zeroes key */
static inline void bkey_init(struct bkey *k)
{
	*k = ZERO_KEY;
}
412
413 static __always_inline int64_t bkey_cmp(const struct bkey *l,
414 const struct bkey *r)
415 {
416 return unlikely(KEY_INODE(l) != KEY_INODE(r))
417 ? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
418 : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
419 }
420
421 void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
422 unsigned int i);
423 bool __bch_cut_front(const struct bkey *where, struct bkey *k);
424 bool __bch_cut_back(const struct bkey *where, struct bkey *k);
425
/*
 * Trim the front of @k up to @where; asserts that @where does not lie
 * past @k.  Returns what __bch_cut_front() returns (whether anything
 * was cut — confirm in extents code).
 */
static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{
	BUG_ON(bkey_cmp(where, k) > 0);
	return __bch_cut_front(where, k);
}
431
/*
 * Trim the back of @k down to @where; asserts that @where does not lie
 * before the start of @k's range.  Returns what __bch_cut_back()
 * returns (whether anything was cut — confirm in extents code).
 */
static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
{
	BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
	return __bch_cut_back(where, k);
}
437
438
439
440
441
442
443
444
445
446
/*
 * Compute the key immediately preceding @k into the caller-supplied
 * buffer that *preceding_key_p points at: same (inode, offset) with
 * size 0, then decremented by one.  The decrement treats the key's
 * high/low words as one wide integer, borrowing from ->high when
 * ->low is zero.  If @k is the zero key (inode 0, offset 0) there is
 * no preceding key and *preceding_key_p is set to NULL instead.
 */
static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
{
	if (KEY_INODE(k) || KEY_OFFSET(k)) {
		(**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
		if (!(*preceding_key_p)->low)
			(*preceding_key_p)->high--;
		(*preceding_key_p)->low--;
	} else {
		(*preceding_key_p) = NULL;
	}
}
458
/* True if @k is invalid for node @b, per the node type's key_invalid op */
static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{
	return b->ops->key_invalid(b, k);
}
463
/* True if @k is bad for node @b, per the node type's key_bad op */
static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
{
	return b->ops->key_bad(b, k);
}
468
469 static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
470 size_t size, const struct bkey *k)
471 {
472 return b->ops->key_to_text(buf, size, k);
473 }
474
475 static inline bool bch_bkey_equal_header(const struct bkey *l,
476 const struct bkey *r)
477 {
478 return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
479 KEY_PTRS(l) == KEY_PTRS(r) &&
480 KEY_CSUM(l) == KEY_CSUM(r));
481 }
482
483
484
/*
 * A growable list of bkeys.  @keys points at the first key and @top
 * one past the last; the unions let the same cursors be addressed as
 * raw u64 words (keys_p/top_p).  Storage starts in the small inline
 * buffer and moves to the heap when grown (__bch_keylist_realloc();
 * bch_keylist_free() kfrees only non-inline storage).
 */
struct keylist {
	union {
		struct bkey *keys;
		uint64_t *keys_p;
	};
	union {
		struct bkey *top;
		uint64_t *top_p;
	};

	/* Inline storage for small lists, in u64s */
#define KEYLIST_INLINE 16
	uint64_t inline_keys[KEYLIST_INLINE];
};
499
/* Initialize @l as an empty list using its inline buffer */
static inline void bch_keylist_init(struct keylist *l)
{
	l->top_p = l->keys_p = l->inline_keys;
}
504
/* Initialize @l as a list containing exactly the single key @k */
static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k)
{
	l->keys = k;
	l->top = bkey_next(k);
}
510
/* Advance @l->top past the key the caller just wrote there */
static inline void bch_keylist_push(struct keylist *l)
{
	l->top = bkey_next(l->top);
}
515
/* Append a copy of @k to @l (caller must have ensured there is room) */
static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
{
	bkey_copy(l->top, k);
	bch_keylist_push(l);
}
521
/* True if @l contains no keys */
static inline bool bch_keylist_empty(struct keylist *l)
{
	return l->top == l->keys;
}
526
/* Empty @l without releasing its storage */
static inline void bch_keylist_reset(struct keylist *l)
{
	l->top = l->keys;
}
531
/* Release @l's storage if it was heap-allocated (inline buffer is not) */
static inline void bch_keylist_free(struct keylist *l)
{
	if (l->keys_p != l->inline_keys)
		kfree(l->keys_p);
}
537
/* Size of @l's contents in u64s (not the number of keys, despite the name) */
static inline size_t bch_keylist_nkeys(struct keylist *l)
{
	return l->top_p - l->keys_p;
}
542
/* Size of @l's contents in bytes */
static inline size_t bch_keylist_bytes(struct keylist *l)
{
	return bch_keylist_nkeys(l) * sizeof(uint64_t);
}
547
548 struct bkey *bch_keylist_pop(struct keylist *l);
549 void bch_keylist_pop_front(struct keylist *l);
550 int __bch_keylist_realloc(struct keylist *l, unsigned int u64s);
551
552
553
/*
 * Expensive consistency checks: real implementations when
 * CONFIG_BCACHE_DEBUG is set, no-op stubs otherwise.
 */
#ifdef CONFIG_BCACHE_DEBUG

int __bch_count_data(struct btree_keys *b);
void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
				     const char *fmt,
				     ...);
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
void bch_dump_bucket(struct btree_keys *b);

#else

static inline int __bch_count_data(struct btree_keys *b) { return -1; }
static inline void __printf(2, 3)
	__bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
static inline void bch_dump_bucket(struct btree_keys *b) {}
/* Declared in both branches: a real bch_dump_bset() always exists */
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);

#endif
572
/*
 * Whether expensive runtime checks are enabled for @b: reads the
 * cache-wide flag when debug support is compiled in, otherwise
 * constant false (letting the compiler elide the check sites).
 */
static inline bool btree_keys_expensive_checks(struct btree_keys *b)
{
#ifdef CONFIG_BCACHE_DEBUG
	return *b->expensive_debug_checks;
#else
	return false;
#endif
}
581
/*
 * Run the expensive data-count check when enabled; returns -1 (the
 * "not checked" value) otherwise, matching the stub __bch_count_data().
 */
static inline int bch_count_data(struct btree_keys *b)
{
	if (btree_keys_expensive_checks(b))
		return __bch_count_data(b);

	return -1;
}
586
/* Run __bch_check_keys() on @b only when expensive checks are enabled */
#define bch_check_keys(b, ...) \
do { \
	if (btree_keys_expensive_checks(b)) \
		__bch_check_keys(b, __VA_ARGS__); \
} while (0)
592
593 #endif