Lines matching refs: j
40 struct jset *j, *data = ca->set->journal.w[0].data; in journal_read_bucket() local
73 j = data; in journal_read_bucket()
76 size_t blocks, bytes = set_bytes(j); in journal_read_bucket()
78 if (j->magic != jset_magic(&ca->sb)) { in journal_read_bucket()
93 if (j->csum != csum_set(j)) { in journal_read_bucket()
99 blocks = set_blocks(j, block_bytes(ca->set)); in journal_read_bucket()
104 if (i->j.seq >= j->last_seq) in journal_read_bucket()
111 if (j->seq == i->j.seq) in journal_read_bucket()
114 if (j->seq < i->j.last_seq) in journal_read_bucket()
117 if (j->seq > i->j.seq) { in journal_read_bucket()
125 i = kmalloc(offsetof(struct journal_replay, j) + in journal_read_bucket()
129 memcpy(&i->j, j, bytes); in journal_read_bucket()
133 ja->seq[bucket_index] = j->seq; in journal_read_bucket()
137 j = ((void *) j) + blocks * block_bytes(ca); in journal_read_bucket()
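The journal_read_bucket() hits above follow j through the journal sets packed back to back in one bucket: check the magic and checksum, skip sets already superseded on the replay list, copy the good ones into a struct journal_replay, then advance j by the set's size rounded up to whole blocks. Below is a minimal userspace sketch of that walk; struct jset, set_bytes(), set_blocks() and the magic constant are simplified stand-ins, not the kernel definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_BYTES 512              /* stand-in for block_bytes(ca) */
    #define FAKE_MAGIC  0x6a736574ULL    /* illustrative; the kernel derives it from the superblock */

    /* Greatly simplified journal set header; the real struct jset has more fields. */
    struct jset {
        uint64_t magic;
        uint64_t seq;
        uint32_t keys;                   /* u64s of key data following the header */
        uint64_t d[];
    };

    static size_t set_bytes(const struct jset *j)
    {
        return sizeof(*j) + j->keys * sizeof(uint64_t);
    }

    static size_t set_blocks(const struct jset *j)
    {
        return (set_bytes(j) + BLOCK_BYTES - 1) / BLOCK_BYTES;
    }

    /* Walk the journal sets packed back to back in one bucket-sized buffer. */
    static void read_bucket(void *data, size_t len)
    {
        char *end = (char *) data + len;
        struct jset *j = data;

        while ((char *) j + sizeof(*j) <= end) {
            if (j->magic != FAKE_MAGIC)
                break;                   /* no more valid sets in this bucket */
            if ((char *) j + set_bytes(j) > end)
                break;                   /* truncated set */

            printf("seq %llu, %u key u64s\n",
                   (unsigned long long) j->seq, (unsigned) j->keys);

            /* Same advance as line 137 above: whole blocks, not bytes. */
            j = (struct jset *) ((char *) j + set_blocks(j) * BLOCK_BYTES);
        }
    }

    int main(void)
    {
        uint64_t bucket[4 * BLOCK_BYTES / sizeof(uint64_t)] = { 0 };
        struct jset *j = (struct jset *) bucket;

        j->magic = FAKE_MAGIC;
        j->seq = 1;
        j->keys = 3;

        read_bucket(bucket, sizeof(bucket));
        return 0;
    }

Advancing by set_blocks() * BLOCK_BYTES rather than set_bytes() mirrors the on-disk layout implied by the listing: each set is padded out to a block boundary, so the next header starts on the next block.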
206 list)->j.seq; in bch_journal_read()
212 list)->j.seq) in bch_journal_read()
260 list)->j.seq; in bch_journal_read()
271 struct journal *j = &c->journal; in bch_journal_mark() local
272 uint64_t last = j->seq; in bch_journal_mark()
282 BUG_ON(last < i->j.seq); in bch_journal_mark()
285 while (last-- != i->j.seq) in bch_journal_mark()
286 if (fifo_free(&j->pin) > 1) { in bch_journal_mark()
287 fifo_push_front(&j->pin, p); in bch_journal_mark()
288 atomic_set(&fifo_front(&j->pin), 0); in bch_journal_mark()
291 if (fifo_free(&j->pin) > 1) { in bch_journal_mark()
292 fifo_push_front(&j->pin, p); in bch_journal_mark()
293 i->pin = &fifo_front(&j->pin); in bch_journal_mark()
297 for (k = i->j.start; in bch_journal_mark()
298 k < bset_bkey_last(&i->j); in bch_journal_mark()
301 unsigned j; in bch_journal_mark() local
303 for (j = 0; j < KEY_PTRS(k); j++) in bch_journal_mark()
304 if (ptr_available(c, k, j)) in bch_journal_mark()
305 atomic_inc(&PTR_BUCKET(c, k, j)->pin); in bch_journal_mark()
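The bch_journal_mark() lines show two things happening per replayed entry: the pin FIFO j->pin is refilled front-to-back so every still-open sequence number has a counter, and every pointer of every key in the entry bumps the pin count of the bucket it points at, keeping that bucket from being reused before replay. A trimmed sketch of the per-key marking loop; struct bkey, the bucket lookup and the availability check are invented stand-ins for KEY_PTRS(), PTR_BUCKET() and ptr_available(), and the FIFO refill is omitted.

    #include <stddef.h>

    #define MAX_PTRS 3

    /* Greatly simplified key: a real bkey packs its pointers into 64-bit words. */
    struct bkey {
        unsigned nptrs;                  /* stand-in for KEY_PTRS(k) */
        unsigned bucket[MAX_PTRS];       /* stand-in for PTR_BUCKET(c, k, j) */
    };

    struct bucket {
        int pin;                         /* atomic_t in the kernel */
    };

    /*
     * Pin every bucket referenced by the keys of one replayed journal entry
     * so the allocator cannot reuse it before the entry has been replayed.
     */
    static void mark_entry(struct bucket *buckets, size_t nbuckets,
                           const struct bkey *keys, size_t nkeys)
    {
        for (size_t i = 0; i < nkeys; i++) {
            const struct bkey *k = &keys[i];

            for (unsigned j = 0; j < k->nptrs; j++)
                if (k->bucket[j] < nbuckets)     /* ptr_available() stand-in */
                    buckets[k->bucket[j]].pin++;
        }
    }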
319 uint64_t start = i->j.last_seq, end = i->j.seq, n = start; in bch_journal_replay()
325 cache_set_err_on(n != i->j.seq, s, in bch_journal_replay()
327 n, i->j.seq - 1, start, end); in bch_journal_replay()
329 for (k = i->j.start; in bch_journal_replay()
330 k < bset_bkey_last(&i->j); in bch_journal_replay()
348 n = i->j.seq + 1; in bch_journal_replay()
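bch_journal_replay() walks the replay list keeping n as the sequence number the next entry must have (line 348 advances it); the mismatch test at line 325 means entries between start and end are missing from the journal. A standalone sketch of that continuity check, with an invented replay_entry type in place of struct journal_replay:

    #include <stdint.h>
    #include <stdio.h>

    struct replay_entry {
        uint64_t seq;                    /* stand-in for journal_replay.j.seq */
    };

    /* Count how many sequence numbers are missing from an ordered replay list. */
    static unsigned check_continuity(const struct replay_entry *e, size_t n)
    {
        unsigned missing = 0;
        uint64_t expect;

        if (!n)
            return 0;

        expect = e[0].seq;               /* the seq the next entry must have */
        for (size_t i = 0; i < n; i++) {
            if (e[i].seq != expect)
                missing += (unsigned) (e[i].seq - expect);
            expect = e[i].seq + 1;       /* same advance as "n = i->j.seq + 1" above */
        }
        return missing;
    }

    int main(void)
    {
        struct replay_entry list[] = { {10}, {11}, {14}, {15} };

        printf("%u missing\n", check_continuity(list, 4));   /* prints "2 missing" */
        return 0;
    }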
402 #define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1) argument
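The last_seq(j) macro is plain arithmetic over the pin FIFO: the FIFO holds one counter per journal entry that is still open, so if j->seq is 128 and fifo_used(&j->pin) is 5, the oldest pinned entry is 128 - 5 + 1 = 124 (entries 124 through 128 inclusive).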
526 void bch_journal_next(struct journal *j) in bch_journal_next() argument
530 j->cur = (j->cur == j->w) in bch_journal_next()
531 ? &j->w[1] in bch_journal_next()
532 : &j->w[0]; in bch_journal_next()
538 BUG_ON(!fifo_push(&j->pin, p)); in bch_journal_next()
539 atomic_set(&fifo_back(&j->pin), 1); in bch_journal_next()
541 j->cur->data->seq = ++j->seq; in bch_journal_next()
542 j->cur->dirty = false; in bch_journal_next()
543 j->cur->need_write = false; in bch_journal_next()
544 j->cur->data->keys = 0; in bch_journal_next()
546 if (fifo_full(&j->pin)) in bch_journal_next()
547 pr_debug("journal_pin full (%zu)", fifo_used(&j->pin)); in bch_journal_next()
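bch_journal_next() flips j->cur between the two write buffers j->w[0] and j->w[1], pushes a fresh pin initialised to 1, and resets the newly current buffer under the next sequence number. A minimal sketch of the toggle and reset; the structures below are trimmed stand-ins for the kernel's struct journal and struct journal_write, and the pin push is left out.

    #include <stdbool.h>
    #include <stdint.h>

    struct journal_write {
        unsigned keys;
        bool dirty;
        bool need_write;
    };

    struct journal {
        uint64_t seq;
        struct journal_write w[2];
        struct journal_write *cur;       /* always points at w[0] or w[1] */
    };

    /* Switch to the idle buffer and start a new, empty journal entry. */
    static void journal_next(struct journal *j)
    {
        j->cur = (j->cur == j->w) ? &j->w[1] : &j->w[0];

        j->seq++;                        /* the new entry gets the next sequence number */
        j->cur->dirty = false;
        j->cur->need_write = false;
        j->cur->keys = 0;
    }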
562 struct journal *j = container_of(cl, struct journal, io); in journal_write_done() local
563 struct journal_write *w = (j->cur == j->w) in journal_write_done()
564 ? &j->w[1] in journal_write_done()
565 : &j->w[0]; in journal_write_done()
805 struct journal *j = &c->journal; in bch_journal_alloc() local
807 spin_lock_init(&j->lock); in bch_journal_alloc()
808 INIT_DELAYED_WORK(&j->work, journal_write_work); in bch_journal_alloc()
812 j->w[0].c = c; in bch_journal_alloc()
813 j->w[1].c = c; in bch_journal_alloc()
815 if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || in bch_journal_alloc()
816 !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) || in bch_journal_alloc()
817 !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS))) in bch_journal_alloc()
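bch_journal_alloc() initialises the lock and the delayed work item, then allocates the pin FIFO and the two write buffers in a single expression, bailing out if any of them came back NULL. A userspace sketch of the same all-or-nothing allocation idiom; the sizes and field names here are illustrative, not the kernel's JSET_BITS or pin FIFO parameters.

    #include <stdlib.h>

    #define WRITE_BUF_BYTES (8 * 4096)   /* illustrative; kernel uses PAGE_SIZE << JSET_BITS */
    #define PIN_ENTRIES     20000        /* illustrative size for the pin FIFO */

    struct journal {
        int *pin;                        /* stand-in for the fifo of pin counters */
        void *buf[2];                    /* the two write buffers, j->w[0/1].data */
    };

    /* Allocate everything up front; undo whatever did succeed if anything failed. */
    static int journal_alloc(struct journal *j)
    {
        j->pin = calloc(PIN_ENTRIES, sizeof(*j->pin));
        j->buf[0] = malloc(WRITE_BUF_BYTES);
        j->buf[1] = malloc(WRITE_BUF_BYTES);

        if (!j->pin || !j->buf[0] || !j->buf[1]) {
            free(j->pin);
            free(j->buf[0]);
            free(j->buf[1]);
            return -1;                   /* -ENOMEM in the kernel */
        }
        return 0;
    }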