Lines matching refs: c (uses of struct cache_set *c in drivers/md/bcache/journal.c)
144 int bch_journal_read(struct cache_set *c, struct list_head *list) in bch_journal_read() argument
158 for_each_cache(ca, c, iter) { in bch_journal_read()
258 c->journal.seq = list_entry(list->prev, in bch_journal_read()
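The matches above cover bch_journal_read(), which walks every cache device via for_each_cache() and, once all entries are decoded, takes c->journal.seq from the tail of the caller's list (list_entry(list->prev, ...)), i.e. the newest entry. A minimal userspace sketch of that invariant follows; the replay_entry type and insert_sorted() helper are invented for illustration, and only the keep-the-list-sorted-so-the-tail-is-newest idea comes from the listing.

/* Toy model (not the kernel code): replay entries kept sorted by
 * ascending sequence number, so the tail of the list is the newest
 * entry, mirroring how bch_journal_read() derives c->journal.seq. */
#include <stdio.h>
#include <stdlib.h>

struct replay_entry {
    unsigned long long seq;
    struct replay_entry *prev, *next;
};

/* Insert while keeping the circular list sorted by seq (ascending). */
static void insert_sorted(struct replay_entry *head, struct replay_entry *e)
{
    struct replay_entry *pos = head->next;

    while (pos != head && pos->seq < e->seq)
        pos = pos->next;
    e->prev = pos->prev;
    e->next = pos;
    pos->prev->next = e;
    pos->prev = e;
}

int main(void)
{
    struct replay_entry head = { 0, &head, &head };
    unsigned long long seqs[] = { 12, 10, 11 };

    for (int i = 0; i < 3; i++) {
        struct replay_entry *e = malloc(sizeof(*e));
        e->seq = seqs[i];
        insert_sorted(&head, e);
    }
    /* Tail of the list is the newest entry, as in bch_journal_read(). */
    printf("journal.seq = %llu\n", head.prev->seq); /* prints 12 */
    return 0;
}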
266 void bch_journal_mark(struct cache_set *c, struct list_head *list) in bch_journal_mark() argument
271 struct journal *j = &c->journal; in bch_journal_mark()
300 if (!__bch_extent_invalid(c, k)) { in bch_journal_mark()
304 if (ptr_available(c, k, j)) in bch_journal_mark()
305 atomic_inc(&PTR_BUCKET(c, k, j)->pin); in bch_journal_mark()
307 bch_initial_mark_key(c, 0, k); in bch_journal_mark()
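bch_journal_mark() checks each journalled key with __bch_extent_invalid() and, for every pointer whose bucket is available, bumps that bucket's pin count before marking the key, so the allocator cannot reuse buckets the journal still references during replay. A hedged model using C11 atomics; the bucket indices and the availability test are stand-ins, not the kernel's types.

/* Sketch: for every pointer in a journalled key whose bucket is
 * available, bump that bucket's pin count, as bch_journal_mark()
 * does with atomic_inc(&PTR_BUCKET(c, k, j)->pin). */
#include <stdatomic.h>
#include <stdio.h>

#define NBUCKETS 8

static atomic_int pin[NBUCKETS];

static void mark_key_ptrs(const unsigned *bucket_idx, unsigned nptrs)
{
    for (unsigned i = 0; i < nptrs; i++)
        if (bucket_idx[i] < NBUCKETS)                 /* ptr_available() stand-in */
            atomic_fetch_add(&pin[bucket_idx[i]], 1); /* PTR_BUCKET(...)->pin */
}

int main(void)
{
    unsigned ptrs[] = { 2, 5 };
    mark_key_ptrs(ptrs, 2);
    printf("pin[2]=%d pin[5]=%d\n", atomic_load(&pin[2]), atomic_load(&pin[5]));
    return 0;
}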
366 static void btree_flush_write(struct cache_set *c) in btree_flush_write() argument
377 for_each_cached_btree(b, c, i) in btree_flush_write()
381 else if (journal_pin_cmp(c, in btree_flush_write()
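btree_flush_write() scans the cached btree nodes and uses journal_pin_cmp() to find the one holding the oldest journal pin; writing that node out is what lets journal reclaim advance. A small selection-loop model, with invented node fields:

/* Among dirty btree nodes, pick the one with the oldest journal pin
 * (smallest sequence). Field names are illustrative only. */
#include <stdio.h>

struct node { int dirty; unsigned long long pin_seq; };

static struct node *oldest_dirty(struct node *v, int n)
{
    struct node *best = NULL;

    for (int i = 0; i < n; i++) {
        if (!v[i].dirty)
            continue;                 /* like skipping nodes with no journal ref */
        if (!best || v[i].pin_seq < best->pin_seq)
            best = &v[i];             /* journal_pin_cmp() stand-in */
    }
    return best;
}

int main(void)
{
    struct node v[] = { {1, 40}, {0, 10}, {1, 25} };
    struct node *b = oldest_dirty(v, 3);
    printf("flush node with pin_seq=%llu\n", b ? b->pin_seq : 0); /* 25 */
    return 0;
}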
467 static void journal_reclaim(struct cache_set *c) in journal_reclaim() argument
469 struct bkey *k = &c->journal.key; in journal_reclaim()
475 while (!atomic_read(&fifo_front(&c->journal.pin))) in journal_reclaim()
476 fifo_pop(&c->journal.pin, p); in journal_reclaim()
478 last_seq = last_seq(&c->journal); in journal_reclaim()
482 for_each_cache(ca, c, iter) { in journal_reclaim()
491 for_each_cache(ca, c, iter) in journal_reclaim()
494 if (c->journal.blocks_free) in journal_reclaim()
502 for_each_cache(ca, c, iter) { in journal_reclaim()
512 bucket_to_sector(c, ca->sb.d[ja->cur_idx]), in journal_reclaim()
520 c->journal.blocks_free = c->sb.bucket_size >> c->block_bits; in journal_reclaim()
522 if (!journal_full(&c->journal)) in journal_reclaim()
523 __closure_wake_up(&c->journal.wait); in journal_reclaim()
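journal_reclaim() first drains the front of the journal pin FIFO: entries whose refcount has reached zero are popped, which advances last_seq() and frees journal buckets, and waiters are woken once the journal is no longer full. A userspace model of that drain, using a plain ring buffer of C11 atomics in place of the kernel fifo type:

/* Model of the pin-FIFO walk at the top of journal_reclaim():
 * while (!atomic_read(&fifo_front(&c->journal.pin)))
 *         fifo_pop(&c->journal.pin, p); */
#include <stdatomic.h>
#include <stdio.h>

#define FIFO_SZ 8

static atomic_int pins[FIFO_SZ];
static unsigned front, back;         /* back - front = live entries */

static void reclaim_front(void)
{
    while (front != back && !atomic_load(&pins[front % FIFO_SZ]))
        front++;                     /* fifo_pop(): entry fully released */
}

int main(void)
{
    for (int i = 0; i < 4; i++)
        atomic_store(&pins[back++ % FIFO_SZ], 1);

    atomic_store(&pins[0], 0);       /* oldest two entries dropped their refs */
    atomic_store(&pins[1], 0);
    reclaim_front();
    printf("live pins: %u\n", back - front); /* 2: stopped at a held pin */
    return 0;
}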
554 cache_set_err_on(bio->bi_error, w->c, "journal io error"); in journal_write_endio()
555 closure_put(&w->c->journal.io); in journal_write_endio()
573 struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write_unlock() local
575 c->journal.io_in_flight = 0; in journal_write_unlock()
576 spin_unlock(&c->journal.lock); in journal_write_unlock()
580 __releases(c->journal.lock) in journal_write_unlocked()
582 struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write_unlocked() local
584 struct journal_write *w = c->journal.cur; in journal_write_unlocked()
585 struct bkey *k = &c->journal.key; in journal_write_unlocked()
586 unsigned i, sectors = set_blocks(w->data, block_bytes(c)) * in journal_write_unlocked()
587 c->sb.block_size; in journal_write_unlocked()
596 } else if (journal_full(&c->journal)) { in journal_write_unlocked()
597 journal_reclaim(c); in journal_write_unlocked()
598 spin_unlock(&c->journal.lock); in journal_write_unlocked()
600 btree_flush_write(c); in journal_write_unlocked()
605 c->journal.blocks_free -= set_blocks(w->data, block_bytes(c)); in journal_write_unlocked()
607 w->data->btree_level = c->root->level; in journal_write_unlocked()
609 bkey_copy(&w->data->btree_root, &c->root->key); in journal_write_unlocked()
610 bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket); in journal_write_unlocked()
612 for_each_cache(ca, c, i) in journal_write_unlocked()
615 w->data->magic = jset_magic(&c->sb); in journal_write_unlocked()
617 w->data->last_seq = last_seq(&c->journal); in journal_write_unlocked()
621 ca = PTR_CACHE(c, k, i); in journal_write_unlocked()
644 atomic_dec_bug(&fifo_back(&c->journal.pin)); in journal_write_unlocked()
645 bch_journal_next(&c->journal); in journal_write_unlocked()
646 journal_reclaim(c); in journal_write_unlocked()
648 spin_unlock(&c->journal.lock); in journal_write_unlocked()
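journal_write_unlocked() sizes the entry as set_blocks(w->data, block_bytes(c)) blocks, converts that to sectors through c->sb.block_size, and charges it against c->journal.blocks_free before filling in the jset header (magic, last_seq, btree root) and submitting. The rounding arithmetic, with made-up sizes:

/* Sizing sketch: a jset occupies its byte length rounded up to whole
 * blocks; blocks convert to 512-byte sectors via c->sb.block_size.
 * All values below are invented for illustration. */
#include <stdio.h>

int main(void)
{
    unsigned block_bytes = 4096;              /* block_bytes(c) */
    unsigned block_size  = block_bytes / 512; /* c->sb.block_size, in sectors */
    unsigned jset_bytes  = 6000;              /* current entry payload */

    unsigned blocks  = (jset_bytes + block_bytes - 1) / block_bytes; /* set_blocks() */
    unsigned sectors = blocks * block_size;

    unsigned blocks_free = 128;
    blocks_free -= blocks;                    /* c->journal.blocks_free -= ... */

    printf("blocks=%u sectors=%u blocks_free=%u\n", blocks, sectors, blocks_free);
    return 0; /* blocks=2 sectors=16 blocks_free=126 */
}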
658 struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write() local
660 spin_lock(&c->journal.lock); in journal_write()
664 static void journal_try_write(struct cache_set *c) in journal_try_write() argument
665 __releases(c->journal.lock) in journal_try_write()
667 struct closure *cl = &c->journal.io; in journal_try_write()
668 struct journal_write *w = c->journal.cur; in journal_try_write()
672 if (!c->journal.io_in_flight) { in journal_try_write()
673 c->journal.io_in_flight = 1; in journal_try_write()
674 closure_call(cl, journal_write_unlocked, NULL, &c->cl); in journal_try_write()
676 spin_unlock(&c->journal.lock); in journal_try_write()
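journal_try_write() is the single-writer gate: under c->journal.lock it tests and sets io_in_flight, so at most one journal write is launched via closure_call(); otherwise it simply drops the lock. A simplified pthread model (in the kernel, the closure machinery releases the lock from inside journal_write_unlocked() instead):

/* Single-writer gate sketch: a pthread mutex stands in for
 * c->journal.lock, a direct call for closure_call(). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t journal_lock = PTHREAD_MUTEX_INITIALIZER;
static int io_in_flight;

static void journal_write_unlocked(void)   /* runs once the gate is won */
{
    printf("submitting journal write\n");
}

static void journal_try_write(void)        /* caller holds journal_lock; releases it */
{
    if (!io_in_flight) {
        io_in_flight = 1;
        journal_write_unlocked();
    }
    pthread_mutex_unlock(&journal_lock);
}

int main(void)
{
    pthread_mutex_lock(&journal_lock);
    journal_try_write();    /* first caller submits */
    pthread_mutex_lock(&journal_lock);
    journal_try_write();    /* second caller: write already in flight */
    return 0;
}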
680 static struct journal_write *journal_wait_for_write(struct cache_set *c, in journal_wait_for_write() argument
689 spin_lock(&c->journal.lock); in journal_wait_for_write()
692 struct journal_write *w = c->journal.cur; in journal_wait_for_write()
695 block_bytes(c)) * c->sb.block_size; in journal_wait_for_write()
698 c->journal.blocks_free * c->sb.block_size, in journal_wait_for_write()
703 closure_wait(&c->journal.wait, &cl); in journal_wait_for_write()
705 if (!journal_full(&c->journal)) { in journal_wait_for_write()
707 trace_bcache_journal_entry_full(c); in journal_wait_for_write()
717 journal_try_write(c); /* unlocks */ in journal_wait_for_write()
720 trace_bcache_journal_full(c); in journal_wait_for_write()
722 journal_reclaim(c); in journal_wait_for_write()
723 spin_unlock(&c->journal.lock); in journal_wait_for_write()
725 btree_flush_write(c); in journal_wait_for_write()
729 spin_lock(&c->journal.lock); in journal_wait_for_write()
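journal_wait_for_write() loops until the caller's keys fit in the open entry; when the journal itself is full it reclaims and flushes btree nodes, otherwise it closes the current entry with journal_try_write() and retries, reacquiring c->journal.lock on each pass. A control-flow model with illustrative names and sizes:

/* Retry-loop sketch of journal_wait_for_write(); locking and the
 * closure wait are omitted, sizes are invented. */
#include <stdbool.h>
#include <stdio.h>

struct jw { unsigned used, capacity; };

static bool journal_full;

static void journal_reclaim(void)   { journal_full = false; }
static void btree_flush_write(void) { }
static void journal_try_write(struct jw *w) { w->used = 0; }

static struct jw *journal_wait_for_write(struct jw *w, unsigned nkeys)
{
    while (1) {
        if (w->used + nkeys <= w->capacity)
            return w;               /* keys fit: caller appends them */
        if (journal_full) {
            journal_reclaim();      /* trace_bcache_journal_full() path */
            btree_flush_write();
        } else {
            journal_try_write(w);   /* trace_bcache_journal_entry_full() path */
        }
    }
}

int main(void)
{
    struct jw w = { .used = 90, .capacity = 100 };
    journal_wait_for_write(&w, 30); /* forces one entry rollover */
    printf("used=%u\n", w.used);    /* 0: fresh entry ready */
    return 0;
}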
736 struct cache_set *c = container_of(to_delayed_work(work), in journal_write_work() local
739 spin_lock(&c->journal.lock); in journal_write_work()
740 if (c->journal.cur->dirty) in journal_write_work()
741 journal_try_write(c); in journal_write_work()
743 spin_unlock(&c->journal.lock); in journal_write_work()
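journal_write_work() is the delayed-work handler armed from bch_journal(): when it fires, it writes the current entry only if it has been dirtied since the last flush, otherwise it just drops the lock. A minimal model (a direct call stands in for the workqueue, and the real journal_try_write() would release the lock itself):

/* Delayed-flush sketch: flush only when something dirtied the
 * current entry since the last write. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t journal_lock = PTHREAD_MUTEX_INITIALIZER;
static int cur_dirty;

static void journal_write_work(void)
{
    pthread_mutex_lock(&journal_lock);
    if (cur_dirty) {
        cur_dirty = 0;
        printf("flushing dirty journal entry\n");
    }
    pthread_mutex_unlock(&journal_lock);
}

int main(void)
{
    cur_dirty = 1;
    journal_write_work();   /* flushes */
    journal_write_work();   /* no-op: nothing dirty */
    return 0;
}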
752 atomic_t *bch_journal(struct cache_set *c, in bch_journal() argument
759 if (!CACHE_SYNC(&c->sb)) in bch_journal()
762 w = journal_wait_for_write(c, bch_keylist_nkeys(keys)); in bch_journal()
767 ret = &fifo_back(&c->journal.pin); in bch_journal()
772 journal_try_write(c); in bch_journal()
775 schedule_delayed_work(&c->journal.work, in bch_journal()
776 msecs_to_jiffies(c->journal_delay_ms)); in bch_journal()
777 spin_unlock(&c->journal.lock); in bch_journal()
779 spin_unlock(&c->journal.lock); in bch_journal()
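The bch_journal() matches show the fast path: skip journalling entirely when CACHE_SYNC is off, wait for room, append the keys, and return a reference on fifo_back(&c->journal.pin); a flush request writes immediately, otherwise the delayed work is armed with c->journal_delay_ms. A sketch of the append-and-pin step, with invented types:

/* Append keys to the open entry and take a ref on its pin; the
 * caller drops the ref once the keys are in the btree, which is
 * what later lets journal_reclaim() advance. */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct jentry {
    unsigned used;
    unsigned long long keys[16];
    atomic_int pin;                 /* fifo_back(&c->journal.pin) stand-in */
};

static atomic_int *journal_add(struct jentry *w,
                               const unsigned long long *keys, unsigned n)
{
    memcpy(&w->keys[w->used], keys, n * sizeof(*keys));
    w->used += n;
    atomic_fetch_add(&w->pin, 1);   /* one ref per journalled insert */
    return &w->pin;
}

int main(void)
{
    struct jentry w = { 0 };
    unsigned long long keys[] = { 1, 2 };
    atomic_int *ref = journal_add(&w, keys, 2);

    atomic_fetch_sub(ref, 1);       /* keys reached the btree: unpin */
    printf("used=%u pin=%d\n", w.used, atomic_load(&w.pin));
    return 0;
}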
786 void bch_journal_meta(struct cache_set *c, struct closure *cl) in bch_journal_meta() argument
793 ref = bch_journal(c, &keys, cl); in bch_journal_meta()
798 void bch_journal_free(struct cache_set *c) in bch_journal_free() argument
800 free_pages((unsigned long) c->journal.w[1].data, JSET_BITS); in bch_journal_free()
801 free_pages((unsigned long) c->journal.w[0].data, JSET_BITS); in bch_journal_free()
802 free_fifo(&c->journal.pin); in bch_journal_free()
805 int bch_journal_alloc(struct cache_set *c) in bch_journal_alloc() argument
807 struct journal *j = &c->journal; in bch_journal_alloc()
812 c->journal_delay_ms = 100; in bch_journal_alloc()
814 j->w[0].c = c; in bch_journal_alloc()
815 j->w[1].c = c; in bch_journal_alloc()
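bch_journal_alloc() defaults journal_delay_ms to 100 and sets up two write buffers, w[0] and w[1], each pointing back at its cache_set, so one jset can be filled while the other is in flight; bch_journal_free() releases them in the reverse order. A shape-only sketch in which malloc() stands in for the page allocator and the JSET_BITS sizing is reduced to a byte count:

/* Double-buffered journal allocation sketch; types are pared down
 * to the fields the listing shows. */
#include <stdlib.h>

struct cache_set;

struct journal_write { void *data; struct cache_set *c; };

struct journal { struct journal_write w[2]; };

static int journal_alloc(struct journal *j, struct cache_set *c, size_t bytes)
{
    j->w[0].c = c;
    j->w[1].c = c;
    if (!(j->w[0].data = malloc(bytes)) ||
        !(j->w[1].data = malloc(bytes)))
        return -1;                  /* caller unwinds with journal_free() */
    return 0;
}

static void journal_free(struct journal *j)
{
    free(j->w[1].data);             /* mirror bch_journal_free(): w[1] first */
    free(j->w[0].data);
}

int main(void)
{
    struct journal j = { 0 };
    if (!journal_alloc(&j, NULL, 4096))
        journal_free(&j);
    return 0;
}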