Lines matching refs:c — uses of struct cache_set *c in the bcache journal code (drivers/md/bcache/journal.c)

144 int bch_journal_read(struct cache_set *c, struct list_head *list)  in bch_journal_read()  argument
158 for_each_cache(ca, c, iter) { in bch_journal_read()
258 c->journal.seq = list_entry(list->prev, in bch_journal_read()
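
Lines 144-258 above are from bch_journal_read(), which scans the journal buckets of every cache device and assembles the entries it finds, in sequence order, onto the caller's replay list; line 258 then records the newest entry's seq as c->journal.seq. A minimal userspace sketch of that ordering step, with an illustrative struct jentry standing in for the kernel's struct journal_replay:

#include <stdint.h>
#include <stdio.h>

struct jentry { uint64_t seq; struct jentry *next; };

/* Insert keeping ascending seq order; a duplicate seq means the same
 * entry was found on another cache device and can be skipped. */
static void add_sorted(struct jentry **head, struct jentry *e)
{
    while (*head && (*head)->seq < e->seq)
        head = &(*head)->next;
    if (*head && (*head)->seq == e->seq)
        return;
    e->next = *head;
    *head = e;
}

int main(void)
{
    struct jentry a = { 7 }, b = { 5 }, c = { 7 }, *head = NULL;
    add_sorted(&head, &a);
    add_sorted(&head, &b);
    add_sorted(&head, &c);                 /* duplicate of seq 7, skipped */
    uint64_t newest = 0;
    for (struct jentry *e = head; e; e = e->next)
        newest = e->seq;                   /* c->journal.seq = newest */
    printf("journal.seq = %llu\n", (unsigned long long)newest);
    return 0;
}
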
266 void bch_journal_mark(struct cache_set *c, struct list_head *list) in bch_journal_mark() argument
271 struct journal *j = &c->journal; in bch_journal_mark()
300 if (!__bch_extent_invalid(c, k)) { in bch_journal_mark()
304 if (ptr_available(c, k, j)) in bch_journal_mark()
305 atomic_inc(&PTR_BUCKET(c, k, j)->pin); in bch_journal_mark()
307 bch_initial_mark_key(c, 0, k); in bch_journal_mark()
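
Lines 266-307 are bch_journal_mark(): before replay, every bucket referenced by a valid, available journal key gets its pin count raised so the allocator cannot reuse it, and the key is handed to initial GC marking (bch_initial_mark_key). A userspace model of the pinning, with bucket_of() and NBUCKETS as illustrative stand-ins:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 64
static atomic_int pin[NBUCKETS];

static unsigned bucket_of(uint64_t ptr) { return (unsigned)(ptr % NBUCKETS); }

/* atomic_inc(&PTR_BUCKET(c, k, j)->pin) analogue: one pin per pointer */
static void pin_key(uint64_t ptr) { atomic_fetch_add(&pin[bucket_of(ptr)], 1); }

/* the allocator side: a pinned bucket must not be reused yet */
static int reusable(unsigned b) { return atomic_load(&pin[b]) == 0; }

int main(void)
{
    pin_key(130);                       /* key points into bucket 2 */
    printf("bucket 2 reusable: %d\n", reusable(2));
    return 0;
}
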
366 static void btree_flush_write(struct cache_set *c) in btree_flush_write() argument
377 for_each_cached_btree(b, c, i) in btree_flush_write()
381 else if (journal_pin_cmp(c, in btree_flush_write()
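
Lines 366-381 are from btree_flush_write(): when journal space is exhausted, it scans the cached btree nodes for the one holding the oldest journal pin (journal_pin_cmp() does the comparison) and writes it out, since flushing that node releases the most journal space. The scan is essentially a minimum search over dirty nodes; a sketch with an illustrative struct node:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for struct btree plus its journal pin position */
struct node { int dirty; uint64_t pin_seq; };

static struct node *oldest_dirty(struct node *v, size_t n)
{
    struct node *best = NULL;
    for (size_t i = 0; i < n; i++)
        if (v[i].dirty && (!best || v[i].pin_seq < best->pin_seq))
            best = &v[i];               /* journal_pin_cmp() analogue */
    return best;                        /* caller writes this node out */
}

int main(void)
{
    struct node v[] = { {1, 9}, {0, 2}, {1, 4} };
    printf("flush node with pin_seq %llu\n",
           (unsigned long long)oldest_dirty(v, 3)->pin_seq);
    return 0;
}
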
467 static void journal_reclaim(struct cache_set *c) in journal_reclaim() argument
469 struct bkey *k = &c->journal.key; in journal_reclaim()
475 while (!atomic_read(&fifo_front(&c->journal.pin))) in journal_reclaim()
476 fifo_pop(&c->journal.pin, p); in journal_reclaim()
478 last_seq = last_seq(&c->journal); in journal_reclaim()
482 for_each_cache(ca, c, iter) { in journal_reclaim()
491 for_each_cache(ca, c, iter) in journal_reclaim()
494 if (c->journal.blocks_free) in journal_reclaim()
502 for_each_cache(ca, c, iter) { in journal_reclaim()
512 bucket_to_sector(c, ca->sb.d[ja->cur_idx]), in journal_reclaim()
520 c->journal.blocks_free = c->sb.bucket_size >> c->block_bits; in journal_reclaim()
522 if (!journal_full(&c->journal)) in journal_reclaim()
523 __closure_wake_up(&c->journal.wait); in journal_reclaim()
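
Lines 467-523 are journal_reclaim(): entries whose pin refcount has dropped to zero are popped off the front of the pin FIFO, which advances last_seq; journal buckets containing only entries older than last_seq can then be reissued at cur_idx, blocks_free is refilled from the superblock bucket size (line 520), and waiters on journal.wait are woken once the journal is no longer full. A userspace model of the FIFO step, with illustrative sizes:

#include <stdatomic.h>
#include <stdio.h>

#define PIN_SLOTS 16

struct pin_fifo {
    atomic_int ref[PIN_SLOTS];
    unsigned front, back;       /* entries in [front, back) are live */
    unsigned long long seq;     /* seq of the newest live entry */
};

/* last_seq: oldest entry any journal bucket still has to keep */
static unsigned long long last_seq(struct pin_fifo *f)
{
    return f->seq - (f->back - f->front) + 1;
}

static void reclaim(struct pin_fifo *f)
{
    /* while (!atomic_read(&fifo_front(&c->journal.pin))) fifo_pop(...) */
    while (f->front != f->back &&
           atomic_load(&f->ref[f->front % PIN_SLOTS]) == 0)
        f->front++;
}

int main(void)
{
    struct pin_fifo f = { .front = 0, .back = 3, .seq = 10 };
    atomic_store(&f.ref[1], 1);                  /* entry seq 9 still pinned */
    reclaim(&f);                                 /* pops entry seq 8 only */
    printf("last_seq = %llu\n", last_seq(&f));   /* 9 */
    return 0;
}
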
554 cache_set_err_on(error, w->c, "journal io error"); in journal_write_endio()
555 closure_put(&w->c->journal.io); in journal_write_endio()
573 struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write_unlock() local
575 c->journal.io_in_flight = 0; in journal_write_unlock()
576 spin_unlock(&c->journal.lock); in journal_write_unlock()
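
Lines 554-576 show the completion side. Each bio submitted for a journal write holds a reference on the journal.io closure; journal_write_endio() flags any I/O error on the cache set and drops one reference per bio, and when the last reference goes, the continuation journal_write_unlock() clears io_in_flight and releases journal.lock. The refcounting at the heart of the closure pattern, modeled in userspace:

#include <stdatomic.h>
#include <stdio.h>

struct closure { atomic_int remaining; };

static void journal_write_done(void)
{
    /* journal_write_unlock(): io_in_flight = 0, drop journal.lock */
    printf("all journal bios done, next write may start\n");
}

/* closure_put(&w->c->journal.io) analogue, called once per bio */
static void endio(struct closure *cl)
{
    if (atomic_fetch_sub(&cl->remaining, 1) == 1)
        journal_write_done();
}

int main(void)
{
    struct closure cl = { 2 };  /* two bios in flight */
    endio(&cl);                 /* first completion: nothing yet */
    endio(&cl);                 /* last completion: continuation runs */
    return 0;
}
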
580 __releases(c->journal.lock) in journal_write_unlocked()
582 struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write_unlocked() local
584 struct journal_write *w = c->journal.cur; in journal_write_unlocked()
585 struct bkey *k = &c->journal.key; in journal_write_unlocked()
586 unsigned i, sectors = set_blocks(w->data, block_bytes(c)) * in journal_write_unlocked()
587 c->sb.block_size; in journal_write_unlocked()
595 } else if (journal_full(&c->journal)) { in journal_write_unlocked()
596 journal_reclaim(c); in journal_write_unlocked()
597 spin_unlock(&c->journal.lock); in journal_write_unlocked()
599 btree_flush_write(c); in journal_write_unlocked()
603 c->journal.blocks_free -= set_blocks(w->data, block_bytes(c)); in journal_write_unlocked()
605 w->data->btree_level = c->root->level; in journal_write_unlocked()
607 bkey_copy(&w->data->btree_root, &c->root->key); in journal_write_unlocked()
608 bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket); in journal_write_unlocked()
610 for_each_cache(ca, c, i) in journal_write_unlocked()
613 w->data->magic = jset_magic(&c->sb); in journal_write_unlocked()
615 w->data->last_seq = last_seq(&c->journal); in journal_write_unlocked()
619 ca = PTR_CACHE(c, k, i); in journal_write_unlocked()
642 atomic_dec_bug(&fifo_back(&c->journal.pin)); in journal_write_unlocked()
643 bch_journal_next(&c->journal); in journal_write_unlocked()
644 journal_reclaim(c); in journal_write_unlocked()
646 spin_unlock(&c->journal.lock); in journal_write_unlocked()
649 closure_bio_submit(bio, cl, c->cache[0]); in journal_write_unlocked()
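
Lines 580-649 are journal_write_unlocked(), the function that actually issues the write: the entry is padded to whole blocks (lines 586-587) and the same block count is charged against blocks_free (line 603); the jset header is stamped with the btree root, uuid bucket, magic, and last_seq; then one bio per pointer in the journal key is built and submitted under the closure. The size arithmetic is round-up-then-convert: c->sb.block_size is in 512-byte sectors and block_bytes(c) is that value shifted left by 9. A worked example with illustrative values:

#include <stdio.h>

/* set_blocks(): round a jset up to whole filesystem blocks */
static unsigned set_blocks(unsigned set_bytes, unsigned block_bytes)
{
    return (set_bytes + block_bytes - 1) / block_bytes;
}

int main(void)
{
    unsigned block_bytes = 4096;        /* block_bytes(c) */
    unsigned block_size  = 4096 >> 9;   /* c->sb.block_size in sectors: 8 */
    unsigned jset_bytes  = 5000;        /* bytes actually used in w->data */

    unsigned blocks  = set_blocks(jset_bytes, block_bytes);   /* 2 */
    unsigned sectors = blocks * block_size;                    /* 16 */

    printf("write %u sectors, blocks_free -= %u\n", sectors, blocks);
    return 0;
}
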
656 struct cache_set *c = container_of(cl, struct cache_set, journal.io); in journal_write() local
658 spin_lock(&c->journal.lock); in journal_write()
662 static void journal_try_write(struct cache_set *c) in journal_try_write() argument
663 __releases(c->journal.lock) in journal_try_write()
665 struct closure *cl = &c->journal.io; in journal_try_write()
666 struct journal_write *w = c->journal.cur; in journal_try_write()
670 if (!c->journal.io_in_flight) { in journal_try_write()
671 c->journal.io_in_flight = 1; in journal_try_write()
672 closure_call(cl, journal_write_unlocked, NULL, &c->cl); in journal_try_write()
674 spin_unlock(&c->journal.lock); in journal_try_write()
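
Lines 662-674: journal_try_write() is entered with journal.lock held and, per the __releases(c->journal.lock) annotation it shares with journal_write_unlocked(), drops the lock on every path; the io_in_flight flag ensures only one journal write is being assembled at a time. A pthread model of that lock-handoff convention:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct jrnl { pthread_mutex_t lock; bool io_in_flight; };

/* journal_write_unlocked() stand-in: builds and submits, then unlocks */
static void write_unlocked(struct jrnl *j)
{
    printf("submitting journal entry\n");
    pthread_mutex_unlock(&j->lock);
}

static void try_write(struct jrnl *j)   /* caller holds j->lock */
{
    if (!j->io_in_flight) {
        j->io_in_flight = true;
        write_unlocked(j);              /* unlocks */
    } else {
        pthread_mutex_unlock(&j->lock); /* a writer is already running */
    }
}

int main(void)
{
    struct jrnl j = { PTHREAD_MUTEX_INITIALIZER, false };
    pthread_mutex_lock(&j.lock);
    try_write(&j);                      /* returns with lock dropped */
    return 0;
}
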
678 static struct journal_write *journal_wait_for_write(struct cache_set *c, in journal_wait_for_write() argument
687 spin_lock(&c->journal.lock); in journal_wait_for_write()
690 struct journal_write *w = c->journal.cur; in journal_wait_for_write()
693 block_bytes(c)) * c->sb.block_size; in journal_wait_for_write()
696 c->journal.blocks_free * c->sb.block_size, in journal_wait_for_write()
701 closure_wait(&c->journal.wait, &cl); in journal_wait_for_write()
703 if (!journal_full(&c->journal)) { in journal_wait_for_write()
705 trace_bcache_journal_entry_full(c); in journal_wait_for_write()
715 journal_try_write(c); /* unlocks */ in journal_wait_for_write()
718 trace_bcache_journal_full(c); in journal_wait_for_write()
720 journal_reclaim(c); in journal_wait_for_write()
721 spin_unlock(&c->journal.lock); in journal_wait_for_write()
723 btree_flush_write(c); in journal_wait_for_write()
727 spin_lock(&c->journal.lock); in journal_wait_for_write()
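
Lines 678-727 are journal_wait_for_write(), the loop every journaled insert goes through: take the lock and return the current entry if the keys fit; otherwise either push out a full entry (journal not full, line 715) or reclaim and then flush the btree node holding the oldest pin (journal full, lines 720-723), and retry. Note that it returns with journal.lock still held. A userspace model with deliberately stubbed helpers:

#include <pthread.h>
#include <stdbool.h>

struct entry { unsigned keys, capacity; };
struct jrnl {
    pthread_mutex_t lock;
    struct entry cur;
    unsigned blocks_free;
};

static bool journal_full(struct jrnl *j) { return j->blocks_free == 0; }
static void start_write(struct jrnl *j)             /* stub; unlocks */
{ j->cur.keys = 0; pthread_mutex_unlock(&j->lock); }
static void reclaim(struct jrnl *j)     { j->blocks_free += 8; }  /* stub */
static void flush_btree(struct jrnl *j) { (void)j; }              /* stub */

/* returns with j->lock held, like the kernel function */
static struct entry *wait_for_write(struct jrnl *j, unsigned nkeys)
{
    for (;;) {
        pthread_mutex_lock(&j->lock);
        if (j->cur.keys + nkeys <= j->cur.capacity)
            return &j->cur;
        if (!journal_full(j)) {
            start_write(j);             /* entry full: write it out, unlocks */
        } else {
            reclaim(j);                 /* journal full: pop released pins */
            pthread_mutex_unlock(&j->lock);
            flush_btree(j);             /* release the oldest pin */
        }
    }
}

int main(void)
{
    struct jrnl j = { PTHREAD_MUTEX_INITIALIZER, { 30, 32 }, 8 };
    struct entry *w = wait_for_write(&j, 4);   /* forces one start_write */
    w->keys += 4;
    pthread_mutex_unlock(&j.lock);
    return 0;
}
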
734 struct cache_set *c = container_of(to_delayed_work(work), in journal_write_work() local
737 spin_lock(&c->journal.lock); in journal_write_work()
738 if (c->journal.cur->dirty) in journal_write_work()
739 journal_try_write(c); in journal_write_work()
741 spin_unlock(&c->journal.lock); in journal_write_work()
750 atomic_t *bch_journal(struct cache_set *c, in bch_journal() argument
757 if (!CACHE_SYNC(&c->sb)) in bch_journal()
760 w = journal_wait_for_write(c, bch_keylist_nkeys(keys)); in bch_journal()
765 ret = &fifo_back(&c->journal.pin); in bch_journal()
770 journal_try_write(c); in bch_journal()
773 schedule_delayed_work(&c->journal.work, in bch_journal()
774 msecs_to_jiffies(c->journal_delay_ms)); in bch_journal()
775 spin_unlock(&c->journal.lock); in bch_journal()
777 spin_unlock(&c->journal.lock); in bch_journal()
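
Lines 750-777 are bch_journal(), the entry point: with CACHE_SYNC off it journals nothing; otherwise it reserves room via journal_wait_for_write(), copies the keys into the entry, takes a reference on the entry's pin (line 765) and returns it, and the caller drops that reference once the keys are safely in the btree. With no flush closure, the write itself is deferred through the delayed work of lines 734-741, which fires journal_delay_ms (100 ms by default, line 810) after the entry went dirty. A model of the refcount handshake, with illustrative types:

#include <stdatomic.h>
#include <stdio.h>

struct entry { atomic_int pin; int dirty; };

/* bch_journal() analogue: buffer keys, hand back the pin reference */
static atomic_int *journal_add(struct entry *e /*, keylist */)
{
    e->dirty = 1;                   /* delayed work will flush this */
    atomic_fetch_add(&e->pin, 1);   /* ret = &fifo_back(&c->journal.pin) */
    return &e->pin;
}

/* caller side: drop the pin once the keys are inserted in the btree */
static void insert_done(atomic_int *ref) { atomic_fetch_sub(ref, 1); }

int main(void)
{
    struct entry e = { 1 };         /* the FIFO itself holds one reference */
    atomic_int *ref = journal_add(&e);
    insert_done(ref);               /* entry reclaimable once pin hits 0 */
    printf("pin = %d\n", atomic_load(&e.pin));
    return 0;
}
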
784 void bch_journal_meta(struct cache_set *c, struct closure *cl) in bch_journal_meta() argument
791 ref = bch_journal(c, &keys, cl); in bch_journal_meta()
796 void bch_journal_free(struct cache_set *c) in bch_journal_free() argument
798 free_pages((unsigned long) c->journal.w[1].data, JSET_BITS); in bch_journal_free()
799 free_pages((unsigned long) c->journal.w[0].data, JSET_BITS); in bch_journal_free()
800 free_fifo(&c->journal.pin); in bch_journal_free()
803 int bch_journal_alloc(struct cache_set *c) in bch_journal_alloc() argument
805 struct journal *j = &c->journal; in bch_journal_alloc()
810 c->journal_delay_ms = 100; in bch_journal_alloc()
812 j->w[0].c = c; in bch_journal_alloc()
813 j->w[1].c = c; in bch_journal_alloc()
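
Lines 796-813 are the setup/teardown pair: bch_journal_alloc() sets the default journal_delay_ms, allocates the two journal_write buffers w[0] and w[1] (double buffering, so one entry can fill while the other is on disk) and points both back at the cache set; bch_journal_free() releases the buffers and the pin FIFO in reverse order. A sketch of the double-buffer ownership, with JSET_BYTES standing in for PAGE_SIZE << JSET_BITS:

#include <stdlib.h>

#define JSET_BYTES (4096 << 3)      /* PAGE_SIZE << JSET_BITS, illustrative */

struct jw { void *data; };
struct jrnl { struct jw w[2]; };

static int journal_alloc(struct jrnl *j)
{
    j->w[0].data = malloc(JSET_BYTES);   /* __get_free_pages() analogue */
    j->w[1].data = malloc(JSET_BYTES);
    return (j->w[0].data && j->w[1].data) ? 0 : -1;
}

static void journal_free(struct jrnl *j)
{
    free(j->w[1].data);             /* mirrors bch_journal_free() order */
    free(j->w[0].data);
}

int main(void)
{
    struct jrnl j = { 0 };
    journal_alloc(&j);
    journal_free(&j);               /* free(NULL) is safe on failure */
    return 0;
}
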