Lines matching refs: rrpc
22 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
25 #define rrpc_for_each_lun(rrpc, rlun, i) \ argument
26 for ((i) = 0, rlun = &(rrpc)->luns[0]; \
27 (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
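For orientation, a minimal usage sketch of the iterator macro above; the caller name is hypothetical and assumes only the fields visible in this listing (luns[] and nr_luns):

static void rrpc_example_iterate(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	/* Visits luns[0] .. luns[nr_luns - 1] in order; i tracks the index. */
	rrpc_for_each_lun(rrpc, rlun, i)
		pr_debug("rrpc: lun %d of %d\n", i, rrpc->nr_luns);
}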
29 static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a) in rrpc_page_invalidate() argument
34 lockdep_assert_held(&rrpc->rev_lock); in rrpc_page_invalidate()
41 div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset); in rrpc_page_invalidate()
47 rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY; in rrpc_page_invalidate()
50 static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba, in rrpc_invalidate_range() argument
55 spin_lock(&rrpc->rev_lock); in rrpc_invalidate_range()
57 struct rrpc_addr *gp = &rrpc->trans_map[i]; in rrpc_invalidate_range()
59 rrpc_page_invalidate(rrpc, gp); in rrpc_invalidate_range()
62 spin_unlock(&rrpc->rev_lock); in rrpc_invalidate_range()
65 static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc, in rrpc_inflight_laddr_acquire() argument
71 rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC); in rrpc_inflight_laddr_acquire()
76 if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) { in rrpc_inflight_laddr_acquire()
77 mempool_free(rqd, rrpc->rq_pool); in rrpc_inflight_laddr_acquire()
84 static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd) in rrpc_inflight_laddr_release() argument
88 rrpc_unlock_laddr(rrpc, inf); in rrpc_inflight_laddr_release()
90 mempool_free(rqd, rrpc->rq_pool); in rrpc_inflight_laddr_release()
93 static void rrpc_discard(struct rrpc *rrpc, struct bio *bio) in rrpc_discard() argument
100 rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len); in rrpc_discard()
110 rrpc_invalidate_range(rrpc, slba, len); in rrpc_discard()
111 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_discard()
114 static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk) in block_is_full() argument
116 return (rblk->next_page == rrpc->dev->pgs_per_blk); in block_is_full()
119 static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk) in block_to_addr() argument
123 return blk->id * rrpc->dev->pgs_per_blk; in block_to_addr()
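The block/page address arithmetic above is linear: each block owns pgs_per_blk consecutive page addresses. A worked example with illustrative geometry (the numbers are assumptions, not values from this listing):

/* Illustrative only: if pgs_per_blk were 256 and blk->id were 3,
 * block_to_addr() would return 3 * 256 = 768, so the block's pages span
 * addresses 768..1023. rrpc_alloc_addr() below hands these out as
 * block_to_addr() + next_page until block_is_full() sees
 * next_page == pgs_per_blk. */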
167 struct rrpc *rrpc = rlun->rrpc; in rrpc_set_lun_cur() local
173 WARN_ON(!block_is_full(rrpc, rlun->cur)); in rrpc_set_lun_cur()
179 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun, in rrpc_get_blk() argument
185 blk = nvm_get_blk(rrpc->dev, rlun->parent, flags); in rrpc_get_blk()
192 bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk); in rrpc_get_blk()
200 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk) in rrpc_put_blk() argument
202 nvm_put_blk(rrpc->dev, rblk->parent); in rrpc_put_blk()
205 static void rrpc_put_blks(struct rrpc *rrpc) in rrpc_put_blks() argument
210 for (i = 0; i < rrpc->nr_luns; i++) { in rrpc_put_blks()
211 rlun = &rrpc->luns[i]; in rrpc_put_blks()
213 rrpc_put_blk(rrpc, rlun->cur); in rrpc_put_blks()
215 rrpc_put_blk(rrpc, rlun->gc_cur); in rrpc_put_blks()
219 static struct rrpc_lun *get_next_lun(struct rrpc *rrpc) in get_next_lun() argument
221 int next = atomic_inc_return(&rrpc->next_lun); in get_next_lun()
223 return &rrpc->luns[next % rrpc->nr_luns]; in get_next_lun()
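A sketch of the round-robin selection above, assuming nr_luns == 4 and the atomic_set(&rrpc->next_lun, -1) seed performed in rrpc_init() further down:

/* atomic_inc_return() yields 0, 1, 2, 3, 4, ... on successive calls, so
 * get_next_lun() returns luns[0], luns[1], luns[2], luns[3], luns[0], ...
 * wrapping modulo nr_luns. */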
226 static void rrpc_gc_kick(struct rrpc *rrpc) in rrpc_gc_kick() argument
231 for (i = 0; i < rrpc->nr_luns; i++) { in rrpc_gc_kick()
232 rlun = &rrpc->luns[i]; in rrpc_gc_kick()
233 queue_work(rrpc->krqd_wq, &rlun->ws_gc); in rrpc_gc_kick()
242 struct rrpc *rrpc = (struct rrpc *)data; in rrpc_gc_timer() local
244 rrpc_gc_kick(rrpc); in rrpc_gc_timer()
245 mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10)); in rrpc_gc_timer()
268 static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk) in rrpc_move_valid_pages() argument
270 struct request_queue *q = rrpc->dev->q; in rrpc_move_valid_pages()
276 int nr_pgs_per_blk = rrpc->dev->pgs_per_blk; in rrpc_move_valid_pages()
289 page = mempool_alloc(rrpc->page_pool, GFP_NOIO); in rrpc_move_valid_pages()
298 spin_lock(&rrpc->rev_lock); in rrpc_move_valid_pages()
300 rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset]; in rrpc_move_valid_pages()
303 spin_unlock(&rrpc->rev_lock); in rrpc_move_valid_pages()
307 rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1); in rrpc_move_valid_pages()
309 spin_unlock(&rrpc->rev_lock); in rrpc_move_valid_pages()
314 spin_unlock(&rrpc->rev_lock); in rrpc_move_valid_pages()
325 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) { in rrpc_move_valid_pages()
327 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_move_valid_pages()
345 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) { in rrpc_move_valid_pages()
347 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_move_valid_pages()
352 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_move_valid_pages()
358 mempool_free(page, rrpc->page_pool); in rrpc_move_valid_pages()
373 struct rrpc *rrpc = gcb->rrpc; in rrpc_block_gc() local
375 struct nvm_dev *dev = rrpc->dev; in rrpc_block_gc()
379 if (rrpc_move_valid_pages(rrpc, rblk)) in rrpc_block_gc()
383 rrpc_put_blk(rrpc, rblk); in rrpc_block_gc()
385 mempool_free(gcb, rrpc->gcb_pool); in rrpc_block_gc()
420 struct rrpc *rrpc = rlun->rrpc; in rrpc_lun_gc() local
425 nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE; in rrpc_lun_gc()
427 if (nr_blocks_need < rrpc->nr_luns) in rrpc_lun_gc()
428 nr_blocks_need = rrpc->nr_luns; in rrpc_lun_gc()
441 BUG_ON(!block_is_full(rrpc, rblock)); in rrpc_lun_gc()
445 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC); in rrpc_lun_gc()
449 gcb->rrpc = rrpc; in rrpc_lun_gc()
453 queue_work(rrpc->kgc_wq, &gcb->ws_gc); in rrpc_lun_gc()
466 struct rrpc *rrpc = gcb->rrpc; in rrpc_gc_queue() local
469 struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset]; in rrpc_gc_queue()
475 mempool_free(gcb, rrpc->gcb_pool); in rrpc_gc_queue()
484 static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc) in rrpc_get_lun_rr() argument
490 return get_next_lun(rrpc); in rrpc_get_lun_rr()
495 max_free = &rrpc->luns[0]; in rrpc_get_lun_rr()
500 rrpc_for_each_lun(rrpc, rlun, i) { in rrpc_get_lun_rr()
509 static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr, in rrpc_update_map() argument
515 BUG_ON(laddr >= rrpc->nr_pages); in rrpc_update_map()
517 gp = &rrpc->trans_map[laddr]; in rrpc_update_map()
518 spin_lock(&rrpc->rev_lock); in rrpc_update_map()
520 rrpc_page_invalidate(rrpc, gp); in rrpc_update_map()
525 rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset]; in rrpc_update_map()
527 spin_unlock(&rrpc->rev_lock); in rrpc_update_map()
532 static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk) in rrpc_alloc_addr() argument
537 if (block_is_full(rrpc, rblk)) in rrpc_alloc_addr()
540 addr = block_to_addr(rrpc, rblk) + rblk->next_page; in rrpc_alloc_addr()
556 static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr, in rrpc_map_page() argument
564 rlun = rrpc_get_lun_rr(rrpc, is_gc); in rrpc_map_page()
567 if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4) in rrpc_map_page()
574 paddr = rrpc_alloc_addr(rrpc, rblk); in rrpc_map_page()
577 rblk = rrpc_get_blk(rrpc, rlun, 0); in rrpc_map_page()
585 paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur); in rrpc_map_page()
587 rblk = rrpc_get_blk(rrpc, rlun, 1); in rrpc_map_page()
594 paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur); in rrpc_map_page()
601 return rrpc_update_map(rrpc, laddr, rblk, paddr); in rrpc_map_page()
607 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk) in rrpc_run_gc() argument
611 gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC); in rrpc_run_gc()
617 gcb->rrpc = rrpc; in rrpc_run_gc()
621 queue_work(rrpc->kgc_wq, &gcb->ws_gc); in rrpc_run_gc()
624 static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd, in rrpc_end_io_write() argument
633 p = &rrpc->trans_map[laddr + i]; in rrpc_end_io_write()
638 if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) in rrpc_end_io_write()
639 rrpc_run_gc(rrpc, rblk); in rrpc_end_io_write()
645 struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance); in rrpc_end_io() local
651 rrpc_end_io_write(rrpc, rrqd, laddr, npages); in rrpc_end_io()
656 rrpc_unlock_rq(rrpc, rqd); in rrpc_end_io()
660 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); in rrpc_end_io()
662 nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata); in rrpc_end_io()
664 mempool_free(rqd, rrpc->rq_pool); in rrpc_end_io()
669 static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio, in rrpc_read_ppalist_rq() argument
678 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) { in rrpc_read_ppalist_rq()
679 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); in rrpc_read_ppalist_rq()
685 BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages)); in rrpc_read_ppalist_rq()
686 gp = &rrpc->trans_map[laddr + i]; in rrpc_read_ppalist_rq()
689 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev, in rrpc_read_ppalist_rq()
693 rrpc_unlock_laddr(rrpc, r); in rrpc_read_ppalist_rq()
694 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, in rrpc_read_ppalist_rq()
705 static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd, in rrpc_read_rq() argument
713 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) in rrpc_read_rq()
716 BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages)); in rrpc_read_rq()
717 gp = &rrpc->trans_map[laddr]; in rrpc_read_rq()
720 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr); in rrpc_read_rq()
723 rrpc_unlock_rq(rrpc, rqd); in rrpc_read_rq()
733 static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio, in rrpc_write_ppalist_rq() argument
742 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) { in rrpc_write_ppalist_rq()
743 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); in rrpc_write_ppalist_rq()
749 p = rrpc_map_page(rrpc, laddr + i, is_gc); in rrpc_write_ppalist_rq()
752 rrpc_unlock_laddr(rrpc, r); in rrpc_write_ppalist_rq()
753 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, in rrpc_write_ppalist_rq()
755 rrpc_gc_kick(rrpc); in rrpc_write_ppalist_rq()
759 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev, in rrpc_write_ppalist_rq()
768 static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio, in rrpc_write_rq() argument
776 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) in rrpc_write_rq()
779 p = rrpc_map_page(rrpc, laddr, is_gc); in rrpc_write_rq()
782 rrpc_unlock_rq(rrpc, rqd); in rrpc_write_rq()
783 rrpc_gc_kick(rrpc); in rrpc_write_rq()
787 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr); in rrpc_write_rq()
794 static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio, in rrpc_setup_rq() argument
798 rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL, in rrpc_setup_rq()
806 return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags, in rrpc_setup_rq()
809 return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages); in rrpc_setup_rq()
813 return rrpc_write_rq(rrpc, bio, rqd, flags); in rrpc_setup_rq()
815 return rrpc_read_rq(rrpc, bio, rqd, flags); in rrpc_setup_rq()
818 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio, in rrpc_submit_io() argument
826 if (bio_size < rrpc->dev->sec_size) in rrpc_submit_io()
828 else if (bio_size > rrpc->dev->max_rq_size) in rrpc_submit_io()
831 err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages); in rrpc_submit_io()
837 rqd->ins = &rrpc->instance; in rrpc_submit_io()
841 err = nvm_submit_io(rrpc->dev, rqd); in rrpc_submit_io()
852 struct rrpc *rrpc = q->queuedata; in rrpc_make_rq() local
857 rrpc_discard(rrpc, bio); in rrpc_make_rq()
861 rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL); in rrpc_make_rq()
869 err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE); in rrpc_make_rq()
880 spin_lock(&rrpc->bio_lock); in rrpc_make_rq()
881 bio_list_add(&rrpc->requeue_bios, bio); in rrpc_make_rq()
882 spin_unlock(&rrpc->bio_lock); in rrpc_make_rq()
883 queue_work(rrpc->kgc_wq, &rrpc->ws_requeue); in rrpc_make_rq()
887 mempool_free(rqd, rrpc->rq_pool); in rrpc_make_rq()
893 struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue); in rrpc_requeue() local
899 spin_lock(&rrpc->bio_lock); in rrpc_requeue()
900 bio_list_merge(&bios, &rrpc->requeue_bios); in rrpc_requeue()
901 bio_list_init(&rrpc->requeue_bios); in rrpc_requeue()
902 spin_unlock(&rrpc->bio_lock); in rrpc_requeue()
905 rrpc_make_rq(rrpc->disk->queue, bio); in rrpc_requeue()
908 static void rrpc_gc_free(struct rrpc *rrpc) in rrpc_gc_free() argument
913 if (rrpc->krqd_wq) in rrpc_gc_free()
914 destroy_workqueue(rrpc->krqd_wq); in rrpc_gc_free()
916 if (rrpc->kgc_wq) in rrpc_gc_free()
917 destroy_workqueue(rrpc->kgc_wq); in rrpc_gc_free()
919 if (!rrpc->luns) in rrpc_gc_free()
922 for (i = 0; i < rrpc->nr_luns; i++) { in rrpc_gc_free()
923 rlun = &rrpc->luns[i]; in rrpc_gc_free()
931 static int rrpc_gc_init(struct rrpc *rrpc) in rrpc_gc_init() argument
933 rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND, in rrpc_gc_init()
934 rrpc->nr_luns); in rrpc_gc_init()
935 if (!rrpc->krqd_wq) in rrpc_gc_init()
938 rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1); in rrpc_gc_init()
939 if (!rrpc->kgc_wq) in rrpc_gc_init()
942 setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc); in rrpc_gc_init()
947 static void rrpc_map_free(struct rrpc *rrpc) in rrpc_map_free() argument
949 vfree(rrpc->rev_trans_map); in rrpc_map_free()
950 vfree(rrpc->trans_map); in rrpc_map_free()
955 struct rrpc *rrpc = (struct rrpc *)private; in rrpc_l2p_update() local
956 struct nvm_dev *dev = rrpc->dev; in rrpc_l2p_update()
957 struct rrpc_addr *addr = rrpc->trans_map + slba; in rrpc_l2p_update()
958 struct rrpc_rev_addr *raddr = rrpc->rev_trans_map; in rrpc_l2p_update()
992 static int rrpc_map_init(struct rrpc *rrpc) in rrpc_map_init() argument
994 struct nvm_dev *dev = rrpc->dev; in rrpc_map_init()
998 rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages); in rrpc_map_init()
999 if (!rrpc->trans_map) in rrpc_map_init()
1002 rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr) in rrpc_map_init()
1003 * rrpc->nr_pages); in rrpc_map_init()
1004 if (!rrpc->rev_trans_map) in rrpc_map_init()
1007 for (i = 0; i < rrpc->nr_pages; i++) { in rrpc_map_init()
1008 struct rrpc_addr *p = &rrpc->trans_map[i]; in rrpc_map_init()
1009 struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i]; in rrpc_map_init()
1020 rrpc_l2p_update, rrpc); in rrpc_map_init()
1034 static int rrpc_core_init(struct rrpc *rrpc) in rrpc_core_init() argument
1056 rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0); in rrpc_core_init()
1057 if (!rrpc->page_pool) in rrpc_core_init()
1060 rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns, in rrpc_core_init()
1062 if (!rrpc->gcb_pool) in rrpc_core_init()
1065 rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache); in rrpc_core_init()
1066 if (!rrpc->rq_pool) in rrpc_core_init()
1069 spin_lock_init(&rrpc->inflights.lock); in rrpc_core_init()
1070 INIT_LIST_HEAD(&rrpc->inflights.reqs); in rrpc_core_init()
1075 static void rrpc_core_free(struct rrpc *rrpc) in rrpc_core_free() argument
1077 mempool_destroy(rrpc->page_pool); in rrpc_core_free()
1078 mempool_destroy(rrpc->gcb_pool); in rrpc_core_free()
1079 mempool_destroy(rrpc->rq_pool); in rrpc_core_free()
1082 static void rrpc_luns_free(struct rrpc *rrpc) in rrpc_luns_free() argument
1084 kfree(rrpc->luns); in rrpc_luns_free()
1087 static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end) in rrpc_luns_init() argument
1089 struct nvm_dev *dev = rrpc->dev; in rrpc_luns_init()
1093 spin_lock_init(&rrpc->rev_lock); in rrpc_luns_init()
1095 rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun), in rrpc_luns_init()
1097 if (!rrpc->luns) in rrpc_luns_init()
1101 for (i = 0; i < rrpc->nr_luns; i++) { in rrpc_luns_init()
1110 rlun = &rrpc->luns[i]; in rrpc_luns_init()
1111 rlun->rrpc = rrpc; in rrpc_luns_init()
1117 rrpc->total_blocks += dev->blks_per_lun; in rrpc_luns_init()
1118 rrpc->nr_pages += dev->sec_per_lun; in rrpc_luns_init()
1121 rrpc->dev->blks_per_lun); in rrpc_luns_init()
1125 for (j = 0; j < rrpc->dev->blks_per_lun; j++) { in rrpc_luns_init()
1140 static void rrpc_free(struct rrpc *rrpc) in rrpc_free() argument
1142 rrpc_gc_free(rrpc); in rrpc_free()
1143 rrpc_map_free(rrpc); in rrpc_free()
1144 rrpc_core_free(rrpc); in rrpc_free()
1145 rrpc_luns_free(rrpc); in rrpc_free()
1147 kfree(rrpc); in rrpc_free()
1152 struct rrpc *rrpc = private; in rrpc_exit() local
1154 del_timer(&rrpc->gc_timer); in rrpc_exit()
1156 flush_workqueue(rrpc->krqd_wq); in rrpc_exit()
1157 flush_workqueue(rrpc->kgc_wq); in rrpc_exit()
1159 rrpc_free(rrpc); in rrpc_exit()
1164 struct rrpc *rrpc = private; in rrpc_capacity() local
1165 struct nvm_dev *dev = rrpc->dev; in rrpc_capacity()
1169 reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4; in rrpc_capacity()
1170 provisioned = rrpc->nr_pages - reserved; in rrpc_capacity()
1172 if (reserved > rrpc->nr_pages) { in rrpc_capacity()
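A worked example of the capacity calculation above; the geometry numbers are illustrative assumptions, not values from this listing:

/* Illustrative only: with nr_luns = 4, max_pages_per_blk = 256 and
 * nr_pages = 1048576, reserved = 4 * 256 * 4 = 4096 pages are held back
 * for garbage collection and provisioned = 1048576 - 4096 = 1044480
 * pages are exposed; the branch above handles the case where reserved
 * would exceed nr_pages. */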
1186 static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk) in rrpc_block_map_update() argument
1188 struct nvm_dev *dev = rrpc->dev; in rrpc_block_map_update()
1194 paddr = block_to_addr(rrpc, rblk) + offset; in rrpc_block_map_update()
1196 pladdr = rrpc->rev_trans_map[paddr].addr; in rrpc_block_map_update()
1200 laddr = &rrpc->trans_map[pladdr]; in rrpc_block_map_update()
1211 static int rrpc_blocks_init(struct rrpc *rrpc) in rrpc_blocks_init() argument
1217 for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) { in rrpc_blocks_init()
1218 rlun = &rrpc->luns[lun_iter]; in rrpc_blocks_init()
1220 for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun; in rrpc_blocks_init()
1223 rrpc_block_map_update(rrpc, rblk); in rrpc_blocks_init()
1230 static int rrpc_luns_configure(struct rrpc *rrpc) in rrpc_luns_configure() argument
1236 for (i = 0; i < rrpc->nr_luns; i++) { in rrpc_luns_configure()
1237 rlun = &rrpc->luns[i]; in rrpc_luns_configure()
1239 rblk = rrpc_get_blk(rrpc, rlun, 0); in rrpc_luns_configure()
1246 rblk = rrpc_get_blk(rrpc, rlun, 1); in rrpc_luns_configure()
1254 rrpc_put_blks(rrpc); in rrpc_luns_configure()
1265 struct rrpc *rrpc; in rrpc_init() local
1274 rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL); in rrpc_init()
1275 if (!rrpc) in rrpc_init()
1278 rrpc->instance.tt = &tt_rrpc; in rrpc_init()
1279 rrpc->dev = dev; in rrpc_init()
1280 rrpc->disk = tdisk; in rrpc_init()
1282 bio_list_init(&rrpc->requeue_bios); in rrpc_init()
1283 spin_lock_init(&rrpc->bio_lock); in rrpc_init()
1284 INIT_WORK(&rrpc->ws_requeue, rrpc_requeue); in rrpc_init()
1286 rrpc->nr_luns = lun_end - lun_begin + 1; in rrpc_init()
1289 atomic_set(&rrpc->next_lun, -1); in rrpc_init()
1291 ret = rrpc_luns_init(rrpc, lun_begin, lun_end); in rrpc_init()
1297 rrpc->poffset = dev->sec_per_lun * lun_begin; in rrpc_init()
1298 rrpc->lun_offset = lun_begin; in rrpc_init()
1300 ret = rrpc_core_init(rrpc); in rrpc_init()
1306 ret = rrpc_map_init(rrpc); in rrpc_init()
1312 ret = rrpc_blocks_init(rrpc); in rrpc_init()
1318 ret = rrpc_luns_configure(rrpc); in rrpc_init()
1324 ret = rrpc_gc_init(rrpc); in rrpc_init()
1335 rrpc->nr_luns, (unsigned long long)rrpc->nr_pages); in rrpc_init()
1337 mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10)); in rrpc_init()
1339 return rrpc; in rrpc_init()
1341 rrpc_free(rrpc); in rrpc_init()