Lines Matching refs:rqd
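
The hits trace the life of an nvm_rq in the rrpc target: acquiring and releasing an inflight logical range, setting up reads and writes, submitting them, and tearing them down on completion. The C fragments interleaved below are minimal sketches reconstructed from these hits; any identifier that does not appear in the listing is an assumption.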
23 struct nvm_rq *rqd, unsigned long flags);
68 struct nvm_rq *rqd; in rrpc_inflight_laddr_acquire() local
71 rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC); in rrpc_inflight_laddr_acquire()
72 if (!rqd) in rrpc_inflight_laddr_acquire()
75 inf = rrpc_get_inflight_rq(rqd); in rrpc_inflight_laddr_acquire()
77 mempool_free(rqd, rrpc->rq_pool); in rrpc_inflight_laddr_acquire()
81 return rqd; in rrpc_inflight_laddr_acquire()
84 static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd) in rrpc_inflight_laddr_release() argument
86 struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd); in rrpc_inflight_laddr_release()
90 mempool_free(rqd, rrpc->rq_pool); in rrpc_inflight_laddr_release()
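
rrpc_inflight_laddr_acquire()/rrpc_inflight_laddr_release() pair a mempool-backed nvm_rq with an inflight lock on a logical address range. A minimal sketch of how the hits fit together; the ERR_PTR(-ENOMEM) return and the rrpc_lock_laddr()/rrpc_unlock_laddr() helpers are assumptions not shown in the listing.

static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
                                        sector_t laddr, unsigned int pages)
{
        struct nvm_rq *rqd;
        struct rrpc_inflight_rq *inf;

        rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
        if (!rqd)
                return ERR_PTR(-ENOMEM);        /* assumed error return */

        inf = rrpc_get_inflight_rq(rqd);
        if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) { /* assumed locker */
                /* range already inflight: give the rqd back and report busy */
                mempool_free(rqd, rrpc->rq_pool);
                return NULL;
        }

        return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
        struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

        rrpc_unlock_laddr(rrpc, inf);           /* assumed unlocker */
        mempool_free(rqd, rrpc->rq_pool);
}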
97 struct nvm_rq *rqd; in rrpc_discard() local
100 rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len); in rrpc_discard()
102 } while (!rqd); in rrpc_discard()
104 if (IS_ERR(rqd)) { in rrpc_discard()
111 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_discard()
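
rrpc_discard() reuses the acquire helper as a blocking range lock: NULL means contention and is retried, an ERR_PTR means allocation failure and fails the bio. A sketch of the body with declarations of slba/len omitted; the yield between retries and the range invalidation are assumptions.

        struct nvm_rq *rqd;

        do {
                /* NULL: the range is still inflight elsewhere, try again */
                rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
                schedule();                     /* assumed yield between retries */
        } while (!rqd);

        if (IS_ERR(rqd)) {                      /* mempool exhaustion, not contention */
                bio_io_error(bio);
                return;
        }

        /* ... invalidate the discarded range in the translation map ... */
        rrpc_inflight_laddr_release(rrpc, rqd);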
272 struct nvm_rq *rqd; in rrpc_move_valid_pages() local
307 rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1); in rrpc_move_valid_pages()
308 if (IS_ERR_OR_NULL(rqd)) { in rrpc_move_valid_pages()
325 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) { in rrpc_move_valid_pages()
327 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_move_valid_pages()
345 if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) { in rrpc_move_valid_pages()
347 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_move_valid_pages()
352 rrpc_inflight_laddr_release(rrpc, rqd); in rrpc_move_valid_pages()
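
During garbage collection the same acquire/submit/release pattern runs once per valid page that has to be moved. A sketch of one iteration of the loop in rrpc_move_valid_pages(); the rev reverse-map entry and the GC submit calls appear in the hits above, while the read-then-rewrite sequencing, the bio reuse and the error label are assumptions.

        rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
        if (IS_ERR_OR_NULL(rqd))
                continue;                       /* page went stale or pool empty: skip it */

        /* read the still-valid page back through the normal I/O path */
        if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
                rrpc_inflight_laddr_release(rrpc, rqd);
                goto io_err;                    /* assumed error label */
        }

        /* ... reset the bio (assumed) and rewrite the page to a fresh block ... */
        if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
                rrpc_inflight_laddr_release(rrpc, rqd);
                goto io_err;
        }

        rrpc_inflight_laddr_release(rrpc, rqd);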
643 static int rrpc_end_io(struct nvm_rq *rqd, int error) in rrpc_end_io() argument
645 struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance); in rrpc_end_io()
646 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); in rrpc_end_io()
647 uint8_t npages = rqd->nr_pages; in rrpc_end_io()
648 sector_t laddr = rrpc_get_laddr(rqd->bio) - npages; in rrpc_end_io()
650 if (bio_data_dir(rqd->bio) == WRITE) in rrpc_end_io()
656 rrpc_unlock_rq(rrpc, rqd); in rrpc_end_io()
657 bio_put(rqd->bio); in rrpc_end_io()
660 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); in rrpc_end_io()
661 if (rqd->metadata) in rrpc_end_io()
662 nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata); in rrpc_end_io()
664 mempool_free(rqd, rrpc->rq_pool); in rrpc_end_io()
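
rrpc_end_io() unwinds everything rrpc_submit_io() set up: write bookkeeping, the inflight lock, the bio reference, the DMA-allocated ppa/metadata lists, and finally the mempool element. A sketch assuming the GC early return, the npages > 1 guard and the rrpc_end_io_write() hook, none of which appear in the hits.

static int rrpc_end_io(struct nvm_rq *rqd, int error)
{
        struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
        struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
        uint8_t npages = rqd->nr_pages;
        sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

        if (bio_data_dir(rqd->bio) == WRITE)
                rrpc_end_io_write(rrpc, rrqd, laddr, npages);   /* assumed hook */

        if (rrqd->flags & NVM_IOTYPE_GC)        /* assumed: GC frees its own rqd */
                return 0;

        rrpc_unlock_rq(rrpc, rqd);
        bio_put(rqd->bio);

        if (npages > 1)                         /* assumed: single-page I/O has no list */
                nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
        if (rqd->metadata)
                nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);

        mempool_free(rqd, rrpc->rq_pool);
        return 0;
}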
670 struct nvm_rq *rqd, unsigned long flags, int npages) in rrpc_read_ppalist_rq() argument
672 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd); in rrpc_read_ppalist_rq()
678 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) { in rrpc_read_ppalist_rq()
679 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); in rrpc_read_ppalist_rq()
689 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev, in rrpc_read_ppalist_rq()
694 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, in rrpc_read_ppalist_rq()
695 rqd->dma_ppa_list); in rrpc_read_ppalist_rq()
700 rqd->opcode = NVM_OP_HBREAD; in rrpc_read_ppalist_rq()
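
The multi-page read path fills rqd->ppa_list by walking the logical-to-physical map under the inflight lock. A sketch of the body of rrpc_read_ppalist_rq() with local declarations omitted; the trans_map lookup, the gp->rblk validity test and the NVM_IO_* return codes are assumptions.

        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
                /* range busy: the caller must requeue, so drop the ppa list now */
                nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
                return NVM_IO_REQUEUE;                  /* assumed return code */
        }

        for (i = 0; i < npages; i++) {
                gp = &rrpc->trans_map[laddr + i];       /* assumed L2P lookup */
                if (gp->rblk) {
                        rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
                                                                gp->addr);
                } else {
                        /* unmapped sector: undo the lock and free the list */
                        rrpc_unlock_laddr(rrpc, r);     /* assumed unlocker */
                        nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
                                                        rqd->dma_ppa_list);
                        return NVM_IO_DONE;             /* assumed return code */
                }
        }

        rqd->opcode = NVM_OP_HBREAD;
        return NVM_IO_OK;                               /* assumed return code */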
705 static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd, in rrpc_read_rq() argument
708 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); in rrpc_read_rq()
713 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) in rrpc_read_rq()
720 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr); in rrpc_read_rq()
723 rrpc_unlock_rq(rrpc, rqd); in rrpc_read_rq()
727 rqd->opcode = NVM_OP_HBREAD; in rrpc_read_rq()
734 struct nvm_rq *rqd, unsigned long flags, int npages) in rrpc_write_ppalist_rq() argument
736 struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd); in rrpc_write_ppalist_rq()
742 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) { in rrpc_write_ppalist_rq()
743 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); in rrpc_write_ppalist_rq()
753 nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, in rrpc_write_ppalist_rq()
754 rqd->dma_ppa_list); in rrpc_write_ppalist_rq()
759 rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev, in rrpc_write_ppalist_rq()
763 rqd->opcode = NVM_OP_HBWRITE; in rrpc_write_ppalist_rq()
769 struct nvm_rq *rqd, unsigned long flags) in rrpc_write_rq() argument
771 struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); in rrpc_write_rq()
776 if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) in rrpc_write_rq()
782 rrpc_unlock_rq(rrpc, rqd); in rrpc_write_rq()
787 rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr); in rrpc_write_rq()
788 rqd->opcode = NVM_OP_HBWRITE; in rrpc_write_rq()
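
The single-page write path maps a fresh physical page and points rqd->ppa_addr at it. A sketch of the body of rrpc_write_rq() with declarations omitted; rrpc_map_page(), the requeue return codes and the rrqd->addr bookkeeping field are assumptions.

        if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
                return NVM_IO_REQUEUE;          /* assumed return code */

        p = rrpc_map_page(rrpc, laddr, is_gc);  /* assumed: allocates the target page */
        if (!p) {
                /* no free pages: drop the lock and let GC make room */
                rrpc_unlock_rq(rrpc, rqd);
                return NVM_IO_REQUEUE;
        }

        rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
        rqd->opcode = NVM_OP_HBWRITE;
        rrqd->addr = p;                         /* assumed: consumed on completion */

        return NVM_IO_OK;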
795 struct nvm_rq *rqd, unsigned long flags, uint8_t npages) in rrpc_setup_rq() argument
798 rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL, in rrpc_setup_rq()
799 &rqd->dma_ppa_list); in rrpc_setup_rq()
800 if (!rqd->ppa_list) { in rrpc_setup_rq()
806 return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags, in rrpc_setup_rq()
809 return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages); in rrpc_setup_rq()
813 return rrpc_write_rq(rrpc, bio, rqd, flags); in rrpc_setup_rq()
815 return rrpc_read_rq(rrpc, bio, rqd, flags); in rrpc_setup_rq()
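
rrpc_setup_rq() only dispatches to the four helpers above: multi-page requests first get a DMA-coherent ppa list, single-page requests use rqd->ppa_addr directly. A sketch assuming the npages > 1 guard, the bio_data_dir() direction check and the NVM_IO_ERR code.

static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
                        struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
        if (npages > 1) {                       /* assumed guard */
                rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
                                                        &rqd->dma_ppa_list);
                if (!rqd->ppa_list)
                        return NVM_IO_ERR;      /* assumed return code */

                if (bio_data_dir(bio) == WRITE) /* assumed direction check */
                        return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
                                                                npages);
                return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
        }

        if (bio_data_dir(bio) == WRITE)
                return rrpc_write_rq(rrpc, bio, rqd, flags);
        return rrpc_read_rq(rrpc, bio, rqd, flags);
}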
819 struct nvm_rq *rqd, unsigned long flags) in rrpc_submit_io() argument
822 struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd); in rrpc_submit_io()
831 err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages); in rrpc_submit_io()
836 rqd->bio = bio; in rrpc_submit_io()
837 rqd->ins = &rrpc->instance; in rrpc_submit_io()
838 rqd->nr_pages = nr_pages; in rrpc_submit_io()
841 err = nvm_submit_io(rrpc->dev, rqd); in rrpc_submit_io()
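
rrpc_submit_io() is the glue: set the request up, then hand it to the LightNVM core with the back-pointers the completion path needs. A sketch of the tail of the function; the bio_get() reference, the rrq->flags assignment and the return codes are assumptions, the rest follows the hits.

        err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
        if (err)
                return err;

        bio_get(bio);                           /* assumed: matched by bio_put() in end_io */
        rqd->bio = bio;
        rqd->ins = &rrpc->instance;             /* lets end_io recover the rrpc instance */
        rqd->nr_pages = nr_pages;
        rrq->flags = flags;                     /* assumed: records GC vs. normal I/O */

        err = nvm_submit_io(rrpc->dev, rqd);
        if (err)
                return NVM_IO_ERR;              /* assumed return code */

        return NVM_IO_OK;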
853 struct nvm_rq *rqd; in rrpc_make_rq() local
861 rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL); in rrpc_make_rq()
862 if (!rqd) { in rrpc_make_rq()
867 memset(rqd, 0, sizeof(struct nvm_rq)); in rrpc_make_rq()
869 err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE); in rrpc_make_rq()
887 mempool_free(rqd, rrpc->rq_pool); in rrpc_make_rq()
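
rrpc_make_rq() is the bio entry point: it takes an nvm_rq from the mempool, zeroes it (mempool memory is not pre-zeroed), and frees it itself only when submission does not hand ownership to the completion path. A sketch of the body with declarations omitted; the NVM_IO_* dispatch is an assumption beyond the hits above.

        rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
        if (!rqd) {
                bio_io_error(bio);
                return;
        }
        memset(rqd, 0, sizeof(struct nvm_rq));

        err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
        switch (err) {
        case NVM_IO_OK:                         /* assumed: rqd now owned by rrpc_end_io() */
                return;
        case NVM_IO_ERR:
                bio_io_error(bio);
                break;
        case NVM_IO_DONE:                       /* assumed: satisfied without device I/O */
                bio_endio(bio);
                break;
        case NVM_IO_REQUEUE:                    /* assumed: bio retried later */
                break;
        }

        mempool_free(rqd, rrpc->rq_pool);       /* only reached when the rqd never left */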