rqd               116 block/blk-rq-qos.c bool rq_depth_calc_max_depth(struct rq_depth *rqd)
rqd               128 block/blk-rq-qos.c 	if (rqd->queue_depth == 1) {
rqd               129 block/blk-rq-qos.c 		if (rqd->scale_step > 0)
rqd               130 block/blk-rq-qos.c 			rqd->max_depth = 1;
rqd               132 block/blk-rq-qos.c 			rqd->max_depth = 2;
rqd               143 block/blk-rq-qos.c 		depth = min_t(unsigned int, rqd->default_depth,
rqd               144 block/blk-rq-qos.c 			      rqd->queue_depth);
rqd               145 block/blk-rq-qos.c 		if (rqd->scale_step > 0)
rqd               146 block/blk-rq-qos.c 			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
rqd               147 block/blk-rq-qos.c 		else if (rqd->scale_step < 0) {
rqd               148 block/blk-rq-qos.c 			unsigned int maxd = 3 * rqd->queue_depth / 4;
rqd               150 block/blk-rq-qos.c 			depth = 1 + ((depth - 1) << -rqd->scale_step);
rqd               157 block/blk-rq-qos.c 		rqd->max_depth = depth;
rqd               164 block/blk-rq-qos.c bool rq_depth_scale_up(struct rq_depth *rqd)
rqd               169 block/blk-rq-qos.c 	if (rqd->scaled_max)
rqd               172 block/blk-rq-qos.c 	rqd->scale_step--;
rqd               174 block/blk-rq-qos.c 	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
rqd               183 block/blk-rq-qos.c bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
rqd               190 block/blk-rq-qos.c 	if (rqd->max_depth == 1)
rqd               193 block/blk-rq-qos.c 	if (rqd->scale_step < 0 && hard_throttle)
rqd               194 block/blk-rq-qos.c 		rqd->scale_step = 0;
rqd               196 block/blk-rq-qos.c 		rqd->scale_step++;
rqd               198 block/blk-rq-qos.c 	rqd->scaled_max = false;
rqd               199 block/blk-rq-qos.c 	rq_depth_calc_max_depth(rqd);
rqd               130 block/blk-rq-qos.h bool rq_depth_scale_up(struct rq_depth *rqd);
rqd               131 block/blk-rq-qos.h bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
rqd               132 block/blk-rq-qos.h bool rq_depth_calc_max_depth(struct rq_depth *rqd);
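
The blk-rq-qos hits above expose the depth-scaling arithmetic of rq_depth_calc_max_depth(): a positive scale_step shifts the depth down (throttling), a negative one shifts it up toward a ceiling derived from 3/4 of the queue depth, and a queue depth of 1 can only toggle between 1 and 2. Below is a minimal stand-alone user-space sketch of that arithmetic only; the clamp against maxd is not visible in the hits above and is assumed, and struct rq_depth, locking and the bool return that signals saturation are all omitted.

	#include <stdio.h>

	/* Stand-alone model of the math seen in rq_depth_calc_max_depth(). */
	static unsigned int calc_max_depth(unsigned int queue_depth,
					   unsigned int default_depth, int scale_step)
	{
		unsigned int depth;

		/* A queue depth of 1 can only toggle between 1 and 2. */
		if (queue_depth == 1)
			return scale_step > 0 ? 1 : 2;

		depth = default_depth < queue_depth ? default_depth : queue_depth;
		if (scale_step > 0) {
			/* Throttling: roughly halve the depth per positive step. */
			depth = 1 + ((depth - 1) >> (scale_step < 31 ? scale_step : 31));
		} else if (scale_step < 0) {
			/* Boosting: double per negative step; the cap at 3/4 of the
			 * queue depth (maxd above) is assumed here. */
			unsigned int maxd = 3 * queue_depth / 4;

			depth = 1 + ((depth - 1) << -scale_step);
			if (depth > maxd)
				depth = maxd;
		}
		return depth;
	}

	int main(void)
	{
		int step;

		for (step = -2; step <= 3; step++)
			printf("scale_step %2d -> max_depth %u\n",
			       step, calc_max_depth(64, 64, step));
		return 0;
	}

As the rq_depth_scale_up()/rq_depth_scale_down() hits suggest, the callers only adjust scale_step and then recompute max_depth through this function, with scaled_max recording whether scaling up has already hit the ceiling.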
rqd               237 block/blk-wbt.c 	struct rq_depth *rqd = &rwb->rq_depth;
rqd               281 block/blk-wbt.c 	if (rqd->scale_step)
rqd               290 block/blk-wbt.c 	struct rq_depth *rqd = &rwb->rq_depth;
rqd               292 block/blk-wbt.c 	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
rqd               293 block/blk-wbt.c 			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
rqd               330 block/blk-wbt.c 	struct rq_depth *rqd = &rwb->rq_depth;
rqd               332 block/blk-wbt.c 	if (rqd->scale_step > 0) {
rqd               340 block/blk-wbt.c 					int_sqrt((rqd->scale_step + 1) << 8));
rqd               355 block/blk-wbt.c 	struct rq_depth *rqd = &rwb->rq_depth;
rqd               361 block/blk-wbt.c 	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
rqd               392 block/blk-wbt.c 		if (rqd->scale_step > 0)
rqd               394 block/blk-wbt.c 		else if (rqd->scale_step < 0)
rqd               404 block/blk-wbt.c 	if (rqd->scale_step || inflight)
rqd               410 block/blk-wbt.c 	struct rq_depth *rqd = &rwb->rq_depth;
rqd               412 block/blk-wbt.c 	rqd->scale_step = 0;
rqd               413 block/blk-wbt.c 	rqd->scaled_max = false;
rqd               415 block/blk-wbt.c 	rq_depth_calc_max_depth(rqd);
rqd               539 drivers/dma/pl330.c 	struct list_head rqd;
rqd              1702 drivers/dma/pl330.c 			list_add_tail(&descdone->rqd, &pl330->req_done);
rqd              1709 drivers/dma/pl330.c 					    struct dma_pl330_desc, rqd);
rqd              1710 drivers/dma/pl330.c 		list_del(&descdone->rqd);
rqd               611 drivers/lightnvm/core.c static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
rqd               613 drivers/lightnvm/core.c 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               615 drivers/lightnvm/core.c 	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
rqd               618 drivers/lightnvm/core.c static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
rqd               620 drivers/lightnvm/core.c 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               622 drivers/lightnvm/core.c 	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
rqd               676 drivers/lightnvm/core.c static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
rqd               685 drivers/lightnvm/core.c 		rqd->nr_ppas = nr_ppas;
rqd               686 drivers/lightnvm/core.c 		rqd->ppa_addr = ppas[0];
rqd               691 drivers/lightnvm/core.c 	rqd->nr_ppas = nr_ppas;
rqd               692 drivers/lightnvm/core.c 	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
rqd               693 drivers/lightnvm/core.c 	if (!rqd->ppa_list) {
rqd               699 drivers/lightnvm/core.c 	rqd->nr_ppas *= plane_cnt;
rqd               705 drivers/lightnvm/core.c 			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
rqd               713 drivers/lightnvm/core.c 			struct nvm_rq *rqd)
rqd               715 drivers/lightnvm/core.c 	if (!rqd->ppa_list)
rqd               718 drivers/lightnvm/core.c 	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
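
nvm_set_rqd_ppalist() above keeps a single PPA inline in rqd->ppa_addr but DMA-allocates rqd->ppa_list for multi-PPA requests and replicates each address once per plane, grouping the copies plane by plane at index (pl_idx * nr_ppas) + i after scaling nr_ppas by plane_cnt. A small user-space sketch of that index layout follows; plain integers stand in for struct ppa_addr, and stamping the plane number into each copy is only hinted at in the listing.

	#include <stdio.h>
	#include <stdlib.h>

	/* Expand nr_ppas addresses across plane_cnt planes, grouped by plane,
	 * mirroring the (pl_idx * nr_ppas) + i indexing in nvm_set_rqd_ppalist(). */
	static unsigned long *expand_ppalist(const unsigned long *ppas, int nr_ppas,
					     int plane_cnt)
	{
		unsigned long *list = malloc(sizeof(*list) * nr_ppas * plane_cnt);
		int pl_idx, i;

		if (!list)
			return NULL;

		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++)
			for (i = 0; i < nr_ppas; i++)
				/* The kernel also writes the plane index into the
				 * address itself; a plain copy shows the layout. */
				list[(pl_idx * nr_ppas) + i] = ppas[i];

		return list;
	}

	int main(void)
	{
		unsigned long ppas[] = { 0x10, 0x11, 0x12 };
		unsigned long *list = expand_ppalist(ppas, 3, 2);
		int i;

		for (i = 0; list && i < 6; i++)
			printf("ppa_list[%d] = 0x%lx\n", i, list[i]);
		free(list);
		return 0;
	}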
rqd               721 drivers/lightnvm/core.c static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
rqd               728 drivers/lightnvm/core.c 	if (rqd->is_seq)
rqd               731 drivers/lightnvm/core.c 	if (rqd->opcode == NVM_OP_PREAD)
rqd               733 drivers/lightnvm/core.c 	else if (rqd->opcode == NVM_OP_PWRITE)
rqd               739 drivers/lightnvm/core.c int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
rqd               747 drivers/lightnvm/core.c 	nvm_rq_tgt_to_dev(tgt_dev, rqd);
rqd               749 drivers/lightnvm/core.c 	rqd->dev = tgt_dev;
rqd               750 drivers/lightnvm/core.c 	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
rqd               753 drivers/lightnvm/core.c 	ret = dev->ops->submit_io(dev, rqd, buf);
rqd               755 drivers/lightnvm/core.c 		nvm_rq_dev_to_tgt(tgt_dev, rqd);
rqd               760 drivers/lightnvm/core.c static void nvm_sync_end_io(struct nvm_rq *rqd)
rqd               762 drivers/lightnvm/core.c 	struct completion *waiting = rqd->private;
rqd               767 drivers/lightnvm/core.c static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
rqd               773 drivers/lightnvm/core.c 	rqd->end_io = nvm_sync_end_io;
rqd               774 drivers/lightnvm/core.c 	rqd->private = &wait;
rqd               776 drivers/lightnvm/core.c 	ret = dev->ops->submit_io(dev, rqd, buf);
rqd               785 drivers/lightnvm/core.c int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
rqd               794 drivers/lightnvm/core.c 	nvm_rq_tgt_to_dev(tgt_dev, rqd);
rqd               796 drivers/lightnvm/core.c 	rqd->dev = tgt_dev;
rqd               797 drivers/lightnvm/core.c 	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
rqd               799 drivers/lightnvm/core.c 	ret = nvm_submit_io_wait(dev, rqd, buf);
rqd               805 drivers/lightnvm/core.c void nvm_end_io(struct nvm_rq *rqd)
rqd               807 drivers/lightnvm/core.c 	struct nvm_tgt_dev *tgt_dev = rqd->dev;
rqd               811 drivers/lightnvm/core.c 		nvm_rq_dev_to_tgt(tgt_dev, rqd);
rqd               813 drivers/lightnvm/core.c 	if (rqd->end_io)
rqd               814 drivers/lightnvm/core.c 		rqd->end_io(rqd);
rqd               818 drivers/lightnvm/core.c static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
rqd               823 drivers/lightnvm/core.c 	rqd->dev = NULL;
rqd               824 drivers/lightnvm/core.c 	rqd->flags = nvm_set_flags(&dev->geo, rqd);
rqd               826 drivers/lightnvm/core.c 	return nvm_submit_io_wait(dev, rqd, NULL);
rqd               831 drivers/lightnvm/core.c 	struct nvm_rq rqd = { NULL };
rqd               845 drivers/lightnvm/core.c 	rqd.bio = &bio;
rqd               846 drivers/lightnvm/core.c 	rqd.opcode = NVM_OP_PREAD;
rqd               847 drivers/lightnvm/core.c 	rqd.is_seq = 1;
rqd               848 drivers/lightnvm/core.c 	rqd.nr_ppas = 1;
rqd               849 drivers/lightnvm/core.c 	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
rqd               851 drivers/lightnvm/core.c 	ret = nvm_submit_io_sync_raw(dev, &rqd);
rqd               857 drivers/lightnvm/core.c 	return rqd.error;
rqd              1074 drivers/lightnvm/core.c 	struct nvm_rq rqd;
rqd              1085 drivers/lightnvm/core.c 	memset(&rqd, 0, sizeof(struct nvm_rq));
rqd              1087 drivers/lightnvm/core.c 	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
rqd              1088 drivers/lightnvm/core.c 	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
rqd              1090 drivers/lightnvm/core.c 	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
rqd              1091 drivers/lightnvm/core.c 	nvm_free_rqd_ppalist(tgt_dev, &rqd);
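
nvm_submit_io_wait() above turns the asynchronous ->submit_io() path into a synchronous one: it points rqd->end_io at nvm_sync_end_io() and rqd->private at an on-stack completion, submits, and blocks until nvm_end_io() fires the callback. The following is a user-space analog of that pattern only, with pthread primitives standing in for the kernel's struct completion and a thread standing in for the device completion context; every name here is illustrative, not kernel API. Build with -pthread.

	#include <pthread.h>
	#include <stdio.h>

	struct completion {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int done;
	};

	struct fake_rq {
		void (*end_io)(struct fake_rq *);
		void *private;
		int error;
	};

	static void complete(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->done = 1;
		pthread_cond_signal(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}

	static void wait_for_completion(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		while (!c->done)
			pthread_cond_wait(&c->cond, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}

	/* Analog of nvm_sync_end_io(): wake whoever is waiting on the request. */
	static void sync_end_io(struct fake_rq *rq)
	{
		complete(rq->private);
	}

	/* Analog of the device completion path that ends up in nvm_end_io(). */
	static void *device_thread(void *arg)
	{
		struct fake_rq *rq = arg;

		rq->error = 0;		/* pretend the I/O succeeded */
		rq->end_io(rq);
		return NULL;
	}

	int main(void)
	{
		struct completion wait = { PTHREAD_MUTEX_INITIALIZER,
					   PTHREAD_COND_INITIALIZER, 0 };
		struct fake_rq rq = { .end_io = sync_end_io, .private = &wait };
		pthread_t dev;

		pthread_create(&dev, NULL, device_thread, &rq);
		wait_for_completion(&wait);
		pthread_join(dev, NULL);
		printf("request completed, error=%d\n", rq.error);
		return 0;
	}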
rqd                79 drivers/lightnvm/pblk-core.c static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
rqd                87 drivers/lightnvm/pblk-core.c 	line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
rqd                88 drivers/lightnvm/pblk-core.c 	pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
rqd                93 drivers/lightnvm/pblk-core.c 	if (rqd->error) {
rqd                95 drivers/lightnvm/pblk-core.c 				&rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
rqd                98 drivers/lightnvm/pblk-core.c 		pblk_mark_bb(pblk, line, rqd->ppa_addr);
rqd               101 drivers/lightnvm/pblk-core.c 				&rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
rqd               106 drivers/lightnvm/pblk-core.c 	trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
rqd               113 drivers/lightnvm/pblk-core.c static void pblk_end_io_erase(struct nvm_rq *rqd)
rqd               115 drivers/lightnvm/pblk-core.c 	struct pblk *pblk = rqd->private;
rqd               117 drivers/lightnvm/pblk-core.c 	__pblk_end_io_erase(pblk, rqd);
rqd               118 drivers/lightnvm/pblk-core.c 	mempool_free(rqd, &pblk->e_rq_pool);
rqd               241 drivers/lightnvm/pblk-core.c int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
rqd               245 drivers/lightnvm/pblk-core.c 	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
rqd               246 drivers/lightnvm/pblk-core.c 							&rqd->dma_meta_list);
rqd               247 drivers/lightnvm/pblk-core.c 	if (!rqd->meta_list)
rqd               250 drivers/lightnvm/pblk-core.c 	if (rqd->nr_ppas == 1)
rqd               253 drivers/lightnvm/pblk-core.c 	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
rqd               254 drivers/lightnvm/pblk-core.c 	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);
rqd               259 drivers/lightnvm/pblk-core.c void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
rqd               263 drivers/lightnvm/pblk-core.c 	if (rqd->meta_list)
rqd               264 drivers/lightnvm/pblk-core.c 		nvm_dev_dma_free(dev->parent, rqd->meta_list,
rqd               265 drivers/lightnvm/pblk-core.c 				rqd->dma_meta_list);
rqd               272 drivers/lightnvm/pblk-core.c 	struct nvm_rq *rqd;
rqd               290 drivers/lightnvm/pblk-core.c 	rqd = mempool_alloc(pool, GFP_KERNEL);
rqd               291 drivers/lightnvm/pblk-core.c 	memset(rqd, 0, rq_size);
rqd               293 drivers/lightnvm/pblk-core.c 	return rqd;
rqd               297 drivers/lightnvm/pblk-core.c void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
rqd               303 drivers/lightnvm/pblk-core.c 		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
rqd               319 drivers/lightnvm/pblk-core.c 	pblk_free_rqd_meta(pblk, rqd);
rqd               320 drivers/lightnvm/pblk-core.c 	mempool_free(rqd, pool);
rqd               473 drivers/lightnvm/pblk-core.c void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
rqd               477 drivers/lightnvm/pblk-core.c 	pblk_print_failed_rqd(pblk, rqd, rqd->error);
rqd               481 drivers/lightnvm/pblk-core.c void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
rqd               484 drivers/lightnvm/pblk-core.c 	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
rqd               489 drivers/lightnvm/pblk-core.c 	switch (rqd->error) {
rqd               498 drivers/lightnvm/pblk-core.c 		pblk_err(pblk, "unknown read error:%d\n", rqd->error);
rqd               501 drivers/lightnvm/pblk-core.c 	pblk_print_failed_rqd(pblk, rqd, rqd->error);
rqd               510 drivers/lightnvm/pblk-core.c int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
rqd               517 drivers/lightnvm/pblk-core.c 	if (pblk_check_io(pblk, rqd))
rqd               521 drivers/lightnvm/pblk-core.c 	return nvm_submit_io(dev, rqd, buf);
rqd               524 drivers/lightnvm/pblk-core.c void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
rqd               526 drivers/lightnvm/pblk-core.c 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               530 drivers/lightnvm/pblk-core.c 	for (i = 0; i < rqd->nr_ppas; i++) {
rqd               544 drivers/lightnvm/pblk-core.c int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
rqd               552 drivers/lightnvm/pblk-core.c 	if (pblk_check_io(pblk, rqd))
rqd               556 drivers/lightnvm/pblk-core.c 	ret = nvm_submit_io_sync(dev, rqd, buf);
rqd               559 drivers/lightnvm/pblk-core.c 	    rqd->opcode == NVM_OP_PWRITE)
rqd               560 drivers/lightnvm/pblk-core.c 		pblk_check_chunk_state_update(pblk, rqd);
rqd               565 drivers/lightnvm/pblk-core.c static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
rqd               568 drivers/lightnvm/pblk-core.c 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               572 drivers/lightnvm/pblk-core.c 	ret = pblk_submit_io_sync(pblk, rqd, buf);
rqd               681 drivers/lightnvm/pblk-core.c 	struct nvm_rq rqd;
rqd               685 drivers/lightnvm/pblk-core.c 	memset(&rqd, 0, sizeof(struct nvm_rq));
rqd               687 drivers/lightnvm/pblk-core.c 	ret = pblk_alloc_rqd_meta(pblk, &rqd);
rqd               691 drivers/lightnvm/pblk-core.c 	rqd.opcode = NVM_OP_PREAD;
rqd               692 drivers/lightnvm/pblk-core.c 	rqd.nr_ppas = lm->smeta_sec;
rqd               693 drivers/lightnvm/pblk-core.c 	rqd.is_seq = 1;
rqd               694 drivers/lightnvm/pblk-core.c 	ppa_list = nvm_rq_to_ppa_list(&rqd);
rqd               699 drivers/lightnvm/pblk-core.c 	ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
rqd               707 drivers/lightnvm/pblk-core.c 	if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
rqd               708 drivers/lightnvm/pblk-core.c 		pblk_log_read_err(pblk, &rqd);
rqd               713 drivers/lightnvm/pblk-core.c 	pblk_free_rqd_meta(pblk, &rqd);
rqd               722 drivers/lightnvm/pblk-core.c 	struct nvm_rq rqd;
rqd               727 drivers/lightnvm/pblk-core.c 	memset(&rqd, 0, sizeof(struct nvm_rq));
rqd               729 drivers/lightnvm/pblk-core.c 	ret = pblk_alloc_rqd_meta(pblk, &rqd);
rqd               733 drivers/lightnvm/pblk-core.c 	rqd.opcode = NVM_OP_PWRITE;
rqd               734 drivers/lightnvm/pblk-core.c 	rqd.nr_ppas = lm->smeta_sec;
rqd               735 drivers/lightnvm/pblk-core.c 	rqd.is_seq = 1;
rqd               736 drivers/lightnvm/pblk-core.c 	ppa_list = nvm_rq_to_ppa_list(&rqd);
rqd               740 drivers/lightnvm/pblk-core.c 							   rqd.meta_list, i);
rqd               746 drivers/lightnvm/pblk-core.c 	ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
rqd               754 drivers/lightnvm/pblk-core.c 	if (rqd.error) {
rqd               755 drivers/lightnvm/pblk-core.c 		pblk_log_write_err(pblk, &rqd);
rqd               760 drivers/lightnvm/pblk-core.c 	pblk_free_rqd_meta(pblk, &rqd);
rqd               772 drivers/lightnvm/pblk-core.c 	struct nvm_rq rqd;
rqd               791 drivers/lightnvm/pblk-core.c 	memset(&rqd, 0, sizeof(struct nvm_rq));
rqd               796 drivers/lightnvm/pblk-core.c 	rqd.meta_list = meta_list;
rqd               797 drivers/lightnvm/pblk-core.c 	rqd.ppa_list = ppa_list_buf;
rqd               798 drivers/lightnvm/pblk-core.c 	rqd.dma_meta_list = dma_meta_list;
rqd               799 drivers/lightnvm/pblk-core.c 	rqd.dma_ppa_list = dma_ppa_list;
rqd               800 drivers/lightnvm/pblk-core.c 	rqd.opcode = NVM_OP_PREAD;
rqd               801 drivers/lightnvm/pblk-core.c 	rqd.nr_ppas = rq_ppas;
rqd               802 drivers/lightnvm/pblk-core.c 	ppa_list = nvm_rq_to_ppa_list(&rqd);
rqd               804 drivers/lightnvm/pblk-core.c 	for (i = 0; i < rqd.nr_ppas; ) {
rqd               809 drivers/lightnvm/pblk-core.c 			rqd.is_seq = 1;
rqd               831 drivers/lightnvm/pblk-core.c 	ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
rqd               839 drivers/lightnvm/pblk-core.c 	if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
rqd               840 drivers/lightnvm/pblk-core.c 		pblk_log_read_err(pblk, &rqd);
rqd               851 drivers/lightnvm/pblk-core.c 	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
rqd               855 drivers/lightnvm/pblk-core.c static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd               858 drivers/lightnvm/pblk-core.c 	rqd->opcode = NVM_OP_ERASE;
rqd               859 drivers/lightnvm/pblk-core.c 	rqd->ppa_addr = ppa;
rqd               860 drivers/lightnvm/pblk-core.c 	rqd->nr_ppas = 1;
rqd               861 drivers/lightnvm/pblk-core.c 	rqd->is_seq = 1;
rqd               862 drivers/lightnvm/pblk-core.c 	rqd->bio = NULL;
rqd               867 drivers/lightnvm/pblk-core.c 	struct nvm_rq rqd = {NULL};
rqd               873 drivers/lightnvm/pblk-core.c 	pblk_setup_e_rq(pblk, &rqd, ppa);
rqd               878 drivers/lightnvm/pblk-core.c 	ret = pblk_submit_io_sync(pblk, &rqd, NULL);
rqd               879 drivers/lightnvm/pblk-core.c 	rqd.private = pblk;
rqd               880 drivers/lightnvm/pblk-core.c 	__pblk_end_io_erase(pblk, &rqd);
rqd              1444 drivers/lightnvm/pblk-core.c void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
rqd              1446 drivers/lightnvm/pblk-core.c 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd              1449 drivers/lightnvm/pblk-core.c 	for (i = 0; i < rqd->nr_ppas; i++)
rqd              1691 drivers/lightnvm/pblk-core.c 	struct nvm_rq *rqd;
rqd              1694 drivers/lightnvm/pblk-core.c 	rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
rqd              1696 drivers/lightnvm/pblk-core.c 	pblk_setup_e_rq(pblk, rqd, ppa);
rqd              1698 drivers/lightnvm/pblk-core.c 	rqd->end_io = pblk_end_io_erase;
rqd              1699 drivers/lightnvm/pblk-core.c 	rqd->private = pblk;
rqd              1707 drivers/lightnvm/pblk-core.c 	err = pblk_submit_io(pblk, rqd, NULL);
rqd              2115 drivers/lightnvm/pblk-core.c void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
rqd              2121 drivers/lightnvm/pblk-core.c 		buffer = rqd->meta_list;
rqd              2127 drivers/lightnvm/pblk-core.c 			rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
rqd              2133 drivers/lightnvm/pblk-core.c void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
rqd              2135 drivers/lightnvm/pblk-core.c 	void *meta_list = rqd->meta_list;
rqd              2142 drivers/lightnvm/pblk-core.c 	page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
rqd              2144 drivers/lightnvm/pblk-core.c 	for (; i < rqd->nr_ppas; i++)
rqd                96 drivers/lightnvm/pblk-map.c int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
rqd               100 drivers/lightnvm/pblk-map.c 	void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
rqd               102 drivers/lightnvm/pblk-map.c 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               108 drivers/lightnvm/pblk-map.c 	for (i = off; i < rqd->nr_ppas; i += min) {
rqd               122 drivers/lightnvm/pblk-map.c int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd               129 drivers/lightnvm/pblk-map.c 	void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
rqd               131 drivers/lightnvm/pblk-map.c 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               139 drivers/lightnvm/pblk-map.c 	for (i = 0; i < rqd->nr_ppas; i += min) {
rqd               155 drivers/lightnvm/pblk-map.c 			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
rqd               170 drivers/lightnvm/pblk-map.c 			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
rqd               550 drivers/lightnvm/pblk-rb.c unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
rqd               556 drivers/lightnvm/pblk-rb.c 	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
rqd               557 drivers/lightnvm/pblk-rb.c 	struct bio *bio = rqd->bio;
rqd                40 drivers/lightnvm/pblk-read.c static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd                44 drivers/lightnvm/pblk-read.c 	void *meta_list = rqd->meta_list;
rqd                48 drivers/lightnvm/pblk-read.c 	nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
rqd                58 drivers/lightnvm/pblk-read.c 		if (pblk_ppa_empty(rqd->ppa_list[i])) {
rqd                62 drivers/lightnvm/pblk-read.c 		} else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
rqd                69 drivers/lightnvm/pblk-read.c 							rqd->ppa_list[i])) {
rqd                99 drivers/lightnvm/pblk-read.c 		rqd->is_seq = 1;
rqd               109 drivers/lightnvm/pblk-read.c static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
rqd               112 drivers/lightnvm/pblk-read.c 	void *meta_list = rqd->meta_list;
rqd               113 drivers/lightnvm/pblk-read.c 	int nr_lbas = rqd->nr_ppas;
rqd               128 drivers/lightnvm/pblk-read.c 			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               142 drivers/lightnvm/pblk-read.c static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
rqd               145 drivers/lightnvm/pblk-read.c 	void *meta_lba_list = rqd->meta_list;
rqd               164 drivers/lightnvm/pblk-read.c 			struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               176 drivers/lightnvm/pblk-read.c 	WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
rqd               187 drivers/lightnvm/pblk-read.c static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
rqd               191 drivers/lightnvm/pblk-read.c 	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
rqd               192 drivers/lightnvm/pblk-read.c 	struct bio *int_bio = rqd->bio;
rqd               197 drivers/lightnvm/pblk-read.c 	if (rqd->error)
rqd               198 drivers/lightnvm/pblk-read.c 		pblk_log_read_err(pblk, rqd);
rqd               200 drivers/lightnvm/pblk-read.c 	pblk_read_check_seq(pblk, rqd, r_ctx->lba);
rqd               204 drivers/lightnvm/pblk-read.c 		pblk_rq_to_line_put(pblk, rqd);
rqd               207 drivers/lightnvm/pblk-read.c 	atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
rqd               208 drivers/lightnvm/pblk-read.c 	atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
rqd               211 drivers/lightnvm/pblk-read.c 	pblk_free_rqd(pblk, rqd, PBLK_READ);
rqd               215 drivers/lightnvm/pblk-read.c static void pblk_end_io_read(struct nvm_rq *rqd)
rqd               217 drivers/lightnvm/pblk-read.c 	struct pblk *pblk = rqd->private;
rqd               218 drivers/lightnvm/pblk-read.c 	struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
rqd               221 drivers/lightnvm/pblk-read.c 	pblk_end_user_read(bio, rqd->error);
rqd               222 drivers/lightnvm/pblk-read.c 	__pblk_end_io_read(pblk, rqd, true);
rqd               225 drivers/lightnvm/pblk-read.c static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
rqd               228 drivers/lightnvm/pblk-read.c 	struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
rqd               260 drivers/lightnvm/pblk-read.c 		rqd->ppa_addr = ppa;
rqd               272 drivers/lightnvm/pblk-read.c 	struct nvm_rq *rqd;
rqd               278 drivers/lightnvm/pblk-read.c 	rqd = pblk_alloc_rqd(pblk, PBLK_READ);
rqd               280 drivers/lightnvm/pblk-read.c 	rqd->opcode = NVM_OP_PREAD;
rqd               281 drivers/lightnvm/pblk-read.c 	rqd->nr_ppas = nr_secs;
rqd               282 drivers/lightnvm/pblk-read.c 	rqd->private = pblk;
rqd               283 drivers/lightnvm/pblk-read.c 	rqd->end_io = pblk_end_io_read;
rqd               285 drivers/lightnvm/pblk-read.c 	r_ctx = nvm_rq_to_pdu(rqd);
rqd               289 drivers/lightnvm/pblk-read.c 	if (pblk_alloc_rqd_meta(pblk, rqd)) {
rqd               291 drivers/lightnvm/pblk-read.c 		pblk_free_rqd(pblk, rqd, PBLK_READ);
rqd               302 drivers/lightnvm/pblk-read.c 		nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
rqd               305 drivers/lightnvm/pblk-read.c 		pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);
rqd               309 drivers/lightnvm/pblk-read.c 	rqd->bio = int_bio; /* internal bio */
rqd               311 drivers/lightnvm/pblk-read.c 	if (from_cache && nr_secs == rqd->nr_ppas) {
rqd               315 drivers/lightnvm/pblk-read.c 		__pblk_end_io_read(pblk, rqd, false);
rqd               316 drivers/lightnvm/pblk-read.c 	} else if (nr_secs != rqd->nr_ppas) {
rqd               335 drivers/lightnvm/pblk-read.c 		rqd->nr_ppas = nr_secs;
rqd               336 drivers/lightnvm/pblk-read.c 		if (rqd->nr_ppas == 1)
rqd               337 drivers/lightnvm/pblk-read.c 			rqd->ppa_addr = rqd->ppa_list[0];
rqd               345 drivers/lightnvm/pblk-read.c 	} else if (pblk_submit_io(pblk, rqd, NULL)) {
rqd               347 drivers/lightnvm/pblk-read.c 		rqd->error = -ENODEV;
rqd               348 drivers/lightnvm/pblk-read.c 		pblk_end_io_read(rqd);
rqd               352 drivers/lightnvm/pblk-read.c static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
rqd               373 drivers/lightnvm/pblk-read.c 		rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
rqd               383 drivers/lightnvm/pblk-read.c static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
rqd               407 drivers/lightnvm/pblk-read.c 	rqd->ppa_addr = ppa_l2p;
rqd               420 drivers/lightnvm/pblk-read.c 	struct nvm_rq rqd;
rqd               423 drivers/lightnvm/pblk-read.c 	memset(&rqd, 0, sizeof(struct nvm_rq));
rqd               425 drivers/lightnvm/pblk-read.c 	ret = pblk_alloc_rqd_meta(pblk, &rqd);
rqd               430 drivers/lightnvm/pblk-read.c 		gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
rqd               435 drivers/lightnvm/pblk-read.c 			rqd.ppa_addr = rqd.ppa_list[0];
rqd               437 drivers/lightnvm/pblk-read.c 		gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
rqd               445 drivers/lightnvm/pblk-read.c 	rqd.opcode = NVM_OP_PREAD;
rqd               446 drivers/lightnvm/pblk-read.c 	rqd.nr_ppas = gc_rq->secs_to_gc;
rqd               448 drivers/lightnvm/pblk-read.c 	if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
rqd               453 drivers/lightnvm/pblk-read.c 	pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);
rqd               457 drivers/lightnvm/pblk-read.c 	if (rqd.error) {
rqd               460 drivers/lightnvm/pblk-read.c 		pblk_print_failed_rqd(pblk, &rqd, rqd.error);
rqd               471 drivers/lightnvm/pblk-read.c 	pblk_free_rqd_meta(pblk, &rqd);
rqd               475 drivers/lightnvm/pblk-read.c 	pblk_free_rqd_meta(pblk, &rqd);
rqd               145 drivers/lightnvm/pblk-recovery.c 	struct nvm_rq *rqd;
rqd               158 drivers/lightnvm/pblk-recovery.c static void pblk_end_io_recov(struct nvm_rq *rqd)
rqd               160 drivers/lightnvm/pblk-recovery.c 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               161 drivers/lightnvm/pblk-recovery.c 	struct pblk_pad_rq *pad_rq = rqd->private;
rqd               166 drivers/lightnvm/pblk-recovery.c 	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
rqd               180 drivers/lightnvm/pblk-recovery.c 	struct nvm_rq *rqd;
rqd               214 drivers/lightnvm/pblk-recovery.c 	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
rqd               216 drivers/lightnvm/pblk-recovery.c 	ret = pblk_alloc_rqd_meta(pblk, rqd);
rqd               218 drivers/lightnvm/pblk-recovery.c 		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
rqd               222 drivers/lightnvm/pblk-recovery.c 	rqd->bio = NULL;
rqd               223 drivers/lightnvm/pblk-recovery.c 	rqd->opcode = NVM_OP_PWRITE;
rqd               224 drivers/lightnvm/pblk-recovery.c 	rqd->is_seq = 1;
rqd               225 drivers/lightnvm/pblk-recovery.c 	rqd->nr_ppas = rq_ppas;
rqd               226 drivers/lightnvm/pblk-recovery.c 	rqd->end_io = pblk_end_io_recov;
rqd               227 drivers/lightnvm/pblk-recovery.c 	rqd->private = pad_rq;
rqd               229 drivers/lightnvm/pblk-recovery.c 	ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               230 drivers/lightnvm/pblk-recovery.c 	meta_list = rqd->meta_list;
rqd               232 drivers/lightnvm/pblk-recovery.c 	for (i = 0; i < rqd->nr_ppas; ) {
rqd               264 drivers/lightnvm/pblk-recovery.c 	ret = pblk_submit_io(pblk, rqd, data);
rqd               269 drivers/lightnvm/pblk-recovery.c 		pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
rqd               362 drivers/lightnvm/pblk-recovery.c 	struct nvm_rq *rqd;
rqd               378 drivers/lightnvm/pblk-recovery.c 	rqd = p.rqd;
rqd               386 drivers/lightnvm/pblk-recovery.c 	memset(rqd, 0, pblk_g_rq_size);
rqd               393 drivers/lightnvm/pblk-recovery.c 	rqd->bio = NULL;
rqd               394 drivers/lightnvm/pblk-recovery.c 	rqd->opcode = NVM_OP_PREAD;
rqd               395 drivers/lightnvm/pblk-recovery.c 	rqd->meta_list = meta_list;
rqd               396 drivers/lightnvm/pblk-recovery.c 	rqd->nr_ppas = rq_ppas;
rqd               397 drivers/lightnvm/pblk-recovery.c 	rqd->ppa_list = ppa_list;
rqd               398 drivers/lightnvm/pblk-recovery.c 	rqd->dma_ppa_list = dma_ppa_list;
rqd               399 drivers/lightnvm/pblk-recovery.c 	rqd->dma_meta_list = dma_meta_list;
rqd               400 drivers/lightnvm/pblk-recovery.c 	ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               403 drivers/lightnvm/pblk-recovery.c 		rqd->is_seq = 1;
rqd               405 drivers/lightnvm/pblk-recovery.c 	for (i = 0; i < rqd->nr_ppas; ) {
rqd               423 drivers/lightnvm/pblk-recovery.c 	ret = pblk_submit_io_sync(pblk, rqd, data);
rqd               432 drivers/lightnvm/pblk-recovery.c 	if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
rqd               436 drivers/lightnvm/pblk-recovery.c 			pblk_log_read_err(pblk, rqd);
rqd               450 drivers/lightnvm/pblk-recovery.c 	pblk_get_packed_meta(pblk, rqd);
rqd               452 drivers/lightnvm/pblk-recovery.c 	for (i = 0; i < rqd->nr_ppas; i++) {
rqd               481 drivers/lightnvm/pblk-recovery.c 	struct nvm_rq *rqd;
rqd               502 drivers/lightnvm/pblk-recovery.c 	rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
rqd               503 drivers/lightnvm/pblk-recovery.c 	memset(rqd, 0, pblk_g_rq_size);
rqd               507 drivers/lightnvm/pblk-recovery.c 	p.rqd = rqd;
rqd               522 drivers/lightnvm/pblk-recovery.c 	mempool_free(rqd, &pblk->r_rq_pool);
rqd                22 drivers/lightnvm/pblk-write.c static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
rqd                53 drivers/lightnvm/pblk-write.c 		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
rqd                57 drivers/lightnvm/pblk-write.c 	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
rqd                62 drivers/lightnvm/pblk-write.c 	bio_put(rqd->bio);
rqd                63 drivers/lightnvm/pblk-write.c 	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
rqd                69 drivers/lightnvm/pblk-write.c 					   struct nvm_rq *rqd,
rqd                73 drivers/lightnvm/pblk-write.c 	return pblk_end_w_bio(pblk, rqd, c_ctx);
rqd                76 drivers/lightnvm/pblk-write.c static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
rqd                90 drivers/lightnvm/pblk-write.c 		pos = pblk_end_w_bio(pblk, rqd, c_ctx);
rqd                94 drivers/lightnvm/pblk-write.c 			rqd = nvm_rq_from_c_ctx(c);
rqd                96 drivers/lightnvm/pblk-write.c 				pos = pblk_end_queued_w_bio(pblk, rqd, c);
rqd               101 drivers/lightnvm/pblk-write.c 		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
rqd               213 drivers/lightnvm/pblk-write.c 	struct nvm_rq *rqd = recovery->rqd;
rqd               214 drivers/lightnvm/pblk-write.c 	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
rqd               215 drivers/lightnvm/pblk-write.c 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               217 drivers/lightnvm/pblk-write.c 	pblk_log_write_err(pblk, rqd);
rqd               219 drivers/lightnvm/pblk-write.c 	pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
rqd               224 drivers/lightnvm/pblk-write.c 		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
rqd               226 drivers/lightnvm/pblk-write.c 	bio_put(rqd->bio);
rqd               227 drivers/lightnvm/pblk-write.c 	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
rqd               235 drivers/lightnvm/pblk-write.c static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
rqd               246 drivers/lightnvm/pblk-write.c 	recovery->rqd = rqd;
rqd               252 drivers/lightnvm/pblk-write.c static void pblk_end_io_write(struct nvm_rq *rqd)
rqd               254 drivers/lightnvm/pblk-write.c 	struct pblk *pblk = rqd->private;
rqd               255 drivers/lightnvm/pblk-write.c 	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
rqd               257 drivers/lightnvm/pblk-write.c 	if (rqd->error) {
rqd               258 drivers/lightnvm/pblk-write.c 		pblk_end_w_fail(pblk, rqd);
rqd               262 drivers/lightnvm/pblk-write.c 			pblk_check_chunk_state_update(pblk, rqd);
rqd               264 drivers/lightnvm/pblk-write.c 		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
rqd               268 drivers/lightnvm/pblk-write.c 	pblk_complete_write(pblk, rqd, c_ctx);
rqd               272 drivers/lightnvm/pblk-write.c static void pblk_end_io_write_meta(struct nvm_rq *rqd)
rqd               274 drivers/lightnvm/pblk-write.c 	struct pblk *pblk = rqd->private;
rqd               275 drivers/lightnvm/pblk-write.c 	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
rqd               278 drivers/lightnvm/pblk-write.c 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               283 drivers/lightnvm/pblk-write.c 	if (rqd->error) {
rqd               284 drivers/lightnvm/pblk-write.c 		pblk_log_write_err(pblk, rqd);
rqd               289 drivers/lightnvm/pblk-write.c 			pblk_check_chunk_state_update(pblk, rqd);
rqd               292 drivers/lightnvm/pblk-write.c 	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
rqd               297 drivers/lightnvm/pblk-write.c 	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
rqd               302 drivers/lightnvm/pblk-write.c static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd               306 drivers/lightnvm/pblk-write.c 	rqd->opcode = NVM_OP_PWRITE;
rqd               307 drivers/lightnvm/pblk-write.c 	rqd->nr_ppas = nr_secs;
rqd               308 drivers/lightnvm/pblk-write.c 	rqd->is_seq = 1;
rqd               309 drivers/lightnvm/pblk-write.c 	rqd->private = pblk;
rqd               310 drivers/lightnvm/pblk-write.c 	rqd->end_io = end_io;
rqd               312 drivers/lightnvm/pblk-write.c 	return pblk_alloc_rqd_meta(pblk, rqd);
rqd               315 drivers/lightnvm/pblk-write.c static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd               320 drivers/lightnvm/pblk-write.c 	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
rqd               332 drivers/lightnvm/pblk-write.c 	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
rqd               339 drivers/lightnvm/pblk-write.c 		ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
rqd               342 drivers/lightnvm/pblk-write.c 		ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
rqd               376 drivers/lightnvm/pblk-write.c 	struct nvm_rq *rqd;
rqd               385 drivers/lightnvm/pblk-write.c 	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
rqd               387 drivers/lightnvm/pblk-write.c 	m_ctx = nvm_rq_to_pdu(rqd);
rqd               393 drivers/lightnvm/pblk-write.c 	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
rqd               397 drivers/lightnvm/pblk-write.c 	ppa_list = nvm_rq_to_ppa_list(rqd);
rqd               398 drivers/lightnvm/pblk-write.c 	for (i = 0; i < rqd->nr_ppas; ) {
rqd               414 drivers/lightnvm/pblk-write.c 	ret = pblk_submit_io(pblk, rqd, data);
rqd               429 drivers/lightnvm/pblk-write.c 	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
rqd               494 drivers/lightnvm/pblk-write.c static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
rqd               503 drivers/lightnvm/pblk-write.c 	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
rqd               509 drivers/lightnvm/pblk-write.c 	meta_line = pblk_should_submit_meta_io(pblk, rqd);
rqd               512 drivers/lightnvm/pblk-write.c 	err = pblk_submit_io(pblk, rqd, NULL);
rqd               545 drivers/lightnvm/pblk-write.c static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
rqd               547 drivers/lightnvm/pblk-write.c 	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
rqd               548 drivers/lightnvm/pblk-write.c 	struct bio *bio = rqd->bio;
rqd               558 drivers/lightnvm/pblk-write.c 	struct nvm_rq *rqd;
rqd               619 drivers/lightnvm/pblk-write.c 	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
rqd               620 drivers/lightnvm/pblk-write.c 	rqd->bio = bio;
rqd               622 drivers/lightnvm/pblk-write.c 	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
rqd               628 drivers/lightnvm/pblk-write.c 	if (pblk_submit_io_set(pblk, rqd))
rqd               639 drivers/lightnvm/pblk-write.c 	pblk_free_write_rqd(pblk, rqd);
rqd               642 drivers/lightnvm/pblk-write.c 	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
rqd               134 drivers/lightnvm/pblk.h 	struct nvm_rq *rqd;
rqd               738 drivers/lightnvm/pblk.h unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
rqd               765 drivers/lightnvm/pblk.h void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
rqd               766 drivers/lightnvm/pblk.h int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
rqd               767 drivers/lightnvm/pblk.h void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
rqd               769 drivers/lightnvm/pblk.h int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd               776 drivers/lightnvm/pblk.h void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
rqd               777 drivers/lightnvm/pblk.h void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
rqd               778 drivers/lightnvm/pblk.h int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
rqd               779 drivers/lightnvm/pblk.h int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf);
rqd               781 drivers/lightnvm/pblk.h void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
rqd               786 drivers/lightnvm/pblk.h void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd);
rqd               840 drivers/lightnvm/pblk.h void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
rqd               841 drivers/lightnvm/pblk.h void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
rqd               853 drivers/lightnvm/pblk.h int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd               856 drivers/lightnvm/pblk.h int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
rqd              1212 drivers/lightnvm/pblk.h static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
rqd              1217 drivers/lightnvm/pblk.h 	if (rqd->nr_ppas ==  1) {
rqd              1218 drivers/lightnvm/pblk.h 		print_ppa(pblk, &rqd->ppa_addr, "rqd", error);
rqd              1222 drivers/lightnvm/pblk.h 	while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
rqd              1223 drivers/lightnvm/pblk.h 						bit + 1)) < rqd->nr_ppas) {
rqd              1224 drivers/lightnvm/pblk.h 		print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error);
rqd              1227 drivers/lightnvm/pblk.h 	pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
rqd              1265 drivers/lightnvm/pblk.h static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
rqd              1268 drivers/lightnvm/pblk.h 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
rqd              1270 drivers/lightnvm/pblk.h 	if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
rqd              1275 drivers/lightnvm/pblk.h 	if (rqd->opcode == NVM_OP_PWRITE) {
rqd              1279 drivers/lightnvm/pblk.h 		for (i = 0; i < rqd->nr_ppas; i++) {
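
The pblk_print_failed_rqd() hits above walk rqd->ppa_status, a 64-bit per-sector status bitmap (filled from the NVMe completion result, see the nvme_nvm_end_io hit further down), and print the PPA for every set bit below nr_ppas. A stand-alone sketch of that bit-walk, using a plain loop in place of find_next_bit():

	#include <stdio.h>
	#include <stdint.h>

	/* Report every sector flagged in a ppa_status-style bitmap, bounded by
	 * the number of PPAs in the request, mirroring the find_next_bit()
	 * loop in pblk_print_failed_rqd(). */
	static void print_failed_sectors(uint64_t ppa_status, int nr_ppas)
	{
		int bit;

		for (bit = 0; bit < nr_ppas && bit < 64; bit++)
			if (ppa_status & (1ULL << bit))
				printf("sector %d failed\n", bit);

		printf("ppa_status: 0x%llx\n", (unsigned long long)ppa_status);
	}

	int main(void)
	{
		/* Pretend sectors 1 and 5 of an 8-sector request failed. */
		print_failed_sectors((1ULL << 1) | (1ULL << 5), 8);
		return 0;
	}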
rqd               624 drivers/nvme/host/lightnvm.c static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
rqd               627 drivers/nvme/host/lightnvm.c 	c->ph_rw.opcode = rqd->opcode;
rqd               629 drivers/nvme/host/lightnvm.c 	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
rqd               630 drivers/nvme/host/lightnvm.c 	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
rqd               631 drivers/nvme/host/lightnvm.c 	c->ph_rw.control = cpu_to_le16(rqd->flags);
rqd               632 drivers/nvme/host/lightnvm.c 	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
rqd               637 drivers/nvme/host/lightnvm.c 	struct nvm_rq *rqd = rq->end_io_data;
rqd               639 drivers/nvme/host/lightnvm.c 	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
rqd               640 drivers/nvme/host/lightnvm.c 	rqd->error = nvme_req(rq)->status;
rqd               641 drivers/nvme/host/lightnvm.c 	nvm_end_io(rqd);
rqd               648 drivers/nvme/host/lightnvm.c 					      struct nvm_rq *rqd,
rqd               654 drivers/nvme/host/lightnvm.c 	nvme_nvm_rqtocmd(rqd, ns, cmd);
rqd               662 drivers/nvme/host/lightnvm.c 	if (rqd->bio)
rqd               663 drivers/nvme/host/lightnvm.c 		blk_rq_append_bio(rq, &rqd->bio);
rqd               670 drivers/nvme/host/lightnvm.c static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
rqd               683 drivers/nvme/host/lightnvm.c 	rq = nvme_nvm_alloc_request(q, rqd, cmd);
rqd               690 drivers/nvme/host/lightnvm.c 		ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
rqd               696 drivers/nvme/host/lightnvm.c 	rq->end_io_data = rqd;
rqd               321 include/linux/lightnvm.h static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
rqd               323 include/linux/lightnvm.h 	return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;