fod               136 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod		fod[];		/* array of fcp_iods */
fod               159 drivers/nvme/target/fc.c 	return (fodptr - fodptr->queue->fod);
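
The fod[] member at line 136 is a C99 flexible array sized at allocation time, and nvmet_fc_iodnum() at line 159 recovers a descriptor's index by pointer subtraction against that array base. A minimal user-space sketch of the pairing; all names here are illustrative stand-ins, not the driver's:

    #include <stdio.h>
    #include <stdlib.h>

    struct queue;

    struct iod {
        struct queue *queue;        /* back-pointer, as fod->queue above */
    };

    struct queue {
        int sqsize;
        struct iod fod[];           /* flexible array member */
    };

    static int iodnum(const struct iod *p)
    {
        return (int)(p - p->queue->fod);    /* pointer difference = index */
    }

    int main(void)
    {
        struct queue *q = calloc(1, sizeof(*q) + 4 * sizeof(struct iod));

        if (!q)
            return 1;
        q->sqsize = 4;
        for (int i = 0; i < q->sqsize; i++)
            q->fod[i].queue = q;
        printf("%d\n", iodnum(&q->fod[2]));    /* prints 2 */
        free(q);
        return 0;
    }
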
fod               228 drivers/nvme/target/fc.c 					struct nvmet_fc_fcp_iod *fod);
fod               420 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod = queue->fod;
fod               423 drivers/nvme/target/fc.c 	for (i = 0; i < queue->sqsize; fod++, i++) {
fod               424 drivers/nvme/target/fc.c 		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
fod               425 drivers/nvme/target/fc.c 		fod->tgtport = tgtport;
fod               426 drivers/nvme/target/fc.c 		fod->queue = queue;
fod               427 drivers/nvme/target/fc.c 		fod->active = false;
fod               428 drivers/nvme/target/fc.c 		fod->abort = false;
fod               429 drivers/nvme/target/fc.c 		fod->aborted = false;
fod               430 drivers/nvme/target/fc.c 		fod->fcpreq = NULL;
fod               431 drivers/nvme/target/fc.c 		list_add_tail(&fod->fcp_list, &queue->fod_list);
fod               432 drivers/nvme/target/fc.c 		spin_lock_init(&fod->flock);
fod               434 drivers/nvme/target/fc.c 		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
fod               435 drivers/nvme/target/fc.c 					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
fod               436 drivers/nvme/target/fc.c 		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
fod               437 drivers/nvme/target/fc.c 			list_del(&fod->fcp_list);
fod               438 drivers/nvme/target/fc.c 			for (fod--, i--; i >= 0; fod--, i--) {
fod               439 drivers/nvme/target/fc.c 				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
fod               440 drivers/nvme/target/fc.c 						sizeof(fod->rspiubuf),
fod               442 drivers/nvme/target/fc.c 				fod->rspdma = 0L;
fod               443 drivers/nvme/target/fc.c 				list_del(&fod->fcp_list);
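
Lines 436-443 show the usual unwind-on-failure idiom: if DMA-mapping entry i fails, walk back over entries i-1..0 and undo each one in reverse. A self-contained model, with map()/unmap() as hypothetical stand-ins for fc_dma_map_single()/fc_dma_unmap_single():

    #include <stdbool.h>
    #include <stdio.h>

    #define NENT 8

    static bool map(int i)   { return i != 5; }  /* pretend entry 5 fails */
    static void unmap(int i) { printf("unmap %d\n", i); }

    static int prep_list(void)
    {
        for (int i = 0; i < NENT; i++) {
            if (!map(i)) {
                /* undo entries 0..i-1 in reverse, as lines 438-443 do */
                for (i--; i >= 0; i--)
                    unmap(i);
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        return prep_list() ? 1 : 0;
    }
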
fod               455 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod = queue->fod;
fod               458 drivers/nvme/target/fc.c 	for (i = 0; i < queue->sqsize; fod++, i++) {
fod               459 drivers/nvme/target/fc.c 		if (fod->rspdma)
fod               460 drivers/nvme/target/fc.c 			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
fod               461 drivers/nvme/target/fc.c 				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
fod               468 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod;
fod               472 drivers/nvme/target/fc.c 	fod = list_first_entry_or_null(&queue->fod_list,
fod               474 drivers/nvme/target/fc.c 	if (fod) {
fod               475 drivers/nvme/target/fc.c 		list_del(&fod->fcp_list);
fod               476 drivers/nvme/target/fc.c 		fod->active = true;
fod               483 drivers/nvme/target/fc.c 	return fod;
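
nvmet_fc_alloc_fcp_iod() pops the first free descriptor off queue->fod_list and marks it active; a NULL return means all sqsize descriptors are busy. A hypothetical user-space model, with a singly linked freelist and a pthread mutex standing in for the queue spinlock:

    #include <pthread.h>
    #include <stddef.h>

    struct fod {
        struct fod *next;
        int active;
    };

    struct queue {
        pthread_mutex_t lock;
        struct fod *free_list;
    };

    static struct fod *alloc_fod(struct queue *q)
    {
        struct fod *fod;

        pthread_mutex_lock(&q->lock);
        fod = q->free_list;              /* list_first_entry_or_null() */
        if (fod) {
            q->free_list = fod->next;    /* list_del() */
            fod->active = 1;
        }
        pthread_mutex_unlock(&q->lock);
        return fod;                      /* NULL: every descriptor in use */
    }

    int main(void)
    {
        struct fod f = { .next = NULL, .active = 0 };
        struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER,
                           .free_list = &f };
        return alloc_fod(&q) == &f ? 0 : 1;
    }
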
fod               492 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
fod               501 drivers/nvme/target/fc.c 	nvmet_fc_handle_fcp_rqst(tgtport, fod);
fod               507 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod =
fod               511 drivers/nvme/target/fc.c 	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
fod               517 drivers/nvme/target/fc.c 			struct nvmet_fc_fcp_iod *fod)
fod               519 drivers/nvme/target/fc.c 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
fod               520 drivers/nvme/target/fc.c 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
fod               524 drivers/nvme/target/fc.c 	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
fod               525 drivers/nvme/target/fc.c 				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
fod               529 drivers/nvme/target/fc.c 	fod->active = false;
fod               530 drivers/nvme/target/fc.c 	fod->abort = false;
fod               531 drivers/nvme/target/fc.c 	fod->aborted = false;
fod               532 drivers/nvme/target/fc.c 	fod->writedataactive = false;
fod               533 drivers/nvme/target/fc.c 	fod->fcpreq = NULL;
fod               544 drivers/nvme/target/fc.c 		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
fod               560 drivers/nvme/target/fc.c 	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
fod               565 drivers/nvme/target/fc.c 	fcpreq->nvmet_fc_private = fod;
fod               566 drivers/nvme/target/fc.c 	fod->fcpreq = fcpreq;
fod               567 drivers/nvme/target/fc.c 	fod->active = true;
fod               577 drivers/nvme/target/fc.c 	queue_work(queue->work_q, &fod->defer_work);
fod               591 drivers/nvme/target/fc.c 	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
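
struct_size(queue, fod, sqsize) at line 591 computes, with overflow checking, sizeof(*queue) plus sqsize trailing fod[] elements, so a single kzalloc() covers the queue header and its descriptor array. A simplified sketch of the expansion (the real kernel macro also saturates on overflow; these struct shapes are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct iod   { char buf[64]; };
    struct queue { int sqsize; struct iod fod[]; };

    /* simplified struct_size(): header plus n trailing elements */
    #define STRUCT_SIZE(ptr, member, n) \
        (sizeof(*(ptr)) + (n) * sizeof((ptr)->member[0]))

    int main(void)
    {
        int sqsize = 32;
        struct queue *q = calloc(1, STRUCT_SIZE(q, fod, sqsize));

        if (!q)
            return 1;
        q->sqsize = sqsize;
        printf("%zu bytes\n", STRUCT_SIZE(q, fod, sqsize));
        free(q);
        return 0;
    }
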
fod               678 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod = queue->fod;
fod               688 drivers/nvme/target/fc.c 	for (i = 0; i < queue->sqsize; fod++, i++) {
fod               689 drivers/nvme/target/fc.c 		if (fod->active) {
fod               690 drivers/nvme/target/fc.c 			spin_lock(&fod->flock);
fod               691 drivers/nvme/target/fc.c 			fod->abort = true;
fod               692 drivers/nvme/target/fc.c 			writedataactive = fod->writedataactive;
fod               693 drivers/nvme/target/fc.c 			spin_unlock(&fod->flock);
fod               700 drivers/nvme/target/fc.c 				spin_lock(&fod->flock);
fod               701 drivers/nvme/target/fc.c 				fod->aborted = true;
fod               702 drivers/nvme/target/fc.c 				spin_unlock(&fod->flock);
fod               704 drivers/nvme/target/fc.c 					&tgtport->fc_target_port, fod->fcpreq);
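
The teardown loop at lines 688-704 flags each active descriptor as aborting under its per-descriptor lock, then calls the LLDD's abort routine only for descriptors still waiting on write data; other outstanding ops complete on their own and observe fod->abort. A user-space model of that flag-under-lock, act-outside-lock pattern, with a pthread mutex in place of fod->flock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fod {
        pthread_mutex_t flock;
        bool active, abort, aborted, writedataactive;
    };

    static void lldd_fcp_abort(struct fod *fod)
    {
        (void)fod;
        puts("abort sent to LLDD");
    }

    static void delete_queue_fods(struct fod *fods, int sqsize)
    {
        for (int i = 0; i < sqsize; i++) {
            struct fod *fod = &fods[i];
            bool writedataactive;

            if (!fod->active)
                continue;

            pthread_mutex_lock(&fod->flock);
            fod->abort = true;
            writedataactive = fod->writedataactive;
            pthread_mutex_unlock(&fod->flock);

            /* only descriptors waiting for write data need an explicit
             * LLDD abort; everything else finishes and sees fod->abort */
            if (writedataactive) {
                pthread_mutex_lock(&fod->flock);
                fod->aborted = true;
                pthread_mutex_unlock(&fod->flock);
                lldd_fcp_abort(fod);
            }
        }
    }

    int main(void)
    {
        struct fod f = { .flock = PTHREAD_MUTEX_INITIALIZER,
                         .active = true, .writedataactive = true };
        delete_queue_fods(&f, 1);
        return f.aborted ? 0 : 1;
    }
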
fod              1718 drivers/nvme/target/fc.c nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
fod              1723 drivers/nvme/target/fc.c 	sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
fod              1727 drivers/nvme/target/fc.c 	fod->data_sg = sg;
fod              1728 drivers/nvme/target/fc.c 	fod->data_sg_cnt = nent;
fod              1729 drivers/nvme/target/fc.c 	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
fod              1730 drivers/nvme/target/fc.c 				((fod->io_dir == NVMET_FCP_WRITE) ?
fod              1733 drivers/nvme/target/fc.c 	fod->next_sg = fod->data_sg;
fod              1742 drivers/nvme/target/fc.c nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
fod              1744 drivers/nvme/target/fc.c 	if (!fod->data_sg || !fod->data_sg_cnt)
fod              1747 drivers/nvme/target/fc.c 	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
fod              1748 drivers/nvme/target/fc.c 				((fod->io_dir == NVMET_FCP_WRITE) ?
fod              1750 drivers/nvme/target/fc.c 	sgl_free(fod->data_sg);
fod              1751 drivers/nvme/target/fc.c 	fod->data_sg = NULL;
fod              1752 drivers/nvme/target/fc.c 	fod->data_sg_cnt = 0;
fod              1774 drivers/nvme/target/fc.c 				struct nvmet_fc_fcp_iod *fod)
fod              1776 drivers/nvme/target/fc.c 	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
fod              1777 drivers/nvme/target/fc.c 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
fod              1783 drivers/nvme/target/fc.c 	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
fod              1784 drivers/nvme/target/fc.c 		xfr_length = fod->req.transfer_len;
fod              1786 drivers/nvme/target/fc.c 		xfr_length = fod->offset;
fod              1807 drivers/nvme/target/fc.c 	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
fod              1808 drivers/nvme/target/fc.c 	if (!(rspcnt % fod->queue->ersp_ratio) ||
fod              1810 drivers/nvme/target/fc.c 	    xfr_length != fod->req.transfer_len ||
fod              1813 drivers/nvme/target/fc.c 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
fod              1817 drivers/nvme/target/fc.c 	fod->fcpreq->rspaddr = ersp;
fod              1818 drivers/nvme/target/fc.c 	fod->fcpreq->rspdma = fod->rspdma;
fod              1822 drivers/nvme/target/fc.c 		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
fod              1825 drivers/nvme/target/fc.c 		rsn = atomic_inc_return(&fod->queue->rsn);
fod              1828 drivers/nvme/target/fc.c 		fod->fcpreq->rsplen = sizeof(*ersp);
fod              1831 drivers/nvme/target/fc.c 	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
fod              1832 drivers/nvme/target/fc.c 				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
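
nvmet_fc_prep_fcp_rsp() normally sends the abbreviated all-zeros FCP response and falls back to a full ERSP when any condition in lines 1807-1813 holds: every ersp_ratio-th response, fused commands, a short transfer, a nonzero status or result, or a nearly full SQ. A hedged model of that predicate with simplified stand-in fields:

    #include <stdbool.h>

    struct rsp_ctx {
        unsigned rspcnt;          /* atomic_inc_return(&queue->zrspcnt) */
        unsigned ersp_ratio;
        bool     fused;           /* fused-command flags set in the sqe */
        unsigned xfr_length;
        unsigned transfer_len;
        unsigned status;          /* nonzero cqe status */
        unsigned result;          /* nonzero cqe->result */
        bool     sq_90pct_full;   /* queue_90percent_full(queue, sq_head) */
    };

    static bool send_full_ersp(const struct rsp_ctx *c)
    {
        return !(c->rspcnt % c->ersp_ratio) ||
               c->fused ||
               c->xfr_length != c->transfer_len ||
               c->status || c->result ||
               c->sq_90pct_full;
    }

    int main(void)
    {
        struct rsp_ctx c = { .rspcnt = 1, .ersp_ratio = 8,
                             .xfr_length = 4096, .transfer_len = 4096 };
        return send_full_ersp(&c) ? 1 : 0;   /* 0: abbreviated rsp ok */
    }
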
fod              1839 drivers/nvme/target/fc.c 				struct nvmet_fc_fcp_iod *fod)
fod              1841 drivers/nvme/target/fc.c 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
fod              1844 drivers/nvme/target/fc.c 	nvmet_fc_free_tgt_pgs(fod);
fod              1851 drivers/nvme/target/fc.c 	if (!fod->aborted)
fod              1854 drivers/nvme/target/fc.c 	nvmet_fc_free_fcp_iod(fod->queue, fod);
fod              1859 drivers/nvme/target/fc.c 				struct nvmet_fc_fcp_iod *fod)
fod              1863 drivers/nvme/target/fc.c 	fod->fcpreq->op = NVMET_FCOP_RSP;
fod              1864 drivers/nvme/target/fc.c 	fod->fcpreq->timeout = 0;
fod              1866 drivers/nvme/target/fc.c 	nvmet_fc_prep_fcp_rsp(tgtport, fod);
fod              1868 drivers/nvme/target/fc.c 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
fod              1870 drivers/nvme/target/fc.c 		nvmet_fc_abort_op(tgtport, fod);
fod              1875 drivers/nvme/target/fc.c 				struct nvmet_fc_fcp_iod *fod, u8 op)
fod              1877 drivers/nvme/target/fc.c 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
fod              1878 drivers/nvme/target/fc.c 	struct scatterlist *sg = fod->next_sg;
fod              1880 drivers/nvme/target/fc.c 	u32 remaininglen = fod->req.transfer_len - fod->offset;
fod              1885 drivers/nvme/target/fc.c 	fcpreq->offset = fod->offset;
fod              1912 drivers/nvme/target/fc.c 		fod->next_sg = sg;
fod              1914 drivers/nvme/target/fc.c 		fod->next_sg = NULL;
fod              1926 drivers/nvme/target/fc.c 	    ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
fod              1929 drivers/nvme/target/fc.c 		nvmet_fc_prep_fcp_rsp(tgtport, fod);
fod              1932 drivers/nvme/target/fc.c 	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
fod              1939 drivers/nvme/target/fc.c 		fod->abort = true;
fod              1942 drivers/nvme/target/fc.c 			spin_lock_irqsave(&fod->flock, flags);
fod              1943 drivers/nvme/target/fc.c 			fod->writedataactive = false;
fod              1944 drivers/nvme/target/fc.c 			spin_unlock_irqrestore(&fod->flock, flags);
fod              1945 drivers/nvme/target/fc.c 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
fod              1949 drivers/nvme/target/fc.c 			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
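
nvmet_fc_transfer_fcp_data() moves at most one LLDD-sized chunk per op, advancing fod->offset and fod->next_sg each time until transfer_len is covered. A toy model of the chunking loop; MAX_OP_BYTES is an illustrative stand-in for the tgtport->max_sg_cnt * PAGE_SIZE bound:

    #include <stdio.h>

    #define MAX_OP_BYTES (4 * 4096)

    int main(void)
    {
        unsigned transfer_len = 10 * 4096, offset = 0;

        while (offset < transfer_len) {
            unsigned remaining = transfer_len - offset;
            unsigned chunk = remaining < MAX_OP_BYTES ?
                             remaining : MAX_OP_BYTES;

            printf("op: offset=%u len=%u\n", offset, chunk);
            offset += chunk;    /* fod->offset += transferred_length */
        }
        return 0;
    }
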
fod              1955 drivers/nvme/target/fc.c __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
fod              1957 drivers/nvme/target/fc.c 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
fod              1958 drivers/nvme/target/fc.c 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
fod              1963 drivers/nvme/target/fc.c 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
fod              1967 drivers/nvme/target/fc.c 		nvmet_fc_abort_op(tgtport, fod);
fod              1978 drivers/nvme/target/fc.c nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
fod              1980 drivers/nvme/target/fc.c 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
fod              1981 drivers/nvme/target/fc.c 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
fod              1985 drivers/nvme/target/fc.c 	spin_lock_irqsave(&fod->flock, flags);
fod              1986 drivers/nvme/target/fc.c 	abort = fod->abort;
fod              1987 drivers/nvme/target/fc.c 	fod->writedataactive = false;
fod              1988 drivers/nvme/target/fc.c 	spin_unlock_irqrestore(&fod->flock, flags);
fod              1993 drivers/nvme/target/fc.c 		if (__nvmet_fc_fod_op_abort(fod, abort))
fod              1997 drivers/nvme/target/fc.c 			spin_lock(&fod->flock);
fod              1998 drivers/nvme/target/fc.c 			fod->abort = true;
fod              1999 drivers/nvme/target/fc.c 			spin_unlock(&fod->flock);
fod              2001 drivers/nvme/target/fc.c 			nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
fod              2005 drivers/nvme/target/fc.c 		fod->offset += fcpreq->transferred_length;
fod              2006 drivers/nvme/target/fc.c 		if (fod->offset != fod->req.transfer_len) {
fod              2007 drivers/nvme/target/fc.c 			spin_lock_irqsave(&fod->flock, flags);
fod              2008 drivers/nvme/target/fc.c 			fod->writedataactive = true;
fod              2009 drivers/nvme/target/fc.c 			spin_unlock_irqrestore(&fod->flock, flags);
fod              2012 drivers/nvme/target/fc.c 			nvmet_fc_transfer_fcp_data(tgtport, fod,
fod              2018 drivers/nvme/target/fc.c 		nvmet_req_execute(&fod->req);
fod              2023 drivers/nvme/target/fc.c 		if (__nvmet_fc_fod_op_abort(fod, abort))
fod              2027 drivers/nvme/target/fc.c 			nvmet_fc_abort_op(tgtport, fod);
fod              2035 drivers/nvme/target/fc.c 			nvmet_fc_free_tgt_pgs(fod);
fod              2036 drivers/nvme/target/fc.c 			nvmet_fc_free_fcp_iod(fod->queue, fod);
fod              2040 drivers/nvme/target/fc.c 		fod->offset += fcpreq->transferred_length;
fod              2041 drivers/nvme/target/fc.c 		if (fod->offset != fod->req.transfer_len) {
fod              2043 drivers/nvme/target/fc.c 			nvmet_fc_transfer_fcp_data(tgtport, fod,
fod              2051 drivers/nvme/target/fc.c 		nvmet_fc_free_tgt_pgs(fod);
fod              2053 drivers/nvme/target/fc.c 		nvmet_fc_xmt_fcp_rsp(tgtport, fod);
fod              2058 drivers/nvme/target/fc.c 		if (__nvmet_fc_fod_op_abort(fod, abort))
fod              2060 drivers/nvme/target/fc.c 		nvmet_fc_free_fcp_iod(fod->queue, fod);
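
nvmet_fc_fod_op_done() dispatches on the op type of the just-completed LLDD request: more write data or execute, more read data or send the response, or retire the descriptor. A skeletal model of that dispatch, with prints in place of the real actions and the abort branches omitted:

    #include <stdio.h>

    enum op { OP_WRITEDATA, OP_READDATA, OP_READDATA_RSP, OP_RSP };

    static void op_done(enum op op, unsigned offset, unsigned transfer_len)
    {
        switch (op) {
        case OP_WRITEDATA:
            if (offset != transfer_len)
                puts("queue next WRITEDATA op");
            else
                puts("all data received: execute nvmet request");
            break;
        case OP_READDATA:
        case OP_READDATA_RSP:
            if (op == OP_READDATA_RSP)
                puts("data+rsp done: free pages, free fod");
            else if (offset != transfer_len)
                puts("queue next READDATA op");
            else
                puts("data sent: free pages, send response");
            break;
        case OP_RSP:
            puts("response done: free fod");
            break;
        }
    }

    int main(void)
    {
        op_done(OP_WRITEDATA, 4096, 8192);
        op_done(OP_RSP, 0, 0);
        return 0;
    }
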
fod              2071 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
fod              2073 drivers/nvme/target/fc.c 	nvmet_fc_fod_op_done(fod);
fod              2081 drivers/nvme/target/fc.c 			struct nvmet_fc_fcp_iod *fod, int status)
fod              2083 drivers/nvme/target/fc.c 	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
fod              2084 drivers/nvme/target/fc.c 	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
fod              2088 drivers/nvme/target/fc.c 	spin_lock_irqsave(&fod->flock, flags);
fod              2089 drivers/nvme/target/fc.c 	abort = fod->abort;
fod              2090 drivers/nvme/target/fc.c 	spin_unlock_irqrestore(&fod->flock, flags);
fod              2094 drivers/nvme/target/fc.c 		fod->queue->sqhd = cqe->sq_head;
fod              2097 drivers/nvme/target/fc.c 		nvmet_fc_abort_op(tgtport, fod);
fod              2105 drivers/nvme/target/fc.c 		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
fod              2106 drivers/nvme/target/fc.c 		cqe->sq_id = cpu_to_le16(fod->queue->qid);
fod              2116 drivers/nvme/target/fc.c 		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
fod              2118 drivers/nvme/target/fc.c 			nvmet_fc_transfer_fcp_data(tgtport, fod,
fod              2127 drivers/nvme/target/fc.c 	nvmet_fc_free_tgt_pgs(fod);
fod              2129 drivers/nvme/target/fc.c 	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
fod              2136 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
fod              2137 drivers/nvme/target/fc.c 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
fod              2139 drivers/nvme/target/fc.c 	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
fod              2148 drivers/nvme/target/fc.c 			struct nvmet_fc_fcp_iod *fod)
fod              2150 drivers/nvme/target/fc.c 	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
fod              2170 drivers/nvme/target/fc.c 	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
fod              2173 drivers/nvme/target/fc.c 		fod->io_dir = NVMET_FCP_WRITE;
fod              2177 drivers/nvme/target/fc.c 		fod->io_dir = NVMET_FCP_READ;
fod              2181 drivers/nvme/target/fc.c 		fod->io_dir = NVMET_FCP_NODATA;
fod              2186 drivers/nvme/target/fc.c 	fod->req.cmd = &fod->cmdiubuf.sqe;
fod              2187 drivers/nvme/target/fc.c 	fod->req.cqe = &fod->rspiubuf.cqe;
fod              2188 drivers/nvme/target/fc.c 	fod->req.port = tgtport->pe->port;
fod              2191 drivers/nvme/target/fc.c 	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
fod              2193 drivers/nvme/target/fc.c 	fod->data_sg = NULL;
fod              2194 drivers/nvme/target/fc.c 	fod->data_sg_cnt = 0;
fod              2196 drivers/nvme/target/fc.c 	ret = nvmet_req_init(&fod->req,
fod              2197 drivers/nvme/target/fc.c 				&fod->queue->nvme_cq,
fod              2198 drivers/nvme/target/fc.c 				&fod->queue->nvme_sq,
fod              2206 drivers/nvme/target/fc.c 	fod->req.transfer_len = xfrlen;
fod              2209 drivers/nvme/target/fc.c 	atomic_inc(&fod->queue->sqtail);
fod              2211 drivers/nvme/target/fc.c 	if (fod->req.transfer_len) {
fod              2212 drivers/nvme/target/fc.c 		ret = nvmet_fc_alloc_tgt_pgs(fod);
fod              2214 drivers/nvme/target/fc.c 			nvmet_req_complete(&fod->req, ret);
fod              2218 drivers/nvme/target/fc.c 	fod->req.sg = fod->data_sg;
fod              2219 drivers/nvme/target/fc.c 	fod->req.sg_cnt = fod->data_sg_cnt;
fod              2220 drivers/nvme/target/fc.c 	fod->offset = 0;
fod              2222 drivers/nvme/target/fc.c 	if (fod->io_dir == NVMET_FCP_WRITE) {
fod              2224 drivers/nvme/target/fc.c 		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
fod              2234 drivers/nvme/target/fc.c 	nvmet_req_execute(&fod->req);
fod              2238 drivers/nvme/target/fc.c 	nvmet_fc_abort_op(tgtport, fod);
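
nvmet_fc_handle_fcp_rqst() derives the I/O direction from the CMD IU flags, initializes the nvmet request, maps data pages when there is a payload, and then either starts pulling write data or executes immediately; for reads, the data is pushed from the command-done callback. A condensed model of that flow, with prints in place of the real calls:

    #include <stdio.h>

    enum io_dir { FCP_WRITE, FCP_READ, FCP_NODATA };

    static void handle_cmd(enum io_dir dir, unsigned xfrlen)
    {
        if (xfrlen)
            puts("alloc + DMA-map data sg list");

        if (dir == FCP_WRITE) {
            puts("start WRITEDATA transfer; execute after data arrives");
            return;
        }
        /* reads and no-data commands execute right away; read data is
         * pushed from the command-done callback */
        puts("execute nvmet request");
    }

    int main(void)
    {
        handle_cmd(FCP_WRITE, 8192);
        handle_cmd(FCP_NODATA, 0);
        return 0;
    }
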
fod              2296 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod;
fod              2321 drivers/nvme/target/fc.c 	fod = nvmet_fc_alloc_fcp_iod(queue);
fod              2322 drivers/nvme/target/fc.c 	if (fod) {
fod              2325 drivers/nvme/target/fc.c 		fcpreq->nvmet_fc_private = fod;
fod              2326 drivers/nvme/target/fc.c 		fod->fcpreq = fcpreq;
fod              2328 drivers/nvme/target/fc.c 		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
fod              2403 drivers/nvme/target/fc.c 	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
fod              2407 drivers/nvme/target/fc.c 	if (!fod || fod->fcpreq != fcpreq)
fod              2411 drivers/nvme/target/fc.c 	queue = fod->queue;
fod              2414 drivers/nvme/target/fc.c 	if (fod->active) {
fod              2420 drivers/nvme/target/fc.c 		spin_lock(&fod->flock);
fod              2421 drivers/nvme/target/fc.c 		fod->abort = true;
fod              2422 drivers/nvme/target/fc.c 		fod->aborted = true;
fod              2423 drivers/nvme/target/fc.c 		spin_unlock(&fod->flock);
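
nvmet_fc_rcv_fcp_abort() first checks that the LLDD request still points at a descriptor that points back at it and is active; otherwise the abort raced with completion and is ignored. A small model of that validation plus the flagging at lines 2420-2423 (the driver takes fod->flock around the flags):

    #include <stdbool.h>

    struct fod {
        void *fcpreq;
        bool active, abort, aborted;
    };

    static bool abort_applies(const struct fod *fod, const void *fcpreq)
    {
        return fod && fod->fcpreq == fcpreq && fod->active;
    }

    static void mark_aborted(struct fod *fod)
    {
        /* in the driver this happens under fod->flock */
        fod->abort = true;
        fod->aborted = true;
    }

    int main(void)
    {
        struct fod fod = { .active = true };

        fod.fcpreq = &fod;    /* pretend the LLDD request points back */
        if (abort_applies(&fod, &fod))
            mark_aborted(&fod);
        return fod.aborted ? 0 : 1;
    }
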