1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c)  2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8#include "qla_target.h"
9
10#include <linux/blkdev.h>
11#include <linux/delay.h>
12
13#include <scsi/scsi_tcq.h>
14
15static void qla25xx_set_que(srb_t *, struct rsp_que **);
16/**
17 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
18 * @sp: SRB command to process
19 *
20 * Returns the proper CF_* direction based on the command's data direction.
21 */
22static inline uint16_t
23qla2x00_get_cmd_direction(srb_t *sp)
24{
25	uint16_t cflags;
26	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
27	struct scsi_qla_host *vha = sp->fcport->vha;
28
29	cflags = 0;
30
31	/* Set transfer direction */
32	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
33		cflags = CF_WRITE;
34		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
35		vha->qla_stats.output_requests++;
36	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
37		cflags = CF_READ;
38		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
39		vha->qla_stats.input_requests++;
40	}
41	return (cflags);
42}
43
44/**
45 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
46 * Continuation Type 0 IOCBs to allocate.
47 *
48 * @dsds: number of data segment descriptors needed
49 *
50 * Returns the number of IOCB entries needed to store @dsds.
51 */
52uint16_t
53qla2x00_calc_iocbs_32(uint16_t dsds)
54{
55	uint16_t iocbs;
56
57	iocbs = 1;
58	if (dsds > 3) {
59		iocbs += (dsds - 3) / 7;
60		if ((dsds - 3) % 7)
61			iocbs++;
62	}
63	return (iocbs);
64}
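
/*
 * Worked example (illustrative numbers only): a request with 17 data
 * segments uses the 3 DSDs in the Command Type 2 IOCB plus (17 - 3) = 14
 * more, which fit in 14 / 7 = 2 Continuation Type 0 IOCBs, so
 * qla2x00_calc_iocbs_32(17) returns 3.
 */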
65
66/**
67 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
68 * Continuation Type 1 IOCBs to allocate.
69 *
70 * @dsds: number of data segment descriptors needed
71 *
72 * Returns the number of IOCB entries needed to store @dsds.
73 */
74uint16_t
75qla2x00_calc_iocbs_64(uint16_t dsds)
76{
77	uint16_t iocbs;
78
79	iocbs = 1;
80	if (dsds > 2) {
81		iocbs += (dsds - 2) / 5;
82		if ((dsds - 2) % 5)
83			iocbs++;
84	}
85	return (iocbs);
86}
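
/*
 * Illustrative example: for 12 data segments, 2 DSDs live in the Command
 * Type 3 IOCB and the remaining 10 fill 10 / 5 = 2 Continuation Type 1
 * IOCBs, so qla2x00_calc_iocbs_64(12) returns 3.
 */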
87
88/**
89 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
90 * @vha: HA context
91 *
92 * Returns a pointer to the Continuation Type 0 IOCB packet.
93 */
94static inline cont_entry_t *
95qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
96{
97	cont_entry_t *cont_pkt;
98	struct req_que *req = vha->req;
99	/* Adjust ring index. */
100	req->ring_index++;
101	if (req->ring_index == req->length) {
102		req->ring_index = 0;
103		req->ring_ptr = req->ring;
104	} else {
105		req->ring_ptr++;
106	}
107
108	cont_pkt = (cont_entry_t *)req->ring_ptr;
109
110	/* Load packet defaults. */
111	*((uint32_t *)(&cont_pkt->entry_type)) =
112	    __constant_cpu_to_le32(CONTINUE_TYPE);
113
114	return (cont_pkt);
115}
116
117/**
118 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
119 * @vha: HA context
 * @req: request queue
120 *
121 * Returns a pointer to the continuation type 1 IOCB packet.
122 */
123static inline cont_a64_entry_t *
124qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
125{
126	cont_a64_entry_t *cont_pkt;
127
128	/* Adjust ring index. */
129	req->ring_index++;
130	if (req->ring_index == req->length) {
131		req->ring_index = 0;
132		req->ring_ptr = req->ring;
133	} else {
134		req->ring_ptr++;
135	}
136
137	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
138
139	/* Load packet defaults. */
140	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
141	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
142	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);
143
144	return (cont_pkt);
145}
146
147static inline int
148qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
149{
150	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
151	uint8_t	guard = scsi_host_get_guard(cmd->device->host);
152
153	/* We always use DIF Bundling for best performance */
154	*fw_prot_opts = 0;
155
156	/* Translate SCSI opcode to a protection opcode */
157	switch (scsi_get_prot_op(cmd)) {
158	case SCSI_PROT_READ_STRIP:
159		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
160		break;
161	case SCSI_PROT_WRITE_INSERT:
162		*fw_prot_opts |= PO_MODE_DIF_INSERT;
163		break;
164	case SCSI_PROT_READ_INSERT:
165		*fw_prot_opts |= PO_MODE_DIF_INSERT;
166		break;
167	case SCSI_PROT_WRITE_STRIP:
168		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
169		break;
170	case SCSI_PROT_READ_PASS:
171	case SCSI_PROT_WRITE_PASS:
172		if (guard & SHOST_DIX_GUARD_IP)
173			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
174		else
175			*fw_prot_opts |= PO_MODE_DIF_PASS;
176		break;
177	default:	/* Normal Request */
178		*fw_prot_opts |= PO_MODE_DIF_PASS;
179		break;
180	}
181
182	return scsi_prot_sg_count(cmd);
183}
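
/*
 * Example of the translation above (a sketch, not an exhaustive table): a
 * SCSI_PROT_WRITE_PASS command on a host advertising SHOST_DIX_GUARD_IP
 * sets *fw_prot_opts to PO_MODE_DIF_TCP_CKSUM, while the same command with
 * a CRC guard uses PO_MODE_DIF_PASS; in either case the return value is
 * scsi_prot_sg_count(cmd), i.e. the number of protection SG entries the
 * caller still has to DMA-map.
 */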
184
185/**
186 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
187 * capable IOCB types.
188 *
189 * @sp: SRB command to process
190 * @cmd_pkt: Command type 2 IOCB
191 * @tot_dsds: Total number of segments to transfer
192 */
193void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
194    uint16_t tot_dsds)
195{
196	uint16_t	avail_dsds;
197	uint32_t	*cur_dsd;
198	scsi_qla_host_t	*vha;
199	struct scsi_cmnd *cmd;
200	struct scatterlist *sg;
201	int i;
202
203	cmd = GET_CMD_SP(sp);
204
205	/* Update entry type to indicate Command Type 2 IOCB */
206	*((uint32_t *)(&cmd_pkt->entry_type)) =
207	    __constant_cpu_to_le32(COMMAND_TYPE);
208
209	/* No data transfer */
210	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
211		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
212		return;
213	}
214
215	vha = sp->fcport->vha;
216	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
217
218	/* Three DSDs are available in the Command Type 2 IOCB */
219	avail_dsds = 3;
220	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
221
222	/* Load data segments */
223	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
224		cont_entry_t *cont_pkt;
225
226		/* Allocate additional continuation packets? */
227		if (avail_dsds == 0) {
228			/*
229			 * Seven DSDs are available in the Continuation
230			 * Type 0 IOCB.
231			 */
232			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
233			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
234			avail_dsds = 7;
235		}
236
237		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
238		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
239		avail_dsds--;
240	}
241}
242
243/**
244 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
245 * capable IOCB types.
246 *
247 * @sp: SRB command to process
248 * @cmd_pkt: Command type 3 IOCB
249 * @tot_dsds: Total number of segments to transfer
250 */
251void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
252    uint16_t tot_dsds)
253{
254	uint16_t	avail_dsds;
255	uint32_t	*cur_dsd;
256	scsi_qla_host_t	*vha;
257	struct scsi_cmnd *cmd;
258	struct scatterlist *sg;
259	int i;
260
261	cmd = GET_CMD_SP(sp);
262
263	/* Update entry type to indicate Command Type 3 IOCB */
264	*((uint32_t *)(&cmd_pkt->entry_type)) =
265	    __constant_cpu_to_le32(COMMAND_A64_TYPE);
266
267	/* No data transfer */
268	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
269		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
270		return;
271	}
272
273	vha = sp->fcport->vha;
274	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
275
276	/* Two DSDs are available in the Command Type 3 IOCB */
277	avail_dsds = 2;
278	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
279
280	/* Load data segments */
281	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
282		dma_addr_t	sle_dma;
283		cont_a64_entry_t *cont_pkt;
284
285		/* Allocate additional continuation packets? */
286		if (avail_dsds == 0) {
287			/*
288			 * Five DSDs are available in the Continuation
289			 * Type 1 IOCB.
290			 */
291			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
292			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
293			avail_dsds = 5;
294		}
295
296		sle_dma = sg_dma_address(sg);
297		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
298		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
299		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
300		avail_dsds--;
301	}
302}
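
/*
 * Note on DSD layout (as assumed by the two builders above): a 32-bit DSD
 * is two little-endian words, { address, length }, while a 64-bit DSD is
 * three words, { address low, address high, length }.  That is why the
 * Command Type 2/Continuation Type 0 path advances cur_dsd by two entries
 * per segment and the Command Type 3/Continuation Type 1 path by three.
 */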
303
304/**
305 * qla2x00_start_scsi() - Send a SCSI command to the ISP
306 * @sp: command to send to the ISP
307 *
308 * Returns non-zero if a failure occurred, else zero.
309 */
310int
311qla2x00_start_scsi(srb_t *sp)
312{
313	int		ret, nseg;
314	unsigned long   flags;
315	scsi_qla_host_t	*vha;
316	struct scsi_cmnd *cmd;
317	uint32_t	*clr_ptr;
318	uint32_t        index;
319	uint32_t	handle;
320	cmd_entry_t	*cmd_pkt;
321	uint16_t	cnt;
322	uint16_t	req_cnt;
323	uint16_t	tot_dsds;
324	struct device_reg_2xxx __iomem *reg;
325	struct qla_hw_data *ha;
326	struct req_que *req;
327	struct rsp_que *rsp;
328
329	/* Setup device pointers. */
330	ret = 0;
331	vha = sp->fcport->vha;
332	ha = vha->hw;
333	reg = &ha->iobase->isp;
334	cmd = GET_CMD_SP(sp);
335	req = ha->req_q_map[0];
336	rsp = ha->rsp_q_map[0];
337	/* So we know we haven't pci_map'ed anything yet */
338	tot_dsds = 0;
339
340	/* Send marker if required */
341	if (vha->marker_needed != 0) {
342		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
343		    QLA_SUCCESS) {
344			return (QLA_FUNCTION_FAILED);
345		}
346		vha->marker_needed = 0;
347	}
348
349	/* Acquire ring specific lock */
350	spin_lock_irqsave(&ha->hardware_lock, flags);
351
352	/* Check for room in outstanding command list. */
353	handle = req->current_outstanding_cmd;
354	for (index = 1; index < req->num_outstanding_cmds; index++) {
355		handle++;
356		if (handle == req->num_outstanding_cmds)
357			handle = 1;
358		if (!req->outstanding_cmds[handle])
359			break;
360	}
361	if (index == req->num_outstanding_cmds)
362		goto queuing_error;
363
364	/* Map the sg table so we have an accurate count of sg entries needed */
365	if (scsi_sg_count(cmd)) {
366		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
367		    scsi_sg_count(cmd), cmd->sc_data_direction);
368		if (unlikely(!nseg))
369			goto queuing_error;
370	} else
371		nseg = 0;
372
373	tot_dsds = nseg;
374
375	/* Calculate the number of request entries needed. */
376	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
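	/*
	 * Recompute the free request-ring space from the hardware OUT
	 * pointer when the cached count looks too small.  Illustrative
	 * numbers only: with a ring length of 128, ring_index (IN) at 10
	 * and OUT read back as 100, the free space is 100 - 10 = 90
	 * entries; if IN has wrapped past OUT (say IN = 100, OUT = 10),
	 * it is 128 - (100 - 10) = 38.  Two entries are always kept in
	 * reserve, hence the "req_cnt + 2" test.
	 */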
377	if (req->cnt < (req_cnt + 2)) {
378		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
379		if (req->ring_index < cnt)
380			req->cnt = cnt - req->ring_index;
381		else
382			req->cnt = req->length -
383			    (req->ring_index - cnt);
384		/* If still no head room then bail out */
385		if (req->cnt < (req_cnt + 2))
386			goto queuing_error;
387	}
388
389	/* Build command packet */
390	req->current_outstanding_cmd = handle;
391	req->outstanding_cmds[handle] = sp;
392	sp->handle = handle;
393	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
394	req->cnt -= req_cnt;
395
396	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
397	cmd_pkt->handle = handle;
398	/* Zero out remaining portion of packet. */
399	clr_ptr = (uint32_t *)cmd_pkt + 2;
400	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
401	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
402
403	/* Set target ID and LUN number*/
404	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
405	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
406	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
407
408	/* Load SCSI command packet. */
409	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
410	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
411
412	/* Build IOCB segments */
413	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
414
415	/* Set total data segment count. */
416	cmd_pkt->entry_count = (uint8_t)req_cnt;
417	wmb();
418
419	/* Adjust ring index. */
420	req->ring_index++;
421	if (req->ring_index == req->length) {
422		req->ring_index = 0;
423		req->ring_ptr = req->ring;
424	} else
425		req->ring_ptr++;
426
427	sp->flags |= SRB_DMA_VALID;
428
429	/* Set chip new ring index. */
430	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
431	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
432
433	/* Manage unprocessed RIO/ZIO commands in response queue. */
434	if (vha->flags.process_response_queue &&
435	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
436		qla2x00_process_response_queue(rsp);
437
438	spin_unlock_irqrestore(&ha->hardware_lock, flags);
439	return (QLA_SUCCESS);
440
441queuing_error:
442	if (tot_dsds)
443		scsi_dma_unmap(cmd);
444
445	spin_unlock_irqrestore(&ha->hardware_lock, flags);
446
447	return (QLA_FUNCTION_FAILED);
448}
449
450/**
451 * qla2x00_start_iocbs() - Hand queued IOCBs to the ISP for execution.
 * @vha: HA context
 * @req: request queue that holds the IOCBs
452 */
453void
454qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
455{
456	struct qla_hw_data *ha = vha->hw;
457	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
458
459	if (IS_P3P_TYPE(ha)) {
460		qla82xx_start_iocbs(vha);
461	} else {
462		/* Adjust ring index. */
463		req->ring_index++;
464		if (req->ring_index == req->length) {
465			req->ring_index = 0;
466			req->ring_ptr = req->ring;
467		} else
468			req->ring_ptr++;
469
470		/* Set chip new ring index. */
471		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
472			WRT_REG_DWORD(req->req_q_in, req->ring_index);
473			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
474		} else if (IS_QLAFX00(ha)) {
475			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
476			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
477			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
478		} else if (IS_FWI2_CAPABLE(ha)) {
479			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
480			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
481		} else {
482			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
483				req->ring_index);
484			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
485		}
486	}
487}
488
489/**
490 * __qla2x00_marker() - Send a marker IOCB to the firmware.
491 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
492 * @loop_id: loop ID
493 * @lun: LUN
494 * @type: marker modifier
495 *
496 * Can be called from both normal and interrupt context.
497 *
498 * Returns non-zero if a failure occurred, else zero.
499 */
500static int
501__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
502			struct rsp_que *rsp, uint16_t loop_id,
503			uint64_t lun, uint8_t type)
504{
505	mrk_entry_t *mrk;
506	struct mrk_entry_24xx *mrk24 = NULL;
507
508	struct qla_hw_data *ha = vha->hw;
509	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
510
511	req = ha->req_q_map[0];
512	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
513	if (mrk == NULL) {
514		ql_log(ql_log_warn, base_vha, 0x3026,
515		    "Failed to allocate Marker IOCB.\n");
516
517		return (QLA_FUNCTION_FAILED);
518	}
519
520	mrk->entry_type = MARKER_TYPE;
521	mrk->modifier = type;
522	if (type != MK_SYNC_ALL) {
523		if (IS_FWI2_CAPABLE(ha)) {
524			mrk24 = (struct mrk_entry_24xx *) mrk;
525			mrk24->nport_handle = cpu_to_le16(loop_id);
526			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
527			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
528			mrk24->vp_index = vha->vp_idx;
529			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
530		} else {
531			SET_TARGET_ID(ha, mrk->target, loop_id);
532			mrk->lun = cpu_to_le16((uint16_t)lun);
533		}
534	}
535	wmb();
536
537	qla2x00_start_iocbs(vha, req);
538
539	return (QLA_SUCCESS);
540}
541
542int
543qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
544		struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
545		uint8_t type)
546{
547	int ret;
548	unsigned long flags = 0;
549
550	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
551	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
552	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
553
554	return (ret);
555}
556
557/*
558 * qla2x00_issue_marker
559 *
560 * Issue a marker IOCB.
561 * The caller CAN hold the hardware lock, as indicated by the ha_locked parameter.
562 * The lock might be released and then reacquired.
563 */
564int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
565{
566	if (ha_locked) {
567		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
568					MK_SYNC_ALL) != QLA_SUCCESS)
569			return QLA_FUNCTION_FAILED;
570	} else {
571		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
572					MK_SYNC_ALL) != QLA_SUCCESS)
573			return QLA_FUNCTION_FAILED;
574	}
575	vha->marker_needed = 0;
576
577	return QLA_SUCCESS;
578}
579
580static inline int
581qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
582	uint16_t tot_dsds)
583{
584	uint32_t *cur_dsd = NULL;
585	scsi_qla_host_t	*vha;
586	struct qla_hw_data *ha;
587	struct scsi_cmnd *cmd;
588	struct	scatterlist *cur_seg;
589	uint32_t *dsd_seg;
590	void *next_dsd;
591	uint8_t avail_dsds;
592	uint8_t first_iocb = 1;
593	uint32_t dsd_list_len;
594	struct dsd_dma *dsd_ptr;
595	struct ct6_dsd *ctx;
596
597	cmd = GET_CMD_SP(sp);
598
599	/* Update entry type to indicate Command Type 6 IOCB */
600	*((uint32_t *)(&cmd_pkt->entry_type)) =
601		__constant_cpu_to_le32(COMMAND_TYPE_6);
602
603	/* No data transfer */
604	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
605		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
606		return 0;
607	}
608
609	vha = sp->fcport->vha;
610	ha = vha->hw;
611
612	/* Set transfer direction */
613	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
614		cmd_pkt->control_flags =
615		    __constant_cpu_to_le16(CF_WRITE_DATA);
616		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
617		vha->qla_stats.output_requests++;
618	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
619		cmd_pkt->control_flags =
620		    __constant_cpu_to_le16(CF_READ_DATA);
621		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
622		vha->qla_stats.input_requests++;
623	}
624
625	cur_seg = scsi_sglist(cmd);
626	ctx = GET_CMD_CTX_SP(sp);
627
628	while (tot_dsds) {
629		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
630		    QLA_DSDS_PER_IOCB : tot_dsds;
631		tot_dsds -= avail_dsds;
632		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
633
634		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
635		    struct dsd_dma, list);
636		next_dsd = dsd_ptr->dsd_addr;
637		list_del(&dsd_ptr->list);
638		ha->gbl_dsd_avail--;
639		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
640		ctx->dsd_use_cnt++;
641		ha->gbl_dsd_inuse++;
642
643		if (first_iocb) {
644			first_iocb = 0;
645			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
646			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
647			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
648			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
649		} else {
650			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
651			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
652			*cur_dsd++ = cpu_to_le32(dsd_list_len);
653		}
654		cur_dsd = (uint32_t *)next_dsd;
655		while (avail_dsds) {
656			dma_addr_t	sle_dma;
657
658			sle_dma = sg_dma_address(cur_seg);
659			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
660			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
661			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
662			cur_seg = sg_next(cur_seg);
663			avail_dsds--;
664		}
665	}
666
667	/* Null termination */
668	*cur_dsd++ =  0;
669	*cur_dsd++ = 0;
670	*cur_dsd++ = 0;
671	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
672	return 0;
673}
674
675/*
676 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
677 * for Command Type 6.
678 *
679 * @dsds: number of data segment descriptors needed
680 *
681 * Returns the number of DSD lists needed to store @dsds.
682 */
683inline uint16_t
684qla24xx_calc_dsd_lists(uint16_t dsds)
685{
686	uint16_t dsd_lists = 0;
687
688	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
689	if (dsds % QLA_DSDS_PER_IOCB)
690		dsd_lists++;
691	return dsd_lists;
692}
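
/*
 * Illustrative example: assuming QLA_DSDS_PER_IOCB is 37 (see qla_def.h),
 * a command with 100 data segments needs 100 / 37 = 2 full DSD lists plus
 * one more for the 26 left-over segments, so qla24xx_calc_dsd_lists(100)
 * returns 3.
 */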
693
694
695/**
696 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
697 * IOCB types.
698 *
699 * @sp: SRB command to process
700 * @cmd_pkt: Command type 7 IOCB
701 * @tot_dsds: Total number of segments to transfer
702 */
703inline void
704qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
705    uint16_t tot_dsds)
706{
707	uint16_t	avail_dsds;
708	uint32_t	*cur_dsd;
709	scsi_qla_host_t	*vha;
710	struct scsi_cmnd *cmd;
711	struct scatterlist *sg;
712	int i;
713	struct req_que *req;
714
715	cmd = GET_CMD_SP(sp);
716
717	/* Update entry type to indicate Command Type 7 IOCB */
718	*((uint32_t *)(&cmd_pkt->entry_type)) =
719	    __constant_cpu_to_le32(COMMAND_TYPE_7);
720
721	/* No data transfer */
722	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
723		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
724		return;
725	}
726
727	vha = sp->fcport->vha;
728	req = vha->req;
729
730	/* Set transfer direction */
731	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
732		cmd_pkt->task_mgmt_flags =
733		    __constant_cpu_to_le16(TMF_WRITE_DATA);
734		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
735		vha->qla_stats.output_requests++;
736	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
737		cmd_pkt->task_mgmt_flags =
738		    __constant_cpu_to_le16(TMF_READ_DATA);
739		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
740		vha->qla_stats.input_requests++;
741	}
742
743	/* One DSD is available in the Command Type 7 IOCB */
744	avail_dsds = 1;
745	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
746
747	/* Load data segments */
748
749	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
750		dma_addr_t	sle_dma;
751		cont_a64_entry_t *cont_pkt;
752
753		/* Allocate additional continuation packets? */
754		if (avail_dsds == 0) {
755			/*
756			 * Five DSDs are available in the Continuation
757			 * Type 1 IOCB.
758			 */
759			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
760			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
761			avail_dsds = 5;
762		}
763
764		sle_dma = sg_dma_address(sg);
765		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
766		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
767		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
768		avail_dsds--;
769	}
770}
771
772struct fw_dif_context {
773	uint32_t ref_tag;
774	uint16_t app_tag;
775	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
776	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
777};
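
/*
 * Each T10 DIF protection interval carries an 8-byte tuple of
 * { 2-byte guard, 2-byte application tag, 4-byte reference tag }.  The
 * masks in struct fw_dif_context tell the firmware which bytes of the
 * app/ref tags to validate or replace (0xff = check/replace that byte,
 * 0x00 = ignore it), as programmed by qla24xx_set_t10dif_tags() below.
 */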
778
779/*
780 * qla24xx_set_t10dif_tags() - Extract the Ref and App tags from the SCSI
781 * command and program them into the firmware DIF context.
782 */
783static inline void
784qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
785    unsigned int protcnt)
786{
787	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
788
789	switch (scsi_get_prot_type(cmd)) {
790	case SCSI_PROT_DIF_TYPE0:
791		/*
792		 * No check for ql2xenablehba_err_chk, as it would be an
793		 * I/O error if hba tag generation is not done.
794		 */
795		pkt->ref_tag = cpu_to_le32((uint32_t)
796		    (0xffffffff & scsi_get_lba(cmd)));
797
798		if (!qla2x00_hba_err_chk_enabled(sp))
799			break;
800
801		pkt->ref_tag_mask[0] = 0xff;
802		pkt->ref_tag_mask[1] = 0xff;
803		pkt->ref_tag_mask[2] = 0xff;
804		pkt->ref_tag_mask[3] = 0xff;
805		break;
806
807	/*
808	 * For TYPE 2 protection: a 16 bit GUARD tag plus a 32 bit REF tag
809	 * that has to match the LBA in the CDB + N.
810	 */
811	case SCSI_PROT_DIF_TYPE2:
812		pkt->app_tag = __constant_cpu_to_le16(0);
813		pkt->app_tag_mask[0] = 0x0;
814		pkt->app_tag_mask[1] = 0x0;
815
816		pkt->ref_tag = cpu_to_le32((uint32_t)
817		    (0xffffffff & scsi_get_lba(cmd)));
818
819		if (!qla2x00_hba_err_chk_enabled(sp))
820			break;
821
822		/* enable ALL bytes of the ref tag */
823		pkt->ref_tag_mask[0] = 0xff;
824		pkt->ref_tag_mask[1] = 0xff;
825		pkt->ref_tag_mask[2] = 0xff;
826		pkt->ref_tag_mask[3] = 0xff;
827		break;
828
829	/* For Type 3 protection: 16 bit GUARD only */
830	case SCSI_PROT_DIF_TYPE3:
831		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
832			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
833								0x00;
834		break;
835
836	/*
837	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
838	 * 16 bit app tag.
839	 */
840	case SCSI_PROT_DIF_TYPE1:
841		pkt->ref_tag = cpu_to_le32((uint32_t)
842		    (0xffffffff & scsi_get_lba(cmd)));
843		pkt->app_tag = __constant_cpu_to_le16(0);
844		pkt->app_tag_mask[0] = 0x0;
845		pkt->app_tag_mask[1] = 0x0;
846
847		if (!qla2x00_hba_err_chk_enabled(sp))
848			break;
849
850		/* enable ALL bytes of the ref tag */
851		pkt->ref_tag_mask[0] = 0xff;
852		pkt->ref_tag_mask[1] = 0xff;
853		pkt->ref_tag_mask[2] = 0xff;
854		pkt->ref_tag_mask[3] = 0xff;
855		break;
856	}
857}
858
859struct qla2_sgx {
860	dma_addr_t		dma_addr;	/* OUT */
861	uint32_t		dma_len;	/* OUT */
862
863	uint32_t		tot_bytes;	/* IN */
864	struct scatterlist	*cur_sg;	/* IN */
865
866	/* for bookkeeping; zeroed on initial invocation */
867	uint32_t		bytes_consumed;
868	uint32_t		num_bytes;
869	uint32_t		tot_partial;
870
871	/* for debugging */
872	uint32_t		num_sg;
873	srb_t			*sp;
874};
875
876static int
877qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
878	uint32_t *partial)
879{
880	struct scatterlist *sg;
881	uint32_t cumulative_partial, sg_len;
882	dma_addr_t sg_dma_addr;
883
884	if (sgx->num_bytes == sgx->tot_bytes)
885		return 0;
886
887	sg = sgx->cur_sg;
888	cumulative_partial = sgx->tot_partial;
889
890	sg_dma_addr = sg_dma_address(sg);
891	sg_len = sg_dma_len(sg);
892
893	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
894
895	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
896		sgx->dma_len = (blk_sz - cumulative_partial);
897		sgx->tot_partial = 0;
898		sgx->num_bytes += blk_sz;
899		*partial = 0;
900	} else {
901		sgx->dma_len = sg_len - sgx->bytes_consumed;
902		sgx->tot_partial += sgx->dma_len;
903		*partial = 1;
904	}
905
906	sgx->bytes_consumed += sgx->dma_len;
907
908	if (sg_len == sgx->bytes_consumed) {
909		sg = sg_next(sg);
910		sgx->num_sg++;
911		sgx->cur_sg = sg;
912		sgx->bytes_consumed = 0;
913	}
914
915	return 1;
916}
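
/*
 * Typical caller pattern (a sketch; the in-tree users are the DIF
 * start_scsi and sglist-walking paths below):
 *
 *	memset(&sgx, 0, sizeof(sgx));
 *	sgx.tot_bytes = scsi_bufflen(cmd);
 *	sgx.cur_sg = scsi_sglist(cmd);
 *	while (qla24xx_get_one_block_sg(blk_sz, &sgx, &partial))
 *		nseg++;
 *
 * Each iteration yields one DMA slice (sgx.dma_addr/sgx.dma_len) of at
 * most blk_sz bytes; the helper returns 0 once tot_bytes have been
 * consumed and sets *partial when the slice does not complete a block.
 */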
917
918int
919qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
920	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
921{
922	void *next_dsd;
923	uint8_t avail_dsds = 0;
924	uint32_t dsd_list_len;
925	struct dsd_dma *dsd_ptr;
926	struct scatterlist *sg_prot;
927	uint32_t *cur_dsd = dsd;
928	uint16_t	used_dsds = tot_dsds;
929
930	uint32_t	prot_int; /* protection interval */
931	uint32_t	partial;
932	struct qla2_sgx sgx;
933	dma_addr_t	sle_dma;
934	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
935	struct scsi_cmnd *cmd;
936	struct scsi_qla_host *vha;
937
938	memset(&sgx, 0, sizeof(struct qla2_sgx));
939	if (sp) {
940		vha = sp->fcport->vha;
941		cmd = GET_CMD_SP(sp);
942		prot_int = cmd->device->sector_size;
943
944		sgx.tot_bytes = scsi_bufflen(cmd);
945		sgx.cur_sg = scsi_sglist(cmd);
946		sgx.sp = sp;
947
948		sg_prot = scsi_prot_sglist(cmd);
949	} else if (tc) {
950		vha = tc->vha;
951		prot_int      = tc->blk_sz;
952		sgx.tot_bytes = tc->bufflen;
953		sgx.cur_sg    = tc->sg;
954		sg_prot	      = tc->prot_sg;
955	} else {
956		BUG();
957		return 1;
958	}
959
960	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
961
962		sle_dma = sgx.dma_addr;
963		sle_dma_len = sgx.dma_len;
964alloc_and_fill:
965		/* Allocate additional continuation packets? */
966		if (avail_dsds == 0) {
967			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
968					QLA_DSDS_PER_IOCB : used_dsds;
969			dsd_list_len = (avail_dsds + 1) * 12;
970			used_dsds -= avail_dsds;
971
972			/* allocate tracking DS */
973			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
974			if (!dsd_ptr)
975				return 1;
976
977			/* allocate new list */
978			dsd_ptr->dsd_addr = next_dsd =
979			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
980				&dsd_ptr->dsd_list_dma);
981
982			if (!next_dsd) {
983				/*
984				 * Need to cleanup only this dsd_ptr, rest
985				 * will be done by sp_free_dma()
986				 */
987				kfree(dsd_ptr);
988				return 1;
989			}
990
991			if (sp) {
992				list_add_tail(&dsd_ptr->list,
993				    &((struct crc_context *)
994					    sp->u.scmd.ctx)->dsd_list);
995
996				sp->flags |= SRB_CRC_CTX_DSD_VALID;
997			} else {
998				list_add_tail(&dsd_ptr->list,
999				    &(tc->ctx->dsd_list));
1000				tc->ctx_dsd_alloced = 1;
1001			}
1002
1003
1004			/* add new list to cmd iocb or last list */
1005			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1006			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1007			*cur_dsd++ = dsd_list_len;
1008			cur_dsd = (uint32_t *)next_dsd;
1009		}
1010		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1011		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1012		*cur_dsd++ = cpu_to_le32(sle_dma_len);
1013		avail_dsds--;
1014
1015		if (partial == 0) {
1016			/* Got a full protection interval */
1017			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
1018			sle_dma_len = 8;
1019
1020			tot_prot_dma_len += sle_dma_len;
1021			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
1022				tot_prot_dma_len = 0;
1023				sg_prot = sg_next(sg_prot);
1024			}
1025
1026			partial = 1; /* So as to not re-enter this block */
1027			goto alloc_and_fill;
1028		}
1029	}
1030	/* Null termination */
1031	*cur_dsd++ = 0;
1032	*cur_dsd++ = 0;
1033	*cur_dsd++ = 0;
1034	return 0;
1035}
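
/*
 * Note on the list geometry used by the sg-walking helpers above and
 * below: every DSD is 12 bytes (address low, address high, length), and
 * each dynamically allocated list is sized (avail_dsds + 1) * 12 so that
 * the final slot can hold either the pointer chaining to the next list
 * or the three zero words of the null terminator.
 */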
1036
1037int
1038qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1039	uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1040{
1041	void *next_dsd;
1042	uint8_t avail_dsds = 0;
1043	uint32_t dsd_list_len;
1044	struct dsd_dma *dsd_ptr;
1045	struct scatterlist *sg, *sgl;
1046	uint32_t *cur_dsd = dsd;
1047	int	i;
1048	uint16_t	used_dsds = tot_dsds;
1049	struct scsi_cmnd *cmd;
1050	struct scsi_qla_host *vha;
1051
1052	if (sp) {
1053		cmd = GET_CMD_SP(sp);
1054		sgl = scsi_sglist(cmd);
1055		vha = sp->fcport->vha;
1056	} else if (tc) {
1057		sgl = tc->sg;
1058		vha = tc->vha;
1059	} else {
1060		BUG();
1061		return 1;
1062	}
1063
1064
1065	for_each_sg(sgl, sg, tot_dsds, i) {
1066		dma_addr_t	sle_dma;
1067
1068		/* Allocate additional continuation packets? */
1069		if (avail_dsds == 0) {
1070			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1071					QLA_DSDS_PER_IOCB : used_dsds;
1072			dsd_list_len = (avail_dsds + 1) * 12;
1073			used_dsds -= avail_dsds;
1074
1075			/* allocate tracking DS */
1076			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1077			if (!dsd_ptr)
1078				return 1;
1079
1080			/* allocate new list */
1081			dsd_ptr->dsd_addr = next_dsd =
1082			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1083				&dsd_ptr->dsd_list_dma);
1084
1085			if (!next_dsd) {
1086				/*
1087				 * Need to cleanup only this dsd_ptr, rest
1088				 * will be done by sp_free_dma()
1089				 */
1090				kfree(dsd_ptr);
1091				return 1;
1092			}
1093
1094			if (sp) {
1095				list_add_tail(&dsd_ptr->list,
1096				    &((struct crc_context *)
1097					    sp->u.scmd.ctx)->dsd_list);
1098
1099				sp->flags |= SRB_CRC_CTX_DSD_VALID;
1100			} else {
1101				list_add_tail(&dsd_ptr->list,
1102				    &(tc->ctx->dsd_list));
1103				tc->ctx_dsd_alloced = 1;
1104			}
1105
1106			/* add new list to cmd iocb or last list */
1107			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1108			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1109			*cur_dsd++ = dsd_list_len;
1110			cur_dsd = (uint32_t *)next_dsd;
1111		}
1112		sle_dma = sg_dma_address(sg);
1113
1114		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1115		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1116		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1117		avail_dsds--;
1118
1119	}
1120	/* Null termination */
1121	*cur_dsd++ = 0;
1122	*cur_dsd++ = 0;
1123	*cur_dsd++ = 0;
1124	return 0;
1125}
1126
1127int
1128qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1129	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1130{
1131	void *next_dsd;
1132	uint8_t avail_dsds = 0;
1133	uint32_t dsd_list_len;
1134	struct dsd_dma *dsd_ptr;
1135	struct scatterlist *sg, *sgl;
1136	int	i;
1137	struct scsi_cmnd *cmd;
1138	uint32_t *cur_dsd = dsd;
1139	uint16_t used_dsds = tot_dsds;
1140	struct scsi_qla_host *vha;
1141
1142	if (sp) {
1143		cmd = GET_CMD_SP(sp);
1144		sgl = scsi_prot_sglist(cmd);
1145		vha = sp->fcport->vha;
1146	} else if (tc) {
1147		vha = tc->vha;
1148		sgl = tc->prot_sg;
1149	} else {
1150		BUG();
1151		return 1;
1152	}
1153
1154	ql_dbg(ql_dbg_tgt, vha, 0xe021,
1155		"%s: enter\n", __func__);
1156
1157	for_each_sg(sgl, sg, tot_dsds, i) {
1158		dma_addr_t	sle_dma;
1159
1160		/* Allocate additional continuation packets? */
1161		if (avail_dsds == 0) {
1162			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1163						QLA_DSDS_PER_IOCB : used_dsds;
1164			dsd_list_len = (avail_dsds + 1) * 12;
1165			used_dsds -= avail_dsds;
1166
1167			/* allocate tracking DS */
1168			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1169			if (!dsd_ptr)
1170				return 1;
1171
1172			/* allocate new list */
1173			dsd_ptr->dsd_addr = next_dsd =
1174			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1175				&dsd_ptr->dsd_list_dma);
1176
1177			if (!next_dsd) {
1178				/*
1179				 * Need to cleanup only this dsd_ptr, rest
1180				 * will be done by sp_free_dma()
1181				 */
1182				kfree(dsd_ptr);
1183				return 1;
1184			}
1185
1186			if (sp) {
1187				list_add_tail(&dsd_ptr->list,
1188				    &((struct crc_context *)
1189					    sp->u.scmd.ctx)->dsd_list);
1190
1191				sp->flags |= SRB_CRC_CTX_DSD_VALID;
1192			} else {
1193				list_add_tail(&dsd_ptr->list,
1194				    &(tc->ctx->dsd_list));
1195				tc->ctx_dsd_alloced = 1;
1196			}
1197
1198			/* add new list to cmd iocb or last list */
1199			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1200			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1201			*cur_dsd++ = dsd_list_len;
1202			cur_dsd = (uint32_t *)next_dsd;
1203		}
1204		sle_dma = sg_dma_address(sg);
1205
1206		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1207		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1208		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1209
1210		avail_dsds--;
1211	}
1212	/* Null termination */
1213	*cur_dsd++ = 0;
1214	*cur_dsd++ = 0;
1215	*cur_dsd++ = 0;
1216	return 0;
1217}
1218
1219/**
1220 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1221 *							Type CRC_2 IOCB types.
1222 *
1223 * @sp: SRB command to process
1224 * @cmd_pkt: Command type CRC_2 IOCB
1225 * @tot_dsds: Total number of data segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to be passed to the firmware
1226 */
1227static inline int
1228qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1229    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1230{
1231	uint32_t		*cur_dsd, *fcp_dl;
1232	scsi_qla_host_t		*vha;
1233	struct scsi_cmnd	*cmd;
1234	int			sgc;
1235	uint32_t		total_bytes = 0;
1236	uint32_t		data_bytes;
1237	uint32_t		dif_bytes;
1238	uint8_t			bundling = 1;
1239	uint16_t		blk_size;
1240	uint8_t			*clr_ptr;
1241	struct crc_context	*crc_ctx_pkt = NULL;
1242	struct qla_hw_data	*ha;
1243	uint8_t			additional_fcpcdb_len;
1244	uint16_t		fcp_cmnd_len;
1245	struct fcp_cmnd		*fcp_cmnd;
1246	dma_addr_t		crc_ctx_dma;
1247
1248	cmd = GET_CMD_SP(sp);
1249
1250	sgc = 0;
1251	/* Update entry type to indicate Command Type CRC_2 IOCB */
1252	*((uint32_t *)(&cmd_pkt->entry_type)) =
1253	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1254
1255	vha = sp->fcport->vha;
1256	ha = vha->hw;
1257
1258	/* No data transfer */
1259	data_bytes = scsi_bufflen(cmd);
1260	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1261		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1262		return QLA_SUCCESS;
1263	}
1264
1265	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1266
1267	/* Set transfer direction */
1268	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1269		cmd_pkt->control_flags =
1270		    __constant_cpu_to_le16(CF_WRITE_DATA);
1271	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1272		cmd_pkt->control_flags =
1273		    __constant_cpu_to_le16(CF_READ_DATA);
1274	}
1275
1276	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1277	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1278	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1279	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1280		bundling = 0;
1281
1282	/* Allocate CRC context from global pool */
1283	crc_ctx_pkt = sp->u.scmd.ctx =
1284	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1285
1286	if (!crc_ctx_pkt)
1287		goto crc_queuing_error;
1288
1289	/* Zero out CTX area. */
1290	clr_ptr = (uint8_t *)crc_ctx_pkt;
1291	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1292
1293	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1294
1295	sp->flags |= SRB_CRC_CTX_DMA_VALID;
1296
1297	/* Set handle */
1298	crc_ctx_pkt->handle = cmd_pkt->handle;
1299
1300	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1301
1302	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1303	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1304
1305	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1306	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1307	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1308
1309	/* Determine SCSI command length -- align to 4 byte boundary */
1310	if (cmd->cmd_len > 16) {
1311		additional_fcpcdb_len = cmd->cmd_len - 16;
1312		if ((cmd->cmd_len % 4) != 0) {
1313			/* SCSI cmd > 16 bytes must be multiple of 4 */
1314			goto crc_queuing_error;
1315		}
1316		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1317	} else {
1318		additional_fcpcdb_len = 0;
1319		fcp_cmnd_len = 12 + 16 + 4;
1320	}
1321
1322	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1323
1324	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1325	if (cmd->sc_data_direction == DMA_TO_DEVICE)
1326		fcp_cmnd->additional_cdb_len |= 1;
1327	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1328		fcp_cmnd->additional_cdb_len |= 2;
1329
1330	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1331	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1332	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1333	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1334	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1335	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1336	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1337	fcp_cmnd->task_management = 0;
1338	fcp_cmnd->task_attribute = TSK_SIMPLE;
1339
1340	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1341
1342	/* Compute dif len and adjust data len to include protection */
1343	dif_bytes = 0;
1344	blk_size = cmd->device->sector_size;
1345	dif_bytes = (data_bytes / blk_size) * 8;
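	/*
	 * Illustrative sizing, not taken from a trace: a 4096-byte transfer
	 * on a 512-byte-sector device spans 8 protection intervals, so
	 * dif_bytes = 8 * 8 = 64; for the PASS, READ_STRIP and WRITE_INSERT
	 * cases below the fibre channel byte count (total_bytes) becomes
	 * 4096 + 64 = 4160.
	 */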
1346
1347	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1348	case SCSI_PROT_READ_INSERT:
1349	case SCSI_PROT_WRITE_STRIP:
1350	    total_bytes = data_bytes;
1351	    data_bytes += dif_bytes;
1352	    break;
1353
1354	case SCSI_PROT_READ_STRIP:
1355	case SCSI_PROT_WRITE_INSERT:
1356	case SCSI_PROT_READ_PASS:
1357	case SCSI_PROT_WRITE_PASS:
1358	    total_bytes = data_bytes + dif_bytes;
1359	    break;
1360	default:
1361	    BUG();
1362	}
1363
1364	if (!qla2x00_hba_err_chk_enabled(sp))
1365		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1366	/* HBA error checking enabled */
1367	else if (IS_PI_UNINIT_CAPABLE(ha)) {
1368		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1369		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1370			SCSI_PROT_DIF_TYPE2))
1371			fw_prot_opts |= BIT_10;
1372		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1373		    SCSI_PROT_DIF_TYPE3)
1374			fw_prot_opts |= BIT_11;
1375	}
1376
1377	if (!bundling) {
1378		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1379	} else {
1380		/*
1381		 * Configure DIF bundling if protection data has to be
1382		 * fetched with interleaving PCI accesses.
1383		 */
1384		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1385		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1386		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1387							tot_prot_dsds);
1388		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1389	}
1390
1391	/* Finish the common fields of CRC pkt */
1392	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1393	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1394	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1395	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1396	/* Fibre channel byte count */
1397	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1398	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1399	    additional_fcpcdb_len);
1400	*fcp_dl = htonl(total_bytes);
1401
1402	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1403		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1404		return QLA_SUCCESS;
1405	}
1406	/* Walks data segments */
1407
1408	cmd_pkt->control_flags |=
1409	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1410
1411	if (!bundling && tot_prot_dsds) {
1412		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1413			cur_dsd, tot_dsds, NULL))
1414			goto crc_queuing_error;
1415	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1416			(tot_dsds - tot_prot_dsds), NULL))
1417		goto crc_queuing_error;
1418
1419	if (bundling && tot_prot_dsds) {
1420		/* Walks dif segments */
1421		cmd_pkt->control_flags |=
1422			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1423		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1424		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1425				tot_prot_dsds, NULL))
1426			goto crc_queuing_error;
1427	}
1428	return QLA_SUCCESS;
1429
1430crc_queuing_error:
1431	/* Cleanup will be performed by the caller */
1432
1433	return QLA_FUNCTION_FAILED;
1434}
1435
1436/**
1437 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1438 * @sp: command to send to the ISP
1439 *
1440 * Returns non-zero if a failure occurred, else zero.
1441 */
1442int
1443qla24xx_start_scsi(srb_t *sp)
1444{
1445	int		ret, nseg;
1446	unsigned long   flags;
1447	uint32_t	*clr_ptr;
1448	uint32_t        index;
1449	uint32_t	handle;
1450	struct cmd_type_7 *cmd_pkt;
1451	uint16_t	cnt;
1452	uint16_t	req_cnt;
1453	uint16_t	tot_dsds;
1454	struct req_que *req = NULL;
1455	struct rsp_que *rsp = NULL;
1456	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1457	struct scsi_qla_host *vha = sp->fcport->vha;
1458	struct qla_hw_data *ha = vha->hw;
1459
1460	/* Setup device pointers. */
1461	ret = 0;
1462
1463	qla25xx_set_que(sp, &rsp);
1464	req = vha->req;
1465
1466	/* So we know we haven't pci_map'ed anything yet */
1467	tot_dsds = 0;
1468
1469	/* Send marker if required */
1470	if (vha->marker_needed != 0) {
1471		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1472		    QLA_SUCCESS)
1473			return QLA_FUNCTION_FAILED;
1474		vha->marker_needed = 0;
1475	}
1476
1477	/* Acquire ring specific lock */
1478	spin_lock_irqsave(&ha->hardware_lock, flags);
1479
1480	/* Check for room in outstanding command list. */
1481	handle = req->current_outstanding_cmd;
1482	for (index = 1; index < req->num_outstanding_cmds; index++) {
1483		handle++;
1484		if (handle == req->num_outstanding_cmds)
1485			handle = 1;
1486		if (!req->outstanding_cmds[handle])
1487			break;
1488	}
1489	if (index == req->num_outstanding_cmds)
1490		goto queuing_error;
1491
1492	/* Map the sg table so we have an accurate count of sg entries needed */
1493	if (scsi_sg_count(cmd)) {
1494		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1495		    scsi_sg_count(cmd), cmd->sc_data_direction);
1496		if (unlikely(!nseg))
1497			goto queuing_error;
1498	} else
1499		nseg = 0;
1500
1501	tot_dsds = nseg;
1502	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1503	if (req->cnt < (req_cnt + 2)) {
1504		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1505		    RD_REG_DWORD_RELAXED(req->req_q_out);
1506		if (req->ring_index < cnt)
1507			req->cnt = cnt - req->ring_index;
1508		else
1509			req->cnt = req->length -
1510				(req->ring_index - cnt);
1511		if (req->cnt < (req_cnt + 2))
1512			goto queuing_error;
1513	}
1514
1515	/* Build command packet. */
1516	req->current_outstanding_cmd = handle;
1517	req->outstanding_cmds[handle] = sp;
1518	sp->handle = handle;
1519	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1520	req->cnt -= req_cnt;
1521
1522	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1523	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1524
1525	/* Zero out remaining portion of packet. */
1526	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1527	clr_ptr = (uint32_t *)cmd_pkt + 2;
1528	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1529	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1530
1531	/* Set NPORT-ID and LUN number*/
1532	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1533	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1534	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1535	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1536	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1537
1538	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1539	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1540
1541	cmd_pkt->task = TSK_SIMPLE;
1542
1543	/* Load SCSI command packet. */
1544	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1545	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1546
1547	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1548
1549	/* Build IOCB segments */
1550	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1551
1552	/* Set total data segment count. */
1553	cmd_pkt->entry_count = (uint8_t)req_cnt;
1554	/* Specify response queue number where completion should happen */
1555	cmd_pkt->entry_status = (uint8_t) rsp->id;
1556	wmb();
1557	/* Adjust ring index. */
1558	req->ring_index++;
1559	if (req->ring_index == req->length) {
1560		req->ring_index = 0;
1561		req->ring_ptr = req->ring;
1562	} else
1563		req->ring_ptr++;
1564
1565	sp->flags |= SRB_DMA_VALID;
1566
1567	/* Set chip new ring index. */
1568	WRT_REG_DWORD(req->req_q_in, req->ring_index);
1569	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1570
1571	/* Manage unprocessed RIO/ZIO commands in response queue. */
1572	if (vha->flags.process_response_queue &&
1573		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1574		qla24xx_process_response_queue(vha, rsp);
1575
1576	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1577	return QLA_SUCCESS;
1578
1579queuing_error:
1580	if (tot_dsds)
1581		scsi_dma_unmap(cmd);
1582
1583	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1584
1585	return QLA_FUNCTION_FAILED;
1586}
1587
1588/**
1589 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1590 * @sp: command to send to the ISP
1591 *
1592 * Returns non-zero if a failure occurred, else zero.
1593 */
1594int
1595qla24xx_dif_start_scsi(srb_t *sp)
1596{
1597	int			nseg;
1598	unsigned long		flags;
1599	uint32_t		*clr_ptr;
1600	uint32_t		index;
1601	uint32_t		handle;
1602	uint16_t		cnt;
1603	uint16_t		req_cnt = 0;
1604	uint16_t		tot_dsds;
1605	uint16_t		tot_prot_dsds;
1606	uint16_t		fw_prot_opts = 0;
1607	struct req_que		*req = NULL;
1608	struct rsp_que		*rsp = NULL;
1609	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
1610	struct scsi_qla_host	*vha = sp->fcport->vha;
1611	struct qla_hw_data	*ha = vha->hw;
1612	struct cmd_type_crc_2	*cmd_pkt;
1613	uint32_t		status = 0;
1614
1615#define QDSS_GOT_Q_SPACE	BIT_0
1616
1617	/* Only process protection I/O or CDBs longer than 16 bytes in this routine */
1618	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1619		if (cmd->cmd_len <= 16)
1620			return qla24xx_start_scsi(sp);
1621	}
1622
1623	/* Setup device pointers. */
1624
1625	qla25xx_set_que(sp, &rsp);
1626	req = vha->req;
1627
1628	/* So we know we haven't pci_map'ed anything yet */
1629	tot_dsds = 0;
1630
1631	/* Send marker if required */
1632	if (vha->marker_needed != 0) {
1633		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1634		    QLA_SUCCESS)
1635			return QLA_FUNCTION_FAILED;
1636		vha->marker_needed = 0;
1637	}
1638
1639	/* Acquire ring specific lock */
1640	spin_lock_irqsave(&ha->hardware_lock, flags);
1641
1642	/* Check for room in outstanding command list. */
1643	handle = req->current_outstanding_cmd;
1644	for (index = 1; index < req->num_outstanding_cmds; index++) {
1645		handle++;
1646		if (handle == req->num_outstanding_cmds)
1647			handle = 1;
1648		if (!req->outstanding_cmds[handle])
1649			break;
1650	}
1651
1652	if (index == req->num_outstanding_cmds)
1653		goto queuing_error;
1654
1655	/* Compute number of required data segments */
1656	/* Map the sg table so we have an accurate count of sg entries needed */
1657	if (scsi_sg_count(cmd)) {
1658		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1659		    scsi_sg_count(cmd), cmd->sc_data_direction);
1660		if (unlikely(!nseg))
1661			goto queuing_error;
1662		else
1663			sp->flags |= SRB_DMA_VALID;
1664
1665		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1666		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1667			struct qla2_sgx sgx;
1668			uint32_t	partial;
1669
1670			memset(&sgx, 0, sizeof(struct qla2_sgx));
1671			sgx.tot_bytes = scsi_bufflen(cmd);
1672			sgx.cur_sg = scsi_sglist(cmd);
1673			sgx.sp = sp;
1674
1675			nseg = 0;
1676			while (qla24xx_get_one_block_sg(
1677			    cmd->device->sector_size, &sgx, &partial))
1678				nseg++;
1679		}
1680	} else
1681		nseg = 0;
1682
1683	/* number of required data segments */
1684	tot_dsds = nseg;
1685
1686	/* Compute number of required protection segments */
1687	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1688		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1689		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1690		if (unlikely(!nseg))
1691			goto queuing_error;
1692		else
1693			sp->flags |= SRB_CRC_PROT_DMA_VALID;
1694
1695		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1696		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1697			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1698		}
1699	} else {
1700		nseg = 0;
1701	}
1702
1703	req_cnt = 1;
1704	/* Total Data and protection sg segment(s) */
1705	tot_prot_dsds = nseg;
1706	tot_dsds += nseg;
1707	if (req->cnt < (req_cnt + 2)) {
1708		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1709		    RD_REG_DWORD_RELAXED(req->req_q_out);
1710		if (req->ring_index < cnt)
1711			req->cnt = cnt - req->ring_index;
1712		else
1713			req->cnt = req->length -
1714				(req->ring_index - cnt);
1715		if (req->cnt < (req_cnt + 2))
1716			goto queuing_error;
1717	}
1718
1719	status |= QDSS_GOT_Q_SPACE;
1720
1721	/* Build header part of command packet (excluding the OPCODE). */
1722	req->current_outstanding_cmd = handle;
1723	req->outstanding_cmds[handle] = sp;
1724	sp->handle = handle;
1725	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1726	req->cnt -= req_cnt;
1727
1728	/* Fill-in common area */
1729	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1730	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1731
1732	clr_ptr = (uint32_t *)cmd_pkt + 2;
1733	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1734
1735	/* Set NPORT-ID and LUN number*/
1736	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1737	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1738	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1739	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1740
1741	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1742	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1743
1744	/* Total Data and protection segment(s) */
1745	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1746
1747	/* Build IOCB segments and adjust for data protection segments */
1748	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1749	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1750		QLA_SUCCESS)
1751		goto queuing_error;
1752
1753	cmd_pkt->entry_count = (uint8_t)req_cnt;
1754	/* Specify response queue number where completion should happen */
1755	cmd_pkt->entry_status = (uint8_t) rsp->id;
1756	cmd_pkt->timeout = __constant_cpu_to_le16(0);
1757	wmb();
1758
1759	/* Adjust ring index. */
1760	req->ring_index++;
1761	if (req->ring_index == req->length) {
1762		req->ring_index = 0;
1763		req->ring_ptr = req->ring;
1764	} else
1765		req->ring_ptr++;
1766
1767	/* Set chip new ring index. */
1768	WRT_REG_DWORD(req->req_q_in, req->ring_index);
1769	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1770
1771	/* Manage unprocessed RIO/ZIO commands in response queue. */
1772	if (vha->flags.process_response_queue &&
1773	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1774		qla24xx_process_response_queue(vha, rsp);
1775
1776	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1777
1778	return QLA_SUCCESS;
1779
1780queuing_error:
1781	if (status & QDSS_GOT_Q_SPACE) {
1782		req->outstanding_cmds[handle] = NULL;
1783		req->cnt += req_cnt;
1784	}
1785	/* Cleanup will be performed by the caller (queuecommand) */
1786
1787	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1788	return QLA_FUNCTION_FAILED;
1789}
1790
1791
1792static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1793{
1794	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1795	struct qla_hw_data *ha = sp->fcport->vha->hw;
1796	int affinity = cmd->request->cpu;
1797
1798	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1799		affinity < ha->max_rsp_queues - 1)
1800		*rsp = ha->rsp_q_map[affinity + 1];
1801	 else
1802		*rsp = ha->rsp_q_map[0];
1803}
1804
1805/* Generic Control-SRB manipulation functions. */
1806
1807/* hardware_lock assumed to be held. */
1808void *
1809qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
1810{
1811	if (qla2x00_reset_active(vha))
1812		return NULL;
1813
1814	return qla2x00_alloc_iocbs(vha, sp);
1815}
1816
1817void *
1818qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1819{
1820	struct qla_hw_data *ha = vha->hw;
1821	struct req_que *req = ha->req_q_map[0];
1822	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1823	uint32_t index, handle;
1824	request_t *pkt;
1825	uint16_t cnt, req_cnt;
1826
1827	pkt = NULL;
1828	req_cnt = 1;
1829	handle = 0;
1830
1831	if (!sp)
1832		goto skip_cmd_array;
1833
1834	/* Check for room in outstanding command list. */
1835	handle = req->current_outstanding_cmd;
1836	for (index = 1; index < req->num_outstanding_cmds; index++) {
1837		handle++;
1838		if (handle == req->num_outstanding_cmds)
1839			handle = 1;
1840		if (!req->outstanding_cmds[handle])
1841			break;
1842	}
1843	if (index == req->num_outstanding_cmds) {
1844		ql_log(ql_log_warn, vha, 0x700b,
1845		    "No room on outstanding cmd array.\n");
1846		goto queuing_error;
1847	}
1848
1849	/* Prep command array. */
1850	req->current_outstanding_cmd = handle;
1851	req->outstanding_cmds[handle] = sp;
1852	sp->handle = handle;
1853
1854	/* Adjust entry-counts as needed. */
1855	if (sp->type != SRB_SCSI_CMD)
1856		req_cnt = sp->iocbs;
1857
1858skip_cmd_array:
1859	/* Check for room on request queue. */
1860	if (req->cnt < req_cnt + 2) {
1861		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
1862			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1863		else if (IS_P3P_TYPE(ha))
1864			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1865		else if (IS_FWI2_CAPABLE(ha))
1866			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1867		else if (IS_QLAFX00(ha))
1868			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1869		else
1870			cnt = qla2x00_debounce_register(
1871			    ISP_REQ_Q_OUT(ha, &reg->isp));
1872
1873		if  (req->ring_index < cnt)
1874			req->cnt = cnt - req->ring_index;
1875		else
1876			req->cnt = req->length -
1877			    (req->ring_index - cnt);
1878	}
1879	if (req->cnt < req_cnt + 2)
1880		goto queuing_error;
1881
1882	/* Prep packet */
1883	req->cnt -= req_cnt;
1884	pkt = req->ring_ptr;
1885	memset(pkt, 0, REQUEST_ENTRY_SIZE);
1886	if (IS_QLAFX00(ha)) {
1887		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
1888		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
1889	} else {
1890		pkt->entry_count = req_cnt;
1891		pkt->handle = handle;
1892	}
1893
1894queuing_error:
1895	return pkt;
1896}
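
/*
 * Typical control-SRB usage (a sketch; assumes the caller already holds
 * hardware_lock, as noted above qla2x00_alloc_iocbs_ready()):
 *
 *	pkt = qla2x00_alloc_iocbs(vha, sp);
 *	if (!pkt)
 *		goto fail;
 *	... fill in the type-specific fields of *pkt ...
 *	wmb();
 *	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
 */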
1897
1898static void
1899qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1900{
1901	struct srb_iocb *lio = &sp->u.iocb_cmd;
1902
1903	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1904	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1905	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1906		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1907	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1908		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1909	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1910	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1911	logio->port_id[1] = sp->fcport->d_id.b.area;
1912	logio->port_id[2] = sp->fcport->d_id.b.domain;
1913	logio->vp_index = sp->fcport->vha->vp_idx;
1914}
1915
1916static void
1917qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1918{
1919	struct qla_hw_data *ha = sp->fcport->vha->hw;
1920	struct srb_iocb *lio = &sp->u.iocb_cmd;
1921	uint16_t opts;
1922
1923	mbx->entry_type = MBX_IOCB_TYPE;
1924	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1925	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1926	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1927	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1928	if (HAS_EXTENDED_IDS(ha)) {
1929		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1930		mbx->mb10 = cpu_to_le16(opts);
1931	} else {
1932		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1933	}
1934	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1935	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1936	    sp->fcport->d_id.b.al_pa);
1937	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1938}
1939
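/**
 * qla24xx_logout_iocb() - Build a Login/Logout Port IOCB for a LOGO.
 * @sp: SRB carrying the logout request
 * @logio: Login/Logout Port IOCB to populate
 *
 * The N_Port handle is freed unless the target session requests that
 * it be kept.
 */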
1940static void
1941qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1942{
1943	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1944	logio->control_flags =
1945	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1946	if (!sp->fcport->tgt_session ||
1947	    !sp->fcport->tgt_session->keep_nport_handle)
1948		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
1949	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1950	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1951	logio->port_id[1] = sp->fcport->d_id.b.area;
1952	logio->port_id[2] = sp->fcport->d_id.b.domain;
1953	logio->vp_index = sp->fcport->vha->vp_idx;
1954}
1955
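/**
 * qla2x00_logout_iocb() - Build an MBC_LOGOUT_FABRIC_PORT mailbox IOCB.
 * @sp: SRB carrying the logout request
 * @mbx: Mailbox Command IOCB to populate
 */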
1956static void
1957qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1958{
1959	struct qla_hw_data *ha = sp->fcport->vha->hw;
1960
1961	mbx->entry_type = MBX_IOCB_TYPE;
1962	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1963	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1964	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1965	    cpu_to_le16(sp->fcport->loop_id):
1966	    cpu_to_le16(sp->fcport->loop_id << 8);
1967	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1968	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1969	    sp->fcport->d_id.b.al_pa);
1970	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1971	/* Implicit: mbx->mb10 = 0. */
1972}
1973
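/**
 * qla24xx_adisc_iocb() - Build a Login/Logout Port IOCB for an ADISC.
 * @sp: SRB carrying the ADISC request
 * @logio: Login/Logout Port IOCB to populate
 */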
1974static void
1975qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1976{
1977	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1978	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1979	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1980	logio->vp_index = sp->fcport->vha->vp_idx;
1981}
1982
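/**
 * qla2x00_adisc_iocb() - Build an MBC_GET_PORT_DATABASE mailbox IOCB.
 * @sp: SRB carrying the ADISC request
 * @mbx: Mailbox Command IOCB to populate
 *
 * The port database is DMAed to the buffer at ha->async_pd_dma.
 */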
1983static void
1984qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1985{
1986	struct qla_hw_data *ha = sp->fcport->vha->hw;
1987
1988	mbx->entry_type = MBX_IOCB_TYPE;
1989	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1990	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1991	if (HAS_EXTENDED_IDS(ha)) {
1992		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1993		mbx->mb10 = cpu_to_le16(BIT_0);
1994	} else {
1995		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1996	}
1997	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1998	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1999	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2000	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2001	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
2002}
2003
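/**
 * qla24xx_tm_iocb() - Build a Task Management IOCB.
 * @sp: SRB carrying the task management request
 * @tsk: Task Management IOCB to populate
 *
 * For a LUN reset the LUN is copied into the IOCB in FCP (big-endian)
 * byte order.
 */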
2004static void
2005qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2006{
2007	uint32_t flags;
2008	uint64_t lun;
2009	struct fc_port *fcport = sp->fcport;
2010	scsi_qla_host_t *vha = fcport->vha;
2011	struct qla_hw_data *ha = vha->hw;
2012	struct srb_iocb *iocb = &sp->u.iocb_cmd;
2013	struct req_que *req = vha->req;
2014
2015	flags = iocb->u.tmf.flags;
2016	lun = iocb->u.tmf.lun;
2017
2018	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2019	tsk->entry_count = 1;
2020	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2021	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2022	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2023	tsk->control_flags = cpu_to_le32(flags);
2024	tsk->port_id[0] = fcport->d_id.b.al_pa;
2025	tsk->port_id[1] = fcport->d_id.b.area;
2026	tsk->port_id[2] = fcport->d_id.b.domain;
2027	tsk->vp_index = fcport->vha->vp_idx;
2028
2029	if (flags == TCF_LUN_RESET) {
2030		int_to_scsilun(lun, &tsk->lun);
2031		host_to_fcp_swap((uint8_t *)&tsk->lun,
2032			sizeof(tsk->lun));
2033	}
2034}
2035
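/**
 * qla24xx_els_iocb() - Build an ELS Pass-through IOCB for a BSG request.
 * @sp: SRB carrying the BSG job
 * @els_iocb: ELS IOCB to populate
 *
 * The transmit and receive addresses and lengths are taken from the first
 * entries of the BSG job's request and reply scatter/gather lists.
 */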
2036static void
2037qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2038{
2039	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2040
2041	els_iocb->entry_type = ELS_IOCB_TYPE;
2042	els_iocb->entry_count = 1;
2043	els_iocb->sys_define = 0;
2044	els_iocb->entry_status = 0;
2045	els_iocb->handle = sp->handle;
2046	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2047	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2048	els_iocb->vp_index = sp->fcport->vha->vp_idx;
2049	els_iocb->sof_type = EST_SOFI3;
2050	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2051
2052	els_iocb->opcode =
2053	    sp->type == SRB_ELS_CMD_RPT ?
2054	    bsg_job->request->rqst_data.r_els.els_code :
2055	    bsg_job->request->rqst_data.h_els.command_code;
2056	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2057	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2058	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2059	els_iocb->control_flags = 0;
2060	els_iocb->rx_byte_count =
2061	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2062	els_iocb->tx_byte_count =
2063	    cpu_to_le32(bsg_job->request_payload.payload_len);
2064
2065	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2066	    (bsg_job->request_payload.sg_list)));
2067	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2068	    (bsg_job->request_payload.sg_list)));
2069	els_iocb->tx_len = cpu_to_le32(sg_dma_len
2070	    (bsg_job->request_payload.sg_list));
2071
2072	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2073	    (bsg_job->reply_payload.sg_list)));
2074	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2075	    (bsg_job->reply_payload.sg_list)));
2076	els_iocb->rx_len = cpu_to_le32(sg_dma_len
2077	    (bsg_job->reply_payload.sg_list));
2078
2079	sp->fcport->vha->qla_stats.control_requests++;
2080}
2081
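/**
 * qla2x00_ct_iocb() - Build a CT Pass-through (MS) IOCB for a BSG request.
 * @sp: SRB carrying the BSG job
 * @ct_iocb: MS IOCB to populate
 *
 * Reply-payload segments beyond the first are placed in Continuation
 * Type 1 IOCBs.
 */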
2082static void
2083qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2084{
2085	uint16_t        avail_dsds;
2086	uint32_t        *cur_dsd;
2087	struct scatterlist *sg;
2088	int index;
2089	uint16_t tot_dsds;
2090	scsi_qla_host_t *vha = sp->fcport->vha;
2091	struct qla_hw_data *ha = vha->hw;
2092	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2093	int loop_iteration = 0;
2094	int cont_iocb_prsnt = 0;
2095	int entry_count = 1;
2096
2097	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2098	ct_iocb->entry_type = CT_IOCB_TYPE;
2099	ct_iocb->entry_status = 0;
2100	ct_iocb->handle1 = sp->handle;
2101	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2102	ct_iocb->status = __constant_cpu_to_le16(0);
2103	ct_iocb->control_flags = __constant_cpu_to_le16(0);
2104	ct_iocb->timeout = 0;
2105	ct_iocb->cmd_dsd_count =
2106	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2107	ct_iocb->total_dsd_count =
2108	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2109	ct_iocb->req_bytecount =
2110	    cpu_to_le32(bsg_job->request_payload.payload_len);
2111	ct_iocb->rsp_bytecount =
2112	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2113
2114	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2115	    (bsg_job->request_payload.sg_list)));
2116	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2117	    (bsg_job->request_payload.sg_list)));
2118	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2119
2120	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2121	    (bsg_job->reply_payload.sg_list)));
2122	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2123	    (bsg_job->reply_payload.sg_list)));
2124	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2125
2126	avail_dsds = 1;
2127	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2128	index = 0;
2129	tot_dsds = bsg_job->reply_payload.sg_cnt;
2130
2131	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2132		dma_addr_t       sle_dma;
2133		cont_a64_entry_t *cont_pkt;
2134
2135		/* Allocate additional continuation packets? */
2136		if (avail_dsds == 0) {
2137			/*
2138			 * Five DSDs are available in the Cont.
2139			 * Type 1 IOCB.
2140			 */
2141			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2142			    vha->hw->req_q_map[0]);
2143			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2144			avail_dsds = 5;
2145			cont_iocb_prsnt = 1;
2146			entry_count++;
2147		}
2148
2149		sle_dma = sg_dma_address(sg);
2150		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2151		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2152		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2153		loop_iteration++;
2154		avail_dsds--;
2155	}
2156	ct_iocb->entry_count = entry_count;
2157
2158	sp->fcport->vha->qla_stats.control_requests++;
2159}
2160
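/**
 * qla24xx_ct_iocb() - Build a CT Pass-through IOCB for a BSG request.
 * @sp: SRB carrying the BSG job
 * @ct_iocb: CT IOCB to populate
 *
 * Reply-payload segments beyond the first are placed in Continuation
 * Type 1 IOCBs.
 */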
2161static void
2162qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2163{
2164	uint16_t        avail_dsds;
2165	uint32_t        *cur_dsd;
2166	struct scatterlist *sg;
2167	int index;
2168	uint16_t tot_dsds;
2169	scsi_qla_host_t *vha = sp->fcport->vha;
2170	struct qla_hw_data *ha = vha->hw;
2171	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2172	int loop_iteration = 0;
2173	int cont_iocb_prsnt = 0;
2174	int entry_count = 1;
2175
2176	ct_iocb->entry_type = CT_IOCB_TYPE;
2177	ct_iocb->entry_status = 0;
2178	ct_iocb->sys_define = 0;
2179	ct_iocb->handle = sp->handle;
2180
2181	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2182	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2183	ct_iocb->comp_status = __constant_cpu_to_le16(0);
2184
2185	ct_iocb->cmd_dsd_count =
2186	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2187	ct_iocb->timeout = 0;
2188	ct_iocb->rsp_dsd_count =
2189	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2190	ct_iocb->rsp_byte_count =
2191	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2192	ct_iocb->cmd_byte_count =
2193	    cpu_to_le32(bsg_job->request_payload.payload_len);
2194	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2195	    (bsg_job->request_payload.sg_list)));
2196	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2197	    (bsg_job->request_payload.sg_list)));
2198	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2199	    (bsg_job->request_payload.sg_list));
2200
2201	avail_dsds = 1;
2202	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2203	index = 0;
2204	tot_dsds = bsg_job->reply_payload.sg_cnt;
2205
2206	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2207		dma_addr_t       sle_dma;
2208		cont_a64_entry_t *cont_pkt;
2209
2210		/* Allocate additional continuation packets? */
2211		if (avail_dsds == 0) {
2212			/*
2213			 * Five DSDs are available in the Cont.
2214			 * Type 1 IOCB.
2215			 */
2216			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2217			    ha->req_q_map[0]);
2218			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2219			avail_dsds = 5;
2220			cont_iocb_prsnt = 1;
2221			entry_count++;
2222		}
2223
2224		sle_dma = sg_dma_address(sg);
2225		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2226		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2227		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2228		loop_iteration++;
2229		avail_dsds--;
2230	}
2231	ct_iocb->entry_count = entry_count;
2232}
2233
2234/**
2235 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2236 * @sp: command to send to the ISP
2237 *
2238 * Returns non-zero if a failure occurred, else zero.
2239 */
2240int
2241qla82xx_start_scsi(srb_t *sp)
2242{
2243	int		ret, nseg;
2244	unsigned long   flags;
2245	struct scsi_cmnd *cmd;
2246	uint32_t	*clr_ptr;
2247	uint32_t        index;
2248	uint32_t	handle;
2249	uint16_t	cnt;
2250	uint16_t	req_cnt;
2251	uint16_t	tot_dsds;
2252	struct device_reg_82xx __iomem *reg;
2253	uint32_t dbval;
2254	uint32_t *fcp_dl;
2255	uint8_t additional_cdb_len;
2256	struct ct6_dsd *ctx;
2257	struct scsi_qla_host *vha = sp->fcport->vha;
2258	struct qla_hw_data *ha = vha->hw;
2259	struct req_que *req = NULL;
2260	struct rsp_que *rsp = NULL;
2261
2262	/* Setup device pointers. */
2263	ret = 0;
2264	reg = &ha->iobase->isp82;
2265	cmd = GET_CMD_SP(sp);
2266	req = vha->req;
2267	rsp = ha->rsp_q_map[0];
2268
2269	/* So we know we haven't pci_map'ed anything yet */
2270	tot_dsds = 0;
2271
2272	dbval = 0x04 | (ha->portnum << 5);
2273
2274	/* Send marker if required */
2275	if (vha->marker_needed != 0) {
2276		if (qla2x00_marker(vha, req,
2277			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2278			ql_log(ql_log_warn, vha, 0x300c,
2279			    "qla2x00_marker failed for cmd=%p.\n", cmd);
2280			return QLA_FUNCTION_FAILED;
2281		}
2282		vha->marker_needed = 0;
2283	}
2284
2285	/* Acquire ring specific lock */
2286	spin_lock_irqsave(&ha->hardware_lock, flags);
2287
2288	/* Check for room in outstanding command list. */
2289	handle = req->current_outstanding_cmd;
2290	for (index = 1; index < req->num_outstanding_cmds; index++) {
2291		handle++;
2292		if (handle == req->num_outstanding_cmds)
2293			handle = 1;
2294		if (!req->outstanding_cmds[handle])
2295			break;
2296	}
2297	if (index == req->num_outstanding_cmds)
2298		goto queuing_error;
2299
2300	/* Map the sg table so we have an accurate count of sg entries needed */
2301	if (scsi_sg_count(cmd)) {
2302		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2303		    scsi_sg_count(cmd), cmd->sc_data_direction);
2304		if (unlikely(!nseg))
2305			goto queuing_error;
2306	} else
2307		nseg = 0;
2308
2309	tot_dsds = nseg;
2310
2311	if (tot_dsds > ql2xshiftctondsd) {
2312		struct cmd_type_6 *cmd_pkt;
2313		uint16_t more_dsd_lists = 0;
2314		struct dsd_dma *dsd_ptr;
2315		uint16_t i;
2316
2317		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2318		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2319			ql_dbg(ql_dbg_io, vha, 0x300d,
2320			    "Num of DSD list %d is more than %d for cmd=%p.\n",
2321			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2322			    cmd);
2323			goto queuing_error;
2324		}
2325
2326		if (more_dsd_lists <= ha->gbl_dsd_avail)
2327			goto sufficient_dsds;
2328		else
2329			more_dsd_lists -= ha->gbl_dsd_avail;
2330
2331		for (i = 0; i < more_dsd_lists; i++) {
2332			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2333			if (!dsd_ptr) {
2334				ql_log(ql_log_fatal, vha, 0x300e,
2335				    "Failed to allocate memory for dsd_dma "
2336				    "for cmd=%p.\n", cmd);
2337				goto queuing_error;
2338			}
2339
2340			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2341				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2342			if (!dsd_ptr->dsd_addr) {
2343				kfree(dsd_ptr);
2344				ql_log(ql_log_fatal, vha, 0x300f,
2345				    "Failed to allocate memory for dsd_addr "
2346				    "for cmd=%p.\n", cmd);
2347				goto queuing_error;
2348			}
2349			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2350			ha->gbl_dsd_avail++;
2351		}
2352
2353sufficient_dsds:
2354		req_cnt = 1;
2355
2356		if (req->cnt < (req_cnt + 2)) {
2357			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2358				&reg->req_q_out[0]);
2359			if (req->ring_index < cnt)
2360				req->cnt = cnt - req->ring_index;
2361			else
2362				req->cnt = req->length -
2363					(req->ring_index - cnt);
2364			if (req->cnt < (req_cnt + 2))
2365				goto queuing_error;
2366		}
2367
2368		ctx = sp->u.scmd.ctx =
2369		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2370		if (!ctx) {
2371			ql_log(ql_log_fatal, vha, 0x3010,
2372			    "Failed to allocate ctx for cmd=%p.\n", cmd);
2373			goto queuing_error;
2374		}
2375
2376		memset(ctx, 0, sizeof(struct ct6_dsd));
2377		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2378			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2379		if (!ctx->fcp_cmnd) {
2380			ql_log(ql_log_fatal, vha, 0x3011,
2381			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2382			goto queuing_error;
2383		}
2384
2385		/* Initialize the DSD list and dma handle */
2386		INIT_LIST_HEAD(&ctx->dsd_list);
2387		ctx->dsd_use_cnt = 0;
2388
2389		if (cmd->cmd_len > 16) {
2390			additional_cdb_len = cmd->cmd_len - 16;
2391			if ((cmd->cmd_len % 4) != 0) {
2392				/* A SCSI command longer than 16 bytes must
2393				 * be a multiple of 4 bytes in length.
2394				 */
2395				ql_log(ql_log_warn, vha, 0x3012,
2396				    "scsi cmd len %d not multiple of 4 "
2397				    "for cmd=%p.\n", cmd->cmd_len, cmd);
2398				goto queuing_error_fcp_cmnd;
2399			}
2400			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2401		} else {
2402			additional_cdb_len = 0;
2403			ctx->fcp_cmnd_len = 12 + 16 + 4;
2404		}
2405
2406		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2407		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2408
2409		/* Zero out remaining portion of packet. */
2410		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2411		clr_ptr = (uint32_t *)cmd_pkt + 2;
2412		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2413		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2414
2415		/* Set NPORT-ID and LUN number */
2416		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2417		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2418		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2419		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2420		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2421
2422		/* Build IOCB segments */
2423		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2424			goto queuing_error_fcp_cmnd;
2425
2426		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2427		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2428
2429		/* build FCP_CMND IU */
2430		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2431		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2432		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2433
2434		if (cmd->sc_data_direction == DMA_TO_DEVICE)
2435			ctx->fcp_cmnd->additional_cdb_len |= 1;
2436		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2437			ctx->fcp_cmnd->additional_cdb_len |= 2;
2438
2439		/* Populate the FCP_PRIO. */
2440		if (ha->flags.fcp_prio_enabled)
2441			ctx->fcp_cmnd->task_attribute |=
2442			    sp->fcport->fcp_prio << 3;
2443
2444		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2445
2446		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2447		    additional_cdb_len);
2448		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2449
2450		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2451		cmd_pkt->fcp_cmnd_dseg_address[0] =
2452		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2453		cmd_pkt->fcp_cmnd_dseg_address[1] =
2454		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2455
2456		sp->flags |= SRB_FCP_CMND_DMA_VALID;
2457		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2458		/* Set total data segment count. */
2459		cmd_pkt->entry_count = (uint8_t)req_cnt;
2460		/* Specify response queue number where
2461		 * completion should happen
2462		 */
2463		cmd_pkt->entry_status = (uint8_t) rsp->id;
2464	} else {
2465		struct cmd_type_7 *cmd_pkt;
2466		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2467		if (req->cnt < (req_cnt + 2)) {
2468			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2469			    &reg->req_q_out[0]);
2470			if (req->ring_index < cnt)
2471				req->cnt = cnt - req->ring_index;
2472			else
2473				req->cnt = req->length -
2474					(req->ring_index - cnt);
2475		}
2476		if (req->cnt < (req_cnt + 2))
2477			goto queuing_error;
2478
2479		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2480		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2481
2482		/* Zero out remaining portion of packet. */
2483		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2484		clr_ptr = (uint32_t *)cmd_pkt + 2;
2485		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2486		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2487
2488		/* Set NPORT-ID and LUN number */
2489		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2490		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2491		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2492		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2493		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2494
2495		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2496		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2497		    sizeof(cmd_pkt->lun));
2498
2499		/* Populate the FCP_PRIO. */
2500		if (ha->flags.fcp_prio_enabled)
2501			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2502
2503		/* Load SCSI command packet. */
2504		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2505		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2506
2507		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2508
2509		/* Build IOCB segments */
2510		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2511
2512		/* Set total data segment count. */
2513		cmd_pkt->entry_count = (uint8_t)req_cnt;
2514		/* Specify response queue number where
2515		 * completion should happen.
2516		 */
2517		cmd_pkt->entry_status = (uint8_t) rsp->id;
2518
2519	}
2520	/* Build command packet. */
2521	req->current_outstanding_cmd = handle;
2522	req->outstanding_cmds[handle] = sp;
2523	sp->handle = handle;
2524	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2525	req->cnt -= req_cnt;
2526	wmb();
2527
2528	/* Adjust ring index. */
2529	req->ring_index++;
2530	if (req->ring_index == req->length) {
2531		req->ring_index = 0;
2532		req->ring_ptr = req->ring;
2533	} else
2534		req->ring_ptr++;
2535
2536	sp->flags |= SRB_DMA_VALID;
2537
2538	/* Set chip new ring index. */
2539	/* write, read and verify logic */
2540	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2541	if (ql2xdbwr)
2542		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2543	else {
2544		WRT_REG_DWORD(
2545			(unsigned long __iomem *)ha->nxdb_wr_ptr,
2546			dbval);
2547		wmb();
2548		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
2549			WRT_REG_DWORD(
2550				(unsigned long __iomem *)ha->nxdb_wr_ptr,
2551				dbval);
2552			wmb();
2553		}
2554	}
2555
2556	/* Manage unprocessed RIO/ZIO commands in response queue. */
2557	if (vha->flags.process_response_queue &&
2558	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2559		qla24xx_process_response_queue(vha, rsp);
2560
2561	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2562	return QLA_SUCCESS;
2563
2564queuing_error_fcp_cmnd:
2565	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2566queuing_error:
2567	if (tot_dsds)
2568		scsi_dma_unmap(cmd);
2569
2570	if (sp->u.scmd.ctx) {
2571		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2572		sp->u.scmd.ctx = NULL;
2573	}
2574	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2575
2576	return QLA_FUNCTION_FAILED;
2577}
2578
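/**
 * qla24xx_abort_iocb() - Build an Abort IOCB.
 * @sp: SRB carrying the abort request
 * @abt_iocb: Abort IOCB to populate
 *
 * The handle of the command to abort is taken from sp->u.iocb_cmd.u.abt.
 */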
2579static void
2580qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
2581{
2582	struct srb_iocb *aio = &sp->u.iocb_cmd;
2583	scsi_qla_host_t *vha = sp->fcport->vha;
2584	struct req_que *req = vha->req;
2585
2586	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
2587	abt_iocb->entry_type = ABORT_IOCB_TYPE;
2588	abt_iocb->entry_count = 1;
2589	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
2590	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2591	abt_iocb->handle_to_abort =
2592	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
2593	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2594	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
2595	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2596	abt_iocb->vp_index = vha->vp_idx;
2597	abt_iocb->req_que_no = cpu_to_le16(req->id);
2598	/* Send the command to the firmware */
2599	wmb();
2600}
2601
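/**
 * qla2x00_start_sp() - Build and send an IOCB for a non-SCSI SRB.
 * @sp: command to send to the ISP
 *
 * Allocates IOCB space, builds the IOCB that matches sp->type and rings
 * the request-queue doorbell.
 *
 * Returns QLA_SUCCESS on success and QLA_FUNCTION_FAILED if no IOCB
 * space is available.
 */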
2602int
2603qla2x00_start_sp(srb_t *sp)
2604{
2605	int rval;
2606	struct qla_hw_data *ha = sp->fcport->vha->hw;
2607	void *pkt;
2608	unsigned long flags;
2609
2610	rval = QLA_FUNCTION_FAILED;
2611	spin_lock_irqsave(&ha->hardware_lock, flags);
2612	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2613	if (!pkt) {
2614		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2615		    "qla2x00_alloc_iocbs failed.\n");
2616		goto done;
2617	}
2618
2619	rval = QLA_SUCCESS;
2620	switch (sp->type) {
2621	case SRB_LOGIN_CMD:
2622		IS_FWI2_CAPABLE(ha) ?
2623		    qla24xx_login_iocb(sp, pkt) :
2624		    qla2x00_login_iocb(sp, pkt);
2625		break;
2626	case SRB_LOGOUT_CMD:
2627		IS_FWI2_CAPABLE(ha) ?
2628		    qla24xx_logout_iocb(sp, pkt) :
2629		    qla2x00_logout_iocb(sp, pkt);
2630		break;
2631	case SRB_ELS_CMD_RPT:
2632	case SRB_ELS_CMD_HST:
2633		qla24xx_els_iocb(sp, pkt);
2634		break;
2635	case SRB_CT_CMD:
2636		IS_FWI2_CAPABLE(ha) ?
2637		    qla24xx_ct_iocb(sp, pkt) :
2638		    qla2x00_ct_iocb(sp, pkt);
2639		break;
2640	case SRB_ADISC_CMD:
2641		IS_FWI2_CAPABLE(ha) ?
2642		    qla24xx_adisc_iocb(sp, pkt) :
2643		    qla2x00_adisc_iocb(sp, pkt);
2644		break;
2645	case SRB_TM_CMD:
2646		IS_QLAFX00(ha) ?
2647		    qlafx00_tm_iocb(sp, pkt) :
2648		    qla24xx_tm_iocb(sp, pkt);
2649		break;
2650	case SRB_FXIOCB_DCMD:
2651	case SRB_FXIOCB_BCMD:
2652		qlafx00_fxdisc_iocb(sp, pkt);
2653		break;
2654	case SRB_ABT_CMD:
2655		IS_QLAFX00(ha) ?
2656			qlafx00_abort_iocb(sp, pkt) :
2657			qla24xx_abort_iocb(sp, pkt);
2658		break;
2659	default:
2660		break;
2661	}
2662
2663	wmb();
2664	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2665done:
2666	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2667	return rval;
2668}
2669
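/**
 * qla25xx_build_bidir_iocb() - Build a bidirectional command IOCB.
 * @sp: SRB carrying the BSG job
 * @vha: HA context
 * @cmd_pkt: bidirectional command IOCB to populate
 * @tot_dsds: total number of data segment descriptors
 *
 * DSDs beyond the first are placed in Continuation Type 1 IOCBs; the
 * per-host bidirectional statistics are updated as a side effect.
 */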
2670static void
2671qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2672				struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2673{
2674	uint16_t avail_dsds;
2675	uint32_t *cur_dsd;
2676	uint32_t req_data_len = 0;
2677	uint32_t rsp_data_len = 0;
2678	struct scatterlist *sg;
2679	int index;
2680	int entry_count = 1;
2681	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2682
2683	/* Update entry type to indicate bidir command */
2684	*((uint32_t *)(&cmd_pkt->entry_type)) =
2685		__constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2686
2687	/* Set the transfer direction; in this case set both flags.
2688	 * Also set the BD_WRAP_BACK flag; the firmware will take care of
2689	 * assigning DID=SID for outgoing packets.
2690	 */
2691	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2692	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2693	cmd_pkt->control_flags =
2694			__constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2695							BD_WRAP_BACK);
2696
2697	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2698	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2699	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2700	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2701
2702	vha->bidi_stats.transfer_bytes += req_data_len;
2703	vha->bidi_stats.io_count++;
2704
2705	vha->qla_stats.output_bytes += req_data_len;
2706	vha->qla_stats.output_requests++;
2707
2708	/* Only one DSD is available in the bidirectional IOCB; the remaining
2709	 * DSDs are bundled in continuation IOCBs.
2710	 */
2711	avail_dsds = 1;
2712	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2713
2714	index = 0;
2715
2716	for_each_sg(bsg_job->request_payload.sg_list, sg,
2717				bsg_job->request_payload.sg_cnt, index) {
2718		dma_addr_t sle_dma;
2719		cont_a64_entry_t *cont_pkt;
2720
2721		/* Allocate additional continuation packets */
2722		if (avail_dsds == 0) {
2723			/* Continuation Type 1 IOCB can accommodate
2724			 * 5 DSDs
2725			 */
2726			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2727			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2728			avail_dsds = 5;
2729			entry_count++;
2730		}
2731		sle_dma = sg_dma_address(sg);
2732		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2733		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2734		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2735		avail_dsds--;
2736	}
2737	/* For a read request the DSDs always go to a continuation IOCB
2738	 * and follow the write DSDs. If there is room on the current IOCB
2739	 * they are added to that IOCB, else a new continuation IOCB is
2740	 * allocated.
2741	 */
2742	for_each_sg(bsg_job->reply_payload.sg_list, sg,
2743				bsg_job->reply_payload.sg_cnt, index) {
2744		dma_addr_t sle_dma;
2745		cont_a64_entry_t *cont_pkt;
2746
2747		/* Allocate additional continuation packets */
2748		if (avail_dsds == 0) {
2749			/* Continuation Type 1 IOCB can accommodate
2750			 * 5 DSDs
2751			 */
2752			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2753			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2754			avail_dsds = 5;
2755			entry_count++;
2756		}
2757		sle_dma = sg_dma_address(sg);
2758		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2759		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2760		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2761		avail_dsds--;
2762	}
2763	/* This value must equal the number of IOCBs required for this cmd. */
2764	cmd_pkt->entry_count = entry_count;
2765}
2766
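/**
 * qla2x00_start_bidir() - Send a bidirectional command to the ISP.
 * @sp: command to send to the ISP
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_BUSY if no handle or ring
 * space is available, or EXT_STATUS_MAILBOX if the marker IOCB could not
 * be sent.
 */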
2767int
2768qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2769{
2770
2771	struct qla_hw_data *ha = vha->hw;
2772	unsigned long flags;
2773	uint32_t handle;
2774	uint32_t index;
2775	uint16_t req_cnt;
2776	uint16_t cnt;
2777	uint32_t *clr_ptr;
2778	struct cmd_bidir *cmd_pkt = NULL;
2779	struct rsp_que *rsp;
2780	struct req_que *req;
2781	int rval = EXT_STATUS_OK;
2784
2785	rsp = ha->rsp_q_map[0];
2786	req = vha->req;
2787
2788	/* Send marker if required */
2789	if (vha->marker_needed != 0) {
2790		if (qla2x00_marker(vha, req,
2791			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2792			return EXT_STATUS_MAILBOX;
2793		vha->marker_needed = 0;
2794	}
2795
2796	/* Acquire ring specific lock */
2797	spin_lock_irqsave(&ha->hardware_lock, flags);
2798
2799	/* Check for room in outstanding command list. */
2800	handle = req->current_outstanding_cmd;
2801	for (index = 1; index < req->num_outstanding_cmds; index++) {
2802		handle++;
2803		if (handle == req->num_outstanding_cmds)
2804			handle = 1;
2805		if (!req->outstanding_cmds[handle])
2806			break;
2807	}
2808
2809	if (index == req->num_outstanding_cmds) {
2810		rval = EXT_STATUS_BUSY;
2811		goto queuing_error;
2812	}
2813
2814	/* Calculate number of IOCB required */
2815	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2816
2817	/* Check for room on request queue. */
2818	if (req->cnt < req_cnt + 2) {
2819		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2820		    RD_REG_DWORD_RELAXED(req->req_q_out);
2821		if (req->ring_index < cnt)
2822			req->cnt = cnt - req->ring_index;
2823		else
2824			req->cnt = req->length -
2825				(req->ring_index - cnt);
2826	}
2827	if (req->cnt < req_cnt + 2) {
2828		rval = EXT_STATUS_BUSY;
2829		goto queuing_error;
2830	}
2831
2832	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2833	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2834
2835	/* Zero out remaining portion of packet. */
2836	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2837	clr_ptr = (uint32_t *)cmd_pkt + 2;
2838	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2839
2840	/* Set NPORT-ID (of vha) */
2841	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2842	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2843	cmd_pkt->port_id[1] = vha->d_id.b.area;
2844	cmd_pkt->port_id[2] = vha->d_id.b.domain;
2845
2846	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2847	cmd_pkt->entry_status = (uint8_t) rsp->id;
2848	/* Build command packet. */
2849	req->current_outstanding_cmd = handle;
2850	req->outstanding_cmds[handle] = sp;
2851	sp->handle = handle;
2852	req->cnt -= req_cnt;
2853
2854	/* Send the command to the firmware */
2855	wmb();
2856	qla2x00_start_iocbs(vha, req);
2857queuing_error:
2858	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2859	return rval;
2860}
2861