/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"

const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] =           "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] =          "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};

static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};

static const char *fcpio_status_str[] =  {
	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};

const char *fnic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
		return "unknown";

	return fnic_state_str[state];
}

static const char *fnic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
	    !fnic_ioreq_state_str[state])
		return "unknown";

	return fnic_ioreq_state_str[state];
}

static const char *fnic_fcpio_status_to_str(unsigned int status)
{
	if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
		return "unknown";

	return fcpio_status_str[status];
}

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);

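/*
 * Per-IO state is protected by a small array of spinlocks; a command's
 * lock is chosen by masking its block-layer tag with FNIC_IO_LOCKS - 1
 * (FNIC_IO_LOCKS is a power of two, so the mask is a cheap modulo).
 * E.g., assuming FNIC_IO_LOCKS is 64, tags 5 and 69 share a lock.
 */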
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
{
	u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

	return &fnic->io_req_lock[hash];
}

static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
					    int tag)
{
	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}

/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	if (io_req->sgl_list_pa)
		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 PCI_DMA_TODEVICE);
	scsi_dma_unmap(sc);

	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
	if (io_req->sense_buf_pa)
		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

/* Free up Copy WQ descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[0])
		return 1;

	/*
	 * Update desc_available count based on number of freed descriptors
	 * Account for wraparound
	 */
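	/*
	 * Illustrative example: with desc_count = 64, to_clean_index = 60
	 * and fw_ack_index = 2, the ack wrapped past the end of the ring,
	 * so (64 - 60) + 2 + 1 = 7 descriptors are freed.
	 */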
	if (wq->to_clean_index <= fnic->fw_ack_index[0])
		wq->ring.desc_avail += (fnic->fw_ack_index[0]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[0] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[0] = 0;
	return 0;
}

/*
 * __fnic_set_state_flags
 * Sets/Clears bits in fnic's state_flags
 */
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
			unsigned long clearbits)
{
	struct Scsi_Host *host = fnic->lport->host;
	int sh_locked = spin_is_locked(host->host_lock);
	unsigned long flags = 0;

	if (!sh_locked)
		spin_lock_irqsave(host->host_lock, flags);

	if (clearbits)
		fnic->state_flags &= ~st_flags;
	else
		fnic->state_flags |= st_flags;

	if (!sh_locked)
		spin_unlock_irqrestore(host->host_lock, flags);
}

/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	int ret = 0;
	unsigned long flags;

	/* indicate fwreset to io path */
	fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	/* wait for io cmpl */
	while (atomic_read(&fnic->in_flight))
		schedule_timeout(msecs_to_jiffies(1));

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
	} else {
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
			  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
				atomic64_read(
				  &fnic->fnic_stats.fw_stats.active_fw_reqs));
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret) {
		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	} else {
		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	}

	return ret;
}

/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto flogi_reg_ioreq_end;
	}

	if (fnic->ctlr.map_dest) {
		memset(gw_mac, 0xff, ETH_ALEN);
		format = FCPIO_FLOGI_REG_DEF_DEST;
	} else {
		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
		format = FCPIO_FLOGI_REG_GW_DEST;
	}

	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
	}

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}

/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned int i;
	unsigned long intr_flags;
	int flags;
	u8 exch_flags;
	struct scsi_lun fc_lun;
	int r;

	if (sg_count) {
		/* For each SGE, create a device desc entry */
		desc = io_req->sgl_list;
		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
			desc->addr = cpu_to_le64(sg_dma_address(sg));
			desc->len = cpu_to_le32(sg_dma_len(sg));
			desc->_resvd = 0;
			desc++;
		}

		io_req->sgl_list_pa = pci_map_single
			(fnic->pdev,
			 io_req->sgl_list,
			 sizeof(io_req->sgl_list[0]) * sg_count,
			 PCI_DMA_TODEVICE);

		r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
		if (r) {
			printk(KERN_ERR "PCI mapping failed with error %d\n", r);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}

	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      PCI_DMA_FROMDEVICE);

	r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
	if (r) {
		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
				sizeof(io_req->sgl_list[0]) * sg_count,
				PCI_DMA_TODEVICE);
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	int_to_scsilun(sc->device->lun, &fc_lun);

	/* Enqueue the descriptor in the Copy WQ */
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			  "fnic_queue_wq_copy_desc failure - no descriptors\n");
		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	flags = 0;
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags = FCPIO_ICMND_RDDATA;
	else if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags = FCPIO_ICMND_WRDATA;

	exch_flags = 0;
	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
	    (rp->flags & FC_RP_FLAGS_RETRY))
		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

	fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 FCPIO_ICMND_PTA_SIMPLE,
						/* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
}

/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp = shost_priv(sc->device->host);
	struct fc_rport *rport;
	struct fnic_io_req *io_req = NULL;
	struct fnic *fnic = lport_priv(lp);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct vnic_wq_copy *wq;
	int ret;
	u64 cmd_trace;
	int sg_count = 0;
	unsigned long flags = 0;
	unsigned long ptr;
	struct fc_rport_priv *rdata;
	spinlock_t *io_lock = NULL;
	int io_lock_acquired = 0;

	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
		return SCSI_MLQUEUE_HOST_BUSY;

	rport = starget_to_rport(scsi_target(sc->device));
	ret = fc_remote_port_chkready(rport);
	if (ret) {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = ret;
		done(sc);
		return 0;
	}

	rdata = lp->tt.rport_lookup(lp, rport->port_id);
	if (!rdata || (rdata->rp_state == RPORT_ST_DELETE)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"returning IO as rport is removed\n");
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = DID_NO_CONNECT << 16;
		done(sc);
		return 0;
	}

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		return SCSI_MLQUEUE_HOST_BUSY;

	atomic_inc(&fnic->in_flight);

	/*
	 * Release host lock, use driver resource specific locks from here.
	 * Don't re-enable interrupts in case they were disabled prior to the
	 * caller disabling them.
	 */
	spin_unlock(lp->host->host_lock);
	CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	/* Get a new io_req for this SCSI IO */
	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.alloc_failures);
		ret = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	memset(io_req, 0, sizeof(*io_req));

	/* Map the data buffer */
	sg_count = scsi_dma_map(sc);
	if (sg_count < 0) {
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			  sc->request->tag, sc, 0, sc->cmnd[0],
			  sg_count, CMD_STATE(sc));
		mempool_free(io_req, fnic->io_req_pool);
		ret = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* Determine the type of scatter/gather list we need */
	io_req->sgl_cnt = sg_count;
	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
		io_req->sgl_type = FNIC_SGL_CACHE_MAX;

	if (sg_count) {
		io_req->sgl_list =
			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
				      GFP_ATOMIC);
		if (!io_req->sgl_list) {
			atomic64_inc(&fnic_stats->io_stats.alloc_failures);
			ret = SCSI_MLQUEUE_HOST_BUSY;
			scsi_dma_unmap(sc);
			mempool_free(io_req, fnic->io_req_pool);
			goto out;
		}

		/* Cache sgl list allocated address before alignment */
		io_req->sgl_list_alloc = io_req->sgl_list;
		ptr = (unsigned long) io_req->sgl_list;
		if (ptr % FNIC_SG_DESC_ALIGN) {
			io_req->sgl_list = (struct host_sg_desc *)
				(((unsigned long) ptr
				  + FNIC_SG_DESC_ALIGN - 1)
				 & ~(FNIC_SG_DESC_ALIGN - 1));
		}
	}

	/*
	 * Acquire the io lock before marking the IO as initialized.
	 */

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* initialize rest of io_req */
	io_lock_acquired = 1;
	io_req->port_id = rport->port_id;
	io_req->start_time = jiffies;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_SP(sc) = (char *)io_req;
	CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
	sc->scsi_done = done;

	/* create copy wq desc and enqueue it */
	wq = &fnic->wq_copy[0];
	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
	if (ret) {
		/*
		 * In case another thread cancelled the request,
		 * refetch the pointer under the lock.
		 */
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			  sc->request->tag, sc, 0, 0, 0,
			  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		spin_unlock_irqrestore(io_lock, flags);
		if (io_req) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}
		atomic_dec(&fnic->in_flight);
		/* acquire host lock before returning to SCSI */
		spin_lock(lp->host->host_lock);
		return ret;
	} else {
		atomic64_inc(&fnic_stats->io_stats.active_ios);
		atomic64_inc(&fnic_stats->io_stats.num_ios);
		if (atomic64_read(&fnic_stats->io_stats.active_ios) >
			  atomic64_read(&fnic_stats->io_stats.max_active_ios))
			atomic64_set(&fnic_stats->io_stats.max_active_ios,
			     atomic64_read(&fnic_stats->io_stats.active_ios));

		/* REVISIT: Use per IO lock in the final code */
		CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
	}
out:
	cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
			(u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
			(u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
			sc->cmnd[5]);

	FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
		  sc->request->tag, sc, io_req,
		  sg_count, cmd_trace,
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	/* we hold the io lock only if we issued the IO */
	if (io_lock_acquired)
		spin_unlock_irqrestore(io_lock, flags);

	atomic_dec(&fnic->in_flight);
	/* acquire host lock before returning to SCSI */
	spin_lock(lp->host->host_lock);
	return ret;
}

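/*
 * DEF_SCSI_QCMD generates fnic_queuecommand(), which takes the host lock
 * with interrupts disabled and calls fnic_queuecommand_lck() above.
 */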
DEF_SCSI_QCMD(fnic_queuecommand)

/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
					    struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	atomic64_inc(&reset_stats->fw_reset_completions);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic, SCSI_NO_TAG);

	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* fnic should be in FC_TRANS_ETH_MODE */
	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
		/* Check status of reset completion */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "reset cmpl success\n");
			/* Ready to send flogi out */
			fnic->state = FNIC_IN_ETH_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic fw_reset failed: %s\n",
				      fnic_fcpio_status_to_str(hdr_status));

			/*
			 * Unable to change to eth mode, cannot send out flogi
			 * Change state to fc mode, so that subsequent Flogi
			 * requests from libFC will cause more attempts to
			 * reset the firmware. Free the cached flogi
			 */
			fnic->state = FNIC_IN_FC_MODE;
			atomic64_inc(&reset_stats->fw_reset_failures);
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Unexpected state %s while processing"
			      " reset cmpl\n", fnic_state_to_str(fnic->state));
		atomic64_inc(&reset_stats->fw_reset_failures);
		ret = -1;
	}

	/* Thread removing device blocks till firmware reset is complete */
	if (fnic->remove_wait)
		complete(fnic->remove_wait);

	/*
	 * If fnic is being removed, or fw reset failed
	 * free the flogi frame. Else, send it out
	 */
	if (fnic->remove_wait || ret) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
		goto reset_cmpl_handler_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fnic_flush_tx(fnic);

 reset_cmpl_handler_end:
	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

	return ret;
}

/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
					     struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Update fnic state based on status of flogi reg completion */
	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

		/* Check flogi registration completion status */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flogi reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic flogi reg failed: %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while"
			      " processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
		ret = -1;
	}

	if (!ret) {
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fnic_flush_tx(fnic);
		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

reg_cmpl_handler_end:
	return ret;
}

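/*
 * Validate that the request_out index acked by firmware lies in the live
 * region of the copy WQ ring (between to_clean_index and to_use_index,
 * modulo wraparound); a stale index must not be used to free descriptors.
 */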
static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
					u16 request_out)
{
	if (wq->to_clean_index <= wq->to_use_index) {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index ||
		    request_out >= wq->to_use_index)
			return 0;
	} else {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index &&
		    request_out >= wq->to_use_index)
			return 0;
	}
	/* request_out index is in range */
	return 1;
}

/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
					  unsigned int cq_index,
					  struct fcpio_fw_req *desc)
{
	struct vnic_wq_copy *wq;
	u16 request_out = desc->u.ack.request_out;
	unsigned long flags;
	u64 *ox_id_tag = (u64 *)(void *)desc;

	/* mark the ack state */
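	/* copy WQ completion queues are indexed after the raw WQ and RQ CQs */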
	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
	if (is_ack_index_in_range(wq, request_out)) {
		fnic->fw_ack_index[0] = request_out;
		fnic->fw_ack_recd[0] = 1;
	} else {
		atomic64_inc(
			&fnic->fnic_stats.misc_stats.ack_index_out_of_range);
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	FNIC_TRACE(fnic_fcpio_ack_handler,
		  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
		  ox_id_tag[4], ox_id_tag[5]);
}

/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
					 struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	u64 xfer_len = 0;
	struct fcpio_icmnd_cmpl *icmnd_cmpl;
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	u64 cmd_trace;
	unsigned long start_time;

	/* Decode the cmpl description to get the io_req id */
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);
	icmnd_cmpl = &desc->u.icmnd_cmpl;

	if (id >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
			"Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "icmnd_cmpl sc is null - "
			  "hdr status = %s tag = 0x%x desc = 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, desc);
		FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
			  fnic->lport->host->host_no, id,
			  ((u64)icmnd_cmpl->_resvd0[1] << 16 |
			  (u64)icmnd_cmpl->_resvd0[0]),
			  ((u64)hdr_status << 16 |
			  (u64)icmnd_cmpl->scsi_status << 8 |
			  (u64)icmnd_cmpl->flags), desc,
			  (u64)icmnd_cmpl->residual, 0);
		return;
	}

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "icmnd_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	/* firmware completed the io */
	io_req->io_completed = 1;

	/*
	 * If SCSI-ML has already issued an abort on this command,
	 * ignore the completion of the IO. The abts path will clean it up.
	 */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			CMD_FLAGS(sc) |= FNIC_IO_DONE;
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "icmnd_cmpl ABTS pending hdr status = %s "
				  "sc  0x%p scsi_status %x  residual %d\n",
				  fnic_fcpio_status_to_str(hdr_status), sc,
				  icmnd_cmpl->scsi_status,
				  icmnd_cmpl->residual);
			break;
		case FCPIO_ABORTED:
			CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
			break;
		default:
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
					  "icmnd_cmpl abts pending "
					  "hdr status = %s tag = 0x%x sc = 0x%p\n",
					  fnic_fcpio_status_to_str(hdr_status),
					  id, sc);
			break;
		}
		return;
	}

	/* Mark the IO as complete */
	CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

	icmnd_cmpl = &desc->u.icmnd_cmpl;

	switch (hdr_status) {
	case FCPIO_SUCCESS:
		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
		xfer_len = scsi_bufflen(sc);
		scsi_set_resid(sc, icmnd_cmpl->residual);

		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
			xfer_len -= icmnd_cmpl->residual;

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
		break;

	case FCPIO_TIMEOUT:          /* request was timed out */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_ABORTED:          /* request was aborted */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
		scsi_set_resid(sc, icmnd_cmpl->residual);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
		atomic64_inc(&fnic_stats->io_stats.io_not_found);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_FW_ERR:           /* request was terminated due fw error */
		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
		atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
	default:
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;
	}

	if (hdr_status != FCPIO_SUCCESS) {
		atomic64_inc(&fnic_stats->io_stats.io_failures);
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
	}
	/* Break link with the SCSI command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= FNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	fnic_release_ioreq_buf(fnic, io_req, sc);

	mempool_free(io_req, fnic->io_req_pool);

	cmd_trace = ((u64)hdr_status << 56) |
		  (u64)icmnd_cmpl->scsi_status << 48 |
		  (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5];

	FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
		  sc->device->host->host_no, id, sc,
		  ((u64)icmnd_cmpl->_resvd0[1] << 56 |
		  (u64)icmnd_cmpl->_resvd0[0] << 48 |
		  jiffies_to_msecs(jiffies - start_time)),
		  desc, cmd_trace,
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		fnic->lport->host_stats.fcp_input_requests++;
		fnic->fcp_input_bytes += xfer_len;
	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
		fnic->lport->host_stats.fcp_output_requests++;
		fnic->fcp_output_bytes += xfer_len;
	} else {
		fnic->lport->host_stats.fcp_control_requests++;
	}

	atomic64_dec(&fnic_stats->io_stats.active_ios);
	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

	/* Call SCSI completion function to complete the IO */
	if (sc->scsi_done)
		sc->scsi_done(sc);
}

/*
 * fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
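/*
 * ITMF tags carry flag bits (FNIC_TAG_ABORT, FNIC_TAG_DEV_RST) above
 * FNIC_TAG_MASK; masking recovers the SCSI tag and the flag bits select
 * which completion path runs below.
 */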
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
					struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	struct scsi_cmnd *sc;
	struct fnic_io_req *io_req;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
		"Tag out of range tag %x hdr status = %s\n",
		id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
			  fnic_fcpio_status_to_str(hdr_status), id);
		return;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
		/* Abort and terminate completion of device reset req */
		/* REVISIT: Add asserts about various flags */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset abts cmpl recd. id %x status %s\n",
			      id, fnic_fcpio_status_to_str(hdr_status));
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = hdr_status;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		if (io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
	} else if (id & FNIC_TAG_ABORT) {
		/* Completion of abort cmd */
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			break;
		case FCPIO_TIMEOUT:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_fw_timeouts);
			else
				atomic64_inc(
					&term_stats->terminate_fw_timeouts);
			break;
		case FCPIO_IO_NOT_FOUND:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_io_not_found);
			else
				atomic64_inc(
					&term_stats->terminate_io_not_found);
			break;
		default:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_failures);
			else
				atomic64_inc(
					&term_stats->terminate_failures);
			break;
		}
		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(io_lock, flags);
			return;
		}
		CMD_ABTS_STATUS(sc) = hdr_status;
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;

		atomic64_dec(&fnic_stats->io_stats.active_ios);
		if (atomic64_read(&fnic->io_cmpl_skip))
			atomic64_dec(&fnic->io_cmpl_skip);
		else
			atomic64_inc(&fnic_stats->io_stats.io_completions);

		if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));

		/*
		 * If scsi_eh thread is blocked waiting for abts to complete,
		 * signal completion to it. IO will be cleaned in the thread
		 * else clean it in this context
		 */
		if (io_req->abts_done) {
			complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "abts cmpl, completing IO\n");
			CMD_SP(sc) = NULL;
			sc->result = (DID_ERROR << 16);

			spin_unlock_irqrestore(io_lock, flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
			if (sc->scsi_done) {
				FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
					sc->device->host->host_no, id,
					sc,
					jiffies_to_msecs(jiffies - start_time),
					desc,
					(((u64)hdr_status << 40) |
					(u64)sc->cmnd[0] << 32 |
					(u64)sc->cmnd[2] << 24 |
					(u64)sc->cmnd[3] << 16 |
					(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
					(((u64)CMD_FLAGS(sc) << 32) |
					CMD_STATE(sc)));
				sc->scsi_done(sc);
			}
		}

	} else if (id & FNIC_TAG_DEV_RST) {
		/* Completion of device reset */
		CMD_LR_STATUS(sc) = hdr_status;
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"Terminate pending "
				"dev reset cmpl recd. id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
			/* Need to wait for terminate completion */
			spin_unlock_irqrestore(io_lock, flags);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"dev reset cmpl recd after time out. "
				"id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(io_lock, flags);

	} else {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unexpected itmf io state %s tag %x\n",
			     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
		spin_unlock_irqrestore(io_lock, flags);
	}
}

/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
				   unsigned int cq_index,
				   struct fcpio_fw_req *desc)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	switch (desc->hdr.type) {
	case FCPIO_ICMND_CMPL: /* fw completed a command */
	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
	case FCPIO_RESET_CMPL: /* fw completed reset */
		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		break;
	default:
		break;
	}

	switch (desc->hdr.type) {
	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
		fnic_fcpio_ack_handler(fnic, cq_index, desc);
		break;

	case FCPIO_ICMND_CMPL: /* fw completed a command */
		fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
		break;

	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
		fnic_fcpio_itmf_cmpl_handler(fnic, desc);
		break;

	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
		fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
		break;

	case FCPIO_RESET_CMPL: /* fw completed reset */
		fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
		break;

	default:
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "firmware completion type %d\n",
			      desc->hdr.type);
		break;
	}

	return 0;
}

/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i, cq_index;
	unsigned int cur_work_done;

	for (i = 0; i < fnic->wq_copy_count; i++) {
		cq_index = i + fnic->raw_wq_count + fnic->rq_count;
		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
						     fnic_fcpio_cmpl_handler,
						     copy_work_to_do);
		wq_work_done += cur_work_done;
	}
	return wq_work_done;
}

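/*
 * fnic_cleanup_io
 * Walk all outstanding tags (except exclude_id), free each io_req and
 * complete the command back to the mid-layer. Used after a firmware
 * reset, when no per-IO completions will arrive.
 */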
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
	int i;
	struct fnic_io_req *io_req;
	unsigned long flags = 0;
	struct scsi_cmnd *sc;
	spinlock_t *io_lock;
	unsigned long start_time = 0;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;

	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
		if (i == exclude_id)
			continue;

		io_lock = fnic_io_lock_tag(fnic, i);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, i);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
			/*
			 * We will be here only when FW completes reset
			 * without sending completions for outstanding ios.
			 */
			CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
			if (io_req && io_req->dr_done)
				complete(io_req->dr_done);
			else if (io_req && io_req->abts_done)
				complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto cleanup_scsi_cmd;
		}

		CMD_SP(sc) = NULL;

		spin_unlock_irqrestore(io_lock, flags);

		/*
		 * If there is a scsi_cmnd associated with this io_req, then
		 * free the corresponding state
		 */
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
			      __func__, (jiffies - start_time));

		if (atomic64_read(&fnic->io_cmpl_skip))
			atomic64_dec(&fnic->io_cmpl_skip);
		else
			atomic64_inc(&fnic_stats->io_stats.io_completions);

		/* Complete the command to SCSI */
		if (sc->scsi_done) {
			FNIC_TRACE(fnic_cleanup_io,
				  sc->device->host->host_no, i, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  0, ((u64)sc->cmnd[0] << 32 |
				  (u64)sc->cmnd[2] << 24 |
				  (u64)sc->cmnd[3] << 16 |
				  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

			sc->scsi_done(sc);
		}
	}
}

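/*
 * fnic_wq_copy_cleanup_handler
 * Per-descriptor callback invoked while the copy WQ is torn down: frees
 * the io_req tied to a posted-but-unconsumed descriptor and completes
 * the command with DID_NO_CONNECT.
 */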
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
				  struct fcpio_host_req *desc)
{
	u32 id;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time = 0;

	/* get the tag reference */
	fcpio_tag_id_dec(&desc->hdr.tag, &id);
	id &= FNIC_TAG_MASK;

	if (id >= fnic->fnic_max_tag_id)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* Get the IO context which this desc refers to */
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/* fnic interrupts are turned off by now */

	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wq_copy_cleanup_scsi_cmd;
	}

	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
	sc->result = DID_NO_CONNECT << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
		      " DID_NO_CONNECT\n");

	if (sc->scsi_done) {
		FNIC_TRACE(fnic_wq_copy_cleanup_handler,
			  sc->device->host->host_no, id, sc,
			  jiffies_to_msecs(jiffies - start_time),
			  0, ((u64)sc->cmnd[0] << 32 |
			  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
			  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
			  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

		sc->scsi_done(sc);
	}
}

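/*
 * fnic_queue_abort_io_req
 * Post an abort/terminate task management request for the given tag to
 * the copy WQ. Returns 0 on success, 1 if the IO path is blocked or no
 * descriptors are available.
 */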
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
					  u32 task_req, u8 *fc_lun,
					  struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	}
	atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
		atomic_dec(&fnic->in_flight);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_queue_abort_io_req: failure: no descriptors\n");
		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
		return 1;
	}
	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
				     0, task_req, tag, fc_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	atomic_dec(&fnic->in_flight);

	return 0;
}

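/*
 * fnic_rport_exch_reset
 * Terminate every IO still pending in the firmware that belongs to the
 * given remote port id, typically because the rport has gone away.
 */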
static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct scsi_lun fc_lun;
	enum fnic_ioreq_state old_ioreq_state;

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "fnic_rport_exch_reset called portid 0x%06x\n",
		      port_id);

	if (fnic->in_remove)
		return;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		abt_tag = tag;
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || io_req->port_id != port_id) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
			sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			shost_printk(KERN_ERR, fnic->lport->host,
			"fnic_rport_exch_reset: io_req->abts_done is set "
			"state is %s\n",
			fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}

		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			shost_printk(KERN_ERR, fnic->lport->host,
				  "rport_exch_reset "
				  "IO not yet issued %p tag 0x%x flags "
				  "%x state %d\n",
				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			atomic64_inc(&reset_stats->device_reset_terminates);
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_rport_exch_reset dev rst sc 0x%p\n",
			sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_rport_exch_reset: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
			atomic64_inc(&term_stats->terminates);
			term_cnt++;
		}
	}
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);
}

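/*
 * fnic_terminate_rport_io
 * FC transport callback (presumably wired up via the driver's
 * fc_function_template): terminates all IOs outstanding on the rport
 * that is going offline.
 */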
void fnic_terminate_rport_io(struct fc_rport *rport)
{
	int tag;
	int abt_tag;
	int term_cnt = 0;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct fc_rport_libfc_priv *rdata;
	struct fc_lport *lport;
	struct fnic *fnic;
	struct fc_rport *cmd_rport;
	struct reset_stats *reset_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;

	if (!rport) {
		printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
		return;
	}
	rdata = rport->dd_data;

	if (!rdata) {
		printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
		return;
	}
	lport = rdata->local_port;

	if (!lport) {
		printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
		return;
	}
	fnic = lport_priv(lport);
	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host, "fnic_terminate_rport_io called"
		      " wwpn 0x%llx, wwnn 0x%llx, rport 0x%p, portid 0x%06x\n",
		      rport->port_name, rport->node_name, rport,
		      rport->port_id);

	if (fnic->in_remove)
		return;

	reset_stats = &fnic->fnic_stats.reset_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		abt_tag = tag;
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		cmd_rport = starget_to_rport(scsi_target(sc->device));
		if (rport != cmd_rport) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || rport != cmd_rport) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
			sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			shost_printk(KERN_ERR, fnic->lport->host,
			"fnic_terminate_rport_io: io_req->abts_done is set "
			"state is %s\n",
			fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}
		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "fnic_terminate_rport_io "
				  "IO not yet issued %p tag 0x%x flags "
				  "%x state %d\n",
				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			atomic64_inc(&reset_stats->device_reset_terminates);
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "fnic_terminate_rport_io: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
			atomic64_inc(&term_stats->terminates);
			term_cnt++;
		}
	}
	if (term_cnt > atomic64_read(&term_stats->max_terminates))
		atomic64_set(&term_stats->max_terminates, term_cnt);
}

/*
 * This function is exported to SCSI for sending abort commands.
 * A SCSI IO is represented by an io_req in the driver.
 * The io_req is linked to the SCSI command, providing the link to the
 * upper layer's IO.
 */
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	int ret = SUCCESS;
	u32 task_req = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct abort_stats *abts_stats;
	struct terminate_stats *term_stats;
	enum fnic_ioreq_state old_ioreq_state;
	int tag;
	DECLARE_COMPLETION_ONSTACK(tm_done);

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	abts_stats = &fnic->fnic_stats.abts_stats;
	term_stats = &fnic->fnic_stats.term_stats;

	rport = starget_to_rport(scsi_target(sc->device));
	tag = sc->request->tag;
	FNIC_SCSI_DBG(KERN_DEBUG,
		fnic->lport->host,
		"Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
		rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));

	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by the fw cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for the command
	 * has already fired, the completion won't actually complete the
	 * command and it will be treated as an aborted command.
	 *
	 * The CMD_SP will not be cleared except while holding io_req_lock.
	 */
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	io_req->abts_done = &tm_done;

	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wait_pending;
	}
	/*
	 * Command is still pending, need to abort it.
	 * If the firmware completes the command after this point,
	 * the completion won't be propagated to the mid-layer, since
	 * the abort has already started.
	 */
	old_ioreq_state = CMD_STATE(sc);
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Check readiness of the remote port. If the path to the remote
	 * port is up, then send an ABTS to the remote port to terminate
	 * the IO. Else, just locally terminate the IO in the firmware.
	 */
	if (fc_remote_port_chkready(rport) == 0)
		task_req = FCPIO_ITMF_ABT_TASK;
	else {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		task_req = FCPIO_ITMF_ABT_TASK_TERM;
	}

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
				    fc_lun.scsi_lun, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = old_ioreq_state;
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	if (task_req == FCPIO_ITMF_ABT_TASK) {
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
		atomic64_inc(&fnic_stats->abts_stats.aborts);
	} else {
		CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
		atomic64_inc(&fnic_stats->term_stats.terminates);
	}

	/*
	 * We queued an abort IO, wait for its completion.
	 * Once the firmware completes the abort command, it will
	 * wake up this thread.
	 */
 wait_pending:
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies
				    (2 * fnic->config.ra_tov +
				     fnic->config.ed_tov));
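	/*
	 * Timeout note: 2 * R_A_TOV + E_D_TOV above bounds the worst-case
	 * ABTS exchange round trip before the driver gives up waiting
	 * (assuming ra_tov/ed_tov are in milliseconds, as the
	 * msecs_to_jiffies() conversion suggests).
	 */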

	/* Check the abort status */
	spin_lock_irqsave(io_lock, flags);

	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	io_req->abts_done = NULL;

	/* fw did not complete abort, timed out */
	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
		spin_unlock_irqrestore(io_lock, flags);
		if (task_req == FCPIO_ITMF_ABT_TASK) {
			atomic64_inc(&abts_stats->abort_drv_timeouts);
		} else {
			atomic64_inc(&term_stats->terminate_drv_timeouts);
		}
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/* IO out of order */

	if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"Issuing Host reset due to out of order IO\n");

		if (fnic_host_reset(sc) == FAILED) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"fnic_host_reset failed.\n");
		}
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;

	/*
	 * firmware completed the abort, check the status,
	 * free the io_req irrespective of failure or success
	 */
	if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
		ret = FAILED;

	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

fnic_abort_cmd_end:
	FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
		  sc->request->tag, sc,
		  jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from abort cmd type %x %s\n", task_req,
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");
	return ret;
}

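/*
 * fnic_queue_dr_io_req() - queue a LUN reset (device reset) ITMF request
 * to the firmware on copy WQ 0. Returns 0 on success, and nonzero if IO
 * is blocked (FNIC_FLAGS_IO_BLOCKED) or no WQ descriptor is available;
 * the caller treats any nonzero value as a failure to enqueue.
 */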
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
				       struct scsi_cmnd *sc,
				       struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	struct scsi_lun fc_lun;
	int ret = 0;
	unsigned long intr_flags;

	spin_lock_irqsave(host->host_lock, intr_flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, intr_flags);
		return FAILED;
	} else {
		atomic_inc(&fnic->in_flight);
	}
	spin_unlock_irqrestore(host->host_lock, intr_flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			  "queue_dr_io_req failure - no descriptors\n");
		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
		ret = -EAGAIN;
		goto lr_io_req_end;
	}

	/* fill in the lun info */
	int_to_scsilun(sc->device->lun, &fc_lun);

	fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
				     fc_lun.scsi_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

lr_io_req_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	atomic_dec(&fnic->in_flight);

	return ret;
}

/*
 * Clean up any pending aborts on the lun.
 * For each outstanding IO on this lun, whose abort is not completed by fw,
 * issue a local abort and wait for the abort to complete. Return 0 if all
 * commands were successfully aborted, 1 otherwise.
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
				     struct scsi_cmnd *lr_sc)
{
	int tag, abt_tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct scsi_device *lun_dev = lr_sc->device;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	enum fnic_ioreq_state old_ioreq_state;

	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		io_lock = fnic_io_lock_tag(fnic, tag);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd or cmds that do not belong to
		 * this lun
		 */
		if (!sc || sc == lr_sc || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to the LUN that we are resetting
		 */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Found IO in %s on lun\n",
			      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				"%s dev rst not pending sc 0x%p\n", __func__,
				sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if (io_req->abts_done)
			shost_printk(KERN_ERR, fnic->lport->host,
			  "%s: io_req->abts_done is set state is %s\n",
			  __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
		old_ioreq_state = CMD_STATE(sc);
		/*
		 * Any pending IO issued prior to the reset is expected to
		 * be in the abts-pending state; if not, set
		 * FNIC_IOREQ_ABTS_PENDING to mark the IO as abort pending.
		 * When the abort completes, the IO is handed back and
		 * handled in this function.
		 */
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;

		BUG_ON(io_req->abts_done);

		abt_tag = tag;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			abt_tag |= FNIC_TAG_DEV_RST;
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "%s: dev rst sc 0x%p\n", __func__, sc);
		}

		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		io_req->abts_done = &tm_done;
		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			spin_lock_irqsave(io_lock, flags);
			io_req = (struct fnic_io_req *)CMD_SP(sc);
			if (io_req)
				io_req->abts_done = NULL;
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
			ret = 1;
			goto clean_pending_aborts_end;
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
		}

		wait_for_completion_timeout(&tm_done,
					    msecs_to_jiffies
					    (fnic->config.ed_tov));

		/* Recheck cmd state to check if it is now aborted */
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
			continue;
		}

		io_req->abts_done = NULL;

		/* if abort is still pending with fw, fail */
		if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
			ret = 1;
			goto clean_pending_aborts_end;
		}
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_SP(sc) = NULL;
		spin_unlock_irqrestore(io_lock, flags);

		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

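	/*
	 * Give terminates already handed to the firmware a grace period
	 * of 2 * E_D_TOV to complete before rechecking. msleep() is used
	 * so the thread actually sleeps; a bare schedule_timeout() in
	 * TASK_RUNNING state would return immediately.
	 */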
	msleep(2 * fnic->config.ed_tov);

	/* walk again to check, if IOs are still pending in fw */
	if (fnic_is_abts_pending(fnic, lr_sc))
		ret = 1;

clean_pending_aborts_end:
	return ret;
}

/**
 * fnic_scsi_host_start_tag
 * Allocates a tag from the SCSI host's legacy block-layer tag map
 * for commands that arrive without one (e.g. the reset ioctls).
 **/
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
	int tag, ret = SCSI_NO_TAG;

	if (!bqt) {
		pr_err("Tags are not supported\n");
		goto end;
	}

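	/*
	 * Find a free bit in the tag map and claim it atomically; if
	 * another context claims the same bit first, test_and_set_bit()
	 * returns nonzero and the search is retried. Bit 0 is skipped
	 * since the search starts at offset 1.
	 */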
	do {
		tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
		if (tag >= bqt->max_depth) {
			pr_err("Tag allocation failure\n");
			goto end;
		}
	} while (test_and_set_bit(tag, bqt->tag_map));

	bqt->tag_index[tag] = sc->request;
	sc->request->tag = tag;
	sc->tag = tag;
	if (!sc->request->special)
		sc->request->special = sc;

	ret = tag;

end:
	return ret;
}

/**
 * fnic_scsi_host_end_tag
 * Frees the tag allocated by fnic_scsi_host_start_tag().
 **/
static inline void
fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
	int tag = sc->request->tag;

	if (tag == SCSI_NO_TAG)
		return;

	if (!bqt || !bqt->tag_index[tag])
		return;

	bqt->tag_index[tag] = NULL;
	clear_bit(tag, bqt->tag_map);
}

/*
 * The SCSI EH thread issues a LUN reset when one or more commands on a
 * LUN fail to get aborted. It calls the driver's eh_device_reset with a
 * SCSI command on the LUN.
 */
int fnic_device_reset(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	int status;
	int ret = FAILED;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	struct scsi_lun fc_lun;
	struct fnic_stats *fnic_stats;
	struct reset_stats *reset_stats;
	int tag = 0;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	int tag_gen_flag = 0;   /* to track tags allocated by the driver */

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	fnic_stats = &fnic->fnic_stats;
	reset_stats = &fnic->fnic_stats.reset_stats;

	atomic64_inc(&reset_stats->device_resets);

	rport = starget_to_rport(scsi_target(sc->device));
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
		      rport->port_id, sc->device->lun, sc);

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		goto fnic_device_reset_end;

	/* Check if remote port up */
	if (fc_remote_port_chkready(rport)) {
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		goto fnic_device_reset_end;
	}

	CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
	/* Allocate tag if not present */

	tag = sc->request->tag;
	if (unlikely(tag < 0)) {
		/*
		 * XXX(hch): currently the midlayer fakes up a struct
		 * request for the explicit reset ioctls, and those
		 * don't have a tag allocated to them.  The below
		 * code pokes into midlayer structures to paper over
		 * this design issue, but that won't work for blk-mq.
		 *
		 * Either someone who can actually test the hardware
		 * will have to come up with a similar hack for the
		 * blk-mq case, or we'll have to bite the bullet and
		 * fix the way the EH ioctls work for real, but until
		 * that happens we fail these explicit requests here.
		 */
		if (shost_use_blk_mq(sc->device->host))
			goto fnic_device_reset_end;

		tag = fnic_scsi_host_start_tag(fnic, sc);
		if (unlikely(tag == SCSI_NO_TAG))
			goto fnic_device_reset_end;
		tag_gen_flag = 1;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/*
	 * If there is an io_req attached to this command, then use it,
	 * else allocate a new one.
	 */
	if (!io_req) {
		io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto fnic_device_reset_end;
		}
		memset(io_req, 0, sizeof(*io_req));
		io_req->port_id = rport->port_id;
		CMD_SP(sc) = (char *)io_req;
	}
	io_req->dr_done = &tm_done;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
	spin_unlock_irqrestore(io_lock, flags);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);

	/*
	 * issue the device reset; if enqueue failed, clean up the io_req
	 * and break the association with the scsi cmd
	 */
	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->dr_done = NULL;
		goto fnic_device_reset_clean;
	}
	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Wait on the local completion for LUN reset.  The io_req may be
	 * freed while we wait since we hold no lock.
	 */
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"io_req is null tag 0x%x sc 0x%p\n", tag, sc);
		goto fnic_device_reset_end;
	}
	io_req->dr_done = NULL;

	status = CMD_LR_STATUS(sc);

	/*
	 * If the lun reset did not complete, bail out with FAILED. io_req
	 * gets cleaned up during higher levels of EH
	 */
	if (status == FCPIO_INVALID_CODE) {
		atomic64_inc(&reset_stats->device_reset_timeouts);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset timed out\n");
		CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
		spin_unlock_irqrestore(io_lock, flags);
		int_to_scsilun(sc->device->lun, &fc_lun);
		/*
		 * Issue abort and terminate on the device reset request.
		 * If queuing the terminate fails, retry it after a delay.
		 */
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
				spin_unlock_irqrestore(io_lock, flags);
				break;
			}
			spin_unlock_irqrestore(io_lock, flags);
			if (fnic_queue_abort_io_req(fnic,
				tag | FNIC_TAG_DEV_RST,
				FCPIO_ITMF_ABT_TASK_TERM,
				fc_lun.scsi_lun, io_req)) {
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
			} else {
				spin_lock_irqsave(io_lock, flags);
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
				CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
				io_req->abts_done = &tm_done;
				spin_unlock_irqrestore(io_lock, flags);
				FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"Abort and terminate issued on Device reset "
				"tag 0x%x sc 0x%p\n", tag, sc);
				break;
			}
		}
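		/*
		 * Wait for the terminate to finish: if FNIC_DEV_RST_DONE is
		 * not yet set, block once more on tm_done; once it is set,
		 * detach abts_done and go clean up the io_req.
		 */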
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
				spin_unlock_irqrestore(io_lock, flags);
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
				break;
			} else {
				io_req = (struct fnic_io_req *)CMD_SP(sc);
				if (io_req)
					io_req->abts_done = NULL;
				goto fnic_device_reset_clean;
			}
		}
	} else {
		spin_unlock_irqrestore(io_lock, flags);
	}

	/* Completed, but not successful, clean up the io_req, return fail */
	if (status != FCPIO_SUCCESS) {
		spin_lock_irqsave(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Device reset completed - failed\n");
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		goto fnic_device_reset_clean;
	}

	/*
	 * Clean up any aborts on this lun that have still not
	 * completed. If any of these fail, then LUN reset fails.
	 * clean_pending_aborts cleans all cmds on this lun except
	 * the lun reset cmd. If all cmds get cleaned, the lun reset
	 * succeeds
	 */
	if (fnic_clean_pending_aborts(fnic, sc)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset failed"
			      " since could not abort all IOs\n");
		goto fnic_device_reset_clean;
	}

	/* Clean lun reset command */
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (io_req)
		/* Completed, and successful */
		ret = SUCCESS;

fnic_device_reset_clean:
	if (io_req)
		CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	if (io_req) {
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

fnic_device_reset_end:
	FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
		  sc->request->tag, sc,
		  jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	/* free the tag if it was allocated by the driver */
	if (unlikely(tag_gen_flag))
		fnic_scsi_host_end_tag(fnic, sc);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from device reset %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");

	if (ret == FAILED)
		atomic64_inc(&reset_stats->device_reset_failures);

	return ret;
}

/* Clean up all IOs, clean up libFC local port */
int fnic_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	int ret = 0;
	struct reset_stats *reset_stats;

	lp = shost_priv(shost);
	fnic = lport_priv(lp);
	reset_stats = &fnic->fnic_stats.reset_stats;

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_reset called\n");

	atomic64_inc(&reset_stats->fnic_resets);

	/*
	 * Reset local port, this will clean up libFC exchanges,
	 * reset remote port sessions, and if link is up, begin flogi
	 */
	ret = lp->tt.lport_reset(lp);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from fnic reset %s\n",
		      (ret == 0) ?
		      "SUCCESS" : "FAILED");

	if (ret == 0)
		atomic64_inc(&reset_stats->fnic_reset_completions);
	else
		atomic64_inc(&reset_stats->fnic_reset_failures);

	return ret;
}

/*
 * SCSI error handling calls the driver's eh_host_reset if all prior
 * error handling levels return FAILED. If host reset completes
 * successfully, and if the link is up, then fabric login begins.
 *
 * Host reset is the highest level of error recovery. If this fails, then
 * the host is offlined by SCSI.
 */
int fnic_host_reset(struct scsi_cmnd *sc)
{
	int ret;
	unsigned long wait_host_tmo;
	struct Scsi_Host *shost = sc->device->host;
	struct fc_lport *lp = shost_priv(shost);

	/*
	 * If fnic_reset is successful, wait for fabric login to complete.
	 * scsi-ml tries to send a TUR to every device if host reset is
	 * successful, so before returning to scsi, the fabric should be up.
	 */
	ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
	if (ret == SUCCESS) {
		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
		ret = FAILED;
		while (time_before(jiffies, wait_host_tmo)) {
			if ((lp->state == LPORT_ST_READY) &&
			    (lp->link_up)) {
				ret = SUCCESS;
				break;
			}
			ssleep(1);
		}
	}

	return ret;
}

/*
 * This function is called by libFC when the host is being removed.
 */
void fnic_scsi_abort_io(struct fc_lport *lp)
{
	int err = 0;
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);
	DECLARE_COMPLETION_ONSTACK(remove_wait);

	/* Issue firmware reset for fnic, wait for reset to complete */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		msleep(100);
		goto retry_fw_reset;
	}

	fnic->remove_wait = &remove_wait;
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	err = fnic_fw_reset_handler(fnic);
	if (err) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		fnic->remove_wait = NULL;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	/* Wait for firmware reset to complete */
	wait_for_completion_timeout(&remove_wait,
				    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->remove_wait = NULL;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_scsi_abort_io %s\n",
		      (fnic->state == FNIC_IN_ETH_MODE) ?
		      "SUCCESS" : "FAILED");
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

}

/*
 * This function is called by libFC to clean up driver IO state on link down.
 */
void fnic_scsi_cleanup(struct fc_lport *lp)
{
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);

	/* issue fw reset */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		msleep(100);
		goto retry_fw_reset;
	}
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic_fw_reset_handler(fnic)) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

}

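/*
 * Intentionally empty: this is a libFC template hook for which fnic has
 * no per-exchange SCSI state to drop; cleanup is handled by the firmware
 * reset paths above.
 */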
void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}

void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
	struct fnic *fnic = lport_priv(lp);

	/* Non-zero sid, nothing to do */
	if (sid)
		goto call_fc_exch_mgr_reset;

	if (did) {
		fnic_rport_exch_reset(fnic, did);
		goto call_fc_exch_mgr_reset;
	}

	/*
	 * sid = 0, did = 0:
	 * link down or device being removed
	 */
	if (!fnic->in_remove)
		fnic_scsi_cleanup(lp);
	else
		fnic_scsi_abort_io(lp);

	/* call libFC exch mgr reset to reset its exchanges */
call_fc_exch_mgr_reset:
	fc_exch_mgr_reset(lp, sid, did);

}

/*
 * fnic_is_abts_pending() is a helper function that walks the tag map to
 * check whether any IOs are still pending. It returns 1 (true) if at
 * least one IO is pending, otherwise 0 (false).
 * If @lr_sc is non-NULL, only IOs on that particular LUN are checked;
 * otherwise, all IOs are checked.
 */
int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
{
	int tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_device *lun_dev = NULL;

	if (lr_sc)
		lun_dev = lr_sc->device;

	/* walk the tag map to check if IOs are still pending in fw */
	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd or cmds that do not belong to
		 * this lun
		 */
		if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to the LUN that is being checked
		 */
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "Found IO in %s on lun\n",
			      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			ret = 1;
		spin_unlock_irqrestore(io_lock, flags);
	}

	return ret;
}
