1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 *                                                                 *
8 * This program is free software; you can redistribute it and/or   *
9 * modify it under the terms of version 2 of the GNU General       *
10 * Public License as published by the Free Software Foundation.    *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
17 * more details, a copy of which can be found in the file COPYING  *
18 * included with this package.                                     *
19 *******************************************************************/
20
21#include <linux/interrupt.h>
22#include <linux/mempool.h>
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
26#include <linux/list.h>
27
28#include <scsi/scsi.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h>
31#include <scsi/scsi_bsg_fc.h>
32#include <scsi/fc/fc_fs.h>
33
34#include "lpfc_hw4.h"
35#include "lpfc_hw.h"
36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
38#include "lpfc_nl.h"
39#include "lpfc_bsg.h"
40#include "lpfc_disc.h"
41#include "lpfc_scsi.h"
42#include "lpfc.h"
43#include "lpfc_logmsg.h"
44#include "lpfc_crtn.h"
45#include "lpfc_debugfs.h"
46#include "lpfc_vport.h"
47#include "lpfc_version.h"
48
49struct lpfc_bsg_event {
50	struct list_head node;
51	struct kref kref;
52	wait_queue_head_t wq;
53
54	/* Event type and waiter identifiers */
55	uint32_t type_mask;
56	uint32_t req_id;
57	uint32_t reg_id;
58
59	/* next two flags are here for the auto-delete logic */
60	unsigned long wait_time_stamp;
61	int waiting;
62
63	/* seen and not seen events */
64	struct list_head events_to_get;
65	struct list_head events_to_see;
66
67	/* driver data associated with the job */
68	void *dd_data;
69};
70
71struct lpfc_bsg_iocb {
72	struct lpfc_iocbq *cmdiocbq;
73	struct lpfc_dmabuf *rmp;
74	struct lpfc_nodelist *ndlp;
75};
76
77struct lpfc_bsg_mbox {
78	LPFC_MBOXQ_t *pmboxq;
79	MAILBOX_t *mb;
80	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
81	uint8_t *ext; /* extended mailbox data */
82	uint32_t mbOffset; /* from app */
83	uint32_t inExtWLen; /* from app */
84	uint32_t outExtWLen; /* from app */
85};
86
87#define MENLO_DID 0x0000FC0E
88
89struct lpfc_bsg_menlo {
90	struct lpfc_iocbq *cmdiocbq;
91	struct lpfc_dmabuf *rmp;
92};
93
94#define TYPE_EVT 	1
95#define TYPE_IOCB	2
96#define TYPE_MBOX	3
97#define TYPE_MENLO	4
98struct bsg_job_data {
99	uint32_t type;
100	struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
101	union {
102		struct lpfc_bsg_event *evt;
103		struct lpfc_bsg_iocb iocb;
104		struct lpfc_bsg_mbox mbox;
105		struct lpfc_bsg_menlo menlo;
106	} context_un;
107};
108
109struct event_data {
110	struct list_head node;
111	uint32_t type;
112	uint32_t immed_dat;
113	void *data;
114	uint32_t len;
115};
116
117#define BUF_SZ_4K 4096
118#define SLI_CT_ELX_LOOPBACK 0x10
119
120enum ELX_LOOPBACK_CMD {
121	ELX_LOOPBACK_XRI_SETUP,
122	ELX_LOOPBACK_DATA,
123};
124
125#define ELX_LOOPBACK_HEADER_SZ \
126	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
127
128struct lpfc_dmabufext {
129	struct lpfc_dmabuf dma;
130	uint32_t size;
131	uint32_t flag;
132};
133
134static void
135lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
136{
137	struct lpfc_dmabuf *mlast, *next_mlast;
138
139	if (mlist) {
140		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
141					 list) {
142			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
143			list_del(&mlast->list);
144			kfree(mlast);
145		}
146		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
147		kfree(mlist);
148	}
149	return;
150}
151
152static struct lpfc_dmabuf *
153lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
154		       int outbound_buffers, struct ulp_bde64 *bpl,
155		       int *bpl_entries)
156{
157	struct lpfc_dmabuf *mlist = NULL;
158	struct lpfc_dmabuf *mp;
159	unsigned int bytes_left = size;
160
161	/* Verify we can support the size specified */
162	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
163		return NULL;
164
165	/* Determine the number of dma buffers to allocate */
166	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
167			size/LPFC_BPL_SIZE);
168
169	/* Allocate dma buffer and place in BPL passed */
170	while (bytes_left) {
171		/* Allocate dma buffer  */
172		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
173		if (!mp) {
174			if (mlist)
175				lpfc_free_bsg_buffers(phba, mlist);
176			return NULL;
177		}
178
179		INIT_LIST_HEAD(&mp->list);
180		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
181
182		if (!mp->virt) {
183			kfree(mp);
184			if (mlist)
185				lpfc_free_bsg_buffers(phba, mlist);
186			return NULL;
187		}
188
189		/* Queue it to a linked list */
190		if (!mlist)
191			mlist = mp;
192		else
193			list_add_tail(&mp->list, &mlist->list);
194
195		/* Add buffer to buffer pointer list */
196		if (outbound_buffers)
197			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
198		else
199			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
200		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
201		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
202		bpl->tus.f.bdeSize = (uint16_t)
203			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
204			 bytes_left);
205		bytes_left -= bpl->tus.f.bdeSize;
206		bpl->tus.w = le32_to_cpu(bpl->tus.w);
207		bpl++;
208	}
209	return mlist;
210}
211
212static unsigned int
213lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
214		   struct fc_bsg_buffer *bsg_buffers,
215		   unsigned int bytes_to_transfer, int to_buffers)
216{
217
218	struct lpfc_dmabuf *mp;
219	unsigned int transfer_bytes, bytes_copied = 0;
220	unsigned int sg_offset, dma_offset;
221	unsigned char *dma_address, *sg_address;
222	LIST_HEAD(temp_list);
223	struct sg_mapping_iter miter;
224	unsigned long flags;
225	unsigned int sg_flags = SG_MITER_ATOMIC;
226	bool sg_valid;
227
228	list_splice_init(&dma_buffers->list, &temp_list);
229	list_add(&dma_buffers->list, &temp_list);
230	sg_offset = 0;
231	if (to_buffers)
232		sg_flags |= SG_MITER_FROM_SG;
233	else
234		sg_flags |= SG_MITER_TO_SG;
235	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
236		       sg_flags);
237	local_irq_save(flags);
238	sg_valid = sg_miter_next(&miter);
239	list_for_each_entry(mp, &temp_list, list) {
240		dma_offset = 0;
241		while (bytes_to_transfer && sg_valid &&
242		       (dma_offset < LPFC_BPL_SIZE)) {
243			dma_address = mp->virt + dma_offset;
244			if (sg_offset) {
245				/* Continue previous partial transfer of sg */
246				sg_address = miter.addr + sg_offset;
247				transfer_bytes = miter.length - sg_offset;
248			} else {
249				sg_address = miter.addr;
250				transfer_bytes = miter.length;
251			}
252			if (bytes_to_transfer < transfer_bytes)
253				transfer_bytes = bytes_to_transfer;
254			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
255				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
256			if (to_buffers)
257				memcpy(dma_address, sg_address, transfer_bytes);
258			else
259				memcpy(sg_address, dma_address, transfer_bytes);
260			dma_offset += transfer_bytes;
261			sg_offset += transfer_bytes;
262			bytes_to_transfer -= transfer_bytes;
263			bytes_copied += transfer_bytes;
264			if (sg_offset >= miter.length) {
265				sg_offset = 0;
266				sg_valid = sg_miter_next(&miter);
267			}
268		}
269	}
270	sg_miter_stop(&miter);
271	local_irq_restore(flags);
272	list_del_init(&dma_buffers->list);
273	list_splice(&temp_list, &dma_buffers->list);
274	return bytes_copied;
275}
276
277/**
278 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
279 * @phba: Pointer to HBA context object.
280 * @cmdiocbq: Pointer to command iocb.
281 * @rspiocbq: Pointer to response iocb.
282 *
283 * This function is the completion handler for iocbs issued using
284 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
285 * ring event handler function without any lock held. This function
286 * can be called from both worker thread context and interrupt
287 * context. This function also can be called from another thread which
288 * cleans up the SLI layer objects.
289 * This function copies the contents of the response iocb to the
290 * response iocb memory object provided by the caller of
291 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
292 * sleeps for the iocb completion.
293 **/
294static void
295lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
296			struct lpfc_iocbq *cmdiocbq,
297			struct lpfc_iocbq *rspiocbq)
298{
299	struct bsg_job_data *dd_data;
300	struct fc_bsg_job *job;
301	IOCB_t *rsp;
302	struct lpfc_dmabuf *bmp, *cmp, *rmp;
303	struct lpfc_nodelist *ndlp;
304	struct lpfc_bsg_iocb *iocb;
305	unsigned long flags;
306	unsigned int rsp_size;
307	int rc = 0;
308
309	dd_data = cmdiocbq->context1;
310
311	/* Determine if job has been aborted */
312	spin_lock_irqsave(&phba->ct_ev_lock, flags);
313	job = dd_data->set_job;
314	if (job) {
315		/* Prevent timeout handling from trying to abort job */
316		job->dd_data = NULL;
317	}
318	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
319
320	/* Close the timeout handler abort window */
321	spin_lock_irqsave(&phba->hbalock, flags);
322	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
323	spin_unlock_irqrestore(&phba->hbalock, flags);
324
325	iocb = &dd_data->context_un.iocb;
326	ndlp = iocb->ndlp;
327	rmp = iocb->rmp;
328	cmp = cmdiocbq->context2;
329	bmp = cmdiocbq->context3;
330	rsp = &rspiocbq->iocb;
331
332	/* Copy the completed data or set the error status */
333
334	if (job) {
335		if (rsp->ulpStatus) {
336			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
337				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
338				case IOERR_SEQUENCE_TIMEOUT:
339					rc = -ETIMEDOUT;
340					break;
341				case IOERR_INVALID_RPI:
342					rc = -EFAULT;
343					break;
344				default:
345					rc = -EACCES;
346					break;
347				}
348			} else {
349				rc = -EACCES;
350			}
351		} else {
352			rsp_size = rsp->un.genreq64.bdl.bdeSize;
353			job->reply->reply_payload_rcv_len =
354				lpfc_bsg_copy_data(rmp, &job->reply_payload,
355						   rsp_size, 0);
356		}
357	}
358
359	lpfc_free_bsg_buffers(phba, cmp);
360	lpfc_free_bsg_buffers(phba, rmp);
361	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
362	kfree(bmp);
363	lpfc_sli_release_iocbq(phba, cmdiocbq);
364	lpfc_nlp_put(ndlp);
365	kfree(dd_data);
366
367	/* Complete the job if the job is still active */
368
369	if (job) {
370		job->reply->result = rc;
371		job->job_done(job);
372	}
373	return;
374}
375
376/**
377 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
378 * @job: fc_bsg_job to handle
379 **/
380static int
381lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
382{
383	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
384	struct lpfc_hba *phba = vport->phba;
385	struct lpfc_rport_data *rdata = job->rport->dd_data;
386	struct lpfc_nodelist *ndlp = rdata->pnode;
387	struct ulp_bde64 *bpl = NULL;
388	uint32_t timeout;
389	struct lpfc_iocbq *cmdiocbq = NULL;
390	IOCB_t *cmd;
391	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
392	int request_nseg;
393	int reply_nseg;
394	struct bsg_job_data *dd_data;
395	unsigned long flags;
396	uint32_t creg_val;
397	int rc = 0;
398	int iocb_stat;
399
400	/* in case no data is transferred */
401	job->reply->reply_payload_rcv_len = 0;
402
403	/* allocate our bsg tracking structure */
404	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
405	if (!dd_data) {
406		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
407				"2733 Failed allocation of dd_data\n");
408		rc = -ENOMEM;
409		goto no_dd_data;
410	}
411
412	if (!lpfc_nlp_get(ndlp)) {
413		rc = -ENODEV;
414		goto no_ndlp;
415	}
416
417	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
418		rc = -ENODEV;
419		goto free_ndlp;
420	}
421
422	cmdiocbq = lpfc_sli_get_iocbq(phba);
423	if (!cmdiocbq) {
424		rc = -ENOMEM;
425		goto free_ndlp;
426	}
427
428	cmd = &cmdiocbq->iocb;
429
430	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
431	if (!bmp) {
432		rc = -ENOMEM;
433		goto free_cmdiocbq;
434	}
435	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
436	if (!bmp->virt) {
437		rc = -ENOMEM;
438		goto free_bmp;
439	}
440
441	INIT_LIST_HEAD(&bmp->list);
442
443	bpl = (struct ulp_bde64 *) bmp->virt;
444	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
445	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
446				     1, bpl, &request_nseg);
447	if (!cmp) {
448		rc = -ENOMEM;
449		goto free_bmp;
450	}
451	lpfc_bsg_copy_data(cmp, &job->request_payload,
452			   job->request_payload.payload_len, 1);
453
454	bpl += request_nseg;
455	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
456	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
457				     bpl, &reply_nseg);
458	if (!rmp) {
459		rc = -ENOMEM;
460		goto free_cmp;
461	}
462
463	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
464	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
465	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
466	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
467	cmd->un.genreq64.bdl.bdeSize =
468		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
469	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
470	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
471	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
472	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
473	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
474	cmd->ulpBdeCount = 1;
475	cmd->ulpLe = 1;
476	cmd->ulpClass = CLASS3;
477	cmd->ulpContext = ndlp->nlp_rpi;
478	if (phba->sli_rev == LPFC_SLI_REV4)
479		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
480	cmd->ulpOwner = OWN_CHIP;
481	cmdiocbq->vport = phba->pport;
482	cmdiocbq->context3 = bmp;
483	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
484	timeout = phba->fc_ratov * 2;
485	cmd->ulpTimeout = timeout;
486
487	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
488	cmdiocbq->context1 = dd_data;
489	cmdiocbq->context2 = cmp;
490	cmdiocbq->context3 = bmp;
491	cmdiocbq->context_un.ndlp = ndlp;
492	dd_data->type = TYPE_IOCB;
493	dd_data->set_job = job;
494	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
495	dd_data->context_un.iocb.ndlp = ndlp;
496	dd_data->context_un.iocb.rmp = rmp;
497	job->dd_data = dd_data;
498
499	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
500		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
501			rc = -EIO ;
502			goto free_rmp;
503		}
504		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
505		writel(creg_val, phba->HCregaddr);
506		readl(phba->HCregaddr); /* flush */
507	}
508
509	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
510
511	if (iocb_stat == IOCB_SUCCESS) {
512		spin_lock_irqsave(&phba->hbalock, flags);
513		/* make sure the I/O had not been completed yet */
514		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
515			/* open up abort window to timeout handler */
516			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
517		}
518		spin_unlock_irqrestore(&phba->hbalock, flags);
519		return 0; /* done for now */
520	} else if (iocb_stat == IOCB_BUSY) {
521		rc = -EAGAIN;
522	} else {
523		rc = -EIO;
524	}
525
526	/* iocb failed so cleanup */
527	job->dd_data = NULL;
528
529free_rmp:
530	lpfc_free_bsg_buffers(phba, rmp);
531free_cmp:
532	lpfc_free_bsg_buffers(phba, cmp);
533free_bmp:
534	if (bmp->virt)
535		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
536	kfree(bmp);
537free_cmdiocbq:
538	lpfc_sli_release_iocbq(phba, cmdiocbq);
539free_ndlp:
540	lpfc_nlp_put(ndlp);
541no_ndlp:
542	kfree(dd_data);
543no_dd_data:
544	/* make error code available to userspace */
545	job->reply->result = rc;
546	job->dd_data = NULL;
547	return rc;
548}
549
550/**
551 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
552 * @phba: Pointer to HBA context object.
553 * @cmdiocbq: Pointer to command iocb.
554 * @rspiocbq: Pointer to response iocb.
555 *
556 * This function is the completion handler for iocbs issued using
557 * lpfc_bsg_rport_els_cmp function. This function is called by the
558 * ring event handler function without any lock held. This function
559 * can be called from both worker thread context and interrupt
560 * context. This function also can be called from other thread which
561 * cleans up the SLI layer objects.
562 * This function copies the contents of the response iocb to the
563 * response iocb memory object provided by the caller of
564 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
565 * sleeps for the iocb completion.
566 **/
567static void
568lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
569			struct lpfc_iocbq *cmdiocbq,
570			struct lpfc_iocbq *rspiocbq)
571{
572	struct bsg_job_data *dd_data;
573	struct fc_bsg_job *job;
574	IOCB_t *rsp;
575	struct lpfc_nodelist *ndlp;
576	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
577	struct fc_bsg_ctels_reply *els_reply;
578	uint8_t *rjt_data;
579	unsigned long flags;
580	unsigned int rsp_size;
581	int rc = 0;
582
583	dd_data = cmdiocbq->context1;
584	ndlp = dd_data->context_un.iocb.ndlp;
585	cmdiocbq->context1 = ndlp;
586
587	/* Determine if job has been aborted */
588	spin_lock_irqsave(&phba->ct_ev_lock, flags);
589	job = dd_data->set_job;
590	if (job) {
591		/* Prevent timeout handling from trying to abort job  */
592		job->dd_data = NULL;
593	}
594	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
595
596	/* Close the timeout handler abort window */
597	spin_lock_irqsave(&phba->hbalock, flags);
598	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
599	spin_unlock_irqrestore(&phba->hbalock, flags);
600
601	rsp = &rspiocbq->iocb;
602	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
603	prsp = (struct lpfc_dmabuf *)pcmd->list.next;
604
605	/* Copy the completed job data or determine the job status if job is
606	 * still active
607	 */
608
609	if (job) {
610		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
611			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
612			job->reply->reply_payload_rcv_len =
613				sg_copy_from_buffer(job->reply_payload.sg_list,
614						    job->reply_payload.sg_cnt,
615						    prsp->virt,
616						    rsp_size);
617		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
618			job->reply->reply_payload_rcv_len =
619				sizeof(struct fc_bsg_ctels_reply);
620			/* LS_RJT data returned in word 4 */
621			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
622			els_reply = &job->reply->reply_data.ctels_reply;
623			els_reply->status = FC_CTELS_STATUS_REJECT;
624			els_reply->rjt_data.action = rjt_data[3];
625			els_reply->rjt_data.reason_code = rjt_data[2];
626			els_reply->rjt_data.reason_explanation = rjt_data[1];
627			els_reply->rjt_data.vendor_unique = rjt_data[0];
628		} else {
629			rc = -EIO;
630		}
631	}
632
633	lpfc_nlp_put(ndlp);
634	lpfc_els_free_iocb(phba, cmdiocbq);
635	kfree(dd_data);
636
637	/* Complete the job if the job is still active */
638
639	if (job) {
640		job->reply->result = rc;
641		job->job_done(job);
642	}
643	return;
644}
645
646/**
647 * lpfc_bsg_rport_els - send an ELS command from a bsg request
648 * @job: fc_bsg_job to handle
649 **/
650static int
651lpfc_bsg_rport_els(struct fc_bsg_job *job)
652{
653	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
654	struct lpfc_hba *phba = vport->phba;
655	struct lpfc_rport_data *rdata = job->rport->dd_data;
656	struct lpfc_nodelist *ndlp = rdata->pnode;
657	uint32_t elscmd;
658	uint32_t cmdsize;
659	struct lpfc_iocbq *cmdiocbq;
660	uint16_t rpi = 0;
661	struct bsg_job_data *dd_data;
662	unsigned long flags;
663	uint32_t creg_val;
664	int rc = 0;
665
666	/* in case no data is transferred */
667	job->reply->reply_payload_rcv_len = 0;
668
669	/* verify the els command is not greater than the
670	 * maximum ELS transfer size.
671	 */
672
673	if (job->request_payload.payload_len > FCELSSIZE) {
674		rc = -EINVAL;
675		goto no_dd_data;
676	}
677
678	/* allocate our bsg tracking structure */
679	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
680	if (!dd_data) {
681		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
682				"2735 Failed allocation of dd_data\n");
683		rc = -ENOMEM;
684		goto no_dd_data;
685	}
686
687	elscmd = job->request->rqst_data.r_els.els_code;
688	cmdsize = job->request_payload.payload_len;
689
690	if (!lpfc_nlp_get(ndlp)) {
691		rc = -ENODEV;
692		goto free_dd_data;
693	}
694
695	/* We will use the allocated dma buffers by prep els iocb for command
696	 * and response to ensure if the job times out and the request is freed,
697	 * we won't be dma into memory that is no longer allocated to for the
698	 * request.
699	 */
700
701	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
702				      ndlp->nlp_DID, elscmd);
703	if (!cmdiocbq) {
704		rc = -EIO;
705		goto release_ndlp;
706	}
707
708	rpi = ndlp->nlp_rpi;
709
710	/* Transfer the request payload to allocated command dma buffer */
711
712	sg_copy_to_buffer(job->request_payload.sg_list,
713			  job->request_payload.sg_cnt,
714			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
715			  cmdsize);
716
717	if (phba->sli_rev == LPFC_SLI_REV4)
718		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
719	else
720		cmdiocbq->iocb.ulpContext = rpi;
721	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
722	cmdiocbq->context1 = dd_data;
723	cmdiocbq->context_un.ndlp = ndlp;
724	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
725	dd_data->type = TYPE_IOCB;
726	dd_data->set_job = job;
727	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
728	dd_data->context_un.iocb.ndlp = ndlp;
729	dd_data->context_un.iocb.rmp = NULL;
730	job->dd_data = dd_data;
731
732	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
733		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
734			rc = -EIO;
735			goto linkdown_err;
736		}
737		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
738		writel(creg_val, phba->HCregaddr);
739		readl(phba->HCregaddr); /* flush */
740	}
741
742	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
743
744	if (rc == IOCB_SUCCESS) {
745		spin_lock_irqsave(&phba->hbalock, flags);
746		/* make sure the I/O had not been completed/released */
747		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
748			/* open up abort window to timeout handler */
749			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
750		}
751		spin_unlock_irqrestore(&phba->hbalock, flags);
752		return 0; /* done for now */
753	} else if (rc == IOCB_BUSY) {
754		rc = -EAGAIN;
755	} else {
756		rc = -EIO;
757	}
758
759	/* iocb failed so cleanup */
760	job->dd_data = NULL;
761
762linkdown_err:
763	cmdiocbq->context1 = ndlp;
764	lpfc_els_free_iocb(phba, cmdiocbq);
765
766release_ndlp:
767	lpfc_nlp_put(ndlp);
768
769free_dd_data:
770	kfree(dd_data);
771
772no_dd_data:
773	/* make error code available to userspace */
774	job->reply->result = rc;
775	job->dd_data = NULL;
776	return rc;
777}
778
779/**
780 * lpfc_bsg_event_free - frees an allocated event structure
781 * @kref: Pointer to a kref.
782 *
783 * Called from kref_put. Back cast the kref into an event structure address.
784 * Free any events to get, delete associated nodes, free any events to see,
785 * free any data then free the event itself.
786 **/
787static void
788lpfc_bsg_event_free(struct kref *kref)
789{
790	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
791						  kref);
792	struct event_data *ed;
793
794	list_del(&evt->node);
795
796	while (!list_empty(&evt->events_to_get)) {
797		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
798		list_del(&ed->node);
799		kfree(ed->data);
800		kfree(ed);
801	}
802
803	while (!list_empty(&evt->events_to_see)) {
804		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
805		list_del(&ed->node);
806		kfree(ed->data);
807		kfree(ed);
808	}
809
810	kfree(evt->dd_data);
811	kfree(evt);
812}
813
814/**
815 * lpfc_bsg_event_ref - increments the kref for an event
816 * @evt: Pointer to an event structure.
817 **/
818static inline void
819lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
820{
821	kref_get(&evt->kref);
822}
823
824/**
825 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
826 * @evt: Pointer to an event structure.
827 **/
828static inline void
829lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
830{
831	kref_put(&evt->kref, lpfc_bsg_event_free);
832}
833
834/**
835 * lpfc_bsg_event_new - allocate and initialize a event structure
836 * @ev_mask: Mask of events.
837 * @ev_reg_id: Event reg id.
838 * @ev_req_id: Event request id.
839 **/
840static struct lpfc_bsg_event *
841lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
842{
843	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
844
845	if (!evt)
846		return NULL;
847
848	INIT_LIST_HEAD(&evt->events_to_get);
849	INIT_LIST_HEAD(&evt->events_to_see);
850	evt->type_mask = ev_mask;
851	evt->req_id = ev_req_id;
852	evt->reg_id = ev_reg_id;
853	evt->wait_time_stamp = jiffies;
854	evt->dd_data = NULL;
855	init_waitqueue_head(&evt->wq);
856	kref_init(&evt->kref);
857	return evt;
858}
859
860/**
861 * diag_cmd_data_free - Frees an lpfc dma buffer extension
862 * @phba: Pointer to HBA context object.
863 * @mlist: Pointer to an lpfc dma buffer extension.
864 **/
865static int
866diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
867{
868	struct lpfc_dmabufext *mlast;
869	struct pci_dev *pcidev;
870	struct list_head head, *curr, *next;
871
872	if ((!mlist) || (!lpfc_is_link_up(phba) &&
873		(phba->link_flag & LS_LOOPBACK_MODE))) {
874		return 0;
875	}
876
877	pcidev = phba->pcidev;
878	list_add_tail(&head, &mlist->dma.list);
879
880	list_for_each_safe(curr, next, &head) {
881		mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
882		if (mlast->dma.virt)
883			dma_free_coherent(&pcidev->dev,
884					  mlast->size,
885					  mlast->dma.virt,
886					  mlast->dma.phys);
887		kfree(mlast);
888	}
889	return 0;
890}
891
892/**
893 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
894 * @phba:
895 * @pring:
896 * @piocbq:
897 *
898 * This function is called when an unsolicited CT command is received.  It
899 * forwards the event to any processes registered to receive CT events.
900 **/
901int
902lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
903			struct lpfc_iocbq *piocbq)
904{
905	uint32_t evt_req_id = 0;
906	uint32_t cmd;
907	struct lpfc_dmabuf *dmabuf = NULL;
908	struct lpfc_bsg_event *evt;
909	struct event_data *evt_dat = NULL;
910	struct lpfc_iocbq *iocbq;
911	size_t offset = 0;
912	struct list_head head;
913	struct ulp_bde64 *bde;
914	dma_addr_t dma_addr;
915	int i;
916	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
917	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
918	struct lpfc_hbq_entry *hbqe;
919	struct lpfc_sli_ct_request *ct_req;
920	struct fc_bsg_job *job = NULL;
921	struct bsg_job_data *dd_data = NULL;
922	unsigned long flags;
923	int size = 0;
924
925	INIT_LIST_HEAD(&head);
926	list_add_tail(&head, &piocbq->list);
927
928	if (piocbq->iocb.ulpBdeCount == 0 ||
929	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
930		goto error_ct_unsol_exit;
931
932	if (phba->link_state == LPFC_HBA_ERROR ||
933		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
934		goto error_ct_unsol_exit;
935
936	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
937		dmabuf = bdeBuf1;
938	else {
939		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
940				    piocbq->iocb.un.cont64[0].addrLow);
941		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
942	}
943	if (dmabuf == NULL)
944		goto error_ct_unsol_exit;
945	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
946	evt_req_id = ct_req->FsType;
947	cmd = ct_req->CommandResponse.bits.CmdRsp;
948	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
949		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
950
951	spin_lock_irqsave(&phba->ct_ev_lock, flags);
952	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
953		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
954			evt->req_id != evt_req_id)
955			continue;
956
957		lpfc_bsg_event_ref(evt);
958		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
959		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
960		if (evt_dat == NULL) {
961			spin_lock_irqsave(&phba->ct_ev_lock, flags);
962			lpfc_bsg_event_unref(evt);
963			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
964					"2614 Memory allocation failed for "
965					"CT event\n");
966			break;
967		}
968
969		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
970			/* take accumulated byte count from the last iocbq */
971			iocbq = list_entry(head.prev, typeof(*iocbq), list);
972			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
973		} else {
974			list_for_each_entry(iocbq, &head, list) {
975				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
976					evt_dat->len +=
977					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
978			}
979		}
980
981		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
982		if (evt_dat->data == NULL) {
983			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
984					"2615 Memory allocation failed for "
985					"CT event data, size %d\n",
986					evt_dat->len);
987			kfree(evt_dat);
988			spin_lock_irqsave(&phba->ct_ev_lock, flags);
989			lpfc_bsg_event_unref(evt);
990			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
991			goto error_ct_unsol_exit;
992		}
993
994		list_for_each_entry(iocbq, &head, list) {
995			size = 0;
996			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
997				bdeBuf1 = iocbq->context2;
998				bdeBuf2 = iocbq->context3;
999			}
1000			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
1001				if (phba->sli3_options &
1002				    LPFC_SLI3_HBQ_ENABLED) {
1003					if (i == 0) {
1004						hbqe = (struct lpfc_hbq_entry *)
1005						  &iocbq->iocb.un.ulpWord[0];
1006						size = hbqe->bde.tus.f.bdeSize;
1007						dmabuf = bdeBuf1;
1008					} else if (i == 1) {
1009						hbqe = (struct lpfc_hbq_entry *)
1010							&iocbq->iocb.unsli3.
1011							sli3Words[4];
1012						size = hbqe->bde.tus.f.bdeSize;
1013						dmabuf = bdeBuf2;
1014					}
1015					if ((offset + size) > evt_dat->len)
1016						size = evt_dat->len - offset;
1017				} else {
1018					size = iocbq->iocb.un.cont64[i].
1019						tus.f.bdeSize;
1020					bde = &iocbq->iocb.un.cont64[i];
1021					dma_addr = getPaddr(bde->addrHigh,
1022							    bde->addrLow);
1023					dmabuf = lpfc_sli_ringpostbuf_get(phba,
1024							pring, dma_addr);
1025				}
1026				if (!dmabuf) {
1027					lpfc_printf_log(phba, KERN_ERR,
1028						LOG_LIBDFC, "2616 No dmabuf "
1029						"found for iocbq 0x%p\n",
1030						iocbq);
1031					kfree(evt_dat->data);
1032					kfree(evt_dat);
1033					spin_lock_irqsave(&phba->ct_ev_lock,
1034						flags);
1035					lpfc_bsg_event_unref(evt);
1036					spin_unlock_irqrestore(
1037						&phba->ct_ev_lock, flags);
1038					goto error_ct_unsol_exit;
1039				}
1040				memcpy((char *)(evt_dat->data) + offset,
1041				       dmabuf->virt, size);
1042				offset += size;
1043				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
1044				    !(phba->sli3_options &
1045				      LPFC_SLI3_HBQ_ENABLED)) {
1046					lpfc_sli_ringpostbuf_put(phba, pring,
1047								 dmabuf);
1048				} else {
1049					switch (cmd) {
1050					case ELX_LOOPBACK_DATA:
1051						if (phba->sli_rev <
1052						    LPFC_SLI_REV4)
1053							diag_cmd_data_free(phba,
1054							(struct lpfc_dmabufext
1055							 *)dmabuf);
1056						break;
1057					case ELX_LOOPBACK_XRI_SETUP:
1058						if ((phba->sli_rev ==
1059							LPFC_SLI_REV2) ||
1060							(phba->sli3_options &
1061							LPFC_SLI3_HBQ_ENABLED
1062							)) {
1063							lpfc_in_buf_free(phba,
1064									dmabuf);
1065						} else {
1066							lpfc_post_buffer(phba,
1067									 pring,
1068									 1);
1069						}
1070						break;
1071					default:
1072						if (!(phba->sli3_options &
1073						      LPFC_SLI3_HBQ_ENABLED))
1074							lpfc_post_buffer(phba,
1075									 pring,
1076									 1);
1077						break;
1078					}
1079				}
1080			}
1081		}
1082
1083		spin_lock_irqsave(&phba->ct_ev_lock, flags);
1084		if (phba->sli_rev == LPFC_SLI_REV4) {
1085			evt_dat->immed_dat = phba->ctx_idx;
1086			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
1087			/* Provide warning for over-run of the ct_ctx array */
1088			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
1089			    UNSOL_VALID)
1090				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1091						"2717 CT context array entry "
1092						"[%d] over-run: oxid:x%x, "
1093						"sid:x%x\n", phba->ctx_idx,
1094						phba->ct_ctx[
1095						    evt_dat->immed_dat].oxid,
1096						phba->ct_ctx[
1097						    evt_dat->immed_dat].SID);
1098			phba->ct_ctx[evt_dat->immed_dat].rxid =
1099				piocbq->iocb.ulpContext;
1100			phba->ct_ctx[evt_dat->immed_dat].oxid =
1101				piocbq->iocb.unsli3.rcvsli3.ox_id;
1102			phba->ct_ctx[evt_dat->immed_dat].SID =
1103				piocbq->iocb.un.rcvels.remoteID;
1104			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
1105		} else
1106			evt_dat->immed_dat = piocbq->iocb.ulpContext;
1107
1108		evt_dat->type = FC_REG_CT_EVENT;
1109		list_add(&evt_dat->node, &evt->events_to_see);
1110		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
1111			wake_up_interruptible(&evt->wq);
1112			lpfc_bsg_event_unref(evt);
1113			break;
1114		}
1115
1116		list_move(evt->events_to_see.prev, &evt->events_to_get);
1117
1118		dd_data = (struct bsg_job_data *)evt->dd_data;
1119		job = dd_data->set_job;
1120		dd_data->set_job = NULL;
1121		lpfc_bsg_event_unref(evt);
1122		if (job) {
1123			job->reply->reply_payload_rcv_len = size;
1124			/* make error code available to userspace */
1125			job->reply->result = 0;
1126			job->dd_data = NULL;
1127			/* complete the job back to userspace */
1128			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1129			job->job_done(job);
1130			spin_lock_irqsave(&phba->ct_ev_lock, flags);
1131		}
1132	}
1133	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1134
1135error_ct_unsol_exit:
1136	if (!list_empty(&head))
1137		list_del(&head);
1138	if ((phba->sli_rev < LPFC_SLI_REV4) &&
1139	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
1140		return 0;
1141	return 1;
1142}
1143
1144/**
1145 * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
1146 * @phba: Pointer to HBA context object.
1147 * @dmabuf: pointer to a dmabuf that describes the FC sequence
1148 *
1149 * This function handles abort to the CT command toward management plane
1150 * for SLI4 port.
1151 *
1152 * If the pending context of a CT command to management plane present, clears
1153 * such context and returns 1 for handled; otherwise, it returns 0 indicating
1154 * no context exists.
1155 **/
1156int
1157lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
1158{
1159	struct fc_frame_header fc_hdr;
1160	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
1161	int ctx_idx, handled = 0;
1162	uint16_t oxid, rxid;
1163	uint32_t sid;
1164
1165	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
1166	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
1167	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
1168	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
1169
1170	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
1171		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
1172			continue;
1173		if (phba->ct_ctx[ctx_idx].rxid != rxid)
1174			continue;
1175		if (phba->ct_ctx[ctx_idx].oxid != oxid)
1176			continue;
1177		if (phba->ct_ctx[ctx_idx].SID != sid)
1178			continue;
1179		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
1180		handled = 1;
1181	}
1182	return handled;
1183}
1184
1185/**
1186 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1187 * @job: SET_EVENT fc_bsg_job
1188 **/
1189static int
1190lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1191{
1192	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1193	struct lpfc_hba *phba = vport->phba;
1194	struct set_ct_event *event_req;
1195	struct lpfc_bsg_event *evt;
1196	int rc = 0;
1197	struct bsg_job_data *dd_data = NULL;
1198	uint32_t ev_mask;
1199	unsigned long flags;
1200
1201	if (job->request_len <
1202	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1203		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1204				"2612 Received SET_CT_EVENT below minimum "
1205				"size\n");
1206		rc = -EINVAL;
1207		goto job_error;
1208	}
1209
1210	event_req = (struct set_ct_event *)
1211		job->request->rqst_data.h_vendor.vendor_cmd;
1212	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
1213				FC_REG_EVENT_MASK);
1214	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1215	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1216		if (evt->reg_id == event_req->ev_reg_id) {
1217			lpfc_bsg_event_ref(evt);
1218			evt->wait_time_stamp = jiffies;
1219			dd_data = (struct bsg_job_data *)evt->dd_data;
1220			break;
1221		}
1222	}
1223	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1224
1225	if (&evt->node == &phba->ct_ev_waiters) {
1226		/* no event waiting struct yet - first call */
1227		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1228		if (dd_data == NULL) {
1229			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1230					"2734 Failed allocation of dd_data\n");
1231			rc = -ENOMEM;
1232			goto job_error;
1233		}
1234		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1235					event_req->ev_req_id);
1236		if (!evt) {
1237			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1238					"2617 Failed allocation of event "
1239					"waiter\n");
1240			rc = -ENOMEM;
1241			goto job_error;
1242		}
1243		dd_data->type = TYPE_EVT;
1244		dd_data->set_job = NULL;
1245		dd_data->context_un.evt = evt;
1246		evt->dd_data = (void *)dd_data;
1247		spin_lock_irqsave(&phba->ct_ev_lock, flags);
1248		list_add(&evt->node, &phba->ct_ev_waiters);
1249		lpfc_bsg_event_ref(evt);
1250		evt->wait_time_stamp = jiffies;
1251		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1252	}
1253
1254	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1255	evt->waiting = 1;
1256	dd_data->set_job = job; /* for unsolicited command */
1257	job->dd_data = dd_data; /* for fc transport timeout callback*/
1258	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1259	return 0; /* call job done later */
1260
1261job_error:
1262	if (dd_data != NULL)
1263		kfree(dd_data);
1264
1265	job->dd_data = NULL;
1266	return rc;
1267}
1268
1269/**
1270 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1271 * @job: GET_EVENT fc_bsg_job
1272 **/
1273static int
1274lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1275{
1276	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1277	struct lpfc_hba *phba = vport->phba;
1278	struct get_ct_event *event_req;
1279	struct get_ct_event_reply *event_reply;
1280	struct lpfc_bsg_event *evt, *evt_next;
1281	struct event_data *evt_dat = NULL;
1282	unsigned long flags;
1283	uint32_t rc = 0;
1284
1285	if (job->request_len <
1286	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1287		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1288				"2613 Received GET_CT_EVENT request below "
1289				"minimum size\n");
1290		rc = -EINVAL;
1291		goto job_error;
1292	}
1293
1294	event_req = (struct get_ct_event *)
1295		job->request->rqst_data.h_vendor.vendor_cmd;
1296
1297	event_reply = (struct get_ct_event_reply *)
1298		job->reply->reply_data.vendor_reply.vendor_rsp;
1299	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1300	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
1301		if (evt->reg_id == event_req->ev_reg_id) {
1302			if (list_empty(&evt->events_to_get))
1303				break;
1304			lpfc_bsg_event_ref(evt);
1305			evt->wait_time_stamp = jiffies;
1306			evt_dat = list_entry(evt->events_to_get.prev,
1307					     struct event_data, node);
1308			list_del(&evt_dat->node);
1309			break;
1310		}
1311	}
1312	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1313
1314	/* The app may continue to ask for event data until it gets
1315	 * an error indicating that there isn't anymore
1316	 */
1317	if (evt_dat == NULL) {
1318		job->reply->reply_payload_rcv_len = 0;
1319		rc = -ENOENT;
1320		goto job_error;
1321	}
1322
1323	if (evt_dat->len > job->request_payload.payload_len) {
1324		evt_dat->len = job->request_payload.payload_len;
1325		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1326				"2618 Truncated event data at %d "
1327				"bytes\n",
1328				job->request_payload.payload_len);
1329	}
1330
1331	event_reply->type = evt_dat->type;
1332	event_reply->immed_data = evt_dat->immed_dat;
1333	if (evt_dat->len > 0)
1334		job->reply->reply_payload_rcv_len =
1335			sg_copy_from_buffer(job->request_payload.sg_list,
1336					    job->request_payload.sg_cnt,
1337					    evt_dat->data, evt_dat->len);
1338	else
1339		job->reply->reply_payload_rcv_len = 0;
1340
1341	if (evt_dat) {
1342		kfree(evt_dat->data);
1343		kfree(evt_dat);
1344	}
1345
1346	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1347	lpfc_bsg_event_unref(evt);
1348	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1349	job->dd_data = NULL;
1350	job->reply->result = 0;
1351	job->job_done(job);
1352	return 0;
1353
1354job_error:
1355	job->dd_data = NULL;
1356	job->reply->result = rc;
1357	return rc;
1358}
1359
1360/**
1361 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1362 * @phba: Pointer to HBA context object.
1363 * @cmdiocbq: Pointer to command iocb.
1364 * @rspiocbq: Pointer to response iocb.
1365 *
1366 * This function is the completion handler for iocbs issued using
1367 * lpfc_issue_ct_rsp_cmp function. This function is called by the
1368 * ring event handler function without any lock held. This function
1369 * can be called from both worker thread context and interrupt
1370 * context. This function also can be called from other thread which
1371 * cleans up the SLI layer objects.
1372 * This function copy the contents of the response iocb to the
1373 * response iocb memory object provided by the caller of
1374 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1375 * sleeps for the iocb completion.
1376 **/
1377static void
1378lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1379			struct lpfc_iocbq *cmdiocbq,
1380			struct lpfc_iocbq *rspiocbq)
1381{
1382	struct bsg_job_data *dd_data;
1383	struct fc_bsg_job *job;
1384	IOCB_t *rsp;
1385	struct lpfc_dmabuf *bmp, *cmp;
1386	struct lpfc_nodelist *ndlp;
1387	unsigned long flags;
1388	int rc = 0;
1389
1390	dd_data = cmdiocbq->context1;
1391
1392	/* Determine if job has been aborted */
1393	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1394	job = dd_data->set_job;
1395	if (job) {
1396		/* Prevent timeout handling from trying to abort job  */
1397		job->dd_data = NULL;
1398	}
1399	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1400
1401	/* Close the timeout handler abort window */
1402	spin_lock_irqsave(&phba->hbalock, flags);
1403	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
1404	spin_unlock_irqrestore(&phba->hbalock, flags);
1405
1406	ndlp = dd_data->context_un.iocb.ndlp;
1407	cmp = cmdiocbq->context2;
1408	bmp = cmdiocbq->context3;
1409	rsp = &rspiocbq->iocb;
1410
1411	/* Copy the completed job data or set the error status */
1412
1413	if (job) {
1414		if (rsp->ulpStatus) {
1415			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1416				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
1417				case IOERR_SEQUENCE_TIMEOUT:
1418					rc = -ETIMEDOUT;
1419					break;
1420				case IOERR_INVALID_RPI:
1421					rc = -EFAULT;
1422					break;
1423				default:
1424					rc = -EACCES;
1425					break;
1426				}
1427			} else {
1428				rc = -EACCES;
1429			}
1430		} else {
1431			job->reply->reply_payload_rcv_len = 0;
1432		}
1433	}
1434
1435	lpfc_free_bsg_buffers(phba, cmp);
1436	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1437	kfree(bmp);
1438	lpfc_sli_release_iocbq(phba, cmdiocbq);
1439	lpfc_nlp_put(ndlp);
1440	kfree(dd_data);
1441
1442	/* Complete the job if the job is still active */
1443
1444	if (job) {
1445		job->reply->result = rc;
1446		job->job_done(job);
1447	}
1448	return;
1449}
1450
1451/**
1452 * lpfc_issue_ct_rsp - issue a ct response
1453 * @phba: Pointer to HBA context object.
1454 * @job: Pointer to the job object.
1455 * @tag: tag index value into the ports context exchange array.
1456 * @bmp: Pointer to a dma buffer descriptor.
1457 * @num_entry: Number of enties in the bde.
1458 **/
1459static int
1460lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1461		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
1462		  int num_entry)
1463{
1464	IOCB_t *icmd;
1465	struct lpfc_iocbq *ctiocb = NULL;
1466	int rc = 0;
1467	struct lpfc_nodelist *ndlp = NULL;
1468	struct bsg_job_data *dd_data;
1469	unsigned long flags;
1470	uint32_t creg_val;
1471
1472	/* allocate our bsg tracking structure */
1473	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1474	if (!dd_data) {
1475		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1476				"2736 Failed allocation of dd_data\n");
1477		rc = -ENOMEM;
1478		goto no_dd_data;
1479	}
1480
1481	/* Allocate buffer for  command iocb */
1482	ctiocb = lpfc_sli_get_iocbq(phba);
1483	if (!ctiocb) {
1484		rc = -ENOMEM;
1485		goto no_ctiocb;
1486	}
1487
1488	icmd = &ctiocb->iocb;
1489	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1490	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1491	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1492	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1493	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1494	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1495	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1496	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1497	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1498
1499	/* Fill in rest of iocb */
1500	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1501	icmd->ulpBdeCount = 1;
1502	icmd->ulpLe = 1;
1503	icmd->ulpClass = CLASS3;
1504	if (phba->sli_rev == LPFC_SLI_REV4) {
1505		/* Do not issue unsol response if oxid not marked as valid */
1506		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
1507			rc = IOCB_ERROR;
1508			goto issue_ct_rsp_exit;
1509		}
1510		icmd->ulpContext = phba->ct_ctx[tag].rxid;
1511		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
1512		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1513		if (!ndlp) {
1514			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1515				 "2721 ndlp null for oxid %x SID %x\n",
1516					icmd->ulpContext,
1517					phba->ct_ctx[tag].SID);
1518			rc = IOCB_ERROR;
1519			goto issue_ct_rsp_exit;
1520		}
1521
1522		/* Check if the ndlp is active */
1523		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1524			rc = IOCB_ERROR;
1525			goto issue_ct_rsp_exit;
1526		}
1527
1528		/* get a refernece count so the ndlp doesn't go away while
1529		 * we respond
1530		 */
1531		if (!lpfc_nlp_get(ndlp)) {
1532			rc = IOCB_ERROR;
1533			goto issue_ct_rsp_exit;
1534		}
1535
1536		icmd->un.ulpWord[3] =
1537				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1538
1539		/* The exchange is done, mark the entry as invalid */
1540		phba->ct_ctx[tag].valid = UNSOL_INVALID;
1541	} else
1542		icmd->ulpContext = (ushort) tag;
1543
1544	icmd->ulpTimeout = phba->fc_ratov * 2;
1545
1546	/* Xmit CT response on exchange <xid> */
1547	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1548		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1549		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
1550
1551	ctiocb->iocb_cmpl = NULL;
1552	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1553	ctiocb->vport = phba->pport;
1554	ctiocb->context1 = dd_data;
1555	ctiocb->context2 = cmp;
1556	ctiocb->context3 = bmp;
1557	ctiocb->context_un.ndlp = ndlp;
1558	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1559
1560	dd_data->type = TYPE_IOCB;
1561	dd_data->set_job = job;
1562	dd_data->context_un.iocb.cmdiocbq = ctiocb;
1563	dd_data->context_un.iocb.ndlp = ndlp;
1564	dd_data->context_un.iocb.rmp = NULL;
1565	job->dd_data = dd_data;
1566
1567	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1568		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1569			rc = -IOCB_ERROR;
1570			goto issue_ct_rsp_exit;
1571		}
1572		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1573		writel(creg_val, phba->HCregaddr);
1574		readl(phba->HCregaddr); /* flush */
1575	}
1576
1577	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1578
1579	if (rc == IOCB_SUCCESS) {
1580		spin_lock_irqsave(&phba->hbalock, flags);
1581		/* make sure the I/O had not been completed/released */
1582		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
1583			/* open up abort window to timeout handler */
1584			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
1585		}
1586		spin_unlock_irqrestore(&phba->hbalock, flags);
1587		return 0; /* done for now */
1588	}
1589
1590	/* iocb failed so cleanup */
1591	job->dd_data = NULL;
1592
1593issue_ct_rsp_exit:
1594	lpfc_sli_release_iocbq(phba, ctiocb);
1595no_ctiocb:
1596	kfree(dd_data);
1597no_dd_data:
1598	return rc;
1599}
1600
1601/**
1602 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1603 * @job: SEND_MGMT_RESP fc_bsg_job
1604 **/
1605static int
1606lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1607{
1608	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1609	struct lpfc_hba *phba = vport->phba;
1610	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1611		job->request->rqst_data.h_vendor.vendor_cmd;
1612	struct ulp_bde64 *bpl;
1613	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
1614	int bpl_entries;
1615	uint32_t tag = mgmt_resp->tag;
1616	unsigned long reqbfrcnt =
1617			(unsigned long)job->request_payload.payload_len;
1618	int rc = 0;
1619
1620	/* in case no data is transferred */
1621	job->reply->reply_payload_rcv_len = 0;
1622
1623	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1624		rc = -ERANGE;
1625		goto send_mgmt_rsp_exit;
1626	}
1627
1628	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1629	if (!bmp) {
1630		rc = -ENOMEM;
1631		goto send_mgmt_rsp_exit;
1632	}
1633
1634	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1635	if (!bmp->virt) {
1636		rc = -ENOMEM;
1637		goto send_mgmt_rsp_free_bmp;
1638	}
1639
1640	INIT_LIST_HEAD(&bmp->list);
1641	bpl = (struct ulp_bde64 *) bmp->virt;
1642	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
1643	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
1644				     1, bpl, &bpl_entries);
1645	if (!cmp) {
1646		rc = -ENOMEM;
1647		goto send_mgmt_rsp_free_bmp;
1648	}
1649	lpfc_bsg_copy_data(cmp, &job->request_payload,
1650			   job->request_payload.payload_len, 1);
1651
1652	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
1653
1654	if (rc == IOCB_SUCCESS)
1655		return 0; /* done for now */
1656
1657	rc = -EACCES;
1658
1659	lpfc_free_bsg_buffers(phba, cmp);
1660
1661send_mgmt_rsp_free_bmp:
1662	if (bmp->virt)
1663		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1664	kfree(bmp);
1665send_mgmt_rsp_exit:
1666	/* make error code available to userspace */
1667	job->reply->result = rc;
1668	job->dd_data = NULL;
1669	return rc;
1670}
1671
1672/**
1673 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1674 * @phba: Pointer to HBA context object.
1675 *
1676 * This function is responsible for preparing driver for diag loopback
1677 * on device.
1678 */
1679static int
1680lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1681{
1682	struct lpfc_vport **vports;
1683	struct Scsi_Host *shost;
1684	struct lpfc_sli *psli;
1685	struct lpfc_sli_ring *pring;
1686	int i = 0;
1687
1688	psli = &phba->sli;
1689	if (!psli)
1690		return -ENODEV;
1691
1692	pring = &psli->ring[LPFC_FCP_RING];
1693	if (!pring)
1694		return -ENODEV;
1695
1696	if ((phba->link_state == LPFC_HBA_ERROR) ||
1697	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1698	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1699		return -EACCES;
1700
1701	vports = lpfc_create_vport_work_array(phba);
1702	if (vports) {
1703		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1704			shost = lpfc_shost_from_vport(vports[i]);
1705			scsi_block_requests(shost);
1706		}
1707		lpfc_destroy_vport_work_array(phba, vports);
1708	} else {
1709		shost = lpfc_shost_from_vport(phba->pport);
1710		scsi_block_requests(shost);
1711	}
1712
1713	while (!list_empty(&pring->txcmplq)) {
1714		if (i++ > 500)  /* wait up to 5 seconds */
1715			break;
1716		msleep(10);
1717	}
1718	return 0;
1719}
1720
1721/**
1722 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1723 * @phba: Pointer to HBA context object.
1724 *
1725 * This function is responsible for driver exit processing of setting up
1726 * diag loopback mode on device.
1727 */
1728static void
1729lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1730{
1731	struct Scsi_Host *shost;
1732	struct lpfc_vport **vports;
1733	int i;
1734
1735	vports = lpfc_create_vport_work_array(phba);
1736	if (vports) {
1737		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1738			shost = lpfc_shost_from_vport(vports[i]);
1739			scsi_unblock_requests(shost);
1740		}
1741		lpfc_destroy_vport_work_array(phba, vports);
1742	} else {
1743		shost = lpfc_shost_from_vport(phba->pport);
1744		scsi_unblock_requests(shost);
1745	}
1746	return;
1747}
1748
1749/**
1750 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1751 * @phba: Pointer to HBA context object.
1752 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1753 *
1754 * This function is responsible for placing an sli3  port into diagnostic
1755 * loopback mode in order to perform a diagnostic loopback test.
1756 * All new scsi requests are blocked, a small delay is used to allow the
1757 * scsi requests to complete then the link is brought down. If the link is
1758 * is placed in loopback mode then scsi requests are again allowed
1759 * so the scsi mid-layer doesn't give up on the port.
1760 * All of this is done in-line.
1761 */
1762static int
1763lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1764{
1765	struct diag_mode_set *loopback_mode;
1766	uint32_t link_flags;
1767	uint32_t timeout;
1768	LPFC_MBOXQ_t *pmboxq  = NULL;
1769	int mbxstatus = MBX_SUCCESS;
1770	int i = 0;
1771	int rc = 0;
1772
1773	/* no data to return just the return code */
1774	job->reply->reply_payload_rcv_len = 0;
1775
1776	if (job->request_len < sizeof(struct fc_bsg_request) +
1777	    sizeof(struct diag_mode_set)) {
1778		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1779				"2738 Received DIAG MODE request size:%d "
1780				"below the minimum size:%d\n",
1781				job->request_len,
1782				(int)(sizeof(struct fc_bsg_request) +
1783				sizeof(struct diag_mode_set)));
1784		rc = -EINVAL;
1785		goto job_error;
1786	}
1787
1788	rc = lpfc_bsg_diag_mode_enter(phba);
1789	if (rc)
1790		goto job_error;
1791
1792	/* bring the link to diagnostic mode */
1793	loopback_mode = (struct diag_mode_set *)
1794		job->request->rqst_data.h_vendor.vendor_cmd;
1795	link_flags = loopback_mode->type;
1796	timeout = loopback_mode->timeout * 100;
1797
1798	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1799	if (!pmboxq) {
1800		rc = -ENOMEM;
1801		goto loopback_mode_exit;
1802	}
1803	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1804	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1805	pmboxq->u.mb.mbxOwner = OWN_HOST;
1806
1807	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1808
1809	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1810		/* wait for link down before proceeding */
1811		i = 0;
1812		while (phba->link_state != LPFC_LINK_DOWN) {
1813			if (i++ > timeout) {
1814				rc = -ETIMEDOUT;
1815				goto loopback_mode_exit;
1816			}
1817			msleep(10);
1818		}
1819
1820		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1821		if (link_flags == INTERNAL_LOOP_BACK)
1822			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1823		else
1824			pmboxq->u.mb.un.varInitLnk.link_flags =
1825				FLAGS_TOPOLOGY_MODE_LOOP;
1826
1827		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1828		pmboxq->u.mb.mbxOwner = OWN_HOST;
1829
1830		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1831						     LPFC_MBOX_TMO);
1832
1833		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1834			rc = -ENODEV;
1835		else {
1836			spin_lock_irq(&phba->hbalock);
1837			phba->link_flag |= LS_LOOPBACK_MODE;
1838			spin_unlock_irq(&phba->hbalock);
1839			/* wait for the link attention interrupt */
1840			msleep(100);
1841
1842			i = 0;
1843			while (phba->link_state != LPFC_HBA_READY) {
1844				if (i++ > timeout) {
1845					rc = -ETIMEDOUT;
1846					break;
1847				}
1848
1849				msleep(10);
1850			}
1851		}
1852
1853	} else
1854		rc = -ENODEV;
1855
1856loopback_mode_exit:
1857	lpfc_bsg_diag_mode_exit(phba);
1858
1859	/*
1860	 * Let SLI layer release mboxq if mbox command completed after timeout.
1861	 */
1862	if (pmboxq && mbxstatus != MBX_TIMEOUT)
1863		mempool_free(pmboxq, phba->mbox_mem_pool);
1864
1865job_error:
1866	/* make error code available to userspace */
1867	job->reply->result = rc;
1868	/* complete the job back to userspace if no error */
1869	if (rc == 0)
1870		job->job_done(job);
1871	return rc;
1872}
1873
1874/**
1875 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1876 * @phba: Pointer to HBA context object.
1877 * @diag: Flag for set link to diag or nomral operation state.
1878 *
1879 * This function is responsible for issuing a sli4 mailbox command for setting
1880 * link to either diag state or normal operation state.
1881 */
1882static int
1883lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1884{
1885	LPFC_MBOXQ_t *pmboxq;
1886	struct lpfc_mbx_set_link_diag_state *link_diag_state;
1887	uint32_t req_len, alloc_len;
1888	int mbxstatus = MBX_SUCCESS, rc;
1889
1890	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1891	if (!pmboxq)
1892		return -ENOMEM;
1893
1894	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1895		   sizeof(struct lpfc_sli4_cfg_mhdr));
1896	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1897				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1898				req_len, LPFC_SLI4_MBX_EMBED);
1899	if (alloc_len != req_len) {
1900		rc = -ENOMEM;
1901		goto link_diag_state_set_out;
1902	}
1903	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1904			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1905			diag, phba->sli4_hba.lnk_info.lnk_tp,
1906			phba->sli4_hba.lnk_info.lnk_no);
1907
1908	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1909	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1910	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
1911	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1912	       phba->sli4_hba.lnk_info.lnk_no);
1913	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1914	       phba->sli4_hba.lnk_info.lnk_tp);
1915	if (diag)
1916		bf_set(lpfc_mbx_set_diag_state_diag,
1917		       &link_diag_state->u.req, 1);
1918	else
1919		bf_set(lpfc_mbx_set_diag_state_diag,
1920		       &link_diag_state->u.req, 0);
1921
1922	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1923
1924	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1925		rc = 0;
1926	else
1927		rc = -ENODEV;
1928
1929link_diag_state_set_out:
1930	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1931		mempool_free(pmboxq, phba->mbox_mem_pool);
1932
1933	return rc;
1934}
1935
1936/**
1937 * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
1938 * @phba: Pointer to HBA context object.
1939 *
1940 * This function is responsible for issuing a sli4 mailbox command for setting
1941 * up internal loopback diagnostic.
1942 */
1943static int
1944lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
1945{
1946	LPFC_MBOXQ_t *pmboxq;
1947	uint32_t req_len, alloc_len;
1948	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1949	int mbxstatus = MBX_SUCCESS, rc = 0;
1950
1951	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1952	if (!pmboxq)
1953		return -ENOMEM;
1954	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1955		   sizeof(struct lpfc_sli4_cfg_mhdr));
1956	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1957				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1958				req_len, LPFC_SLI4_MBX_EMBED);
1959	if (alloc_len != req_len) {
1960		mempool_free(pmboxq, phba->mbox_mem_pool);
1961		return -ENOMEM;
1962	}
1963	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1964	bf_set(lpfc_mbx_set_diag_state_link_num,
1965	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
1966	bf_set(lpfc_mbx_set_diag_state_link_type,
1967	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
1968	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1969	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
1970
1971	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1972	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
1973		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1974				"3127 Failed setup loopback mode mailbox "
1975				"command, rc:x%x, status:x%x\n", mbxstatus,
1976				pmboxq->u.mb.mbxStatus);
1977		rc = -ENODEV;
1978	}
1979	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1980		mempool_free(pmboxq, phba->mbox_mem_pool);
1981	return rc;
1982}
1983
1984/**
1985 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
1986 * @phba: Pointer to HBA context object.
1987 *
1988 * This function set up SLI4 FC port registrations for diagnostic run, which
1989 * includes all the rpis, vfi, and also vpi.
1990 */
1991static int
1992lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
1993{
1994	int rc;
1995
1996	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
1997		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1998				"3136 Port still had vfi registered: "
1999				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
2000				phba->pport->fc_myDID, phba->fcf.fcfi,
2001				phba->sli4_hba.vfi_ids[phba->pport->vfi],
2002				phba->vpi_ids[phba->pport->vpi]);
2003		return -EINVAL;
2004	}
2005	rc = lpfc_issue_reg_vfi(phba->pport);
2006	return rc;
2007}
2008
2009/**
2010 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
2011 * @phba: Pointer to HBA context object.
2012 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2013 *
2014 * This function is responsible for placing an sli4 port into diagnostic
2015 * loopback mode in order to perform a diagnostic loopback test.
2016 */
2017static int
2018lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
2019{
2020	struct diag_mode_set *loopback_mode;
2021	uint32_t link_flags, timeout;
2022	int i, rc = 0;
2023
2024	/* no data to return just the return code */
2025	job->reply->reply_payload_rcv_len = 0;
2026
2027	if (job->request_len < sizeof(struct fc_bsg_request) +
2028	    sizeof(struct diag_mode_set)) {
2029		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2030				"3011 Received DIAG MODE request size:%d "
2031				"below the minimum size:%d\n",
2032				job->request_len,
2033				(int)(sizeof(struct fc_bsg_request) +
2034				sizeof(struct diag_mode_set)));
2035		rc = -EINVAL;
2036		goto job_error;
2037	}
2038
2039	rc = lpfc_bsg_diag_mode_enter(phba);
2040	if (rc)
2041		goto job_error;
2042
2043	/* indicate we are in loobpack diagnostic mode */
2044	spin_lock_irq(&phba->hbalock);
2045	phba->link_flag |= LS_LOOPBACK_MODE;
2046	spin_unlock_irq(&phba->hbalock);
2047
2048	/* reset port to start frome scratch */
2049	rc = lpfc_selective_reset(phba);
2050	if (rc)
2051		goto job_error;
2052
2053	/* bring the link to diagnostic mode */
2054	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2055			"3129 Bring link to diagnostic state.\n");
2056	loopback_mode = (struct diag_mode_set *)
2057		job->request->rqst_data.h_vendor.vendor_cmd;
2058	link_flags = loopback_mode->type;
2059	timeout = loopback_mode->timeout * 100;
2060
2061	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2062	if (rc) {
2063		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2064				"3130 Failed to bring link to diagnostic "
2065				"state, rc:x%x\n", rc);
2066		goto loopback_mode_exit;
2067	}
2068
2069	/* wait for link down before proceeding */
2070	i = 0;
2071	while (phba->link_state != LPFC_LINK_DOWN) {
2072		if (i++ > timeout) {
2073			rc = -ETIMEDOUT;
2074			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2075					"3131 Timeout waiting for link to "
2076					"diagnostic mode, timeout:%d ms\n",
2077					timeout * 10);
2078			goto loopback_mode_exit;
2079		}
2080		msleep(10);
2081	}
2082
2083	/* set up loopback mode */
2084	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2085			"3132 Set up loopback mode:x%x\n", link_flags);
2086
2087	if (link_flags == INTERNAL_LOOP_BACK)
2088		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
2089	else if (link_flags == EXTERNAL_LOOP_BACK)
2090		rc = lpfc_hba_init_link_fc_topology(phba,
2091						    FLAGS_TOPOLOGY_MODE_PT_PT,
2092						    MBX_NOWAIT);
2093	else {
2094		rc = -EINVAL;
2095		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2096				"3141 Loopback mode:x%x not supported\n",
2097				link_flags);
2098		goto loopback_mode_exit;
2099	}
2100
2101	if (!rc) {
2102		/* wait for the link attention interrupt */
2103		msleep(100);
2104		i = 0;
2105		while (phba->link_state < LPFC_LINK_UP) {
2106			if (i++ > timeout) {
2107				rc = -ETIMEDOUT;
2108				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2109					"3137 Timeout waiting for link up "
2110					"in loopback mode, timeout:%d ms\n",
2111					timeout * 10);
2112				break;
2113			}
2114			msleep(10);
2115		}
2116	}
2117
2118	/* port resource registration setup for loopback diagnostic */
2119	if (!rc) {
2120		/* set up a none zero myDID for loopback test */
2121		phba->pport->fc_myDID = 1;
2122		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
2123	} else
2124		goto loopback_mode_exit;
2125
2126	if (!rc) {
2127		/* wait for the port ready */
2128		msleep(100);
2129		i = 0;
2130		while (phba->link_state != LPFC_HBA_READY) {
2131			if (i++ > timeout) {
2132				rc = -ETIMEDOUT;
2133				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2134					"3133 Timeout waiting for port "
2135					"loopback mode ready, timeout:%d ms\n",
2136					timeout * 10);
2137				break;
2138			}
2139			msleep(10);
2140		}
2141	}
2142
2143loopback_mode_exit:
2144	/* clear loopback diagnostic mode */
2145	if (rc) {
2146		spin_lock_irq(&phba->hbalock);
2147		phba->link_flag &= ~LS_LOOPBACK_MODE;
2148		spin_unlock_irq(&phba->hbalock);
2149	}
2150	lpfc_bsg_diag_mode_exit(phba);
2151
2152job_error:
2153	/* make error code available to userspace */
2154	job->reply->result = rc;
2155	/* complete the job back to userspace if no error */
2156	if (rc == 0)
2157		job->job_done(job);
2158	return rc;
2159}
2160
2161/**
2162 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2163 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2164 *
2165 * This function is responsible for responding to check and dispatch bsg diag
2166 * command from the user to proper driver action routines.
2167 */
2168static int
2169lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
2170{
2171	struct Scsi_Host *shost;
2172	struct lpfc_vport *vport;
2173	struct lpfc_hba *phba;
2174	int rc;
2175
2176	shost = job->shost;
2177	if (!shost)
2178		return -ENODEV;
2179	vport = (struct lpfc_vport *)job->shost->hostdata;
2180	if (!vport)
2181		return -ENODEV;
2182	phba = vport->phba;
2183	if (!phba)
2184		return -ENODEV;
2185
2186	if (phba->sli_rev < LPFC_SLI_REV4)
2187		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2188	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
2189		 LPFC_SLI_INTF_IF_TYPE_2)
2190		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2191	else
2192		rc = -ENODEV;
2193
2194	return rc;
2195}
2196
2197/**
2198 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2199 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2200 *
2201 * This function is responsible for responding to check and dispatch bsg diag
2202 * command from the user to proper driver action routines.
2203 */
2204static int
2205lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
2206{
2207	struct Scsi_Host *shost;
2208	struct lpfc_vport *vport;
2209	struct lpfc_hba *phba;
2210	struct diag_mode_set *loopback_mode_end_cmd;
2211	uint32_t timeout;
2212	int rc, i;
2213
2214	shost = job->shost;
2215	if (!shost)
2216		return -ENODEV;
2217	vport = (struct lpfc_vport *)job->shost->hostdata;
2218	if (!vport)
2219		return -ENODEV;
2220	phba = vport->phba;
2221	if (!phba)
2222		return -ENODEV;
2223
2224	if (phba->sli_rev < LPFC_SLI_REV4)
2225		return -ENODEV;
2226	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2227	    LPFC_SLI_INTF_IF_TYPE_2)
2228		return -ENODEV;
2229
2230	/* clear loopback diagnostic mode */
2231	spin_lock_irq(&phba->hbalock);
2232	phba->link_flag &= ~LS_LOOPBACK_MODE;
2233	spin_unlock_irq(&phba->hbalock);
2234	loopback_mode_end_cmd = (struct diag_mode_set *)
2235			job->request->rqst_data.h_vendor.vendor_cmd;
2236	timeout = loopback_mode_end_cmd->timeout * 100;
2237
2238	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2239	if (rc) {
2240		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2241				"3139 Failed to bring link to diagnostic "
2242				"state, rc:x%x\n", rc);
2243		goto loopback_mode_end_exit;
2244	}
2245
2246	/* wait for link down before proceeding */
2247	i = 0;
2248	while (phba->link_state != LPFC_LINK_DOWN) {
2249		if (i++ > timeout) {
2250			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2251					"3140 Timeout waiting for link to "
2252					"diagnostic mode_end, timeout:%d ms\n",
2253					timeout * 10);
2254			/* there is nothing much we can do here */
2255			break;
2256		}
2257		msleep(10);
2258	}
2259
2260	/* reset port resource registrations */
2261	rc = lpfc_selective_reset(phba);
2262	phba->pport->fc_myDID = 0;
2263
2264loopback_mode_end_exit:
2265	/* make return code available to userspace */
2266	job->reply->result = rc;
2267	/* complete the job back to userspace if no error */
2268	if (rc == 0)
2269		job->job_done(job);
2270	return rc;
2271}
2272
2273/**
2274 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2275 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2276 *
2277 * This function is to perform SLI4 diag link test request from the user
2278 * applicaiton.
2279 */
2280static int
2281lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
2282{
2283	struct Scsi_Host *shost;
2284	struct lpfc_vport *vport;
2285	struct lpfc_hba *phba;
2286	LPFC_MBOXQ_t *pmboxq;
2287	struct sli4_link_diag *link_diag_test_cmd;
2288	uint32_t req_len, alloc_len;
2289	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2290	union lpfc_sli4_cfg_shdr *shdr;
2291	uint32_t shdr_status, shdr_add_status;
2292	struct diag_status *diag_status_reply;
2293	int mbxstatus, rc = 0;
2294
2295	shost = job->shost;
2296	if (!shost) {
2297		rc = -ENODEV;
2298		goto job_error;
2299	}
2300	vport = (struct lpfc_vport *)job->shost->hostdata;
2301	if (!vport) {
2302		rc = -ENODEV;
2303		goto job_error;
2304	}
2305	phba = vport->phba;
2306	if (!phba) {
2307		rc = -ENODEV;
2308		goto job_error;
2309	}
2310
2311	if (phba->sli_rev < LPFC_SLI_REV4) {
2312		rc = -ENODEV;
2313		goto job_error;
2314	}
2315	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2316	    LPFC_SLI_INTF_IF_TYPE_2) {
2317		rc = -ENODEV;
2318		goto job_error;
2319	}
2320
2321	if (job->request_len < sizeof(struct fc_bsg_request) +
2322	    sizeof(struct sli4_link_diag)) {
2323		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2324				"3013 Received LINK DIAG TEST request "
2325				" size:%d below the minimum size:%d\n",
2326				job->request_len,
2327				(int)(sizeof(struct fc_bsg_request) +
2328				sizeof(struct sli4_link_diag)));
2329		rc = -EINVAL;
2330		goto job_error;
2331	}
2332
2333	rc = lpfc_bsg_diag_mode_enter(phba);
2334	if (rc)
2335		goto job_error;
2336
2337	link_diag_test_cmd = (struct sli4_link_diag *)
2338			 job->request->rqst_data.h_vendor.vendor_cmd;
2339
2340	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2341
2342	if (rc)
2343		goto job_error;
2344
2345	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2346	if (!pmboxq) {
2347		rc = -ENOMEM;
2348		goto link_diag_test_exit;
2349	}
2350
2351	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2352		   sizeof(struct lpfc_sli4_cfg_mhdr));
2353	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2354				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2355				     req_len, LPFC_SLI4_MBX_EMBED);
2356	if (alloc_len != req_len) {
2357		rc = -ENOMEM;
2358		goto link_diag_test_exit;
2359	}
2360	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2361	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2362	       phba->sli4_hba.lnk_info.lnk_no);
2363	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2364	       phba->sli4_hba.lnk_info.lnk_tp);
2365	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2366	       link_diag_test_cmd->test_id);
2367	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2368	       link_diag_test_cmd->loops);
2369	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2370	       link_diag_test_cmd->test_version);
2371	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2372	       link_diag_test_cmd->error_action);
2373
2374	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2375
2376	shdr = (union lpfc_sli4_cfg_shdr *)
2377		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2378	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2379	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2380	if (shdr_status || shdr_add_status || mbxstatus) {
2381		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2382				"3010 Run link diag test mailbox failed with "
2383				"mbx_status x%x status x%x, add_status x%x\n",
2384				mbxstatus, shdr_status, shdr_add_status);
2385	}
2386
2387	diag_status_reply = (struct diag_status *)
2388			    job->reply->reply_data.vendor_reply.vendor_rsp;
2389
2390	if (job->reply_len <
2391	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2392		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2393				"3012 Received Run link diag test reply "
2394				"below minimum size (%d): reply_len:%d\n",
2395				(int)(sizeof(struct fc_bsg_request) +
2396				sizeof(struct diag_status)),
2397				job->reply_len);
2398		rc = -EINVAL;
2399		goto job_error;
2400	}
2401
2402	diag_status_reply->mbox_status = mbxstatus;
2403	diag_status_reply->shdr_status = shdr_status;
2404	diag_status_reply->shdr_add_status = shdr_add_status;
2405
2406link_diag_test_exit:
2407	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2408
2409	if (pmboxq)
2410		mempool_free(pmboxq, phba->mbox_mem_pool);
2411
2412	lpfc_bsg_diag_mode_exit(phba);
2413
2414job_error:
2415	/* make error code available to userspace */
2416	job->reply->result = rc;
2417	/* complete the job back to userspace if no error */
2418	if (rc == 0)
2419		job->job_done(job);
2420	return rc;
2421}
2422
2423/**
2424 * lpfcdiag_loop_self_reg - obtains a remote port login id
2425 * @phba: Pointer to HBA context object
2426 * @rpi: Pointer to a remote port login id
2427 *
2428 * This function obtains a remote port login id so the diag loopback test
2429 * can send and receive its own unsolicited CT command.
2430 **/
2431static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2432{
2433	LPFC_MBOXQ_t *mbox;
2434	struct lpfc_dmabuf *dmabuff;
2435	int status;
2436
2437	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2438	if (!mbox)
2439		return -ENOMEM;
2440
2441	if (phba->sli_rev < LPFC_SLI_REV4)
2442		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2443				(uint8_t *)&phba->pport->fc_sparam,
2444				mbox, *rpi);
2445	else {
2446		*rpi = lpfc_sli4_alloc_rpi(phba);
2447		status = lpfc_reg_rpi(phba, phba->pport->vpi,
2448				phba->pport->fc_myDID,
2449				(uint8_t *)&phba->pport->fc_sparam,
2450				mbox, *rpi);
2451	}
2452
2453	if (status) {
2454		mempool_free(mbox, phba->mbox_mem_pool);
2455		if (phba->sli_rev == LPFC_SLI_REV4)
2456			lpfc_sli4_free_rpi(phba, *rpi);
2457		return -ENOMEM;
2458	}
2459
2460	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
2461	mbox->context1 = NULL;
2462	mbox->context2 = NULL;
2463	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2464
2465	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2466		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2467		kfree(dmabuff);
2468		if (status != MBX_TIMEOUT)
2469			mempool_free(mbox, phba->mbox_mem_pool);
2470		if (phba->sli_rev == LPFC_SLI_REV4)
2471			lpfc_sli4_free_rpi(phba, *rpi);
2472		return -ENODEV;
2473	}
2474
2475	if (phba->sli_rev < LPFC_SLI_REV4)
2476		*rpi = mbox->u.mb.un.varWords[0];
2477
2478	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2479	kfree(dmabuff);
2480	mempool_free(mbox, phba->mbox_mem_pool);
2481	return 0;
2482}
2483
2484/**
2485 * lpfcdiag_loop_self_unreg - unregs from the rpi
2486 * @phba: Pointer to HBA context object
2487 * @rpi: Remote port login id
2488 *
2489 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2490 **/
2491static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2492{
2493	LPFC_MBOXQ_t *mbox;
2494	int status;
2495
2496	/* Allocate mboxq structure */
2497	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2498	if (mbox == NULL)
2499		return -ENOMEM;
2500
2501	if (phba->sli_rev < LPFC_SLI_REV4)
2502		lpfc_unreg_login(phba, 0, rpi, mbox);
2503	else
2504		lpfc_unreg_login(phba, phba->pport->vpi,
2505				 phba->sli4_hba.rpi_ids[rpi], mbox);
2506
2507	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2508
2509	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2510		if (status != MBX_TIMEOUT)
2511			mempool_free(mbox, phba->mbox_mem_pool);
2512		return -EIO;
2513	}
2514	mempool_free(mbox, phba->mbox_mem_pool);
2515	if (phba->sli_rev == LPFC_SLI_REV4)
2516		lpfc_sli4_free_rpi(phba, rpi);
2517	return 0;
2518}
2519
2520/**
2521 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2522 * @phba: Pointer to HBA context object
2523 * @rpi: Remote port login id
2524 * @txxri: Pointer to transmit exchange id
2525 * @rxxri: Pointer to response exchabge id
2526 *
2527 * This function obtains the transmit and receive ids required to send
2528 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
2529 * flags are used to the unsolicted response handler is able to process
2530 * the ct command sent on the same port.
2531 **/
2532static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2533			 uint16_t *txxri, uint16_t * rxxri)
2534{
2535	struct lpfc_bsg_event *evt;
2536	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2537	IOCB_t *cmd, *rsp;
2538	struct lpfc_dmabuf *dmabuf;
2539	struct ulp_bde64 *bpl = NULL;
2540	struct lpfc_sli_ct_request *ctreq = NULL;
2541	int ret_val = 0;
2542	int time_left;
2543	int iocb_stat = IOCB_SUCCESS;
2544	unsigned long flags;
2545
2546	*txxri = 0;
2547	*rxxri = 0;
2548	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2549				SLI_CT_ELX_LOOPBACK);
2550	if (!evt)
2551		return -ENOMEM;
2552
2553	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2554	list_add(&evt->node, &phba->ct_ev_waiters);
2555	lpfc_bsg_event_ref(evt);
2556	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2557
2558	cmdiocbq = lpfc_sli_get_iocbq(phba);
2559	rspiocbq = lpfc_sli_get_iocbq(phba);
2560
2561	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2562	if (dmabuf) {
2563		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2564		if (dmabuf->virt) {
2565			INIT_LIST_HEAD(&dmabuf->list);
2566			bpl = (struct ulp_bde64 *) dmabuf->virt;
2567			memset(bpl, 0, sizeof(*bpl));
2568			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2569			bpl->addrHigh =
2570				le32_to_cpu(putPaddrHigh(dmabuf->phys +
2571					sizeof(*bpl)));
2572			bpl->addrLow =
2573				le32_to_cpu(putPaddrLow(dmabuf->phys +
2574					sizeof(*bpl)));
2575			bpl->tus.f.bdeFlags = 0;
2576			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2577			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2578		}
2579	}
2580
2581	if (cmdiocbq == NULL || rspiocbq == NULL ||
2582	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2583		dmabuf->virt == NULL) {
2584		ret_val = -ENOMEM;
2585		goto err_get_xri_exit;
2586	}
2587
2588	cmd = &cmdiocbq->iocb;
2589	rsp = &rspiocbq->iocb;
2590
2591	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2592
2593	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2594	ctreq->RevisionId.bits.InId = 0;
2595	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2596	ctreq->FsSubType = 0;
2597	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2598	ctreq->CommandResponse.bits.Size = 0;
2599
2600
2601	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2602	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2603	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2604	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2605
2606	cmd->un.xseq64.w5.hcsw.Fctl = LA;
2607	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2608	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2609	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2610
2611	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2612	cmd->ulpBdeCount = 1;
2613	cmd->ulpLe = 1;
2614	cmd->ulpClass = CLASS3;
2615	cmd->ulpContext = rpi;
2616
2617	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2618	cmdiocbq->vport = phba->pport;
2619	cmdiocbq->iocb_cmpl = NULL;
2620
2621	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2622				rspiocbq,
2623				(phba->fc_ratov * 2)
2624				+ LPFC_DRVR_TIMEOUT);
2625	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
2626		ret_val = -EIO;
2627		goto err_get_xri_exit;
2628	}
2629	*txxri =  rsp->ulpContext;
2630
2631	evt->waiting = 1;
2632	evt->wait_time_stamp = jiffies;
2633	time_left = wait_event_interruptible_timeout(
2634		evt->wq, !list_empty(&evt->events_to_see),
2635		msecs_to_jiffies(1000 *
2636			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
2637	if (list_empty(&evt->events_to_see))
2638		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2639	else {
2640		spin_lock_irqsave(&phba->ct_ev_lock, flags);
2641		list_move(evt->events_to_see.prev, &evt->events_to_get);
2642		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2643		*rxxri = (list_entry(evt->events_to_get.prev,
2644				     typeof(struct event_data),
2645				     node))->immed_dat;
2646	}
2647	evt->waiting = 0;
2648
2649err_get_xri_exit:
2650	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2651	lpfc_bsg_event_unref(evt); /* release ref */
2652	lpfc_bsg_event_unref(evt); /* delete */
2653	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2654
2655	if (dmabuf) {
2656		if (dmabuf->virt)
2657			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2658		kfree(dmabuf);
2659	}
2660
2661	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2662		lpfc_sli_release_iocbq(phba, cmdiocbq);
2663	if (rspiocbq)
2664		lpfc_sli_release_iocbq(phba, rspiocbq);
2665	return ret_val;
2666}
2667
2668/**
2669 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
2670 * @phba: Pointer to HBA context object
2671 *
2672 * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and.
2673 * returns the pointer to the buffer.
2674 **/
2675static struct lpfc_dmabuf *
2676lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2677{
2678	struct lpfc_dmabuf *dmabuf;
2679	struct pci_dev *pcidev = phba->pcidev;
2680
2681	/* allocate dma buffer struct */
2682	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2683	if (!dmabuf)
2684		return NULL;
2685
2686	INIT_LIST_HEAD(&dmabuf->list);
2687
2688	/* now, allocate dma buffer */
2689	dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2690					   &(dmabuf->phys), GFP_KERNEL);
2691
2692	if (!dmabuf->virt) {
2693		kfree(dmabuf);
2694		return NULL;
2695	}
2696
2697	return dmabuf;
2698}
2699
2700/**
2701 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2702 * @phba: Pointer to HBA context object.
2703 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2704 *
2705 * This routine just simply frees a dma buffer and its associated buffer
2706 * descriptor referred by @dmabuf.
2707 **/
2708static void
2709lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2710{
2711	struct pci_dev *pcidev = phba->pcidev;
2712
2713	if (!dmabuf)
2714		return;
2715
2716	if (dmabuf->virt)
2717		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2718				  dmabuf->virt, dmabuf->phys);
2719	kfree(dmabuf);
2720	return;
2721}
2722
2723/**
2724 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2725 * @phba: Pointer to HBA context object.
2726 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2727 *
2728 * This routine just simply frees all dma buffers and their associated buffer
2729 * descriptors referred by @dmabuf_list.
2730 **/
2731static void
2732lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2733			    struct list_head *dmabuf_list)
2734{
2735	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2736
2737	if (list_empty(dmabuf_list))
2738		return;
2739
2740	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2741		list_del_init(&dmabuf->list);
2742		lpfc_bsg_dma_page_free(phba, dmabuf);
2743	}
2744	return;
2745}
2746
2747/**
2748 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2749 * @phba: Pointer to HBA context object
2750 * @bpl: Pointer to 64 bit bde structure
2751 * @size: Number of bytes to process
2752 * @nocopydata: Flag to copy user data into the allocated buffer
2753 *
2754 * This function allocates page size buffers and populates an lpfc_dmabufext.
2755 * If allowed the user data pointed to with indataptr is copied into the kernel
2756 * memory. The chained list of page size buffers is returned.
2757 **/
2758static struct lpfc_dmabufext *
2759diag_cmd_data_alloc(struct lpfc_hba *phba,
2760		   struct ulp_bde64 *bpl, uint32_t size,
2761		   int nocopydata)
2762{
2763	struct lpfc_dmabufext *mlist = NULL;
2764	struct lpfc_dmabufext *dmp;
2765	int cnt, offset = 0, i = 0;
2766	struct pci_dev *pcidev;
2767
2768	pcidev = phba->pcidev;
2769
2770	while (size) {
2771		/* We get chunks of 4K */
2772		if (size > BUF_SZ_4K)
2773			cnt = BUF_SZ_4K;
2774		else
2775			cnt = size;
2776
2777		/* allocate struct lpfc_dmabufext buffer header */
2778		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2779		if (!dmp)
2780			goto out;
2781
2782		INIT_LIST_HEAD(&dmp->dma.list);
2783
2784		/* Queue it to a linked list */
2785		if (mlist)
2786			list_add_tail(&dmp->dma.list, &mlist->dma.list);
2787		else
2788			mlist = dmp;
2789
2790		/* allocate buffer */
2791		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2792						   cnt,
2793						   &(dmp->dma.phys),
2794						   GFP_KERNEL);
2795
2796		if (!dmp->dma.virt)
2797			goto out;
2798
2799		dmp->size = cnt;
2800
2801		if (nocopydata) {
2802			bpl->tus.f.bdeFlags = 0;
2803			pci_dma_sync_single_for_device(phba->pcidev,
2804				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
2805
2806		} else {
2807			memset((uint8_t *)dmp->dma.virt, 0, cnt);
2808			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2809		}
2810
2811		/* build buffer ptr list for IOCB */
2812		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2813		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2814		bpl->tus.f.bdeSize = (ushort) cnt;
2815		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2816		bpl++;
2817
2818		i++;
2819		offset += cnt;
2820		size -= cnt;
2821	}
2822
2823	if (mlist) {
2824		mlist->flag = i;
2825		return mlist;
2826	}
2827out:
2828	diag_cmd_data_free(phba, mlist);
2829	return NULL;
2830}
2831
2832/**
2833 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2834 * @phba: Pointer to HBA context object
2835 * @rxxri: Receive exchange id
2836 * @len: Number of data bytes
2837 *
2838 * This function allocates and posts a data buffer of sufficient size to receive
2839 * an unsolicted CT command.
2840 **/
2841static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2842			     size_t len)
2843{
2844	struct lpfc_sli *psli = &phba->sli;
2845	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2846	struct lpfc_iocbq *cmdiocbq;
2847	IOCB_t *cmd = NULL;
2848	struct list_head head, *curr, *next;
2849	struct lpfc_dmabuf *rxbmp;
2850	struct lpfc_dmabuf *dmp;
2851	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2852	struct ulp_bde64 *rxbpl = NULL;
2853	uint32_t num_bde;
2854	struct lpfc_dmabufext *rxbuffer = NULL;
2855	int ret_val = 0;
2856	int iocb_stat;
2857	int i = 0;
2858
2859	cmdiocbq = lpfc_sli_get_iocbq(phba);
2860	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2861	if (rxbmp != NULL) {
2862		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2863		if (rxbmp->virt) {
2864			INIT_LIST_HEAD(&rxbmp->list);
2865			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2866			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2867		}
2868	}
2869
2870	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
2871		ret_val = -ENOMEM;
2872		goto err_post_rxbufs_exit;
2873	}
2874
2875	/* Queue buffers for the receive exchange */
2876	num_bde = (uint32_t)rxbuffer->flag;
2877	dmp = &rxbuffer->dma;
2878
2879	cmd = &cmdiocbq->iocb;
2880	i = 0;
2881
2882	INIT_LIST_HEAD(&head);
2883	list_add_tail(&head, &dmp->list);
2884	list_for_each_safe(curr, next, &head) {
2885		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2886		list_del(curr);
2887
2888		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2889			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2890			cmd->un.quexri64cx.buff.bde.addrHigh =
2891				putPaddrHigh(mp[i]->phys);
2892			cmd->un.quexri64cx.buff.bde.addrLow =
2893				putPaddrLow(mp[i]->phys);
2894			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2895				((struct lpfc_dmabufext *)mp[i])->size;
2896			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2897			cmd->ulpCommand = CMD_QUE_XRI64_CX;
2898			cmd->ulpPU = 0;
2899			cmd->ulpLe = 1;
2900			cmd->ulpBdeCount = 1;
2901			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2902
2903		} else {
2904			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2905			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2906			cmd->un.cont64[i].tus.f.bdeSize =
2907				((struct lpfc_dmabufext *)mp[i])->size;
2908					cmd->ulpBdeCount = ++i;
2909
2910			if ((--num_bde > 0) && (i < 2))
2911				continue;
2912
2913			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2914			cmd->ulpLe = 1;
2915		}
2916
2917		cmd->ulpClass = CLASS3;
2918		cmd->ulpContext = rxxri;
2919
2920		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2921						0);
2922		if (iocb_stat == IOCB_ERROR) {
2923			diag_cmd_data_free(phba,
2924				(struct lpfc_dmabufext *)mp[0]);
2925			if (mp[1])
2926				diag_cmd_data_free(phba,
2927					  (struct lpfc_dmabufext *)mp[1]);
2928			dmp = list_entry(next, struct lpfc_dmabuf, list);
2929			ret_val = -EIO;
2930			goto err_post_rxbufs_exit;
2931		}
2932
2933		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2934		if (mp[1]) {
2935			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2936			mp[1] = NULL;
2937		}
2938
2939		/* The iocb was freed by lpfc_sli_issue_iocb */
2940		cmdiocbq = lpfc_sli_get_iocbq(phba);
2941		if (!cmdiocbq) {
2942			dmp = list_entry(next, struct lpfc_dmabuf, list);
2943			ret_val = -EIO;
2944			goto err_post_rxbufs_exit;
2945		}
2946
2947		cmd = &cmdiocbq->iocb;
2948		i = 0;
2949	}
2950	list_del(&head);
2951
2952err_post_rxbufs_exit:
2953
2954	if (rxbmp) {
2955		if (rxbmp->virt)
2956			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2957		kfree(rxbmp);
2958	}
2959
2960	if (cmdiocbq)
2961		lpfc_sli_release_iocbq(phba, cmdiocbq);
2962	return ret_val;
2963}
2964
2965/**
2966 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
2967 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2968 *
2969 * This function receives a user data buffer to be transmitted and received on
2970 * the same port, the link must be up and in loopback mode prior
2971 * to being called.
2972 * 1. A kernel buffer is allocated to copy the user data into.
2973 * 2. The port registers with "itself".
2974 * 3. The transmit and receive exchange ids are obtained.
2975 * 4. The receive exchange id is posted.
2976 * 5. A new els loopback event is created.
2977 * 6. The command and response iocbs are allocated.
2978 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback.
2979 *
2980 * This function is meant to be called n times while the port is in loopback
2981 * so it is the apps responsibility to issue a reset to take the port out
2982 * of loopback mode.
2983 **/
2984static int
2985lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
2986{
2987	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2988	struct lpfc_hba *phba = vport->phba;
2989	struct lpfc_bsg_event *evt;
2990	struct event_data *evdat;
2991	struct lpfc_sli *psli = &phba->sli;
2992	uint32_t size;
2993	uint32_t full_size;
2994	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2995	uint16_t rpi = 0;
2996	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
2997	IOCB_t *cmd, *rsp = NULL;
2998	struct lpfc_sli_ct_request *ctreq;
2999	struct lpfc_dmabuf *txbmp;
3000	struct ulp_bde64 *txbpl = NULL;
3001	struct lpfc_dmabufext *txbuffer = NULL;
3002	struct list_head head;
3003	struct lpfc_dmabuf  *curr;
3004	uint16_t txxri = 0, rxxri;
3005	uint32_t num_bde;
3006	uint8_t *ptr = NULL, *rx_databuf = NULL;
3007	int rc = 0;
3008	int time_left;
3009	int iocb_stat = IOCB_SUCCESS;
3010	unsigned long flags;
3011	void *dataout = NULL;
3012	uint32_t total_mem;
3013
3014	/* in case no data is returned return just the return code */
3015	job->reply->reply_payload_rcv_len = 0;
3016
3017	if (job->request_len <
3018	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
3019		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3020				"2739 Received DIAG TEST request below minimum "
3021				"size\n");
3022		rc = -EINVAL;
3023		goto loopback_test_exit;
3024	}
3025
3026	if (job->request_payload.payload_len !=
3027		job->reply_payload.payload_len) {
3028		rc = -EINVAL;
3029		goto loopback_test_exit;
3030	}
3031
3032	if ((phba->link_state == LPFC_HBA_ERROR) ||
3033	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
3034	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
3035		rc = -EACCES;
3036		goto loopback_test_exit;
3037	}
3038
3039	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
3040		rc = -EACCES;
3041		goto loopback_test_exit;
3042	}
3043
3044	size = job->request_payload.payload_len;
3045	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
3046
3047	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
3048		rc = -ERANGE;
3049		goto loopback_test_exit;
3050	}
3051
3052	if (full_size >= BUF_SZ_4K) {
3053		/*
3054		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
3055		 * then we allocate 64k and re-use that buffer over and over to
3056		 * xfer the whole block. This is because Linux kernel has a
3057		 * problem allocating more than 120k of kernel space memory. Saw
3058		 * problem with GET_FCPTARGETMAPPING...
3059		 */
3060		if (size <= (64 * 1024))
3061			total_mem = full_size;
3062		else
3063			total_mem = 64 * 1024;
3064	} else
3065		/* Allocate memory for ioctl data */
3066		total_mem = BUF_SZ_4K;
3067
3068	dataout = kmalloc(total_mem, GFP_KERNEL);
3069	if (dataout == NULL) {
3070		rc = -ENOMEM;
3071		goto loopback_test_exit;
3072	}
3073
3074	ptr = dataout;
3075	ptr += ELX_LOOPBACK_HEADER_SZ;
3076	sg_copy_to_buffer(job->request_payload.sg_list,
3077				job->request_payload.sg_cnt,
3078				ptr, size);
3079	rc = lpfcdiag_loop_self_reg(phba, &rpi);
3080	if (rc)
3081		goto loopback_test_exit;
3082
3083	if (phba->sli_rev < LPFC_SLI_REV4) {
3084		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
3085		if (rc) {
3086			lpfcdiag_loop_self_unreg(phba, rpi);
3087			goto loopback_test_exit;
3088		}
3089
3090		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
3091		if (rc) {
3092			lpfcdiag_loop_self_unreg(phba, rpi);
3093			goto loopback_test_exit;
3094		}
3095	}
3096	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
3097				SLI_CT_ELX_LOOPBACK);
3098	if (!evt) {
3099		lpfcdiag_loop_self_unreg(phba, rpi);
3100		rc = -ENOMEM;
3101		goto loopback_test_exit;
3102	}
3103
3104	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3105	list_add(&evt->node, &phba->ct_ev_waiters);
3106	lpfc_bsg_event_ref(evt);
3107	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3108
3109	cmdiocbq = lpfc_sli_get_iocbq(phba);
3110	if (phba->sli_rev < LPFC_SLI_REV4)
3111		rspiocbq = lpfc_sli_get_iocbq(phba);
3112	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3113
3114	if (txbmp) {
3115		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
3116		if (txbmp->virt) {
3117			INIT_LIST_HEAD(&txbmp->list);
3118			txbpl = (struct ulp_bde64 *) txbmp->virt;
3119			txbuffer = diag_cmd_data_alloc(phba,
3120							txbpl, full_size, 0);
3121		}
3122	}
3123
3124	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
3125		rc = -ENOMEM;
3126		goto err_loopback_test_exit;
3127	}
3128	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
3129		rc = -ENOMEM;
3130		goto err_loopback_test_exit;
3131	}
3132
3133	cmd = &cmdiocbq->iocb;
3134	if (phba->sli_rev < LPFC_SLI_REV4)
3135		rsp = &rspiocbq->iocb;
3136
3137	INIT_LIST_HEAD(&head);
3138	list_add_tail(&head, &txbuffer->dma.list);
3139	list_for_each_entry(curr, &head, list) {
3140		segment_len = ((struct lpfc_dmabufext *)curr)->size;
3141		if (current_offset == 0) {
3142			ctreq = curr->virt;
3143			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
3144			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
3145			ctreq->RevisionId.bits.InId = 0;
3146			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
3147			ctreq->FsSubType = 0;
3148			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
3149			ctreq->CommandResponse.bits.Size   = size;
3150			segment_offset = ELX_LOOPBACK_HEADER_SZ;
3151		} else
3152			segment_offset = 0;
3153
3154		BUG_ON(segment_offset >= segment_len);
3155		memcpy(curr->virt + segment_offset,
3156			ptr + current_offset,
3157			segment_len - segment_offset);
3158
3159		current_offset += segment_len - segment_offset;
3160		BUG_ON(current_offset > size);
3161	}
3162	list_del(&head);
3163
3164	/* Build the XMIT_SEQUENCE iocb */
3165	num_bde = (uint32_t)txbuffer->flag;
3166
3167	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
3168	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
3169	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
3170	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
3171
3172	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
3173	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
3174	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
3175	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
3176
3177	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
3178	cmd->ulpBdeCount = 1;
3179	cmd->ulpLe = 1;
3180	cmd->ulpClass = CLASS3;
3181
3182	if (phba->sli_rev < LPFC_SLI_REV4) {
3183		cmd->ulpContext = txxri;
3184	} else {
3185		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
3186		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
3187		cmdiocbq->context3 = txbmp;
3188		cmdiocbq->sli4_xritag = NO_XRI;
3189		cmd->unsli3.rcvsli3.ox_id = 0xffff;
3190	}
3191	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
3192	cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
3193	cmdiocbq->vport = phba->pport;
3194	cmdiocbq->iocb_cmpl = NULL;
3195	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3196					     rspiocbq, (phba->fc_ratov * 2) +
3197					     LPFC_DRVR_TIMEOUT);
3198
3199	if ((iocb_stat != IOCB_SUCCESS) ||
3200	    ((phba->sli_rev < LPFC_SLI_REV4) &&
3201	     (rsp->ulpStatus != IOSTAT_SUCCESS))) {
3202		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3203				"3126 Failed loopback test issue iocb: "
3204				"iocb_stat:x%x\n", iocb_stat);
3205		rc = -EIO;
3206		goto err_loopback_test_exit;
3207	}
3208
3209	evt->waiting = 1;
3210	time_left = wait_event_interruptible_timeout(
3211		evt->wq, !list_empty(&evt->events_to_see),
3212		msecs_to_jiffies(1000 *
3213			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
3214	evt->waiting = 0;
3215	if (list_empty(&evt->events_to_see)) {
3216		rc = (time_left) ? -EINTR : -ETIMEDOUT;
3217		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3218				"3125 Not receiving unsolicited event, "
3219				"rc:x%x\n", rc);
3220	} else {
3221		spin_lock_irqsave(&phba->ct_ev_lock, flags);
3222		list_move(evt->events_to_see.prev, &evt->events_to_get);
3223		evdat = list_entry(evt->events_to_get.prev,
3224				   typeof(*evdat), node);
3225		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3226		rx_databuf = evdat->data;
3227		if (evdat->len != full_size) {
3228			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3229				"1603 Loopback test did not receive expected "
3230				"data length. actual length 0x%x expected "
3231				"length 0x%x\n",
3232				evdat->len, full_size);
3233			rc = -EIO;
3234		} else if (rx_databuf == NULL)
3235			rc = -EIO;
3236		else {
3237			rc = IOCB_SUCCESS;
3238			/* skip over elx loopback header */
3239			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3240			job->reply->reply_payload_rcv_len =
3241				sg_copy_from_buffer(job->reply_payload.sg_list,
3242						    job->reply_payload.sg_cnt,
3243						    rx_databuf, size);
3244			job->reply->reply_payload_rcv_len = size;
3245		}
3246	}
3247
3248err_loopback_test_exit:
3249	lpfcdiag_loop_self_unreg(phba, rpi);
3250
3251	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3252	lpfc_bsg_event_unref(evt); /* release ref */
3253	lpfc_bsg_event_unref(evt); /* delete */
3254	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3255
3256	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
3257		lpfc_sli_release_iocbq(phba, cmdiocbq);
3258
3259	if (rspiocbq != NULL)
3260		lpfc_sli_release_iocbq(phba, rspiocbq);
3261
3262	if (txbmp != NULL) {
3263		if (txbpl != NULL) {
3264			if (txbuffer != NULL)
3265				diag_cmd_data_free(phba, txbuffer);
3266			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3267		}
3268		kfree(txbmp);
3269	}
3270
3271loopback_test_exit:
3272	kfree(dataout);
3273	/* make error code available to userspace */
3274	job->reply->result = rc;
3275	job->dd_data = NULL;
3276	/* complete the job back to userspace if no error */
3277	if (rc == IOCB_SUCCESS)
3278		job->job_done(job);
3279	return rc;
3280}
3281
3282/**
3283 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3284 * @job: GET_DFC_REV fc_bsg_job
3285 **/
3286static int
3287lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
3288{
3289	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
3290	struct lpfc_hba *phba = vport->phba;
3291	struct get_mgmt_rev_reply *event_reply;
3292	int rc = 0;
3293
3294	if (job->request_len <
3295	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3296		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3297				"2740 Received GET_DFC_REV request below "
3298				"minimum size\n");
3299		rc = -EINVAL;
3300		goto job_error;
3301	}
3302
3303	event_reply = (struct get_mgmt_rev_reply *)
3304		job->reply->reply_data.vendor_reply.vendor_rsp;
3305
3306	if (job->reply_len <
3307	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
3308		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3309				"2741 Received GET_DFC_REV reply below "
3310				"minimum size\n");
3311		rc = -EINVAL;
3312		goto job_error;
3313	}
3314
3315	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3316	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3317job_error:
3318	job->reply->result = rc;
3319	if (rc == 0)
3320		job->job_done(job);
3321	return rc;
3322}
3323
3324/**
3325 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3326 * @phba: Pointer to HBA context object.
3327 * @pmboxq: Pointer to mailbox command.
3328 *
3329 * This is completion handler function for mailbox commands issued from
3330 * lpfc_bsg_issue_mbox function. This function is called by the
3331 * mailbox event handler function with no lock held. This function
3332 * will wake up thread waiting on the wait queue pointed by context1
3333 * of the mailbox.
3334 **/
3335static void
3336lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3337{
3338	struct bsg_job_data *dd_data;
3339	struct fc_bsg_job *job;
3340	uint32_t size;
3341	unsigned long flags;
3342	uint8_t *pmb, *pmb_buf;
3343
3344	dd_data = pmboxq->context1;
3345
3346	/*
3347	 * The outgoing buffer is readily referred from the dma buffer,
3348	 * just need to get header part from mailboxq structure.
3349	 */
3350	pmb = (uint8_t *)&pmboxq->u.mb;
3351	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3352	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3353
3354	/* Determine if job has been aborted */
3355
3356	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3357	job = dd_data->set_job;
3358	if (job) {
3359		/* Prevent timeout handling from trying to abort job  */
3360		job->dd_data = NULL;
3361	}
3362	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3363
3364	/* Copy the mailbox data to the job if it is still active */
3365
3366	if (job) {
3367		size = job->reply_payload.payload_len;
3368		job->reply->reply_payload_rcv_len =
3369			sg_copy_from_buffer(job->reply_payload.sg_list,
3370					    job->reply_payload.sg_cnt,
3371					    pmb_buf, size);
3372	}
3373
3374	dd_data->set_job = NULL;
3375	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3376	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3377	kfree(dd_data);
3378
3379	/* Complete the job if the job is still active */
3380
3381	if (job) {
3382		job->reply->result = 0;
3383		job->job_done(job);
3384	}
3385	return;
3386}
3387
3388/**
3389 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3390 * @phba: Pointer to HBA context object.
3391 * @mb: Pointer to a mailbox object.
3392 * @vport: Pointer to a vport object.
3393 *
3394 * Some commands require the port to be offline, some may not be called from
3395 * the application.
3396 **/
3397static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3398	MAILBOX_t *mb, struct lpfc_vport *vport)
3399{
3400	/* return negative error values for bsg job */
3401	switch (mb->mbxCommand) {
3402	/* Offline only */
3403	case MBX_INIT_LINK:
3404	case MBX_DOWN_LINK:
3405	case MBX_CONFIG_LINK:
3406	case MBX_CONFIG_RING:
3407	case MBX_RESET_RING:
3408	case MBX_UNREG_LOGIN:
3409	case MBX_CLEAR_LA:
3410	case MBX_DUMP_CONTEXT:
3411	case MBX_RUN_DIAGS:
3412	case MBX_RESTART:
3413	case MBX_SET_MASK:
3414		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3415			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3416				"2743 Command 0x%x is illegal in on-line "
3417				"state\n",
3418				mb->mbxCommand);
3419			return -EPERM;
3420		}
3421	case MBX_WRITE_NV:
3422	case MBX_WRITE_VPARMS:
3423	case MBX_LOAD_SM:
3424	case MBX_READ_NV:
3425	case MBX_READ_CONFIG:
3426	case MBX_READ_RCONFIG:
3427	case MBX_READ_STATUS:
3428	case MBX_READ_XRI:
3429	case MBX_READ_REV:
3430	case MBX_READ_LNK_STAT:
3431	case MBX_DUMP_MEMORY:
3432	case MBX_DOWN_LOAD:
3433	case MBX_UPDATE_CFG:
3434	case MBX_KILL_BOARD:
3435	case MBX_READ_TOPOLOGY:
3436	case MBX_LOAD_AREA:
3437	case MBX_LOAD_EXP_ROM:
3438	case MBX_BEACON:
3439	case MBX_DEL_LD_ENTRY:
3440	case MBX_SET_DEBUG:
3441	case MBX_WRITE_WWN:
3442	case MBX_SLI4_CONFIG:
3443	case MBX_READ_EVENT_LOG:
3444	case MBX_READ_EVENT_LOG_STATUS:
3445	case MBX_WRITE_EVENT_LOG:
3446	case MBX_PORT_CAPABILITIES:
3447	case MBX_PORT_IOV_CONTROL:
3448	case MBX_RUN_BIU_DIAG64:
3449		break;
3450	case MBX_SET_VARIABLE:
3451		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3452			"1226 mbox: set_variable 0x%x, 0x%x\n",
3453			mb->un.varWords[0],
3454			mb->un.varWords[1]);
3455		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3456			&& (mb->un.varWords[1] == 1)) {
3457			phba->wait_4_mlo_maint_flg = 1;
3458		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
3459			spin_lock_irq(&phba->hbalock);
3460			phba->link_flag &= ~LS_LOOPBACK_MODE;
3461			spin_unlock_irq(&phba->hbalock);
3462			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3463		}
3464		break;
3465	case MBX_READ_SPARM64:
3466	case MBX_REG_LOGIN:
3467	case MBX_REG_LOGIN64:
3468	case MBX_CONFIG_PORT:
3469	case MBX_RUN_BIU_DIAG:
3470	default:
3471		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3472			"2742 Unknown Command 0x%x\n",
3473			mb->mbxCommand);
3474		return -EPERM;
3475	}
3476
3477	return 0; /* ok */
3478}
3479
3480/**
3481 * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
3482 * @phba: Pointer to HBA context object.
3483 *
3484 * This is routine clean up and reset BSG handling of multi-buffer mbox
3485 * command session.
3486 **/
3487static void
3488lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3489{
3490	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3491		return;
3492
3493	/* free all memory, including dma buffers */
3494	lpfc_bsg_dma_page_list_free(phba,
3495				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3496	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3497	/* multi-buffer write mailbox command pass-through complete */
3498	memset((char *)&phba->mbox_ext_buf_ctx, 0,
3499	       sizeof(struct lpfc_mbox_ext_buf_ctx));
3500	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3501
3502	return;
3503}
3504
3505/**
3506 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3507 * @phba: Pointer to HBA context object.
3508 * @pmboxq: Pointer to mailbox command.
3509 *
3510 * This is routine handles BSG job for mailbox commands completions with
3511 * multiple external buffers.
3512 **/
3513static struct fc_bsg_job *
3514lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3515{
3516	struct bsg_job_data *dd_data;
3517	struct fc_bsg_job *job;
3518	uint8_t *pmb, *pmb_buf;
3519	unsigned long flags;
3520	uint32_t size;
3521	int rc = 0;
3522	struct lpfc_dmabuf *dmabuf;
3523	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3524	uint8_t *pmbx;
3525
3526	dd_data = pmboxq->context1;
3527
3528	/* Determine if job has been aborted */
3529	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3530	job = dd_data->set_job;
3531	if (job) {
3532		/* Prevent timeout handling from trying to abort job  */
3533		job->dd_data = NULL;
3534	}
3535	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3536
3537	/*
3538	 * The outgoing buffer is readily referred from the dma buffer,
3539	 * just need to get header part from mailboxq structure.
3540	 */
3541
3542	pmb = (uint8_t *)&pmboxq->u.mb;
3543	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3544	/* Copy the byte swapped response mailbox back to the user */
3545	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3546	/* if there is any non-embedded extended data copy that too */
3547	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3548	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3549	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3550	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3551		pmbx = (uint8_t *)dmabuf->virt;
3552		/* byte swap the extended data following the mailbox command */
3553		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3554			&pmbx[sizeof(MAILBOX_t)],
3555			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3556	}
3557
3558	/* Complete the job if the job is still active */
3559
3560	if (job) {
3561		size = job->reply_payload.payload_len;
3562		job->reply->reply_payload_rcv_len =
3563			sg_copy_from_buffer(job->reply_payload.sg_list,
3564					    job->reply_payload.sg_cnt,
3565					    pmb_buf, size);
3566
3567		/* result for successful */
3568		job->reply->result = 0;
3569
3570		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3571				"2937 SLI_CONFIG ext-buffer maibox command "
3572				"(x%x/x%x) complete bsg job done, bsize:%d\n",
3573				phba->mbox_ext_buf_ctx.nembType,
3574				phba->mbox_ext_buf_ctx.mboxType, size);
3575		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3576					phba->mbox_ext_buf_ctx.nembType,
3577					phba->mbox_ext_buf_ctx.mboxType,
3578					dma_ebuf, sta_pos_addr,
3579					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3580	} else {
3581		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3582				"2938 SLI_CONFIG ext-buffer maibox "
3583				"command (x%x/x%x) failure, rc:x%x\n",
3584				phba->mbox_ext_buf_ctx.nembType,
3585				phba->mbox_ext_buf_ctx.mboxType, rc);
3586	}
3587
3588
3589	/* state change */
3590	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3591	kfree(dd_data);
3592	return job;
3593}
3594
3595/**
3596 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3597 * @phba: Pointer to HBA context object.
3598 * @pmboxq: Pointer to mailbox command.
3599 *
3600 * This is completion handler function for mailbox read commands with multiple
3601 * external buffers.
3602 **/
3603static void
3604lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3605{
3606	struct fc_bsg_job *job;
3607
3608	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3609
3610	/* handle the BSG job with mailbox command */
3611	if (!job)
3612		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3613
3614	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3615			"2939 SLI_CONFIG ext-buffer rd maibox command "
3616			"complete, ctxState:x%x, mbxStatus:x%x\n",
3617			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3618
3619	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3620		lpfc_bsg_mbox_ext_session_reset(phba);
3621
3622	/* free base driver mailbox structure memory */
3623	mempool_free(pmboxq, phba->mbox_mem_pool);
3624
3625	/* if the job is still active, call job done */
3626	if (job)
3627		job->job_done(job);
3628
3629	return;
3630}
3631
3632/**
3633 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3634 * @phba: Pointer to HBA context object.
3635 * @pmboxq: Pointer to mailbox command.
3636 *
3637 * This is completion handler function for mailbox write commands with multiple
3638 * external buffers.
3639 **/
3640static void
3641lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3642{
3643	struct fc_bsg_job *job;
3644
3645	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3646
3647	/* handle the BSG job with the mailbox command */
3648	if (!job)
3649		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3650
3651	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3652			"2940 SLI_CONFIG ext-buffer wr maibox command "
3653			"complete, ctxState:x%x, mbxStatus:x%x\n",
3654			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3655
3656	/* free all memory, including dma buffers */
3657	mempool_free(pmboxq, phba->mbox_mem_pool);
3658	lpfc_bsg_mbox_ext_session_reset(phba);
3659
3660	/* if the job is still active, call job done */
3661	if (job)
3662		job->job_done(job);
3663
3664	return;
3665}
3666
3667static void
3668lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3669				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3670				struct lpfc_dmabuf *ext_dmabuf)
3671{
3672	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3673
3674	/* pointer to the start of mailbox command */
3675	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3676
3677	if (nemb_tp == nemb_mse) {
3678		if (index == 0) {
3679			sli_cfg_mbx->un.sli_config_emb0_subsys.
3680				mse[index].pa_hi =
3681				putPaddrHigh(mbx_dmabuf->phys +
3682					     sizeof(MAILBOX_t));
3683			sli_cfg_mbx->un.sli_config_emb0_subsys.
3684				mse[index].pa_lo =
3685				putPaddrLow(mbx_dmabuf->phys +
3686					    sizeof(MAILBOX_t));
3687			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3688					"2943 SLI_CONFIG(mse)[%d], "
3689					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3690					index,
3691					sli_cfg_mbx->un.sli_config_emb0_subsys.
3692					mse[index].buf_len,
3693					sli_cfg_mbx->un.sli_config_emb0_subsys.
3694					mse[index].pa_hi,
3695					sli_cfg_mbx->un.sli_config_emb0_subsys.
3696					mse[index].pa_lo);
3697		} else {
3698			sli_cfg_mbx->un.sli_config_emb0_subsys.
3699				mse[index].pa_hi =
3700				putPaddrHigh(ext_dmabuf->phys);
3701			sli_cfg_mbx->un.sli_config_emb0_subsys.
3702				mse[index].pa_lo =
3703				putPaddrLow(ext_dmabuf->phys);
3704			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3705					"2944 SLI_CONFIG(mse)[%d], "
3706					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3707					index,
3708					sli_cfg_mbx->un.sli_config_emb0_subsys.
3709					mse[index].buf_len,
3710					sli_cfg_mbx->un.sli_config_emb0_subsys.
3711					mse[index].pa_hi,
3712					sli_cfg_mbx->un.sli_config_emb0_subsys.
3713					mse[index].pa_lo);
3714		}
3715	} else {
3716		if (index == 0) {
3717			sli_cfg_mbx->un.sli_config_emb1_subsys.
3718				hbd[index].pa_hi =
3719				putPaddrHigh(mbx_dmabuf->phys +
3720					     sizeof(MAILBOX_t));
3721			sli_cfg_mbx->un.sli_config_emb1_subsys.
3722				hbd[index].pa_lo =
3723				putPaddrLow(mbx_dmabuf->phys +
3724					    sizeof(MAILBOX_t));
3725			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3726					"3007 SLI_CONFIG(hbd)[%d], "
3727					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3728				index,
3729				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3730				&sli_cfg_mbx->un.
3731				sli_config_emb1_subsys.hbd[index]),
3732				sli_cfg_mbx->un.sli_config_emb1_subsys.
3733				hbd[index].pa_hi,
3734				sli_cfg_mbx->un.sli_config_emb1_subsys.
3735				hbd[index].pa_lo);
3736
3737		} else {
3738			sli_cfg_mbx->un.sli_config_emb1_subsys.
3739				hbd[index].pa_hi =
3740				putPaddrHigh(ext_dmabuf->phys);
3741			sli_cfg_mbx->un.sli_config_emb1_subsys.
3742				hbd[index].pa_lo =
3743				putPaddrLow(ext_dmabuf->phys);
3744			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3745					"3008 SLI_CONFIG(hbd)[%d], "
3746					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3747				index,
3748				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3749				&sli_cfg_mbx->un.
3750				sli_config_emb1_subsys.hbd[index]),
3751				sli_cfg_mbx->un.sli_config_emb1_subsys.
3752				hbd[index].pa_hi,
3753				sli_cfg_mbx->un.sli_config_emb1_subsys.
3754				hbd[index].pa_lo);
3755		}
3756	}
3757	return;
3758}
3759
3760/**
3761 * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
3762 * @phba: Pointer to HBA context object.
3763 * @mb: Pointer to a BSG mailbox object.
3764 * @nemb_tp: Enumerate of non-embedded mailbox command type.
3765 * @dmabuff: Pointer to a DMA buffer descriptor.
3766 *
3767 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3768 * non-embedded external bufffers.
3769 **/
3770static int
3771lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3772			      enum nemb_type nemb_tp,
3773			      struct lpfc_dmabuf *dmabuf)
3774{
3775	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3776	struct dfc_mbox_req *mbox_req;
3777	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3778	uint32_t ext_buf_cnt, ext_buf_index;
3779	struct lpfc_dmabuf *ext_dmabuf = NULL;
3780	struct bsg_job_data *dd_data = NULL;
3781	LPFC_MBOXQ_t *pmboxq = NULL;
3782	MAILBOX_t *pmb;
3783	uint8_t *pmbx;
3784	int rc, i;
3785
3786	mbox_req =
3787	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3788
3789	/* pointer to the start of mailbox command */
3790	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3791
3792	if (nemb_tp == nemb_mse) {
3793		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3794			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3795		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3796			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3797					"2945 Handled SLI_CONFIG(mse) rd, "
3798					"ext_buf_cnt(%d) out of range(%d)\n",
3799					ext_buf_cnt,
3800					LPFC_MBX_SLI_CONFIG_MAX_MSE);
3801			rc = -ERANGE;
3802			goto job_error;
3803		}
3804		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3805				"2941 Handled SLI_CONFIG(mse) rd, "
3806				"ext_buf_cnt:%d\n", ext_buf_cnt);
3807	} else {
3808		/* sanity check on interface type for support */
3809		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3810		    LPFC_SLI_INTF_IF_TYPE_2) {
3811			rc = -ENODEV;
3812			goto job_error;
3813		}
3814		/* nemb_tp == nemb_hbd */
3815		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3816		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3817			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3818					"2946 Handled SLI_CONFIG(hbd) rd, "
3819					"ext_buf_cnt(%d) out of range(%d)\n",
3820					ext_buf_cnt,
3821					LPFC_MBX_SLI_CONFIG_MAX_HBD);
3822			rc = -ERANGE;
3823			goto job_error;
3824		}
3825		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3826				"2942 Handled SLI_CONFIG(hbd) rd, "
3827				"ext_buf_cnt:%d\n", ext_buf_cnt);
3828	}
3829
3830	/* before dma descriptor setup */
3831	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3832					sta_pre_addr, dmabuf, ext_buf_cnt);
3833
3834	/* reject non-embedded mailbox command with none external buffer */
3835	if (ext_buf_cnt == 0) {
3836		rc = -EPERM;
3837		goto job_error;
3838	} else if (ext_buf_cnt > 1) {
3839		/* additional external read buffers */
3840		for (i = 1; i < ext_buf_cnt; i++) {
3841			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3842			if (!ext_dmabuf) {
3843				rc = -ENOMEM;
3844				goto job_error;
3845			}
3846			list_add_tail(&ext_dmabuf->list,
3847				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3848		}
3849	}
3850
3851	/* bsg tracking structure */
3852	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3853	if (!dd_data) {
3854		rc = -ENOMEM;
3855		goto job_error;
3856	}
3857
3858	/* mailbox command structure for base driver */
3859	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3860	if (!pmboxq) {
3861		rc = -ENOMEM;
3862		goto job_error;
3863	}
3864	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3865
3866	/* for the first external buffer */
3867	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3868
3869	/* for the rest of external buffer descriptors if any */
3870	if (ext_buf_cnt > 1) {
3871		ext_buf_index = 1;
3872		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3873				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3874			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3875						ext_buf_index, dmabuf,
3876						curr_dmabuf);
3877			ext_buf_index++;
3878		}
3879	}
3880
3881	/* after dma descriptor setup */
3882	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3883					sta_pos_addr, dmabuf, ext_buf_cnt);
3884
3885	/* construct base driver mbox command */
3886	pmb = &pmboxq->u.mb;
3887	pmbx = (uint8_t *)dmabuf->virt;
3888	memcpy(pmb, pmbx, sizeof(*pmb));
3889	pmb->mbxOwner = OWN_HOST;
3890	pmboxq->vport = phba->pport;
3891
3892	/* multi-buffer handling context */
3893	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3894	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3895	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3896	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3897	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3898	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3899
3900	/* callback for multi-buffer read mailbox command */
3901	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3902
3903	/* context fields to callback function */
3904	pmboxq->context1 = dd_data;
3905	dd_data->type = TYPE_MBOX;
3906	dd_data->set_job = job;
3907	dd_data->context_un.mbox.pmboxq = pmboxq;
3908	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3909	job->dd_data = dd_data;
3910
3911	/* state change */
3912	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3913
3914	/*
3915	 * Non-embedded mailbox subcommand data gets byte swapped here because
3916	 * the lower level driver code only does the first 64 mailbox words.
3917	 */
3918	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3919	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3920		(nemb_tp == nemb_mse))
3921		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3922			&pmbx[sizeof(MAILBOX_t)],
3923				sli_cfg_mbx->un.sli_config_emb0_subsys.
3924					mse[0].buf_len);
3925
3926	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3927	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3928		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3929				"2947 Issued SLI_CONFIG ext-buffer "
3930				"maibox command, rc:x%x\n", rc);
3931		return SLI_CONFIG_HANDLED;
3932	}
3933	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3934			"2948 Failed to issue SLI_CONFIG ext-buffer "
3935			"maibox command, rc:x%x\n", rc);
3936	rc = -EPIPE;
3937
3938job_error:
3939	if (pmboxq)
3940		mempool_free(pmboxq, phba->mbox_mem_pool);
3941	lpfc_bsg_dma_page_list_free(phba,
3942				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3943	kfree(dd_data);
3944	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3945	return rc;
3946}
3947
3948/**
3949 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
3950 * @phba: Pointer to HBA context object.
3951 * @mb: Pointer to a BSG mailbox object.
3952 * @dmabuff: Pointer to a DMA buffer descriptor.
3953 *
3954 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
3955 * non-embedded external bufffers.
3956 **/
3957static int
3958lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3959			       enum nemb_type nemb_tp,
3960			       struct lpfc_dmabuf *dmabuf)
3961{
3962	struct dfc_mbox_req *mbox_req;
3963	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3964	uint32_t ext_buf_cnt;
3965	struct bsg_job_data *dd_data = NULL;
3966	LPFC_MBOXQ_t *pmboxq = NULL;
3967	MAILBOX_t *pmb;
3968	uint8_t *mbx;
3969	int rc = SLI_CONFIG_NOT_HANDLED, i;
3970
3971	mbox_req =
3972	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3973
3974	/* pointer to the start of mailbox command */
3975	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3976
3977	if (nemb_tp == nemb_mse) {
3978		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3979			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3980		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3981			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3982					"2953 Failed SLI_CONFIG(mse) wr, "
3983					"ext_buf_cnt(%d) out of range(%d)\n",
3984					ext_buf_cnt,
3985					LPFC_MBX_SLI_CONFIG_MAX_MSE);
3986			return -ERANGE;
3987		}
3988		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3989				"2949 Handled SLI_CONFIG(mse) wr, "
3990				"ext_buf_cnt:%d\n", ext_buf_cnt);
3991	} else {
3992		/* sanity check on interface type for support */
3993		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3994		    LPFC_SLI_INTF_IF_TYPE_2)
3995			return -ENODEV;
3996		/* nemb_tp == nemb_hbd */
3997		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3998		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3999			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4000					"2954 Failed SLI_CONFIG(hbd) wr, "
4001					"ext_buf_cnt(%d) out of range(%d)\n",
4002					ext_buf_cnt,
4003					LPFC_MBX_SLI_CONFIG_MAX_HBD);
4004			return -ERANGE;
4005		}
4006		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4007				"2950 Handled SLI_CONFIG(hbd) wr, "
4008				"ext_buf_cnt:%d\n", ext_buf_cnt);
4009	}
4010
4011	/* before dma buffer descriptor setup */
4012	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4013					sta_pre_addr, dmabuf, ext_buf_cnt);
4014
4015	if (ext_buf_cnt == 0)
4016		return -EPERM;
4017
4018	/* for the first external buffer */
4019	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
4020
4021	/* after dma descriptor setup */
4022	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4023					sta_pos_addr, dmabuf, ext_buf_cnt);
4024
4025	/* log for looking forward */
4026	for (i = 1; i < ext_buf_cnt; i++) {
4027		if (nemb_tp == nemb_mse)
4028			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4029				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
4030				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
4031				mse[i].buf_len);
4032		else
4033			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4034				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
4035				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4036				&sli_cfg_mbx->un.sli_config_emb1_subsys.
4037				hbd[i]));
4038	}
4039
4040	/* multi-buffer handling context */
4041	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4042	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
4043	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4044	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4045	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4046	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4047
4048	if (ext_buf_cnt == 1) {
4049		/* bsg tracking structure */
4050		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4051		if (!dd_data) {
4052			rc = -ENOMEM;
4053			goto job_error;
4054		}
4055
4056		/* mailbox command structure for base driver */
4057		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4058		if (!pmboxq) {
4059			rc = -ENOMEM;
4060			goto job_error;
4061		}
4062		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4063		pmb = &pmboxq->u.mb;
4064		mbx = (uint8_t *)dmabuf->virt;
4065		memcpy(pmb, mbx, sizeof(*pmb));
4066		pmb->mbxOwner = OWN_HOST;
4067		pmboxq->vport = phba->pport;
4068
4069		/* callback for multi-buffer read mailbox command */
4070		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4071
4072		/* context fields to callback function */
4073		pmboxq->context1 = dd_data;
4074		dd_data->type = TYPE_MBOX;
4075		dd_data->set_job = job;
4076		dd_data->context_un.mbox.pmboxq = pmboxq;
4077		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
4078		job->dd_data = dd_data;
4079
4080		/* state change */
4081
4082		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4083		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4084		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4085			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4086					"2955 Issued SLI_CONFIG ext-buffer "
4087					"maibox command, rc:x%x\n", rc);
4088			return SLI_CONFIG_HANDLED;
4089		}
4090		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4091				"2956 Failed to issue SLI_CONFIG ext-buffer "
4092				"maibox command, rc:x%x\n", rc);
4093		rc = -EPIPE;
4094		goto job_error;
4095	}
4096
4097	/* wait for additoinal external buffers */
4098
4099	job->reply->result = 0;
4100	job->job_done(job);
4101	return SLI_CONFIG_HANDLED;
4102
4103job_error:
4104	if (pmboxq)
4105		mempool_free(pmboxq, phba->mbox_mem_pool);
4106	kfree(dd_data);
4107
4108	return rc;
4109}
4110
4111/**
4112 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4113 * @phba: Pointer to HBA context object.
4114 * @mb: Pointer to a BSG mailbox object.
4115 * @dmabuff: Pointer to a DMA buffer descriptor.
4116 *
4117 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
4118 * external bufffers, including both 0x9B with non-embedded MSEs and 0x9B
4119 * with embedded sussystem 0x1 and opcodes with external HBDs.
4120 **/
4121static int
4122lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4123			     struct lpfc_dmabuf *dmabuf)
4124{
4125	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4126	uint32_t subsys;
4127	uint32_t opcode;
4128	int rc = SLI_CONFIG_NOT_HANDLED;
4129
4130	/* state change on new multi-buffer pass-through mailbox command */
4131	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
4132
4133	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4134
4135	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
4136	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
4137		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
4138				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4139		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
4140				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4141		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4142			switch (opcode) {
4143			case FCOE_OPCODE_READ_FCF:
4144			case FCOE_OPCODE_GET_DPORT_RESULTS:
4145				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4146						"2957 Handled SLI_CONFIG "
4147						"subsys_fcoe, opcode:x%x\n",
4148						opcode);
4149				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4150							nemb_mse, dmabuf);
4151				break;
4152			case FCOE_OPCODE_ADD_FCF:
4153			case FCOE_OPCODE_SET_DPORT_MODE:
4154			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4155				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4156						"2958 Handled SLI_CONFIG "
4157						"subsys_fcoe, opcode:x%x\n",
4158						opcode);
4159				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4160							nemb_mse, dmabuf);
4161				break;
4162			default:
4163				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4164						"2959 Reject SLI_CONFIG "
4165						"subsys_fcoe, opcode:x%x\n",
4166						opcode);
4167				rc = -EPERM;
4168				break;
4169			}
4170		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4171			switch (opcode) {
4172			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4173			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4174			case COMN_OPCODE_GET_PROFILE_CONFIG:
4175				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4176						"3106 Handled SLI_CONFIG "
4177						"subsys_comn, opcode:x%x\n",
4178						opcode);
4179				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4180							nemb_mse, dmabuf);
4181				break;
4182			default:
4183				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4184						"3107 Reject SLI_CONFIG "
4185						"subsys_comn, opcode:x%x\n",
4186						opcode);
4187				rc = -EPERM;
4188				break;
4189			}
4190		} else {
4191			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4192					"2977 Reject SLI_CONFIG "
4193					"subsys:x%d, opcode:x%x\n",
4194					subsys, opcode);
4195			rc = -EPERM;
4196		}
4197	} else {
4198		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4199				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4200		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4201				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4202		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4203			switch (opcode) {
4204			case COMN_OPCODE_READ_OBJECT:
4205			case COMN_OPCODE_READ_OBJECT_LIST:
4206				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4207						"2960 Handled SLI_CONFIG "
4208						"subsys_comn, opcode:x%x\n",
4209						opcode);
4210				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4211							nemb_hbd, dmabuf);
4212				break;
4213			case COMN_OPCODE_WRITE_OBJECT:
4214				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4215						"2961 Handled SLI_CONFIG "
4216						"subsys_comn, opcode:x%x\n",
4217						opcode);
4218				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4219							nemb_hbd, dmabuf);
4220				break;
4221			default:
4222				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4223						"2962 Not handled SLI_CONFIG "
4224						"subsys_comn, opcode:x%x\n",
4225						opcode);
4226				rc = SLI_CONFIG_NOT_HANDLED;
4227				break;
4228			}
4229		} else {
4230			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4231					"2978 Not handled SLI_CONFIG "
4232					"subsys:x%d, opcode:x%x\n",
4233					subsys, opcode);
4234			rc = SLI_CONFIG_NOT_HANDLED;
4235		}
4236	}
4237
4238	/* state reset on not handled new multi-buffer mailbox command */
4239	if (rc != SLI_CONFIG_HANDLED)
4240		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4241
4242	return rc;
4243}
4244
4245/**
4246 * lpfc_bsg_mbox_ext_abort_req - request to abort mbox command with ext buffers
4247 * @phba: Pointer to HBA context object.
4248 *
4249 * This routine is for requesting to abort a pass-through mailbox command with
4250 * multiple external buffers due to error condition.
4251 **/
4252static void
4253lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4254{
4255	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4256		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4257	else
4258		lpfc_bsg_mbox_ext_session_reset(phba);
4259	return;
4260}
4261
4262/**
4263 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4264 * @phba: Pointer to HBA context object.
4265 * @dmabuf: Pointer to a DMA buffer descriptor.
4266 *
4267 * This routine extracts the next mailbox read external buffer back to
4268 * user space through BSG.
4269 **/
4270static int
4271lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
4272{
4273	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4274	struct lpfc_dmabuf *dmabuf;
4275	uint8_t *pbuf;
4276	uint32_t size;
4277	uint32_t index;
4278
4279	index = phba->mbox_ext_buf_ctx.seqNum;
4280	phba->mbox_ext_buf_ctx.seqNum++;
4281
4282	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4283			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4284
4285	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4286		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4287			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4288		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4289				"2963 SLI_CONFIG (mse) ext-buffer rd get "
4290				"buffer[%d], size:%d\n", index, size);
4291	} else {
4292		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4293			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4294		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4295				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
4296				"buffer[%d], size:%d\n", index, size);
4297	}
4298	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4299		return -EPIPE;
4300	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4301				  struct lpfc_dmabuf, list);
4302	list_del_init(&dmabuf->list);
4303
4304	/* after dma buffer descriptor setup */
4305	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4306					mbox_rd, dma_ebuf, sta_pos_addr,
4307					dmabuf, index);
4308
4309	pbuf = (uint8_t *)dmabuf->virt;
4310	job->reply->reply_payload_rcv_len =
4311		sg_copy_from_buffer(job->reply_payload.sg_list,
4312				    job->reply_payload.sg_cnt,
4313				    pbuf, size);
4314
4315	lpfc_bsg_dma_page_free(phba, dmabuf);
4316
4317	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4318		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4319				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4320				"command session done\n");
4321		lpfc_bsg_mbox_ext_session_reset(phba);
4322	}
4323
4324	job->reply->result = 0;
4325	job->job_done(job);
4326
4327	return SLI_CONFIG_HANDLED;
4328}
4329
4330/**
4331 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4332 * @phba: Pointer to HBA context object.
4333 * @dmabuf: Pointer to a DMA buffer descriptor.
4334 *
4335 * This routine sets up the next mailbox read external buffer obtained
4336 * from user space through BSG.
4337 **/
4338static int
4339lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
4340			struct lpfc_dmabuf *dmabuf)
4341{
4342	struct bsg_job_data *dd_data = NULL;
4343	LPFC_MBOXQ_t *pmboxq = NULL;
4344	MAILBOX_t *pmb;
4345	enum nemb_type nemb_tp;
4346	uint8_t *pbuf;
4347	uint32_t size;
4348	uint32_t index;
4349	int rc;
4350
4351	index = phba->mbox_ext_buf_ctx.seqNum;
4352	phba->mbox_ext_buf_ctx.seqNum++;
4353	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4354
4355	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4356	if (!dd_data) {
4357		rc = -ENOMEM;
4358		goto job_error;
4359	}
4360
4361	pbuf = (uint8_t *)dmabuf->virt;
4362	size = job->request_payload.payload_len;
4363	sg_copy_to_buffer(job->request_payload.sg_list,
4364			  job->request_payload.sg_cnt,
4365			  pbuf, size);
4366
4367	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4368		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4369				"2966 SLI_CONFIG (mse) ext-buffer wr set "
4370				"buffer[%d], size:%d\n",
4371				phba->mbox_ext_buf_ctx.seqNum, size);
4372
4373	} else {
4374		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4375				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
4376				"buffer[%d], size:%d\n",
4377				phba->mbox_ext_buf_ctx.seqNum, size);
4378
4379	}
4380
4381	/* set up external buffer descriptor and add to external buffer list */
4382	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4383					phba->mbox_ext_buf_ctx.mbx_dmabuf,
4384					dmabuf);
4385	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4386
4387	/* after write dma buffer */
4388	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4389					mbox_wr, dma_ebuf, sta_pos_addr,
4390					dmabuf, index);
4391
4392	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4393		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4394				"2968 SLI_CONFIG ext-buffer wr all %d "
4395				"ebuffers received\n",
4396				phba->mbox_ext_buf_ctx.numBuf);
4397		/* mailbox command structure for base driver */
4398		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4399		if (!pmboxq) {
4400			rc = -ENOMEM;
4401			goto job_error;
4402		}
4403		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4404		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4405		pmb = &pmboxq->u.mb;
4406		memcpy(pmb, pbuf, sizeof(*pmb));
4407		pmb->mbxOwner = OWN_HOST;
4408		pmboxq->vport = phba->pport;
4409
4410		/* callback for multi-buffer write mailbox command */
4411		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4412
4413		/* context fields to callback function */
4414		pmboxq->context1 = dd_data;
4415		dd_data->type = TYPE_MBOX;
4416		dd_data->set_job = job;
4417		dd_data->context_un.mbox.pmboxq = pmboxq;
4418		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4419		job->dd_data = dd_data;
4420
4421		/* state change */
4422		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4423
4424		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4425		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4426			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4427					"2969 Issued SLI_CONFIG ext-buffer "
4428					"maibox command, rc:x%x\n", rc);
4429			return SLI_CONFIG_HANDLED;
4430		}
4431		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4432				"2970 Failed to issue SLI_CONFIG ext-buffer "
4433				"maibox command, rc:x%x\n", rc);
4434		rc = -EPIPE;
4435		goto job_error;
4436	}
4437
4438	/* wait for additoinal external buffers */
4439	job->reply->result = 0;
4440	job->job_done(job);
4441	return SLI_CONFIG_HANDLED;
4442
4443job_error:
4444	lpfc_bsg_dma_page_free(phba, dmabuf);
4445	kfree(dd_data);
4446
4447	return rc;
4448}
4449
4450/**
4451 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4452 * @phba: Pointer to HBA context object.
4453 * @mb: Pointer to a BSG mailbox object.
4454 * @dmabuff: Pointer to a DMA buffer descriptor.
4455 *
4456 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4457 * command with multiple non-embedded external buffers.
4458 **/
4459static int
4460lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
4461			     struct lpfc_dmabuf *dmabuf)
4462{
4463	int rc;
4464
4465	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4466			"2971 SLI_CONFIG buffer (type:x%x)\n",
4467			phba->mbox_ext_buf_ctx.mboxType);
4468
4469	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4470		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4471			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4472					"2972 SLI_CONFIG rd buffer state "
4473					"mismatch:x%x\n",
4474					phba->mbox_ext_buf_ctx.state);
4475			lpfc_bsg_mbox_ext_abort(phba);
4476			return -EPIPE;
4477		}
4478		rc = lpfc_bsg_read_ebuf_get(phba, job);
4479		if (rc == SLI_CONFIG_HANDLED)
4480			lpfc_bsg_dma_page_free(phba, dmabuf);
4481	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4482		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4483			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4484					"2973 SLI_CONFIG wr buffer state "
4485					"mismatch:x%x\n",
4486					phba->mbox_ext_buf_ctx.state);
4487			lpfc_bsg_mbox_ext_abort(phba);
4488			return -EPIPE;
4489		}
4490		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4491	}
4492	return rc;
4493}
4494
4495/**
4496 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4497 * @phba: Pointer to HBA context object.
4498 * @mb: Pointer to a BSG mailbox object.
4499 * @dmabuff: Pointer to a DMA buffer descriptor.
4500 *
4501 * This routine checkes and handles non-embedded multi-buffer SLI_CONFIG
4502 * (0x9B) mailbox commands and external buffers.
4503 **/
4504static int
4505lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4506			    struct lpfc_dmabuf *dmabuf)
4507{
4508	struct dfc_mbox_req *mbox_req;
4509	int rc = SLI_CONFIG_NOT_HANDLED;
4510
4511	mbox_req =
4512	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4513
4514	/* mbox command with/without single external buffer */
4515	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4516		return rc;
4517
4518	/* mbox command and first external buffer */
4519	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4520		if (mbox_req->extSeqNum == 1) {
4521			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4522					"2974 SLI_CONFIG mailbox: tag:%d, "
4523					"seq:%d\n", mbox_req->extMboxTag,
4524					mbox_req->extSeqNum);
4525			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4526			return rc;
4527		} else
4528			goto sli_cfg_ext_error;
4529	}
4530
4531	/*
4532	 * handle additional external buffers
4533	 */
4534
4535	/* check broken pipe conditions */
4536	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4537		goto sli_cfg_ext_error;
4538	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4539		goto sli_cfg_ext_error;
4540	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4541		goto sli_cfg_ext_error;
4542
4543	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4544			"2975 SLI_CONFIG mailbox external buffer: "
4545			"extSta:x%x, tag:%d, seq:%d\n",
4546			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4547			mbox_req->extSeqNum);
4548	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4549	return rc;
4550
4551sli_cfg_ext_error:
4552	/* all other cases, broken pipe */
4553	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4554			"2976 SLI_CONFIG mailbox broken pipe: "
4555			"ctxSta:x%x, ctxNumBuf:%d "
4556			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4557			phba->mbox_ext_buf_ctx.state,
4558			phba->mbox_ext_buf_ctx.numBuf,
4559			phba->mbox_ext_buf_ctx.mbxTag,
4560			phba->mbox_ext_buf_ctx.seqNum,
4561			mbox_req->extMboxTag, mbox_req->extSeqNum);
4562
4563	lpfc_bsg_mbox_ext_session_reset(phba);
4564
4565	return -EPIPE;
4566}
4567
4568/**
4569 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4570 * @phba: Pointer to HBA context object.
4571 * @mb: Pointer to a mailbox object.
4572 * @vport: Pointer to a vport object.
4573 *
4574 * Allocate a tracking object, mailbox command memory, get a mailbox
4575 * from the mailbox pool, copy the caller mailbox command.
4576 *
4577 * If offline and the sli is active we need to poll for the command (port is
4578 * being reset) and com-plete the job, otherwise issue the mailbox command and
4579 * let our completion handler finish the command.
4580 **/
4581static int
4582lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4583	struct lpfc_vport *vport)
4584{
4585	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4586	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4587	/* a 4k buffer to hold the mb and extended data from/to the bsg */
4588	uint8_t *pmbx = NULL;
4589	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4590	struct lpfc_dmabuf *dmabuf = NULL;
4591	struct dfc_mbox_req *mbox_req;
4592	struct READ_EVENT_LOG_VAR *rdEventLog;
4593	uint32_t transmit_length, receive_length, mode;
4594	struct lpfc_mbx_sli4_config *sli4_config;
4595	struct lpfc_mbx_nembed_cmd *nembed_sge;
4596	struct ulp_bde64 *bde;
4597	uint8_t *ext = NULL;
4598	int rc = 0;
4599	uint8_t *from;
4600	uint32_t size;
4601
4602	/* in case no data is transferred */
4603	job->reply->reply_payload_rcv_len = 0;
4604
4605	/* sanity check to protect driver */
4606	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4607	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
4608		rc = -ERANGE;
4609		goto job_done;
4610	}
4611
4612	/*
4613	 * Don't allow mailbox commands to be sent when blocked or when in
4614	 * the middle of discovery
4615	 */
4616	 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4617		rc = -EAGAIN;
4618		goto job_done;
4619	}
4620
4621	mbox_req =
4622	    (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4623
4624	/* check if requested extended data lengths are valid */
4625	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4626	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4627		rc = -ERANGE;
4628		goto job_done;
4629	}
4630
4631	dmabuf = lpfc_bsg_dma_page_alloc(phba);
4632	if (!dmabuf || !dmabuf->virt) {
4633		rc = -ENOMEM;
4634		goto job_done;
4635	}
4636
4637	/* Get the mailbox command or external buffer from BSG */
4638	pmbx = (uint8_t *)dmabuf->virt;
4639	size = job->request_payload.payload_len;
4640	sg_copy_to_buffer(job->request_payload.sg_list,
4641			  job->request_payload.sg_cnt, pmbx, size);
4642
4643	/* Handle possible SLI_CONFIG with non-embedded payloads */
4644	if (phba->sli_rev == LPFC_SLI_REV4) {
4645		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4646		if (rc == SLI_CONFIG_HANDLED)
4647			goto job_cont;
4648		if (rc)
4649			goto job_done;
4650		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4651	}
4652
4653	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4654	if (rc != 0)
4655		goto job_done; /* must be negative */
4656
4657	/* allocate our bsg tracking structure */
4658	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4659	if (!dd_data) {
4660		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4661				"2727 Failed allocation of dd_data\n");
4662		rc = -ENOMEM;
4663		goto job_done;
4664	}
4665
4666	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4667	if (!pmboxq) {
4668		rc = -ENOMEM;
4669		goto job_done;
4670	}
4671	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4672
4673	pmb = &pmboxq->u.mb;
4674	memcpy(pmb, pmbx, sizeof(*pmb));
4675	pmb->mbxOwner = OWN_HOST;
4676	pmboxq->vport = vport;
4677
4678	/* If HBA encountered an error attention, allow only DUMP
4679	 * or RESTART mailbox commands until the HBA is restarted.
4680	 */
4681	if (phba->pport->stopped &&
4682	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
4683	    pmb->mbxCommand != MBX_RESTART &&
4684	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
4685	    pmb->mbxCommand != MBX_WRITE_WWN)
4686		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4687				"2797 mbox: Issued mailbox cmd "
4688				"0x%x while in stopped state.\n",
4689				pmb->mbxCommand);
4690
4691	/* extended mailbox commands will need an extended buffer */
4692	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4693		from = pmbx;
4694		ext = from + sizeof(MAILBOX_t);
4695		pmboxq->context2 = ext;
4696		pmboxq->in_ext_byte_len =
4697			mbox_req->inExtWLen * sizeof(uint32_t);
4698		pmboxq->out_ext_byte_len =
4699			mbox_req->outExtWLen * sizeof(uint32_t);
4700		pmboxq->mbox_offset_word = mbox_req->mbOffset;
4701	}
4702
4703	/* biu diag will need a kernel buffer to transfer the data
4704	 * allocate our own buffer and setup the mailbox command to
4705	 * use ours
4706	 */
4707	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4708		transmit_length = pmb->un.varWords[1];
4709		receive_length = pmb->un.varWords[4];
4710		/* transmit length cannot be greater than receive length or
4711		 * mailbox extension size
4712		 */
4713		if ((transmit_length > receive_length) ||
4714			(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4715			rc = -ERANGE;
4716			goto job_done;
4717		}
4718		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4719			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4720		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4721			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4722
4723		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4724			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4725			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4726		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4727			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4728			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4729	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4730		rdEventLog = &pmb->un.varRdEventLog;
4731		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4732		mode = bf_get(lpfc_event_log, rdEventLog);
4733
4734		/* receive length cannot be greater than mailbox
4735		 * extension size
4736		 */
4737		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4738			rc = -ERANGE;
4739			goto job_done;
4740		}
4741
4742		/* mode zero uses a bde like biu diags command */
4743		if (mode == 0) {
4744			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4745							+ sizeof(MAILBOX_t));
4746			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4747							+ sizeof(MAILBOX_t));
4748		}
4749	} else if (phba->sli_rev == LPFC_SLI_REV4) {
4750		/* Let type 4 (well known data) through because the data is
4751		 * returned in varwords[4-8]
4752		 * otherwise check the recieve length and fetch the buffer addr
4753		 */
4754		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4755			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4756			/* rebuild the command for sli4 using our own buffers
4757			* like we do for biu diags
4758			*/
4759			receive_length = pmb->un.varWords[2];
4760			/* receive length cannot be greater than mailbox
4761			 * extension size
4762			 */
4763			if (receive_length == 0) {
4764				rc = -ERANGE;
4765				goto job_done;
4766			}
4767			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4768						+ sizeof(MAILBOX_t));
4769			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4770						+ sizeof(MAILBOX_t));
4771		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4772			pmb->un.varUpdateCfg.co) {
4773			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4774
4775			/* bde size cannot be greater than mailbox ext size */
4776			if (bde->tus.f.bdeSize >
4777			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4778				rc = -ERANGE;
4779				goto job_done;
4780			}
4781			bde->addrHigh = putPaddrHigh(dmabuf->phys
4782						+ sizeof(MAILBOX_t));
4783			bde->addrLow = putPaddrLow(dmabuf->phys
4784						+ sizeof(MAILBOX_t));
4785		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4786			/* Handling non-embedded SLI_CONFIG mailbox command */
4787			sli4_config = &pmboxq->u.mqe.un.sli4_config;
4788			if (!bf_get(lpfc_mbox_hdr_emb,
4789			    &sli4_config->header.cfg_mhdr)) {
4790				/* rebuild the command for sli4 using our
4791				 * own buffers like we do for biu diags
4792				 */
4793				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4794						&pmb->un.varWords[0];
4795				receive_length = nembed_sge->sge[0].length;
4796
4797				/* receive length cannot be greater than
4798				 * mailbox extension size
4799				 */
4800				if ((receive_length == 0) ||
4801				    (receive_length >
4802				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4803					rc = -ERANGE;
4804					goto job_done;
4805				}
4806
4807				nembed_sge->sge[0].pa_hi =
4808						putPaddrHigh(dmabuf->phys
4809						   + sizeof(MAILBOX_t));
4810				nembed_sge->sge[0].pa_lo =
4811						putPaddrLow(dmabuf->phys
4812						   + sizeof(MAILBOX_t));
4813			}
4814		}
4815	}
4816
4817	dd_data->context_un.mbox.dmabuffers = dmabuf;
4818
4819	/* setup wake call as IOCB callback */
4820	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4821
4822	/* setup context field to pass wait_queue pointer to wake function */
4823	pmboxq->context1 = dd_data;
4824	dd_data->type = TYPE_MBOX;
4825	dd_data->set_job = job;
4826	dd_data->context_un.mbox.pmboxq = pmboxq;
4827	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4828	dd_data->context_un.mbox.ext = ext;
4829	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4830	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4831	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4832	job->dd_data = dd_data;
4833
4834	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4835	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4836		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4837		if (rc != MBX_SUCCESS) {
4838			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4839			goto job_done;
4840		}
4841
4842		/* job finished, copy the data */
4843		memcpy(pmbx, pmb, sizeof(*pmb));
4844		job->reply->reply_payload_rcv_len =
4845			sg_copy_from_buffer(job->reply_payload.sg_list,
4846					    job->reply_payload.sg_cnt,
4847					    pmbx, size);
4848		/* not waiting mbox already done */
4849		rc = 0;
4850		goto job_done;
4851	}
4852
4853	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4854	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4855		return 1; /* job started */
4856
4857job_done:
4858	/* common exit for error or job completed inline */
4859	if (pmboxq)
4860		mempool_free(pmboxq, phba->mbox_mem_pool);
4861	lpfc_bsg_dma_page_free(phba, dmabuf);
4862	kfree(dd_data);
4863
4864job_cont:
4865	return rc;
4866}
4867
4868/**
4869 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4870 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
4871 **/
4872static int
4873lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
4874{
4875	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4876	struct lpfc_hba *phba = vport->phba;
4877	struct dfc_mbox_req *mbox_req;
4878	int rc = 0;
4879
4880	/* mix-and-match backward compatibility */
4881	job->reply->reply_payload_rcv_len = 0;
4882	if (job->request_len <
4883	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4884		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4885				"2737 Mix-and-match backward compatibility "
4886				"between MBOX_REQ old size:%d and "
4887				"new request size:%d\n",
4888				(int)(job->request_len -
4889				      sizeof(struct fc_bsg_request)),
4890				(int)sizeof(struct dfc_mbox_req));
4891		mbox_req = (struct dfc_mbox_req *)
4892				job->request->rqst_data.h_vendor.vendor_cmd;
4893		mbox_req->extMboxTag = 0;
4894		mbox_req->extSeqNum = 0;
4895	}
4896
4897	rc = lpfc_bsg_issue_mbox(phba, job, vport);
4898
4899	if (rc == 0) {
4900		/* job done */
4901		job->reply->result = 0;
4902		job->dd_data = NULL;
4903		job->job_done(job);
4904	} else if (rc == 1)
4905		/* job submitted, will complete later*/
4906		rc = 0; /* return zero, no error */
4907	else {
4908		/* some error occurred */
4909		job->reply->result = rc;
4910		job->dd_data = NULL;
4911	}
4912
4913	return rc;
4914}
4915
4916/**
4917 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
4918 * @phba: Pointer to HBA context object.
4919 * @cmdiocbq: Pointer to command iocb.
4920 * @rspiocbq: Pointer to response iocb.
4921 *
4922 * This function is the completion handler for iocbs issued using
4923 * lpfc_menlo_cmd function. This function is called by the
4924 * ring event handler function without any lock held. This function
4925 * can be called from both worker thread context and interrupt
4926 * context. This function also can be called from another thread which
4927 * cleans up the SLI layer objects.
4928 * This function copies the contents of the response iocb to the
4929 * response iocb memory object provided by the caller of
4930 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
4931 * sleeps for the iocb completion.
4932 **/
4933static void
4934lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4935			struct lpfc_iocbq *cmdiocbq,
4936			struct lpfc_iocbq *rspiocbq)
4937{
4938	struct bsg_job_data *dd_data;
4939	struct fc_bsg_job *job;
4940	IOCB_t *rsp;
4941	struct lpfc_dmabuf *bmp, *cmp, *rmp;
4942	struct lpfc_bsg_menlo *menlo;
4943	unsigned long flags;
4944	struct menlo_response *menlo_resp;
4945	unsigned int rsp_size;
4946	int rc = 0;
4947
4948	dd_data = cmdiocbq->context1;
4949	cmp = cmdiocbq->context2;
4950	bmp = cmdiocbq->context3;
4951	menlo = &dd_data->context_un.menlo;
4952	rmp = menlo->rmp;
4953	rsp = &rspiocbq->iocb;
4954
4955	/* Determine if job has been aborted */
4956	spin_lock_irqsave(&phba->ct_ev_lock, flags);
4957	job = dd_data->set_job;
4958	if (job) {
4959		/* Prevent timeout handling from trying to abort job  */
4960		job->dd_data = NULL;
4961	}
4962	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4963
4964	/* Copy the job data or set the failing status for the job */
4965
4966	if (job) {
4967		/* always return the xri, this would be used in the case
4968		 * of a menlo download to allow the data to be sent as a
4969		 * continuation of the exchange.
4970		 */
4971
4972		menlo_resp = (struct menlo_response *)
4973			job->reply->reply_data.vendor_reply.vendor_rsp;
4974		menlo_resp->xri = rsp->ulpContext;
4975		if (rsp->ulpStatus) {
4976			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
4977				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
4978				case IOERR_SEQUENCE_TIMEOUT:
4979					rc = -ETIMEDOUT;
4980					break;
4981				case IOERR_INVALID_RPI:
4982					rc = -EFAULT;
4983					break;
4984				default:
4985					rc = -EACCES;
4986					break;
4987				}
4988			} else {
4989				rc = -EACCES;
4990			}
4991		} else {
4992			rsp_size = rsp->un.genreq64.bdl.bdeSize;
4993			job->reply->reply_payload_rcv_len =
4994				lpfc_bsg_copy_data(rmp, &job->reply_payload,
4995						   rsp_size, 0);
4996		}
4997
4998	}
4999
5000	lpfc_sli_release_iocbq(phba, cmdiocbq);
5001	lpfc_free_bsg_buffers(phba, cmp);
5002	lpfc_free_bsg_buffers(phba, rmp);
5003	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5004	kfree(bmp);
5005	kfree(dd_data);
5006
5007	/* Complete the job if active */
5008
5009	if (job) {
5010		job->reply->result = rc;
5011		job->job_done(job);
5012	}
5013
5014	return;
5015}
5016
5017/**
5018 * lpfc_menlo_cmd - send an ioctl for menlo hardware
5019 * @job: fc_bsg_job to handle
5020 *
5021 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
5022 * all the command completions will return the xri for the command.
5023 * For menlo data requests a gen request 64 CX is used to continue the exchange
5024 * supplied in the menlo request header xri field.
5025 **/
5026static int
5027lpfc_menlo_cmd(struct fc_bsg_job *job)
5028{
5029	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
5030	struct lpfc_hba *phba = vport->phba;
5031	struct lpfc_iocbq *cmdiocbq;
5032	IOCB_t *cmd;
5033	int rc = 0;
5034	struct menlo_command *menlo_cmd;
5035	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
5036	int request_nseg;
5037	int reply_nseg;
5038	struct bsg_job_data *dd_data;
5039	struct ulp_bde64 *bpl = NULL;
5040
5041	/* in case no data is returned return just the return code */
5042	job->reply->reply_payload_rcv_len = 0;
5043
5044	if (job->request_len <
5045	    sizeof(struct fc_bsg_request) +
5046		sizeof(struct menlo_command)) {
5047		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5048				"2784 Received MENLO_CMD request below "
5049				"minimum size\n");
5050		rc = -ERANGE;
5051		goto no_dd_data;
5052	}
5053
5054	if (job->reply_len <
5055	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
5056		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5057				"2785 Received MENLO_CMD reply below "
5058				"minimum size\n");
5059		rc = -ERANGE;
5060		goto no_dd_data;
5061	}
5062
5063	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
5064		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5065				"2786 Adapter does not support menlo "
5066				"commands\n");
5067		rc = -EPERM;
5068		goto no_dd_data;
5069	}
5070
5071	menlo_cmd = (struct menlo_command *)
5072		job->request->rqst_data.h_vendor.vendor_cmd;
5073
5074	/* allocate our bsg tracking structure */
5075	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
5076	if (!dd_data) {
5077		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5078				"2787 Failed allocation of dd_data\n");
5079		rc = -ENOMEM;
5080		goto no_dd_data;
5081	}
5082
5083	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5084	if (!bmp) {
5085		rc = -ENOMEM;
5086		goto free_dd;
5087	}
5088
5089	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
5090	if (!bmp->virt) {
5091		rc = -ENOMEM;
5092		goto free_bmp;
5093	}
5094
5095	INIT_LIST_HEAD(&bmp->list);
5096
5097	bpl = (struct ulp_bde64 *)bmp->virt;
5098	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
5099	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
5100				     1, bpl, &request_nseg);
5101	if (!cmp) {
5102		rc = -ENOMEM;
5103		goto free_bmp;
5104	}
5105	lpfc_bsg_copy_data(cmp, &job->request_payload,
5106			   job->request_payload.payload_len, 1);
5107
5108	bpl += request_nseg;
5109	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
5110	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
5111				     bpl, &reply_nseg);
5112	if (!rmp) {
5113		rc = -ENOMEM;
5114		goto free_cmp;
5115	}
5116
5117	cmdiocbq = lpfc_sli_get_iocbq(phba);
5118	if (!cmdiocbq) {
5119		rc = -ENOMEM;
5120		goto free_rmp;
5121	}
5122
5123	cmd = &cmdiocbq->iocb;
5124	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
5125	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
5126	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
5127	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
5128	cmd->un.genreq64.bdl.bdeSize =
5129	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
5130	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
5131	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
5132	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
5133	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
5134	cmd->ulpBdeCount = 1;
5135	cmd->ulpClass = CLASS3;
5136	cmd->ulpOwner = OWN_CHIP;
5137	cmd->ulpLe = 1; /* Limited Edition */
5138	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
5139	cmdiocbq->vport = phba->pport;
5140	/* We want the firmware to timeout before we do */
5141	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
5142	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
5143	cmdiocbq->context1 = dd_data;
5144	cmdiocbq->context2 = cmp;
5145	cmdiocbq->context3 = bmp;
5146	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
5147		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5148		cmd->ulpPU = MENLO_PU; /* 3 */
5149		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
5150		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
5151	} else {
5152		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
5153		cmd->ulpPU = 1;
5154		cmd->un.ulpWord[4] = 0;
5155		cmd->ulpContext = menlo_cmd->xri;
5156	}
5157
5158	dd_data->type = TYPE_MENLO;
5159	dd_data->set_job = job;
5160	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
5161	dd_data->context_un.menlo.rmp = rmp;
5162	job->dd_data = dd_data;
5163
5164	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
5165		MENLO_TIMEOUT - 5);
5166	if (rc == IOCB_SUCCESS)
5167		return 0; /* done for now */
5168
5169	lpfc_sli_release_iocbq(phba, cmdiocbq);
5170
5171free_rmp:
5172	lpfc_free_bsg_buffers(phba, rmp);
5173free_cmp:
5174	lpfc_free_bsg_buffers(phba, cmp);
5175free_bmp:
5176	if (bmp->virt)
5177		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5178	kfree(bmp);
5179free_dd:
5180	kfree(dd_data);
5181no_dd_data:
5182	/* make error code available to userspace */
5183	job->reply->result = rc;
5184	job->dd_data = NULL;
5185	return rc;
5186}
5187
5188/**
5189 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5190 * @job: fc_bsg_job to handle
5191 **/
5192static int
5193lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
5194{
5195	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
5196	int rc;
5197
5198	switch (command) {
5199	case LPFC_BSG_VENDOR_SET_CT_EVENT:
5200		rc = lpfc_bsg_hba_set_event(job);
5201		break;
5202	case LPFC_BSG_VENDOR_GET_CT_EVENT:
5203		rc = lpfc_bsg_hba_get_event(job);
5204		break;
5205	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5206		rc = lpfc_bsg_send_mgmt_rsp(job);
5207		break;
5208	case LPFC_BSG_VENDOR_DIAG_MODE:
5209		rc = lpfc_bsg_diag_loopback_mode(job);
5210		break;
5211	case LPFC_BSG_VENDOR_DIAG_MODE_END:
5212		rc = lpfc_sli4_bsg_diag_mode_end(job);
5213		break;
5214	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5215		rc = lpfc_bsg_diag_loopback_run(job);
5216		break;
5217	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5218		rc = lpfc_sli4_bsg_link_diag_test(job);
5219		break;
5220	case LPFC_BSG_VENDOR_GET_MGMT_REV:
5221		rc = lpfc_bsg_get_dfc_rev(job);
5222		break;
5223	case LPFC_BSG_VENDOR_MBOX:
5224		rc = lpfc_bsg_mbox_cmd(job);
5225		break;
5226	case LPFC_BSG_VENDOR_MENLO_CMD:
5227	case LPFC_BSG_VENDOR_MENLO_DATA:
5228		rc = lpfc_menlo_cmd(job);
5229		break;
5230	default:
5231		rc = -EINVAL;
5232		job->reply->reply_payload_rcv_len = 0;
5233		/* make error code available to userspace */
5234		job->reply->result = rc;
5235		break;
5236	}
5237
5238	return rc;
5239}
5240
5241/**
5242 * lpfc_bsg_request - handle a bsg request from the FC transport
5243 * @job: fc_bsg_job to handle
5244 **/
5245int
5246lpfc_bsg_request(struct fc_bsg_job *job)
5247{
5248	uint32_t msgcode;
5249	int rc;
5250
5251	msgcode = job->request->msgcode;
5252	switch (msgcode) {
5253	case FC_BSG_HST_VENDOR:
5254		rc = lpfc_bsg_hst_vendor(job);
5255		break;
5256	case FC_BSG_RPT_ELS:
5257		rc = lpfc_bsg_rport_els(job);
5258		break;
5259	case FC_BSG_RPT_CT:
5260		rc = lpfc_bsg_send_mgmt_cmd(job);
5261		break;
5262	default:
5263		rc = -EINVAL;
5264		job->reply->reply_payload_rcv_len = 0;
5265		/* make error code available to userspace */
5266		job->reply->result = rc;
5267		break;
5268	}
5269
5270	return rc;
5271}
5272
5273/**
5274 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
5275 * @job: fc_bsg_job that has timed out
5276 *
5277 * This function just aborts the job's IOCB.  The aborted IOCB will return to
5278 * the waiting function which will handle passing the error back to userspace
5279 **/
5280int
5281lpfc_bsg_timeout(struct fc_bsg_job *job)
5282{
5283	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
5284	struct lpfc_hba *phba = vport->phba;
5285	struct lpfc_iocbq *cmdiocb;
5286	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5287	struct bsg_job_data *dd_data;
5288	unsigned long flags;
5289	int rc = 0;
5290	LIST_HEAD(completions);
5291	struct lpfc_iocbq *check_iocb, *next_iocb;
5292
5293	/* if job's driver data is NULL, the command completed or is in the
5294	 * the process of completing.  In this case, return status to request
5295	 * so the timeout is retried.  This avoids double completion issues
5296	 * and the request will be pulled off the timer queue when the
5297	 * command's completion handler executes.  Otherwise, prevent the
5298	 * command's completion handler from executing the job done callback
5299	 * and continue processing to abort the outstanding the command.
5300	 */
5301
5302	spin_lock_irqsave(&phba->ct_ev_lock, flags);
5303	dd_data = (struct bsg_job_data *)job->dd_data;
5304	if (dd_data) {
5305		dd_data->set_job = NULL;
5306		job->dd_data = NULL;
5307	} else {
5308		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5309		return -EAGAIN;
5310	}
5311
5312	switch (dd_data->type) {
5313	case TYPE_IOCB:
5314		/* Check to see if IOCB was issued to the port or not. If not,
5315		 * remove it from the txq queue and call cancel iocbs.
5316		 * Otherwise, call abort iotag
5317		 */
5318		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
5319		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5320
5321		spin_lock_irqsave(&phba->hbalock, flags);
5322		/* make sure the I/O abort window is still open */
5323		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
5324			spin_unlock_irqrestore(&phba->hbalock, flags);
5325			return -EAGAIN;
5326		}
5327		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5328					 list) {
5329			if (check_iocb == cmdiocb) {
5330				list_move_tail(&check_iocb->list, &completions);
5331				break;
5332			}
5333		}
5334		if (list_empty(&completions))
5335			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5336		spin_unlock_irqrestore(&phba->hbalock, flags);
5337		if (!list_empty(&completions)) {
5338			lpfc_sli_cancel_iocbs(phba, &completions,
5339					      IOSTAT_LOCAL_REJECT,
5340					      IOERR_SLI_ABORTED);
5341		}
5342		break;
5343
5344	case TYPE_EVT:
5345		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5346		break;
5347
5348	case TYPE_MBOX:
5349		/* Update the ext buf ctx state if needed */
5350
5351		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5352			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5353		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5354		break;
5355	case TYPE_MENLO:
5356		/* Check to see if IOCB was issued to the port or not. If not,
5357		 * remove it from the txq queue and call cancel iocbs.
5358		 * Otherwise, call abort iotag.
5359		 */
5360		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
5361		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5362
5363		spin_lock_irqsave(&phba->hbalock, flags);
5364		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5365					 list) {
5366			if (check_iocb == cmdiocb) {
5367				list_move_tail(&check_iocb->list, &completions);
5368				break;
5369			}
5370		}
5371		if (list_empty(&completions))
5372			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5373		spin_unlock_irqrestore(&phba->hbalock, flags);
5374		if (!list_empty(&completions)) {
5375			lpfc_sli_cancel_iocbs(phba, &completions,
5376					      IOSTAT_LOCAL_REJECT,
5377					      IOERR_SLI_ABORTED);
5378		}
5379		break;
5380	default:
5381		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5382		break;
5383	}
5384
5385	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
5386	 * otherwise an error message will be displayed on the console
5387	 * so always return success (zero)
5388	 */
5389	return rc;
5390}
5391