/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);

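/**
 * fnic_handle_link() - handle link status change events.
 * @work:	the link_work member of the fnic instance.
 *
 * Compares the cached link state with the state reported by the firmware
 * and notifies the FCoE controller of link up/down transitions.  When the
 * adapter is FIP capable, a link up (re)starts FIP VLAN discovery instead
 * of reporting link up directly.
 */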
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fc_trace_set_data(fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN->DOWN",
				strlen("Link Status: DOWN->DOWN"));
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no,
					FNIC_FC_LE,
					"Link Status: UP_DOWN_UP",
					strlen("Link Status: UP_DOWN_UP"));
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fc_trace_set_data(
						fnic->lport->host->host_no,
						FNIC_FC_LE,
						"Link Status: UP_DOWN_UP_VLAN",
						strlen(
						"Link Status: UP_DOWN_UP_VLAN")
						);
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no, FNIC_FC_LE,
					"Link Status: UP_UP",
					strlen("Link Status: UP_UP"));
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fc_trace_set_data(
				fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
				strlen("Link Status: DOWN_UP_VLAN"));
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fnic_fc_trace_set_data(
			fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: UP_DOWN",
			strlen("Link Status: UP_DOWN"));
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				"deleting fip-timer during link-down\n");
			del_timer_sync(&fnic->fip_timer);
		}
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}

/*
 * This function passes incoming fabric frames to libFC.
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

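/**
 * fnic_fcoe_evlist_free() - free all queued fnic events.
 * @fnic:	fnic instance.
 *
 * Drains fnic->evlist under the fnic lock, freeing each pending event
 * without processing it.
 */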
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

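/**
 * fnic_handle_event() - work handler for queued fnic events.
 * @work:	the event_work member of the fnic instance.
 *
 * Walks fnic->evlist under the fnic lock and dispatches each event
 * (VLAN discovery, FCF discovery).  Events are discarded when link
 * events are stopped, and processing is deferred while the fnic is in
 * a transitional state.
 */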
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - check if a received FIP FLOGI frame is rejected.
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is rejected with unsupported cmd with
 * insufficient resource els explanation.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					 struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	enum fip_desc_type els_dtype = 0;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t els_len = 0;
	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els_len = dlen - sizeof(*els);
		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);
		els_dtype = desc->fip_dtype;

		if (!fh)
			return 0;

		/*
		 * ELS command code, reason and explanation should be = Reject,
		 * unsupported command and insufficient resource
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				  "Flogi Request Rejected by Switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
				"Flogi Request Accepted by Switch\n");
	}
	return 0;
}

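/**
 * fnic_fcoe_send_vlan_req() - send a FIP VLAN discovery request.
 * @fnic:	fnic instance.
 *
 * Resets any previously discovered VLANs, builds a FIP VLAN request
 * addressed to the all-FCFs multicast MAC and hands it to the FCoE
 * controller's send routine.  Arms the FIP timer so the request is
 * retried if no response arrives.
 */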
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);
	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		  "Sending VLAN request...\n");
	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there is no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}

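/**
 * fnic_fcoe_process_vlan_resp() - process a FIP VLAN discovery response.
 * @fnic:	fnic instance.
 * @skb:	received FIP frame, with the Ethernet header already removed.
 *
 * Parses the VLAN descriptors in the response into fnic->vlans, selects
 * the first VLAN for solicitation and arms the FIP timer.  If the
 * response carries no VLAN descriptors, discovery is retried from the
 * timer.
 */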
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		  "Received VLAN response...\n");

	fiph = (struct fip_header *) skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		  "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		  ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				  "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
							flags);
				goto out;
			}
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			  "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}

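/**
 * fnic_fcoe_start_fcf_disc() - start FCF discovery on the selected VLAN.
 * @fnic:	fnic instance.
 *
 * Programs the first discovered VLAN into the hardware, marks it as sent
 * and asks the FCoE controller to begin solicitation, arming the FIP
 * timer for retry.
 */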
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

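/**
 * fnic_fcoe_vlan_check() - validate the currently selected VLAN.
 * @fnic:	fnic instance.
 * @flag:	FIP flags from the received frame (currently unused).
 *
 * Returns 0 if the first VLAN on the list is in use (marking a VLAN in
 * the SENT state as USED), or -EINVAL if no usable VLAN exists.
 */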
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}

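/**
 * fnic_event_enq() - queue an event for the event work handler.
 * @fnic:	fnic instance.
 * @ev:		event to queue.
 *
 * Allocates an event atomically, appends it to fnic->evlist under the
 * fnic lock and schedules event_work to process it.  The event is
 * silently dropped if the allocation fails.
 */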
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}

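/**
 * fnic_fcoe_handle_fip_frame() - pre-process a received FIP frame.
 * @fnic:	fnic instance.
 * @skb:	received FIP frame, Ethernet header already removed.
 *
 * Consumes FIP VLAN responses (returning 0) and turns clear-virtual-link
 * requests into a VLAN rediscovery event.  Returns a negative value on
 * invalid input; any other frame returns 1 so that the caller passes it
 * on to the FCoE controller.
 */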
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
		/* set the vlan as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}

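/**
 * fnic_handle_fip_frame() - work handler for received FIP frames.
 * @work:	the fip_frame_work member of the fnic instance.
 *
 * Dequeues FIP frames from fnic->fip_frame_queue, pre-processes them
 * and passes the remaining ones to the FCoE controller.  A rejected
 * FLOGI triggers a link down and a fresh VLAN discovery.
 */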
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If the FLOGI was rejected, clear all FCFs and
			 * restart VLAN discovery from scratch.
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				atomic64_inc(
					&fnic_stats->vlan_stats.flogi_rejects);
				shost_printk(KERN_INFO, fnic->lport->host,
					  "Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic:	fnic instance.
 * @skb:	Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode, Enable FIP "
					"using UCSM\n");
			goto drop;
		}
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic:	fnic instance.
 * @new:	newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport:	local port.
 * @new:	newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport:	local port.
 * @port_id:	assigned FC_ID.
 * @fp:		received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

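/**
 * fnic_rq_cmpl_frame_recv() - handle one received-frame RQ completion.
 * @rq:		receive queue the buffer was posted on.
 * @cq_desc:	completion descriptor.
 * @buf:	the RQ buffer that completed.
 * @skipped:	unused.
 * @opaque:	unused.
 *
 * Decodes the completion (FCP or raw Ethernet), drops frames with
 * FCS/CRC or encapsulation errors, and queues good frames to the frame
 * work queue; FIP frames are diverted inside fnic_import_rq_eth_pkt().
 */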
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error.  dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
					(char *)skb->data, skb->len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

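/**
 * fnic_rq_cmpl_handler_cont() - per-descriptor RQ completion callback.
 * @vdev:		vnic device.
 * @cq_desc:		completion descriptor.
 * @type:		completion type.
 * @q_number:		receive queue number.
 * @completed_index:	index of the completed descriptor.
 * @opaque:		unused.
 */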
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

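/**
 * fnic_rq_cmpl_handler() - service receive-queue completions.
 * @fnic:		fnic instance.
 * @rq_work_to_do:	completion budget per receive queue.
 *
 * Services each RQ's completion queue and refills the RQ with fresh
 * frame buffers for every queue that did work.  Returns the total
 * number of completions processed.
 */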
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;
	int r;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);

	r = pci_dma_mapping_error(fnic->pdev, pa);
	if (r) {
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		goto free_skb;
	}

	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;

free_skb:
	kfree_skb(skb);
	return r;
}

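/**
 * fnic_free_rq_buf() - release a posted receive buffer.
 * @rq:		receive queue the buffer belongs to.
 * @buf:	buffer to unmap and free.
 */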
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip:	fcoe_ctlr instance.
 * @skb:	Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;
	int r;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
				sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	} else {
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	r = pci_dma_mapping_error(fnic->pdev, pa);
	if (r) {
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		goto free_skb;
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq))
		goto irq_restore;

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return;

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
free_skb:
	kfree_skb(skb);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	ret = pci_dma_mapping_error(fnic->pdev, pa);
	if (ret) {
		printk(KERN_ERR "DMA map failed with error %d\n", ret);
		goto free_skb_on_err;
	}

	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
				(char *)eth_hdr, tot_len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto irq_restore;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

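/**
 * fnic_wq_complete_frame_send() - reclaim a transmitted frame buffer.
 * @wq:		work queue the frame was sent on.
 * @cq_desc:	completion descriptor.
 * @buf:	the WQ buffer that completed.
 * @opaque:	unused.
 */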
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

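/**
 * fnic_wq_cmpl_handler() - service raw work-queue completions.
 * @fnic:	fnic instance.
 * @work_to_do:	completion budget per work queue.
 *
 * Returns the total number of send completions processed across all
 * raw work queues.
 */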
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

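/**
 * fnic_free_wq_buf() - release an unsent transmit buffer.
 * @wq:		work queue the buffer belongs to.
 * @buf:	buffer to unmap and free.
 */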
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

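/**
 * fnic_fcoe_reset_vlans() - discard all discovered FCoE VLANs.
 * @fnic:	fnic instance.
 */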
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * Indicate a link down to fcoe so that all FCFs are freed.
	 * This might not be required, since we do this before sending
	 * a VLAN discovery request.
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

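/**
 * fnic_handle_fip_timer() - FIP retry timer handler.
 * @fnic:	fnic instance.
 *
 * Re-triggers VLAN discovery when no VLANs are known or all have failed,
 * and re-solicits (or moves on to the next VLAN) when a solicitation on
 * the current VLAN has gone unanswered too many times.
 */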
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_ST_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		/* no vlans available, try again */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			  "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		  "fip_timer: vlan %d state %d sol_count %d\n",
		  vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			  "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		/* if all vlans are in failed state, restart vlan disc */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			  "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove it from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				  "Dequeue this VLAN ID %d from list\n",
				  vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
							flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					  "fip_timer: vlan list empty, "
					  "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
							list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		/* update the vlan entry while still holding vlans_lock */
		vlan->sol_count++;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}