This source file includes the following definitions:
- fnic_handle_link
- fnic_handle_frame
- fnic_fcoe_evlist_free
- fnic_handle_event
- is_fnic_fip_flogi_reject
- fnic_fcoe_send_vlan_req
- fnic_fcoe_process_vlan_resp
- fnic_fcoe_start_fcf_disc
- fnic_fcoe_vlan_check
- fnic_event_enq
- fnic_fcoe_handle_fip_frame
- fnic_handle_fip_frame
- fnic_import_rq_eth_pkt
- fnic_update_mac_locked
- fnic_update_mac
- fnic_set_port_id
- fnic_rq_cmpl_frame_recv
- fnic_rq_cmpl_handler_cont
- fnic_rq_cmpl_handler
- fnic_alloc_rq_frame
- fnic_free_rq_buf
- fnic_eth_send
- fnic_send_frame
- fnic_send
- fnic_flush_tx
- fnic_set_eth_mode
- fnic_wq_complete_frame_send
- fnic_wq_cmpl_handler_cont
- fnic_wq_cmpl_handler
- fnic_free_wq_buf
- fnic_fcoe_reset_vlans
- fnic_handle_fip_timer
18 #include <linux/errno.h>
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/skbuff.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/if_ether.h>
25 #include <linux/if_vlan.h>
26 #include <linux/workqueue.h>
27 #include <scsi/fc/fc_fip.h>
28 #include <scsi/fc/fc_els.h>
29 #include <scsi/fc/fc_fcoe.h>
30 #include <scsi/fc_frame.h>
31 #include <scsi/libfc.h>
32 #include "fnic_io.h"
33 #include "fnic.h"
34 #include "fnic_fip.h"
35 #include "cq_enet_desc.h"
36 #include "cq_exch_desc.h"
37
38 static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
39 struct workqueue_struct *fnic_fip_queue;
40 struct workqueue_struct *fnic_event_queue;
41
42 static void fnic_set_eth_mode(struct fnic *);
43 static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
44 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
45 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
46 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
47 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
48
49 void fnic_handle_link(struct work_struct *work)
50 {
51 struct fnic *fnic = container_of(work, struct fnic, link_work);
52 unsigned long flags;
53 int old_link_status;
54 u32 old_link_down_cnt;
55 u64 old_port_speed, new_port_speed;
56
57 spin_lock_irqsave(&fnic->fnic_lock, flags);
58
59 if (fnic->stop_rx_link_events) {
60 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
61 return;
62 }
63
64 old_link_down_cnt = fnic->link_down_cnt;
65 old_link_status = fnic->link_status;
66 old_port_speed = atomic64_read(
67 &fnic->fnic_stats.misc_stats.current_port_speed);
68
69 fnic->link_status = vnic_dev_link_status(fnic->vdev);
70 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
71
72 new_port_speed = vnic_dev_port_speed(fnic->vdev);
73 atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
74 new_port_speed);
75 if (old_port_speed != new_port_speed)
76 shost_printk(KERN_INFO, fnic->lport->host,
77 "Current vnic speed set to : %llu\n",
78 new_port_speed);
79
80 switch (vnic_dev_port_speed(fnic->vdev)) {
81 case DCEM_PORTSPEED_10G:
82 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
83 fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
84 break;
85 case DCEM_PORTSPEED_20G:
86 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
87 fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
88 break;
89 case DCEM_PORTSPEED_25G:
90 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
91 fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
92 break;
93 case DCEM_PORTSPEED_40G:
94 case DCEM_PORTSPEED_4x10G:
95 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
96 fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
97 break;
98 case DCEM_PORTSPEED_100G:
99 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
100 fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
101 break;
102 default:
103 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
104 fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
105 break;
106 }
107
108 if (old_link_status == fnic->link_status) {
109 if (!fnic->link_status) {
110 /* DOWN -> DOWN */
111 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
112 fnic_fc_trace_set_data(fnic->lport->host->host_no,
113 FNIC_FC_LE, "Link Status: DOWN->DOWN",
114 strlen("Link Status: DOWN->DOWN"));
115 } else {
116 if (old_link_down_cnt != fnic->link_down_cnt) {
117 /* UP -> DOWN -> UP */
118 fnic->lport->host_stats.link_failure_count++;
119 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
120 fnic_fc_trace_set_data(
121 fnic->lport->host->host_no,
122 FNIC_FC_LE,
123 "Link Status:UP_DOWN_UP",
124 strlen("Link Status:UP_DOWN_UP")
125 );
126 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
127 "link down\n");
128 fcoe_ctlr_link_down(&fnic->ctlr);
129 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
130 /* start FCoE VLAN discovery */
131 fnic_fc_trace_set_data(
132 fnic->lport->host->host_no,
133 FNIC_FC_LE,
134 "Link Status: UP_DOWN_UP_VLAN",
135 strlen(
136 "Link Status: UP_DOWN_UP_VLAN")
137 );
138 fnic_fcoe_send_vlan_req(fnic);
139 return;
140 }
141 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
142 "link up\n");
143 fcoe_ctlr_link_up(&fnic->ctlr);
144 } else {
145 /* UP -> UP */
146 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
147 fnic_fc_trace_set_data(
148 fnic->lport->host->host_no, FNIC_FC_LE,
149 "Link Status: UP_UP",
150 strlen("Link Status: UP_UP"));
151 }
152 }
153 } else if (fnic->link_status) {
154 /* DOWN -> UP */
155 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
156 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
157 /* start FCoE VLAN discovery */
158 fnic_fc_trace_set_data(
159 fnic->lport->host->host_no,
160 FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
161 strlen("Link Status: DOWN_UP_VLAN"));
162 fnic_fcoe_send_vlan_req(fnic);
163 return;
164 }
165 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
166 fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
167 "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
168 fcoe_ctlr_link_up(&fnic->ctlr);
169 } else {
170 /* UP -> DOWN */
171 fnic->lport->host_stats.link_failure_count++;
172 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
173 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
174 fnic_fc_trace_set_data(
175 fnic->lport->host->host_no, FNIC_FC_LE,
176 "Link Status: UP_DOWN",
177 strlen("Link Status: UP_DOWN"));
178 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
179 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
180 "deleting fip-timer during link-down\n");
181 del_timer_sync(&fnic->fip_timer);
182 }
183 fcoe_ctlr_link_down(&fnic->ctlr);
184 }
185
186 }
187
188
189 /* Work handler: drain fnic->frame_queue and pass each FC frame to libfc. */
190
191 void fnic_handle_frame(struct work_struct *work)
192 {
193 struct fnic *fnic = container_of(work, struct fnic, frame_work);
194 struct fc_lport *lp = fnic->lport;
195 unsigned long flags;
196 struct sk_buff *skb;
197 struct fc_frame *fp;
198
199 while ((skb = skb_dequeue(&fnic->frame_queue))) {
200
201 spin_lock_irqsave(&fnic->fnic_lock, flags);
202 if (fnic->stop_rx_link_events) {
203 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
204 dev_kfree_skb(skb);
205 return;
206 }
207 fp = (struct fc_frame *)skb;
208
209 /*
210 * If the fnic is in a transitional state, re-queue the frame and
211 * return; the queue is serviced again once a stable state is reached.
212 */
213 if (fnic->state != FNIC_IN_FC_MODE &&
214 fnic->state != FNIC_IN_ETH_MODE) {
215 skb_queue_head(&fnic->frame_queue, skb);
216 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
217 return;
218 }
219 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
220
221 fc_exch_recv(lp, fp);
222 }
223 }
224
225 void fnic_fcoe_evlist_free(struct fnic *fnic)
226 {
227 struct fnic_event *fevt = NULL;
228 struct fnic_event *next = NULL;
229 unsigned long flags;
230
231 spin_lock_irqsave(&fnic->fnic_lock, flags);
232 if (list_empty(&fnic->evlist)) {
233 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
234 return;
235 }
236
237 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
238 list_del(&fevt->list);
239 kfree(fevt);
240 }
241 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
242 }
243
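/*
 * Work handler: drain fnic->evlist and dispatch each queued event, e.g.
 * starting FIP VLAN discovery or FCF discovery.
 */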
244 void fnic_handle_event(struct work_struct *work)
245 {
246 struct fnic *fnic = container_of(work, struct fnic, event_work);
247 struct fnic_event *fevt = NULL;
248 struct fnic_event *next = NULL;
249 unsigned long flags;
250
251 spin_lock_irqsave(&fnic->fnic_lock, flags);
252 if (list_empty(&fnic->evlist)) {
253 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
254 return;
255 }
256
257 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
258 if (fnic->stop_rx_link_events) {
259 list_del(&fevt->list);
260 kfree(fevt);
261 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
262 return;
263 }
264
265 /*
266 * Stop processing events while the fnic is in a transitional state.
267 */
268 if (fnic->state != FNIC_IN_FC_MODE &&
269 fnic->state != FNIC_IN_ETH_MODE) {
270 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
271 return;
272 }
273
274 list_del(&fevt->list);
275 switch (fevt->event) {
276 case FNIC_EVT_START_VLAN_DISC:
277 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
278 fnic_fcoe_send_vlan_req(fnic);
279 spin_lock_irqsave(&fnic->fnic_lock, flags);
280 break;
281 case FNIC_EVT_START_FCF_DISC:
282 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
283 "Start FCF Discovery\n");
284 fnic_fcoe_start_fcf_disc(fnic);
285 break;
286 default:
287 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
288 "Unknown event 0x%x\n", fevt->event);
289 break;
290 }
291 kfree(fevt);
292 }
293 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
294 }
295
296
297 /*
298 * Check whether a received FIP Link Service reply encapsulates an
299 * FLOGI that the switch rejected (ELS_LS_RJT).
300 *
301 * Returns 1 if the encapsulated FLOGI was rejected, 0 otherwise.
302 */
303
304 static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
305 struct sk_buff *skb)
306 {
307 struct fc_lport *lport = fip->lp;
308 struct fip_header *fiph;
309 struct fc_frame_header *fh = NULL;
310 struct fip_desc *desc;
311 struct fip_encaps *els;
312 enum fip_desc_type els_dtype = 0;
313 u16 op;
314 u8 els_op;
315 u8 sub;
316
317 size_t els_len = 0;
318 size_t rlen;
319 size_t dlen = 0;
320
321 if (skb_linearize(skb))
322 return 0;
323
324 if (skb->len < sizeof(*fiph))
325 return 0;
326
327 fiph = (struct fip_header *)skb->data;
328 op = ntohs(fiph->fip_op);
329 sub = fiph->fip_subcode;
330
331 if (op != FIP_OP_LS)
332 return 0;
333
334 if (sub != FIP_SC_REP)
335 return 0;
336
337 rlen = ntohs(fiph->fip_dl_len) * 4;
338 if (rlen + sizeof(*fiph) > skb->len)
339 return 0;
340
341 desc = (struct fip_desc *)(fiph + 1);
342 dlen = desc->fip_dlen * FIP_BPW;
343
344 if (desc->fip_dtype == FIP_DT_FLOGI) {
345
346 if (dlen < sizeof(*els) + sizeof(*fh) + 1)
347 return 0;
348
349 els_len = dlen - sizeof(*els);
350 els = (struct fip_encaps *)desc;
351 fh = (struct fc_frame_header *)(els + 1);
352 els_dtype = desc->fip_dtype;
353
354 if (!fh)
355 return 0;
356
357 /*
358 * The ELS command code is the first byte of the payload following
359 * the encapsulated FC frame header.
360 */
361 els_op = *(u8 *)(fh + 1);
362 if (els_op == ELS_LS_RJT) {
363 shost_printk(KERN_INFO, lport->host,
364 "Flogi Request Rejected by Switch\n");
365 return 1;
366 }
367 shost_printk(KERN_INFO, lport->host,
368 "Flogi Request Accepted by Switch\n");
369 }
370 return 0;
371 }
372
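/*
 * Reset any previously discovered VLANs, build a FIP VLAN discovery
 * request addressed to ALL-FCF-MACs, transmit it and arm the FIP timer
 * so the request can be retried.
 */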
373 static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
374 {
375 struct fcoe_ctlr *fip = &fnic->ctlr;
376 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
377 struct sk_buff *skb;
378 char *eth_fr;
379 int fr_len;
380 struct fip_vlan *vlan;
381 u64 vlan_tov;
382
383 fnic_fcoe_reset_vlans(fnic);
384 fnic->set_vlan(fnic, 0);
385
386 if (printk_ratelimit())
387 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
388 "Sending VLAN request...\n");
389
390 skb = dev_alloc_skb(sizeof(struct fip_vlan));
391 if (!skb)
392 return;
393
394 fr_len = sizeof(*vlan);
395 eth_fr = (char *)skb->data;
396 vlan = (struct fip_vlan *)eth_fr;
397
398 memset(vlan, 0, sizeof(*vlan));
399 memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
400 memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
401 vlan->eth.h_proto = htons(ETH_P_FIP);
402
403 vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
404 vlan->fip.fip_op = htons(FIP_OP_VLAN);
405 vlan->fip.fip_subcode = FIP_SC_VL_REQ;
406 vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
407
408 vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
409 vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
410 memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
411
412 vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
413 vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
414 put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
415 atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
416
417 skb_put(skb, sizeof(*vlan));
418 skb->protocol = htons(ETH_P_FIP);
419 skb_reset_mac_header(skb);
420 skb_reset_network_header(skb);
421 fip->send(fip, skb);
422
423 /* Arm the FIP timer so the VLAN request is retried if nothing answers. */
424 vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
425 mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
426 }
427
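/*
 * Parse a FIP VLAN notification: record every advertised VLAN ID on
 * fnic->vlans, select the first entry and start FCF solicitation on it.
 */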
428 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
429 {
430 struct fcoe_ctlr *fip = &fnic->ctlr;
431 struct fip_header *fiph;
432 struct fip_desc *desc;
433 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
434 u16 vid;
435 size_t rlen;
436 size_t dlen;
437 struct fcoe_vlan *vlan;
438 u64 sol_time;
439 unsigned long flags;
440
441 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
442 "Received VLAN response...\n");
443
444 fiph = (struct fip_header *) skb->data;
445
446 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
447 "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
448 ntohs(fiph->fip_op), fiph->fip_subcode);
449
450 rlen = ntohs(fiph->fip_dl_len) * 4;
451 fnic_fcoe_reset_vlans(fnic);
452 spin_lock_irqsave(&fnic->vlans_lock, flags);
453 desc = (struct fip_desc *)(fiph + 1);
454 while (rlen > 0) {
455 dlen = desc->fip_dlen * FIP_BPW;
456 switch (desc->fip_dtype) {
457 case FIP_DT_VLAN:
458 vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
459 shost_printk(KERN_INFO, fnic->lport->host,
460 "process_vlan_resp: FIP VLAN %d\n", vid);
461 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
462 if (!vlan) {
463
464 spin_unlock_irqrestore(&fnic->vlans_lock,
465 flags);
466 goto out;
467 }
468 vlan->vid = vid & 0x0fff;
469 vlan->state = FIP_VLAN_AVAIL;
470 list_add_tail(&vlan->list, &fnic->vlans);
471 break;
472 }
473 desc = (struct fip_desc *)((char *)desc + dlen);
474 rlen -= dlen;
475 }
476
477 /* Did the response carry any VLAN descriptors at all? */
478 if (list_empty(&fnic->vlans)) {
479
480 atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
481 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
482 "No VLAN descriptors in FIP VLAN response\n");
483 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
484 goto out;
485 }
486
487 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
488 fnic->set_vlan(fnic, vlan->vid);
489 vlan->state = FIP_VLAN_SENT;
490 vlan->sol_count++;
491 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
492
493 /* Start FCF solicitation on the selected VLAN. */
494 fcoe_ctlr_link_up(fip);
495
496 sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
497 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
498 out:
499 return;
500 }
501
502 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
503 {
504 unsigned long flags;
505 struct fcoe_vlan *vlan;
506 u64 sol_time;
507
508 spin_lock_irqsave(&fnic->vlans_lock, flags);
509 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
510 fnic->set_vlan(fnic, vlan->vid);
511 vlan->state = FIP_VLAN_SENT;
512 vlan->sol_count = 1;
513 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
514
515 /* Start FCF solicitation on the currently selected VLAN. */
516 fcoe_ctlr_link_up(&fnic->ctlr);
517
518 sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
519 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
520 }
521
522 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
523 {
524 unsigned long flags;
525 struct fcoe_vlan *fvlan;
526
527 spin_lock_irqsave(&fnic->vlans_lock, flags);
528 if (list_empty(&fnic->vlans)) {
529 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
530 return -EINVAL;
531 }
532
533 fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
534 if (fvlan->state == FIP_VLAN_USED) {
535 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
536 return 0;
537 }
538
539 if (fvlan->state == FIP_VLAN_SENT) {
540 fvlan->state = FIP_VLAN_USED;
541 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
542 return 0;
543 }
544 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
545 return -EINVAL;
546 }
547
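/* Queue an event on fnic->evlist and schedule the event worker. */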
548 static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
549 {
550 struct fnic_event *fevt;
551 unsigned long flags;
552
553 fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
554 if (!fevt)
555 return;
556
557 fevt->fnic = fnic;
558 fevt->event = ev;
559
560 spin_lock_irqsave(&fnic->fnic_lock, flags);
561 list_add_tail(&fevt->list, &fnic->evlist);
562 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
563
564 schedule_work(&fnic->event_work);
565 }
566
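/*
 * Pre-process a received FIP frame: VLAN notifications are handled here
 * and a clear-virtual-link queues fresh VLAN discovery. Returns 0 when
 * the frame was consumed, a negative value on error, and a positive
 * value when the caller should pass the frame on to libfcoe.
 */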
567 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
568 {
569 struct fip_header *fiph;
570 int ret = 1;
571 u16 op;
572 u8 sub;
573
574 if (!skb || !(skb->data))
575 return -1;
576
577 if (skb_linearize(skb))
578 goto drop;
579
580 fiph = (struct fip_header *)skb->data;
581 op = ntohs(fiph->fip_op);
582 sub = fiph->fip_subcode;
583
584 if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
585 goto drop;
586
587 if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
588 goto drop;
589
590 if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
591 if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
592 goto drop;
593
594 ret = 1;
595 } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
596
597 fnic_fcoe_process_vlan_resp(fnic, skb);
598 ret = 0;
599 } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
600
601 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
602
603 ret = 1;
604 }
605 drop:
606 return ret;
607 }
608
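/*
 * Work handler: drain fnic->fip_frame_queue. Each FIP frame is first run
 * through fnic_fcoe_handle_fip_frame(); an FLOGI reject triggers a link
 * bounce and new VLAN discovery, everything else goes to fcoe_ctlr_recv().
 */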
609 void fnic_handle_fip_frame(struct work_struct *work)
610 {
611 struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
612 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
613 unsigned long flags;
614 struct sk_buff *skb;
615 struct ethhdr *eh;
616
617 while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
618 spin_lock_irqsave(&fnic->fnic_lock, flags);
619 if (fnic->stop_rx_link_events) {
620 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
621 dev_kfree_skb(skb);
622 return;
623 }
624
625 /*
626 * In a transitional state, re-queue the frame until the state settles.
627 */
628 if (fnic->state != FNIC_IN_FC_MODE &&
629 fnic->state != FNIC_IN_ETH_MODE) {
630 skb_queue_head(&fnic->fip_frame_queue, skb);
631 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
632 return;
633 }
634 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
635 eh = (struct ethhdr *)skb->data;
636 if (eh->h_proto == htons(ETH_P_FIP)) {
637 skb_pull(skb, sizeof(*eh));
638 if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
639 dev_kfree_skb(skb);
640 continue;
641 }
642
643 /*
644 * If the switch rejected our FLOGI, bounce the link and restart VLAN discovery.
645 */
646 if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
647 atomic64_inc(
648 &fnic_stats->vlan_stats.flogi_rejects);
649 shost_printk(KERN_INFO, fnic->lport->host,
650 "Trigger a Link down - VLAN Disc\n");
651 fcoe_ctlr_link_down(&fnic->ctlr);
652
653 fnic_fcoe_send_vlan_req(fnic);
654 dev_kfree_skb(skb);
655 continue;
656 }
657 fcoe_ctlr_recv(&fnic->ctlr, skb);
658 continue;
659 }
660 }
661 }
662
663
664 /*
665 * Strip the Ethernet, optional VLAN and FCoE encapsulation from a received
666 * frame. FIP frames are queued for the FIP worker; FCoE frames become fc_frames.
667 */
668 static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
669 {
670 struct fc_frame *fp;
671 struct ethhdr *eh;
672 struct fcoe_hdr *fcoe_hdr;
673 struct fcoe_crc_eof *ft;
674
675 /*
676 * Undo VLAN encapsulation if present.
677 */
678 eh = (struct ethhdr *)skb->data;
679 if (eh->h_proto == htons(ETH_P_8021Q)) {
680 memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
681 eh = skb_pull(skb, VLAN_HLEN);
682 skb_reset_mac_header(skb);
683 }
684 if (eh->h_proto == htons(ETH_P_FIP)) {
685 if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
686 printk(KERN_ERR "Dropped FIP frame, as firmware "
687 "uses non-FIP mode, Enable FIP "
688 "using UCSM\n");
689 goto drop;
690 }
691 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
692 FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
693 printk(KERN_ERR "fnic ctlr frame trace error!!!");
694 }
695 skb_queue_tail(&fnic->fip_frame_queue, skb);
696 queue_work(fnic_fip_queue, &fnic->fip_frame_work);
697 return 1;
698 }
699 if (eh->h_proto != htons(ETH_P_FCOE))
700 goto drop;
701 skb_set_network_header(skb, sizeof(*eh));
702 skb_pull(skb, sizeof(*eh));
703
704 fcoe_hdr = (struct fcoe_hdr *)skb->data;
705 if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
706 goto drop;
707
708 fp = (struct fc_frame *)skb;
709 fc_frame_init(fp);
710 fr_sof(fp) = fcoe_hdr->fcoe_sof;
711 skb_pull(skb, sizeof(struct fcoe_hdr));
712 skb_reset_transport_header(skb);
713
714 ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
715 fr_eof(fp) = ft->fcoe_eof;
716 skb_trim(skb, skb->len - sizeof(*ft));
717 return 0;
718 drop:
719 dev_kfree_skb_irq(skb);
720 return -1;
721 }
722
723
724 /*
725 * fnic_update_mac_locked() - set the data MAC address and filters.
726 * new: the newly assigned FCoE MAC address.
727 *
728 * Called with the fnic lock held.
729 */
730 void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
731 {
732 u8 *ctl = fnic->ctlr.ctl_src_addr;
733 u8 *data = fnic->data_src_addr;
734
735 if (is_zero_ether_addr(new))
736 new = ctl;
737 if (ether_addr_equal(data, new))
738 return;
739 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
740 if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
741 vnic_dev_del_addr(fnic->vdev, data);
742 memcpy(data, new, ETH_ALEN);
743 if (!ether_addr_equal(new, ctl))
744 vnic_dev_add_addr(fnic->vdev, new);
745 }
746
747
748 /*
749 * fnic_update_mac() - set the data MAC address and filters.
750 * Takes the fnic lock and calls fnic_update_mac_locked().
751 */
752 void fnic_update_mac(struct fc_lport *lport, u8 *new)
753 {
754 struct fnic *fnic = lport_priv(lport);
755
756 spin_lock_irq(&fnic->fnic_lock);
757 fnic_update_mac_locked(fnic, new);
758 spin_unlock_irq(&fnic->fnic_lock);
759 }
760
761
762 /*
763 * fnic_set_port_id() - set the fabric-assigned port ID (FC_ID).
764 *
765 * Called by libfc when a new FC_ID has been assigned, or with a
766 * port_id of 0 when the port logs out of the fabric.
767 *
768 * A zero port_id switches the interface back to Ethernet (FIP) mode.
769 * A non-zero port_id records the granted MAC from the FLOGI response,
770 * moves the fnic towards FC mode and registers the FC_ID with the
771 * firmware.
772 *
773 * fp may be NULL, for example in point-to-point mode.
774 */
775 void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
776 {
777 struct fnic *fnic = lport_priv(lport);
778 u8 *mac;
779 int ret;
780
781 FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
782 port_id, fp);
783
784
785 /*
786 * A zero FC_ID means we have logged out: go back to Ethernet mode.
787 */
788 if (!port_id) {
789 fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
790 fnic_set_eth_mode(fnic);
791 return;
792 }
793
794 if (fp) {
795 mac = fr_cb(fp)->granted_mac;
796 if (is_zero_ether_addr(mac)) {
797
798 fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
799 }
800 fnic_update_mac(lport, mac);
801 }
802
803
804 spin_lock_irq(&fnic->fnic_lock);
805 if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
806 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
807 else {
808 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
809 "Unexpected fnic state %s while"
810 " processing flogi resp\n",
811 fnic_state_to_str(fnic->state));
812 spin_unlock_irq(&fnic->fnic_lock);
813 return;
814 }
815 spin_unlock_irq(&fnic->fnic_lock);
816
817
818 /*
819 * Ask the firmware to register the new FC_ID (FLOGI registration).
820 */
821 ret = fnic_flogi_reg_handler(fnic, port_id);
822
823 if (ret < 0) {
824 spin_lock_irq(&fnic->fnic_lock);
825 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
826 fnic->state = FNIC_IN_ETH_MODE;
827 spin_unlock_irq(&fnic->fnic_lock);
828 }
829 }
830
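/*
 * Receive completion handler: unmap the buffer, decode the FCP or enet
 * completion, drop frames with CRC or encapsulation errors and queue good
 * FC frames for fnic_handle_frame().
 */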
831 static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
832 *cq_desc, struct vnic_rq_buf *buf,
833 int skipped __attribute__((unused)),
834 void *opaque)
835 {
836 struct fnic *fnic = vnic_dev_priv(rq->vdev);
837 struct sk_buff *skb;
838 struct fc_frame *fp;
839 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
840 unsigned int eth_hdrs_stripped;
841 u8 type, color, eop, sop, ingress_port, vlan_stripped;
842 u8 fcoe = 0, fcoe_sof, fcoe_eof;
843 u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
844 u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
845 u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
846 u8 fcs_ok = 1, packet_error = 0;
847 u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
848 u32 rss_hash;
849 u16 exchange_id, tmpl;
850 u8 sof = 0;
851 u8 eof = 0;
852 u32 fcp_bytes_written = 0;
853 unsigned long flags;
854
855 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
856 DMA_FROM_DEVICE);
857 skb = buf->os_buf;
858 fp = (struct fc_frame *)skb;
859 buf->os_buf = NULL;
860
861 cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
862 if (type == CQ_DESC_TYPE_RQ_FCP) {
863 cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
864 &type, &color, &q_number, &completed_index,
865 &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
866 &tmpl, &fcp_bytes_written, &sof, &eof,
867 &ingress_port, &packet_error,
868 &fcoe_enc_error, &fcs_ok, &vlan_stripped,
869 &vlan);
870 eth_hdrs_stripped = 1;
871 skb_trim(skb, fcp_bytes_written);
872 fr_sof(fp) = sof;
873 fr_eof(fp) = eof;
874
875 } else if (type == CQ_DESC_TYPE_RQ_ENET) {
876 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
877 &type, &color, &q_number, &completed_index,
878 &ingress_port, &fcoe, &eop, &sop,
879 &rss_type, &csum_not_calc, &rss_hash,
880 &bytes_written, &packet_error,
881 &vlan_stripped, &vlan, &checksum,
882 &fcoe_sof, &fcoe_fc_crc_ok,
883 &fcoe_enc_error, &fcoe_eof,
884 &tcp_udp_csum_ok, &udp, &tcp,
885 &ipv4_csum_ok, &ipv6, &ipv4,
886 &ipv4_fragment, &fcs_ok);
887 eth_hdrs_stripped = 0;
888 skb_trim(skb, bytes_written);
889 if (!fcs_ok) {
890 atomic64_inc(&fnic_stats->misc_stats.frame_errors);
891 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
892 "fcs error. dropping packet.\n");
893 goto drop;
894 }
895 if (fnic_import_rq_eth_pkt(fnic, skb))
896 return;
897
898 } else {
899
900 shost_printk(KERN_ERR, fnic->lport->host,
901 "fnic rq_cmpl wrong cq type x%x\n", type);
902 goto drop;
903 }
904
905 if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
906 atomic64_inc(&fnic_stats->misc_stats.frame_errors);
907 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
908 "fnic rq_cmpl fcoe x%x fcsok x%x"
909 " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
910 " x%x\n",
911 fcoe, fcs_ok, packet_error,
912 fcoe_fc_crc_ok, fcoe_enc_error);
913 goto drop;
914 }
915
916 spin_lock_irqsave(&fnic->fnic_lock, flags);
917 if (fnic->stop_rx_link_events) {
918 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
919 goto drop;
920 }
921 fr_dev(fp) = fnic->lport;
922 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
923 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
924 (char *)skb->data, skb->len)) != 0) {
925 printk(KERN_ERR "fnic ctlr frame trace error!!!");
926 }
927
928 skb_queue_tail(&fnic->frame_queue, skb);
929 queue_work(fnic_event_queue, &fnic->frame_work);
930
931 return;
932 drop:
933 dev_kfree_skb_irq(skb);
934 }
935
936 static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
937 struct cq_desc *cq_desc, u8 type,
938 u16 q_number, u16 completed_index,
939 void *opaque)
940 {
941 struct fnic *fnic = vnic_dev_priv(vdev);
942
943 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
944 VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
945 NULL);
946 return 0;
947 }
948
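/*
 * Service up to rq_work_to_do receive completions on every RQ and refill
 * the descriptors that were consumed. Returns the amount of work done.
 */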
949 int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
950 {
951 unsigned int tot_rq_work_done = 0, cur_work_done;
952 unsigned int i;
953 int err;
954
955 for (i = 0; i < fnic->rq_count; i++) {
956 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
957 fnic_rq_cmpl_handler_cont,
958 NULL);
959 if (cur_work_done) {
960 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
961 if (err)
962 shost_printk(KERN_ERR, fnic->lport->host,
963 "fnic_alloc_rq_frame can't alloc"
964 " frame\n");
965 }
966 tot_rq_work_done += cur_work_done;
967 }
968
969 return tot_rq_work_done;
970 }
971
972
973 /*
974 * Allocate a receive buffer large enough for a maximum-size FC frame
975 * plus headroom and tailroom, map it for DMA and post it on the RQ.
976 */
977 int fnic_alloc_rq_frame(struct vnic_rq *rq)
978 {
979 struct fnic *fnic = vnic_dev_priv(rq->vdev);
980 struct sk_buff *skb;
981 u16 len;
982 dma_addr_t pa;
983 int r;
984
985 len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
986 skb = dev_alloc_skb(len);
987 if (!skb) {
988 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
989 "Unable to allocate RQ sk_buff\n");
990 return -ENOMEM;
991 }
992 skb_reset_mac_header(skb);
993 skb_reset_transport_header(skb);
994 skb_reset_network_header(skb);
995 skb_put(skb, len);
996 pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
997 if (dma_mapping_error(&fnic->pdev->dev, pa)) {
998 r = -ENOMEM;
999 printk(KERN_ERR "PCI mapping failed with error %d\n", r);
1000 goto free_skb;
1001 }
1002
1003 fnic_queue_rq_desc(rq, skb, pa, len);
1004 return 0;
1005
1006 free_skb:
1007 kfree_skb(skb);
1008 return r;
1009 }
1010
1011 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
1012 {
1013 struct fc_frame *fp = buf->os_buf;
1014 struct fnic *fnic = vnic_dev_priv(rq->vdev);
1015
1016 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
1017 DMA_FROM_DEVICE);
1018
1019 dev_kfree_skb(fp_skb(fp));
1020 buf->os_buf = NULL;
1021 }
1022
1023
1024 /*
1025 * fnic_eth_send() - send a raw Ethernet frame; used by libfcoe for FIP.
1026 * Adds a VLAN header when the hardware does not insert the tag itself.
1027 */
1028 void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
1029 {
1030 struct fnic *fnic = fnic_from_ctlr(fip);
1031 struct vnic_wq *wq = &fnic->wq[0];
1032 dma_addr_t pa;
1033 struct ethhdr *eth_hdr;
1034 struct vlan_ethhdr *vlan_hdr;
1035 unsigned long flags;
1036
1037 if (!fnic->vlan_hw_insert) {
1038 eth_hdr = (struct ethhdr *)skb_mac_header(skb);
1039 vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
1040 memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
1041 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
1042 vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
1043 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
1044 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
1045 FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
1046 printk(KERN_ERR "fnic ctlr frame trace error!!!");
1047 }
1048 } else {
1049 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
1050 FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
1051 printk(KERN_ERR "fnic ctlr frame trace error!!!");
1052 }
1053 }
1054
1055 pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
1056 DMA_TO_DEVICE);
1057 if (dma_mapping_error(&fnic->pdev->dev, pa)) {
1058 printk(KERN_ERR "DMA mapping failed\n");
1059 goto free_skb;
1060 }
1061
1062 spin_lock_irqsave(&fnic->wq_lock[0], flags);
1063 if (!vnic_wq_desc_avail(wq))
1064 goto irq_restore;
1065
1066 fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
1067 0 /* hw inserts cos value */,
1068 fnic->vlan_id, 1);
1069 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
1070 return;
1071
1072 irq_restore:
1073 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
1074 dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
1075 free_skb:
1076 kfree_skb(skb);
1077 }
1078
1079
1080 /* Encapsulate an FC frame in FCoE (and 802.1Q if needed) and queue it. */
1081
1082 static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
1083 {
1084 struct vnic_wq *wq = &fnic->wq[0];
1085 struct sk_buff *skb;
1086 dma_addr_t pa;
1087 struct ethhdr *eth_hdr;
1088 struct vlan_ethhdr *vlan_hdr;
1089 struct fcoe_hdr *fcoe_hdr;
1090 struct fc_frame_header *fh;
1091 u32 tot_len, eth_hdr_len;
1092 int ret = 0;
1093 unsigned long flags;
1094
1095 fh = fc_frame_header_get(fp);
1096 skb = fp_skb(fp);
1097
1098 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1099 fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
1100 return 0;
1101
1102 if (!fnic->vlan_hw_insert) {
1103 eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
1104 vlan_hdr = skb_push(skb, eth_hdr_len);
1105 eth_hdr = (struct ethhdr *)vlan_hdr;
1106 vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
1107 vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
1108 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
1109 fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
1110 } else {
1111 eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
1112 eth_hdr = skb_push(skb, eth_hdr_len);
1113 eth_hdr->h_proto = htons(ETH_P_FCOE);
1114 fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
1115 }
1116
1117 if (fnic->ctlr.map_dest)
1118 fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
1119 else
1120 memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
1121 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
1122
1123 tot_len = skb->len;
1124 BUG_ON(tot_len % 4);
1125
1126 memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
1127 fcoe_hdr->fcoe_sof = fr_sof(fp);
1128 if (FC_FCOE_VER)
1129 FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
1130
1131 pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
1132 if (dma_mapping_error(&fnic->pdev->dev, pa)) {
1133 ret = -ENOMEM;
1134 printk(KERN_ERR "DMA map failed with error %d\n", ret);
1135 goto free_skb_on_err;
1136 }
1137
1138 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
1139 (char *)eth_hdr, tot_len)) != 0) {
1140 printk(KERN_ERR "fnic ctlr frame trace error!!!");
1141 }
1142
1143 spin_lock_irqsave(&fnic->wq_lock[0], flags);
1144
1145 if (!vnic_wq_desc_avail(wq)) {
1146 dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
1147 ret = -1;
1148 goto irq_restore;
1149 }
1150
1151 fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
1152 0 /* hw inserts cos value */,
1153 fnic->vlan_id, 1, 1, 1);
1154
1155 irq_restore:
1156 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
1157
1158 free_skb_on_err:
1159 if (ret)
1160 dev_kfree_skb_any(fp_skb(fp));
1161
1162 return ret;
1163 }
1164
1165 /*
1166 * fnic_send() - libfc frame-send hook. Frames are queued while the fnic
1167 * is changing state and sent immediately otherwise.
1168 */
1169 int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
1170 {
1171 struct fnic *fnic = lport_priv(lp);
1172 unsigned long flags;
1173
1174 if (fnic->in_remove) {
1175 dev_kfree_skb(fp_skb(fp));
1176 return -1;
1177 }
1178
1179
1180
1181
1182
1183 spin_lock_irqsave(&fnic->fnic_lock, flags);
1184 if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
1185 skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
1186 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1187 return 0;
1188 }
1189 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1190
1191 return fnic_send_frame(fnic, fp);
1192 }
1193
1194
1195 /*
1196 * fnic_flush_tx() - send queued frames.
1197 *
1198 * Sends the frames that were queued while the fnic was transitioning
1199 * between Ethernet and FC mode; they are queued for whichever stable
1200 * mode the fnic has now reached.
1201 *
1202 * Called without the fnic lock held.
1203 */
1204 void fnic_flush_tx(struct fnic *fnic)
1205 {
1206 struct sk_buff *skb;
1207 struct fc_frame *fp;
1208
1209 while ((skb = skb_dequeue(&fnic->tx_queue))) {
1210 fp = (struct fc_frame *)skb;
1211 fnic_send_frame(fnic, fp);
1212 }
1213 }
1214
1215
1216 /*
1217 * fnic_set_eth_mode() - put the fnic into Ethernet mode by resetting
1218 * the firmware, retrying if the state changed underneath us.
1219 */
1220
1221 static void fnic_set_eth_mode(struct fnic *fnic)
1222 {
1223 unsigned long flags;
1224 enum fnic_state old_state;
1225 int ret;
1226
1227 spin_lock_irqsave(&fnic->fnic_lock, flags);
1228 again:
1229 old_state = fnic->state;
1230 switch (old_state) {
1231 case FNIC_IN_FC_MODE:
1232 case FNIC_IN_ETH_TRANS_FC_MODE:
1233 default:
1234 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1235 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1236
1237 ret = fnic_fw_reset_handler(fnic);
1238
1239 spin_lock_irqsave(&fnic->fnic_lock, flags);
1240 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
1241 goto again;
1242 if (ret)
1243 fnic->state = old_state;
1244 break;
1245
1246 case FNIC_IN_FC_TRANS_ETH_MODE:
1247 case FNIC_IN_ETH_MODE:
1248 break;
1249 }
1250 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1251 }
1252
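/* WQ completion callback: unmap and free a transmitted frame buffer. */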
1253 static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
1254 struct cq_desc *cq_desc,
1255 struct vnic_wq_buf *buf, void *opaque)
1256 {
1257 struct sk_buff *skb = buf->os_buf;
1258 struct fc_frame *fp = (struct fc_frame *)skb;
1259 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1260
1261 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
1262 DMA_TO_DEVICE);
1263 dev_kfree_skb_irq(fp_skb(fp));
1264 buf->os_buf = NULL;
1265 }
1266
1267 static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
1268 struct cq_desc *cq_desc, u8 type,
1269 u16 q_number, u16 completed_index,
1270 void *opaque)
1271 {
1272 struct fnic *fnic = vnic_dev_priv(vdev);
1273 unsigned long flags;
1274
1275 spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
1276 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
1277 fnic_wq_complete_frame_send, NULL);
1278 spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
1279
1280 return 0;
1281 }
1282
1283 int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
1284 {
1285 unsigned int wq_work_done = 0;
1286 unsigned int i;
1287
1288 for (i = 0; i < fnic->raw_wq_count; i++) {
1289 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
1290 work_to_do,
1291 fnic_wq_cmpl_handler_cont,
1292 NULL);
1293 }
1294
1295 return wq_work_done;
1296 }
1297
1298
1299 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
1300 {
1301 struct fc_frame *fp = buf->os_buf;
1302 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1303
1304 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
1305 DMA_TO_DEVICE);
1306
1307 dev_kfree_skb(fp_skb(fp));
1308 buf->os_buf = NULL;
1309 }
1310
1311 void fnic_fcoe_reset_vlans(struct fnic *fnic)
1312 {
1313 unsigned long flags;
1314 struct fcoe_vlan *vlan;
1315 struct fcoe_vlan *next;
1316
1317
1318 /*
1319 * Drop every VLAN discovered so far; a fresh discovery rebuilds the list.
1320 */
1321
1322 spin_lock_irqsave(&fnic->vlans_lock, flags);
1323 if (!list_empty(&fnic->vlans)) {
1324 list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
1325 list_del(&vlan->list);
1326 kfree(vlan);
1327 }
1328 }
1329 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1330 }
1331
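/*
 * FIP timer handler: restart VLAN discovery when no usable VLAN is known,
 * otherwise retry or expire the VLAN currently being solicited.
 */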
1332 void fnic_handle_fip_timer(struct fnic *fnic)
1333 {
1334 unsigned long flags;
1335 struct fcoe_vlan *vlan;
1336 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1337 u64 sol_time;
1338
1339 spin_lock_irqsave(&fnic->fnic_lock, flags);
1340 if (fnic->stop_rx_link_events) {
1341 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1342 return;
1343 }
1344 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1345
1346 if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
1347 return;
1348
1349 spin_lock_irqsave(&fnic->vlans_lock, flags);
1350 if (list_empty(&fnic->vlans)) {
1351 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1352
1353 if (printk_ratelimit())
1354 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1355 "Start VLAN Discovery\n");
1356 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1357 return;
1358 }
1359
1360 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
1361 shost_printk(KERN_DEBUG, fnic->lport->host,
1362 "fip_timer: vlan %d state %d sol_count %d\n",
1363 vlan->vid, vlan->state, vlan->sol_count);
1364 switch (vlan->state) {
1365 case FIP_VLAN_USED:
1366 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1367 "FIP VLAN is selected for FC transaction\n");
1368 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1369 break;
1370 case FIP_VLAN_FAILED:
1371 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1372
1373 if (printk_ratelimit())
1374 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1375 "Start VLAN Discovery\n");
1376 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1377 break;
1378 case FIP_VLAN_SENT:
1379 if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
1380 /*
1381 * No response on this VLAN after the maximum number of
1382 * solicitations: drop it and try the next one in the list.
1383 */
1384 shost_printk(KERN_INFO, fnic->lport->host,
1385 "Dequeue this VLAN ID %d from list\n",
1386 vlan->vid);
1387 list_del(&vlan->list);
1388 kfree(vlan);
1389 vlan = NULL;
1390 if (list_empty(&fnic->vlans)) {
1391
1392 spin_unlock_irqrestore(&fnic->vlans_lock,
1393 flags);
1394 shost_printk(KERN_INFO, fnic->lport->host,
1395 "fip_timer: vlan list empty, "
1396 "trigger vlan disc\n");
1397 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1398 return;
1399 }
1400
1401 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
1402 list);
1403 fnic->set_vlan(fnic, vlan->vid);
1404 vlan->state = FIP_VLAN_SENT;
1405 }
1406 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1407 atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
1408 vlan->sol_count++;
1409 sol_time = jiffies + msecs_to_jiffies
1410 (FCOE_CTLR_START_DELAY);
1411 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
1412 break;
1413 }
1414 }