This source file includes following definitions.
- is_vxge_card_up
- VXGE_COMPLETE_VPATH_TX
- VXGE_COMPLETE_ALL_TX
- VXGE_COMPLETE_ALL_RX
- vxge_callback_link_up
- vxge_callback_link_down
- vxge_rx_alloc
- vxge_rx_map
- vxge_rx_initial_replenish
- vxge_rx_complete
- vxge_re_pre_post
- vxge_post
- vxge_rx_1b_compl
- vxge_xmit_compl
- vxge_get_vpath_no
- vxge_search_mac_addr_in_list
- vxge_mac_list_add
- vxge_add_mac_addr
- vxge_learn_mac
- vxge_xmit
- vxge_rx_term
- vxge_tx_term
- vxge_mac_list_del
- vxge_del_mac_addr
- vxge_set_multicast
- vxge_set_mac_addr
- vxge_vpath_intr_enable
- vxge_vpath_intr_disable
- vxge_search_mac_addr_in_da_table
- vxge_restore_vpath_mac_addr
- vxge_restore_vpath_vid_table
- vxge_reset_vpath
- vxge_config_ci_for_tti_rti
- do_vxge_reset
- vxge_reset
- vxge_poll_msix
- vxge_poll_inta
- vxge_netpoll
- vxge_rth_configure
- vxge_reset_all_vpaths
- vxge_close_vpaths
- vxge_open_vpaths
- adaptive_coalesce_tx_interrupts
- adaptive_coalesce_rx_interrupts
- vxge_isr_napi
- vxge_tx_msix_handle
- vxge_rx_msix_napi_handle
- vxge_alarm_msix_handle
- vxge_alloc_msix
- vxge_enable_msix
- vxge_rem_msix_isr
- vxge_rem_isr
- vxge_add_isr
- vxge_poll_vp_reset
- vxge_poll_vp_lockup
- vxge_fix_features
- vxge_set_features
- vxge_open
- vxge_free_mac_add_list
- vxge_napi_del_all
- do_vxge_close
- vxge_close
- vxge_change_mtu
- vxge_get_stats64
- vxge_timestamp_config
- vxge_hwtstamp_set
- vxge_hwtstamp_get
- vxge_ioctl
- vxge_tx_watchdog
- vxge_vlan_rx_add_vid
- vxge_vlan_rx_kill_vid
- vxge_device_register
- vxge_device_unregister
- vxge_callback_crit_err
- verify_bandwidth
- vxge_config_vpaths
- vxge_device_config_init
- vxge_print_parm
- vxge_pm_suspend
- vxge_pm_resume
- vxge_io_error_detected
- vxge_io_slot_reset
- vxge_io_resume
- vxge_get_num_vfs
- vxge_fw_upgrade
- vxge_probe_fw_update
- is_sriov_initialized
- vxge_probe
- vxge_remove
- vxge_starter
- vxge_closer
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
46 #include <linux/bitops.h>
47 #include <linux/if_vlan.h>
48 #include <linux/interrupt.h>
49 #include <linux/pci.h>
50 #include <linux/slab.h>
51 #include <linux/tcp.h>
52 #include <net/ip.h>
53 #include <linux/netdevice.h>
54 #include <linux/etherdevice.h>
55 #include <linux/firmware.h>
56 #include <linux/net_tstamp.h>
57 #include <linux/prefetch.h>
58 #include <linux/module.h>
59 #include "vxge-main.h"
60 #include "vxge-reg.h"
61
62 MODULE_LICENSE("Dual BSD/GPL");
63 MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
64 "Virtualized Server Adapter");
65
66 static const struct pci_device_id vxge_id_table[] = {
67 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
68 PCI_ANY_ID},
69 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
70 PCI_ANY_ID},
71 {0}
72 };
73
74 MODULE_DEVICE_TABLE(pci, vxge_id_table);
75
76 VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
77 VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
78 VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
79 VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
80 VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
81 VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);
82
83 static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
84 {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
85 static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
86 {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
87 module_param_array(bw_percentage, uint, NULL, 0);
88
89 static struct vxge_drv_config *driver_config;
90 static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
91
92 static inline int is_vxge_card_up(struct vxgedev *vdev)
93 {
94 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
95 }
96
97 static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
98 {
99 struct sk_buff **skb_ptr = NULL;
100 struct sk_buff **temp;
101 #define NR_SKB_COMPLETED 128
102 struct sk_buff *completed[NR_SKB_COMPLETED];
103 int more;
104
105 do {
106 more = 0;
107 skb_ptr = completed;
108
109 if (__netif_tx_trylock(fifo->txq)) {
110 vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
111 NR_SKB_COMPLETED, &more);
112 __netif_tx_unlock(fifo->txq);
113 }
114
115
116 for (temp = completed; temp != skb_ptr; temp++)
117 dev_consume_skb_irq(*temp);
118 } while (more);
119 }
120
121 static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
122 {
123 int i;
124
125
126 for (i = 0; i < vdev->no_of_vpath; i++)
127 VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
128 }
129
130 static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
131 {
132 int i;
133 struct vxge_ring *ring;
134
135
136 for (i = 0; i < vdev->no_of_vpath; i++) {
137 ring = &vdev->vpaths[i].ring;
138 vxge_hw_vpath_poll_rx(ring->handle);
139 }
140 }
141
142
143
144
145
146
147
148 static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
149 {
150 struct net_device *dev = hldev->ndev;
151 struct vxgedev *vdev = netdev_priv(dev);
152
153 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
154 vdev->ndev->name, __func__, __LINE__);
155 netdev_notice(vdev->ndev, "Link Up\n");
156 vdev->stats.link_up++;
157
158 netif_carrier_on(vdev->ndev);
159 netif_tx_wake_all_queues(vdev->ndev);
160
161 vxge_debug_entryexit(VXGE_TRACE,
162 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
163 }
164
165
166
167
168
169
170
171 static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
172 {
173 struct net_device *dev = hldev->ndev;
174 struct vxgedev *vdev = netdev_priv(dev);
175
176 vxge_debug_entryexit(VXGE_TRACE,
177 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
178 netdev_notice(vdev->ndev, "Link Down\n");
179
180 vdev->stats.link_down++;
181 netif_carrier_off(vdev->ndev);
182 netif_tx_stop_all_queues(vdev->ndev);
183
184 vxge_debug_entryexit(VXGE_TRACE,
185 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
186 }
187
188
189
190
191
192
193 static struct sk_buff *
194 vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
195 {
196 struct net_device *dev;
197 struct sk_buff *skb;
198 struct vxge_rx_priv *rx_priv;
199
200 dev = ring->ndev;
201 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
202 ring->ndev->name, __func__, __LINE__);
203
204 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
205
206
207 skb = netdev_alloc_skb(dev, skb_size +
208 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
209 if (skb == NULL) {
210 vxge_debug_mem(VXGE_ERR,
211 "%s: out of memory to allocate SKB", dev->name);
212 ring->stats.skb_alloc_fail++;
213 return NULL;
214 }
215
216 vxge_debug_mem(VXGE_TRACE,
217 "%s: %s:%d Skb : 0x%p", ring->ndev->name,
218 __func__, __LINE__, skb);
219
220 skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
221
222 rx_priv->skb = skb;
223 rx_priv->skb_data = NULL;
224 rx_priv->data_size = skb_size;
225 vxge_debug_entryexit(VXGE_TRACE,
226 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
227
228 return skb;
229 }
230
231
232
233
234 static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
235 {
236 struct vxge_rx_priv *rx_priv;
237 dma_addr_t dma_addr;
238
239 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
240 ring->ndev->name, __func__, __LINE__);
241 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
242
243 rx_priv->skb_data = rx_priv->skb->data;
244 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
245 rx_priv->data_size, PCI_DMA_FROMDEVICE);
246
247 if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
248 ring->stats.pci_map_fail++;
249 return -EIO;
250 }
251 vxge_debug_mem(VXGE_TRACE,
252 "%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
253 ring->ndev->name, __func__, __LINE__,
254 (unsigned long long)dma_addr);
255 vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);
256
257 rx_priv->data_dma = dma_addr;
258 vxge_debug_entryexit(VXGE_TRACE,
259 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
260
261 return 0;
262 }
263
264
265
266
267
268 static enum vxge_hw_status
269 vxge_rx_initial_replenish(void *dtrh, void *userdata)
270 {
271 struct vxge_ring *ring = (struct vxge_ring *)userdata;
272 struct vxge_rx_priv *rx_priv;
273
274 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
275 ring->ndev->name, __func__, __LINE__);
276 if (vxge_rx_alloc(dtrh, ring,
277 VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
278 return VXGE_HW_FAIL;
279
280 if (vxge_rx_map(dtrh, ring)) {
281 rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
282 dev_kfree_skb(rx_priv->skb);
283
284 return VXGE_HW_FAIL;
285 }
286 vxge_debug_entryexit(VXGE_TRACE,
287 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
288
289 return VXGE_HW_OK;
290 }
291
292 static inline void
293 vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
294 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
295 {
296
297 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
298 ring->ndev->name, __func__, __LINE__);
299 skb_record_rx_queue(skb, ring->driver_id);
300 skb->protocol = eth_type_trans(skb, ring->ndev);
301
302 u64_stats_update_begin(&ring->stats.syncp);
303 ring->stats.rx_frms++;
304 ring->stats.rx_bytes += pkt_length;
305
306 if (skb->pkt_type == PACKET_MULTICAST)
307 ring->stats.rx_mcast++;
308 u64_stats_update_end(&ring->stats.syncp);
309
310 vxge_debug_rx(VXGE_TRACE,
311 "%s: %s:%d skb protocol = %d",
312 ring->ndev->name, __func__, __LINE__, skb->protocol);
313
314 if (ext_info->vlan &&
315 ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
316 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ext_info->vlan);
317 napi_gro_receive(ring->napi_p, skb);
318
319 vxge_debug_entryexit(VXGE_TRACE,
320 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
321 }
322
323 static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
324 struct vxge_rx_priv *rx_priv)
325 {
326 pci_dma_sync_single_for_device(ring->pdev,
327 rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);
328
329 vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
330 vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
331 }
332
333 static inline void vxge_post(int *dtr_cnt, void **first_dtr,
334 void *post_dtr, struct __vxge_hw_ring *ringh)
335 {
336 int dtr_count = *dtr_cnt;
337 if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
338 if (*first_dtr)
339 vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
340 *first_dtr = post_dtr;
341 } else
342 vxge_hw_ring_rxd_post_post(ringh, post_dtr);
343 dtr_count++;
344 *dtr_cnt = dtr_count;
345 }
346
347
348
349
350
351
352
353 static enum vxge_hw_status
354 vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
355 u8 t_code, void *userdata)
356 {
357 struct vxge_ring *ring = (struct vxge_ring *)userdata;
358 struct net_device *dev = ring->ndev;
359 unsigned int dma_sizes;
360 void *first_dtr = NULL;
361 int dtr_cnt = 0;
362 int data_size;
363 dma_addr_t data_dma;
364 int pkt_length;
365 struct sk_buff *skb;
366 struct vxge_rx_priv *rx_priv;
367 struct vxge_hw_ring_rxd_info ext_info;
368 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
369 ring->ndev->name, __func__, __LINE__);
370
371 if (ring->budget <= 0)
372 goto out;
373
374 do {
375 prefetch((char *)dtr + L1_CACHE_BYTES);
376 rx_priv = vxge_hw_ring_rxd_private_get(dtr);
377 skb = rx_priv->skb;
378 data_size = rx_priv->data_size;
379 data_dma = rx_priv->data_dma;
380 prefetch(rx_priv->skb_data);
381
382 vxge_debug_rx(VXGE_TRACE,
383 "%s: %s:%d skb = 0x%p",
384 ring->ndev->name, __func__, __LINE__, skb);
385
386 vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
387 pkt_length = dma_sizes;
388
389 pkt_length -= ETH_FCS_LEN;
390
391 vxge_debug_rx(VXGE_TRACE,
392 "%s: %s:%d Packet Length = %d",
393 ring->ndev->name, __func__, __LINE__, pkt_length);
394
395 vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);
396
397
398 vxge_assert(skb);
399
400 prefetch((char *)skb + L1_CACHE_BYTES);
401 if (unlikely(t_code)) {
402 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
403 VXGE_HW_OK) {
404
405 ring->stats.rx_errors++;
406 vxge_debug_rx(VXGE_TRACE,
407 "%s: %s :%d Rx T_code is %d",
408 ring->ndev->name, __func__,
409 __LINE__, t_code);
410
411
412
413
414
415 vxge_re_pre_post(dtr, ring, rx_priv);
416
417 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
418 ring->stats.rx_dropped++;
419 continue;
420 }
421 }
422
423 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
424 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
425 if (!vxge_rx_map(dtr, ring)) {
426 skb_put(skb, pkt_length);
427
428 pci_unmap_single(ring->pdev, data_dma,
429 data_size, PCI_DMA_FROMDEVICE);
430
431 vxge_hw_ring_rxd_pre_post(ringh, dtr);
432 vxge_post(&dtr_cnt, &first_dtr, dtr,
433 ringh);
434 } else {
435 dev_kfree_skb(rx_priv->skb);
436 rx_priv->skb = skb;
437 rx_priv->data_size = data_size;
438 vxge_re_pre_post(dtr, ring, rx_priv);
439
440 vxge_post(&dtr_cnt, &first_dtr, dtr,
441 ringh);
442 ring->stats.rx_dropped++;
443 break;
444 }
445 } else {
446 vxge_re_pre_post(dtr, ring, rx_priv);
447
448 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
449 ring->stats.rx_dropped++;
450 break;
451 }
452 } else {
453 struct sk_buff *skb_up;
454
455 skb_up = netdev_alloc_skb(dev, pkt_length +
456 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
457 if (skb_up != NULL) {
458 skb_reserve(skb_up,
459 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
460
461 pci_dma_sync_single_for_cpu(ring->pdev,
462 data_dma, data_size,
463 PCI_DMA_FROMDEVICE);
464
465 vxge_debug_mem(VXGE_TRACE,
466 "%s: %s:%d skb_up = %p",
467 ring->ndev->name, __func__,
468 __LINE__, skb);
469 memcpy(skb_up->data, skb->data, pkt_length);
470
471 vxge_re_pre_post(dtr, ring, rx_priv);
472
473 vxge_post(&dtr_cnt, &first_dtr, dtr,
474 ringh);
475
476 skb = skb_up;
477 skb_put(skb, pkt_length);
478 } else {
479 vxge_re_pre_post(dtr, ring, rx_priv);
480
481 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
482 vxge_debug_rx(VXGE_ERR,
483 "%s: vxge_rx_1b_compl: out of "
484 "memory", dev->name);
485 ring->stats.skb_alloc_fail++;
486 break;
487 }
488 }
489
490 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
491 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
492 (dev->features & NETIF_F_RXCSUM) &&
493 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
494 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
495 skb->ip_summed = CHECKSUM_UNNECESSARY;
496 else
497 skb_checksum_none_assert(skb);
498
499
500 if (ring->rx_hwts) {
501 struct skb_shared_hwtstamps *skb_hwts;
502 u32 ns = *(u32 *)(skb->head + pkt_length);
503
504 skb_hwts = skb_hwtstamps(skb);
505 skb_hwts->hwtstamp = ns_to_ktime(ns);
506 }
507
508
509
510
511
512 if (ext_info.rth_value)
513 skb_set_hash(skb, ext_info.rth_value,
514 PKT_HASH_TYPE_L3);
515
516 vxge_rx_complete(ring, skb, ext_info.vlan,
517 pkt_length, &ext_info);
518
519 ring->budget--;
520 ring->pkts_processed++;
521 if (!ring->budget)
522 break;
523
524 } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
525 &t_code) == VXGE_HW_OK);
526
527 if (first_dtr)
528 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
529
530 out:
531 vxge_debug_entryexit(VXGE_TRACE,
532 "%s:%d Exiting...",
533 __func__, __LINE__);
534 return VXGE_HW_OK;
535 }
536
537
538
539
540
541
542
543
544
545 static enum vxge_hw_status
546 vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
547 enum vxge_hw_fifo_tcode t_code, void *userdata,
548 struct sk_buff ***skb_ptr, int nr_skb, int *more)
549 {
550 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
551 struct sk_buff *skb, **done_skb = *skb_ptr;
552 int pkt_cnt = 0;
553
554 vxge_debug_entryexit(VXGE_TRACE,
555 "%s:%d Entered....", __func__, __LINE__);
556
557 do {
558 int frg_cnt;
559 skb_frag_t *frag;
560 int i = 0, j;
561 struct vxge_tx_priv *txd_priv =
562 vxge_hw_fifo_txdl_private_get(dtr);
563
564 skb = txd_priv->skb;
565 frg_cnt = skb_shinfo(skb)->nr_frags;
566 frag = &skb_shinfo(skb)->frags[0];
567
568 vxge_debug_tx(VXGE_TRACE,
569 "%s: %s:%d fifo_hw = %p dtr = %p "
570 "tcode = 0x%x", fifo->ndev->name, __func__,
571 __LINE__, fifo_hw, dtr, t_code);
572
573 vxge_assert(skb);
574 vxge_debug_tx(VXGE_TRACE,
575 "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
576 fifo->ndev->name, __func__, __LINE__,
577 skb, txd_priv, frg_cnt);
578 if (unlikely(t_code)) {
579 fifo->stats.tx_errors++;
580 vxge_debug_tx(VXGE_ERR,
581 "%s: tx: dtr %p completed due to "
582 "error t_code %01x", fifo->ndev->name,
583 dtr, t_code);
584 vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
585 }
586
587
588 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
589 skb_headlen(skb), PCI_DMA_TODEVICE);
590
591 for (j = 0; j < frg_cnt; j++) {
592 pci_unmap_page(fifo->pdev,
593 txd_priv->dma_buffers[i++],
594 skb_frag_size(frag), PCI_DMA_TODEVICE);
595 frag += 1;
596 }
597
598 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
599
600
601 u64_stats_update_begin(&fifo->stats.syncp);
602 fifo->stats.tx_frms++;
603 fifo->stats.tx_bytes += skb->len;
604 u64_stats_update_end(&fifo->stats.syncp);
605
606 *done_skb++ = skb;
607
608 if (--nr_skb <= 0) {
609 *more = 1;
610 break;
611 }
612
613 pkt_cnt++;
614 if (pkt_cnt > fifo->indicate_max_pkts)
615 break;
616
617 } while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
618 &dtr, &t_code) == VXGE_HW_OK);
619
620 *skb_ptr = done_skb;
621 if (netif_tx_queue_stopped(fifo->txq))
622 netif_tx_wake_queue(fifo->txq);
623
624 vxge_debug_entryexit(VXGE_TRACE,
625 "%s: %s:%d Exiting...",
626 fifo->ndev->name, __func__, __LINE__);
627 return VXGE_HW_OK;
628 }
629
630
631 static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
632 {
633 u16 queue_len, counter = 0;
634 if (skb->protocol == htons(ETH_P_IP)) {
635 struct iphdr *ip;
636 struct tcphdr *th;
637
638 ip = ip_hdr(skb);
639
640 if (!ip_is_fragment(ip)) {
641 th = (struct tcphdr *)(((unsigned char *)ip) +
642 ip->ihl*4);
643
644 queue_len = vdev->no_of_vpath;
645 counter = (ntohs(th->source) +
646 ntohs(th->dest)) &
647 vdev->vpath_selector[queue_len - 1];
648 if (counter >= queue_len)
649 counter = queue_len - 1;
650 }
651 }
652 return counter;
653 }
654
655 static enum vxge_hw_status vxge_search_mac_addr_in_list(
656 struct vxge_vpath *vpath, u64 del_mac)
657 {
658 struct list_head *entry, *next;
659 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
660 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
661 return TRUE;
662 }
663 return FALSE;
664 }
665
666 static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
667 {
668 struct vxge_mac_addrs *new_mac_entry;
669 u8 *mac_address = NULL;
670
671 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
672 return TRUE;
673
674 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
675 if (!new_mac_entry) {
676 vxge_debug_mem(VXGE_ERR,
677 "%s: memory allocation failed",
678 VXGE_DRIVER_NAME);
679 return FALSE;
680 }
681
682 list_add(&new_mac_entry->item, &vpath->mac_addr_list);
683
684
685 mac_address = (u8 *)&new_mac_entry->macaddr;
686 memcpy(mac_address, mac->macaddr, ETH_ALEN);
687
688 new_mac_entry->state = mac->state;
689 vpath->mac_addr_cnt++;
690
691 if (is_multicast_ether_addr(mac->macaddr))
692 vpath->mcast_addr_cnt++;
693
694 return TRUE;
695 }
696
697
698 static enum vxge_hw_status
699 vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
700 {
701 enum vxge_hw_status status = VXGE_HW_OK;
702 struct vxge_vpath *vpath;
703 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
704
705 if (is_multicast_ether_addr(mac->macaddr))
706 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
707 else
708 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
709
710 vpath = &vdev->vpaths[mac->vpath_no];
711 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
712 mac->macmask, duplicate_mode);
713 if (status != VXGE_HW_OK) {
714 vxge_debug_init(VXGE_ERR,
715 "DA config add entry failed for vpath:%d",
716 vpath->device_id);
717 } else
718 if (FALSE == vxge_mac_list_add(vpath, mac))
719 status = -EPERM;
720
721 return status;
722 }
723
724 static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
725 {
726 struct macInfo mac_info;
727 u8 *mac_address = NULL;
728 u64 mac_addr = 0, vpath_vector = 0;
729 int vpath_idx = 0;
730 enum vxge_hw_status status = VXGE_HW_OK;
731 struct vxge_vpath *vpath = NULL;
732
733 mac_address = (u8 *)&mac_addr;
734 memcpy(mac_address, mac_header, ETH_ALEN);
735
736
737 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
738 vpath = &vdev->vpaths[vpath_idx];
739 if (vxge_search_mac_addr_in_list(vpath, mac_addr))
740 return vpath_idx;
741 }
742
743 memset(&mac_info, 0, sizeof(struct macInfo));
744 memcpy(mac_info.macaddr, mac_header, ETH_ALEN);
745
746
747 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
748 vpath = &vdev->vpaths[vpath_idx];
749 if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
750
751 mac_info.vpath_no = vpath_idx;
752 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
753 status = vxge_add_mac_addr(vdev, &mac_info);
754 if (status != VXGE_HW_OK)
755 return -EPERM;
756 return vpath_idx;
757 }
758 }
759
760 mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
761 vpath_idx = 0;
762 mac_info.vpath_no = vpath_idx;
763
764 vpath = &vdev->vpaths[vpath_idx];
765 if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
766
767 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
768 return -EPERM;
769 return vpath_idx;
770 }
771
772
773 vpath_vector = vxge_mBIT(vpath->device_id);
774 status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
775 vxge_hw_mgmt_reg_type_mrpcim,
776 0,
777 (ulong)offsetof(
778 struct vxge_hw_mrpcim_reg,
779 rts_mgr_cbasin_cfg),
780 vpath_vector);
781 if (status != VXGE_HW_OK) {
782 vxge_debug_tx(VXGE_ERR,
783 "%s: Unable to set the vpath-%d in catch-basin mode",
784 VXGE_DRIVER_NAME, vpath->device_id);
785 return -EPERM;
786 }
787
788 if (FALSE == vxge_mac_list_add(vpath, &mac_info))
789 return -EPERM;
790
791 return vpath_idx;
792 }
793
794
795
796
797
798
799
800
801
802 static netdev_tx_t
803 vxge_xmit(struct sk_buff *skb, struct net_device *dev)
804 {
805 struct vxge_fifo *fifo = NULL;
806 void *dtr_priv;
807 void *dtr = NULL;
808 struct vxgedev *vdev = NULL;
809 enum vxge_hw_status status;
810 int frg_cnt, first_frg_len;
811 skb_frag_t *frag;
812 int i = 0, j = 0, avail;
813 u64 dma_pointer;
814 struct vxge_tx_priv *txdl_priv = NULL;
815 struct __vxge_hw_fifo *fifo_hw;
816 int offload_type;
817 int vpath_no = 0;
818
819 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
820 dev->name, __func__, __LINE__);
821
822
823 if (unlikely(skb->len <= 0)) {
824 vxge_debug_tx(VXGE_ERR,
825 "%s: Buffer has no data..", dev->name);
826 dev_kfree_skb_any(skb);
827 return NETDEV_TX_OK;
828 }
829
830 vdev = netdev_priv(dev);
831
832 if (unlikely(!is_vxge_card_up(vdev))) {
833 vxge_debug_tx(VXGE_ERR,
834 "%s: vdev not initialized", dev->name);
835 dev_kfree_skb_any(skb);
836 return NETDEV_TX_OK;
837 }
838
839 if (vdev->config.addr_learn_en) {
840 vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
841 if (vpath_no == -EPERM) {
842 vxge_debug_tx(VXGE_ERR,
843 "%s: Failed to store the mac address",
844 dev->name);
845 dev_kfree_skb_any(skb);
846 return NETDEV_TX_OK;
847 }
848 }
849
850 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
851 vpath_no = skb_get_queue_mapping(skb);
852 else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
853 vpath_no = vxge_get_vpath_no(vdev, skb);
854
855 vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);
856
857 if (vpath_no >= vdev->no_of_vpath)
858 vpath_no = 0;
859
860 fifo = &vdev->vpaths[vpath_no].fifo;
861 fifo_hw = fifo->handle;
862
863 if (netif_tx_queue_stopped(fifo->txq))
864 return NETDEV_TX_BUSY;
865
866 avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
867 if (avail == 0) {
868 vxge_debug_tx(VXGE_ERR,
869 "%s: No free TXDs available", dev->name);
870 fifo->stats.txd_not_free++;
871 goto _exit0;
872 }
873
874
875
876
877 if (avail == 1)
878 netif_tx_stop_queue(fifo->txq);
879
880 status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
881 if (unlikely(status != VXGE_HW_OK)) {
882 vxge_debug_tx(VXGE_ERR,
883 "%s: Out of descriptors .", dev->name);
884 fifo->stats.txd_out_of_desc++;
885 goto _exit0;
886 }
887
888 vxge_debug_tx(VXGE_TRACE,
889 "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
890 dev->name, __func__, __LINE__,
891 fifo_hw, dtr, dtr_priv);
892
893 if (skb_vlan_tag_present(skb)) {
894 u16 vlan_tag = skb_vlan_tag_get(skb);
895 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
896 }
897
898 first_frg_len = skb_headlen(skb);
899
900 dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
901 PCI_DMA_TODEVICE);
902
903 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
904 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
905 fifo->stats.pci_map_fail++;
906 goto _exit0;
907 }
908
909 txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
910 txdl_priv->skb = skb;
911 txdl_priv->dma_buffers[j] = dma_pointer;
912
913 frg_cnt = skb_shinfo(skb)->nr_frags;
914 vxge_debug_tx(VXGE_TRACE,
915 "%s: %s:%d skb = %p txdl_priv = %p "
916 "frag_cnt = %d dma_pointer = 0x%llx", dev->name,
917 __func__, __LINE__, skb, txdl_priv,
918 frg_cnt, (unsigned long long)dma_pointer);
919
920 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
921 first_frg_len);
922
923 frag = &skb_shinfo(skb)->frags[0];
924 for (i = 0; i < frg_cnt; i++) {
925
926 if (!skb_frag_size(frag))
927 continue;
928
929 dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
930 0, skb_frag_size(frag),
931 DMA_TO_DEVICE);
932
933 if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
934 goto _exit2;
935 vxge_debug_tx(VXGE_TRACE,
936 "%s: %s:%d frag = %d dma_pointer = 0x%llx",
937 dev->name, __func__, __LINE__, i,
938 (unsigned long long)dma_pointer);
939
940 txdl_priv->dma_buffers[j] = dma_pointer;
941 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
942 skb_frag_size(frag));
943 frag += 1;
944 }
945
946 offload_type = vxge_offload_type(skb);
947
948 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
949 int mss = vxge_tcp_mss(skb);
950 if (mss) {
951 vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
952 dev->name, __func__, __LINE__, mss);
953 vxge_hw_fifo_txdl_mss_set(dtr, mss);
954 } else {
955 vxge_assert(skb->len <=
956 dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
957 vxge_assert(0);
958 goto _exit1;
959 }
960 }
961
962 if (skb->ip_summed == CHECKSUM_PARTIAL)
963 vxge_hw_fifo_txdl_cksum_set_bits(dtr,
964 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
965 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
966 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);
967
968 vxge_hw_fifo_txdl_post(fifo_hw, dtr);
969
970 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
971 dev->name, __func__, __LINE__);
972 return NETDEV_TX_OK;
973
974 _exit2:
975 vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
976 _exit1:
977 j = 0;
978 frag = &skb_shinfo(skb)->frags[0];
979
980 pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
981 skb_headlen(skb), PCI_DMA_TODEVICE);
982
983 for (; j < i; j++) {
984 pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
985 skb_frag_size(frag), PCI_DMA_TODEVICE);
986 frag += 1;
987 }
988
989 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
990 _exit0:
991 netif_tx_stop_queue(fifo->txq);
992 dev_kfree_skb_any(skb);
993
994 return NETDEV_TX_OK;
995 }
996
997
998
999
1000
1001
1002
1003 static void
1004 vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
1005 {
1006 struct vxge_ring *ring = (struct vxge_ring *)userdata;
1007 struct vxge_rx_priv *rx_priv =
1008 vxge_hw_ring_rxd_private_get(dtrh);
1009
1010 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
1011 ring->ndev->name, __func__, __LINE__);
1012 if (state != VXGE_HW_RXD_STATE_POSTED)
1013 return;
1014
1015 pci_unmap_single(ring->pdev, rx_priv->data_dma,
1016 rx_priv->data_size, PCI_DMA_FROMDEVICE);
1017
1018 dev_kfree_skb(rx_priv->skb);
1019 rx_priv->skb_data = NULL;
1020
1021 vxge_debug_entryexit(VXGE_TRACE,
1022 "%s: %s:%d Exiting...",
1023 ring->ndev->name, __func__, __LINE__);
1024 }
1025
1026
1027
1028
1029
1030
1031 static void
1032 vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
1033 {
1034 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
1035 skb_frag_t *frag;
1036 int i = 0, j, frg_cnt;
1037 struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
1038 struct sk_buff *skb = txd_priv->skb;
1039
1040 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1041
1042 if (state != VXGE_HW_TXDL_STATE_POSTED)
1043 return;
1044
1045
1046 vxge_assert(skb);
1047 frg_cnt = skb_shinfo(skb)->nr_frags;
1048 frag = &skb_shinfo(skb)->frags[0];
1049
1050
1051 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
1052 skb_headlen(skb), PCI_DMA_TODEVICE);
1053
1054 for (j = 0; j < frg_cnt; j++) {
1055 pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
1056 skb_frag_size(frag), PCI_DMA_TODEVICE);
1057 frag += 1;
1058 }
1059
1060 dev_kfree_skb(skb);
1061
1062 vxge_debug_entryexit(VXGE_TRACE,
1063 "%s:%d Exiting...", __func__, __LINE__);
1064 }
1065
1066 static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1067 {
1068 struct list_head *entry, *next;
1069 u64 del_mac = 0;
1070 u8 *mac_address = (u8 *) (&del_mac);
1071
1072
1073 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1074
1075 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1076 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1077 list_del(entry);
1078 kfree((struct vxge_mac_addrs *)entry);
1079 vpath->mac_addr_cnt--;
1080
1081 if (is_multicast_ether_addr(mac->macaddr))
1082 vpath->mcast_addr_cnt--;
1083 return TRUE;
1084 }
1085 }
1086
1087 return FALSE;
1088 }
1089
1090
1091 static enum vxge_hw_status
1092 vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1093 {
1094 enum vxge_hw_status status = VXGE_HW_OK;
1095 struct vxge_vpath *vpath;
1096
1097 vpath = &vdev->vpaths[mac->vpath_no];
1098 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1099 mac->macmask);
1100 if (status != VXGE_HW_OK) {
1101 vxge_debug_init(VXGE_ERR,
1102 "DA config delete entry failed for vpath:%d",
1103 vpath->device_id);
1104 } else
1105 vxge_mac_list_del(vpath, mac);
1106 return status;
1107 }
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120 static void vxge_set_multicast(struct net_device *dev)
1121 {
1122 struct netdev_hw_addr *ha;
1123 struct vxgedev *vdev;
1124 int i, mcast_cnt = 0;
1125 struct vxge_vpath *vpath;
1126 enum vxge_hw_status status = VXGE_HW_OK;
1127 struct macInfo mac_info;
1128 int vpath_idx = 0;
1129 struct vxge_mac_addrs *mac_entry;
1130 struct list_head *list_head;
1131 struct list_head *entry, *next;
1132 u8 *mac_address = NULL;
1133
1134 vxge_debug_entryexit(VXGE_TRACE,
1135 "%s:%d", __func__, __LINE__);
1136
1137 vdev = netdev_priv(dev);
1138
1139 if (unlikely(!is_vxge_card_up(vdev)))
1140 return;
1141
1142 if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
1143 for (i = 0; i < vdev->no_of_vpath; i++) {
1144 vpath = &vdev->vpaths[i];
1145 vxge_assert(vpath->is_open);
1146 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1147 if (status != VXGE_HW_OK)
1148 vxge_debug_init(VXGE_ERR, "failed to enable "
1149 "multicast, status %d", status);
1150 vdev->all_multi_flg = 1;
1151 }
1152 } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
1153 for (i = 0; i < vdev->no_of_vpath; i++) {
1154 vpath = &vdev->vpaths[i];
1155 vxge_assert(vpath->is_open);
1156 status = vxge_hw_vpath_mcast_disable(vpath->handle);
1157 if (status != VXGE_HW_OK)
1158 vxge_debug_init(VXGE_ERR, "failed to disable "
1159 "multicast, status %d", status);
1160 vdev->all_multi_flg = 0;
1161 }
1162 }
1163
1164
1165 if (!vdev->config.addr_learn_en) {
1166 for (i = 0; i < vdev->no_of_vpath; i++) {
1167 vpath = &vdev->vpaths[i];
1168 vxge_assert(vpath->is_open);
1169
1170 if (dev->flags & IFF_PROMISC)
1171 status = vxge_hw_vpath_promisc_enable(
1172 vpath->handle);
1173 else
1174 status = vxge_hw_vpath_promisc_disable(
1175 vpath->handle);
1176 if (status != VXGE_HW_OK)
1177 vxge_debug_init(VXGE_ERR, "failed to %s promisc"
1178 ", status %d", dev->flags&IFF_PROMISC ?
1179 "enable" : "disable", status);
1180 }
1181 }
1182
1183 memset(&mac_info, 0, sizeof(struct macInfo));
1184
1185 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
1186 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1187 list_head = &vdev->vpaths[0].mac_addr_list;
1188 if ((netdev_mc_count(dev) +
1189 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
1190 vdev->vpaths[0].max_mac_addr_cnt)
1191 goto _set_all_mcast;
1192
1193
1194 for (i = 0; i < mcast_cnt; i++) {
1195 list_for_each_safe(entry, next, list_head) {
1196 mac_entry = (struct vxge_mac_addrs *)entry;
1197
1198 mac_address = (u8 *)&mac_entry->macaddr;
1199 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1200
1201 if (is_multicast_ether_addr(mac_info.macaddr)) {
1202 for (vpath_idx = 0; vpath_idx <
1203 vdev->no_of_vpath;
1204 vpath_idx++) {
1205 mac_info.vpath_no = vpath_idx;
1206 status = vxge_del_mac_addr(
1207 vdev,
1208 &mac_info);
1209 }
1210 }
1211 }
1212 }
1213
1214
1215 netdev_for_each_mc_addr(ha, dev) {
1216 memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
1217 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1218 vpath_idx++) {
1219 mac_info.vpath_no = vpath_idx;
1220 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1221 status = vxge_add_mac_addr(vdev, &mac_info);
1222 if (status != VXGE_HW_OK) {
1223 vxge_debug_init(VXGE_ERR,
1224 "%s:%d Setting individual"
1225 "multicast address failed",
1226 __func__, __LINE__);
1227 goto _set_all_mcast;
1228 }
1229 }
1230 }
1231
1232 return;
1233 _set_all_mcast:
1234 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
1235
1236 for (i = 0; i < mcast_cnt; i++) {
1237 list_for_each_safe(entry, next, list_head) {
1238 mac_entry = (struct vxge_mac_addrs *)entry;
1239
1240 mac_address = (u8 *)&mac_entry->macaddr;
1241 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1242
1243 if (is_multicast_ether_addr(mac_info.macaddr))
1244 break;
1245 }
1246
1247 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1248 vpath_idx++) {
1249 mac_info.vpath_no = vpath_idx;
1250 status = vxge_del_mac_addr(vdev, &mac_info);
1251 }
1252 }
1253
1254
1255 for (i = 0; i < vdev->no_of_vpath; i++) {
1256 vpath = &vdev->vpaths[i];
1257 vxge_assert(vpath->is_open);
1258
1259 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1260 if (status != VXGE_HW_OK) {
1261 vxge_debug_init(VXGE_ERR,
1262 "%s:%d Enabling all multicasts failed",
1263 __func__, __LINE__);
1264 }
1265 vdev->all_multi_flg = 1;
1266 }
1267 dev->flags |= IFF_ALLMULTI;
1268 }
1269
1270 vxge_debug_entryexit(VXGE_TRACE,
1271 "%s:%d Exiting...", __func__, __LINE__);
1272 }
1273
1274
1275
1276
1277
1278
1279
1280 static int vxge_set_mac_addr(struct net_device *dev, void *p)
1281 {
1282 struct sockaddr *addr = p;
1283 struct vxgedev *vdev;
1284 enum vxge_hw_status status = VXGE_HW_OK;
1285 struct macInfo mac_info_new, mac_info_old;
1286 int vpath_idx = 0;
1287
1288 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1289
1290 vdev = netdev_priv(dev);
1291
1292 if (!is_valid_ether_addr(addr->sa_data))
1293 return -EINVAL;
1294
1295 memset(&mac_info_new, 0, sizeof(struct macInfo));
1296 memset(&mac_info_old, 0, sizeof(struct macInfo));
1297
1298 vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
1299 __func__, __LINE__);
1300
1301
1302 memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);
1303
1304
1305 memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);
1306
1307
1308
1309 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1310 struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
1311 if (!vpath->is_open) {
1312
1313
1314
1315 vxge_mac_list_del(vpath, &mac_info_old);
1316
1317
1318
1319 vxge_mac_list_add(vpath, &mac_info_new);
1320
1321 continue;
1322 }
1323
1324 mac_info_old.vpath_no = vpath_idx;
1325 status = vxge_del_mac_addr(vdev, &mac_info_old);
1326 }
1327
1328 if (unlikely(!is_vxge_card_up(vdev))) {
1329 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1330 return VXGE_HW_OK;
1331 }
1332
1333
1334 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
1335 mac_info_new.vpath_no = vpath_idx;
1336 mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1337 status = vxge_add_mac_addr(vdev, &mac_info_new);
1338 if (status != VXGE_HW_OK)
1339 return -EINVAL;
1340 }
1341
1342 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1343
1344 return status;
1345 }
1346
1347
1348
1349
1350
1351
1352
1353
1354 static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1355 {
1356 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1357 int msix_id = 0;
1358 int tim_msix_id[4] = {0, 1, 0, 0};
1359 int alarm_msix_id = VXGE_ALARM_MSIX_ID;
1360
1361 vxge_hw_vpath_intr_enable(vpath->handle);
1362
1363 if (vdev->config.intr_type == INTA)
1364 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
1365 else {
1366 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
1367 alarm_msix_id);
1368
1369 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1370 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1371 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
1372
1373
1374 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1375 VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
1376 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1377 }
1378 }
1379
1380
1381
1382
1383
1384
1385
1386
1387 static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1388 {
1389 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1390 struct __vxge_hw_device *hldev;
1391 int msix_id;
1392
1393 hldev = pci_get_drvdata(vdev->pdev);
1394
1395 vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
1396
1397 vxge_hw_vpath_intr_disable(vpath->handle);
1398
1399 if (vdev->config.intr_type == INTA)
1400 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
1401 else {
1402 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1403 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1404 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
1405
1406
1407 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1408 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
1409 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1410 }
1411 }
1412
1413
1414 static enum vxge_hw_status
1415 vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
1416 {
1417 enum vxge_hw_status status = VXGE_HW_OK;
1418 unsigned char macmask[ETH_ALEN];
1419 unsigned char macaddr[ETH_ALEN];
1420
1421 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1422 macaddr, macmask);
1423 if (status != VXGE_HW_OK) {
1424 vxge_debug_init(VXGE_ERR,
1425 "DA config list entry failed for vpath:%d",
1426 vpath->device_id);
1427 return status;
1428 }
1429
1430 while (!ether_addr_equal(mac->macaddr, macaddr)) {
1431 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1432 macaddr, macmask);
1433 if (status != VXGE_HW_OK)
1434 break;
1435 }
1436
1437 return status;
1438 }
1439
1440
1441 static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1442 {
1443 enum vxge_hw_status status = VXGE_HW_OK;
1444 struct macInfo mac_info;
1445 u8 *mac_address = NULL;
1446 struct list_head *entry, *next;
1447
1448 memset(&mac_info, 0, sizeof(struct macInfo));
1449
1450 if (vpath->is_open) {
1451 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1452 mac_address =
1453 (u8 *)&
1454 ((struct vxge_mac_addrs *)entry)->macaddr;
1455 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1456 ((struct vxge_mac_addrs *)entry)->state =
1457 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1458
1459 status = vxge_search_mac_addr_in_da_table(vpath,
1460 &mac_info);
1461 if (status != VXGE_HW_OK) {
1462
1463 status = vxge_hw_vpath_mac_addr_add(
1464 vpath->handle, mac_info.macaddr,
1465 mac_info.macmask,
1466 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
1467 if (status != VXGE_HW_OK) {
1468 vxge_debug_init(VXGE_ERR,
1469 "DA add entry failed for vpath:%d",
1470 vpath->device_id);
1471 ((struct vxge_mac_addrs *)entry)->state
1472 = VXGE_LL_MAC_ADDR_IN_LIST;
1473 }
1474 }
1475 }
1476 }
1477
1478 return status;
1479 }
1480
1481
1482 static enum vxge_hw_status
1483 vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1484 {
1485 enum vxge_hw_status status = VXGE_HW_OK;
1486 struct vxgedev *vdev = vpath->vdev;
1487 u16 vid;
1488
1489 if (!vpath->is_open)
1490 return status;
1491
1492 for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
1493 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1494
1495 return status;
1496 }
1497
1498
1499
1500
1501
1502
1503
1504
1505 static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
1506 {
1507 enum vxge_hw_status status = VXGE_HW_OK;
1508 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1509 int ret = 0;
1510
1511
1512 if (unlikely(!is_vxge_card_up(vdev)))
1513 return 0;
1514
1515
1516 if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1517 return 0;
1518
1519 if (vpath->handle) {
1520 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
1521 if (is_vxge_card_up(vdev) &&
1522 vxge_hw_vpath_recover_from_reset(vpath->handle)
1523 != VXGE_HW_OK) {
1524 vxge_debug_init(VXGE_ERR,
1525 "vxge_hw_vpath_recover_from_reset"
1526 "failed for vpath:%d", vp_id);
1527 return status;
1528 }
1529 } else {
1530 vxge_debug_init(VXGE_ERR,
1531 "vxge_hw_vpath_reset failed for"
1532 "vpath:%d", vp_id);
1533 return status;
1534 }
1535 } else
1536 return VXGE_HW_FAIL;
1537
1538 vxge_restore_vpath_mac_addr(vpath);
1539 vxge_restore_vpath_vid_table(vpath);
1540
1541
1542 vxge_hw_vpath_bcast_enable(vpath->handle);
1543
1544
1545 if (vdev->all_multi_flg) {
1546 status = vxge_hw_vpath_mcast_enable(vpath->handle);
1547 if (status != VXGE_HW_OK)
1548 vxge_debug_init(VXGE_ERR,
1549 "%s:%d Enabling multicast failed",
1550 __func__, __LINE__);
1551 }
1552
1553
1554 vxge_vpath_intr_enable(vdev, vp_id);
1555
1556 smp_wmb();
1557
1558
1559 vxge_hw_vpath_enable(vpath->handle);
1560
1561 smp_wmb();
1562 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
1563 vpath->ring.last_status = VXGE_HW_OK;
1564
1565
1566 clear_bit(vp_id, &vdev->vp_reset);
1567
1568
1569 if (netif_tx_queue_stopped(vpath->fifo.txq))
1570 netif_tx_wake_queue(vpath->fifo.txq);
1571
1572 return ret;
1573 }
1574
1575
1576 static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
1577 {
1578 int i = 0;
1579
1580
1581 if (vdev->config.intr_type == MSI_X) {
1582 for (i = 0; i < vdev->no_of_vpath; i++) {
1583 struct __vxge_hw_ring *hw_ring;
1584
1585 hw_ring = vdev->vpaths[i].ring.handle;
1586 vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
1587 }
1588 }
1589
1590
1591 for (i = 0; i < vdev->no_of_vpath; i++) {
1592 struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
1593 vxge_hw_vpath_tti_ci_set(hw_fifo);
1594
1595
1596
1597
1598 if ((vdev->config.intr_type == INTA) && (i == 0))
1599 break;
1600 }
1601
1602 return;
1603 }
1604
1605 static int do_vxge_reset(struct vxgedev *vdev, int event)
1606 {
1607 enum vxge_hw_status status;
1608 int ret = 0, vp_id, i;
1609
1610 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1611
1612 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
1613
1614 if (unlikely(!is_vxge_card_up(vdev)))
1615 return 0;
1616
1617
1618 if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1619 return 0;
1620 }
1621
1622 if (event == VXGE_LL_FULL_RESET) {
1623 netif_carrier_off(vdev->ndev);
1624
1625
1626 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1627 while (test_bit(vp_id, &vdev->vp_reset))
1628 msleep(50);
1629 }
1630
1631 netif_carrier_on(vdev->ndev);
1632
1633
1634 if (unlikely(vdev->exec_mode)) {
1635 vxge_debug_init(VXGE_ERR,
1636 "%s: execution mode is debug, returning..",
1637 vdev->ndev->name);
1638 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1639 netif_tx_stop_all_queues(vdev->ndev);
1640 return 0;
1641 }
1642 }
1643
1644 if (event == VXGE_LL_FULL_RESET) {
1645 vxge_hw_device_wait_receive_idle(vdev->devh);
1646 vxge_hw_device_intr_disable(vdev->devh);
1647
1648 switch (vdev->cric_err_event) {
1649 case VXGE_HW_EVENT_UNKNOWN:
1650 netif_tx_stop_all_queues(vdev->ndev);
1651 vxge_debug_init(VXGE_ERR,
1652 "fatal: %s: Disabling device due to"
1653 "unknown error",
1654 vdev->ndev->name);
1655 ret = -EPERM;
1656 goto out;
1657 case VXGE_HW_EVENT_RESET_START:
1658 break;
1659 case VXGE_HW_EVENT_RESET_COMPLETE:
1660 case VXGE_HW_EVENT_LINK_DOWN:
1661 case VXGE_HW_EVENT_LINK_UP:
1662 case VXGE_HW_EVENT_ALARM_CLEARED:
1663 case VXGE_HW_EVENT_ECCERR:
1664 case VXGE_HW_EVENT_MRPCIM_ECCERR:
1665 ret = -EPERM;
1666 goto out;
1667 case VXGE_HW_EVENT_FIFO_ERR:
1668 case VXGE_HW_EVENT_VPATH_ERR:
1669 break;
1670 case VXGE_HW_EVENT_CRITICAL_ERR:
1671 netif_tx_stop_all_queues(vdev->ndev);
1672 vxge_debug_init(VXGE_ERR,
1673 "fatal: %s: Disabling device due to"
1674 "serious error",
1675 vdev->ndev->name);
1676
1677
1678 ret = -EPERM;
1679 goto out;
1680 case VXGE_HW_EVENT_SERR:
1681 netif_tx_stop_all_queues(vdev->ndev);
1682 vxge_debug_init(VXGE_ERR,
1683 "fatal: %s: Disabling device due to"
1684 "serious error",
1685 vdev->ndev->name);
1686 ret = -EPERM;
1687 goto out;
1688 case VXGE_HW_EVENT_SRPCIM_SERR:
1689 case VXGE_HW_EVENT_MRPCIM_SERR:
1690 ret = -EPERM;
1691 goto out;
1692 case VXGE_HW_EVENT_SLOT_FREEZE:
1693 netif_tx_stop_all_queues(vdev->ndev);
1694 vxge_debug_init(VXGE_ERR,
1695 "fatal: %s: Disabling device due to"
1696 "slot freeze",
1697 vdev->ndev->name);
1698 ret = -EPERM;
1699 goto out;
1700 default:
1701 break;
1702
1703 }
1704 }
1705
1706 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
1707 netif_tx_stop_all_queues(vdev->ndev);
1708
1709 if (event == VXGE_LL_FULL_RESET) {
1710 status = vxge_reset_all_vpaths(vdev);
1711 if (status != VXGE_HW_OK) {
1712 vxge_debug_init(VXGE_ERR,
1713 "fatal: %s: can not reset vpaths",
1714 vdev->ndev->name);
1715 ret = -EPERM;
1716 goto out;
1717 }
1718 }
1719
1720 if (event == VXGE_LL_COMPL_RESET) {
1721 for (i = 0; i < vdev->no_of_vpath; i++)
1722 if (vdev->vpaths[i].handle) {
1723 if (vxge_hw_vpath_recover_from_reset(
1724 vdev->vpaths[i].handle)
1725 != VXGE_HW_OK) {
1726 vxge_debug_init(VXGE_ERR,
1727 "vxge_hw_vpath_recover_"
1728 "from_reset failed for vpath: "
1729 "%d", i);
1730 ret = -EPERM;
1731 goto out;
1732 }
1733 } else {
1734 vxge_debug_init(VXGE_ERR,
1735 "vxge_hw_vpath_reset failed for "
1736 "vpath:%d", i);
1737 ret = -EPERM;
1738 goto out;
1739 }
1740 }
1741
1742 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
1743
1744 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1745 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
1746 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
1747 }
1748
1749
1750 for (i = 0; i < vdev->no_of_vpath; i++)
1751 vxge_vpath_intr_enable(vdev, i);
1752
1753 vxge_hw_device_intr_enable(vdev->devh);
1754
1755 smp_wmb();
1756
1757
1758 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1759
1760
1761 for (i = 0; i < vdev->no_of_vpath; i++) {
1762 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
1763 smp_wmb();
1764 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
1765 }
1766
1767 netif_tx_wake_all_queues(vdev->ndev);
1768 }
1769
1770
1771 vxge_config_ci_for_tti_rti(vdev);
1772
1773 out:
1774 vxge_debug_entryexit(VXGE_TRACE,
1775 "%s:%d Exiting...", __func__, __LINE__);
1776
1777
1778 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
1779 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
1780 return ret;
1781 }
1782
1783
1784
1785
1786
1787
1788
1789 static void vxge_reset(struct work_struct *work)
1790 {
1791 struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
1792
1793 if (!netif_running(vdev->ndev))
1794 return;
1795
1796 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1797 }
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811 static int vxge_poll_msix(struct napi_struct *napi, int budget)
1812 {
1813 struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
1814 int pkts_processed;
1815 int budget_org = budget;
1816
1817 ring->budget = budget;
1818 ring->pkts_processed = 0;
1819 vxge_hw_vpath_poll_rx(ring->handle);
1820 pkts_processed = ring->pkts_processed;
1821
1822 if (pkts_processed < budget_org) {
1823 napi_complete_done(napi, pkts_processed);
1824
1825
1826 vxge_hw_channel_msix_unmask(
1827 (struct __vxge_hw_channel *)ring->handle,
1828 ring->rx_vector_no);
1829 }
1830
1831
1832
1833
1834 return pkts_processed;
1835 }
1836
1837 static int vxge_poll_inta(struct napi_struct *napi, int budget)
1838 {
1839 struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
1840 int pkts_processed = 0;
1841 int i;
1842 int budget_org = budget;
1843 struct vxge_ring *ring;
1844
1845 struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
1846
1847 for (i = 0; i < vdev->no_of_vpath; i++) {
1848 ring = &vdev->vpaths[i].ring;
1849 ring->budget = budget;
1850 ring->pkts_processed = 0;
1851 vxge_hw_vpath_poll_rx(ring->handle);
1852 pkts_processed += ring->pkts_processed;
1853 budget -= ring->pkts_processed;
1854 if (budget <= 0)
1855 break;
1856 }
1857
1858 VXGE_COMPLETE_ALL_TX(vdev);
1859
1860 if (pkts_processed < budget_org) {
1861 napi_complete_done(napi, pkts_processed);
1862
1863 vxge_hw_device_unmask_all(hldev);
1864 vxge_hw_device_flush_io(hldev);
1865 }
1866
1867 return pkts_processed;
1868 }
1869
1870 #ifdef CONFIG_NET_POLL_CONTROLLER
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880 static void vxge_netpoll(struct net_device *dev)
1881 {
1882 struct vxgedev *vdev = netdev_priv(dev);
1883 struct pci_dev *pdev = vdev->pdev;
1884 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
1885 const int irq = pdev->irq;
1886
1887 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1888
1889 if (pci_channel_offline(pdev))
1890 return;
1891
1892 disable_irq(irq);
1893 vxge_hw_device_clear_tx_rx(hldev);
1894
1895 vxge_hw_device_clear_tx_rx(hldev);
1896 VXGE_COMPLETE_ALL_RX(vdev);
1897 VXGE_COMPLETE_ALL_TX(vdev);
1898
1899 enable_irq(irq);
1900
1901 vxge_debug_entryexit(VXGE_TRACE,
1902 "%s:%d Exiting...", __func__, __LINE__);
1903 }
1904 #endif
1905
1906
1907 static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1908 {
1909 enum vxge_hw_status status = VXGE_HW_OK;
1910 struct vxge_hw_rth_hash_types hash_types;
1911 u8 itable[256] = {0};
1912 u8 mtable[256] = {0};
1913 int index;
1914
1915
1916
1917
1918
1919
1920 for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
1921 itable[index] = index;
1922 mtable[index] = index % vdev->no_of_vpath;
1923 }
1924
1925
1926 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1927 vdev->no_of_vpath,
1928 mtable, itable,
1929 vdev->config.rth_bkt_sz);
1930 if (status != VXGE_HW_OK) {
1931 vxge_debug_init(VXGE_ERR,
1932 "RTH indirection table configuration failed "
1933 "for vpath:%d", vdev->vpaths[0].device_id);
1934 return status;
1935 }
1936
1937
1938 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1939 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1940 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1941 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1942 hash_types.hash_type_tcpipv6ex_en =
1943 vdev->config.rth_hash_type_tcpipv6ex;
1944 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1945
1946
1947
1948
1949
1950
1951
1952 for (index = 0; index < vdev->no_of_vpath; index++) {
1953 status = vxge_hw_vpath_rts_rth_set(
1954 vdev->vpaths[index].handle,
1955 vdev->config.rth_algorithm,
1956 &hash_types,
1957 vdev->config.rth_bkt_sz);
1958 if (status != VXGE_HW_OK) {
1959 vxge_debug_init(VXGE_ERR,
1960 "RTH configuration failed for vpath:%d",
1961 vdev->vpaths[index].device_id);
1962 return status;
1963 }
1964 }
1965
1966 return status;
1967 }
1968
1969
1970 static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1971 {
1972 enum vxge_hw_status status = VXGE_HW_OK;
1973 struct vxge_vpath *vpath;
1974 int i;
1975
1976 for (i = 0; i < vdev->no_of_vpath; i++) {
1977 vpath = &vdev->vpaths[i];
1978 if (vpath->handle) {
1979 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
1980 if (is_vxge_card_up(vdev) &&
1981 vxge_hw_vpath_recover_from_reset(
1982 vpath->handle) != VXGE_HW_OK) {
1983 vxge_debug_init(VXGE_ERR,
1984 "vxge_hw_vpath_recover_"
1985 "from_reset failed for vpath: "
1986 "%d", i);
1987 return status;
1988 }
1989 } else {
1990 vxge_debug_init(VXGE_ERR,
1991 "vxge_hw_vpath_reset failed for "
1992 "vpath:%d", i);
1993 return status;
1994 }
1995 }
1996 }
1997
1998 return status;
1999 }
2000
2001
2002 static void vxge_close_vpaths(struct vxgedev *vdev, int index)
2003 {
2004 struct vxge_vpath *vpath;
2005 int i;
2006
2007 for (i = index; i < vdev->no_of_vpath; i++) {
2008 vpath = &vdev->vpaths[i];
2009
2010 if (vpath->handle && vpath->is_open) {
2011 vxge_hw_vpath_close(vpath->handle);
2012 vdev->stats.vpaths_open--;
2013 }
2014 vpath->is_open = 0;
2015 vpath->handle = NULL;
2016 }
2017 }
2018
2019
2020 static int vxge_open_vpaths(struct vxgedev *vdev)
2021 {
2022 struct vxge_hw_vpath_attr attr;
2023 enum vxge_hw_status status;
2024 struct vxge_vpath *vpath;
2025 u32 vp_id = 0;
2026 int i;
2027
2028 for (i = 0; i < vdev->no_of_vpath; i++) {
2029 vpath = &vdev->vpaths[i];
2030 vxge_assert(vpath->is_configured);
2031
2032 if (!vdev->titan1) {
2033 struct vxge_hw_vp_config *vcfg;
2034 vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2035
2036 vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
2037 vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
2038 vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
2039 vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
2040 vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
2041 vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2042 vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2043 vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
2044 vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
2045 }
2046
2047 attr.vp_id = vpath->device_id;
2048 attr.fifo_attr.callback = vxge_xmit_compl;
2049 attr.fifo_attr.txdl_term = vxge_tx_term;
2050 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
2051 attr.fifo_attr.userdata = &vpath->fifo;
2052
2053 attr.ring_attr.callback = vxge_rx_1b_compl;
2054 attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2055 attr.ring_attr.rxd_term = vxge_rx_term;
2056 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
2057 attr.ring_attr.userdata = &vpath->ring;
2058
2059 vpath->ring.ndev = vdev->ndev;
2060 vpath->ring.pdev = vdev->pdev;
2061
2062 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
2063 if (status == VXGE_HW_OK) {
2064 vpath->fifo.handle =
2065 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
2066 vpath->ring.handle =
2067 (struct __vxge_hw_ring *)attr.ring_attr.userdata;
2068 vpath->fifo.tx_steering_type =
2069 vdev->config.tx_steering_type;
2070 vpath->fifo.ndev = vdev->ndev;
2071 vpath->fifo.pdev = vdev->pdev;
2072
2073 u64_stats_init(&vpath->fifo.stats.syncp);
2074 u64_stats_init(&vpath->ring.stats.syncp);
2075
2076 if (vdev->config.tx_steering_type)
2077 vpath->fifo.txq =
2078 netdev_get_tx_queue(vdev->ndev, i);
2079 else
2080 vpath->fifo.txq =
2081 netdev_get_tx_queue(vdev->ndev, 0);
2082 vpath->fifo.indicate_max_pkts =
2083 vdev->config.fifo_indicate_max_pkts;
2084 vpath->fifo.tx_vector_no = 0;
2085 vpath->ring.rx_vector_no = 0;
2086 vpath->ring.rx_hwts = vdev->rx_hwts;
2087 vpath->is_open = 1;
2088 vdev->vp_handles[i] = vpath->handle;
2089 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
2090 vdev->stats.vpaths_open++;
2091 } else {
2092 vdev->stats.vpath_open_fail++;
2093 vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
2094 "open with status: %d",
2095 vdev->ndev->name, vpath->device_id,
2096 status);
2097 vxge_close_vpaths(vdev, 0);
2098 return -EPERM;
2099 }
2100
2101 vp_id = vpath->handle->vpath->vp_id;
2102 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2103 }
2104
2105 return VXGE_HW_OK;
2106 }
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116 static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2117 {
2118 fifo->interrupt_count++;
2119 if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
2120 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2121
2122 fifo->jiffies = jiffies;
2123 if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
2124 hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
2125 hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
2126 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2127 } else if (hw_fifo->rtimer != 0) {
2128 hw_fifo->rtimer = 0;
2129 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2130 }
2131 fifo->interrupt_count = 0;
2132 }
2133 }
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144 static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2145 {
2146 ring->interrupt_count++;
2147 if (time_before(ring->jiffies + HZ / 100, jiffies)) {
2148 struct __vxge_hw_ring *hw_ring = ring->handle;
2149
2150 ring->jiffies = jiffies;
2151 if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
2152 hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
2153 hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
2154 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2155 } else if (hw_ring->rtimer != 0) {
2156 hw_ring->rtimer = 0;
2157 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2158 }
2159 ring->interrupt_count = 0;
2160 }
2161 }
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173 static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2174 {
2175 struct __vxge_hw_device *hldev;
2176 u64 reason;
2177 enum vxge_hw_status status;
2178 struct vxgedev *vdev = (struct vxgedev *)dev_id;
2179
2180 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2181
2182 hldev = pci_get_drvdata(vdev->pdev);
2183
2184 if (pci_channel_offline(vdev->pdev))
2185 return IRQ_NONE;
2186
2187 if (unlikely(!is_vxge_card_up(vdev)))
2188 return IRQ_HANDLED;
2189
2190 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
2191 if (status == VXGE_HW_OK) {
2192 vxge_hw_device_mask_all(hldev);
2193
2194 if (reason &
2195 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2196 vdev->vpaths_deployed >>
2197 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
2198
2199 vxge_hw_device_clear_tx_rx(hldev);
2200 napi_schedule(&vdev->napi);
2201 vxge_debug_intr(VXGE_TRACE,
2202 "%s:%d Exiting...", __func__, __LINE__);
2203 return IRQ_HANDLED;
2204 } else
2205 vxge_hw_device_unmask_all(hldev);
2206 } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
2207 (status == VXGE_HW_ERR_CRITICAL) ||
2208 (status == VXGE_HW_ERR_FIFO))) {
2209 vxge_hw_device_mask_all(hldev);
2210 vxge_hw_device_flush_io(hldev);
2211 return IRQ_HANDLED;
2212 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
2213 return IRQ_HANDLED;
2214
2215 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
2216 return IRQ_NONE;
2217 }
2218
2219 static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
2220 {
2221 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2222
2223 adaptive_coalesce_tx_interrupts(fifo);
2224
2225 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
2226 fifo->tx_vector_no);
2227
2228 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
2229 fifo->tx_vector_no);
2230
2231 VXGE_COMPLETE_VPATH_TX(fifo);
2232
2233 vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
2234 fifo->tx_vector_no);
2235
2236 return IRQ_HANDLED;
2237 }
2238
2239 static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
2240 {
2241 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2242
2243 adaptive_coalesce_rx_interrupts(ring);
2244
2245 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
2246 ring->rx_vector_no);
2247
2248 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
2249 ring->rx_vector_no);
2250
2251 napi_schedule(&ring->napi);
2252 return IRQ_HANDLED;
2253 }
2254
2255 static irqreturn_t
2256 vxge_alarm_msix_handle(int irq, void *dev_id)
2257 {
2258 int i;
2259 enum vxge_hw_status status;
2260 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2261 struct vxgedev *vdev = vpath->vdev;
2262 int msix_id = (vpath->handle->vpath->vp_id *
2263 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2264
2265 for (i = 0; i < vdev->no_of_vpath; i++) {
2266
2267
2268
2269
2270 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2271 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2272
2273 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2274 vdev->exec_mode);
2275 if (status == VXGE_HW_OK) {
2276 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2277 msix_id);
2278 continue;
2279 }
2280 vxge_debug_intr(VXGE_ERR,
2281 "%s: vxge_hw_vpath_alarm_process failed %x ",
2282 VXGE_DRIVER_NAME, status);
2283 }
2284 return IRQ_HANDLED;
2285 }
2286
2287 static int vxge_alloc_msix(struct vxgedev *vdev)
2288 {
2289 int j, i, ret = 0;
2290 int msix_intr_vect = 0, temp;
2291 vdev->intr_cnt = 0;
2292
2293 start:
2294
2295 vdev->intr_cnt = vdev->no_of_vpath * 2;
2296
2297
2298 vdev->intr_cnt++;
2299
2300 vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2301 GFP_KERNEL);
2302 if (!vdev->entries) {
2303 vxge_debug_init(VXGE_ERR,
2304 "%s: memory allocation failed",
2305 VXGE_DRIVER_NAME);
2306 ret = -ENOMEM;
2307 goto alloc_entries_failed;
2308 }
2309
2310 vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2311 sizeof(struct vxge_msix_entry),
2312 GFP_KERNEL);
2313 if (!vdev->vxge_entries) {
2314 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2315 VXGE_DRIVER_NAME);
2316 ret = -ENOMEM;
2317 goto alloc_vxge_entries_failed;
2318 }
2319
2320 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
2321
2322 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2323
2324
2325 vdev->entries[j].entry = msix_intr_vect;
2326 vdev->vxge_entries[j].entry = msix_intr_vect;
2327 vdev->vxge_entries[j].in_use = 0;
2328 j++;
2329
2330
2331 vdev->entries[j].entry = msix_intr_vect + 1;
2332 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2333 vdev->vxge_entries[j].in_use = 0;
2334 j++;
2335 }
2336
2337
2338 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2339 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
2340 vdev->vxge_entries[j].in_use = 0;
2341
2342 ret = pci_enable_msix_range(vdev->pdev,
2343 vdev->entries, 3, vdev->intr_cnt);
2344 if (ret < 0) {
2345 ret = -ENODEV;
2346 goto enable_msix_failed;
2347 } else if (ret < vdev->intr_cnt) {
2348 pci_disable_msix(vdev->pdev);
2349
2350 vxge_debug_init(VXGE_ERR,
2351 "%s: MSI-X enable failed for %d vectors, ret: %d",
2352 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
2353 if (max_config_vpath != VXGE_USE_DEFAULT) {
2354 ret = -ENODEV;
2355 goto enable_msix_failed;
2356 }
2357
2358 kfree(vdev->entries);
2359 kfree(vdev->vxge_entries);
2360 vdev->entries = NULL;
2361 vdev->vxge_entries = NULL;
2362
2363 temp = (ret - 1)/2;
2364 vxge_close_vpaths(vdev, temp);
2365 vdev->no_of_vpath = temp;
2366 goto start;
2367 }
2368 return 0;
2369
2370 enable_msix_failed:
2371 kfree(vdev->vxge_entries);
2372 alloc_vxge_entries_failed:
2373 kfree(vdev->entries);
2374 alloc_entries_failed:
2375 return ret;
2376 }
2377
2378 static int vxge_enable_msix(struct vxgedev *vdev)
2379 {
2380
2381 int i, ret = 0;
2382
2383 int tim_msix_id[4] = {0, 1, 0, 0};
2384
2385 vdev->intr_cnt = 0;
2386
2387
2388 ret = vxge_alloc_msix(vdev);
2389 if (!ret) {
2390 for (i = 0; i < vdev->no_of_vpath; i++) {
2391 struct vxge_vpath *vpath = &vdev->vpaths[i];
2392
2393
2394
2395
2396 vpath->ring.rx_vector_no = (vpath->device_id *
2397 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2398
2399 vpath->fifo.tx_vector_no = (vpath->device_id *
2400 VXGE_HW_VPATH_MSIX_ACTIVE);
2401
2402 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2403 VXGE_ALARM_MSIX_ID);
2404 }
2405 }
2406
2407 return ret;
2408 }
2409
2410 static void vxge_rem_msix_isr(struct vxgedev *vdev)
2411 {
2412 int intr_cnt;
2413
2414 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
2415 intr_cnt++) {
2416 if (vdev->vxge_entries[intr_cnt].in_use) {
2417 synchronize_irq(vdev->entries[intr_cnt].vector);
2418 free_irq(vdev->entries[intr_cnt].vector,
2419 vdev->vxge_entries[intr_cnt].arg);
2420 vdev->vxge_entries[intr_cnt].in_use = 0;
2421 }
2422 }
2423
2424 kfree(vdev->entries);
2425 kfree(vdev->vxge_entries);
2426 vdev->entries = NULL;
2427 vdev->vxge_entries = NULL;
2428
2429 if (vdev->config.intr_type == MSI_X)
2430 pci_disable_msix(vdev->pdev);
2431 }
2432
2433 static void vxge_rem_isr(struct vxgedev *vdev)
2434 {
2435 if (IS_ENABLED(CONFIG_PCI_MSI) &&
2436 vdev->config.intr_type == MSI_X) {
2437 vxge_rem_msix_isr(vdev);
2438 } else if (vdev->config.intr_type == INTA) {
2439 synchronize_irq(vdev->pdev->irq);
2440 free_irq(vdev->pdev->irq, vdev);
2441 }
2442 }
2443
2444 static int vxge_add_isr(struct vxgedev *vdev)
2445 {
2446 int ret = 0;
2447 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2448 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2449
2450 if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
2451 ret = vxge_enable_msix(vdev);
2452
2453 if (ret) {
2454 vxge_debug_init(VXGE_ERR,
2455 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2456 vxge_debug_init(VXGE_ERR,
2457 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2458 vdev->config.intr_type = INTA;
2459 }
2460
2461 if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
2462 for (intr_idx = 0;
2463 intr_idx < (vdev->no_of_vpath *
2464 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2465
2466 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2467 irq_req = 0;
2468
2469 switch (msix_idx) {
2470 case 0:
2471 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2472 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2473 vdev->ndev->name,
2474 vdev->entries[intr_cnt].entry,
2475 pci_fun, vp_idx);
2476 ret = request_irq(
2477 vdev->entries[intr_cnt].vector,
2478 vxge_tx_msix_handle, 0,
2479 vdev->desc[intr_cnt],
2480 &vdev->vpaths[vp_idx].fifo);
2481 vdev->vxge_entries[intr_cnt].arg =
2482 &vdev->vpaths[vp_idx].fifo;
2483 irq_req = 1;
2484 break;
2485 case 1:
2486 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2487 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2488 vdev->ndev->name,
2489 vdev->entries[intr_cnt].entry,
2490 pci_fun, vp_idx);
2491 ret = request_irq(
2492 vdev->entries[intr_cnt].vector,
2493 vxge_rx_msix_napi_handle, 0,
2494 vdev->desc[intr_cnt],
2495 &vdev->vpaths[vp_idx].ring);
2496 vdev->vxge_entries[intr_cnt].arg =
2497 &vdev->vpaths[vp_idx].ring;
2498 irq_req = 1;
2499 break;
2500 }
2501
2502 if (ret) {
2503 vxge_debug_init(VXGE_ERR,
2504 "%s: MSIX - %d Registration failed",
2505 vdev->ndev->name, intr_cnt);
2506 vxge_rem_msix_isr(vdev);
2507 vdev->config.intr_type = INTA;
2508 vxge_debug_init(VXGE_ERR,
2509 "%s: Defaulting to INTA",
2510 vdev->ndev->name);
2511 goto INTA_MODE;
2512 }
2513
2514 if (irq_req) {
2515
2516 vdev->vxge_entries[intr_cnt].in_use = 1;
2517 msix_idx += vdev->vpaths[vp_idx].device_id *
2518 VXGE_HW_VPATH_MSIX_ACTIVE;
2519 vxge_hw_vpath_msix_unmask(
2520 vdev->vpaths[vp_idx].handle,
2521 msix_idx);
2522 intr_cnt++;
2523 }
2524
2525
2526 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2527 (vp_idx < (vdev->no_of_vpath - 1)))
2528 vp_idx++;
2529 }
2530
2531 intr_cnt = vdev->no_of_vpath * 2;
2532 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2533 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2534 vdev->ndev->name,
2535 vdev->entries[intr_cnt].entry,
2536 pci_fun);
2537
2538 ret = request_irq(vdev->entries[intr_cnt].vector,
2539 vxge_alarm_msix_handle, 0,
2540 vdev->desc[intr_cnt],
2541 &vdev->vpaths[0]);
2542 if (ret) {
2543 vxge_debug_init(VXGE_ERR,
2544 "%s: MSIX - %d Registration failed",
2545 vdev->ndev->name, intr_cnt);
2546 vxge_rem_msix_isr(vdev);
2547 vdev->config.intr_type = INTA;
2548 vxge_debug_init(VXGE_ERR,
2549 "%s: Defaulting to INTA",
2550 vdev->ndev->name);
2551 goto INTA_MODE;
2552 }
2553
2554 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2555 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2556 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2557 msix_idx);
2558 vdev->vxge_entries[intr_cnt].in_use = 1;
2559 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
2560 }
2561
2562 INTA_MODE:
2563 if (vdev->config.intr_type == INTA) {
2564 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2565 "%s:vxge:INTA", vdev->ndev->name);
2566 vxge_hw_device_set_intr_type(vdev->devh,
2567 VXGE_HW_INTR_MODE_IRQLINE);
2568
2569 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2570
2571 ret = request_irq((int) vdev->pdev->irq,
2572 vxge_isr_napi,
2573 IRQF_SHARED, vdev->desc[0], vdev);
2574 if (ret) {
2575 vxge_debug_init(VXGE_ERR,
2576 "%s %s-%d: ISR registration failed",
2577 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2578 return -ENODEV;
2579 }
2580 vxge_debug_init(VXGE_TRACE,
2581 "new %s-%d line allocated",
2582 "IRQ", vdev->pdev->irq);
2583 }
2584
2585 return VXGE_HW_OK;
2586 }
2587
2588 static void vxge_poll_vp_reset(struct timer_list *t)
2589 {
2590 struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
2591 int i, j = 0;
2592
2593 for (i = 0; i < vdev->no_of_vpath; i++) {
2594 if (test_bit(i, &vdev->vp_reset)) {
2595 vxge_reset_vpath(vdev, i);
2596 j++;
2597 }
2598 }
2599 if (j && (vdev->config.intr_type != MSI_X)) {
2600 vxge_hw_device_unmask_all(vdev->devh);
2601 vxge_hw_device_flush_io(vdev->devh);
2602 }
2603
2604 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2605 }
2606
2607 static void vxge_poll_vp_lockup(struct timer_list *t)
2608 {
2609 struct vxgedev *vdev = from_timer(vdev, t, vp_lockup_timer);
2610 enum vxge_hw_status status = VXGE_HW_OK;
2611 struct vxge_vpath *vpath;
2612 struct vxge_ring *ring;
2613 int i;
2614 unsigned long rx_frms;
2615
2616 for (i = 0; i < vdev->no_of_vpath; i++) {
2617 ring = &vdev->vpaths[i].ring;
2618
2619
2620 rx_frms = READ_ONCE(ring->stats.rx_frms);
2621
2622
2623 if (ring->stats.prev_rx_frms == rx_frms) {
2624 status = vxge_hw_vpath_check_leak(ring->handle);
2625
2626
2627 if ((VXGE_HW_FAIL == status) &&
2628 (VXGE_HW_FAIL == ring->last_status)) {
2629
2630
2631 if (!test_and_set_bit(i, &vdev->vp_reset)) {
2632 vpath = &vdev->vpaths[i];
2633
2634
2635 vxge_vpath_intr_disable(vdev, i);
2636
2637
2638 netif_tx_stop_queue(vpath->fifo.txq);
2639 continue;
2640 }
2641 }
2642 }
2643 ring->stats.prev_rx_frms = rx_frms;
2644 ring->last_status = status;
2645 }
2646
2647
2648 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2649 }
2650
2651 static netdev_features_t vxge_fix_features(struct net_device *dev,
2652 netdev_features_t features)
2653 {
2654 netdev_features_t changed = dev->features ^ features;
2655
2656
2657
2658
2659
2660 if ((changed & NETIF_F_RXHASH) && netif_running(dev))
2661 features ^= NETIF_F_RXHASH;
2662
2663 return features;
2664 }
2665
2666 static int vxge_set_features(struct net_device *dev, netdev_features_t features)
2667 {
2668 struct vxgedev *vdev = netdev_priv(dev);
2669 netdev_features_t changed = dev->features ^ features;
2670
2671 if (!(changed & NETIF_F_RXHASH))
2672 return 0;
2673
2674
2675
2676 vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
2677 if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
2678 dev->features = features ^ NETIF_F_RXHASH;
2679 vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
2680 return -EIO;
2681 }
2682
2683 return 0;
2684 }
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696 static int vxge_open(struct net_device *dev)
2697 {
2698 enum vxge_hw_status status;
2699 struct vxgedev *vdev;
2700 struct __vxge_hw_device *hldev;
2701 struct vxge_vpath *vpath;
2702 int ret = 0;
2703 int i;
2704 u64 val64;
2705
2706 vxge_debug_entryexit(VXGE_TRACE,
2707 "%s: %s:%d", dev->name, __func__, __LINE__);
2708
2709 vdev = netdev_priv(dev);
2710 hldev = pci_get_drvdata(vdev->pdev);
2711
2712
2713
2714 netif_carrier_off(dev);
2715
2716
2717 status = vxge_open_vpaths(vdev);
2718 if (status != VXGE_HW_OK) {
2719 vxge_debug_init(VXGE_ERR,
2720 "%s: fatal: Vpath open failed", vdev->ndev->name);
2721 ret = -EPERM;
2722 goto out0;
2723 }
2724
2725 vdev->mtu = dev->mtu;
2726
2727 status = vxge_add_isr(vdev);
2728 if (status != VXGE_HW_OK) {
2729 vxge_debug_init(VXGE_ERR,
2730 "%s: fatal: ISR add failed", dev->name);
2731 ret = -EPERM;
2732 goto out1;
2733 }
2734
2735 if (vdev->config.intr_type != MSI_X) {
2736 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2737 vdev->config.napi_weight);
2738 napi_enable(&vdev->napi);
2739 for (i = 0; i < vdev->no_of_vpath; i++) {
2740 vpath = &vdev->vpaths[i];
2741 vpath->ring.napi_p = &vdev->napi;
2742 }
2743 } else {
2744 for (i = 0; i < vdev->no_of_vpath; i++) {
2745 vpath = &vdev->vpaths[i];
2746 netif_napi_add(dev, &vpath->ring.napi,
2747 vxge_poll_msix, vdev->config.napi_weight);
2748 napi_enable(&vpath->ring.napi);
2749 vpath->ring.napi_p = &vpath->ring.napi;
2750 }
2751 }
2752
2753
2754 if (vdev->config.rth_steering) {
2755 status = vxge_rth_configure(vdev);
2756 if (status != VXGE_HW_OK) {
2757 vxge_debug_init(VXGE_ERR,
2758 "%s: fatal: RTH configuration failed",
2759 dev->name);
2760 ret = -EPERM;
2761 goto out2;
2762 }
2763 }
2764 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2765 hldev->config.rth_en ? "enabled" : "disabled");
2766
2767 for (i = 0; i < vdev->no_of_vpath; i++) {
2768 vpath = &vdev->vpaths[i];
2769
2770
2771 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
2772 if (status != VXGE_HW_OK) {
2773 vxge_debug_init(VXGE_ERR,
2774 "%s: fatal: can not set new MTU", dev->name);
2775 ret = -EPERM;
2776 goto out2;
2777 }
2778 }
2779
2780 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2781 vxge_debug_init(vdev->level_trace,
2782 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2783 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2784
2785
2786
2787
2788 if (vdev->all_multi_flg) {
2789 for (i = 0; i < vdev->no_of_vpath; i++) {
2790 vpath = &vdev->vpaths[i];
2791 vxge_restore_vpath_mac_addr(vpath);
2792 vxge_restore_vpath_vid_table(vpath);
2793
2794 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2795 if (status != VXGE_HW_OK)
2796 vxge_debug_init(VXGE_ERR,
2797 "%s:%d Enabling multicast failed",
2798 __func__, __LINE__);
2799 }
2800 }
2801
2802
2803
2804
2805
2806 val64 = 0;
2807 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2808 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2809
2810 vxge_hw_mgmt_reg_write(vdev->devh,
2811 vxge_hw_mgmt_reg_type_mrpcim,
2812 0,
2813 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2814 rxmac_authorize_all_addr),
2815 val64);
2816
2817 vxge_hw_mgmt_reg_write(vdev->devh,
2818 vxge_hw_mgmt_reg_type_mrpcim,
2819 0,
2820 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2821 rxmac_authorize_all_vid),
2822 val64);
2823
2824 vxge_set_multicast(dev);
2825
2826
2827 for (i = 0; i < vdev->no_of_vpath; i++) {
2828 vpath = &vdev->vpaths[i];
2829 status = vxge_hw_vpath_bcast_enable(vpath->handle);
2830 if (status != VXGE_HW_OK)
2831 vxge_debug_init(VXGE_ERR,
2832 "%s : Can not enable bcast for vpath "
2833 "id %d", dev->name, i);
2834 if (vdev->config.addr_learn_en) {
2835 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2836 if (status != VXGE_HW_OK)
2837 vxge_debug_init(VXGE_ERR,
2838 "%s : Can not enable mcast for vpath "
2839 "id %d", dev->name, i);
2840 }
2841 }
2842
2843 vxge_hw_device_setpause_data(vdev->devh, 0,
2844 vdev->config.tx_pause_enable,
2845 vdev->config.rx_pause_enable);
2846
2847 if (vdev->vp_reset_timer.function == NULL)
2848 vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset,
2849 HZ / 2);
2850
2851
2852 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2853 vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup,
2854 HZ / 2);
2855
2856 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2857
2858 smp_wmb();
2859
2860 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2861 netif_carrier_on(vdev->ndev);
2862 netdev_notice(vdev->ndev, "Link Up\n");
2863 vdev->stats.link_up++;
2864 }
2865
2866 vxge_hw_device_intr_enable(vdev->devh);
2867
2868 smp_wmb();
2869
2870 for (i = 0; i < vdev->no_of_vpath; i++) {
2871 vpath = &vdev->vpaths[i];
2872
2873 vxge_hw_vpath_enable(vpath->handle);
2874 smp_wmb();
2875 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
2876 }
2877
2878 netif_tx_start_all_queues(vdev->ndev);
2879
2880
2881 vxge_config_ci_for_tti_rti(vdev);
2882
2883 goto out0;
2884
2885 out2:
2886 vxge_rem_isr(vdev);
2887
2888
2889 if (vdev->config.intr_type != MSI_X)
2890 napi_disable(&vdev->napi);
2891 else {
2892 for (i = 0; i < vdev->no_of_vpath; i++)
2893 napi_disable(&vdev->vpaths[i].ring.napi);
2894 }
2895
2896 out1:
2897 vxge_close_vpaths(vdev, 0);
2898 out0:
2899 vxge_debug_entryexit(VXGE_TRACE,
2900 "%s: %s:%d Exiting...",
2901 dev->name, __func__, __LINE__);
2902 return ret;
2903 }
2904
2905
2906 static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2907 {
2908
2909 struct list_head *entry, *next;
2910 if (list_empty(&vpath->mac_addr_list))
2911 return;
2912
2913 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
2914 list_del(entry);
2915 kfree((struct vxge_mac_addrs *)entry);
2916 }
2917 }
2918
2919 static void vxge_napi_del_all(struct vxgedev *vdev)
2920 {
2921 int i;
2922 if (vdev->config.intr_type != MSI_X)
2923 netif_napi_del(&vdev->napi);
2924 else {
2925 for (i = 0; i < vdev->no_of_vpath; i++)
2926 netif_napi_del(&vdev->vpaths[i].ring.napi);
2927 }
2928 }
2929
2930 static int do_vxge_close(struct net_device *dev, int do_io)
2931 {
2932 enum vxge_hw_status status;
2933 struct vxgedev *vdev;
2934 struct __vxge_hw_device *hldev;
2935 int i;
2936 u64 val64, vpath_vector;
2937 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2938 dev->name, __func__, __LINE__);
2939
2940 vdev = netdev_priv(dev);
2941 hldev = pci_get_drvdata(vdev->pdev);
2942
2943 if (unlikely(!is_vxge_card_up(vdev)))
2944 return 0;
2945
2946
2947
2948 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2949 msleep(50);
2950
2951 if (do_io) {
2952
2953 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
2954 status = vxge_hw_mgmt_reg_read(vdev->devh,
2955 vxge_hw_mgmt_reg_type_mrpcim,
2956 0,
2957 (ulong)offsetof(
2958 struct vxge_hw_mrpcim_reg,
2959 rts_mgr_cbasin_cfg),
2960 &val64);
2961 if (status == VXGE_HW_OK) {
2962 val64 &= ~vpath_vector;
2963 status = vxge_hw_mgmt_reg_write(vdev->devh,
2964 vxge_hw_mgmt_reg_type_mrpcim,
2965 0,
2966 (ulong)offsetof(
2967 struct vxge_hw_mrpcim_reg,
2968 rts_mgr_cbasin_cfg),
2969 val64);
2970 }
2971
2972
2973 vxge_hw_mgmt_reg_write(vdev->devh,
2974 vxge_hw_mgmt_reg_type_mrpcim,
2975 0,
2976 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2977 rxmac_authorize_all_addr),
2978 0);
2979
2980 vxge_hw_mgmt_reg_write(vdev->devh,
2981 vxge_hw_mgmt_reg_type_mrpcim,
2982 0,
2983 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2984 rxmac_authorize_all_vid),
2985 0);
2986
2987 smp_wmb();
2988 }
2989
2990 if (vdev->titan1)
2991 del_timer_sync(&vdev->vp_lockup_timer);
2992
2993 del_timer_sync(&vdev->vp_reset_timer);
2994
2995 if (do_io)
2996 vxge_hw_device_wait_receive_idle(hldev);
2997
2998 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2999
3000
3001 if (vdev->config.intr_type != MSI_X)
3002 napi_disable(&vdev->napi);
3003 else {
3004 for (i = 0; i < vdev->no_of_vpath; i++)
3005 napi_disable(&vdev->vpaths[i].ring.napi);
3006 }
3007
3008 netif_carrier_off(vdev->ndev);
3009 netdev_notice(vdev->ndev, "Link Down\n");
3010 netif_tx_stop_all_queues(vdev->ndev);
3011
3012
3013 if (do_io)
3014 vxge_hw_device_intr_disable(vdev->devh);
3015
3016 vxge_rem_isr(vdev);
3017
3018 vxge_napi_del_all(vdev);
3019
3020 if (do_io)
3021 vxge_reset_all_vpaths(vdev);
3022
3023 vxge_close_vpaths(vdev, 0);
3024
3025 vxge_debug_entryexit(VXGE_TRACE,
3026 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
3027
3028 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
3029
3030 return 0;
3031 }
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044 static int vxge_close(struct net_device *dev)
3045 {
3046 do_vxge_close(dev, 1);
3047 return 0;
3048 }
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058 static int vxge_change_mtu(struct net_device *dev, int new_mtu)
3059 {
3060 struct vxgedev *vdev = netdev_priv(dev);
3061
3062 vxge_debug_entryexit(vdev->level_trace,
3063 "%s:%d", __func__, __LINE__);
3064
3065
3066 if (unlikely(!is_vxge_card_up(vdev))) {
3067
3068 dev->mtu = new_mtu;
3069 vxge_debug_init(vdev->level_err,
3070 "%s", "device is down on MTU change");
3071 return 0;
3072 }
3073
3074 vxge_debug_init(vdev->level_trace,
3075 "trying to apply new MTU %d", new_mtu);
3076
3077 if (vxge_close(dev))
3078 return -EIO;
3079
3080 dev->mtu = new_mtu;
3081 vdev->mtu = new_mtu;
3082
3083 if (vxge_open(dev))
3084 return -EIO;
3085
3086 vxge_debug_init(vdev->level_trace,
3087 "%s: MTU changed to %d", vdev->ndev->name, new_mtu);
3088
3089 vxge_debug_entryexit(vdev->level_trace,
3090 "%s:%d Exiting...", __func__, __LINE__);
3091
3092 return 0;
3093 }
3094
3095
3096
3097
3098
3099
3100
3101 static void
3102 vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
3103 {
3104 struct vxgedev *vdev = netdev_priv(dev);
3105 int k;
3106
3107
3108 for (k = 0; k < vdev->no_of_vpath; k++) {
3109 struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
3110 struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
3111 unsigned int start;
3112 u64 packets, bytes, multicast;
3113
3114 do {
3115 start = u64_stats_fetch_begin_irq(&rxstats->syncp);
3116
3117 packets = rxstats->rx_frms;
3118 multicast = rxstats->rx_mcast;
3119 bytes = rxstats->rx_bytes;
3120 } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
3121
3122 net_stats->rx_packets += packets;
3123 net_stats->rx_bytes += bytes;
3124 net_stats->multicast += multicast;
3125
3126 net_stats->rx_errors += rxstats->rx_errors;
3127 net_stats->rx_dropped += rxstats->rx_dropped;
3128
3129 do {
3130 start = u64_stats_fetch_begin_irq(&txstats->syncp);
3131
3132 packets = txstats->tx_frms;
3133 bytes = txstats->tx_bytes;
3134 } while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
3135
3136 net_stats->tx_packets += packets;
3137 net_stats->tx_bytes += bytes;
3138 net_stats->tx_errors += txstats->tx_errors;
3139 }
3140 }
3141
3142 static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
3143 {
3144 enum vxge_hw_status status;
3145 u64 val64;
3146
3147
3148
3149
3150
3151
3152 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3153 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3154 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3155
3156 status = vxge_hw_mgmt_reg_write(devh,
3157 vxge_hw_mgmt_reg_type_mrpcim,
3158 0,
3159 offsetof(struct vxge_hw_mrpcim_reg,
3160 xmac_timestamp),
3161 val64);
3162 vxge_hw_device_flush_io(devh);
3163 devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
3164 return status;
3165 }
3166
3167 static int vxge_hwtstamp_set(struct vxgedev *vdev, void __user *data)
3168 {
3169 struct hwtstamp_config config;
3170 int i;
3171
3172 if (copy_from_user(&config, data, sizeof(config)))
3173 return -EFAULT;
3174
3175
3176 if (config.flags)
3177 return -EINVAL;
3178
3179
3180 switch (config.tx_type) {
3181 case HWTSTAMP_TX_OFF:
3182 break;
3183 case HWTSTAMP_TX_ON:
3184 default:
3185 return -ERANGE;
3186 }
3187
3188 switch (config.rx_filter) {
3189 case HWTSTAMP_FILTER_NONE:
3190 vdev->rx_hwts = 0;
3191 config.rx_filter = HWTSTAMP_FILTER_NONE;
3192 break;
3193
3194 case HWTSTAMP_FILTER_ALL:
3195 case HWTSTAMP_FILTER_SOME:
3196 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3197 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3198 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3199 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3200 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3201 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3202 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3203 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3204 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3205 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3206 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3207 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3208 case HWTSTAMP_FILTER_NTP_ALL:
3209 if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
3210 return -EFAULT;
3211
3212 vdev->rx_hwts = 1;
3213 config.rx_filter = HWTSTAMP_FILTER_ALL;
3214 break;
3215
3216 default:
3217 return -ERANGE;
3218 }
3219
3220 for (i = 0; i < vdev->no_of_vpath; i++)
3221 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3222
3223 if (copy_to_user(data, &config, sizeof(config)))
3224 return -EFAULT;
3225
3226 return 0;
3227 }
3228
3229 static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
3230 {
3231 struct hwtstamp_config config;
3232
3233 config.flags = 0;
3234 config.tx_type = HWTSTAMP_TX_OFF;
3235 config.rx_filter = (vdev->rx_hwts ?
3236 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
3237
3238 if (copy_to_user(data, &config, sizeof(config)))
3239 return -EFAULT;
3240
3241 return 0;
3242 }
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254 static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3255 {
3256 struct vxgedev *vdev = netdev_priv(dev);
3257
3258 switch (cmd) {
3259 case SIOCSHWTSTAMP:
3260 return vxge_hwtstamp_set(vdev, rq->ifr_data);
3261 case SIOCGHWTSTAMP:
3262 return vxge_hwtstamp_get(vdev, rq->ifr_data);
3263 default:
3264 return -EOPNOTSUPP;
3265 }
3266 }
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276 static void vxge_tx_watchdog(struct net_device *dev)
3277 {
3278 struct vxgedev *vdev;
3279
3280 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3281
3282 vdev = netdev_priv(dev);
3283
3284 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
3285
3286 schedule_work(&vdev->reset_task);
3287 vxge_debug_entryexit(VXGE_TRACE,
3288 "%s:%d Exiting...", __func__, __LINE__);
3289 }
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299 static int
3300 vxge_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
3301 {
3302 struct vxgedev *vdev = netdev_priv(dev);
3303 struct vxge_vpath *vpath;
3304 int vp_id;
3305
3306
3307 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3308 vpath = &vdev->vpaths[vp_id];
3309 if (!vpath->is_open)
3310 continue;
3311 vxge_hw_vpath_vid_add(vpath->handle, vid);
3312 }
3313 set_bit(vid, vdev->active_vlans);
3314 return 0;
3315 }
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325 static int
3326 vxge_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
3327 {
3328 struct vxgedev *vdev = netdev_priv(dev);
3329 struct vxge_vpath *vpath;
3330 int vp_id;
3331
3332 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3333
3334
3335 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
3336 vpath = &vdev->vpaths[vp_id];
3337 if (!vpath->is_open)
3338 continue;
3339 vxge_hw_vpath_vid_delete(vpath->handle, vid);
3340 }
3341 vxge_debug_entryexit(VXGE_TRACE,
3342 "%s:%d Exiting...", __func__, __LINE__);
3343 clear_bit(vid, vdev->active_vlans);
3344 return 0;
3345 }
3346
3347 static const struct net_device_ops vxge_netdev_ops = {
3348 .ndo_open = vxge_open,
3349 .ndo_stop = vxge_close,
3350 .ndo_get_stats64 = vxge_get_stats64,
3351 .ndo_start_xmit = vxge_xmit,
3352 .ndo_validate_addr = eth_validate_addr,
3353 .ndo_set_rx_mode = vxge_set_multicast,
3354 .ndo_do_ioctl = vxge_ioctl,
3355 .ndo_set_mac_address = vxge_set_mac_addr,
3356 .ndo_change_mtu = vxge_change_mtu,
3357 .ndo_fix_features = vxge_fix_features,
3358 .ndo_set_features = vxge_set_features,
3359 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3360 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3361 .ndo_tx_timeout = vxge_tx_watchdog,
3362 #ifdef CONFIG_NET_POLL_CONTROLLER
3363 .ndo_poll_controller = vxge_netpoll,
3364 #endif
3365 };
3366
3367 static int vxge_device_register(struct __vxge_hw_device *hldev,
3368 struct vxge_config *config, int high_dma,
3369 int no_of_vpath, struct vxgedev **vdev_out)
3370 {
3371 struct net_device *ndev;
3372 enum vxge_hw_status status = VXGE_HW_OK;
3373 struct vxgedev *vdev;
3374 int ret = 0, no_of_queue = 1;
3375 u64 stat;
3376
3377 *vdev_out = NULL;
3378 if (config->tx_steering_type)
3379 no_of_queue = no_of_vpath;
3380
3381 ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
3382 no_of_queue);
3383 if (ndev == NULL) {
3384 vxge_debug_init(
3385 vxge_hw_device_trace_level_get(hldev),
3386 "%s : device allocation failed", __func__);
3387 ret = -ENODEV;
3388 goto _out0;
3389 }
3390
3391 vxge_debug_entryexit(
3392 vxge_hw_device_trace_level_get(hldev),
3393 "%s: %s:%d Entering...",
3394 ndev->name, __func__, __LINE__);
3395
3396 vdev = netdev_priv(ndev);
3397 memset(vdev, 0, sizeof(struct vxgedev));
3398
3399 vdev->ndev = ndev;
3400 vdev->devh = hldev;
3401 vdev->pdev = hldev->pdev;
3402 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3403 vdev->rx_hwts = 0;
3404 vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
3405
3406 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3407
3408 ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
3409 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3410 NETIF_F_TSO | NETIF_F_TSO6 |
3411 NETIF_F_HW_VLAN_CTAG_TX;
3412 if (vdev->config.rth_steering != NO_STEERING)
3413 ndev->hw_features |= NETIF_F_RXHASH;
3414
3415 ndev->features |= ndev->hw_features |
3416 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3417
3418
3419 ndev->netdev_ops = &vxge_netdev_ops;
3420
3421 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3422 INIT_WORK(&vdev->reset_task, vxge_reset);
3423
3424 vxge_initialize_ethtool_ops(ndev);
3425
3426
3427 vdev->vpaths = kcalloc(no_of_vpath, sizeof(struct vxge_vpath),
3428 GFP_KERNEL);
3429 if (!vdev->vpaths) {
3430 vxge_debug_init(VXGE_ERR,
3431 "%s: vpath memory allocation failed",
3432 vdev->ndev->name);
3433 ret = -ENOMEM;
3434 goto _out1;
3435 }
3436
3437 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3438 "%s : checksumming enabled", __func__);
3439
3440 if (high_dma) {
3441 ndev->features |= NETIF_F_HIGHDMA;
3442 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3443 "%s : using High DMA", __func__);
3444 }
3445
3446
3447 ndev->min_mtu = VXGE_HW_MIN_MTU;
3448 ndev->max_mtu = VXGE_HW_MAX_MTU;
3449
3450 ret = register_netdev(ndev);
3451 if (ret) {
3452 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3453 "%s: %s : device registration failed!",
3454 ndev->name, __func__);
3455 goto _out2;
3456 }
3457
3458
3459 ndev->addr_len = ETH_ALEN;
3460
3461
3462
3463
3464
3465 netif_carrier_off(ndev);
3466
3467 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3468 "%s: Ethernet device registered",
3469 ndev->name);
3470
3471 hldev->ndev = ndev;
3472 *vdev_out = vdev;
3473
3474
3475 status = vxge_hw_mrpcim_stats_access(
3476 hldev,
3477 VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
3478 0,
3479 0,
3480 &stat);
3481
3482 if (status == VXGE_HW_ERR_PRIVILEGED_OPERATION)
3483 vxge_debug_init(
3484 vxge_hw_device_trace_level_get(hldev),
3485 "%s: device stats clear returns"
3486 "VXGE_HW_ERR_PRIVILEGED_OPERATION", ndev->name);
3487
3488 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
3489 "%s: %s:%d Exiting...",
3490 ndev->name, __func__, __LINE__);
3491
3492 return ret;
3493 _out2:
3494 kfree(vdev->vpaths);
3495 _out1:
3496 free_netdev(ndev);
3497 _out0:
3498 return ret;
3499 }
3500
3501
3502
3503
3504
3505
3506 static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3507 {
3508 struct vxgedev *vdev;
3509 struct net_device *dev;
3510 char buf[IFNAMSIZ];
3511
3512 dev = hldev->ndev;
3513 vdev = netdev_priv(dev);
3514
3515 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3516 __func__, __LINE__);
3517
3518 strlcpy(buf, dev->name, IFNAMSIZ);
3519
3520 flush_work(&vdev->reset_task);
3521
3522
3523 unregister_netdev(dev);
3524
3525 kfree(vdev->vpaths);
3526
3527
3528 free_netdev(dev);
3529
3530 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3531 buf);
3532 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3533 __func__, __LINE__);
3534 }
3535
3536
3537
3538
3539
3540
3541
3542 static void
3543 vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3544 enum vxge_hw_event type, u64 vp_id)
3545 {
3546 struct net_device *dev = hldev->ndev;
3547 struct vxgedev *vdev = netdev_priv(dev);
3548 struct vxge_vpath *vpath = NULL;
3549 int vpath_idx;
3550
3551 vxge_debug_entryexit(vdev->level_trace,
3552 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3553
3554
3555
3556
3557 vdev->cric_err_event = type;
3558
3559 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
3560 vpath = &vdev->vpaths[vpath_idx];
3561 if (vpath->device_id == vp_id)
3562 break;
3563 }
3564
3565 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
3566 if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
3567 vxge_debug_init(VXGE_ERR,
3568 "%s: Slot is frozen", vdev->ndev->name);
3569 } else if (type == VXGE_HW_EVENT_SERR) {
3570 vxge_debug_init(VXGE_ERR,
3571 "%s: Encountered Serious Error",
3572 vdev->ndev->name);
3573 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
3574 vxge_debug_init(VXGE_ERR,
3575 "%s: Encountered Critical Error",
3576 vdev->ndev->name);
3577 }
3578
3579 if ((type == VXGE_HW_EVENT_SERR) ||
3580 (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
3581 if (unlikely(vdev->exec_mode))
3582 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3583 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
3584 vxge_hw_device_mask_all(hldev);
3585 if (unlikely(vdev->exec_mode))
3586 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3587 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
3588 (type == VXGE_HW_EVENT_VPATH_ERR)) {
3589
3590 if (unlikely(vdev->exec_mode))
3591 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
3592 else {
3593
3594 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {
3595
3596
3597 vxge_vpath_intr_disable(vdev, vpath_idx);
3598
3599
3600 netif_tx_stop_queue(vpath->fifo.txq);
3601 }
3602 }
3603 }
3604
3605 vxge_debug_entryexit(vdev->level_trace,
3606 "%s: %s:%d Exiting...",
3607 vdev->ndev->name, __func__, __LINE__);
3608 }
3609
3610 static void verify_bandwidth(void)
3611 {
3612 int i, band_width, total = 0, equal_priority = 0;
3613
3614
3615 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3616 if (bw_percentage[i] == 0) {
3617 equal_priority = 1;
3618 break;
3619 }
3620 }
3621
3622 if (!equal_priority) {
3623
3624 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3625 if (bw_percentage[i] == 0xFF)
3626 break;
3627
3628 total += bw_percentage[i];
3629 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
3630 equal_priority = 1;
3631 break;
3632 }
3633 }
3634 }
3635
3636 if (!equal_priority) {
3637
3638 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
3639 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
3640
3641 band_width =
3642 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
3643 (VXGE_HW_MAX_VIRTUAL_PATHS - i);
3644 if (band_width < 2)
3645 equal_priority = 1;
3646 else {
3647 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
3648 i++)
3649 bw_percentage[i] =
3650 band_width;
3651 }
3652 }
3653 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
3654 equal_priority = 1;
3655 }
3656
3657 if (equal_priority) {
3658 vxge_debug_init(VXGE_ERR,
3659 "%s: Assigning equal bandwidth to all the vpaths",
3660 VXGE_DRIVER_NAME);
3661 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
3662 VXGE_HW_MAX_VIRTUAL_PATHS;
3663 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3664 bw_percentage[i] = bw_percentage[0];
3665 }
3666 }
3667
3668
3669
3670
3671 static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
3672 u64 vpath_mask, struct vxge_config *config_param)
3673 {
3674 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3675 u32 txdl_size, txdl_per_memblock;
3676
3677 temp = driver_config->vpath_per_dev;
3678 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
3679 (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
3680
3681 if (driver_config->g_no_cpus == -1)
3682 return 0;
3683
3684 if (!driver_config->g_no_cpus)
3685 driver_config->g_no_cpus =
3686 netif_get_num_default_rss_queues();
3687
3688 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
3689 if (!driver_config->vpath_per_dev)
3690 driver_config->vpath_per_dev = 1;
3691
3692 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3693 if (!vxge_bVALn(vpath_mask, i, 1))
3694 continue;
3695 else
3696 default_no_vpath++;
3697 if (default_no_vpath < driver_config->vpath_per_dev)
3698 driver_config->vpath_per_dev = default_no_vpath;
3699
3700 driver_config->g_no_cpus = driver_config->g_no_cpus -
3701 (driver_config->vpath_per_dev * 2);
3702 if (driver_config->g_no_cpus <= 0)
3703 driver_config->g_no_cpus = -1;
3704 }
3705
3706 if (driver_config->vpath_per_dev == 1) {
3707 vxge_debug_ll_config(VXGE_TRACE,
3708 "%s: Disable tx and rx steering, "
3709 "as single vpath is configured", VXGE_DRIVER_NAME);
3710 config_param->rth_steering = NO_STEERING;
3711 config_param->tx_steering_type = NO_STEERING;
3712 device_config->rth_en = 0;
3713 }
3714
3715
3716 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3717 device_config->vp_config[i].min_bandwidth = bw_percentage[i];
3718
3719 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3720 device_config->vp_config[i].vp_id = i;
3721 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
3722 if (no_of_vpaths < driver_config->vpath_per_dev) {
3723 if (!vxge_bVALn(vpath_mask, i, 1)) {
3724 vxge_debug_ll_config(VXGE_TRACE,
3725 "%s: vpath: %d is not available",
3726 VXGE_DRIVER_NAME, i);
3727 continue;
3728 } else {
3729 vxge_debug_ll_config(VXGE_TRACE,
3730 "%s: vpath: %d available",
3731 VXGE_DRIVER_NAME, i);
3732 no_of_vpaths++;
3733 }
3734 } else {
3735 vxge_debug_ll_config(VXGE_TRACE,
3736 "%s: vpath: %d is not configured, "
3737 "max_config_vpath exceeded",
3738 VXGE_DRIVER_NAME, i);
3739 break;
3740 }
3741
3742
3743 device_config->vp_config[i].fifo.enable =
3744 VXGE_HW_FIFO_ENABLE;
3745 device_config->vp_config[i].fifo.max_frags =
3746 MAX_SKB_FRAGS + 1;
3747 device_config->vp_config[i].fifo.memblock_size =
3748 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3749
3750 txdl_size = device_config->vp_config[i].fifo.max_frags *
3751 sizeof(struct vxge_hw_fifo_txd);
3752 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3753
3754 device_config->vp_config[i].fifo.fifo_blocks =
3755 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;
3756
3757 device_config->vp_config[i].fifo.intr =
3758 VXGE_HW_FIFO_QUEUE_INTR_DISABLE;
3759
3760
3761 device_config->vp_config[i].tti.intr_enable =
3762 VXGE_HW_TIM_INTR_ENABLE;
3763
3764 device_config->vp_config[i].tti.btimer_val =
3765 (VXGE_TTI_BTIMER_VAL * 1000) / 272;
3766
3767 device_config->vp_config[i].tti.timer_ac_en =
3768 VXGE_HW_TIM_TIMER_AC_ENABLE;
3769
3770
3771
3772
3773 device_config->vp_config[i].tti.timer_ci_en =
3774 VXGE_HW_TIM_TIMER_CI_DISABLE;
3775
3776 device_config->vp_config[i].tti.timer_ri_en =
3777 VXGE_HW_TIM_TIMER_RI_DISABLE;
3778
3779 device_config->vp_config[i].tti.util_sel =
3780 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;
3781
3782 device_config->vp_config[i].tti.ltimer_val =
3783 (VXGE_TTI_LTIMER_VAL * 1000) / 272;
3784
3785 device_config->vp_config[i].tti.rtimer_val =
3786 (VXGE_TTI_RTIMER_VAL * 1000) / 272;
3787
3788 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
3789 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
3790 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
3791 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
3792 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
3793 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
3794 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;
3795
3796
3797 device_config->vp_config[i].ring.enable =
3798 VXGE_HW_RING_ENABLE;
3799
3800 device_config->vp_config[i].ring.ring_blocks =
3801 VXGE_HW_DEF_RING_BLOCKS;
3802
3803 device_config->vp_config[i].ring.buffer_mode =
3804 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3805
3806 device_config->vp_config[i].ring.rxds_limit =
3807 VXGE_HW_DEF_RING_RXDS_LIMIT;
3808
3809 device_config->vp_config[i].ring.scatter_mode =
3810 VXGE_HW_RING_SCATTER_MODE_A;
3811
3812
3813 device_config->vp_config[i].rti.intr_enable =
3814 VXGE_HW_TIM_INTR_ENABLE;
3815
3816 device_config->vp_config[i].rti.btimer_val =
3817 (VXGE_RTI_BTIMER_VAL * 1000)/272;
3818
3819 device_config->vp_config[i].rti.timer_ac_en =
3820 VXGE_HW_TIM_TIMER_AC_ENABLE;
3821
3822 device_config->vp_config[i].rti.timer_ci_en =
3823 VXGE_HW_TIM_TIMER_CI_DISABLE;
3824
3825 device_config->vp_config[i].rti.timer_ri_en =
3826 VXGE_HW_TIM_TIMER_RI_DISABLE;
3827
3828 device_config->vp_config[i].rti.util_sel =
3829 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;
3830
3831 device_config->vp_config[i].rti.urange_a =
3832 RTI_RX_URANGE_A;
3833 device_config->vp_config[i].rti.urange_b =
3834 RTI_RX_URANGE_B;
3835 device_config->vp_config[i].rti.urange_c =
3836 RTI_RX_URANGE_C;
3837 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
3838 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
3839 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
3840 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;
3841
3842 device_config->vp_config[i].rti.rtimer_val =
3843 (VXGE_RTI_RTIMER_VAL * 1000) / 272;
3844
3845 device_config->vp_config[i].rti.ltimer_val =
3846 (VXGE_RTI_LTIMER_VAL * 1000) / 272;
3847
3848 device_config->vp_config[i].rpa_strip_vlan_tag =
3849 vlan_tag_strip;
3850 }
3851
3852 driver_config->vpath_per_dev = temp;
3853 return no_of_vpaths;
3854 }
3855
3856
3857 static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
3858 int *intr_type)
3859 {
3860
3861 device_config->dma_blockpool_initial =
3862 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
3863
3864 device_config->dma_blockpool_max =
3865 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
3866
3867 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
3868 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
3869
3870 if (!IS_ENABLED(CONFIG_PCI_MSI)) {
3871 vxge_debug_init(VXGE_ERR,
3872 "%s: This Kernel does not support "
3873 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
3874 *intr_type = INTA;
3875 }
3876
3877
3878 switch (*intr_type) {
3879 case INTA:
3880 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
3881 break;
3882
3883 case MSI_X:
3884 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
3885 break;
3886 }
3887
3888
3889 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3890
3891
3892 device_config->rts_mac_en = addr_learn_en;
3893
3894
3895 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;
3896
3897 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3898 __func__);
3899 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3900 device_config->intr_mode);
3901 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3902 device_config->device_poll_millis);
3903 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3904 device_config->rth_en);
3905 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
3906 device_config->rth_it_type);
3907 }
3908
3909 static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3910 {
3911 int i;
3912
3913 vxge_debug_init(VXGE_TRACE,
3914 "%s: %d Vpath(s) opened",
3915 vdev->ndev->name, vdev->no_of_vpath);
3916
3917 switch (vdev->config.intr_type) {
3918 case INTA:
3919 vxge_debug_init(VXGE_TRACE,
3920 "%s: Interrupt type INTA", vdev->ndev->name);
3921 break;
3922
3923 case MSI_X:
3924 vxge_debug_init(VXGE_TRACE,
3925 "%s: Interrupt type MSI-X", vdev->ndev->name);
3926 break;
3927 }
3928
3929 if (vdev->config.rth_steering) {
3930 vxge_debug_init(VXGE_TRACE,
3931 "%s: RTH steering enabled for TCP_IPV4",
3932 vdev->ndev->name);
3933 } else {
3934 vxge_debug_init(VXGE_TRACE,
3935 "%s: RTH steering disabled", vdev->ndev->name);
3936 }
3937
3938 switch (vdev->config.tx_steering_type) {
3939 case NO_STEERING:
3940 vxge_debug_init(VXGE_TRACE,
3941 "%s: Tx steering disabled", vdev->ndev->name);
3942 break;
3943 case TX_PRIORITY_STEERING:
3944 vxge_debug_init(VXGE_TRACE,
3945 "%s: Unsupported tx steering option",
3946 vdev->ndev->name);
3947 vxge_debug_init(VXGE_TRACE,
3948 "%s: Tx steering disabled", vdev->ndev->name);
3949 vdev->config.tx_steering_type = 0;
3950 break;
3951 case TX_VLAN_STEERING:
3952 vxge_debug_init(VXGE_TRACE,
3953 "%s: Unsupported tx steering option",
3954 vdev->ndev->name);
3955 vxge_debug_init(VXGE_TRACE,
3956 "%s: Tx steering disabled", vdev->ndev->name);
3957 vdev->config.tx_steering_type = 0;
3958 break;
3959 case TX_MULTIQ_STEERING:
3960 vxge_debug_init(VXGE_TRACE,
3961 "%s: Tx multiqueue steering enabled",
3962 vdev->ndev->name);
3963 break;
3964 case TX_PORT_STEERING:
3965 vxge_debug_init(VXGE_TRACE,
3966 "%s: Tx port steering enabled",
3967 vdev->ndev->name);
3968 break;
3969 default:
3970 vxge_debug_init(VXGE_ERR,
3971 "%s: Unsupported tx steering type",
3972 vdev->ndev->name);
3973 vxge_debug_init(VXGE_TRACE,
3974 "%s: Tx steering disabled", vdev->ndev->name);
3975 vdev->config.tx_steering_type = 0;
3976 }
3977
3978 if (vdev->config.addr_learn_en)
3979 vxge_debug_init(VXGE_TRACE,
3980 "%s: MAC Address learning enabled", vdev->ndev->name);
3981
3982 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3983 if (!vxge_bVALn(vpath_mask, i, 1))
3984 continue;
3985 vxge_debug_ll_config(VXGE_TRACE,
3986 "%s: MTU size - %d", vdev->ndev->name,
3987 ((vdev->devh))->
3988 config.vp_config[i].mtu);
3989 vxge_debug_init(VXGE_TRACE,
3990 "%s: VLAN tag stripping %s", vdev->ndev->name,
3991 ((vdev->devh))->
3992 config.vp_config[i].rpa_strip_vlan_tag
3993 ? "Enabled" : "Disabled");
3994 vxge_debug_ll_config(VXGE_TRACE,
3995 "%s: Max frags : %d", vdev->ndev->name,
3996 ((vdev->devh))->
3997 config.vp_config[i].fifo.max_frags);
3998 break;
3999 }
4000 }
4001
4002 #ifdef CONFIG_PM
4003
4004
4005
4006
4007 static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
4008 {
4009 return -ENOSYS;
4010 }
4011
4012
4013
4014
4015 static int vxge_pm_resume(struct pci_dev *pdev)
4016 {
4017 return -ENOSYS;
4018 }
4019
4020 #endif
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030 static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
4031 pci_channel_state_t state)
4032 {
4033 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4034 struct net_device *netdev = hldev->ndev;
4035
4036 netif_device_detach(netdev);
4037
4038 if (state == pci_channel_io_perm_failure)
4039 return PCI_ERS_RESULT_DISCONNECT;
4040
4041 if (netif_running(netdev)) {
4042
4043 do_vxge_close(netdev, 0);
4044 }
4045
4046 pci_disable_device(pdev);
4047
4048 return PCI_ERS_RESULT_NEED_RESET;
4049 }
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060 static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
4061 {
4062 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4063 struct net_device *netdev = hldev->ndev;
4064
4065 struct vxgedev *vdev = netdev_priv(netdev);
4066
4067 if (pci_enable_device(pdev)) {
4068 netdev_err(netdev, "Cannot re-enable device after reset\n");
4069 return PCI_ERS_RESULT_DISCONNECT;
4070 }
4071
4072 pci_set_master(pdev);
4073 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
4074
4075 return PCI_ERS_RESULT_RECOVERED;
4076 }
4077
4078
4079
4080
4081
4082
4083
4084
4085 static void vxge_io_resume(struct pci_dev *pdev)
4086 {
4087 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
4088 struct net_device *netdev = hldev->ndev;
4089
4090 if (netif_running(netdev)) {
4091 if (vxge_open(netdev)) {
4092 netdev_err(netdev,
4093 "Can't bring device back up after reset\n");
4094 return;
4095 }
4096 }
4097
4098 netif_device_attach(netdev);
4099 }
4100
4101 static inline u32 vxge_get_num_vfs(u64 function_mode)
4102 {
4103 u32 num_functions = 0;
4104
4105 switch (function_mode) {
4106 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4107 case VXGE_HW_FUNCTION_MODE_SRIOV_8:
4108 num_functions = 8;
4109 break;
4110 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4111 num_functions = 1;
4112 break;
4113 case VXGE_HW_FUNCTION_MODE_SRIOV:
4114 case VXGE_HW_FUNCTION_MODE_MRIOV:
4115 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
4116 num_functions = 17;
4117 break;
4118 case VXGE_HW_FUNCTION_MODE_SRIOV_4:
4119 num_functions = 4;
4120 break;
4121 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
4122 num_functions = 2;
4123 break;
4124 case VXGE_HW_FUNCTION_MODE_MRIOV_8:
4125 num_functions = 8;
4126 break;
4127 }
4128 return num_functions;
4129 }
4130
4131 int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4132 {
4133 struct __vxge_hw_device *hldev = vdev->devh;
4134 u32 maj, min, bld, cmaj, cmin, cbld;
4135 enum vxge_hw_status status;
4136 const struct firmware *fw;
4137 int ret;
4138
4139 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4140 if (ret) {
4141 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4142 VXGE_DRIVER_NAME, fw_name);
4143 goto out;
4144 }
4145
4146
4147 status = vxge_update_fw_image(hldev, fw->data, fw->size);
4148 if (status != VXGE_HW_OK) {
4149 vxge_debug_init(VXGE_ERR,
4150 "%s: FW image download to adapter failed '%s'.",
4151 VXGE_DRIVER_NAME, fw_name);
4152 ret = -EIO;
4153 goto out;
4154 }
4155
4156
4157 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4158 if (status != VXGE_HW_OK) {
4159 vxge_debug_init(VXGE_ERR,
4160 "%s: Upgrade read version failed '%s'.",
4161 VXGE_DRIVER_NAME, fw_name);
4162 ret = -EIO;
4163 goto out;
4164 }
4165
4166 cmaj = vdev->config.device_hw_info.fw_version.major;
4167 cmin = vdev->config.device_hw_info.fw_version.minor;
4168 cbld = vdev->config.device_hw_info.fw_version.build;
4169
4170
4171
4172
4173 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4174 !override) {
4175 ret = -EINVAL;
4176 goto out;
4177 }
4178
4179 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4180 maj, min, bld);
4181
4182
4183 status = vxge_hw_flash_fw(hldev);
4184 if (status != VXGE_HW_OK) {
4185 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4186 VXGE_DRIVER_NAME, fw_name);
4187 ret = -EIO;
4188 goto out;
4189 }
4190
4191 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4192 "hard reset before using, thus requiring a system reboot or a "
4193 "hotplug event.\n");
4194
4195 out:
4196 release_firmware(fw);
4197 return ret;
4198 }
4199
4200 static int vxge_probe_fw_update(struct vxgedev *vdev)
4201 {
4202 u32 maj, min, bld;
4203 int ret, gpxe = 0;
4204 char *fw_name;
4205
4206 maj = vdev->config.device_hw_info.fw_version.major;
4207 min = vdev->config.device_hw_info.fw_version.minor;
4208 bld = vdev->config.device_hw_info.fw_version.build;
4209
4210 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4211 return 0;
4212
4213
4214
4215
4216 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4217 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4218 "version, unable to load driver\n",
4219 VXGE_DRIVER_NAME);
4220 return -EINVAL;
4221 }
4222
4223
4224
4225
4226 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4227 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4228 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4229 return -EINVAL;
4230 }
4231
4232
4233 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4234 int i;
4235 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4236 if (vdev->devh->eprom_versions[i]) {
4237 gpxe = 1;
4238 break;
4239 }
4240 }
4241 if (gpxe)
4242 fw_name = "vxge/X3fw-pxe.ncf";
4243 else
4244 fw_name = "vxge/X3fw.ncf";
4245
4246 ret = vxge_fw_upgrade(vdev, fw_name, 0);
4247
4248
4249
4250 if (ret != -EINVAL && ret != -ENOENT)
4251 return -EIO;
4252 else
4253 ret = 0;
4254
4255 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4256 VXGE_FW_VER(maj, min, 0)) {
4257 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4258 " be used with this driver.",
4259 VXGE_DRIVER_NAME, maj, min, bld);
4260 return -EINVAL;
4261 }
4262
4263 return ret;
4264 }
4265
4266 static int is_sriov_initialized(struct pci_dev *pdev)
4267 {
4268 int pos;
4269 u16 ctrl;
4270
4271 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4272 if (pos) {
4273 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
4274 if (ctrl & PCI_SRIOV_CTRL_VFE)
4275 return 1;
4276 }
4277 return 0;
4278 }
4279
4280 static const struct vxge_hw_uld_cbs vxge_callbacks = {
4281 .link_up = vxge_callback_link_up,
4282 .link_down = vxge_callback_link_down,
4283 .crit_err = vxge_callback_crit_err,
4284 };
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297 static int
4298 vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4299 {
4300 struct __vxge_hw_device *hldev;
4301 enum vxge_hw_status status;
4302 int ret;
4303 int high_dma = 0;
4304 u64 vpath_mask = 0;
4305 struct vxgedev *vdev;
4306 struct vxge_config *ll_config = NULL;
4307 struct vxge_hw_device_config *device_config = NULL;
4308 struct vxge_hw_device_attr attr;
4309 int i, j, no_of_vpath = 0, max_vpath_supported = 0;
4310 u8 *macaddr;
4311 struct vxge_mac_addrs *entry;
4312 static int bus = -1, device = -1;
4313 u32 host_type;
4314 u8 new_device = 0;
4315 enum vxge_hw_status is_privileged;
4316 u32 function_mode;
4317 u32 num_vfs = 0;
4318
4319 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4320 attr.pdev = pdev;
4321
4322
4323
4324
4325 if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
4326 !pdev->is_virtfn)
4327 new_device = 1;
4328
4329 bus = pdev->bus->number;
4330 device = PCI_SLOT(pdev->devfn);
4331
4332 if (new_device) {
4333 if (driver_config->config_dev_cnt &&
4334 (driver_config->config_dev_cnt !=
4335 driver_config->total_dev_cnt))
4336 vxge_debug_init(VXGE_ERR,
4337 "%s: Configured %d of %d devices",
4338 VXGE_DRIVER_NAME,
4339 driver_config->config_dev_cnt,
4340 driver_config->total_dev_cnt);
4341 driver_config->config_dev_cnt = 0;
4342 driver_config->total_dev_cnt = 0;
4343 }
4344
4345
4346
4347
4348 driver_config->g_no_cpus = 0;
4349 driver_config->vpath_per_dev = max_config_vpath;
4350
4351 driver_config->total_dev_cnt++;
4352 if (++driver_config->config_dev_cnt > max_config_dev) {
4353 ret = 0;
4354 goto _exit0;
4355 }
4356
4357 device_config = kzalloc(sizeof(struct vxge_hw_device_config),
4358 GFP_KERNEL);
4359 if (!device_config) {
4360 ret = -ENOMEM;
4361 vxge_debug_init(VXGE_ERR,
4362 "device_config : malloc failed %s %d",
4363 __FILE__, __LINE__);
4364 goto _exit0;
4365 }
4366
4367 ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
4368 if (!ll_config) {
4369 ret = -ENOMEM;
4370 vxge_debug_init(VXGE_ERR,
4371 "device_config : malloc failed %s %d",
4372 __FILE__, __LINE__);
4373 goto _exit0;
4374 }
4375 ll_config->tx_steering_type = TX_MULTIQ_STEERING;
4376 ll_config->intr_type = MSI_X;
4377 ll_config->napi_weight = NEW_NAPI_WEIGHT;
4378 ll_config->rth_steering = RTH_STEERING;
4379
4380
4381 vxge_hw_device_config_default_get(device_config);
4382
4383
4384 vxge_device_config_init(device_config, &ll_config->intr_type);
4385
4386 ret = pci_enable_device(pdev);
4387 if (ret) {
4388 vxge_debug_init(VXGE_ERR,
4389 "%s : can not enable PCI device", __func__);
4390 goto _exit0;
4391 }
4392
4393 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4394 vxge_debug_ll_config(VXGE_TRACE,
4395 "%s : using 64bit DMA", __func__);
4396
4397 high_dma = 1;
4398
4399 if (pci_set_consistent_dma_mask(pdev,
4400 DMA_BIT_MASK(64))) {
4401 vxge_debug_init(VXGE_ERR,
4402 "%s : unable to obtain 64bit DMA for "
4403 "consistent allocations", __func__);
4404 ret = -ENOMEM;
4405 goto _exit1;
4406 }
4407 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
4408 vxge_debug_ll_config(VXGE_TRACE,
4409 "%s : using 32bit DMA", __func__);
4410 } else {
4411 ret = -ENOMEM;
4412 goto _exit1;
4413 }
4414
4415 ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
4416 if (ret) {
4417 vxge_debug_init(VXGE_ERR,
4418 "%s : request regions failed", __func__);
4419 goto _exit1;
4420 }
4421
4422 pci_set_master(pdev);
4423
4424 attr.bar0 = pci_ioremap_bar(pdev, 0);
4425 if (!attr.bar0) {
4426 vxge_debug_init(VXGE_ERR,
4427 "%s : cannot remap io memory bar0", __func__);
4428 ret = -ENODEV;
4429 goto _exit2;
4430 }
4431 vxge_debug_ll_config(VXGE_TRACE,
4432 "pci ioremap bar0: %p:0x%llx",
4433 attr.bar0,
4434 (unsigned long long)pci_resource_start(pdev, 0));
4435
4436 status = vxge_hw_device_hw_info_get(attr.bar0,
4437 &ll_config->device_hw_info);
4438 if (status != VXGE_HW_OK) {
4439 vxge_debug_init(VXGE_ERR,
4440 "%s: Reading of hardware info failed."
4441 "Please try upgrading the firmware.", VXGE_DRIVER_NAME);
4442 ret = -EINVAL;
4443 goto _exit3;
4444 }
4445
4446 vpath_mask = ll_config->device_hw_info.vpath_mask;
4447 if (vpath_mask == 0) {
4448 vxge_debug_ll_config(VXGE_TRACE,
4449 "%s: No vpaths available in device", VXGE_DRIVER_NAME);
4450 ret = -EINVAL;
4451 goto _exit3;
4452 }
4453
4454 vxge_debug_ll_config(VXGE_TRACE,
4455 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4456 (unsigned long long)vpath_mask);
4457
4458 function_mode = ll_config->device_hw_info.function_mode;
4459 host_type = ll_config->device_hw_info.host_type;
4460 is_privileged = __vxge_hw_device_is_privilaged(host_type,
4461 ll_config->device_hw_info.func_id);
4462
4463
4464 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4465 if (!((vpath_mask) & vxge_mBIT(i)))
4466 continue;
4467 max_vpath_supported++;
4468 }
4469
4470 if (new_device)
4471 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4472
4473
4474 if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
4475 (ll_config->intr_type != INTA)) {
4476 ret = pci_enable_sriov(pdev, num_vfs);
4477 if (ret)
4478 vxge_debug_ll_config(VXGE_ERR,
4479 "Failed in enabling SRIOV mode: %d\n", ret);
4480
4481 }
4482
4483
4484
4485
4486
4487 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
4488 if (!no_of_vpath) {
4489 vxge_debug_ll_config(VXGE_ERR,
4490 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4491 ret = 0;
4492 goto _exit3;
4493 }
4494
4495
4496 attr.uld_callbacks = &vxge_callbacks;
4497
4498 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4499 if (status != VXGE_HW_OK) {
4500 vxge_debug_init(VXGE_ERR,
4501 "Failed to initialize device (%d)", status);
4502 ret = -EINVAL;
4503 goto _exit3;
4504 }
4505
4506 if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4507 ll_config->device_hw_info.fw_version.minor,
4508 ll_config->device_hw_info.fw_version.build) >=
4509 VXGE_EPROM_FW_VER) {
4510 struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4511
4512 status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4513 if (status != VXGE_HW_OK) {
4514 vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4515 VXGE_DRIVER_NAME);
4516
4517 }
4518
4519 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4520 hldev->eprom_versions[i] = img[i].version;
4521 if (!img[i].is_valid)
4522 break;
4523 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4524 "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
4525 VXGE_EPROM_IMG_MAJOR(img[i].version),
4526 VXGE_EPROM_IMG_MINOR(img[i].version),
4527 VXGE_EPROM_IMG_FIX(img[i].version),
4528 VXGE_EPROM_IMG_BUILD(img[i].version));
4529 }
4530 }
4531
4532
4533 status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
4534 if (status != VXGE_HW_OK) {
4535 vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
4536 " failing driver load", VXGE_DRIVER_NAME);
4537 ret = -EINVAL;
4538 goto _exit4;
4539 }
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549 if (is_privileged == VXGE_HW_OK) {
4550 status = vxge_timestamp_config(hldev);
4551 if (status != VXGE_HW_OK) {
4552 vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
4553 VXGE_DRIVER_NAME);
4554 ret = -EFAULT;
4555 goto _exit4;
4556 }
4557 }
4558
4559 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4560
4561
4562 pci_set_drvdata(pdev, hldev);
4563
4564 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4565 ll_config->addr_learn_en = addr_learn_en;
4566 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4567 ll_config->rth_hash_type_tcpipv4 = 1;
4568 ll_config->rth_hash_type_ipv4 = 0;
4569 ll_config->rth_hash_type_tcpipv6 = 0;
4570 ll_config->rth_hash_type_ipv6 = 0;
4571 ll_config->rth_hash_type_tcpipv6ex = 0;
4572 ll_config->rth_hash_type_ipv6ex = 0;
4573 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4574 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4575 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4576
4577 ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4578 &vdev);
4579 if (ret) {
4580 ret = -EINVAL;
4581 goto _exit4;
4582 }
4583
4584 ret = vxge_probe_fw_update(vdev);
4585 if (ret)
4586 goto _exit5;
4587
4588 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4589 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4590 vxge_hw_device_trace_level_get(hldev));
4591
4592
4593 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4594 vdev->bar0 = attr.bar0;
4595 vdev->max_vpath_supported = max_vpath_supported;
4596 vdev->no_of_vpath = no_of_vpath;
4597
4598
4599 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4600 if (!vxge_bVALn(vpath_mask, i, 1))
4601 continue;
4602 if (j >= vdev->no_of_vpath)
4603 break;
4604
4605 vdev->vpaths[j].is_configured = 1;
4606 vdev->vpaths[j].device_id = i;
4607 vdev->vpaths[j].ring.driver_id = j;
4608 vdev->vpaths[j].vdev = vdev;
4609 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4610 memcpy((u8 *)vdev->vpaths[j].macaddr,
4611 ll_config->device_hw_info.mac_addrs[i],
4612 ETH_ALEN);
4613
4614
4615 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4616
4617 vdev->vpaths[j].mac_addr_cnt = 0;
4618 vdev->vpaths[j].mcast_addr_cnt = 0;
4619 j++;
4620 }
4621 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4622 vdev->max_config_port = max_config_port;
4623
4624 vdev->vlan_tag_strip = vlan_tag_strip;
4625
4626
4627 for (i = 0; i < vdev->no_of_vpath; i++)
4628 vdev->vpath_selector[i] = vpath_selector[i];
4629
4630 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4631
4632 ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4633 ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4634 ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
4635
4636 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
4637 vdev->ndev->name, ll_config->device_hw_info.serial_number);
4638
4639 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
4640 vdev->ndev->name, ll_config->device_hw_info.part_number);
4641
4642 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
4643 vdev->ndev->name, ll_config->device_hw_info.product_desc);
4644
4645 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4646 vdev->ndev->name, macaddr);
4647
4648 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4649 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4650
4651 vxge_debug_init(VXGE_TRACE,
4652 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
4653 ll_config->device_hw_info.fw_version.version,
4654 ll_config->device_hw_info.fw_date.date);
4655
4656 if (new_device) {
4657 switch (ll_config->device_hw_info.function_mode) {
4658 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4659 vxge_debug_init(VXGE_TRACE,
4660 "%s: Single Function Mode Enabled", vdev->ndev->name);
4661 break;
4662 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4663 vxge_debug_init(VXGE_TRACE,
4664 "%s: Multi Function Mode Enabled", vdev->ndev->name);
4665 break;
4666 case VXGE_HW_FUNCTION_MODE_SRIOV:
4667 vxge_debug_init(VXGE_TRACE,
4668 "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
4669 break;
4670 case VXGE_HW_FUNCTION_MODE_MRIOV:
4671 vxge_debug_init(VXGE_TRACE,
4672 "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
4673 break;
4674 }
4675 }
4676
4677 vxge_print_parm(vdev, vpath_mask);
4678
4679
4680 strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
4681 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4682
4683
4684 for (i = 0; i < vdev->no_of_vpath; i++) {
4685 entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
4686 if (NULL == entry) {
4687 vxge_debug_init(VXGE_ERR,
4688 "%s: mac_addr_list : memory allocation failed",
4689 vdev->ndev->name);
4690 ret = -EPERM;
4691 goto _exit6;
4692 }
4693 macaddr = (u8 *)&entry->macaddr;
4694 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4695 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4696 vdev->vpaths[i].mac_addr_cnt = 1;
4697 }
4698
4699 kfree(device_config);
4700
4701
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711
4712
4713
4714
4715
4716 if (ll_config->device_hw_info.function_mode ==
4717 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4718 if (vdev->config.intr_type == INTA)
4719 vxge_hw_device_unmask_all(hldev);
4720
4721 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4722 vdev->ndev->name, __func__, __LINE__);
4723
4724 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4725 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4726 vxge_hw_device_trace_level_get(hldev));
4727
4728 kfree(ll_config);
4729 return 0;
4730
4731 _exit6:
4732 for (i = 0; i < vdev->no_of_vpath; i++)
4733 vxge_free_mac_add_list(&vdev->vpaths[i]);
4734 _exit5:
4735 vxge_device_unregister(hldev);
4736 _exit4:
4737 vxge_hw_device_terminate(hldev);
4738 pci_disable_sriov(pdev);
4739 _exit3:
4740 iounmap(attr.bar0);
4741 _exit2:
4742 pci_release_region(pdev, 0);
4743 _exit1:
4744 pci_disable_device(pdev);
4745 _exit0:
4746 kfree(ll_config);
4747 kfree(device_config);
4748 driver_config->config_dev_cnt--;
4749 driver_config->total_dev_cnt--;
4750 return ret;
4751 }
4752
4753
4754
4755
4756
4757
4758
4759 static void vxge_remove(struct pci_dev *pdev)
4760 {
4761 struct __vxge_hw_device *hldev;
4762 struct vxgedev *vdev;
4763 int i;
4764
4765 hldev = pci_get_drvdata(pdev);
4766 if (hldev == NULL)
4767 return;
4768
4769 vdev = netdev_priv(hldev->ndev);
4770
4771 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4772 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4773 __func__);
4774
4775 for (i = 0; i < vdev->no_of_vpath; i++)
4776 vxge_free_mac_add_list(&vdev->vpaths[i]);
4777
4778 vxge_device_unregister(hldev);
4779
4780 vxge_hw_device_terminate(hldev);
4781 iounmap(vdev->bar0);
4782 pci_release_region(pdev, 0);
4783 pci_disable_device(pdev);
4784 driver_config->config_dev_cnt--;
4785 driver_config->total_dev_cnt--;
4786
4787 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4788 __func__, __LINE__);
4789 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4790 __LINE__);
4791 }
4792
4793 static const struct pci_error_handlers vxge_err_handler = {
4794 .error_detected = vxge_io_error_detected,
4795 .slot_reset = vxge_io_slot_reset,
4796 .resume = vxge_io_resume,
4797 };
4798
4799 static struct pci_driver vxge_driver = {
4800 .name = VXGE_DRIVER_NAME,
4801 .id_table = vxge_id_table,
4802 .probe = vxge_probe,
4803 .remove = vxge_remove,
4804 #ifdef CONFIG_PM
4805 .suspend = vxge_pm_suspend,
4806 .resume = vxge_pm_resume,
4807 #endif
4808 .err_handler = &vxge_err_handler,
4809 };
4810
4811 static int __init
4812 vxge_starter(void)
4813 {
4814 int ret = 0;
4815
4816 pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4817 pr_info("Driver version: %s\n", DRV_VERSION);
4818
4819 verify_bandwidth();
4820
4821 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4822 if (!driver_config)
4823 return -ENOMEM;
4824
4825 ret = pci_register_driver(&vxge_driver);
4826 if (ret) {
4827 kfree(driver_config);
4828 goto err;
4829 }
4830
4831 if (driver_config->config_dev_cnt &&
4832 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4833 vxge_debug_init(VXGE_ERR,
4834 "%s: Configured %d of %d devices",
4835 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4836 driver_config->total_dev_cnt);
4837 err:
4838 return ret;
4839 }
4840
4841 static void __exit
4842 vxge_closer(void)
4843 {
4844 pci_unregister_driver(&vxge_driver);
4845 kfree(driver_config);
4846 }
4847 module_init(vxge_starter);
4848 module_exit(vxge_closer);