This source file includes the following definitions:
- print_hw_id
- print_fw_id
- print_eth_id
- bdx_fifo_init
- bdx_fifo_free
- bdx_link_changed
- bdx_isr_extra
- bdx_isr_napi
- bdx_poll
- bdx_fw_load
- bdx_restore_mac
- bdx_hw_start
- bdx_hw_stop
- bdx_hw_reset_direct
- bdx_hw_reset
- bdx_sw_reset
- bdx_reset
- bdx_close
- bdx_open
- bdx_range_check
- bdx_ioctl_priv
- bdx_ioctl
- __bdx_vlan_rx_vid
- bdx_vlan_rx_add_vid
- bdx_vlan_rx_kill_vid
- bdx_change_mtu
- bdx_setmulti
- bdx_set_mac
- bdx_read_mac
- bdx_read_l2stat
- bdx_update_stats
- bdx_rxdb_destroy
- bdx_rxdb_create
- bdx_rxdb_alloc_elem
- bdx_rxdb_addr_elem
- bdx_rxdb_available
- bdx_rxdb_free_elem
- bdx_rx_init
- bdx_rx_free_skbs
- bdx_rx_free
- bdx_rx_alloc_skbs
- NETIF_RX_MUX
- bdx_recycle_skb
- bdx_rx_receive
- print_rxdd
- print_rxfd
- bdx_tx_db_size
- __bdx_tx_db_ptr_next
- bdx_tx_db_inc_rptr
- bdx_tx_db_inc_wptr
- bdx_tx_db_init
- bdx_tx_db_close
- bdx_tx_map_skb
- init_txd_sizes
- bdx_tx_init
- bdx_tx_space
- bdx_tx_transmit
- bdx_tx_cleanup
- bdx_tx_free_skbs
- bdx_tx_free
- bdx_tx_push_desc
- bdx_tx_push_desc_safe
- bdx_probe
- bdx_get_link_ksettings
- bdx_get_drvinfo
- bdx_get_coalesce
- bdx_set_coalesce
- bdx_rx_fifo_size_to_packets
- bdx_tx_fifo_size_to_packets
- bdx_get_ringparam
- bdx_set_ringparam
- bdx_get_strings
- bdx_get_sset_count
- bdx_get_ethtool_stats
- bdx_set_ethtool_ops
- bdx_remove
- print_driver_id
- bdx_module_init
- bdx_module_exit
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "tehuti.h"

static const struct pci_device_id bdx_pci_tbl[] = {
	{ PCI_VDEVICE(TEHUTI, 0x3009), },
	{ PCI_VDEVICE(TEHUTI, 0x3010), },
	{ PCI_VDEVICE(TEHUTI, 0x3014), },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bdx_pci_tbl);

static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f);
static void bdx_tx_cleanup(struct bdx_priv *priv);
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget);

static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size);

static int bdx_tx_init(struct bdx_priv *priv);
static int bdx_rx_init(struct bdx_priv *priv);

static void bdx_rx_free(struct bdx_priv *priv);
static void bdx_tx_free(struct bdx_priv *priv);

static void bdx_set_ethtool_ops(struct net_device *netdev);
static void print_hw_id(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	u16 pci_link_status = 0;
	u16 pci_ctrl = 0;

	pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
	pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);

	pr_info("%s%s\n", BDX_NIC_NAME,
		nic->port_num == 1 ? "" : ", 2-Port");
	pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
		readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
		readl(nic->regs + FPGA_SEED),
		GET_LINK_STATUS_LANES(pci_link_status),
		GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
}

static void print_fw_id(struct pci_nic *nic)
{
	pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
}

static void print_eth_id(struct net_device *ndev)
{
	netdev_info(ndev, "%s, Port %c\n",
		    BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
}

#define bdx_enable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, IR_RUN); } while (0)
#define bdx_disable_interrupts(priv)	\
	do { WRITE_REG(priv, regIMR, 0); } while (0)

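/*
 * bdx_fifo_init - allocate FIFO_SIZE * (1 << fsz_type) bytes (plus
 * FIFO_EXTRA_SPACE) of coherent DMA memory for a fifo and program the
 * fifo's base address and size into the given CFG0/CFG1 registers.
 */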
static int
bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
	      u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR)
{
	u16 memsz = FIFO_SIZE * (1 << fsz_type);

	memset(f, 0, sizeof(struct fifo));

	f->va = pci_alloc_consistent(priv->pdev,
				     memsz + FIFO_EXTRA_SPACE, &f->da);
	if (!f->va) {
		pr_err("pci_alloc_consistent failed\n");
		RET(-ENOMEM);
	}
	f->reg_CFG0 = reg_CFG0;
	f->reg_CFG1 = reg_CFG1;
	f->reg_RPTR = reg_RPTR;
	f->reg_WPTR = reg_WPTR;
	f->rptr = 0;
	f->wptr = 0;
	f->memsz = memsz;
	f->size_mask = memsz - 1;
	WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type));
	WRITE_REG(priv, reg_CFG1, H32_64(f->da));

	RET(0);
}

static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
	ENTER;
	if (f->va) {
		pci_free_consistent(priv->pdev,
				    f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
		f->va = NULL;
	}
	RET();
}

static void bdx_link_changed(struct bdx_priv *priv)
{
	u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT;

	if (!link) {
		if (netif_carrier_ok(priv->ndev)) {
			netif_stop_queue(priv->ndev);
			netif_carrier_off(priv->ndev);
			netdev_err(priv->ndev, "Link Down\n");
		}
	} else {
		if (!netif_carrier_ok(priv->ndev)) {
			netif_wake_queue(priv->ndev);
			netif_carrier_on(priv->ndev);
			netdev_err(priv->ndev, "Link Up\n");
		}
	}
}

static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
{
	if (isr & IR_RX_FREE_0) {
		bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
		DBG("RX_FREE_0\n");
	}

	if (isr & IR_LNKCHG0)
		bdx_link_changed(priv);

	if (isr & IR_PCIE_LINK)
		netdev_err(priv->ndev, "PCI-E Link Fault\n");

	if (isr & IR_PCIE_TOUT)
		netdev_err(priv->ndev, "PCI-E Time Out\n");
}

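/*
 * bdx_isr_napi - interrupt handler. Reading regISR masks further
 * interrupts; "extra" events are handled inline and RX/TX work is
 * deferred to NAPI, which re-enables interrupts when the poll completes.
 */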
static irqreturn_t bdx_isr_napi(int irq, void *dev)
{
	struct net_device *ndev = dev;
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 isr;

	ENTER;
	isr = (READ_REG(priv, regISR) & IR_RUN);
	if (unlikely(!isr)) {
		bdx_enable_interrupts(priv);
		return IRQ_NONE;
	}

	if (isr & IR_EXTRA)
		bdx_isr_extra(priv, isr);

	if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			__napi_schedule(&priv->napi);
			RET(IRQ_HANDLED);
		} else {
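			/* NAPI poll is already running: we raced with the
			 * window in bdx_poll between re-enabling interrupts
			 * and returning. Interrupts are currently masked by
			 * the ISR read above, so touch the WPTR registers to
			 * nudge the chip and allow further interrupts. */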
			READ_REG(priv, regTXF_WPTR_0);
			READ_REG(priv, regRXD_WPTR_0);
		}
	}

	bdx_enable_interrupts(priv);
	RET(IRQ_HANDLED);
}

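/*
 * bdx_poll - NAPI callback. Cleans the TX completion fifo, receives up to
 * "budget" packets, and returns to interrupt-driven mode once the RX fifo
 * is drained (or after 30 consecutive full polls, to bound NAPI latency).
 */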
static int bdx_poll(struct napi_struct *napi, int budget)
{
	struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
	int work_done;

	ENTER;
	bdx_tx_cleanup(priv);
	work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget);
	if ((work_done < budget) ||
	    (priv->napi_stop++ >= 30)) {
		DBG("rx poll is done. backing to isr-driven\n");

		priv->napi_stop = 0;

		napi_complete_done(napi, work_done);
		bdx_enable_interrupts(priv);
	}
	return work_done;
}

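/*
 * bdx_fw_load - load the chip firmware. The first port to grab the init
 * semaphore pushes the "tehuti/bdx.bin" image through the TX descriptor
 * fifo; every caller then polls regINIT_STATUS for completion.
 */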
static int bdx_fw_load(struct bdx_priv *priv)
{
	const struct firmware *fw = NULL;
	int master, i;
	int rc;

	ENTER;
	master = READ_REG(priv, regINIT_SEMAPHORE);
	if (!READ_REG(priv, regINIT_STATUS) && master) {
		rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
		if (rc)
			goto out;
		bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
		mdelay(100);
	}
	for (i = 0; i < 200; i++) {
		if (READ_REG(priv, regINIT_STATUS)) {
			rc = 0;
			goto out;
		}
		mdelay(2);
	}
	rc = -EIO;
out:
	if (master)
		WRITE_REG(priv, regINIT_SEMAPHORE, 1);

	release_firmware(fw);

	if (rc) {
		netdev_err(priv->ndev, "firmware loading failed\n");
		if (rc == -EIO)
			DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
			    READ_REG(priv, regVPC),
			    READ_REG(priv, regVIC),
			    READ_REG(priv, regINIT_STATUS), i);
		RET(rc);
	} else {
		DBG("%s: firmware loading success\n", priv->ndev->name);
		RET(0);
	}
}

static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
{
	u32 val;

	ENTER;
	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));

	val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
	WRITE_REG(priv, regUNC_MAC2_A, val);
	val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
	WRITE_REG(priv, regUNC_MAC1_A, val);
	val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
	WRITE_REG(priv, regUNC_MAC0_A, val);

	DBG("mac0=%x mac1=%x mac2=%x\n",
	    READ_REG(priv, regUNC_MAC0_A),
	    READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A));
	RET();
}

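/*
 * bdx_hw_start - program the MAC (frame length, pause, fifo sections),
 * interrupt coalescing and RX filters, restore the MAC address, then
 * request the IRQ and enable interrupts.
 */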
static int bdx_hw_start(struct bdx_priv *priv)
{
	int rc = -EIO;
	struct net_device *ndev = priv->ndev;

	ENTER;
	bdx_link_changed(priv);

	WRITE_REG(priv, regFRM_LENGTH, 0x3FE0);
	WRITE_REG(priv, regPAUSE_QUANT, 0x96);
	WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010);
	WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010);
	WRITE_REG(priv, regRX_FULLNESS, 0);
	WRITE_REG(priv, regTX_FULLNESS, 0);
	WRITE_REG(priv, regCTRLST,
		  regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA);

	WRITE_REG(priv, regVGLB, 0);
	WRITE_REG(priv, regMAX_FRAME_A,
		  priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL);

	DBG("RDINTCM=%08x\n", priv->rdintcm);
	WRITE_REG(priv, regRDINTCM0, priv->rdintcm);
	WRITE_REG(priv, regRDINTCM2, 0);

	DBG("TDINTCM=%08x\n", priv->tdintcm);
	WRITE_REG(priv, regTDINTCM0, priv->tdintcm);

	bdx_restore_mac(priv->ndev, priv);

	WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
		  GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);

#define BDX_IRQ_TYPE	((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)

	rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
			 ndev->name, ndev);
	if (rc)
		goto err_irq;
	bdx_enable_interrupts(priv);

	RET(0);

err_irq:
	RET(rc);
}

static void bdx_hw_stop(struct bdx_priv *priv)
{
	ENTER;
	bdx_disable_interrupts(priv);
	free_irq(priv->pdev->irq, priv->ndev);

	netif_carrier_off(priv->ndev);
	netif_stop_queue(priv->ndev);

	RET();
}

static int bdx_hw_reset_direct(void __iomem *regs)
{
	u32 val, i;

	ENTER;

	val = readl(regs + regCLKPLL);
	writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL);
	udelay(50);
	val = readl(regs + regCLKPLL);
	writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL);

	for (i = 0; i < 70; i++, mdelay(10))
		if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			readl(regs + regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;
}

static int bdx_hw_reset(struct bdx_priv *priv)
{
	u32 val, i;

	ENTER;

	if (priv->port == 0) {
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8);
		udelay(50);
		val = READ_REG(priv, regCLKPLL);
		WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST);
	}

	for (i = 0; i < 70; i++, mdelay(10))
		if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) {
			READ_REG(priv, regRXD_CFG0_0);
			return 0;
		}
	pr_err("HW reset failed\n");
	return 1;
}

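/*
 * bdx_sw_reset - software reset sequence: stop the RX filter, disable and
 * reset the port and queues, clear interrupt state, then re-enable
 * everything and verify the TX pointer registers are zeroed.
 */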
static int bdx_sw_reset(struct bdx_priv *priv)
{
	int i;

	ENTER;

	WRITE_REG(priv, regGMAC_RXF_A, 0);
	mdelay(100);

	WRITE_REG(priv, regDIS_PORT, 1);

	WRITE_REG(priv, regDIS_QU, 1);

	for (i = 0; i < 50; i++) {
		if (READ_REG(priv, regRST_PORT) & 1)
			break;
		mdelay(10);
	}
	if (i == 50)
		netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");

	WRITE_REG(priv, regRDINTCM0, 0);
	WRITE_REG(priv, regTDINTCM0, 0);
	WRITE_REG(priv, regIMR, 0);
	READ_REG(priv, regISR);

	WRITE_REG(priv, regRST_QU, 1);

	WRITE_REG(priv, regRST_PORT, 1);

	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		WRITE_REG(priv, i, 0);

	WRITE_REG(priv, regDIS_PORT, 0);

	WRITE_REG(priv, regDIS_QU, 0);

	WRITE_REG(priv, regRST_QU, 0);

	WRITE_REG(priv, regRST_PORT, 0);

	for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
		DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);

	RET(0);
}

static int bdx_reset(struct bdx_priv *priv)
{
	ENTER;
	RET((priv->pdev->device == 0x3009)
	    ? bdx_hw_reset(priv)
	    : bdx_sw_reset(priv));
}

static int bdx_close(struct net_device *ndev)
{
	struct bdx_priv *priv = NULL;

	ENTER;
	priv = netdev_priv(ndev);

	napi_disable(&priv->napi);

	bdx_reset(priv);
	bdx_hw_stop(priv);
	bdx_rx_free(priv);
	bdx_tx_free(priv);
	RET(0);
}

static int bdx_open(struct net_device *ndev)
{
	struct bdx_priv *priv;
	int rc;

	ENTER;
	priv = netdev_priv(ndev);
	bdx_reset(priv);
	if (netif_running(ndev))
		netif_stop_queue(priv->ndev);

	if ((rc = bdx_tx_init(priv)) ||
	    (rc = bdx_rx_init(priv)) ||
	    (rc = bdx_fw_load(priv)))
		goto err;

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	rc = bdx_hw_start(priv);
	if (rc)
		goto err;

	napi_enable(&priv->napi);

	print_fw_id(priv->nic);

	RET(0);

err:
	bdx_close(ndev);
	RET(rc);
}

static int bdx_range_check(struct bdx_priv *priv, u32 offset)
{
	return (offset > (u32) (BDX_REGS_SIZE / priv->nic->port_num)) ?
		-EINVAL : 0;
}

static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 data[3];
	int error;

	ENTER;

	DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
	if (cmd != SIOCDEVPRIVATE) {
		error = copy_from_user(data, ifr->ifr_data, sizeof(data));
		if (error) {
			pr_err("can't copy from user\n");
			RET(-EFAULT);
		}
		DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
	} else {
		return -EOPNOTSUPP;
	}

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	switch (data[0]) {

	case BDX_OP_READ:
		error = bdx_range_check(priv, data[1]);
		if (error < 0)
			return error;
		data[2] = READ_REG(priv, data[1]);
		DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
		    data[2]);
		error = copy_to_user(ifr->ifr_data, data, sizeof(data));
		if (error)
			RET(-EFAULT);
		break;

	case BDX_OP_WRITE:
		error = bdx_range_check(priv, data[1]);
		if (error < 0)
			return error;
		WRITE_REG(priv, data[1], data[2]);
		DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
		break;

	default:
		RET(-EOPNOTSUPP);
	}
	return 0;
}

static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	ENTER;
	if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
		RET(bdx_ioctl_priv(ndev, ifr, cmd));
	else
		RET(-EOPNOTSUPP);
}

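/*
 * __bdx_vlan_rx_vid - set or clear a VLAN id's bit in the hardware VLAN
 * filter table (128 32-bit registers covering VIDs 0-4095).
 */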
static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 reg, bit, val;

	ENTER;
	DBG2("vid=%d value=%d\n", (int)vid, enable);
	if (unlikely(vid >= 4096)) {
		pr_err("invalid VID: %u (>= 4096)\n", vid);
		RET();
	}
	reg = regVLAN_0 + (vid / 32) * 4;
	bit = 1 << vid % 32;
	val = READ_REG(priv, reg);
	DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
	if (enable)
		val |= bit;
	else
		val &= ~bit;
	DBG2("new val %x\n", val);
	WRITE_REG(priv, reg, val);
	RET();
}

static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 1);
	return 0;
}

static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	__bdx_vlan_rx_vid(ndev, vid, 0);
	return 0;
}

static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
	ENTER;

	ndev->mtu = new_mtu;
	if (netif_running(ndev)) {
		bdx_close(ndev);
		bdx_open(ndev);
	}
	RET(0);
}

static void bdx_setmulti(struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);

	u32 rxf_val =
	    GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
	int i;

	ENTER;

	if (ndev->flags & IFF_PROMISC) {
		rxf_val |= GMAC_RX_FILTER_PRM;
	} else if (ndev->flags & IFF_ALLMULTI) {
		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
	} else if (!netdev_mc_empty(ndev)) {
		u8 hash;
		struct netdev_hw_addr *ha;
		u32 reg, val;

		for (i = 0; i < MAC_MCST_HASH_NUM; i++)
			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0);

		for (i = 0; i < MAC_MCST_NUM; i++) {
			WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
			WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
		}

		netdev_for_each_mc_addr(ha, ndev) {
			hash = 0;
			for (i = 0; i < ETH_ALEN; i++)
				hash ^= ha->addr[i];
			reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
			val = READ_REG(priv, reg);
			val |= (1 << (hash % 32));
			WRITE_REG(priv, reg, val);
		}

	} else {
		DBG("only own mac %d\n", netdev_mc_count(ndev));
		rxf_val |= GMAC_RX_FILTER_AB;
	}
	WRITE_REG(priv, regGMAC_RXF_A, rxf_val);

	RET();
}

static int bdx_set_mac(struct net_device *ndev, void *p)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = p;

	ENTER;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	bdx_restore_mac(ndev, priv);
	RET(0);
}

static int bdx_read_mac(struct bdx_priv *priv)
{
	u16 macAddress[3], i;

	ENTER;

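	/* each MAC half-word register is read twice; the value of the
	 * second read is the one that gets used */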
	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[1] = READ_REG(priv, regUNC_MAC1_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
	for (i = 0; i < 3; i++) {
		priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
		priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
	}
	RET(0);
}

static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
{
	u64 val;

	val = READ_REG(priv, reg);
	val |= ((u64) READ_REG(priv, reg + 8)) << 32;
	return val;
}

static void bdx_update_stats(struct bdx_priv *priv)
{
	struct bdx_stats *stats = &priv->hw_stats;
	u64 *stats_vector = (u64 *) stats;
	int i;
	int addr;

	addr = 0x7200;
	for (i = 0; i < 12; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x72C0);

	addr = 0x72F0;
	for (; i < 16; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7330);

	addr = 0x7370;
	for (; i < 19; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x73A0);

	addr = 0x73C0;
	for (; i < 23; i++) {
		stats_vector[i] = bdx_read_l2stat(priv, addr);
		addr += 0x10;
	}
	BDX_ASSERT(addr != 0x7400);
	BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
}

static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan);
static void print_rxfd(struct rxf_desc *rxfd);

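/*
 * rxdb - database of RX skbs and their DMA mappings. Free element indices
 * are kept on a simple stack: alloc pops an index, free pushes it back.
 */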
static void bdx_rxdb_destroy(struct rxdb *db)
{
	vfree(db);
}

static struct rxdb *bdx_rxdb_create(int nelem)
{
	struct rxdb *db;
	int i;

	db = vmalloc(sizeof(struct rxdb)
		     + (nelem * sizeof(int))
		     + (nelem * sizeof(struct rx_map)));
	if (likely(db != NULL)) {
		db->stack = (int *)(db + 1);
		db->elems = (void *)(db->stack + nelem);
		db->nelem = nelem;
		db->top = nelem;
		for (i = 0; i < nelem; i++)
			db->stack[i] = nelem - i - 1;
	}

	return db;
}

static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
{
	BDX_ASSERT(db->top <= 0);
	return db->stack[--(db->top)];
}

static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n < 0) || (n >= db->nelem));
	return db->elems + n;
}

static inline int bdx_rxdb_available(struct rxdb *db)
{
	return db->top;
}

static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
{
	BDX_ASSERT((n >= db->nelem) || (n < 0));
	db->stack[(db->top)++] = n;
}

static int bdx_rx_init(struct bdx_priv *priv)
{
	ENTER;

	if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size,
			  regRXD_CFG0_0, regRXD_CFG1_0,
			  regRXD_RPTR_0, regRXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size,
			  regRXF_CFG0_0, regRXF_CFG1_0,
			  regRXF_RPTR_0, regRXF_WPTR_0))
		goto err_mem;
	priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
				     sizeof(struct rxf_desc));
	if (!priv->rxdb)
		goto err_mem;

	priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
	return 0;

err_mem:
	netdev_err(priv->ndev, "Rx init failed\n");
	return -ENOMEM;
}

static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct rx_map *dm;
	struct rxdb *db = priv->rxdb;
	u16 i;

	ENTER;
	DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
	    db->nelem - bdx_rxdb_available(db));
	while (bdx_rxdb_available(db) > 0) {
		i = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, i);
		dm->dma = 0;
	}
	for (i = 0; i < db->nelem; i++) {
		dm = bdx_rxdb_addr_elem(db, i);
		if (dm->dma) {
			pci_unmap_single(priv->pdev,
					 dm->dma, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(dm->skb);
		}
	}
}

static void bdx_rx_free(struct bdx_priv *priv)
{
	ENTER;
	if (priv->rxdb) {
		bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
		bdx_rxdb_destroy(priv->rxdb);
		priv->rxdb = NULL;
	}
	bdx_fifo_free(priv, &priv->rxf_fifo0.m);
	bdx_fifo_free(priv, &priv->rxd_fifo0.m);

	RET();
}

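/*
 * bdx_rx_alloc_skbs - replenish the RX free fifo: allocate skbs, map them
 * for DMA, and post one rxf descriptor per skb. The descriptor's va_lo
 * carries the rxdb index so the skb can be found on completion.
 */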
static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
	struct sk_buff *skb;
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	int dno, delta, idx;
	struct rxdb *db = priv->rxdb;

	ENTER;
	dno = bdx_rxdb_available(db) - 1;
	while (dno > 0) {
		skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
		if (!skb)
			break;

		skb_reserve(skb, NET_IP_ALIGN);

		idx = bdx_rxdb_alloc_elem(db);
		dm = bdx_rxdb_addr_elem(db, idx);
		dm->dma = pci_map_single(priv->pdev,
					 skb->data, f->m.pktsz,
					 PCI_DMA_FROMDEVICE);
		dm->skb = skb;
		rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
		rxfd->info = CPU_CHIP_SWAP32(0x10003);
		rxfd->va_lo = idx;
		rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
		rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
		rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
		print_rxfd(rxfd);

		f->m.wptr += sizeof(struct rxf_desc);
		delta = f->m.wptr - f->m.memsz;
		if (unlikely(delta >= 0)) {
			f->m.wptr = delta;
			if (delta > 0) {
				memcpy(f->m.va, f->m.va + f->m.memsz, delta);
				DBG("wrapped descriptor\n");
			}
		}
		dno--;
	}

	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	RET();
}

static inline void
NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
	     struct sk_buff *skb)
{
	ENTER;
	DBG("rxdd->flags.bits.vtag=%d\n", GET_RXD_VTAG(rxd_val1));
	if (GET_RXD_VTAG(rxd_val1)) {
		DBG("%s: vlan rcv vlan '%x' vtag '%x'\n",
		    priv->ndev->name,
		    GET_RXD_VLAN_ID(rxd_vlan),
		    GET_RXD_VTAG(rxd_val1));
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       GET_RXD_VLAN_TCI(rxd_vlan));
	}
	netif_receive_skb(skb);
}

static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
{
	struct rxf_desc *rxfd;
	struct rx_map *dm;
	struct rxf_fifo *f;
	struct rxdb *db;
	int delta;

	ENTER;
	DBG("priv=%p rxdd=%p\n", priv, rxdd);
	f = &priv->rxf_fifo0;
	db = priv->rxdb;
	DBG("db=%p f=%p\n", db, f);
	dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
	DBG("dm=%p\n", dm);
	rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
	rxfd->info = CPU_CHIP_SWAP32(0x10003);
	rxfd->va_lo = rxdd->va_lo;
	rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma));
	rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma));
	rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz);
	print_rxfd(rxfd);

	f->m.wptr += sizeof(struct rxf_desc);
	delta = f->m.wptr - f->m.memsz;
	if (unlikely(delta >= 0)) {
		f->m.wptr = delta;
		if (delta > 0) {
			memcpy(f->m.va, f->m.va + f->m.memsz, delta);
			DBG("wrapped descriptor\n");
		}
	}
	RET();
}

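/*
 * bdx_rx_receive - receive up to "budget" packets from the RX descriptor
 * fifo. Short packets (< BDX_COPYBREAK) are copied into a fresh skb and
 * the original buffer is recycled; larger ones are unmapped and passed up
 * the stack directly.
 */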
static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
{
	struct net_device *ndev = priv->ndev;
	struct sk_buff *skb, *skb2;
	struct rxd_desc *rxdd;
	struct rx_map *dm;
	struct rxf_fifo *rxf_fifo;
	int tmp_len, size;
	int done = 0;
	int max_done = BDX_MAX_RX_DONE;
	struct rxdb *db = NULL;
	u32 rxd_val1;
	u16 len;
	u16 rxd_vlan;

	ENTER;
	max_done = budget;

	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;

	size = f->m.wptr - f->m.rptr;
	if (size < 0)
		size = f->m.memsz + size;

	while (size > 0) {
		rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
		rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1);

		len = CPU_CHIP_SWAP16(rxdd->len);

		rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan);

		print_rxdd(rxdd, rxd_val1, len, rxd_vlan);

		tmp_len = GET_RXD_BC(rxd_val1) << 3;
		BDX_ASSERT(tmp_len <= 0);
		size -= tmp_len;
		if (size < 0)
			break;

		f->m.rptr += tmp_len;

		tmp_len = f->m.rptr - f->m.memsz;
		if (unlikely(tmp_len >= 0)) {
			f->m.rptr = tmp_len;
			if (tmp_len > 0) {
				DBG("wrapped desc rptr=%d tmp_len=%d\n",
				    f->m.rptr, tmp_len);
				memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
			}
		}

		if (unlikely(GET_RXD_ERR(rxd_val1))) {
			DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
			ndev->stats.rx_errors++;
			bdx_recycle_skb(priv, rxdd);
			continue;
		}

		rxf_fifo = &priv->rxf_fifo0;
		db = priv->rxdb;
		dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
		skb = dm->skb;

		if (len < BDX_COPYBREAK &&
		    (skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) {
			skb_reserve(skb2, NET_IP_ALIGN);

			pci_dma_sync_single_for_cpu(priv->pdev,
						    dm->dma, rxf_fifo->m.pktsz,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb2->data, skb->data, len);
			bdx_recycle_skb(priv, rxdd);
			skb = skb2;
		} else {
			pci_unmap_single(priv->pdev,
					 dm->dma, rxf_fifo->m.pktsz,
					 PCI_DMA_FROMDEVICE);
			bdx_rxdb_free_elem(db, rxdd->va_lo);
		}

		ndev->stats.rx_bytes += len;

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		if (GET_RXD_PKT_ID(rxd_val1) == 0)
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);

		if (++done >= max_done)
			break;
	}

	ndev->stats.rx_packets += done;

	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);

	RET(done);
}

static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
		       u16 rxd_vlan)
{
	DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
	    GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
	    GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
	    GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
	    GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan),
	    GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo,
	    rxdd->va_hi);
}

static void print_rxfd(struct rxf_desc *rxfd)
{
	DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
	    "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
	    rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
}

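/*
 * txdb - a circular buffer of tx_map entries paralleling the TX descriptor
 * fifo. Each transmitted skb occupies one entry per DMA fragment (len > 0)
 * followed by a terminator entry holding the skb pointer and, in len, the
 * negated size of its TX descriptor.
 */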
static inline int bdx_tx_db_size(struct txdb *db)
{
	int taken = db->wptr - db->rptr;

	if (taken < 0)
		taken = db->size + 1 + taken;

	return db->size - taken;
}

static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
{
	BDX_ASSERT(db == NULL || pptr == NULL);

	BDX_ASSERT(*pptr != db->rptr &&
		   *pptr != db->wptr);

	BDX_ASSERT(*pptr < db->start ||
		   *pptr >= db->end);

	++*pptr;
	if (unlikely(*pptr == db->end))
		*pptr = db->start;
}

static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
	BDX_ASSERT(db->rptr == db->wptr);
	__bdx_tx_db_ptr_next(db, &db->rptr);
}

static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
	__bdx_tx_db_ptr_next(db, &db->wptr);
	BDX_ASSERT(db->rptr == db->wptr);
}

static int bdx_tx_db_init(struct txdb *d, int sz_type)
{
	int memsz = FIFO_SIZE * (1 << (sz_type + 1));

	d->start = vmalloc(memsz);
	if (!d->start)
		return -ENOMEM;

	d->size = memsz / sizeof(struct tx_map) - 1;
	d->end = d->start + d->size + 1;

	d->rptr = d->start;
	d->wptr = d->start;

	return 0;
}

static void bdx_tx_db_close(struct txdb *d)
{
	BDX_ASSERT(d == NULL);

	vfree(d->start);
	d->start = NULL;
}

static struct {
	u16 bytes;
	u16 qwords;
} txd_sizes[MAX_SKB_FRAGS + 1];

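/*
 * bdx_tx_map_skb - map the skb head and all of its fragments for DMA,
 * filling one pbl entry per mapping in the TX descriptor, and record each
 * mapping in the txdb followed by the skb terminator entry.
 */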
static inline void
bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
	       struct txd_desc *txdd)
{
	struct txdb *db = &priv->txdb;
	struct pbl *pbl = &txdd->pbl[0];
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;

	db->wptr->len = skb_headlen(skb);
	db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
					    db->wptr->len, PCI_DMA_TODEVICE);
	pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
	pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
	pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
	DBG("=== pbl len: 0x%x ================\n", pbl->len);
	DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
	DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
	bdx_tx_db_inc_wptr(db);

	for (i = 0; i < nr_frags; i++) {
		const skb_frag_t *frag;

		frag = &skb_shinfo(skb)->frags[i];
		db->wptr->len = skb_frag_size(frag);
		db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
						      0, skb_frag_size(frag),
						      DMA_TO_DEVICE);

		pbl++;
		pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
		pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
		pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
		bdx_tx_db_inc_wptr(db);
	}

	db->wptr->len = -txd_sizes[nr_frags].bytes;
	db->wptr->addr.skb = skb;
	bdx_tx_db_inc_wptr(db);
}

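/*
 * init_txd_sizes - precompute TX descriptor sizes for every possible
 * fragment count: 7 longwords of header plus 3 per fragment, rounded up
 * to a whole number of quadwords.
 */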
static void __init init_txd_sizes(void)
{
	int i, lwords;

	for (i = 0; i < MAX_SKB_FRAGS + 1; i++) {
		lwords = 7 + (i * 3);
		if (lwords & 1)
			lwords++;
		txd_sizes[i].qwords = lwords >> 1;
		txd_sizes[i].bytes = lwords << 2;
	}
}

static int bdx_tx_init(struct bdx_priv *priv)
{
	if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
			  regTXD_CFG0_0,
			  regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0))
		goto err_mem;
	if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
			  regTXF_CFG0_0,
			  regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0))
		goto err_mem;

	if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)))
		goto err_mem;

	priv->tx_level = BDX_MAX_TX_LEVEL;
#ifdef BDX_DELAY_WPTR
	priv->tx_update_mark = priv->tx_level - 1024;
#endif
	return 0;

err_mem:
	netdev_err(priv->ndev, "Tx init failed\n");
	return -ENOMEM;
}

static inline int bdx_tx_space(struct bdx_priv *priv)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int fsize;

	f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR;
	fsize = f->m.rptr - f->m.wptr;
	if (fsize <= 0)
		fsize = f->m.memsz + fsize;
	return fsize;
}

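/*
 * bdx_tx_transmit - ndo_start_xmit handler. Builds a TX descriptor
 * (checksum offload, LSO and VLAN flags), maps the skb, advances the fifo
 * write pointer (handling wrap-around), and stops the queue when the
 * available TX space drops below BDX_MIN_TX_LEVEL.
 */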
static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	struct txd_fifo *f = &priv->txd_fifo0;
	int txd_checksum = 7;
	int txd_lgsnd = 0;
	int txd_vlan_id = 0;
	int txd_vtag = 0;
	int txd_mss = 0;

	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct txd_desc *txdd;
	int len;
	unsigned long flags;

	ENTER;
	local_irq_save(flags);
	spin_lock(&priv->tx_lock);

	BDX_ASSERT(f->m.wptr >= f->m.memsz);
	txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
		txd_checksum = 0;

	if (skb_shinfo(skb)->gso_size) {
		txd_mss = skb_shinfo(skb)->gso_size;
		txd_lgsnd = 1;
		DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
		    txd_mss);
	}

	if (skb_vlan_tag_present(skb)) {
		txd_vlan_id = skb_vlan_tag_get(skb) & BITS_MASK(12);
		txd_vtag = 1;
	}

	txdd->length = CPU_CHIP_SWAP16(skb->len);
	txdd->mss = CPU_CHIP_SWAP16(txd_mss);
	txdd->txd_val1 =
	    CPU_CHIP_SWAP32(TXD_W1_VAL
			    (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
			     txd_lgsnd, txd_vlan_id));
	DBG("=== TxD desc =====================\n");
	DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
	DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);

	bdx_tx_map_skb(priv, skb, txdd);

	f->m.wptr += txd_sizes[nr_frags].bytes;
	len = f->m.wptr - f->m.memsz;
	if (unlikely(len >= 0)) {
		f->m.wptr = len;
		if (len > 0) {
			BDX_ASSERT(len > f->m.memsz);
			memcpy(f->m.va, f->m.va + f->m.memsz, len);
		}
	}
	BDX_ASSERT(f->m.wptr >= f->m.memsz);

	priv->tx_level -= txd_sizes[nr_frags].bytes;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_level > priv->tx_update_mark) {
		WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
	} else {
		if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
			priv->tx_noupd = 0;
			WRITE_REG(priv, f->m.reg_WPTR,
				  f->m.wptr & TXF_WPTR_WR_PTR);
		}
	}
#else
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
#endif
#ifdef BDX_LLTX
	netif_trans_update(ndev);
#endif
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (priv->tx_level < BDX_MIN_TX_LEVEL) {
		DBG("%s: %s: TX Q STOP level %d\n",
		    BDX_DRV_NAME, ndev->name, priv->tx_level);
		netif_stop_queue(ndev);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}

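/*
 * bdx_tx_cleanup - walk the TX completion fifo, unmap the DMA buffers of
 * finished packets, free their skbs, and wake the queue once enough TX
 * space is available again.
 */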
static void bdx_tx_cleanup(struct bdx_priv *priv)
{
	struct txf_fifo *f = &priv->txf_fifo0;
	struct txdb *db = &priv->txdb;
	int tx_level = 0;

	ENTER;
	f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
	BDX_ASSERT(f->m.rptr >= f->m.memsz);

	while (f->m.wptr != f->m.rptr) {
		f->m.rptr += BDX_TXF_DESC_SZ;
		f->m.rptr &= f->m.size_mask;

		BDX_ASSERT(db->rptr->len == 0);
		do {
			BDX_ASSERT(db->rptr->addr.dma == 0);
			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
				       db->rptr->len, PCI_DMA_TODEVICE);
			bdx_tx_db_inc_rptr(db);
		} while (db->rptr->len > 0);
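		/* the terminator entry's len is negative: subtracting it
		 * adds the descriptor size back to the TX level */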
		tx_level -= db->rptr->len;

		dev_consume_skb_irq(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}

	BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
	WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);

	spin_lock(&priv->tx_lock);
	priv->tx_level += tx_level;
	BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL);
#ifdef BDX_DELAY_WPTR
	if (priv->tx_noupd) {
		priv->tx_noupd = 0;
		WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
			  priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
	}
#endif

	if (unlikely(netif_queue_stopped(priv->ndev) &&
		     netif_carrier_ok(priv->ndev) &&
		     (priv->tx_level >= BDX_MIN_TX_LEVEL))) {
		DBG("%s: %s: TX Q WAKE level %d\n",
		    BDX_DRV_NAME, priv->ndev->name, priv->tx_level);
		netif_wake_queue(priv->ndev);
	}
	spin_unlock(&priv->tx_lock);
}

static void bdx_tx_free_skbs(struct bdx_priv *priv)
{
	struct txdb *db = &priv->txdb;

	ENTER;
	while (db->rptr != db->wptr) {
		if (likely(db->rptr->len))
			pci_unmap_page(priv->pdev, db->rptr->addr.dma,
				       db->rptr->len, PCI_DMA_TODEVICE);
		else
			dev_kfree_skb(db->rptr->addr.skb);
		bdx_tx_db_inc_rptr(db);
	}
	RET();
}

static void bdx_tx_free(struct bdx_priv *priv)
{
	ENTER;
	bdx_tx_free_skbs(priv);
	bdx_fifo_free(priv, &priv->txd_fifo0.m);
	bdx_fifo_free(priv, &priv->txf_fifo0.m);
	bdx_tx_db_close(&priv->txdb);
}

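/*
 * bdx_tx_push_desc - copy raw descriptor data (e.g. the firmware image)
 * directly into the TX descriptor fifo, handling wrap-around, and update
 * the write pointer. The _safe variant below waits for fifo space and
 * feeds the data in chunks.
 */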
static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int i = f->m.memsz - f->m.wptr;

	if (size == 0)
		return;

	if (i > size) {
		memcpy(f->m.va + f->m.wptr, data, size);
		f->m.wptr += size;
	} else {
		memcpy(f->m.va + f->m.wptr, data, i);
		f->m.wptr = size - i;
		memcpy(f->m.va, data + i, f->m.wptr);
	}
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
}

static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
{
	int timer = 0;

	ENTER;

	while (size > 0) {
		int avail = bdx_tx_space(priv) - 8;

		if (avail <= 0) {
			if (timer++ > 300) {
				DBG("timeout while writing desc to TxD fifo\n");
				break;
			}
			udelay(50);
			continue;
		}
		avail = min(avail, size);
		DBG("about to push %d bytes starting %p size %d\n", avail,
		    data, size);
		bdx_tx_push_desc(priv, data, avail);
		size -= avail;
		data += avail;
	}
	RET();
}

static const struct net_device_ops bdx_netdev_ops = {
	.ndo_open		= bdx_open,
	.ndo_stop		= bdx_close,
	.ndo_start_xmit		= bdx_tx_transmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bdx_ioctl,
	.ndo_set_rx_mode	= bdx_setmulti,
	.ndo_change_mtu		= bdx_change_mtu,
	.ndo_set_mac_address	= bdx_set_mac,
	.ndo_vlan_rx_add_vid	= bdx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bdx_vlan_rx_kill_vid,
};

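/*
 * bdx_probe - PCI probe: enable the device, set the DMA mask (64-bit with
 * a 32-bit fallback), map the register BAR, reset the chip, and register
 * one net_device per port with its NAPI context and default fifo and
 * interrupt-coalescing settings.
 */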
static int
bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *ndev;
	struct bdx_priv *priv;
	int err, pci_using_dac, port;
	unsigned long pciaddr;
	u32 regionSize;
	struct pci_nic *nic;

	ENTER;

	nic = vmalloc(sizeof(*nic));
	if (!nic)
		RET(-ENOMEM);

	err = pci_enable_device(pdev);
	if (err)
		goto err_pci;

	if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
	    !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
		pci_using_dac = 1;
	} else {
		if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
		    (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, BDX_DRV_NAME);
	if (err)
		goto err_dma;

	pci_set_master(pdev);

	pciaddr = pci_resource_start(pdev, 0);
	if (!pciaddr) {
		err = -EIO;
		pr_err("no MMIO resource\n");
		goto err_out_res;
	}
	regionSize = pci_resource_len(pdev, 0);
	if (regionSize < BDX_REGS_SIZE) {
		err = -EIO;
		pr_err("MMIO resource (%x) too small\n", regionSize);
		goto err_out_res;
	}

	nic->regs = ioremap(pciaddr, regionSize);
	if (!nic->regs) {
		err = -EIO;
		pr_err("ioremap failed\n");
		goto err_out_res;
	}

	if (pdev->irq < 2) {
		err = -EIO;
		pr_err("invalid irq (%d)\n", pdev->irq);
		goto err_out_iomap;
	}
	pci_set_drvdata(pdev, nic);

	if (pdev->device == 0x3014)
		nic->port_num = 2;
	else
		nic->port_num = 1;

	print_hw_id(pdev);

	bdx_hw_reset_direct(nic->regs);

	nic->irq_type = IRQ_INTX;
#ifdef BDX_MSI
	if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
		err = pci_enable_msi(pdev);
		if (err)
			pr_err("Can't enable msi. error is %d\n", err);
		else
			nic->irq_type = IRQ_MSI;
	} else
		DBG("HW does not support MSI\n");
#endif

	for (port = 0; port < nic->port_num; port++) {
		ndev = alloc_etherdev(sizeof(struct bdx_priv));
		if (!ndev) {
			err = -ENOMEM;
			goto err_out_iomap;
		}

		ndev->netdev_ops = &bdx_netdev_ops;
		ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;

		bdx_set_ethtool_ops(ndev);

		ndev->if_port = port;
		ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
		    | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		    NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
		ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		    NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;

		if (pci_using_dac)
			ndev->features |= NETIF_F_HIGHDMA;

		priv = nic->priv[port] = netdev_priv(ndev);

		priv->pBdxRegs = nic->regs + port * 0x8000;
		priv->port = port;
		priv->pdev = pdev;
		priv->ndev = ndev;
		priv->nic = nic;
		priv->msg_enable = BDX_DEF_MSG_ENABLE;

		netif_napi_add(ndev, &priv->napi, bdx_poll, 64);

		if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
			DBG("HW statistics not supported\n");
			priv->stats_flag = 0;
		} else {
			priv->stats_flag = 1;
		}

		priv->txd_size = 2;
		priv->txf_size = 2;
		priv->rxd_size = 2;
		priv->rxf_size = 3;

		priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12);
		priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12);

#ifdef BDX_LLTX
		ndev->features |= NETIF_F_LLTX;
#endif

		ndev->min_mtu = ETH_ZLEN;
		ndev->max_mtu = BDX_MAX_MTU;

		spin_lock_init(&priv->tx_lock);

		if (bdx_read_mac(priv)) {
			pr_err("load MAC address failed\n");
			goto err_out_iomap;
		}
		SET_NETDEV_DEV(ndev, &pdev->dev);
		err = register_netdev(ndev);
		if (err) {
			pr_err("register_netdev failed\n");
			goto err_out_free;
		}
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);

		print_eth_id(ndev);
	}
	RET(0);

err_out_free:
	free_netdev(ndev);
err_out_iomap:
	iounmap(nic->regs);
err_out_res:
	pci_release_regions(pdev);
err_dma:
	pci_disable_device(pdev);
err_pci:
	vfree(nic);

	RET(err);
}

static const char
 bdx_stat_names[][ETH_GSTRING_LEN] = {
	"InUCast",
	"InMCast",
	"InBCast",
	"InPkts",
	"InErrors",
	"InDropped",
	"FrameTooLong",
	"FrameSequenceErrors",
	"InVLAN",
	"InDroppedDFE",
	"InDroppedIntFull",
	"InFrameAlignErrors",

	"OutUCast",
	"OutMCast",
	"OutBCast",
	"OutPkts",

	"OutVLAN",
	"InUCastOctects",
	"OutUCastOctects",

	"InBCastOctects",
	"OutBCastOctects",
	"InOctects",
	"OutOctects",
};

static int bdx_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *ecmd)
{
	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
	ethtool_link_ksettings_add_link_mode(ecmd, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
	ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);

	ecmd->base.speed = SPEED_10000;
	ecmd->base.duplex = DUPLEX_FULL;
	ecmd->base.port = PORT_FIBRE;
	ecmd->base.autoneg = AUTONEG_DISABLE;

	return 0;
}

static void
bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
		sizeof(drvinfo->bus_info));
}

static int
bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
{
	u32 rdintcm;
	u32 tdintcm;
	struct bdx_priv *priv = netdev_priv(netdev);

	rdintcm = priv->rdintcm;
	tdintcm = priv->tdintcm;

	ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT;
	ecoal->rx_max_coalesced_frames =
	    ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc));

	ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT;
	ecoal->tx_max_coalesced_frames =
	    ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ);

	return 0;
}

static int
bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
{
	u32 rdintcm;
	u32 tdintcm;
	struct bdx_priv *priv = netdev_priv(netdev);
	int rx_coal;
	int tx_coal;
	int rx_max_coal;
	int tx_max_coal;

	rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT;
	tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT;
	rx_max_coal = ecoal->rx_max_coalesced_frames;
	tx_max_coal = ecoal->tx_max_coalesced_frames;

	rx_max_coal =
	    (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1)
	     / PCK_TH_MULT);
	tx_max_coal =
	    (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1)
	     / PCK_TH_MULT);

	if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) ||
	    (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
		return -EINVAL;

	rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm),
			      GET_RXF_TH(priv->rdintcm), rx_max_coal);
	tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0,
			      tx_max_coal);

	priv->rdintcm = rdintcm;
	priv->tdintcm = tdintcm;

	WRITE_REG(priv, regRDINTCM0, rdintcm);
	WRITE_REG(priv, regTDINTCM0, tdintcm);

	return 0;
}

static inline int bdx_rx_fifo_size_to_packets(int rx_size)
{
	return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
}

static inline int bdx_tx_fifo_size_to_packets(int tx_size)
{
	return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
}

static void
bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
	ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3);
	ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size);
	ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size);
}

static int
bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct bdx_priv *priv = netdev_priv(netdev);
	int rx_size = 0;
	int tx_size = 0;

	for (; rx_size < 4; rx_size++) {
		if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
			break;
	}
	if (rx_size == 4)
		rx_size = 3;

	for (; tx_size < 4; tx_size++) {
		if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
			break;
	}
	if (tx_size == 4)
		tx_size = 3;

	if ((rx_size == priv->rxf_size) &&
	    (tx_size == priv->txd_size))
		return 0;

	priv->rxf_size = rx_size;
	if (rx_size > 1)
		priv->rxd_size = rx_size - 1;
	else
		priv->rxd_size = rx_size;

	priv->txf_size = priv->txd_size = tx_size;

	if (netif_running(netdev)) {
		bdx_close(netdev);
		bdx_open(netdev);
	}
	return 0;
}

static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
		break;
	}
}

static int bdx_get_sset_count(struct net_device *netdev, int stringset)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
			   != sizeof(struct bdx_stats) / sizeof(u64));
		return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
	}

	return -EINVAL;
}

static void bdx_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bdx_priv *priv = netdev_priv(netdev);

	if (priv->stats_flag) {
		bdx_update_stats(priv);

		memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats));
	}
}

static void bdx_set_ethtool_ops(struct net_device *netdev)
{
	static const struct ethtool_ops bdx_ethtool_ops = {
		.get_drvinfo = bdx_get_drvinfo,
		.get_link = ethtool_op_get_link,
		.get_coalesce = bdx_get_coalesce,
		.set_coalesce = bdx_set_coalesce,
		.get_ringparam = bdx_get_ringparam,
		.set_ringparam = bdx_set_ringparam,
		.get_strings = bdx_get_strings,
		.get_sset_count = bdx_get_sset_count,
		.get_ethtool_stats = bdx_get_ethtool_stats,
		.get_link_ksettings = bdx_get_link_ksettings,
	};

	netdev->ethtool_ops = &bdx_ethtool_ops;
}

static void bdx_remove(struct pci_dev *pdev)
{
	struct pci_nic *nic = pci_get_drvdata(pdev);
	struct net_device *ndev;
	int port;

	for (port = 0; port < nic->port_num; port++) {
		ndev = nic->priv[port]->ndev;
		unregister_netdev(ndev);
		free_netdev(ndev);
	}

#ifdef BDX_MSI
	if (nic->irq_type == IRQ_MSI)
		pci_disable_msi(pdev);
#endif

	iounmap(nic->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	vfree(nic);

	RET();
}

static struct pci_driver bdx_pci_driver = {
	.name		= BDX_DRV_NAME,
	.id_table	= bdx_pci_tbl,
	.probe		= bdx_probe,
	.remove		= bdx_remove,
};

static void __init print_driver_id(void)
{
	pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
	pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
}

static int __init bdx_module_init(void)
{
	ENTER;
	init_txd_sizes();
	print_driver_id();
	RET(pci_register_driver(&bdx_pci_driver));
}

module_init(bdx_module_init);

static void __exit bdx_module_exit(void)
{
	ENTER;
	pci_unregister_driver(&bdx_pci_driver);
	RET();
}

module_exit(bdx_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(BDX_DRV_DESC);
MODULE_FIRMWARE("tehuti/bdx.bin");