This source file includes the following definitions:
- macb_dma_desc_get_size
- macb_adj_dma_desc_idx
- macb_64b_desc
- macb_tx_ring_wrap
- macb_tx_desc
- macb_tx_skb
- macb_tx_dma
- macb_rx_ring_wrap
- macb_rx_desc
- macb_rx_buffer
- hw_readl_native
- hw_writel_native
- hw_readl
- hw_writel
- hw_is_native_io
- hw_is_gem
- macb_set_hwaddr
- macb_get_hwaddr
- macb_mdio_wait_for_idle
- macb_mdio_read
- macb_mdio_write
- macb_set_tx_clk
- macb_handle_link_change
- macb_mii_probe
- macb_mii_init
- macb_update_stats
- macb_halt_tx
- macb_tx_unmap
- macb_set_addr
- macb_get_addr
- macb_tx_error_task
- macb_tx_interrupt
- gem_rx_refill
- discard_partial_frame
- gem_rx
- macb_rx_frame
- macb_init_rx_ring
- macb_rx
- macb_poll
- macb_hresp_error_task
- macb_tx_restart
- macb_interrupt
- macb_poll_controller
- macb_tx_map
- macb_features_check
- macb_clear_csum
- macb_pad_and_fcs
- macb_start_xmit
- macb_init_rx_buffer_size
- gem_free_rx_buffers
- macb_free_rx_buffers
- macb_free_consistent
- gem_alloc_rx_buffers
- macb_alloc_rx_buffers
- macb_alloc_consistent
- gem_init_rings
- macb_init_rings
- macb_reset_hw
- gem_mdc_clk_div
- macb_mdc_clk_div
- macb_dbw
- macb_configure_dma
- macb_init_hw
- hash_bit_value
- hash_get_index
- macb_sethashtable
- macb_set_rx_mode
- macb_open
- macb_close
- macb_change_mtu
- gem_update_stats
- gem_get_stats
- gem_get_ethtool_stats
- gem_get_sset_count
- gem_get_ethtool_strings
- macb_get_stats
- macb_get_regs_len
- macb_get_regs
- macb_get_wol
- macb_set_wol
- macb_get_ringparam
- macb_set_ringparam
- gem_get_tsu_rate
- gem_get_ptp_max_adj
- gem_get_ts_info
- macb_get_ts_info
- gem_enable_flow_filters
- gem_prog_cmp_regs
- gem_add_flow_filter
- gem_del_flow_filter
- gem_get_flow_entry
- gem_get_all_flow_entries
- gem_get_rxnfc
- gem_set_rxnfc
- macb_ioctl
- macb_set_txcsum_feature
- macb_set_rxcsum_feature
- macb_set_rxflow_feature
- macb_set_features
- macb_restore_features
- macb_configure_caps
- macb_probe_queues
- macb_clk_init
- macb_init
- at91ether_start
- at91ether_open
- at91ether_close
- at91ether_start_xmit
- at91ether_rx
- at91ether_interrupt
- at91ether_poll_controller
- at91ether_clk_init
- at91ether_init
- fu540_macb_tx_recalc_rate
- fu540_macb_tx_round_rate
- fu540_macb_tx_set_rate
- fu540_c000_clk_init
- fu540_c000_init
- macb_probe
- macb_remove
- macb_suspend
- macb_resume
- macb_runtime_suspend
- macb_runtime_resume
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/clk.h>
10 #include <linux/clk-provider.h>
11 #include <linux/crc32.h>
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/kernel.h>
15 #include <linux/types.h>
16 #include <linux/circ_buf.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/io.h>
20 #include <linux/gpio.h>
21 #include <linux/gpio/consumer.h>
22 #include <linux/interrupt.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/platform_data/macb.h>
27 #include <linux/platform_device.h>
28 #include <linux/phy.h>
29 #include <linux/of.h>
30 #include <linux/of_device.h>
31 #include <linux/of_gpio.h>
32 #include <linux/of_mdio.h>
33 #include <linux/of_net.h>
34 #include <linux/ip.h>
35 #include <linux/udp.h>
36 #include <linux/tcp.h>
37 #include <linux/iopoll.h>
38 #include <linux/pm_runtime.h>
39 #include "macb.h"
40
41
42 struct sifive_fu540_macb_mgmt {
43 void __iomem *reg;
44 unsigned long rate;
45 struct clk_hw hw;
46 };
47
48 #define MACB_RX_BUFFER_SIZE 128
49 #define RX_BUFFER_MULTIPLE 64
50
51 #define DEFAULT_RX_RING_SIZE 512
52 #define MIN_RX_RING_SIZE 64
53 #define MAX_RX_RING_SIZE 8192
54 #define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
55 * (bp)->rx_ring_size)
56
57 #define DEFAULT_TX_RING_SIZE 512
58 #define MIN_TX_RING_SIZE 64
59 #define MAX_TX_RING_SIZE 4096
60 #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
61 * (bp)->tx_ring_size)
62
63
64 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
65
66 #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
67 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
68 | MACB_BIT(ISR_RLE) \
69 | MACB_BIT(TXERR))
70 #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \
71 | MACB_BIT(TXUBR))
72
/* Max length of transmit frame must be a multiple of 8 bytes */
74 #define MACB_TX_LEN_ALIGN 8
75 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
/* Limit the maximum TX length on GEM as required by the Cadence TSO
 * errata, to avoid a false AMBA error being raised on the TX path when
 * the DMA wrongly assumes there is not enough packet-buffer SRAM left.
 */
80 #define GEM_MAX_TX_LEN (unsigned int)(0x3FC0)
81
82 #define GEM_MTU_MIN_SIZE ETH_MIN_MTU
83 #define MACB_NETIF_LSO NETIF_F_TSO
84
85 #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
86 #define MACB_WOL_ENABLED (0x1 << 1)
87
/* Graceful stop timeout in us: allow up to one full frame time at
 * 10 Mbit/s, full duplex, ignoring collisions.
 */
91 #define MACB_HALT_TIMEOUT 1230
92
93 #define MACB_PM_TIMEOUT 100 /* ms */
94
95 #define MACB_MDIO_TIMEOUT 1000000 /* in usecs */
96
/* The DMA buffer descriptor size depends on the hardware configuration:
 *
 * 1. 32-bit DMA addresses:
 *    word 1: 32-bit address of the data buffer
 *    word 2: control
 *
 * 2. 64-bit DMA addresses:
 *    word 1: 32-bit address of the data buffer
 *    word 2: control
 *    word 3: upper 32 bits of the data buffer address
 *    word 4: unused
 *
 * 3. 32-bit DMA addresses with hardware timestamping:
 *    word 1: 32-bit address of the data buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. 64-bit DMA addresses with hardware timestamping:
 *    word 1: 32-bit address of the data buffer
 *    word 2: control
 *    word 3: upper 32 bits of the data buffer address
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
124 static unsigned int macb_dma_desc_get_size(struct macb *bp)
125 {
126 #ifdef MACB_EXT_DESC
127 unsigned int desc_size;
128
129 switch (bp->hw_dma_cap) {
130 case HW_DMA_CAP_64B:
131 desc_size = sizeof(struct macb_dma_desc)
132 + sizeof(struct macb_dma_desc_64);
133 break;
134 case HW_DMA_CAP_PTP:
135 desc_size = sizeof(struct macb_dma_desc)
136 + sizeof(struct macb_dma_desc_ptp);
137 break;
138 case HW_DMA_CAP_64B_PTP:
139 desc_size = sizeof(struct macb_dma_desc)
140 + sizeof(struct macb_dma_desc_64)
141 + sizeof(struct macb_dma_desc_ptp);
142 break;
143 default:
144 desc_size = sizeof(struct macb_dma_desc);
145 }
146 return desc_size;
147 #endif
148 return sizeof(struct macb_dma_desc);
149 }
150
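/* Convert a logical ring index into the index of the underlying basic
 * descriptor: a descriptor occupies two basic slots when either 64-bit
 * addressing or PTP timestamping is enabled, and three when both are.
 */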
151 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
152 {
153 #ifdef MACB_EXT_DESC
154 switch (bp->hw_dma_cap) {
155 case HW_DMA_CAP_64B:
156 case HW_DMA_CAP_PTP:
157 desc_idx <<= 1;
158 break;
159 case HW_DMA_CAP_64B_PTP:
160 desc_idx *= 3;
161 break;
162 default:
163 break;
164 }
165 #endif
166 return desc_idx;
167 }
168
169 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
170 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
171 {
172 return (struct macb_dma_desc_64 *)((void *)desc
173 + sizeof(struct macb_dma_desc));
174 }
175 #endif
176
/* Ring buffer accessors */
178 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
179 {
180 return index & (bp->tx_ring_size - 1);
181 }
182
183 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
184 unsigned int index)
185 {
186 index = macb_tx_ring_wrap(queue->bp, index);
187 index = macb_adj_dma_desc_idx(queue->bp, index);
188 return &queue->tx_ring[index];
189 }
190
191 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
192 unsigned int index)
193 {
194 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
195 }
196
197 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
198 {
199 dma_addr_t offset;
200
201 offset = macb_tx_ring_wrap(queue->bp, index) *
202 macb_dma_desc_get_size(queue->bp);
203
204 return queue->tx_ring_dma + offset;
205 }
206
207 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
208 {
209 return index & (bp->rx_ring_size - 1);
210 }
211
212 static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
213 {
214 index = macb_rx_ring_wrap(queue->bp, index);
215 index = macb_adj_dma_desc_idx(queue->bp, index);
216 return &queue->rx_ring[index];
217 }
218
219 static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
220 {
221 return queue->rx_buffers + queue->bp->rx_buffer_size *
222 macb_rx_ring_wrap(queue->bp, index);
223 }
224
/* I/O accessors */
226 static u32 hw_readl_native(struct macb *bp, int offset)
227 {
228 return __raw_readl(bp->regs + offset);
229 }
230
231 static void hw_writel_native(struct macb *bp, int offset, u32 value)
232 {
233 __raw_writel(value, bp->regs + offset);
234 }
235
236 static u32 hw_readl(struct macb *bp, int offset)
237 {
238 return readl_relaxed(bp->regs + offset);
239 }
240
241 static void hw_writel(struct macb *bp, int offset, u32 value)
242 {
243 writel_relaxed(value, bp->regs + offset);
244 }
245
/* Find the CPU endianness by using the loopback bit of the NCR register.
 * When the CPU is big endian, swapped mode must be programmed for
 * management descriptor access.
 */
250 static bool hw_is_native_io(void __iomem *addr)
251 {
252 u32 value = MACB_BIT(LLB);
253
254 __raw_writel(value, addr + MACB_NCR);
255 value = __raw_readl(addr + MACB_NCR);
256
257
258 __raw_writel(0, addr + MACB_NCR);
259
260 return value == MACB_BIT(LLB);
261 }
262
263 static bool hw_is_gem(void __iomem *addr, bool native_io)
264 {
265 u32 id;
266
267 if (native_io)
268 id = __raw_readl(addr + MACB_MID);
269 else
270 id = readl_relaxed(addr + MACB_MID);
271
272 return MACB_BFEXT(IDNUM, id) >= 0x2;
273 }
274
275 static void macb_set_hwaddr(struct macb *bp)
276 {
277 u32 bottom;
278 u16 top;
279
280 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
281 macb_or_gem_writel(bp, SA1B, bottom);
282 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
283 macb_or_gem_writel(bp, SA1T, top);
284
285
286 macb_or_gem_writel(bp, SA2B, 0);
287 macb_or_gem_writel(bp, SA2T, 0);
288 macb_or_gem_writel(bp, SA3B, 0);
289 macb_or_gem_writel(bp, SA3T, 0);
290 macb_or_gem_writel(bp, SA4B, 0);
291 macb_or_gem_writel(bp, SA4T, 0);
292 }
293
294 static void macb_get_hwaddr(struct macb *bp)
295 {
296 u32 bottom;
297 u16 top;
298 u8 addr[6];
299 int i;
300
301
302 for (i = 0; i < 4; i++) {
303 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
304 top = macb_or_gem_readl(bp, SA1T + i * 8);
305
306 addr[0] = bottom & 0xff;
307 addr[1] = (bottom >> 8) & 0xff;
308 addr[2] = (bottom >> 16) & 0xff;
309 addr[3] = (bottom >> 24) & 0xff;
310 addr[4] = top & 0xff;
311 addr[5] = (top >> 8) & 0xff;
312
313 if (is_valid_ether_addr(addr)) {
314 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
315 return;
316 }
317 }
318
319 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
320 eth_hw_addr_random(bp->dev);
321 }
322
323 static int macb_mdio_wait_for_idle(struct macb *bp)
324 {
325 u32 val;
326
327 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
328 1, MACB_MDIO_TIMEOUT);
329 }
330
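/* MDIO read: wake the controller via runtime PM, wait for the PHY
 * management logic to go idle, issue the read through the MAN register
 * and return the DATA field once the shift has completed.
 */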
331 static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
332 {
333 struct macb *bp = bus->priv;
334 int status;
335
336 status = pm_runtime_get_sync(&bp->pdev->dev);
337 if (status < 0) {
338 pm_runtime_put_noidle(&bp->pdev->dev);
339 goto mdio_pm_exit;
340 }
341
342 status = macb_mdio_wait_for_idle(bp);
343 if (status < 0)
344 goto mdio_read_exit;
345
346 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
347 | MACB_BF(RW, MACB_MAN_READ)
348 | MACB_BF(PHYA, mii_id)
349 | MACB_BF(REGA, regnum)
350 | MACB_BF(CODE, MACB_MAN_CODE)));
351
352 status = macb_mdio_wait_for_idle(bp);
353 if (status < 0)
354 goto mdio_read_exit;
355
356 status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
357
358 mdio_read_exit:
359 pm_runtime_mark_last_busy(&bp->pdev->dev);
360 pm_runtime_put_autosuspend(&bp->pdev->dev);
361 mdio_pm_exit:
362 return status;
363 }
364
365 static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
366 u16 value)
367 {
368 struct macb *bp = bus->priv;
369 int status;
370
371 status = pm_runtime_get_sync(&bp->pdev->dev);
372 if (status < 0) {
373 pm_runtime_put_noidle(&bp->pdev->dev);
374 goto mdio_pm_exit;
375 }
376
377 status = macb_mdio_wait_for_idle(bp);
378 if (status < 0)
379 goto mdio_write_exit;
380
381 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
382 | MACB_BF(RW, MACB_MAN_WRITE)
383 | MACB_BF(PHYA, mii_id)
384 | MACB_BF(REGA, regnum)
385 | MACB_BF(CODE, MACB_MAN_CODE)
386 | MACB_BF(DATA, value)));
387
388 status = macb_mdio_wait_for_idle(bp);
389 if (status < 0)
390 goto mdio_write_exit;
391
392 mdio_write_exit:
393 pm_runtime_mark_last_busy(&bp->pdev->dev);
394 pm_runtime_put_autosuspend(&bp->pdev->dev);
395 mdio_pm_exit:
396 return status;
397 }
398
399
400
401
402
403
404
405 static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
406 {
407 long ferr, rate, rate_rounded;
408
409 if (!clk)
410 return;
411
412 switch (speed) {
413 case SPEED_10:
414 rate = 2500000;
415 break;
416 case SPEED_100:
417 rate = 25000000;
418 break;
419 case SPEED_1000:
420 rate = 125000000;
421 break;
422 default:
423 return;
424 }
425
426 rate_rounded = clk_round_rate(clk, rate);
427 if (rate_rounded < 0)
428 return;
429
/* RGMII allows 50 ppm frequency error. Test and warn if this limit
 * is not satisfied.
 */
433 ferr = abs(rate_rounded - rate);
434 ferr = DIV_ROUND_UP(ferr, rate / 100000);
435 if (ferr > 5)
436 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
437 rate);
438
439 if (clk_set_rate(clk, rate_rounded))
440 netdev_err(dev, "adjusting tx_clk failed.\n");
441 }
442
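/* PHY link change callback: update the speed/duplex bits in NCFGR,
 * retune tx_clk and report the carrier state when the PHY signals a
 * change.
 */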
443 static void macb_handle_link_change(struct net_device *dev)
444 {
445 struct macb *bp = netdev_priv(dev);
446 struct phy_device *phydev = dev->phydev;
447 unsigned long flags;
448 int status_change = 0;
449
450 spin_lock_irqsave(&bp->lock, flags);
451
452 if (phydev->link) {
453 if ((bp->speed != phydev->speed) ||
454 (bp->duplex != phydev->duplex)) {
455 u32 reg;
456
457 reg = macb_readl(bp, NCFGR);
458 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
459 if (macb_is_gem(bp))
460 reg &= ~GEM_BIT(GBE);
461
462 if (phydev->duplex)
463 reg |= MACB_BIT(FD);
464 if (phydev->speed == SPEED_100)
465 reg |= MACB_BIT(SPD);
466 if (phydev->speed == SPEED_1000 &&
467 bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
468 reg |= GEM_BIT(GBE);
469
470 macb_or_gem_writel(bp, NCFGR, reg);
471
472 bp->speed = phydev->speed;
473 bp->duplex = phydev->duplex;
474 status_change = 1;
475 }
476 }
477
478 if (phydev->link != bp->link) {
479 if (!phydev->link) {
480 bp->speed = 0;
481 bp->duplex = -1;
482 }
483 bp->link = phydev->link;
484
485 status_change = 1;
486 }
487
488 spin_unlock_irqrestore(&bp->lock, flags);
489
490 if (status_change) {
491 if (phydev->link) {
/* Only update the TX clock when the link is up and its speed has
 * actually changed.
 */
495 macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
496
497 netif_carrier_on(dev);
498 netdev_info(dev, "link up (%d/%s)\n",
499 phydev->speed,
500 phydev->duplex == DUPLEX_FULL ?
501 "Full" : "Half");
502 } else {
503 netif_carrier_off(dev);
504 netdev_info(dev, "link down\n");
505 }
506 }
507 }
508
509
510 static int macb_mii_probe(struct net_device *dev)
511 {
512 struct macb *bp = netdev_priv(dev);
513 struct phy_device *phydev;
514 struct device_node *np;
515 int ret, i;
516
517 np = bp->pdev->dev.of_node;
518 ret = 0;
519
520 if (np) {
521 if (of_phy_is_fixed_link(np)) {
522 bp->phy_node = of_node_get(np);
523 } else {
524 bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
525
526
527
528
529 if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
530 for (i = 0; i < PHY_MAX_ADDR; i++) {
531 phydev = mdiobus_scan(bp->mii_bus, i);
532 if (IS_ERR(phydev) &&
533 PTR_ERR(phydev) != -ENODEV) {
534 ret = PTR_ERR(phydev);
535 break;
536 }
537 }
538
539 if (ret)
540 return -ENODEV;
541 }
542 }
543 }
544
545 if (bp->phy_node) {
546 phydev = of_phy_connect(dev, bp->phy_node,
547 &macb_handle_link_change, 0,
548 bp->phy_interface);
549 if (!phydev)
550 return -ENODEV;
551 } else {
552 phydev = phy_find_first(bp->mii_bus);
553 if (!phydev) {
554 netdev_err(dev, "no PHY found\n");
555 return -ENXIO;
556 }
557
558
559 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
560 bp->phy_interface);
561 if (ret) {
562 netdev_err(dev, "Could not attach to PHY\n");
563 return ret;
564 }
565 }
566
567
568 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
569 phy_set_max_speed(phydev, SPEED_1000);
570 else
571 phy_set_max_speed(phydev, SPEED_100);
572
573 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
574 phy_remove_link_mode(phydev,
575 ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
576
577 bp->link = 0;
578 bp->speed = 0;
579 bp->duplex = -1;
580
581 return 0;
582 }
583
584 static int macb_mii_init(struct macb *bp)
585 {
586 struct device_node *np;
587 int err = -ENXIO;
588
589
590 macb_writel(bp, NCR, MACB_BIT(MPE));
591
592 bp->mii_bus = mdiobus_alloc();
593 if (!bp->mii_bus) {
594 err = -ENOMEM;
595 goto err_out;
596 }
597
598 bp->mii_bus->name = "MACB_mii_bus";
599 bp->mii_bus->read = &macb_mdio_read;
600 bp->mii_bus->write = &macb_mdio_write;
601 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
602 bp->pdev->name, bp->pdev->id);
603 bp->mii_bus->priv = bp;
604 bp->mii_bus->parent = &bp->pdev->dev;
605
606 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
607
608 np = bp->pdev->dev.of_node;
609 if (np && of_phy_is_fixed_link(np)) {
610 if (of_phy_register_fixed_link(np) < 0) {
611 dev_err(&bp->pdev->dev,
612 "broken fixed-link specification %pOF\n", np);
613 goto err_out_free_mdiobus;
614 }
615
616 err = mdiobus_register(bp->mii_bus);
617 } else {
618 err = of_mdiobus_register(bp->mii_bus, np);
619 }
620
621 if (err)
622 goto err_out_free_fixed_link;
623
624 err = macb_mii_probe(bp->dev);
625 if (err)
626 goto err_out_unregister_bus;
627
628 return 0;
629
630 err_out_unregister_bus:
631 mdiobus_unregister(bp->mii_bus);
632 err_out_free_fixed_link:
633 if (np && of_phy_is_fixed_link(np))
634 of_phy_deregister_fixed_link(np);
635 err_out_free_mdiobus:
636 of_node_put(bp->phy_node);
637 mdiobus_free(bp->mii_bus);
638 err_out:
639 return err;
640 }
641
642 static void macb_update_stats(struct macb *bp)
643 {
644 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
645 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
646 int offset = MACB_PFR;
647
648 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
649
650 for (; p < end; p++, offset += 4)
651 *p += bp->macb_reg_readl(bp, offset);
652 }
653
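/* Ask the controller to halt transmission gracefully (THALT) and poll
 * TSR until the "transmit go" bit clears or the timeout expires.
 */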
654 static int macb_halt_tx(struct macb *bp)
655 {
656 unsigned long halt_time, timeout;
657 u32 status;
658
659 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
660
661 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
662 do {
663 halt_time = jiffies;
664 status = macb_readl(bp, TSR);
665 if (!(status & MACB_BIT(TGO)))
666 return 0;
667
668 udelay(250);
669 } while (time_before(halt_time, timeout));
670
671 return -ETIMEDOUT;
672 }
673
674 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
675 {
676 if (tx_skb->mapping) {
677 if (tx_skb->mapped_as_page)
678 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
679 tx_skb->size, DMA_TO_DEVICE);
680 else
681 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
682 tx_skb->size, DMA_TO_DEVICE);
683 tx_skb->mapping = 0;
684 }
685
686 if (tx_skb->skb) {
687 dev_kfree_skb_any(tx_skb->skb);
688 tx_skb->skb = NULL;
689 }
690 }
691
692 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
693 {
694 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
695 struct macb_dma_desc_64 *desc_64;
696
697 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
698 desc_64 = macb_64b_desc(bp, desc);
699 desc_64->addrh = upper_32_bits(addr);
700
701
702
703
704 dma_wmb();
705 }
706 #endif
707 desc->addr = lower_32_bits(addr);
708 }
709
710 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
711 {
712 dma_addr_t addr = 0;
713 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
714 struct macb_dma_desc_64 *desc_64;
715
716 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
717 desc_64 = macb_64b_desc(bp, desc);
718 addr = ((u64)(desc_64->addrh) << 32);
719 }
720 #endif
721 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
722 return addr;
723 }
724
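/* Work item scheduled from the interrupt handler on TX errors: stop the
 * queues, halt the transmitter, reclaim every pending TX buffer, reset
 * the TX ring and restart transmission.
 */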
725 static void macb_tx_error_task(struct work_struct *work)
726 {
727 struct macb_queue *queue = container_of(work, struct macb_queue,
728 tx_error_task);
729 struct macb *bp = queue->bp;
730 struct macb_tx_skb *tx_skb;
731 struct macb_dma_desc *desc;
732 struct sk_buff *skb;
733 unsigned int tail;
734 unsigned long flags;
735
736 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
737 (unsigned int)(queue - bp->queues),
738 queue->tx_tail, queue->tx_head);
739
/* Prevent the queue IRQ handlers from running: each of them may call
 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
 * As explained below, transmission has to be halted before the TBQP
 * registers are updated, so netif_tx_stop_all_queues() is called to
 * notify the networking stack that the queues are stopped.
 */
746 spin_lock_irqsave(&bp->lock, flags);
747
748
749 netif_tx_stop_all_queues(bp->dev);
750
/* Stop transmission now (in case new packets have just been queued);
 * the controller must be halted before the TBQP register is rewritten.
 */
755 if (macb_halt_tx(bp))
756
757 netdev_err(bp->dev, "BUG: halt tx timed out\n");
758
/* Walk the TX queue, including the frame that caused the error, and
 * release the transmit buffers back to the upper layer.
 */
762 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
763 u32 ctrl;
764
765 desc = macb_tx_desc(queue, tail);
766 ctrl = desc->ctrl;
767 tx_skb = macb_tx_skb(queue, tail);
768 skb = tx_skb->skb;
769
770 if (ctrl & MACB_BIT(TX_USED)) {
771
772 while (!skb) {
773 macb_tx_unmap(bp, tx_skb);
774 tail++;
775 tx_skb = macb_tx_skb(queue, tail);
776 skb = tx_skb->skb;
777 }
778
779
780
781
782 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
783 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
784 macb_tx_ring_wrap(bp, tail),
785 skb->data);
786 bp->dev->stats.tx_packets++;
787 queue->stats.tx_packets++;
788 bp->dev->stats.tx_bytes += skb->len;
789 queue->stats.tx_bytes += skb->len;
790 }
791 } else {
792
793
794
795
796 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
797 netdev_err(bp->dev,
798 "BUG: TX buffers exhausted mid-frame\n");
799
800 desc->ctrl = ctrl | MACB_BIT(TX_USED);
801 }
802
803 macb_tx_unmap(bp, tx_skb);
804 }
805
806
807 desc = macb_tx_desc(queue, 0);
808 macb_set_addr(bp, desc, 0);
809 desc->ctrl = MACB_BIT(TX_USED);
810
811
812 wmb();
813
814
815 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
816 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
817 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
818 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
819 #endif
820
821 queue->tx_head = 0;
822 queue->tx_tail = 0;
823
824
825 macb_writel(bp, TSR, macb_readl(bp, TSR));
826 queue_writel(queue, IER, MACB_TX_INT_FLAGS);
827
828
829 netif_tx_start_all_queues(bp->dev);
830 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
831
832 spin_unlock_irqrestore(&bp->lock, flags);
833 }
834
835 static void macb_tx_interrupt(struct macb_queue *queue)
836 {
837 unsigned int tail;
838 unsigned int head;
839 u32 status;
840 struct macb *bp = queue->bp;
841 u16 queue_index = queue - bp->queues;
842
843 status = macb_readl(bp, TSR);
844 macb_writel(bp, TSR, status);
845
846 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
847 queue_writel(queue, ISR, MACB_BIT(TCOMP));
848
849 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
850 (unsigned long)status);
851
852 head = queue->tx_head;
853 for (tail = queue->tx_tail; tail != head; tail++) {
854 struct macb_tx_skb *tx_skb;
855 struct sk_buff *skb;
856 struct macb_dma_desc *desc;
857 u32 ctrl;
858
859 desc = macb_tx_desc(queue, tail);
860
861
862 rmb();
863
864 ctrl = desc->ctrl;
865
866
867
868
869 if (!(ctrl & MACB_BIT(TX_USED)))
870 break;
871
872
873 for (;; tail++) {
874 tx_skb = macb_tx_skb(queue, tail);
875 skb = tx_skb->skb;
876
877
878 if (skb) {
879 if (unlikely(skb_shinfo(skb)->tx_flags &
880 SKBTX_HW_TSTAMP) &&
881 gem_ptp_do_txstamp(queue, skb, desc) == 0) {
882
883
884
885 tx_skb->skb = NULL;
886 }
887 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
888 macb_tx_ring_wrap(bp, tail),
889 skb->data);
890 bp->dev->stats.tx_packets++;
891 queue->stats.tx_packets++;
892 bp->dev->stats.tx_bytes += skb->len;
893 queue->stats.tx_bytes += skb->len;
894 }
895
896
897 macb_tx_unmap(bp, tx_skb);
898
899
900
901
902
903 if (skb)
904 break;
905 }
906 }
907
908 queue->tx_tail = tail;
909 if (__netif_subqueue_stopped(bp->dev, queue_index) &&
910 CIRC_CNT(queue->tx_head, queue->tx_tail,
911 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
912 netif_wake_subqueue(bp->dev, queue_index);
913 }
914
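/* Refill the GEM RX ring: allocate and DMA-map a new skb for every free
 * descriptor between rx_tail and rx_prepared_head, then hand the
 * descriptors back to the hardware.
 */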
915 static void gem_rx_refill(struct macb_queue *queue)
916 {
917 unsigned int entry;
918 struct sk_buff *skb;
919 dma_addr_t paddr;
920 struct macb *bp = queue->bp;
921 struct macb_dma_desc *desc;
922
923 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
924 bp->rx_ring_size) > 0) {
925 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
926
927
928 rmb();
929
930 queue->rx_prepared_head++;
931 desc = macb_rx_desc(queue, entry);
932
933 if (!queue->rx_skbuff[entry]) {
934
935 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
936 if (unlikely(!skb)) {
937 netdev_err(bp->dev,
938 "Unable to allocate sk_buff\n");
939 break;
940 }
941
942
943 paddr = dma_map_single(&bp->pdev->dev, skb->data,
944 bp->rx_buffer_size,
945 DMA_FROM_DEVICE);
946 if (dma_mapping_error(&bp->pdev->dev, paddr)) {
947 dev_kfree_skb(skb);
948 break;
949 }
950
951 queue->rx_skbuff[entry] = skb;
952
953 if (entry == bp->rx_ring_size - 1)
954 paddr |= MACB_BIT(RX_WRAP);
955 desc->ctrl = 0;
956
957
958
959 dma_wmb();
960 macb_set_addr(bp, desc, paddr);
961
962
963 skb_reserve(skb, NET_IP_ALIGN);
964 } else {
965 desc->ctrl = 0;
966 dma_wmb();
967 desc->addr &= ~MACB_BIT(RX_USED);
968 }
969 }
970
971
972 wmb();
973
974 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
975 queue, queue->rx_prepared_head, queue->rx_tail);
976 }
977
978
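/* Drop a partially received frame by marking its descriptors, from begin
 * up to (but not including) end, as unused again.
 */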
979 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
980 unsigned int end)
981 {
982 unsigned int frag;
983
984 for (frag = begin; frag != end; frag++) {
985 struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
986
987 desc->addr &= ~MACB_BIT(RX_USED);
988 }
989
990
991 wmb();
992
993
994
995
996
997 }
998
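/* GEM receive path: each descriptor holds a complete frame, so unmap the
 * buffer, attach the length and checksum status and pass the skb to
 * napi_gro_receive(), up to the NAPI budget.
 */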
999 static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
1000 int budget)
1001 {
1002 struct macb *bp = queue->bp;
1003 unsigned int len;
1004 unsigned int entry;
1005 struct sk_buff *skb;
1006 struct macb_dma_desc *desc;
1007 int count = 0;
1008
1009 while (count < budget) {
1010 u32 ctrl;
1011 dma_addr_t addr;
1012 bool rxused;
1013
1014 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1015 desc = macb_rx_desc(queue, entry);
1016
1017
1018 rmb();
1019
1020 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
1021 addr = macb_get_addr(bp, desc);
1022
1023 if (!rxused)
1024 break;
1025
1026
1027 dma_rmb();
1028
1029 ctrl = desc->ctrl;
1030
1031 queue->rx_tail++;
1032 count++;
1033
1034 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
1035 netdev_err(bp->dev,
1036 "not whole frame pointed by descriptor\n");
1037 bp->dev->stats.rx_dropped++;
1038 queue->stats.rx_dropped++;
1039 break;
1040 }
1041 skb = queue->rx_skbuff[entry];
1042 if (unlikely(!skb)) {
1043 netdev_err(bp->dev,
1044 "inconsistent Rx descriptor chain\n");
1045 bp->dev->stats.rx_dropped++;
1046 queue->stats.rx_dropped++;
1047 break;
1048 }
1049
1050 queue->rx_skbuff[entry] = NULL;
1051 len = ctrl & bp->rx_frm_len_mask;
1052
1053 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
1054
1055 skb_put(skb, len);
1056 dma_unmap_single(&bp->pdev->dev, addr,
1057 bp->rx_buffer_size, DMA_FROM_DEVICE);
1058
1059 skb->protocol = eth_type_trans(skb, bp->dev);
1060 skb_checksum_none_assert(skb);
1061 if (bp->dev->features & NETIF_F_RXCSUM &&
1062 !(bp->dev->flags & IFF_PROMISC) &&
1063 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
1064 skb->ip_summed = CHECKSUM_UNNECESSARY;
1065
1066 bp->dev->stats.rx_packets++;
1067 queue->stats.rx_packets++;
1068 bp->dev->stats.rx_bytes += skb->len;
1069 queue->stats.rx_bytes += skb->len;
1070
1071 gem_ptp_do_rxstamp(bp, skb, desc);
1072
1073 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1074 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1075 skb->len, skb->csum);
1076 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
1077 skb_mac_header(skb), 16, true);
1078 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
1079 skb->data, 32, true);
1080 #endif
1081
1082 napi_gro_receive(napi, skb);
1083 }
1084
1085 gem_rx_refill(queue);
1086
1087 return count;
1088 }
1089
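/* MACB (10/100) receive path: a frame may span several fixed-size RX
 * buffers, so copy the fragments from first_frag..last_frag into a newly
 * allocated skb before passing it up the stack.
 */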
1090 static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
1091 unsigned int first_frag, unsigned int last_frag)
1092 {
1093 unsigned int len;
1094 unsigned int frag;
1095 unsigned int offset;
1096 struct sk_buff *skb;
1097 struct macb_dma_desc *desc;
1098 struct macb *bp = queue->bp;
1099
1100 desc = macb_rx_desc(queue, last_frag);
1101 len = desc->ctrl & bp->rx_frm_len_mask;
1102
1103 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
1104 macb_rx_ring_wrap(bp, first_frag),
1105 macb_rx_ring_wrap(bp, last_frag), len);
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
1116 if (!skb) {
1117 bp->dev->stats.rx_dropped++;
1118 for (frag = first_frag; ; frag++) {
1119 desc = macb_rx_desc(queue, frag);
1120 desc->addr &= ~MACB_BIT(RX_USED);
1121 if (frag == last_frag)
1122 break;
1123 }
1124
1125
1126 wmb();
1127
1128 return 1;
1129 }
1130
1131 offset = 0;
1132 len += NET_IP_ALIGN;
1133 skb_checksum_none_assert(skb);
1134 skb_put(skb, len);
1135
1136 for (frag = first_frag; ; frag++) {
1137 unsigned int frag_len = bp->rx_buffer_size;
1138
1139 if (offset + frag_len > len) {
1140 if (unlikely(frag != last_frag)) {
1141 dev_kfree_skb_any(skb);
1142 return -1;
1143 }
1144 frag_len = len - offset;
1145 }
1146 skb_copy_to_linear_data_offset(skb, offset,
1147 macb_rx_buffer(queue, frag),
1148 frag_len);
1149 offset += bp->rx_buffer_size;
1150 desc = macb_rx_desc(queue, frag);
1151 desc->addr &= ~MACB_BIT(RX_USED);
1152
1153 if (frag == last_frag)
1154 break;
1155 }
1156
1157
1158 wmb();
1159
1160 __skb_pull(skb, NET_IP_ALIGN);
1161 skb->protocol = eth_type_trans(skb, bp->dev);
1162
1163 bp->dev->stats.rx_packets++;
1164 bp->dev->stats.rx_bytes += skb->len;
1165 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1166 skb->len, skb->csum);
1167 napi_gro_receive(napi, skb);
1168
1169 return 0;
1170 }
1171
1172 static inline void macb_init_rx_ring(struct macb_queue *queue)
1173 {
1174 struct macb *bp = queue->bp;
1175 dma_addr_t addr;
1176 struct macb_dma_desc *desc = NULL;
1177 int i;
1178
1179 addr = queue->rx_buffers_dma;
1180 for (i = 0; i < bp->rx_ring_size; i++) {
1181 desc = macb_rx_desc(queue, i);
1182 macb_set_addr(bp, desc, addr);
1183 desc->ctrl = 0;
1184 addr += bp->rx_buffer_size;
1185 }
1186 desc->addr |= MACB_BIT(RX_WRAP);
1187 queue->rx_tail = 0;
1188 }
1189
1190 static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
1191 int budget)
1192 {
1193 struct macb *bp = queue->bp;
1194 bool reset_rx_queue = false;
1195 int received = 0;
1196 unsigned int tail;
1197 int first_frag = -1;
1198
1199 for (tail = queue->rx_tail; budget > 0; tail++) {
1200 struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
1201 u32 ctrl;
1202
1203
1204 rmb();
1205
1206 if (!(desc->addr & MACB_BIT(RX_USED)))
1207 break;
1208
1209
1210 dma_rmb();
1211
1212 ctrl = desc->ctrl;
1213
1214 if (ctrl & MACB_BIT(RX_SOF)) {
1215 if (first_frag != -1)
1216 discard_partial_frame(queue, first_frag, tail);
1217 first_frag = tail;
1218 }
1219
1220 if (ctrl & MACB_BIT(RX_EOF)) {
1221 int dropped;
1222
1223 if (unlikely(first_frag == -1)) {
1224 reset_rx_queue = true;
1225 continue;
1226 }
1227
1228 dropped = macb_rx_frame(queue, napi, first_frag, tail);
1229 first_frag = -1;
1230 if (unlikely(dropped < 0)) {
1231 reset_rx_queue = true;
1232 continue;
1233 }
1234 if (!dropped) {
1235 received++;
1236 budget--;
1237 }
1238 }
1239 }
1240
1241 if (unlikely(reset_rx_queue)) {
1242 unsigned long flags;
1243 u32 ctrl;
1244
1245 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1246
1247 spin_lock_irqsave(&bp->lock, flags);
1248
1249 ctrl = macb_readl(bp, NCR);
1250 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1251
1252 macb_init_rx_ring(queue);
1253 queue_writel(queue, RBQP, queue->rx_ring_dma);
1254
1255 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1256
1257 spin_unlock_irqrestore(&bp->lock, flags);
1258 return received;
1259 }
1260
1261 if (first_frag != -1)
1262 queue->rx_tail = first_frag;
1263 else
1264 queue->rx_tail = tail;
1265
1266 return received;
1267 }
1268
1269 static int macb_poll(struct napi_struct *napi, int budget)
1270 {
1271 struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
1272 struct macb *bp = queue->bp;
1273 int work_done;
1274 u32 status;
1275
1276 status = macb_readl(bp, RSR);
1277 macb_writel(bp, RSR, status);
1278
1279 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
1280 (unsigned long)status, budget);
1281
1282 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
1283 if (work_done < budget) {
1284 napi_complete_done(napi, work_done);
1285
1286
1287 status = macb_readl(bp, RSR);
1288 if (status) {
1289 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1290 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1291 napi_reschedule(napi);
1292 } else {
1293 queue_writel(queue, IER, bp->rx_intr_mask);
1294 }
1295 }
1296
1297
1298
1299 return work_done;
1300 }
1301
1302 static void macb_hresp_error_task(unsigned long data)
1303 {
1304 struct macb *bp = (struct macb *)data;
1305 struct net_device *dev = bp->dev;
1306 struct macb_queue *queue = bp->queues;
1307 unsigned int q;
1308 u32 ctrl;
1309
1310 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1311 queue_writel(queue, IDR, bp->rx_intr_mask |
1312 MACB_TX_INT_FLAGS |
1313 MACB_BIT(HRESP));
1314 }
1315 ctrl = macb_readl(bp, NCR);
1316 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
1317 macb_writel(bp, NCR, ctrl);
1318
1319 netif_tx_stop_all_queues(dev);
1320 netif_carrier_off(dev);
1321
1322 bp->macbgem_ops.mog_init_rings(bp);
1323
1324
1325 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1326 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
1327 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1328 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1329 queue_writel(queue, RBQPH,
1330 upper_32_bits(queue->rx_ring_dma));
1331 #endif
1332 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1333 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1334 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1335 queue_writel(queue, TBQPH,
1336 upper_32_bits(queue->tx_ring_dma));
1337 #endif
1338
1339
1340 queue_writel(queue, IER,
1341 bp->rx_intr_mask |
1342 MACB_TX_INT_FLAGS |
1343 MACB_BIT(HRESP));
1344 }
1345
1346 ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
1347 macb_writel(bp, NCR, ctrl);
1348
1349 netif_carrier_on(dev);
1350 netif_tx_start_all_queues(dev);
1351 }
1352
1353 static void macb_tx_restart(struct macb_queue *queue)
1354 {
1355 unsigned int head = queue->tx_head;
1356 unsigned int tail = queue->tx_tail;
1357 struct macb *bp = queue->bp;
1358
1359 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1360 queue_writel(queue, ISR, MACB_BIT(TXUBR));
1361
1362 if (head == tail)
1363 return;
1364
1365 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1366 }
1367
1368 static irqreturn_t macb_interrupt(int irq, void *dev_id)
1369 {
1370 struct macb_queue *queue = dev_id;
1371 struct macb *bp = queue->bp;
1372 struct net_device *dev = bp->dev;
1373 u32 status, ctrl;
1374
1375 status = queue_readl(queue, ISR);
1376
1377 if (unlikely(!status))
1378 return IRQ_NONE;
1379
1380 spin_lock(&bp->lock);
1381
1382 while (status) {
1383
1384 if (unlikely(!netif_running(dev))) {
1385 queue_writel(queue, IDR, -1);
1386 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1387 queue_writel(queue, ISR, -1);
1388 break;
1389 }
1390
1391 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1392 (unsigned int)(queue - bp->queues),
1393 (unsigned long)status);
1394
1395 if (status & bp->rx_intr_mask) {
1396
1397
1398
1399
1400
1401
1402 queue_writel(queue, IDR, bp->rx_intr_mask);
1403 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1404 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1405
1406 if (napi_schedule_prep(&queue->napi)) {
1407 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1408 __napi_schedule(&queue->napi);
1409 }
1410 }
1411
1412 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1413 queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1414 schedule_work(&queue->tx_error_task);
1415
1416 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1417 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1418
1419 break;
1420 }
1421
1422 if (status & MACB_BIT(TCOMP))
1423 macb_tx_interrupt(queue);
1424
1425 if (status & MACB_BIT(TXUBR))
1426 macb_tx_restart(queue);
1427
/* Under heavy load a hardware issue can leave the RX DMA stopped, which
 * shows up as endless "used buffer descriptor read" (RXUBR) interrupts.
 * Toggling the receive enable bit in NCR clears the condition.
 */
1439 if (status & MACB_BIT(RXUBR)) {
1440 ctrl = macb_readl(bp, NCR);
1441 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1442 wmb();
1443 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1444
1445 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1446 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1447 }
1448
1449 if (status & MACB_BIT(ISR_ROVR)) {
1450
1451 if (macb_is_gem(bp))
1452 bp->hw_stats.gem.rx_overruns++;
1453 else
1454 bp->hw_stats.macb.rx_overruns++;
1455
1456 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1457 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1458 }
1459
1460 if (status & MACB_BIT(HRESP)) {
1461 tasklet_schedule(&bp->hresp_err_tasklet);
1462 netdev_err(dev, "DMA bus error: HRESP not OK\n");
1463
1464 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1465 queue_writel(queue, ISR, MACB_BIT(HRESP));
1466 }
1467 status = queue_readl(queue, ISR);
1468 }
1469
1470 spin_unlock(&bp->lock);
1471
1472 return IRQ_HANDLED;
1473 }
1474
1475 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
1479 static void macb_poll_controller(struct net_device *dev)
1480 {
1481 struct macb *bp = netdev_priv(dev);
1482 struct macb_queue *queue;
1483 unsigned long flags;
1484 unsigned int q;
1485
1486 local_irq_save(flags);
1487 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1488 macb_interrupt(dev->irq, queue);
1489 local_irq_restore(flags);
1490 }
1491 #endif
1492
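/* Map the skb header and fragments into TX descriptors, splitting any
 * piece larger than max_tx_length. Descriptors are filled in reverse so
 * that the controlling first descriptor (carrying the LSO and checksum
 * bits) is handed to the hardware last.
 */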
1493 static unsigned int macb_tx_map(struct macb *bp,
1494 struct macb_queue *queue,
1495 struct sk_buff *skb,
1496 unsigned int hdrlen)
1497 {
1498 dma_addr_t mapping;
1499 unsigned int len, entry, i, tx_head = queue->tx_head;
1500 struct macb_tx_skb *tx_skb = NULL;
1501 struct macb_dma_desc *desc;
1502 unsigned int offset, size, count = 0;
1503 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1504 unsigned int eof = 1, mss_mfs = 0;
1505 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
1506
1507
1508 if (skb_shinfo(skb)->gso_size != 0) {
1509 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1510
1511 lso_ctrl = MACB_LSO_UFO_ENABLE;
1512 else
1513
1514 lso_ctrl = MACB_LSO_TSO_ENABLE;
1515 }
1516
1517
1518 len = skb_headlen(skb);
1519
1520
1521 size = hdrlen;
1522
1523 offset = 0;
1524 while (len) {
1525 entry = macb_tx_ring_wrap(bp, tx_head);
1526 tx_skb = &queue->tx_skb[entry];
1527
1528 mapping = dma_map_single(&bp->pdev->dev,
1529 skb->data + offset,
1530 size, DMA_TO_DEVICE);
1531 if (dma_mapping_error(&bp->pdev->dev, mapping))
1532 goto dma_error;
1533
1534
1535 tx_skb->skb = NULL;
1536 tx_skb->mapping = mapping;
1537 tx_skb->size = size;
1538 tx_skb->mapped_as_page = false;
1539
1540 len -= size;
1541 offset += size;
1542 count++;
1543 tx_head++;
1544
1545 size = min(len, bp->max_tx_length);
1546 }
1547
1548
1549 for (f = 0; f < nr_frags; f++) {
1550 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1551
1552 len = skb_frag_size(frag);
1553 offset = 0;
1554 while (len) {
1555 size = min(len, bp->max_tx_length);
1556 entry = macb_tx_ring_wrap(bp, tx_head);
1557 tx_skb = &queue->tx_skb[entry];
1558
1559 mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1560 offset, size, DMA_TO_DEVICE);
1561 if (dma_mapping_error(&bp->pdev->dev, mapping))
1562 goto dma_error;
1563
1564
1565 tx_skb->skb = NULL;
1566 tx_skb->mapping = mapping;
1567 tx_skb->size = size;
1568 tx_skb->mapped_as_page = true;
1569
1570 len -= size;
1571 offset += size;
1572 count++;
1573 tx_head++;
1574 }
1575 }
1576
1577
1578 if (unlikely(!tx_skb)) {
1579 netdev_err(bp->dev, "BUG! empty skb!\n");
1580 return 0;
1581 }
1582
1583
1584 tx_skb->skb = skb;
1585
1586
1587
1588
1589
1590
1591
1592
1593 i = tx_head;
1594 entry = macb_tx_ring_wrap(bp, i);
1595 ctrl = MACB_BIT(TX_USED);
1596 desc = macb_tx_desc(queue, entry);
1597 desc->ctrl = ctrl;
1598
1599 if (lso_ctrl) {
1600 if (lso_ctrl == MACB_LSO_UFO_ENABLE)
1601
1602 mss_mfs = skb_shinfo(skb)->gso_size +
1603 skb_transport_offset(skb) +
1604 ETH_FCS_LEN;
1605 else {
1606 mss_mfs = skb_shinfo(skb)->gso_size;
1607
1608
1609
1610 seq_ctrl = 0;
1611 }
1612 }
1613
1614 do {
1615 i--;
1616 entry = macb_tx_ring_wrap(bp, i);
1617 tx_skb = &queue->tx_skb[entry];
1618 desc = macb_tx_desc(queue, entry);
1619
1620 ctrl = (u32)tx_skb->size;
1621 if (eof) {
1622 ctrl |= MACB_BIT(TX_LAST);
1623 eof = 0;
1624 }
1625 if (unlikely(entry == (bp->tx_ring_size - 1)))
1626 ctrl |= MACB_BIT(TX_WRAP);
1627
1628
1629 if (i == queue->tx_head) {
1630 ctrl |= MACB_BF(TX_LSO, lso_ctrl);
1631 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
1632 if ((bp->dev->features & NETIF_F_HW_CSUM) &&
1633 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
1634 ctrl |= MACB_BIT(TX_NOCRC);
1635 } else
1636
1637
1638
1639 ctrl |= MACB_BF(MSS_MFS, mss_mfs);
1640
1641
1642 macb_set_addr(bp, desc, tx_skb->mapping);
1643
1644
1645
1646 wmb();
1647 desc->ctrl = ctrl;
1648 } while (i != queue->tx_head);
1649
1650 queue->tx_head = tx_head;
1651
1652 return count;
1653
1654 dma_error:
1655 netdev_err(bp->dev, "TX DMA map failed\n");
1656
1657 for (i = queue->tx_head; i != tx_head; i++) {
1658 tx_skb = macb_tx_skb(queue, i);
1659
1660 macb_tx_unmap(bp, tx_skb);
1661 }
1662
1663 return 0;
1664 }
1665
1666 static netdev_features_t macb_features_check(struct sk_buff *skb,
1667 struct net_device *dev,
1668 netdev_features_t features)
1669 {
1670 unsigned int nr_frags, f;
1671 unsigned int hdrlen;
1672
1673
1674
1675
1676 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
1677 return features;
1678
1679
1680 hdrlen = skb_transport_offset(skb);
1681
1682
1683
1684
1685
1686 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
1687 return features & ~MACB_NETIF_LSO;
1688
1689 nr_frags = skb_shinfo(skb)->nr_frags;
1690
1691 nr_frags--;
1692 for (f = 0; f < nr_frags; f++) {
1693 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1694
1695 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
1696 return features & ~MACB_NETIF_LSO;
1697 }
1698 return features;
1699 }
1700
1701 static inline int macb_clear_csum(struct sk_buff *skb)
1702 {
1703
1704 if (skb->ip_summed != CHECKSUM_PARTIAL)
1705 return 0;
1706
1707
1708 if (unlikely(skb_cow_head(skb, 0)))
1709 return -1;
1710
1711
1712
1713
1714
1715 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
1716 return 0;
1717 }
1718
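/* For frames that skip hardware checksum insertion (and therefore skip
 * hardware CRC generation, see the TX_NOCRC handling in macb_tx_map()),
 * pad to the minimum Ethernet length and append a software-computed FCS.
 */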
1719 static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
1720 {
1721 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
1722 int padlen = ETH_ZLEN - (*skb)->len;
1723 int headroom = skb_headroom(*skb);
1724 int tailroom = skb_tailroom(*skb);
1725 struct sk_buff *nskb;
1726 u32 fcs;
1727
1728 if (!(ndev->features & NETIF_F_HW_CSUM) ||
1729 !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
1730 skb_shinfo(*skb)->gso_size)
1731 return 0;
1732
1733 if (padlen <= 0) {
1734
1735 if (tailroom >= ETH_FCS_LEN)
1736 goto add_fcs;
1737
1738 else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
1739 padlen = 0;
1740
1741 else
1742 padlen = ETH_FCS_LEN;
1743 } else {
1744
1745 padlen += ETH_FCS_LEN;
1746 }
1747
1748 if (!cloned && headroom + tailroom >= padlen) {
1749 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
1750 skb_set_tail_pointer(*skb, (*skb)->len);
1751 } else {
1752 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
1753 if (!nskb)
1754 return -ENOMEM;
1755
1756 dev_consume_skb_any(*skb);
1757 *skb = nskb;
1758 }
1759
1760 if (padlen > ETH_FCS_LEN)
1761 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
1762
1763 add_fcs:
1764
1765 fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
1766 fcs = ~fcs;
1767
1768 skb_put_u8(*skb, fcs & 0xff);
1769 skb_put_u8(*skb, (fcs >> 8) & 0xff);
1770 skb_put_u8(*skb, (fcs >> 16) & 0xff);
1771 skb_put_u8(*skb, (fcs >> 24) & 0xff);
1772
1773 return 0;
1774 }
1775
1776 static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1777 {
1778 u16 queue_index = skb_get_queue_mapping(skb);
1779 struct macb *bp = netdev_priv(dev);
1780 struct macb_queue *queue = &bp->queues[queue_index];
1781 unsigned long flags;
1782 unsigned int desc_cnt, nr_frags, frag_size, f;
1783 unsigned int hdrlen;
1784 bool is_lso, is_udp = 0;
1785 netdev_tx_t ret = NETDEV_TX_OK;
1786
1787 if (macb_clear_csum(skb)) {
1788 dev_kfree_skb_any(skb);
1789 return ret;
1790 }
1791
1792 if (macb_pad_and_fcs(&skb, dev)) {
1793 dev_kfree_skb_any(skb);
1794 return ret;
1795 }
1796
1797 is_lso = (skb_shinfo(skb)->gso_size != 0);
1798
1799 if (is_lso) {
1800 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
1801
1802
1803 if (is_udp)
1804
1805 hdrlen = skb_transport_offset(skb);
1806 else
1807 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
1808 if (skb_headlen(skb) < hdrlen) {
1809 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
1810
1811 return NETDEV_TX_BUSY;
1812 }
1813 } else
1814 hdrlen = min(skb_headlen(skb), bp->max_tx_length);
1815
1816 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1817 netdev_vdbg(bp->dev,
1818 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1819 queue_index, skb->len, skb->head, skb->data,
1820 skb_tail_pointer(skb), skb_end_pointer(skb));
1821 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1822 skb->data, 16, true);
1823 #endif
1824
1825
1826
1827
1828
1829 if (is_lso && (skb_headlen(skb) > hdrlen))
1830
1831 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
1832 else
1833 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1834 nr_frags = skb_shinfo(skb)->nr_frags;
1835 for (f = 0; f < nr_frags; f++) {
1836 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1837 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1838 }
1839
1840 spin_lock_irqsave(&bp->lock, flags);
1841
1842
1843 if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
1844 bp->tx_ring_size) < desc_cnt) {
1845 netif_stop_subqueue(dev, queue_index);
1846 spin_unlock_irqrestore(&bp->lock, flags);
1847 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
1848 queue->tx_head, queue->tx_tail);
1849 return NETDEV_TX_BUSY;
1850 }
1851
1852
1853 if (!macb_tx_map(bp, queue, skb, hdrlen)) {
1854 dev_kfree_skb_any(skb);
1855 goto unlock;
1856 }
1857
1858
1859 wmb();
1860 skb_tx_timestamp(skb);
1861
1862 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1863
1864 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
1865 netif_stop_subqueue(dev, queue_index);
1866
1867 unlock:
1868 spin_unlock_irqrestore(&bp->lock, flags);
1869
1870 return ret;
1871 }
1872
1873 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1874 {
1875 if (!macb_is_gem(bp)) {
1876 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1877 } else {
1878 bp->rx_buffer_size = size;
1879
1880 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1881 netdev_dbg(bp->dev,
1882 "RX buffer must be multiple of %d bytes, expanding\n",
1883 RX_BUFFER_MULTIPLE);
1884 bp->rx_buffer_size =
1885 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1886 }
1887 }
1888
1889 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
1890 bp->dev->mtu, bp->rx_buffer_size);
1891 }
1892
1893 static void gem_free_rx_buffers(struct macb *bp)
1894 {
1895 struct sk_buff *skb;
1896 struct macb_dma_desc *desc;
1897 struct macb_queue *queue;
1898 dma_addr_t addr;
1899 unsigned int q;
1900 int i;
1901
1902 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1903 if (!queue->rx_skbuff)
1904 continue;
1905
1906 for (i = 0; i < bp->rx_ring_size; i++) {
1907 skb = queue->rx_skbuff[i];
1908
1909 if (!skb)
1910 continue;
1911
1912 desc = macb_rx_desc(queue, i);
1913 addr = macb_get_addr(bp, desc);
1914
1915 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1916 DMA_FROM_DEVICE);
1917 dev_kfree_skb_any(skb);
1918 skb = NULL;
1919 }
1920
1921 kfree(queue->rx_skbuff);
1922 queue->rx_skbuff = NULL;
1923 }
1924 }
1925
1926 static void macb_free_rx_buffers(struct macb *bp)
1927 {
1928 struct macb_queue *queue = &bp->queues[0];
1929
1930 if (queue->rx_buffers) {
1931 dma_free_coherent(&bp->pdev->dev,
1932 bp->rx_ring_size * bp->rx_buffer_size,
1933 queue->rx_buffers, queue->rx_buffers_dma);
1934 queue->rx_buffers = NULL;
1935 }
1936 }
1937
1938 static void macb_free_consistent(struct macb *bp)
1939 {
1940 struct macb_queue *queue;
1941 unsigned int q;
1942 int size;
1943
1944 bp->macbgem_ops.mog_free_rx_buffers(bp);
1945
1946 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1947 kfree(queue->tx_skb);
1948 queue->tx_skb = NULL;
1949 if (queue->tx_ring) {
1950 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
1951 dma_free_coherent(&bp->pdev->dev, size,
1952 queue->tx_ring, queue->tx_ring_dma);
1953 queue->tx_ring = NULL;
1954 }
1955 if (queue->rx_ring) {
1956 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
1957 dma_free_coherent(&bp->pdev->dev, size,
1958 queue->rx_ring, queue->rx_ring_dma);
1959 queue->rx_ring = NULL;
1960 }
1961 }
1962 }
1963
1964 static int gem_alloc_rx_buffers(struct macb *bp)
1965 {
1966 struct macb_queue *queue;
1967 unsigned int q;
1968 int size;
1969
1970 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1971 size = bp->rx_ring_size * sizeof(struct sk_buff *);
1972 queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
1973 if (!queue->rx_skbuff)
1974 return -ENOMEM;
1975 else
1976 netdev_dbg(bp->dev,
1977 "Allocated %d RX struct sk_buff entries at %p\n",
1978 bp->rx_ring_size, queue->rx_skbuff);
1979 }
1980 return 0;
1981 }
1982
1983 static int macb_alloc_rx_buffers(struct macb *bp)
1984 {
1985 struct macb_queue *queue = &bp->queues[0];
1986 int size;
1987
1988 size = bp->rx_ring_size * bp->rx_buffer_size;
1989 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1990 &queue->rx_buffers_dma, GFP_KERNEL);
1991 if (!queue->rx_buffers)
1992 return -ENOMEM;
1993
1994 netdev_dbg(bp->dev,
1995 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1996 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
1997 return 0;
1998 }
1999
2000 static int macb_alloc_consistent(struct macb *bp)
2001 {
2002 struct macb_queue *queue;
2003 unsigned int q;
2004 int size;
2005
2006 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2007 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2008 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2009 &queue->tx_ring_dma,
2010 GFP_KERNEL);
2011 if (!queue->tx_ring)
2012 goto out_err;
2013 netdev_dbg(bp->dev,
2014 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
2015 q, size, (unsigned long)queue->tx_ring_dma,
2016 queue->tx_ring);
2017
2018 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
2019 queue->tx_skb = kmalloc(size, GFP_KERNEL);
2020 if (!queue->tx_skb)
2021 goto out_err;
2022
2023 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2024 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2025 &queue->rx_ring_dma, GFP_KERNEL);
2026 if (!queue->rx_ring)
2027 goto out_err;
2028 netdev_dbg(bp->dev,
2029 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
2030 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
2031 }
2032 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
2033 goto out_err;
2034
2035 return 0;
2036
2037 out_err:
2038 macb_free_consistent(bp);
2039 return -ENOMEM;
2040 }
2041
2042 static void gem_init_rings(struct macb *bp)
2043 {
2044 struct macb_queue *queue;
2045 struct macb_dma_desc *desc = NULL;
2046 unsigned int q;
2047 int i;
2048
2049 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2050 for (i = 0; i < bp->tx_ring_size; i++) {
2051 desc = macb_tx_desc(queue, i);
2052 macb_set_addr(bp, desc, 0);
2053 desc->ctrl = MACB_BIT(TX_USED);
2054 }
2055 desc->ctrl |= MACB_BIT(TX_WRAP);
2056 queue->tx_head = 0;
2057 queue->tx_tail = 0;
2058
2059 queue->rx_tail = 0;
2060 queue->rx_prepared_head = 0;
2061
2062 gem_rx_refill(queue);
2063 }
2064
2065 }
2066
2067 static void macb_init_rings(struct macb *bp)
2068 {
2069 int i;
2070 struct macb_dma_desc *desc = NULL;
2071
2072 macb_init_rx_ring(&bp->queues[0]);
2073
2074 for (i = 0; i < bp->tx_ring_size; i++) {
2075 desc = macb_tx_desc(&bp->queues[0], i);
2076 macb_set_addr(bp, desc, 0);
2077 desc->ctrl = MACB_BIT(TX_USED);
2078 }
2079 bp->queues[0].tx_head = 0;
2080 bp->queues[0].tx_tail = 0;
2081 desc->ctrl |= MACB_BIT(TX_WRAP);
2082 }
2083
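/* Put the controller into a quiescent state: disable RX and TX, clear
 * the statistics registers and acknowledge any pending interrupts.
 */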
2084 static void macb_reset_hw(struct macb *bp)
2085 {
2086 struct macb_queue *queue;
2087 unsigned int q;
2088 u32 ctrl = macb_readl(bp, NCR);
2089
2090
2091
2092
2093 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
2094
2095
2096 ctrl |= MACB_BIT(CLRSTAT);
2097
2098 macb_writel(bp, NCR, ctrl);
2099
2100
2101 macb_writel(bp, TSR, -1);
2102 macb_writel(bp, RSR, -1);
2103
2104
2105 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2106 queue_writel(queue, IDR, -1);
2107 queue_readl(queue, ISR);
2108 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2109 queue_writel(queue, ISR, -1);
2110 }
2111 }
2112
2113 static u32 gem_mdc_clk_div(struct macb *bp)
2114 {
2115 u32 config;
2116 unsigned long pclk_hz = clk_get_rate(bp->pclk);
2117
2118 if (pclk_hz <= 20000000)
2119 config = GEM_BF(CLK, GEM_CLK_DIV8);
2120 else if (pclk_hz <= 40000000)
2121 config = GEM_BF(CLK, GEM_CLK_DIV16);
2122 else if (pclk_hz <= 80000000)
2123 config = GEM_BF(CLK, GEM_CLK_DIV32);
2124 else if (pclk_hz <= 120000000)
2125 config = GEM_BF(CLK, GEM_CLK_DIV48);
2126 else if (pclk_hz <= 160000000)
2127 config = GEM_BF(CLK, GEM_CLK_DIV64);
2128 else
2129 config = GEM_BF(CLK, GEM_CLK_DIV96);
2130
2131 return config;
2132 }
2133
2134 static u32 macb_mdc_clk_div(struct macb *bp)
2135 {
2136 u32 config;
2137 unsigned long pclk_hz;
2138
2139 if (macb_is_gem(bp))
2140 return gem_mdc_clk_div(bp);
2141
2142 pclk_hz = clk_get_rate(bp->pclk);
2143 if (pclk_hz <= 20000000)
2144 config = MACB_BF(CLK, MACB_CLK_DIV8);
2145 else if (pclk_hz <= 40000000)
2146 config = MACB_BF(CLK, MACB_CLK_DIV16);
2147 else if (pclk_hz <= 80000000)
2148 config = MACB_BF(CLK, MACB_CLK_DIV32);
2149 else
2150 config = MACB_BF(CLK, MACB_CLK_DIV64);
2151
2152 return config;
2153 }
2154
/* Get the DMA bus width field of the network configuration register that
 * should be programmed, based on the maximum bus width reported by the
 * design configuration register.
 */
2159 static u32 macb_dbw(struct macb *bp)
2160 {
2161 if (!macb_is_gem(bp))
2162 return 0;
2163
2164 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2165 case 4:
2166 return GEM_BF(DBW, GEM_DBW128);
2167 case 2:
2168 return GEM_BF(DBW, GEM_DBW64);
2169 case 1:
2170 default:
2171 return GEM_BF(DBW, GEM_DBW32);
2172 }
2173 }
2174
/* Configure the receive DMA engine:
 * - use the correct receive buffer size
 * - set the best burst length for the chip in use
 *   (fall back to the default if the HW capability is absent)
 * - set both RX and TX packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
2182 static void macb_configure_dma(struct macb *bp)
2183 {
2184 struct macb_queue *queue;
2185 u32 buffer_size;
2186 unsigned int q;
2187 u32 dmacfg;
2188
2189 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
2190 if (macb_is_gem(bp)) {
2191 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
2192 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2193 if (q)
2194 queue_writel(queue, RBQS, buffer_size);
2195 else
2196 dmacfg |= GEM_BF(RXBS, buffer_size);
2197 }
2198 if (bp->dma_burst_length)
2199 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
2200 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
2201 dmacfg &= ~GEM_BIT(ENDIA_PKT);
2202
2203 if (bp->native_io)
2204 dmacfg &= ~GEM_BIT(ENDIA_DESC);
2205 else
2206 dmacfg |= GEM_BIT(ENDIA_DESC);
2207
2208 if (bp->dev->features & NETIF_F_HW_CSUM)
2209 dmacfg |= GEM_BIT(TXCOEN);
2210 else
2211 dmacfg &= ~GEM_BIT(TXCOEN);
2212
2213 dmacfg &= ~GEM_BIT(ADDR64);
2214 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2215 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2216 dmacfg |= GEM_BIT(ADDR64);
2217 #endif
2218 #ifdef CONFIG_MACB_USE_HWSTAMP
2219 if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
2220 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
2221 #endif
2222 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2223 dmacfg);
2224 gem_writel(bp, DMACFG, dmacfg);
2225 }
2226 }
2227
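/* Program the hardware from scratch: NCFGR (MDC divider, pause, checksum
 * offload, jumbo frames, bus width), the DMA configuration, the per-queue
 * ring base registers and interrupt masks, then enable RX and TX.
 */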
2228 static void macb_init_hw(struct macb *bp)
2229 {
2230 struct macb_queue *queue;
2231 unsigned int q;
2232
2233 u32 config;
2234
2235 macb_reset_hw(bp);
2236 macb_set_hwaddr(bp);
2237
2238 config = macb_mdc_clk_div(bp);
2239 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2240 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
2241 config |= MACB_BF(RBOF, NET_IP_ALIGN);
2242 config |= MACB_BIT(PAE);
2243 config |= MACB_BIT(DRFCS);
2244 if (bp->caps & MACB_CAPS_JUMBO)
2245 config |= MACB_BIT(JFRAME);
2246 else
2247 config |= MACB_BIT(BIG);
2248 if (bp->dev->flags & IFF_PROMISC)
2249 config |= MACB_BIT(CAF);
2250 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2251 config |= GEM_BIT(RXCOEN);
2252 if (!(bp->dev->flags & IFF_BROADCAST))
2253 config |= MACB_BIT(NBC);
2254 config |= macb_dbw(bp);
2255 macb_writel(bp, NCFGR, config);
2256 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
2257 gem_writel(bp, JML, bp->jumbo_max_len);
2258 bp->speed = SPEED_10;
2259 bp->duplex = DUPLEX_HALF;
2260 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
2261 if (bp->caps & MACB_CAPS_JUMBO)
2262 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
2263
2264 macb_configure_dma(bp);
2265
2266
2267 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2268 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
2269 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2270 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2271 queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
2272 #endif
2273 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
2274 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2275 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2276 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
2277 #endif
2278
2279 /* Enable interrupts */
2280 queue_writel(queue, IER,
2281 bp->rx_intr_mask |
2282 MACB_TX_INT_FLAGS |
2283 MACB_BIT(HRESP));
2284 }
2285
2286 /* Enable TX and RX */
2287 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
2288 }
2289
2290 /* The hash address register is 64 bits long and takes up two locations
2291  * in the register map: the bottom half is written to HRB and the top
2292  * half to HRT.
2293  *
2294  * The unicast hash enable and the multicast hash enable bits in the
2295  * network configuration register enable the reception of hash matched
2296  * frames.  The destination address is reduced to a 6 bit index into
2297  * the 64 bit hash register using the following hash function: an
2298  * exclusive or of every sixth bit of the destination address,
2299  *
2300  *	hi[j] = da[j] ^ da[j + 6] ^ da[j + 12] ^ ... ^ da[j + 42],  j = 0..5
2301  *
2302  * where da[0] is the least significant bit of the first byte received
2303  * (the multicast/unicast indicator) and da[47] the most significant bit
2304  * of the last byte.  If the resulting index points to a bit that is set
2305  * in the hash register, the frame is accepted when the corresponding
2306  * (unicast or multicast) hash enable bit is set in NCFGR.
2307  */
2323 static inline int hash_bit_value(int bitnr, __u8 *addr)
2324 {
2325 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2326 return 1;
2327 return 0;
2328 }
2329
2330
2331 static int hash_get_index(__u8 *addr)
2332 {
2333 int i, j, bitval;
2334 int hash_index = 0;
2335
2336 for (j = 0; j < 6; j++) {
2337 for (i = 0, bitval = 0; i < 8; i++)
2338 bitval ^= hash_bit_value(i * 6 + j, addr);
2339
2340 hash_index |= (bitval << j);
2341 }
2342
2343 return hash_index;
2344 }
2345
2346
2347 static void macb_sethashtable(struct net_device *dev)
2348 {
2349 struct netdev_hw_addr *ha;
2350 unsigned long mc_filter[2];
2351 unsigned int bitnr;
2352 struct macb *bp = netdev_priv(dev);
2353
2354 mc_filter[0] = 0;
2355 mc_filter[1] = 0;
2356
2357 netdev_for_each_mc_addr(ha, dev) {
2358 bitnr = hash_get_index(ha->addr);
2359 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2360 }
2361
2362 macb_or_gem_writel(bp, HRB, mc_filter[0]);
2363 macb_or_gem_writel(bp, HRT, mc_filter[1]);
2364 }
2365
2366
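/* Apply the RX filtering policy from dev->flags: copy-all-frames for
 * promiscuous mode, accept-all for IFF_ALLMULTI, a hash-filtered multicast
 * list otherwise.  RX checksum offload is cleared while promiscuous mode is
 * active and restored (if NETIF_F_RXCSUM is set) when it is left.
 */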
2367 static void macb_set_rx_mode(struct net_device *dev)
2368 {
2369 unsigned long cfg;
2370 struct macb *bp = netdev_priv(dev);
2371
2372 cfg = macb_readl(bp, NCFGR);
2373
2374 if (dev->flags & IFF_PROMISC) {
2375 /* Enable promiscuous mode */
2376 cfg |= MACB_BIT(CAF);
2377
2378 /* Disable RX checksum offload */
2379 if (macb_is_gem(bp))
2380 cfg &= ~GEM_BIT(RXCOEN);
2381 } else {
2382 /* Disable promiscuous mode */
2383 cfg &= ~MACB_BIT(CAF);
2384
2385 /* Enable RX checksum offload only if requested */
2386 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2387 cfg |= GEM_BIT(RXCOEN);
2388 }
2389
2390 if (dev->flags & IFF_ALLMULTI) {
2391 /* Enable all multicast mode */
2392 macb_or_gem_writel(bp, HRB, -1);
2393 macb_or_gem_writel(bp, HRT, -1);
2394 cfg |= MACB_BIT(NCFGR_MTI);
2395 } else if (!netdev_mc_empty(dev)) {
2396 /* Enable specific multicasts */
2397 macb_sethashtable(dev);
2398 cfg |= MACB_BIT(NCFGR_MTI);
2399 } else if (dev->flags & (~IFF_ALLMULTI)) {
2400 /* Disable all multicast mode */
2401 macb_or_gem_writel(bp, HRB, 0);
2402 macb_or_gem_writel(bp, HRT, 0);
2403 cfg &= ~MACB_BIT(NCFGR_MTI);
2404 }
2405
2406 macb_writel(bp, NCFGR, cfg);
2407 }
2408
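/* ndo_open: take a runtime-PM reference, size RX buffers for the current
 * MTU, allocate the DMA rings, enable NAPI on every queue, initialize the
 * hardware, start the PHY and wake the TX queues.  On error the runtime-PM
 * reference is dropped before returning.
 */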
2409 static int macb_open(struct net_device *dev)
2410 {
2411 struct macb *bp = netdev_priv(dev);
2412 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2413 struct macb_queue *queue;
2414 unsigned int q;
2415 int err;
2416
2417 netdev_dbg(bp->dev, "open\n");
2418
2419 err = pm_runtime_get_sync(&bp->pdev->dev);
2420 if (err < 0)
2421 goto pm_exit;
2422
2423
2424 netif_carrier_off(dev);
2425
2426 /* if the PHY is not yet registered, retry later */
2427 if (!dev->phydev) {
2428 err = -EAGAIN;
2429 goto pm_exit;
2430 }
2431
2432
2433 macb_init_rx_buffer_size(bp, bufsz);
2434
2435 err = macb_alloc_consistent(bp);
2436 if (err) {
2437 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2438 err);
2439 goto pm_exit;
2440 }
2441
2442 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2443 napi_enable(&queue->napi);
2444
2445 bp->macbgem_ops.mog_init_rings(bp);
2446 macb_init_hw(bp);
2447
2448
2449 phy_start(dev->phydev);
2450
2451 netif_tx_start_all_queues(dev);
2452
2453 if (bp->ptp_info)
2454 bp->ptp_info->ptp_init(dev);
2455
2456 pm_exit:
2457 if (err) {
2458 pm_runtime_put_sync(&bp->pdev->dev);
2459 return err;
2460 }
2461 return 0;
2462 }
2463
2464 static int macb_close(struct net_device *dev)
2465 {
2466 struct macb *bp = netdev_priv(dev);
2467 struct macb_queue *queue;
2468 unsigned long flags;
2469 unsigned int q;
2470
2471 netif_tx_stop_all_queues(dev);
2472
2473 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2474 napi_disable(&queue->napi);
2475
2476 if (dev->phydev)
2477 phy_stop(dev->phydev);
2478
2479 spin_lock_irqsave(&bp->lock, flags);
2480 macb_reset_hw(bp);
2481 netif_carrier_off(dev);
2482 spin_unlock_irqrestore(&bp->lock, flags);
2483
2484 macb_free_consistent(bp);
2485
2486 if (bp->ptp_info)
2487 bp->ptp_info->ptp_remove(dev);
2488
2489 pm_runtime_put(&bp->pdev->dev);
2490
2491 return 0;
2492 }
2493
2494 static int macb_change_mtu(struct net_device *dev, int new_mtu)
2495 {
2496 if (netif_running(dev))
2497 return -EBUSY;
2498
2499 dev->mtu = new_mtu;
2500
2501 return 0;
2502 }
2503
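/* Fold the GEM hardware counters into both the ethtool statistics array and
 * the cached gem_stats structure.  The TX/RX octet counters are 64 bits wide
 * and read as a low/high register pair; the software per-queue counters are
 * appended after the hardware ones.
 */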
2504 static void gem_update_stats(struct macb *bp)
2505 {
2506 struct macb_queue *queue;
2507 unsigned int i, q, idx;
2508 unsigned long *stat;
2509
2510 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
2511
2512 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2513 u32 offset = gem_statistics[i].offset;
2514 u64 val = bp->macb_reg_readl(bp, offset);
2515
2516 bp->ethtool_stats[i] += val;
2517 *p += val;
2518
2519 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2520 /* Add GEM_OCTTXH/GEM_OCTRXH: the octet counters are 64 bits wide */
2521 val = bp->macb_reg_readl(bp, offset + 4);
2522 bp->ethtool_stats[i] += ((u64)val) << 32;
2523 *(++p) += val;
2524 }
2525 }
2526
2527 idx = GEM_STATS_LEN;
2528 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2529 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
2530 bp->ethtool_stats[idx++] = *stat;
2531 }
2532
2533 static struct net_device_stats *gem_get_stats(struct macb *bp)
2534 {
2535 struct gem_stats *hwstat = &bp->hw_stats.gem;
2536 struct net_device_stats *nstat = &bp->dev->stats;
2537
2538 gem_update_stats(bp);
2539
2540 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2541 hwstat->rx_alignment_errors +
2542 hwstat->rx_resource_errors +
2543 hwstat->rx_overruns +
2544 hwstat->rx_oversize_frames +
2545 hwstat->rx_jabbers +
2546 hwstat->rx_undersized_frames +
2547 hwstat->rx_length_field_frame_errors);
2548 nstat->tx_errors = (hwstat->tx_late_collisions +
2549 hwstat->tx_excessive_collisions +
2550 hwstat->tx_underrun +
2551 hwstat->tx_carrier_sense_errors);
2552 nstat->multicast = hwstat->rx_multicast_frames;
2553 nstat->collisions = (hwstat->tx_single_collision_frames +
2554 hwstat->tx_multiple_collision_frames +
2555 hwstat->tx_excessive_collisions);
2556 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2557 hwstat->rx_jabbers +
2558 hwstat->rx_undersized_frames +
2559 hwstat->rx_length_field_frame_errors);
2560 nstat->rx_over_errors = hwstat->rx_resource_errors;
2561 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2562 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2563 nstat->rx_fifo_errors = hwstat->rx_overruns;
2564 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2565 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2566 nstat->tx_fifo_errors = hwstat->tx_underrun;
2567
2568 return nstat;
2569 }
2570
2571 static void gem_get_ethtool_stats(struct net_device *dev,
2572 struct ethtool_stats *stats, u64 *data)
2573 {
2574 struct macb *bp;
2575
2576 bp = netdev_priv(dev);
2577 gem_update_stats(bp);
2578 memcpy(data, &bp->ethtool_stats, sizeof(u64)
2579 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
2580 }
2581
2582 static int gem_get_sset_count(struct net_device *dev, int sset)
2583 {
2584 struct macb *bp = netdev_priv(dev);
2585
2586 switch (sset) {
2587 case ETH_SS_STATS:
2588 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
2589 default:
2590 return -EOPNOTSUPP;
2591 }
2592 }
2593
2594 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2595 {
2596 char stat_string[ETH_GSTRING_LEN];
2597 struct macb *bp = netdev_priv(dev);
2598 struct macb_queue *queue;
2599 unsigned int i;
2600 unsigned int q;
2601
2602 switch (sset) {
2603 case ETH_SS_STATS:
2604 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2605 memcpy(p, gem_statistics[i].stat_string,
2606 ETH_GSTRING_LEN);
2607
2608 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2609 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
2610 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
2611 q, queue_statistics[i].stat_string);
2612 memcpy(p, stat_string, ETH_GSTRING_LEN);
2613 }
2614 }
2615 break;
2616 }
2617 }
2618
2619 static struct net_device_stats *macb_get_stats(struct net_device *dev)
2620 {
2621 struct macb *bp = netdev_priv(dev);
2622 struct net_device_stats *nstat = &bp->dev->stats;
2623 struct macb_stats *hwstat = &bp->hw_stats.macb;
2624
2625 if (macb_is_gem(bp))
2626 return gem_get_stats(bp);
2627
2628
2629 macb_update_stats(bp);
2630
2631 /* Convert HW stats into netdevice stats */
2632 nstat->rx_errors = (hwstat->rx_fcs_errors +
2633 hwstat->rx_align_errors +
2634 hwstat->rx_resource_errors +
2635 hwstat->rx_overruns +
2636 hwstat->rx_oversize_pkts +
2637 hwstat->rx_jabbers +
2638 hwstat->rx_undersize_pkts +
2639 hwstat->rx_length_mismatch);
2640 nstat->tx_errors = (hwstat->tx_late_cols +
2641 hwstat->tx_excessive_cols +
2642 hwstat->tx_underruns +
2643 hwstat->tx_carrier_errors +
2644 hwstat->sqe_test_errors);
2645 nstat->collisions = (hwstat->tx_single_cols +
2646 hwstat->tx_multiple_cols +
2647 hwstat->tx_excessive_cols);
2648 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2649 hwstat->rx_jabbers +
2650 hwstat->rx_undersize_pkts +
2651 hwstat->rx_length_mismatch);
2652 nstat->rx_over_errors = hwstat->rx_resource_errors +
2653 hwstat->rx_overruns;
2654 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2655 nstat->rx_frame_errors = hwstat->rx_align_errors;
2656 nstat->rx_fifo_errors = hwstat->rx_overruns;
2657
2658 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2659 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2660 nstat->tx_fifo_errors = hwstat->tx_underruns;
2661
2662
2663 return nstat;
2664 }
2665
2666 static int macb_get_regs_len(struct net_device *netdev)
2667 {
2668 return MACB_GREGS_NBR * sizeof(u32);
2669 }
2670
2671 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2672 void *p)
2673 {
2674 struct macb *bp = netdev_priv(dev);
2675 unsigned int tail, head;
2676 u32 *regs_buff = p;
2677
2678 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2679 | MACB_GREGS_VERSION;
2680
2681 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2682 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
2683
2684 regs_buff[0] = macb_readl(bp, NCR);
2685 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
2686 regs_buff[2] = macb_readl(bp, NSR);
2687 regs_buff[3] = macb_readl(bp, TSR);
2688 regs_buff[4] = macb_readl(bp, RBQP);
2689 regs_buff[5] = macb_readl(bp, TBQP);
2690 regs_buff[6] = macb_readl(bp, RSR);
2691 regs_buff[7] = macb_readl(bp, IMR);
2692
2693 regs_buff[8] = tail;
2694 regs_buff[9] = head;
2695 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2696 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2697
2698 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2699 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2700 if (macb_is_gem(bp))
2701 regs_buff[13] = gem_readl(bp, DMACFG);
2702 }
2703
2704 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2705 {
2706 struct macb *bp = netdev_priv(netdev);
2707
2708 wol->supported = 0;
2709 wol->wolopts = 0;
2710
2711 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2712 wol->supported = WAKE_MAGIC;
2713
2714 if (bp->wol & MACB_WOL_ENABLED)
2715 wol->wolopts |= WAKE_MAGIC;
2716 }
2717 }
2718
2719 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2720 {
2721 struct macb *bp = netdev_priv(netdev);
2722
2723 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2724 (wol->wolopts & ~WAKE_MAGIC))
2725 return -EOPNOTSUPP;
2726
2727 if (wol->wolopts & WAKE_MAGIC)
2728 bp->wol |= MACB_WOL_ENABLED;
2729 else
2730 bp->wol &= ~MACB_WOL_ENABLED;
2731
2732 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2733
2734 return 0;
2735 }
2736
2737 static void macb_get_ringparam(struct net_device *netdev,
2738 struct ethtool_ringparam *ring)
2739 {
2740 struct macb *bp = netdev_priv(netdev);
2741
2742 ring->rx_max_pending = MAX_RX_RING_SIZE;
2743 ring->tx_max_pending = MAX_TX_RING_SIZE;
2744
2745 ring->rx_pending = bp->rx_ring_size;
2746 ring->tx_pending = bp->tx_ring_size;
2747 }
2748
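/* Requested ring sizes are clamped to the MIN/MAX ring limits and rounded up
 * to a power of two.  If the interface is running it is closed and reopened
 * so the rings are reallocated with the new sizes.
 */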
2749 static int macb_set_ringparam(struct net_device *netdev,
2750 struct ethtool_ringparam *ring)
2751 {
2752 struct macb *bp = netdev_priv(netdev);
2753 u32 new_rx_size, new_tx_size;
2754 unsigned int reset = 0;
2755
2756 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2757 return -EINVAL;
2758
2759 new_rx_size = clamp_t(u32, ring->rx_pending,
2760 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
2761 new_rx_size = roundup_pow_of_two(new_rx_size);
2762
2763 new_tx_size = clamp_t(u32, ring->tx_pending,
2764 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
2765 new_tx_size = roundup_pow_of_two(new_tx_size);
2766
2767 if ((new_tx_size == bp->tx_ring_size) &&
2768 (new_rx_size == bp->rx_ring_size)) {
2769
2770 return 0;
2771 }
2772
2773 if (netif_running(bp->dev)) {
2774 reset = 1;
2775 macb_close(bp->dev);
2776 }
2777
2778 bp->rx_ring_size = new_rx_size;
2779 bp->tx_ring_size = new_tx_size;
2780
2781 if (reset)
2782 macb_open(bp->dev);
2783
2784 return 0;
2785 }
2786
2787 #ifdef CONFIG_MACB_USE_HWSTAMP
2788 static unsigned int gem_get_tsu_rate(struct macb *bp)
2789 {
2790 struct clk *tsu_clk;
2791 unsigned int tsu_rate;
2792
2793 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
2794 if (!IS_ERR(tsu_clk))
2795 tsu_rate = clk_get_rate(tsu_clk);
2796 /* fall back to pclk when there is no dedicated tsu_clk */
2797 else if (!IS_ERR(bp->pclk)) {
2798 tsu_clk = bp->pclk;
2799 tsu_rate = clk_get_rate(tsu_clk);
2800 } else
2801 return -ENOTSUPP;
2802 return tsu_rate;
2803 }
2804
2805 static s32 gem_get_ptp_max_adj(void)
2806 {
2807 return 64000000;
2808 }
2809
2810 static int gem_get_ts_info(struct net_device *dev,
2811 struct ethtool_ts_info *info)
2812 {
2813 struct macb *bp = netdev_priv(dev);
2814
2815 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
2816 ethtool_op_get_ts_info(dev, info);
2817 return 0;
2818 }
2819
2820 info->so_timestamping =
2821 SOF_TIMESTAMPING_TX_SOFTWARE |
2822 SOF_TIMESTAMPING_RX_SOFTWARE |
2823 SOF_TIMESTAMPING_SOFTWARE |
2824 SOF_TIMESTAMPING_TX_HARDWARE |
2825 SOF_TIMESTAMPING_RX_HARDWARE |
2826 SOF_TIMESTAMPING_RAW_HARDWARE;
2827 info->tx_types =
2828 (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
2829 (1 << HWTSTAMP_TX_OFF) |
2830 (1 << HWTSTAMP_TX_ON);
2831 info->rx_filters =
2832 (1 << HWTSTAMP_FILTER_NONE) |
2833 (1 << HWTSTAMP_FILTER_ALL);
2834
2835 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
2836
2837 return 0;
2838 }
2839
2840 static struct macb_ptp_info gem_ptp_info = {
2841 .ptp_init = gem_ptp_init,
2842 .ptp_remove = gem_ptp_remove,
2843 .get_ptp_max_adj = gem_get_ptp_max_adj,
2844 .get_tsu_rate = gem_get_tsu_rate,
2845 .get_ts_info = gem_get_ts_info,
2846 .get_hwtst = gem_get_hwtst,
2847 .set_hwtst = gem_set_hwtst,
2848 };
2849 #endif
2850
2851 static int macb_get_ts_info(struct net_device *netdev,
2852 struct ethtool_ts_info *info)
2853 {
2854 struct macb *bp = netdev_priv(netdev);
2855
2856 if (bp->ptp_info)
2857 return bp->ptp_info->get_ts_info(netdev, info);
2858
2859 return ethtool_op_get_ts_info(netdev, info);
2860 }
2861
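/* Walk the software list of ethtool flow rules and set or clear the enable
 * bits in the matching GEM type-2 screener (SCRT2) registers, so n-tuple
 * filtering can be toggled without reprogramming the compare registers.
 */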
2862 static void gem_enable_flow_filters(struct macb *bp, bool enable)
2863 {
2864 struct net_device *netdev = bp->dev;
2865 struct ethtool_rx_fs_item *item;
2866 u32 t2_scr;
2867 int num_t2_scr;
2868
2869 if (!(netdev->features & NETIF_F_NTUPLE))
2870 return;
2871
2872 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
2873
2874 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2875 struct ethtool_rx_flow_spec *fs = &item->fs;
2876 struct ethtool_tcpip4_spec *tp4sp_m;
2877
2878 if (fs->location >= num_t2_scr)
2879 continue;
2880
2881 t2_scr = gem_readl_n(bp, SCRT2, fs->location);
2882
2883 /* enable/disable the type-2 screener register for this rule */
2884 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
2885
2886 /* only enable comparers for fields that request an exact match */
2887 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
2888
2889 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
2890 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
2891 else
2892 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
2893
2894 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
2895 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
2896 else
2897 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
2898
2899 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
2900 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
2901 else
2902 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
2903
2904 gem_writel_n(bp, SCRT2, fs->location, t2_scr);
2905 }
2906 }
2907
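/* Translate one ethtool_rx_flow_spec into GEM type-2 comparers: optional
 * compare registers for the IPv4 source address, destination address and
 * TCP/UDP port are written first, then the screener register for this rule
 * combines them and steers matching frames to the queue in fs->ring_cookie.
 */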
2908 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
2909 {
2910 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
2911 uint16_t index = fs->location;
2912 u32 w0, w1, t2_scr;
2913 bool cmp_a = false;
2914 bool cmp_b = false;
2915 bool cmp_c = false;
2916
2917 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
2918 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
2919
2920
2921 if (tp4sp_m->ip4src == 0xFFFFFFFF) {
2922 /* 1st compare reg - IP source address */
2923 w0 = 0;
2924 w1 = 0;
2925 w0 = tp4sp_v->ip4src;
2926 w1 = GEM_BFINS(T2DISMSK, 1, w1);
2927 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2928 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
2929 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
2930 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
2931 cmp_a = true;
2932 }
2933
2934
2935 if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
2936 /* 2nd compare reg - IP destination address */
2937 w0 = 0;
2938 w1 = 0;
2939 w0 = tp4sp_v->ip4dst;
2940 w1 = GEM_BFINS(T2DISMSK, 1, w1);
2941 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2942 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
2943 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
2944 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
2945 cmp_b = true;
2946 }
2947
2948
2949 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
2950 /* 3rd compare reg - source and/or destination port */
2951 w0 = 0;
2952 w1 = 0;
2953 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
2954 if (tp4sp_m->psrc == tp4sp_m->pdst) {
2955 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
2956 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2957 w1 = GEM_BFINS(T2DISMSK, 1, w1);
2958 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
2959 } else {
2960
2961 w1 = GEM_BFINS(T2DISMSK, 0, w1);
2962 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
2963 if (tp4sp_m->psrc == 0xFFFF) {
2964 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
2965 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
2966 } else {
2967 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2968 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
2969 }
2970 }
2971 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
2972 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
2973 cmp_c = true;
2974 }
2975
2976 t2_scr = 0;
2977 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
2978 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
2979 if (cmp_a)
2980 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
2981 if (cmp_b)
2982 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
2983 if (cmp_c)
2984 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
2985 gem_writel_n(bp, SCRT2, index, t2_scr);
2986 }
2987
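/* Add an RX flow rule: copy the ethtool spec, insert it into the per-device
 * list kept sorted by location (a duplicate location is rejected with
 * -EBUSY), program the compare/screener registers and re-enable filtering,
 * all under rx_fs_lock.
 */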
2988 static int gem_add_flow_filter(struct net_device *netdev,
2989 struct ethtool_rxnfc *cmd)
2990 {
2991 struct macb *bp = netdev_priv(netdev);
2992 struct ethtool_rx_flow_spec *fs = &cmd->fs;
2993 struct ethtool_rx_fs_item *item, *newfs;
2994 unsigned long flags;
2995 int ret = -EINVAL;
2996 bool added = false;
2997
2998 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
2999 if (newfs == NULL)
3000 return -ENOMEM;
3001 memcpy(&newfs->fs, fs, sizeof(newfs->fs));
3002
3003 netdev_dbg(netdev,
3004 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3005 fs->flow_type, (int)fs->ring_cookie, fs->location,
3006 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3007 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3008 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
3009
3010 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3011
3012 /* find the correct place to add in the list, keeping it sorted by location */
3013 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3014 if (item->fs.location > newfs->fs.location) {
3015 list_add_tail(&newfs->list, &item->list);
3016 added = true;
3017 break;
3018 } else if (item->fs.location == fs->location) {
3019 netdev_err(netdev, "Rule not added: location %d not free!\n",
3020 fs->location);
3021 ret = -EBUSY;
3022 goto err;
3023 }
3024 }
3025 if (!added)
3026 list_add_tail(&newfs->list, &bp->rx_fs_list.list);
3027
3028 gem_prog_cmp_regs(bp, fs);
3029 bp->rx_fs_list.count++;
3030
3031 gem_enable_flow_filters(bp, 1);
3032
3033 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3034 return 0;
3035
3036 err:
3037 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3038 kfree(newfs);
3039 return ret;
3040 }
3041
3042 static int gem_del_flow_filter(struct net_device *netdev,
3043 struct ethtool_rxnfc *cmd)
3044 {
3045 struct macb *bp = netdev_priv(netdev);
3046 struct ethtool_rx_fs_item *item;
3047 struct ethtool_rx_flow_spec *fs;
3048 unsigned long flags;
3049
3050 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3051
3052 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3053 if (item->fs.location == cmd->fs.location) {
3054
3055 fs = &(item->fs);
3056 netdev_dbg(netdev,
3057 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3058 fs->flow_type, (int)fs->ring_cookie, fs->location,
3059 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3060 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3061 htons(fs->h_u.tcp_ip4_spec.psrc),
3062 htons(fs->h_u.tcp_ip4_spec.pdst));
3063
3064 gem_writel_n(bp, SCRT2, fs->location, 0);
3065
3066 list_del(&item->list);
3067 bp->rx_fs_list.count--;
3068 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3069 kfree(item);
3070 return 0;
3071 }
3072 }
3073
3074 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3075 return -EINVAL;
3076 }
3077
3078 static int gem_get_flow_entry(struct net_device *netdev,
3079 struct ethtool_rxnfc *cmd)
3080 {
3081 struct macb *bp = netdev_priv(netdev);
3082 struct ethtool_rx_fs_item *item;
3083
3084 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3085 if (item->fs.location == cmd->fs.location) {
3086 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
3087 return 0;
3088 }
3089 }
3090 return -EINVAL;
3091 }
3092
3093 static int gem_get_all_flow_entries(struct net_device *netdev,
3094 struct ethtool_rxnfc *cmd, u32 *rule_locs)
3095 {
3096 struct macb *bp = netdev_priv(netdev);
3097 struct ethtool_rx_fs_item *item;
3098 uint32_t cnt = 0;
3099
3100 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3101 if (cnt == cmd->rule_cnt)
3102 return -EMSGSIZE;
3103 rule_locs[cnt] = item->fs.location;
3104 cnt++;
3105 }
3106 cmd->data = bp->max_tuples;
3107 cmd->rule_cnt = cnt;
3108
3109 return 0;
3110 }
3111
3112 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
3113 u32 *rule_locs)
3114 {
3115 struct macb *bp = netdev_priv(netdev);
3116 int ret = 0;
3117
3118 switch (cmd->cmd) {
3119 case ETHTOOL_GRXRINGS:
3120 cmd->data = bp->num_queues;
3121 break;
3122 case ETHTOOL_GRXCLSRLCNT:
3123 cmd->rule_cnt = bp->rx_fs_list.count;
3124 break;
3125 case ETHTOOL_GRXCLSRULE:
3126 ret = gem_get_flow_entry(netdev, cmd);
3127 break;
3128 case ETHTOOL_GRXCLSRLALL:
3129 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
3130 break;
3131 default:
3132 netdev_err(netdev,
3133 "Command parameter %d is not supported\n", cmd->cmd);
3134 ret = -EOPNOTSUPP;
3135 }
3136
3137 return ret;
3138 }
3139
3140 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3141 {
3142 struct macb *bp = netdev_priv(netdev);
3143 int ret;
3144
3145 switch (cmd->cmd) {
3146 case ETHTOOL_SRXCLSRLINS:
3147 if ((cmd->fs.location >= bp->max_tuples)
3148 || (cmd->fs.ring_cookie >= bp->num_queues)) {
3149 ret = -EINVAL;
3150 break;
3151 }
3152 ret = gem_add_flow_filter(netdev, cmd);
3153 break;
3154 case ETHTOOL_SRXCLSRLDEL:
3155 ret = gem_del_flow_filter(netdev, cmd);
3156 break;
3157 default:
3158 netdev_err(netdev,
3159 "Command parameter %d is not supported\n", cmd->cmd);
3160 ret = -EOPNOTSUPP;
3161 }
3162
3163 return ret;
3164 }
3165
3166 static const struct ethtool_ops macb_ethtool_ops = {
3167 .get_regs_len = macb_get_regs_len,
3168 .get_regs = macb_get_regs,
3169 .get_link = ethtool_op_get_link,
3170 .get_ts_info = ethtool_op_get_ts_info,
3171 .get_wol = macb_get_wol,
3172 .set_wol = macb_set_wol,
3173 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3174 .set_link_ksettings = phy_ethtool_set_link_ksettings,
3175 .get_ringparam = macb_get_ringparam,
3176 .set_ringparam = macb_set_ringparam,
3177 };
3178
3179 static const struct ethtool_ops gem_ethtool_ops = {
3180 .get_regs_len = macb_get_regs_len,
3181 .get_regs = macb_get_regs,
3182 .get_link = ethtool_op_get_link,
3183 .get_ts_info = macb_get_ts_info,
3184 .get_ethtool_stats = gem_get_ethtool_stats,
3185 .get_strings = gem_get_ethtool_strings,
3186 .get_sset_count = gem_get_sset_count,
3187 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3188 .set_link_ksettings = phy_ethtool_set_link_ksettings,
3189 .get_ringparam = macb_get_ringparam,
3190 .set_ringparam = macb_set_ringparam,
3191 .get_rxnfc = gem_get_rxnfc,
3192 .set_rxnfc = gem_set_rxnfc,
3193 };
3194
3195 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3196 {
3197 struct phy_device *phydev = dev->phydev;
3198 struct macb *bp = netdev_priv(dev);
3199
3200 if (!netif_running(dev))
3201 return -EINVAL;
3202
3203 if (!phydev)
3204 return -ENODEV;
3205
3206 if (!bp->ptp_info)
3207 return phy_mii_ioctl(phydev, rq, cmd);
3208
3209 switch (cmd) {
3210 case SIOCSHWTSTAMP:
3211 return bp->ptp_info->set_hwtst(dev, rq, cmd);
3212 case SIOCGHWTSTAMP:
3213 return bp->ptp_info->get_hwtst(dev, rq);
3214 default:
3215 return phy_mii_ioctl(phydev, rq, cmd);
3216 }
3217 }
3218
3219 static inline void macb_set_txcsum_feature(struct macb *bp,
3220 netdev_features_t features)
3221 {
3222 u32 val;
3223
3224 if (!macb_is_gem(bp))
3225 return;
3226
3227 val = gem_readl(bp, DMACFG);
3228 if (features & NETIF_F_HW_CSUM)
3229 val |= GEM_BIT(TXCOEN);
3230 else
3231 val &= ~GEM_BIT(TXCOEN);
3232
3233 gem_writel(bp, DMACFG, val);
3234 }
3235
3236 static inline void macb_set_rxcsum_feature(struct macb *bp,
3237 netdev_features_t features)
3238 {
3239 struct net_device *netdev = bp->dev;
3240 u32 val;
3241
3242 if (!macb_is_gem(bp))
3243 return;
3244
3245 val = gem_readl(bp, NCFGR);
3246 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
3247 val |= GEM_BIT(RXCOEN);
3248 else
3249 val &= ~GEM_BIT(RXCOEN);
3250
3251 gem_writel(bp, NCFGR, val);
3252 }
3253
3254 static inline void macb_set_rxflow_feature(struct macb *bp,
3255 netdev_features_t features)
3256 {
3257 if (!macb_is_gem(bp))
3258 return;
3259
3260 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
3261 }
3262
3263 static int macb_set_features(struct net_device *netdev,
3264 netdev_features_t features)
3265 {
3266 struct macb *bp = netdev_priv(netdev);
3267 netdev_features_t changed = features ^ netdev->features;
3268
3269
3270 if (changed & NETIF_F_HW_CSUM)
3271 macb_set_txcsum_feature(bp, features);
3272
3273
3274 if (changed & NETIF_F_RXCSUM)
3275 macb_set_rxcsum_feature(bp, features);
3276
3277
3278 if (changed & NETIF_F_NTUPLE)
3279 macb_set_rxflow_feature(bp, features);
3280
3281 return 0;
3282 }
3283
3284 static void macb_restore_features(struct macb *bp)
3285 {
3286 struct net_device *netdev = bp->dev;
3287 netdev_features_t features = netdev->features;
3288
3289
3290 macb_set_txcsum_feature(bp, features);
3291
3292
3293 macb_set_rxcsum_feature(bp, features);
3294
3295
3296 macb_set_rxflow_feature(bp, features);
3297 }
3298
3299 static const struct net_device_ops macb_netdev_ops = {
3300 .ndo_open = macb_open,
3301 .ndo_stop = macb_close,
3302 .ndo_start_xmit = macb_start_xmit,
3303 .ndo_set_rx_mode = macb_set_rx_mode,
3304 .ndo_get_stats = macb_get_stats,
3305 .ndo_do_ioctl = macb_ioctl,
3306 .ndo_validate_addr = eth_validate_addr,
3307 .ndo_change_mtu = macb_change_mtu,
3308 .ndo_set_mac_address = eth_mac_addr,
3309 #ifdef CONFIG_NET_POLL_CONTROLLER
3310 .ndo_poll_controller = macb_poll_controller,
3311 #endif
3312 .ndo_set_features = macb_set_features,
3313 .ndo_features_check = macb_features_check,
3314 };
3315
3316
3317
3318
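/* Derive the capability flags: start from the per-compatible macb_config
 * (when one was matched), then probe the design-configuration registers to
 * detect GEM vs. MACB, ISR clear-on-write behaviour, packet-buffer vs. FIFO
 * mode and, when built in, PTP/TSU support.
 */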
3319 static void macb_configure_caps(struct macb *bp,
3320 const struct macb_config *dt_conf)
3321 {
3322 u32 dcfg;
3323
3324 if (dt_conf)
3325 bp->caps = dt_conf->caps;
3326
3327 if (hw_is_gem(bp->regs, bp->native_io)) {
3328 bp->caps |= MACB_CAPS_MACB_IS_GEM;
3329
3330 dcfg = gem_readl(bp, DCFG1);
3331 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
3332 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
3333 dcfg = gem_readl(bp, DCFG2);
3334 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
3335 bp->caps |= MACB_CAPS_FIFO_MODE;
3336 #ifdef CONFIG_MACB_USE_HWSTAMP
3337 if (gem_has_ptp(bp)) {
3338 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
3339 pr_err("GEM doesn't support hardware ptp.\n");
3340 else {
3341 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
3342 bp->ptp_info = &gem_ptp_info;
3343 }
3344 }
3345 #endif
3346 }
3347
3348 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
3349 }
3350
3351 static void macb_probe_queues(void __iomem *mem,
3352 bool native_io,
3353 unsigned int *queue_mask,
3354 unsigned int *num_queues)
3355 {
3356 unsigned int hw_q;
3357
3358 *queue_mask = 0x1;
3359 *num_queues = 1;
3360
3361 /* is it macb or gem ?
3362  *
3363  * We need to read directly from the hardware here because we are early
3364  * in the probe process and the MACB_CAPS_MACB_IS_GEM flag has not been
3365  * set yet.
3366  */
3367 if (!hw_is_gem(mem, native_io))
3368 return;
3369
3370 /* bit 0 is never set but queue 0 always exists */
3371 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
3372
3373 *queue_mask |= 0x1;
3374
3375 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
3376 if (*queue_mask & (1 << hw_q))
3377 (*num_queues)++;
3378 }
3379
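/* Acquire and enable the controller clocks.  pclk and hclk are mandatory
 * (taken from platform data when present, otherwise from the clock
 * bindings); tx_clk, rx_clk and tsu_clk are optional.  On failure the
 * clocks already enabled are unwound in reverse order.
 */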
3380 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
3381 struct clk **hclk, struct clk **tx_clk,
3382 struct clk **rx_clk, struct clk **tsu_clk)
3383 {
3384 struct macb_platform_data *pdata;
3385 int err;
3386
3387 pdata = dev_get_platdata(&pdev->dev);
3388 if (pdata) {
3389 *pclk = pdata->pclk;
3390 *hclk = pdata->hclk;
3391 } else {
3392 *pclk = devm_clk_get(&pdev->dev, "pclk");
3393 *hclk = devm_clk_get(&pdev->dev, "hclk");
3394 }
3395
3396 if (IS_ERR_OR_NULL(*pclk)) {
3397 err = PTR_ERR(*pclk);
3398 if (!err)
3399 err = -ENODEV;
3400
3401 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
3402 return err;
3403 }
3404
3405 if (IS_ERR_OR_NULL(*hclk)) {
3406 err = PTR_ERR(*hclk);
3407 if (!err)
3408 err = -ENODEV;
3409
3410 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
3411 return err;
3412 }
3413
3414 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
3415 if (IS_ERR(*tx_clk))
3416 return PTR_ERR(*tx_clk);
3417
3418 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
3419 if (IS_ERR(*rx_clk))
3420 return PTR_ERR(*rx_clk);
3421
3422 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
3423 if (IS_ERR(*tsu_clk))
3424 return PTR_ERR(*tsu_clk);
3425
3426 err = clk_prepare_enable(*pclk);
3427 if (err) {
3428 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3429 return err;
3430 }
3431
3432 err = clk_prepare_enable(*hclk);
3433 if (err) {
3434 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
3435 goto err_disable_pclk;
3436 }
3437
3438 err = clk_prepare_enable(*tx_clk);
3439 if (err) {
3440 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
3441 goto err_disable_hclk;
3442 }
3443
3444 err = clk_prepare_enable(*rx_clk);
3445 if (err) {
3446 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
3447 goto err_disable_txclk;
3448 }
3449
3450 err = clk_prepare_enable(*tsu_clk);
3451 if (err) {
3452 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
3453 goto err_disable_rxclk;
3454 }
3455
3456 return 0;
3457
3458 err_disable_rxclk:
3459 clk_disable_unprepare(*rx_clk);
3460
3461 err_disable_txclk:
3462 clk_disable_unprepare(*tx_clk);
3463
3464 err_disable_hclk:
3465 clk_disable_unprepare(*hclk);
3466
3467 err_disable_pclk:
3468 clk_disable_unprepare(*pclk);
3469
3470 return err;
3471 }
3472
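/* Common MACB/GEM initialization at probe time: bind each hardware queue to
 * its register block and IRQ, select the GEM or MACB ring helpers and
 * ethtool ops, advertise the offloads the hardware supports (LSO, checksum,
 * n-tuple filtering), program the USRIO pin interface and write an initial
 * NCFGR value.
 */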
3473 static int macb_init(struct platform_device *pdev)
3474 {
3475 struct net_device *dev = platform_get_drvdata(pdev);
3476 unsigned int hw_q, q;
3477 struct macb *bp = netdev_priv(dev);
3478 struct macb_queue *queue;
3479 int err;
3480 u32 val, reg;
3481
3482 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
3483 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
3484
3485 /* Set up the queue register mapping once for all: queue0 has a special
3486  * register mapping, and we do not want to test the queue index and
3487  * recompute the corresponding register offset on every access.
3488  */
3489 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
3490 if (!(bp->queue_mask & (1 << hw_q)))
3491 continue;
3492
3493 queue = &bp->queues[q];
3494 queue->bp = bp;
3495 netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
3496 if (hw_q) {
3497 queue->ISR = GEM_ISR(hw_q - 1);
3498 queue->IER = GEM_IER(hw_q - 1);
3499 queue->IDR = GEM_IDR(hw_q - 1);
3500 queue->IMR = GEM_IMR(hw_q - 1);
3501 queue->TBQP = GEM_TBQP(hw_q - 1);
3502 queue->RBQP = GEM_RBQP(hw_q - 1);
3503 queue->RBQS = GEM_RBQS(hw_q - 1);
3504 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3505 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3506 queue->TBQPH = GEM_TBQPH(hw_q - 1);
3507 queue->RBQPH = GEM_RBQPH(hw_q - 1);
3508 }
3509 #endif
3510 } else {
3511
3512 queue->ISR = MACB_ISR;
3513 queue->IER = MACB_IER;
3514 queue->IDR = MACB_IDR;
3515 queue->IMR = MACB_IMR;
3516 queue->TBQP = MACB_TBQP;
3517 queue->RBQP = MACB_RBQP;
3518 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3519 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3520 queue->TBQPH = MACB_TBQPH;
3521 queue->RBQPH = MACB_RBQPH;
3522 }
3523 #endif
3524 }
3525
3526 /* get irq: here we use the Linux queue index, not the hardware queue
3527  * index.  The queue IRQ definitions in the device tree must remove the
3528  * optional gaps that could exist in the hardware queue mask.
3529  */
3531 queue->irq = platform_get_irq(pdev, q);
3532 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
3533 IRQF_SHARED, dev->name, queue);
3534 if (err) {
3535 dev_err(&pdev->dev,
3536 "Unable to request IRQ %d (error %d)\n",
3537 queue->irq, err);
3538 return err;
3539 }
3540
3541 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
3542 q++;
3543 }
3544
3545 dev->netdev_ops = &macb_netdev_ops;
3546
3547
3548 if (macb_is_gem(bp)) {
3549 bp->max_tx_length = GEM_MAX_TX_LEN;
3550 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
3551 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
3552 bp->macbgem_ops.mog_init_rings = gem_init_rings;
3553 bp->macbgem_ops.mog_rx = gem_rx;
3554 dev->ethtool_ops = &gem_ethtool_ops;
3555 } else {
3556 bp->max_tx_length = MACB_MAX_TX_LEN;
3557 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
3558 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
3559 bp->macbgem_ops.mog_init_rings = macb_init_rings;
3560 bp->macbgem_ops.mog_rx = macb_rx;
3561 dev->ethtool_ops = &macb_ethtool_ops;
3562 }
3563
3564
3565 dev->hw_features = NETIF_F_SG;
3566
3567 /* Check LSO capability */
3568 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
3569 dev->hw_features |= MACB_NETIF_LSO;
3570
3571 /* Checksum offload is only available on GEM with a packet buffer */
3572 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
3573 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
3574 if (bp->caps & MACB_CAPS_SG_DISABLED)
3575 dev->hw_features &= ~NETIF_F_SG;
3576 dev->features = dev->hw_features;
3577
3578 /* Check RX flow filter support.
3579  * The maximum number of RX flows is set by the available screener and
3580  * compare registers: each 4-tuple rule needs 1 type-2 screener register
3581  * plus 3 compare registers.
3582  */
3582 reg = gem_readl(bp, DCFG8);
3583 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
3584 GEM_BFEXT(T2SCR, reg));
3585 if (bp->max_tuples > 0) {
3586
3587 if (GEM_BFEXT(SCR2ETH, reg) > 0) {
3588
3589 reg = 0;
3590 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
3591 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
3592
3593 dev->hw_features |= NETIF_F_NTUPLE;
3594
3595 INIT_LIST_HEAD(&bp->rx_fs_list.list);
3596 bp->rx_fs_list.count = 0;
3597 spin_lock_init(&bp->rx_fs_lock);
3598 } else
3599 bp->max_tuples = 0;
3600 }
3601
3602 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
3603 val = 0;
3604 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
3605 val = GEM_BIT(RGMII);
3606 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
3607 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3608 val = MACB_BIT(RMII);
3609 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3610 val = MACB_BIT(MII);
3611
3612 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
3613 val |= MACB_BIT(CLKEN);
3614
3615 macb_or_gem_writel(bp, USRIO, val);
3616 }
3617
3618 /* Set MII management clock divider */
3619 val = macb_mdc_clk_div(bp);
3620 val |= macb_dbw(bp);
3621 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
3622 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
3623 macb_writel(bp, NCFGR, val);
3624
3625 return 0;
3626 }
3627
3628 #if defined(CONFIG_OF)
3629
3630 #define AT91ETHER_MAX_RBUFF_SZ 0x600
3631
3632 #define AT91ETHER_MAX_RX_DESCR 9
3633
3634 static struct sifive_fu540_macb_mgmt *mgmt;
3635
3636
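/* The at91ether_* functions below drive the older EMAC (e.g. the
 * "cdns,at91rm9200-emac" and "cdns,emac" compatibles in the table further
 * down).  It uses a much simpler datapath than the code above: one frame at
 * a time on TX via the TAR/TCR registers and a small fixed RX ring of
 * AT91ETHER_MAX_RX_DESCR buffers of AT91ETHER_MAX_RBUFF_SZ bytes each.
 */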
3637 static int at91ether_start(struct net_device *dev)
3638 {
3639 struct macb *lp = netdev_priv(dev);
3640 struct macb_queue *q = &lp->queues[0];
3641 struct macb_dma_desc *desc;
3642 dma_addr_t addr;
3643 u32 ctl;
3644 int i;
3645
3646 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
3647 (AT91ETHER_MAX_RX_DESCR *
3648 macb_dma_desc_get_size(lp)),
3649 &q->rx_ring_dma, GFP_KERNEL);
3650 if (!q->rx_ring)
3651 return -ENOMEM;
3652
3653 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
3654 AT91ETHER_MAX_RX_DESCR *
3655 AT91ETHER_MAX_RBUFF_SZ,
3656 &q->rx_buffers_dma, GFP_KERNEL);
3657 if (!q->rx_buffers) {
3658 dma_free_coherent(&lp->pdev->dev,
3659 AT91ETHER_MAX_RX_DESCR *
3660 macb_dma_desc_get_size(lp),
3661 q->rx_ring, q->rx_ring_dma);
3662 q->rx_ring = NULL;
3663 return -ENOMEM;
3664 }
3665
3666 addr = q->rx_buffers_dma;
3667 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
3668 desc = macb_rx_desc(q, i);
3669 macb_set_addr(lp, desc, addr);
3670 desc->ctrl = 0;
3671 addr += AT91ETHER_MAX_RBUFF_SZ;
3672 }
3673
3674 /* Set the Wrap bit on the last descriptor */
3675 desc->addr |= MACB_BIT(RX_WRAP);
3676
3677
3678 q->rx_tail = 0;
3679
3680
3681 macb_writel(lp, RBQP, q->rx_ring_dma);
3682
3683 /* Enable Receiver and Transmitter */
3684 ctl = macb_readl(lp, NCR);
3685 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
3686
3687 return 0;
3688 }
3689
3690
3691 static int at91ether_open(struct net_device *dev)
3692 {
3693 struct macb *lp = netdev_priv(dev);
3694 u32 ctl;
3695 int ret;
3696
3697 ret = pm_runtime_get_sync(&lp->pdev->dev);
3698 if (ret < 0) {
3699 pm_runtime_put_noidle(&lp->pdev->dev);
3700 return ret;
3701 }
3702
3703
3704 ctl = macb_readl(lp, NCR);
3705 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
3706
3707 macb_set_hwaddr(lp);
3708
3709 ret = at91ether_start(dev);
3710 if (ret)
3711 return ret;
3712
3713
3714 macb_writel(lp, IER, MACB_BIT(RCOMP) |
3715 MACB_BIT(RXUBR) |
3716 MACB_BIT(ISR_TUND) |
3717 MACB_BIT(ISR_RLE) |
3718 MACB_BIT(TCOMP) |
3719 MACB_BIT(ISR_ROVR) |
3720 MACB_BIT(HRESP));
3721
3722
3723 phy_start(dev->phydev);
3724
3725 netif_start_queue(dev);
3726
3727 return 0;
3728 }
3729
3730
3731 static int at91ether_close(struct net_device *dev)
3732 {
3733 struct macb *lp = netdev_priv(dev);
3734 struct macb_queue *q = &lp->queues[0];
3735 u32 ctl;
3736
3737
3738 ctl = macb_readl(lp, NCR);
3739 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3740
3741
3742 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3743 MACB_BIT(RXUBR) |
3744 MACB_BIT(ISR_TUND) |
3745 MACB_BIT(ISR_RLE) |
3746 MACB_BIT(TCOMP) |
3747 MACB_BIT(ISR_ROVR) |
3748 MACB_BIT(HRESP));
3749
3750 netif_stop_queue(dev);
3751
3752 dma_free_coherent(&lp->pdev->dev,
3753 AT91ETHER_MAX_RX_DESCR *
3754 macb_dma_desc_get_size(lp),
3755 q->rx_ring, q->rx_ring_dma);
3756 q->rx_ring = NULL;
3757
3758 dma_free_coherent(&lp->pdev->dev,
3759 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
3760 q->rx_buffers, q->rx_buffers_dma);
3761 q->rx_buffers = NULL;
3762
3763 return pm_runtime_put(&lp->pdev->dev);
3764 }
3765
3766
3767 static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
3768 struct net_device *dev)
3769 {
3770 struct macb *lp = netdev_priv(dev);
3771
3772 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
3773 netif_stop_queue(dev);
3774
3775
3776 lp->skb = skb;
3777 lp->skb_length = skb->len;
3778 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
3779 skb->len, DMA_TO_DEVICE);
3780 if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
3781 dev_kfree_skb_any(skb);
3782 dev->stats.tx_dropped++;
3783 netdev_err(dev, "%s: DMA mapping error\n", __func__);
3784 return NETDEV_TX_OK;
3785 }
3786
3787
3788 macb_writel(lp, TAR, lp->skb_physaddr);
3789
3790 macb_writel(lp, TCR, skb->len);
3791
3792 } else {
3793 netdev_err(dev, "%s called, but device is busy!\n", __func__);
3794 return NETDEV_TX_BUSY;
3795 }
3796
3797 return NETDEV_TX_OK;
3798 }
3799
3800
3801
3802
3803 static void at91ether_rx(struct net_device *dev)
3804 {
3805 struct macb *lp = netdev_priv(dev);
3806 struct macb_queue *q = &lp->queues[0];
3807 struct macb_dma_desc *desc;
3808 unsigned char *p_recv;
3809 struct sk_buff *skb;
3810 unsigned int pktlen;
3811
3812 desc = macb_rx_desc(q, q->rx_tail);
3813 while (desc->addr & MACB_BIT(RX_USED)) {
3814 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
3815 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
3816 skb = netdev_alloc_skb(dev, pktlen + 2);
3817 if (skb) {
3818 skb_reserve(skb, 2);
3819 skb_put_data(skb, p_recv, pktlen);
3820
3821 skb->protocol = eth_type_trans(skb, dev);
3822 dev->stats.rx_packets++;
3823 dev->stats.rx_bytes += pktlen;
3824 netif_rx(skb);
3825 } else {
3826 dev->stats.rx_dropped++;
3827 }
3828
3829 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
3830 dev->stats.multicast++;
3831
3832
3833 desc->addr &= ~MACB_BIT(RX_USED);
3834
3835
3836 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
3837 q->rx_tail = 0;
3838 else
3839 q->rx_tail++;
3840
3841 desc = macb_rx_desc(q, q->rx_tail);
3842 }
3843 }
3844
3845
3846 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
3847 {
3848 struct net_device *dev = dev_id;
3849 struct macb *lp = netdev_priv(dev);
3850 u32 intstatus, ctl;
3851
3852
3853
3854
3855 intstatus = macb_readl(lp, ISR);
3856
3857
3858 if (intstatus & MACB_BIT(RCOMP))
3859 at91ether_rx(dev);
3860
3861
3862 if (intstatus & MACB_BIT(TCOMP)) {
3863
3864 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
3865 dev->stats.tx_errors++;
3866
3867 if (lp->skb) {
3868 dev_consume_skb_irq(lp->skb);
3869 lp->skb = NULL;
3870 dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
3871 lp->skb_length, DMA_TO_DEVICE);
3872 dev->stats.tx_packets++;
3873 dev->stats.tx_bytes += lp->skb_length;
3874 }
3875 netif_wake_queue(dev);
3876 }
3877
3878 /* RXUBR (RX used bit read): toggle RE to restart the receiver */
3879 if (intstatus & MACB_BIT(RXUBR)) {
3880 ctl = macb_readl(lp, NCR);
3881 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
3882 wmb();
3883 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
3884 }
3885
3886 if (intstatus & MACB_BIT(ISR_ROVR))
3887 netdev_err(dev, "ROVR error\n");
3888
3889 return IRQ_HANDLED;
3890 }
3891
3892 #ifdef CONFIG_NET_POLL_CONTROLLER
3893 static void at91ether_poll_controller(struct net_device *dev)
3894 {
3895 unsigned long flags;
3896
3897 local_irq_save(flags);
3898 at91ether_interrupt(dev->irq, dev);
3899 local_irq_restore(flags);
3900 }
3901 #endif
3902
3903 static const struct net_device_ops at91ether_netdev_ops = {
3904 .ndo_open = at91ether_open,
3905 .ndo_stop = at91ether_close,
3906 .ndo_start_xmit = at91ether_start_xmit,
3907 .ndo_get_stats = macb_get_stats,
3908 .ndo_set_rx_mode = macb_set_rx_mode,
3909 .ndo_set_mac_address = eth_mac_addr,
3910 .ndo_do_ioctl = macb_ioctl,
3911 .ndo_validate_addr = eth_validate_addr,
3912 #ifdef CONFIG_NET_POLL_CONTROLLER
3913 .ndo_poll_controller = at91ether_poll_controller,
3914 #endif
3915 };
3916
3917 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
3918 struct clk **hclk, struct clk **tx_clk,
3919 struct clk **rx_clk, struct clk **tsu_clk)
3920 {
3921 int err;
3922
3923 *hclk = NULL;
3924 *tx_clk = NULL;
3925 *rx_clk = NULL;
3926 *tsu_clk = NULL;
3927
3928 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
3929 if (IS_ERR(*pclk))
3930 return PTR_ERR(*pclk);
3931
3932 err = clk_prepare_enable(*pclk);
3933 if (err) {
3934 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3935 return err;
3936 }
3937
3938 return 0;
3939 }
3940
3941 static int at91ether_init(struct platform_device *pdev)
3942 {
3943 struct net_device *dev = platform_get_drvdata(pdev);
3944 struct macb *bp = netdev_priv(dev);
3945 int err;
3946 u32 reg;
3947
3948 bp->queues[0].bp = bp;
3949
3950 dev->netdev_ops = &at91ether_netdev_ops;
3951 dev->ethtool_ops = &macb_ethtool_ops;
3952
3953 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
3954 0, dev->name, dev);
3955 if (err)
3956 return err;
3957
3958 macb_writel(bp, NCR, 0);
3959
3960 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
3961 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
3962 reg |= MACB_BIT(RM9200_RMII);
3963
3964 macb_writel(bp, NCFGR, reg);
3965
3966 return 0;
3967 }
3968
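/* SiFive FU540-C000 TX clock handling.  The GEMGXL management register at
 * mgmt->reg appears to select between the 125 MHz (gigabit) TX clock path
 * and the slower path: set_rate() writes 0 for 125 MHz and 1 otherwise,
 * while round_rate() restricts requests to 2.5, 25 or 125 MHz.
 */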
3969 static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
3970 unsigned long parent_rate)
3971 {
3972 return mgmt->rate;
3973 }
3974
3975 static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
3976 unsigned long *parent_rate)
3977 {
3978 if (WARN_ON(rate < 2500000))
3979 return 2500000;
3980 else if (rate == 2500000)
3981 return 2500000;
3982 else if (WARN_ON(rate < 13750000))
3983 return 2500000;
3984 else if (WARN_ON(rate < 25000000))
3985 return 25000000;
3986 else if (rate == 25000000)
3987 return 25000000;
3988 else if (WARN_ON(rate < 75000000))
3989 return 25000000;
3990 else if (WARN_ON(rate < 125000000))
3991 return 125000000;
3992 else if (rate == 125000000)
3993 return 125000000;
3994
3995 WARN_ON(rate > 125000000);
3996
3997 return 125000000;
3998 }
3999
4000 static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
4001 unsigned long parent_rate)
4002 {
4003 rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
4004 if (rate != 125000000)
4005 iowrite32(1, mgmt->reg);
4006 else
4007 iowrite32(0, mgmt->reg);
4008 mgmt->rate = rate;
4009
4010 return 0;
4011 }
4012
4013 static const struct clk_ops fu540_c000_ops = {
4014 .recalc_rate = fu540_macb_tx_recalc_rate,
4015 .round_rate = fu540_macb_tx_round_rate,
4016 .set_rate = fu540_macb_tx_set_rate,
4017 };
4018
4019 static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
4020 struct clk **hclk, struct clk **tx_clk,
4021 struct clk **rx_clk, struct clk **tsu_clk)
4022 {
4023 struct clk_init_data init;
4024 int err = 0;
4025
4026 err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
4027 if (err)
4028 return err;
4029
4030 mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
4031 if (!mgmt)
4032 return -ENOMEM;
4033
4034 init.name = "sifive-gemgxl-mgmt";
4035 init.ops = &fu540_c000_ops;
4036 init.flags = 0;
4037 init.num_parents = 0;
4038
4039 mgmt->rate = 0;
4040 mgmt->hw.init = &init;
4041
4042 *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
4043 if (IS_ERR(*tx_clk))
4044 return PTR_ERR(*tx_clk);
4045
4046 err = clk_prepare_enable(*tx_clk);
4047 if (err)
4048 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
4049 else
4050 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
4051
4052 return 0;
4053 }
4054
4055 static int fu540_c000_init(struct platform_device *pdev)
4056 {
4057 mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
4058 if (IS_ERR(mgmt->reg))
4059 return PTR_ERR(mgmt->reg);
4060
4061 return macb_init(pdev);
4062 }
4063
4064 static const struct macb_config fu540_c000_config = {
4065 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
4066 MACB_CAPS_GEM_HAS_PTP,
4067 .dma_burst_length = 16,
4068 .clk_init = fu540_c000_clk_init,
4069 .init = fu540_c000_init,
4070 .jumbo_max_len = 10240,
4071 };
4072
4073 static const struct macb_config at91sam9260_config = {
4074 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4075 .clk_init = macb_clk_init,
4076 .init = macb_init,
4077 };
4078
4079 static const struct macb_config sama5d3macb_config = {
4080 .caps = MACB_CAPS_SG_DISABLED
4081 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4082 .clk_init = macb_clk_init,
4083 .init = macb_init,
4084 };
4085
4086 static const struct macb_config pc302gem_config = {
4087 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
4088 .dma_burst_length = 16,
4089 .clk_init = macb_clk_init,
4090 .init = macb_init,
4091 };
4092
4093 static const struct macb_config sama5d2_config = {
4094 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4095 .dma_burst_length = 16,
4096 .clk_init = macb_clk_init,
4097 .init = macb_init,
4098 };
4099
4100 static const struct macb_config sama5d3_config = {
4101 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
4102 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
4103 .dma_burst_length = 16,
4104 .clk_init = macb_clk_init,
4105 .init = macb_init,
4106 .jumbo_max_len = 10240,
4107 };
4108
4109 static const struct macb_config sama5d4_config = {
4110 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4111 .dma_burst_length = 4,
4112 .clk_init = macb_clk_init,
4113 .init = macb_init,
4114 };
4115
4116 static const struct macb_config emac_config = {
4117 .caps = MACB_CAPS_NEEDS_RSTONUBR,
4118 .clk_init = at91ether_clk_init,
4119 .init = at91ether_init,
4120 };
4121
4122 static const struct macb_config np4_config = {
4123 .caps = MACB_CAPS_USRIO_DISABLED,
4124 .clk_init = macb_clk_init,
4125 .init = macb_init,
4126 };
4127
4128 static const struct macb_config zynqmp_config = {
4129 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4130 MACB_CAPS_JUMBO |
4131 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
4132 .dma_burst_length = 16,
4133 .clk_init = macb_clk_init,
4134 .init = macb_init,
4135 .jumbo_max_len = 10240,
4136 };
4137
4138 static const struct macb_config zynq_config = {
4139 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
4140 MACB_CAPS_NEEDS_RSTONUBR,
4141 .dma_burst_length = 16,
4142 .clk_init = macb_clk_init,
4143 .init = macb_init,
4144 };
4145
4146 static const struct of_device_id macb_dt_ids[] = {
4147 { .compatible = "cdns,at32ap7000-macb" },
4148 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
4149 { .compatible = "cdns,macb" },
4150 { .compatible = "cdns,np4-macb", .data = &np4_config },
4151 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
4152 { .compatible = "cdns,gem", .data = &pc302gem_config },
4153 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
4154 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
4155 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
4156 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
4157 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
4158 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
4159 { .compatible = "cdns,emac", .data = &emac_config },
4160 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
4161 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
4162 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
4163 { }
4164 };
4165 MODULE_DEVICE_TABLE(of, macb_dt_ids);
4166 #endif
4167
4168 static const struct macb_config default_gem_config = {
4169 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4170 MACB_CAPS_JUMBO |
4171 MACB_CAPS_GEM_HAS_PTP,
4172 .dma_burst_length = 16,
4173 .clk_init = macb_clk_init,
4174 .init = macb_init,
4175 .jumbo_max_len = 10240,
4176 };
4177
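/* Probe: map the registers, pick the per-compatible macb_config (clock init,
 * capabilities, init hook), enable clocks and runtime PM, allocate the
 * net_device with one TX queue per hardware queue, read the MAC address and
 * PHY mode from the device tree, then run the IP-specific init, bring up the
 * MDIO bus and register the netdev.
 */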
4178 static int macb_probe(struct platform_device *pdev)
4179 {
4180 const struct macb_config *macb_config = &default_gem_config;
4181 int (*clk_init)(struct platform_device *, struct clk **,
4182 struct clk **, struct clk **, struct clk **,
4183 struct clk **) = macb_config->clk_init;
4184 int (*init)(struct platform_device *) = macb_config->init;
4185 struct device_node *np = pdev->dev.of_node;
4186 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
4187 struct clk *tsu_clk = NULL;
4188 unsigned int queue_mask, num_queues;
4189 bool native_io;
4190 struct phy_device *phydev;
4191 struct net_device *dev;
4192 struct resource *regs;
4193 void __iomem *mem;
4194 const char *mac;
4195 struct macb *bp;
4196 int err, val;
4197
4198 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4199 mem = devm_ioremap_resource(&pdev->dev, regs);
4200 if (IS_ERR(mem))
4201 return PTR_ERR(mem);
4202
4203 if (np) {
4204 const struct of_device_id *match;
4205
4206 match = of_match_node(macb_dt_ids, np);
4207 if (match && match->data) {
4208 macb_config = match->data;
4209 clk_init = macb_config->clk_init;
4210 init = macb_config->init;
4211 }
4212 }
4213
4214 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
4215 if (err)
4216 return err;
4217
4218 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
4219 pm_runtime_use_autosuspend(&pdev->dev);
4220 pm_runtime_get_noresume(&pdev->dev);
4221 pm_runtime_set_active(&pdev->dev);
4222 pm_runtime_enable(&pdev->dev);
4223 native_io = hw_is_native_io(mem);
4224
4225 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
4226 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
4227 if (!dev) {
4228 err = -ENOMEM;
4229 goto err_disable_clocks;
4230 }
4231
4232 dev->base_addr = regs->start;
4233
4234 SET_NETDEV_DEV(dev, &pdev->dev);
4235
4236 bp = netdev_priv(dev);
4237 bp->pdev = pdev;
4238 bp->dev = dev;
4239 bp->regs = mem;
4240 bp->native_io = native_io;
4241 if (native_io) {
4242 bp->macb_reg_readl = hw_readl_native;
4243 bp->macb_reg_writel = hw_writel_native;
4244 } else {
4245 bp->macb_reg_readl = hw_readl;
4246 bp->macb_reg_writel = hw_writel;
4247 }
4248 bp->num_queues = num_queues;
4249 bp->queue_mask = queue_mask;
4250 if (macb_config)
4251 bp->dma_burst_length = macb_config->dma_burst_length;
4252 bp->pclk = pclk;
4253 bp->hclk = hclk;
4254 bp->tx_clk = tx_clk;
4255 bp->rx_clk = rx_clk;
4256 bp->tsu_clk = tsu_clk;
4257 if (macb_config)
4258 bp->jumbo_max_len = macb_config->jumbo_max_len;
4259
4260 bp->wol = 0;
4261 if (of_get_property(np, "magic-packet", NULL))
4262 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
4263 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
4264
4265 spin_lock_init(&bp->lock);
4266
4267
4268 macb_configure_caps(bp, macb_config);
4269
4270 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4271 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
4272 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
4273 bp->hw_dma_cap |= HW_DMA_CAP_64B;
4274 }
4275 #endif
4276 platform_set_drvdata(pdev, dev);
4277
4278 dev->irq = platform_get_irq(pdev, 0);
4279 if (dev->irq < 0) {
4280 err = dev->irq;
4281 goto err_out_free_netdev;
4282 }
4283
4284 /* MTU range: 68 - 1500 or 10240 */
4285 dev->min_mtu = GEM_MTU_MIN_SIZE;
4286 if (bp->caps & MACB_CAPS_JUMBO)
4287 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
4288 else
4289 dev->max_mtu = ETH_DATA_LEN;
4290
4291 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
4292 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
4293 if (val)
4294 bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
4295 macb_dma_desc_get_size(bp);
4296
4297 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
4298 if (val)
4299 bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
4300 macb_dma_desc_get_size(bp);
4301 }
4302
4303 bp->rx_intr_mask = MACB_RX_INT_FLAGS;
4304 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
4305 bp->rx_intr_mask |= MACB_BIT(RXUBR);
4306
4307 mac = of_get_mac_address(np);
4308 if (PTR_ERR(mac) == -EPROBE_DEFER) {
4309 err = -EPROBE_DEFER;
4310 goto err_out_free_netdev;
4311 } else if (!IS_ERR_OR_NULL(mac)) {
4312 ether_addr_copy(bp->dev->dev_addr, mac);
4313 } else {
4314 macb_get_hwaddr(bp);
4315 }
4316
4317 err = of_get_phy_mode(np);
4318 if (err < 0)
4319 /* not found in DT, MII by default */
4320 bp->phy_interface = PHY_INTERFACE_MODE_MII;
4321 else
4322 bp->phy_interface = err;
4323
4324 /* IP specific init */
4325 err = init(pdev);
4326 if (err)
4327 goto err_out_free_netdev;
4328
4329 err = macb_mii_init(bp);
4330 if (err)
4331 goto err_out_free_netdev;
4332
4333 phydev = dev->phydev;
4334
4335 netif_carrier_off(dev);
4336
4337 err = register_netdev(dev);
4338 if (err) {
4339 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
4340 goto err_out_unregister_mdio;
4341 }
4342
4343 tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
4344 (unsigned long)bp);
4345
4346 phy_attached_info(phydev);
4347
4348 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
4349 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
4350 dev->base_addr, dev->irq, dev->dev_addr);
4351
4352 pm_runtime_mark_last_busy(&bp->pdev->dev);
4353 pm_runtime_put_autosuspend(&bp->pdev->dev);
4354
4355 return 0;
4356
4357 err_out_unregister_mdio:
4358 phy_disconnect(dev->phydev);
4359 mdiobus_unregister(bp->mii_bus);
4360 of_node_put(bp->phy_node);
4361 if (np && of_phy_is_fixed_link(np))
4362 of_phy_deregister_fixed_link(np);
4363 mdiobus_free(bp->mii_bus);
4364
4365 err_out_free_netdev:
4366 free_netdev(dev);
4367
4368 err_disable_clocks:
4369 clk_disable_unprepare(tx_clk);
4370 clk_disable_unprepare(hclk);
4371 clk_disable_unprepare(pclk);
4372 clk_disable_unprepare(rx_clk);
4373 clk_disable_unprepare(tsu_clk);
4374 pm_runtime_disable(&pdev->dev);
4375 pm_runtime_set_suspended(&pdev->dev);
4376 pm_runtime_dont_use_autosuspend(&pdev->dev);
4377
4378 return err;
4379 }
4380
4381 static int macb_remove(struct platform_device *pdev)
4382 {
4383 struct net_device *dev;
4384 struct macb *bp;
4385 struct device_node *np = pdev->dev.of_node;
4386
4387 dev = platform_get_drvdata(pdev);
4388
4389 if (dev) {
4390 bp = netdev_priv(dev);
4391 if (dev->phydev)
4392 phy_disconnect(dev->phydev);
4393 mdiobus_unregister(bp->mii_bus);
4394 if (np && of_phy_is_fixed_link(np))
4395 of_phy_deregister_fixed_link(np);
4396 dev->phydev = NULL;
4397 mdiobus_free(bp->mii_bus);
4398
4399 unregister_netdev(dev);
4400 tasklet_kill(&bp->hresp_err_tasklet);
4401 pm_runtime_disable(&pdev->dev);
4402 pm_runtime_dont_use_autosuspend(&pdev->dev);
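/* Gate the clocks here only if runtime PM has not already done so. */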
4403 if (!pm_runtime_suspended(&pdev->dev)) {
4404 clk_disable_unprepare(bp->tx_clk);
4405 clk_disable_unprepare(bp->hclk);
4406 clk_disable_unprepare(bp->pclk);
4407 clk_disable_unprepare(bp->rx_clk);
4408 clk_disable_unprepare(bp->tsu_clk);
4409 pm_runtime_set_suspended(&pdev->dev);
4410 }
4411 of_node_put(bp->phy_node);
4412 free_netdev(dev);
4413 }
4414
4415 return 0;
4416 }
4417
4418 static int __maybe_unused macb_suspend(struct device *dev)
4419 {
4420 struct net_device *netdev = dev_get_drvdata(dev);
4421 struct macb *bp = netdev_priv(netdev);
4422 struct macb_queue *queue = bp->queues;
4423 unsigned long flags;
4424 unsigned int q;
4425
4426 if (!netif_running(netdev))
4427 return 0;
4428
4429
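/* With Wake-on-LAN armed, keep the MAC powered and let a magic packet raise
 * the WOL interrupt; otherwise stop NAPI and the PHY, reset the MAC, and
 * save the register state (USRIO, type-2 screeners) that the reset loses.
 */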
4430 if (bp->wol & MACB_WOL_ENABLED) {
4431 macb_writel(bp, IER, MACB_BIT(WOL));
4432 macb_writel(bp, WOL, MACB_BIT(MAG));
4433 enable_irq_wake(bp->queues[0].irq);
4434 netif_device_detach(netdev);
4435 } else {
4436 netif_device_detach(netdev);
4437 for (q = 0, queue = bp->queues; q < bp->num_queues;
4438 ++q, ++queue)
4439 napi_disable(&queue->napi);
4440 phy_stop(netdev->phydev);
4441 phy_suspend(netdev->phydev);
4442 spin_lock_irqsave(&bp->lock, flags);
4443 macb_reset_hw(bp);
4444 spin_unlock_irqrestore(&bp->lock, flags);
4445
4446 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
4447 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
4448
4449 if (netdev->hw_features & NETIF_F_NTUPLE)
4450 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
4451 }
4452
4453 netif_carrier_off(netdev);
4454 if (bp->ptp_info)
4455 bp->ptp_info->ptp_remove(netdev);
4456 pm_runtime_force_suspend(dev);
4457
4458 return 0;
4459 }
4460
4461 static int __maybe_unused macb_resume(struct device *dev)
4462 {
4463 struct net_device *netdev = dev_get_drvdata(dev);
4464 struct macb *bp = netdev_priv(netdev);
4465 struct macb_queue *queue = bp->queues;
4466 unsigned int q;
4467
4468 if (!netif_running(netdev))
4469 return 0;
4470
4471 pm_runtime_force_resume(dev);
4472
4473 if (bp->wol & MACB_WOL_ENABLED) {
4474 macb_writel(bp, IDR, MACB_BIT(WOL));
4475 macb_writel(bp, WOL, 0);
4476 disable_irq_wake(bp->queues[0].irq);
4477 } else {
4478 macb_writel(bp, NCR, MACB_BIT(MPE));
4479
4480 if (netdev->hw_features & NETIF_F_NTUPLE)
4481 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
4482
4483 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
4484 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
4485
4486 for (q = 0, queue = bp->queues; q < bp->num_queues;
4487 ++q, ++queue)
4488 napi_enable(&queue->napi);
4489 phy_resume(netdev->phydev);
4490 phy_init_hw(netdev->phydev);
4491 phy_start(netdev->phydev);
4492 }
4493
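/* The controller lost its configuration across suspend: rebuild the
 * descriptor rings, reprogram the hardware and restore offload settings
 * before re-attaching the interface.
 */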
4494 bp->macbgem_ops.mog_init_rings(bp);
4495 macb_init_hw(bp);
4496 macb_set_rx_mode(netdev);
4497 macb_restore_features(bp);
4498 netif_device_attach(netdev);
4499 if (bp->ptp_info)
4500 bp->ptp_info->ptp_init(netdev);
4501
4502 return 0;
4503 }
4504
4505 static int __maybe_unused macb_runtime_suspend(struct device *dev)
4506 {
4507 struct net_device *netdev = dev_get_drvdata(dev);
4508 struct macb *bp = netdev_priv(netdev);
4509
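/* Keep the core clocks running while the device may wake the system
 * (magic-packet detection needs them); the TSU clock is always gated.
 */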
4510 if (!(device_may_wakeup(&bp->dev->dev))) {
4511 clk_disable_unprepare(bp->tx_clk);
4512 clk_disable_unprepare(bp->hclk);
4513 clk_disable_unprepare(bp->pclk);
4514 clk_disable_unprepare(bp->rx_clk);
4515 }
4516 clk_disable_unprepare(bp->tsu_clk);
4517
4518 return 0;
4519 }
4520
4521 static int __maybe_unused macb_runtime_resume(struct device *dev)
4522 {
4523 struct net_device *netdev = dev_get_drvdata(dev);
4524 struct macb *bp = netdev_priv(netdev);
4525
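/* Undo macb_runtime_suspend(): the core clocks were only gated when system
 * wakeup was not required, while the TSU clock always was.
 */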
4526 if (!(device_may_wakeup(&bp->dev->dev))) {
4527 clk_prepare_enable(bp->pclk);
4528 clk_prepare_enable(bp->hclk);
4529 clk_prepare_enable(bp->tx_clk);
4530 clk_prepare_enable(bp->rx_clk);
4531 }
4532 clk_prepare_enable(bp->tsu_clk);
4533
4534 return 0;
4535 }
4536
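/* System sleep goes through macb_suspend()/macb_resume(); runtime PM only
 * gates the clocks via macb_runtime_suspend()/macb_runtime_resume().
 */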
4537 static const struct dev_pm_ops macb_pm_ops = {
4538 SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
4539 SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
4540 };
4541
4542 static struct platform_driver macb_driver = {
4543 .probe = macb_probe,
4544 .remove = macb_remove,
4545 .driver = {
4546 .name = "macb",
4547 .of_match_table = of_match_ptr(macb_dt_ids),
4548 .pm = &macb_pm_ops,
4549 },
4550 };
4551
4552 module_platform_driver(macb_driver);
4553
4554 MODULE_LICENSE("GPL");
4555 MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
4556 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
4557 MODULE_ALIAS("platform:macb");