/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  Under NAPI, the
 *  interrupt handler signals that there is work to be done and
 *  exits.  The poll routine then starts at the last known empty
 *  descriptor and processes every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
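
/* A rough, illustrative sketch of the Rx ring walk described above (not the
 * driver's actual receive path; budget and error handling are omitted, and
 * new_buf stands for the DMA address of a freshly allocated buffer):
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(be32_to_cpu(bdp->lstatus) & BD_LFLAG(RXBD_EMPTY))) {
 *		// pass the skb mapped to this descriptor up the stack,
 *		// then allocate a replacement and re-arm the descriptor
 *		gfar_init_rxbdp(rx_queue, bdp, new_buf);
 *
 *		// advance, wrapping at the descriptor flagged RXBD_WRAP
 *		if (be32_to_cpu(bdp->lstatus) & BD_LFLAG(RXBD_WRAP))
 *			bdp = rx_queue->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 */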
63
64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65#define DEBUG
66
67#include <linux/kernel.h>
68#include <linux/string.h>
69#include <linux/errno.h>
70#include <linux/unistd.h>
71#include <linux/slab.h>
72#include <linux/interrupt.h>
73#include <linux/delay.h>
74#include <linux/netdevice.h>
75#include <linux/etherdevice.h>
76#include <linux/skbuff.h>
77#include <linux/if_vlan.h>
78#include <linux/spinlock.h>
79#include <linux/mm.h>
80#include <linux/of_address.h>
81#include <linux/of_irq.h>
82#include <linux/of_mdio.h>
83#include <linux/of_platform.h>
84#include <linux/ip.h>
85#include <linux/tcp.h>
86#include <linux/udp.h>
87#include <linux/in.h>
88#include <linux/net_tstamp.h>
89
90#include <asm/io.h>
91#ifdef CONFIG_PPC
92#include <asm/reg.h>
93#include <asm/mpc85xx.h>
94#endif
95#include <asm/irq.h>
96#include <asm/uaccess.h>
97#include <linux/module.h>
98#include <linux/dma-mapping.h>
99#include <linux/crc32.h>
100#include <linux/mii.h>
101#include <linux/phy.h>
102#include <linux/phy_fixed.h>
103#include <linux/of.h>
104#include <linux/of_net.h>
107
108#include "gianfar.h"
109
110#define TX_TIMEOUT      (1*HZ)
111
112const char gfar_driver_version[] = "1.3";
113
114static int gfar_enet_open(struct net_device *dev);
115static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
116static void gfar_reset_task(struct work_struct *work);
117static void gfar_timeout(struct net_device *dev);
118static int gfar_close(struct net_device *dev);
119static struct sk_buff *gfar_new_skb(struct net_device *dev,
120				    dma_addr_t *bufaddr);
121static int gfar_set_mac_address(struct net_device *dev);
122static int gfar_change_mtu(struct net_device *dev, int new_mtu);
123static irqreturn_t gfar_error(int irq, void *dev_id);
124static irqreturn_t gfar_transmit(int irq, void *dev_id);
125static irqreturn_t gfar_interrupt(int irq, void *dev_id);
126static void adjust_link(struct net_device *dev);
127static noinline void gfar_update_link_state(struct gfar_private *priv);
128static int init_phy(struct net_device *dev);
129static int gfar_probe(struct platform_device *ofdev);
130static int gfar_remove(struct platform_device *ofdev);
131static void free_skb_resources(struct gfar_private *priv);
132static void gfar_set_multi(struct net_device *dev);
133static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
134static void gfar_configure_serdes(struct net_device *dev);
135static int gfar_poll_rx(struct napi_struct *napi, int budget);
136static int gfar_poll_tx(struct napi_struct *napi, int budget);
137static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
138static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
139#ifdef CONFIG_NET_POLL_CONTROLLER
140static void gfar_netpoll(struct net_device *dev);
141#endif
142int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
143static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
144static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
145			       int amount_pull, struct napi_struct *napi);
146static void gfar_halt_nodisable(struct gfar_private *priv);
147static void gfar_clear_exact_match(struct net_device *dev);
148static void gfar_set_mac_for_addr(struct net_device *dev, int num,
149				  const u8 *addr);
150static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
151
152MODULE_AUTHOR("Freescale Semiconductor, Inc");
153MODULE_DESCRIPTION("Gianfar Ethernet Driver");
154MODULE_LICENSE("GPL");
155
156static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
157			    dma_addr_t buf)
158{
159	u32 lstatus;
160
161	bdp->bufPtr = cpu_to_be32(buf);
162
163	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
164	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
165		lstatus |= BD_LFLAG(RXBD_WRAP);
166
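	/* Ensure the bufPtr update above is visible before lstatus marks the
	 * descriptor empty (i.e. hands ownership back to the controller).
	 */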
167	gfar_wmb();
168
169	bdp->lstatus = cpu_to_be32(lstatus);
170}
171
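/* (Re)initialize the Tx and Rx descriptor rings.  Tx descriptors are
 * cleared (owned by software) and the last one gets the WRAP bit; Rx
 * descriptors are re-armed with receive buffers, reusing skbs that are
 * still around and allocating new ones where needed.  Each Rx queue also
 * records a pointer to its per-queue rfbptr register.
 */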
172static int gfar_init_bds(struct net_device *ndev)
173{
174	struct gfar_private *priv = netdev_priv(ndev);
175	struct gfar __iomem *regs = priv->gfargrp[0].regs;
176	struct gfar_priv_tx_q *tx_queue = NULL;
177	struct gfar_priv_rx_q *rx_queue = NULL;
178	struct txbd8 *txbdp;
179	struct rxbd8 *rxbdp;
180	u32 __iomem *rfbptr;
181	int i, j;
182	dma_addr_t bufaddr;
183
184	for (i = 0; i < priv->num_tx_queues; i++) {
185		tx_queue = priv->tx_queue[i];
186		/* Initialize some variables in our dev structure */
187		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
188		tx_queue->dirty_tx = tx_queue->tx_bd_base;
189		tx_queue->cur_tx = tx_queue->tx_bd_base;
190		tx_queue->skb_curtx = 0;
191		tx_queue->skb_dirtytx = 0;
192
193		/* Initialize Transmit Descriptor Ring */
194		txbdp = tx_queue->tx_bd_base;
195		for (j = 0; j < tx_queue->tx_ring_size; j++) {
196			txbdp->lstatus = 0;
197			txbdp->bufPtr = 0;
198			txbdp++;
199		}
200
201		/* Set the last descriptor in the ring to indicate wrap */
202		txbdp--;
203		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
204					    TXBD_WRAP);
205	}
206
207	rfbptr = &regs->rfbptr0;
208	for (i = 0; i < priv->num_rx_queues; i++) {
209		rx_queue = priv->rx_queue[i];
210		rx_queue->cur_rx = rx_queue->rx_bd_base;
211		rx_queue->skb_currx = 0;
212		rxbdp = rx_queue->rx_bd_base;
213
214		for (j = 0; j < rx_queue->rx_ring_size; j++) {
215			struct sk_buff *skb = rx_queue->rx_skbuff[j];
216
217			if (skb) {
218				bufaddr = be32_to_cpu(rxbdp->bufPtr);
219			} else {
220				skb = gfar_new_skb(ndev, &bufaddr);
221				if (!skb) {
222					netdev_err(ndev, "Can't allocate RX buffers\n");
223					return -ENOMEM;
224				}
225				rx_queue->rx_skbuff[j] = skb;
226			}
227
228			gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
229			rxbdp++;
230		}
231
232		rx_queue->rfbptr = rfbptr;
233		rfbptr += 2;
234	}
235
236	return 0;
237}
238
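/* Allocate a single DMA-coherent region large enough for all Tx descriptor
 * rings followed by all Rx descriptor rings, carve it up per queue, then
 * allocate the per-queue skb pointer arrays.  If any of the later
 * allocations fail, everything allocated so far is released through
 * free_skb_resources().
 */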
239static int gfar_alloc_skb_resources(struct net_device *ndev)
240{
241	void *vaddr;
242	dma_addr_t addr;
243	int i, j, k;
244	struct gfar_private *priv = netdev_priv(ndev);
245	struct device *dev = priv->dev;
246	struct gfar_priv_tx_q *tx_queue = NULL;
247	struct gfar_priv_rx_q *rx_queue = NULL;
248
249	priv->total_tx_ring_size = 0;
250	for (i = 0; i < priv->num_tx_queues; i++)
251		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
252
253	priv->total_rx_ring_size = 0;
254	for (i = 0; i < priv->num_rx_queues; i++)
255		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
256
257	/* Allocate memory for the buffer descriptors */
258	vaddr = dma_alloc_coherent(dev,
259				   (priv->total_tx_ring_size *
260				    sizeof(struct txbd8)) +
261				   (priv->total_rx_ring_size *
262				    sizeof(struct rxbd8)),
263				   &addr, GFP_KERNEL);
264	if (!vaddr)
265		return -ENOMEM;
266
267	for (i = 0; i < priv->num_tx_queues; i++) {
268		tx_queue = priv->tx_queue[i];
269		tx_queue->tx_bd_base = vaddr;
270		tx_queue->tx_bd_dma_base = addr;
271		tx_queue->dev = ndev;
272		/* enet DMA only understands physical addresses */
273		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
274		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
275	}
276
277	/* Start the rx descriptor ring where the tx ring leaves off */
278	for (i = 0; i < priv->num_rx_queues; i++) {
279		rx_queue = priv->rx_queue[i];
280		rx_queue->rx_bd_base = vaddr;
281		rx_queue->rx_bd_dma_base = addr;
282		rx_queue->dev = ndev;
283		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
284		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
285	}
286
287	/* Setup the skbuff rings */
288	for (i = 0; i < priv->num_tx_queues; i++) {
289		tx_queue = priv->tx_queue[i];
290		tx_queue->tx_skbuff =
291			kmalloc_array(tx_queue->tx_ring_size,
292				      sizeof(*tx_queue->tx_skbuff),
293				      GFP_KERNEL);
294		if (!tx_queue->tx_skbuff)
295			goto cleanup;
296
297		for (k = 0; k < tx_queue->tx_ring_size; k++)
298			tx_queue->tx_skbuff[k] = NULL;
299	}
300
301	for (i = 0; i < priv->num_rx_queues; i++) {
302		rx_queue = priv->rx_queue[i];
303		rx_queue->rx_skbuff =
304			kmalloc_array(rx_queue->rx_ring_size,
305				      sizeof(*rx_queue->rx_skbuff),
306				      GFP_KERNEL);
307		if (!rx_queue->rx_skbuff)
308			goto cleanup;
309
310		for (j = 0; j < rx_queue->rx_ring_size; j++)
311			rx_queue->rx_skbuff[j] = NULL;
312	}
313
314	if (gfar_init_bds(ndev))
315		goto cleanup;
316
317	return 0;
318
319cleanup:
320	free_skb_resources(priv);
321	return -ENOMEM;
322}
323
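/* Tell the controller where each descriptor ring lives in DMA space.  The
 * tbaseN/rbaseN registers are spaced two u32s (8 bytes) apart in the
 * register block, hence the pointer stride of 2.
 */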
324static void gfar_init_tx_rx_base(struct gfar_private *priv)
325{
326	struct gfar __iomem *regs = priv->gfargrp[0].regs;
327	u32 __iomem *baddr;
328	int i;
329
330	baddr = &regs->tbase0;
331	for (i = 0; i < priv->num_tx_queues; i++) {
332		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
333		baddr += 2;
334	}
335
336	baddr = &regs->rbase0;
337	for (i = 0; i < priv->num_rx_queues; i++) {
338		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
339		baddr += 2;
340	}
341}
342
343static void gfar_init_rqprm(struct gfar_private *priv)
344{
345	struct gfar __iomem *regs = priv->gfargrp[0].regs;
346	u32 __iomem *baddr;
347	int i;
348
349	baddr = &regs->rqprm0;
350	for (i = 0; i < priv->num_rx_queues; i++) {
351		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
352			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
353		baddr++;
354	}
355}
356
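/* Compute the Rx buffer size from the MTU plus Ethernet header and FCS,
 * any Rx frame control block (FCB) and alignment padding, rounded up to
 * the next multiple of INCREMENTAL_BUFFER_SIZE.  For example, assuming a
 * 512-byte increment, a 1500-byte MTU (1518-byte frame) with an 8-byte FCB
 * and 8 bytes of padding (1534 bytes total) ends up with 1536-byte buffers.
 */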
357static void gfar_rx_buff_size_config(struct gfar_private *priv)
358{
359	int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
360
361	/* set this when rx hw offload (TOE) functions are being used */
362	priv->uses_rxfcb = 0;
363
364	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
365		priv->uses_rxfcb = 1;
366
367	if (priv->hwts_rx_en)
368		priv->uses_rxfcb = 1;
369
370	if (priv->uses_rxfcb)
371		frame_size += GMAC_FCB_LEN;
372
373	frame_size += priv->padding;
374
375	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
376		     INCREMENTAL_BUFFER_SIZE;
377
378	priv->rx_buffer_size = frame_size;
379}
380
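/* Build and program RCTRL: Rx filer, promiscuous mode, Rx checksum
 * offload, extended hashing, padding/alignment, hardware Rx timestamping
 * and VLAN extraction, plus the lossless flow control (LFC) thresholds
 * programmed through the rqprm registers and PTV.
 */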
381static void gfar_mac_rx_config(struct gfar_private *priv)
382{
383	struct gfar __iomem *regs = priv->gfargrp[0].regs;
384	u32 rctrl = 0;
385
386	if (priv->rx_filer_enable) {
387		rctrl |= RCTRL_FILREN;
388		/* Program the RIR0 reg with the required distribution */
389		if (priv->poll_mode == GFAR_SQ_POLLING)
390			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
391		else /* GFAR_MQ_POLLING */
392			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
393	}
394
395	/* Restore PROMISC mode */
396	if (priv->ndev->flags & IFF_PROMISC)
397		rctrl |= RCTRL_PROM;
398
399	if (priv->ndev->features & NETIF_F_RXCSUM)
400		rctrl |= RCTRL_CHECKSUMMING;
401
402	if (priv->extended_hash)
403		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
404
405	if (priv->padding) {
406		rctrl &= ~RCTRL_PAL_MASK;
407		rctrl |= RCTRL_PADDING(priv->padding);
408	}
409
410	/* Enable HW time stamping if requested from user space */
411	if (priv->hwts_rx_en)
412		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
413
414	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
415		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
416
417	/* Clear the LFC bit */
418	gfar_write(&regs->rctrl, rctrl);
419	/* Init flow control threshold values */
420	gfar_init_rqprm(priv);
421	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
422	rctrl |= RCTRL_LFC;
423
424	/* Init rctrl based on our settings */
425	gfar_write(&regs->rctrl, rctrl);
426}
427
428static void gfar_mac_tx_config(struct gfar_private *priv)
429{
430	struct gfar __iomem *regs = priv->gfargrp[0].regs;
431	u32 tctrl = 0;
432
433	if (priv->ndev->features & NETIF_F_IP_CSUM)
434		tctrl |= TCTRL_INIT_CSUM;
435
436	if (priv->prio_sched_en)
437		tctrl |= TCTRL_TXSCHED_PRIO;
438	else {
439		tctrl |= TCTRL_TXSCHED_WRRS;
440		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
441		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
442	}
443
444	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
445		tctrl |= TCTRL_VLINS;
446
447	gfar_write(&regs->tctrl, tctrl);
448}
449
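/* Program per-queue interrupt coalescing.  Every queue selected by
 * tx_mask/rx_mask first has its coalescing register cleared (coalescing
 * off) and then, if coalescing is enabled for that queue, is given its
 * configured txic/rxic value.  In non-multi-group mode there is only a
 * single txic/rxic register pair, so only queue 0's settings apply.
 */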
450static void gfar_configure_coalescing(struct gfar_private *priv,
451			       unsigned long tx_mask, unsigned long rx_mask)
452{
453	struct gfar __iomem *regs = priv->gfargrp[0].regs;
454	u32 __iomem *baddr;
455
456	if (priv->mode == MQ_MG_MODE) {
457		int i = 0;
458
459		baddr = &regs->txic0;
460		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
461			gfar_write(baddr + i, 0);
462			if (likely(priv->tx_queue[i]->txcoalescing))
463				gfar_write(baddr + i, priv->tx_queue[i]->txic);
464		}
465
466		baddr = &regs->rxic0;
467		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
468			gfar_write(baddr + i, 0);
469			if (likely(priv->rx_queue[i]->rxcoalescing))
470				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
471		}
472	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only a single register to program
		 */
476		gfar_write(&regs->txic, 0);
477		if (likely(priv->tx_queue[0]->txcoalescing))
478			gfar_write(&regs->txic, priv->tx_queue[0]->txic);
479
480		gfar_write(&regs->rxic, 0);
481		if (unlikely(priv->rx_queue[0]->rxcoalescing))
482			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
483	}
484}
485
486void gfar_configure_coalescing_all(struct gfar_private *priv)
487{
488	gfar_configure_coalescing(priv, 0xFF, 0xFF);
489}
490
491static struct net_device_stats *gfar_get_stats(struct net_device *dev)
492{
493	struct gfar_private *priv = netdev_priv(dev);
494	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
495	unsigned long tx_packets = 0, tx_bytes = 0;
496	int i;
497
498	for (i = 0; i < priv->num_rx_queues; i++) {
499		rx_packets += priv->rx_queue[i]->stats.rx_packets;
500		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
501		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
502	}
503
504	dev->stats.rx_packets = rx_packets;
505	dev->stats.rx_bytes   = rx_bytes;
506	dev->stats.rx_dropped = rx_dropped;
507
508	for (i = 0; i < priv->num_tx_queues; i++) {
509		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
510		tx_packets += priv->tx_queue[i]->stats.tx_packets;
511	}
512
513	dev->stats.tx_bytes   = tx_bytes;
514	dev->stats.tx_packets = tx_packets;
515
516	return &dev->stats;
517}
518
519static const struct net_device_ops gfar_netdev_ops = {
520	.ndo_open = gfar_enet_open,
521	.ndo_start_xmit = gfar_start_xmit,
522	.ndo_stop = gfar_close,
523	.ndo_change_mtu = gfar_change_mtu,
524	.ndo_set_features = gfar_set_features,
525	.ndo_set_rx_mode = gfar_set_multi,
526	.ndo_tx_timeout = gfar_timeout,
527	.ndo_do_ioctl = gfar_ioctl,
528	.ndo_get_stats = gfar_get_stats,
529	.ndo_set_mac_address = eth_mac_addr,
530	.ndo_validate_addr = eth_validate_addr,
531#ifdef CONFIG_NET_POLL_CONTROLLER
532	.ndo_poll_controller = gfar_netpoll,
533#endif
534};
535
536static void gfar_ints_disable(struct gfar_private *priv)
537{
538	int i;
539	for (i = 0; i < priv->num_grps; i++) {
540		struct gfar __iomem *regs = priv->gfargrp[i].regs;
541		/* Clear IEVENT */
542		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
543
544		/* Initialize IMASK */
545		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
546	}
547}
548
549static void gfar_ints_enable(struct gfar_private *priv)
550{
551	int i;
552	for (i = 0; i < priv->num_grps; i++) {
553		struct gfar __iomem *regs = priv->gfargrp[i].regs;
554		/* Unmask the interrupts we look for */
555		gfar_write(&regs->imask, IMASK_DEFAULT);
556	}
557}
558
559static void lock_tx_qs(struct gfar_private *priv)
560{
561	int i;
562
563	for (i = 0; i < priv->num_tx_queues; i++)
564		spin_lock(&priv->tx_queue[i]->txlock);
565}
566
567static void unlock_tx_qs(struct gfar_private *priv)
568{
569	int i;
570
571	for (i = 0; i < priv->num_tx_queues; i++)
572		spin_unlock(&priv->tx_queue[i]->txlock);
573}
574
575static int gfar_alloc_tx_queues(struct gfar_private *priv)
576{
577	int i;
578
579	for (i = 0; i < priv->num_tx_queues; i++) {
580		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
581					    GFP_KERNEL);
582		if (!priv->tx_queue[i])
583			return -ENOMEM;
584
585		priv->tx_queue[i]->tx_skbuff = NULL;
586		priv->tx_queue[i]->qindex = i;
587		priv->tx_queue[i]->dev = priv->ndev;
588		spin_lock_init(&(priv->tx_queue[i]->txlock));
589	}
590	return 0;
591}
592
593static int gfar_alloc_rx_queues(struct gfar_private *priv)
594{
595	int i;
596
597	for (i = 0; i < priv->num_rx_queues; i++) {
598		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
599					    GFP_KERNEL);
600		if (!priv->rx_queue[i])
601			return -ENOMEM;
602
603		priv->rx_queue[i]->rx_skbuff = NULL;
604		priv->rx_queue[i]->qindex = i;
605		priv->rx_queue[i]->dev = priv->ndev;
606	}
607	return 0;
608}
609
610static void gfar_free_tx_queues(struct gfar_private *priv)
611{
612	int i;
613
614	for (i = 0; i < priv->num_tx_queues; i++)
615		kfree(priv->tx_queue[i]);
616}
617
618static void gfar_free_rx_queues(struct gfar_private *priv)
619{
620	int i;
621
622	for (i = 0; i < priv->num_rx_queues; i++)
623		kfree(priv->rx_queue[i]);
624}
625
626static void unmap_group_regs(struct gfar_private *priv)
627{
628	int i;
629
630	for (i = 0; i < MAXGROUPS; i++)
631		if (priv->gfargrp[i].regs)
632			iounmap(priv->gfargrp[i].regs);
633}
634
635static void free_gfar_dev(struct gfar_private *priv)
636{
637	int i, j;
638
639	for (i = 0; i < priv->num_grps; i++)
640		for (j = 0; j < GFAR_NUM_IRQS; j++) {
641			kfree(priv->gfargrp[i].irqinfo[j]);
642			priv->gfargrp[i].irqinfo[j] = NULL;
643		}
644
645	free_netdev(priv->ndev);
646}
647
648static void disable_napi(struct gfar_private *priv)
649{
650	int i;
651
652	for (i = 0; i < priv->num_grps; i++) {
653		napi_disable(&priv->gfargrp[i].napi_rx);
654		napi_disable(&priv->gfargrp[i].napi_tx);
655	}
656}
657
658static void enable_napi(struct gfar_private *priv)
659{
660	int i;
661
662	for (i = 0; i < priv->num_grps; i++) {
663		napi_enable(&priv->gfargrp[i].napi_rx);
664		napi_enable(&priv->gfargrp[i].napi_tx);
665	}
666}
667
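/* Set up one interrupt group: allocate its irqinfo, map its register block,
 * grab its interrupt(s) (a single combined interrupt for "FEC" models,
 * separate Tx/Rx/error interrupts otherwise) and work out which Rx/Tx
 * queues the group serves from the optional fsl,rx-bit-map/fsl,tx-bit-map
 * properties (MSB = queue 0), falling back to the default mapping.
 */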
668static int gfar_parse_group(struct device_node *np,
669			    struct gfar_private *priv, const char *model)
670{
671	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
672	int i;
673
674	for (i = 0; i < GFAR_NUM_IRQS; i++) {
675		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
676					  GFP_KERNEL);
677		if (!grp->irqinfo[i])
678			return -ENOMEM;
679	}
680
681	grp->regs = of_iomap(np, 0);
682	if (!grp->regs)
683		return -ENOMEM;
684
685	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
686
687	/* If we aren't the FEC we have multiple interrupts */
688	if (model && strcasecmp(model, "FEC")) {
689		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
690		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
691		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
692		    gfar_irq(grp, RX)->irq == NO_IRQ ||
693		    gfar_irq(grp, ER)->irq == NO_IRQ)
694			return -EINVAL;
695	}
696
697	grp->priv = priv;
698	spin_lock_init(&grp->grplock);
699	if (priv->mode == MQ_MG_MODE) {
700		u32 rxq_mask, txq_mask;
701		int ret;
702
703		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
704		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
705
706		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
707		if (!ret) {
708			grp->rx_bit_map = rxq_mask ?
709			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
710		}
711
712		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
713		if (!ret) {
714			grp->tx_bit_map = txq_mask ?
715			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
716		}
717
718		if (priv->poll_mode == GFAR_SQ_POLLING) {
719			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
720			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
721			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
722		}
723	} else {
724		grp->rx_bit_map = 0xFF;
725		grp->tx_bit_map = 0xFF;
726	}
727
	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
731	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
732	grp->tx_bit_map = bitrev8(grp->tx_bit_map);
733
734	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
735	 * also assign queues to groups
736	 */
737	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
738		if (!grp->rx_queue)
739			grp->rx_queue = priv->rx_queue[i];
740		grp->num_rx_queues++;
741		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
742		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
743		priv->rx_queue[i]->grp = grp;
744	}
745
746	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
747		if (!grp->tx_queue)
748			grp->tx_queue = priv->tx_queue[i];
749		grp->num_tx_queues++;
750		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
751		priv->tqueue |= (TQUEUE_EN0 >> i);
752		priv->tx_queue[i]->grp = grp;
753	}
754
755	priv->num_grps++;
756
757	return 0;
758}
759
760static int gfar_of_group_count(struct device_node *np)
761{
762	struct device_node *child;
763	int num = 0;
764
765	for_each_available_child_of_node(np, child)
766		if (!of_node_cmp(child->name, "queue-group"))
767			num++;
768
769	return num;
770}
771
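/* Parse the controller's device tree node.  For reference, a rough,
 * illustrative node (values are made up, not from any particular board;
 * only properties this function actually looks at are shown):
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		model = "eTSEC";
 *		phy-handle = <&phy0>;
 *		phy-connection-type = "rgmii-id";
 *		fsl,magic-packet;
 *		queue-group {
 *			reg = <0x24000 0x1000>;
 *			interrupts = <29 2 30 2 34 2>;	// Tx, Rx, Err
 *		};
 *	};
 */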
772static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
773{
774	const char *model;
775	const char *ctype;
776	const void *mac_addr;
777	int err = 0, i;
778	struct net_device *dev = NULL;
779	struct gfar_private *priv = NULL;
780	struct device_node *np = ofdev->dev.of_node;
781	struct device_node *child = NULL;
782	struct property *stash;
783	u32 stash_len = 0;
784	u32 stash_idx = 0;
785	unsigned int num_tx_qs, num_rx_qs;
786	unsigned short mode, poll_mode;
787
788	if (!np)
789		return -ENODEV;
790
791	if (of_device_is_compatible(np, "fsl,etsec2")) {
792		mode = MQ_MG_MODE;
793		poll_mode = GFAR_SQ_POLLING;
794	} else {
795		mode = SQ_SG_MODE;
796		poll_mode = GFAR_SQ_POLLING;
797	}
798
799	if (mode == SQ_SG_MODE) {
800		num_tx_qs = 1;
801		num_rx_qs = 1;
802	} else { /* MQ_MG_MODE */
803		/* get the actual number of supported groups */
804		unsigned int num_grps = gfar_of_group_count(np);
805
806		if (num_grps == 0 || num_grps > MAXGROUPS) {
807			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
808				num_grps);
809			pr_err("Cannot do alloc_etherdev, aborting\n");
810			return -EINVAL;
811		}
812
813		if (poll_mode == GFAR_SQ_POLLING) {
814			num_tx_qs = num_grps; /* one txq per int group */
815			num_rx_qs = num_grps; /* one rxq per int group */
816		} else { /* GFAR_MQ_POLLING */
817			u32 tx_queues, rx_queues;
818			int ret;
819
820			/* parse the num of HW tx and rx queues */
821			ret = of_property_read_u32(np, "fsl,num_tx_queues",
822						   &tx_queues);
823			num_tx_qs = ret ? 1 : tx_queues;
824
825			ret = of_property_read_u32(np, "fsl,num_rx_queues",
826						   &rx_queues);
827			num_rx_qs = ret ? 1 : rx_queues;
828		}
829	}
830
831	if (num_tx_qs > MAX_TX_QS) {
832		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
833		       num_tx_qs, MAX_TX_QS);
834		pr_err("Cannot do alloc_etherdev, aborting\n");
835		return -EINVAL;
836	}
837
838	if (num_rx_qs > MAX_RX_QS) {
839		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
840		       num_rx_qs, MAX_RX_QS);
841		pr_err("Cannot do alloc_etherdev, aborting\n");
842		return -EINVAL;
843	}
844
845	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
846	dev = *pdev;
	if (!dev)
848		return -ENOMEM;
849
850	priv = netdev_priv(dev);
851	priv->ndev = dev;
852
853	priv->mode = mode;
854	priv->poll_mode = poll_mode;
855
856	priv->num_tx_queues = num_tx_qs;
857	netif_set_real_num_rx_queues(dev, num_rx_qs);
858	priv->num_rx_queues = num_rx_qs;
859
860	err = gfar_alloc_tx_queues(priv);
861	if (err)
862		goto tx_alloc_failed;
863
864	err = gfar_alloc_rx_queues(priv);
865	if (err)
866		goto rx_alloc_failed;
867
868	err = of_property_read_string(np, "model", &model);
869	if (err) {
870		pr_err("Device model property missing, aborting\n");
871		goto rx_alloc_failed;
872	}
873
874	/* Init Rx queue filer rule set linked list */
875	INIT_LIST_HEAD(&priv->rx_list.list);
876	priv->rx_list.count = 0;
877	mutex_init(&priv->rx_queue_access);
878
879	for (i = 0; i < MAXGROUPS; i++)
880		priv->gfargrp[i].regs = NULL;
881
882	/* Parse and initialize group specific information */
883	if (priv->mode == MQ_MG_MODE) {
884		for_each_available_child_of_node(np, child) {
885			if (of_node_cmp(child->name, "queue-group"))
886				continue;
887
888			err = gfar_parse_group(child, priv, model);
889			if (err)
890				goto err_grp_init;
891		}
892	} else { /* SQ_SG_MODE */
893		err = gfar_parse_group(np, priv, model);
894		if (err)
895			goto err_grp_init;
896	}
897
898	stash = of_find_property(np, "bd-stash", NULL);
899
900	if (stash) {
901		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
902		priv->bd_stash_en = 1;
903	}
904
905	err = of_property_read_u32(np, "rx-stash-len", &stash_len);
906
907	if (err == 0)
908		priv->rx_stash_size = stash_len;
909
910	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
911
912	if (err == 0)
913		priv->rx_stash_index = stash_idx;
914
915	if (stash_len || stash_idx)
916		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
917
918	mac_addr = of_get_mac_address(np);
919
920	if (mac_addr)
921		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
922
923	if (model && !strcasecmp(model, "TSEC"))
924		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
925				     FSL_GIANFAR_DEV_HAS_COALESCE |
926				     FSL_GIANFAR_DEV_HAS_RMON |
927				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
928
929	if (model && !strcasecmp(model, "eTSEC"))
930		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
931				     FSL_GIANFAR_DEV_HAS_COALESCE |
932				     FSL_GIANFAR_DEV_HAS_RMON |
933				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
934				     FSL_GIANFAR_DEV_HAS_CSUM |
935				     FSL_GIANFAR_DEV_HAS_VLAN |
936				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
937				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
938				     FSL_GIANFAR_DEV_HAS_TIMER;
939
940	err = of_property_read_string(np, "phy-connection-type", &ctype);
941
942	/* We only care about rgmii-id.  The rest are autodetected */
943	if (err == 0 && !strcmp(ctype, "rgmii-id"))
944		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
945	else
946		priv->interface = PHY_INTERFACE_MODE_MII;
947
948	if (of_find_property(np, "fsl,magic-packet", NULL))
949		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
950
951	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
952
953	/* In the case of a fixed PHY, the DT node associated
954	 * to the PHY is the Ethernet MAC DT node.
955	 */
956	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
957		err = of_phy_register_fixed_link(np);
958		if (err)
959			goto err_grp_init;
960
961		priv->phy_node = of_node_get(np);
962	}
963
964	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
965	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
966
967	return 0;
968
969err_grp_init:
970	unmap_group_regs(priv);
971rx_alloc_failed:
972	gfar_free_rx_queues(priv);
973tx_alloc_failed:
974	gfar_free_tx_queues(priv);
975	free_gfar_dev(priv);
976	return err;
977}
978
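/* SIOCSHWTSTAMP handler.  For reference, a rough user space sketch using
 * the generic kernel timestamping API (names from linux/net_tstamp.h and
 * linux/sockios.h, not specific to this driver):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Timestamping requests are only honoured if the device advertises
 * FSL_GIANFAR_DEV_HAS_TIMER; any Rx filter other than NONE is widened to
 * HWTSTAMP_FILTER_ALL and reported back to user space.
 */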
979static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
980{
981	struct hwtstamp_config config;
982	struct gfar_private *priv = netdev_priv(netdev);
983
984	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
985		return -EFAULT;
986
987	/* reserved for future extensions */
988	if (config.flags)
989		return -EINVAL;
990
991	switch (config.tx_type) {
992	case HWTSTAMP_TX_OFF:
993		priv->hwts_tx_en = 0;
994		break;
995	case HWTSTAMP_TX_ON:
996		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
997			return -ERANGE;
998		priv->hwts_tx_en = 1;
999		break;
1000	default:
1001		return -ERANGE;
1002	}
1003
1004	switch (config.rx_filter) {
1005	case HWTSTAMP_FILTER_NONE:
1006		if (priv->hwts_rx_en) {
1007			priv->hwts_rx_en = 0;
1008			reset_gfar(netdev);
1009		}
1010		break;
1011	default:
1012		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
1013			return -ERANGE;
1014		if (!priv->hwts_rx_en) {
1015			priv->hwts_rx_en = 1;
1016			reset_gfar(netdev);
1017		}
1018		config.rx_filter = HWTSTAMP_FILTER_ALL;
1019		break;
1020	}
1021
1022	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1023		-EFAULT : 0;
1024}
1025
1026static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
1027{
1028	struct hwtstamp_config config;
1029	struct gfar_private *priv = netdev_priv(netdev);
1030
1031	config.flags = 0;
1032	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1033	config.rx_filter = (priv->hwts_rx_en ?
1034			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
1035
1036	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1037		-EFAULT : 0;
1038}
1039
1040static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1041{
1042	struct gfar_private *priv = netdev_priv(dev);
1043
1044	if (!netif_running(dev))
1045		return -EINVAL;
1046
1047	if (cmd == SIOCSHWTSTAMP)
1048		return gfar_hwtstamp_set(dev, rq);
1049	if (cmd == SIOCGHWTSTAMP)
1050		return gfar_hwtstamp_get(dev, rq);
1051
1052	if (!priv->phydev)
1053		return -ENODEV;
1054
1055	return phy_mii_ioctl(priv->phydev, rq, cmd);
1056}
1057
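/* Install one four-entry filer cluster for the given parser class
 * (e.g. RQFPR_IPV4 | RQFPR_TCP), working downwards from rqfar, and return
 * the updated (lowest used) filer index for the next cluster.
 */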
1058static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
1059				   u32 class)
1060{
1061	u32 rqfpr = FPR_FILER_MASK;
1062	u32 rqfcr = 0x0;
1063
1064	rqfar--;
1065	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
1066	priv->ftp_rqfpr[rqfar] = rqfpr;
1067	priv->ftp_rqfcr[rqfar] = rqfcr;
1068	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1069
1070	rqfar--;
1071	rqfcr = RQFCR_CMP_NOMATCH;
1072	priv->ftp_rqfpr[rqfar] = rqfpr;
1073	priv->ftp_rqfcr[rqfar] = rqfcr;
1074	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1075
1076	rqfar--;
1077	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
1078	rqfpr = class;
1079	priv->ftp_rqfcr[rqfar] = rqfcr;
1080	priv->ftp_rqfpr[rqfar] = rqfpr;
1081	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1082
1083	rqfar--;
1084	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
1085	rqfpr = class;
1086	priv->ftp_rqfcr[rqfar] = rqfcr;
1087	priv->ftp_rqfpr[rqfar] = rqfpr;
1088	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1089
1090	return rqfar;
1091}
1092
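/* Set up the default Rx filer table: a catch-all match rule at the top
 * (MAX_FILER_IDX), clusters for IPv4/IPv6 with and without TCP/UDP below
 * it, and no-match placeholders in all remaining slots.  The table is
 * filled from the highest index downwards, and cur_filer_idx is left
 * pointing at the first real (non-placeholder) rule.
 */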
1093static void gfar_init_filer_table(struct gfar_private *priv)
1094{
1095	int i = 0x0;
1096	u32 rqfar = MAX_FILER_IDX;
1097	u32 rqfcr = 0x0;
1098	u32 rqfpr = FPR_FILER_MASK;
1099
1100	/* Default rule */
1101	rqfcr = RQFCR_CMP_MATCH;
1102	priv->ftp_rqfcr[rqfar] = rqfcr;
1103	priv->ftp_rqfpr[rqfar] = rqfpr;
1104	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
1105
1106	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
1107	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
1108	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
1109	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
1110	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
1111	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
1112
	/* cur_filer_idx indicates the first non-masked rule */
1114	priv->cur_filer_idx = rqfar;
1115
1116	/* Rest are masked rules */
1117	rqfcr = RQFCR_CMP_NOMATCH;
1118	for (i = 0; i < rqfar; i++) {
1119		priv->ftp_rqfcr[i] = rqfcr;
1120		priv->ftp_rqfpr[i] = rqfpr;
1121		gfar_write_filer(priv, i, rqfcr, rqfpr);
1122	}
1123}
1124
1125#ifdef CONFIG_PPC
1126static void __gfar_detect_errata_83xx(struct gfar_private *priv)
1127{
1128	unsigned int pvr = mfspr(SPRN_PVR);
1129	unsigned int svr = mfspr(SPRN_SVR);
1130	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
1131	unsigned int rev = svr & 0xffff;
1132
1133	/* MPC8313 Rev 2.0 and higher; All MPC837x */
1134	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
1135	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1136		priv->errata |= GFAR_ERRATA_74;
1137
1138	/* MPC8313 and MPC837x all rev */
1139	if ((pvr == 0x80850010 && mod == 0x80b0) ||
1140	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
1141		priv->errata |= GFAR_ERRATA_76;
1142
1143	/* MPC8313 Rev < 2.0 */
1144	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
1145		priv->errata |= GFAR_ERRATA_12;
1146}
1147
1148static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1149{
1150	unsigned int svr = mfspr(SPRN_SVR);
1151
1152	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1153		priv->errata |= GFAR_ERRATA_12;
1154	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1155	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
1156		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
1157}
1158#endif
1159
1160static void gfar_detect_errata(struct gfar_private *priv)
1161{
1162	struct device *dev = &priv->ofdev->dev;
1163
1164	/* no plans to fix */
1165	priv->errata |= GFAR_ERRATA_A002;
1166
1167#ifdef CONFIG_PPC
1168	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
1169		__gfar_detect_errata_85xx(priv);
1170	else /* non-mpc85xx parts, i.e. e300 core based */
1171		__gfar_detect_errata_83xx(priv);
1172#endif
1173
1174	if (priv->errata)
1175		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
1176			 priv->errata);
1177}
1178
1179void gfar_mac_reset(struct gfar_private *priv)
1180{
1181	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1182	u32 tempval;
1183
1184	/* Reset MAC layer */
1185	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1186
1187	/* We need to delay at least 3 TX clocks */
1188	udelay(3);
1189
1190	/* the soft reset bit is not self-resetting, so we need to
1191	 * clear it before resuming normal operation
1192	 */
1193	gfar_write(&regs->maccfg1, 0);
1194
1195	udelay(3);
1196
1197	/* Compute rx_buff_size based on config flags */
1198	gfar_rx_buff_size_config(priv);
1199
1200	/* Initialize the max receive frame/buffer lengths */
1201	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1202	gfar_write(&regs->mrblr, priv->rx_buffer_size);
1203
1204	/* Initialize the Minimum Frame Length Register */
1205	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1206
1207	/* Initialize MACCFG2. */
1208	tempval = MACCFG2_INIT_SETTINGS;
1209
1210	/* If the mtu is larger than the max size for standard
1211	 * ethernet frames (ie, a jumbo frame), then set maccfg2
1212	 * to allow huge frames, and to check the length
1213	 */
1214	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
1215	    gfar_has_errata(priv, GFAR_ERRATA_74))
1216		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1217
1218	gfar_write(&regs->maccfg2, tempval);
1219
1220	/* Clear mac addr hash registers */
1221	gfar_write(&regs->igaddr0, 0);
1222	gfar_write(&regs->igaddr1, 0);
1223	gfar_write(&regs->igaddr2, 0);
1224	gfar_write(&regs->igaddr3, 0);
1225	gfar_write(&regs->igaddr4, 0);
1226	gfar_write(&regs->igaddr5, 0);
1227	gfar_write(&regs->igaddr6, 0);
1228	gfar_write(&regs->igaddr7, 0);
1229
1230	gfar_write(&regs->gaddr0, 0);
1231	gfar_write(&regs->gaddr1, 0);
1232	gfar_write(&regs->gaddr2, 0);
1233	gfar_write(&regs->gaddr3, 0);
1234	gfar_write(&regs->gaddr4, 0);
1235	gfar_write(&regs->gaddr5, 0);
1236	gfar_write(&regs->gaddr6, 0);
1237	gfar_write(&regs->gaddr7, 0);
1238
1239	if (priv->extended_hash)
1240		gfar_clear_exact_match(priv->ndev);
1241
1242	gfar_mac_rx_config(priv);
1243
1244	gfar_mac_tx_config(priv);
1245
1246	gfar_set_mac_address(priv->ndev);
1247
1248	gfar_set_multi(priv->ndev);
1249
1250	/* clear ievent and imask before configuring coalescing */
1251	gfar_ints_disable(priv);
1252
1253	/* Configure the coalescing support */
1254	gfar_configure_coalescing_all(priv);
1255}
1256
1257static void gfar_hw_init(struct gfar_private *priv)
1258{
1259	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1260	u32 attrs;
1261
1262	/* Stop the DMA engine now, in case it was running before
1263	 * (The firmware could have used it, and left it running).
1264	 */
1265	gfar_halt(priv);
1266
1267	gfar_mac_reset(priv);
1268
1269	/* Zero out the rmon mib registers if it has them */
1270	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1271		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1272
1273		/* Mask off the CAM interrupts */
1274		gfar_write(&regs->rmon.cam1, 0xffffffff);
1275		gfar_write(&regs->rmon.cam2, 0xffffffff);
1276	}
1277
1278	/* Initialize ECNTRL */
1279	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1280
1281	/* Set the extraction length and index */
1282	attrs = ATTRELI_EL(priv->rx_stash_size) |
1283		ATTRELI_EI(priv->rx_stash_index);
1284
1285	gfar_write(&regs->attreli, attrs);
1286
1287	/* Start with defaults, and add stashing
1288	 * depending on driver parameters
1289	 */
1290	attrs = ATTR_INIT_SETTINGS;
1291
1292	if (priv->bd_stash_en)
1293		attrs |= ATTR_BDSTASH;
1294
1295	if (priv->rx_stash_size != 0)
1296		attrs |= ATTR_BUFSTASH;
1297
1298	gfar_write(&regs->attr, attrs);
1299
1300	/* FIFO configs */
1301	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1302	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1303	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1304
1305	/* Program the interrupt steering regs, only for MG devices */
1306	if (priv->num_grps > 1)
1307		gfar_write_isrg(priv);
1308}
1309
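/* Select the multicast hash registers: devices with the extended hash
 * capability use all eight igaddr plus eight gaddr registers (a 9-bit,
 * 512-bin hash), the others only the eight gaddr registers (an 8-bit,
 * 256-bin hash).
 */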
1310static void gfar_init_addr_hash_table(struct gfar_private *priv)
1311{
1312	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1313
1314	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1315		priv->extended_hash = 1;
1316		priv->hash_width = 9;
1317
1318		priv->hash_regs[0] = &regs->igaddr0;
1319		priv->hash_regs[1] = &regs->igaddr1;
1320		priv->hash_regs[2] = &regs->igaddr2;
1321		priv->hash_regs[3] = &regs->igaddr3;
1322		priv->hash_regs[4] = &regs->igaddr4;
1323		priv->hash_regs[5] = &regs->igaddr5;
1324		priv->hash_regs[6] = &regs->igaddr6;
1325		priv->hash_regs[7] = &regs->igaddr7;
1326		priv->hash_regs[8] = &regs->gaddr0;
1327		priv->hash_regs[9] = &regs->gaddr1;
1328		priv->hash_regs[10] = &regs->gaddr2;
1329		priv->hash_regs[11] = &regs->gaddr3;
1330		priv->hash_regs[12] = &regs->gaddr4;
1331		priv->hash_regs[13] = &regs->gaddr5;
1332		priv->hash_regs[14] = &regs->gaddr6;
1333		priv->hash_regs[15] = &regs->gaddr7;
1334
1335	} else {
1336		priv->extended_hash = 0;
1337		priv->hash_width = 8;
1338
1339		priv->hash_regs[0] = &regs->gaddr0;
1340		priv->hash_regs[1] = &regs->gaddr1;
1341		priv->hash_regs[2] = &regs->gaddr2;
1342		priv->hash_regs[3] = &regs->gaddr3;
1343		priv->hash_regs[4] = &regs->gaddr4;
1344		priv->hash_regs[5] = &regs->gaddr5;
1345		priv->hash_regs[6] = &regs->gaddr6;
1346		priv->hash_regs[7] = &regs->gaddr7;
1347	}
1348}
1349
1350/* Set up the ethernet device structure, private data,
1351 * and anything else we need before we start
1352 */
1353static int gfar_probe(struct platform_device *ofdev)
1354{
1355	struct net_device *dev = NULL;
1356	struct gfar_private *priv = NULL;
1357	int err = 0, i;
1358
1359	err = gfar_of_init(ofdev, &dev);
1360
1361	if (err)
1362		return err;
1363
1364	priv = netdev_priv(dev);
1365	priv->ndev = dev;
1366	priv->ofdev = ofdev;
1367	priv->dev = &ofdev->dev;
1368	SET_NETDEV_DEV(dev, &ofdev->dev);
1369
1370	spin_lock_init(&priv->bflock);
1371	INIT_WORK(&priv->reset_task, gfar_reset_task);
1372
1373	platform_set_drvdata(ofdev, priv);
1374
1375	gfar_detect_errata(priv);
1376
1377	/* Set the dev->base_addr to the gfar reg region */
1378	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1379
1380	/* Fill in the dev structure */
1381	dev->watchdog_timeo = TX_TIMEOUT;
1382	dev->mtu = 1500;
1383	dev->netdev_ops = &gfar_netdev_ops;
1384	dev->ethtool_ops = &gfar_ethtool_ops;
1385
	/* Register NAPI handlers, one Rx and one Tx pair per interrupt group */
1387	for (i = 0; i < priv->num_grps; i++) {
1388		if (priv->poll_mode == GFAR_SQ_POLLING) {
1389			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1390				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1391			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1392				       gfar_poll_tx_sq, 2);
1393		} else {
1394			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1395				       gfar_poll_rx, GFAR_DEV_WEIGHT);
1396			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1397				       gfar_poll_tx, 2);
1398		}
1399	}
1400
1401	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1402		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1403				   NETIF_F_RXCSUM;
1404		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1405				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1406	}
1407
1408	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1409		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1410				    NETIF_F_HW_VLAN_CTAG_RX;
1411		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1412	}
1413
1414	gfar_init_addr_hash_table(priv);
1415
1416	/* Insert receive time stamps into padding alignment bytes */
1417	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1418		priv->padding = 8;
1419
1420	if (dev->features & NETIF_F_IP_CSUM ||
1421	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1422		dev->needed_headroom = GMAC_FCB_LEN;
1423
1424	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1425
1426	/* Initializing some of the rx/tx queue level parameters */
1427	for (i = 0; i < priv->num_tx_queues; i++) {
1428		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
1429		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1430		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1431		priv->tx_queue[i]->txic = DEFAULT_TXIC;
1432	}
1433
1434	for (i = 0; i < priv->num_rx_queues; i++) {
1435		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1436		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1437		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1438	}
1439
1440	/* always enable rx filer */
1441	priv->rx_filer_enable = 1;
1442	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
1445	if (priv->num_tx_queues == 1)
1446		priv->prio_sched_en = 1;
1447
1448	set_bit(GFAR_DOWN, &priv->state);
1449
1450	gfar_hw_init(priv);
1451
1452	/* Carrier starts down, phylib will bring it up */
1453	netif_carrier_off(dev);
1454
1455	err = register_netdev(dev);
1456
1457	if (err) {
1458		pr_err("%s: Cannot register net device, aborting\n", dev->name);
1459		goto register_fail;
1460	}
1461
1462	device_init_wakeup(&dev->dev,
1463			   priv->device_flags &
1464			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1465
1466	/* fill out IRQ number and name fields */
1467	for (i = 0; i < priv->num_grps; i++) {
1468		struct gfar_priv_grp *grp = &priv->gfargrp[i];
1469		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1470			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
1471				dev->name, "_g", '0' + i, "_tx");
1472			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
1473				dev->name, "_g", '0' + i, "_rx");
1474			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
1475				dev->name, "_g", '0' + i, "_er");
		} else {
			strcpy(gfar_irq(grp, TX)->name, dev->name);
		}
1478	}
1479
1480	/* Initialize the filer table */
1481	gfar_init_filer_table(priv);
1482
1483	/* Print out the device info */
1484	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1485
1486	/* Even more device info helps when determining which kernel
1487	 * provided which set of benchmarks.
1488	 */
1489	netdev_info(dev, "Running with NAPI enabled\n");
1490	for (i = 0; i < priv->num_rx_queues; i++)
1491		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
1492			    i, priv->rx_queue[i]->rx_ring_size);
1493	for (i = 0; i < priv->num_tx_queues; i++)
1494		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
1495			    i, priv->tx_queue[i]->tx_ring_size);
1496
1497	return 0;
1498
1499register_fail:
1500	unmap_group_regs(priv);
1501	gfar_free_rx_queues(priv);
1502	gfar_free_tx_queues(priv);
1503	of_node_put(priv->phy_node);
1504	of_node_put(priv->tbi_node);
1505	free_gfar_dev(priv);
1506	return err;
1507}
1508
1509static int gfar_remove(struct platform_device *ofdev)
1510{
1511	struct gfar_private *priv = platform_get_drvdata(ofdev);
1512
1513	of_node_put(priv->phy_node);
1514	of_node_put(priv->tbi_node);
1515
1516	unregister_netdev(priv->ndev);
1517	unmap_group_regs(priv);
1518	gfar_free_rx_queues(priv);
1519	gfar_free_tx_queues(priv);
1520	free_gfar_dev(priv);
1521
1522	return 0;
1523}
1524
1525#ifdef CONFIG_PM
1526
1527static int gfar_suspend(struct device *dev)
1528{
1529	struct gfar_private *priv = dev_get_drvdata(dev);
1530	struct net_device *ndev = priv->ndev;
1531	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1532	unsigned long flags;
1533	u32 tempval;
1534
1535	int magic_packet = priv->wol_en &&
1536			   (priv->device_flags &
1537			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1538
1539	netif_device_detach(ndev);
1540
1541	if (netif_running(ndev)) {
1542
1543		local_irq_save(flags);
1544		lock_tx_qs(priv);
1545
1546		gfar_halt_nodisable(priv);
1547
1548		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
1549		tempval = gfar_read(&regs->maccfg1);
1550
1551		tempval &= ~MACCFG1_TX_EN;
1552
1553		if (!magic_packet)
1554			tempval &= ~MACCFG1_RX_EN;
1555
1556		gfar_write(&regs->maccfg1, tempval);
1557
1558		unlock_tx_qs(priv);
1559		local_irq_restore(flags);
1560
1561		disable_napi(priv);
1562
1563		if (magic_packet) {
1564			/* Enable interrupt on Magic Packet */
1565			gfar_write(&regs->imask, IMASK_MAG);
1566
1567			/* Enable Magic Packet mode */
1568			tempval = gfar_read(&regs->maccfg2);
1569			tempval |= MACCFG2_MPEN;
1570			gfar_write(&regs->maccfg2, tempval);
1571		} else {
1572			phy_stop(priv->phydev);
1573		}
1574	}
1575
1576	return 0;
1577}
1578
1579static int gfar_resume(struct device *dev)
1580{
1581	struct gfar_private *priv = dev_get_drvdata(dev);
1582	struct net_device *ndev = priv->ndev;
1583	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1584	unsigned long flags;
1585	u32 tempval;
1586	int magic_packet = priv->wol_en &&
1587			   (priv->device_flags &
1588			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1589
1590	if (!netif_running(ndev)) {
1591		netif_device_attach(ndev);
1592		return 0;
1593	}
1594
1595	if (!magic_packet && priv->phydev)
1596		phy_start(priv->phydev);
1597
1598	/* Disable Magic Packet mode, in case something
1599	 * else woke us up.
1600	 */
1601	local_irq_save(flags);
1602	lock_tx_qs(priv);
1603
1604	tempval = gfar_read(&regs->maccfg2);
1605	tempval &= ~MACCFG2_MPEN;
1606	gfar_write(&regs->maccfg2, tempval);
1607
1608	gfar_start(priv);
1609
1610	unlock_tx_qs(priv);
1611	local_irq_restore(flags);
1612
1613	netif_device_attach(ndev);
1614
1615	enable_napi(priv);
1616
1617	return 0;
1618}
1619
1620static int gfar_restore(struct device *dev)
1621{
1622	struct gfar_private *priv = dev_get_drvdata(dev);
1623	struct net_device *ndev = priv->ndev;
1624
1625	if (!netif_running(ndev)) {
1626		netif_device_attach(ndev);
1627
1628		return 0;
1629	}
1630
1631	if (gfar_init_bds(ndev)) {
1632		free_skb_resources(priv);
1633		return -ENOMEM;
1634	}
1635
1636	gfar_mac_reset(priv);
1637
1638	gfar_init_tx_rx_base(priv);
1639
1640	gfar_start(priv);
1641
1642	priv->oldlink = 0;
1643	priv->oldspeed = 0;
1644	priv->oldduplex = -1;
1645
1646	if (priv->phydev)
1647		phy_start(priv->phydev);
1648
1649	netif_device_attach(ndev);
1650	enable_napi(priv);
1651
1652	return 0;
1653}
1654
1655static struct dev_pm_ops gfar_pm_ops = {
1656	.suspend = gfar_suspend,
1657	.resume = gfar_resume,
1658	.freeze = gfar_suspend,
1659	.thaw = gfar_resume,
1660	.restore = gfar_restore,
1661};
1662
1663#define GFAR_PM_OPS (&gfar_pm_ops)
1664
1665#else
1666
1667#define GFAR_PM_OPS NULL
1668
1669#endif
1670
1671/* Reads the controller's registers to determine what interface
1672 * connects it to the PHY.
1673 */
1674static phy_interface_t gfar_get_interface(struct net_device *dev)
1675{
1676	struct gfar_private *priv = netdev_priv(dev);
1677	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1678	u32 ecntrl;
1679
1680	ecntrl = gfar_read(&regs->ecntrl);
1681
1682	if (ecntrl & ECNTRL_SGMII_MODE)
1683		return PHY_INTERFACE_MODE_SGMII;
1684
1685	if (ecntrl & ECNTRL_TBI_MODE) {
1686		if (ecntrl & ECNTRL_REDUCED_MODE)
1687			return PHY_INTERFACE_MODE_RTBI;
1688		else
1689			return PHY_INTERFACE_MODE_TBI;
1690	}
1691
1692	if (ecntrl & ECNTRL_REDUCED_MODE) {
1693		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1694			return PHY_INTERFACE_MODE_RMII;
1695		}
1696		else {
1697			phy_interface_t interface = priv->interface;
1698
1699			/* This isn't autodetected right now, so it must
1700			 * be set by the device tree or platform code.
1701			 */
1702			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
1703				return PHY_INTERFACE_MODE_RGMII_ID;
1704
1705			return PHY_INTERFACE_MODE_RGMII;
1706		}
1707	}
1708
1709	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1710		return PHY_INTERFACE_MODE_GMII;
1711
1712	return PHY_INTERFACE_MODE_MII;
1713}
1714
1715
1716/* Initializes driver's PHY state, and attaches to the PHY.
1717 * Returns 0 on success.
1718 */
1719static int init_phy(struct net_device *dev)
1720{
1721	struct gfar_private *priv = netdev_priv(dev);
1722	uint gigabit_support =
1723		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1724		GFAR_SUPPORTED_GBIT : 0;
1725	phy_interface_t interface;
1726
1727	priv->oldlink = 0;
1728	priv->oldspeed = 0;
1729	priv->oldduplex = -1;
1730
1731	interface = gfar_get_interface(dev);
1732
1733	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
1734				      interface);
1735	if (!priv->phydev) {
1736		dev_err(&dev->dev, "could not attach to PHY\n");
1737		return -ENODEV;
1738	}
1739
1740	if (interface == PHY_INTERFACE_MODE_SGMII)
1741		gfar_configure_serdes(dev);
1742
1743	/* Remove any features not supported by the controller */
1744	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
1745	priv->phydev->advertising = priv->phydev->supported;
1746
1747	/* Add support for flow control, but don't advertise it by default */
1748	priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1749
1750	return 0;
1751}
1752
1753/* Initialize TBI PHY interface for communicating with the
1754 * SERDES lynx PHY on the chip.  We communicate with this PHY
1755 * through the MDIO bus on each controller, treating it as a
1756 * "normal" PHY at the address found in the TBIPA register.  We assume
1757 * that the TBIPA register is valid.  Either the MDIO bus code will set
1758 * it to a value that doesn't conflict with other PHYs on the bus, or the
1759 * value doesn't matter, as there are no other PHYs on the bus.
1760 */
1761static void gfar_configure_serdes(struct net_device *dev)
1762{
1763	struct gfar_private *priv = netdev_priv(dev);
1764	struct phy_device *tbiphy;
1765
1766	if (!priv->tbi_node) {
1767		dev_warn(&dev->dev, "error: SGMII mode requires that the "
1768				    "device tree specify a tbi-handle\n");
1769		return;
1770	}
1771
1772	tbiphy = of_phy_find_device(priv->tbi_node);
1773	if (!tbiphy) {
1774		dev_err(&dev->dev, "error: Could not get TBI device\n");
1775		return;
1776	}
1777
1778	/* If the link is already up, we must already be ok, and don't need to
1779	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
1780	 * everything for us?  Resetting it takes the link down and requires
1781	 * several seconds for it to come back.
1782	 */
1783	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1784		return;
1785
	/* Single clk mode, mii mode off (for serdes communication) */
1787	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
1788
1789	phy_write(tbiphy, MII_ADVERTISE,
1790		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
1791		  ADVERTISE_1000XPSE_ASYM);
1792
1793	phy_write(tbiphy, MII_BMCR,
1794		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1795		  BMCR_SPEED1000);
1796}
1797
1798static int __gfar_is_rx_idle(struct gfar_private *priv)
1799{
1800	u32 res;
1801
	/* Normally TSEC should not hang on GRS commands, so we should
1803	 * actually wait for IEVENT_GRSC flag.
1804	 */
1805	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1806		return 0;
1807
1808	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1809	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1810	 * and the Rx can be safely reset.
1811	 */
1812	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
1813	res &= 0x7f807f80;
1814	if ((res & 0xffff) == (res >> 16))
1815		return 1;
1816
1817	return 0;
1818}
1819
1820/* Halt the receive and transmit queues */
1821static void gfar_halt_nodisable(struct gfar_private *priv)
1822{
1823	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1824	u32 tempval;
1825	unsigned int timeout;
1826	int stopped;
1827
1828	gfar_ints_disable(priv);
1829
1830	if (gfar_is_dma_stopped(priv))
1831		return;
1832
1833	/* Stop the DMA, and wait for it to stop */
1834	tempval = gfar_read(&regs->dmactrl);
1835	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1836	gfar_write(&regs->dmactrl, tempval);
1837
1838retry:
1839	timeout = 1000;
1840	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
1841		cpu_relax();
1842		timeout--;
1843	}
1844
1845	if (!timeout)
1846		stopped = gfar_is_dma_stopped(priv);
1847
1848	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
1849	    !__gfar_is_rx_idle(priv))
1850		goto retry;
1851}
1852
1853/* Halt the receive and transmit queues */
1854void gfar_halt(struct gfar_private *priv)
1855{
1856	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1857	u32 tempval;
1858
	/* Disable the Rx/Tx hw queues */
1860	gfar_write(&regs->rqueue, 0);
1861	gfar_write(&regs->tqueue, 0);
1862
1863	mdelay(10);
1864
1865	gfar_halt_nodisable(priv);
1866
1867	/* Disable Rx/Tx DMA */
1868	tempval = gfar_read(&regs->maccfg1);
1869	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1870	gfar_write(&regs->maccfg1, tempval);
1871}
1872
1873void stop_gfar(struct net_device *dev)
1874{
1875	struct gfar_private *priv = netdev_priv(dev);
1876
1877	netif_tx_stop_all_queues(dev);
1878
1879	smp_mb__before_atomic();
1880	set_bit(GFAR_DOWN, &priv->state);
1881	smp_mb__after_atomic();
1882
1883	disable_napi(priv);
1884
1885	/* disable ints and gracefully shut down Rx/Tx DMA */
1886	gfar_halt(priv);
1887
1888	phy_stop(priv->phydev);
1889
1890	free_skb_resources(priv);
1891}
1892
1893static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1894{
1895	struct txbd8 *txbdp;
1896	struct gfar_private *priv = netdev_priv(tx_queue->dev);
1897	int i, j;
1898
1899	txbdp = tx_queue->tx_bd_base;
1900
1901	for (i = 0; i < tx_queue->tx_ring_size; i++) {
1902		if (!tx_queue->tx_skbuff[i])
1903			continue;
1904
1905		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
1906				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
1907		txbdp->lstatus = 0;
1908		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1909		     j++) {
1910			txbdp++;
1911			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
1912				       be16_to_cpu(txbdp->length),
1913				       DMA_TO_DEVICE);
1914		}
1915		txbdp++;
1916		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
1917		tx_queue->tx_skbuff[i] = NULL;
1918	}
1919	kfree(tx_queue->tx_skbuff);
1920	tx_queue->tx_skbuff = NULL;
1921}
1922
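/* Unmap and free any skbs still attached to the Rx ring, clear the
 * Rx buffer descriptors, then release the rx_skbuff pointer array.
 */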
1923static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1924{
1925	struct rxbd8 *rxbdp;
1926	struct gfar_private *priv = netdev_priv(rx_queue->dev);
1927	int i;
1928
1929	rxbdp = rx_queue->rx_bd_base;
1930
1931	for (i = 0; i < rx_queue->rx_ring_size; i++) {
1932		if (rx_queue->rx_skbuff[i]) {
1933			dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
1934					 priv->rx_buffer_size,
1935					 DMA_FROM_DEVICE);
1936			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1937			rx_queue->rx_skbuff[i] = NULL;
1938		}
1939		rxbdp->lstatus = 0;
1940		rxbdp->bufPtr = 0;
1941		rxbdp++;
1942	}
1943	kfree(rx_queue->rx_skbuff);
1944	rx_queue->rx_skbuff = NULL;
1945}
1946
1947/* If there are any tx skbs or rx skbs still around, free them.
1948 * Then free tx_skbuff and rx_skbuff
1949 */
1950static void free_skb_resources(struct gfar_private *priv)
1951{
1952	struct gfar_priv_tx_q *tx_queue = NULL;
1953	struct gfar_priv_rx_q *rx_queue = NULL;
1954	int i;
1955
1956	/* Go through all the buffer descriptors and free their data buffers */
1957	for (i = 0; i < priv->num_tx_queues; i++) {
1958		struct netdev_queue *txq;
1959
1960		tx_queue = priv->tx_queue[i];
1961		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1962		if (tx_queue->tx_skbuff)
1963			free_skb_tx_queue(tx_queue);
1964		netdev_tx_reset_queue(txq);
1965	}
1966
1967	for (i = 0; i < priv->num_rx_queues; i++) {
1968		rx_queue = priv->rx_queue[i];
1969		if (rx_queue->rx_skbuff)
1970			free_skb_rx_queue(rx_queue);
1971	}
1972
1973	dma_free_coherent(priv->dev,
1974			  sizeof(struct txbd8) * priv->total_tx_ring_size +
1975			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
1976			  priv->tx_queue[0]->tx_bd_base,
1977			  priv->tx_queue[0]->tx_bd_dma_base);
1978}
1979
1980void gfar_start(struct gfar_private *priv)
1981{
1982	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1983	u32 tempval;
1984	int i = 0;
1985
1986	/* Enable Rx/Tx hw queues */
1987	gfar_write(&regs->rqueue, priv->rqueue);
1988	gfar_write(&regs->tqueue, priv->tqueue);
1989
1990	/* Initialize DMACTRL to have WWR and WOP */
1991	tempval = gfar_read(&regs->dmactrl);
1992	tempval |= DMACTRL_INIT_SETTINGS;
1993	gfar_write(&regs->dmactrl, tempval);
1994
1995	/* Make sure we aren't stopped */
1996	tempval = gfar_read(&regs->dmactrl);
1997	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1998	gfar_write(&regs->dmactrl, tempval);
1999
2000	for (i = 0; i < priv->num_grps; i++) {
2001		regs = priv->gfargrp[i].regs;
2002		/* Clear THLT/RHLT, so that the DMA starts polling now */
2003		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
2004		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
2005	}
2006
2007	/* Enable Rx/Tx DMA */
2008	tempval = gfar_read(&regs->maccfg1);
2009	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
2010	gfar_write(&regs->maccfg1, tempval);
2011
2012	gfar_ints_enable(priv);
2013
2014	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
2015}
2016
2017static void free_grp_irqs(struct gfar_priv_grp *grp)
2018{
2019	free_irq(gfar_irq(grp, TX)->irq, grp);
2020	free_irq(gfar_irq(grp, RX)->irq, grp);
2021	free_irq(gfar_irq(grp, ER)->irq, grp);
2022}
2023
2024static int register_grp_irqs(struct gfar_priv_grp *grp)
2025{
2026	struct gfar_private *priv = grp->priv;
2027	struct net_device *dev = priv->ndev;
2028	int err;
2029
2030	/* If the device has multiple interrupts, register for
2031	 * them.  Otherwise, only register for the one
2032	 */
2033	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2034		/* Install our interrupt handlers for Error,
2035		 * Transmit, and Receive
2036		 */
2037		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2038				  gfar_irq(grp, ER)->name, grp);
2039		if (err < 0) {
2040			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2041				  gfar_irq(grp, ER)->irq);
2042
2043			goto err_irq_fail;
2044		}
2045		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2046				  gfar_irq(grp, TX)->name, grp);
2047		if (err < 0) {
2048			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2049				  gfar_irq(grp, TX)->irq);
2050			goto tx_irq_fail;
2051		}
2052		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2053				  gfar_irq(grp, RX)->name, grp);
2054		if (err < 0) {
2055			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2056				  gfar_irq(grp, RX)->irq);
2057			goto rx_irq_fail;
2058		}
2059	} else {
2060		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2061				  gfar_irq(grp, TX)->name, grp);
2062		if (err < 0) {
2063			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2064				  gfar_irq(grp, TX)->irq);
2065			goto err_irq_fail;
2066		}
2067	}
2068
2069	return 0;
2070
2071rx_irq_fail:
2072	free_irq(gfar_irq(grp, TX)->irq, grp);
2073tx_irq_fail:
2074	free_irq(gfar_irq(grp, ER)->irq, grp);
2075err_irq_fail:
2076	return err;
2077
2078}
2079
2080static void gfar_free_irq(struct gfar_private *priv)
2081{
2082	int i;
2083
2084	/* Free the IRQs */
2085	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2086		for (i = 0; i < priv->num_grps; i++)
2087			free_grp_irqs(&priv->gfargrp[i]);
2088	} else {
2089		for (i = 0; i < priv->num_grps; i++)
2090			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2091				 &priv->gfargrp[i]);
2092	}
2093}
2094
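/* Request the IRQs for every interrupt group; if one group fails,
 * release the IRQs of the groups already registered.
 */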
2095static int gfar_request_irq(struct gfar_private *priv)
2096{
2097	int err, i, j;
2098
2099	for (i = 0; i < priv->num_grps; i++) {
2100		err = register_grp_irqs(&priv->gfargrp[i]);
2101		if (err) {
2102			for (j = 0; j < i; j++)
2103				free_grp_irqs(&priv->gfargrp[j]);
2104			return err;
2105		}
2106	}
2107
2108	return 0;
2109}
2110
2111/* Bring the controller up and running */
2112int startup_gfar(struct net_device *ndev)
2113{
2114	struct gfar_private *priv = netdev_priv(ndev);
2115	int err;
2116
2117	gfar_mac_reset(priv);
2118
2119	err = gfar_alloc_skb_resources(ndev);
2120	if (err)
2121		return err;
2122
2123	gfar_init_tx_rx_base(priv);
2124
2125	smp_mb__before_atomic();
2126	clear_bit(GFAR_DOWN, &priv->state);
2127	smp_mb__after_atomic();
2128
2129	/* Start Rx/Tx DMA and enable the interrupts */
2130	gfar_start(priv);
2131
2132	phy_start(priv->phydev);
2133
2134	enable_napi(priv);
2135
2136	netif_tx_wake_all_queues(ndev);
2137
2138	return 0;
2139}
2140
2141/* Called when something needs to use the ethernet device
2142 * Returns 0 for success.
2143 */
2144static int gfar_enet_open(struct net_device *dev)
2145{
2146	struct gfar_private *priv = netdev_priv(dev);
2147	int err;
2148
2149	err = init_phy(dev);
2150	if (err)
2151		return err;
2152
2153	err = gfar_request_irq(priv);
2154	if (err)
2155		return err;
2156
2157	err = startup_gfar(dev);
2158	if (err)
2159		return err;
2160
2161	device_set_wakeup_enable(&dev->dev, priv->wol_en);
2162
2163	return err;
2164}
2165
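/* Prepend a zeroed Tx Frame Control Block (FCB) to the skb and
 * return a pointer to it.
 */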
2166static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
2167{
2168	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
2169
2170	memset(fcb, 0, GMAC_FCB_LEN);
2171
2172	return fcb;
2173}
2174
2175static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2176				    int fcb_length)
2177{
2178	/* If we're here, it's an IP packet with a TCP or UDP
2179	 * payload.  We set it up to be checksummed by the controller,
2180	 * using a pseudo-header we provide
2181	 */
2182	u8 flags = TXFCB_DEFAULT;
2183
2184	/* Tell the controller what the protocol is,
2185	 * and provide the already-calculated pseudo-header checksum (phcs)
2186	 */
2187	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2188		flags |= TXFCB_UDP;
2189		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
2190	} else
2191		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
2192
2193	/* l3os is the distance between the start of the
2194	 * frame (skb->data) and the start of the IP hdr.
2195	 * l4os is the distance between the start of the
2196	 * l3 hdr and the l4 hdr
2197	 */
2198	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
2199	fcb->l4os = skb_network_header_len(skb);
2200
2201	fcb->flags = flags;
2202}
2203
2204static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2205{
2206	fcb->flags |= TXFCB_VLN;
2207	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
2208}
2209
2210static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2211				      struct txbd8 *base, int ring_size)
2212{
2213	struct txbd8 *new_bd = bdp + stride;
2214
2215	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
2216}
2217
2218static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
2219				      int ring_size)
2220{
2221	return skip_txbd(bdp, 1, base, ring_size);
2222}
2223
2224/* eTSEC12: csum generation not supported for some fcb offsets */
2225static inline bool gfar_csum_errata_12(struct gfar_private *priv,
2226				       unsigned long fcb_addr)
2227{
2228	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
2229	       (fcb_addr % 0x20) > 0x18);
2230}
2231
2232/* eTSEC76: csum generation for frames larger than 2500 may
2233 * cause excess delays before start of transmission
2234 */
2235static inline bool gfar_csum_errata_76(struct gfar_private *priv,
2236				       unsigned int len)
2237{
2238	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
2239	       (len > 2500));
2240}
2241
2242/* This is called by the kernel when a frame is ready for transmission.
2243 * It is pointed to by the dev->hard_start_xmit function pointer
2244 */
2245static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2246{
2247	struct gfar_private *priv = netdev_priv(dev);
2248	struct gfar_priv_tx_q *tx_queue = NULL;
2249	struct netdev_queue *txq;
2250	struct gfar __iomem *regs = NULL;
2251	struct txfcb *fcb = NULL;
2252	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2253	u32 lstatus;
2254	int i, rq = 0;
2255	int do_tstamp, do_csum, do_vlan;
2256	u32 bufaddr;
2257	unsigned long flags;
2258	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2259
2260	rq = skb->queue_mapping;
2261	tx_queue = priv->tx_queue[rq];
2262	txq = netdev_get_tx_queue(dev, rq);
2263	base = tx_queue->tx_bd_base;
2264	regs = tx_queue->grp->regs;
2265
2266	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
2267	do_vlan = skb_vlan_tag_present(skb);
2268	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2269		    priv->hwts_tx_en;
2270
2271	if (do_csum || do_vlan)
2272		fcb_len = GMAC_FCB_LEN;
2273
2274	/* check if time stamp should be generated */
2275	if (unlikely(do_tstamp))
2276		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2277
2278	/* make space for additional header when fcb is needed */
2279	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2280		struct sk_buff *skb_new;
2281
2282		skb_new = skb_realloc_headroom(skb, fcb_len);
2283		if (!skb_new) {
2284			dev->stats.tx_errors++;
2285			dev_kfree_skb_any(skb);
2286			return NETDEV_TX_OK;
2287		}
2288
2289		if (skb->sk)
2290			skb_set_owner_w(skb_new, skb->sk);
2291		dev_consume_skb_any(skb);
2292		skb = skb_new;
2293	}
2294
2295	/* total number of fragments in the SKB */
2296	nr_frags = skb_shinfo(skb)->nr_frags;
2297
2298	/* calculate the required number of TxBDs for this skb */
2299	if (unlikely(do_tstamp))
2300		nr_txbds = nr_frags + 2;
2301	else
2302		nr_txbds = nr_frags + 1;
2303
2304	/* check if there is space to queue this packet */
2305	if (nr_txbds > tx_queue->num_txbdfree) {
2306		/* no space, stop the queue */
2307		netif_tx_stop_queue(txq);
2308		dev->stats.tx_fifo_errors++;
2309		return NETDEV_TX_BUSY;
2310	}
2311
2312	/* Update transmit stats */
2313	bytes_sent = skb->len;
2314	tx_queue->stats.tx_bytes += bytes_sent;
2315	/* keep Tx bytes on wire for BQL accounting */
2316	GFAR_CB(skb)->bytes_sent = bytes_sent;
2317	tx_queue->stats.tx_packets++;
2318
2319	txbdp = txbdp_start = tx_queue->cur_tx;
2320	lstatus = be32_to_cpu(txbdp->lstatus);
2321
2322	/* Time stamp insertion requires one additional TxBD */
2323	if (unlikely(do_tstamp))
2324		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2325						 tx_queue->tx_ring_size);
2326
2327	if (nr_frags == 0) {
2328		if (unlikely(do_tstamp)) {
2329			u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2330
2331			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2332			txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
2333		} else {
2334			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2335		}
2336	} else {
2337		/* Place the fragment addresses and lengths into the TxBDs */
2338		for (i = 0; i < nr_frags; i++) {
2339			unsigned int frag_len;
2340			/* Point at the next BD, wrapping as needed */
2341			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2342
2343			frag_len = skb_shinfo(skb)->frags[i].size;
2344
2345			lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
2346				  BD_LFLAG(TXBD_READY);
2347
2348			/* Handle the last BD specially */
2349			if (i == nr_frags - 1)
2350				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
2351
2352			bufaddr = skb_frag_dma_map(priv->dev,
2353						   &skb_shinfo(skb)->frags[i],
2354						   0,
2355						   frag_len,
2356						   DMA_TO_DEVICE);
2357			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2358				goto dma_map_err;
2359
2360			/* set the TxBD length and buffer pointer */
2361			txbdp->bufPtr = cpu_to_be32(bufaddr);
2362			txbdp->lstatus = cpu_to_be32(lstatus);
2363		}
2364
2365		lstatus = be32_to_cpu(txbdp_start->lstatus);
2366	}
2367
2368	/* Add TxPAL between FCB and frame if required */
2369	if (unlikely(do_tstamp)) {
2370		skb_push(skb, GMAC_TXPAL_LEN);
2371		memset(skb->data, 0, GMAC_TXPAL_LEN);
2372	}
2373
2374	/* Add TxFCB if required */
2375	if (fcb_len) {
2376		fcb = gfar_add_fcb(skb);
2377		lstatus |= BD_LFLAG(TXBD_TOE);
2378	}
2379
2380	/* Set up checksumming */
2381	if (do_csum) {
2382		gfar_tx_checksum(skb, fcb, fcb_len);
2383
2384		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
2385		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
2386			__skb_pull(skb, GMAC_FCB_LEN);
2387			skb_checksum_help(skb);
2388			if (do_vlan || do_tstamp) {
2389				/* put back a new fcb for vlan/tstamp TOE */
2390				fcb = gfar_add_fcb(skb);
2391			} else {
2392				/* Tx TOE not used */
2393				lstatus &= ~(BD_LFLAG(TXBD_TOE));
2394				fcb = NULL;
2395			}
2396		}
2397	}
2398
2399	if (do_vlan)
2400		gfar_tx_vlan(skb, fcb);
2401
2402	/* Setup tx hardware time stamping if requested */
2403	if (unlikely(do_tstamp)) {
2404		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2405		fcb->ptp = 1;
2406	}
2407
2408	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
2409				 DMA_TO_DEVICE);
2410	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
2411		goto dma_map_err;
2412
2413	txbdp_start->bufPtr = cpu_to_be32(bufaddr);
2414
2415	/* If time stamping is requested one additional TxBD must be set up. The
2416	 * first TxBD points to the FCB and must have a data length of
2417	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2418	 * the full frame length.
2419	 */
2420	if (unlikely(do_tstamp)) {
2421		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
2422
2423		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
2424		bufaddr += fcb_len;
2425		lstatus_ts |= BD_LFLAG(TXBD_READY) |
2426			      (skb_headlen(skb) - fcb_len);
2427
2428		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
2429		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
2430		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2431	} else {
2432		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2433	}
2434
2435	netdev_tx_sent_queue(txq, bytes_sent);
2436
2437	/* We can work in parallel with gfar_clean_tx_ring(), except
2438	 * when modifying num_txbdfree. Note that we didn't grab the lock
2439	 * when we were reading the num_txbdfree and checking for available
2440	 * space, that's because outside of this function it can only grow,
2441	 * and once we've got needed space, it cannot suddenly disappear.
2442	 *
2443	 * The lock also protects us from gfar_error(), which can modify
2444	 * regs->tstat and thus retrigger the transfers, which is why we
2445	 * must also grab the lock before setting the ready bit for the
2446	 * first BD to be transmitted.
2447	 */
2448	spin_lock_irqsave(&tx_queue->txlock, flags);
2449
2450	gfar_wmb();
2451
2452	txbdp_start->lstatus = cpu_to_be32(lstatus);
2453
2454	gfar_wmb(); /* force lstatus write before tx_skbuff */
2455
2456	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2457
2458	/* Update the current skb pointer to the next entry we will use
2459	 * (wrapping if necessary)
2460	 */
2461	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2462			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
2463
2464	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2465
2466	/* reduce TxBD free count */
2467	tx_queue->num_txbdfree -= (nr_txbds);
2468
2469	/* If the next BD still needs to be cleaned up, then the bds
2470	 * are full.  We need to tell the kernel to stop sending us stuff.
2471	 */
2472	if (!tx_queue->num_txbdfree) {
2473		netif_tx_stop_queue(txq);
2474
2475		dev->stats.tx_fifo_errors++;
2476	}
2477
2478	/* Tell the DMA to go go go */
2479	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
2480
2481	/* Unlock priv */
2482	spin_unlock_irqrestore(&tx_queue->txlock, flags);
2483
2484	return NETDEV_TX_OK;
2485
2486dma_map_err:
2487	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
2488	if (do_tstamp)
2489		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2490	for (i = 0; i < nr_frags; i++) {
2491		lstatus = be32_to_cpu(txbdp->lstatus);
2492		if (!(lstatus & BD_LFLAG(TXBD_READY)))
2493			break;
2494
2495		lstatus &= ~BD_LFLAG(TXBD_READY);
2496		txbdp->lstatus = cpu_to_be32(lstatus);
2497		bufaddr = be32_to_cpu(txbdp->bufPtr);
2498		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
2499			       DMA_TO_DEVICE);
2500		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2501	}
2502	gfar_wmb();
2503	dev_kfree_skb_any(skb);
2504	return NETDEV_TX_OK;
2505}
2506
2507/* Stops the kernel queue, and halts the controller */
2508static int gfar_close(struct net_device *dev)
2509{
2510	struct gfar_private *priv = netdev_priv(dev);
2511
2512	cancel_work_sync(&priv->reset_task);
2513	stop_gfar(dev);
2514
2515	/* Disconnect from the PHY */
2516	phy_disconnect(priv->phydev);
2517	priv->phydev = NULL;
2518
2519	gfar_free_irq(priv);
2520
2521	return 0;
2522}
2523
2524/* Changes the mac address if the controller is not running. */
2525static int gfar_set_mac_address(struct net_device *dev)
2526{
2527	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2528
2529	return 0;
2530}
2531
2532static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2533{
2534	struct gfar_private *priv = netdev_priv(dev);
2535	int frame_size = new_mtu + ETH_HLEN;
2536
2537	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2538		netif_err(priv, drv, dev, "Invalid MTU setting\n");
2539		return -EINVAL;
2540	}
2541
2542	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2543		cpu_relax();
2544
2545	if (dev->flags & IFF_UP)
2546		stop_gfar(dev);
2547
2548	dev->mtu = new_mtu;
2549
2550	if (dev->flags & IFF_UP)
2551		startup_gfar(dev);
2552
2553	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2554
2555	return 0;
2556}
2557
2558void reset_gfar(struct net_device *ndev)
2559{
2560	struct gfar_private *priv = netdev_priv(ndev);
2561
2562	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2563		cpu_relax();
2564
2565	stop_gfar(ndev);
2566	startup_gfar(ndev);
2567
2568	clear_bit_unlock(GFAR_RESETTING, &priv->state);
2569}
2570
2571/* gfar_reset_task gets scheduled when a packet has not been
2572 * transmitted after a set amount of time.
2573 * For now, assume that clearing out all the structures and
2574 * starting over will fix the problem.
2575 */
2576static void gfar_reset_task(struct work_struct *work)
2577{
2578	struct gfar_private *priv = container_of(work, struct gfar_private,
2579						 reset_task);
2580	reset_gfar(priv->ndev);
2581}
2582
2583static void gfar_timeout(struct net_device *dev)
2584{
2585	struct gfar_private *priv = netdev_priv(dev);
2586
2587	dev->stats.tx_errors++;
2588	schedule_work(&priv->reset_task);
2589}
2590
2591static void gfar_align_skb(struct sk_buff *skb)
2592{
2593	/* We need skb->data to be properly aligned, so reserve however
2594	 * many bytes are needed to round it up to RXBUF_ALIGNMENT
2595	 */
2596	skb_reserve(skb, RXBUF_ALIGNMENT -
2597		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
2598}
2599
2600/* Clean up completed Tx descriptors: unmap buffers, free skbs, update BQL */
2601static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2602{
2603	struct net_device *dev = tx_queue->dev;
2604	struct netdev_queue *txq;
2605	struct gfar_private *priv = netdev_priv(dev);
2606	struct txbd8 *bdp, *next = NULL;
2607	struct txbd8 *lbdp = NULL;
2608	struct txbd8 *base = tx_queue->tx_bd_base;
2609	struct sk_buff *skb;
2610	int skb_dirtytx;
2611	int tx_ring_size = tx_queue->tx_ring_size;
2612	int frags = 0, nr_txbds = 0;
2613	int i;
2614	int howmany = 0;
2615	int tqi = tx_queue->qindex;
2616	unsigned int bytes_sent = 0;
2617	u32 lstatus;
2618	size_t buflen;
2619
2620	txq = netdev_get_tx_queue(dev, tqi);
2621	bdp = tx_queue->dirty_tx;
2622	skb_dirtytx = tx_queue->skb_dirtytx;
2623
2624	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2625		unsigned long flags;
2626
2627		frags = skb_shinfo(skb)->nr_frags;
2628
2629		/* When time stamping, one additional TxBD must be freed.
2630		 * Also, we need to dma_unmap_single() the TxPAL.
2631		 */
2632		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2633			nr_txbds = frags + 2;
2634		else
2635			nr_txbds = frags + 1;
2636
2637		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2638
2639		lstatus = be32_to_cpu(lbdp->lstatus);
2640
2641		/* Only clean completed frames */
2642		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2643		    (lstatus & BD_LENGTH_MASK))
2644			break;
2645
2646		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2647			next = next_txbd(bdp, base, tx_ring_size);
2648			buflen = be16_to_cpu(next->length) +
2649				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2650		} else
2651			buflen = be16_to_cpu(bdp->length);
2652
2653		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2654				 buflen, DMA_TO_DEVICE);
2655
2656		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2657			struct skb_shared_hwtstamps shhwtstamps;
2658			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2659
2660			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2661			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2662			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2663			skb_tstamp_tx(skb, &shhwtstamps);
2664			gfar_clear_txbd_status(bdp);
2665			bdp = next;
2666		}
2667
2668		gfar_clear_txbd_status(bdp);
2669		bdp = next_txbd(bdp, base, tx_ring_size);
2670
2671		for (i = 0; i < frags; i++) {
2672			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2673				       be16_to_cpu(bdp->length),
2674				       DMA_TO_DEVICE);
2675			gfar_clear_txbd_status(bdp);
2676			bdp = next_txbd(bdp, base, tx_ring_size);
2677		}
2678
2679		bytes_sent += GFAR_CB(skb)->bytes_sent;
2680
2681		dev_kfree_skb_any(skb);
2682
2683		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2684
2685		skb_dirtytx = (skb_dirtytx + 1) &
2686			      TX_RING_MOD_MASK(tx_ring_size);
2687
2688		howmany++;
2689		spin_lock_irqsave(&tx_queue->txlock, flags);
2690		tx_queue->num_txbdfree += nr_txbds;
2691		spin_unlock_irqrestore(&tx_queue->txlock, flags);
2692	}
2693
2694	/* If we freed a buffer, we can restart transmission, if necessary */
2695	if (tx_queue->num_txbdfree &&
2696	    netif_tx_queue_stopped(txq) &&
2697	    !(test_bit(GFAR_DOWN, &priv->state)))
2698		netif_wake_subqueue(priv->ndev, tqi);
2699
2700	/* Update dirty indicators */
2701	tx_queue->skb_dirtytx = skb_dirtytx;
2702	tx_queue->dirty_tx = bdp;
2703
2704	netdev_tx_completed_queue(txq, howmany, bytes_sent);
2705}
2706
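/* Allocate an Rx skb large enough for rx_buffer_size plus alignment
 * slack, and align its data pointer to RXBUF_ALIGNMENT.
 */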
2707static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
2708{
2709	struct gfar_private *priv = netdev_priv(dev);
2710	struct sk_buff *skb;
2711
2712	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2713	if (!skb)
2714		return NULL;
2715
2716	gfar_align_skb(skb);
2717
2718	return skb;
2719}
2720
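/* Allocate and DMA-map a fresh Rx skb; on success the mapped bus
 * address is returned through *bufaddr.
 */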
2721static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
2722{
2723	struct gfar_private *priv = netdev_priv(dev);
2724	struct sk_buff *skb;
2725	dma_addr_t addr;
2726
2727	skb = gfar_alloc_skb(dev);
2728	if (!skb)
2729		return NULL;
2730
2731	addr = dma_map_single(priv->dev, skb->data,
2732			      priv->rx_buffer_size, DMA_FROM_DEVICE);
2733	if (unlikely(dma_mapping_error(priv->dev, addr))) {
2734		dev_kfree_skb_any(skb);
2735		return NULL;
2736	}
2737
2738	*bufaddr = addr;
2739	return skb;
2740}
2741
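/* Update the netdev and extra error statistics according to the
 * RxBD status flags of a bad frame.
 */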
2742static inline void count_errors(unsigned short status, struct net_device *dev)
2743{
2744	struct gfar_private *priv = netdev_priv(dev);
2745	struct net_device_stats *stats = &dev->stats;
2746	struct gfar_extra_stats *estats = &priv->extra_stats;
2747
2748	/* If the packet was truncated, none of the other errors matter */
2749	if (status & RXBD_TRUNCATED) {
2750		stats->rx_length_errors++;
2751
2752		atomic64_inc(&estats->rx_trunc);
2753
2754		return;
2755	}
2756	/* Count the errors, if there were any */
2757	if (status & (RXBD_LARGE | RXBD_SHORT)) {
2758		stats->rx_length_errors++;
2759
2760		if (status & RXBD_LARGE)
2761			atomic64_inc(&estats->rx_large);
2762		else
2763			atomic64_inc(&estats->rx_short);
2764	}
2765	if (status & RXBD_NONOCTET) {
2766		stats->rx_frame_errors++;
2767		atomic64_inc(&estats->rx_nonoctet);
2768	}
2769	if (status & RXBD_CRCERR) {
2770		atomic64_inc(&estats->rx_crcerr);
2771		stats->rx_crc_errors++;
2772	}
2773	if (status & RXBD_OVERRUN) {
2774		atomic64_inc(&estats->rx_overrun);
2775		stats->rx_crc_errors++;
2776	}
2777}
2778
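/* Rx interrupt handler: mask further Rx interrupts and let NAPI Rx
 * polling do the actual work.
 */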
2779irqreturn_t gfar_receive(int irq, void *grp_id)
2780{
2781	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2782	unsigned long flags;
2783	u32 imask;
2784
2785	if (likely(napi_schedule_prep(&grp->napi_rx))) {
2786		spin_lock_irqsave(&grp->grplock, flags);
2787		imask = gfar_read(&grp->regs->imask);
2788		imask &= IMASK_RX_DISABLED;
2789		gfar_write(&grp->regs->imask, imask);
2790		spin_unlock_irqrestore(&grp->grplock, flags);
2791		__napi_schedule(&grp->napi_rx);
2792	} else {
2793		/* Clear IEVENT, so interrupts aren't called again
2794		 * because of the packets that have already arrived.
2795		 */
2796		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2797	}
2798
2799	return IRQ_HANDLED;
2800}
2801
2802/* Interrupt Handler for Transmit complete */
2803static irqreturn_t gfar_transmit(int irq, void *grp_id)
2804{
2805	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2806	unsigned long flags;
2807	u32 imask;
2808
2809	if (likely(napi_schedule_prep(&grp->napi_tx))) {
2810		spin_lock_irqsave(&grp->grplock, flags);
2811		imask = gfar_read(&grp->regs->imask);
2812		imask &= IMASK_TX_DISABLED;
2813		gfar_write(&grp->regs->imask, imask);
2814		spin_unlock_irqrestore(&grp->grplock, flags);
2815		__napi_schedule(&grp->napi_tx);
2816	} else {
2817		/* Clear IEVENT, so interrupts aren't called again
2818		 * because of the packets that have already arrived.
2819		 */
2820		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2821	}
2822
2823	return IRQ_HANDLED;
2824}
2825
2826static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2827{
2828	/* If valid headers were found and valid checksums were verified,
2829	 * we tell the kernel that no checksumming is necessary.  Otherwise,
2830	 * we leave the checksum unverified and let the stack check it.
2831	 */
2832	if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2833	    (RXFCB_CIP | RXFCB_CTU))
2834		skb->ip_summed = CHECKSUM_UNNECESSARY;
2835	else
2836		skb_checksum_none_assert(skb);
2837}
2838
2839/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2840static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2841			       int amount_pull, struct napi_struct *napi)
2842{
2843	struct gfar_private *priv = netdev_priv(dev);
2844	struct rxfcb *fcb = NULL;
2845
2846	/* The FCB, if present, is at the beginning of the buffer */
2847	fcb = (struct rxfcb *)skb->data;
2848
2849	/* Remove the FCB from the skb
2850	 * Remove the padded bytes, if there are any
2851	 */
2852	if (amount_pull) {
2853		skb_record_rx_queue(skb, fcb->rq);
2854		skb_pull(skb, amount_pull);
2855	}
2856
2857	/* Get receive timestamp from the skb */
2858	if (priv->hwts_rx_en) {
2859		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2860		u64 *ns = (u64 *) skb->data;
2861
2862		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2863		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2864	}
2865
2866	if (priv->padding)
2867		skb_pull(skb, priv->padding);
2868
2869	if (dev->features & NETIF_F_RXCSUM)
2870		gfar_rx_checksum(skb, fcb);
2871
2872	/* Tell the skb what kind of packet this is */
2873	skb->protocol = eth_type_trans(skb, dev);
2874
2875	/* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2876	 * Even if VLAN Rx acceleration is disabled, on some chips
2877	 * RXFCB_VLN is pseudo-randomly set.
2878	 */
2879	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2880	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
2881		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2882				       be16_to_cpu(fcb->vlctl));
2883
2884	/* Send the packet up the stack */
2885	napi_gro_receive(napi, skb);
2886
2887}
2888
2889/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2890 * until the budget/quota has been reached. Returns the number
2891 * of frames handled
2892 */
2893int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2894{
2895	struct net_device *dev = rx_queue->dev;
2896	struct rxbd8 *bdp, *base;
2897	struct sk_buff *skb;
2898	int pkt_len;
2899	int amount_pull;
2900	int howmany = 0;
2901	struct gfar_private *priv = netdev_priv(dev);
2902
2903	/* Get the first full descriptor */
2904	bdp = rx_queue->cur_rx;
2905	base = rx_queue->rx_bd_base;
2906
2907	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
2908
2909	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
2910		struct sk_buff *newskb;
2911		dma_addr_t bufaddr;
2912
2913		rmb();
2914
2915		/* Add another skb for the future */
2916		newskb = gfar_new_skb(dev, &bufaddr);
2917
2918		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
2919
2920		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2921				 priv->rx_buffer_size, DMA_FROM_DEVICE);
2922
2923		if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
2924			     be16_to_cpu(bdp->length) > priv->rx_buffer_size))
2925			bdp->status = cpu_to_be16(RXBD_LARGE);
2926
2927		/* We drop the frame if we failed to allocate a new buffer */
2928		if (unlikely(!newskb ||
2929			     !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
2930			     be16_to_cpu(bdp->status) & RXBD_ERR)) {
2931			count_errors(be16_to_cpu(bdp->status), dev);
2932
2933			if (unlikely(!newskb)) {
2934				newskb = skb;
2935				bufaddr = be32_to_cpu(bdp->bufPtr);
2936			} else if (skb)
2937				dev_kfree_skb(skb);
2938		} else {
2939			/* Increment the number of packets */
2940			rx_queue->stats.rx_packets++;
2941			howmany++;
2942
2943			if (likely(skb)) {
2944				pkt_len = be16_to_cpu(bdp->length) -
2945					  ETH_FCS_LEN;
2946				/* Remove the FCS from the packet length */
2947				skb_put(skb, pkt_len);
2948				rx_queue->stats.rx_bytes += pkt_len;
2949				skb_record_rx_queue(skb, rx_queue->qindex);
2950				gfar_process_frame(dev, skb, amount_pull,
2951						   &rx_queue->grp->napi_rx);
2952
2953			} else {
2954				netif_warn(priv, rx_err, dev, "Missing skb!\n");
2955				rx_queue->stats.rx_dropped++;
2956				atomic64_inc(&priv->extra_stats.rx_skbmissing);
2957			}
2958
2959		}
2960
2961		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
2962
2963		/* Setup the new bdp */
2964		gfar_init_rxbdp(rx_queue, bdp, bufaddr);
2965
2966		/* Update Last Free RxBD pointer for LFC */
2967		if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
2968			gfar_write(rx_queue->rfbptr, (u32)bdp);
2969
2970		/* Update to the next pointer */
2971		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
2972
2973		/* update to point at the next skb */
2974		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
2975				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
2976	}
2977
2978	/* Update the current rxbd pointer to be the next one */
2979	rx_queue->cur_rx = bdp;
2980
2981	return howmany;
2982}
2983
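/* NAPI Rx poll for single-queue mode: clean the Rx ring and, once
 * under budget, complete NAPI and re-enable the Rx interrupts.
 */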
2984static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2985{
2986	struct gfar_priv_grp *gfargrp =
2987		container_of(napi, struct gfar_priv_grp, napi_rx);
2988	struct gfar __iomem *regs = gfargrp->regs;
2989	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2990	int work_done = 0;
2991
2992	/* Clear IEVENT, so interrupts aren't called again
2993	 * because of the packets that have already arrived
2994	 */
2995	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2996
2997	work_done = gfar_clean_rx_ring(rx_queue, budget);
2998
2999	if (work_done < budget) {
3000		u32 imask;
3001		napi_complete(napi);
3002		/* Clear the halt bit in RSTAT */
3003		gfar_write(&regs->rstat, gfargrp->rstat);
3004
3005		spin_lock_irq(&gfargrp->grplock);
3006		imask = gfar_read(&regs->imask);
3007		imask |= IMASK_RX_DEFAULT;
3008		gfar_write(&regs->imask, imask);
3009		spin_unlock_irq(&gfargrp->grplock);
3010	}
3011
3012	return work_done;
3013}
3014
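/* NAPI Tx poll for single-queue mode: run Tx cleanup to completion,
 * then complete NAPI and re-enable the Tx interrupts.
 */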
3015static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
3016{
3017	struct gfar_priv_grp *gfargrp =
3018		container_of(napi, struct gfar_priv_grp, napi_tx);
3019	struct gfar __iomem *regs = gfargrp->regs;
3020	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
3021	u32 imask;
3022
3023	/* Clear IEVENT, so interrupts aren't called again
3024	 * because of the packets that have already arrived
3025	 */
3026	gfar_write(&regs->ievent, IEVENT_TX_MASK);
3027
3028	/* run Tx cleanup to completion */
3029	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
3030		gfar_clean_tx_ring(tx_queue);
3031
3032	napi_complete(napi);
3033
3034	spin_lock_irq(&gfargrp->grplock);
3035	imask = gfar_read(&regs->imask);
3036	imask |= IMASK_TX_DEFAULT;
3037	gfar_write(&regs->imask, imask);
3038	spin_unlock_irq(&gfargrp->grplock);
3039
3040	return 0;
3041}
3042
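/* NAPI Rx poll for multi-queue groups: split the budget across the
 * Rx queues flagged as active in RSTAT, and only re-enable the Rx
 * interrupts once all of them have been fully drained.
 */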
3043static int gfar_poll_rx(struct napi_struct *napi, int budget)
3044{
3045	struct gfar_priv_grp *gfargrp =
3046		container_of(napi, struct gfar_priv_grp, napi_rx);
3047	struct gfar_private *priv = gfargrp->priv;
3048	struct gfar __iomem *regs = gfargrp->regs;
3049	struct gfar_priv_rx_q *rx_queue = NULL;
3050	int work_done = 0, work_done_per_q = 0;
3051	int i, budget_per_q = 0;
3052	unsigned long rstat_rxf;
3053	int num_act_queues;
3054
3055	/* Clear IEVENT, so interrupts aren't called again
3056	 * because of the packets that have already arrived
3057	 */
3058	gfar_write(&regs->ievent, IEVENT_RX_MASK);
3059
3060	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
3061
3062	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
3063	if (num_act_queues)
3064		budget_per_q = budget/num_act_queues;
3065
3066	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
3067		/* skip queue if not active */
3068		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
3069			continue;
3070
3071		rx_queue = priv->rx_queue[i];
3072		work_done_per_q =
3073			gfar_clean_rx_ring(rx_queue, budget_per_q);
3074		work_done += work_done_per_q;
3075
3076		/* finished processing this queue */
3077		if (work_done_per_q < budget_per_q) {
3078			/* clear active queue hw indication */
3079			gfar_write(&regs->rstat,
3080				   RSTAT_CLEAR_RXF0 >> i);
3081			num_act_queues--;
3082
3083			if (!num_act_queues)
3084				break;
3085		}
3086	}
3087
3088	if (!num_act_queues) {
3089		u32 imask;
3090		napi_complete(napi);
3091
3092		/* Clear the halt bit in RSTAT */
3093		gfar_write(&regs->rstat, gfargrp->rstat);
3094
3095		spin_lock_irq(&gfargrp->grplock);
3096		imask = gfar_read(&regs->imask);
3097		imask |= IMASK_RX_DEFAULT;
3098		gfar_write(&regs->imask, imask);
3099		spin_unlock_irq(&gfargrp->grplock);
3100	}
3101
3102	return work_done;
3103}
3104
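/* NAPI Tx poll for multi-queue groups: clean every Tx queue of the
 * group with outstanding frames; re-enable the Tx interrupts only if
 * none of them had work left.
 */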
3105static int gfar_poll_tx(struct napi_struct *napi, int budget)
3106{
3107	struct gfar_priv_grp *gfargrp =
3108		container_of(napi, struct gfar_priv_grp, napi_tx);
3109	struct gfar_private *priv = gfargrp->priv;
3110	struct gfar __iomem *regs = gfargrp->regs;
3111	struct gfar_priv_tx_q *tx_queue = NULL;
3112	int has_tx_work = 0;
3113	int i;
3114
3115	/* Clear IEVENT, so interrupts aren't called again
3116	 * because of the packets that have already arrived
3117	 */
3118	gfar_write(&regs->ievent, IEVENT_TX_MASK);
3119
3120	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
3121		tx_queue = priv->tx_queue[i];
3122		/* run Tx cleanup to completion */
3123		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3124			gfar_clean_tx_ring(tx_queue);
3125			has_tx_work = 1;
3126		}
3127	}
3128
3129	if (!has_tx_work) {
3130		u32 imask;
3131		napi_complete(napi);
3132
3133		spin_lock_irq(&gfargrp->grplock);
3134		imask = gfar_read(&regs->imask);
3135		imask |= IMASK_TX_DEFAULT;
3136		gfar_write(&regs->imask, imask);
3137		spin_unlock_irq(&gfargrp->grplock);
3138	}
3139
3140	return 0;
3141}
3142
3143
3144#ifdef CONFIG_NET_POLL_CONTROLLER
3145/* Polling 'interrupt' - used by things like netconsole to send skbs
3146 * without having to re-enable interrupts. It's not called while
3147 * the interrupt routine is executing.
3148 */
3149static void gfar_netpoll(struct net_device *dev)
3150{
3151	struct gfar_private *priv = netdev_priv(dev);
3152	int i;
3153
3154	/* If the device has multiple interrupts, run tx/rx */
3155	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3156		for (i = 0; i < priv->num_grps; i++) {
3157			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3158
3159			disable_irq(gfar_irq(grp, TX)->irq);
3160			disable_irq(gfar_irq(grp, RX)->irq);
3161			disable_irq(gfar_irq(grp, ER)->irq);
3162			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3163			enable_irq(gfar_irq(grp, ER)->irq);
3164			enable_irq(gfar_irq(grp, RX)->irq);
3165			enable_irq(gfar_irq(grp, TX)->irq);
3166		}
3167	} else {
3168		for (i = 0; i < priv->num_grps; i++) {
3169			struct gfar_priv_grp *grp = &priv->gfargrp[i];
3170
3171			disable_irq(gfar_irq(grp, TX)->irq);
3172			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
3173			enable_irq(gfar_irq(grp, TX)->irq);
3174		}
3175	}
3176}
3177#endif
3178
3179/* The interrupt handler for devices with one interrupt */
3180static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3181{
3182	struct gfar_priv_grp *gfargrp = grp_id;
3183
3184	/* Save ievent for future reference */
3185	u32 events = gfar_read(&gfargrp->regs->ievent);
3186
3187	/* Check for reception */
3188	if (events & IEVENT_RX_MASK)
3189		gfar_receive(irq, grp_id);
3190
3191	/* Check for transmit completion */
3192	if (events & IEVENT_TX_MASK)
3193		gfar_transmit(irq, grp_id);
3194
3195	/* Check for errors */
3196	if (events & IEVENT_ERR_MASK)
3197		gfar_error(irq, grp_id);
3198
3199	return IRQ_HANDLED;
3200}
3201
3202/* Called every time the controller might need to be made
3203 * aware of new link state.  The PHY code conveys this
3204 * information through variables in the phydev structure, and this
3205 * function converts those variables into the appropriate
3206 * register values, and can bring down the device if needed.
3207 */
3208static void adjust_link(struct net_device *dev)
3209{
3210	struct gfar_private *priv = netdev_priv(dev);
3211	struct phy_device *phydev = priv->phydev;
3212
3213	if (unlikely(phydev->link != priv->oldlink ||
3214		     (phydev->link && (phydev->duplex != priv->oldduplex ||
3215				       phydev->speed != priv->oldspeed))))
3216		gfar_update_link_state(priv);
3217}
3218
3219/* Update the hash table based on the current list of multicast
3220 * addresses we subscribe to.  Also, change the promiscuity of
3221 * the device based on the flags (this function is called
3222 * whenever dev->flags is changed).
3223 */
3224static void gfar_set_multi(struct net_device *dev)
3225{
3226	struct netdev_hw_addr *ha;
3227	struct gfar_private *priv = netdev_priv(dev);
3228	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3229	u32 tempval;
3230
3231	if (dev->flags & IFF_PROMISC) {
3232		/* Set RCTRL to PROM */
3233		tempval = gfar_read(&regs->rctrl);
3234		tempval |= RCTRL_PROM;
3235		gfar_write(&regs->rctrl, tempval);
3236	} else {
3237		/* Set RCTRL to not PROM */
3238		tempval = gfar_read(&regs->rctrl);
3239		tempval &= ~(RCTRL_PROM);
3240		gfar_write(&regs->rctrl, tempval);
3241	}
3242
3243	if (dev->flags & IFF_ALLMULTI) {
3244		/* Set the hash to rx all multicast frames */
3245		gfar_write(&regs->igaddr0, 0xffffffff);
3246		gfar_write(&regs->igaddr1, 0xffffffff);
3247		gfar_write(&regs->igaddr2, 0xffffffff);
3248		gfar_write(&regs->igaddr3, 0xffffffff);
3249		gfar_write(&regs->igaddr4, 0xffffffff);
3250		gfar_write(&regs->igaddr5, 0xffffffff);
3251		gfar_write(&regs->igaddr6, 0xffffffff);
3252		gfar_write(&regs->igaddr7, 0xffffffff);
3253		gfar_write(&regs->gaddr0, 0xffffffff);
3254		gfar_write(&regs->gaddr1, 0xffffffff);
3255		gfar_write(&regs->gaddr2, 0xffffffff);
3256		gfar_write(&regs->gaddr3, 0xffffffff);
3257		gfar_write(&regs->gaddr4, 0xffffffff);
3258		gfar_write(&regs->gaddr5, 0xffffffff);
3259		gfar_write(&regs->gaddr6, 0xffffffff);
3260		gfar_write(&regs->gaddr7, 0xffffffff);
3261	} else {
3262		int em_num;
3263		int idx;
3264
3265		/* zero out the hash */
3266		gfar_write(&regs->igaddr0, 0x0);
3267		gfar_write(&regs->igaddr1, 0x0);
3268		gfar_write(&regs->igaddr2, 0x0);
3269		gfar_write(&regs->igaddr3, 0x0);
3270		gfar_write(&regs->igaddr4, 0x0);
3271		gfar_write(&regs->igaddr5, 0x0);
3272		gfar_write(&regs->igaddr6, 0x0);
3273		gfar_write(&regs->igaddr7, 0x0);
3274		gfar_write(&regs->gaddr0, 0x0);
3275		gfar_write(&regs->gaddr1, 0x0);
3276		gfar_write(&regs->gaddr2, 0x0);
3277		gfar_write(&regs->gaddr3, 0x0);
3278		gfar_write(&regs->gaddr4, 0x0);
3279		gfar_write(&regs->gaddr5, 0x0);
3280		gfar_write(&regs->gaddr6, 0x0);
3281		gfar_write(&regs->gaddr7, 0x0);
3282
3283		/* If we have extended hash tables, we need to
3284		 * clear the exact match registers to prepare for
3285		 * setting them
3286		 */
3287		if (priv->extended_hash) {
3288			em_num = GFAR_EM_NUM + 1;
3289			gfar_clear_exact_match(dev);
3290			idx = 1;
3291		} else {
3292			idx = 0;
3293			em_num = 0;
3294		}
3295
3296		if (netdev_mc_empty(dev))
3297			return;
3298
3299		/* Parse the list, and set the appropriate bits */
3300		netdev_for_each_mc_addr(ha, dev) {
3301			if (idx < em_num) {
3302				gfar_set_mac_for_addr(dev, idx, ha->addr);
3303				idx++;
3304			} else
3305				gfar_set_hash_for_addr(dev, ha->addr);
3306		}
3307	}
3308}
3309
3310
3311/* Clears each of the exact match registers to zero, so they
3312 * don't interfere with normal reception
3313 */
3314static void gfar_clear_exact_match(struct net_device *dev)
3315{
3316	int idx;
3317	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3318
3319	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3320		gfar_set_mac_for_addr(dev, idx, zero_arr);
3321}
3322
3323/* Set the appropriate hash bit for the given addr */
3324/* The algorithm works like so:
3325 * 1) Take the Destination Address (ie the multicast address), and
3326 * do a CRC on it (little endian), and reverse the bits of the
3327 * result.
3328 * 2) Use the 8 most significant bits as a hash into a 256-entry
3329 * table.  The table is controlled through 8 32-bit registers:
3330 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
3331 * entry 255.  This means that the 3 most significant bits of the
3332 * hash index indicate which gaddr register to use, and the 5 other bits
3333 * indicate which bit (assuming an IBM numbering scheme, which
3334 * for PowerPC (tm) is usually the case) in the register holds
3335 * the entry.
3336 */
3337static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3338{
3339	u32 tempval;
3340	struct gfar_private *priv = netdev_priv(dev);
3341	u32 result = ether_crc(ETH_ALEN, addr);
3342	int width = priv->hash_width;
3343	u8 whichbit = (result >> (32 - width)) & 0x1f;
3344	u8 whichreg = result >> (32 - width + 5);
3345	u32 value = (1 << (31-whichbit));
3346
3347	tempval = gfar_read(priv->hash_regs[whichreg]);
3348	tempval |= value;
3349	gfar_write(priv->hash_regs[whichreg], tempval);
3350}
3351
3352
3353/* There are multiple MAC Address register pairs on some controllers
3354 * This function sets the numth pair to a given address
3355 */
3356static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3357				  const u8 *addr)
3358{
3359	struct gfar_private *priv = netdev_priv(dev);
3360	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3361	u32 tempval;
3362	u32 __iomem *macptr = &regs->macstnaddr1;
3363
3364	macptr += num*2;
3365
3366	/* For a station address of 0x12345678ABCD in transmission
3367	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
3368	 * MACnADDR2 is set to 0x34120000.
3369	 */
3370	tempval = (addr[5] << 24) | (addr[4] << 16) |
3371		  (addr[3] << 8)  |  addr[2];
3372
3373	gfar_write(macptr, tempval);
3374
3375	tempval = (addr[1] << 24) | (addr[0] << 16);
3376
3377	gfar_write(macptr+1, tempval);
3378}
3379
3380/* GFAR error interrupt handler */
3381static irqreturn_t gfar_error(int irq, void *grp_id)
3382{
3383	struct gfar_priv_grp *gfargrp = grp_id;
3384	struct gfar __iomem *regs = gfargrp->regs;
3385	struct gfar_private *priv = gfargrp->priv;
3386	struct net_device *dev = priv->ndev;
3387
3388	/* Save ievent for future reference */
3389	u32 events = gfar_read(&regs->ievent);
3390
3391	/* Clear IEVENT */
3392	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3393
3394	/* Magic Packet is not an error. */
3395	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3396	    (events & IEVENT_MAG))
3397		events &= ~IEVENT_MAG;
3398
3399	/* Log the error details if Rx/Tx error messaging is enabled */
3400	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3401		netdev_dbg(dev,
3402			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3403			   events, gfar_read(&regs->imask));
3404
3405	/* Update the error counters */
3406	if (events & IEVENT_TXE) {
3407		dev->stats.tx_errors++;
3408
3409		if (events & IEVENT_LC)
3410			dev->stats.tx_window_errors++;
3411		if (events & IEVENT_CRL)
3412			dev->stats.tx_aborted_errors++;
3413		if (events & IEVENT_XFUN) {
3414			unsigned long flags;
3415
3416			netif_dbg(priv, tx_err, dev,
3417				  "TX FIFO underrun, packet dropped\n");
3418			dev->stats.tx_dropped++;
3419			atomic64_inc(&priv->extra_stats.tx_underrun);
3420
3421			local_irq_save(flags);
3422			lock_tx_qs(priv);
3423
3424			/* Reactivate the Tx Queues */
3425			gfar_write(&regs->tstat, gfargrp->tstat);
3426
3427			unlock_tx_qs(priv);
3428			local_irq_restore(flags);
3429		}
3430		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
3431	}
3432	if (events & IEVENT_BSY) {
3433		dev->stats.rx_errors++;
3434		atomic64_inc(&priv->extra_stats.rx_bsy);
3435
3436		gfar_receive(irq, grp_id);
3437
3438		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
3439			  gfar_read(&regs->rstat));
3440	}
3441	if (events & IEVENT_BABR) {
3442		dev->stats.rx_errors++;
3443		atomic64_inc(&priv->extra_stats.rx_babr);
3444
3445		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
3446	}
3447	if (events & IEVENT_EBERR) {
3448		atomic64_inc(&priv->extra_stats.eberr);
3449		netif_dbg(priv, rx_err, dev, "bus error\n");
3450	}
3451	if (events & IEVENT_RXC)
3452		netif_dbg(priv, rx_status, dev, "control frame\n");
3453
3454	if (events & IEVENT_BABT) {
3455		atomic64_inc(&priv->extra_stats.tx_babt);
3456		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
3457	}
3458	return IRQ_HANDLED;
3459}
3460
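/* Resolve the MACCFG1 Tx/Rx flow control bits, either from the manual
 * pause settings or from the outcome of pause autonegotiation with the
 * link partner (flow control is only used in full duplex).
 */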
3461static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3462{
3463	struct phy_device *phydev = priv->phydev;
3464	u32 val = 0;
3465
3466	if (!phydev->duplex)
3467		return val;
3468
3469	if (!priv->pause_aneg_en) {
3470		if (priv->tx_pause_en)
3471			val |= MACCFG1_TX_FLOW;
3472		if (priv->rx_pause_en)
3473			val |= MACCFG1_RX_FLOW;
3474	} else {
3475		u16 lcl_adv, rmt_adv;
3476		u8 flowctrl;
3477		/* get link partner capabilities */
3478		rmt_adv = 0;
3479		if (phydev->pause)
3480			rmt_adv = LPA_PAUSE_CAP;
3481		if (phydev->asym_pause)
3482			rmt_adv |= LPA_PAUSE_ASYM;
3483
3484		lcl_adv = 0;
3485		if (phydev->advertising & ADVERTISED_Pause)
3486			lcl_adv |= ADVERTISE_PAUSE_CAP;
3487		if (phydev->advertising & ADVERTISED_Asym_Pause)
3488			lcl_adv |= ADVERTISE_PAUSE_ASYM;
3489
3490		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3491		if (flowctrl & FLOW_CTRL_TX)
3492			val |= MACCFG1_TX_FLOW;
3493		if (flowctrl & FLOW_CTRL_RX)
3494			val |= MACCFG1_RX_FLOW;
3495	}
3496
3497	return val;
3498}
3499
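/* Program the MAC (speed, duplex, flow control) to match the PHY's
 * current link state, and seed the last-free-buffer pointers when Tx
 * flow control becomes active.
 */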
3500static noinline void gfar_update_link_state(struct gfar_private *priv)
3501{
3502	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3503	struct phy_device *phydev = priv->phydev;
3504	struct gfar_priv_rx_q *rx_queue = NULL;
3505	int i;
3506	struct rxbd8 *bdp;
3507
3508	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3509		return;
3510
3511	if (phydev->link) {
3512		u32 tempval1 = gfar_read(&regs->maccfg1);
3513		u32 tempval = gfar_read(&regs->maccfg2);
3514		u32 ecntrl = gfar_read(&regs->ecntrl);
3515		u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
3516
3517		if (phydev->duplex != priv->oldduplex) {
3518			if (!(phydev->duplex))
3519				tempval &= ~(MACCFG2_FULL_DUPLEX);
3520			else
3521				tempval |= MACCFG2_FULL_DUPLEX;
3522
3523			priv->oldduplex = phydev->duplex;
3524		}
3525
3526		if (phydev->speed != priv->oldspeed) {
3527			switch (phydev->speed) {
3528			case 1000:
3529				tempval =
3530				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3531
3532				ecntrl &= ~(ECNTRL_R100);
3533				break;
3534			case 100:
3535			case 10:
3536				tempval =
3537				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3538
3539				/* Reduced mode distinguishes
3540				 * between 10 and 100
3541				 */
3542				if (phydev->speed == SPEED_100)
3543					ecntrl |= ECNTRL_R100;
3544				else
3545					ecntrl &= ~(ECNTRL_R100);
3546				break;
3547			default:
3548				netif_warn(priv, link, priv->ndev,
3549					   "Ack!  Speed (%d) is not 10/100/1000!\n",
3550					   phydev->speed);
3551				break;
3552			}
3553
3554			priv->oldspeed = phydev->speed;
3555		}
3556
3557		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3558		tempval1 |= gfar_get_flowctrl_cfg(priv);
3559
3560		/* Turn last free buffer recording on */
3561		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
3562			for (i = 0; i < priv->num_rx_queues; i++) {
3563				rx_queue = priv->rx_queue[i];
3564				bdp = rx_queue->cur_rx;
3565				/* skip to previous bd */
3566				bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
3567					      rx_queue->rx_bd_base,
3568					      rx_queue->rx_ring_size);
3569
3570				if (rx_queue->rfbptr)
3571					gfar_write(rx_queue->rfbptr, (u32)bdp);
3572			}
3573
3574			priv->tx_actual_en = 1;
3575		}
3576
3577		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
3578			priv->tx_actual_en = 0;
3579
3580		gfar_write(&regs->maccfg1, tempval1);
3581		gfar_write(&regs->maccfg2, tempval);
3582		gfar_write(&regs->ecntrl, ecntrl);
3583
3584		if (!priv->oldlink)
3585			priv->oldlink = 1;
3586
3587	} else if (priv->oldlink) {
3588		priv->oldlink = 0;
3589		priv->oldspeed = 0;
3590		priv->oldduplex = -1;
3591	}
3592
3593	if (netif_msg_link(priv))
3594		phy_print_status(phydev);
3595}
3596
3597static const struct of_device_id gfar_match[] =
3598{
3599	{
3600		.type = "network",
3601		.compatible = "gianfar",
3602	},
3603	{
3604		.compatible = "fsl,etsec2",
3605	},
3606	{},
3607};
3608MODULE_DEVICE_TABLE(of, gfar_match);
3609
3610/* Structure for a device driver */
3611static struct platform_driver gfar_driver = {
3612	.driver = {
3613		.name = "fsl-gianfar",
3614		.pm = GFAR_PM_OPS,
3615		.of_match_table = gfar_match,
3616	},
3617	.probe = gfar_probe,
3618	.remove = gfar_remove,
3619};
3620
3621module_platform_driver(gfar_driver);
3622