/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}

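/*
 * Push skb->data forward to the requested power-of-two boundary by
 * reserving the misalignment as headroom.  Used for receive buffers
 * (ENET_RX_ALIGN) and for the MPC5121 4-byte TX alignment workaround.
 */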
static void skb_align(struct sk_buff *skb, int align)
{
	int off = ((unsigned long)skb->data) & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}

/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	if (budget <= 0)
		return received;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi */
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

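			/*
			 * Copy-break: frames no longer than rx_copybreak are
			 * copied into a small new skb (reserving 2 bytes so
			 * the IP header ends up aligned) and the original
			 * full-size buffer is put back into the ring.  Larger
			 * frames are passed up directly and replaced by a
			 * freshly allocated, properly aligned buffer.
			 */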
			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = netdev_alloc_skb(dev, pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					swap(skb, skbn);
				}
			} else {
				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		napi_complete(napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}

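/*
 * NAPI transmit completion: walk the dirty part of the TX ring, record
 * error statistics, unmap the buffers, free the skbs and wake the queue
 * once enough descriptors have been reclaimed.  A MAC restart is
 * requested for late-collision, retry-limit and underrun errors.
 */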
static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
						   napi_tx);
	struct net_device *dev = fep->ndev;
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;
	int has_tx_work = 0;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	/* clear TX status bits for napi */
	(*fep->ops->napi_clear_tx_event)(dev);

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY) {
			dev_warn(fep->dev,
				 "HEY! Enet xmit interrupt and TX_READY.\n");
		}

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		if (fep->mapped_as_page[dirtyidx])
			dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
				       CBDR_DATLEN(bdp), DMA_TO_DEVICE);
		else
			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 CBDR_DATLEN(bdp), DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		if (skb) {
			dev_kfree_skb(skb);
			fep->tx_skbuff[dirtyidx] = NULL;
		}

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (++fep->tx_free >= MAX_SKB_FRAGS)
			do_wake = 1;
		has_tx_work = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	if (!has_tx_work) {
		napi_complete(napi);
		(*fep->ops->napi_enable_tx)(dev);
	}

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);

	if (has_tx_work)
		return budget;
	return 0;
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
		nr++;

		int_clr_events = int_events;
		int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			napi_ok = napi_schedule_prep(&fep->napi);

			(*fep->ops->napi_disable_rx)(dev);
			(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

			/* NOTE: it is possible for FCCs in NAPI mode    */
			/* to submit a spurious interrupt while in poll  */
			if (napi_ok)
				__napi_schedule(&fep->napi);
		}

		if (int_events & fep->ev_tx) {
			napi_ok = napi_schedule_prep(&fep->napi_tx);

			(*fep->ops->napi_disable_tx)(dev);
			(*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);

			/* NOTE: it is possible for FCCs in NAPI mode    */
			/* to submit a spurious interrupt while in poll  */
			if (napi_ok)
				__napi_schedule(&fep->napi_tx);
		}
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}

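/*
 * (Re)initialize the buffer descriptor rings: allocate, align and map one
 * receive skb per RX descriptor and mark all TX descriptors free.  Called
 * from fs_enet_open() and from the MAC-specific restart paths.
 */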
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
		if (skb == NULL)
			break;

		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * If we failed, fill up the remainder.
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}

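/*
 * Undo fs_init_bds(): unmap and free any skbs still attached to the
 * transmit and receive rings.
 */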
void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t __iomem *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
			L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;

		dev_kfree_skb(skb);
	}
}

/**********************************************************************************/

#ifdef CONFIG_FS_ENET_MPC5121_FEC
/*
 * The MPC5121 FEC requires 4-byte alignment for the TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct sk_buff *new_skb;

	if (skb_linearize(skb))
		return NULL;

	/* Alloc new skb */
	new_skb = netdev_alloc_skb(dev, skb->len + 4);
	if (!new_skb)
		return NULL;

	/* Make sure new skb is properly aligned */
	skb_align(new_skb, 4);

	/* Copy data to new skb ... */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);

	/* ... and free the old one */
	dev_kfree_skb_any(skb);

	return new_skb;
}
#endif

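/*
 * Queue a frame for transmission.  The linear part of the skb is mapped
 * into the current descriptor and each page fragment into the following
 * ones; only the final descriptor of the frame carries the LAST/INTR
 * bits, and the MAC is kicked once every descriptor has been marked
 * READY.
 */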
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	int curidx;
	u16 sc;
	int nr_frags;
	skb_frag_t *frag;
	int len;
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	int is_aligned = 1;
	int i;

	if (!IS_ALIGNED((unsigned long)skb->data, 4)) {
		is_aligned = 0;
	} else {
		nr_frags = skb_shinfo(skb)->nr_frags;
		frag = skb_shinfo(skb)->frags;
		for (i = 0; i < nr_frags; i++, frag++) {
			if (!IS_ALIGNED(frag->page_offset, 4)) {
				is_aligned = 0;
				break;
			}
		}
	}

	if (!is_aligned) {
		skb = tx_skb_align_workaround(dev, skb);
		if (!skb) {
			/*
			 * We have lost this packet due to a memory
			 * allocation error in tx_skb_align_workaround().
			 * Hopefully the original skb is still valid, so
			 * try to transmit it later.
			 */
			return NETDEV_TX_BUSY;
		}
	}
#endif

	spin_lock(&fep->tx_lock);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	nr_frags = skb_shinfo(skb)->nr_frags;
	if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock(&fep->tx_lock);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		dev_warn(fep->dev, "tx queue full!\n");
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;

	len = skb->len;
	fep->stats.tx_bytes += len;
	if (nr_frags)
		len -= skb->data_len;
	fep->tx_free -= nr_frags + 1;
	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
				skb->data, len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, len);

	fep->mapped_as_page[curidx] = 0;
	frag = skb_shinfo(skb)->frags;
	while (nr_frags) {
		CBDC_SC(bdp,
			BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
			BD_ENET_TX_TC);
		CBDS_SC(bdp, BD_ENET_TX_READY);

		if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
			bdp++, curidx++;
		else
			bdp = fep->tx_bd_base, curidx = 0;

		len = skb_frag_size(frag);
		CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
						   DMA_TO_DEVICE));
		CBDW_DATLEN(bdp, len);

		fep->tx_skbuff[curidx] = NULL;
		fep->mapped_as_page[curidx] = 1;

		frag++;
		nr_frags--;
	}

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* Note that while the FEC does not have this bit, it marks it as
	 * available for software use, so setting it here is harmless.
	 * Yay for hw reuse :) */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;
	CBDC_SC(bdp, BD_ENET_TX_STATS);
	CBDS_SC(bdp, sc);

	/* Save skb pointer. */
	fep->tx_skbuff[curidx] = skb;

	/* If this was the last BD in the ring, start at the beginning again. */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		bdp++;
	else
		bdp = fep->tx_bd_base;
	fep->cur_tx = bdp;

	if (fep->tx_free < MAX_SKB_FRAGS)
		netif_stop_queue(dev);

	skb_tx_timestamp(skb);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock(&fep->tx_lock);

	return NETDEV_TX_OK;
}

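/*
 * ndo_tx_timeout handler: account the error and, if the interface is up,
 * stop and restart both the PHY and the MAC, then wake the queue if
 * there is room in the transmit ring again.
 */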
static void fs_timeout(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int wake = 0;

	fep->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		phy_stop(fep->phydev);
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
		phy_start(fep->phydev);
	}

	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}

/*-----------------------------------------------------------------------------
 *  generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phydev;
	int new_state = 0;

	if (phydev->link) {
		/* adjust to duplex mode */
		if (phydev->duplex != fep->oldduplex) {
			new_state = 1;
			fep->oldduplex = phydev->duplex;
		}

		if (phydev->speed != fep->oldspeed) {
			new_state = 1;
			fep->oldspeed = phydev->speed;
		}

		if (!fep->oldlink) {
			new_state = 1;
			fep->oldlink = 1;
		}

		if (new_state)
			fep->ops->restart(dev);
	} else if (fep->oldlink) {
		new_state = 1;
		fep->oldlink = 0;
		fep->oldspeed = 0;
		fep->oldduplex = -1;
	}

	if (new_state && netif_msg_link(fep))
		phy_print_status(phydev);
}

static void fs_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	if (fep->ops->adjust_link)
		fep->ops->adjust_link(dev);
	else
		generic_adjust_link(dev);

	spin_unlock_irqrestore(&fep->lock, flags);
}

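/*
 * Attach to the PHY referenced by fpi->phy_node (the "phy-handle"
 * property, or the MAC node itself for a fixed link) and register
 * fs_adjust_link() as the link-state callback.  MII vs. RMII is chosen
 * from fpi->use_rmii.
 */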
static int fs_init_phy(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev;
	phy_interface_t iface;

	fep->oldlink = 0;
	fep->oldspeed = 0;
	fep->oldduplex = -1;

	iface = fep->fpi->use_rmii ?
		PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII;

	phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
				iface);
	if (!phydev) {
		dev_err(&dev->dev, "Could not attach to PHY\n");
		return -ENODEV;
	}

	fep->phydev = phydev;

	return 0;
}

static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	/*
	 * Initialize fep->cur_rx and the buffer descriptor rings;
	 * not doing this would cause a crash in fs_enet_rx_napi.
	 */
	fs_init_bds(fep->ndev);

	napi_enable(&fep->napi);
	napi_enable(&fep->napi_tx);

	/* Install our interrupt handler. */
	r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
			"fs_enet-mac", dev);
	if (r != 0) {
		dev_err(fep->dev, "Could not allocate FS_ENET IRQ!\n");
		napi_disable(&fep->napi);
		napi_disable(&fep->napi_tx);
		return -EINVAL;
	}

	err = fs_init_phy(dev);
	if (err) {
		free_irq(fep->interrupt, dev);
		napi_disable(&fep->napi);
		napi_disable(&fep->napi_tx);
		return err;
	}
	phy_start(fep->phydev);

	netif_start_queue(dev);

	return 0;
}

static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	napi_disable(&fep->napi);
	napi_disable(&fep->napi_tx);
	phy_stop(fep->phydev);

	spin_lock_irqsave(&fep->lock, flags);
	spin_lock(&fep->tx_lock);
	(*fep->ops->stop)(dev);
	spin_unlock(&fep->tx_lock);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	phy_disconnect(fep->phydev);
	fep->phydev = NULL;
	free_irq(fep->interrupt, dev);

	return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!fep->phydev)
		return -ENODEV;

	return phy_ethtool_sset(fep->phydev, cmd);
}

static int fs_nway_reset(struct net_device *dev)
{
	return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.get_settings = fs_get_settings,
	.set_settings = fs_set_settings,
	.nway_reset = fs_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.get_regs = fs_get_regs,
	.get_ts_info = ethtool_op_get_ts_info,
};

static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_mii_ioctl(fep->phydev, rq, cmd);
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

/**************************************************************************************/

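/*
 * FEC-based MACs are not driven through a CPM command register, so the
 * probe path skips the "fsl,cpm-command" property lookup for them.
 */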
#ifdef CONFIG_FS_ENET_HAS_FEC
#define IS_FEC(match) ((match)->data == &fs_fec_ops)
#else
#define IS_FEC(match) 0
#endif

static const struct net_device_ops fs_enet_netdev_ops = {
	.ndo_open		= fs_enet_open,
	.ndo_stop		= fs_enet_close,
	.ndo_get_stats		= fs_enet_get_stats,
	.ndo_start_xmit		= fs_enet_start_xmit,
	.ndo_tx_timeout		= fs_timeout,
	.ndo_set_rx_mode	= fs_set_multicast_list,
	.ndo_do_ioctl		= fs_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fs_enet_netpoll,
#endif
};

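/*
 * A sketch of a device tree node this driver binds to.  The compatible
 * strings come from fs_enet_match[] below; the addresses, interrupt
 * specifier and command value here are hypothetical placeholders and
 * depend on the board and the MAC type:
 *
 *	ethernet@11300 {
 *		compatible = "fsl,cpm2-fcc-enet";
 *		reg = <0x11300 0x20 0x8400 0x100 0x11390 0x1>;
 *		interrupts = <32 8>;
 *		fsl,cpm-command = <0x16200300>;
 *		phy-handle = <&phy0>;
 *	};
 */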
static const struct of_device_id fs_enet_match[];
static int fs_enet_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	struct net_device *ndev;
	struct fs_enet_private *fep;
	struct fs_platform_info *fpi;
	const u32 *data;
	struct clk *clk;
	int err;
	const u8 *mac_addr;
	const char *phy_connection_type;
	int privsize, len, ret = -ENODEV;

	match = of_match_device(fs_enet_match, &ofdev->dev);
	if (!match)
		return -EINVAL;

	fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
	if (!fpi)
		return -ENOMEM;

	if (!IS_FEC(match)) {
		data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
		if (!data || len != 4)
			goto out_free_fpi;

		fpi->cp_command = *data;
	}

	fpi->rx_ring = 32;
	fpi->tx_ring = 64;
	fpi->rx_copybreak = 240;
	fpi->napi_weight = 17;
	fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
	if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
		err = of_phy_register_fixed_link(ofdev->dev.of_node);
		if (err)
			goto out_free_fpi;

		/* In the case of a fixed PHY, the DT node associated
		 * with the PHY is the Ethernet MAC DT node.
		 */
		fpi->phy_node = of_node_get(ofdev->dev.of_node);
	}

	if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
		phy_connection_type = of_get_property(ofdev->dev.of_node,
						"phy-connection-type", NULL);
		if (phy_connection_type && !strcmp("rmii", phy_connection_type))
			fpi->use_rmii = 1;
	}

	/* make clock lookup non-fatal (the driver is shared among platforms),
	 * but require enable to succeed when a clock was specified/found,
	 * and keep a reference to the clock upon successful acquisition
	 */
	clk = devm_clk_get(&ofdev->dev, "per");
	if (!IS_ERR(clk)) {
		err = clk_prepare_enable(clk);
		if (err) {
			ret = err;
			goto out_free_fpi;
		}
		fpi->clk_per = clk;
	}

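	/*
	 * The private area allocated by alloc_etherdev() below is sized to
	 * also hold the rx_skbuff/tx_skbuff pointer arrays and the
	 * per-descriptor mapped_as_page flags, which are carved out right
	 * behind struct fs_enet_private further down.
	 */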
	privsize = sizeof(*fep) +
		   sizeof(struct sk_buff **) *
		     (fpi->rx_ring + fpi->tx_ring) +
		   sizeof(char) * fpi->tx_ring;

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		ret = -ENOMEM;
		goto out_put;
	}

	SET_NETDEV_DEV(ndev, &ofdev->dev);
	platform_set_drvdata(ofdev, ndev);

	fep = netdev_priv(ndev);
	fep->dev = &ofdev->dev;
	fep->ndev = ndev;
	fep->fpi = fpi;
	fep->ops = match->data;

	ret = fep->ops->setup_data(ndev);
	if (ret)
		goto out_free_dev;

	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
	fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
				       fpi->tx_ring);

	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	mac_addr = of_get_mac_address(ofdev->dev.of_node);
	if (mac_addr)
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);

	ret = fep->ops->allocate_bd(ndev);
	if (ret)
		goto out_cleanup_data;

	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	ndev->netdev_ops = &fs_enet_netdev_ops;
	ndev->watchdog_timeo = 2 * HZ;
	netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
	netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);

	ndev->ethtool_ops = &fs_ethtool_ops;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	ndev->features |= NETIF_F_SG;

	ret = register_netdev(ndev);
	if (ret)
		goto out_free_bd;

	pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);

	return 0;

out_free_bd:
	fep->ops->free_bd(ndev);
out_cleanup_data:
	fep->ops->cleanup_data(ndev);
out_free_dev:
	free_netdev(ndev);
out_put:
	of_node_put(fpi->phy_node);
	if (fpi->clk_per)
		clk_disable_unprepare(fpi->clk_per);
out_free_fpi:
	kfree(fpi);
	return ret;
}

static int fs_enet_remove(struct platform_device *ofdev)
{
	struct net_device *ndev = platform_get_drvdata(ofdev);
	struct fs_enet_private *fep = netdev_priv(ndev);

	unregister_netdev(ndev);

	fep->ops->free_bd(ndev);
	fep->ops->cleanup_data(ndev);
	dev_set_drvdata(fep->dev, NULL);
	of_node_put(fep->fpi->phy_node);
	if (fep->fpi->clk_per)
		clk_disable_unprepare(fep->fpi->clk_per);
	free_netdev(ndev);
	return 0;
}

static const struct of_device_id fs_enet_match[] = {
#ifdef CONFIG_FS_ENET_HAS_SCC
	{
		.compatible = "fsl,cpm1-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
	{
		.compatible = "fsl,cpm2-scc-enet",
		.data = (void *)&fs_scc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FCC
	{
		.compatible = "fsl,cpm2-fcc-enet",
		.data = (void *)&fs_fcc_ops,
	},
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	{
		.compatible = "fsl,mpc5121-fec",
		.data = (void *)&fs_fec_ops,
	},
	{
		.compatible = "fsl,mpc5125-fec",
		.data = (void *)&fs_fec_ops,
	},
#else
	{
		.compatible = "fsl,pq1-fec-enet",
		.data = (void *)&fs_fec_ops,
	},
#endif
#endif
	{}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);

static struct platform_driver fs_enet_driver = {
	.driver = {
		.name = "fs_enet",
		.of_match_table = fs_enet_match,
	},
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
};

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	fs_enet_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

module_platform_driver(fs_enet_driver);
