/*
 * Copyright (C) 2006, 2007 Eugene Konev
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/atomic.h>

#include <asm/mach-ar7/ar7.h>

MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cpmac");

static int debug_level = 8;
static int dumb_switch;

/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
module_param(debug_level, int, 0444);
module_param(dumb_switch, int, 0444);

MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");

#define CPMAC_VERSION "0.5.2"
/* frame size + 802.1q tag + FCS size */
#define CPMAC_SKB_SIZE		(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define CPMAC_QUEUES	8
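/* Note on CPMAC_QUEUES: the MAC exposes eight TX DMA channels, and this
 * driver maps each multiqueue TX queue onto its own channel with a single
 * descriptor per channel (see cpmac_start_xmit()), so the constant serves
 * as both the channel count and the number of TX descriptors.
 */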

/* Ethernet registers */
#define CPMAC_TX_CONTROL		0x0004
#define CPMAC_TX_TEARDOWN		0x0008
#define CPMAC_RX_CONTROL		0x0014
#define CPMAC_RX_TEARDOWN		0x0018
#define CPMAC_MBP			0x0100
#define MBP_RXPASSCRC			0x40000000
#define MBP_RXQOS			0x20000000
#define MBP_RXNOCHAIN			0x10000000
#define MBP_RXCMF			0x01000000
#define MBP_RXSHORT			0x00800000
#define MBP_RXCEF			0x00400000
#define MBP_RXPROMISC			0x00200000
#define MBP_PROMISCCHAN(channel)	(((channel) & 0x7) << 16)
#define MBP_RXBCAST			0x00002000
#define MBP_BCASTCHAN(channel)		(((channel) & 0x7) << 8)
#define MBP_RXMCAST			0x00000020
#define MBP_MCASTCHAN(channel)		((channel) & 0x7)
#define CPMAC_UNICAST_ENABLE		0x0104
#define CPMAC_UNICAST_CLEAR		0x0108
#define CPMAC_MAX_LENGTH		0x010c
#define CPMAC_BUFFER_OFFSET		0x0110
#define CPMAC_MAC_CONTROL		0x0160
#define MAC_TXPTYPE			0x00000200
#define MAC_TXPACE			0x00000040
#define MAC_MII				0x00000020
#define MAC_TXFLOW			0x00000010
#define MAC_RXFLOW			0x00000008
#define MAC_MTEST			0x00000004
#define MAC_LOOPBACK			0x00000002
#define MAC_FDX				0x00000001
#define CPMAC_MAC_STATUS		0x0164
#define MAC_STATUS_QOS			0x00000004
#define MAC_STATUS_RXFLOW		0x00000002
#define MAC_STATUS_TXFLOW		0x00000001
#define CPMAC_TX_INT_ENABLE		0x0178
#define CPMAC_TX_INT_CLEAR		0x017c
#define CPMAC_MAC_INT_VECTOR		0x0180
#define MAC_INT_STATUS			0x00080000
#define MAC_INT_HOST			0x00040000
#define MAC_INT_RX			0x00020000
#define MAC_INT_TX			0x00010000
#define CPMAC_MAC_EOI_VECTOR		0x0184
#define CPMAC_RX_INT_ENABLE		0x0198
#define CPMAC_RX_INT_CLEAR		0x019c
#define CPMAC_MAC_INT_ENABLE		0x01a8
#define CPMAC_MAC_INT_CLEAR		0x01ac
#define CPMAC_MAC_ADDR_LO(channel)	(0x01b0 + (channel) * 4)
#define CPMAC_MAC_ADDR_MID		0x01d0
#define CPMAC_MAC_ADDR_HI		0x01d4
#define CPMAC_MAC_HASH_LO		0x01d8
#define CPMAC_MAC_HASH_HI		0x01dc
#define CPMAC_TX_PTR(channel)		(0x0600 + (channel) * 4)
#define CPMAC_RX_PTR(channel)		(0x0620 + (channel) * 4)
#define CPMAC_TX_ACK(channel)		(0x0640 + (channel) * 4)
#define CPMAC_RX_ACK(channel)		(0x0660 + (channel) * 4)
#define CPMAC_REG_END			0x0680

/* Rx/Tx statistics
 * TODO: use some of them to fill stats in cpmac_stats()
 */
#define CPMAC_STATS_RX_GOOD		0x0200
#define CPMAC_STATS_RX_BCAST		0x0204
#define CPMAC_STATS_RX_MCAST		0x0208
#define CPMAC_STATS_RX_PAUSE		0x020c
#define CPMAC_STATS_RX_CRC		0x0210
#define CPMAC_STATS_RX_ALIGN		0x0214
#define CPMAC_STATS_RX_OVER		0x0218
#define CPMAC_STATS_RX_JABBER		0x021c
#define CPMAC_STATS_RX_UNDER		0x0220
#define CPMAC_STATS_RX_FRAG		0x0224
#define CPMAC_STATS_RX_FILTER		0x0228
#define CPMAC_STATS_RX_QOSFILTER	0x022c
#define CPMAC_STATS_RX_OCTETS		0x0230

#define CPMAC_STATS_TX_GOOD		0x0234
#define CPMAC_STATS_TX_BCAST		0x0238
#define CPMAC_STATS_TX_MCAST		0x023c
#define CPMAC_STATS_TX_PAUSE		0x0240
#define CPMAC_STATS_TX_DEFER		0x0244
#define CPMAC_STATS_TX_COLLISION	0x0248
#define CPMAC_STATS_TX_SINGLECOLL	0x024c
#define CPMAC_STATS_TX_MULTICOLL	0x0250
#define CPMAC_STATS_TX_EXCESSCOLL	0x0254
#define CPMAC_STATS_TX_LATECOLL		0x0258
#define CPMAC_STATS_TX_UNDERRUN		0x025c
#define CPMAC_STATS_TX_CARRIERSENSE	0x0260
#define CPMAC_STATS_TX_OCTETS		0x0264

#define cpmac_read(base, reg)		(readl((void __iomem *)(base) + (reg)))
#define cpmac_write(base, reg, val)	(writel(val, (void __iomem *)(base) + \
						(reg)))

/* MDIO bus */
#define CPMAC_MDIO_VERSION		0x0000
#define CPMAC_MDIO_CONTROL		0x0004
#define MDIOC_IDLE			0x80000000
#define MDIOC_ENABLE			0x40000000
#define MDIOC_PREAMBLE			0x00100000
#define MDIOC_FAULT			0x00080000
#define MDIOC_FAULTDETECT		0x00040000
#define MDIOC_INTTEST			0x00020000
#define MDIOC_CLKDIV(div)		((div) & 0xff)
#define CPMAC_MDIO_ALIVE		0x0008
#define CPMAC_MDIO_LINK			0x000c
#define CPMAC_MDIO_ACCESS(channel)	(0x0080 + (channel) * 8)
#define MDIO_BUSY			0x80000000
#define MDIO_WRITE			0x40000000
#define MDIO_REG(reg)			(((reg) & 0x1f) << 21)
#define MDIO_PHY(phy)			(((phy) & 0x1f) << 16)
#define MDIO_DATA(data)			((data) & 0xffff)
#define CPMAC_MDIO_PHYSEL(channel)	(0x0084 + (channel) * 8)
#define PHYSEL_LINKSEL			0x00000040
#define PHYSEL_LINKINT			0x00000020

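/* Buffer descriptor. The first four 32-bit words (hw_next, hw_data and the
 * buflen/bufflags and datalen/dataflags pairs) mirror the layout the DMA
 * engine reads; the remaining members are software-only bookkeeping.
 * hw_next chains descriptors by their DMA ("mapping") addresses, while
 * next/prev chain the same descriptors by virtual address for the driver.
 */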
struct cpmac_desc {
	u32 hw_next;
	u32 hw_data;
	u16 buflen;
	u16 bufflags;
	u16 datalen;
	u16 dataflags;
#define CPMAC_SOP			0x8000
#define CPMAC_EOP			0x4000
#define CPMAC_OWN			0x2000
#define CPMAC_EOQ			0x1000
	struct sk_buff *skb;
	struct cpmac_desc *next;
	struct cpmac_desc *prev;
	dma_addr_t mapping;
	dma_addr_t data_mapping;
};

struct cpmac_priv {
	spinlock_t lock;
	spinlock_t rx_lock;
	struct cpmac_desc *rx_head;
	int ring_size;
	struct cpmac_desc *desc_ring;
	dma_addr_t dma_ring;
	void __iomem *regs;
	struct mii_bus *mii_bus;
	struct phy_device *phy;
	char phy_name[MII_BUS_ID_SIZE + 3];
	int oldlink, oldspeed, oldduplex;
	u32 msg_enable;
	struct net_device *dev;
	struct work_struct reset_work;
	struct platform_device *pdev;
	struct napi_struct napi;
	atomic_t reset_pending;
};

static irqreturn_t cpmac_irq(int, void *);
static void cpmac_hw_start(struct net_device *dev);
static void cpmac_hw_stop(struct net_device *dev);
static int cpmac_stop(struct net_device *dev);
static int cpmac_open(struct net_device *dev);

static void cpmac_dump_regs(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);

	for (i = 0; i < CPMAC_REG_END; i += 4) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk("%s: reg[%p]:", dev->name, priv->regs + i);
		}
		printk(" %08x", cpmac_read(priv->regs, i));
	}
	printk("\n");
}

static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
{
	int i;

	printk("%s: desc[%p]:", dev->name, desc);
	for (i = 0; i < sizeof(*desc) / 4; i++)
		printk(" %08x", ((u32 *)desc)[i]);
	printk("\n");
}

static void cpmac_dump_all_desc(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *dump = priv->rx_head;

	do {
		cpmac_dump_desc(dev, dump);
		dump = dump->next;
	} while (dump != priv->rx_head);
}

static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
{
	int i;

	printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
	for (i = 0; i < skb->len; i++) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk("%s: data[%p]:", dev->name, skb->data + i);
		}
		printk(" %02x", ((u8 *)skb->data)[i]);
	}
	printk("\n");
}

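/* MDIO access protocol, as implemented below: spin until the BUSY bit of
 * the channel-0 ACCESS register clears, then write a command word with
 * BUSY set (plus the WRITE bit and data for writes). For reads, spin again
 * until the hardware clears BUSY; the result sits in the low 16 data bits.
 */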
static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	u32 val;

	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
		    MDIO_PHY(phy_id));
	while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
		cpu_relax();

	return MDIO_DATA(val);
}

static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
			    int reg, u16 val)
{
	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
		    MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));

	return 0;
}

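/* A sketch of the clock-divider math, assuming the controller divides the
 * "cpmac" bus clock by (MDIOC_CLKDIV + 1), which the "- 1" below suggests:
 * the target MDIO clock is roughly 2.2 MHz, so e.g. a (hypothetical)
 * 125 MHz bus clock gives CLKDIV = 125000000 / 2200000 - 1 = 55 and an
 * actual MDIO clock of 125 MHz / 56 ~= 2.23 MHz.
 */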
static int cpmac_mdio_reset(struct mii_bus *bus)
{
	struct clk *cpmac_clk;

	cpmac_clk = clk_get(&bus->dev, "cpmac");
	if (IS_ERR(cpmac_clk)) {
		pr_err("unable to get cpmac clock\n");
		return -1;
	}
	ar7_device_reset(AR7_RESET_BIT_MDIO);
	cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
		    MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));

	return 0;
}

static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };

static struct mii_bus *cpmac_mii;

static void cpmac_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u8 tmp;
	u32 mbp, bit, hash[2] = { 0, };
	struct cpmac_priv *priv = netdev_priv(dev);

	mbp = cpmac_read(priv->regs, CPMAC_MBP);
	if (dev->flags & IFF_PROMISC) {
		cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
			    MBP_RXPROMISC);
	} else {
		cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
		if (dev->flags & IFF_ALLMULTI) {
			/* enable all multicast mode */
			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
		} else {
			/* cpmac uses some strange mac address hashing
			 * (not crc32)
			 */
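			/* Each address is folded into a 6-bit index by
			 * XOR-ing shifted address bytes together; the index
			 * selects one bit of the 64-bit HASH_HI:HASH_LO
			 * filter. For example, for the (hypothetical)
			 * address 00:00:00:00:00:01 the fold below yields
			 * bit = 1, i.e. bit 1 of HASH_LO.
			 */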
			netdev_for_each_mc_addr(ha, dev) {
				bit = 0;
				tmp = ha->addr[0];
				bit  ^= (tmp >> 2) ^ (tmp << 4);
				tmp = ha->addr[1];
				bit  ^= (tmp >> 4) ^ (tmp << 2);
				tmp = ha->addr[2];
				bit  ^= (tmp >> 6) ^ tmp;
				tmp = ha->addr[3];
				bit  ^= (tmp >> 2) ^ (tmp << 4);
				tmp = ha->addr[4];
				bit  ^= (tmp >> 4) ^ (tmp << 2);
				tmp = ha->addr[5];
				bit  ^= (tmp >> 6) ^ tmp;
				bit &= 0x3f;
				hash[bit / 32] |= 1 << (bit % 32);
			}

			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
		}
	}
}

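/* Receive one packet. The descriptor always keeps ownership of a buffer:
 * a replacement skb is allocated first, and only if that succeeds is the
 * filled skb handed up the stack; otherwise the old buffer is recycled in
 * place and the packet is counted as dropped.
 */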
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx: spurious interrupt\n");

		return NULL;
	}

	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		skb_checksum_none_assert(desc->skb);
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			netdev_dbg(priv->dev, "received packet:\n");
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev,
				    "low on skbs, dropping packet\n");

		priv->dev->stats.rx_dropped++;
	}

	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}

static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx: polling, but no queue\n");

		spin_unlock(&priv->rx_lock);
		napi_complete(napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					netdev_err(priv->dev, "poll found a duplicate EOQ: %p and %p\n",
						   restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list
		 */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size)
	 */

	if (!restart &&
	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
		    == CPMAC_EOQ &&
	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx dma ring overrun\n");

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				netdev_err(priv->dev, "cpmac_poll is trying to restart rx from a descriptor that's not free: %p\n",
					   restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		netdev_dbg(priv->dev, "poll processed %d packets\n", received);

	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode
		 */
		napi_complete(napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging.
	 */
	if (netif_msg_drv(priv)) {
		netdev_err(priv->dev, "cpmac_poll is confused. Resetting hardware\n");
		cpmac_dump_all_desc(priv->dev);
		netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			   cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			   cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	napi_complete(napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);

	return 0;
}

static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max(skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
	netif_stop_subqueue(dev, queue);

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			netdev_warn(dev, "tx dma ring full\n");

		return NETDEV_TX_BUSY;
	}

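	/* The empty lock/unlock pair below is kept from the original code;
	 * it presumably acts as a synchronization point against paths that
	 * update state under priv->lock (e.g. the stats updates in
	 * cpmac_end_xmit()), since no state is modified while it is held.
	 */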
	spin_lock(&priv->lock);
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}

static void cpmac_end_xmit(struct net_device *dev, int queue)
{
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	desc = &priv->desc_ring[queue];
	cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
	if (likely(desc->skb)) {
		spin_lock(&priv->lock);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->skb->len;
		spin_unlock(&priv->lock);
		dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
				 DMA_TO_DEVICE);

		if (unlikely(netif_msg_tx_done(priv)))
			netdev_dbg(dev, "sent 0x%p, len=%d\n",
				   desc->skb, desc->skb->len);

		dev_kfree_skb_irq(desc->skb);
		desc->skb = NULL;
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	} else {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			netdev_warn(dev, "end_xmit: spurious interrupt\n");
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	}
}

static void cpmac_hw_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

	ar7_device_reset(pdata->reset_bit);
	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}

static void cpmac_hw_start(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

	ar7_device_reset(pdata->reset_bit);
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);

	cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
		    MBP_RXMCAST);
	cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
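	/* The station address is split across three registers: ADDR_LO
	 * holds the last octet (programmed once per RX channel), ADDR_MID
	 * octet 4, and ADDR_HI packs octets 0-3.
	 */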
	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
		    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
		    (dev->dev_addr[3] << 24));
	cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
		    MAC_FDX);
}

static void cpmac_clear_rx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *desc;
	int i;

	if (unlikely(!priv->rx_head))
		return;
	desc = priv->rx_head;
	for (i = 0; i < priv->ring_size; i++) {
		if ((desc->dataflags & CPMAC_OWN) == 0) {
			if (netif_msg_rx_err(priv) && net_ratelimit())
				netdev_warn(dev, "packet dropped\n");
			if (unlikely(netif_msg_hw(priv)))
				cpmac_dump_desc(dev, desc);
			desc->dataflags = CPMAC_OWN;
			dev->stats.rx_dropped++;
		}
		desc->hw_next = desc->next->mapping;
		desc = desc->next;
	}
	priv->rx_head->prev->hw_next = 0;
}

static void cpmac_clear_tx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	int i;

	if (unlikely(!priv->desc_ring))
		return;
	for (i = 0; i < CPMAC_QUEUES; i++) {
		priv->desc_ring[i].dataflags = 0;
		if (priv->desc_ring[i].skb) {
			dev_kfree_skb_any(priv->desc_ring[i].skb);
			priv->desc_ring[i].skb = NULL;
		}
	}
}

static void cpmac_hw_error(struct work_struct *work)
{
	struct cpmac_priv *priv =
		container_of(work, struct cpmac_priv, reset_work);

	spin_lock(&priv->rx_lock);
	cpmac_clear_rx(priv->dev);
	spin_unlock(&priv->rx_lock);
	cpmac_clear_tx(priv->dev);
	cpmac_hw_start(priv->dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}

static void cpmac_check_status(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
	int rx_channel = (macstatus >> 8) & 7;
	int rx_code = (macstatus >> 12) & 15;
	int tx_channel = (macstatus >> 16) & 7;
	int tx_code = (macstatus >> 20) & 15;

	if (rx_code || tx_code) {
		if (netif_msg_drv(priv) && net_ratelimit()) {
			/* There is no known documentation on what these
			 * error codes actually mean, so just log them and
			 * hope for the best.
			 */
			if (rx_code)
				netdev_warn(dev, "host error %d on rx channel %d (macstatus %08x), resetting\n",
					    rx_code, rx_channel, macstatus);
			if (tx_code)
				netdev_warn(dev, "host error %d on tx channel %d (macstatus %08x), resetting\n",
					    tx_code, tx_channel, macstatus);
		}

		netif_tx_stop_all_queues(dev);
		cpmac_hw_stop(dev);
		if (schedule_work(&priv->reset_work))
			atomic_inc(&priv->reset_pending);
		if (unlikely(netif_msg_hw(priv)))
			cpmac_dump_regs(dev);
	}
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}

static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cpmac_priv *priv;
	int queue;
	u32 status;

	priv = netdev_priv(dev);

	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

	if (unlikely(netif_msg_intr(priv)))
		netdev_dbg(dev, "interrupt status: 0x%08x\n", status);

	if (status & MAC_INT_TX)
		cpmac_end_xmit(dev, (status & 7));

	if (status & MAC_INT_RX) {
		queue = (status >> 8) & 7;
		if (napi_schedule_prep(&priv->napi)) {
			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
			__napi_schedule(&priv->napi);
		}
	}

	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
		cpmac_check_status(dev);

	return IRQ_HANDLED;
}

static void cpmac_tx_timeout(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	dev->stats.tx_errors++;
	spin_unlock(&priv->lock);
	if (netif_msg_tx_err(priv) && net_ratelimit())
		netdev_warn(dev, "transmit timeout\n");

	atomic_inc(&priv->reset_pending);
	barrier();
	cpmac_clear_tx(dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
}

static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (!(netif_running(dev)))
		return -EINVAL;
	if (!priv->phy)
		return -EINVAL;

	return phy_mii_ioctl(priv->phy, ifr, cmd);
}

static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (priv->phy)
		return phy_ethtool_gset(priv->phy, cmd);

	return -EINVAL;
}

static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (priv->phy)
		return phy_ethtool_sset(priv->phy, cmd);

	return -EINVAL;
}

static void cpmac_get_ringparam(struct net_device *dev,
						struct ethtool_ringparam *ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	ring->rx_max_pending = 1024;
	ring->rx_mini_max_pending = 1;
	ring->rx_jumbo_max_pending = 1;
	ring->tx_max_pending = 1;

	ring->rx_pending = priv->ring_size;
	ring->rx_mini_pending = 1;
	ring->rx_jumbo_pending = 1;
	ring->tx_pending = 1;
}

static int cpmac_set_ringparam(struct net_device *dev,
						struct ethtool_ringparam *ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (netif_running(dev))
		return -EBUSY;
	priv->ring_size = ring->rx_pending;

	return 0;
}

static void cpmac_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "cpmac", sizeof(info->driver));
	strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
}

static const struct ethtool_ops cpmac_ethtool_ops = {
	.get_settings = cpmac_get_settings,
	.set_settings = cpmac_set_settings,
	.get_drvinfo = cpmac_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = cpmac_get_ringparam,
	.set_ringparam = cpmac_set_ringparam,
};

static void cpmac_adjust_link(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	int new_state = 0;

	spin_lock(&priv->lock);
	if (priv->phy->link) {
		netif_tx_start_all_queues(dev);
		if (priv->phy->duplex != priv->oldduplex) {
			new_state = 1;
			priv->oldduplex = priv->phy->duplex;
		}

		if (priv->phy->speed != priv->oldspeed) {
			new_state = 1;
			priv->oldspeed = priv->phy->speed;
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv) && net_ratelimit())
		phy_print_status(priv->phy);

	spin_unlock(&priv->lock);
}

static int cpmac_open(struct net_device *dev)
{
	int i, size, res;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;
	struct cpmac_desc *desc;
	struct sk_buff *skb;

	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
		if (netif_msg_drv(priv))
			netdev_err(dev, "failed to request registers\n");

		res = -ENXIO;
		goto fail_reserve;
	}

	priv->regs = ioremap(mem->start, resource_size(mem));
	if (!priv->regs) {
		if (netif_msg_drv(priv))
			netdev_err(dev, "failed to remap registers\n");

		res = -ENXIO;
		goto fail_remap;
	}

	size = priv->ring_size + CPMAC_QUEUES;
	priv->desc_ring = dma_alloc_coherent(&dev->dev,
					     sizeof(struct cpmac_desc) * size,
					     &priv->dma_ring,
					     GFP_KERNEL);
	if (!priv->desc_ring) {
		res = -ENOMEM;
		goto fail_alloc;
	}

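	/* Descriptor ring layout: the first CPMAC_QUEUES entries are the
	 * single-slot TX queues, and the following ring_size entries form
	 * the circular RX ring anchored at rx_head.
	 */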
	for (i = 0; i < size; i++)
		priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;

	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
		if (unlikely(!skb)) {
			res = -ENOMEM;
			goto fail_desc;
		}
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		desc->buflen = CPMAC_SKB_SIZE;
		desc->dataflags = CPMAC_OWN;
		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
		desc->next->prev = desc;
		desc->hw_next = (u32)desc->next->mapping;
	}

	priv->rx_head->prev->hw_next = (u32)0;

	res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
	if (res) {
		if (netif_msg_drv(priv))
			netdev_err(dev, "failed to obtain irq\n");

		goto fail_irq;
	}

	atomic_set(&priv->reset_pending, 0);
	INIT_WORK(&priv->reset_work, cpmac_hw_error);
	cpmac_hw_start(dev);

	napi_enable(&priv->napi);
	priv->phy->state = PHY_CHANGELINK;
	phy_start(priv->phy);

	return 0;

fail_irq:
fail_desc:
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}
fail_alloc:
	/* The ring was allocated with dma_alloc_coherent(), so it must be
	 * released with dma_free_coherent(), not kfree(); skip the free
	 * when the allocation itself was what failed.
	 */
	if (priv->desc_ring)
		dma_free_coherent(&dev->dev,
				  sizeof(struct cpmac_desc) * size,
				  priv->desc_ring, priv->dma_ring);
	iounmap(priv->regs);

fail_remap:
	release_mem_region(mem->start, resource_size(mem));

fail_reserve:
	return res;
}

static int cpmac_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;

	netif_tx_stop_all_queues(dev);

	cancel_work_sync(&priv->reset_work);
	napi_disable(&priv->napi);
	phy_stop(priv->phy);

	cpmac_hw_stop(dev);

	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
	cpmac_write(priv->regs, CPMAC_MBP, 0);

	free_irq(dev->irq, dev);
	iounmap(priv->regs);
	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	release_mem_region(mem->start, resource_size(mem));
	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}

	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
			  (CPMAC_QUEUES + priv->ring_size),
			  priv->desc_ring, priv->dma_ring);

	return 0;
}

static const struct net_device_ops cpmac_netdev_ops = {
	.ndo_open		= cpmac_open,
	.ndo_stop		= cpmac_stop,
	.ndo_start_xmit		= cpmac_start_xmit,
	.ndo_tx_timeout		= cpmac_tx_timeout,
	.ndo_set_rx_mode	= cpmac_set_multicast_list,
	.ndo_do_ioctl		= cpmac_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static int external_switch;

static int cpmac_probe(struct platform_device *pdev)
{
	int rc, phy_id;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	struct resource *mem;
	struct cpmac_priv *priv;
	struct net_device *dev;
	struct plat_cpmac_data *pdata;

	pdata = dev_get_platdata(&pdev->dev);

	if (external_switch || dumb_switch) {
		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
		phy_id = pdev->id;
	} else {
		for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
			if (!(pdata->phy_mask & (1 << phy_id)))
				continue;
			if (!cpmac_mii->phy_map[phy_id])
				continue;
			strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
			break;
		}
	}

	if (phy_id == PHY_MAX_ADDR) {
		dev_err(&pdev->dev, "no PHY present, falling back to switch on MDIO bus 0\n");
		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
		phy_id = pdev->id;
	}
	mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0';

	dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);
	priv = netdev_priv(dev);

	priv->pdev = pdev;
	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!mem) {
		rc = -ENODEV;
		goto out;
	}

	dev->irq = platform_get_irq_byname(pdev, "irq");

	dev->netdev_ops = &cpmac_netdev_ops;
	dev->ethtool_ops = &cpmac_ethtool_ops;

	netif_napi_add(dev, &priv->napi, cpmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->rx_lock);
	priv->dev = dev;
	priv->ring_size = 64;
	priv->msg_enable = netif_msg_init(debug_level, 0xff);
	memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));

	snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
						mdio_bus_id, phy_id);

	priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
				PHY_INTERFACE_MODE_MII);

	if (IS_ERR(priv->phy)) {
		if (netif_msg_drv(priv))
			dev_err(&pdev->dev, "Could not attach to PHY\n");

		rc = PTR_ERR(priv->phy);
		goto out;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Could not register net device\n");
		goto fail;
	}

	if (netif_msg_probe(priv)) {
		dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, mac: %pM\n",
			 (void *)mem->start, dev->irq, priv->phy_name,
			 dev->dev_addr);
	}

	return 0;

fail:
	free_netdev(dev);
out:
	return rc;
}

static int cpmac_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	free_netdev(dev);

	return 0;
}

static struct platform_driver cpmac_driver = {
	.driver = {
		.name	= "cpmac",
	},
	.probe	= cpmac_probe,
	.remove = cpmac_remove,
};

int cpmac_init(void)
{
	u32 mask;
	int i, res;

	cpmac_mii = mdiobus_alloc();
	if (cpmac_mii == NULL)
		return -ENOMEM;

	cpmac_mii->name = "cpmac-mii";
	cpmac_mii->read = cpmac_mdio_read;
	cpmac_mii->write = cpmac_mdio_write;
	cpmac_mii->reset = cpmac_mdio_reset;
	cpmac_mii->irq = mii_irqs;

	cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);

	if (!cpmac_mii->priv) {
		pr_err("Can't ioremap mdio registers\n");
		res = -ENXIO;
		goto fail_alloc;
	}

#warning FIXME: unhardcode gpio&reset bits
	ar7_gpio_disable(26);
	ar7_gpio_disable(27);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
	ar7_device_reset(AR7_RESET_BIT_EPHY);

	cpmac_mii->reset(cpmac_mii);

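	/* Poll the ALIVE register for up to ~3 seconds for PHYs to respond.
	 * If more than one address answers, the MAC is assumed to be wired
	 * to an external switch rather than a single PHY; the mask is then
	 * cleared so that no PHY addresses are probed and cpmac_probe()
	 * falls back to the fixed PHY bus.
	 */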
	for (i = 0; i < 300; i++) {
		mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
		if (mask)
			break;
		else
			msleep(10);
	}

	mask &= 0x7fffffff;
	if (mask & (mask - 1)) {
		external_switch = 1;
		mask = 0;
	}

	cpmac_mii->phy_mask = ~(mask | 0x80000000);
	snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");

	res = mdiobus_register(cpmac_mii);
	if (res)
		goto fail_mii;

	res = platform_driver_register(&cpmac_driver);
	if (res)
		goto fail_cpmac;

	return 0;

fail_cpmac:
	mdiobus_unregister(cpmac_mii);

fail_mii:
	iounmap(cpmac_mii->priv);

fail_alloc:
	mdiobus_free(cpmac_mii);

	return res;
}

void cpmac_exit(void)
{
	platform_driver_unregister(&cpmac_driver);
	mdiobus_unregister(cpmac_mii);
	iounmap(cpmac_mii->priv);
	mdiobus_free(cpmac_mii);
}

module_init(cpmac_init);
module_exit(cpmac_exit);