1/* niu.c: Neptune ethernet driver.
2 *
3 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/module.h>
9#include <linux/init.h>
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <linux/dma-mapping.h>
13#include <linux/netdevice.h>
14#include <linux/ethtool.h>
15#include <linux/etherdevice.h>
16#include <linux/platform_device.h>
17#include <linux/delay.h>
18#include <linux/bitops.h>
19#include <linux/mii.h>
20#include <linux/if.h>
21#include <linux/if_ether.h>
22#include <linux/if_vlan.h>
23#include <linux/ip.h>
24#include <linux/in.h>
25#include <linux/ipv6.h>
26#include <linux/log2.h>
27#include <linux/jiffies.h>
28#include <linux/crc32.h>
29#include <linux/list.h>
30#include <linux/slab.h>
31
32#include <linux/io.h>
33#include <linux/of_device.h>
34
35#include "niu.h"
36
37#define DRV_MODULE_NAME		"niu"
38#define DRV_MODULE_VERSION	"1.1"
39#define DRV_MODULE_RELDATE	"Apr 22, 2010"
40
41static char version[] =
42	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
43
44MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
45MODULE_DESCRIPTION("NIU ethernet driver");
46MODULE_LICENSE("GPL");
47MODULE_VERSION(DRV_MODULE_VERSION);
48
49#ifndef readq
50static u64 readq(void __iomem *reg)
51{
52	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
53}
54
55static void writeq(u64 val, void __iomem *reg)
56{
57	writel(val & 0xffffffff, reg);
58	writel(val >> 32, reg + 0x4UL);
59}
60#endif
61
62static const struct pci_device_id niu_pci_tbl[] = {
63	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
64	{}
65};
66
67MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
68
69#define NIU_TX_TIMEOUT			(5 * HZ)
70
71#define nr64(reg)		readq(np->regs + (reg))
72#define nw64(reg, val)		writeq((val), np->regs + (reg))
73
74#define nr64_mac(reg)		readq(np->mac_regs + (reg))
75#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))
76
77#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
78#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))
79
80#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
81#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))
82
83#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
84#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))
85
86#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
87
88static int niu_debug;
89static int debug = -1;
90module_param(debug, int, 0);
91MODULE_PARM_DESC(debug, "NIU debug level");
92
93#define niu_lock_parent(np, flags) \
94	spin_lock_irqsave(&np->parent->lock, flags)
95#define niu_unlock_parent(np, flags) \
96	spin_unlock_irqrestore(&np->parent->lock, flags)
97
98static int serdes_init_10g_serdes(struct niu *np);
99
100static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
101				     u64 bits, int limit, int delay)
102{
103	while (--limit >= 0) {
104		u64 val = nr64_mac(reg);
105
106		if (!(val & bits))
107			break;
108		udelay(delay);
109	}
110	if (limit < 0)
111		return -ENODEV;
112	return 0;
113}
114
115static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
116					u64 bits, int limit, int delay,
117					const char *reg_name)
118{
119	int err;
120
121	nw64_mac(reg, bits);
122	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
123	if (err)
124		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
125			   (unsigned long long)bits, reg_name,
126			   (unsigned long long)nr64_mac(reg));
127	return err;
128}
129
130#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
131({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
132	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
133})
134
135static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
136				     u64 bits, int limit, int delay)
137{
138	while (--limit >= 0) {
139		u64 val = nr64_ipp(reg);
140
141		if (!(val & bits))
142			break;
143		udelay(delay);
144	}
145	if (limit < 0)
146		return -ENODEV;
147	return 0;
148}
149
150static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
151					u64 bits, int limit, int delay,
152					const char *reg_name)
153{
154	int err;
155	u64 val;
156
157	val = nr64_ipp(reg);
158	val |= bits;
159	nw64_ipp(reg, val);
160
161	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
162	if (err)
163		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
164			   (unsigned long long)bits, reg_name,
165			   (unsigned long long)nr64_ipp(reg));
166	return err;
167}
168
169#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
170({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
171	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
172})
173
174static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
175				 u64 bits, int limit, int delay)
176{
177	while (--limit >= 0) {
178		u64 val = nr64(reg);
179
180		if (!(val & bits))
181			break;
182		udelay(delay);
183	}
184	if (limit < 0)
185		return -ENODEV;
186	return 0;
187}
188
189#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
190({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
191	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
192})
193
194static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
195				    u64 bits, int limit, int delay,
196				    const char *reg_name)
197{
198	int err;
199
200	nw64(reg, bits);
201	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
202	if (err)
203		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
204			   (unsigned long long)bits, reg_name,
205			   (unsigned long long)nr64(reg));
206	return err;
207}
208
209#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
210({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
211	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
212})
213
214static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
215{
216	u64 val = (u64) lp->timer;
217
218	if (on)
219		val |= LDG_IMGMT_ARM;
220
221	nw64(LDG_IMGMT(lp->ldg_num), val);
222}
223
224static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
225{
226	unsigned long mask_reg, bits;
227	u64 val;
228
229	if (ldn < 0 || ldn > LDN_MAX)
230		return -EINVAL;
231
232	if (ldn < 64) {
233		mask_reg = LD_IM0(ldn);
234		bits = LD_IM0_MASK;
235	} else {
236		mask_reg = LD_IM1(ldn - 64);
237		bits = LD_IM1_MASK;
238	}
239
240	val = nr64(mask_reg);
241	if (on)
242		val &= ~bits;
243	else
244		val |= bits;
245	nw64(mask_reg, val);
246
247	return 0;
248}
249
250static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
251{
252	struct niu_parent *parent = np->parent;
253	int i;
254
255	for (i = 0; i <= LDN_MAX; i++) {
256		int err;
257
258		if (parent->ldg_map[i] != lp->ldg_num)
259			continue;
260
261		err = niu_ldn_irq_enable(np, i, on);
262		if (err)
263			return err;
264	}
265	return 0;
266}
267
268static int niu_enable_interrupts(struct niu *np, int on)
269{
270	int i;
271
272	for (i = 0; i < np->num_ldg; i++) {
273		struct niu_ldg *lp = &np->ldg[i];
274		int err;
275
276		err = niu_enable_ldn_in_ldg(np, lp, on);
277		if (err)
278			return err;
279	}
280	for (i = 0; i < np->num_ldg; i++)
281		niu_ldg_rearm(np, &np->ldg[i], on);
282
283	return 0;
284}
285
286static u32 phy_encode(u32 type, int port)
287{
288	return type << (port * 2);
289}
290
291static u32 phy_decode(u32 val, int port)
292{
293	return (val >> (port * 2)) & PORT_TYPE_MASK;
294}
295
296static int mdio_wait(struct niu *np)
297{
298	int limit = 1000;
299	u64 val;
300
301	while (--limit > 0) {
302		val = nr64(MIF_FRAME_OUTPUT);
303		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
304			return val & MIF_FRAME_OUTPUT_DATA;
305
306		udelay(10);
307	}
308
309	return -ENODEV;
310}
311
312static int mdio_read(struct niu *np, int port, int dev, int reg)
313{
314	int err;
315
316	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
317	err = mdio_wait(np);
318	if (err < 0)
319		return err;
320
321	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
322	return mdio_wait(np);
323}
324
325static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
326{
327	int err;
328
329	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
330	err = mdio_wait(np);
331	if (err < 0)
332		return err;
333
334	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
335	err = mdio_wait(np);
336	if (err < 0)
337		return err;
338
339	return 0;
340}
341
342static int mii_read(struct niu *np, int port, int reg)
343{
344	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
345	return mdio_wait(np);
346}
347
348static int mii_write(struct niu *np, int port, int reg, int data)
349{
350	int err;
351
352	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
353	err = mdio_wait(np);
354	if (err < 0)
355		return err;
356
357	return 0;
358}
359
360static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
361{
362	int err;
363
364	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
365			 ESR2_TI_PLL_TX_CFG_L(channel),
366			 val & 0xffff);
367	if (!err)
368		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
369				 ESR2_TI_PLL_TX_CFG_H(channel),
370				 val >> 16);
371	return err;
372}
373
374static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
375{
376	int err;
377
378	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
379			 ESR2_TI_PLL_RX_CFG_L(channel),
380			 val & 0xffff);
381	if (!err)
382		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
383				 ESR2_TI_PLL_RX_CFG_H(channel),
384				 val >> 16);
385	return err;
386}
387
388/* Mode is always 10G fiber.  */
389static int serdes_init_niu_10g_fiber(struct niu *np)
390{
391	struct niu_link_config *lp = &np->link_config;
392	u32 tx_cfg, rx_cfg;
393	unsigned long i;
394
395	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
396	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
397		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
398		  PLL_RX_CFG_EQ_LP_ADAPTIVE);
399
400	if (lp->loopback_mode == LOOPBACK_PHY) {
401		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
402
403		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
404			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);
405
406		tx_cfg |= PLL_TX_CFG_ENTEST;
407		rx_cfg |= PLL_RX_CFG_ENTEST;
408	}
409
410	/* Initialize all 4 lanes of the SERDES.  */
411	for (i = 0; i < 4; i++) {
412		int err = esr2_set_tx_cfg(np, i, tx_cfg);
413		if (err)
414			return err;
415	}
416
417	for (i = 0; i < 4; i++) {
418		int err = esr2_set_rx_cfg(np, i, rx_cfg);
419		if (err)
420			return err;
421	}
422
423	return 0;
424}
425
426static int serdes_init_niu_1g_serdes(struct niu *np)
427{
428	struct niu_link_config *lp = &np->link_config;
429	u16 pll_cfg, pll_sts;
430	int max_retry = 100;
431	u64 uninitialized_var(sig), mask, val;
432	u32 tx_cfg, rx_cfg;
433	unsigned long i;
434	int err;
435
436	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
437		  PLL_TX_CFG_RATE_HALF);
438	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
439		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
440		  PLL_RX_CFG_RATE_HALF);
441
442	if (np->port == 0)
443		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
444
445	if (lp->loopback_mode == LOOPBACK_PHY) {
446		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
447
448		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
449			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);
450
451		tx_cfg |= PLL_TX_CFG_ENTEST;
452		rx_cfg |= PLL_RX_CFG_ENTEST;
453	}
454
455	/* Initialize PLL for 1G */
456	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
457
458	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
459			 ESR2_TI_PLL_CFG_L, pll_cfg);
460	if (err) {
461		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
462			   np->port, __func__);
463		return err;
464	}
465
466	pll_sts = PLL_CFG_ENPLL;
467
468	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
469			 ESR2_TI_PLL_STS_L, pll_sts);
470	if (err) {
471		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
472			   np->port, __func__);
473		return err;
474	}
475
476	udelay(200);
477
478	/* Initialize all 4 lanes of the SERDES.  */
479	for (i = 0; i < 4; i++) {
480		err = esr2_set_tx_cfg(np, i, tx_cfg);
481		if (err)
482			return err;
483	}
484
485	for (i = 0; i < 4; i++) {
486		err = esr2_set_rx_cfg(np, i, rx_cfg);
487		if (err)
488			return err;
489	}
490
491	switch (np->port) {
492	case 0:
493		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
494		mask = val;
495		break;
496
497	case 1:
498		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
499		mask = val;
500		break;
501
502	default:
503		return -EINVAL;
504	}
505
506	while (max_retry--) {
507		sig = nr64(ESR_INT_SIGNALS);
508		if ((sig & mask) == val)
509			break;
510
511		mdelay(500);
512	}
513
514	if ((sig & mask) != val) {
515		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
516			   np->port, (int)(sig & mask), (int)val);
517		return -ENODEV;
518	}
519
520	return 0;
521}
522
523static int serdes_init_niu_10g_serdes(struct niu *np)
524{
525	struct niu_link_config *lp = &np->link_config;
526	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
527	int max_retry = 100;
528	u64 uninitialized_var(sig), mask, val;
529	unsigned long i;
530	int err;
531
532	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
533	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
534		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
535		  PLL_RX_CFG_EQ_LP_ADAPTIVE);
536
537	if (lp->loopback_mode == LOOPBACK_PHY) {
538		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
539
540		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
541			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);
542
543		tx_cfg |= PLL_TX_CFG_ENTEST;
544		rx_cfg |= PLL_RX_CFG_ENTEST;
545	}
546
547	/* Initialize PLL for 10G */
548	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
549
550	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
551			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
552	if (err) {
553		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
554			   np->port, __func__);
555		return err;
556	}
557
558	pll_sts = PLL_CFG_ENPLL;
559
560	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
561			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
562	if (err) {
563		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
564			   np->port, __func__);
565		return err;
566	}
567
568	udelay(200);
569
570	/* Initialize all 4 lanes of the SERDES.  */
571	for (i = 0; i < 4; i++) {
572		err = esr2_set_tx_cfg(np, i, tx_cfg);
573		if (err)
574			return err;
575	}
576
577	for (i = 0; i < 4; i++) {
578		err = esr2_set_rx_cfg(np, i, rx_cfg);
579		if (err)
580			return err;
581	}
582
583	/* check if serdes is ready */
584
585	switch (np->port) {
586	case 0:
587		mask = ESR_INT_SIGNALS_P0_BITS;
588		val = (ESR_INT_SRDY0_P0 |
589		       ESR_INT_DET0_P0 |
590		       ESR_INT_XSRDY_P0 |
591		       ESR_INT_XDP_P0_CH3 |
592		       ESR_INT_XDP_P0_CH2 |
593		       ESR_INT_XDP_P0_CH1 |
594		       ESR_INT_XDP_P0_CH0);
595		break;
596
597	case 1:
598		mask = ESR_INT_SIGNALS_P1_BITS;
599		val = (ESR_INT_SRDY0_P1 |
600		       ESR_INT_DET0_P1 |
601		       ESR_INT_XSRDY_P1 |
602		       ESR_INT_XDP_P1_CH3 |
603		       ESR_INT_XDP_P1_CH2 |
604		       ESR_INT_XDP_P1_CH1 |
605		       ESR_INT_XDP_P1_CH0);
606		break;
607
608	default:
609		return -EINVAL;
610	}
611
612	while (max_retry--) {
613		sig = nr64(ESR_INT_SIGNALS);
614		if ((sig & mask) == val)
615			break;
616
617		mdelay(500);
618	}
619
620	if ((sig & mask) != val) {
621		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
622			np->port, (int)(sig & mask), (int)val);
623
624		/* 10G failed, try initializing at 1G */
625		err = serdes_init_niu_1g_serdes(np);
626		if (!err) {
627			np->flags &= ~NIU_FLAGS_10G;
628			np->mac_xcvr = MAC_XCVR_PCS;
629		}  else {
630			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
631				   np->port);
632			return -ENODEV;
633		}
634	}
635	return 0;
636}
637
638static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
639{
640	int err;
641
642	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
643	if (err >= 0) {
644		*val = (err & 0xffff);
645		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
646				ESR_RXTX_CTRL_H(chan));
647		if (err >= 0)
648			*val |= ((err & 0xffff) << 16);
649		err = 0;
650	}
651	return err;
652}
653
654static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
655{
656	int err;
657
658	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
659			ESR_GLUE_CTRL0_L(chan));
660	if (err >= 0) {
661		*val = (err & 0xffff);
662		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
663				ESR_GLUE_CTRL0_H(chan));
664		if (err >= 0) {
665			*val |= ((err & 0xffff) << 16);
666			err = 0;
667		}
668	}
669	return err;
670}
671
672static int esr_read_reset(struct niu *np, u32 *val)
673{
674	int err;
675
676	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
677			ESR_RXTX_RESET_CTRL_L);
678	if (err >= 0) {
679		*val = (err & 0xffff);
680		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
681				ESR_RXTX_RESET_CTRL_H);
682		if (err >= 0) {
683			*val |= ((err & 0xffff) << 16);
684			err = 0;
685		}
686	}
687	return err;
688}
689
690static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
691{
692	int err;
693
694	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
695			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
696	if (!err)
697		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
698				 ESR_RXTX_CTRL_H(chan), (val >> 16));
699	return err;
700}
701
702static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
703{
704	int err;
705
706	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
707			ESR_GLUE_CTRL0_L(chan), val & 0xffff);
708	if (!err)
709		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
710				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
711	return err;
712}
713
714static int esr_reset(struct niu *np)
715{
716	u32 uninitialized_var(reset);
717	int err;
718
719	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
720			 ESR_RXTX_RESET_CTRL_L, 0x0000);
721	if (err)
722		return err;
723	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
724			 ESR_RXTX_RESET_CTRL_H, 0xffff);
725	if (err)
726		return err;
727	udelay(200);
728
729	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
730			 ESR_RXTX_RESET_CTRL_L, 0xffff);
731	if (err)
732		return err;
733	udelay(200);
734
735	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
736			 ESR_RXTX_RESET_CTRL_H, 0x0000);
737	if (err)
738		return err;
739	udelay(200);
740
741	err = esr_read_reset(np, &reset);
742	if (err)
743		return err;
744	if (reset != 0) {
745		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
746			   np->port, reset);
747		return -ENODEV;
748	}
749
750	return 0;
751}
752
753static int serdes_init_10g(struct niu *np)
754{
755	struct niu_link_config *lp = &np->link_config;
756	unsigned long ctrl_reg, test_cfg_reg, i;
757	u64 ctrl_val, test_cfg_val, sig, mask, val;
758	int err;
759
760	switch (np->port) {
761	case 0:
762		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
763		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
764		break;
765	case 1:
766		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
767		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
768		break;
769
770	default:
771		return -EINVAL;
772	}
773	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
774		    ENET_SERDES_CTRL_SDET_1 |
775		    ENET_SERDES_CTRL_SDET_2 |
776		    ENET_SERDES_CTRL_SDET_3 |
777		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
778		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
779		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
780		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
781		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
782		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
783		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
784		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
785	test_cfg_val = 0;
786
787	if (lp->loopback_mode == LOOPBACK_PHY) {
788		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
789				  ENET_SERDES_TEST_MD_0_SHIFT) |
790				 (ENET_TEST_MD_PAD_LOOPBACK <<
791				  ENET_SERDES_TEST_MD_1_SHIFT) |
792				 (ENET_TEST_MD_PAD_LOOPBACK <<
793				  ENET_SERDES_TEST_MD_2_SHIFT) |
794				 (ENET_TEST_MD_PAD_LOOPBACK <<
795				  ENET_SERDES_TEST_MD_3_SHIFT));
796	}
797
798	nw64(ctrl_reg, ctrl_val);
799	nw64(test_cfg_reg, test_cfg_val);
800
801	/* Initialize all 4 lanes of the SERDES.  */
802	for (i = 0; i < 4; i++) {
803		u32 rxtx_ctrl, glue0;
804
805		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
806		if (err)
807			return err;
808		err = esr_read_glue0(np, i, &glue0);
809		if (err)
810			return err;
811
812		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
813		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
814			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
815
816		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
817			   ESR_GLUE_CTRL0_THCNT |
818			   ESR_GLUE_CTRL0_BLTIME);
819		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
820			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
821			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
822			  (BLTIME_300_CYCLES <<
823			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
824
825		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
826		if (err)
827			return err;
828		err = esr_write_glue0(np, i, glue0);
829		if (err)
830			return err;
831	}
832
833	err = esr_reset(np);
834	if (err)
835		return err;
836
837	sig = nr64(ESR_INT_SIGNALS);
838	switch (np->port) {
839	case 0:
840		mask = ESR_INT_SIGNALS_P0_BITS;
841		val = (ESR_INT_SRDY0_P0 |
842		       ESR_INT_DET0_P0 |
843		       ESR_INT_XSRDY_P0 |
844		       ESR_INT_XDP_P0_CH3 |
845		       ESR_INT_XDP_P0_CH2 |
846		       ESR_INT_XDP_P0_CH1 |
847		       ESR_INT_XDP_P0_CH0);
848		break;
849
850	case 1:
851		mask = ESR_INT_SIGNALS_P1_BITS;
852		val = (ESR_INT_SRDY0_P1 |
853		       ESR_INT_DET0_P1 |
854		       ESR_INT_XSRDY_P1 |
855		       ESR_INT_XDP_P1_CH3 |
856		       ESR_INT_XDP_P1_CH2 |
857		       ESR_INT_XDP_P1_CH1 |
858		       ESR_INT_XDP_P1_CH0);
859		break;
860
861	default:
862		return -EINVAL;
863	}
864
865	if ((sig & mask) != val) {
866		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
867			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
868			return 0;
869		}
870		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
871			   np->port, (int)(sig & mask), (int)val);
872		return -ENODEV;
873	}
874	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
875		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
876	return 0;
877}
878
879static int serdes_init_1g(struct niu *np)
880{
881	u64 val;
882
883	val = nr64(ENET_SERDES_1_PLL_CFG);
884	val &= ~ENET_SERDES_PLL_FBDIV2;
885	switch (np->port) {
886	case 0:
887		val |= ENET_SERDES_PLL_HRATE0;
888		break;
889	case 1:
890		val |= ENET_SERDES_PLL_HRATE1;
891		break;
892	case 2:
893		val |= ENET_SERDES_PLL_HRATE2;
894		break;
895	case 3:
896		val |= ENET_SERDES_PLL_HRATE3;
897		break;
898	default:
899		return -EINVAL;
900	}
901	nw64(ENET_SERDES_1_PLL_CFG, val);
902
903	return 0;
904}
905
906static int serdes_init_1g_serdes(struct niu *np)
907{
908	struct niu_link_config *lp = &np->link_config;
909	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
910	u64 ctrl_val, test_cfg_val, sig, mask, val;
911	int err;
912	u64 reset_val, val_rd;
913
914	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
915		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
916		ENET_SERDES_PLL_FBDIV0;
917	switch (np->port) {
918	case 0:
919		reset_val =  ENET_SERDES_RESET_0;
920		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
921		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
922		pll_cfg = ENET_SERDES_0_PLL_CFG;
923		break;
924	case 1:
925		reset_val =  ENET_SERDES_RESET_1;
926		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
927		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
928		pll_cfg = ENET_SERDES_1_PLL_CFG;
929		break;
930
931	default:
932		return -EINVAL;
933	}
934	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
935		    ENET_SERDES_CTRL_SDET_1 |
936		    ENET_SERDES_CTRL_SDET_2 |
937		    ENET_SERDES_CTRL_SDET_3 |
938		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
939		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
940		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
941		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
942		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
943		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
944		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
945		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
946	test_cfg_val = 0;
947
948	if (lp->loopback_mode == LOOPBACK_PHY) {
949		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
950				  ENET_SERDES_TEST_MD_0_SHIFT) |
951				 (ENET_TEST_MD_PAD_LOOPBACK <<
952				  ENET_SERDES_TEST_MD_1_SHIFT) |
953				 (ENET_TEST_MD_PAD_LOOPBACK <<
954				  ENET_SERDES_TEST_MD_2_SHIFT) |
955				 (ENET_TEST_MD_PAD_LOOPBACK <<
956				  ENET_SERDES_TEST_MD_3_SHIFT));
957	}
958
959	nw64(ENET_SERDES_RESET, reset_val);
960	mdelay(20);
961	val_rd = nr64(ENET_SERDES_RESET);
962	val_rd &= ~reset_val;
963	nw64(pll_cfg, val);
964	nw64(ctrl_reg, ctrl_val);
965	nw64(test_cfg_reg, test_cfg_val);
966	nw64(ENET_SERDES_RESET, val_rd);
967	mdelay(2000);
968
969	/* Initialize all 4 lanes of the SERDES.  */
970	for (i = 0; i < 4; i++) {
971		u32 rxtx_ctrl, glue0;
972
973		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
974		if (err)
975			return err;
976		err = esr_read_glue0(np, i, &glue0);
977		if (err)
978			return err;
979
980		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
981		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
982			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
983
984		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
985			   ESR_GLUE_CTRL0_THCNT |
986			   ESR_GLUE_CTRL0_BLTIME);
987		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
988			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
989			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
990			  (BLTIME_300_CYCLES <<
991			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
992
993		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
994		if (err)
995			return err;
996		err = esr_write_glue0(np, i, glue0);
997		if (err)
998			return err;
999	}
1000
1001
1002	sig = nr64(ESR_INT_SIGNALS);
1003	switch (np->port) {
1004	case 0:
1005		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
1006		mask = val;
1007		break;
1008
1009	case 1:
1010		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
1011		mask = val;
1012		break;
1013
1014	default:
1015		return -EINVAL;
1016	}
1017
1018	if ((sig & mask) != val) {
1019		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
1020			   np->port, (int)(sig & mask), (int)val);
1021		return -ENODEV;
1022	}
1023
1024	return 0;
1025}
1026
1027static int link_status_1g_serdes(struct niu *np, int *link_up_p)
1028{
1029	struct niu_link_config *lp = &np->link_config;
1030	int link_up;
1031	u64 val;
1032	u16 current_speed;
1033	unsigned long flags;
1034	u8 current_duplex;
1035
1036	link_up = 0;
1037	current_speed = SPEED_INVALID;
1038	current_duplex = DUPLEX_INVALID;
1039
1040	spin_lock_irqsave(&np->lock, flags);
1041
1042	val = nr64_pcs(PCS_MII_STAT);
1043
1044	if (val & PCS_MII_STAT_LINK_STATUS) {
1045		link_up = 1;
1046		current_speed = SPEED_1000;
1047		current_duplex = DUPLEX_FULL;
1048	}
1049
1050	lp->active_speed = current_speed;
1051	lp->active_duplex = current_duplex;
1052	spin_unlock_irqrestore(&np->lock, flags);
1053
1054	*link_up_p = link_up;
1055	return 0;
1056}
1057
1058static int link_status_10g_serdes(struct niu *np, int *link_up_p)
1059{
1060	unsigned long flags;
1061	struct niu_link_config *lp = &np->link_config;
1062	int link_up = 0;
1063	int link_ok = 1;
1064	u64 val, val2;
1065	u16 current_speed;
1066	u8 current_duplex;
1067
1068	if (!(np->flags & NIU_FLAGS_10G))
1069		return link_status_1g_serdes(np, link_up_p);
1070
1071	current_speed = SPEED_INVALID;
1072	current_duplex = DUPLEX_INVALID;
1073	spin_lock_irqsave(&np->lock, flags);
1074
1075	val = nr64_xpcs(XPCS_STATUS(0));
1076	val2 = nr64_mac(XMAC_INTER2);
1077	if (val2 & 0x01000000)
1078		link_ok = 0;
1079
1080	if ((val & 0x1000ULL) && link_ok) {
1081		link_up = 1;
1082		current_speed = SPEED_10000;
1083		current_duplex = DUPLEX_FULL;
1084	}
1085	lp->active_speed = current_speed;
1086	lp->active_duplex = current_duplex;
1087	spin_unlock_irqrestore(&np->lock, flags);
1088	*link_up_p = link_up;
1089	return 0;
1090}
1091
1092static int link_status_mii(struct niu *np, int *link_up_p)
1093{
1094	struct niu_link_config *lp = &np->link_config;
1095	int err;
1096	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
1097	int supported, advertising, active_speed, active_duplex;
1098
1099	err = mii_read(np, np->phy_addr, MII_BMCR);
1100	if (unlikely(err < 0))
1101		return err;
1102	bmcr = err;
1103
1104	err = mii_read(np, np->phy_addr, MII_BMSR);
1105	if (unlikely(err < 0))
1106		return err;
1107	bmsr = err;
1108
1109	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
1110	if (unlikely(err < 0))
1111		return err;
1112	advert = err;
1113
1114	err = mii_read(np, np->phy_addr, MII_LPA);
1115	if (unlikely(err < 0))
1116		return err;
1117	lpa = err;
1118
1119	if (likely(bmsr & BMSR_ESTATEN)) {
1120		err = mii_read(np, np->phy_addr, MII_ESTATUS);
1121		if (unlikely(err < 0))
1122			return err;
1123		estatus = err;
1124
1125		err = mii_read(np, np->phy_addr, MII_CTRL1000);
1126		if (unlikely(err < 0))
1127			return err;
1128		ctrl1000 = err;
1129
1130		err = mii_read(np, np->phy_addr, MII_STAT1000);
1131		if (unlikely(err < 0))
1132			return err;
1133		stat1000 = err;
1134	} else
1135		estatus = ctrl1000 = stat1000 = 0;
1136
1137	supported = 0;
1138	if (bmsr & BMSR_ANEGCAPABLE)
1139		supported |= SUPPORTED_Autoneg;
1140	if (bmsr & BMSR_10HALF)
1141		supported |= SUPPORTED_10baseT_Half;
1142	if (bmsr & BMSR_10FULL)
1143		supported |= SUPPORTED_10baseT_Full;
1144	if (bmsr & BMSR_100HALF)
1145		supported |= SUPPORTED_100baseT_Half;
1146	if (bmsr & BMSR_100FULL)
1147		supported |= SUPPORTED_100baseT_Full;
1148	if (estatus & ESTATUS_1000_THALF)
1149		supported |= SUPPORTED_1000baseT_Half;
1150	if (estatus & ESTATUS_1000_TFULL)
1151		supported |= SUPPORTED_1000baseT_Full;
1152	lp->supported = supported;
1153
1154	advertising = mii_adv_to_ethtool_adv_t(advert);
1155	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
1156
1157	if (bmcr & BMCR_ANENABLE) {
1158		int neg, neg1000;
1159
1160		lp->active_autoneg = 1;
1161		advertising |= ADVERTISED_Autoneg;
1162
1163		neg = advert & lpa;
1164		neg1000 = (ctrl1000 << 2) & stat1000;
1165
1166		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
1167			active_speed = SPEED_1000;
1168		else if (neg & LPA_100)
1169			active_speed = SPEED_100;
1170		else if (neg & (LPA_10HALF | LPA_10FULL))
1171			active_speed = SPEED_10;
1172		else
1173			active_speed = SPEED_INVALID;
1174
1175		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
1176			active_duplex = DUPLEX_FULL;
1177		else if (active_speed != SPEED_INVALID)
1178			active_duplex = DUPLEX_HALF;
1179		else
1180			active_duplex = DUPLEX_INVALID;
1181	} else {
1182		lp->active_autoneg = 0;
1183
1184		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
1185			active_speed = SPEED_1000;
1186		else if (bmcr & BMCR_SPEED100)
1187			active_speed = SPEED_100;
1188		else
1189			active_speed = SPEED_10;
1190
1191		if (bmcr & BMCR_FULLDPLX)
1192			active_duplex = DUPLEX_FULL;
1193		else
1194			active_duplex = DUPLEX_HALF;
1195	}
1196
1197	lp->active_advertising = advertising;
1198	lp->active_speed = active_speed;
1199	lp->active_duplex = active_duplex;
1200	*link_up_p = !!(bmsr & BMSR_LSTATUS);
1201
1202	return 0;
1203}
1204
1205static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
1206{
1207	struct niu_link_config *lp = &np->link_config;
1208	u16 current_speed, bmsr;
1209	unsigned long flags;
1210	u8 current_duplex;
1211	int err, link_up;
1212
1213	link_up = 0;
1214	current_speed = SPEED_INVALID;
1215	current_duplex = DUPLEX_INVALID;
1216
1217	spin_lock_irqsave(&np->lock, flags);
1218
1219	err = -EINVAL;
1220
1221	err = mii_read(np, np->phy_addr, MII_BMSR);
1222	if (err < 0)
1223		goto out;
1224
1225	bmsr = err;
1226	if (bmsr & BMSR_LSTATUS) {
1227		u16 adv, lpa;
1228
1229		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
1230		if (err < 0)
1231			goto out;
1232		adv = err;
1233
1234		err = mii_read(np, np->phy_addr, MII_LPA);
1235		if (err < 0)
1236			goto out;
1237		lpa = err;
1238
1239		err = mii_read(np, np->phy_addr, MII_ESTATUS);
1240		if (err < 0)
1241			goto out;
1242		link_up = 1;
1243		current_speed = SPEED_1000;
1244		current_duplex = DUPLEX_FULL;
1245
1246	}
1247	lp->active_speed = current_speed;
1248	lp->active_duplex = current_duplex;
1249	err = 0;
1250
1251out:
1252	spin_unlock_irqrestore(&np->lock, flags);
1253
1254	*link_up_p = link_up;
1255	return err;
1256}
1257
1258static int link_status_1g(struct niu *np, int *link_up_p)
1259{
1260	struct niu_link_config *lp = &np->link_config;
1261	unsigned long flags;
1262	int err;
1263
1264	spin_lock_irqsave(&np->lock, flags);
1265
1266	err = link_status_mii(np, link_up_p);
1267	lp->supported |= SUPPORTED_TP;
1268	lp->active_advertising |= ADVERTISED_TP;
1269
1270	spin_unlock_irqrestore(&np->lock, flags);
1271	return err;
1272}
1273
1274static int bcm8704_reset(struct niu *np)
1275{
1276	int err, limit;
1277
1278	err = mdio_read(np, np->phy_addr,
1279			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
1280	if (err < 0 || err == 0xffff)
1281		return err;
1282	err |= BMCR_RESET;
1283	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1284			 MII_BMCR, err);
1285	if (err)
1286		return err;
1287
1288	limit = 1000;
1289	while (--limit >= 0) {
1290		err = mdio_read(np, np->phy_addr,
1291				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
1292		if (err < 0)
1293			return err;
1294		if (!(err & BMCR_RESET))
1295			break;
1296	}
1297	if (limit < 0) {
1298		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
1299			   np->port, (err & 0xffff));
1300		return -ENODEV;
1301	}
1302	return 0;
1303}
1304
1305/* When written, certain PHY registers need to be read back twice
1306 * in order for the bits to settle properly.
1307 */
1308static int bcm8704_user_dev3_readback(struct niu *np, int reg)
1309{
1310	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1311	if (err < 0)
1312		return err;
1313	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1314	if (err < 0)
1315		return err;
1316	return 0;
1317}
1318
1319static int bcm8706_init_user_dev3(struct niu *np)
1320{
1321	int err;
1322
1323
1324	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1325			BCM8704_USER_OPT_DIGITAL_CTRL);
1326	if (err < 0)
1327		return err;
1328	err &= ~USER_ODIG_CTRL_GPIOS;
1329	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1330	err |=  USER_ODIG_CTRL_RESV2;
1331	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1332			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
1333	if (err)
1334		return err;
1335
1336	mdelay(1000);
1337
1338	return 0;
1339}
1340
1341static int bcm8704_init_user_dev3(struct niu *np)
1342{
1343	int err;
1344
1345	err = mdio_write(np, np->phy_addr,
1346			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
1347			 (USER_CONTROL_OPTXRST_LVL |
1348			  USER_CONTROL_OPBIASFLT_LVL |
1349			  USER_CONTROL_OBTMPFLT_LVL |
1350			  USER_CONTROL_OPPRFLT_LVL |
1351			  USER_CONTROL_OPTXFLT_LVL |
1352			  USER_CONTROL_OPRXLOS_LVL |
1353			  USER_CONTROL_OPRXFLT_LVL |
1354			  USER_CONTROL_OPTXON_LVL |
1355			  (0x3f << USER_CONTROL_RES1_SHIFT)));
1356	if (err)
1357		return err;
1358
1359	err = mdio_write(np, np->phy_addr,
1360			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
1361			 (USER_PMD_TX_CTL_XFP_CLKEN |
1362			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
1363			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
1364			  USER_PMD_TX_CTL_TSCK_LPWREN));
1365	if (err)
1366		return err;
1367
1368	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
1369	if (err)
1370		return err;
1371	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
1372	if (err)
1373		return err;
1374
1375	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1376			BCM8704_USER_OPT_DIGITAL_CTRL);
1377	if (err < 0)
1378		return err;
1379	err &= ~USER_ODIG_CTRL_GPIOS;
1380	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1381	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1382			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
1383	if (err)
1384		return err;
1385
1386	mdelay(1000);
1387
1388	return 0;
1389}
1390
1391static int mrvl88x2011_act_led(struct niu *np, int val)
1392{
1393	int	err;
1394
1395	err  = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1396		MRVL88X2011_LED_8_TO_11_CTL);
1397	if (err < 0)
1398		return err;
1399
1400	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
1401	err |=  MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);
1402
1403	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1404			  MRVL88X2011_LED_8_TO_11_CTL, err);
1405}
1406
1407static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
1408{
1409	int	err;
1410
1411	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1412			MRVL88X2011_LED_BLINK_CTL);
1413	if (err >= 0) {
1414		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
1415		err |= (rate << 4);
1416
1417		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1418				 MRVL88X2011_LED_BLINK_CTL, err);
1419	}
1420
1421	return err;
1422}
1423
1424static int xcvr_init_10g_mrvl88x2011(struct niu *np)
1425{
1426	int	err;
1427
1428	/* Set LED functions */
1429	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
1430	if (err)
1431		return err;
1432
1433	/* led activity */
1434	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
1435	if (err)
1436		return err;
1437
1438	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1439			MRVL88X2011_GENERAL_CTL);
1440	if (err < 0)
1441		return err;
1442
1443	err |= MRVL88X2011_ENA_XFPREFCLK;
1444
1445	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1446			 MRVL88X2011_GENERAL_CTL, err);
1447	if (err < 0)
1448		return err;
1449
1450	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1451			MRVL88X2011_PMA_PMD_CTL_1);
1452	if (err < 0)
1453		return err;
1454
1455	if (np->link_config.loopback_mode == LOOPBACK_MAC)
1456		err |= MRVL88X2011_LOOPBACK;
1457	else
1458		err &= ~MRVL88X2011_LOOPBACK;
1459
1460	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1461			 MRVL88X2011_PMA_PMD_CTL_1, err);
1462	if (err < 0)
1463		return err;
1464
1465	/* Enable PMD  */
1466	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1467			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
1468}
1469
1470
1471static int xcvr_diag_bcm870x(struct niu *np)
1472{
1473	u16 analog_stat0, tx_alarm_status;
1474	int err = 0;
1475
1476#if 1
1477	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1478			MII_STAT1000);
1479	if (err < 0)
1480		return err;
1481	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
1482
1483	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
1484	if (err < 0)
1485		return err;
1486	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
1487
1488	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1489			MII_NWAYTEST);
1490	if (err < 0)
1491		return err;
1492	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
1493#endif
1494
1495	/* XXX dig this out it might not be so useful XXX */
1496	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1497			BCM8704_USER_ANALOG_STATUS0);
1498	if (err < 0)
1499		return err;
1500	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1501			BCM8704_USER_ANALOG_STATUS0);
1502	if (err < 0)
1503		return err;
1504	analog_stat0 = err;
1505
1506	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1507			BCM8704_USER_TX_ALARM_STATUS);
1508	if (err < 0)
1509		return err;
1510	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1511			BCM8704_USER_TX_ALARM_STATUS);
1512	if (err < 0)
1513		return err;
1514	tx_alarm_status = err;
1515
1516	if (analog_stat0 != 0x03fc) {
1517		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
1518			pr_info("Port %u cable not connected or bad cable\n",
1519				np->port);
1520		} else if (analog_stat0 == 0x639c) {
1521			pr_info("Port %u optical module is bad or missing\n",
1522				np->port);
1523		}
1524	}
1525
1526	return 0;
1527}
1528
1529static int xcvr_10g_set_lb_bcm870x(struct niu *np)
1530{
1531	struct niu_link_config *lp = &np->link_config;
1532	int err;
1533
1534	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1535			MII_BMCR);
1536	if (err < 0)
1537		return err;
1538
1539	err &= ~BMCR_LOOPBACK;
1540
1541	if (lp->loopback_mode == LOOPBACK_MAC)
1542		err |= BMCR_LOOPBACK;
1543
1544	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1545			 MII_BMCR, err);
1546	if (err)
1547		return err;
1548
1549	return 0;
1550}
1551
1552static int xcvr_init_10g_bcm8706(struct niu *np)
1553{
1554	int err = 0;
1555	u64 val;
1556
1557	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
1558	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
1559			return err;
1560
1561	val = nr64_mac(XMAC_CONFIG);
1562	val &= ~XMAC_CONFIG_LED_POLARITY;
1563	val |= XMAC_CONFIG_FORCE_LED_ON;
1564	nw64_mac(XMAC_CONFIG, val);
1565
1566	val = nr64(MIF_CONFIG);
1567	val |= MIF_CONFIG_INDIRECT_MODE;
1568	nw64(MIF_CONFIG, val);
1569
1570	err = bcm8704_reset(np);
1571	if (err)
1572		return err;
1573
1574	err = xcvr_10g_set_lb_bcm870x(np);
1575	if (err)
1576		return err;
1577
1578	err = bcm8706_init_user_dev3(np);
1579	if (err)
1580		return err;
1581
1582	err = xcvr_diag_bcm870x(np);
1583	if (err)
1584		return err;
1585
1586	return 0;
1587}
1588
1589static int xcvr_init_10g_bcm8704(struct niu *np)
1590{
1591	int err;
1592
1593	err = bcm8704_reset(np);
1594	if (err)
1595		return err;
1596
1597	err = bcm8704_init_user_dev3(np);
1598	if (err)
1599		return err;
1600
1601	err = xcvr_10g_set_lb_bcm870x(np);
1602	if (err)
1603		return err;
1604
1605	err =  xcvr_diag_bcm870x(np);
1606	if (err)
1607		return err;
1608
1609	return 0;
1610}
1611
1612static int xcvr_init_10g(struct niu *np)
1613{
1614	int phy_id, err;
1615	u64 val;
1616
1617	val = nr64_mac(XMAC_CONFIG);
1618	val &= ~XMAC_CONFIG_LED_POLARITY;
1619	val |= XMAC_CONFIG_FORCE_LED_ON;
1620	nw64_mac(XMAC_CONFIG, val);
1621
1622	/* XXX shared resource, lock parent XXX */
1623	val = nr64(MIF_CONFIG);
1624	val |= MIF_CONFIG_INDIRECT_MODE;
1625	nw64(MIF_CONFIG, val);
1626
1627	phy_id = phy_decode(np->parent->port_phy, np->port);
1628	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
1629
1630	/* handle different phy types */
1631	switch (phy_id & NIU_PHY_ID_MASK) {
1632	case NIU_PHY_ID_MRVL88X2011:
1633		err = xcvr_init_10g_mrvl88x2011(np);
1634		break;
1635
1636	default: /* bcom 8704 */
1637		err = xcvr_init_10g_bcm8704(np);
1638		break;
1639	}
1640
1641	return err;
1642}
1643
1644static int mii_reset(struct niu *np)
1645{
1646	int limit, err;
1647
1648	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
1649	if (err)
1650		return err;
1651
1652	limit = 1000;
1653	while (--limit >= 0) {
1654		udelay(500);
1655		err = mii_read(np, np->phy_addr, MII_BMCR);
1656		if (err < 0)
1657			return err;
1658		if (!(err & BMCR_RESET))
1659			break;
1660	}
1661	if (limit < 0) {
1662		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
1663			   np->port, err);
1664		return -ENODEV;
1665	}
1666
1667	return 0;
1668}
1669
1670static int xcvr_init_1g_rgmii(struct niu *np)
1671{
1672	int err;
1673	u64 val;
1674	u16 bmcr, bmsr, estat;
1675
1676	val = nr64(MIF_CONFIG);
1677	val &= ~MIF_CONFIG_INDIRECT_MODE;
1678	nw64(MIF_CONFIG, val);
1679
1680	err = mii_reset(np);
1681	if (err)
1682		return err;
1683
1684	err = mii_read(np, np->phy_addr, MII_BMSR);
1685	if (err < 0)
1686		return err;
1687	bmsr = err;
1688
1689	estat = 0;
1690	if (bmsr & BMSR_ESTATEN) {
1691		err = mii_read(np, np->phy_addr, MII_ESTATUS);
1692		if (err < 0)
1693			return err;
1694		estat = err;
1695	}
1696
1697	bmcr = 0;
1698	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1699	if (err)
1700		return err;
1701
1702	if (bmsr & BMSR_ESTATEN) {
1703		u16 ctrl1000 = 0;
1704
1705		if (estat & ESTATUS_1000_TFULL)
1706			ctrl1000 |= ADVERTISE_1000FULL;
1707		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
1708		if (err)
1709			return err;
1710	}
1711
1712	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
1713
1714	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1715	if (err)
1716		return err;
1717
1718	err = mii_read(np, np->phy_addr, MII_BMCR);
1719	if (err < 0)
1720		return err;
1721	bmcr = mii_read(np, np->phy_addr, MII_BMCR);
1722
1723	err = mii_read(np, np->phy_addr, MII_BMSR);
1724	if (err < 0)
1725		return err;
1726
1727	return 0;
1728}
1729
1730static int mii_init_common(struct niu *np)
1731{
1732	struct niu_link_config *lp = &np->link_config;
1733	u16 bmcr, bmsr, adv, estat;
1734	int err;
1735
1736	err = mii_reset(np);
1737	if (err)
1738		return err;
1739
1740	err = mii_read(np, np->phy_addr, MII_BMSR);
1741	if (err < 0)
1742		return err;
1743	bmsr = err;
1744
1745	estat = 0;
1746	if (bmsr & BMSR_ESTATEN) {
1747		err = mii_read(np, np->phy_addr, MII_ESTATUS);
1748		if (err < 0)
1749			return err;
1750		estat = err;
1751	}
1752
1753	bmcr = 0;
1754	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1755	if (err)
1756		return err;
1757
1758	if (lp->loopback_mode == LOOPBACK_MAC) {
1759		bmcr |= BMCR_LOOPBACK;
1760		if (lp->active_speed == SPEED_1000)
1761			bmcr |= BMCR_SPEED1000;
1762		if (lp->active_duplex == DUPLEX_FULL)
1763			bmcr |= BMCR_FULLDPLX;
1764	}
1765
1766	if (lp->loopback_mode == LOOPBACK_PHY) {
1767		u16 aux;
1768
1769		aux = (BCM5464R_AUX_CTL_EXT_LB |
1770		       BCM5464R_AUX_CTL_WRITE_1);
1771		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
1772		if (err)
1773			return err;
1774	}
1775
1776	if (lp->autoneg) {
1777		u16 ctrl1000;
1778
1779		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1780		if ((bmsr & BMSR_10HALF) &&
1781			(lp->advertising & ADVERTISED_10baseT_Half))
1782			adv |= ADVERTISE_10HALF;
1783		if ((bmsr & BMSR_10FULL) &&
1784			(lp->advertising & ADVERTISED_10baseT_Full))
1785			adv |= ADVERTISE_10FULL;
1786		if ((bmsr & BMSR_100HALF) &&
1787			(lp->advertising & ADVERTISED_100baseT_Half))
1788			adv |= ADVERTISE_100HALF;
1789		if ((bmsr & BMSR_100FULL) &&
1790			(lp->advertising & ADVERTISED_100baseT_Full))
1791			adv |= ADVERTISE_100FULL;
1792		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
1793		if (err)
1794			return err;
1795
1796		if (likely(bmsr & BMSR_ESTATEN)) {
1797			ctrl1000 = 0;
1798			if ((estat & ESTATUS_1000_THALF) &&
1799				(lp->advertising & ADVERTISED_1000baseT_Half))
1800				ctrl1000 |= ADVERTISE_1000HALF;
1801			if ((estat & ESTATUS_1000_TFULL) &&
1802				(lp->advertising & ADVERTISED_1000baseT_Full))
1803				ctrl1000 |= ADVERTISE_1000FULL;
1804			err = mii_write(np, np->phy_addr,
1805					MII_CTRL1000, ctrl1000);
1806			if (err)
1807				return err;
1808		}
1809
1810		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1811	} else {
1812		/* !lp->autoneg */
1813		int fulldpx;
1814
1815		if (lp->duplex == DUPLEX_FULL) {
1816			bmcr |= BMCR_FULLDPLX;
1817			fulldpx = 1;
1818		} else if (lp->duplex == DUPLEX_HALF)
1819			fulldpx = 0;
1820		else
1821			return -EINVAL;
1822
1823		if (lp->speed == SPEED_1000) {
1824			/* if X-full requested while not supported, or
1825			   X-half requested while not supported... */
1826			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
1827				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
1828				return -EINVAL;
1829			bmcr |= BMCR_SPEED1000;
1830		} else if (lp->speed == SPEED_100) {
1831			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
1832				(!fulldpx && !(bmsr & BMSR_100HALF)))
1833				return -EINVAL;
1834			bmcr |= BMCR_SPEED100;
1835		} else if (lp->speed == SPEED_10) {
1836			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
1837				(!fulldpx && !(bmsr & BMSR_10HALF)))
1838				return -EINVAL;
1839		} else
1840			return -EINVAL;
1841	}
1842
1843	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1844	if (err)
1845		return err;
1846
1847#if 0
1848	err = mii_read(np, np->phy_addr, MII_BMCR);
1849	if (err < 0)
1850		return err;
1851	bmcr = err;
1852
1853	err = mii_read(np, np->phy_addr, MII_BMSR);
1854	if (err < 0)
1855		return err;
1856	bmsr = err;
1857
1858	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
1859		np->port, bmcr, bmsr);
1860#endif
1861
1862	return 0;
1863}
1864
1865static int xcvr_init_1g(struct niu *np)
1866{
1867	u64 val;
1868
1869	/* XXX shared resource, lock parent XXX */
1870	val = nr64(MIF_CONFIG);
1871	val &= ~MIF_CONFIG_INDIRECT_MODE;
1872	nw64(MIF_CONFIG, val);
1873
1874	return mii_init_common(np);
1875}
1876
1877static int niu_xcvr_init(struct niu *np)
1878{
1879	const struct niu_phy_ops *ops = np->phy_ops;
1880	int err;
1881
1882	err = 0;
1883	if (ops->xcvr_init)
1884		err = ops->xcvr_init(np);
1885
1886	return err;
1887}
1888
1889static int niu_serdes_init(struct niu *np)
1890{
1891	const struct niu_phy_ops *ops = np->phy_ops;
1892	int err;
1893
1894	err = 0;
1895	if (ops->serdes_init)
1896		err = ops->serdes_init(np);
1897
1898	return err;
1899}
1900
1901static void niu_init_xif(struct niu *);
1902static void niu_handle_led(struct niu *, int status);
1903
1904static int niu_link_status_common(struct niu *np, int link_up)
1905{
1906	struct niu_link_config *lp = &np->link_config;
1907	struct net_device *dev = np->dev;
1908	unsigned long flags;
1909
1910	if (!netif_carrier_ok(dev) && link_up) {
1911		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
1912			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
1913			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
1914			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
1915			   "10Mbit/sec",
1916			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");
1917
1918		spin_lock_irqsave(&np->lock, flags);
1919		niu_init_xif(np);
1920		niu_handle_led(np, 1);
1921		spin_unlock_irqrestore(&np->lock, flags);
1922
1923		netif_carrier_on(dev);
1924	} else if (netif_carrier_ok(dev) && !link_up) {
1925		netif_warn(np, link, dev, "Link is down\n");
1926		spin_lock_irqsave(&np->lock, flags);
1927		niu_handle_led(np, 0);
1928		spin_unlock_irqrestore(&np->lock, flags);
1929		netif_carrier_off(dev);
1930	}
1931
1932	return 0;
1933}
1934
1935static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
1936{
1937	int err, link_up, pma_status, pcs_status;
1938
1939	link_up = 0;
1940
1941	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1942			MRVL88X2011_10G_PMD_STATUS_2);
1943	if (err < 0)
1944		goto out;
1945
1946	/* Check PMA/PMD Register: 1.0001.2 == 1 */
1947	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1948			MRVL88X2011_PMA_PMD_STATUS_1);
1949	if (err < 0)
1950		goto out;
1951
1952	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
1953
1954        /* Check PMC Register : 3.0001.2 == 1: read twice */
1955	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1956			MRVL88X2011_PMA_PMD_STATUS_1);
1957	if (err < 0)
1958		goto out;
1959
1960	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1961			MRVL88X2011_PMA_PMD_STATUS_1);
1962	if (err < 0)
1963		goto out;
1964
1965	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
1966
1967        /* Check XGXS Register : 4.0018.[0-3,12] */
1968	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
1969			MRVL88X2011_10G_XGXS_LANE_STAT);
1970	if (err < 0)
1971		goto out;
1972
1973	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
1974		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
1975		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
1976		    0x800))
1977		link_up = (pma_status && pcs_status) ? 1 : 0;
1978
1979	np->link_config.active_speed = SPEED_10000;
1980	np->link_config.active_duplex = DUPLEX_FULL;
1981	err = 0;
1982out:
1983	mrvl88x2011_act_led(np, (link_up ?
1984				 MRVL88X2011_LED_CTL_PCS_ACT :
1985				 MRVL88X2011_LED_CTL_OFF));
1986
1987	*link_up_p = link_up;
1988	return err;
1989}
1990
1991static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
1992{
1993	int err, link_up;
1994	link_up = 0;
1995
1996	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1997			BCM8704_PMD_RCV_SIGDET);
1998	if (err < 0 || err == 0xffff)
1999		goto out;
2000	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
2001		err = 0;
2002		goto out;
2003	}
2004
2005	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
2006			BCM8704_PCS_10G_R_STATUS);
2007	if (err < 0)
2008		goto out;
2009
2010	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
2011		err = 0;
2012		goto out;
2013	}
2014
2015	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
2016			BCM8704_PHYXS_XGXS_LANE_STAT);
2017	if (err < 0)
2018		goto out;
2019	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
2020		    PHYXS_XGXS_LANE_STAT_MAGIC |
2021		    PHYXS_XGXS_LANE_STAT_PATTEST |
2022		    PHYXS_XGXS_LANE_STAT_LANE3 |
2023		    PHYXS_XGXS_LANE_STAT_LANE2 |
2024		    PHYXS_XGXS_LANE_STAT_LANE1 |
2025		    PHYXS_XGXS_LANE_STAT_LANE0)) {
2026		err = 0;
2027		np->link_config.active_speed = SPEED_INVALID;
2028		np->link_config.active_duplex = DUPLEX_INVALID;
2029		goto out;
2030	}
2031
2032	link_up = 1;
2033	np->link_config.active_speed = SPEED_10000;
2034	np->link_config.active_duplex = DUPLEX_FULL;
2035	err = 0;
2036
2037out:
2038	*link_up_p = link_up;
2039	return err;
2040}
2041
2042static int link_status_10g_bcom(struct niu *np, int *link_up_p)
2043{
2044	int err, link_up;
2045
2046	link_up = 0;
2047
2048	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
2049			BCM8704_PMD_RCV_SIGDET);
2050	if (err < 0)
2051		goto out;
2052	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
2053		err = 0;
2054		goto out;
2055	}
2056
2057	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
2058			BCM8704_PCS_10G_R_STATUS);
2059	if (err < 0)
2060		goto out;
2061	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
2062		err = 0;
2063		goto out;
2064	}
2065
2066	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
2067			BCM8704_PHYXS_XGXS_LANE_STAT);
2068	if (err < 0)
2069		goto out;
2070
2071	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
2072		    PHYXS_XGXS_LANE_STAT_MAGIC |
2073		    PHYXS_XGXS_LANE_STAT_LANE3 |
2074		    PHYXS_XGXS_LANE_STAT_LANE2 |
2075		    PHYXS_XGXS_LANE_STAT_LANE1 |
2076		    PHYXS_XGXS_LANE_STAT_LANE0)) {
2077		err = 0;
2078		goto out;
2079	}
2080
2081	link_up = 1;
2082	np->link_config.active_speed = SPEED_10000;
2083	np->link_config.active_duplex = DUPLEX_FULL;
2084	err = 0;
2085
2086out:
2087	*link_up_p = link_up;
2088	return err;
2089}
2090
2091static int link_status_10g(struct niu *np, int *link_up_p)
2092{
2093	unsigned long flags;
2094	int err = -EINVAL;
2095
2096	spin_lock_irqsave(&np->lock, flags);
2097
2098	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
2099		int phy_id;
2100
2101		phy_id = phy_decode(np->parent->port_phy, np->port);
2102		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
2103
2104		/* handle different phy types */
2105		switch (phy_id & NIU_PHY_ID_MASK) {
2106		case NIU_PHY_ID_MRVL88X2011:
2107			err = link_status_10g_mrvl(np, link_up_p);
2108			break;
2109
2110		default: /* bcom 8704 */
2111			err = link_status_10g_bcom(np, link_up_p);
2112			break;
2113		}
2114	}
2115
2116	spin_unlock_irqrestore(&np->lock, flags);
2117
2118	return err;
2119}
2120
2121static int niu_10g_phy_present(struct niu *np)
2122{
2123	u64 sig, mask, val;
2124
2125	sig = nr64(ESR_INT_SIGNALS);
2126	switch (np->port) {
2127	case 0:
2128		mask = ESR_INT_SIGNALS_P0_BITS;
2129		val = (ESR_INT_SRDY0_P0 |
2130		       ESR_INT_DET0_P0 |
2131		       ESR_INT_XSRDY_P0 |
2132		       ESR_INT_XDP_P0_CH3 |
2133		       ESR_INT_XDP_P0_CH2 |
2134		       ESR_INT_XDP_P0_CH1 |
2135		       ESR_INT_XDP_P0_CH0);
2136		break;
2137
2138	case 1:
2139		mask = ESR_INT_SIGNALS_P1_BITS;
2140		val = (ESR_INT_SRDY0_P1 |
2141		       ESR_INT_DET0_P1 |
2142		       ESR_INT_XSRDY_P1 |
2143		       ESR_INT_XDP_P1_CH3 |
2144		       ESR_INT_XDP_P1_CH2 |
2145		       ESR_INT_XDP_P1_CH1 |
2146		       ESR_INT_XDP_P1_CH0);
2147		break;
2148
2149	default:
2150		return 0;
2151	}
2152
2153	if ((sig & mask) != val)
2154		return 0;
2155	return 1;
2156}
2157
2158static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
2159{
2160	unsigned long flags;
2161	int err = 0;
2162	int phy_present;
2163	int phy_present_prev;
2164
2165	spin_lock_irqsave(&np->lock, flags);
2166
2167	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
2168		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
2169			1 : 0;
2170		phy_present = niu_10g_phy_present(np);
2171		if (phy_present != phy_present_prev) {
2172			/* state change */
2173			if (phy_present) {
2174				/* A NEM was just plugged in */
2175				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2176				if (np->phy_ops->xcvr_init)
2177					err = np->phy_ops->xcvr_init(np);
2178				if (err) {
2179					err = mdio_read(np, np->phy_addr,
2180						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
2181					if (err == 0xffff) {
2182						/* No mdio, back-to-back XAUI */
2183						goto out;
2184					}
2185					/* debounce */
2186					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2187				}
2188			} else {
2189				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2190				*link_up_p = 0;
2191				netif_warn(np, link, np->dev,
2192					   "Hotplug PHY Removed\n");
2193			}
2194		}
2195out:
2196		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
2197			err = link_status_10g_bcm8706(np, link_up_p);
2198			if (err == 0xffff) {
2199				/* No mdio, back-to-back XAUI: it is C10NEM */
2200				*link_up_p = 1;
2201				np->link_config.active_speed = SPEED_10000;
2202				np->link_config.active_duplex = DUPLEX_FULL;
2203			}
2204		}
2205	}
2206
2207	spin_unlock_irqrestore(&np->lock, flags);
2208
2209	return 0;
2210}
2211
2212static int niu_link_status(struct niu *np, int *link_up_p)
2213{
2214	const struct niu_phy_ops *ops = np->phy_ops;
2215	int err;
2216
2217	err = 0;
2218	if (ops->link_status)
2219		err = ops->link_status(np, link_up_p);
2220
2221	return err;
2222}
2223
2224static void niu_timer(unsigned long __opaque)
2225{
2226	struct niu *np = (struct niu *) __opaque;
2227	unsigned long off;
2228	int err, link_up;
2229
2230	err = niu_link_status(np, &link_up);
2231	if (!err)
2232		niu_link_status_common(np, link_up);
2233
2234	if (netif_carrier_ok(np->dev))
2235		off = 5 * HZ;
2236	else
2237		off = 1 * HZ;
2238	np->timer.expires = jiffies + off;
2239
2240	add_timer(&np->timer);
2241}
2242
2243static const struct niu_phy_ops phy_ops_10g_serdes = {
2244	.serdes_init		= serdes_init_10g_serdes,
2245	.link_status		= link_status_10g_serdes,
2246};
2247
2248static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
2249	.serdes_init		= serdes_init_niu_10g_serdes,
2250	.link_status		= link_status_10g_serdes,
2251};
2252
2253static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
2254	.serdes_init		= serdes_init_niu_1g_serdes,
2255	.link_status		= link_status_1g_serdes,
2256};
2257
2258static const struct niu_phy_ops phy_ops_1g_rgmii = {
2259	.xcvr_init		= xcvr_init_1g_rgmii,
2260	.link_status		= link_status_1g_rgmii,
2261};
2262
2263static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
2264	.serdes_init		= serdes_init_niu_10g_fiber,
2265	.xcvr_init		= xcvr_init_10g,
2266	.link_status		= link_status_10g,
2267};
2268
2269static const struct niu_phy_ops phy_ops_10g_fiber = {
2270	.serdes_init		= serdes_init_10g,
2271	.xcvr_init		= xcvr_init_10g,
2272	.link_status		= link_status_10g,
2273};
2274
2275static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
2276	.serdes_init		= serdes_init_10g,
2277	.xcvr_init		= xcvr_init_10g_bcm8706,
2278	.link_status		= link_status_10g_hotplug,
2279};
2280
2281static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
2282	.serdes_init		= serdes_init_niu_10g_fiber,
2283	.xcvr_init		= xcvr_init_10g_bcm8706,
2284	.link_status		= link_status_10g_hotplug,
2285};
2286
2287static const struct niu_phy_ops phy_ops_10g_copper = {
2288	.serdes_init		= serdes_init_10g,
2289	.link_status		= link_status_10g, /* XXX */
2290};
2291
2292static const struct niu_phy_ops phy_ops_1g_fiber = {
2293	.serdes_init		= serdes_init_1g,
2294	.xcvr_init		= xcvr_init_1g,
2295	.link_status		= link_status_1g,
2296};
2297
2298static const struct niu_phy_ops phy_ops_1g_copper = {
2299	.xcvr_init		= xcvr_init_1g,
2300	.link_status		= link_status_1g,
2301};
2302
2303struct niu_phy_template {
2304	const struct niu_phy_ops	*ops;
2305	u32				phy_addr_base;
2306};
2307
2308static const struct niu_phy_template phy_template_niu_10g_fiber = {
2309	.ops		= &phy_ops_10g_fiber_niu,
2310	.phy_addr_base	= 16,
2311};
2312
2313static const struct niu_phy_template phy_template_niu_10g_serdes = {
2314	.ops		= &phy_ops_10g_serdes_niu,
2315	.phy_addr_base	= 0,
2316};
2317
2318static const struct niu_phy_template phy_template_niu_1g_serdes = {
2319	.ops		= &phy_ops_1g_serdes_niu,
2320	.phy_addr_base	= 0,
2321};
2322
2323static const struct niu_phy_template phy_template_10g_fiber = {
2324	.ops		= &phy_ops_10g_fiber,
2325	.phy_addr_base	= 8,
2326};
2327
2328static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
2329	.ops		= &phy_ops_10g_fiber_hotplug,
2330	.phy_addr_base	= 8,
2331};
2332
2333static const struct niu_phy_template phy_template_niu_10g_hotplug = {
2334	.ops		= &phy_ops_niu_10g_hotplug,
2335	.phy_addr_base	= 8,
2336};
2337
2338static const struct niu_phy_template phy_template_10g_copper = {
2339	.ops		= &phy_ops_10g_copper,
2340	.phy_addr_base	= 10,
2341};
2342
2343static const struct niu_phy_template phy_template_1g_fiber = {
2344	.ops		= &phy_ops_1g_fiber,
2345	.phy_addr_base	= 0,
2346};
2347
2348static const struct niu_phy_template phy_template_1g_copper = {
2349	.ops		= &phy_ops_1g_copper,
2350	.phy_addr_base	= 0,
2351};
2352
2353static const struct niu_phy_template phy_template_1g_rgmii = {
2354	.ops		= &phy_ops_1g_rgmii,
2355	.phy_addr_base	= 0,
2356};
2357
2358static const struct niu_phy_template phy_template_10g_serdes = {
2359	.ops		= &phy_ops_10g_serdes,
2360	.phy_addr_base	= 0,
2361};
2362
2363static int niu_atca_port_num[4] = {
2364	0, 0,  11, 10
2365};
2366
2367static int serdes_init_10g_serdes(struct niu *np)
2368{
2369	struct niu_link_config *lp = &np->link_config;
2370	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
2371	u64 ctrl_val, test_cfg_val, sig, mask, val;
2372
2373	switch (np->port) {
2374	case 0:
2375		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
2376		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
2377		pll_cfg = ENET_SERDES_0_PLL_CFG;
2378		break;
2379	case 1:
2380		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
2381		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
2382		pll_cfg = ENET_SERDES_1_PLL_CFG;
2383		break;
2384
2385	default:
2386		return -EINVAL;
2387	}
2388	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
2389		    ENET_SERDES_CTRL_SDET_1 |
2390		    ENET_SERDES_CTRL_SDET_2 |
2391		    ENET_SERDES_CTRL_SDET_3 |
2392		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
2393		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
2394		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
2395		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
2396		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
2397		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
2398		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
2399		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
2400	test_cfg_val = 0;
2401
2402	if (lp->loopback_mode == LOOPBACK_PHY) {
2403		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
2404				  ENET_SERDES_TEST_MD_0_SHIFT) |
2405				 (ENET_TEST_MD_PAD_LOOPBACK <<
2406				  ENET_SERDES_TEST_MD_1_SHIFT) |
2407				 (ENET_TEST_MD_PAD_LOOPBACK <<
2408				  ENET_SERDES_TEST_MD_2_SHIFT) |
2409				 (ENET_TEST_MD_PAD_LOOPBACK <<
2410				  ENET_SERDES_TEST_MD_3_SHIFT));
2411	}
2412
2413	esr_reset(np);
2414	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
2415	nw64(ctrl_reg, ctrl_val);
2416	nw64(test_cfg_reg, test_cfg_val);
2417
2418	/* Initialize all 4 lanes of the SERDES.  */
2419	for (i = 0; i < 4; i++) {
2420		u32 rxtx_ctrl, glue0;
2421		int err;
2422
2423		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
2424		if (err)
2425			return err;
2426		err = esr_read_glue0(np, i, &glue0);
2427		if (err)
2428			return err;
2429
2430		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
2431		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
2432			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
2433
2434		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
2435			   ESR_GLUE_CTRL0_THCNT |
2436			   ESR_GLUE_CTRL0_BLTIME);
2437		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
2438			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
2439			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
2440			  (BLTIME_300_CYCLES <<
2441			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
2442
2443		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
2444		if (err)
2445			return err;
2446		err = esr_write_glue0(np, i, glue0);
2447		if (err)
2448			return err;
2449	}
2450
2451
2452	sig = nr64(ESR_INT_SIGNALS);
2453	switch (np->port) {
2454	case 0:
2455		mask = ESR_INT_SIGNALS_P0_BITS;
2456		val = (ESR_INT_SRDY0_P0 |
2457		       ESR_INT_DET0_P0 |
2458		       ESR_INT_XSRDY_P0 |
2459		       ESR_INT_XDP_P0_CH3 |
2460		       ESR_INT_XDP_P0_CH2 |
2461		       ESR_INT_XDP_P0_CH1 |
2462		       ESR_INT_XDP_P0_CH0);
2463		break;
2464
2465	case 1:
2466		mask = ESR_INT_SIGNALS_P1_BITS;
2467		val = (ESR_INT_SRDY0_P1 |
2468		       ESR_INT_DET0_P1 |
2469		       ESR_INT_XSRDY_P1 |
2470		       ESR_INT_XDP_P1_CH3 |
2471		       ESR_INT_XDP_P1_CH2 |
2472		       ESR_INT_XDP_P1_CH1 |
2473		       ESR_INT_XDP_P1_CH0);
2474		break;
2475
2476	default:
2477		return -EINVAL;
2478	}
2479
2480	if ((sig & mask) != val) {
2481		int err;
2482		err = serdes_init_1g_serdes(np);
2483		if (!err) {
2484			np->flags &= ~NIU_FLAGS_10G;
2485			np->mac_xcvr = MAC_XCVR_PCS;
2486		}  else {
2487			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
2488				   np->port);
2489			return -ENODEV;
2490		}
2491	}
2492
2493	return 0;
2494}
2495
2496static int niu_determine_phy_disposition(struct niu *np)
2497{
2498	struct niu_parent *parent = np->parent;
2499	u8 plat_type = parent->plat_type;
2500	const struct niu_phy_template *tp;
2501	u32 phy_addr_off = 0;
2502
2503	if (plat_type == PLAT_TYPE_NIU) {
2504		switch (np->flags &
2505			(NIU_FLAGS_10G |
2506			 NIU_FLAGS_FIBER |
2507			 NIU_FLAGS_XCVR_SERDES)) {
2508		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2509			/* 10G Serdes */
2510			tp = &phy_template_niu_10g_serdes;
2511			break;
2512		case NIU_FLAGS_XCVR_SERDES:
2513			/* 1G Serdes */
2514			tp = &phy_template_niu_1g_serdes;
2515			break;
2516		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2517			/* 10G Fiber */
2518		default:
2519			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2520				tp = &phy_template_niu_10g_hotplug;
2521				if (np->port == 0)
2522					phy_addr_off = 8;
2523				if (np->port == 1)
2524					phy_addr_off = 12;
2525			} else {
2526				tp = &phy_template_niu_10g_fiber;
2527				phy_addr_off += np->port;
2528			}
2529			break;
2530		}
2531	} else {
2532		switch (np->flags &
2533			(NIU_FLAGS_10G |
2534			 NIU_FLAGS_FIBER |
2535			 NIU_FLAGS_XCVR_SERDES)) {
2536		case 0:
2537			/* 1G copper */
2538			tp = &phy_template_1g_copper;
2539			if (plat_type == PLAT_TYPE_VF_P0)
2540				phy_addr_off = 10;
2541			else if (plat_type == PLAT_TYPE_VF_P1)
2542				phy_addr_off = 26;
2543
2544			phy_addr_off += (np->port ^ 0x3);
2545			break;
2546
2547		case NIU_FLAGS_10G:
2548			/* 10G copper */
2549			tp = &phy_template_10g_copper;
2550			break;
2551
2552		case NIU_FLAGS_FIBER:
2553			/* 1G fiber */
2554			tp = &phy_template_1g_fiber;
2555			break;
2556
2557		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2558			/* 10G fiber */
2559			tp = &phy_template_10g_fiber;
2560			if (plat_type == PLAT_TYPE_VF_P0 ||
2561			    plat_type == PLAT_TYPE_VF_P1)
2562				phy_addr_off = 8;
2563			phy_addr_off += np->port;
2564			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2565				tp = &phy_template_10g_fiber_hotplug;
2566				if (np->port == 0)
2567					phy_addr_off = 8;
2568				if (np->port == 1)
2569					phy_addr_off = 12;
2570			}
2571			break;
2572
2573		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2574		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
2575		case NIU_FLAGS_XCVR_SERDES:
2576			switch(np->port) {
2577			case 0:
2578			case 1:
2579				tp = &phy_template_10g_serdes;
2580				break;
2581			case 2:
2582			case 3:
2583				tp = &phy_template_1g_rgmii;
2584				break;
2585			default:
2586				return -EINVAL;
2587			}
2588			phy_addr_off = niu_atca_port_num[np->port];
2589			break;
2590
2591		default:
2592			return -EINVAL;
2593		}
2594	}
2595
2596	np->phy_ops = tp->ops;
2597	np->phy_addr = tp->phy_addr_base + phy_addr_off;
2598
2599	return 0;
2600}
2601
2602static int niu_init_link(struct niu *np)
2603{
2604	struct niu_parent *parent = np->parent;
2605	int err, ignore;
2606
2607	if (parent->plat_type == PLAT_TYPE_NIU) {
2608		err = niu_xcvr_init(np);
2609		if (err)
2610			return err;
2611		msleep(200);
2612	}
2613	err = niu_serdes_init(np);
2614	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
2615		return err;
2616	msleep(200);
2617	err = niu_xcvr_init(np);
2618	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
2619		niu_link_status(np, &ignore);
2620	return 0;
2621}
2622
2623static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
2624{
2625	u16 reg0 = addr[4] << 8 | addr[5];
2626	u16 reg1 = addr[2] << 8 | addr[3];
2627	u16 reg2 = addr[0] << 8 | addr[1];
2628
2629	if (np->flags & NIU_FLAGS_XMAC) {
2630		nw64_mac(XMAC_ADDR0, reg0);
2631		nw64_mac(XMAC_ADDR1, reg1);
2632		nw64_mac(XMAC_ADDR2, reg2);
2633	} else {
2634		nw64_mac(BMAC_ADDR0, reg0);
2635		nw64_mac(BMAC_ADDR1, reg1);
2636		nw64_mac(BMAC_ADDR2, reg2);
2637	}
2638}
2639
2640static int niu_num_alt_addr(struct niu *np)
2641{
2642	if (np->flags & NIU_FLAGS_XMAC)
2643		return XMAC_NUM_ALT_ADDR;
2644	else
2645		return BMAC_NUM_ALT_ADDR;
2646}
2647
2648static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
2649{
2650	u16 reg0 = addr[4] << 8 | addr[5];
2651	u16 reg1 = addr[2] << 8 | addr[3];
2652	u16 reg2 = addr[0] << 8 | addr[1];
2653
2654	if (index >= niu_num_alt_addr(np))
2655		return -EINVAL;
2656
2657	if (np->flags & NIU_FLAGS_XMAC) {
2658		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
2659		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
2660		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
2661	} else {
2662		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
2663		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
2664		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
2665	}
2666
2667	return 0;
2668}
2669
2670static int niu_enable_alt_mac(struct niu *np, int index, int on)
2671{
2672	unsigned long reg;
2673	u64 val, mask;
2674
2675	if (index >= niu_num_alt_addr(np))
2676		return -EINVAL;
2677
2678	if (np->flags & NIU_FLAGS_XMAC) {
2679		reg = XMAC_ADDR_CMPEN;
2680		mask = 1 << index;
2681	} else {
2682		reg = BMAC_ADDR_CMPEN;
2683		mask = 1 << (index + 1);
2684	}
2685
2686	val = nr64_mac(reg);
2687	if (on)
2688		val |= mask;
2689	else
2690		val &= ~mask;
2691	nw64_mac(reg, val);
2692
2693	return 0;
2694}
2695
2696static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
2697				   int num, int mac_pref)
2698{
2699	u64 val = nr64_mac(reg);
2700	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
2701	val |= num;
2702	if (mac_pref)
2703		val |= HOST_INFO_MPR;
2704	nw64_mac(reg, val);
2705}
2706
2707static int __set_rdc_table_num(struct niu *np,
2708			       int xmac_index, int bmac_index,
2709			       int rdc_table_num, int mac_pref)
2710{
2711	unsigned long reg;
2712
2713	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
2714		return -EINVAL;
2715	if (np->flags & NIU_FLAGS_XMAC)
2716		reg = XMAC_HOST_INFO(xmac_index);
2717	else
2718		reg = BMAC_HOST_INFO(bmac_index);
2719	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2720	return 0;
2721}
2722
2723static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
2724					 int mac_pref)
2725{
2726	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
2727}
2728
2729static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
2730					   int mac_pref)
2731{
2732	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
2733}
2734
2735static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
2736				     int table_num, int mac_pref)
2737{
2738	if (idx >= niu_num_alt_addr(np))
2739		return -EINVAL;
2740	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2741}
2742
2743static u64 vlan_entry_set_parity(u64 reg_val)
2744{
2745	u64 port01_mask;
2746	u64 port23_mask;
2747
2748	port01_mask = 0x00ff;
2749	port23_mask = 0xff00;
2750
2751	if (hweight64(reg_val & port01_mask) & 1)
2752		reg_val |= ENET_VLAN_TBL_PARITY0;
2753	else
2754		reg_val &= ~ENET_VLAN_TBL_PARITY0;
2755
2756	if (hweight64(reg_val & port23_mask) & 1)
2757		reg_val |= ENET_VLAN_TBL_PARITY1;
2758	else
2759		reg_val &= ~ENET_VLAN_TBL_PARITY1;
2760
2761	return reg_val;
2762}
2763
2764static void vlan_tbl_write(struct niu *np, unsigned long index,
2765			   int port, int vpr, int rdc_table)
2766{
2767	u64 reg_val = nr64(ENET_VLAN_TBL(index));
2768
2769	reg_val &= ~((ENET_VLAN_TBL_VPR |
2770		      ENET_VLAN_TBL_VLANRDCTBLN) <<
2771		     ENET_VLAN_TBL_SHIFT(port));
2772	if (vpr)
2773		reg_val |= (ENET_VLAN_TBL_VPR <<
2774			    ENET_VLAN_TBL_SHIFT(port));
2775	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
2776
2777	reg_val = vlan_entry_set_parity(reg_val);
2778
2779	nw64(ENET_VLAN_TBL(index), reg_val);
2780}
2781
2782static void vlan_tbl_clear(struct niu *np)
2783{
2784	int i;
2785
2786	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
2787		nw64(ENET_VLAN_TBL(i), 0);
2788}
2789
2790static int tcam_wait_bit(struct niu *np, u64 bit)
2791{
2792	int limit = 1000;
2793
2794	while (--limit > 0) {
2795		if (nr64(TCAM_CTL) & bit)
2796			break;
2797		udelay(1);
2798	}
2799	if (limit <= 0)
2800		return -ENODEV;
2801
2802	return 0;
2803}
2804
2805static int tcam_flush(struct niu *np, int index)
2806{
2807	nw64(TCAM_KEY_0, 0x00);
2808	nw64(TCAM_KEY_MASK_0, 0xff);
2809	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2810
2811	return tcam_wait_bit(np, TCAM_CTL_STAT);
2812}
2813
2814#if 0
2815static int tcam_read(struct niu *np, int index,
2816		     u64 *key, u64 *mask)
2817{
2818	int err;
2819
2820	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
2821	err = tcam_wait_bit(np, TCAM_CTL_STAT);
2822	if (!err) {
2823		key[0] = nr64(TCAM_KEY_0);
2824		key[1] = nr64(TCAM_KEY_1);
2825		key[2] = nr64(TCAM_KEY_2);
2826		key[3] = nr64(TCAM_KEY_3);
2827		mask[0] = nr64(TCAM_KEY_MASK_0);
2828		mask[1] = nr64(TCAM_KEY_MASK_1);
2829		mask[2] = nr64(TCAM_KEY_MASK_2);
2830		mask[3] = nr64(TCAM_KEY_MASK_3);
2831	}
2832	return err;
2833}
2834#endif
2835
2836static int tcam_write(struct niu *np, int index,
2837		      u64 *key, u64 *mask)
2838{
2839	nw64(TCAM_KEY_0, key[0]);
2840	nw64(TCAM_KEY_1, key[1]);
2841	nw64(TCAM_KEY_2, key[2]);
2842	nw64(TCAM_KEY_3, key[3]);
2843	nw64(TCAM_KEY_MASK_0, mask[0]);
2844	nw64(TCAM_KEY_MASK_1, mask[1]);
2845	nw64(TCAM_KEY_MASK_2, mask[2]);
2846	nw64(TCAM_KEY_MASK_3, mask[3]);
2847	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2848
2849	return tcam_wait_bit(np, TCAM_CTL_STAT);
2850}
2851
2852#if 0
2853static int tcam_assoc_read(struct niu *np, int index, u64 *data)
2854{
2855	int err;
2856
2857	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
2858	err = tcam_wait_bit(np, TCAM_CTL_STAT);
2859	if (!err)
2860		*data = nr64(TCAM_KEY_1);
2861
2862	return err;
2863}
2864#endif
2865
2866static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
2867{
2868	nw64(TCAM_KEY_1, assoc_data);
2869	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
2870
2871	return tcam_wait_bit(np, TCAM_CTL_STAT);
2872}
2873
2874static void tcam_enable(struct niu *np, int on)
2875{
2876	u64 val = nr64(FFLP_CFG_1);
2877
2878	if (on)
2879		val &= ~FFLP_CFG_1_TCAM_DIS;
2880	else
2881		val |= FFLP_CFG_1_TCAM_DIS;
2882	nw64(FFLP_CFG_1, val);
2883}
2884
2885static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
2886{
2887	u64 val = nr64(FFLP_CFG_1);
2888
2889	val &= ~(FFLP_CFG_1_FFLPINITDONE |
2890		 FFLP_CFG_1_CAMLAT |
2891		 FFLP_CFG_1_CAMRATIO);
2892	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
2893	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
2894	nw64(FFLP_CFG_1, val);
2895
2896	val = nr64(FFLP_CFG_1);
2897	val |= FFLP_CFG_1_FFLPINITDONE;
2898	nw64(FFLP_CFG_1, val);
2899}
2900
2901static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
2902				      int on)
2903{
2904	unsigned long reg;
2905	u64 val;
2906
2907	if (class < CLASS_CODE_ETHERTYPE1 ||
2908	    class > CLASS_CODE_ETHERTYPE2)
2909		return -EINVAL;
2910
2911	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2912	val = nr64(reg);
2913	if (on)
2914		val |= L2_CLS_VLD;
2915	else
2916		val &= ~L2_CLS_VLD;
2917	nw64(reg, val);
2918
2919	return 0;
2920}
2921
2922#if 0
2923static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
2924				   u64 ether_type)
2925{
2926	unsigned long reg;
2927	u64 val;
2928
2929	if (class < CLASS_CODE_ETHERTYPE1 ||
2930	    class > CLASS_CODE_ETHERTYPE2 ||
2931	    (ether_type & ~(u64)0xffff) != 0)
2932		return -EINVAL;
2933
2934	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2935	val = nr64(reg);
2936	val &= ~L2_CLS_ETYPE;
2937	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
2938	nw64(reg, val);
2939
2940	return 0;
2941}
2942#endif
2943
2944static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
2945				     int on)
2946{
2947	unsigned long reg;
2948	u64 val;
2949
2950	if (class < CLASS_CODE_USER_PROG1 ||
2951	    class > CLASS_CODE_USER_PROG4)
2952		return -EINVAL;
2953
2954	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2955	val = nr64(reg);
2956	if (on)
2957		val |= L3_CLS_VALID;
2958	else
2959		val &= ~L3_CLS_VALID;
2960	nw64(reg, val);
2961
2962	return 0;
2963}
2964
2965static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
2966				  int ipv6, u64 protocol_id,
2967				  u64 tos_mask, u64 tos_val)
2968{
2969	unsigned long reg;
2970	u64 val;
2971
2972	if (class < CLASS_CODE_USER_PROG1 ||
2973	    class > CLASS_CODE_USER_PROG4 ||
2974	    (protocol_id & ~(u64)0xff) != 0 ||
2975	    (tos_mask & ~(u64)0xff) != 0 ||
2976	    (tos_val & ~(u64)0xff) != 0)
2977		return -EINVAL;
2978
2979	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2980	val = nr64(reg);
2981	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
2982		 L3_CLS_TOSMASK | L3_CLS_TOS);
2983	if (ipv6)
2984		val |= L3_CLS_IPVER;
2985	val |= (protocol_id << L3_CLS_PID_SHIFT);
2986	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
2987	val |= (tos_val << L3_CLS_TOS_SHIFT);
2988	nw64(reg, val);
2989
2990	return 0;
2991}
2992
2993static int tcam_early_init(struct niu *np)
2994{
2995	unsigned long i;
2996	int err;
2997
2998	tcam_enable(np, 0);
2999	tcam_set_lat_and_ratio(np,
3000			       DEFAULT_TCAM_LATENCY,
3001			       DEFAULT_TCAM_ACCESS_RATIO);
3002	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
3003		err = tcam_user_eth_class_enable(np, i, 0);
3004		if (err)
3005			return err;
3006	}
3007	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
3008		err = tcam_user_ip_class_enable(np, i, 0);
3009		if (err)
3010			return err;
3011	}
3012
3013	return 0;
3014}
3015
3016static int tcam_flush_all(struct niu *np)
3017{
3018	unsigned long i;
3019
3020	for (i = 0; i < np->parent->tcam_num_entries; i++) {
3021		int err = tcam_flush(np, i);
3022		if (err)
3023			return err;
3024	}
3025	return 0;
3026}
3027
3028static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
3029{
3030	return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
3031}
3032
3033#if 0
3034static int hash_read(struct niu *np, unsigned long partition,
3035		     unsigned long index, unsigned long num_entries,
3036		     u64 *data)
3037{
3038	u64 val = hash_addr_regval(index, num_entries);
3039	unsigned long i;
3040
3041	if (partition >= FCRAM_NUM_PARTITIONS ||
3042	    index + num_entries > FCRAM_SIZE)
3043		return -EINVAL;
3044
3045	nw64(HASH_TBL_ADDR(partition), val);
3046	for (i = 0; i < num_entries; i++)
3047		data[i] = nr64(HASH_TBL_DATA(partition));
3048
3049	return 0;
3050}
3051#endif
3052
3053static int hash_write(struct niu *np, unsigned long partition,
3054		      unsigned long index, unsigned long num_entries,
3055		      u64 *data)
3056{
3057	u64 val = hash_addr_regval(index, num_entries);
3058	unsigned long i;
3059
3060	if (partition >= FCRAM_NUM_PARTITIONS ||
3061	    index + (num_entries * 8) > FCRAM_SIZE)
3062		return -EINVAL;
3063
3064	nw64(HASH_TBL_ADDR(partition), val);
3065	for (i = 0; i < num_entries; i++)
3066		nw64(HASH_TBL_DATA(partition), data[i]);
3067
3068	return 0;
3069}
3070
3071static void fflp_reset(struct niu *np)
3072{
3073	u64 val;
3074
3075	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
3076	udelay(10);
3077	nw64(FFLP_CFG_1, 0);
3078
3079	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
3080	nw64(FFLP_CFG_1, val);
3081}
3082
3083static void fflp_set_timings(struct niu *np)
3084{
3085	u64 val = nr64(FFLP_CFG_1);
3086
3087	val &= ~FFLP_CFG_1_FFLPINITDONE;
3088	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
3089	nw64(FFLP_CFG_1, val);
3090
3091	val = nr64(FFLP_CFG_1);
3092	val |= FFLP_CFG_1_FFLPINITDONE;
3093	nw64(FFLP_CFG_1, val);
3094
3095	val = nr64(FCRAM_REF_TMR);
3096	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
3097	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
3098	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
3099	nw64(FCRAM_REF_TMR, val);
3100}
3101
3102static int fflp_set_partition(struct niu *np, u64 partition,
3103			      u64 mask, u64 base, int enable)
3104{
3105	unsigned long reg;
3106	u64 val;
3107
3108	if (partition >= FCRAM_NUM_PARTITIONS ||
3109	    (mask & ~(u64)0x1f) != 0 ||
3110	    (base & ~(u64)0x1f) != 0)
3111		return -EINVAL;
3112
3113	reg = FLW_PRT_SEL(partition);
3114
3115	val = nr64(reg);
3116	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
3117	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
3118	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
3119	if (enable)
3120		val |= FLW_PRT_SEL_EXT;
3121	nw64(reg, val);
3122
3123	return 0;
3124}
3125
3126static int fflp_disable_all_partitions(struct niu *np)
3127{
3128	unsigned long i;
3129
3130	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
3131		int err = fflp_set_partition(np, 0, 0, 0, 0);
3132		if (err)
3133			return err;
3134	}
3135	return 0;
3136}
3137
3138static void fflp_llcsnap_enable(struct niu *np, int on)
3139{
3140	u64 val = nr64(FFLP_CFG_1);
3141
3142	if (on)
3143		val |= FFLP_CFG_1_LLCSNAP;
3144	else
3145		val &= ~FFLP_CFG_1_LLCSNAP;
3146	nw64(FFLP_CFG_1, val);
3147}
3148
3149static void fflp_errors_enable(struct niu *np, int on)
3150{
3151	u64 val = nr64(FFLP_CFG_1);
3152
3153	if (on)
3154		val &= ~FFLP_CFG_1_ERRORDIS;
3155	else
3156		val |= FFLP_CFG_1_ERRORDIS;
3157	nw64(FFLP_CFG_1, val);
3158}
3159
3160static int fflp_hash_clear(struct niu *np)
3161{
3162	struct fcram_hash_ipv4 ent;
3163	unsigned long i;
3164
3165	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
3166	memset(&ent, 0, sizeof(ent));
3167	ent.header = HASH_HEADER_EXT;
3168
3169	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
3170		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
3171		if (err)
3172			return err;
3173	}
3174	return 0;
3175}
3176
3177static int fflp_early_init(struct niu *np)
3178{
3179	struct niu_parent *parent;
3180	unsigned long flags;
3181	int err;
3182
3183	niu_lock_parent(np, flags);
3184
3185	parent = np->parent;
3186	err = 0;
3187	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
3188		if (np->parent->plat_type != PLAT_TYPE_NIU) {
3189			fflp_reset(np);
3190			fflp_set_timings(np);
3191			err = fflp_disable_all_partitions(np);
3192			if (err) {
3193				netif_printk(np, probe, KERN_DEBUG, np->dev,
3194					     "fflp_disable_all_partitions failed, err=%d\n",
3195					     err);
3196				goto out;
3197			}
3198		}
3199
3200		err = tcam_early_init(np);
3201		if (err) {
3202			netif_printk(np, probe, KERN_DEBUG, np->dev,
3203				     "tcam_early_init failed, err=%d\n", err);
3204			goto out;
3205		}
3206		fflp_llcsnap_enable(np, 1);
3207		fflp_errors_enable(np, 0);
3208		nw64(H1POLY, 0);
3209		nw64(H2POLY, 0);
3210
3211		err = tcam_flush_all(np);
3212		if (err) {
3213			netif_printk(np, probe, KERN_DEBUG, np->dev,
3214				     "tcam_flush_all failed, err=%d\n", err);
3215			goto out;
3216		}
3217		if (np->parent->plat_type != PLAT_TYPE_NIU) {
3218			err = fflp_hash_clear(np);
3219			if (err) {
3220				netif_printk(np, probe, KERN_DEBUG, np->dev,
3221					     "fflp_hash_clear failed, err=%d\n",
3222					     err);
3223				goto out;
3224			}
3225		}
3226
3227		vlan_tbl_clear(np);
3228
3229		parent->flags |= PARENT_FLGS_CLS_HWINIT;
3230	}
3231out:
3232	niu_unlock_parent(np, flags);
3233	return err;
3234}
3235
3236static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
3237{
3238	if (class_code < CLASS_CODE_USER_PROG1 ||
3239	    class_code > CLASS_CODE_SCTP_IPV6)
3240		return -EINVAL;
3241
3242	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3243	return 0;
3244}
3245
3246static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
3247{
3248	if (class_code < CLASS_CODE_USER_PROG1 ||
3249	    class_code > CLASS_CODE_SCTP_IPV6)
3250		return -EINVAL;
3251
3252	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3253	return 0;
3254}
3255
3256/* Entries for the ports are interleaved in the TCAM */
3257static u16 tcam_get_index(struct niu *np, u16 idx)
3258{
3259	/* One entry reserved for IP fragment rule */
3260	if (idx >= (np->clas.tcam_sz - 1))
3261		idx = 0;
3262	return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
3263}
3264
3265static u16 tcam_get_size(struct niu *np)
3266{
3267	/* One entry reserved for IP fragment rule */
3268	return np->clas.tcam_sz - 1;
3269}
3270
3271static u16 tcam_get_valid_entry_cnt(struct niu *np)
3272{
3273	/* One entry reserved for IP fragment rule */
3274	return np->clas.tcam_valid_entries - 1;
3275}
3276
3277static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
3278			      u32 offset, u32 size, u32 truesize)
3279{
3280	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
3281
3282	skb->len += size;
3283	skb->data_len += size;
3284	skb->truesize += truesize;
3285}
3286
3287static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
3288{
3289	a >>= PAGE_SHIFT;
3290	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
3291
3292	return a & (MAX_RBR_RING_SIZE - 1);
3293}
3294
3295static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
3296				    struct page ***link)
3297{
3298	unsigned int h = niu_hash_rxaddr(rp, addr);
3299	struct page *p, **pp;
3300
3301	addr &= PAGE_MASK;
3302	pp = &rp->rxhash[h];
3303	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
3304		if (p->index == addr) {
3305			*link = pp;
3306			goto found;
3307		}
3308	}
3309	BUG();
3310
3311found:
3312	return p;
3313}
3314
3315static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
3316{
3317	unsigned int h = niu_hash_rxaddr(rp, base);
3318
3319	page->index = base;
3320	page->mapping = (struct address_space *) rp->rxhash[h];
3321	rp->rxhash[h] = page;
3322}
3323
3324static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
3325			    gfp_t mask, int start_index)
3326{
3327	struct page *page;
3328	u64 addr;
3329	int i;
3330
3331	page = alloc_page(mask);
3332	if (!page)
3333		return -ENOMEM;
3334
3335	addr = np->ops->map_page(np->device, page, 0,
3336				 PAGE_SIZE, DMA_FROM_DEVICE);
3337	if (!addr) {
3338		__free_page(page);
3339		return -ENOMEM;
3340	}
3341
3342	niu_hash_page(rp, page, addr);
3343	if (rp->rbr_blocks_per_page > 1)
3344		atomic_add(rp->rbr_blocks_per_page - 1, &page->_count);
3345
3346	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
3347		__le32 *rbr = &rp->rbr[start_index + i];
3348
3349		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
3350		addr += rp->rbr_block_size;
3351	}
3352
3353	return 0;
3354}
3355
3356static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3357{
3358	int index = rp->rbr_index;
3359
3360	rp->rbr_pending++;
3361	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
3362		int err = niu_rbr_add_page(np, rp, mask, index);
3363
3364		if (unlikely(err)) {
3365			rp->rbr_pending--;
3366			return;
3367		}
3368
3369		rp->rbr_index += rp->rbr_blocks_per_page;
3370		BUG_ON(rp->rbr_index > rp->rbr_table_size);
3371		if (rp->rbr_index == rp->rbr_table_size)
3372			rp->rbr_index = 0;
3373
3374		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
3375			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
3376			rp->rbr_pending = 0;
3377		}
3378	}
3379}
3380
3381static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
3382{
3383	unsigned int index = rp->rcr_index;
3384	int num_rcr = 0;
3385
3386	rp->rx_dropped++;
3387	while (1) {
3388		struct page *page, **link;
3389		u64 addr, val;
3390		u32 rcr_size;
3391
3392		num_rcr++;
3393
3394		val = le64_to_cpup(&rp->rcr[index]);
3395		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3396			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3397		page = niu_find_rxpage(rp, addr, &link);
3398
3399		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3400					 RCR_ENTRY_PKTBUFSZ_SHIFT];
3401		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
3402			*link = (struct page *) page->mapping;
3403			np->ops->unmap_page(np->device, page->index,
3404					    PAGE_SIZE, DMA_FROM_DEVICE);
3405			page->index = 0;
3406			page->mapping = NULL;
3407			__free_page(page);
3408			rp->rbr_refill_pending++;
3409		}
3410
3411		index = NEXT_RCR(rp, index);
3412		if (!(val & RCR_ENTRY_MULTI))
3413			break;
3414
3415	}
3416	rp->rcr_index = index;
3417
3418	return num_rcr;
3419}
3420
3421static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3422			      struct rx_ring_info *rp)
3423{
3424	unsigned int index = rp->rcr_index;
3425	struct rx_pkt_hdr1 *rh;
3426	struct sk_buff *skb;
3427	int len, num_rcr;
3428
3429	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
3430	if (unlikely(!skb))
3431		return niu_rx_pkt_ignore(np, rp);
3432
3433	num_rcr = 0;
3434	while (1) {
3435		struct page *page, **link;
3436		u32 rcr_size, append_size;
3437		u64 addr, val, off;
3438
3439		num_rcr++;
3440
3441		val = le64_to_cpup(&rp->rcr[index]);
3442
3443		len = (val & RCR_ENTRY_L2_LEN) >>
3444			RCR_ENTRY_L2_LEN_SHIFT;
3445		len -= ETH_FCS_LEN;
3446
3447		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3448			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3449		page = niu_find_rxpage(rp, addr, &link);
3450
3451		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3452					 RCR_ENTRY_PKTBUFSZ_SHIFT];
3453
3454		off = addr & ~PAGE_MASK;
3455		append_size = rcr_size;
3456		if (num_rcr == 1) {
3457			int ptype;
3458
3459			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
3460			if ((ptype == RCR_PKT_TYPE_TCP ||
3461			     ptype == RCR_PKT_TYPE_UDP) &&
3462			    !(val & (RCR_ENTRY_NOPORT |
3463				     RCR_ENTRY_ERROR)))
3464				skb->ip_summed = CHECKSUM_UNNECESSARY;
3465			else
3466				skb_checksum_none_assert(skb);
3467		} else if (!(val & RCR_ENTRY_MULTI))
3468			append_size = len - skb->len;
3469
3470		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
3471		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
3472			*link = (struct page *) page->mapping;
3473			np->ops->unmap_page(np->device, page->index,
3474					    PAGE_SIZE, DMA_FROM_DEVICE);
3475			page->index = 0;
3476			page->mapping = NULL;
3477			rp->rbr_refill_pending++;
3478		} else
3479			get_page(page);
3480
3481		index = NEXT_RCR(rp, index);
3482		if (!(val & RCR_ENTRY_MULTI))
3483			break;
3484
3485	}
3486	rp->rcr_index = index;
3487
3488	len += sizeof(*rh);
3489	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
3490	__pskb_pull_tail(skb, len);
3491
3492	rh = (struct rx_pkt_hdr1 *) skb->data;
3493	if (np->dev->features & NETIF_F_RXHASH)
3494		skb_set_hash(skb,
3495			     ((u32)rh->hashval2_0 << 24 |
3496			      (u32)rh->hashval2_1 << 16 |
3497			      (u32)rh->hashval1_1 << 8 |
3498			      (u32)rh->hashval1_2 << 0),
3499			     PKT_HASH_TYPE_L3);
3500	skb_pull(skb, sizeof(*rh));
3501
3502	rp->rx_packets++;
3503	rp->rx_bytes += skb->len;
3504
3505	skb->protocol = eth_type_trans(skb, np->dev);
3506	skb_record_rx_queue(skb, rp->rx_channel);
3507	napi_gro_receive(napi, skb);
3508
3509	return num_rcr;
3510}
3511
3512static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3513{
3514	int blocks_per_page = rp->rbr_blocks_per_page;
3515	int err, index = rp->rbr_index;
3516
3517	err = 0;
3518	while (index < (rp->rbr_table_size - blocks_per_page)) {
3519		err = niu_rbr_add_page(np, rp, mask, index);
3520		if (unlikely(err))
3521			break;
3522
3523		index += blocks_per_page;
3524	}
3525
3526	rp->rbr_index = index;
3527	return err;
3528}
3529
3530static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
3531{
3532	int i;
3533
3534	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
3535		struct page *page;
3536
3537		page = rp->rxhash[i];
3538		while (page) {
3539			struct page *next = (struct page *) page->mapping;
3540			u64 base = page->index;
3541
3542			np->ops->unmap_page(np->device, base, PAGE_SIZE,
3543					    DMA_FROM_DEVICE);
3544			page->index = 0;
3545			page->mapping = NULL;
3546
3547			__free_page(page);
3548
3549			page = next;
3550		}
3551	}
3552
3553	for (i = 0; i < rp->rbr_table_size; i++)
3554		rp->rbr[i] = cpu_to_le32(0);
3555	rp->rbr_index = 0;
3556}
3557
3558static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3559{
3560	struct tx_buff_info *tb = &rp->tx_buffs[idx];
3561	struct sk_buff *skb = tb->skb;
3562	struct tx_pkt_hdr *tp;
3563	u64 tx_flags;
3564	int i, len;
3565
3566	tp = (struct tx_pkt_hdr *) skb->data;
3567	tx_flags = le64_to_cpup(&tp->flags);
3568
3569	rp->tx_packets++;
3570	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
3571			 ((tx_flags & TXHDR_PAD) / 2));
3572
3573	len = skb_headlen(skb);
3574	np->ops->unmap_single(np->device, tb->mapping,
3575			      len, DMA_TO_DEVICE);
3576
3577	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
3578		rp->mark_pending--;
3579
3580	tb->skb = NULL;
3581	do {
3582		idx = NEXT_TX(rp, idx);
3583		len -= MAX_TX_DESC_LEN;
3584	} while (len > 0);
3585
3586	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3587		tb = &rp->tx_buffs[idx];
3588		BUG_ON(tb->skb != NULL);
3589		np->ops->unmap_page(np->device, tb->mapping,
3590				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
3591				    DMA_TO_DEVICE);
3592		idx = NEXT_TX(rp, idx);
3593	}
3594
3595	dev_kfree_skb(skb);
3596
3597	return idx;
3598}
3599
3600#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
3601
3602static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3603{
3604	struct netdev_queue *txq;
3605	u16 pkt_cnt, tmp;
3606	int cons, index;
3607	u64 cs;
3608
3609	index = (rp - np->tx_rings);
3610	txq = netdev_get_tx_queue(np->dev, index);
3611
3612	cs = rp->tx_cs;
3613	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
3614		goto out;
3615
3616	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
3617	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
3618		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
3619
3620	rp->last_pkt_cnt = tmp;
3621
3622	cons = rp->cons;
3623
3624	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
3625		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
3626
3627	while (pkt_cnt--)
3628		cons = release_tx_packet(np, rp, cons);
3629
3630	rp->cons = cons;
3631	smp_mb();
3632
3633out:
3634	if (unlikely(netif_tx_queue_stopped(txq) &&
3635		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
3636		__netif_tx_lock(txq, smp_processor_id());
3637		if (netif_tx_queue_stopped(txq) &&
3638		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
3639			netif_tx_wake_queue(txq);
3640		__netif_tx_unlock(txq);
3641	}
3642}
3643
3644static inline void niu_sync_rx_discard_stats(struct niu *np,
3645					     struct rx_ring_info *rp,
3646					     const int limit)
3647{
3648	/* This elaborate scheme is needed for reading the RX discard
3649	 * counters, as they are only 16-bit and can overflow quickly,
3650	 * and because the overflow indication bit is not usable as
3651	 * the counter value does not wrap, but remains at max value
3652	 * 0xFFFF.
3653	 *
3654	 * In theory and in practice counters can be lost in between
3655	 * reading nr64() and clearing the counter nw64().  For this
3656	 * reason, the number of counter clearings nw64() is
3657	 * limited/reduced though the limit parameter.
3658	 */
3659	int rx_channel = rp->rx_channel;
3660	u32 misc, wred;
3661
3662	/* RXMISC (Receive Miscellaneous Discard Count), covers the
3663	 * following discard events: IPP (Input Port Process),
3664	 * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
3665	 * Block Ring) prefetch buffer is empty.
3666	 */
3667	misc = nr64(RXMISC(rx_channel));
3668	if (unlikely((misc & RXMISC_COUNT) > limit)) {
3669		nw64(RXMISC(rx_channel), 0);
3670		rp->rx_errors += misc & RXMISC_COUNT;
3671
3672		if (unlikely(misc & RXMISC_OFLOW))
3673			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
3674				rx_channel);
3675
3676		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3677			     "rx-%d: MISC drop=%u over=%u\n",
3678			     rx_channel, misc, misc-limit);
3679	}
3680
3681	/* WRED (Weighted Random Early Discard) by hardware */
3682	wred = nr64(RED_DIS_CNT(rx_channel));
3683	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
3684		nw64(RED_DIS_CNT(rx_channel), 0);
3685		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
3686
3687		if (unlikely(wred & RED_DIS_CNT_OFLOW))
3688			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
3689
3690		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3691			     "rx-%d: WRED drop=%u over=%u\n",
3692			     rx_channel, wred, wred-limit);
3693	}
3694}
3695
3696static int niu_rx_work(struct napi_struct *napi, struct niu *np,
3697		       struct rx_ring_info *rp, int budget)
3698{
3699	int qlen, rcr_done = 0, work_done = 0;
3700	struct rxdma_mailbox *mbox = rp->mbox;
3701	u64 stat;
3702
3703#if 1
3704	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3705	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
3706#else
3707	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
3708	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
3709#endif
3710	mbox->rx_dma_ctl_stat = 0;
3711	mbox->rcrstat_a = 0;
3712
3713	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
3714		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
3715		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
3716
3717	rcr_done = work_done = 0;
3718	qlen = min(qlen, budget);
3719	while (work_done < qlen) {
3720		rcr_done += niu_process_rx_pkt(napi, np, rp);
3721		work_done++;
3722	}
3723
3724	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
3725		unsigned int i;
3726
3727		for (i = 0; i < rp->rbr_refill_pending; i++)
3728			niu_rbr_refill(np, rp, GFP_ATOMIC);
3729		rp->rbr_refill_pending = 0;
3730	}
3731
3732	stat = (RX_DMA_CTL_STAT_MEX |
3733		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
3734		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
3735
3736	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
3737
3738	/* Only sync discards stats when qlen indicate potential for drops */
3739	if (qlen > 10)
3740		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
3741
3742	return work_done;
3743}
3744
3745static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
3746{
3747	u64 v0 = lp->v0;
3748	u32 tx_vec = (v0 >> 32);
3749	u32 rx_vec = (v0 & 0xffffffff);
3750	int i, work_done = 0;
3751
3752	netif_printk(np, intr, KERN_DEBUG, np->dev,
3753		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
3754
3755	for (i = 0; i < np->num_tx_rings; i++) {
3756		struct tx_ring_info *rp = &np->tx_rings[i];
3757		if (tx_vec & (1 << rp->tx_channel))
3758			niu_tx_work(np, rp);
3759		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
3760	}
3761
3762	for (i = 0; i < np->num_rx_rings; i++) {
3763		struct rx_ring_info *rp = &np->rx_rings[i];
3764
3765		if (rx_vec & (1 << rp->rx_channel)) {
3766			int this_work_done;
3767
3768			this_work_done = niu_rx_work(&lp->napi, np, rp,
3769						     budget);
3770
3771			budget -= this_work_done;
3772			work_done += this_work_done;
3773		}
3774		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
3775	}
3776
3777	return work_done;
3778}
3779
3780static int niu_poll(struct napi_struct *napi, int budget)
3781{
3782	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
3783	struct niu *np = lp->np;
3784	int work_done;
3785
3786	work_done = niu_poll_core(np, lp, budget);
3787
3788	if (work_done < budget) {
3789		napi_complete(napi);
3790		niu_ldg_rearm(np, lp, 1);
3791	}
3792	return work_done;
3793}
3794
3795static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3796				  u64 stat)
3797{
3798	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
3799
3800	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
3801		pr_cont("RBR_TMOUT ");
3802	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
3803		pr_cont("RSP_CNT ");
3804	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
3805		pr_cont("BYTE_EN_BUS ");
3806	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
3807		pr_cont("RSP_DAT ");
3808	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
3809		pr_cont("RCR_ACK ");
3810	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
3811		pr_cont("RCR_SHA_PAR ");
3812	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
3813		pr_cont("RBR_PRE_PAR ");
3814	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
3815		pr_cont("CONFIG ");
3816	if (stat & RX_DMA_CTL_STAT_RCRINCON)
3817		pr_cont("RCRINCON ");
3818	if (stat & RX_DMA_CTL_STAT_RCRFULL)
3819		pr_cont("RCRFULL ");
3820	if (stat & RX_DMA_CTL_STAT_RBRFULL)
3821		pr_cont("RBRFULL ");
3822	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
3823		pr_cont("RBRLOGPAGE ");
3824	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
3825		pr_cont("CFIGLOGPAGE ");
3826	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
3827		pr_cont("DC_FIDO ");
3828
3829	pr_cont(")\n");
3830}
3831
3832static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3833{
3834	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3835	int err = 0;
3836
3837
3838	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
3839		    RX_DMA_CTL_STAT_PORT_FATAL))
3840		err = -EINVAL;
3841
3842	if (err) {
3843		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
3844			   rp->rx_channel,
3845			   (unsigned long long) stat);
3846
3847		niu_log_rxchan_errors(np, rp, stat);
3848	}
3849
3850	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3851	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
3852
3853	return err;
3854}
3855
3856static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3857				  u64 cs)
3858{
3859	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
3860
3861	if (cs & TX_CS_MBOX_ERR)
3862		pr_cont("MBOX ");
3863	if (cs & TX_CS_PKT_SIZE_ERR)
3864		pr_cont("PKT_SIZE ");
3865	if (cs & TX_CS_TX_RING_OFLOW)
3866		pr_cont("TX_RING_OFLOW ");
3867	if (cs & TX_CS_PREF_BUF_PAR_ERR)
3868		pr_cont("PREF_BUF_PAR ");
3869	if (cs & TX_CS_NACK_PREF)
3870		pr_cont("NACK_PREF ");
3871	if (cs & TX_CS_NACK_PKT_RD)
3872		pr_cont("NACK_PKT_RD ");
3873	if (cs & TX_CS_CONF_PART_ERR)
3874		pr_cont("CONF_PART ");
3875	if (cs & TX_CS_PKT_PRT_ERR)
3876		pr_cont("PKT_PTR ");
3877
3878	pr_cont(")\n");
3879}
3880
3881static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3882{
3883	u64 cs, logh, logl;
3884
3885	cs = nr64(TX_CS(rp->tx_channel));
3886	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
3887	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
3888
3889	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
3890		   rp->tx_channel,
3891		   (unsigned long long)cs,
3892		   (unsigned long long)logh,
3893		   (unsigned long long)logl);
3894
3895	niu_log_txchan_errors(np, rp, cs);
3896
3897	return -ENODEV;
3898}
3899
3900static int niu_mif_interrupt(struct niu *np)
3901{
3902	u64 mif_status = nr64(MIF_STATUS);
3903	int phy_mdint = 0;
3904
3905	if (np->flags & NIU_FLAGS_XMAC) {
3906		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
3907
3908		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
3909			phy_mdint = 1;
3910	}
3911
3912	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
3913		   (unsigned long long)mif_status, phy_mdint);
3914
3915	return -ENODEV;
3916}
3917
3918static void niu_xmac_interrupt(struct niu *np)
3919{
3920	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
3921	u64 val;
3922
3923	val = nr64_mac(XTXMAC_STATUS);
3924	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
3925		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
3926	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
3927		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
3928	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
3929		mp->tx_fifo_errors++;
3930	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
3931		mp->tx_overflow_errors++;
3932	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
3933		mp->tx_max_pkt_size_errors++;
3934	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
3935		mp->tx_underflow_errors++;
3936
3937	val = nr64_mac(XRXMAC_STATUS);
3938	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
3939		mp->rx_local_faults++;
3940	if (val & XRXMAC_STATUS_RFLT_DET)
3941		mp->rx_remote_faults++;
3942	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
3943		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
3944	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
3945		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
3946	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
3947		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
3948	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
3949		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
3950	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3951		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3952	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3953		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3954	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
3955		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
3956	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
3957		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
3958	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
3959		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
3960	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
3961		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
3962	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
3963		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
3964	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
3965		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
3966	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
3967		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
3968	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
3969		mp->rx_octets += RXMAC_BT_CNT_COUNT;
3970	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
3971		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
3972	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
3973		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
3974	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
3975		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
3976	if (val & XRXMAC_STATUS_RXUFLOW)
3977		mp->rx_underflows++;
3978	if (val & XRXMAC_STATUS_RXOFLOW)
3979		mp->rx_overflows++;
3980
3981	val = nr64_mac(XMAC_FC_STAT);
3982	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
3983		mp->pause_off_state++;
3984	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
3985		mp->pause_on_state++;
3986	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
3987		mp->pause_received++;
3988}
3989
3990static void niu_bmac_interrupt(struct niu *np)
3991{
3992	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
3993	u64 val;
3994
3995	val = nr64_mac(BTXMAC_STATUS);
3996	if (val & BTXMAC_STATUS_UNDERRUN)
3997		mp->tx_underflow_errors++;
3998	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
3999		mp->tx_max_pkt_size_errors++;
4000	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
4001		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
4002	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
4003		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
4004
4005	val = nr64_mac(BRXMAC_STATUS);
4006	if (val & BRXMAC_STATUS_OVERFLOW)
4007		mp->rx_overflows++;
4008	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
4009		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
4010	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
4011		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
4012	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
4013		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
4014	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
4015		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
4016
4017	val = nr64_mac(BMAC_CTRL_STATUS);
4018	if (val & BMAC_CTRL_STATUS_NOPAUSE)
4019		mp->pause_off_state++;
4020	if (val & BMAC_CTRL_STATUS_PAUSE)
4021		mp->pause_on_state++;
4022	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
4023		mp->pause_received++;
4024}
4025
4026static int niu_mac_interrupt(struct niu *np)
4027{
4028	if (np->flags & NIU_FLAGS_XMAC)
4029		niu_xmac_interrupt(np);
4030	else
4031		niu_bmac_interrupt(np);
4032
4033	return 0;
4034}
4035
4036static void niu_log_device_error(struct niu *np, u64 stat)
4037{
4038	netdev_err(np->dev, "Core device errors ( ");
4039
4040	if (stat & SYS_ERR_MASK_META2)
4041		pr_cont("META2 ");
4042	if (stat & SYS_ERR_MASK_META1)
4043		pr_cont("META1 ");
4044	if (stat & SYS_ERR_MASK_PEU)
4045		pr_cont("PEU ");
4046	if (stat & SYS_ERR_MASK_TXC)
4047		pr_cont("TXC ");
4048	if (stat & SYS_ERR_MASK_RDMC)
4049		pr_cont("RDMC ");
4050	if (stat & SYS_ERR_MASK_TDMC)
4051		pr_cont("TDMC ");
4052	if (stat & SYS_ERR_MASK_ZCP)
4053		pr_cont("ZCP ");
4054	if (stat & SYS_ERR_MASK_FFLP)
4055		pr_cont("FFLP ");
4056	if (stat & SYS_ERR_MASK_IPP)
4057		pr_cont("IPP ");
4058	if (stat & SYS_ERR_MASK_MAC)
4059		pr_cont("MAC ");
4060	if (stat & SYS_ERR_MASK_SMX)
4061		pr_cont("SMX ");
4062
4063	pr_cont(")\n");
4064}
4065
4066static int niu_device_error(struct niu *np)
4067{
4068	u64 stat = nr64(SYS_ERR_STAT);
4069
4070	netdev_err(np->dev, "Core device error, stat[%llx]\n",
4071		   (unsigned long long)stat);
4072
4073	niu_log_device_error(np, stat);
4074
4075	return -ENODEV;
4076}
4077
4078static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
4079			      u64 v0, u64 v1, u64 v2)
4080{
4081
4082	int i, err = 0;
4083
4084	lp->v0 = v0;
4085	lp->v1 = v1;
4086	lp->v2 = v2;
4087
4088	if (v1 & 0x00000000ffffffffULL) {
4089		u32 rx_vec = (v1 & 0xffffffff);
4090
4091		for (i = 0; i < np->num_rx_rings; i++) {
4092			struct rx_ring_info *rp = &np->rx_rings[i];
4093
4094			if (rx_vec & (1 << rp->rx_channel)) {
4095				int r = niu_rx_error(np, rp);
4096				if (r) {
4097					err = r;
4098				} else {
4099					if (!v0)
4100						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
4101						     RX_DMA_CTL_STAT_MEX);
4102				}
4103			}
4104		}
4105	}
4106	if (v1 & 0x7fffffff00000000ULL) {
4107		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
4108
4109		for (i = 0; i < np->num_tx_rings; i++) {
4110			struct tx_ring_info *rp = &np->tx_rings[i];
4111
4112			if (tx_vec & (1 << rp->tx_channel)) {
4113				int r = niu_tx_error(np, rp);
4114				if (r)
4115					err = r;
4116			}
4117		}
4118	}
4119	if ((v0 | v1) & 0x8000000000000000ULL) {
4120		int r = niu_mif_interrupt(np);
4121		if (r)
4122			err = r;
4123	}
4124	if (v2) {
4125		if (v2 & 0x01ef) {
4126			int r = niu_mac_interrupt(np);
4127			if (r)
4128				err = r;
4129		}
4130		if (v2 & 0x0210) {
4131			int r = niu_device_error(np);
4132			if (r)
4133				err = r;
4134		}
4135	}
4136
4137	if (err)
4138		niu_enable_interrupts(np, 0);
4139
4140	return err;
4141}
4142
4143static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
4144			    int ldn)
4145{
4146	struct rxdma_mailbox *mbox = rp->mbox;
4147	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
4148
4149	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
4150		      RX_DMA_CTL_STAT_RCRTO);
4151	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
4152
4153	netif_printk(np, intr, KERN_DEBUG, np->dev,
4154		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
4155}
4156
4157static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
4158			    int ldn)
4159{
4160	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
4161
4162	netif_printk(np, intr, KERN_DEBUG, np->dev,
4163		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
4164}
4165
4166static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
4167{
4168	struct niu_parent *parent = np->parent;
4169	u32 rx_vec, tx_vec;
4170	int i;
4171
4172	tx_vec = (v0 >> 32);
4173	rx_vec = (v0 & 0xffffffff);
4174
4175	for (i = 0; i < np->num_rx_rings; i++) {
4176		struct rx_ring_info *rp = &np->rx_rings[i];
4177		int ldn = LDN_RXDMA(rp->rx_channel);
4178
4179		if (parent->ldg_map[ldn] != ldg)
4180			continue;
4181
4182		nw64(LD_IM0(ldn), LD_IM0_MASK);
4183		if (rx_vec & (1 << rp->rx_channel))
4184			niu_rxchan_intr(np, rp, ldn);
4185	}
4186
4187	for (i = 0; i < np->num_tx_rings; i++) {
4188		struct tx_ring_info *rp = &np->tx_rings[i];
4189		int ldn = LDN_TXDMA(rp->tx_channel);
4190
4191		if (parent->ldg_map[ldn] != ldg)
4192			continue;
4193
4194		nw64(LD_IM0(ldn), LD_IM0_MASK);
4195		if (tx_vec & (1 << rp->tx_channel))
4196			niu_txchan_intr(np, rp, ldn);
4197	}
4198}
4199
4200static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
4201			      u64 v0, u64 v1, u64 v2)
4202{
4203	if (likely(napi_schedule_prep(&lp->napi))) {
4204		lp->v0 = v0;
4205		lp->v1 = v1;
4206		lp->v2 = v2;
4207		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
4208		__napi_schedule(&lp->napi);
4209	}
4210}
4211
4212static irqreturn_t niu_interrupt(int irq, void *dev_id)
4213{
4214	struct niu_ldg *lp = dev_id;
4215	struct niu *np = lp->np;
4216	int ldg = lp->ldg_num;
4217	unsigned long flags;
4218	u64 v0, v1, v2;
4219
4220	if (netif_msg_intr(np))
4221		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
4222		       __func__, lp, ldg);
4223
4224	spin_lock_irqsave(&np->lock, flags);
4225
4226	v0 = nr64(LDSV0(ldg));
4227	v1 = nr64(LDSV1(ldg));
4228	v2 = nr64(LDSV2(ldg));
4229
4230	if (netif_msg_intr(np))
4231		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
4232		       (unsigned long long) v0,
4233		       (unsigned long long) v1,
4234		       (unsigned long long) v2);
4235
4236	if (unlikely(!v0 && !v1 && !v2)) {
4237		spin_unlock_irqrestore(&np->lock, flags);
4238		return IRQ_NONE;
4239	}
4240
4241	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
4242		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
4243		if (err)
4244			goto out;
4245	}
4246	if (likely(v0 & ~((u64)1 << LDN_MIF)))
4247		niu_schedule_napi(np, lp, v0, v1, v2);
4248	else
4249		niu_ldg_rearm(np, lp, 1);
4250out:
4251	spin_unlock_irqrestore(&np->lock, flags);
4252
4253	return IRQ_HANDLED;
4254}
4255
4256static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
4257{
4258	if (rp->mbox) {
4259		np->ops->free_coherent(np->device,
4260				       sizeof(struct rxdma_mailbox),
4261				       rp->mbox, rp->mbox_dma);
4262		rp->mbox = NULL;
4263	}
4264	if (rp->rcr) {
4265		np->ops->free_coherent(np->device,
4266				       MAX_RCR_RING_SIZE * sizeof(__le64),
4267				       rp->rcr, rp->rcr_dma);
4268		rp->rcr = NULL;
4269		rp->rcr_table_size = 0;
4270		rp->rcr_index = 0;
4271	}
4272	if (rp->rbr) {
4273		niu_rbr_free(np, rp);
4274
4275		np->ops->free_coherent(np->device,
4276				       MAX_RBR_RING_SIZE * sizeof(__le32),
4277				       rp->rbr, rp->rbr_dma);
4278		rp->rbr = NULL;
4279		rp->rbr_table_size = 0;
4280		rp->rbr_index = 0;
4281	}
4282	kfree(rp->rxhash);
4283	rp->rxhash = NULL;
4284}
4285
4286static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
4287{
4288	if (rp->mbox) {
4289		np->ops->free_coherent(np->device,
4290				       sizeof(struct txdma_mailbox),
4291				       rp->mbox, rp->mbox_dma);
4292		rp->mbox = NULL;
4293	}
4294	if (rp->descr) {
4295		int i;
4296
4297		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
4298			if (rp->tx_buffs[i].skb)
4299				(void) release_tx_packet(np, rp, i);
4300		}
4301
4302		np->ops->free_coherent(np->device,
4303				       MAX_TX_RING_SIZE * sizeof(__le64),
4304				       rp->descr, rp->descr_dma);
4305		rp->descr = NULL;
4306		rp->pending = 0;
4307		rp->prod = 0;
4308		rp->cons = 0;
4309		rp->wrap_bit = 0;
4310	}
4311}
4312
4313static void niu_free_channels(struct niu *np)
4314{
4315	int i;
4316
4317	if (np->rx_rings) {
4318		for (i = 0; i < np->num_rx_rings; i++) {
4319			struct rx_ring_info *rp = &np->rx_rings[i];
4320
4321			niu_free_rx_ring_info(np, rp);
4322		}
4323		kfree(np->rx_rings);
4324		np->rx_rings = NULL;
4325		np->num_rx_rings = 0;
4326	}
4327
4328	if (np->tx_rings) {
4329		for (i = 0; i < np->num_tx_rings; i++) {
4330			struct tx_ring_info *rp = &np->tx_rings[i];
4331
4332			niu_free_tx_ring_info(np, rp);
4333		}
4334		kfree(np->tx_rings);
4335		np->tx_rings = NULL;
4336		np->num_tx_rings = 0;
4337	}
4338}
4339
4340static int niu_alloc_rx_ring_info(struct niu *np,
4341				  struct rx_ring_info *rp)
4342{
4343	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
4344
4345	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
4346			     GFP_KERNEL);
4347	if (!rp->rxhash)
4348		return -ENOMEM;
4349
4350	rp->mbox = np->ops->alloc_coherent(np->device,
4351					   sizeof(struct rxdma_mailbox),
4352					   &rp->mbox_dma, GFP_KERNEL);
4353	if (!rp->mbox)
4354		return -ENOMEM;
4355	if ((unsigned long)rp->mbox & (64UL - 1)) {
4356		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
4357			   rp->mbox);
4358		return -EINVAL;
4359	}
4360
4361	rp->rcr = np->ops->alloc_coherent(np->device,
4362					  MAX_RCR_RING_SIZE * sizeof(__le64),
4363					  &rp->rcr_dma, GFP_KERNEL);
4364	if (!rp->rcr)
4365		return -ENOMEM;
4366	if ((unsigned long)rp->rcr & (64UL - 1)) {
4367		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
4368			   rp->rcr);
4369		return -EINVAL;
4370	}
4371	rp->rcr_table_size = MAX_RCR_RING_SIZE;
4372	rp->rcr_index = 0;
4373
4374	rp->rbr = np->ops->alloc_coherent(np->device,
4375					  MAX_RBR_RING_SIZE * sizeof(__le32),
4376					  &rp->rbr_dma, GFP_KERNEL);
4377	if (!rp->rbr)
4378		return -ENOMEM;
4379	if ((unsigned long)rp->rbr & (64UL - 1)) {
4380		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
4381			   rp->rbr);
4382		return -EINVAL;
4383	}
4384	rp->rbr_table_size = MAX_RBR_RING_SIZE;
4385	rp->rbr_index = 0;
4386	rp->rbr_pending = 0;
4387
4388	return 0;
4389}
4390
4391static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
4392{
4393	int mtu = np->dev->mtu;
4394
4395	/* These values are recommended by the HW designers for fair
4396	 * utilization of DRR amongst the rings.
4397	 */
4398	rp->max_burst = mtu + 32;
4399	if (rp->max_burst > 4096)
4400		rp->max_burst = 4096;
4401}
4402
4403static int niu_alloc_tx_ring_info(struct niu *np,
4404				  struct tx_ring_info *rp)
4405{
4406	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
4407
4408	rp->mbox = np->ops->alloc_coherent(np->device,
4409					   sizeof(struct txdma_mailbox),
4410					   &rp->mbox_dma, GFP_KERNEL);
4411	if (!rp->mbox)
4412		return -ENOMEM;
4413	if ((unsigned long)rp->mbox & (64UL - 1)) {
4414		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
4415			   rp->mbox);
4416		return -EINVAL;
4417	}
4418
4419	rp->descr = np->ops->alloc_coherent(np->device,
4420					    MAX_TX_RING_SIZE * sizeof(__le64),
4421					    &rp->descr_dma, GFP_KERNEL);
4422	if (!rp->descr)
4423		return -ENOMEM;
4424	if ((unsigned long)rp->descr & (64UL - 1)) {
4425		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
4426			   rp->descr);
4427		return -EINVAL;
4428	}
4429
4430	rp->pending = MAX_TX_RING_SIZE;
4431	rp->prod = 0;
4432	rp->cons = 0;
4433	rp->wrap_bit = 0;
4434
4435	/* XXX make these configurable... XXX */
4436	rp->mark_freq = rp->pending / 4;
4437
4438	niu_set_max_burst(np, rp);
4439
4440	return 0;
4441}
4442
4443static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
4444{
4445	u16 bss;
4446
4447	bss = min(PAGE_SHIFT, 15);
4448
4449	rp->rbr_block_size = 1 << bss;
4450	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
4451
4452	rp->rbr_sizes[0] = 256;
4453	rp->rbr_sizes[1] = 1024;
4454	if (np->dev->mtu > ETH_DATA_LEN) {
4455		switch (PAGE_SIZE) {
4456		case 4 * 1024:
4457			rp->rbr_sizes[2] = 4096;
4458			break;
4459
4460		default:
4461			rp->rbr_sizes[2] = 8192;
4462			break;
4463		}
4464	} else {
4465		rp->rbr_sizes[2] = 2048;
4466	}
4467	rp->rbr_sizes[3] = rp->rbr_block_size;
4468}
4469
4470static int niu_alloc_channels(struct niu *np)
4471{
4472	struct niu_parent *parent = np->parent;
4473	int first_rx_channel, first_tx_channel;
4474	int num_rx_rings, num_tx_rings;
4475	struct rx_ring_info *rx_rings;
4476	struct tx_ring_info *tx_rings;
4477	int i, port, err;
4478
4479	port = np->port;
4480	first_rx_channel = first_tx_channel = 0;
4481	for (i = 0; i < port; i++) {
4482		first_rx_channel += parent->rxchan_per_port[i];
4483		first_tx_channel += parent->txchan_per_port[i];
4484	}
4485
4486	num_rx_rings = parent->rxchan_per_port[port];
4487	num_tx_rings = parent->txchan_per_port[port];
4488
4489	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
4490			   GFP_KERNEL);
4491	err = -ENOMEM;
4492	if (!rx_rings)
4493		goto out_err;
4494
4495	np->num_rx_rings = num_rx_rings;
4496	smp_wmb();
4497	np->rx_rings = rx_rings;
4498
4499	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
4500
4501	for (i = 0; i < np->num_rx_rings; i++) {
4502		struct rx_ring_info *rp = &np->rx_rings[i];
4503
4504		rp->np = np;
4505		rp->rx_channel = first_rx_channel + i;
4506
4507		err = niu_alloc_rx_ring_info(np, rp);
4508		if (err)
4509			goto out_err;
4510
4511		niu_size_rbr(np, rp);
4512
4513		/* XXX better defaults, configurable, etc... XXX */
4514		rp->nonsyn_window = 64;
4515		rp->nonsyn_threshold = rp->rcr_table_size - 64;
4516		rp->syn_window = 64;
4517		rp->syn_threshold = rp->rcr_table_size - 64;
4518		rp->rcr_pkt_threshold = 16;
4519		rp->rcr_timeout = 8;
4520		rp->rbr_kick_thresh = RBR_REFILL_MIN;
4521		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
4522			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
4523
4524		err = niu_rbr_fill(np, rp, GFP_KERNEL);
4525		if (err)
4526			return err;
4527	}
4528
4529	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
4530			   GFP_KERNEL);
4531	err = -ENOMEM;
4532	if (!tx_rings)
4533		goto out_err;
4534
4535	np->num_tx_rings = num_tx_rings;
4536	smp_wmb();
4537	np->tx_rings = tx_rings;
4538
4539	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
4540
4541	for (i = 0; i < np->num_tx_rings; i++) {
4542		struct tx_ring_info *rp = &np->tx_rings[i];
4543
4544		rp->np = np;
4545		rp->tx_channel = first_tx_channel + i;
4546
4547		err = niu_alloc_tx_ring_info(np, rp);
4548		if (err)
4549			goto out_err;
4550	}
4551
4552	return 0;
4553
4554out_err:
4555	niu_free_channels(np);
4556	return err;
4557}
4558
4559static int niu_tx_cs_sng_poll(struct niu *np, int channel)
4560{
4561	int limit = 1000;
4562
4563	while (--limit > 0) {
4564		u64 val = nr64(TX_CS(channel));
4565		if (val & TX_CS_SNG_STATE)
4566			return 0;
4567	}
4568	return -ENODEV;
4569}
4570
4571static int niu_tx_channel_stop(struct niu *np, int channel)
4572{
4573	u64 val = nr64(TX_CS(channel));
4574
4575	val |= TX_CS_STOP_N_GO;
4576	nw64(TX_CS(channel), val);
4577
4578	return niu_tx_cs_sng_poll(np, channel);
4579}
4580
4581static int niu_tx_cs_reset_poll(struct niu *np, int channel)
4582{
4583	int limit = 1000;
4584
4585	while (--limit > 0) {
4586		u64 val = nr64(TX_CS(channel));
4587		if (!(val & TX_CS_RST))
4588			return 0;
4589	}
4590	return -ENODEV;
4591}
4592
4593static int niu_tx_channel_reset(struct niu *np, int channel)
4594{
4595	u64 val = nr64(TX_CS(channel));
4596	int err;
4597
4598	val |= TX_CS_RST;
4599	nw64(TX_CS(channel), val);
4600
4601	err = niu_tx_cs_reset_poll(np, channel);
4602	if (!err)
4603		nw64(TX_RING_KICK(channel), 0);
4604
4605	return err;
4606}
4607
4608static int niu_tx_channel_lpage_init(struct niu *np, int channel)
4609{
4610	u64 val;
4611
4612	nw64(TX_LOG_MASK1(channel), 0);
4613	nw64(TX_LOG_VAL1(channel), 0);
4614	nw64(TX_LOG_MASK2(channel), 0);
4615	nw64(TX_LOG_VAL2(channel), 0);
4616	nw64(TX_LOG_PAGE_RELO1(channel), 0);
4617	nw64(TX_LOG_PAGE_RELO2(channel), 0);
4618	nw64(TX_LOG_PAGE_HDL(channel), 0);
4619
4620	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
4621	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
4622	nw64(TX_LOG_PAGE_VLD(channel), val);
4623
4624	/* XXX TXDMA 32bit mode? XXX */
4625
4626	return 0;
4627}
4628
4629static void niu_txc_enable_port(struct niu *np, int on)
4630{
4631	unsigned long flags;
4632	u64 val, mask;
4633
4634	niu_lock_parent(np, flags);
4635	val = nr64(TXC_CONTROL);
4636	mask = (u64)1 << np->port;
4637	if (on) {
4638		val |= TXC_CONTROL_ENABLE | mask;
4639	} else {
4640		val &= ~mask;
4641		if ((val & ~TXC_CONTROL_ENABLE) == 0)
4642			val &= ~TXC_CONTROL_ENABLE;
4643	}
4644	nw64(TXC_CONTROL, val);
4645	niu_unlock_parent(np, flags);
4646}
4647
4648static void niu_txc_set_imask(struct niu *np, u64 imask)
4649{
4650	unsigned long flags;
4651	u64 val;
4652
4653	niu_lock_parent(np, flags);
4654	val = nr64(TXC_INT_MASK);
4655	val &= ~TXC_INT_MASK_VAL(np->port);
4656	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
4657	niu_unlock_parent(np, flags);
4658}
4659
4660static void niu_txc_port_dma_enable(struct niu *np, int on)
4661{
4662	u64 val = 0;
4663
4664	if (on) {
4665		int i;
4666
4667		for (i = 0; i < np->num_tx_rings; i++)
4668			val |= (1 << np->tx_rings[i].tx_channel);
4669	}
4670	nw64(TXC_PORT_DMA(np->port), val);
4671}
4672
4673static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4674{
4675	int err, channel = rp->tx_channel;
4676	u64 val, ring_len;
4677
4678	err = niu_tx_channel_stop(np, channel);
4679	if (err)
4680		return err;
4681
4682	err = niu_tx_channel_reset(np, channel);
4683	if (err)
4684		return err;
4685
4686	err = niu_tx_channel_lpage_init(np, channel);
4687	if (err)
4688		return err;
4689
4690	nw64(TXC_DMA_MAX(channel), rp->max_burst);
4691	nw64(TX_ENT_MSK(channel), 0);
4692
4693	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
4694			      TX_RNG_CFIG_STADDR)) {
4695		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
4696			   channel, (unsigned long long)rp->descr_dma);
4697		return -EINVAL;
4698	}
4699
4700	/* The length field in TX_RNG_CFIG is measured in 64-byte
4701	 * blocks.  rp->pending is the number of TX descriptors in
4702	 * our ring, 8 bytes each, thus we divide by 8 bytes more
4703	 * to get the proper value the chip wants.
4704	 */
4705	ring_len = (rp->pending / 8);
4706
4707	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
4708	       rp->descr_dma);
4709	nw64(TX_RNG_CFIG(channel), val);
4710
4711	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
4712	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
4713		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
4714			    channel, (unsigned long long)rp->mbox_dma);
4715		return -EINVAL;
4716	}
4717	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
4718	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
4719
4720	nw64(TX_CS(channel), 0);
4721
4722	rp->last_pkt_cnt = 0;
4723
4724	return 0;
4725}
4726
4727static void niu_init_rdc_groups(struct niu *np)
4728{
4729	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
4730	int i, first_table_num = tp->first_table_num;
4731
4732	for (i = 0; i < tp->num_tables; i++) {
4733		struct rdc_table *tbl = &tp->tables[i];
4734		int this_table = first_table_num + i;
4735		int slot;
4736
4737		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
4738			nw64(RDC_TBL(this_table, slot),
4739			     tbl->rxdma_channel[slot]);
4740	}
4741
4742	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
4743}
4744
4745static void niu_init_drr_weight(struct niu *np)
4746{
4747	int type = phy_decode(np->parent->port_phy, np->port);
4748	u64 val;
4749
4750	switch (type) {
4751	case PORT_TYPE_10G:
4752		val = PT_DRR_WEIGHT_DEFAULT_10G;
4753		break;
4754
4755	case PORT_TYPE_1G:
4756	default:
4757		val = PT_DRR_WEIGHT_DEFAULT_1G;
4758		break;
4759	}
4760	nw64(PT_DRR_WT(np->port), val);
4761}
4762
4763static int niu_init_hostinfo(struct niu *np)
4764{
4765	struct niu_parent *parent = np->parent;
4766	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
4767	int i, err, num_alt = niu_num_alt_addr(np);
4768	int first_rdc_table = tp->first_table_num;
4769
4770	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4771	if (err)
4772		return err;
4773
4774	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4775	if (err)
4776		return err;
4777
4778	for (i = 0; i < num_alt; i++) {
4779		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4780		if (err)
4781			return err;
4782	}
4783
4784	return 0;
4785}
4786
4787static int niu_rx_channel_reset(struct niu *np, int channel)
4788{
4789	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
4790				      RXDMA_CFIG1_RST, 1000, 10,
4791				      "RXDMA_CFIG1");
4792}
4793
4794static int niu_rx_channel_lpage_init(struct niu *np, int channel)
4795{
4796	u64 val;
4797
4798	nw64(RX_LOG_MASK1(channel), 0);
4799	nw64(RX_LOG_VAL1(channel), 0);
4800	nw64(RX_LOG_MASK2(channel), 0);
4801	nw64(RX_LOG_VAL2(channel), 0);
4802	nw64(RX_LOG_PAGE_RELO1(channel), 0);
4803	nw64(RX_LOG_PAGE_RELO2(channel), 0);
4804	nw64(RX_LOG_PAGE_HDL(channel), 0);
4805
4806	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
4807	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
4808	nw64(RX_LOG_PAGE_VLD(channel), val);
4809
4810	return 0;
4811}
4812
4813static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
4814{
4815	u64 val;
4816
4817	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
4818	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
4819	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
4820	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
4821	nw64(RDC_RED_PARA(rp->rx_channel), val);
4822}
4823
4824static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
4825{
4826	u64 val = 0;
4827
4828	*ret = 0;
4829	switch (rp->rbr_block_size) {
4830	case 4 * 1024:
4831		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
4832		break;
4833	case 8 * 1024:
4834		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
4835		break;
4836	case 16 * 1024:
4837		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
4838		break;
4839	case 32 * 1024:
4840		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
4841		break;
4842	default:
4843		return -EINVAL;
4844	}
4845	val |= RBR_CFIG_B_VLD2;
4846	switch (rp->rbr_sizes[2]) {
4847	case 2 * 1024:
4848		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
4849		break;
4850	case 4 * 1024:
4851		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
4852		break;
4853	case 8 * 1024:
4854		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
4855		break;
4856	case 16 * 1024:
4857		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
4858		break;
4859
4860	default:
4861		return -EINVAL;
4862	}
4863	val |= RBR_CFIG_B_VLD1;
4864	switch (rp->rbr_sizes[1]) {
4865	case 1 * 1024:
4866		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
4867		break;
4868	case 2 * 1024:
4869		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
4870		break;
4871	case 4 * 1024:
4872		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
4873		break;
4874	case 8 * 1024:
4875		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
4876		break;
4877
4878	default:
4879		return -EINVAL;
4880	}
4881	val |= RBR_CFIG_B_VLD0;
4882	switch (rp->rbr_sizes[0]) {
4883	case 256:
4884		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
4885		break;
4886	case 512:
4887		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
4888		break;
4889	case 1 * 1024:
4890		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
4891		break;
4892	case 2 * 1024:
4893		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
4894		break;
4895
4896	default:
4897		return -EINVAL;
4898	}
4899
4900	*ret = val;
4901	return 0;
4902}
4903
4904static int niu_enable_rx_channel(struct niu *np, int channel, int on)
4905{
4906	u64 val = nr64(RXDMA_CFIG1(channel));
4907	int limit;
4908
4909	if (on)
4910		val |= RXDMA_CFIG1_EN;
4911	else
4912		val &= ~RXDMA_CFIG1_EN;
4913	nw64(RXDMA_CFIG1(channel), val);
4914
4915	limit = 1000;
4916	while (--limit > 0) {
4917		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
4918			break;
4919		udelay(10);
4920	}
4921	if (limit <= 0)
4922		return -ENODEV;
4923	return 0;
4924}
4925
4926static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
4927{
4928	int err, channel = rp->rx_channel;
4929	u64 val;
4930
4931	err = niu_rx_channel_reset(np, channel);
4932	if (err)
4933		return err;
4934
4935	err = niu_rx_channel_lpage_init(np, channel);
4936	if (err)
4937		return err;
4938
4939	niu_rx_channel_wred_init(np, rp);
4940
4941	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
4942	nw64(RX_DMA_CTL_STAT(channel),
4943	     (RX_DMA_CTL_STAT_MEX |
4944	      RX_DMA_CTL_STAT_RCRTHRES |
4945	      RX_DMA_CTL_STAT_RCRTO |
4946	      RX_DMA_CTL_STAT_RBR_EMPTY));
4947	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
4948	nw64(RXDMA_CFIG2(channel),
4949	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
4950	      RXDMA_CFIG2_FULL_HDR));
4951	nw64(RBR_CFIG_A(channel),
4952	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
4953	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
4954	err = niu_compute_rbr_cfig_b(rp, &val);
4955	if (err)
4956		return err;
4957	nw64(RBR_CFIG_B(channel), val);
4958	nw64(RCRCFIG_A(channel),
4959	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
4960	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
4961	nw64(RCRCFIG_B(channel),
4962	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
4963	     RCRCFIG_B_ENTOUT |
4964	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
4965
4966	err = niu_enable_rx_channel(np, channel, 1);
4967	if (err)
4968		return err;
4969
4970	nw64(RBR_KICK(channel), rp->rbr_index);
4971
4972	val = nr64(RX_DMA_CTL_STAT(channel));
4973	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
4974	nw64(RX_DMA_CTL_STAT(channel), val);
4975
4976	return 0;
4977}
4978
4979static int niu_init_rx_channels(struct niu *np)
4980{
4981	unsigned long flags;
4982	u64 seed = jiffies_64;
4983	int err, i;
4984
4985	niu_lock_parent(np, flags);
4986	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
4987	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
4988	niu_unlock_parent(np, flags);
4989
4990	/* XXX RXDMA 32bit mode? XXX */
4991
4992	niu_init_rdc_groups(np);
4993	niu_init_drr_weight(np);
4994
4995	err = niu_init_hostinfo(np);
4996	if (err)
4997		return err;
4998
4999	for (i = 0; i < np->num_rx_rings; i++) {
5000		struct rx_ring_info *rp = &np->rx_rings[i];
5001
5002		err = niu_init_one_rx_channel(np, rp);
5003		if (err)
5004			return err;
5005	}
5006
5007	return 0;
5008}
5009
5010static int niu_set_ip_frag_rule(struct niu *np)
5011{
5012	struct niu_parent *parent = np->parent;
5013	struct niu_classifier *cp = &np->clas;
5014	struct niu_tcam_entry *tp;
5015	int index, err;
5016
5017	index = cp->tcam_top;
5018	tp = &parent->tcam[index];
5019
5020	/* Note that the noport bit is the same in both ipv4 and
5021	 * ipv6 format TCAM entries.
5022	 */
5023	memset(tp, 0, sizeof(*tp));
5024	tp->key[1] = TCAM_V4KEY1_NOPORT;
5025	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
5026	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
5027			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
5028	err = tcam_write(np, index, tp->key, tp->key_mask);
5029	if (err)
5030		return err;
5031	err = tcam_assoc_write(np, index, tp->assoc_data);
5032	if (err)
5033		return err;
5034	tp->valid = 1;
5035	cp->tcam_valid_entries++;
5036
5037	return 0;
5038}
5039
5040static int niu_init_classifier_hw(struct niu *np)
5041{
5042	struct niu_parent *parent = np->parent;
5043	struct niu_classifier *cp = &np->clas;
5044	int i, err;
5045
5046	nw64(H1POLY, cp->h1_init);
5047	nw64(H2POLY, cp->h2_init);
5048
5049	err = niu_init_hostinfo(np);
5050	if (err)
5051		return err;
5052
5053	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
5054		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
5055
5056		vlan_tbl_write(np, i, np->port,
5057			       vp->vlan_pref, vp->rdc_num);
5058	}
5059
5060	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
5061		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
5062
5063		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
5064						ap->rdc_num, ap->mac_pref);
5065		if (err)
5066			return err;
5067	}
5068
5069	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
5070		int index = i - CLASS_CODE_USER_PROG1;
5071
5072		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
5073		if (err)
5074			return err;
5075		err = niu_set_flow_key(np, i, parent->flow_key[index]);
5076		if (err)
5077			return err;
5078	}
5079
5080	err = niu_set_ip_frag_rule(np);
5081	if (err)
5082		return err;
5083
5084	tcam_enable(np, 1);
5085
5086	return 0;
5087}
5088
5089static int niu_zcp_write(struct niu *np, int index, u64 *data)
5090{
5091	nw64(ZCP_RAM_DATA0, data[0]);
5092	nw64(ZCP_RAM_DATA1, data[1]);
5093	nw64(ZCP_RAM_DATA2, data[2]);
5094	nw64(ZCP_RAM_DATA3, data[3]);
5095	nw64(ZCP_RAM_DATA4, data[4]);
5096	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
5097	nw64(ZCP_RAM_ACC,
5098	     (ZCP_RAM_ACC_WRITE |
5099	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
5100	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
5101
5102	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5103				   1000, 100);
5104}
5105
5106static int niu_zcp_read(struct niu *np, int index, u64 *data)
5107{
5108	int err;
5109
5110	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5111				  1000, 100);
5112	if (err) {
5113		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
5114			   (unsigned long long)nr64(ZCP_RAM_ACC));
5115		return err;
5116	}
5117
5118	nw64(ZCP_RAM_ACC,
5119	     (ZCP_RAM_ACC_READ |
5120	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
5121	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
5122
5123	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5124				  1000, 100);
5125	if (err) {
5126		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
5127			   (unsigned long long)nr64(ZCP_RAM_ACC));
5128		return err;
5129	}
5130
5131	data[0] = nr64(ZCP_RAM_DATA0);
5132	data[1] = nr64(ZCP_RAM_DATA1);
5133	data[2] = nr64(ZCP_RAM_DATA2);
5134	data[3] = nr64(ZCP_RAM_DATA3);
5135	data[4] = nr64(ZCP_RAM_DATA4);
5136
5137	return 0;
5138}
5139
5140static void niu_zcp_cfifo_reset(struct niu *np)
5141{
5142	u64 val = nr64(RESET_CFIFO);
5143
5144	val |= RESET_CFIFO_RST(np->port);
5145	nw64(RESET_CFIFO, val);
5146	udelay(10);
5147
5148	val &= ~RESET_CFIFO_RST(np->port);
5149	nw64(RESET_CFIFO, val);
5150}
5151
5152static int niu_init_zcp(struct niu *np)
5153{
5154	u64 data[5], rbuf[5];
5155	int i, max, err;
5156
5157	if (np->parent->plat_type != PLAT_TYPE_NIU) {
5158		if (np->port == 0 || np->port == 1)
5159			max = ATLAS_P0_P1_CFIFO_ENTRIES;
5160		else
5161			max = ATLAS_P2_P3_CFIFO_ENTRIES;
5162	} else
5163		max = NIU_CFIFO_ENTRIES;
5164
5165	data[0] = 0;
5166	data[1] = 0;
5167	data[2] = 0;
5168	data[3] = 0;
5169	data[4] = 0;
5170
5171	for (i = 0; i < max; i++) {
5172		err = niu_zcp_write(np, i, data);
5173		if (err)
5174			return err;
5175		err = niu_zcp_read(np, i, rbuf);
5176		if (err)
5177			return err;
5178	}
5179
5180	niu_zcp_cfifo_reset(np);
5181	nw64(CFIFO_ECC(np->port), 0);
5182	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
5183	(void) nr64(ZCP_INT_STAT);
5184	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
5185
5186	return 0;
5187}
5188
5189static void niu_ipp_write(struct niu *np, int index, u64 *data)
5190{
5191	u64 val = nr64_ipp(IPP_CFIG);
5192
5193	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
5194	nw64_ipp(IPP_DFIFO_WR_PTR, index);
5195	nw64_ipp(IPP_DFIFO_WR0, data[0]);
5196	nw64_ipp(IPP_DFIFO_WR1, data[1]);
5197	nw64_ipp(IPP_DFIFO_WR2, data[2]);
5198	nw64_ipp(IPP_DFIFO_WR3, data[3]);
5199	nw64_ipp(IPP_DFIFO_WR4, data[4]);
5200	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
5201}
5202
5203static void niu_ipp_read(struct niu *np, int index, u64 *data)
5204{
5205	nw64_ipp(IPP_DFIFO_RD_PTR, index);
5206	data[0] = nr64_ipp(IPP_DFIFO_RD0);
5207	data[1] = nr64_ipp(IPP_DFIFO_RD1);
5208	data[2] = nr64_ipp(IPP_DFIFO_RD2);
5209	data[3] = nr64_ipp(IPP_DFIFO_RD3);
5210	data[4] = nr64_ipp(IPP_DFIFO_RD4);
5211}
5212
5213static int niu_ipp_reset(struct niu *np)
5214{
5215	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
5216					  1000, 100, "IPP_CFIG");
5217}
5218
5219static int niu_init_ipp(struct niu *np)
5220{
5221	u64 data[5], rbuf[5], val;
5222	int i, max, err;
5223
5224	if (np->parent->plat_type != PLAT_TYPE_NIU) {
5225		if (np->port == 0 || np->port == 1)
5226			max = ATLAS_P0_P1_DFIFO_ENTRIES;
5227		else
5228			max = ATLAS_P2_P3_DFIFO_ENTRIES;
5229	} else
5230		max = NIU_DFIFO_ENTRIES;
5231
5232	data[0] = 0;
5233	data[1] = 0;
5234	data[2] = 0;
5235	data[3] = 0;
5236	data[4] = 0;
5237
5238	for (i = 0; i < max; i++) {
5239		niu_ipp_write(np, i, data);
5240		niu_ipp_read(np, i, rbuf);
5241	}
5242
5243	(void) nr64_ipp(IPP_INT_STAT);
5244	(void) nr64_ipp(IPP_INT_STAT);
5245
5246	err = niu_ipp_reset(np);
5247	if (err)
5248		return err;
5249
5250	(void) nr64_ipp(IPP_PKT_DIS);
5251	(void) nr64_ipp(IPP_BAD_CS_CNT);
5252	(void) nr64_ipp(IPP_ECC);
5253
5254	(void) nr64_ipp(IPP_INT_STAT);
5255
5256	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
5257
5258	val = nr64_ipp(IPP_CFIG);
5259	val &= ~IPP_CFIG_IP_MAX_PKT;
5260	val |= (IPP_CFIG_IPP_ENABLE |
5261		IPP_CFIG_DFIFO_ECC_EN |
5262		IPP_CFIG_DROP_BAD_CRC |
5263		IPP_CFIG_CKSUM_EN |
5264		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
5265	nw64_ipp(IPP_CFIG, val);
5266
5267	return 0;
5268}
5269
5270static void niu_handle_led(struct niu *np, int status)
5271{
5272	u64 val;
5273	val = nr64_mac(XMAC_CONFIG);
5274
5275	if ((np->flags & NIU_FLAGS_10G) != 0 &&
5276	    (np->flags & NIU_FLAGS_FIBER) != 0) {
5277		if (status) {
5278			val |= XMAC_CONFIG_LED_POLARITY;
5279			val &= ~XMAC_CONFIG_FORCE_LED_ON;
5280		} else {
5281			val |= XMAC_CONFIG_FORCE_LED_ON;
5282			val &= ~XMAC_CONFIG_LED_POLARITY;
5283		}
5284	}
5285
5286	nw64_mac(XMAC_CONFIG, val);
5287}
5288
5289static void niu_init_xif_xmac(struct niu *np)
5290{
5291	struct niu_link_config *lp = &np->link_config;
5292	u64 val;
5293
5294	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
5295		val = nr64(MIF_CONFIG);
5296		val |= MIF_CONFIG_ATCA_GE;
5297		nw64(MIF_CONFIG, val);
5298	}
5299
5300	val = nr64_mac(XMAC_CONFIG);
5301	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5302
5303	val |= XMAC_CONFIG_TX_OUTPUT_EN;
5304
5305	if (lp->loopback_mode == LOOPBACK_MAC) {
5306		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5307		val |= XMAC_CONFIG_LOOPBACK;
5308	} else {
5309		val &= ~XMAC_CONFIG_LOOPBACK;
5310	}
5311
5312	if (np->flags & NIU_FLAGS_10G) {
5313		val &= ~XMAC_CONFIG_LFS_DISABLE;
5314	} else {
5315		val |= XMAC_CONFIG_LFS_DISABLE;
5316		if (!(np->flags & NIU_FLAGS_FIBER) &&
5317		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
5318			val |= XMAC_CONFIG_1G_PCS_BYPASS;
5319		else
5320			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
5321	}
5322
5323	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5324
5325	if (lp->active_speed == SPEED_100)
5326		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
5327	else
5328		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
5329
5330	nw64_mac(XMAC_CONFIG, val);
5331
5332	val = nr64_mac(XMAC_CONFIG);
5333	val &= ~XMAC_CONFIG_MODE_MASK;
5334	if (np->flags & NIU_FLAGS_10G) {
5335		val |= XMAC_CONFIG_MODE_XGMII;
5336	} else {
5337		if (lp->active_speed == SPEED_1000)
5338			val |= XMAC_CONFIG_MODE_GMII;
5339		else
5340			val |= XMAC_CONFIG_MODE_MII;
5341	}
5342
5343	nw64_mac(XMAC_CONFIG, val);
5344}
5345
5346static void niu_init_xif_bmac(struct niu *np)
5347{
5348	struct niu_link_config *lp = &np->link_config;
5349	u64 val;
5350
5351	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
5352
5353	if (lp->loopback_mode == LOOPBACK_MAC)
5354		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
5355	else
5356		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
5357
5358	if (lp->active_speed == SPEED_1000)
5359		val |= BMAC_XIF_CONFIG_GMII_MODE;
5360	else
5361		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
5362
5363	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
5364		 BMAC_XIF_CONFIG_LED_POLARITY);
5365
5366	if (!(np->flags & NIU_FLAGS_10G) &&
5367	    !(np->flags & NIU_FLAGS_FIBER) &&
5368	    lp->active_speed == SPEED_100)
5369		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
5370	else
5371		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
5372
5373	nw64_mac(BMAC_XIF_CONFIG, val);
5374}
5375
5376static void niu_init_xif(struct niu *np)
5377{
5378	if (np->flags & NIU_FLAGS_XMAC)
5379		niu_init_xif_xmac(np);
5380	else
5381		niu_init_xif_bmac(np);
5382}
5383
5384static void niu_pcs_mii_reset(struct niu *np)
5385{
5386	int limit = 1000;
5387	u64 val = nr64_pcs(PCS_MII_CTL);
5388	val |= PCS_MII_CTL_RST;
5389	nw64_pcs(PCS_MII_CTL, val);
5390	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
5391		udelay(100);
5392		val = nr64_pcs(PCS_MII_CTL);
5393	}
5394}
5395
5396static void niu_xpcs_reset(struct niu *np)
5397{
5398	int limit = 1000;
5399	u64 val = nr64_xpcs(XPCS_CONTROL1);
5400	val |= XPCS_CONTROL1_RESET;
5401	nw64_xpcs(XPCS_CONTROL1, val);
5402	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
5403		udelay(100);
5404		val = nr64_xpcs(XPCS_CONTROL1);
5405	}
5406}
5407
5408static int niu_init_pcs(struct niu *np)
5409{
5410	struct niu_link_config *lp = &np->link_config;
5411	u64 val;
5412
5413	switch (np->flags & (NIU_FLAGS_10G |
5414			     NIU_FLAGS_FIBER |
5415			     NIU_FLAGS_XCVR_SERDES)) {
5416	case NIU_FLAGS_FIBER:
5417		/* 1G fiber */
5418		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5419		nw64_pcs(PCS_DPATH_MODE, 0);
5420		niu_pcs_mii_reset(np);
5421		break;
5422
5423	case NIU_FLAGS_10G:
5424	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
5425	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
5426		/* 10G SERDES */
5427		if (!(np->flags & NIU_FLAGS_XMAC))
5428			return -EINVAL;
5429
5430		/* 10G copper or fiber */
5431		val = nr64_mac(XMAC_CONFIG);
5432		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5433		nw64_mac(XMAC_CONFIG, val);
5434
5435		niu_xpcs_reset(np);
5436
5437		val = nr64_xpcs(XPCS_CONTROL1);
5438		if (lp->loopback_mode == LOOPBACK_PHY)
5439			val |= XPCS_CONTROL1_LOOPBACK;
5440		else
5441			val &= ~XPCS_CONTROL1_LOOPBACK;
5442		nw64_xpcs(XPCS_CONTROL1, val);
5443
5444		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
5445		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
5446		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
5447		break;
5448
5449
5450	case NIU_FLAGS_XCVR_SERDES:
5451		/* 1G SERDES */
5452		niu_pcs_mii_reset(np);
5453		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5454		nw64_pcs(PCS_DPATH_MODE, 0);
5455		break;
5456
5457	case 0:
5458		/* 1G copper */
5459	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
5460		/* 1G RGMII FIBER */
5461		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
5462		niu_pcs_mii_reset(np);
5463		break;
5464
5465	default:
5466		return -EINVAL;
5467	}
5468
5469	return 0;
5470}
5471
5472static int niu_reset_tx_xmac(struct niu *np)
5473{
5474	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
5475					  (XTXMAC_SW_RST_REG_RS |
5476					   XTXMAC_SW_RST_SOFT_RST),
5477					  1000, 100, "XTXMAC_SW_RST");
5478}
5479
5480static int niu_reset_tx_bmac(struct niu *np)
5481{
5482	int limit;
5483
5484	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
5485	limit = 1000;
5486	while (--limit >= 0) {
5487		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
5488			break;
5489		udelay(100);
5490	}
5491	if (limit < 0) {
5492		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
5493			np->port,
5494			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
5495		return -ENODEV;
5496	}
5497
5498	return 0;
5499}
5500
5501static int niu_reset_tx_mac(struct niu *np)
5502{
5503	if (np->flags & NIU_FLAGS_XMAC)
5504		return niu_reset_tx_xmac(np);
5505	else
5506		return niu_reset_tx_bmac(np);
5507}
5508
5509static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
5510{
5511	u64 val;
5512
5513	val = nr64_mac(XMAC_MIN);
5514	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
5515		 XMAC_MIN_RX_MIN_PKT_SIZE);
5516	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
5517	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
5518	nw64_mac(XMAC_MIN, val);
5519
5520	nw64_mac(XMAC_MAX, max);
5521
5522	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
5523
5524	val = nr64_mac(XMAC_IPG);
5525	if (np->flags & NIU_FLAGS_10G) {
5526		val &= ~XMAC_IPG_IPG_XGMII;
5527		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
5528	} else {
5529		val &= ~XMAC_IPG_IPG_MII_GMII;
5530		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
5531	}
5532	nw64_mac(XMAC_IPG, val);
5533
5534	val = nr64_mac(XMAC_CONFIG);
5535	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
5536		 XMAC_CONFIG_STRETCH_MODE |
5537		 XMAC_CONFIG_VAR_MIN_IPG_EN |
5538		 XMAC_CONFIG_TX_ENABLE);
5539	nw64_mac(XMAC_CONFIG, val);
5540
5541	nw64_mac(TXMAC_FRM_CNT, 0);
5542	nw64_mac(TXMAC_BYTE_CNT, 0);
5543}
5544
5545static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
5546{
5547	u64 val;
5548
5549	nw64_mac(BMAC_MIN_FRAME, min);
5550	nw64_mac(BMAC_MAX_FRAME, max);
5551
5552	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
5553	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
5554	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
5555
5556	val = nr64_mac(BTXMAC_CONFIG);
5557	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
5558		 BTXMAC_CONFIG_ENABLE);
5559	nw64_mac(BTXMAC_CONFIG, val);
5560}
5561
5562static void niu_init_tx_mac(struct niu *np)
5563{
5564	u64 min, max;
5565
5566	min = 64;
5567	if (np->dev->mtu > ETH_DATA_LEN)
5568		max = 9216;
5569	else
5570		max = 1522;
5571
5572	/* The XMAC_MIN register only accepts values for TX min which
5573	 * have the low 3 bits cleared.
5574	 */
5575	BUG_ON(min & 0x7);
5576
5577	if (np->flags & NIU_FLAGS_XMAC)
5578		niu_init_tx_xmac(np, min, max);
5579	else
5580		niu_init_tx_bmac(np, min, max);
5581}
5582
5583static int niu_reset_rx_xmac(struct niu *np)
5584{
5585	int limit;
5586
5587	nw64_mac(XRXMAC_SW_RST,
5588		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
5589	limit = 1000;
5590	while (--limit >= 0) {
5591		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
5592						 XRXMAC_SW_RST_SOFT_RST)))
5593			break;
5594		udelay(100);
5595	}
5596	if (limit < 0) {
5597		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
5598			np->port,
5599			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
5600		return -ENODEV;
5601	}
5602
5603	return 0;
5604}
5605
5606static int niu_reset_rx_bmac(struct niu *np)
5607{
5608	int limit;
5609
5610	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
5611	limit = 1000;
5612	while (--limit >= 0) {
5613		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
5614			break;
5615		udelay(100);
5616	}
5617	if (limit < 0) {
5618		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
5619			np->port,
5620			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
5621		return -ENODEV;
5622	}
5623
5624	return 0;
5625}
5626
5627static int niu_reset_rx_mac(struct niu *np)
5628{
5629	if (np->flags & NIU_FLAGS_XMAC)
5630		return niu_reset_rx_xmac(np);
5631	else
5632		return niu_reset_rx_bmac(np);
5633}
5634
5635static void niu_init_rx_xmac(struct niu *np)
5636{
5637	struct niu_parent *parent = np->parent;
5638	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5639	int first_rdc_table = tp->first_table_num;
5640	unsigned long i;
5641	u64 val;
5642
5643	nw64_mac(XMAC_ADD_FILT0, 0);
5644	nw64_mac(XMAC_ADD_FILT1, 0);
5645	nw64_mac(XMAC_ADD_FILT2, 0);
5646	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
5647	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
5648	for (i = 0; i < MAC_NUM_HASH; i++)
5649		nw64_mac(XMAC_HASH_TBL(i), 0);
5650	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
5651	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5652	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5653
5654	val = nr64_mac(XMAC_CONFIG);
5655	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
5656		 XMAC_CONFIG_PROMISCUOUS |
5657		 XMAC_CONFIG_PROMISC_GROUP |
5658		 XMAC_CONFIG_ERR_CHK_DIS |
5659		 XMAC_CONFIG_RX_CRC_CHK_DIS |
5660		 XMAC_CONFIG_RESERVED_MULTICAST |
5661		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
5662		 XMAC_CONFIG_ADDR_FILTER_EN |
5663		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
5664		 XMAC_CONFIG_STRIP_CRC |
5665		 XMAC_CONFIG_PASS_FLOW_CTRL |
5666		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
5667	val |= (XMAC_CONFIG_HASH_FILTER_EN);
5668	nw64_mac(XMAC_CONFIG, val);
5669
5670	nw64_mac(RXMAC_BT_CNT, 0);
5671	nw64_mac(RXMAC_BC_FRM_CNT, 0);
5672	nw64_mac(RXMAC_MC_FRM_CNT, 0);
5673	nw64_mac(RXMAC_FRAG_CNT, 0);
5674	nw64_mac(RXMAC_HIST_CNT1, 0);
5675	nw64_mac(RXMAC_HIST_CNT2, 0);
5676	nw64_mac(RXMAC_HIST_CNT3, 0);
5677	nw64_mac(RXMAC_HIST_CNT4, 0);
5678	nw64_mac(RXMAC_HIST_CNT5, 0);
5679	nw64_mac(RXMAC_HIST_CNT6, 0);
5680	nw64_mac(RXMAC_HIST_CNT7, 0);
5681	nw64_mac(RXMAC_MPSZER_CNT, 0);
5682	nw64_mac(RXMAC_CRC_ER_CNT, 0);
5683	nw64_mac(RXMAC_CD_VIO_CNT, 0);
5684	nw64_mac(LINK_FAULT_CNT, 0);
5685}
5686
5687static void niu_init_rx_bmac(struct niu *np)
5688{
5689	struct niu_parent *parent = np->parent;
5690	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5691	int first_rdc_table = tp->first_table_num;
5692	unsigned long i;
5693	u64 val;
5694
5695	nw64_mac(BMAC_ADD_FILT0, 0);
5696	nw64_mac(BMAC_ADD_FILT1, 0);
5697	nw64_mac(BMAC_ADD_FILT2, 0);
5698	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
5699	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
5700	for (i = 0; i < MAC_NUM_HASH; i++)
5701		nw64_mac(BMAC_HASH_TBL(i), 0);
5702	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5703	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5704	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
5705
5706	val = nr64_mac(BRXMAC_CONFIG);
5707	val &= ~(BRXMAC_CONFIG_ENABLE |
5708		 BRXMAC_CONFIG_STRIP_PAD |
5709		 BRXMAC_CONFIG_STRIP_FCS |
5710		 BRXMAC_CONFIG_PROMISC |
5711		 BRXMAC_CONFIG_PROMISC_GRP |
5712		 BRXMAC_CONFIG_ADDR_FILT_EN |
5713		 BRXMAC_CONFIG_DISCARD_DIS);
5714	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
5715	nw64_mac(BRXMAC_CONFIG, val);
5716
5717	val = nr64_mac(BMAC_ADDR_CMPEN);
5718	val |= BMAC_ADDR_CMPEN_EN0;
5719	nw64_mac(BMAC_ADDR_CMPEN, val);
5720}
5721
5722static void niu_init_rx_mac(struct niu *np)
5723{
5724	niu_set_primary_mac(np, np->dev->dev_addr);
5725
5726	if (np->flags & NIU_FLAGS_XMAC)
5727		niu_init_rx_xmac(np);
5728	else
5729		niu_init_rx_bmac(np);
5730}
5731
5732static void niu_enable_tx_xmac(struct niu *np, int on)
5733{
5734	u64 val = nr64_mac(XMAC_CONFIG);
5735
5736	if (on)
5737		val |= XMAC_CONFIG_TX_ENABLE;
5738	else
5739		val &= ~XMAC_CONFIG_TX_ENABLE;
5740	nw64_mac(XMAC_CONFIG, val);
5741}
5742
5743static void niu_enable_tx_bmac(struct niu *np, int on)
5744{
5745	u64 val = nr64_mac(BTXMAC_CONFIG);
5746
5747	if (on)
5748		val |= BTXMAC_CONFIG_ENABLE;
5749	else
5750		val &= ~BTXMAC_CONFIG_ENABLE;
5751	nw64_mac(BTXMAC_CONFIG, val);
5752}
5753
5754static void niu_enable_tx_mac(struct niu *np, int on)
5755{
5756	if (np->flags & NIU_FLAGS_XMAC)
5757		niu_enable_tx_xmac(np, on);
5758	else
5759		niu_enable_tx_bmac(np, on);
5760}
5761
5762static void niu_enable_rx_xmac(struct niu *np, int on)
5763{
5764	u64 val = nr64_mac(XMAC_CONFIG);
5765
5766	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
5767		 XMAC_CONFIG_PROMISCUOUS);
5768
5769	if (np->flags & NIU_FLAGS_MCAST)
5770		val |= XMAC_CONFIG_HASH_FILTER_EN;
5771	if (np->flags & NIU_FLAGS_PROMISC)
5772		val |= XMAC_CONFIG_PROMISCUOUS;
5773
5774	if (on)
5775		val |= XMAC_CONFIG_RX_MAC_ENABLE;
5776	else
5777		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
5778	nw64_mac(XMAC_CONFIG, val);
5779}
5780
5781static void niu_enable_rx_bmac(struct niu *np, int on)
5782{
5783	u64 val = nr64_mac(BRXMAC_CONFIG);
5784
5785	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
5786		 BRXMAC_CONFIG_PROMISC);
5787
5788	if (np->flags & NIU_FLAGS_MCAST)
5789		val |= BRXMAC_CONFIG_HASH_FILT_EN;
5790	if (np->flags & NIU_FLAGS_PROMISC)
5791		val |= BRXMAC_CONFIG_PROMISC;
5792
5793	if (on)
5794		val |= BRXMAC_CONFIG_ENABLE;
5795	else
5796		val &= ~BRXMAC_CONFIG_ENABLE;
5797	nw64_mac(BRXMAC_CONFIG, val);
5798}
5799
5800static void niu_enable_rx_mac(struct niu *np, int on)
5801{
5802	if (np->flags & NIU_FLAGS_XMAC)
5803		niu_enable_rx_xmac(np, on);
5804	else
5805		niu_enable_rx_bmac(np, on);
5806}
5807
5808static int niu_init_mac(struct niu *np)
5809{
5810	int err;
5811
5812	niu_init_xif(np);
5813	err = niu_init_pcs(np);
5814	if (err)
5815		return err;
5816
5817	err = niu_reset_tx_mac(np);
5818	if (err)
5819		return err;
5820	niu_init_tx_mac(np);
5821	err = niu_reset_rx_mac(np);
5822	if (err)
5823		return err;
5824	niu_init_rx_mac(np);
5825
5826	/* This looks hookey but the RX MAC reset we just did will
5827	 * undo some of the state we setup in niu_init_tx_mac() so we
5828	 * have to call it again.  In particular, the RX MAC reset will
5829	 * set the XMAC_MAX register back to it's default value.
5830	 */
5831	niu_init_tx_mac(np);
5832	niu_enable_tx_mac(np, 1);
5833
5834	niu_enable_rx_mac(np, 1);
5835
5836	return 0;
5837}
5838
5839static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5840{
5841	(void) niu_tx_channel_stop(np, rp->tx_channel);
5842}
5843
5844static void niu_stop_tx_channels(struct niu *np)
5845{
5846	int i;
5847
5848	for (i = 0; i < np->num_tx_rings; i++) {
5849		struct tx_ring_info *rp = &np->tx_rings[i];
5850
5851		niu_stop_one_tx_channel(np, rp);
5852	}
5853}
5854
5855static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5856{
5857	(void) niu_tx_channel_reset(np, rp->tx_channel);
5858}
5859
5860static void niu_reset_tx_channels(struct niu *np)
5861{
5862	int i;
5863
5864	for (i = 0; i < np->num_tx_rings; i++) {
5865		struct tx_ring_info *rp = &np->tx_rings[i];
5866
5867		niu_reset_one_tx_channel(np, rp);
5868	}
5869}
5870
5871static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5872{
5873	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5874}
5875
5876static void niu_stop_rx_channels(struct niu *np)
5877{
5878	int i;
5879
5880	for (i = 0; i < np->num_rx_rings; i++) {
5881		struct rx_ring_info *rp = &np->rx_rings[i];
5882
5883		niu_stop_one_rx_channel(np, rp);
5884	}
5885}
5886
5887static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5888{
5889	int channel = rp->rx_channel;
5890
5891	(void) niu_rx_channel_reset(np, channel);
5892	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
5893	nw64(RX_DMA_CTL_STAT(channel), 0);
5894	(void) niu_enable_rx_channel(np, channel, 0);
5895}
5896
5897static void niu_reset_rx_channels(struct niu *np)
5898{
5899	int i;
5900
5901	for (i = 0; i < np->num_rx_rings; i++) {
5902		struct rx_ring_info *rp = &np->rx_rings[i];
5903
5904		niu_reset_one_rx_channel(np, rp);
5905	}
5906}
5907
5908static void niu_disable_ipp(struct niu *np)
5909{
5910	u64 rd, wr, val;
5911	int limit;
5912
5913	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5914	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5915	limit = 100;
5916	while (--limit >= 0 && (rd != wr)) {
5917		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5918		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5919	}
5920	if (limit < 0 &&
5921	    (rd != 0 && wr != 1)) {
5922		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
5923			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
5924			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
5925	}
5926
5927	val = nr64_ipp(IPP_CFIG);
5928	val &= ~(IPP_CFIG_IPP_ENABLE |
5929		 IPP_CFIG_DFIFO_ECC_EN |
5930		 IPP_CFIG_DROP_BAD_CRC |
5931		 IPP_CFIG_CKSUM_EN);
5932	nw64_ipp(IPP_CFIG, val);
5933
5934	(void) niu_ipp_reset(np);
5935}
5936
5937static int niu_init_hw(struct niu *np)
5938{
5939	int i, err;
5940
5941	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
5942	niu_txc_enable_port(np, 1);
5943	niu_txc_port_dma_enable(np, 1);
5944	niu_txc_set_imask(np, 0);
5945
5946	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
5947	for (i = 0; i < np->num_tx_rings; i++) {
5948		struct tx_ring_info *rp = &np->tx_rings[i];
5949
5950		err = niu_init_one_tx_channel(np, rp);
5951		if (err)
5952			return err;
5953	}
5954
5955	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
5956	err = niu_init_rx_channels(np);
5957	if (err)
5958		goto out_uninit_tx_channels;
5959
5960	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
5961	err = niu_init_classifier_hw(np);
5962	if (err)
5963		goto out_uninit_rx_channels;
5964
5965	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
5966	err = niu_init_zcp(np);
5967	if (err)
5968		goto out_uninit_rx_channels;
5969
5970	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
5971	err = niu_init_ipp(np);
5972	if (err)
5973		goto out_uninit_rx_channels;
5974
5975	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
5976	err = niu_init_mac(np);
5977	if (err)
5978		goto out_uninit_ipp;
5979
5980	return 0;
5981
5982out_uninit_ipp:
5983	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
5984	niu_disable_ipp(np);
5985
5986out_uninit_rx_channels:
5987	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
5988	niu_stop_rx_channels(np);
5989	niu_reset_rx_channels(np);
5990
5991out_uninit_tx_channels:
5992	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
5993	niu_stop_tx_channels(np);
5994	niu_reset_tx_channels(np);
5995
5996	return err;
5997}
5998
5999static void niu_stop_hw(struct niu *np)
6000{
6001	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
6002	niu_enable_interrupts(np, 0);
6003
6004	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
6005	niu_enable_rx_mac(np, 0);
6006
6007	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
6008	niu_disable_ipp(np);
6009
6010	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
6011	niu_stop_tx_channels(np);
6012
6013	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
6014	niu_stop_rx_channels(np);
6015
6016	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
6017	niu_reset_tx_channels(np);
6018
6019	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
6020	niu_reset_rx_channels(np);
6021}
6022
6023static void niu_set_irq_name(struct niu *np)
6024{
6025	int port = np->port;
6026	int i, j = 1;
6027
6028	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
6029
6030	if (port == 0) {
6031		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
6032		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
6033		j = 3;
6034	}
6035
6036	for (i = 0; i < np->num_ldg - j; i++) {
6037		if (i < np->num_rx_rings)
6038			sprintf(np->irq_name[i+j], "%s-rx-%d",
6039				np->dev->name, i);
6040		else if (i < np->num_tx_rings + np->num_rx_rings)
6041			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
6042				i - np->num_rx_rings);
6043	}
6044}
6045
6046static int niu_request_irq(struct niu *np)
6047{
6048	int i, j, err;
6049
6050	niu_set_irq_name(np);
6051
6052	err = 0;
6053	for (i = 0; i < np->num_ldg; i++) {
6054		struct niu_ldg *lp = &np->ldg[i];
6055
6056		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
6057				  np->irq_name[i], lp);
6058		if (err)
6059			goto out_free_irqs;
6060
6061	}
6062
6063	return 0;
6064
6065out_free_irqs:
6066	for (j = 0; j < i; j++) {
6067		struct niu_ldg *lp = &np->ldg[j];
6068
6069		free_irq(lp->irq, lp);
6070	}
6071	return err;
6072}
6073
6074static void niu_free_irq(struct niu *np)
6075{
6076	int i;
6077
6078	for (i = 0; i < np->num_ldg; i++) {
6079		struct niu_ldg *lp = &np->ldg[i];
6080
6081		free_irq(lp->irq, lp);
6082	}
6083}
6084
6085static void niu_enable_napi(struct niu *np)
6086{
6087	int i;
6088
6089	for (i = 0; i < np->num_ldg; i++)
6090		napi_enable(&np->ldg[i].napi);
6091}
6092
6093static void niu_disable_napi(struct niu *np)
6094{
6095	int i;
6096
6097	for (i = 0; i < np->num_ldg; i++)
6098		napi_disable(&np->ldg[i].napi);
6099}
6100
6101static int niu_open(struct net_device *dev)
6102{
6103	struct niu *np = netdev_priv(dev);
6104	int err;
6105
6106	netif_carrier_off(dev);
6107
6108	err = niu_alloc_channels(np);
6109	if (err)
6110		goto out_err;
6111
6112	err = niu_enable_interrupts(np, 0);
6113	if (err)
6114		goto out_free_channels;
6115
6116	err = niu_request_irq(np);
6117	if (err)
6118		goto out_free_channels;
6119
6120	niu_enable_napi(np);
6121
6122	spin_lock_irq(&np->lock);
6123
6124	err = niu_init_hw(np);
6125	if (!err) {
6126		init_timer(&np->timer);
6127		np->timer.expires = jiffies + HZ;
6128		np->timer.data = (unsigned long) np;
6129		np->timer.function = niu_timer;
6130
6131		err = niu_enable_interrupts(np, 1);
6132		if (err)
6133			niu_stop_hw(np);
6134	}
6135
6136	spin_unlock_irq(&np->lock);
6137
6138	if (err) {
6139		niu_disable_napi(np);
6140		goto out_free_irq;
6141	}
6142
6143	netif_tx_start_all_queues(dev);
6144
6145	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6146		netif_carrier_on(dev);
6147
6148	add_timer(&np->timer);
6149
6150	return 0;
6151
6152out_free_irq:
6153	niu_free_irq(np);
6154
6155out_free_channels:
6156	niu_free_channels(np);
6157
6158out_err:
6159	return err;
6160}
6161
6162static void niu_full_shutdown(struct niu *np, struct net_device *dev)
6163{
6164	cancel_work_sync(&np->reset_task);
6165
6166	niu_disable_napi(np);
6167	netif_tx_stop_all_queues(dev);
6168
6169	del_timer_sync(&np->timer);
6170
6171	spin_lock_irq(&np->lock);
6172
6173	niu_stop_hw(np);
6174
6175	spin_unlock_irq(&np->lock);
6176}
6177
6178static int niu_close(struct net_device *dev)
6179{
6180	struct niu *np = netdev_priv(dev);
6181
6182	niu_full_shutdown(np, dev);
6183
6184	niu_free_irq(np);
6185
6186	niu_free_channels(np);
6187
6188	niu_handle_led(np, 0);
6189
6190	return 0;
6191}
6192
6193static void niu_sync_xmac_stats(struct niu *np)
6194{
6195	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
6196
6197	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
6198	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
6199
6200	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
6201	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
6202	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
6203	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
6204	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
6205	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
6206	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
6207	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
6208	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
6209	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
6210	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
6211	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
6212	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
6213	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
6214	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
6215	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
6216}
6217
6218static void niu_sync_bmac_stats(struct niu *np)
6219{
6220	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
6221
6222	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
6223	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
6224
6225	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
6226	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6227	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6228	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
6229}
6230
6231static void niu_sync_mac_stats(struct niu *np)
6232{
6233	if (np->flags & NIU_FLAGS_XMAC)
6234		niu_sync_xmac_stats(np);
6235	else
6236		niu_sync_bmac_stats(np);
6237}
6238
6239static void niu_get_rx_stats(struct niu *np,
6240			     struct rtnl_link_stats64 *stats)
6241{
6242	u64 pkts, dropped, errors, bytes;
6243	struct rx_ring_info *rx_rings;
6244	int i;
6245
6246	pkts = dropped = errors = bytes = 0;
6247
6248	rx_rings = ACCESS_ONCE(np->rx_rings);
6249	if (!rx_rings)
6250		goto no_rings;
6251
6252	for (i = 0; i < np->num_rx_rings; i++) {
6253		struct rx_ring_info *rp = &rx_rings[i];
6254
6255		niu_sync_rx_discard_stats(np, rp, 0);
6256
6257		pkts += rp->rx_packets;
6258		bytes += rp->rx_bytes;
6259		dropped += rp->rx_dropped;
6260		errors += rp->rx_errors;
6261	}
6262
6263no_rings:
6264	stats->rx_packets = pkts;
6265	stats->rx_bytes = bytes;
6266	stats->rx_dropped = dropped;
6267	stats->rx_errors = errors;
6268}
6269
6270static void niu_get_tx_stats(struct niu *np,
6271			     struct rtnl_link_stats64 *stats)
6272{
6273	u64 pkts, errors, bytes;
6274	struct tx_ring_info *tx_rings;
6275	int i;
6276
6277	pkts = errors = bytes = 0;
6278
6279	tx_rings = ACCESS_ONCE(np->tx_rings);
6280	if (!tx_rings)
6281		goto no_rings;
6282
6283	for (i = 0; i < np->num_tx_rings; i++) {
6284		struct tx_ring_info *rp = &tx_rings[i];
6285
6286		pkts += rp->tx_packets;
6287		bytes += rp->tx_bytes;
6288		errors += rp->tx_errors;
6289	}
6290
6291no_rings:
6292	stats->tx_packets = pkts;
6293	stats->tx_bytes = bytes;
6294	stats->tx_errors = errors;
6295}
6296
6297static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
6298					       struct rtnl_link_stats64 *stats)
6299{
6300	struct niu *np = netdev_priv(dev);
6301
6302	if (netif_running(dev)) {
6303		niu_get_rx_stats(np, stats);
6304		niu_get_tx_stats(np, stats);
6305	}
6306
6307	return stats;
6308}
6309
6310static void niu_load_hash_xmac(struct niu *np, u16 *hash)
6311{
6312	int i;
6313
6314	for (i = 0; i < 16; i++)
6315		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
6316}
6317
6318static void niu_load_hash_bmac(struct niu *np, u16 *hash)
6319{
6320	int i;
6321
6322	for (i = 0; i < 16; i++)
6323		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
6324}
6325
6326static void niu_load_hash(struct niu *np, u16 *hash)
6327{
6328	if (np->flags & NIU_FLAGS_XMAC)
6329		niu_load_hash_xmac(np, hash);
6330	else
6331		niu_load_hash_bmac(np, hash);
6332}
6333
6334static void niu_set_rx_mode(struct net_device *dev)
6335{
6336	struct niu *np = netdev_priv(dev);
6337	int i, alt_cnt, err;
6338	struct netdev_hw_addr *ha;
6339	unsigned long flags;
6340	u16 hash[16] = { 0, };
6341
6342	spin_lock_irqsave(&np->lock, flags);
6343	niu_enable_rx_mac(np, 0);
6344
6345	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
6346	if (dev->flags & IFF_PROMISC)
6347		np->flags |= NIU_FLAGS_PROMISC;
6348	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
6349		np->flags |= NIU_FLAGS_MCAST;
6350
6351	alt_cnt = netdev_uc_count(dev);
6352	if (alt_cnt > niu_num_alt_addr(np)) {
6353		alt_cnt = 0;
6354		np->flags |= NIU_FLAGS_PROMISC;
6355	}
6356
6357	if (alt_cnt) {
6358		int index = 0;
6359
6360		netdev_for_each_uc_addr(ha, dev) {
6361			err = niu_set_alt_mac(np, index, ha->addr);
6362			if (err)
6363				netdev_warn(dev, "Error %d adding alt mac %d\n",
6364					    err, index);
6365			err = niu_enable_alt_mac(np, index, 1);
6366			if (err)
6367				netdev_warn(dev, "Error %d enabling alt mac %d\n",
6368					    err, index);
6369
6370			index++;
6371		}
6372	} else {
6373		int alt_start;
6374		if (np->flags & NIU_FLAGS_XMAC)
6375			alt_start = 0;
6376		else
6377			alt_start = 1;
6378		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
6379			err = niu_enable_alt_mac(np, i, 0);
6380			if (err)
6381				netdev_warn(dev, "Error %d disabling alt mac %d\n",
6382					    err, i);
6383		}
6384	}
6385	if (dev->flags & IFF_ALLMULTI) {
6386		for (i = 0; i < 16; i++)
6387			hash[i] = 0xffff;
6388	} else if (!netdev_mc_empty(dev)) {
6389		netdev_for_each_mc_addr(ha, dev) {
6390			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
6391
6392			crc >>= 24;
6393			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
6394		}
6395	}
6396
6397	if (np->flags & NIU_FLAGS_MCAST)
6398		niu_load_hash(np, hash);
6399
6400	niu_enable_rx_mac(np, 1);
6401	spin_unlock_irqrestore(&np->lock, flags);
6402}
6403
6404static int niu_set_mac_addr(struct net_device *dev, void *p)
6405{
6406	struct niu *np = netdev_priv(dev);
6407	struct sockaddr *addr = p;
6408	unsigned long flags;
6409
6410	if (!is_valid_ether_addr(addr->sa_data))
6411		return -EADDRNOTAVAIL;
6412
6413	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
6414
6415	if (!netif_running(dev))
6416		return 0;
6417
6418	spin_lock_irqsave(&np->lock, flags);
6419	niu_enable_rx_mac(np, 0);
6420	niu_set_primary_mac(np, dev->dev_addr);
6421	niu_enable_rx_mac(np, 1);
6422	spin_unlock_irqrestore(&np->lock, flags);
6423
6424	return 0;
6425}
6426
6427static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6428{
6429	return -EOPNOTSUPP;
6430}
6431
6432static void niu_netif_stop(struct niu *np)
6433{
6434	np->dev->trans_start = jiffies;	/* prevent tx timeout */
6435
6436	niu_disable_napi(np);
6437
6438	netif_tx_disable(np->dev);
6439}
6440
6441static void niu_netif_start(struct niu *np)
6442{
6443	/* NOTE: unconditional netif_wake_queue is only appropriate
6444	 * so long as all callers are assured to have free tx slots
6445	 * (such as after niu_init_hw).
6446	 */
6447	netif_tx_wake_all_queues(np->dev);
6448
6449	niu_enable_napi(np);
6450
6451	niu_enable_interrupts(np, 1);
6452}
6453
6454static void niu_reset_buffers(struct niu *np)
6455{
6456	int i, j, k, err;
6457
6458	if (np->rx_rings) {
6459		for (i = 0; i < np->num_rx_rings; i++) {
6460			struct rx_ring_info *rp = &np->rx_rings[i];
6461
6462			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
6463				struct page *page;
6464
6465				page = rp->rxhash[j];
6466				while (page) {
6467					struct page *next =
6468						(struct page *) page->mapping;
6469					u64 base = page->index;
6470					base = base >> RBR_DESCR_ADDR_SHIFT;
6471					rp->rbr[k++] = cpu_to_le32(base);
6472					page = next;
6473				}
6474			}
6475			for (; k < MAX_RBR_RING_SIZE; k++) {
6476				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
6477				if (unlikely(err))
6478					break;
6479			}
6480
6481			rp->rbr_index = rp->rbr_table_size - 1;
6482			rp->rcr_index = 0;
6483			rp->rbr_pending = 0;
6484			rp->rbr_refill_pending = 0;
6485		}
6486	}
6487	if (np->tx_rings) {
6488		for (i = 0; i < np->num_tx_rings; i++) {
6489			struct tx_ring_info *rp = &np->tx_rings[i];
6490
6491			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
6492				if (rp->tx_buffs[j].skb)
6493					(void) release_tx_packet(np, rp, j);
6494			}
6495
6496			rp->pending = MAX_TX_RING_SIZE;
6497			rp->prod = 0;
6498			rp->cons = 0;
6499			rp->wrap_bit = 0;
6500		}
6501	}
6502}
6503
6504static void niu_reset_task(struct work_struct *work)
6505{
6506	struct niu *np = container_of(work, struct niu, reset_task);
6507	unsigned long flags;
6508	int err;
6509
6510	spin_lock_irqsave(&np->lock, flags);
6511	if (!netif_running(np->dev)) {
6512		spin_unlock_irqrestore(&np->lock, flags);
6513		return;
6514	}
6515
6516	spin_unlock_irqrestore(&np->lock, flags);
6517
6518	del_timer_sync(&np->timer);
6519
6520	niu_netif_stop(np);
6521
6522	spin_lock_irqsave(&np->lock, flags);
6523
6524	niu_stop_hw(np);
6525
6526	spin_unlock_irqrestore(&np->lock, flags);
6527
6528	niu_reset_buffers(np);
6529
6530	spin_lock_irqsave(&np->lock, flags);
6531
6532	err = niu_init_hw(np);
6533	if (!err) {
6534		np->timer.expires = jiffies + HZ;
6535		add_timer(&np->timer);
6536		niu_netif_start(np);
6537	}
6538
6539	spin_unlock_irqrestore(&np->lock, flags);
6540}
6541
6542static void niu_tx_timeout(struct net_device *dev)
6543{
6544	struct niu *np = netdev_priv(dev);
6545
6546	dev_err(np->device, "%s: Transmit timed out, resetting\n",
6547		dev->name);
6548
6549	schedule_work(&np->reset_task);
6550}
6551
6552static void niu_set_txd(struct tx_ring_info *rp, int index,
6553			u64 mapping, u64 len, u64 mark,
6554			u64 n_frags)
6555{
6556	__le64 *desc = &rp->descr[index];
6557
6558	*desc = cpu_to_le64(mark |
6559			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
6560			    (len << TX_DESC_TR_LEN_SHIFT) |
6561			    (mapping & TX_DESC_SAD));
6562}
6563
6564static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6565				u64 pad_bytes, u64 len)
6566{
6567	u16 eth_proto, eth_proto_inner;
6568	u64 csum_bits, l3off, ihl, ret;
6569	u8 ip_proto;
6570	int ipv6;
6571
6572	eth_proto = be16_to_cpu(ehdr->h_proto);
6573	eth_proto_inner = eth_proto;
6574	if (eth_proto == ETH_P_8021Q) {
6575		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
6576		__be16 val = vp->h_vlan_encapsulated_proto;
6577
6578		eth_proto_inner = be16_to_cpu(val);
6579	}
6580
6581	ipv6 = ihl = 0;
6582	switch (skb->protocol) {
6583	case cpu_to_be16(ETH_P_IP):
6584		ip_proto = ip_hdr(skb)->protocol;
6585		ihl = ip_hdr(skb)->ihl;
6586		break;
6587	case cpu_to_be16(ETH_P_IPV6):
6588		ip_proto = ipv6_hdr(skb)->nexthdr;
6589		ihl = (40 >> 2);
6590		ipv6 = 1;
6591		break;
6592	default:
6593		ip_proto = ihl = 0;
6594		break;
6595	}
6596
6597	csum_bits = TXHDR_CSUM_NONE;
6598	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6599		u64 start, stuff;
6600
6601		csum_bits = (ip_proto == IPPROTO_TCP ?
6602			     TXHDR_CSUM_TCP :
6603			     (ip_proto == IPPROTO_UDP ?
6604			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
6605
6606		start = skb_checksum_start_offset(skb) -
6607			(pad_bytes + sizeof(struct tx_pkt_hdr));
6608		stuff = start + skb->csum_offset;
6609
6610		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
6611		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
6612	}
6613
6614	l3off = skb_network_offset(skb) -
6615		(pad_bytes + sizeof(struct tx_pkt_hdr));
6616
6617	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
6618	       (len << TXHDR_LEN_SHIFT) |
6619	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
6620	       (ihl << TXHDR_IHL_SHIFT) |
6621	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
6622	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
6623	       (ipv6 ? TXHDR_IP_VER : 0) |
6624	       csum_bits);
6625
6626	return ret;
6627}
6628
6629static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6630				  struct net_device *dev)
6631{
6632	struct niu *np = netdev_priv(dev);
6633	unsigned long align, headroom;
6634	struct netdev_queue *txq;
6635	struct tx_ring_info *rp;
6636	struct tx_pkt_hdr *tp;
6637	unsigned int len, nfg;
6638	struct ethhdr *ehdr;
6639	int prod, i, tlen;
6640	u64 mapping, mrk;
6641
6642	i = skb_get_queue_mapping(skb);
6643	rp = &np->tx_rings[i];
6644	txq = netdev_get_tx_queue(dev, i);
6645
6646	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
6647		netif_tx_stop_queue(txq);
6648		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
6649		rp->tx_errors++;
6650		return NETDEV_TX_BUSY;
6651	}
6652
6653	if (eth_skb_pad(skb))
6654		goto out;
6655
6656	len = sizeof(struct tx_pkt_hdr) + 15;
6657	if (skb_headroom(skb) < len) {
6658		struct sk_buff *skb_new;
6659
6660		skb_new = skb_realloc_headroom(skb, len);
6661		if (!skb_new)
6662			goto out_drop;
6663		kfree_skb(skb);
6664		skb = skb_new;
6665	} else
6666		skb_orphan(skb);
6667
6668	align = ((unsigned long) skb->data & (16 - 1));
6669	headroom = align + sizeof(struct tx_pkt_hdr);
6670
6671	ehdr = (struct ethhdr *) skb->data;
6672	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
6673
6674	len = skb->len - sizeof(struct tx_pkt_hdr);
6675	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
6676	tp->resv = 0;
6677
6678	len = skb_headlen(skb);
6679	mapping = np->ops->map_single(np->device, skb->data,
6680				      len, DMA_TO_DEVICE);
6681
6682	prod = rp->prod;
6683
6684	rp->tx_buffs[prod].skb = skb;
6685	rp->tx_buffs[prod].mapping = mapping;
6686
6687	mrk = TX_DESC_SOP;
6688	if (++rp->mark_counter == rp->mark_freq) {
6689		rp->mark_counter = 0;
6690		mrk |= TX_DESC_MARK;
6691		rp->mark_pending++;
6692	}
6693
6694	tlen = len;
6695	nfg = skb_shinfo(skb)->nr_frags;
6696	while (tlen > 0) {
6697		tlen -= MAX_TX_DESC_LEN;
6698		nfg++;
6699	}
6700
6701	while (len > 0) {
6702		unsigned int this_len = len;
6703
6704		if (this_len > MAX_TX_DESC_LEN)
6705			this_len = MAX_TX_DESC_LEN;
6706
6707		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
6708		mrk = nfg = 0;
6709
6710		prod = NEXT_TX(rp, prod);
6711		mapping += this_len;
6712		len -= this_len;
6713	}
6714
6715	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
6716		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6717
6718		len = skb_frag_size(frag);
6719		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
6720					    frag->page_offset, len,
6721					    DMA_TO_DEVICE);
6722
6723		rp->tx_buffs[prod].skb = NULL;
6724		rp->tx_buffs[prod].mapping = mapping;
6725
6726		niu_set_txd(rp, prod, mapping, len, 0, 0);
6727
6728		prod = NEXT_TX(rp, prod);
6729	}
6730
6731	if (prod < rp->prod)
6732		rp->wrap_bit ^= TX_RING_KICK_WRAP;
6733	rp->prod = prod;
6734
6735	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
6736
6737	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
6738		netif_tx_stop_queue(txq);
6739		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
6740			netif_tx_wake_queue(txq);
6741	}
6742
6743out:
6744	return NETDEV_TX_OK;
6745
6746out_drop:
6747	rp->tx_errors++;
6748	kfree_skb(skb);
6749	goto out;
6750}
6751
6752static int niu_change_mtu(struct net_device *dev, int new_mtu)
6753{
6754	struct niu *np = netdev_priv(dev);
6755	int err, orig_jumbo, new_jumbo;
6756
6757	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
6758		return -EINVAL;
6759
6760	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
6761	new_jumbo = (new_mtu > ETH_DATA_LEN);
6762
6763	dev->mtu = new_mtu;
6764
6765	if (!netif_running(dev) ||
6766	    (orig_jumbo == new_jumbo))
6767		return 0;
6768
6769	niu_full_shutdown(np, dev);
6770
6771	niu_free_channels(np);
6772
6773	niu_enable_napi(np);
6774
6775	err = niu_alloc_channels(np);
6776	if (err)
6777		return err;
6778
6779	spin_lock_irq(&np->lock);
6780
6781	err = niu_init_hw(np);
6782	if (!err) {
6783		init_timer(&np->timer);
6784		np->timer.expires = jiffies + HZ;
6785		np->timer.data = (unsigned long) np;
6786		np->timer.function = niu_timer;
6787
6788		err = niu_enable_interrupts(np, 1);
6789		if (err)
6790			niu_stop_hw(np);
6791	}
6792
6793	spin_unlock_irq(&np->lock);
6794
6795	if (!err) {
6796		netif_tx_start_all_queues(dev);
6797		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6798			netif_carrier_on(dev);
6799
6800		add_timer(&np->timer);
6801	}
6802
6803	return err;
6804}
6805
6806static void niu_get_drvinfo(struct net_device *dev,
6807			    struct ethtool_drvinfo *info)
6808{
6809	struct niu *np = netdev_priv(dev);
6810	struct niu_vpd *vpd = &np->vpd;
6811
6812	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6813	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6814	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
6815		vpd->fcode_major, vpd->fcode_minor);
6816	if (np->parent->plat_type != PLAT_TYPE_NIU)
6817		strlcpy(info->bus_info, pci_name(np->pdev),
6818			sizeof(info->bus_info));
6819}
6820
6821static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6822{
6823	struct niu *np = netdev_priv(dev);
6824	struct niu_link_config *lp;
6825
6826	lp = &np->link_config;
6827
6828	memset(cmd, 0, sizeof(*cmd));
6829	cmd->phy_address = np->phy_addr;
6830	cmd->supported = lp->supported;
6831	cmd->advertising = lp->active_advertising;
6832	cmd->autoneg = lp->active_autoneg;
6833	ethtool_cmd_speed_set(cmd, lp->active_speed);
6834	cmd->duplex = lp->active_duplex;
6835	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
6836	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
6837		XCVR_EXTERNAL : XCVR_INTERNAL;
6838
6839	return 0;
6840}
6841
6842static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6843{
6844	struct niu *np = netdev_priv(dev);
6845	struct niu_link_config *lp = &np->link_config;
6846
6847	lp->advertising = cmd->advertising;
6848	lp->speed = ethtool_cmd_speed(cmd);
6849	lp->duplex = cmd->duplex;
6850	lp->autoneg = cmd->autoneg;
6851	return niu_init_link(np);
6852}
6853
6854static u32 niu_get_msglevel(struct net_device *dev)
6855{
6856	struct niu *np = netdev_priv(dev);
6857	return np->msg_enable;
6858}
6859
6860static void niu_set_msglevel(struct net_device *dev, u32 value)
6861{
6862	struct niu *np = netdev_priv(dev);
6863	np->msg_enable = value;
6864}
6865
6866static int niu_nway_reset(struct net_device *dev)
6867{
6868	struct niu *np = netdev_priv(dev);
6869
6870	if (np->link_config.autoneg)
6871		return niu_init_link(np);
6872
6873	return 0;
6874}
6875
6876static int niu_get_eeprom_len(struct net_device *dev)
6877{
6878	struct niu *np = netdev_priv(dev);
6879
6880	return np->eeprom_len;
6881}
6882
6883static int niu_get_eeprom(struct net_device *dev,
6884			  struct ethtool_eeprom *eeprom, u8 *data)
6885{
6886	struct niu *np = netdev_priv(dev);
6887	u32 offset, len, val;
6888
6889	offset = eeprom->offset;
6890	len = eeprom->len;
6891
6892	if (offset + len < offset)
6893		return -EINVAL;
6894	if (offset >= np->eeprom_len)
6895		return -EINVAL;
6896	if (offset + len > np->eeprom_len)
6897		len = eeprom->len = np->eeprom_len - offset;
6898
6899	if (offset & 3) {
6900		u32 b_offset, b_count;
6901
6902		b_offset = offset & 3;
6903		b_count = 4 - b_offset;
6904		if (b_count > len)
6905			b_count = len;
6906
6907		val = nr64(ESPC_NCR((offset - b_offset) / 4));
6908		memcpy(data, ((char *)&val) + b_offset, b_count);
6909		data += b_count;
6910		len -= b_count;
6911		offset += b_count;
6912	}
6913	while (len >= 4) {
6914		val = nr64(ESPC_NCR(offset / 4));
6915		memcpy(data, &val, 4);
6916		data += 4;
6917		len -= 4;
6918		offset += 4;
6919	}
6920	if (len) {
6921		val = nr64(ESPC_NCR(offset / 4));
6922		memcpy(data, &val, len);
6923	}
6924	return 0;
6925}
6926
6927static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
6928{
6929	switch (flow_type) {
6930	case TCP_V4_FLOW:
6931	case TCP_V6_FLOW:
6932		*pid = IPPROTO_TCP;
6933		break;
6934	case UDP_V4_FLOW:
6935	case UDP_V6_FLOW:
6936		*pid = IPPROTO_UDP;
6937		break;
6938	case SCTP_V4_FLOW:
6939	case SCTP_V6_FLOW:
6940		*pid = IPPROTO_SCTP;
6941		break;
6942	case AH_V4_FLOW:
6943	case AH_V6_FLOW:
6944		*pid = IPPROTO_AH;
6945		break;
6946	case ESP_V4_FLOW:
6947	case ESP_V6_FLOW:
6948		*pid = IPPROTO_ESP;
6949		break;
6950	default:
6951		*pid = 0;
6952		break;
6953	}
6954}
6955
6956static int niu_class_to_ethflow(u64 class, int *flow_type)
6957{
6958	switch (class) {
6959	case CLASS_CODE_TCP_IPV4:
6960		*flow_type = TCP_V4_FLOW;
6961		break;
6962	case CLASS_CODE_UDP_IPV4:
6963		*flow_type = UDP_V4_FLOW;
6964		break;
6965	case CLASS_CODE_AH_ESP_IPV4:
6966		*flow_type = AH_V4_FLOW;
6967		break;
6968	case CLASS_CODE_SCTP_IPV4:
6969		*flow_type = SCTP_V4_FLOW;
6970		break;
6971	case CLASS_CODE_TCP_IPV6:
6972		*flow_type = TCP_V6_FLOW;
6973		break;
6974	case CLASS_CODE_UDP_IPV6:
6975		*flow_type = UDP_V6_FLOW;
6976		break;
6977	case CLASS_CODE_AH_ESP_IPV6:
6978		*flow_type = AH_V6_FLOW;
6979		break;
6980	case CLASS_CODE_SCTP_IPV6:
6981		*flow_type = SCTP_V6_FLOW;
6982		break;
6983	case CLASS_CODE_USER_PROG1:
6984	case CLASS_CODE_USER_PROG2:
6985	case CLASS_CODE_USER_PROG3:
6986	case CLASS_CODE_USER_PROG4:
6987		*flow_type = IP_USER_FLOW;
6988		break;
6989	default:
6990		return -EINVAL;
6991	}
6992
6993	return 0;
6994}
6995
6996static int niu_ethflow_to_class(int flow_type, u64 *class)
6997{
6998	switch (flow_type) {
6999	case TCP_V4_FLOW:
7000		*class = CLASS_CODE_TCP_IPV4;
7001		break;
7002	case UDP_V4_FLOW:
7003		*class = CLASS_CODE_UDP_IPV4;
7004		break;
7005	case AH_ESP_V4_FLOW:
7006	case AH_V4_FLOW:
7007	case ESP_V4_FLOW:
7008		*class = CLASS_CODE_AH_ESP_IPV4;
7009		break;
7010	case SCTP_V4_FLOW:
7011		*class = CLASS_CODE_SCTP_IPV4;
7012		break;
7013	case TCP_V6_FLOW:
7014		*class = CLASS_CODE_TCP_IPV6;
7015		break;
7016	case UDP_V6_FLOW:
7017		*class = CLASS_CODE_UDP_IPV6;
7018		break;
7019	case AH_ESP_V6_FLOW:
7020	case AH_V6_FLOW:
7021	case ESP_V6_FLOW:
7022		*class = CLASS_CODE_AH_ESP_IPV6;
7023		break;
7024	case SCTP_V6_FLOW:
7025		*class = CLASS_CODE_SCTP_IPV6;
7026		break;
7027	default:
7028		return 0;
7029	}
7030
7031	return 1;
7032}
7033
7034static u64 niu_flowkey_to_ethflow(u64 flow_key)
7035{
7036	u64 ethflow = 0;
7037
7038	if (flow_key & FLOW_KEY_L2DA)
7039		ethflow |= RXH_L2DA;
7040	if (flow_key & FLOW_KEY_VLAN)
7041		ethflow |= RXH_VLAN;
7042	if (flow_key & FLOW_KEY_IPSA)
7043		ethflow |= RXH_IP_SRC;
7044	if (flow_key & FLOW_KEY_IPDA)
7045		ethflow |= RXH_IP_DST;
7046	if (flow_key & FLOW_KEY_PROTO)
7047		ethflow |= RXH_L3_PROTO;
7048	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
7049		ethflow |= RXH_L4_B_0_1;
7050	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
7051		ethflow |= RXH_L4_B_2_3;
7052
7053	return ethflow;
7054
7055}
7056
7057static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
7058{
7059	u64 key = 0;
7060
7061	if (ethflow & RXH_L2DA)
7062		key |= FLOW_KEY_L2DA;
7063	if (ethflow & RXH_VLAN)
7064		key |= FLOW_KEY_VLAN;
7065	if (ethflow & RXH_IP_SRC)
7066		key |= FLOW_KEY_IPSA;
7067	if (ethflow & RXH_IP_DST)
7068		key |= FLOW_KEY_IPDA;
7069	if (ethflow & RXH_L3_PROTO)
7070		key |= FLOW_KEY_PROTO;
7071	if (ethflow & RXH_L4_B_0_1)
7072		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
7073	if (ethflow & RXH_L4_B_2_3)
7074		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
7075
7076	*flow_key = key;
7077
7078	return 1;
7079
7080}
7081
7082static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
7083{
7084	u64 class;
7085
7086	nfc->data = 0;
7087
7088	if (!niu_ethflow_to_class(nfc->flow_type, &class))
7089		return -EINVAL;
7090
7091	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
7092	    TCAM_KEY_DISC)
7093		nfc->data = RXH_DISCARD;
7094	else
7095		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
7096						      CLASS_CODE_USER_PROG1]);
7097	return 0;
7098}
7099
7100static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
7101					struct ethtool_rx_flow_spec *fsp)
7102{
7103	u32 tmp;
7104	u16 prt;
7105
7106	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
7107	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
7108
7109	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
7110	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
7111
7112	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
7113	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
7114
7115	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
7116	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
7117
7118	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
7119		TCAM_V4KEY2_TOS_SHIFT;
7120	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
7121		TCAM_V4KEY2_TOS_SHIFT;
7122
7123	switch (fsp->flow_type) {
7124	case TCP_V4_FLOW:
7125	case UDP_V4_FLOW:
7126	case SCTP_V4_FLOW:
7127		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7128			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
7129		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
7130
7131		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7132			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7133		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
7134
7135		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7136			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
7137		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
7138
7139		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7140			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7141		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
7142		break;
7143	case AH_V4_FLOW:
7144	case ESP_V4_FLOW:
7145		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7146			TCAM_V4KEY2_PORT_SPI_SHIFT;
7147		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
7148
7149		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7150			TCAM_V4KEY2_PORT_SPI_SHIFT;
7151		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
7152		break;
7153	case IP_USER_FLOW:
7154		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7155			TCAM_V4KEY2_PORT_SPI_SHIFT;
7156		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
7157
7158		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7159			TCAM_V4KEY2_PORT_SPI_SHIFT;
7160		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
7161
7162		fsp->h_u.usr_ip4_spec.proto =
7163			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
7164			TCAM_V4KEY2_PROTO_SHIFT;
7165		fsp->m_u.usr_ip4_spec.proto =
7166			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
7167			TCAM_V4KEY2_PROTO_SHIFT;
7168
7169		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
7170		break;
7171	default:
7172		break;
7173	}
7174}
7175
7176static int niu_get_ethtool_tcam_entry(struct niu *np,
7177				      struct ethtool_rxnfc *nfc)
7178{
7179	struct niu_parent *parent = np->parent;
7180	struct niu_tcam_entry *tp;
7181	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
7182	u16 idx;
7183	u64 class;
7184	int ret = 0;
7185
7186	idx = tcam_get_index(np, (u16)nfc->fs.location);
7187
7188	tp = &parent->tcam[idx];
7189	if (!tp->valid) {
7190		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
7191			    parent->index, (u16)nfc->fs.location, idx);
7192		return -EINVAL;
7193	}
7194
7195	/* fill the flow spec entry */
7196	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7197		TCAM_V4KEY0_CLASS_CODE_SHIFT;
7198	ret = niu_class_to_ethflow(class, &fsp->flow_type);
7199	if (ret < 0) {
7200		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
7201			    parent->index);
7202		goto out;
7203	}
7204
7205	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
7206		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
7207			TCAM_V4KEY2_PROTO_SHIFT;
7208		if (proto == IPPROTO_ESP) {
7209			if (fsp->flow_type == AH_V4_FLOW)
7210				fsp->flow_type = ESP_V4_FLOW;
7211			else
7212				fsp->flow_type = ESP_V6_FLOW;
7213		}
7214	}
7215
7216	switch (fsp->flow_type) {
7217	case TCP_V4_FLOW:
7218	case UDP_V4_FLOW:
7219	case SCTP_V4_FLOW:
7220	case AH_V4_FLOW:
7221	case ESP_V4_FLOW:
7222		niu_get_ip4fs_from_tcam_key(tp, fsp);
7223		break;
7224	case TCP_V6_FLOW:
7225	case UDP_V6_FLOW:
7226	case SCTP_V6_FLOW:
7227	case AH_V6_FLOW:
7228	case ESP_V6_FLOW:
7229		/* Not yet implemented */
7230		ret = -EINVAL;
7231		break;
7232	case IP_USER_FLOW:
7233		niu_get_ip4fs_from_tcam_key(tp, fsp);
7234		break;
7235	default:
7236		ret = -EINVAL;
7237		break;
7238	}
7239
7240	if (ret < 0)
7241		goto out;
7242
7243	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
7244		fsp->ring_cookie = RX_CLS_FLOW_DISC;
7245	else
7246		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
7247			TCAM_ASSOCDATA_OFFSET_SHIFT;
7248
7249	/* put the tcam size here */
7250	nfc->data = tcam_get_size(np);
7251out:
7252	return ret;
7253}
7254
7255static int niu_get_ethtool_tcam_all(struct niu *np,
7256				    struct ethtool_rxnfc *nfc,
7257				    u32 *rule_locs)
7258{
7259	struct niu_parent *parent = np->parent;
7260	struct niu_tcam_entry *tp;
7261	int i, idx, cnt;
7262	unsigned long flags;
7263	int ret = 0;
7264
7265	/* put the tcam size here */
7266	nfc->data = tcam_get_size(np);
7267
7268	niu_lock_parent(np, flags);
7269	for (cnt = 0, i = 0; i < nfc->data; i++) {
7270		idx = tcam_get_index(np, i);
7271		tp = &parent->tcam[idx];
7272		if (!tp->valid)
7273			continue;
7274		if (cnt == nfc->rule_cnt) {
7275			ret = -EMSGSIZE;
7276			break;
7277		}
7278		rule_locs[cnt] = i;
7279		cnt++;
7280	}
7281	niu_unlock_parent(np, flags);
7282
7283	nfc->rule_cnt = cnt;
7284
7285	return ret;
7286}
7287
7288static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
7289		       u32 *rule_locs)
7290{
7291	struct niu *np = netdev_priv(dev);
7292	int ret = 0;
7293
7294	switch (cmd->cmd) {
7295	case ETHTOOL_GRXFH:
7296		ret = niu_get_hash_opts(np, cmd);
7297		break;
7298	case ETHTOOL_GRXRINGS:
7299		cmd->data = np->num_rx_rings;
7300		break;
7301	case ETHTOOL_GRXCLSRLCNT:
7302		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
7303		break;
7304	case ETHTOOL_GRXCLSRULE:
7305		ret = niu_get_ethtool_tcam_entry(np, cmd);
7306		break;
7307	case ETHTOOL_GRXCLSRLALL:
7308		ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
7309		break;
7310	default:
7311		ret = -EINVAL;
7312		break;
7313	}
7314
7315	return ret;
7316}
7317
7318static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
7319{
7320	u64 class;
7321	u64 flow_key = 0;
7322	unsigned long flags;
7323
7324	if (!niu_ethflow_to_class(nfc->flow_type, &class))
7325		return -EINVAL;
7326
7327	if (class < CLASS_CODE_USER_PROG1 ||
7328	    class > CLASS_CODE_SCTP_IPV6)
7329		return -EINVAL;
7330
7331	if (nfc->data & RXH_DISCARD) {
7332		niu_lock_parent(np, flags);
7333		flow_key = np->parent->tcam_key[class -
7334					       CLASS_CODE_USER_PROG1];
7335		flow_key |= TCAM_KEY_DISC;
7336		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
7337		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
7338		niu_unlock_parent(np, flags);
7339		return 0;
7340	} else {
7341		/* Discard was set before, but is not set now */
7342		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
7343		    TCAM_KEY_DISC) {
7344			niu_lock_parent(np, flags);
7345			flow_key = np->parent->tcam_key[class -
7346					       CLASS_CODE_USER_PROG1];
7347			flow_key &= ~TCAM_KEY_DISC;
7348			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
7349			     flow_key);
7350			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
7351				flow_key;
7352			niu_unlock_parent(np, flags);
7353		}
7354	}
7355
7356	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
7357		return -EINVAL;
7358
7359	niu_lock_parent(np, flags);
7360	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
7361	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
7362	niu_unlock_parent(np, flags);
7363
7364	return 0;
7365}
7366
7367static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
7368				       struct niu_tcam_entry *tp,
7369				       int l2_rdc_tab, u64 class)
7370{
7371	u8 pid = 0;
7372	u32 sip, dip, sipm, dipm, spi, spim;
7373	u16 sport, dport, spm, dpm;
7374
7375	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
7376	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
7377	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
7378	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
7379
7380	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
7381	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
7382	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
7383	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
7384
7385	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
7386	tp->key[3] |= dip;
7387
7388	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
7389	tp->key_mask[3] |= dipm;
7390
7391	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
7392		       TCAM_V4KEY2_TOS_SHIFT);
7393	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
7394			    TCAM_V4KEY2_TOS_SHIFT);
7395	switch (fsp->flow_type) {
7396	case TCP_V4_FLOW:
7397	case UDP_V4_FLOW:
7398	case SCTP_V4_FLOW:
7399		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
7400		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
7401		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
7402		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
7403
7404		tp->key[2] |= (((u64)sport << 16) | dport);
7405		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
7406		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
7407		break;
7408	case AH_V4_FLOW:
7409	case ESP_V4_FLOW:
7410		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
7411		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
7412
7413		tp->key[2] |= spi;
7414		tp->key_mask[2] |= spim;
7415		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
7416		break;
7417	case IP_USER_FLOW:
7418		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
7419		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
7420
7421		tp->key[2] |= spi;
7422		tp->key_mask[2] |= spim;
7423		pid = fsp->h_u.usr_ip4_spec.proto;
7424		break;
7425	default:
7426		break;
7427	}
7428
7429	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
7430	if (pid) {
7431		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
7432	}
7433}
7434
7435static int niu_add_ethtool_tcam_entry(struct niu *np,
7436				      struct ethtool_rxnfc *nfc)
7437{
7438	struct niu_parent *parent = np->parent;
7439	struct niu_tcam_entry *tp;
7440	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
7441	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
7442	int l2_rdc_table = rdc_table->first_table_num;
7443	u16 idx;
7444	u64 class;
7445	unsigned long flags;
7446	int err, ret;
7447
7448	ret = 0;
7449
7450	idx = nfc->fs.location;
7451	if (idx >= tcam_get_size(np))
7452		return -EINVAL;
7453
7454	if (fsp->flow_type == IP_USER_FLOW) {
7455		int i;
7456		int add_usr_cls = 0;
7457		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
7458		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
7459
7460		if (uspec->ip_ver != ETH_RX_NFC_IP4)
7461			return -EINVAL;
7462
7463		niu_lock_parent(np, flags);
7464
7465		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
7466			if (parent->l3_cls[i]) {
7467				if (uspec->proto == parent->l3_cls_pid[i]) {
7468					class = parent->l3_cls[i];
7469					parent->l3_cls_refcnt[i]++;
7470					add_usr_cls = 1;
7471					break;
7472				}
7473			} else {
7474				/* Program new user IP class */
7475				switch (i) {
7476				case 0:
7477					class = CLASS_CODE_USER_PROG1;
7478					break;
7479				case 1:
7480					class = CLASS_CODE_USER_PROG2;
7481					break;
7482				case 2:
7483					class = CLASS_CODE_USER_PROG3;
7484					break;
7485				case 3:
7486					class = CLASS_CODE_USER_PROG4;
7487					break;
7488				default:
7489					break;
7490				}
7491				ret = tcam_user_ip_class_set(np, class, 0,
7492							     uspec->proto,
7493							     uspec->tos,
7494							     umask->tos);
7495				if (ret)
7496					goto out;
7497
7498				ret = tcam_user_ip_class_enable(np, class, 1);
7499				if (ret)
7500					goto out;
7501				parent->l3_cls[i] = class;
7502				parent->l3_cls_pid[i] = uspec->proto;
7503				parent->l3_cls_refcnt[i]++;
7504				add_usr_cls = 1;
7505				break;
7506			}
7507		}
7508		if (!add_usr_cls) {
7509			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
7510				    parent->index, __func__, uspec->proto);
7511			ret = -EINVAL;
7512			goto out;
7513		}
7514		niu_unlock_parent(np, flags);
7515	} else {
7516		if (!niu_ethflow_to_class(fsp->flow_type, &class)) {
7517			return -EINVAL;
7518		}
7519	}
7520
7521	niu_lock_parent(np, flags);
7522
7523	idx = tcam_get_index(np, idx);
7524	tp = &parent->tcam[idx];
7525
7526	memset(tp, 0, sizeof(*tp));
7527
7528	/* fill in the tcam key and mask */
7529	switch (fsp->flow_type) {
7530	case TCP_V4_FLOW:
7531	case UDP_V4_FLOW:
7532	case SCTP_V4_FLOW:
7533	case AH_V4_FLOW:
7534	case ESP_V4_FLOW:
7535		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
7536		break;
7537	case TCP_V6_FLOW:
7538	case UDP_V6_FLOW:
7539	case SCTP_V6_FLOW:
7540	case AH_V6_FLOW:
7541	case ESP_V6_FLOW:
7542		/* Not yet implemented */
7543		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
7544			    parent->index, __func__, fsp->flow_type);
7545		ret = -EINVAL;
7546		goto out;
7547	case IP_USER_FLOW:
7548		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
7549		break;
7550	default:
7551		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
7552			    parent->index, __func__, fsp->flow_type);
7553		ret = -EINVAL;
7554		goto out;
7555	}
7556
7557	/* fill in the assoc data */
7558	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
7559		tp->assoc_data = TCAM_ASSOCDATA_DISC;
7560	} else {
7561		if (fsp->ring_cookie >= np->num_rx_rings) {
7562			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
7563				    parent->index, __func__,
7564				    (long long)fsp->ring_cookie);
7565			ret = -EINVAL;
7566			goto out;
7567		}
7568		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
7569				  (fsp->ring_cookie <<
7570				   TCAM_ASSOCDATA_OFFSET_SHIFT));
7571	}
7572
7573	err = tcam_write(np, idx, tp->key, tp->key_mask);
7574	if (err) {
7575		ret = -EINVAL;
7576		goto out;
7577	}
7578	err = tcam_assoc_write(np, idx, tp->assoc_data);
7579	if (err) {
7580		ret = -EINVAL;
7581		goto out;
7582	}
7583
7584	/* validate the entry */
7585	tp->valid = 1;
7586	np->clas.tcam_valid_entries++;
7587out:
7588	niu_unlock_parent(np, flags);
7589
7590	return ret;
7591}
7592
7593static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
7594{
7595	struct niu_parent *parent = np->parent;
7596	struct niu_tcam_entry *tp;
7597	u16 idx;
7598	unsigned long flags;
7599	u64 class;
7600	int ret = 0;
7601
7602	if (loc >= tcam_get_size(np))
7603		return -EINVAL;
7604
7605	niu_lock_parent(np, flags);
7606
7607	idx = tcam_get_index(np, loc);
7608	tp = &parent->tcam[idx];
7609
7610	/* if the entry is of a user defined class, then update*/
7611	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7612		TCAM_V4KEY0_CLASS_CODE_SHIFT;
7613
7614	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
7615		int i;
7616		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
7617			if (parent->l3_cls[i] == class) {
7618				parent->l3_cls_refcnt[i]--;
7619				if (!parent->l3_cls_refcnt[i]) {
7620					/* disable class */
7621					ret = tcam_user_ip_class_enable(np,
7622									class,
7623									0);
7624					if (ret)
7625						goto out;
7626					parent->l3_cls[i] = 0;
7627					parent->l3_cls_pid[i] = 0;
7628				}
7629				break;
7630			}
7631		}
7632		if (i == NIU_L3_PROG_CLS) {
7633			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
7634				    parent->index, __func__,
7635				    (unsigned long long)class);
7636			ret = -EINVAL;
7637			goto out;
7638		}
7639	}
7640
7641	ret = tcam_flush(np, idx);
7642	if (ret)
7643		goto out;
7644
7645	/* invalidate the entry */
7646	tp->valid = 0;
7647	np->clas.tcam_valid_entries--;
7648out:
7649	niu_unlock_parent(np, flags);
7650
7651	return ret;
7652}
7653
7654static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
7655{
7656	struct niu *np = netdev_priv(dev);
7657	int ret = 0;
7658
7659	switch (cmd->cmd) {
7660	case ETHTOOL_SRXFH:
7661		ret = niu_set_hash_opts(np, cmd);
7662		break;
7663	case ETHTOOL_SRXCLSRLINS:
7664		ret = niu_add_ethtool_tcam_entry(np, cmd);
7665		break;
7666	case ETHTOOL_SRXCLSRLDEL:
7667		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
7668		break;
7669	default:
7670		ret = -EINVAL;
7671		break;
7672	}
7673
7674	return ret;
7675}
7676
7677static const struct {
7678	const char string[ETH_GSTRING_LEN];
7679} niu_xmac_stat_keys[] = {
7680	{ "tx_frames" },
7681	{ "tx_bytes" },
7682	{ "tx_fifo_errors" },
7683	{ "tx_overflow_errors" },
7684	{ "tx_max_pkt_size_errors" },
7685	{ "tx_underflow_errors" },
7686	{ "rx_local_faults" },
7687	{ "rx_remote_faults" },
7688	{ "rx_link_faults" },
7689	{ "rx_align_errors" },
7690	{ "rx_frags" },
7691	{ "rx_mcasts" },
7692	{ "rx_bcasts" },
7693	{ "rx_hist_cnt1" },
7694	{ "rx_hist_cnt2" },
7695	{ "rx_hist_cnt3" },
7696	{ "rx_hist_cnt4" },
7697	{ "rx_hist_cnt5" },
7698	{ "rx_hist_cnt6" },
7699	{ "rx_hist_cnt7" },
7700	{ "rx_octets" },
7701	{ "rx_code_violations" },
7702	{ "rx_len_errors" },
7703	{ "rx_crc_errors" },
7704	{ "rx_underflows" },
7705	{ "rx_overflows" },
7706	{ "pause_off_state" },
7707	{ "pause_on_state" },
7708	{ "pause_received" },
7709};
7710
7711#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
7712
7713static const struct {
7714	const char string[ETH_GSTRING_LEN];
7715} niu_bmac_stat_keys[] = {
7716	{ "tx_underflow_errors" },
7717	{ "tx_max_pkt_size_errors" },
7718	{ "tx_bytes" },
7719	{ "tx_frames" },
7720	{ "rx_overflows" },
7721	{ "rx_frames" },
7722	{ "rx_align_errors" },
7723	{ "rx_crc_errors" },
7724	{ "rx_len_errors" },
7725	{ "pause_off_state" },
7726	{ "pause_on_state" },
7727	{ "pause_received" },
7728};
7729
7730#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
7731
7732static const struct {
7733	const char string[ETH_GSTRING_LEN];
7734} niu_rxchan_stat_keys[] = {
7735	{ "rx_channel" },
7736	{ "rx_packets" },
7737	{ "rx_bytes" },
7738	{ "rx_dropped" },
7739	{ "rx_errors" },
7740};
7741
7742#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
7743
7744static const struct {
7745	const char string[ETH_GSTRING_LEN];
7746} niu_txchan_stat_keys[] = {
7747	{ "tx_channel" },
7748	{ "tx_packets" },
7749	{ "tx_bytes" },
7750	{ "tx_errors" },
7751};
7752
7753#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
7754
7755static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
7756{
7757	struct niu *np = netdev_priv(dev);
7758	int i;
7759
7760	if (stringset != ETH_SS_STATS)
7761		return;
7762
7763	if (np->flags & NIU_FLAGS_XMAC) {
7764		memcpy(data, niu_xmac_stat_keys,
7765		       sizeof(niu_xmac_stat_keys));
7766		data += sizeof(niu_xmac_stat_keys);
7767	} else {
7768		memcpy(data, niu_bmac_stat_keys,
7769		       sizeof(niu_bmac_stat_keys));
7770		data += sizeof(niu_bmac_stat_keys);
7771	}
7772	for (i = 0; i < np->num_rx_rings; i++) {
7773		memcpy(data, niu_rxchan_stat_keys,
7774		       sizeof(niu_rxchan_stat_keys));
7775		data += sizeof(niu_rxchan_stat_keys);
7776	}
7777	for (i = 0; i < np->num_tx_rings; i++) {
7778		memcpy(data, niu_txchan_stat_keys,
7779		       sizeof(niu_txchan_stat_keys));
7780		data += sizeof(niu_txchan_stat_keys);
7781	}
7782}
7783
7784static int niu_get_sset_count(struct net_device *dev, int stringset)
7785{
7786	struct niu *np = netdev_priv(dev);
7787
7788	if (stringset != ETH_SS_STATS)
7789		return -EINVAL;
7790
7791	return (np->flags & NIU_FLAGS_XMAC ?
7792		 NUM_XMAC_STAT_KEYS :
7793		 NUM_BMAC_STAT_KEYS) +
7794		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
7795		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
7796}
7797
7798static void niu_get_ethtool_stats(struct net_device *dev,
7799				  struct ethtool_stats *stats, u64 *data)
7800{
7801	struct niu *np = netdev_priv(dev);
7802	int i;
7803
7804	niu_sync_mac_stats(np);
7805	if (np->flags & NIU_FLAGS_XMAC) {
7806		memcpy(data, &np->mac_stats.xmac,
7807		       sizeof(struct niu_xmac_stats));
7808		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
7809	} else {
7810		memcpy(data, &np->mac_stats.bmac,
7811		       sizeof(struct niu_bmac_stats));
7812		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
7813	}
7814	for (i = 0; i < np->num_rx_rings; i++) {
7815		struct rx_ring_info *rp = &np->rx_rings[i];
7816
7817		niu_sync_rx_discard_stats(np, rp, 0);
7818
7819		data[0] = rp->rx_channel;
7820		data[1] = rp->rx_packets;
7821		data[2] = rp->rx_bytes;
7822		data[3] = rp->rx_dropped;
7823		data[4] = rp->rx_errors;
7824		data += 5;
7825	}
7826	for (i = 0; i < np->num_tx_rings; i++) {
7827		struct tx_ring_info *rp = &np->tx_rings[i];
7828
7829		data[0] = rp->tx_channel;
7830		data[1] = rp->tx_packets;
7831		data[2] = rp->tx_bytes;
7832		data[3] = rp->tx_errors;
7833		data += 4;
7834	}
7835}
7836
7837static u64 niu_led_state_save(struct niu *np)
7838{
7839	if (np->flags & NIU_FLAGS_XMAC)
7840		return nr64_mac(XMAC_CONFIG);
7841	else
7842		return nr64_mac(BMAC_XIF_CONFIG);
7843}
7844
7845static void niu_led_state_restore(struct niu *np, u64 val)
7846{
7847	if (np->flags & NIU_FLAGS_XMAC)
7848		nw64_mac(XMAC_CONFIG, val);
7849	else
7850		nw64_mac(BMAC_XIF_CONFIG, val);
7851}
7852
7853static void niu_force_led(struct niu *np, int on)
7854{
7855	u64 val, reg, bit;
7856
7857	if (np->flags & NIU_FLAGS_XMAC) {
7858		reg = XMAC_CONFIG;
7859		bit = XMAC_CONFIG_FORCE_LED_ON;
7860	} else {
7861		reg = BMAC_XIF_CONFIG;
7862		bit = BMAC_XIF_CONFIG_LINK_LED;
7863	}
7864
7865	val = nr64_mac(reg);
7866	if (on)
7867		val |= bit;
7868	else
7869		val &= ~bit;
7870	nw64_mac(reg, val);
7871}
7872
7873static int niu_set_phys_id(struct net_device *dev,
7874			   enum ethtool_phys_id_state state)
7875
7876{
7877	struct niu *np = netdev_priv(dev);
7878
7879	if (!netif_running(dev))
7880		return -EAGAIN;
7881
7882	switch (state) {
7883	case ETHTOOL_ID_ACTIVE:
7884		np->orig_led_state = niu_led_state_save(np);
7885		return 1;	/* cycle on/off once per second */
7886
7887	case ETHTOOL_ID_ON:
7888		niu_force_led(np, 1);
7889		break;
7890
7891	case ETHTOOL_ID_OFF:
7892		niu_force_led(np, 0);
7893		break;
7894
7895	case ETHTOOL_ID_INACTIVE:
7896		niu_led_state_restore(np, np->orig_led_state);
7897	}
7898
7899	return 0;
7900}
7901
7902static const struct ethtool_ops niu_ethtool_ops = {
7903	.get_drvinfo		= niu_get_drvinfo,
7904	.get_link		= ethtool_op_get_link,
7905	.get_msglevel		= niu_get_msglevel,
7906	.set_msglevel		= niu_set_msglevel,
7907	.nway_reset		= niu_nway_reset,
7908	.get_eeprom_len		= niu_get_eeprom_len,
7909	.get_eeprom		= niu_get_eeprom,
7910	.get_settings		= niu_get_settings,
7911	.set_settings		= niu_set_settings,
7912	.get_strings		= niu_get_strings,
7913	.get_sset_count		= niu_get_sset_count,
7914	.get_ethtool_stats	= niu_get_ethtool_stats,
7915	.set_phys_id		= niu_set_phys_id,
7916	.get_rxnfc		= niu_get_nfc,
7917	.set_rxnfc		= niu_set_nfc,
7918};
7919
7920static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
7921			      int ldg, int ldn)
7922{
7923	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
7924		return -EINVAL;
7925	if (ldn < 0 || ldn > LDN_MAX)
7926		return -EINVAL;
7927
7928	parent->ldg_map[ldn] = ldg;
7929
7930	if (np->parent->plat_type == PLAT_TYPE_NIU) {
7931		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
7932		 * the firmware, and we're not supposed to change them.
7933		 * Validate the mapping, because if it's wrong we probably
7934		 * won't get any interrupts and that's painful to debug.
7935		 */
7936		if (nr64(LDG_NUM(ldn)) != ldg) {
7937			dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
7938				np->port, ldn, ldg,
7939				(unsigned long long) nr64(LDG_NUM(ldn)));
7940			return -EINVAL;
7941		}
7942	} else
7943		nw64(LDG_NUM(ldn), ldg);
7944
7945	return 0;
7946}
7947
7948static int niu_set_ldg_timer_res(struct niu *np, int res)
7949{
7950	if (res < 0 || res > LDG_TIMER_RES_VAL)
7951		return -EINVAL;
7952
7953
7954	nw64(LDG_TIMER_RES, res);
7955
7956	return 0;
7957}
7958
7959static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
7960{
7961	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
7962	    (func < 0 || func > 3) ||
7963	    (vector < 0 || vector > 0x1f))
7964		return -EINVAL;
7965
7966	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
7967
7968	return 0;
7969}
7970
7971static int niu_pci_eeprom_read(struct niu *np, u32 addr)
7972{
7973	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
7974				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
7975	int limit;
7976
7977	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
7978		return -EINVAL;
7979
7980	frame = frame_base;
7981	nw64(ESPC_PIO_STAT, frame);
7982	limit = 64;
7983	do {
7984		udelay(5);
7985		frame = nr64(ESPC_PIO_STAT);
7986		if (frame & ESPC_PIO_STAT_READ_END)
7987			break;
7988	} while (limit--);
7989	if (!(frame & ESPC_PIO_STAT_READ_END)) {
7990		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
7991			(unsigned long long) frame);
7992		return -ENODEV;
7993	}
7994
7995	frame = frame_base;
7996	nw64(ESPC_PIO_STAT, frame);
7997	limit = 64;
7998	do {
7999		udelay(5);
8000		frame = nr64(ESPC_PIO_STAT);
8001		if (frame & ESPC_PIO_STAT_READ_END)
8002			break;
8003	} while (limit--);
8004	if (!(frame & ESPC_PIO_STAT_READ_END)) {
8005		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
8006			(unsigned long long) frame);
8007		return -ENODEV;
8008	}
8009
8010	frame = nr64(ESPC_PIO_STAT);
8011	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
8012}
8013
8014static int niu_pci_eeprom_read16(struct niu *np, u32 off)
8015{
8016	int err = niu_pci_eeprom_read(np, off);
8017	u16 val;
8018
8019	if (err < 0)
8020		return err;
8021	val = (err << 8);
8022	err = niu_pci_eeprom_read(np, off + 1);
8023	if (err < 0)
8024		return err;
8025	val |= (err & 0xff);
8026
8027	return val;
8028}
8029
8030static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
8031{
8032	int err = niu_pci_eeprom_read(np, off);
8033	u16 val;
8034
8035	if (err < 0)
8036		return err;
8037
8038	val = (err & 0xff);
8039	err = niu_pci_eeprom_read(np, off + 1);
8040	if (err < 0)
8041		return err;
8042
8043	val |= (err & 0xff) << 8;
8044
8045	return val;
8046}
8047
8048static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
8049				    int namebuf_len)
8050{
8051	int i;
8052
8053	for (i = 0; i < namebuf_len; i++) {
8054		int err = niu_pci_eeprom_read(np, off + i);
8055		if (err < 0)
8056			return err;
8057		*namebuf++ = err;
8058		if (!err)
8059			break;
8060	}
8061	if (i >= namebuf_len)
8062		return -EINVAL;
8063
8064	return i + 1;
8065}
8066
8067static void niu_vpd_parse_version(struct niu *np)
8068{
8069	struct niu_vpd *vpd = &np->vpd;
8070	int len = strlen(vpd->version) + 1;
8071	const char *s = vpd->version;
8072	int i;
8073
8074	for (i = 0; i < len - 5; i++) {
8075		if (!strncmp(s + i, "FCode ", 6))
8076			break;
8077	}
8078	if (i >= len - 5)
8079		return;
8080
8081	s += i + 5;
8082	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
8083
8084	netif_printk(np, probe, KERN_DEBUG, np->dev,
8085		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
8086		     vpd->fcode_major, vpd->fcode_minor);
8087	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
8088	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
8089	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
8090		np->flags |= NIU_FLAGS_VPD_VALID;
8091}
8092
8093/* ESPC_PIO_EN_ENABLE must be set */
8094static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
8095{
8096	unsigned int found_mask = 0;
8097#define FOUND_MASK_MODEL	0x00000001
8098#define FOUND_MASK_BMODEL	0x00000002
8099#define FOUND_MASK_VERS		0x00000004
8100#define FOUND_MASK_MAC		0x00000008
8101#define FOUND_MASK_NMAC		0x00000010
8102#define FOUND_MASK_PHY		0x00000020
8103#define FOUND_MASK_ALL		0x0000003f
8104
8105	netif_printk(np, probe, KERN_DEBUG, np->dev,
8106		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
8107	while (start < end) {
8108		int len, err, prop_len;
8109		char namebuf[64];
8110		u8 *prop_buf;
8111		int max_len;
8112
8113		if (found_mask == FOUND_MASK_ALL) {
8114			niu_vpd_parse_version(np);
8115			return 1;
8116		}
8117
8118		err = niu_pci_eeprom_read(np, start + 2);
8119		if (err < 0)
8120			return err;
8121		len = err;
8122		start += 3;
8123
8124		prop_len = niu_pci_eeprom_read(np, start + 4);
8125		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
8126		if (err < 0)
8127			return err;
8128
8129		prop_buf = NULL;
8130		max_len = 0;
8131		if (!strcmp(namebuf, "model")) {
8132			prop_buf = np->vpd.model;
8133			max_len = NIU_VPD_MODEL_MAX;
8134			found_mask |= FOUND_MASK_MODEL;
8135		} else if (!strcmp(namebuf, "board-model")) {
8136			prop_buf = np->vpd.board_model;
8137			max_len = NIU_VPD_BD_MODEL_MAX;
8138			found_mask |= FOUND_MASK_BMODEL;
8139		} else if (!strcmp(namebuf, "version")) {
8140			prop_buf = np->vpd.version;
8141			max_len = NIU_VPD_VERSION_MAX;
8142			found_mask |= FOUND_MASK_VERS;
8143		} else if (!strcmp(namebuf, "local-mac-address")) {
8144			prop_buf = np->vpd.local_mac;
8145			max_len = ETH_ALEN;
8146			found_mask |= FOUND_MASK_MAC;
8147		} else if (!strcmp(namebuf, "num-mac-addresses")) {
8148			prop_buf = &np->vpd.mac_num;
8149			max_len = 1;
8150			found_mask |= FOUND_MASK_NMAC;
8151		} else if (!strcmp(namebuf, "phy-type")) {
8152			prop_buf = np->vpd.phy_type;
8153			max_len = NIU_VPD_PHY_TYPE_MAX;
8154			found_mask |= FOUND_MASK_PHY;
8155		}
8156
8157		if (max_len && prop_len > max_len) {
8158			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
8159			return -EINVAL;
8160		}
8161
8162		if (prop_buf) {
8163			u32 off = start + 5 + err;
8164			int i;
8165
8166			netif_printk(np, probe, KERN_DEBUG, np->dev,
8167				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
8168				     namebuf, prop_len);
8169			for (i = 0; i < prop_len; i++)
8170				*prop_buf++ = niu_pci_eeprom_read(np, off + i);
8171		}
8172
8173		start += len;
8174	}
8175
8176	return 0;
8177}
8178
8179/* ESPC_PIO_EN_ENABLE must be set */
8180static void niu_pci_vpd_fetch(struct niu *np, u32 start)
8181{
8182	u32 offset;
8183	int err;
8184
8185	err = niu_pci_eeprom_read16_swp(np, start + 1);
8186	if (err < 0)
8187		return;
8188
8189	offset = err + 3;
8190
8191	while (start + offset < ESPC_EEPROM_SIZE) {
8192		u32 here = start + offset;
8193		u32 end;
8194
8195		err = niu_pci_eeprom_read(np, here);
8196		if (err != 0x90)
8197			return;
8198
8199		err = niu_pci_eeprom_read16_swp(np, here + 1);
8200		if (err < 0)
8201			return;
8202
8203		here = start + offset + 3;
8204		end = start + offset + err;
8205
8206		offset += err;
8207
8208		err = niu_pci_vpd_scan_props(np, here, end);
8209		if (err < 0 || err == 1)
8210			return;
8211	}
8212}
8213
8214/* ESPC_PIO_EN_ENABLE must be set */
8215static u32 niu_pci_vpd_offset(struct niu *np)
8216{
8217	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
8218	int err;
8219
8220	while (start < end) {
8221		ret = start;
8222
8223		/* ROM header signature?  */
8224		err = niu_pci_eeprom_read16(np, start +  0);
8225		if (err != 0x55aa)
8226			return 0;
8227
8228		/* Apply offset to PCI data structure.  */
8229		err = niu_pci_eeprom_read16(np, start + 23);
8230		if (err < 0)
8231			return 0;
8232		start += err;
8233
8234		/* Check for "PCIR" signature.  */
8235		err = niu_pci_eeprom_read16(np, start +  0);
8236		if (err != 0x5043)
8237			return 0;
8238		err = niu_pci_eeprom_read16(np, start +  2);
8239		if (err != 0x4952)
8240			return 0;
8241
8242		/* Check for OBP image type.  */
8243		err = niu_pci_eeprom_read(np, start + 20);
8244		if (err < 0)
8245			return 0;
8246		if (err != 0x01) {
8247			err = niu_pci_eeprom_read(np, ret + 2);
8248			if (err < 0)
8249				return 0;
8250
8251			start = ret + (err * 512);
8252			continue;
8253		}
8254
8255		err = niu_pci_eeprom_read16_swp(np, start + 8);
8256		if (err < 0)
8257			return err;
8258		ret += err;
8259
8260		err = niu_pci_eeprom_read(np, ret + 0);
8261		if (err != 0x82)
8262			return 0;
8263
8264		return ret;
8265	}
8266
8267	return 0;
8268}
8269
8270static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
8271{
8272	if (!strcmp(phy_prop, "mif")) {
8273		/* 1G copper, MII */
8274		np->flags &= ~(NIU_FLAGS_FIBER |
8275			       NIU_FLAGS_10G);
8276		np->mac_xcvr = MAC_XCVR_MII;
8277	} else if (!strcmp(phy_prop, "xgf")) {
8278		/* 10G fiber, XPCS */
8279		np->flags |= (NIU_FLAGS_10G |
8280			      NIU_FLAGS_FIBER);
8281		np->mac_xcvr = MAC_XCVR_XPCS;
8282	} else if (!strcmp(phy_prop, "pcs")) {
8283		/* 1G fiber, PCS */
8284		np->flags &= ~NIU_FLAGS_10G;
8285		np->flags |= NIU_FLAGS_FIBER;
8286		np->mac_xcvr = MAC_XCVR_PCS;
8287	} else if (!strcmp(phy_prop, "xgc")) {
8288		/* 10G copper, XPCS */
8289		np->flags |= NIU_FLAGS_10G;
8290		np->flags &= ~NIU_FLAGS_FIBER;
8291		np->mac_xcvr = MAC_XCVR_XPCS;
8292	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
8293		/* 10G Serdes or 1G Serdes, default to 10G */
8294		np->flags |= NIU_FLAGS_10G;
8295		np->flags &= ~NIU_FLAGS_FIBER;
8296		np->flags |= NIU_FLAGS_XCVR_SERDES;
8297		np->mac_xcvr = MAC_XCVR_XPCS;
8298	} else {
8299		return -EINVAL;
8300	}
8301	return 0;
8302}
8303
8304static int niu_pci_vpd_get_nports(struct niu *np)
8305{
8306	int ports = 0;
8307
8308	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
8309	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
8310	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
8311	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
8312	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
8313		ports = 4;
8314	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
8315		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
8316		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
8317		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
8318		ports = 2;
8319	}
8320
8321	return ports;
8322}
8323
8324static void niu_pci_vpd_validate(struct niu *np)
8325{
8326	struct net_device *dev = np->dev;
8327	struct niu_vpd *vpd = &np->vpd;
8328	u8 val8;
8329
8330	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
8331		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
8332
8333		np->flags &= ~NIU_FLAGS_VPD_VALID;
8334		return;
8335	}
8336
8337	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8338	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8339		np->flags |= NIU_FLAGS_10G;
8340		np->flags &= ~NIU_FLAGS_FIBER;
8341		np->flags |= NIU_FLAGS_XCVR_SERDES;
8342		np->mac_xcvr = MAC_XCVR_PCS;
8343		if (np->port > 1) {
8344			np->flags |= NIU_FLAGS_FIBER;
8345			np->flags &= ~NIU_FLAGS_10G;
8346		}
8347		if (np->flags & NIU_FLAGS_10G)
8348			np->mac_xcvr = MAC_XCVR_XPCS;
8349	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8350		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
8351			      NIU_FLAGS_HOTPLUG_PHY);
8352	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
8353		dev_err(np->device, "Illegal phy string [%s]\n",
8354			np->vpd.phy_type);
8355		dev_err(np->device, "Falling back to SPROM\n");
8356		np->flags &= ~NIU_FLAGS_VPD_VALID;
8357		return;
8358	}
8359
8360	memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
8361
8362	val8 = dev->dev_addr[5];
8363	dev->dev_addr[5] += np->port;
8364	if (dev->dev_addr[5] < val8)
8365		dev->dev_addr[4]++;
8366}
8367
8368static int niu_pci_probe_sprom(struct niu *np)
8369{
8370	struct net_device *dev = np->dev;
8371	int len, i;
8372	u64 val, sum;
8373	u8 val8;
8374
8375	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
8376	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
8377	len = val / 4;
8378
8379	np->eeprom_len = len;
8380
8381	netif_printk(np, probe, KERN_DEBUG, np->dev,
8382		     "SPROM: Image size %llu\n", (unsigned long long)val);
8383
8384	sum = 0;
8385	for (i = 0; i < len; i++) {
8386		val = nr64(ESPC_NCR(i));
8387		sum += (val >>  0) & 0xff;
8388		sum += (val >>  8) & 0xff;
8389		sum += (val >> 16) & 0xff;
8390		sum += (val >> 24) & 0xff;
8391	}
8392	netif_printk(np, probe, KERN_DEBUG, np->dev,
8393		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
8394	if ((sum & 0xff) != 0xab) {
8395		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
8396		return -EINVAL;
8397	}
8398
8399	val = nr64(ESPC_PHY_TYPE);
8400	switch (np->port) {
8401	case 0:
8402		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
8403			ESPC_PHY_TYPE_PORT0_SHIFT;
8404		break;
8405	case 1:
8406		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
8407			ESPC_PHY_TYPE_PORT1_SHIFT;
8408		break;
8409	case 2:
8410		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
8411			ESPC_PHY_TYPE_PORT2_SHIFT;
8412		break;
8413	case 3:
8414		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
8415			ESPC_PHY_TYPE_PORT3_SHIFT;
8416		break;
8417	default:
8418		dev_err(np->device, "Bogus port number %u\n",
8419			np->port);
8420		return -EINVAL;
8421	}
8422	netif_printk(np, probe, KERN_DEBUG, np->dev,
8423		     "SPROM: PHY type %x\n", val8);
8424
8425	switch (val8) {
8426	case ESPC_PHY_TYPE_1G_COPPER:
8427		/* 1G copper, MII */
8428		np->flags &= ~(NIU_FLAGS_FIBER |
8429			       NIU_FLAGS_10G);
8430		np->mac_xcvr = MAC_XCVR_MII;
8431		break;
8432
8433	case ESPC_PHY_TYPE_1G_FIBER:
8434		/* 1G fiber, PCS */
8435		np->flags &= ~NIU_FLAGS_10G;
8436		np->flags |= NIU_FLAGS_FIBER;
8437		np->mac_xcvr = MAC_XCVR_PCS;
8438		break;
8439
8440	case ESPC_PHY_TYPE_10G_COPPER:
8441		/* 10G copper, XPCS */
8442		np->flags |= NIU_FLAGS_10G;
8443		np->flags &= ~NIU_FLAGS_FIBER;
8444		np->mac_xcvr = MAC_XCVR_XPCS;
8445		break;
8446
8447	case ESPC_PHY_TYPE_10G_FIBER:
8448		/* 10G fiber, XPCS */
8449		np->flags |= (NIU_FLAGS_10G |
8450			      NIU_FLAGS_FIBER);
8451		np->mac_xcvr = MAC_XCVR_XPCS;
8452		break;
8453
8454	default:
8455		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
8456		return -EINVAL;
8457	}
8458
8459	val = nr64(ESPC_MAC_ADDR0);
8460	netif_printk(np, probe, KERN_DEBUG, np->dev,
8461		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
8462	dev->dev_addr[0] = (val >>  0) & 0xff;
8463	dev->dev_addr[1] = (val >>  8) & 0xff;
8464	dev->dev_addr[2] = (val >> 16) & 0xff;
8465	dev->dev_addr[3] = (val >> 24) & 0xff;
8466
8467	val = nr64(ESPC_MAC_ADDR1);
8468	netif_printk(np, probe, KERN_DEBUG, np->dev,
8469		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
8470	dev->dev_addr[4] = (val >>  0) & 0xff;
8471	dev->dev_addr[5] = (val >>  8) & 0xff;
8472
8473	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8474		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
8475			dev->dev_addr);
8476		return -EINVAL;
8477	}
8478
8479	val8 = dev->dev_addr[5];
8480	dev->dev_addr[5] += np->port;
8481	if (dev->dev_addr[5] < val8)
8482		dev->dev_addr[4]++;
8483
8484	val = nr64(ESPC_MOD_STR_LEN);
8485	netif_printk(np, probe, KERN_DEBUG, np->dev,
8486		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8487	if (val >= 8 * 4)
8488		return -EINVAL;
8489
8490	for (i = 0; i < val; i += 4) {
8491		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
8492
8493		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
8494		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
8495		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
8496		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
8497	}
8498	np->vpd.model[val] = '\0';
8499
8500	val = nr64(ESPC_BD_MOD_STR_LEN);
8501	netif_printk(np, probe, KERN_DEBUG, np->dev,
8502		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8503	if (val >= 4 * 4)
8504		return -EINVAL;
8505
8506	for (i = 0; i < val; i += 4) {
8507		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
8508
8509		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
8510		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
8511		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
8512		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
8513	}
8514	np->vpd.board_model[val] = '\0';
8515
8516	np->vpd.mac_num =
8517		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
8518	netif_printk(np, probe, KERN_DEBUG, np->dev,
8519		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
8520
8521	return 0;
8522}
8523
8524static int niu_get_and_validate_port(struct niu *np)
8525{
8526	struct niu_parent *parent = np->parent;
8527
8528	if (np->port <= 1)
8529		np->flags |= NIU_FLAGS_XMAC;
8530
8531	if (!parent->num_ports) {
8532		if (parent->plat_type == PLAT_TYPE_NIU) {
8533			parent->num_ports = 2;
8534		} else {
8535			parent->num_ports = niu_pci_vpd_get_nports(np);
8536			if (!parent->num_ports) {
8537				/* Fall back to SPROM as last resort.
8538				 * This will fail on most cards.
8539				 */
8540				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
8541					ESPC_NUM_PORTS_MACS_VAL;
8542
8543				/* All of the current probing methods fail on
8544				 * Maramba on-board parts.
8545				 */
8546				if (!parent->num_ports)
8547					parent->num_ports = 4;
8548			}
8549		}
8550	}
8551
8552	if (np->port >= parent->num_ports)
8553		return -ENODEV;
8554
8555	return 0;
8556}
8557
8558static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
8559		      int dev_id_1, int dev_id_2, u8 phy_port, int type)
8560{
8561	u32 id = (dev_id_1 << 16) | dev_id_2;
8562	u8 idx;
8563
8564	if (dev_id_1 < 0 || dev_id_2 < 0)
8565		return 0;
8566	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
8567		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
8568		 * test covers the 8706 as well.
8569		 */
8570		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
8571		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
8572			return 0;
8573	} else {
8574		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
8575			return 0;
8576	}
8577
8578	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
8579		parent->index, id,
8580		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
8581		type == PHY_TYPE_PCS ? "PCS" : "MII",
8582		phy_port);
8583
8584	if (p->cur[type] >= NIU_MAX_PORTS) {
8585		pr_err("Too many PHY ports\n");
8586		return -EINVAL;
8587	}
8588	idx = p->cur[type];
8589	p->phy_id[type][idx] = id;
8590	p->phy_port[type][idx] = phy_port;
8591	p->cur[type] = idx + 1;
8592	return 0;
8593}
8594
8595static int port_has_10g(struct phy_probe_info *p, int port)
8596{
8597	int i;
8598
8599	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
8600		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
8601			return 1;
8602	}
8603	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
8604		if (p->phy_port[PHY_TYPE_PCS][i] == port)
8605			return 1;
8606	}
8607
8608	return 0;
8609}
8610
8611static int count_10g_ports(struct phy_probe_info *p, int *lowest)
8612{
8613	int port, cnt;
8614
8615	cnt = 0;
8616	*lowest = 32;
8617	for (port = 8; port < 32; port++) {
8618		if (port_has_10g(p, port)) {
8619			if (!cnt)
8620				*lowest = port;
8621			cnt++;
8622		}
8623	}
8624
8625	return cnt;
8626}
8627
8628static int count_1g_ports(struct phy_probe_info *p, int *lowest)
8629{
8630	*lowest = 32;
8631	if (p->cur[PHY_TYPE_MII])
8632		*lowest = p->phy_port[PHY_TYPE_MII][0];
8633
8634	return p->cur[PHY_TYPE_MII];
8635}
8636
8637static void niu_n2_divide_channels(struct niu_parent *parent)
8638{
8639	int num_ports = parent->num_ports;
8640	int i;
8641
8642	for (i = 0; i < num_ports; i++) {
8643		parent->rxchan_per_port[i] = (16 / num_ports);
8644		parent->txchan_per_port[i] = (16 / num_ports);
8645
8646		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8647			parent->index, i,
8648			parent->rxchan_per_port[i],
8649			parent->txchan_per_port[i]);
8650	}
8651}
8652
8653static void niu_divide_channels(struct niu_parent *parent,
8654				int num_10g, int num_1g)
8655{
8656	int num_ports = parent->num_ports;
8657	int rx_chans_per_10g, rx_chans_per_1g;
8658	int tx_chans_per_10g, tx_chans_per_1g;
8659	int i, tot_rx, tot_tx;
8660
8661	if (!num_10g || !num_1g) {
8662		rx_chans_per_10g = rx_chans_per_1g =
8663			(NIU_NUM_RXCHAN / num_ports);
8664		tx_chans_per_10g = tx_chans_per_1g =
8665			(NIU_NUM_TXCHAN / num_ports);
8666	} else {
8667		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
8668		rx_chans_per_10g = (NIU_NUM_RXCHAN -
8669				    (rx_chans_per_1g * num_1g)) /
8670			num_10g;
8671
8672		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
8673		tx_chans_per_10g = (NIU_NUM_TXCHAN -
8674				    (tx_chans_per_1g * num_1g)) /
8675			num_10g;
8676	}
8677
8678	tot_rx = tot_tx = 0;
8679	for (i = 0; i < num_ports; i++) {
8680		int type = phy_decode(parent->port_phy, i);
8681
8682		if (type == PORT_TYPE_10G) {
8683			parent->rxchan_per_port[i] = rx_chans_per_10g;
8684			parent->txchan_per_port[i] = tx_chans_per_10g;
8685		} else {
8686			parent->rxchan_per_port[i] = rx_chans_per_1g;
8687			parent->txchan_per_port[i] = tx_chans_per_1g;
8688		}
8689		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8690			parent->index, i,
8691			parent->rxchan_per_port[i],
8692			parent->txchan_per_port[i]);
8693		tot_rx += parent->rxchan_per_port[i];
8694		tot_tx += parent->txchan_per_port[i];
8695	}
8696
8697	if (tot_rx > NIU_NUM_RXCHAN) {
8698		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
8699		       parent->index, tot_rx);
8700		for (i = 0; i < num_ports; i++)
8701			parent->rxchan_per_port[i] = 1;
8702	}
8703	if (tot_tx > NIU_NUM_TXCHAN) {
8704		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
8705		       parent->index, tot_tx);
8706		for (i = 0; i < num_ports; i++)
8707			parent->txchan_per_port[i] = 1;
8708	}
8709	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
8710		pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
8711			parent->index, tot_rx, tot_tx);
8712	}
8713}
8714
8715static void niu_divide_rdc_groups(struct niu_parent *parent,
8716				  int num_10g, int num_1g)
8717{
8718	int i, num_ports = parent->num_ports;
8719	int rdc_group, rdc_groups_per_port;
8720	int rdc_channel_base;
8721
8722	rdc_group = 0;
8723	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
8724
8725	rdc_channel_base = 0;
8726
8727	for (i = 0; i < num_ports; i++) {
8728		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
8729		int grp, num_channels = parent->rxchan_per_port[i];
8730		int this_channel_offset;
8731
8732		tp->first_table_num = rdc_group;
8733		tp->num_tables = rdc_groups_per_port;
8734		this_channel_offset = 0;
8735		for (grp = 0; grp < tp->num_tables; grp++) {
8736			struct rdc_table *rt = &tp->tables[grp];
8737			int slot;
8738
8739			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
8740				parent->index, i, tp->first_table_num + grp);
8741			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
8742				rt->rxdma_channel[slot] =
8743					rdc_channel_base + this_channel_offset;
8744
8745				pr_cont("%d ", rt->rxdma_channel[slot]);
8746
8747				if (++this_channel_offset == num_channels)
8748					this_channel_offset = 0;
8749			}
8750			pr_cont("]\n");
8751		}
8752
8753		parent->rdc_default[i] = rdc_channel_base;
8754
8755		rdc_channel_base += num_channels;
8756		rdc_group += rdc_groups_per_port;
8757	}
8758}
8759
8760static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
8761			       struct phy_probe_info *info)
8762{
8763	unsigned long flags;
8764	int port, err;
8765
8766	memset(info, 0, sizeof(*info));
8767
8768	/* Port 0 to 7 are reserved for onboard Serdes, probe the rest.  */
8769	niu_lock_parent(np, flags);
8770	err = 0;
8771	for (port = 8; port < 32; port++) {
8772		int dev_id_1, dev_id_2;
8773
8774		dev_id_1 = mdio_read(np, port,
8775				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
8776		dev_id_2 = mdio_read(np, port,
8777				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
8778		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8779				 PHY_TYPE_PMA_PMD);
8780		if (err)
8781			break;
8782		dev_id_1 = mdio_read(np, port,
8783				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
8784		dev_id_2 = mdio_read(np, port,
8785				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
8786		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8787				 PHY_TYPE_PCS);
8788		if (err)
8789			break;
8790		dev_id_1 = mii_read(np, port, MII_PHYSID1);
8791		dev_id_2 = mii_read(np, port, MII_PHYSID2);
8792		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8793				 PHY_TYPE_MII);
8794		if (err)
8795			break;
8796	}
8797	niu_unlock_parent(np, flags);
8798
8799	return err;
8800}
8801
8802static int walk_phys(struct niu *np, struct niu_parent *parent)
8803{
8804	struct phy_probe_info *info = &parent->phy_probe_info;
8805	int lowest_10g, lowest_1g;
8806	int num_10g, num_1g;
8807	u32 val;
8808	int err;
8809
8810	num_10g = num_1g = 0;
8811
8812	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8813	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8814		num_10g = 0;
8815		num_1g = 2;
8816		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
8817		parent->num_ports = 4;
8818		val = (phy_encode(PORT_TYPE_1G, 0) |
8819		       phy_encode(PORT_TYPE_1G, 1) |
8820		       phy_encode(PORT_TYPE_1G, 2) |
8821		       phy_encode(PORT_TYPE_1G, 3));
8822	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8823		num_10g = 2;
8824		num_1g = 0;
8825		parent->num_ports = 2;
8826		val = (phy_encode(PORT_TYPE_10G, 0) |
8827		       phy_encode(PORT_TYPE_10G, 1));
8828	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
8829		   (parent->plat_type == PLAT_TYPE_NIU)) {
8830		/* this is the Monza case */
8831		if (np->flags & NIU_FLAGS_10G) {
8832			val = (phy_encode(PORT_TYPE_10G, 0) |
8833			       phy_encode(PORT_TYPE_10G, 1));
8834		} else {
8835			val = (phy_encode(PORT_TYPE_1G, 0) |
8836			       phy_encode(PORT_TYPE_1G, 1));
8837		}
8838	} else {
8839		err = fill_phy_probe_info(np, parent, info);
8840		if (err)
8841			return err;
8842
8843		num_10g = count_10g_ports(info, &lowest_10g);
8844		num_1g = count_1g_ports(info, &lowest_1g);
8845
8846		switch ((num_10g << 4) | num_1g) {
8847		case 0x24:
8848			if (lowest_1g == 10)
8849				parent->plat_type = PLAT_TYPE_VF_P0;
8850			else if (lowest_1g == 26)
8851				parent->plat_type = PLAT_TYPE_VF_P1;
8852			else
8853				goto unknown_vg_1g_port;
8854
8855			/* fallthru */
8856		case 0x22:
8857			val = (phy_encode(PORT_TYPE_10G, 0) |
8858			       phy_encode(PORT_TYPE_10G, 1) |
8859			       phy_encode(PORT_TYPE_1G, 2) |
8860			       phy_encode(PORT_TYPE_1G, 3));
8861			break;
8862
8863		case 0x20:
8864			val = (phy_encode(PORT_TYPE_10G, 0) |
8865			       phy_encode(PORT_TYPE_10G, 1));
8866			break;
8867
8868		case 0x10:
8869			val = phy_encode(PORT_TYPE_10G, np->port);
8870			break;
8871
8872		case 0x14:
8873			if (lowest_1g == 10)
8874				parent->plat_type = PLAT_TYPE_VF_P0;
8875			else if (lowest_1g == 26)
8876				parent->plat_type = PLAT_TYPE_VF_P1;
8877			else
8878				goto unknown_vg_1g_port;
8879
8880			/* fallthru */
8881		case 0x13:
8882			if ((lowest_10g & 0x7) == 0)
8883				val = (phy_encode(PORT_TYPE_10G, 0) |
8884				       phy_encode(PORT_TYPE_1G, 1) |
8885				       phy_encode(PORT_TYPE_1G, 2) |
8886				       phy_encode(PORT_TYPE_1G, 3));
8887			else
8888				val = (phy_encode(PORT_TYPE_1G, 0) |
8889				       phy_encode(PORT_TYPE_10G, 1) |
8890				       phy_encode(PORT_TYPE_1G, 2) |
8891				       phy_encode(PORT_TYPE_1G, 3));
8892			break;
8893
8894		case 0x04:
8895			if (lowest_1g == 10)
8896				parent->plat_type = PLAT_TYPE_VF_P0;
8897			else if (lowest_1g == 26)
8898				parent->plat_type = PLAT_TYPE_VF_P1;
8899			else
8900				goto unknown_vg_1g_port;
8901
8902			val = (phy_encode(PORT_TYPE_1G, 0) |
8903			       phy_encode(PORT_TYPE_1G, 1) |
8904			       phy_encode(PORT_TYPE_1G, 2) |
8905			       phy_encode(PORT_TYPE_1G, 3));
8906			break;
8907
8908		default:
8909			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
8910			       num_10g, num_1g);
8911			return -EINVAL;
8912		}
8913	}
8914
8915	parent->port_phy = val;
8916
8917	if (parent->plat_type == PLAT_TYPE_NIU)
8918		niu_n2_divide_channels(parent);
8919	else
8920		niu_divide_channels(parent, num_10g, num_1g);
8921
8922	niu_divide_rdc_groups(parent, num_10g, num_1g);
8923
8924	return 0;
8925
8926unknown_vg_1g_port:
8927	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
8928	return -EINVAL;
8929}
8930
8931static int niu_probe_ports(struct niu *np)
8932{
8933	struct niu_parent *parent = np->parent;
8934	int err, i;
8935
8936	if (parent->port_phy == PORT_PHY_UNKNOWN) {
8937		err = walk_phys(np, parent);
8938		if (err)
8939			return err;
8940
8941		niu_set_ldg_timer_res(np, 2);
8942		for (i = 0; i <= LDN_MAX; i++)
8943			niu_ldn_irq_enable(np, i, 0);
8944	}
8945
8946	if (parent->port_phy == PORT_PHY_INVALID)
8947		return -EINVAL;
8948
8949	return 0;
8950}
8951
8952static int niu_classifier_swstate_init(struct niu *np)
8953{
8954	struct niu_classifier *cp = &np->clas;
8955
8956	cp->tcam_top = (u16) np->port;
8957	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
8958	cp->h1_init = 0xffffffff;
8959	cp->h2_init = 0xffff;
8960
8961	return fflp_early_init(np);
8962}
8963
8964static void niu_link_config_init(struct niu *np)
8965{
8966	struct niu_link_config *lp = &np->link_config;
8967
8968	lp->advertising = (ADVERTISED_10baseT_Half |
8969			   ADVERTISED_10baseT_Full |
8970			   ADVERTISED_100baseT_Half |
8971			   ADVERTISED_100baseT_Full |
8972			   ADVERTISED_1000baseT_Half |
8973			   ADVERTISED_1000baseT_Full |
8974			   ADVERTISED_10000baseT_Full |
8975			   ADVERTISED_Autoneg);
8976	lp->speed = lp->active_speed = SPEED_INVALID;
8977	lp->duplex = DUPLEX_FULL;
8978	lp->active_duplex = DUPLEX_INVALID;
8979	lp->autoneg = 1;
8980#if 0
8981	lp->loopback_mode = LOOPBACK_MAC;
8982	lp->active_speed = SPEED_10000;
8983	lp->active_duplex = DUPLEX_FULL;
8984#else
8985	lp->loopback_mode = LOOPBACK_DISABLED;
8986#endif
8987}
8988
8989static int niu_init_mac_ipp_pcs_base(struct niu *np)
8990{
8991	switch (np->port) {
8992	case 0:
8993		np->mac_regs = np->regs + XMAC_PORT0_OFF;
8994		np->ipp_off  = 0x00000;
8995		np->pcs_off  = 0x04000;
8996		np->xpcs_off = 0x02000;
8997		break;
8998
8999	case 1:
9000		np->mac_regs = np->regs + XMAC_PORT1_OFF;
9001		np->ipp_off  = 0x08000;
9002		np->pcs_off  = 0x0a000;
9003		np->xpcs_off = 0x08000;
9004		break;
9005
9006	case 2:
9007		np->mac_regs = np->regs + BMAC_PORT2_OFF;
9008		np->ipp_off  = 0x04000;
9009		np->pcs_off  = 0x0e000;
9010		np->xpcs_off = ~0UL;
9011		break;
9012
9013	case 3:
9014		np->mac_regs = np->regs + BMAC_PORT3_OFF;
9015		np->ipp_off  = 0x0c000;
9016		np->pcs_off  = 0x12000;
9017		np->xpcs_off = ~0UL;
9018		break;
9019
9020	default:
9021		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
9022		return -EINVAL;
9023	}
9024
9025	return 0;
9026}
9027
9028static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
9029{
9030	struct msix_entry msi_vec[NIU_NUM_LDG];
9031	struct niu_parent *parent = np->parent;
9032	struct pci_dev *pdev = np->pdev;
9033	int i, num_irqs;
9034	u8 first_ldg;
9035
9036	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
9037	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
9038		ldg_num_map[i] = first_ldg + i;
9039
9040	num_irqs = (parent->rxchan_per_port[np->port] +
9041		    parent->txchan_per_port[np->port] +
9042		    (np->port == 0 ? 3 : 1));
9043	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
9044
9045	for (i = 0; i < num_irqs; i++) {
9046		msi_vec[i].vector = 0;
9047		msi_vec[i].entry = i;
9048	}
9049
9050	num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
9051	if (num_irqs < 0) {
9052		np->flags &= ~NIU_FLAGS_MSIX;
9053		return;
9054	}
9055
9056	np->flags |= NIU_FLAGS_MSIX;
9057	for (i = 0; i < num_irqs; i++)
9058		np->ldg[i].irq = msi_vec[i].vector;
9059	np->num_ldg = num_irqs;
9060}
9061
9062static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
9063{
9064#ifdef CONFIG_SPARC64
9065	struct platform_device *op = np->op;
9066	const u32 *int_prop;
9067	int i;
9068
9069	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
9070	if (!int_prop)
9071		return -ENODEV;
9072
9073	for (i = 0; i < op->archdata.num_irqs; i++) {
9074		ldg_num_map[i] = int_prop[i];
9075		np->ldg[i].irq = op->archdata.irqs[i];
9076	}
9077
9078	np->num_ldg = op->archdata.num_irqs;
9079
9080	return 0;
9081#else
9082	return -EINVAL;
9083#endif
9084}
9085
9086static int niu_ldg_init(struct niu *np)
9087{
9088	struct niu_parent *parent = np->parent;
9089	u8 ldg_num_map[NIU_NUM_LDG];
9090	int first_chan, num_chan;
9091	int i, err, ldg_rotor;
9092	u8 port;
9093
9094	np->num_ldg = 1;
9095	np->ldg[0].irq = np->dev->irq;
9096	if (parent->plat_type == PLAT_TYPE_NIU) {
9097		err = niu_n2_irq_init(np, ldg_num_map);
9098		if (err)
9099			return err;
9100	} else
9101		niu_try_msix(np, ldg_num_map);
9102
9103	port = np->port;
9104	for (i = 0; i < np->num_ldg; i++) {
9105		struct niu_ldg *lp = &np->ldg[i];
9106
9107		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
9108
9109		lp->np = np;
9110		lp->ldg_num = ldg_num_map[i];
9111		lp->timer = 2; /* XXX */
9112
9113		/* On N2 NIU the firmware has setup the SID mappings so they go
9114		 * to the correct values that will route the LDG to the proper
9115		 * interrupt in the NCU interrupt table.
9116		 */
9117		if (np->parent->plat_type != PLAT_TYPE_NIU) {
9118			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
9119			if (err)
9120				return err;
9121		}
9122	}
9123
9124	/* We adopt the LDG assignment ordering used by the N2 NIU
9125	 * 'interrupt' properties because that simplifies a lot of
9126	 * things.  This ordering is:
9127	 *
9128	 *	MAC
9129	 *	MIF	(if port zero)
9130	 *	SYSERR	(if port zero)
9131	 *	RX channels
9132	 *	TX channels
9133	 */
9134
9135	ldg_rotor = 0;
9136
9137	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
9138				  LDN_MAC(port));
9139	if (err)
9140		return err;
9141
9142	ldg_rotor++;
9143	if (ldg_rotor == np->num_ldg)
9144		ldg_rotor = 0;
9145
9146	if (port == 0) {
9147		err = niu_ldg_assign_ldn(np, parent,
9148					 ldg_num_map[ldg_rotor],
9149					 LDN_MIF);
9150		if (err)
9151			return err;
9152
9153		ldg_rotor++;
9154		if (ldg_rotor == np->num_ldg)
9155			ldg_rotor = 0;
9156
9157		err = niu_ldg_assign_ldn(np, parent,
9158					 ldg_num_map[ldg_rotor],
9159					 LDN_DEVICE_ERROR);
9160		if (err)
9161			return err;
9162
9163		ldg_rotor++;
9164		if (ldg_rotor == np->num_ldg)
9165			ldg_rotor = 0;
9166
9167	}
9168
9169	first_chan = 0;
9170	for (i = 0; i < port; i++)
9171		first_chan += parent->rxchan_per_port[i];
9172	num_chan = parent->rxchan_per_port[port];
9173
9174	for (i = first_chan; i < (first_chan + num_chan); i++) {
9175		err = niu_ldg_assign_ldn(np, parent,
9176					 ldg_num_map[ldg_rotor],
9177					 LDN_RXDMA(i));
9178		if (err)
9179			return err;
9180		ldg_rotor++;
9181		if (ldg_rotor == np->num_ldg)
9182			ldg_rotor = 0;
9183	}
9184
9185	first_chan = 0;
9186	for (i = 0; i < port; i++)
9187		first_chan += parent->txchan_per_port[i];
9188	num_chan = parent->txchan_per_port[port];
9189	for (i = first_chan; i < (first_chan + num_chan); i++) {
9190		err = niu_ldg_assign_ldn(np, parent,
9191					 ldg_num_map[ldg_rotor],
9192					 LDN_TXDMA(i));
9193		if (err)
9194			return err;
9195		ldg_rotor++;
9196		if (ldg_rotor == np->num_ldg)
9197			ldg_rotor = 0;
9198	}
9199
9200	return 0;
9201}
9202
9203static void niu_ldg_free(struct niu *np)
9204{
9205	if (np->flags & NIU_FLAGS_MSIX)
9206		pci_disable_msix(np->pdev);
9207}
9208
9209static int niu_get_of_props(struct niu *np)
9210{
9211#ifdef CONFIG_SPARC64
9212	struct net_device *dev = np->dev;
9213	struct device_node *dp;
9214	const char *phy_type;
9215	const u8 *mac_addr;
9216	const char *model;
9217	int prop_len;
9218
9219	if (np->parent->plat_type == PLAT_TYPE_NIU)
9220		dp = np->op->dev.of_node;
9221	else
9222		dp = pci_device_to_OF_node(np->pdev);
9223
9224	phy_type = of_get_property(dp, "phy-type", &prop_len);
9225	if (!phy_type) {
9226		netdev_err(dev, "%s: OF node lacks phy-type property\n",
9227			   dp->full_name);
9228		return -EINVAL;
9229	}
9230
9231	if (!strcmp(phy_type, "none"))
9232		return -ENODEV;
9233
9234	strcpy(np->vpd.phy_type, phy_type);
9235
9236	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
9237		netdev_err(dev, "%s: Illegal phy string [%s]\n",
9238			   dp->full_name, np->vpd.phy_type);
9239		return -EINVAL;
9240	}
9241
9242	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
9243	if (!mac_addr) {
9244		netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
9245			   dp->full_name);
9246		return -EINVAL;
9247	}
9248	if (prop_len != dev->addr_len) {
9249		netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
9250			   dp->full_name, prop_len);
9251	}
9252	memcpy(dev->dev_addr, mac_addr, dev->addr_len);
9253	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9254		netdev_err(dev, "%s: OF MAC address is invalid\n",
9255			   dp->full_name);
9256		netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr);
9257		return -EINVAL;
9258	}
9259
9260	model = of_get_property(dp, "model", &prop_len);
9261
9262	if (model)
9263		strcpy(np->vpd.model, model);
9264
9265	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
9266		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
9267			NIU_FLAGS_HOTPLUG_PHY);
9268	}
9269
9270	return 0;
9271#else
9272	return -EINVAL;
9273#endif
9274}
9275
9276static int niu_get_invariants(struct niu *np)
9277{
9278	int err, have_props;
9279	u32 offset;
9280
9281	err = niu_get_of_props(np);
9282	if (err == -ENODEV)
9283		return err;
9284
9285	have_props = !err;
9286
9287	err = niu_init_mac_ipp_pcs_base(np);
9288	if (err)
9289		return err;
9290
9291	if (have_props) {
9292		err = niu_get_and_validate_port(np);
9293		if (err)
9294			return err;
9295
9296	} else  {
9297		if (np->parent->plat_type == PLAT_TYPE_NIU)
9298			return -EINVAL;
9299
9300		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
9301		offset = niu_pci_vpd_offset(np);
9302		netif_printk(np, probe, KERN_DEBUG, np->dev,
9303			     "%s() VPD offset [%08x]\n", __func__, offset);
9304		if (offset)
9305			niu_pci_vpd_fetch(np, offset);
9306		nw64(ESPC_PIO_EN, 0);
9307
9308		if (np->flags & NIU_FLAGS_VPD_VALID) {
9309			niu_pci_vpd_validate(np);
9310			err = niu_get_and_validate_port(np);
9311			if (err)
9312				return err;
9313		}
9314
9315		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
9316			err = niu_get_and_validate_port(np);
9317			if (err)
9318				return err;
9319			err = niu_pci_probe_sprom(np);
9320			if (err)
9321				return err;
9322		}
9323	}
9324
9325	err = niu_probe_ports(np);
9326	if (err)
9327		return err;
9328
9329	niu_ldg_init(np);
9330
9331	niu_classifier_swstate_init(np);
9332	niu_link_config_init(np);
9333
9334	err = niu_determine_phy_disposition(np);
9335	if (!err)
9336		err = niu_init_link(np);
9337
9338	return err;
9339}
9340
9341static LIST_HEAD(niu_parent_list);
9342static DEFINE_MUTEX(niu_parent_lock);
9343static int niu_parent_index;
9344
9345static ssize_t show_port_phy(struct device *dev,
9346			     struct device_attribute *attr, char *buf)
9347{
9348	struct platform_device *plat_dev = to_platform_device(dev);
9349	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9350	u32 port_phy = p->port_phy;
9351	char *orig_buf = buf;
9352	int i;
9353
9354	if (port_phy == PORT_PHY_UNKNOWN ||
9355	    port_phy == PORT_PHY_INVALID)
9356		return 0;
9357
9358	for (i = 0; i < p->num_ports; i++) {
9359		const char *type_str;
9360		int type;
9361
9362		type = phy_decode(port_phy, i);
9363		if (type == PORT_TYPE_10G)
9364			type_str = "10G";
9365		else
9366			type_str = "1G";
9367		buf += sprintf(buf,
9368			       (i == 0) ? "%s" : " %s",
9369			       type_str);
9370	}
9371	buf += sprintf(buf, "\n");
9372	return buf - orig_buf;
9373}
9374
9375static ssize_t show_plat_type(struct device *dev,
9376			      struct device_attribute *attr, char *buf)
9377{
9378	struct platform_device *plat_dev = to_platform_device(dev);
9379	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9380	const char *type_str;
9381
9382	switch (p->plat_type) {
9383	case PLAT_TYPE_ATLAS:
9384		type_str = "atlas";
9385		break;
9386	case PLAT_TYPE_NIU:
9387		type_str = "niu";
9388		break;
9389	case PLAT_TYPE_VF_P0:
9390		type_str = "vf_p0";
9391		break;
9392	case PLAT_TYPE_VF_P1:
9393		type_str = "vf_p1";
9394		break;
9395	default:
9396		type_str = "unknown";
9397		break;
9398	}
9399
9400	return sprintf(buf, "%s\n", type_str);
9401}
9402
9403static ssize_t __show_chan_per_port(struct device *dev,
9404				    struct device_attribute *attr, char *buf,
9405				    int rx)
9406{
9407	struct platform_device *plat_dev = to_platform_device(dev);
9408	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9409	char *orig_buf = buf;
9410	u8 *arr;
9411	int i;
9412
9413	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
9414
9415	for (i = 0; i < p->num_ports; i++) {
9416		buf += sprintf(buf,
9417			       (i == 0) ? "%d" : " %d",
9418			       arr[i]);
9419	}
9420	buf += sprintf(buf, "\n");
9421
9422	return buf - orig_buf;
9423}
9424
9425static ssize_t show_rxchan_per_port(struct device *dev,
9426				    struct device_attribute *attr, char *buf)
9427{
9428	return __show_chan_per_port(dev, attr, buf, 1);
9429}
9430
9431static ssize_t show_txchan_per_port(struct device *dev,
9432				    struct device_attribute *attr, char *buf)
9433{
9434	return __show_chan_per_port(dev, attr, buf, 1);
9435}
9436
9437static ssize_t show_num_ports(struct device *dev,
9438			      struct device_attribute *attr, char *buf)
9439{
9440	struct platform_device *plat_dev = to_platform_device(dev);
9441	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9442
9443	return sprintf(buf, "%d\n", p->num_ports);
9444}
9445
9446static struct device_attribute niu_parent_attributes[] = {
9447	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
9448	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
9449	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
9450	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
9451	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
9452	{}
9453};
9454
9455static struct niu_parent *niu_new_parent(struct niu *np,
9456					 union niu_parent_id *id, u8 ptype)
9457{
9458	struct platform_device *plat_dev;
9459	struct niu_parent *p;
9460	int i;
9461
9462	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
9463						   NULL, 0);
9464	if (IS_ERR(plat_dev))
9465		return NULL;
9466
9467	for (i = 0; niu_parent_attributes[i].attr.name; i++) {
9468		int err = device_create_file(&plat_dev->dev,
9469					     &niu_parent_attributes[i]);
9470		if (err)
9471			goto fail_unregister;
9472	}
9473
9474	p = kzalloc(sizeof(*p), GFP_KERNEL);
9475	if (!p)
9476		goto fail_unregister;
9477
9478	p->index = niu_parent_index++;
9479
9480	plat_dev->dev.platform_data = p;
9481	p->plat_dev = plat_dev;
9482
9483	memcpy(&p->id, id, sizeof(*id));
9484	p->plat_type = ptype;
9485	INIT_LIST_HEAD(&p->list);
9486	atomic_set(&p->refcnt, 0);
9487	list_add(&p->list, &niu_parent_list);
9488	spin_lock_init(&p->lock);
9489
9490	p->rxdma_clock_divider = 7500;
9491
9492	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
9493	if (p->plat_type == PLAT_TYPE_NIU)
9494		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
9495
9496	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
9497		int index = i - CLASS_CODE_USER_PROG1;
9498
9499		p->tcam_key[index] = TCAM_KEY_TSEL;
9500		p->flow_key[index] = (FLOW_KEY_IPSA |
9501				      FLOW_KEY_IPDA |
9502				      FLOW_KEY_PROTO |
9503				      (FLOW_KEY_L4_BYTE12 <<
9504				       FLOW_KEY_L4_0_SHIFT) |
9505				      (FLOW_KEY_L4_BYTE12 <<
9506				       FLOW_KEY_L4_1_SHIFT));
9507	}
9508
9509	for (i = 0; i < LDN_MAX + 1; i++)
9510		p->ldg_map[i] = LDG_INVALID;
9511
9512	return p;
9513
9514fail_unregister:
9515	platform_device_unregister(plat_dev);
9516	return NULL;
9517}
9518
9519static struct niu_parent *niu_get_parent(struct niu *np,
9520					 union niu_parent_id *id, u8 ptype)
9521{
9522	struct niu_parent *p, *tmp;
9523	int port = np->port;
9524
9525	mutex_lock(&niu_parent_lock);
9526	p = NULL;
9527	list_for_each_entry(tmp, &niu_parent_list, list) {
9528		if (!memcmp(id, &tmp->id, sizeof(*id))) {
9529			p = tmp;
9530			break;
9531		}
9532	}
9533	if (!p)
9534		p = niu_new_parent(np, id, ptype);
9535
9536	if (p) {
9537		char port_name[6];
9538		int err;
9539
9540		sprintf(port_name, "port%d", port);
9541		err = sysfs_create_link(&p->plat_dev->dev.kobj,
9542					&np->device->kobj,
9543					port_name);
9544		if (!err) {
9545			p->ports[port] = np;
9546			atomic_inc(&p->refcnt);
9547		}
9548	}
9549	mutex_unlock(&niu_parent_lock);
9550
9551	return p;
9552}
9553
9554static void niu_put_parent(struct niu *np)
9555{
9556	struct niu_parent *p = np->parent;
9557	u8 port = np->port;
9558	char port_name[6];
9559
9560	BUG_ON(!p || p->ports[port] != np);
9561
9562	netif_printk(np, probe, KERN_DEBUG, np->dev,
9563		     "%s() port[%u]\n", __func__, port);
9564
9565	sprintf(port_name, "port%d", port);
9566
9567	mutex_lock(&niu_parent_lock);
9568
9569	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
9570
9571	p->ports[port] = NULL;
9572	np->parent = NULL;
9573
9574	if (atomic_dec_and_test(&p->refcnt)) {
9575		list_del(&p->list);
9576		platform_device_unregister(p->plat_dev);
9577	}
9578
9579	mutex_unlock(&niu_parent_lock);
9580}
9581
9582static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
9583				    u64 *handle, gfp_t flag)
9584{
9585	dma_addr_t dh;
9586	void *ret;
9587
9588	ret = dma_alloc_coherent(dev, size, &dh, flag);
9589	if (ret)
9590		*handle = dh;
9591	return ret;
9592}
9593
9594static void niu_pci_free_coherent(struct device *dev, size_t size,
9595				  void *cpu_addr, u64 handle)
9596{
9597	dma_free_coherent(dev, size, cpu_addr, handle);
9598}
9599
9600static u64 niu_pci_map_page(struct device *dev, struct page *page,
9601			    unsigned long offset, size_t size,
9602			    enum dma_data_direction direction)
9603{
9604	return dma_map_page(dev, page, offset, size, direction);
9605}
9606
9607static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
9608			       size_t size, enum dma_data_direction direction)
9609{
9610	dma_unmap_page(dev, dma_address, size, direction);
9611}
9612
9613static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
9614			      size_t size,
9615			      enum dma_data_direction direction)
9616{
9617	return dma_map_single(dev, cpu_addr, size, direction);
9618}
9619
9620static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
9621				 size_t size,
9622				 enum dma_data_direction direction)
9623{
9624	dma_unmap_single(dev, dma_address, size, direction);
9625}
9626
9627static const struct niu_ops niu_pci_ops = {
9628	.alloc_coherent	= niu_pci_alloc_coherent,
9629	.free_coherent	= niu_pci_free_coherent,
9630	.map_page	= niu_pci_map_page,
9631	.unmap_page	= niu_pci_unmap_page,
9632	.map_single	= niu_pci_map_single,
9633	.unmap_single	= niu_pci_unmap_single,
9634};
9635
9636static void niu_driver_version(void)
9637{
9638	static int niu_version_printed;
9639
9640	if (niu_version_printed++ == 0)
9641		pr_info("%s", version);
9642}
9643
9644static struct net_device *niu_alloc_and_init(struct device *gen_dev,
9645					     struct pci_dev *pdev,
9646					     struct platform_device *op,
9647					     const struct niu_ops *ops, u8 port)
9648{
9649	struct net_device *dev;
9650	struct niu *np;
9651
9652	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
9653	if (!dev)
9654		return NULL;
9655
9656	SET_NETDEV_DEV(dev, gen_dev);
9657
9658	np = netdev_priv(dev);
9659	np->dev = dev;
9660	np->pdev = pdev;
9661	np->op = op;
9662	np->device = gen_dev;
9663	np->ops = ops;
9664
9665	np->msg_enable = niu_debug;
9666
9667	spin_lock_init(&np->lock);
9668	INIT_WORK(&np->reset_task, niu_reset_task);
9669
9670	np->port = port;
9671
9672	return dev;
9673}
9674
9675static const struct net_device_ops niu_netdev_ops = {
9676	.ndo_open		= niu_open,
9677	.ndo_stop		= niu_close,
9678	.ndo_start_xmit		= niu_start_xmit,
9679	.ndo_get_stats64	= niu_get_stats,
9680	.ndo_set_rx_mode	= niu_set_rx_mode,
9681	.ndo_validate_addr	= eth_validate_addr,
9682	.ndo_set_mac_address	= niu_set_mac_addr,
9683	.ndo_do_ioctl		= niu_ioctl,
9684	.ndo_tx_timeout		= niu_tx_timeout,
9685	.ndo_change_mtu		= niu_change_mtu,
9686};
9687
9688static void niu_assign_netdev_ops(struct net_device *dev)
9689{
9690	dev->netdev_ops = &niu_netdev_ops;
9691	dev->ethtool_ops = &niu_ethtool_ops;
9692	dev->watchdog_timeo = NIU_TX_TIMEOUT;
9693}
9694
9695static void niu_device_announce(struct niu *np)
9696{
9697	struct net_device *dev = np->dev;
9698
9699	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
9700
9701	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
9702		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9703				dev->name,
9704				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9705				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9706				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
9707				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9708				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9709				np->vpd.phy_type);
9710	} else {
9711		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9712				dev->name,
9713				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9714				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9715				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
9716				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
9717				  "COPPER")),
9718				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9719				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9720				np->vpd.phy_type);
9721	}
9722}
9723
9724static void niu_set_basic_features(struct net_device *dev)
9725{
9726	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
9727	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
9728}
9729
9730static int niu_pci_init_one(struct pci_dev *pdev,
9731			    const struct pci_device_id *ent)
9732{
9733	union niu_parent_id parent_id;
9734	struct net_device *dev;
9735	struct niu *np;
9736	int err;
9737	u64 dma_mask;
9738
9739	niu_driver_version();
9740
9741	err = pci_enable_device(pdev);
9742	if (err) {
9743		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9744		return err;
9745	}
9746
9747	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
9748	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9749		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
9750		err = -ENODEV;
9751		goto err_out_disable_pdev;
9752	}
9753
9754	err = pci_request_regions(pdev, DRV_MODULE_NAME);
9755	if (err) {
9756		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9757		goto err_out_disable_pdev;
9758	}
9759
9760	if (!pci_is_pcie(pdev)) {
9761		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
9762		err = -ENODEV;
9763		goto err_out_free_res;
9764	}
9765
9766	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
9767				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
9768	if (!dev) {
9769		err = -ENOMEM;
9770		goto err_out_free_res;
9771	}
9772	np = netdev_priv(dev);
9773
9774	memset(&parent_id, 0, sizeof(parent_id));
9775	parent_id.pci.domain = pci_domain_nr(pdev->bus);
9776	parent_id.pci.bus = pdev->bus->number;
9777	parent_id.pci.device = PCI_SLOT(pdev->devfn);
9778
9779	np->parent = niu_get_parent(np, &parent_id,
9780				    PLAT_TYPE_ATLAS);
9781	if (!np->parent) {
9782		err = -ENOMEM;
9783		goto err_out_free_dev;
9784	}
9785
9786	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
9787		PCI_EXP_DEVCTL_NOSNOOP_EN,
9788		PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
9789		PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
9790		PCI_EXP_DEVCTL_RELAX_EN);
9791
9792	dma_mask = DMA_BIT_MASK(44);
9793	err = pci_set_dma_mask(pdev, dma_mask);
9794	if (!err) {
9795		dev->features |= NETIF_F_HIGHDMA;
9796		err = pci_set_consistent_dma_mask(pdev, dma_mask);
9797		if (err) {
9798			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
9799			goto err_out_release_parent;
9800		}
9801	}
9802	if (err) {
9803		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9804		if (err) {
9805			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
9806			goto err_out_release_parent;
9807		}
9808	}
9809
9810	niu_set_basic_features(dev);
9811
9812	dev->priv_flags |= IFF_UNICAST_FLT;
9813
9814	np->regs = pci_ioremap_bar(pdev, 0);
9815	if (!np->regs) {
9816		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9817		err = -ENOMEM;
9818		goto err_out_release_parent;
9819	}
9820
9821	pci_set_master(pdev);
9822	pci_save_state(pdev);
9823
9824	dev->irq = pdev->irq;
9825
9826	niu_assign_netdev_ops(dev);
9827
9828	err = niu_get_invariants(np);
9829	if (err) {
9830		if (err != -ENODEV)
9831			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
9832		goto err_out_iounmap;
9833	}
9834
9835	err = register_netdev(dev);
9836	if (err) {
9837		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
9838		goto err_out_iounmap;
9839	}
9840
9841	pci_set_drvdata(pdev, dev);
9842
9843	niu_device_announce(np);
9844
9845	return 0;
9846
9847err_out_iounmap:
9848	if (np->regs) {
9849		iounmap(np->regs);
9850		np->regs = NULL;
9851	}
9852
9853err_out_release_parent:
9854	niu_put_parent(np);
9855
9856err_out_free_dev:
9857	free_netdev(dev);
9858
9859err_out_free_res:
9860	pci_release_regions(pdev);
9861
9862err_out_disable_pdev:
9863	pci_disable_device(pdev);
9864
9865	return err;
9866}
9867
9868static void niu_pci_remove_one(struct pci_dev *pdev)
9869{
9870	struct net_device *dev = pci_get_drvdata(pdev);
9871
9872	if (dev) {
9873		struct niu *np = netdev_priv(dev);
9874
9875		unregister_netdev(dev);
9876		if (np->regs) {
9877			iounmap(np->regs);
9878			np->regs = NULL;
9879		}
9880
9881		niu_ldg_free(np);
9882
9883		niu_put_parent(np);
9884
9885		free_netdev(dev);
9886		pci_release_regions(pdev);
9887		pci_disable_device(pdev);
9888	}
9889}
9890
9891static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
9892{
9893	struct net_device *dev = pci_get_drvdata(pdev);
9894	struct niu *np = netdev_priv(dev);
9895	unsigned long flags;
9896
9897	if (!netif_running(dev))
9898		return 0;
9899
9900	flush_work(&np->reset_task);
9901	niu_netif_stop(np);
9902
9903	del_timer_sync(&np->timer);
9904
9905	spin_lock_irqsave(&np->lock, flags);
9906	niu_enable_interrupts(np, 0);
9907	spin_unlock_irqrestore(&np->lock, flags);
9908
9909	netif_device_detach(dev);
9910
9911	spin_lock_irqsave(&np->lock, flags);
9912	niu_stop_hw(np);
9913	spin_unlock_irqrestore(&np->lock, flags);
9914
9915	pci_save_state(pdev);
9916
9917	return 0;
9918}
9919
9920static int niu_resume(struct pci_dev *pdev)
9921{
9922	struct net_device *dev = pci_get_drvdata(pdev);
9923	struct niu *np = netdev_priv(dev);
9924	unsigned long flags;
9925	int err;
9926
9927	if (!netif_running(dev))
9928		return 0;
9929
9930	pci_restore_state(pdev);
9931
9932	netif_device_attach(dev);
9933
9934	spin_lock_irqsave(&np->lock, flags);
9935
9936	err = niu_init_hw(np);
9937	if (!err) {
9938		np->timer.expires = jiffies + HZ;
9939		add_timer(&np->timer);
9940		niu_netif_start(np);
9941	}
9942
9943	spin_unlock_irqrestore(&np->lock, flags);
9944
9945	return err;
9946}
9947
9948static struct pci_driver niu_pci_driver = {
9949	.name		= DRV_MODULE_NAME,
9950	.id_table	= niu_pci_tbl,
9951	.probe		= niu_pci_init_one,
9952	.remove		= niu_pci_remove_one,
9953	.suspend	= niu_suspend,
9954	.resume		= niu_resume,
9955};
9956
9957#ifdef CONFIG_SPARC64
9958static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
9959				     u64 *dma_addr, gfp_t flag)
9960{
9961	unsigned long order = get_order(size);
9962	unsigned long page = __get_free_pages(flag, order);
9963
9964	if (page == 0UL)
9965		return NULL;
9966	memset((char *)page, 0, PAGE_SIZE << order);
9967	*dma_addr = __pa(page);
9968
9969	return (void *) page;
9970}
9971
9972static void niu_phys_free_coherent(struct device *dev, size_t size,
9973				   void *cpu_addr, u64 handle)
9974{
9975	unsigned long order = get_order(size);
9976
9977	free_pages((unsigned long) cpu_addr, order);
9978}
9979
9980static u64 niu_phys_map_page(struct device *dev, struct page *page,
9981			     unsigned long offset, size_t size,
9982			     enum dma_data_direction direction)
9983{
9984	return page_to_phys(page) + offset;
9985}
9986
9987static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
9988				size_t size, enum dma_data_direction direction)
9989{
9990	/* Nothing to do.  */
9991}
9992
9993static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
9994			       size_t size,
9995			       enum dma_data_direction direction)
9996{
9997	return __pa(cpu_addr);
9998}
9999
10000static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
10001				  size_t size,
10002				  enum dma_data_direction direction)
10003{
10004	/* Nothing to do.  */
10005}
10006
10007static const struct niu_ops niu_phys_ops = {
10008	.alloc_coherent	= niu_phys_alloc_coherent,
10009	.free_coherent	= niu_phys_free_coherent,
10010	.map_page	= niu_phys_map_page,
10011	.unmap_page	= niu_phys_unmap_page,
10012	.map_single	= niu_phys_map_single,
10013	.unmap_single	= niu_phys_unmap_single,
10014};
10015
10016static int niu_of_probe(struct platform_device *op)
10017{
10018	union niu_parent_id parent_id;
10019	struct net_device *dev;
10020	struct niu *np;
10021	const u32 *reg;
10022	int err;
10023
10024	niu_driver_version();
10025
10026	reg = of_get_property(op->dev.of_node, "reg", NULL);
10027	if (!reg) {
10028		dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
10029			op->dev.of_node->full_name);
10030		return -ENODEV;
10031	}
10032
10033	dev = niu_alloc_and_init(&op->dev, NULL, op,
10034				 &niu_phys_ops, reg[0] & 0x1);
10035	if (!dev) {
10036		err = -ENOMEM;
10037		goto err_out;
10038	}
10039	np = netdev_priv(dev);
10040
10041	memset(&parent_id, 0, sizeof(parent_id));
10042	parent_id.of = of_get_parent(op->dev.of_node);
10043
10044	np->parent = niu_get_parent(np, &parent_id,
10045				    PLAT_TYPE_NIU);
10046	if (!np->parent) {
10047		err = -ENOMEM;
10048		goto err_out_free_dev;
10049	}
10050
10051	niu_set_basic_features(dev);
10052
10053	np->regs = of_ioremap(&op->resource[1], 0,
10054			      resource_size(&op->resource[1]),
10055			      "niu regs");
10056	if (!np->regs) {
10057		dev_err(&op->dev, "Cannot map device registers, aborting\n");
10058		err = -ENOMEM;
10059		goto err_out_release_parent;
10060	}
10061
10062	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
10063				    resource_size(&op->resource[2]),
10064				    "niu vregs-1");
10065	if (!np->vir_regs_1) {
10066		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
10067		err = -ENOMEM;
10068		goto err_out_iounmap;
10069	}
10070
10071	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
10072				    resource_size(&op->resource[3]),
10073				    "niu vregs-2");
10074	if (!np->vir_regs_2) {
10075		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
10076		err = -ENOMEM;
10077		goto err_out_iounmap;
10078	}
10079
10080	niu_assign_netdev_ops(dev);
10081
10082	err = niu_get_invariants(np);
10083	if (err) {
10084		if (err != -ENODEV)
10085			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
10086		goto err_out_iounmap;
10087	}
10088
10089	err = register_netdev(dev);
10090	if (err) {
10091		dev_err(&op->dev, "Cannot register net device, aborting\n");
10092		goto err_out_iounmap;
10093	}
10094
10095	platform_set_drvdata(op, dev);
10096
10097	niu_device_announce(np);
10098
10099	return 0;
10100
10101err_out_iounmap:
10102	if (np->vir_regs_1) {
10103		of_iounmap(&op->resource[2], np->vir_regs_1,
10104			   resource_size(&op->resource[2]));
10105		np->vir_regs_1 = NULL;
10106	}
10107
10108	if (np->vir_regs_2) {
10109		of_iounmap(&op->resource[3], np->vir_regs_2,
10110			   resource_size(&op->resource[3]));
10111		np->vir_regs_2 = NULL;
10112	}
10113
10114	if (np->regs) {
10115		of_iounmap(&op->resource[1], np->regs,
10116			   resource_size(&op->resource[1]));
10117		np->regs = NULL;
10118	}
10119
10120err_out_release_parent:
10121	niu_put_parent(np);
10122
10123err_out_free_dev:
10124	free_netdev(dev);
10125
10126err_out:
10127	return err;
10128}
10129
10130static int niu_of_remove(struct platform_device *op)
10131{
10132	struct net_device *dev = platform_get_drvdata(op);
10133
10134	if (dev) {
10135		struct niu *np = netdev_priv(dev);
10136
10137		unregister_netdev(dev);
10138
10139		if (np->vir_regs_1) {
10140			of_iounmap(&op->resource[2], np->vir_regs_1,
10141				   resource_size(&op->resource[2]));
10142			np->vir_regs_1 = NULL;
10143		}
10144
10145		if (np->vir_regs_2) {
10146			of_iounmap(&op->resource[3], np->vir_regs_2,
10147				   resource_size(&op->resource[3]));
10148			np->vir_regs_2 = NULL;
10149		}
10150
10151		if (np->regs) {
10152			of_iounmap(&op->resource[1], np->regs,
10153				   resource_size(&op->resource[1]));
10154			np->regs = NULL;
10155		}
10156
10157		niu_ldg_free(np);
10158
10159		niu_put_parent(np);
10160
10161		free_netdev(dev);
10162	}
10163	return 0;
10164}
10165
10166static const struct of_device_id niu_match[] = {
10167	{
10168		.name = "network",
10169		.compatible = "SUNW,niusl",
10170	},
10171	{},
10172};
10173MODULE_DEVICE_TABLE(of, niu_match);
10174
10175static struct platform_driver niu_of_driver = {
10176	.driver = {
10177		.name = "niu",
10178		.of_match_table = niu_match,
10179	},
10180	.probe		= niu_of_probe,
10181	.remove		= niu_of_remove,
10182};
10183
10184#endif /* CONFIG_SPARC64 */
10185
10186static int __init niu_init(void)
10187{
10188	int err = 0;
10189
10190	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
10191
10192	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
10193
10194#ifdef CONFIG_SPARC64
10195	err = platform_driver_register(&niu_of_driver);
10196#endif
10197
10198	if (!err) {
10199		err = pci_register_driver(&niu_pci_driver);
10200#ifdef CONFIG_SPARC64
10201		if (err)
10202			platform_driver_unregister(&niu_of_driver);
10203#endif
10204	}
10205
10206	return err;
10207}
10208
10209static void __exit niu_exit(void)
10210{
10211	pci_unregister_driver(&niu_pci_driver);
10212#ifdef CONFIG_SPARC64
10213	platform_driver_unregister(&niu_of_driver);
10214#endif
10215}
10216
10217module_init(niu_init);
10218module_exit(niu_exit);
10219