1/* niu.c: Neptune ethernet driver.
2 *
3 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/module.h>
9#include <linux/init.h>
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <linux/dma-mapping.h>
13#include <linux/netdevice.h>
14#include <linux/ethtool.h>
15#include <linux/etherdevice.h>
16#include <linux/platform_device.h>
17#include <linux/delay.h>
18#include <linux/bitops.h>
19#include <linux/mii.h>
20#include <linux/if.h>
21#include <linux/if_ether.h>
22#include <linux/if_vlan.h>
23#include <linux/ip.h>
24#include <linux/in.h>
25#include <linux/ipv6.h>
26#include <linux/log2.h>
27#include <linux/jiffies.h>
28#include <linux/crc32.h>
29#include <linux/list.h>
30#include <linux/slab.h>
31
32#include <linux/io.h>
33#include <linux/of_device.h>
34
35#include "niu.h"
36
37#define DRV_MODULE_NAME		"niu"
38#define DRV_MODULE_VERSION	"1.1"
39#define DRV_MODULE_RELDATE	"Apr 22, 2010"
40
41static char version[] =
42	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
43
44MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
45MODULE_DESCRIPTION("NIU ethernet driver");
46MODULE_LICENSE("GPL");
47MODULE_VERSION(DRV_MODULE_VERSION);
48
49#ifndef readq
50static u64 readq(void __iomem *reg)
51{
52	return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
53}
54
55static void writeq(u64 val, void __iomem *reg)
56{
57	writel(val & 0xffffffff, reg);
58	writel(val >> 32, reg + 0x4UL);
59}
60#endif
61
62static const struct pci_device_id niu_pci_tbl[] = {
63	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
64	{}
65};
66
67MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
68
69#define NIU_TX_TIMEOUT			(5 * HZ)
70
71#define nr64(reg)		readq(np->regs + (reg))
72#define nw64(reg, val)		writeq((val), np->regs + (reg))
73
74#define nr64_mac(reg)		readq(np->mac_regs + (reg))
75#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))
76
77#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
78#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))
79
80#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
81#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))
82
83#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
84#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))
85
86#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
87
88static int niu_debug;
89static int debug = -1;
90module_param(debug, int, 0);
91MODULE_PARM_DESC(debug, "NIU debug level");
92
93#define niu_lock_parent(np, flags) \
94	spin_lock_irqsave(&np->parent->lock, flags)
95#define niu_unlock_parent(np, flags) \
96	spin_unlock_irqrestore(&np->parent->lock, flags)
97
98static int serdes_init_10g_serdes(struct niu *np);
99
100static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
101				     u64 bits, int limit, int delay)
102{
103	while (--limit >= 0) {
104		u64 val = nr64_mac(reg);
105
106		if (!(val & bits))
107			break;
108		udelay(delay);
109	}
110	if (limit < 0)
111		return -ENODEV;
112	return 0;
113}
114
115static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
116					u64 bits, int limit, int delay,
117					const char *reg_name)
118{
119	int err;
120
121	nw64_mac(reg, bits);
122	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
123	if (err)
124		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
125			   (unsigned long long)bits, reg_name,
126			   (unsigned long long)nr64_mac(reg));
127	return err;
128}
129
130#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
131({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
132	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
133})
134
135static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
136				     u64 bits, int limit, int delay)
137{
138	while (--limit >= 0) {
139		u64 val = nr64_ipp(reg);
140
141		if (!(val & bits))
142			break;
143		udelay(delay);
144	}
145	if (limit < 0)
146		return -ENODEV;
147	return 0;
148}
149
150static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
151					u64 bits, int limit, int delay,
152					const char *reg_name)
153{
154	int err;
155	u64 val;
156
157	val = nr64_ipp(reg);
158	val |= bits;
159	nw64_ipp(reg, val);
160
161	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
162	if (err)
163		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
164			   (unsigned long long)bits, reg_name,
165			   (unsigned long long)nr64_ipp(reg));
166	return err;
167}
168
169#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
170({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
171	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
172})
173
174static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
175				 u64 bits, int limit, int delay)
176{
177	while (--limit >= 0) {
178		u64 val = nr64(reg);
179
180		if (!(val & bits))
181			break;
182		udelay(delay);
183	}
184	if (limit < 0)
185		return -ENODEV;
186	return 0;
187}
188
189#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
190({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
191	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
192})
193
194static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
195				    u64 bits, int limit, int delay,
196				    const char *reg_name)
197{
198	int err;
199
200	nw64(reg, bits);
201	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
202	if (err)
203		netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
204			   (unsigned long long)bits, reg_name,
205			   (unsigned long long)nr64(reg));
206	return err;
207}
208
209#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
210({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
211	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
212})
213
214static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
215{
216	u64 val = (u64) lp->timer;
217
218	if (on)
219		val |= LDG_IMGMT_ARM;
220
221	nw64(LDG_IMGMT(lp->ldg_num), val);
222}
223
224static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
225{
226	unsigned long mask_reg, bits;
227	u64 val;
228
229	if (ldn < 0 || ldn > LDN_MAX)
230		return -EINVAL;
231
232	if (ldn < 64) {
233		mask_reg = LD_IM0(ldn);
234		bits = LD_IM0_MASK;
235	} else {
236		mask_reg = LD_IM1(ldn - 64);
237		bits = LD_IM1_MASK;
238	}
239
240	val = nr64(mask_reg);
241	if (on)
242		val &= ~bits;
243	else
244		val |= bits;
245	nw64(mask_reg, val);
246
247	return 0;
248}
249
250static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
251{
252	struct niu_parent *parent = np->parent;
253	int i;
254
255	for (i = 0; i <= LDN_MAX; i++) {
256		int err;
257
258		if (parent->ldg_map[i] != lp->ldg_num)
259			continue;
260
261		err = niu_ldn_irq_enable(np, i, on);
262		if (err)
263			return err;
264	}
265	return 0;
266}
267
268static int niu_enable_interrupts(struct niu *np, int on)
269{
270	int i;
271
272	for (i = 0; i < np->num_ldg; i++) {
273		struct niu_ldg *lp = &np->ldg[i];
274		int err;
275
276		err = niu_enable_ldn_in_ldg(np, lp, on);
277		if (err)
278			return err;
279	}
280	for (i = 0; i < np->num_ldg; i++)
281		niu_ldg_rearm(np, &np->ldg[i], on);
282
283	return 0;
284}
285
286static u32 phy_encode(u32 type, int port)
287{
288	return type << (port * 2);
289}
290
291static u32 phy_decode(u32 val, int port)
292{
293	return (val >> (port * 2)) & PORT_TYPE_MASK;
294}
295
296static int mdio_wait(struct niu *np)
297{
298	int limit = 1000;
299	u64 val;
300
301	while (--limit > 0) {
302		val = nr64(MIF_FRAME_OUTPUT);
303		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
304			return val & MIF_FRAME_OUTPUT_DATA;
305
306		udelay(10);
307	}
308
309	return -ENODEV;
310}
311
312static int mdio_read(struct niu *np, int port, int dev, int reg)
313{
314	int err;
315
316	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
317	err = mdio_wait(np);
318	if (err < 0)
319		return err;
320
321	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
322	return mdio_wait(np);
323}
324
325static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
326{
327	int err;
328
329	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
330	err = mdio_wait(np);
331	if (err < 0)
332		return err;
333
334	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
335	err = mdio_wait(np);
336	if (err < 0)
337		return err;
338
339	return 0;
340}
341
342static int mii_read(struct niu *np, int port, int reg)
343{
344	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
345	return mdio_wait(np);
346}
347
348static int mii_write(struct niu *np, int port, int reg, int data)
349{
350	int err;
351
352	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
353	err = mdio_wait(np);
354	if (err < 0)
355		return err;
356
357	return 0;
358}
359
360static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
361{
362	int err;
363
364	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
365			 ESR2_TI_PLL_TX_CFG_L(channel),
366			 val & 0xffff);
367	if (!err)
368		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
369				 ESR2_TI_PLL_TX_CFG_H(channel),
370				 val >> 16);
371	return err;
372}
373
374static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
375{
376	int err;
377
378	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
379			 ESR2_TI_PLL_RX_CFG_L(channel),
380			 val & 0xffff);
381	if (!err)
382		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
383				 ESR2_TI_PLL_RX_CFG_H(channel),
384				 val >> 16);
385	return err;
386}
387
388/* Mode is always 10G fiber.  */
389static int serdes_init_niu_10g_fiber(struct niu *np)
390{
391	struct niu_link_config *lp = &np->link_config;
392	u32 tx_cfg, rx_cfg;
393	unsigned long i;
394
395	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
396	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
397		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
398		  PLL_RX_CFG_EQ_LP_ADAPTIVE);
399
400	if (lp->loopback_mode == LOOPBACK_PHY) {
401		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
402
403		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
404			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);
405
406		tx_cfg |= PLL_TX_CFG_ENTEST;
407		rx_cfg |= PLL_RX_CFG_ENTEST;
408	}
409
410	/* Initialize all 4 lanes of the SERDES.  */
411	for (i = 0; i < 4; i++) {
412		int err = esr2_set_tx_cfg(np, i, tx_cfg);
413		if (err)
414			return err;
415	}
416
417	for (i = 0; i < 4; i++) {
418		int err = esr2_set_rx_cfg(np, i, rx_cfg);
419		if (err)
420			return err;
421	}
422
423	return 0;
424}
425
426static int serdes_init_niu_1g_serdes(struct niu *np)
427{
428	struct niu_link_config *lp = &np->link_config;
429	u16 pll_cfg, pll_sts;
430	int max_retry = 100;
431	u64 uninitialized_var(sig), mask, val;
432	u32 tx_cfg, rx_cfg;
433	unsigned long i;
434	int err;
435
436	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
437		  PLL_TX_CFG_RATE_HALF);
438	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
439		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
440		  PLL_RX_CFG_RATE_HALF);
441
442	if (np->port == 0)
443		rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
444
445	if (lp->loopback_mode == LOOPBACK_PHY) {
446		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
447
448		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
449			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);
450
451		tx_cfg |= PLL_TX_CFG_ENTEST;
452		rx_cfg |= PLL_RX_CFG_ENTEST;
453	}
454
455	/* Initialize PLL for 1G */
456	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
457
458	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
459			 ESR2_TI_PLL_CFG_L, pll_cfg);
460	if (err) {
461		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
462			   np->port, __func__);
463		return err;
464	}
465
466	pll_sts = PLL_CFG_ENPLL;
467
468	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
469			 ESR2_TI_PLL_STS_L, pll_sts);
470	if (err) {
471		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
472			   np->port, __func__);
473		return err;
474	}
475
476	udelay(200);
477
478	/* Initialize all 4 lanes of the SERDES.  */
479	for (i = 0; i < 4; i++) {
480		err = esr2_set_tx_cfg(np, i, tx_cfg);
481		if (err)
482			return err;
483	}
484
485	for (i = 0; i < 4; i++) {
486		err = esr2_set_rx_cfg(np, i, rx_cfg);
487		if (err)
488			return err;
489	}
490
491	switch (np->port) {
492	case 0:
493		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
494		mask = val;
495		break;
496
497	case 1:
498		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
499		mask = val;
500		break;
501
502	default:
503		return -EINVAL;
504	}
505
506	while (max_retry--) {
507		sig = nr64(ESR_INT_SIGNALS);
508		if ((sig & mask) == val)
509			break;
510
511		mdelay(500);
512	}
513
514	if ((sig & mask) != val) {
515		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
516			   np->port, (int)(sig & mask), (int)val);
517		return -ENODEV;
518	}
519
520	return 0;
521}
522
523static int serdes_init_niu_10g_serdes(struct niu *np)
524{
525	struct niu_link_config *lp = &np->link_config;
526	u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
527	int max_retry = 100;
528	u64 uninitialized_var(sig), mask, val;
529	unsigned long i;
530	int err;
531
532	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
533	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
534		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
535		  PLL_RX_CFG_EQ_LP_ADAPTIVE);
536
537	if (lp->loopback_mode == LOOPBACK_PHY) {
538		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
539
540		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
541			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);
542
543		tx_cfg |= PLL_TX_CFG_ENTEST;
544		rx_cfg |= PLL_RX_CFG_ENTEST;
545	}
546
547	/* Initialize PLL for 10G */
548	pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
549
550	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
551			 ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
552	if (err) {
553		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
554			   np->port, __func__);
555		return err;
556	}
557
558	pll_sts = PLL_CFG_ENPLL;
559
560	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
561			 ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
562	if (err) {
563		netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
564			   np->port, __func__);
565		return err;
566	}
567
568	udelay(200);
569
570	/* Initialize all 4 lanes of the SERDES.  */
571	for (i = 0; i < 4; i++) {
572		err = esr2_set_tx_cfg(np, i, tx_cfg);
573		if (err)
574			return err;
575	}
576
577	for (i = 0; i < 4; i++) {
578		err = esr2_set_rx_cfg(np, i, rx_cfg);
579		if (err)
580			return err;
581	}
582
583	/* check if serdes is ready */
584
585	switch (np->port) {
586	case 0:
587		mask = ESR_INT_SIGNALS_P0_BITS;
588		val = (ESR_INT_SRDY0_P0 |
589		       ESR_INT_DET0_P0 |
590		       ESR_INT_XSRDY_P0 |
591		       ESR_INT_XDP_P0_CH3 |
592		       ESR_INT_XDP_P0_CH2 |
593		       ESR_INT_XDP_P0_CH1 |
594		       ESR_INT_XDP_P0_CH0);
595		break;
596
597	case 1:
598		mask = ESR_INT_SIGNALS_P1_BITS;
599		val = (ESR_INT_SRDY0_P1 |
600		       ESR_INT_DET0_P1 |
601		       ESR_INT_XSRDY_P1 |
602		       ESR_INT_XDP_P1_CH3 |
603		       ESR_INT_XDP_P1_CH2 |
604		       ESR_INT_XDP_P1_CH1 |
605		       ESR_INT_XDP_P1_CH0);
606		break;
607
608	default:
609		return -EINVAL;
610	}
611
612	while (max_retry--) {
613		sig = nr64(ESR_INT_SIGNALS);
614		if ((sig & mask) == val)
615			break;
616
617		mdelay(500);
618	}
619
620	if ((sig & mask) != val) {
621		pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
622			np->port, (int)(sig & mask), (int)val);
623
624		/* 10G failed, try initializing at 1G */
625		err = serdes_init_niu_1g_serdes(np);
626		if (!err) {
627			np->flags &= ~NIU_FLAGS_10G;
628			np->mac_xcvr = MAC_XCVR_PCS;
629		}  else {
630			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
631				   np->port);
632			return -ENODEV;
633		}
634	}
635	return 0;
636}
637
638static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
639{
640	int err;
641
642	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
643	if (err >= 0) {
644		*val = (err & 0xffff);
645		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
646				ESR_RXTX_CTRL_H(chan));
647		if (err >= 0)
648			*val |= ((err & 0xffff) << 16);
649		err = 0;
650	}
651	return err;
652}
653
654static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
655{
656	int err;
657
658	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
659			ESR_GLUE_CTRL0_L(chan));
660	if (err >= 0) {
661		*val = (err & 0xffff);
662		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
663				ESR_GLUE_CTRL0_H(chan));
664		if (err >= 0) {
665			*val |= ((err & 0xffff) << 16);
666			err = 0;
667		}
668	}
669	return err;
670}
671
672static int esr_read_reset(struct niu *np, u32 *val)
673{
674	int err;
675
676	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
677			ESR_RXTX_RESET_CTRL_L);
678	if (err >= 0) {
679		*val = (err & 0xffff);
680		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
681				ESR_RXTX_RESET_CTRL_H);
682		if (err >= 0) {
683			*val |= ((err & 0xffff) << 16);
684			err = 0;
685		}
686	}
687	return err;
688}
689
690static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
691{
692	int err;
693
694	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
695			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
696	if (!err)
697		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
698				 ESR_RXTX_CTRL_H(chan), (val >> 16));
699	return err;
700}
701
702static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
703{
704	int err;
705
706	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
707			ESR_GLUE_CTRL0_L(chan), val & 0xffff);
708	if (!err)
709		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
710				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
711	return err;
712}
713
714static int esr_reset(struct niu *np)
715{
716	u32 uninitialized_var(reset);
717	int err;
718
719	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
720			 ESR_RXTX_RESET_CTRL_L, 0x0000);
721	if (err)
722		return err;
723	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
724			 ESR_RXTX_RESET_CTRL_H, 0xffff);
725	if (err)
726		return err;
727	udelay(200);
728
729	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
730			 ESR_RXTX_RESET_CTRL_L, 0xffff);
731	if (err)
732		return err;
733	udelay(200);
734
735	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
736			 ESR_RXTX_RESET_CTRL_H, 0x0000);
737	if (err)
738		return err;
739	udelay(200);
740
741	err = esr_read_reset(np, &reset);
742	if (err)
743		return err;
744	if (reset != 0) {
745		netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
746			   np->port, reset);
747		return -ENODEV;
748	}
749
750	return 0;
751}
752
753static int serdes_init_10g(struct niu *np)
754{
755	struct niu_link_config *lp = &np->link_config;
756	unsigned long ctrl_reg, test_cfg_reg, i;
757	u64 ctrl_val, test_cfg_val, sig, mask, val;
758	int err;
759
760	switch (np->port) {
761	case 0:
762		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
763		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
764		break;
765	case 1:
766		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
767		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
768		break;
769
770	default:
771		return -EINVAL;
772	}
773	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
774		    ENET_SERDES_CTRL_SDET_1 |
775		    ENET_SERDES_CTRL_SDET_2 |
776		    ENET_SERDES_CTRL_SDET_3 |
777		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
778		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
779		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
780		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
781		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
782		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
783		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
784		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
785	test_cfg_val = 0;
786
787	if (lp->loopback_mode == LOOPBACK_PHY) {
788		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
789				  ENET_SERDES_TEST_MD_0_SHIFT) |
790				 (ENET_TEST_MD_PAD_LOOPBACK <<
791				  ENET_SERDES_TEST_MD_1_SHIFT) |
792				 (ENET_TEST_MD_PAD_LOOPBACK <<
793				  ENET_SERDES_TEST_MD_2_SHIFT) |
794				 (ENET_TEST_MD_PAD_LOOPBACK <<
795				  ENET_SERDES_TEST_MD_3_SHIFT));
796	}
797
798	nw64(ctrl_reg, ctrl_val);
799	nw64(test_cfg_reg, test_cfg_val);
800
801	/* Initialize all 4 lanes of the SERDES.  */
802	for (i = 0; i < 4; i++) {
803		u32 rxtx_ctrl, glue0;
804
805		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
806		if (err)
807			return err;
808		err = esr_read_glue0(np, i, &glue0);
809		if (err)
810			return err;
811
812		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
813		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
814			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
815
816		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
817			   ESR_GLUE_CTRL0_THCNT |
818			   ESR_GLUE_CTRL0_BLTIME);
819		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
820			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
821			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
822			  (BLTIME_300_CYCLES <<
823			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
824
825		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
826		if (err)
827			return err;
828		err = esr_write_glue0(np, i, glue0);
829		if (err)
830			return err;
831	}
832
833	err = esr_reset(np);
834	if (err)
835		return err;
836
837	sig = nr64(ESR_INT_SIGNALS);
838	switch (np->port) {
839	case 0:
840		mask = ESR_INT_SIGNALS_P0_BITS;
841		val = (ESR_INT_SRDY0_P0 |
842		       ESR_INT_DET0_P0 |
843		       ESR_INT_XSRDY_P0 |
844		       ESR_INT_XDP_P0_CH3 |
845		       ESR_INT_XDP_P0_CH2 |
846		       ESR_INT_XDP_P0_CH1 |
847		       ESR_INT_XDP_P0_CH0);
848		break;
849
850	case 1:
851		mask = ESR_INT_SIGNALS_P1_BITS;
852		val = (ESR_INT_SRDY0_P1 |
853		       ESR_INT_DET0_P1 |
854		       ESR_INT_XSRDY_P1 |
855		       ESR_INT_XDP_P1_CH3 |
856		       ESR_INT_XDP_P1_CH2 |
857		       ESR_INT_XDP_P1_CH1 |
858		       ESR_INT_XDP_P1_CH0);
859		break;
860
861	default:
862		return -EINVAL;
863	}
864
865	if ((sig & mask) != val) {
866		if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
867			np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
868			return 0;
869		}
870		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
871			   np->port, (int)(sig & mask), (int)val);
872		return -ENODEV;
873	}
874	if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
875		np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
876	return 0;
877}
878
879static int serdes_init_1g(struct niu *np)
880{
881	u64 val;
882
883	val = nr64(ENET_SERDES_1_PLL_CFG);
884	val &= ~ENET_SERDES_PLL_FBDIV2;
885	switch (np->port) {
886	case 0:
887		val |= ENET_SERDES_PLL_HRATE0;
888		break;
889	case 1:
890		val |= ENET_SERDES_PLL_HRATE1;
891		break;
892	case 2:
893		val |= ENET_SERDES_PLL_HRATE2;
894		break;
895	case 3:
896		val |= ENET_SERDES_PLL_HRATE3;
897		break;
898	default:
899		return -EINVAL;
900	}
901	nw64(ENET_SERDES_1_PLL_CFG, val);
902
903	return 0;
904}
905
906static int serdes_init_1g_serdes(struct niu *np)
907{
908	struct niu_link_config *lp = &np->link_config;
909	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
910	u64 ctrl_val, test_cfg_val, sig, mask, val;
911	int err;
912	u64 reset_val, val_rd;
913
914	val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
915		ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
916		ENET_SERDES_PLL_FBDIV0;
917	switch (np->port) {
918	case 0:
919		reset_val =  ENET_SERDES_RESET_0;
920		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
921		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
922		pll_cfg = ENET_SERDES_0_PLL_CFG;
923		break;
924	case 1:
925		reset_val =  ENET_SERDES_RESET_1;
926		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
927		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
928		pll_cfg = ENET_SERDES_1_PLL_CFG;
929		break;
930
931	default:
932		return -EINVAL;
933	}
934	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
935		    ENET_SERDES_CTRL_SDET_1 |
936		    ENET_SERDES_CTRL_SDET_2 |
937		    ENET_SERDES_CTRL_SDET_3 |
938		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
939		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
940		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
941		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
942		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
943		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
944		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
945		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
946	test_cfg_val = 0;
947
948	if (lp->loopback_mode == LOOPBACK_PHY) {
949		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
950				  ENET_SERDES_TEST_MD_0_SHIFT) |
951				 (ENET_TEST_MD_PAD_LOOPBACK <<
952				  ENET_SERDES_TEST_MD_1_SHIFT) |
953				 (ENET_TEST_MD_PAD_LOOPBACK <<
954				  ENET_SERDES_TEST_MD_2_SHIFT) |
955				 (ENET_TEST_MD_PAD_LOOPBACK <<
956				  ENET_SERDES_TEST_MD_3_SHIFT));
957	}
958
959	nw64(ENET_SERDES_RESET, reset_val);
960	mdelay(20);
961	val_rd = nr64(ENET_SERDES_RESET);
962	val_rd &= ~reset_val;
963	nw64(pll_cfg, val);
964	nw64(ctrl_reg, ctrl_val);
965	nw64(test_cfg_reg, test_cfg_val);
966	nw64(ENET_SERDES_RESET, val_rd);
967	mdelay(2000);
968
969	/* Initialize all 4 lanes of the SERDES.  */
970	for (i = 0; i < 4; i++) {
971		u32 rxtx_ctrl, glue0;
972
973		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
974		if (err)
975			return err;
976		err = esr_read_glue0(np, i, &glue0);
977		if (err)
978			return err;
979
980		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
981		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
982			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
983
984		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
985			   ESR_GLUE_CTRL0_THCNT |
986			   ESR_GLUE_CTRL0_BLTIME);
987		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
988			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
989			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
990			  (BLTIME_300_CYCLES <<
991			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
992
993		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
994		if (err)
995			return err;
996		err = esr_write_glue0(np, i, glue0);
997		if (err)
998			return err;
999	}
1000
1001
1002	sig = nr64(ESR_INT_SIGNALS);
1003	switch (np->port) {
1004	case 0:
1005		val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
1006		mask = val;
1007		break;
1008
1009	case 1:
1010		val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
1011		mask = val;
1012		break;
1013
1014	default:
1015		return -EINVAL;
1016	}
1017
1018	if ((sig & mask) != val) {
1019		netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
1020			   np->port, (int)(sig & mask), (int)val);
1021		return -ENODEV;
1022	}
1023
1024	return 0;
1025}
1026
1027static int link_status_1g_serdes(struct niu *np, int *link_up_p)
1028{
1029	struct niu_link_config *lp = &np->link_config;
1030	int link_up;
1031	u64 val;
1032	u16 current_speed;
1033	unsigned long flags;
1034	u8 current_duplex;
1035
1036	link_up = 0;
1037	current_speed = SPEED_INVALID;
1038	current_duplex = DUPLEX_INVALID;
1039
1040	spin_lock_irqsave(&np->lock, flags);
1041
1042	val = nr64_pcs(PCS_MII_STAT);
1043
1044	if (val & PCS_MII_STAT_LINK_STATUS) {
1045		link_up = 1;
1046		current_speed = SPEED_1000;
1047		current_duplex = DUPLEX_FULL;
1048	}
1049
1050	lp->active_speed = current_speed;
1051	lp->active_duplex = current_duplex;
1052	spin_unlock_irqrestore(&np->lock, flags);
1053
1054	*link_up_p = link_up;
1055	return 0;
1056}
1057
1058static int link_status_10g_serdes(struct niu *np, int *link_up_p)
1059{
1060	unsigned long flags;
1061	struct niu_link_config *lp = &np->link_config;
1062	int link_up = 0;
1063	int link_ok = 1;
1064	u64 val, val2;
1065	u16 current_speed;
1066	u8 current_duplex;
1067
1068	if (!(np->flags & NIU_FLAGS_10G))
1069		return link_status_1g_serdes(np, link_up_p);
1070
1071	current_speed = SPEED_INVALID;
1072	current_duplex = DUPLEX_INVALID;
1073	spin_lock_irqsave(&np->lock, flags);
1074
1075	val = nr64_xpcs(XPCS_STATUS(0));
1076	val2 = nr64_mac(XMAC_INTER2);
1077	if (val2 & 0x01000000)
1078		link_ok = 0;
1079
1080	if ((val & 0x1000ULL) && link_ok) {
1081		link_up = 1;
1082		current_speed = SPEED_10000;
1083		current_duplex = DUPLEX_FULL;
1084	}
1085	lp->active_speed = current_speed;
1086	lp->active_duplex = current_duplex;
1087	spin_unlock_irqrestore(&np->lock, flags);
1088	*link_up_p = link_up;
1089	return 0;
1090}
1091
1092static int link_status_mii(struct niu *np, int *link_up_p)
1093{
1094	struct niu_link_config *lp = &np->link_config;
1095	int err;
1096	int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
1097	int supported, advertising, active_speed, active_duplex;
1098
1099	err = mii_read(np, np->phy_addr, MII_BMCR);
1100	if (unlikely(err < 0))
1101		return err;
1102	bmcr = err;
1103
1104	err = mii_read(np, np->phy_addr, MII_BMSR);
1105	if (unlikely(err < 0))
1106		return err;
1107	bmsr = err;
1108
1109	err = mii_read(np, np->phy_addr, MII_ADVERTISE);
1110	if (unlikely(err < 0))
1111		return err;
1112	advert = err;
1113
1114	err = mii_read(np, np->phy_addr, MII_LPA);
1115	if (unlikely(err < 0))
1116		return err;
1117	lpa = err;
1118
1119	if (likely(bmsr & BMSR_ESTATEN)) {
1120		err = mii_read(np, np->phy_addr, MII_ESTATUS);
1121		if (unlikely(err < 0))
1122			return err;
1123		estatus = err;
1124
1125		err = mii_read(np, np->phy_addr, MII_CTRL1000);
1126		if (unlikely(err < 0))
1127			return err;
1128		ctrl1000 = err;
1129
1130		err = mii_read(np, np->phy_addr, MII_STAT1000);
1131		if (unlikely(err < 0))
1132			return err;
1133		stat1000 = err;
1134	} else
1135		estatus = ctrl1000 = stat1000 = 0;
1136
1137	supported = 0;
1138	if (bmsr & BMSR_ANEGCAPABLE)
1139		supported |= SUPPORTED_Autoneg;
1140	if (bmsr & BMSR_10HALF)
1141		supported |= SUPPORTED_10baseT_Half;
1142	if (bmsr & BMSR_10FULL)
1143		supported |= SUPPORTED_10baseT_Full;
1144	if (bmsr & BMSR_100HALF)
1145		supported |= SUPPORTED_100baseT_Half;
1146	if (bmsr & BMSR_100FULL)
1147		supported |= SUPPORTED_100baseT_Full;
1148	if (estatus & ESTATUS_1000_THALF)
1149		supported |= SUPPORTED_1000baseT_Half;
1150	if (estatus & ESTATUS_1000_TFULL)
1151		supported |= SUPPORTED_1000baseT_Full;
1152	lp->supported = supported;
1153
1154	advertising = mii_adv_to_ethtool_adv_t(advert);
1155	advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
1156
1157	if (bmcr & BMCR_ANENABLE) {
1158		int neg, neg1000;
1159
1160		lp->active_autoneg = 1;
1161		advertising |= ADVERTISED_Autoneg;
1162
1163		neg = advert & lpa;
1164		neg1000 = (ctrl1000 << 2) & stat1000;
1165
1166		if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
1167			active_speed = SPEED_1000;
1168		else if (neg & LPA_100)
1169			active_speed = SPEED_100;
1170		else if (neg & (LPA_10HALF | LPA_10FULL))
1171			active_speed = SPEED_10;
1172		else
1173			active_speed = SPEED_INVALID;
1174
1175		if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
1176			active_duplex = DUPLEX_FULL;
1177		else if (active_speed != SPEED_INVALID)
1178			active_duplex = DUPLEX_HALF;
1179		else
1180			active_duplex = DUPLEX_INVALID;
1181	} else {
1182		lp->active_autoneg = 0;
1183
1184		if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
1185			active_speed = SPEED_1000;
1186		else if (bmcr & BMCR_SPEED100)
1187			active_speed = SPEED_100;
1188		else
1189			active_speed = SPEED_10;
1190
1191		if (bmcr & BMCR_FULLDPLX)
1192			active_duplex = DUPLEX_FULL;
1193		else
1194			active_duplex = DUPLEX_HALF;
1195	}
1196
1197	lp->active_advertising = advertising;
1198	lp->active_speed = active_speed;
1199	lp->active_duplex = active_duplex;
1200	*link_up_p = !!(bmsr & BMSR_LSTATUS);
1201
1202	return 0;
1203}
1204
1205static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
1206{
1207	struct niu_link_config *lp = &np->link_config;
1208	u16 current_speed, bmsr;
1209	unsigned long flags;
1210	u8 current_duplex;
1211	int err, link_up;
1212
1213	link_up = 0;
1214	current_speed = SPEED_INVALID;
1215	current_duplex = DUPLEX_INVALID;
1216
1217	spin_lock_irqsave(&np->lock, flags);
1218
1219	err = -EINVAL;
1220
1221	err = mii_read(np, np->phy_addr, MII_BMSR);
1222	if (err < 0)
1223		goto out;
1224
1225	bmsr = err;
1226	if (bmsr & BMSR_LSTATUS) {
1227		u16 adv, lpa;
1228
1229		err = mii_read(np, np->phy_addr, MII_ADVERTISE);
1230		if (err < 0)
1231			goto out;
1232		adv = err;
1233
1234		err = mii_read(np, np->phy_addr, MII_LPA);
1235		if (err < 0)
1236			goto out;
1237		lpa = err;
1238
1239		err = mii_read(np, np->phy_addr, MII_ESTATUS);
1240		if (err < 0)
1241			goto out;
1242		link_up = 1;
1243		current_speed = SPEED_1000;
1244		current_duplex = DUPLEX_FULL;
1245
1246	}
1247	lp->active_speed = current_speed;
1248	lp->active_duplex = current_duplex;
1249	err = 0;
1250
1251out:
1252	spin_unlock_irqrestore(&np->lock, flags);
1253
1254	*link_up_p = link_up;
1255	return err;
1256}
1257
1258static int link_status_1g(struct niu *np, int *link_up_p)
1259{
1260	struct niu_link_config *lp = &np->link_config;
1261	unsigned long flags;
1262	int err;
1263
1264	spin_lock_irqsave(&np->lock, flags);
1265
1266	err = link_status_mii(np, link_up_p);
1267	lp->supported |= SUPPORTED_TP;
1268	lp->active_advertising |= ADVERTISED_TP;
1269
1270	spin_unlock_irqrestore(&np->lock, flags);
1271	return err;
1272}
1273
1274static int bcm8704_reset(struct niu *np)
1275{
1276	int err, limit;
1277
1278	err = mdio_read(np, np->phy_addr,
1279			BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
1280	if (err < 0 || err == 0xffff)
1281		return err;
1282	err |= BMCR_RESET;
1283	err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1284			 MII_BMCR, err);
1285	if (err)
1286		return err;
1287
1288	limit = 1000;
1289	while (--limit >= 0) {
1290		err = mdio_read(np, np->phy_addr,
1291				BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
1292		if (err < 0)
1293			return err;
1294		if (!(err & BMCR_RESET))
1295			break;
1296	}
1297	if (limit < 0) {
1298		netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
1299			   np->port, (err & 0xffff));
1300		return -ENODEV;
1301	}
1302	return 0;
1303}
1304
1305/* When written, certain PHY registers need to be read back twice
1306 * in order for the bits to settle properly.
1307 */
1308static int bcm8704_user_dev3_readback(struct niu *np, int reg)
1309{
1310	int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1311	if (err < 0)
1312		return err;
1313	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
1314	if (err < 0)
1315		return err;
1316	return 0;
1317}
1318
1319static int bcm8706_init_user_dev3(struct niu *np)
1320{
1321	int err;
1322
1323
1324	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1325			BCM8704_USER_OPT_DIGITAL_CTRL);
1326	if (err < 0)
1327		return err;
1328	err &= ~USER_ODIG_CTRL_GPIOS;
1329	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1330	err |=  USER_ODIG_CTRL_RESV2;
1331	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1332			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
1333	if (err)
1334		return err;
1335
1336	mdelay(1000);
1337
1338	return 0;
1339}
1340
1341static int bcm8704_init_user_dev3(struct niu *np)
1342{
1343	int err;
1344
1345	err = mdio_write(np, np->phy_addr,
1346			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
1347			 (USER_CONTROL_OPTXRST_LVL |
1348			  USER_CONTROL_OPBIASFLT_LVL |
1349			  USER_CONTROL_OBTMPFLT_LVL |
1350			  USER_CONTROL_OPPRFLT_LVL |
1351			  USER_CONTROL_OPTXFLT_LVL |
1352			  USER_CONTROL_OPRXLOS_LVL |
1353			  USER_CONTROL_OPRXFLT_LVL |
1354			  USER_CONTROL_OPTXON_LVL |
1355			  (0x3f << USER_CONTROL_RES1_SHIFT)));
1356	if (err)
1357		return err;
1358
1359	err = mdio_write(np, np->phy_addr,
1360			 BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
1361			 (USER_PMD_TX_CTL_XFP_CLKEN |
1362			  (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
1363			  (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
1364			  USER_PMD_TX_CTL_TSCK_LPWREN));
1365	if (err)
1366		return err;
1367
1368	err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
1369	if (err)
1370		return err;
1371	err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
1372	if (err)
1373		return err;
1374
1375	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1376			BCM8704_USER_OPT_DIGITAL_CTRL);
1377	if (err < 0)
1378		return err;
1379	err &= ~USER_ODIG_CTRL_GPIOS;
1380	err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
1381	err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1382			 BCM8704_USER_OPT_DIGITAL_CTRL, err);
1383	if (err)
1384		return err;
1385
1386	mdelay(1000);
1387
1388	return 0;
1389}
1390
1391static int mrvl88x2011_act_led(struct niu *np, int val)
1392{
1393	int	err;
1394
1395	err  = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1396		MRVL88X2011_LED_8_TO_11_CTL);
1397	if (err < 0)
1398		return err;
1399
1400	err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
1401	err |=  MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);
1402
1403	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1404			  MRVL88X2011_LED_8_TO_11_CTL, err);
1405}
1406
1407static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
1408{
1409	int	err;
1410
1411	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1412			MRVL88X2011_LED_BLINK_CTL);
1413	if (err >= 0) {
1414		err &= ~MRVL88X2011_LED_BLKRATE_MASK;
1415		err |= (rate << 4);
1416
1417		err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
1418				 MRVL88X2011_LED_BLINK_CTL, err);
1419	}
1420
1421	return err;
1422}
1423
1424static int xcvr_init_10g_mrvl88x2011(struct niu *np)
1425{
1426	int	err;
1427
1428	/* Set LED functions */
1429	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
1430	if (err)
1431		return err;
1432
1433	/* led activity */
1434	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
1435	if (err)
1436		return err;
1437
1438	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1439			MRVL88X2011_GENERAL_CTL);
1440	if (err < 0)
1441		return err;
1442
1443	err |= MRVL88X2011_ENA_XFPREFCLK;
1444
1445	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1446			 MRVL88X2011_GENERAL_CTL, err);
1447	if (err < 0)
1448		return err;
1449
1450	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1451			MRVL88X2011_PMA_PMD_CTL_1);
1452	if (err < 0)
1453		return err;
1454
1455	if (np->link_config.loopback_mode == LOOPBACK_MAC)
1456		err |= MRVL88X2011_LOOPBACK;
1457	else
1458		err &= ~MRVL88X2011_LOOPBACK;
1459
1460	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1461			 MRVL88X2011_PMA_PMD_CTL_1, err);
1462	if (err < 0)
1463		return err;
1464
1465	/* Enable PMD  */
1466	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1467			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
1468}
1469
1470
1471static int xcvr_diag_bcm870x(struct niu *np)
1472{
1473	u16 analog_stat0, tx_alarm_status;
1474	int err = 0;
1475
1476#if 1
1477	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1478			MII_STAT1000);
1479	if (err < 0)
1480		return err;
1481	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
1482
1483	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
1484	if (err < 0)
1485		return err;
1486	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
1487
1488	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
1489			MII_NWAYTEST);
1490	if (err < 0)
1491		return err;
1492	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
1493#endif
1494
1495	/* XXX dig this out it might not be so useful XXX */
1496	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1497			BCM8704_USER_ANALOG_STATUS0);
1498	if (err < 0)
1499		return err;
1500	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1501			BCM8704_USER_ANALOG_STATUS0);
1502	if (err < 0)
1503		return err;
1504	analog_stat0 = err;
1505
1506	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1507			BCM8704_USER_TX_ALARM_STATUS);
1508	if (err < 0)
1509		return err;
1510	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
1511			BCM8704_USER_TX_ALARM_STATUS);
1512	if (err < 0)
1513		return err;
1514	tx_alarm_status = err;
1515
1516	if (analog_stat0 != 0x03fc) {
1517		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
1518			pr_info("Port %u cable not connected or bad cable\n",
1519				np->port);
1520		} else if (analog_stat0 == 0x639c) {
1521			pr_info("Port %u optical module is bad or missing\n",
1522				np->port);
1523		}
1524	}
1525
1526	return 0;
1527}
1528
1529static int xcvr_10g_set_lb_bcm870x(struct niu *np)
1530{
1531	struct niu_link_config *lp = &np->link_config;
1532	int err;
1533
1534	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1535			MII_BMCR);
1536	if (err < 0)
1537		return err;
1538
1539	err &= ~BMCR_LOOPBACK;
1540
1541	if (lp->loopback_mode == LOOPBACK_MAC)
1542		err |= BMCR_LOOPBACK;
1543
1544	err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
1545			 MII_BMCR, err);
1546	if (err)
1547		return err;
1548
1549	return 0;
1550}
1551
1552static int xcvr_init_10g_bcm8706(struct niu *np)
1553{
1554	int err = 0;
1555	u64 val;
1556
1557	if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
1558	    (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
1559			return err;
1560
1561	val = nr64_mac(XMAC_CONFIG);
1562	val &= ~XMAC_CONFIG_LED_POLARITY;
1563	val |= XMAC_CONFIG_FORCE_LED_ON;
1564	nw64_mac(XMAC_CONFIG, val);
1565
1566	val = nr64(MIF_CONFIG);
1567	val |= MIF_CONFIG_INDIRECT_MODE;
1568	nw64(MIF_CONFIG, val);
1569
1570	err = bcm8704_reset(np);
1571	if (err)
1572		return err;
1573
1574	err = xcvr_10g_set_lb_bcm870x(np);
1575	if (err)
1576		return err;
1577
1578	err = bcm8706_init_user_dev3(np);
1579	if (err)
1580		return err;
1581
1582	err = xcvr_diag_bcm870x(np);
1583	if (err)
1584		return err;
1585
1586	return 0;
1587}
1588
1589static int xcvr_init_10g_bcm8704(struct niu *np)
1590{
1591	int err;
1592
1593	err = bcm8704_reset(np);
1594	if (err)
1595		return err;
1596
1597	err = bcm8704_init_user_dev3(np);
1598	if (err)
1599		return err;
1600
1601	err = xcvr_10g_set_lb_bcm870x(np);
1602	if (err)
1603		return err;
1604
1605	err =  xcvr_diag_bcm870x(np);
1606	if (err)
1607		return err;
1608
1609	return 0;
1610}
1611
1612static int xcvr_init_10g(struct niu *np)
1613{
1614	int phy_id, err;
1615	u64 val;
1616
1617	val = nr64_mac(XMAC_CONFIG);
1618	val &= ~XMAC_CONFIG_LED_POLARITY;
1619	val |= XMAC_CONFIG_FORCE_LED_ON;
1620	nw64_mac(XMAC_CONFIG, val);
1621
1622	/* XXX shared resource, lock parent XXX */
1623	val = nr64(MIF_CONFIG);
1624	val |= MIF_CONFIG_INDIRECT_MODE;
1625	nw64(MIF_CONFIG, val);
1626
1627	phy_id = phy_decode(np->parent->port_phy, np->port);
1628	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
1629
1630	/* handle different phy types */
1631	switch (phy_id & NIU_PHY_ID_MASK) {
1632	case NIU_PHY_ID_MRVL88X2011:
1633		err = xcvr_init_10g_mrvl88x2011(np);
1634		break;
1635
1636	default: /* bcom 8704 */
1637		err = xcvr_init_10g_bcm8704(np);
1638		break;
1639	}
1640
1641	return err;
1642}
1643
1644static int mii_reset(struct niu *np)
1645{
1646	int limit, err;
1647
1648	err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
1649	if (err)
1650		return err;
1651
1652	limit = 1000;
1653	while (--limit >= 0) {
1654		udelay(500);
1655		err = mii_read(np, np->phy_addr, MII_BMCR);
1656		if (err < 0)
1657			return err;
1658		if (!(err & BMCR_RESET))
1659			break;
1660	}
1661	if (limit < 0) {
1662		netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
1663			   np->port, err);
1664		return -ENODEV;
1665	}
1666
1667	return 0;
1668}
1669
1670static int xcvr_init_1g_rgmii(struct niu *np)
1671{
1672	int err;
1673	u64 val;
1674	u16 bmcr, bmsr, estat;
1675
1676	val = nr64(MIF_CONFIG);
1677	val &= ~MIF_CONFIG_INDIRECT_MODE;
1678	nw64(MIF_CONFIG, val);
1679
1680	err = mii_reset(np);
1681	if (err)
1682		return err;
1683
1684	err = mii_read(np, np->phy_addr, MII_BMSR);
1685	if (err < 0)
1686		return err;
1687	bmsr = err;
1688
1689	estat = 0;
1690	if (bmsr & BMSR_ESTATEN) {
1691		err = mii_read(np, np->phy_addr, MII_ESTATUS);
1692		if (err < 0)
1693			return err;
1694		estat = err;
1695	}
1696
1697	bmcr = 0;
1698	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1699	if (err)
1700		return err;
1701
1702	if (bmsr & BMSR_ESTATEN) {
1703		u16 ctrl1000 = 0;
1704
1705		if (estat & ESTATUS_1000_TFULL)
1706			ctrl1000 |= ADVERTISE_1000FULL;
1707		err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
1708		if (err)
1709			return err;
1710	}
1711
1712	bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
1713
1714	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1715	if (err)
1716		return err;
1717
1718	err = mii_read(np, np->phy_addr, MII_BMCR);
1719	if (err < 0)
1720		return err;
1721	bmcr = mii_read(np, np->phy_addr, MII_BMCR);
1722
1723	err = mii_read(np, np->phy_addr, MII_BMSR);
1724	if (err < 0)
1725		return err;
1726
1727	return 0;
1728}
1729
1730static int mii_init_common(struct niu *np)
1731{
1732	struct niu_link_config *lp = &np->link_config;
1733	u16 bmcr, bmsr, adv, estat;
1734	int err;
1735
1736	err = mii_reset(np);
1737	if (err)
1738		return err;
1739
1740	err = mii_read(np, np->phy_addr, MII_BMSR);
1741	if (err < 0)
1742		return err;
1743	bmsr = err;
1744
1745	estat = 0;
1746	if (bmsr & BMSR_ESTATEN) {
1747		err = mii_read(np, np->phy_addr, MII_ESTATUS);
1748		if (err < 0)
1749			return err;
1750		estat = err;
1751	}
1752
1753	bmcr = 0;
1754	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1755	if (err)
1756		return err;
1757
1758	if (lp->loopback_mode == LOOPBACK_MAC) {
1759		bmcr |= BMCR_LOOPBACK;
1760		if (lp->active_speed == SPEED_1000)
1761			bmcr |= BMCR_SPEED1000;
1762		if (lp->active_duplex == DUPLEX_FULL)
1763			bmcr |= BMCR_FULLDPLX;
1764	}
1765
1766	if (lp->loopback_mode == LOOPBACK_PHY) {
1767		u16 aux;
1768
1769		aux = (BCM5464R_AUX_CTL_EXT_LB |
1770		       BCM5464R_AUX_CTL_WRITE_1);
1771		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
1772		if (err)
1773			return err;
1774	}
1775
1776	if (lp->autoneg) {
1777		u16 ctrl1000;
1778
1779		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1780		if ((bmsr & BMSR_10HALF) &&
1781			(lp->advertising & ADVERTISED_10baseT_Half))
1782			adv |= ADVERTISE_10HALF;
1783		if ((bmsr & BMSR_10FULL) &&
1784			(lp->advertising & ADVERTISED_10baseT_Full))
1785			adv |= ADVERTISE_10FULL;
1786		if ((bmsr & BMSR_100HALF) &&
1787			(lp->advertising & ADVERTISED_100baseT_Half))
1788			adv |= ADVERTISE_100HALF;
1789		if ((bmsr & BMSR_100FULL) &&
1790			(lp->advertising & ADVERTISED_100baseT_Full))
1791			adv |= ADVERTISE_100FULL;
1792		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
1793		if (err)
1794			return err;
1795
1796		if (likely(bmsr & BMSR_ESTATEN)) {
1797			ctrl1000 = 0;
1798			if ((estat & ESTATUS_1000_THALF) &&
1799				(lp->advertising & ADVERTISED_1000baseT_Half))
1800				ctrl1000 |= ADVERTISE_1000HALF;
1801			if ((estat & ESTATUS_1000_TFULL) &&
1802				(lp->advertising & ADVERTISED_1000baseT_Full))
1803				ctrl1000 |= ADVERTISE_1000FULL;
1804			err = mii_write(np, np->phy_addr,
1805					MII_CTRL1000, ctrl1000);
1806			if (err)
1807				return err;
1808		}
1809
1810		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1811	} else {
1812		/* !lp->autoneg */
1813		int fulldpx;
1814
1815		if (lp->duplex == DUPLEX_FULL) {
1816			bmcr |= BMCR_FULLDPLX;
1817			fulldpx = 1;
1818		} else if (lp->duplex == DUPLEX_HALF)
1819			fulldpx = 0;
1820		else
1821			return -EINVAL;
1822
1823		if (lp->speed == SPEED_1000) {
1824			/* if X-full requested while not supported, or
1825			   X-half requested while not supported... */
1826			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
1827				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
1828				return -EINVAL;
1829			bmcr |= BMCR_SPEED1000;
1830		} else if (lp->speed == SPEED_100) {
1831			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
1832				(!fulldpx && !(bmsr & BMSR_100HALF)))
1833				return -EINVAL;
1834			bmcr |= BMCR_SPEED100;
1835		} else if (lp->speed == SPEED_10) {
1836			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
1837				(!fulldpx && !(bmsr & BMSR_10HALF)))
1838				return -EINVAL;
1839		} else
1840			return -EINVAL;
1841	}
1842
1843	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1844	if (err)
1845		return err;
1846
1847#if 0
1848	err = mii_read(np, np->phy_addr, MII_BMCR);
1849	if (err < 0)
1850		return err;
1851	bmcr = err;
1852
1853	err = mii_read(np, np->phy_addr, MII_BMSR);
1854	if (err < 0)
1855		return err;
1856	bmsr = err;
1857
1858	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
1859		np->port, bmcr, bmsr);
1860#endif
1861
1862	return 0;
1863}
1864
1865static int xcvr_init_1g(struct niu *np)
1866{
1867	u64 val;
1868
1869	/* XXX shared resource, lock parent XXX */
1870	val = nr64(MIF_CONFIG);
1871	val &= ~MIF_CONFIG_INDIRECT_MODE;
1872	nw64(MIF_CONFIG, val);
1873
1874	return mii_init_common(np);
1875}
1876
1877static int niu_xcvr_init(struct niu *np)
1878{
1879	const struct niu_phy_ops *ops = np->phy_ops;
1880	int err;
1881
1882	err = 0;
1883	if (ops->xcvr_init)
1884		err = ops->xcvr_init(np);
1885
1886	return err;
1887}
1888
1889static int niu_serdes_init(struct niu *np)
1890{
1891	const struct niu_phy_ops *ops = np->phy_ops;
1892	int err;
1893
1894	err = 0;
1895	if (ops->serdes_init)
1896		err = ops->serdes_init(np);
1897
1898	return err;
1899}
1900
1901static void niu_init_xif(struct niu *);
1902static void niu_handle_led(struct niu *, int status);
1903
1904static int niu_link_status_common(struct niu *np, int link_up)
1905{
1906	struct niu_link_config *lp = &np->link_config;
1907	struct net_device *dev = np->dev;
1908	unsigned long flags;
1909
1910	if (!netif_carrier_ok(dev) && link_up) {
1911		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
1912			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
1913			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
1914			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
1915			   "10Mbit/sec",
1916			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");
1917
1918		spin_lock_irqsave(&np->lock, flags);
1919		niu_init_xif(np);
1920		niu_handle_led(np, 1);
1921		spin_unlock_irqrestore(&np->lock, flags);
1922
1923		netif_carrier_on(dev);
1924	} else if (netif_carrier_ok(dev) && !link_up) {
1925		netif_warn(np, link, dev, "Link is down\n");
1926		spin_lock_irqsave(&np->lock, flags);
1927		niu_handle_led(np, 0);
1928		spin_unlock_irqrestore(&np->lock, flags);
1929		netif_carrier_off(dev);
1930	}
1931
1932	return 0;
1933}
1934
1935static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
1936{
1937	int err, link_up, pma_status, pcs_status;
1938
1939	link_up = 0;
1940
1941	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1942			MRVL88X2011_10G_PMD_STATUS_2);
1943	if (err < 0)
1944		goto out;
1945
1946	/* Check PMA/PMD Register: 1.0001.2 == 1 */
1947	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
1948			MRVL88X2011_PMA_PMD_STATUS_1);
1949	if (err < 0)
1950		goto out;
1951
1952	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
1953
1954        /* Check PMC Register : 3.0001.2 == 1: read twice */
1955	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1956			MRVL88X2011_PMA_PMD_STATUS_1);
1957	if (err < 0)
1958		goto out;
1959
1960	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
1961			MRVL88X2011_PMA_PMD_STATUS_1);
1962	if (err < 0)
1963		goto out;
1964
1965	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
1966
1967        /* Check XGXS Register : 4.0018.[0-3,12] */
1968	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
1969			MRVL88X2011_10G_XGXS_LANE_STAT);
1970	if (err < 0)
1971		goto out;
1972
1973	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
1974		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
1975		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
1976		    0x800))
1977		link_up = (pma_status && pcs_status) ? 1 : 0;
1978
1979	np->link_config.active_speed = SPEED_10000;
1980	np->link_config.active_duplex = DUPLEX_FULL;
1981	err = 0;
1982out:
1983	mrvl88x2011_act_led(np, (link_up ?
1984				 MRVL88X2011_LED_CTL_PCS_ACT :
1985				 MRVL88X2011_LED_CTL_OFF));
1986
1987	*link_up_p = link_up;
1988	return err;
1989}
1990
1991static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
1992{
1993	int err, link_up;
1994	link_up = 0;
1995
1996	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
1997			BCM8704_PMD_RCV_SIGDET);
1998	if (err < 0 || err == 0xffff)
1999		goto out;
2000	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
2001		err = 0;
2002		goto out;
2003	}
2004
2005	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
2006			BCM8704_PCS_10G_R_STATUS);
2007	if (err < 0)
2008		goto out;
2009
2010	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
2011		err = 0;
2012		goto out;
2013	}
2014
2015	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
2016			BCM8704_PHYXS_XGXS_LANE_STAT);
2017	if (err < 0)
2018		goto out;
2019	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
2020		    PHYXS_XGXS_LANE_STAT_MAGIC |
2021		    PHYXS_XGXS_LANE_STAT_PATTEST |
2022		    PHYXS_XGXS_LANE_STAT_LANE3 |
2023		    PHYXS_XGXS_LANE_STAT_LANE2 |
2024		    PHYXS_XGXS_LANE_STAT_LANE1 |
2025		    PHYXS_XGXS_LANE_STAT_LANE0)) {
2026		err = 0;
2027		np->link_config.active_speed = SPEED_INVALID;
2028		np->link_config.active_duplex = DUPLEX_INVALID;
2029		goto out;
2030	}
2031
2032	link_up = 1;
2033	np->link_config.active_speed = SPEED_10000;
2034	np->link_config.active_duplex = DUPLEX_FULL;
2035	err = 0;
2036
2037out:
2038	*link_up_p = link_up;
2039	return err;
2040}
2041
2042static int link_status_10g_bcom(struct niu *np, int *link_up_p)
2043{
2044	int err, link_up;
2045
2046	link_up = 0;
2047
2048	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
2049			BCM8704_PMD_RCV_SIGDET);
2050	if (err < 0)
2051		goto out;
2052	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
2053		err = 0;
2054		goto out;
2055	}
2056
2057	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
2058			BCM8704_PCS_10G_R_STATUS);
2059	if (err < 0)
2060		goto out;
2061	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
2062		err = 0;
2063		goto out;
2064	}
2065
2066	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
2067			BCM8704_PHYXS_XGXS_LANE_STAT);
2068	if (err < 0)
2069		goto out;
2070
2071	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
2072		    PHYXS_XGXS_LANE_STAT_MAGIC |
2073		    PHYXS_XGXS_LANE_STAT_LANE3 |
2074		    PHYXS_XGXS_LANE_STAT_LANE2 |
2075		    PHYXS_XGXS_LANE_STAT_LANE1 |
2076		    PHYXS_XGXS_LANE_STAT_LANE0)) {
2077		err = 0;
2078		goto out;
2079	}
2080
2081	link_up = 1;
2082	np->link_config.active_speed = SPEED_10000;
2083	np->link_config.active_duplex = DUPLEX_FULL;
2084	err = 0;
2085
2086out:
2087	*link_up_p = link_up;
2088	return err;
2089}
2090
2091static int link_status_10g(struct niu *np, int *link_up_p)
2092{
2093	unsigned long flags;
2094	int err = -EINVAL;
2095
2096	spin_lock_irqsave(&np->lock, flags);
2097
2098	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
2099		int phy_id;
2100
2101		phy_id = phy_decode(np->parent->port_phy, np->port);
2102		phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
2103
2104		/* handle different phy types */
2105		switch (phy_id & NIU_PHY_ID_MASK) {
2106		case NIU_PHY_ID_MRVL88X2011:
2107			err = link_status_10g_mrvl(np, link_up_p);
2108			break;
2109
2110		default: /* bcom 8704 */
2111			err = link_status_10g_bcom(np, link_up_p);
2112			break;
2113		}
2114	}
2115
2116	spin_unlock_irqrestore(&np->lock, flags);
2117
2118	return err;
2119}
2120
2121static int niu_10g_phy_present(struct niu *np)
2122{
2123	u64 sig, mask, val;
2124
2125	sig = nr64(ESR_INT_SIGNALS);
2126	switch (np->port) {
2127	case 0:
2128		mask = ESR_INT_SIGNALS_P0_BITS;
2129		val = (ESR_INT_SRDY0_P0 |
2130		       ESR_INT_DET0_P0 |
2131		       ESR_INT_XSRDY_P0 |
2132		       ESR_INT_XDP_P0_CH3 |
2133		       ESR_INT_XDP_P0_CH2 |
2134		       ESR_INT_XDP_P0_CH1 |
2135		       ESR_INT_XDP_P0_CH0);
2136		break;
2137
2138	case 1:
2139		mask = ESR_INT_SIGNALS_P1_BITS;
2140		val = (ESR_INT_SRDY0_P1 |
2141		       ESR_INT_DET0_P1 |
2142		       ESR_INT_XSRDY_P1 |
2143		       ESR_INT_XDP_P1_CH3 |
2144		       ESR_INT_XDP_P1_CH2 |
2145		       ESR_INT_XDP_P1_CH1 |
2146		       ESR_INT_XDP_P1_CH0);
2147		break;
2148
2149	default:
2150		return 0;
2151	}
2152
2153	if ((sig & mask) != val)
2154		return 0;
2155	return 1;
2156}
2157
2158static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
2159{
2160	unsigned long flags;
2161	int err = 0;
2162	int phy_present;
2163	int phy_present_prev;
2164
2165	spin_lock_irqsave(&np->lock, flags);
2166
2167	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
2168		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
2169			1 : 0;
2170		phy_present = niu_10g_phy_present(np);
2171		if (phy_present != phy_present_prev) {
2172			/* state change */
2173			if (phy_present) {
2174				/* A NEM was just plugged in */
2175				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2176				if (np->phy_ops->xcvr_init)
2177					err = np->phy_ops->xcvr_init(np);
2178				if (err) {
2179					err = mdio_read(np, np->phy_addr,
2180						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
2181					if (err == 0xffff) {
2182						/* No mdio, back-to-back XAUI */
2183						goto out;
2184					}
2185					/* debounce */
2186					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2187				}
2188			} else {
2189				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
2190				*link_up_p = 0;
2191				netif_warn(np, link, np->dev,
2192					   "Hotplug PHY Removed\n");
2193			}
2194		}
2195out:
2196		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
2197			err = link_status_10g_bcm8706(np, link_up_p);
2198			if (err == 0xffff) {
2199				/* No mdio, back-to-back XAUI: it is C10NEM */
2200				*link_up_p = 1;
2201				np->link_config.active_speed = SPEED_10000;
2202				np->link_config.active_duplex = DUPLEX_FULL;
2203			}
2204		}
2205	}
2206
2207	spin_unlock_irqrestore(&np->lock, flags);
2208
2209	return 0;
2210}
2211
2212static int niu_link_status(struct niu *np, int *link_up_p)
2213{
2214	const struct niu_phy_ops *ops = np->phy_ops;
2215	int err;
2216
2217	err = 0;
2218	if (ops->link_status)
2219		err = ops->link_status(np, link_up_p);
2220
2221	return err;
2222}
2223
2224static void niu_timer(unsigned long __opaque)
2225{
2226	struct niu *np = (struct niu *) __opaque;
2227	unsigned long off;
2228	int err, link_up;
2229
2230	err = niu_link_status(np, &link_up);
2231	if (!err)
2232		niu_link_status_common(np, link_up);
2233
2234	if (netif_carrier_ok(np->dev))
2235		off = 5 * HZ;
2236	else
2237		off = 1 * HZ;
2238	np->timer.expires = jiffies + off;
2239
2240	add_timer(&np->timer);
2241}
2242
2243static const struct niu_phy_ops phy_ops_10g_serdes = {
2244	.serdes_init		= serdes_init_10g_serdes,
2245	.link_status		= link_status_10g_serdes,
2246};
2247
2248static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
2249	.serdes_init		= serdes_init_niu_10g_serdes,
2250	.link_status		= link_status_10g_serdes,
2251};
2252
2253static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
2254	.serdes_init		= serdes_init_niu_1g_serdes,
2255	.link_status		= link_status_1g_serdes,
2256};
2257
2258static const struct niu_phy_ops phy_ops_1g_rgmii = {
2259	.xcvr_init		= xcvr_init_1g_rgmii,
2260	.link_status		= link_status_1g_rgmii,
2261};
2262
2263static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
2264	.serdes_init		= serdes_init_niu_10g_fiber,
2265	.xcvr_init		= xcvr_init_10g,
2266	.link_status		= link_status_10g,
2267};
2268
2269static const struct niu_phy_ops phy_ops_10g_fiber = {
2270	.serdes_init		= serdes_init_10g,
2271	.xcvr_init		= xcvr_init_10g,
2272	.link_status		= link_status_10g,
2273};
2274
2275static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
2276	.serdes_init		= serdes_init_10g,
2277	.xcvr_init		= xcvr_init_10g_bcm8706,
2278	.link_status		= link_status_10g_hotplug,
2279};
2280
2281static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
2282	.serdes_init		= serdes_init_niu_10g_fiber,
2283	.xcvr_init		= xcvr_init_10g_bcm8706,
2284	.link_status		= link_status_10g_hotplug,
2285};
2286
2287static const struct niu_phy_ops phy_ops_10g_copper = {
2288	.serdes_init		= serdes_init_10g,
2289	.link_status		= link_status_10g, /* XXX */
2290};
2291
2292static const struct niu_phy_ops phy_ops_1g_fiber = {
2293	.serdes_init		= serdes_init_1g,
2294	.xcvr_init		= xcvr_init_1g,
2295	.link_status		= link_status_1g,
2296};
2297
2298static const struct niu_phy_ops phy_ops_1g_copper = {
2299	.xcvr_init		= xcvr_init_1g,
2300	.link_status		= link_status_1g,
2301};
2302
2303struct niu_phy_template {
2304	const struct niu_phy_ops	*ops;
2305	u32				phy_addr_base;
2306};
2307
2308static const struct niu_phy_template phy_template_niu_10g_fiber = {
2309	.ops		= &phy_ops_10g_fiber_niu,
2310	.phy_addr_base	= 16,
2311};
2312
2313static const struct niu_phy_template phy_template_niu_10g_serdes = {
2314	.ops		= &phy_ops_10g_serdes_niu,
2315	.phy_addr_base	= 0,
2316};
2317
2318static const struct niu_phy_template phy_template_niu_1g_serdes = {
2319	.ops		= &phy_ops_1g_serdes_niu,
2320	.phy_addr_base	= 0,
2321};
2322
2323static const struct niu_phy_template phy_template_10g_fiber = {
2324	.ops		= &phy_ops_10g_fiber,
2325	.phy_addr_base	= 8,
2326};
2327
2328static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
2329	.ops		= &phy_ops_10g_fiber_hotplug,
2330	.phy_addr_base	= 8,
2331};
2332
2333static const struct niu_phy_template phy_template_niu_10g_hotplug = {
2334	.ops		= &phy_ops_niu_10g_hotplug,
2335	.phy_addr_base	= 8,
2336};
2337
2338static const struct niu_phy_template phy_template_10g_copper = {
2339	.ops		= &phy_ops_10g_copper,
2340	.phy_addr_base	= 10,
2341};
2342
2343static const struct niu_phy_template phy_template_1g_fiber = {
2344	.ops		= &phy_ops_1g_fiber,
2345	.phy_addr_base	= 0,
2346};
2347
2348static const struct niu_phy_template phy_template_1g_copper = {
2349	.ops		= &phy_ops_1g_copper,
2350	.phy_addr_base	= 0,
2351};
2352
2353static const struct niu_phy_template phy_template_1g_rgmii = {
2354	.ops		= &phy_ops_1g_rgmii,
2355	.phy_addr_base	= 0,
2356};
2357
2358static const struct niu_phy_template phy_template_10g_serdes = {
2359	.ops		= &phy_ops_10g_serdes,
2360	.phy_addr_base	= 0,
2361};
2362
2363static int niu_atca_port_num[4] = {
2364	0, 0,  11, 10
2365};
2366
2367static int serdes_init_10g_serdes(struct niu *np)
2368{
2369	struct niu_link_config *lp = &np->link_config;
2370	unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
2371	u64 ctrl_val, test_cfg_val, sig, mask, val;
2372
2373	switch (np->port) {
2374	case 0:
2375		ctrl_reg = ENET_SERDES_0_CTRL_CFG;
2376		test_cfg_reg = ENET_SERDES_0_TEST_CFG;
2377		pll_cfg = ENET_SERDES_0_PLL_CFG;
2378		break;
2379	case 1:
2380		ctrl_reg = ENET_SERDES_1_CTRL_CFG;
2381		test_cfg_reg = ENET_SERDES_1_TEST_CFG;
2382		pll_cfg = ENET_SERDES_1_PLL_CFG;
2383		break;
2384
2385	default:
2386		return -EINVAL;
2387	}
2388	ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
2389		    ENET_SERDES_CTRL_SDET_1 |
2390		    ENET_SERDES_CTRL_SDET_2 |
2391		    ENET_SERDES_CTRL_SDET_3 |
2392		    (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
2393		    (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
2394		    (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
2395		    (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
2396		    (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
2397		    (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
2398		    (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
2399		    (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
2400	test_cfg_val = 0;
2401
2402	if (lp->loopback_mode == LOOPBACK_PHY) {
2403		test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
2404				  ENET_SERDES_TEST_MD_0_SHIFT) |
2405				 (ENET_TEST_MD_PAD_LOOPBACK <<
2406				  ENET_SERDES_TEST_MD_1_SHIFT) |
2407				 (ENET_TEST_MD_PAD_LOOPBACK <<
2408				  ENET_SERDES_TEST_MD_2_SHIFT) |
2409				 (ENET_TEST_MD_PAD_LOOPBACK <<
2410				  ENET_SERDES_TEST_MD_3_SHIFT));
2411	}
2412
2413	esr_reset(np);
2414	nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
2415	nw64(ctrl_reg, ctrl_val);
2416	nw64(test_cfg_reg, test_cfg_val);
2417
2418	/* Initialize all 4 lanes of the SERDES.  */
2419	for (i = 0; i < 4; i++) {
2420		u32 rxtx_ctrl, glue0;
2421		int err;
2422
2423		err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
2424		if (err)
2425			return err;
2426		err = esr_read_glue0(np, i, &glue0);
2427		if (err)
2428			return err;
2429
2430		rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
2431		rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
2432			      (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
2433
2434		glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
2435			   ESR_GLUE_CTRL0_THCNT |
2436			   ESR_GLUE_CTRL0_BLTIME);
2437		glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
2438			  (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
2439			  (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
2440			  (BLTIME_300_CYCLES <<
2441			   ESR_GLUE_CTRL0_BLTIME_SHIFT));
2442
2443		err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
2444		if (err)
2445			return err;
2446		err = esr_write_glue0(np, i, glue0);
2447		if (err)
2448			return err;
2449	}
2450
2451
2452	sig = nr64(ESR_INT_SIGNALS);
2453	switch (np->port) {
2454	case 0:
2455		mask = ESR_INT_SIGNALS_P0_BITS;
2456		val = (ESR_INT_SRDY0_P0 |
2457		       ESR_INT_DET0_P0 |
2458		       ESR_INT_XSRDY_P0 |
2459		       ESR_INT_XDP_P0_CH3 |
2460		       ESR_INT_XDP_P0_CH2 |
2461		       ESR_INT_XDP_P0_CH1 |
2462		       ESR_INT_XDP_P0_CH0);
2463		break;
2464
2465	case 1:
2466		mask = ESR_INT_SIGNALS_P1_BITS;
2467		val = (ESR_INT_SRDY0_P1 |
2468		       ESR_INT_DET0_P1 |
2469		       ESR_INT_XSRDY_P1 |
2470		       ESR_INT_XDP_P1_CH3 |
2471		       ESR_INT_XDP_P1_CH2 |
2472		       ESR_INT_XDP_P1_CH1 |
2473		       ESR_INT_XDP_P1_CH0);
2474		break;
2475
2476	default:
2477		return -EINVAL;
2478	}
2479
2480	if ((sig & mask) != val) {
2481		int err;
2482		err = serdes_init_1g_serdes(np);
2483		if (!err) {
2484			np->flags &= ~NIU_FLAGS_10G;
2485			np->mac_xcvr = MAC_XCVR_PCS;
2486		}  else {
2487			netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
2488				   np->port);
2489			return -ENODEV;
2490		}
2491	}
2492
2493	return 0;
2494}
2495
2496static int niu_determine_phy_disposition(struct niu *np)
2497{
2498	struct niu_parent *parent = np->parent;
2499	u8 plat_type = parent->plat_type;
2500	const struct niu_phy_template *tp;
2501	u32 phy_addr_off = 0;
2502
2503	if (plat_type == PLAT_TYPE_NIU) {
2504		switch (np->flags &
2505			(NIU_FLAGS_10G |
2506			 NIU_FLAGS_FIBER |
2507			 NIU_FLAGS_XCVR_SERDES)) {
2508		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2509			/* 10G Serdes */
2510			tp = &phy_template_niu_10g_serdes;
2511			break;
2512		case NIU_FLAGS_XCVR_SERDES:
2513			/* 1G Serdes */
2514			tp = &phy_template_niu_1g_serdes;
2515			break;
2516		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2517			/* 10G Fiber */
2518		default:
2519			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2520				tp = &phy_template_niu_10g_hotplug;
2521				if (np->port == 0)
2522					phy_addr_off = 8;
2523				if (np->port == 1)
2524					phy_addr_off = 12;
2525			} else {
2526				tp = &phy_template_niu_10g_fiber;
2527				phy_addr_off += np->port;
2528			}
2529			break;
2530		}
2531	} else {
2532		switch (np->flags &
2533			(NIU_FLAGS_10G |
2534			 NIU_FLAGS_FIBER |
2535			 NIU_FLAGS_XCVR_SERDES)) {
2536		case 0:
2537			/* 1G copper */
2538			tp = &phy_template_1g_copper;
2539			if (plat_type == PLAT_TYPE_VF_P0)
2540				phy_addr_off = 10;
2541			else if (plat_type == PLAT_TYPE_VF_P1)
2542				phy_addr_off = 26;
2543
2544			phy_addr_off += (np->port ^ 0x3);
2545			break;
2546
2547		case NIU_FLAGS_10G:
2548			/* 10G copper */
2549			tp = &phy_template_10g_copper;
2550			break;
2551
2552		case NIU_FLAGS_FIBER:
2553			/* 1G fiber */
2554			tp = &phy_template_1g_fiber;
2555			break;
2556
2557		case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
2558			/* 10G fiber */
2559			tp = &phy_template_10g_fiber;
2560			if (plat_type == PLAT_TYPE_VF_P0 ||
2561			    plat_type == PLAT_TYPE_VF_P1)
2562				phy_addr_off = 8;
2563			phy_addr_off += np->port;
2564			if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
2565				tp = &phy_template_10g_fiber_hotplug;
2566				if (np->port == 0)
2567					phy_addr_off = 8;
2568				if (np->port == 1)
2569					phy_addr_off = 12;
2570			}
2571			break;
2572
2573		case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2574		case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
2575		case NIU_FLAGS_XCVR_SERDES:
2576			switch(np->port) {
2577			case 0:
2578			case 1:
2579				tp = &phy_template_10g_serdes;
2580				break;
2581			case 2:
2582			case 3:
2583				tp = &phy_template_1g_rgmii;
2584				break;
2585			default:
2586				return -EINVAL;
2587			}
2588			phy_addr_off = niu_atca_port_num[np->port];
2589			break;
2590
2591		default:
2592			return -EINVAL;
2593		}
2594	}
2595
2596	np->phy_ops = tp->ops;
2597	np->phy_addr = tp->phy_addr_base + phy_addr_off;
2598
2599	return 0;
2600}
2601
2602static int niu_init_link(struct niu *np)
2603{
2604	struct niu_parent *parent = np->parent;
2605	int err, ignore;
2606
2607	if (parent->plat_type == PLAT_TYPE_NIU) {
2608		err = niu_xcvr_init(np);
2609		if (err)
2610			return err;
2611		msleep(200);
2612	}
2613	err = niu_serdes_init(np);
2614	if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
2615		return err;
2616	msleep(200);
2617	err = niu_xcvr_init(np);
2618	if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
2619		niu_link_status(np, &ignore);
2620	return 0;
2621}
2622
2623static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
2624{
2625	u16 reg0 = addr[4] << 8 | addr[5];
2626	u16 reg1 = addr[2] << 8 | addr[3];
2627	u16 reg2 = addr[0] << 8 | addr[1];
2628
2629	if (np->flags & NIU_FLAGS_XMAC) {
2630		nw64_mac(XMAC_ADDR0, reg0);
2631		nw64_mac(XMAC_ADDR1, reg1);
2632		nw64_mac(XMAC_ADDR2, reg2);
2633	} else {
2634		nw64_mac(BMAC_ADDR0, reg0);
2635		nw64_mac(BMAC_ADDR1, reg1);
2636		nw64_mac(BMAC_ADDR2, reg2);
2637	}
2638}
2639
2640static int niu_num_alt_addr(struct niu *np)
2641{
2642	if (np->flags & NIU_FLAGS_XMAC)
2643		return XMAC_NUM_ALT_ADDR;
2644	else
2645		return BMAC_NUM_ALT_ADDR;
2646}
2647
2648static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
2649{
2650	u16 reg0 = addr[4] << 8 | addr[5];
2651	u16 reg1 = addr[2] << 8 | addr[3];
2652	u16 reg2 = addr[0] << 8 | addr[1];
2653
2654	if (index >= niu_num_alt_addr(np))
2655		return -EINVAL;
2656
2657	if (np->flags & NIU_FLAGS_XMAC) {
2658		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
2659		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
2660		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
2661	} else {
2662		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
2663		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
2664		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
2665	}
2666
2667	return 0;
2668}
2669
2670static int niu_enable_alt_mac(struct niu *np, int index, int on)
2671{
2672	unsigned long reg;
2673	u64 val, mask;
2674
2675	if (index >= niu_num_alt_addr(np))
2676		return -EINVAL;
2677
2678	if (np->flags & NIU_FLAGS_XMAC) {
2679		reg = XMAC_ADDR_CMPEN;
2680		mask = 1 << index;
2681	} else {
2682		reg = BMAC_ADDR_CMPEN;
2683		mask = 1 << (index + 1);
2684	}
2685
2686	val = nr64_mac(reg);
2687	if (on)
2688		val |= mask;
2689	else
2690		val &= ~mask;
2691	nw64_mac(reg, val);
2692
2693	return 0;
2694}
2695
2696static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
2697				   int num, int mac_pref)
2698{
2699	u64 val = nr64_mac(reg);
2700	val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
2701	val |= num;
2702	if (mac_pref)
2703		val |= HOST_INFO_MPR;
2704	nw64_mac(reg, val);
2705}
2706
2707static int __set_rdc_table_num(struct niu *np,
2708			       int xmac_index, int bmac_index,
2709			       int rdc_table_num, int mac_pref)
2710{
2711	unsigned long reg;
2712
2713	if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
2714		return -EINVAL;
2715	if (np->flags & NIU_FLAGS_XMAC)
2716		reg = XMAC_HOST_INFO(xmac_index);
2717	else
2718		reg = BMAC_HOST_INFO(bmac_index);
2719	__set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
2720	return 0;
2721}
2722
2723static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
2724					 int mac_pref)
2725{
2726	return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
2727}
2728
2729static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
2730					   int mac_pref)
2731{
2732	return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
2733}
2734
2735static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
2736				     int table_num, int mac_pref)
2737{
2738	if (idx >= niu_num_alt_addr(np))
2739		return -EINVAL;
2740	return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
2741}
2742
2743static u64 vlan_entry_set_parity(u64 reg_val)
2744{
2745	u64 port01_mask;
2746	u64 port23_mask;
2747
2748	port01_mask = 0x00ff;
2749	port23_mask = 0xff00;
2750
2751	if (hweight64(reg_val & port01_mask) & 1)
2752		reg_val |= ENET_VLAN_TBL_PARITY0;
2753	else
2754		reg_val &= ~ENET_VLAN_TBL_PARITY0;
2755
2756	if (hweight64(reg_val & port23_mask) & 1)
2757		reg_val |= ENET_VLAN_TBL_PARITY1;
2758	else
2759		reg_val &= ~ENET_VLAN_TBL_PARITY1;
2760
2761	return reg_val;
2762}
2763
2764static void vlan_tbl_write(struct niu *np, unsigned long index,
2765			   int port, int vpr, int rdc_table)
2766{
2767	u64 reg_val = nr64(ENET_VLAN_TBL(index));
2768
2769	reg_val &= ~((ENET_VLAN_TBL_VPR |
2770		      ENET_VLAN_TBL_VLANRDCTBLN) <<
2771		     ENET_VLAN_TBL_SHIFT(port));
2772	if (vpr)
2773		reg_val |= (ENET_VLAN_TBL_VPR <<
2774			    ENET_VLAN_TBL_SHIFT(port));
2775	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
2776
2777	reg_val = vlan_entry_set_parity(reg_val);
2778
2779	nw64(ENET_VLAN_TBL(index), reg_val);
2780}
2781
2782static void vlan_tbl_clear(struct niu *np)
2783{
2784	int i;
2785
2786	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
2787		nw64(ENET_VLAN_TBL(i), 0);
2788}
2789
2790static int tcam_wait_bit(struct niu *np, u64 bit)
2791{
2792	int limit = 1000;
2793
2794	while (--limit > 0) {
2795		if (nr64(TCAM_CTL) & bit)
2796			break;
2797		udelay(1);
2798	}
2799	if (limit <= 0)
2800		return -ENODEV;
2801
2802	return 0;
2803}
2804
2805static int tcam_flush(struct niu *np, int index)
2806{
2807	nw64(TCAM_KEY_0, 0x00);
2808	nw64(TCAM_KEY_MASK_0, 0xff);
2809	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2810
2811	return tcam_wait_bit(np, TCAM_CTL_STAT);
2812}
2813
2814#if 0
2815static int tcam_read(struct niu *np, int index,
2816		     u64 *key, u64 *mask)
2817{
2818	int err;
2819
2820	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
2821	err = tcam_wait_bit(np, TCAM_CTL_STAT);
2822	if (!err) {
2823		key[0] = nr64(TCAM_KEY_0);
2824		key[1] = nr64(TCAM_KEY_1);
2825		key[2] = nr64(TCAM_KEY_2);
2826		key[3] = nr64(TCAM_KEY_3);
2827		mask[0] = nr64(TCAM_KEY_MASK_0);
2828		mask[1] = nr64(TCAM_KEY_MASK_1);
2829		mask[2] = nr64(TCAM_KEY_MASK_2);
2830		mask[3] = nr64(TCAM_KEY_MASK_3);
2831	}
2832	return err;
2833}
2834#endif
2835
2836static int tcam_write(struct niu *np, int index,
2837		      u64 *key, u64 *mask)
2838{
2839	nw64(TCAM_KEY_0, key[0]);
2840	nw64(TCAM_KEY_1, key[1]);
2841	nw64(TCAM_KEY_2, key[2]);
2842	nw64(TCAM_KEY_3, key[3]);
2843	nw64(TCAM_KEY_MASK_0, mask[0]);
2844	nw64(TCAM_KEY_MASK_1, mask[1]);
2845	nw64(TCAM_KEY_MASK_2, mask[2]);
2846	nw64(TCAM_KEY_MASK_3, mask[3]);
2847	nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
2848
2849	return tcam_wait_bit(np, TCAM_CTL_STAT);
2850}
2851
2852#if 0
2853static int tcam_assoc_read(struct niu *np, int index, u64 *data)
2854{
2855	int err;
2856
2857	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
2858	err = tcam_wait_bit(np, TCAM_CTL_STAT);
2859	if (!err)
2860		*data = nr64(TCAM_KEY_1);
2861
2862	return err;
2863}
2864#endif
2865
2866static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
2867{
2868	nw64(TCAM_KEY_1, assoc_data);
2869	nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
2870
2871	return tcam_wait_bit(np, TCAM_CTL_STAT);
2872}
2873
2874static void tcam_enable(struct niu *np, int on)
2875{
2876	u64 val = nr64(FFLP_CFG_1);
2877
2878	if (on)
2879		val &= ~FFLP_CFG_1_TCAM_DIS;
2880	else
2881		val |= FFLP_CFG_1_TCAM_DIS;
2882	nw64(FFLP_CFG_1, val);
2883}
2884
2885static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
2886{
2887	u64 val = nr64(FFLP_CFG_1);
2888
2889	val &= ~(FFLP_CFG_1_FFLPINITDONE |
2890		 FFLP_CFG_1_CAMLAT |
2891		 FFLP_CFG_1_CAMRATIO);
2892	val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
2893	val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
2894	nw64(FFLP_CFG_1, val);
2895
2896	val = nr64(FFLP_CFG_1);
2897	val |= FFLP_CFG_1_FFLPINITDONE;
2898	nw64(FFLP_CFG_1, val);
2899}
2900
2901static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
2902				      int on)
2903{
2904	unsigned long reg;
2905	u64 val;
2906
2907	if (class < CLASS_CODE_ETHERTYPE1 ||
2908	    class > CLASS_CODE_ETHERTYPE2)
2909		return -EINVAL;
2910
2911	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2912	val = nr64(reg);
2913	if (on)
2914		val |= L2_CLS_VLD;
2915	else
2916		val &= ~L2_CLS_VLD;
2917	nw64(reg, val);
2918
2919	return 0;
2920}
2921
2922#if 0
2923static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
2924				   u64 ether_type)
2925{
2926	unsigned long reg;
2927	u64 val;
2928
2929	if (class < CLASS_CODE_ETHERTYPE1 ||
2930	    class > CLASS_CODE_ETHERTYPE2 ||
2931	    (ether_type & ~(u64)0xffff) != 0)
2932		return -EINVAL;
2933
2934	reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2935	val = nr64(reg);
2936	val &= ~L2_CLS_ETYPE;
2937	val |= (ether_type << L2_CLS_ETYPE_SHIFT);
2938	nw64(reg, val);
2939
2940	return 0;
2941}
2942#endif
2943
2944static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
2945				     int on)
2946{
2947	unsigned long reg;
2948	u64 val;
2949
2950	if (class < CLASS_CODE_USER_PROG1 ||
2951	    class > CLASS_CODE_USER_PROG4)
2952		return -EINVAL;
2953
2954	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2955	val = nr64(reg);
2956	if (on)
2957		val |= L3_CLS_VALID;
2958	else
2959		val &= ~L3_CLS_VALID;
2960	nw64(reg, val);
2961
2962	return 0;
2963}
2964
2965static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
2966				  int ipv6, u64 protocol_id,
2967				  u64 tos_mask, u64 tos_val)
2968{
2969	unsigned long reg;
2970	u64 val;
2971
2972	if (class < CLASS_CODE_USER_PROG1 ||
2973	    class > CLASS_CODE_USER_PROG4 ||
2974	    (protocol_id & ~(u64)0xff) != 0 ||
2975	    (tos_mask & ~(u64)0xff) != 0 ||
2976	    (tos_val & ~(u64)0xff) != 0)
2977		return -EINVAL;
2978
2979	reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
2980	val = nr64(reg);
2981	val &= ~(L3_CLS_IPVER | L3_CLS_PID |
2982		 L3_CLS_TOSMASK | L3_CLS_TOS);
2983	if (ipv6)
2984		val |= L3_CLS_IPVER;
2985	val |= (protocol_id << L3_CLS_PID_SHIFT);
2986	val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
2987	val |= (tos_val << L3_CLS_TOS_SHIFT);
2988	nw64(reg, val);
2989
2990	return 0;
2991}
2992
2993static int tcam_early_init(struct niu *np)
2994{
2995	unsigned long i;
2996	int err;
2997
2998	tcam_enable(np, 0);
2999	tcam_set_lat_and_ratio(np,
3000			       DEFAULT_TCAM_LATENCY,
3001			       DEFAULT_TCAM_ACCESS_RATIO);
3002	for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
3003		err = tcam_user_eth_class_enable(np, i, 0);
3004		if (err)
3005			return err;
3006	}
3007	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
3008		err = tcam_user_ip_class_enable(np, i, 0);
3009		if (err)
3010			return err;
3011	}
3012
3013	return 0;
3014}
3015
3016static int tcam_flush_all(struct niu *np)
3017{
3018	unsigned long i;
3019
3020	for (i = 0; i < np->parent->tcam_num_entries; i++) {
3021		int err = tcam_flush(np, i);
3022		if (err)
3023			return err;
3024	}
3025	return 0;
3026}
3027
3028static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
3029{
3030	return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
3031}
3032
3033#if 0
3034static int hash_read(struct niu *np, unsigned long partition,
3035		     unsigned long index, unsigned long num_entries,
3036		     u64 *data)
3037{
3038	u64 val = hash_addr_regval(index, num_entries);
3039	unsigned long i;
3040
3041	if (partition >= FCRAM_NUM_PARTITIONS ||
3042	    index + num_entries > FCRAM_SIZE)
3043		return -EINVAL;
3044
3045	nw64(HASH_TBL_ADDR(partition), val);
3046	for (i = 0; i < num_entries; i++)
3047		data[i] = nr64(HASH_TBL_DATA(partition));
3048
3049	return 0;
3050}
3051#endif
3052
3053static int hash_write(struct niu *np, unsigned long partition,
3054		      unsigned long index, unsigned long num_entries,
3055		      u64 *data)
3056{
3057	u64 val = hash_addr_regval(index, num_entries);
3058	unsigned long i;
3059
3060	if (partition >= FCRAM_NUM_PARTITIONS ||
3061	    index + (num_entries * 8) > FCRAM_SIZE)
3062		return -EINVAL;
3063
3064	nw64(HASH_TBL_ADDR(partition), val);
3065	for (i = 0; i < num_entries; i++)
3066		nw64(HASH_TBL_DATA(partition), data[i]);
3067
3068	return 0;
3069}
3070
3071static void fflp_reset(struct niu *np)
3072{
3073	u64 val;
3074
3075	nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
3076	udelay(10);
3077	nw64(FFLP_CFG_1, 0);
3078
3079	val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
3080	nw64(FFLP_CFG_1, val);
3081}
3082
3083static void fflp_set_timings(struct niu *np)
3084{
3085	u64 val = nr64(FFLP_CFG_1);
3086
3087	val &= ~FFLP_CFG_1_FFLPINITDONE;
3088	val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
3089	nw64(FFLP_CFG_1, val);
3090
3091	val = nr64(FFLP_CFG_1);
3092	val |= FFLP_CFG_1_FFLPINITDONE;
3093	nw64(FFLP_CFG_1, val);
3094
3095	val = nr64(FCRAM_REF_TMR);
3096	val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
3097	val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
3098	val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
3099	nw64(FCRAM_REF_TMR, val);
3100}
3101
3102static int fflp_set_partition(struct niu *np, u64 partition,
3103			      u64 mask, u64 base, int enable)
3104{
3105	unsigned long reg;
3106	u64 val;
3107
3108	if (partition >= FCRAM_NUM_PARTITIONS ||
3109	    (mask & ~(u64)0x1f) != 0 ||
3110	    (base & ~(u64)0x1f) != 0)
3111		return -EINVAL;
3112
3113	reg = FLW_PRT_SEL(partition);
3114
3115	val = nr64(reg);
3116	val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
3117	val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
3118	val |= (base << FLW_PRT_SEL_BASE_SHIFT);
3119	if (enable)
3120		val |= FLW_PRT_SEL_EXT;
3121	nw64(reg, val);
3122
3123	return 0;
3124}
3125
3126static int fflp_disable_all_partitions(struct niu *np)
3127{
3128	unsigned long i;
3129
3130	for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
3131		int err = fflp_set_partition(np, 0, 0, 0, 0);
3132		if (err)
3133			return err;
3134	}
3135	return 0;
3136}
3137
3138static void fflp_llcsnap_enable(struct niu *np, int on)
3139{
3140	u64 val = nr64(FFLP_CFG_1);
3141
3142	if (on)
3143		val |= FFLP_CFG_1_LLCSNAP;
3144	else
3145		val &= ~FFLP_CFG_1_LLCSNAP;
3146	nw64(FFLP_CFG_1, val);
3147}
3148
3149static void fflp_errors_enable(struct niu *np, int on)
3150{
3151	u64 val = nr64(FFLP_CFG_1);
3152
3153	if (on)
3154		val &= ~FFLP_CFG_1_ERRORDIS;
3155	else
3156		val |= FFLP_CFG_1_ERRORDIS;
3157	nw64(FFLP_CFG_1, val);
3158}
3159
3160static int fflp_hash_clear(struct niu *np)
3161{
3162	struct fcram_hash_ipv4 ent;
3163	unsigned long i;
3164
3165	/* IPV4 hash entry with valid bit clear, rest is don't care.  */
3166	memset(&ent, 0, sizeof(ent));
3167	ent.header = HASH_HEADER_EXT;
3168
3169	for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
3170		int err = hash_write(np, 0, i, 1, (u64 *) &ent);
3171		if (err)
3172			return err;
3173	}
3174	return 0;
3175}
3176
3177static int fflp_early_init(struct niu *np)
3178{
3179	struct niu_parent *parent;
3180	unsigned long flags;
3181	int err;
3182
3183	niu_lock_parent(np, flags);
3184
3185	parent = np->parent;
3186	err = 0;
3187	if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
3188		if (np->parent->plat_type != PLAT_TYPE_NIU) {
3189			fflp_reset(np);
3190			fflp_set_timings(np);
3191			err = fflp_disable_all_partitions(np);
3192			if (err) {
3193				netif_printk(np, probe, KERN_DEBUG, np->dev,
3194					     "fflp_disable_all_partitions failed, err=%d\n",
3195					     err);
3196				goto out;
3197			}
3198		}
3199
3200		err = tcam_early_init(np);
3201		if (err) {
3202			netif_printk(np, probe, KERN_DEBUG, np->dev,
3203				     "tcam_early_init failed, err=%d\n", err);
3204			goto out;
3205		}
3206		fflp_llcsnap_enable(np, 1);
3207		fflp_errors_enable(np, 0);
3208		nw64(H1POLY, 0);
3209		nw64(H2POLY, 0);
3210
3211		err = tcam_flush_all(np);
3212		if (err) {
3213			netif_printk(np, probe, KERN_DEBUG, np->dev,
3214				     "tcam_flush_all failed, err=%d\n", err);
3215			goto out;
3216		}
3217		if (np->parent->plat_type != PLAT_TYPE_NIU) {
3218			err = fflp_hash_clear(np);
3219			if (err) {
3220				netif_printk(np, probe, KERN_DEBUG, np->dev,
3221					     "fflp_hash_clear failed, err=%d\n",
3222					     err);
3223				goto out;
3224			}
3225		}
3226
3227		vlan_tbl_clear(np);
3228
3229		parent->flags |= PARENT_FLGS_CLS_HWINIT;
3230	}
3231out:
3232	niu_unlock_parent(np, flags);
3233	return err;
3234}
3235
3236static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
3237{
3238	if (class_code < CLASS_CODE_USER_PROG1 ||
3239	    class_code > CLASS_CODE_SCTP_IPV6)
3240		return -EINVAL;
3241
3242	nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3243	return 0;
3244}
3245
3246static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
3247{
3248	if (class_code < CLASS_CODE_USER_PROG1 ||
3249	    class_code > CLASS_CODE_SCTP_IPV6)
3250		return -EINVAL;
3251
3252	nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
3253	return 0;
3254}
3255
3256/* Entries for the ports are interleaved in the TCAM */
3257static u16 tcam_get_index(struct niu *np, u16 idx)
3258{
3259	/* One entry reserved for IP fragment rule */
3260	if (idx >= (np->clas.tcam_sz - 1))
3261		idx = 0;
3262	return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
3263}
3264
3265static u16 tcam_get_size(struct niu *np)
3266{
3267	/* One entry reserved for IP fragment rule */
3268	return np->clas.tcam_sz - 1;
3269}
3270
3271static u16 tcam_get_valid_entry_cnt(struct niu *np)
3272{
3273	/* One entry reserved for IP fragment rule */
3274	return np->clas.tcam_valid_entries - 1;
3275}
3276
3277static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
3278			      u32 offset, u32 size, u32 truesize)
3279{
3280	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size);
3281
3282	skb->len += size;
3283	skb->data_len += size;
3284	skb->truesize += truesize;
3285}
3286
3287static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
3288{
3289	a >>= PAGE_SHIFT;
3290	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
3291
3292	return a & (MAX_RBR_RING_SIZE - 1);
3293}
3294
3295static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
3296				    struct page ***link)
3297{
3298	unsigned int h = niu_hash_rxaddr(rp, addr);
3299	struct page *p, **pp;
3300
3301	addr &= PAGE_MASK;
3302	pp = &rp->rxhash[h];
3303	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
3304		if (p->index == addr) {
3305			*link = pp;
3306			goto found;
3307		}
3308	}
3309	BUG();
3310
3311found:
3312	return p;
3313}
3314
3315static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
3316{
3317	unsigned int h = niu_hash_rxaddr(rp, base);
3318
3319	page->index = base;
3320	page->mapping = (struct address_space *) rp->rxhash[h];
3321	rp->rxhash[h] = page;
3322}
3323
3324static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
3325			    gfp_t mask, int start_index)
3326{
3327	struct page *page;
3328	u64 addr;
3329	int i;
3330
3331	page = alloc_page(mask);
3332	if (!page)
3333		return -ENOMEM;
3334
3335	addr = np->ops->map_page(np->device, page, 0,
3336				 PAGE_SIZE, DMA_FROM_DEVICE);
3337	if (!addr) {
3338		__free_page(page);
3339		return -ENOMEM;
3340	}
3341
3342	niu_hash_page(rp, page, addr);
3343	if (rp->rbr_blocks_per_page > 1)
3344		atomic_add(rp->rbr_blocks_per_page - 1, &page->_count);
3345
3346	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
3347		__le32 *rbr = &rp->rbr[start_index + i];
3348
3349		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
3350		addr += rp->rbr_block_size;
3351	}
3352
3353	return 0;
3354}
3355
3356static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3357{
3358	int index = rp->rbr_index;
3359
3360	rp->rbr_pending++;
3361	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
3362		int err = niu_rbr_add_page(np, rp, mask, index);
3363
3364		if (unlikely(err)) {
3365			rp->rbr_pending--;
3366			return;
3367		}
3368
3369		rp->rbr_index += rp->rbr_blocks_per_page;
3370		BUG_ON(rp->rbr_index > rp->rbr_table_size);
3371		if (rp->rbr_index == rp->rbr_table_size)
3372			rp->rbr_index = 0;
3373
3374		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
3375			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
3376			rp->rbr_pending = 0;
3377		}
3378	}
3379}
3380
3381static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
3382{
3383	unsigned int index = rp->rcr_index;
3384	int num_rcr = 0;
3385
3386	rp->rx_dropped++;
3387	while (1) {
3388		struct page *page, **link;
3389		u64 addr, val;
3390		u32 rcr_size;
3391
3392		num_rcr++;
3393
3394		val = le64_to_cpup(&rp->rcr[index]);
3395		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3396			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3397		page = niu_find_rxpage(rp, addr, &link);
3398
3399		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3400					 RCR_ENTRY_PKTBUFSZ_SHIFT];
3401		if ((page->index + PAGE_SIZE) - rcr_size == addr) {
3402			*link = (struct page *) page->mapping;
3403			np->ops->unmap_page(np->device, page->index,
3404					    PAGE_SIZE, DMA_FROM_DEVICE);
3405			page->index = 0;
3406			page->mapping = NULL;
3407			__free_page(page);
3408			rp->rbr_refill_pending++;
3409		}
3410
3411		index = NEXT_RCR(rp, index);
3412		if (!(val & RCR_ENTRY_MULTI))
3413			break;
3414
3415	}
3416	rp->rcr_index = index;
3417
3418	return num_rcr;
3419}
3420
3421static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3422			      struct rx_ring_info *rp)
3423{
3424	unsigned int index = rp->rcr_index;
3425	struct rx_pkt_hdr1 *rh;
3426	struct sk_buff *skb;
3427	int len, num_rcr;
3428
3429	skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
3430	if (unlikely(!skb))
3431		return niu_rx_pkt_ignore(np, rp);
3432
3433	num_rcr = 0;
3434	while (1) {
3435		struct page *page, **link;
3436		u32 rcr_size, append_size;
3437		u64 addr, val, off;
3438
3439		num_rcr++;
3440
3441		val = le64_to_cpup(&rp->rcr[index]);
3442
3443		len = (val & RCR_ENTRY_L2_LEN) >>
3444			RCR_ENTRY_L2_LEN_SHIFT;
3445		len -= ETH_FCS_LEN;
3446
3447		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
3448			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
3449		page = niu_find_rxpage(rp, addr, &link);
3450
3451		rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
3452					 RCR_ENTRY_PKTBUFSZ_SHIFT];
3453
3454		off = addr & ~PAGE_MASK;
3455		append_size = rcr_size;
3456		if (num_rcr == 1) {
3457			int ptype;
3458
3459			ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
3460			if ((ptype == RCR_PKT_TYPE_TCP ||
3461			     ptype == RCR_PKT_TYPE_UDP) &&
3462			    !(val & (RCR_ENTRY_NOPORT |
3463				     RCR_ENTRY_ERROR)))
3464				skb->ip_summed = CHECKSUM_UNNECESSARY;
3465			else
3466				skb_checksum_none_assert(skb);
3467		} else if (!(val & RCR_ENTRY_MULTI))
3468			append_size = len - skb->len;
3469
3470		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
3471		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
3472			*link = (struct page *) page->mapping;
3473			np->ops->unmap_page(np->device, page->index,
3474					    PAGE_SIZE, DMA_FROM_DEVICE);
3475			page->index = 0;
3476			page->mapping = NULL;
3477			rp->rbr_refill_pending++;
3478		} else
3479			get_page(page);
3480
3481		index = NEXT_RCR(rp, index);
3482		if (!(val & RCR_ENTRY_MULTI))
3483			break;
3484
3485	}
3486	rp->rcr_index = index;
3487
3488	len += sizeof(*rh);
3489	len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
3490	__pskb_pull_tail(skb, len);
3491
3492	rh = (struct rx_pkt_hdr1 *) skb->data;
3493	if (np->dev->features & NETIF_F_RXHASH)
3494		skb_set_hash(skb,
3495			     ((u32)rh->hashval2_0 << 24 |
3496			      (u32)rh->hashval2_1 << 16 |
3497			      (u32)rh->hashval1_1 << 8 |
3498			      (u32)rh->hashval1_2 << 0),
3499			     PKT_HASH_TYPE_L3);
3500	skb_pull(skb, sizeof(*rh));
3501
3502	rp->rx_packets++;
3503	rp->rx_bytes += skb->len;
3504
3505	skb->protocol = eth_type_trans(skb, np->dev);
3506	skb_record_rx_queue(skb, rp->rx_channel);
3507	napi_gro_receive(napi, skb);
3508
3509	return num_rcr;
3510}
3511
3512static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
3513{
3514	int blocks_per_page = rp->rbr_blocks_per_page;
3515	int err, index = rp->rbr_index;
3516
3517	err = 0;
3518	while (index < (rp->rbr_table_size - blocks_per_page)) {
3519		err = niu_rbr_add_page(np, rp, mask, index);
3520		if (unlikely(err))
3521			break;
3522
3523		index += blocks_per_page;
3524	}
3525
3526	rp->rbr_index = index;
3527	return err;
3528}
3529
3530static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
3531{
3532	int i;
3533
3534	for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
3535		struct page *page;
3536
3537		page = rp->rxhash[i];
3538		while (page) {
3539			struct page *next = (struct page *) page->mapping;
3540			u64 base = page->index;
3541
3542			np->ops->unmap_page(np->device, base, PAGE_SIZE,
3543					    DMA_FROM_DEVICE);
3544			page->index = 0;
3545			page->mapping = NULL;
3546
3547			__free_page(page);
3548
3549			page = next;
3550		}
3551	}
3552
3553	for (i = 0; i < rp->rbr_table_size; i++)
3554		rp->rbr[i] = cpu_to_le32(0);
3555	rp->rbr_index = 0;
3556}
3557
3558static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3559{
3560	struct tx_buff_info *tb = &rp->tx_buffs[idx];
3561	struct sk_buff *skb = tb->skb;
3562	struct tx_pkt_hdr *tp;
3563	u64 tx_flags;
3564	int i, len;
3565
3566	tp = (struct tx_pkt_hdr *) skb->data;
3567	tx_flags = le64_to_cpup(&tp->flags);
3568
3569	rp->tx_packets++;
3570	rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
3571			 ((tx_flags & TXHDR_PAD) / 2));
3572
3573	len = skb_headlen(skb);
3574	np->ops->unmap_single(np->device, tb->mapping,
3575			      len, DMA_TO_DEVICE);
3576
3577	if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
3578		rp->mark_pending--;
3579
3580	tb->skb = NULL;
3581	do {
3582		idx = NEXT_TX(rp, idx);
3583		len -= MAX_TX_DESC_LEN;
3584	} while (len > 0);
3585
3586	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3587		tb = &rp->tx_buffs[idx];
3588		BUG_ON(tb->skb != NULL);
3589		np->ops->unmap_page(np->device, tb->mapping,
3590				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
3591				    DMA_TO_DEVICE);
3592		idx = NEXT_TX(rp, idx);
3593	}
3594
3595	dev_kfree_skb(skb);
3596
3597	return idx;
3598}
3599
3600#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)
3601
3602static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3603{
3604	struct netdev_queue *txq;
3605	u16 pkt_cnt, tmp;
3606	int cons, index;
3607	u64 cs;
3608
3609	index = (rp - np->tx_rings);
3610	txq = netdev_get_tx_queue(np->dev, index);
3611
3612	cs = rp->tx_cs;
3613	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
3614		goto out;
3615
3616	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
3617	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
3618		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
3619
3620	rp->last_pkt_cnt = tmp;
3621
3622	cons = rp->cons;
3623
3624	netif_printk(np, tx_done, KERN_DEBUG, np->dev,
3625		     "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
3626
3627	while (pkt_cnt--)
3628		cons = release_tx_packet(np, rp, cons);
3629
3630	rp->cons = cons;
3631	smp_mb();
3632
3633out:
3634	if (unlikely(netif_tx_queue_stopped(txq) &&
3635		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
3636		__netif_tx_lock(txq, smp_processor_id());
3637		if (netif_tx_queue_stopped(txq) &&
3638		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
3639			netif_tx_wake_queue(txq);
3640		__netif_tx_unlock(txq);
3641	}
3642}
3643
3644static inline void niu_sync_rx_discard_stats(struct niu *np,
3645					     struct rx_ring_info *rp,
3646					     const int limit)
3647{
3648	/* This elaborate scheme is needed for reading the RX discard
3649	 * counters, as they are only 16-bit and can overflow quickly,
3650	 * and because the overflow indication bit is not usable as
3651	 * the counter value does not wrap, but remains at max value
3652	 * 0xFFFF.
3653	 *
3654	 * In theory and in practice counters can be lost in between
3655	 * reading nr64() and clearing the counter nw64().  For this
3656	 * reason, the number of counter clearings nw64() is
3657	 * limited/reduced though the limit parameter.
3658	 */
3659	int rx_channel = rp->rx_channel;
3660	u32 misc, wred;
3661
3662	/* RXMISC (Receive Miscellaneous Discard Count), covers the
3663	 * following discard events: IPP (Input Port Process),
3664	 * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
3665	 * Block Ring) prefetch buffer is empty.
3666	 */
3667	misc = nr64(RXMISC(rx_channel));
3668	if (unlikely((misc & RXMISC_COUNT) > limit)) {
3669		nw64(RXMISC(rx_channel), 0);
3670		rp->rx_errors += misc & RXMISC_COUNT;
3671
3672		if (unlikely(misc & RXMISC_OFLOW))
3673			dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
3674				rx_channel);
3675
3676		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3677			     "rx-%d: MISC drop=%u over=%u\n",
3678			     rx_channel, misc, misc-limit);
3679	}
3680
3681	/* WRED (Weighted Random Early Discard) by hardware */
3682	wred = nr64(RED_DIS_CNT(rx_channel));
3683	if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
3684		nw64(RED_DIS_CNT(rx_channel), 0);
3685		rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
3686
3687		if (unlikely(wred & RED_DIS_CNT_OFLOW))
3688			dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
3689
3690		netif_printk(np, rx_err, KERN_DEBUG, np->dev,
3691			     "rx-%d: WRED drop=%u over=%u\n",
3692			     rx_channel, wred, wred-limit);
3693	}
3694}
3695
3696static int niu_rx_work(struct napi_struct *napi, struct niu *np,
3697		       struct rx_ring_info *rp, int budget)
3698{
3699	int qlen, rcr_done = 0, work_done = 0;
3700	struct rxdma_mailbox *mbox = rp->mbox;
3701	u64 stat;
3702
3703#if 1
3704	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3705	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
3706#else
3707	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
3708	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
3709#endif
3710	mbox->rx_dma_ctl_stat = 0;
3711	mbox->rcrstat_a = 0;
3712
3713	netif_printk(np, rx_status, KERN_DEBUG, np->dev,
3714		     "%s(chan[%d]), stat[%llx] qlen=%d\n",
3715		     __func__, rp->rx_channel, (unsigned long long)stat, qlen);
3716
3717	rcr_done = work_done = 0;
3718	qlen = min(qlen, budget);
3719	while (work_done < qlen) {
3720		rcr_done += niu_process_rx_pkt(napi, np, rp);
3721		work_done++;
3722	}
3723
3724	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
3725		unsigned int i;
3726
3727		for (i = 0; i < rp->rbr_refill_pending; i++)
3728			niu_rbr_refill(np, rp, GFP_ATOMIC);
3729		rp->rbr_refill_pending = 0;
3730	}
3731
3732	stat = (RX_DMA_CTL_STAT_MEX |
3733		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
3734		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
3735
3736	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
3737
3738	/* Only sync discards stats when qlen indicate potential for drops */
3739	if (qlen > 10)
3740		niu_sync_rx_discard_stats(np, rp, 0x7FFF);
3741
3742	return work_done;
3743}
3744
3745static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
3746{
3747	u64 v0 = lp->v0;
3748	u32 tx_vec = (v0 >> 32);
3749	u32 rx_vec = (v0 & 0xffffffff);
3750	int i, work_done = 0;
3751
3752	netif_printk(np, intr, KERN_DEBUG, np->dev,
3753		     "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
3754
3755	for (i = 0; i < np->num_tx_rings; i++) {
3756		struct tx_ring_info *rp = &np->tx_rings[i];
3757		if (tx_vec & (1 << rp->tx_channel))
3758			niu_tx_work(np, rp);
3759		nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
3760	}
3761
3762	for (i = 0; i < np->num_rx_rings; i++) {
3763		struct rx_ring_info *rp = &np->rx_rings[i];
3764
3765		if (rx_vec & (1 << rp->rx_channel)) {
3766			int this_work_done;
3767
3768			this_work_done = niu_rx_work(&lp->napi, np, rp,
3769						     budget);
3770
3771			budget -= this_work_done;
3772			work_done += this_work_done;
3773		}
3774		nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
3775	}
3776
3777	return work_done;
3778}
3779
3780static int niu_poll(struct napi_struct *napi, int budget)
3781{
3782	struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
3783	struct niu *np = lp->np;
3784	int work_done;
3785
3786	work_done = niu_poll_core(np, lp, budget);
3787
3788	if (work_done < budget) {
3789		napi_complete(napi);
3790		niu_ldg_rearm(np, lp, 1);
3791	}
3792	return work_done;
3793}
3794
3795static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
3796				  u64 stat)
3797{
3798	netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
3799
3800	if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
3801		pr_cont("RBR_TMOUT ");
3802	if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
3803		pr_cont("RSP_CNT ");
3804	if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
3805		pr_cont("BYTE_EN_BUS ");
3806	if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
3807		pr_cont("RSP_DAT ");
3808	if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
3809		pr_cont("RCR_ACK ");
3810	if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
3811		pr_cont("RCR_SHA_PAR ");
3812	if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
3813		pr_cont("RBR_PRE_PAR ");
3814	if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
3815		pr_cont("CONFIG ");
3816	if (stat & RX_DMA_CTL_STAT_RCRINCON)
3817		pr_cont("RCRINCON ");
3818	if (stat & RX_DMA_CTL_STAT_RCRFULL)
3819		pr_cont("RCRFULL ");
3820	if (stat & RX_DMA_CTL_STAT_RBRFULL)
3821		pr_cont("RBRFULL ");
3822	if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
3823		pr_cont("RBRLOGPAGE ");
3824	if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
3825		pr_cont("CFIGLOGPAGE ");
3826	if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
3827		pr_cont("DC_FIDO ");
3828
3829	pr_cont(")\n");
3830}
3831
3832static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
3833{
3834	u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
3835	int err = 0;
3836
3837
3838	if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
3839		    RX_DMA_CTL_STAT_PORT_FATAL))
3840		err = -EINVAL;
3841
3842	if (err) {
3843		netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
3844			   rp->rx_channel,
3845			   (unsigned long long) stat);
3846
3847		niu_log_rxchan_errors(np, rp, stat);
3848	}
3849
3850	nw64(RX_DMA_CTL_STAT(rp->rx_channel),
3851	     stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
3852
3853	return err;
3854}
3855
3856static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
3857				  u64 cs)
3858{
3859	netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
3860
3861	if (cs & TX_CS_MBOX_ERR)
3862		pr_cont("MBOX ");
3863	if (cs & TX_CS_PKT_SIZE_ERR)
3864		pr_cont("PKT_SIZE ");
3865	if (cs & TX_CS_TX_RING_OFLOW)
3866		pr_cont("TX_RING_OFLOW ");
3867	if (cs & TX_CS_PREF_BUF_PAR_ERR)
3868		pr_cont("PREF_BUF_PAR ");
3869	if (cs & TX_CS_NACK_PREF)
3870		pr_cont("NACK_PREF ");
3871	if (cs & TX_CS_NACK_PKT_RD)
3872		pr_cont("NACK_PKT_RD ");
3873	if (cs & TX_CS_CONF_PART_ERR)
3874		pr_cont("CONF_PART ");
3875	if (cs & TX_CS_PKT_PRT_ERR)
3876		pr_cont("PKT_PTR ");
3877
3878	pr_cont(")\n");
3879}
3880
3881static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
3882{
3883	u64 cs, logh, logl;
3884
3885	cs = nr64(TX_CS(rp->tx_channel));
3886	logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
3887	logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
3888
3889	netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
3890		   rp->tx_channel,
3891		   (unsigned long long)cs,
3892		   (unsigned long long)logh,
3893		   (unsigned long long)logl);
3894
3895	niu_log_txchan_errors(np, rp, cs);
3896
3897	return -ENODEV;
3898}
3899
3900static int niu_mif_interrupt(struct niu *np)
3901{
3902	u64 mif_status = nr64(MIF_STATUS);
3903	int phy_mdint = 0;
3904
3905	if (np->flags & NIU_FLAGS_XMAC) {
3906		u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
3907
3908		if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
3909			phy_mdint = 1;
3910	}
3911
3912	netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
3913		   (unsigned long long)mif_status, phy_mdint);
3914
3915	return -ENODEV;
3916}
3917
3918static void niu_xmac_interrupt(struct niu *np)
3919{
3920	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
3921	u64 val;
3922
3923	val = nr64_mac(XTXMAC_STATUS);
3924	if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
3925		mp->tx_frames += TXMAC_FRM_CNT_COUNT;
3926	if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
3927		mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
3928	if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
3929		mp->tx_fifo_errors++;
3930	if (val & XTXMAC_STATUS_TXMAC_OFLOW)
3931		mp->tx_overflow_errors++;
3932	if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
3933		mp->tx_max_pkt_size_errors++;
3934	if (val & XTXMAC_STATUS_TXMAC_UFLOW)
3935		mp->tx_underflow_errors++;
3936
3937	val = nr64_mac(XRXMAC_STATUS);
3938	if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
3939		mp->rx_local_faults++;
3940	if (val & XRXMAC_STATUS_RFLT_DET)
3941		mp->rx_remote_faults++;
3942	if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
3943		mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
3944	if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
3945		mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
3946	if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
3947		mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
3948	if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
3949		mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
3950	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3951		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3952	if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
3953		mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
3954	if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
3955		mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
3956	if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
3957		mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
3958	if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
3959		mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
3960	if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
3961		mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
3962	if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
3963		mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
3964	if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
3965		mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
3966	if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
3967		mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
3968	if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
3969		mp->rx_octets += RXMAC_BT_CNT_COUNT;
3970	if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
3971		mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
3972	if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
3973		mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
3974	if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
3975		mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
3976	if (val & XRXMAC_STATUS_RXUFLOW)
3977		mp->rx_underflows++;
3978	if (val & XRXMAC_STATUS_RXOFLOW)
3979		mp->rx_overflows++;
3980
3981	val = nr64_mac(XMAC_FC_STAT);
3982	if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
3983		mp->pause_off_state++;
3984	if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
3985		mp->pause_on_state++;
3986	if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
3987		mp->pause_received++;
3988}
3989
3990static void niu_bmac_interrupt(struct niu *np)
3991{
3992	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
3993	u64 val;
3994
3995	val = nr64_mac(BTXMAC_STATUS);
3996	if (val & BTXMAC_STATUS_UNDERRUN)
3997		mp->tx_underflow_errors++;
3998	if (val & BTXMAC_STATUS_MAX_PKT_ERR)
3999		mp->tx_max_pkt_size_errors++;
4000	if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
4001		mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
4002	if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
4003		mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
4004
4005	val = nr64_mac(BRXMAC_STATUS);
4006	if (val & BRXMAC_STATUS_OVERFLOW)
4007		mp->rx_overflows++;
4008	if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
4009		mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
4010	if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
4011		mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
4012	if (val & BRXMAC_STATUS_CRC_ERR_EXP)
4013		mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
4014	if (val & BRXMAC_STATUS_LEN_ERR_EXP)
4015		mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
4016
4017	val = nr64_mac(BMAC_CTRL_STATUS);
4018	if (val & BMAC_CTRL_STATUS_NOPAUSE)
4019		mp->pause_off_state++;
4020	if (val & BMAC_CTRL_STATUS_PAUSE)
4021		mp->pause_on_state++;
4022	if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
4023		mp->pause_received++;
4024}
4025
4026static int niu_mac_interrupt(struct niu *np)
4027{
4028	if (np->flags & NIU_FLAGS_XMAC)
4029		niu_xmac_interrupt(np);
4030	else
4031		niu_bmac_interrupt(np);
4032
4033	return 0;
4034}
4035
4036static void niu_log_device_error(struct niu *np, u64 stat)
4037{
4038	netdev_err(np->dev, "Core device errors ( ");
4039
4040	if (stat & SYS_ERR_MASK_META2)
4041		pr_cont("META2 ");
4042	if (stat & SYS_ERR_MASK_META1)
4043		pr_cont("META1 ");
4044	if (stat & SYS_ERR_MASK_PEU)
4045		pr_cont("PEU ");
4046	if (stat & SYS_ERR_MASK_TXC)
4047		pr_cont("TXC ");
4048	if (stat & SYS_ERR_MASK_RDMC)
4049		pr_cont("RDMC ");
4050	if (stat & SYS_ERR_MASK_TDMC)
4051		pr_cont("TDMC ");
4052	if (stat & SYS_ERR_MASK_ZCP)
4053		pr_cont("ZCP ");
4054	if (stat & SYS_ERR_MASK_FFLP)
4055		pr_cont("FFLP ");
4056	if (stat & SYS_ERR_MASK_IPP)
4057		pr_cont("IPP ");
4058	if (stat & SYS_ERR_MASK_MAC)
4059		pr_cont("MAC ");
4060	if (stat & SYS_ERR_MASK_SMX)
4061		pr_cont("SMX ");
4062
4063	pr_cont(")\n");
4064}
4065
4066static int niu_device_error(struct niu *np)
4067{
4068	u64 stat = nr64(SYS_ERR_STAT);
4069
4070	netdev_err(np->dev, "Core device error, stat[%llx]\n",
4071		   (unsigned long long)stat);
4072
4073	niu_log_device_error(np, stat);
4074
4075	return -ENODEV;
4076}
4077
4078static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
4079			      u64 v0, u64 v1, u64 v2)
4080{
4081
4082	int i, err = 0;
4083
4084	lp->v0 = v0;
4085	lp->v1 = v1;
4086	lp->v2 = v2;
4087
4088	if (v1 & 0x00000000ffffffffULL) {
4089		u32 rx_vec = (v1 & 0xffffffff);
4090
4091		for (i = 0; i < np->num_rx_rings; i++) {
4092			struct rx_ring_info *rp = &np->rx_rings[i];
4093
4094			if (rx_vec & (1 << rp->rx_channel)) {
4095				int r = niu_rx_error(np, rp);
4096				if (r) {
4097					err = r;
4098				} else {
4099					if (!v0)
4100						nw64(RX_DMA_CTL_STAT(rp->rx_channel),
4101						     RX_DMA_CTL_STAT_MEX);
4102				}
4103			}
4104		}
4105	}
4106	if (v1 & 0x7fffffff00000000ULL) {
4107		u32 tx_vec = (v1 >> 32) & 0x7fffffff;
4108
4109		for (i = 0; i < np->num_tx_rings; i++) {
4110			struct tx_ring_info *rp = &np->tx_rings[i];
4111
4112			if (tx_vec & (1 << rp->tx_channel)) {
4113				int r = niu_tx_error(np, rp);
4114				if (r)
4115					err = r;
4116			}
4117		}
4118	}
4119	if ((v0 | v1) & 0x8000000000000000ULL) {
4120		int r = niu_mif_interrupt(np);
4121		if (r)
4122			err = r;
4123	}
4124	if (v2) {
4125		if (v2 & 0x01ef) {
4126			int r = niu_mac_interrupt(np);
4127			if (r)
4128				err = r;
4129		}
4130		if (v2 & 0x0210) {
4131			int r = niu_device_error(np);
4132			if (r)
4133				err = r;
4134		}
4135	}
4136
4137	if (err)
4138		niu_enable_interrupts(np, 0);
4139
4140	return err;
4141}
4142
4143static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
4144			    int ldn)
4145{
4146	struct rxdma_mailbox *mbox = rp->mbox;
4147	u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
4148
4149	stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
4150		      RX_DMA_CTL_STAT_RCRTO);
4151	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
4152
4153	netif_printk(np, intr, KERN_DEBUG, np->dev,
4154		     "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
4155}
4156
4157static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
4158			    int ldn)
4159{
4160	rp->tx_cs = nr64(TX_CS(rp->tx_channel));
4161
4162	netif_printk(np, intr, KERN_DEBUG, np->dev,
4163		     "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
4164}
4165
4166static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
4167{
4168	struct niu_parent *parent = np->parent;
4169	u32 rx_vec, tx_vec;
4170	int i;
4171
4172	tx_vec = (v0 >> 32);
4173	rx_vec = (v0 & 0xffffffff);
4174
4175	for (i = 0; i < np->num_rx_rings; i++) {
4176		struct rx_ring_info *rp = &np->rx_rings[i];
4177		int ldn = LDN_RXDMA(rp->rx_channel);
4178
4179		if (parent->ldg_map[ldn] != ldg)
4180			continue;
4181
4182		nw64(LD_IM0(ldn), LD_IM0_MASK);
4183		if (rx_vec & (1 << rp->rx_channel))
4184			niu_rxchan_intr(np, rp, ldn);
4185	}
4186
4187	for (i = 0; i < np->num_tx_rings; i++) {
4188		struct tx_ring_info *rp = &np->tx_rings[i];
4189		int ldn = LDN_TXDMA(rp->tx_channel);
4190
4191		if (parent->ldg_map[ldn] != ldg)
4192			continue;
4193
4194		nw64(LD_IM0(ldn), LD_IM0_MASK);
4195		if (tx_vec & (1 << rp->tx_channel))
4196			niu_txchan_intr(np, rp, ldn);
4197	}
4198}
4199
4200static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
4201			      u64 v0, u64 v1, u64 v2)
4202{
4203	if (likely(napi_schedule_prep(&lp->napi))) {
4204		lp->v0 = v0;
4205		lp->v1 = v1;
4206		lp->v2 = v2;
4207		__niu_fastpath_interrupt(np, lp->ldg_num, v0);
4208		__napi_schedule(&lp->napi);
4209	}
4210}
4211
4212static irqreturn_t niu_interrupt(int irq, void *dev_id)
4213{
4214	struct niu_ldg *lp = dev_id;
4215	struct niu *np = lp->np;
4216	int ldg = lp->ldg_num;
4217	unsigned long flags;
4218	u64 v0, v1, v2;
4219
4220	if (netif_msg_intr(np))
4221		printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
4222		       __func__, lp, ldg);
4223
4224	spin_lock_irqsave(&np->lock, flags);
4225
4226	v0 = nr64(LDSV0(ldg));
4227	v1 = nr64(LDSV1(ldg));
4228	v2 = nr64(LDSV2(ldg));
4229
4230	if (netif_msg_intr(np))
4231		pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
4232		       (unsigned long long) v0,
4233		       (unsigned long long) v1,
4234		       (unsigned long long) v2);
4235
4236	if (unlikely(!v0 && !v1 && !v2)) {
4237		spin_unlock_irqrestore(&np->lock, flags);
4238		return IRQ_NONE;
4239	}
4240
4241	if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
4242		int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
4243		if (err)
4244			goto out;
4245	}
4246	if (likely(v0 & ~((u64)1 << LDN_MIF)))
4247		niu_schedule_napi(np, lp, v0, v1, v2);
4248	else
4249		niu_ldg_rearm(np, lp, 1);
4250out:
4251	spin_unlock_irqrestore(&np->lock, flags);
4252
4253	return IRQ_HANDLED;
4254}
4255
4256static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
4257{
4258	if (rp->mbox) {
4259		np->ops->free_coherent(np->device,
4260				       sizeof(struct rxdma_mailbox),
4261				       rp->mbox, rp->mbox_dma);
4262		rp->mbox = NULL;
4263	}
4264	if (rp->rcr) {
4265		np->ops->free_coherent(np->device,
4266				       MAX_RCR_RING_SIZE * sizeof(__le64),
4267				       rp->rcr, rp->rcr_dma);
4268		rp->rcr = NULL;
4269		rp->rcr_table_size = 0;
4270		rp->rcr_index = 0;
4271	}
4272	if (rp->rbr) {
4273		niu_rbr_free(np, rp);
4274
4275		np->ops->free_coherent(np->device,
4276				       MAX_RBR_RING_SIZE * sizeof(__le32),
4277				       rp->rbr, rp->rbr_dma);
4278		rp->rbr = NULL;
4279		rp->rbr_table_size = 0;
4280		rp->rbr_index = 0;
4281	}
4282	kfree(rp->rxhash);
4283	rp->rxhash = NULL;
4284}
4285
4286static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
4287{
4288	if (rp->mbox) {
4289		np->ops->free_coherent(np->device,
4290				       sizeof(struct txdma_mailbox),
4291				       rp->mbox, rp->mbox_dma);
4292		rp->mbox = NULL;
4293	}
4294	if (rp->descr) {
4295		int i;
4296
4297		for (i = 0; i < MAX_TX_RING_SIZE; i++) {
4298			if (rp->tx_buffs[i].skb)
4299				(void) release_tx_packet(np, rp, i);
4300		}
4301
4302		np->ops->free_coherent(np->device,
4303				       MAX_TX_RING_SIZE * sizeof(__le64),
4304				       rp->descr, rp->descr_dma);
4305		rp->descr = NULL;
4306		rp->pending = 0;
4307		rp->prod = 0;
4308		rp->cons = 0;
4309		rp->wrap_bit = 0;
4310	}
4311}
4312
4313static void niu_free_channels(struct niu *np)
4314{
4315	int i;
4316
4317	if (np->rx_rings) {
4318		for (i = 0; i < np->num_rx_rings; i++) {
4319			struct rx_ring_info *rp = &np->rx_rings[i];
4320
4321			niu_free_rx_ring_info(np, rp);
4322		}
4323		kfree(np->rx_rings);
4324		np->rx_rings = NULL;
4325		np->num_rx_rings = 0;
4326	}
4327
4328	if (np->tx_rings) {
4329		for (i = 0; i < np->num_tx_rings; i++) {
4330			struct tx_ring_info *rp = &np->tx_rings[i];
4331
4332			niu_free_tx_ring_info(np, rp);
4333		}
4334		kfree(np->tx_rings);
4335		np->tx_rings = NULL;
4336		np->num_tx_rings = 0;
4337	}
4338}
4339
4340static int niu_alloc_rx_ring_info(struct niu *np,
4341				  struct rx_ring_info *rp)
4342{
4343	BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
4344
4345	rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
4346			     GFP_KERNEL);
4347	if (!rp->rxhash)
4348		return -ENOMEM;
4349
4350	rp->mbox = np->ops->alloc_coherent(np->device,
4351					   sizeof(struct rxdma_mailbox),
4352					   &rp->mbox_dma, GFP_KERNEL);
4353	if (!rp->mbox)
4354		return -ENOMEM;
4355	if ((unsigned long)rp->mbox & (64UL - 1)) {
4356		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
4357			   rp->mbox);
4358		return -EINVAL;
4359	}
4360
4361	rp->rcr = np->ops->alloc_coherent(np->device,
4362					  MAX_RCR_RING_SIZE * sizeof(__le64),
4363					  &rp->rcr_dma, GFP_KERNEL);
4364	if (!rp->rcr)
4365		return -ENOMEM;
4366	if ((unsigned long)rp->rcr & (64UL - 1)) {
4367		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
4368			   rp->rcr);
4369		return -EINVAL;
4370	}
4371	rp->rcr_table_size = MAX_RCR_RING_SIZE;
4372	rp->rcr_index = 0;
4373
4374	rp->rbr = np->ops->alloc_coherent(np->device,
4375					  MAX_RBR_RING_SIZE * sizeof(__le32),
4376					  &rp->rbr_dma, GFP_KERNEL);
4377	if (!rp->rbr)
4378		return -ENOMEM;
4379	if ((unsigned long)rp->rbr & (64UL - 1)) {
4380		netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
4381			   rp->rbr);
4382		return -EINVAL;
4383	}
4384	rp->rbr_table_size = MAX_RBR_RING_SIZE;
4385	rp->rbr_index = 0;
4386	rp->rbr_pending = 0;
4387
4388	return 0;
4389}
4390
4391static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
4392{
4393	int mtu = np->dev->mtu;
4394
4395	/* These values are recommended by the HW designers for fair
4396	 * utilization of DRR amongst the rings.
4397	 */
4398	rp->max_burst = mtu + 32;
4399	if (rp->max_burst > 4096)
4400		rp->max_burst = 4096;
4401}
4402
4403static int niu_alloc_tx_ring_info(struct niu *np,
4404				  struct tx_ring_info *rp)
4405{
4406	BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
4407
4408	rp->mbox = np->ops->alloc_coherent(np->device,
4409					   sizeof(struct txdma_mailbox),
4410					   &rp->mbox_dma, GFP_KERNEL);
4411	if (!rp->mbox)
4412		return -ENOMEM;
4413	if ((unsigned long)rp->mbox & (64UL - 1)) {
4414		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
4415			   rp->mbox);
4416		return -EINVAL;
4417	}
4418
4419	rp->descr = np->ops->alloc_coherent(np->device,
4420					    MAX_TX_RING_SIZE * sizeof(__le64),
4421					    &rp->descr_dma, GFP_KERNEL);
4422	if (!rp->descr)
4423		return -ENOMEM;
4424	if ((unsigned long)rp->descr & (64UL - 1)) {
4425		netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
4426			   rp->descr);
4427		return -EINVAL;
4428	}
4429
4430	rp->pending = MAX_TX_RING_SIZE;
4431	rp->prod = 0;
4432	rp->cons = 0;
4433	rp->wrap_bit = 0;
4434
4435	/* XXX make these configurable... XXX */
4436	rp->mark_freq = rp->pending / 4;
4437
4438	niu_set_max_burst(np, rp);
4439
4440	return 0;
4441}
4442
4443static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
4444{
4445	u16 bss;
4446
4447	bss = min(PAGE_SHIFT, 15);
4448
4449	rp->rbr_block_size = 1 << bss;
4450	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
4451
4452	rp->rbr_sizes[0] = 256;
4453	rp->rbr_sizes[1] = 1024;
4454	if (np->dev->mtu > ETH_DATA_LEN) {
4455		switch (PAGE_SIZE) {
4456		case 4 * 1024:
4457			rp->rbr_sizes[2] = 4096;
4458			break;
4459
4460		default:
4461			rp->rbr_sizes[2] = 8192;
4462			break;
4463		}
4464	} else {
4465		rp->rbr_sizes[2] = 2048;
4466	}
4467	rp->rbr_sizes[3] = rp->rbr_block_size;
4468}
4469
4470static int niu_alloc_channels(struct niu *np)
4471{
4472	struct niu_parent *parent = np->parent;
4473	int first_rx_channel, first_tx_channel;
4474	int num_rx_rings, num_tx_rings;
4475	struct rx_ring_info *rx_rings;
4476	struct tx_ring_info *tx_rings;
4477	int i, port, err;
4478
4479	port = np->port;
4480	first_rx_channel = first_tx_channel = 0;
4481	for (i = 0; i < port; i++) {
4482		first_rx_channel += parent->rxchan_per_port[i];
4483		first_tx_channel += parent->txchan_per_port[i];
4484	}
4485
4486	num_rx_rings = parent->rxchan_per_port[port];
4487	num_tx_rings = parent->txchan_per_port[port];
4488
4489	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
4490			   GFP_KERNEL);
4491	err = -ENOMEM;
4492	if (!rx_rings)
4493		goto out_err;
4494
4495	np->num_rx_rings = num_rx_rings;
4496	smp_wmb();
4497	np->rx_rings = rx_rings;
4498
4499	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
4500
4501	for (i = 0; i < np->num_rx_rings; i++) {
4502		struct rx_ring_info *rp = &np->rx_rings[i];
4503
4504		rp->np = np;
4505		rp->rx_channel = first_rx_channel + i;
4506
4507		err = niu_alloc_rx_ring_info(np, rp);
4508		if (err)
4509			goto out_err;
4510
4511		niu_size_rbr(np, rp);
4512
4513		/* XXX better defaults, configurable, etc... XXX */
4514		rp->nonsyn_window = 64;
4515		rp->nonsyn_threshold = rp->rcr_table_size - 64;
4516		rp->syn_window = 64;
4517		rp->syn_threshold = rp->rcr_table_size - 64;
4518		rp->rcr_pkt_threshold = 16;
4519		rp->rcr_timeout = 8;
4520		rp->rbr_kick_thresh = RBR_REFILL_MIN;
4521		if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
4522			rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
4523
4524		err = niu_rbr_fill(np, rp, GFP_KERNEL);
4525		if (err)
4526			return err;
4527	}
4528
4529	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
4530			   GFP_KERNEL);
4531	err = -ENOMEM;
4532	if (!tx_rings)
4533		goto out_err;
4534
4535	np->num_tx_rings = num_tx_rings;
4536	smp_wmb();
4537	np->tx_rings = tx_rings;
4538
4539	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
4540
4541	for (i = 0; i < np->num_tx_rings; i++) {
4542		struct tx_ring_info *rp = &np->tx_rings[i];
4543
4544		rp->np = np;
4545		rp->tx_channel = first_tx_channel + i;
4546
4547		err = niu_alloc_tx_ring_info(np, rp);
4548		if (err)
4549			goto out_err;
4550	}
4551
4552	return 0;
4553
4554out_err:
4555	niu_free_channels(np);
4556	return err;
4557}
4558
4559static int niu_tx_cs_sng_poll(struct niu *np, int channel)
4560{
4561	int limit = 1000;
4562
4563	while (--limit > 0) {
4564		u64 val = nr64(TX_CS(channel));
4565		if (val & TX_CS_SNG_STATE)
4566			return 0;
4567	}
4568	return -ENODEV;
4569}
4570
4571static int niu_tx_channel_stop(struct niu *np, int channel)
4572{
4573	u64 val = nr64(TX_CS(channel));
4574
4575	val |= TX_CS_STOP_N_GO;
4576	nw64(TX_CS(channel), val);
4577
4578	return niu_tx_cs_sng_poll(np, channel);
4579}
4580
4581static int niu_tx_cs_reset_poll(struct niu *np, int channel)
4582{
4583	int limit = 1000;
4584
4585	while (--limit > 0) {
4586		u64 val = nr64(TX_CS(channel));
4587		if (!(val & TX_CS_RST))
4588			return 0;
4589	}
4590	return -ENODEV;
4591}
4592
4593static int niu_tx_channel_reset(struct niu *np, int channel)
4594{
4595	u64 val = nr64(TX_CS(channel));
4596	int err;
4597
4598	val |= TX_CS_RST;
4599	nw64(TX_CS(channel), val);
4600
4601	err = niu_tx_cs_reset_poll(np, channel);
4602	if (!err)
4603		nw64(TX_RING_KICK(channel), 0);
4604
4605	return err;
4606}
4607
4608static int niu_tx_channel_lpage_init(struct niu *np, int channel)
4609{
4610	u64 val;
4611
4612	nw64(TX_LOG_MASK1(channel), 0);
4613	nw64(TX_LOG_VAL1(channel), 0);
4614	nw64(TX_LOG_MASK2(channel), 0);
4615	nw64(TX_LOG_VAL2(channel), 0);
4616	nw64(TX_LOG_PAGE_RELO1(channel), 0);
4617	nw64(TX_LOG_PAGE_RELO2(channel), 0);
4618	nw64(TX_LOG_PAGE_HDL(channel), 0);
4619
4620	val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
4621	val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
4622	nw64(TX_LOG_PAGE_VLD(channel), val);
4623
4624	/* XXX TXDMA 32bit mode? XXX */
4625
4626	return 0;
4627}
4628
4629static void niu_txc_enable_port(struct niu *np, int on)
4630{
4631	unsigned long flags;
4632	u64 val, mask;
4633
4634	niu_lock_parent(np, flags);
4635	val = nr64(TXC_CONTROL);
4636	mask = (u64)1 << np->port;
4637	if (on) {
4638		val |= TXC_CONTROL_ENABLE | mask;
4639	} else {
4640		val &= ~mask;
4641		if ((val & ~TXC_CONTROL_ENABLE) == 0)
4642			val &= ~TXC_CONTROL_ENABLE;
4643	}
4644	nw64(TXC_CONTROL, val);
4645	niu_unlock_parent(np, flags);
4646}
4647
4648static void niu_txc_set_imask(struct niu *np, u64 imask)
4649{
4650	unsigned long flags;
4651	u64 val;
4652
4653	niu_lock_parent(np, flags);
4654	val = nr64(TXC_INT_MASK);
4655	val &= ~TXC_INT_MASK_VAL(np->port);
4656	val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
4657	niu_unlock_parent(np, flags);
4658}
4659
4660static void niu_txc_port_dma_enable(struct niu *np, int on)
4661{
4662	u64 val = 0;
4663
4664	if (on) {
4665		int i;
4666
4667		for (i = 0; i < np->num_tx_rings; i++)
4668			val |= (1 << np->tx_rings[i].tx_channel);
4669	}
4670	nw64(TXC_PORT_DMA(np->port), val);
4671}
4672
4673static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
4674{
4675	int err, channel = rp->tx_channel;
4676	u64 val, ring_len;
4677
4678	err = niu_tx_channel_stop(np, channel);
4679	if (err)
4680		return err;
4681
4682	err = niu_tx_channel_reset(np, channel);
4683	if (err)
4684		return err;
4685
4686	err = niu_tx_channel_lpage_init(np, channel);
4687	if (err)
4688		return err;
4689
4690	nw64(TXC_DMA_MAX(channel), rp->max_burst);
4691	nw64(TX_ENT_MSK(channel), 0);
4692
4693	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
4694			      TX_RNG_CFIG_STADDR)) {
4695		netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
4696			   channel, (unsigned long long)rp->descr_dma);
4697		return -EINVAL;
4698	}
4699
4700	/* The length field in TX_RNG_CFIG is measured in 64-byte
4701	 * blocks.  rp->pending is the number of TX descriptors in
4702	 * our ring, 8 bytes each, thus we divide by 8 bytes more
4703	 * to get the proper value the chip wants.
4704	 */
4705	ring_len = (rp->pending / 8);
4706
4707	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
4708	       rp->descr_dma);
4709	nw64(TX_RNG_CFIG(channel), val);
4710
4711	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
4712	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
4713		netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
4714			    channel, (unsigned long long)rp->mbox_dma);
4715		return -EINVAL;
4716	}
4717	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
4718	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
4719
4720	nw64(TX_CS(channel), 0);
4721
4722	rp->last_pkt_cnt = 0;
4723
4724	return 0;
4725}
4726
4727static void niu_init_rdc_groups(struct niu *np)
4728{
4729	struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
4730	int i, first_table_num = tp->first_table_num;
4731
4732	for (i = 0; i < tp->num_tables; i++) {
4733		struct rdc_table *tbl = &tp->tables[i];
4734		int this_table = first_table_num + i;
4735		int slot;
4736
4737		for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
4738			nw64(RDC_TBL(this_table, slot),
4739			     tbl->rxdma_channel[slot]);
4740	}
4741
4742	nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
4743}
4744
4745static void niu_init_drr_weight(struct niu *np)
4746{
4747	int type = phy_decode(np->parent->port_phy, np->port);
4748	u64 val;
4749
4750	switch (type) {
4751	case PORT_TYPE_10G:
4752		val = PT_DRR_WEIGHT_DEFAULT_10G;
4753		break;
4754
4755	case PORT_TYPE_1G:
4756	default:
4757		val = PT_DRR_WEIGHT_DEFAULT_1G;
4758		break;
4759	}
4760	nw64(PT_DRR_WT(np->port), val);
4761}
4762
4763static int niu_init_hostinfo(struct niu *np)
4764{
4765	struct niu_parent *parent = np->parent;
4766	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
4767	int i, err, num_alt = niu_num_alt_addr(np);
4768	int first_rdc_table = tp->first_table_num;
4769
4770	err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
4771	if (err)
4772		return err;
4773
4774	err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
4775	if (err)
4776		return err;
4777
4778	for (i = 0; i < num_alt; i++) {
4779		err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
4780		if (err)
4781			return err;
4782	}
4783
4784	return 0;
4785}
4786
4787static int niu_rx_channel_reset(struct niu *np, int channel)
4788{
4789	return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
4790				      RXDMA_CFIG1_RST, 1000, 10,
4791				      "RXDMA_CFIG1");
4792}
4793
4794static int niu_rx_channel_lpage_init(struct niu *np, int channel)
4795{
4796	u64 val;
4797
4798	nw64(RX_LOG_MASK1(channel), 0);
4799	nw64(RX_LOG_VAL1(channel), 0);
4800	nw64(RX_LOG_MASK2(channel), 0);
4801	nw64(RX_LOG_VAL2(channel), 0);
4802	nw64(RX_LOG_PAGE_RELO1(channel), 0);
4803	nw64(RX_LOG_PAGE_RELO2(channel), 0);
4804	nw64(RX_LOG_PAGE_HDL(channel), 0);
4805
4806	val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
4807	val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
4808	nw64(RX_LOG_PAGE_VLD(channel), val);
4809
4810	return 0;
4811}
4812
4813static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
4814{
4815	u64 val;
4816
4817	val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
4818	       ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
4819	       ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
4820	       ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
4821	nw64(RDC_RED_PARA(rp->rx_channel), val);
4822}
4823
4824static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
4825{
4826	u64 val = 0;
4827
4828	*ret = 0;
4829	switch (rp->rbr_block_size) {
4830	case 4 * 1024:
4831		val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
4832		break;
4833	case 8 * 1024:
4834		val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
4835		break;
4836	case 16 * 1024:
4837		val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
4838		break;
4839	case 32 * 1024:
4840		val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
4841		break;
4842	default:
4843		return -EINVAL;
4844	}
4845	val |= RBR_CFIG_B_VLD2;
4846	switch (rp->rbr_sizes[2]) {
4847	case 2 * 1024:
4848		val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
4849		break;
4850	case 4 * 1024:
4851		val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
4852		break;
4853	case 8 * 1024:
4854		val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
4855		break;
4856	case 16 * 1024:
4857		val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
4858		break;
4859
4860	default:
4861		return -EINVAL;
4862	}
4863	val |= RBR_CFIG_B_VLD1;
4864	switch (rp->rbr_sizes[1]) {
4865	case 1 * 1024:
4866		val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
4867		break;
4868	case 2 * 1024:
4869		val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
4870		break;
4871	case 4 * 1024:
4872		val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
4873		break;
4874	case 8 * 1024:
4875		val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
4876		break;
4877
4878	default:
4879		return -EINVAL;
4880	}
4881	val |= RBR_CFIG_B_VLD0;
4882	switch (rp->rbr_sizes[0]) {
4883	case 256:
4884		val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
4885		break;
4886	case 512:
4887		val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
4888		break;
4889	case 1 * 1024:
4890		val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
4891		break;
4892	case 2 * 1024:
4893		val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
4894		break;
4895
4896	default:
4897		return -EINVAL;
4898	}
4899
4900	*ret = val;
4901	return 0;
4902}
4903
4904static int niu_enable_rx_channel(struct niu *np, int channel, int on)
4905{
4906	u64 val = nr64(RXDMA_CFIG1(channel));
4907	int limit;
4908
4909	if (on)
4910		val |= RXDMA_CFIG1_EN;
4911	else
4912		val &= ~RXDMA_CFIG1_EN;
4913	nw64(RXDMA_CFIG1(channel), val);
4914
4915	limit = 1000;
4916	while (--limit > 0) {
4917		if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
4918			break;
4919		udelay(10);
4920	}
4921	if (limit <= 0)
4922		return -ENODEV;
4923	return 0;
4924}
4925
4926static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
4927{
4928	int err, channel = rp->rx_channel;
4929	u64 val;
4930
4931	err = niu_rx_channel_reset(np, channel);
4932	if (err)
4933		return err;
4934
4935	err = niu_rx_channel_lpage_init(np, channel);
4936	if (err)
4937		return err;
4938
4939	niu_rx_channel_wred_init(np, rp);
4940
4941	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
4942	nw64(RX_DMA_CTL_STAT(channel),
4943	     (RX_DMA_CTL_STAT_MEX |
4944	      RX_DMA_CTL_STAT_RCRTHRES |
4945	      RX_DMA_CTL_STAT_RCRTO |
4946	      RX_DMA_CTL_STAT_RBR_EMPTY));
4947	nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
4948	nw64(RXDMA_CFIG2(channel),
4949	     ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
4950	      RXDMA_CFIG2_FULL_HDR));
4951	nw64(RBR_CFIG_A(channel),
4952	     ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
4953	     (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
4954	err = niu_compute_rbr_cfig_b(rp, &val);
4955	if (err)
4956		return err;
4957	nw64(RBR_CFIG_B(channel), val);
4958	nw64(RCRCFIG_A(channel),
4959	     ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
4960	     (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
4961	nw64(RCRCFIG_B(channel),
4962	     ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
4963	     RCRCFIG_B_ENTOUT |
4964	     ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
4965
4966	err = niu_enable_rx_channel(np, channel, 1);
4967	if (err)
4968		return err;
4969
4970	nw64(RBR_KICK(channel), rp->rbr_index);
4971
4972	val = nr64(RX_DMA_CTL_STAT(channel));
4973	val |= RX_DMA_CTL_STAT_RBR_EMPTY;
4974	nw64(RX_DMA_CTL_STAT(channel), val);
4975
4976	return 0;
4977}
4978
4979static int niu_init_rx_channels(struct niu *np)
4980{
4981	unsigned long flags;
4982	u64 seed = jiffies_64;
4983	int err, i;
4984
4985	niu_lock_parent(np, flags);
4986	nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
4987	nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
4988	niu_unlock_parent(np, flags);
4989
4990	/* XXX RXDMA 32bit mode? XXX */
4991
4992	niu_init_rdc_groups(np);
4993	niu_init_drr_weight(np);
4994
4995	err = niu_init_hostinfo(np);
4996	if (err)
4997		return err;
4998
4999	for (i = 0; i < np->num_rx_rings; i++) {
5000		struct rx_ring_info *rp = &np->rx_rings[i];
5001
5002		err = niu_init_one_rx_channel(np, rp);
5003		if (err)
5004			return err;
5005	}
5006
5007	return 0;
5008}
5009
5010static int niu_set_ip_frag_rule(struct niu *np)
5011{
5012	struct niu_parent *parent = np->parent;
5013	struct niu_classifier *cp = &np->clas;
5014	struct niu_tcam_entry *tp;
5015	int index, err;
5016
5017	index = cp->tcam_top;
5018	tp = &parent->tcam[index];
5019
5020	/* Note that the noport bit is the same in both ipv4 and
5021	 * ipv6 format TCAM entries.
5022	 */
5023	memset(tp, 0, sizeof(*tp));
5024	tp->key[1] = TCAM_V4KEY1_NOPORT;
5025	tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
5026	tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
5027			  ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
5028	err = tcam_write(np, index, tp->key, tp->key_mask);
5029	if (err)
5030		return err;
5031	err = tcam_assoc_write(np, index, tp->assoc_data);
5032	if (err)
5033		return err;
5034	tp->valid = 1;
5035	cp->tcam_valid_entries++;
5036
5037	return 0;
5038}
5039
5040static int niu_init_classifier_hw(struct niu *np)
5041{
5042	struct niu_parent *parent = np->parent;
5043	struct niu_classifier *cp = &np->clas;
5044	int i, err;
5045
5046	nw64(H1POLY, cp->h1_init);
5047	nw64(H2POLY, cp->h2_init);
5048
5049	err = niu_init_hostinfo(np);
5050	if (err)
5051		return err;
5052
5053	for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
5054		struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
5055
5056		vlan_tbl_write(np, i, np->port,
5057			       vp->vlan_pref, vp->rdc_num);
5058	}
5059
5060	for (i = 0; i < cp->num_alt_mac_mappings; i++) {
5061		struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
5062
5063		err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
5064						ap->rdc_num, ap->mac_pref);
5065		if (err)
5066			return err;
5067	}
5068
5069	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
5070		int index = i - CLASS_CODE_USER_PROG1;
5071
5072		err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
5073		if (err)
5074			return err;
5075		err = niu_set_flow_key(np, i, parent->flow_key[index]);
5076		if (err)
5077			return err;
5078	}
5079
5080	err = niu_set_ip_frag_rule(np);
5081	if (err)
5082		return err;
5083
5084	tcam_enable(np, 1);
5085
5086	return 0;
5087}
5088
5089static int niu_zcp_write(struct niu *np, int index, u64 *data)
5090{
5091	nw64(ZCP_RAM_DATA0, data[0]);
5092	nw64(ZCP_RAM_DATA1, data[1]);
5093	nw64(ZCP_RAM_DATA2, data[2]);
5094	nw64(ZCP_RAM_DATA3, data[3]);
5095	nw64(ZCP_RAM_DATA4, data[4]);
5096	nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
5097	nw64(ZCP_RAM_ACC,
5098	     (ZCP_RAM_ACC_WRITE |
5099	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
5100	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
5101
5102	return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5103				   1000, 100);
5104}
5105
5106static int niu_zcp_read(struct niu *np, int index, u64 *data)
5107{
5108	int err;
5109
5110	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5111				  1000, 100);
5112	if (err) {
5113		netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
5114			   (unsigned long long)nr64(ZCP_RAM_ACC));
5115		return err;
5116	}
5117
5118	nw64(ZCP_RAM_ACC,
5119	     (ZCP_RAM_ACC_READ |
5120	      (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
5121	      (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
5122
5123	err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
5124				  1000, 100);
5125	if (err) {
5126		netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
5127			   (unsigned long long)nr64(ZCP_RAM_ACC));
5128		return err;
5129	}
5130
5131	data[0] = nr64(ZCP_RAM_DATA0);
5132	data[1] = nr64(ZCP_RAM_DATA1);
5133	data[2] = nr64(ZCP_RAM_DATA2);
5134	data[3] = nr64(ZCP_RAM_DATA3);
5135	data[4] = nr64(ZCP_RAM_DATA4);
5136
5137	return 0;
5138}
5139
5140static void niu_zcp_cfifo_reset(struct niu *np)
5141{
5142	u64 val = nr64(RESET_CFIFO);
5143
5144	val |= RESET_CFIFO_RST(np->port);
5145	nw64(RESET_CFIFO, val);
5146	udelay(10);
5147
5148	val &= ~RESET_CFIFO_RST(np->port);
5149	nw64(RESET_CFIFO, val);
5150}
5151
5152static int niu_init_zcp(struct niu *np)
5153{
5154	u64 data[5], rbuf[5];
5155	int i, max, err;
5156
5157	if (np->parent->plat_type != PLAT_TYPE_NIU) {
5158		if (np->port == 0 || np->port == 1)
5159			max = ATLAS_P0_P1_CFIFO_ENTRIES;
5160		else
5161			max = ATLAS_P2_P3_CFIFO_ENTRIES;
5162	} else
5163		max = NIU_CFIFO_ENTRIES;
5164
5165	data[0] = 0;
5166	data[1] = 0;
5167	data[2] = 0;
5168	data[3] = 0;
5169	data[4] = 0;
5170
5171	for (i = 0; i < max; i++) {
5172		err = niu_zcp_write(np, i, data);
5173		if (err)
5174			return err;
5175		err = niu_zcp_read(np, i, rbuf);
5176		if (err)
5177			return err;
5178	}
5179
5180	niu_zcp_cfifo_reset(np);
5181	nw64(CFIFO_ECC(np->port), 0);
5182	nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
5183	(void) nr64(ZCP_INT_STAT);
5184	nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
5185
5186	return 0;
5187}
5188
5189static void niu_ipp_write(struct niu *np, int index, u64 *data)
5190{
5191	u64 val = nr64_ipp(IPP_CFIG);
5192
5193	nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
5194	nw64_ipp(IPP_DFIFO_WR_PTR, index);
5195	nw64_ipp(IPP_DFIFO_WR0, data[0]);
5196	nw64_ipp(IPP_DFIFO_WR1, data[1]);
5197	nw64_ipp(IPP_DFIFO_WR2, data[2]);
5198	nw64_ipp(IPP_DFIFO_WR3, data[3]);
5199	nw64_ipp(IPP_DFIFO_WR4, data[4]);
5200	nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
5201}
5202
5203static void niu_ipp_read(struct niu *np, int index, u64 *data)
5204{
5205	nw64_ipp(IPP_DFIFO_RD_PTR, index);
5206	data[0] = nr64_ipp(IPP_DFIFO_RD0);
5207	data[1] = nr64_ipp(IPP_DFIFO_RD1);
5208	data[2] = nr64_ipp(IPP_DFIFO_RD2);
5209	data[3] = nr64_ipp(IPP_DFIFO_RD3);
5210	data[4] = nr64_ipp(IPP_DFIFO_RD4);
5211}
5212
5213static int niu_ipp_reset(struct niu *np)
5214{
5215	return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
5216					  1000, 100, "IPP_CFIG");
5217}
5218
5219static int niu_init_ipp(struct niu *np)
5220{
5221	u64 data[5], rbuf[5], val;
5222	int i, max, err;
5223
5224	if (np->parent->plat_type != PLAT_TYPE_NIU) {
5225		if (np->port == 0 || np->port == 1)
5226			max = ATLAS_P0_P1_DFIFO_ENTRIES;
5227		else
5228			max = ATLAS_P2_P3_DFIFO_ENTRIES;
5229	} else
5230		max = NIU_DFIFO_ENTRIES;
5231
5232	data[0] = 0;
5233	data[1] = 0;
5234	data[2] = 0;
5235	data[3] = 0;
5236	data[4] = 0;
5237
5238	for (i = 0; i < max; i++) {
5239		niu_ipp_write(np, i, data);
5240		niu_ipp_read(np, i, rbuf);
5241	}
5242
5243	(void) nr64_ipp(IPP_INT_STAT);
5244	(void) nr64_ipp(IPP_INT_STAT);
5245
5246	err = niu_ipp_reset(np);
5247	if (err)
5248		return err;
5249
5250	(void) nr64_ipp(IPP_PKT_DIS);
5251	(void) nr64_ipp(IPP_BAD_CS_CNT);
5252	(void) nr64_ipp(IPP_ECC);
5253
5254	(void) nr64_ipp(IPP_INT_STAT);
5255
5256	nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
5257
5258	val = nr64_ipp(IPP_CFIG);
5259	val &= ~IPP_CFIG_IP_MAX_PKT;
5260	val |= (IPP_CFIG_IPP_ENABLE |
5261		IPP_CFIG_DFIFO_ECC_EN |
5262		IPP_CFIG_DROP_BAD_CRC |
5263		IPP_CFIG_CKSUM_EN |
5264		(0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
5265	nw64_ipp(IPP_CFIG, val);
5266
5267	return 0;
5268}
5269
5270static void niu_handle_led(struct niu *np, int status)
5271{
5272	u64 val;
5273	val = nr64_mac(XMAC_CONFIG);
5274
5275	if ((np->flags & NIU_FLAGS_10G) != 0 &&
5276	    (np->flags & NIU_FLAGS_FIBER) != 0) {
5277		if (status) {
5278			val |= XMAC_CONFIG_LED_POLARITY;
5279			val &= ~XMAC_CONFIG_FORCE_LED_ON;
5280		} else {
5281			val |= XMAC_CONFIG_FORCE_LED_ON;
5282			val &= ~XMAC_CONFIG_LED_POLARITY;
5283		}
5284	}
5285
5286	nw64_mac(XMAC_CONFIG, val);
5287}
5288
5289static void niu_init_xif_xmac(struct niu *np)
5290{
5291	struct niu_link_config *lp = &np->link_config;
5292	u64 val;
5293
5294	if (np->flags & NIU_FLAGS_XCVR_SERDES) {
5295		val = nr64(MIF_CONFIG);
5296		val |= MIF_CONFIG_ATCA_GE;
5297		nw64(MIF_CONFIG, val);
5298	}
5299
5300	val = nr64_mac(XMAC_CONFIG);
5301	val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5302
5303	val |= XMAC_CONFIG_TX_OUTPUT_EN;
5304
5305	if (lp->loopback_mode == LOOPBACK_MAC) {
5306		val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
5307		val |= XMAC_CONFIG_LOOPBACK;
5308	} else {
5309		val &= ~XMAC_CONFIG_LOOPBACK;
5310	}
5311
5312	if (np->flags & NIU_FLAGS_10G) {
5313		val &= ~XMAC_CONFIG_LFS_DISABLE;
5314	} else {
5315		val |= XMAC_CONFIG_LFS_DISABLE;
5316		if (!(np->flags & NIU_FLAGS_FIBER) &&
5317		    !(np->flags & NIU_FLAGS_XCVR_SERDES))
5318			val |= XMAC_CONFIG_1G_PCS_BYPASS;
5319		else
5320			val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
5321	}
5322
5323	val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5324
5325	if (lp->active_speed == SPEED_100)
5326		val |= XMAC_CONFIG_SEL_CLK_25MHZ;
5327	else
5328		val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
5329
5330	nw64_mac(XMAC_CONFIG, val);
5331
5332	val = nr64_mac(XMAC_CONFIG);
5333	val &= ~XMAC_CONFIG_MODE_MASK;
5334	if (np->flags & NIU_FLAGS_10G) {
5335		val |= XMAC_CONFIG_MODE_XGMII;
5336	} else {
5337		if (lp->active_speed == SPEED_1000)
5338			val |= XMAC_CONFIG_MODE_GMII;
5339		else
5340			val |= XMAC_CONFIG_MODE_MII;
5341	}
5342
5343	nw64_mac(XMAC_CONFIG, val);
5344}
5345
5346static void niu_init_xif_bmac(struct niu *np)
5347{
5348	struct niu_link_config *lp = &np->link_config;
5349	u64 val;
5350
5351	val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
5352
5353	if (lp->loopback_mode == LOOPBACK_MAC)
5354		val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
5355	else
5356		val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
5357
5358	if (lp->active_speed == SPEED_1000)
5359		val |= BMAC_XIF_CONFIG_GMII_MODE;
5360	else
5361		val &= ~BMAC_XIF_CONFIG_GMII_MODE;
5362
5363	val &= ~(BMAC_XIF_CONFIG_LINK_LED |
5364		 BMAC_XIF_CONFIG_LED_POLARITY);
5365
5366	if (!(np->flags & NIU_FLAGS_10G) &&
5367	    !(np->flags & NIU_FLAGS_FIBER) &&
5368	    lp->active_speed == SPEED_100)
5369		val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
5370	else
5371		val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
5372
5373	nw64_mac(BMAC_XIF_CONFIG, val);
5374}
5375
5376static void niu_init_xif(struct niu *np)
5377{
5378	if (np->flags & NIU_FLAGS_XMAC)
5379		niu_init_xif_xmac(np);
5380	else
5381		niu_init_xif_bmac(np);
5382}
5383
5384static void niu_pcs_mii_reset(struct niu *np)
5385{
5386	int limit = 1000;
5387	u64 val = nr64_pcs(PCS_MII_CTL);
5388	val |= PCS_MII_CTL_RST;
5389	nw64_pcs(PCS_MII_CTL, val);
5390	while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
5391		udelay(100);
5392		val = nr64_pcs(PCS_MII_CTL);
5393	}
5394}
5395
5396static void niu_xpcs_reset(struct niu *np)
5397{
5398	int limit = 1000;
5399	u64 val = nr64_xpcs(XPCS_CONTROL1);
5400	val |= XPCS_CONTROL1_RESET;
5401	nw64_xpcs(XPCS_CONTROL1, val);
5402	while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
5403		udelay(100);
5404		val = nr64_xpcs(XPCS_CONTROL1);
5405	}
5406}
5407
5408static int niu_init_pcs(struct niu *np)
5409{
5410	struct niu_link_config *lp = &np->link_config;
5411	u64 val;
5412
5413	switch (np->flags & (NIU_FLAGS_10G |
5414			     NIU_FLAGS_FIBER |
5415			     NIU_FLAGS_XCVR_SERDES)) {
5416	case NIU_FLAGS_FIBER:
5417		/* 1G fiber */
5418		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5419		nw64_pcs(PCS_DPATH_MODE, 0);
5420		niu_pcs_mii_reset(np);
5421		break;
5422
5423	case NIU_FLAGS_10G:
5424	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
5425	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
5426		/* 10G SERDES */
5427		if (!(np->flags & NIU_FLAGS_XMAC))
5428			return -EINVAL;
5429
5430		/* 10G copper or fiber */
5431		val = nr64_mac(XMAC_CONFIG);
5432		val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
5433		nw64_mac(XMAC_CONFIG, val);
5434
5435		niu_xpcs_reset(np);
5436
5437		val = nr64_xpcs(XPCS_CONTROL1);
5438		if (lp->loopback_mode == LOOPBACK_PHY)
5439			val |= XPCS_CONTROL1_LOOPBACK;
5440		else
5441			val &= ~XPCS_CONTROL1_LOOPBACK;
5442		nw64_xpcs(XPCS_CONTROL1, val);
5443
5444		nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
5445		(void) nr64_xpcs(XPCS_SYMERR_CNT01);
5446		(void) nr64_xpcs(XPCS_SYMERR_CNT23);
5447		break;
5448
5449
5450	case NIU_FLAGS_XCVR_SERDES:
5451		/* 1G SERDES */
5452		niu_pcs_mii_reset(np);
5453		nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
5454		nw64_pcs(PCS_DPATH_MODE, 0);
5455		break;
5456
5457	case 0:
5458		/* 1G copper */
5459	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
5460		/* 1G RGMII FIBER */
5461		nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
5462		niu_pcs_mii_reset(np);
5463		break;
5464
5465	default:
5466		return -EINVAL;
5467	}
5468
5469	return 0;
5470}
5471
5472static int niu_reset_tx_xmac(struct niu *np)
5473{
5474	return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
5475					  (XTXMAC_SW_RST_REG_RS |
5476					   XTXMAC_SW_RST_SOFT_RST),
5477					  1000, 100, "XTXMAC_SW_RST");
5478}
5479
5480static int niu_reset_tx_bmac(struct niu *np)
5481{
5482	int limit;
5483
5484	nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
5485	limit = 1000;
5486	while (--limit >= 0) {
5487		if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
5488			break;
5489		udelay(100);
5490	}
5491	if (limit < 0) {
5492		dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
5493			np->port,
5494			(unsigned long long) nr64_mac(BTXMAC_SW_RST));
5495		return -ENODEV;
5496	}
5497
5498	return 0;
5499}
5500
5501static int niu_reset_tx_mac(struct niu *np)
5502{
5503	if (np->flags & NIU_FLAGS_XMAC)
5504		return niu_reset_tx_xmac(np);
5505	else
5506		return niu_reset_tx_bmac(np);
5507}
5508
5509static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
5510{
5511	u64 val;
5512
5513	val = nr64_mac(XMAC_MIN);
5514	val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
5515		 XMAC_MIN_RX_MIN_PKT_SIZE);
5516	val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
5517	val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
5518	nw64_mac(XMAC_MIN, val);
5519
5520	nw64_mac(XMAC_MAX, max);
5521
5522	nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
5523
5524	val = nr64_mac(XMAC_IPG);
5525	if (np->flags & NIU_FLAGS_10G) {
5526		val &= ~XMAC_IPG_IPG_XGMII;
5527		val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
5528	} else {
5529		val &= ~XMAC_IPG_IPG_MII_GMII;
5530		val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
5531	}
5532	nw64_mac(XMAC_IPG, val);
5533
5534	val = nr64_mac(XMAC_CONFIG);
5535	val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
5536		 XMAC_CONFIG_STRETCH_MODE |
5537		 XMAC_CONFIG_VAR_MIN_IPG_EN |
5538		 XMAC_CONFIG_TX_ENABLE);
5539	nw64_mac(XMAC_CONFIG, val);
5540
5541	nw64_mac(TXMAC_FRM_CNT, 0);
5542	nw64_mac(TXMAC_BYTE_CNT, 0);
5543}
5544
5545static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
5546{
5547	u64 val;
5548
5549	nw64_mac(BMAC_MIN_FRAME, min);
5550	nw64_mac(BMAC_MAX_FRAME, max);
5551
5552	nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
5553	nw64_mac(BMAC_CTRL_TYPE, 0x8808);
5554	nw64_mac(BMAC_PREAMBLE_SIZE, 7);
5555
5556	val = nr64_mac(BTXMAC_CONFIG);
5557	val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
5558		 BTXMAC_CONFIG_ENABLE);
5559	nw64_mac(BTXMAC_CONFIG, val);
5560}
5561
5562static void niu_init_tx_mac(struct niu *np)
5563{
5564	u64 min, max;
5565
5566	min = 64;
5567	if (np->dev->mtu > ETH_DATA_LEN)
5568		max = 9216;
5569	else
5570		max = 1522;
5571
5572	/* The XMAC_MIN register only accepts values for TX min which
5573	 * have the low 3 bits cleared.
5574	 */
5575	BUG_ON(min & 0x7);
5576
5577	if (np->flags & NIU_FLAGS_XMAC)
5578		niu_init_tx_xmac(np, min, max);
5579	else
5580		niu_init_tx_bmac(np, min, max);
5581}
5582
5583static int niu_reset_rx_xmac(struct niu *np)
5584{
5585	int limit;
5586
5587	nw64_mac(XRXMAC_SW_RST,
5588		 XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
5589	limit = 1000;
5590	while (--limit >= 0) {
5591		if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
5592						 XRXMAC_SW_RST_SOFT_RST)))
5593			break;
5594		udelay(100);
5595	}
5596	if (limit < 0) {
5597		dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
5598			np->port,
5599			(unsigned long long) nr64_mac(XRXMAC_SW_RST));
5600		return -ENODEV;
5601	}
5602
5603	return 0;
5604}
5605
5606static int niu_reset_rx_bmac(struct niu *np)
5607{
5608	int limit;
5609
5610	nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
5611	limit = 1000;
5612	while (--limit >= 0) {
5613		if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
5614			break;
5615		udelay(100);
5616	}
5617	if (limit < 0) {
5618		dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
5619			np->port,
5620			(unsigned long long) nr64_mac(BRXMAC_SW_RST));
5621		return -ENODEV;
5622	}
5623
5624	return 0;
5625}
5626
5627static int niu_reset_rx_mac(struct niu *np)
5628{
5629	if (np->flags & NIU_FLAGS_XMAC)
5630		return niu_reset_rx_xmac(np);
5631	else
5632		return niu_reset_rx_bmac(np);
5633}
5634
5635static void niu_init_rx_xmac(struct niu *np)
5636{
5637	struct niu_parent *parent = np->parent;
5638	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5639	int first_rdc_table = tp->first_table_num;
5640	unsigned long i;
5641	u64 val;
5642
5643	nw64_mac(XMAC_ADD_FILT0, 0);
5644	nw64_mac(XMAC_ADD_FILT1, 0);
5645	nw64_mac(XMAC_ADD_FILT2, 0);
5646	nw64_mac(XMAC_ADD_FILT12_MASK, 0);
5647	nw64_mac(XMAC_ADD_FILT00_MASK, 0);
5648	for (i = 0; i < MAC_NUM_HASH; i++)
5649		nw64_mac(XMAC_HASH_TBL(i), 0);
5650	nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
5651	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5652	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5653
5654	val = nr64_mac(XMAC_CONFIG);
5655	val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
5656		 XMAC_CONFIG_PROMISCUOUS |
5657		 XMAC_CONFIG_PROMISC_GROUP |
5658		 XMAC_CONFIG_ERR_CHK_DIS |
5659		 XMAC_CONFIG_RX_CRC_CHK_DIS |
5660		 XMAC_CONFIG_RESERVED_MULTICAST |
5661		 XMAC_CONFIG_RX_CODEV_CHK_DIS |
5662		 XMAC_CONFIG_ADDR_FILTER_EN |
5663		 XMAC_CONFIG_RCV_PAUSE_ENABLE |
5664		 XMAC_CONFIG_STRIP_CRC |
5665		 XMAC_CONFIG_PASS_FLOW_CTRL |
5666		 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
5667	val |= (XMAC_CONFIG_HASH_FILTER_EN);
5668	nw64_mac(XMAC_CONFIG, val);
5669
5670	nw64_mac(RXMAC_BT_CNT, 0);
5671	nw64_mac(RXMAC_BC_FRM_CNT, 0);
5672	nw64_mac(RXMAC_MC_FRM_CNT, 0);
5673	nw64_mac(RXMAC_FRAG_CNT, 0);
5674	nw64_mac(RXMAC_HIST_CNT1, 0);
5675	nw64_mac(RXMAC_HIST_CNT2, 0);
5676	nw64_mac(RXMAC_HIST_CNT3, 0);
5677	nw64_mac(RXMAC_HIST_CNT4, 0);
5678	nw64_mac(RXMAC_HIST_CNT5, 0);
5679	nw64_mac(RXMAC_HIST_CNT6, 0);
5680	nw64_mac(RXMAC_HIST_CNT7, 0);
5681	nw64_mac(RXMAC_MPSZER_CNT, 0);
5682	nw64_mac(RXMAC_CRC_ER_CNT, 0);
5683	nw64_mac(RXMAC_CD_VIO_CNT, 0);
5684	nw64_mac(LINK_FAULT_CNT, 0);
5685}
5686
5687static void niu_init_rx_bmac(struct niu *np)
5688{
5689	struct niu_parent *parent = np->parent;
5690	struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
5691	int first_rdc_table = tp->first_table_num;
5692	unsigned long i;
5693	u64 val;
5694
5695	nw64_mac(BMAC_ADD_FILT0, 0);
5696	nw64_mac(BMAC_ADD_FILT1, 0);
5697	nw64_mac(BMAC_ADD_FILT2, 0);
5698	nw64_mac(BMAC_ADD_FILT12_MASK, 0);
5699	nw64_mac(BMAC_ADD_FILT00_MASK, 0);
5700	for (i = 0; i < MAC_NUM_HASH; i++)
5701		nw64_mac(BMAC_HASH_TBL(i), 0);
5702	niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
5703	niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
5704	nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
5705
5706	val = nr64_mac(BRXMAC_CONFIG);
5707	val &= ~(BRXMAC_CONFIG_ENABLE |
5708		 BRXMAC_CONFIG_STRIP_PAD |
5709		 BRXMAC_CONFIG_STRIP_FCS |
5710		 BRXMAC_CONFIG_PROMISC |
5711		 BRXMAC_CONFIG_PROMISC_GRP |
5712		 BRXMAC_CONFIG_ADDR_FILT_EN |
5713		 BRXMAC_CONFIG_DISCARD_DIS);
5714	val |= (BRXMAC_CONFIG_HASH_FILT_EN);
5715	nw64_mac(BRXMAC_CONFIG, val);
5716
5717	val = nr64_mac(BMAC_ADDR_CMPEN);
5718	val |= BMAC_ADDR_CMPEN_EN0;
5719	nw64_mac(BMAC_ADDR_CMPEN, val);
5720}
5721
5722static void niu_init_rx_mac(struct niu *np)
5723{
5724	niu_set_primary_mac(np, np->dev->dev_addr);
5725
5726	if (np->flags & NIU_FLAGS_XMAC)
5727		niu_init_rx_xmac(np);
5728	else
5729		niu_init_rx_bmac(np);
5730}
5731
5732static void niu_enable_tx_xmac(struct niu *np, int on)
5733{
5734	u64 val = nr64_mac(XMAC_CONFIG);
5735
5736	if (on)
5737		val |= XMAC_CONFIG_TX_ENABLE;
5738	else
5739		val &= ~XMAC_CONFIG_TX_ENABLE;
5740	nw64_mac(XMAC_CONFIG, val);
5741}
5742
5743static void niu_enable_tx_bmac(struct niu *np, int on)
5744{
5745	u64 val = nr64_mac(BTXMAC_CONFIG);
5746
5747	if (on)
5748		val |= BTXMAC_CONFIG_ENABLE;
5749	else
5750		val &= ~BTXMAC_CONFIG_ENABLE;
5751	nw64_mac(BTXMAC_CONFIG, val);
5752}
5753
5754static void niu_enable_tx_mac(struct niu *np, int on)
5755{
5756	if (np->flags & NIU_FLAGS_XMAC)
5757		niu_enable_tx_xmac(np, on);
5758	else
5759		niu_enable_tx_bmac(np, on);
5760}
5761
5762static void niu_enable_rx_xmac(struct niu *np, int on)
5763{
5764	u64 val = nr64_mac(XMAC_CONFIG);
5765
5766	val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
5767		 XMAC_CONFIG_PROMISCUOUS);
5768
5769	if (np->flags & NIU_FLAGS_MCAST)
5770		val |= XMAC_CONFIG_HASH_FILTER_EN;
5771	if (np->flags & NIU_FLAGS_PROMISC)
5772		val |= XMAC_CONFIG_PROMISCUOUS;
5773
5774	if (on)
5775		val |= XMAC_CONFIG_RX_MAC_ENABLE;
5776	else
5777		val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
5778	nw64_mac(XMAC_CONFIG, val);
5779}
5780
5781static void niu_enable_rx_bmac(struct niu *np, int on)
5782{
5783	u64 val = nr64_mac(BRXMAC_CONFIG);
5784
5785	val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
5786		 BRXMAC_CONFIG_PROMISC);
5787
5788	if (np->flags & NIU_FLAGS_MCAST)
5789		val |= BRXMAC_CONFIG_HASH_FILT_EN;
5790	if (np->flags & NIU_FLAGS_PROMISC)
5791		val |= BRXMAC_CONFIG_PROMISC;
5792
5793	if (on)
5794		val |= BRXMAC_CONFIG_ENABLE;
5795	else
5796		val &= ~BRXMAC_CONFIG_ENABLE;
5797	nw64_mac(BRXMAC_CONFIG, val);
5798}
5799
5800static void niu_enable_rx_mac(struct niu *np, int on)
5801{
5802	if (np->flags & NIU_FLAGS_XMAC)
5803		niu_enable_rx_xmac(np, on);
5804	else
5805		niu_enable_rx_bmac(np, on);
5806}
5807
5808static int niu_init_mac(struct niu *np)
5809{
5810	int err;
5811
5812	niu_init_xif(np);
5813	err = niu_init_pcs(np);
5814	if (err)
5815		return err;
5816
5817	err = niu_reset_tx_mac(np);
5818	if (err)
5819		return err;
5820	niu_init_tx_mac(np);
5821	err = niu_reset_rx_mac(np);
5822	if (err)
5823		return err;
5824	niu_init_rx_mac(np);
5825
5826	/* This looks hookey but the RX MAC reset we just did will
5827	 * undo some of the state we setup in niu_init_tx_mac() so we
5828	 * have to call it again.  In particular, the RX MAC reset will
5829	 * set the XMAC_MAX register back to it's default value.
5830	 */
5831	niu_init_tx_mac(np);
5832	niu_enable_tx_mac(np, 1);
5833
5834	niu_enable_rx_mac(np, 1);
5835
5836	return 0;
5837}
5838
5839static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5840{
5841	(void) niu_tx_channel_stop(np, rp->tx_channel);
5842}
5843
5844static void niu_stop_tx_channels(struct niu *np)
5845{
5846	int i;
5847
5848	for (i = 0; i < np->num_tx_rings; i++) {
5849		struct tx_ring_info *rp = &np->tx_rings[i];
5850
5851		niu_stop_one_tx_channel(np, rp);
5852	}
5853}
5854
5855static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
5856{
5857	(void) niu_tx_channel_reset(np, rp->tx_channel);
5858}
5859
5860static void niu_reset_tx_channels(struct niu *np)
5861{
5862	int i;
5863
5864	for (i = 0; i < np->num_tx_rings; i++) {
5865		struct tx_ring_info *rp = &np->tx_rings[i];
5866
5867		niu_reset_one_tx_channel(np, rp);
5868	}
5869}
5870
5871static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5872{
5873	(void) niu_enable_rx_channel(np, rp->rx_channel, 0);
5874}
5875
5876static void niu_stop_rx_channels(struct niu *np)
5877{
5878	int i;
5879
5880	for (i = 0; i < np->num_rx_rings; i++) {
5881		struct rx_ring_info *rp = &np->rx_rings[i];
5882
5883		niu_stop_one_rx_channel(np, rp);
5884	}
5885}
5886
5887static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
5888{
5889	int channel = rp->rx_channel;
5890
5891	(void) niu_rx_channel_reset(np, channel);
5892	nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
5893	nw64(RX_DMA_CTL_STAT(channel), 0);
5894	(void) niu_enable_rx_channel(np, channel, 0);
5895}
5896
5897static void niu_reset_rx_channels(struct niu *np)
5898{
5899	int i;
5900
5901	for (i = 0; i < np->num_rx_rings; i++) {
5902		struct rx_ring_info *rp = &np->rx_rings[i];
5903
5904		niu_reset_one_rx_channel(np, rp);
5905	}
5906}
5907
5908static void niu_disable_ipp(struct niu *np)
5909{
5910	u64 rd, wr, val;
5911	int limit;
5912
5913	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5914	wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5915	limit = 100;
5916	while (--limit >= 0 && (rd != wr)) {
5917		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
5918		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
5919	}
5920	if (limit < 0 &&
5921	    (rd != 0 && wr != 1)) {
5922		netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
5923			   (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
5924			   (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
5925	}
5926
5927	val = nr64_ipp(IPP_CFIG);
5928	val &= ~(IPP_CFIG_IPP_ENABLE |
5929		 IPP_CFIG_DFIFO_ECC_EN |
5930		 IPP_CFIG_DROP_BAD_CRC |
5931		 IPP_CFIG_CKSUM_EN);
5932	nw64_ipp(IPP_CFIG, val);
5933
5934	(void) niu_ipp_reset(np);
5935}
5936
5937static int niu_init_hw(struct niu *np)
5938{
5939	int i, err;
5940
5941	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
5942	niu_txc_enable_port(np, 1);
5943	niu_txc_port_dma_enable(np, 1);
5944	niu_txc_set_imask(np, 0);
5945
5946	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
5947	for (i = 0; i < np->num_tx_rings; i++) {
5948		struct tx_ring_info *rp = &np->tx_rings[i];
5949
5950		err = niu_init_one_tx_channel(np, rp);
5951		if (err)
5952			return err;
5953	}
5954
5955	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
5956	err = niu_init_rx_channels(np);
5957	if (err)
5958		goto out_uninit_tx_channels;
5959
5960	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
5961	err = niu_init_classifier_hw(np);
5962	if (err)
5963		goto out_uninit_rx_channels;
5964
5965	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
5966	err = niu_init_zcp(np);
5967	if (err)
5968		goto out_uninit_rx_channels;
5969
5970	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
5971	err = niu_init_ipp(np);
5972	if (err)
5973		goto out_uninit_rx_channels;
5974
5975	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
5976	err = niu_init_mac(np);
5977	if (err)
5978		goto out_uninit_ipp;
5979
5980	return 0;
5981
5982out_uninit_ipp:
5983	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
5984	niu_disable_ipp(np);
5985
5986out_uninit_rx_channels:
5987	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
5988	niu_stop_rx_channels(np);
5989	niu_reset_rx_channels(np);
5990
5991out_uninit_tx_channels:
5992	netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
5993	niu_stop_tx_channels(np);
5994	niu_reset_tx_channels(np);
5995
5996	return err;
5997}
5998
5999static void niu_stop_hw(struct niu *np)
6000{
6001	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
6002	niu_enable_interrupts(np, 0);
6003
6004	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
6005	niu_enable_rx_mac(np, 0);
6006
6007	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
6008	niu_disable_ipp(np);
6009
6010	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
6011	niu_stop_tx_channels(np);
6012
6013	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
6014	niu_stop_rx_channels(np);
6015
6016	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
6017	niu_reset_tx_channels(np);
6018
6019	netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
6020	niu_reset_rx_channels(np);
6021}
6022
6023static void niu_set_irq_name(struct niu *np)
6024{
6025	int port = np->port;
6026	int i, j = 1;
6027
6028	sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
6029
6030	if (port == 0) {
6031		sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
6032		sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
6033		j = 3;
6034	}
6035
6036	for (i = 0; i < np->num_ldg - j; i++) {
6037		if (i < np->num_rx_rings)
6038			sprintf(np->irq_name[i+j], "%s-rx-%d",
6039				np->dev->name, i);
6040		else if (i < np->num_tx_rings + np->num_rx_rings)
6041			sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
6042				i - np->num_rx_rings);
6043	}
6044}
6045
6046static int niu_request_irq(struct niu *np)
6047{
6048	int i, j, err;
6049
6050	niu_set_irq_name(np);
6051
6052	err = 0;
6053	for (i = 0; i < np->num_ldg; i++) {
6054		struct niu_ldg *lp = &np->ldg[i];
6055
6056		err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
6057				  np->irq_name[i], lp);
6058		if (err)
6059			goto out_free_irqs;
6060
6061	}
6062
6063	return 0;
6064
6065out_free_irqs:
6066	for (j = 0; j < i; j++) {
6067		struct niu_ldg *lp = &np->ldg[j];
6068
6069		free_irq(lp->irq, lp);
6070	}
6071	return err;
6072}
6073
6074static void niu_free_irq(struct niu *np)
6075{
6076	int i;
6077
6078	for (i = 0; i < np->num_ldg; i++) {
6079		struct niu_ldg *lp = &np->ldg[i];
6080
6081		free_irq(lp->irq, lp);
6082	}
6083}
6084
6085static void niu_enable_napi(struct niu *np)
6086{
6087	int i;
6088
6089	for (i = 0; i < np->num_ldg; i++)
6090		napi_enable(&np->ldg[i].napi);
6091}
6092
6093static void niu_disable_napi(struct niu *np)
6094{
6095	int i;
6096
6097	for (i = 0; i < np->num_ldg; i++)
6098		napi_disable(&np->ldg[i].napi);
6099}
6100
6101static int niu_open(struct net_device *dev)
6102{
6103	struct niu *np = netdev_priv(dev);
6104	int err;
6105
6106	netif_carrier_off(dev);
6107
6108	err = niu_alloc_channels(np);
6109	if (err)
6110		goto out_err;
6111
6112	err = niu_enable_interrupts(np, 0);
6113	if (err)
6114		goto out_free_channels;
6115
6116	err = niu_request_irq(np);
6117	if (err)
6118		goto out_free_channels;
6119
6120	niu_enable_napi(np);
6121
6122	spin_lock_irq(&np->lock);
6123
6124	err = niu_init_hw(np);
6125	if (!err) {
6126		init_timer(&np->timer);
6127		np->timer.expires = jiffies + HZ;
6128		np->timer.data = (unsigned long) np;
6129		np->timer.function = niu_timer;
6130
6131		err = niu_enable_interrupts(np, 1);
6132		if (err)
6133			niu_stop_hw(np);
6134	}
6135
6136	spin_unlock_irq(&np->lock);
6137
6138	if (err) {
6139		niu_disable_napi(np);
6140		goto out_free_irq;
6141	}
6142
6143	netif_tx_start_all_queues(dev);
6144
6145	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6146		netif_carrier_on(dev);
6147
6148	add_timer(&np->timer);
6149
6150	return 0;
6151
6152out_free_irq:
6153	niu_free_irq(np);
6154
6155out_free_channels:
6156	niu_free_channels(np);
6157
6158out_err:
6159	return err;
6160}
6161
6162static void niu_full_shutdown(struct niu *np, struct net_device *dev)
6163{
6164	cancel_work_sync(&np->reset_task);
6165
6166	niu_disable_napi(np);
6167	netif_tx_stop_all_queues(dev);
6168
6169	del_timer_sync(&np->timer);
6170
6171	spin_lock_irq(&np->lock);
6172
6173	niu_stop_hw(np);
6174
6175	spin_unlock_irq(&np->lock);
6176}
6177
6178static int niu_close(struct net_device *dev)
6179{
6180	struct niu *np = netdev_priv(dev);
6181
6182	niu_full_shutdown(np, dev);
6183
6184	niu_free_irq(np);
6185
6186	niu_free_channels(np);
6187
6188	niu_handle_led(np, 0);
6189
6190	return 0;
6191}
6192
6193static void niu_sync_xmac_stats(struct niu *np)
6194{
6195	struct niu_xmac_stats *mp = &np->mac_stats.xmac;
6196
6197	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
6198	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
6199
6200	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
6201	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
6202	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
6203	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
6204	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
6205	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
6206	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
6207	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
6208	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
6209	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
6210	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
6211	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
6212	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
6213	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
6214	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
6215	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
6216}
6217
6218static void niu_sync_bmac_stats(struct niu *np)
6219{
6220	struct niu_bmac_stats *mp = &np->mac_stats.bmac;
6221
6222	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
6223	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
6224
6225	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
6226	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6227	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
6228	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
6229}
6230
6231static void niu_sync_mac_stats(struct niu *np)
6232{
6233	if (np->flags & NIU_FLAGS_XMAC)
6234		niu_sync_xmac_stats(np);
6235	else
6236		niu_sync_bmac_stats(np);
6237}
6238
6239static void niu_get_rx_stats(struct niu *np,
6240			     struct rtnl_link_stats64 *stats)
6241{
6242	u64 pkts, dropped, errors, bytes;
6243	struct rx_ring_info *rx_rings;
6244	int i;
6245
6246	pkts = dropped = errors = bytes = 0;
6247
6248	rx_rings = ACCESS_ONCE(np->rx_rings);
6249	if (!rx_rings)
6250		goto no_rings;
6251
6252	for (i = 0; i < np->num_rx_rings; i++) {
6253		struct rx_ring_info *rp = &rx_rings[i];
6254
6255		niu_sync_rx_discard_stats(np, rp, 0);
6256
6257		pkts += rp->rx_packets;
6258		bytes += rp->rx_bytes;
6259		dropped += rp->rx_dropped;
6260		errors += rp->rx_errors;
6261	}
6262
6263no_rings:
6264	stats->rx_packets = pkts;
6265	stats->rx_bytes = bytes;
6266	stats->rx_dropped = dropped;
6267	stats->rx_errors = errors;
6268}
6269
6270static void niu_get_tx_stats(struct niu *np,
6271			     struct rtnl_link_stats64 *stats)
6272{
6273	u64 pkts, errors, bytes;
6274	struct tx_ring_info *tx_rings;
6275	int i;
6276
6277	pkts = errors = bytes = 0;
6278
6279	tx_rings = ACCESS_ONCE(np->tx_rings);
6280	if (!tx_rings)
6281		goto no_rings;
6282
6283	for (i = 0; i < np->num_tx_rings; i++) {
6284		struct tx_ring_info *rp = &tx_rings[i];
6285
6286		pkts += rp->tx_packets;
6287		bytes += rp->tx_bytes;
6288		errors += rp->tx_errors;
6289	}
6290
6291no_rings:
6292	stats->tx_packets = pkts;
6293	stats->tx_bytes = bytes;
6294	stats->tx_errors = errors;
6295}
6296
6297static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
6298					       struct rtnl_link_stats64 *stats)
6299{
6300	struct niu *np = netdev_priv(dev);
6301
6302	if (netif_running(dev)) {
6303		niu_get_rx_stats(np, stats);
6304		niu_get_tx_stats(np, stats);
6305	}
6306
6307	return stats;
6308}
6309
6310static void niu_load_hash_xmac(struct niu *np, u16 *hash)
6311{
6312	int i;
6313
6314	for (i = 0; i < 16; i++)
6315		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
6316}
6317
6318static void niu_load_hash_bmac(struct niu *np, u16 *hash)
6319{
6320	int i;
6321
6322	for (i = 0; i < 16; i++)
6323		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
6324}
6325
6326static void niu_load_hash(struct niu *np, u16 *hash)
6327{
6328	if (np->flags & NIU_FLAGS_XMAC)
6329		niu_load_hash_xmac(np, hash);
6330	else
6331		niu_load_hash_bmac(np, hash);
6332}
6333
6334static void niu_set_rx_mode(struct net_device *dev)
6335{
6336	struct niu *np = netdev_priv(dev);
6337	int i, alt_cnt, err;
6338	struct netdev_hw_addr *ha;
6339	unsigned long flags;
6340	u16 hash[16] = { 0, };
6341
6342	spin_lock_irqsave(&np->lock, flags);
6343	niu_enable_rx_mac(np, 0);
6344
6345	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
6346	if (dev->flags & IFF_PROMISC)
6347		np->flags |= NIU_FLAGS_PROMISC;
6348	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
6349		np->flags |= NIU_FLAGS_MCAST;
6350
6351	alt_cnt = netdev_uc_count(dev);
6352	if (alt_cnt > niu_num_alt_addr(np)) {
6353		alt_cnt = 0;
6354		np->flags |= NIU_FLAGS_PROMISC;
6355	}
6356
6357	if (alt_cnt) {
6358		int index = 0;
6359
6360		netdev_for_each_uc_addr(ha, dev) {
6361			err = niu_set_alt_mac(np, index, ha->addr);
6362			if (err)
6363				netdev_warn(dev, "Error %d adding alt mac %d\n",
6364					    err, index);
6365			err = niu_enable_alt_mac(np, index, 1);
6366			if (err)
6367				netdev_warn(dev, "Error %d enabling alt mac %d\n",
6368					    err, index);
6369
6370			index++;
6371		}
6372	} else {
6373		int alt_start;
6374		if (np->flags & NIU_FLAGS_XMAC)
6375			alt_start = 0;
6376		else
6377			alt_start = 1;
6378		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
6379			err = niu_enable_alt_mac(np, i, 0);
6380			if (err)
6381				netdev_warn(dev, "Error %d disabling alt mac %d\n",
6382					    err, i);
6383		}
6384	}
6385	if (dev->flags & IFF_ALLMULTI) {
6386		for (i = 0; i < 16; i++)
6387			hash[i] = 0xffff;
6388	} else if (!netdev_mc_empty(dev)) {
6389		netdev_for_each_mc_addr(ha, dev) {
6390			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
6391
6392			crc >>= 24;
6393			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
6394		}
6395	}
6396
6397	if (np->flags & NIU_FLAGS_MCAST)
6398		niu_load_hash(np, hash);
6399
6400	niu_enable_rx_mac(np, 1);
6401	spin_unlock_irqrestore(&np->lock, flags);
6402}
6403
6404static int niu_set_mac_addr(struct net_device *dev, void *p)
6405{
6406	struct niu *np = netdev_priv(dev);
6407	struct sockaddr *addr = p;
6408	unsigned long flags;
6409
6410	if (!is_valid_ether_addr(addr->sa_data))
6411		return -EADDRNOTAVAIL;
6412
6413	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
6414
6415	if (!netif_running(dev))
6416		return 0;
6417
6418	spin_lock_irqsave(&np->lock, flags);
6419	niu_enable_rx_mac(np, 0);
6420	niu_set_primary_mac(np, dev->dev_addr);
6421	niu_enable_rx_mac(np, 1);
6422	spin_unlock_irqrestore(&np->lock, flags);
6423
6424	return 0;
6425}
6426
6427static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6428{
6429	return -EOPNOTSUPP;
6430}
6431
6432static void niu_netif_stop(struct niu *np)
6433{
6434	np->dev->trans_start = jiffies;	/* prevent tx timeout */
6435
6436	niu_disable_napi(np);
6437
6438	netif_tx_disable(np->dev);
6439}
6440
6441static void niu_netif_start(struct niu *np)
6442{
6443	/* NOTE: unconditional netif_wake_queue is only appropriate
6444	 * so long as all callers are assured to have free tx slots
6445	 * (such as after niu_init_hw).
6446	 */
6447	netif_tx_wake_all_queues(np->dev);
6448
6449	niu_enable_napi(np);
6450
6451	niu_enable_interrupts(np, 1);
6452}
6453
6454static void niu_reset_buffers(struct niu *np)
6455{
6456	int i, j, k, err;
6457
6458	if (np->rx_rings) {
6459		for (i = 0; i < np->num_rx_rings; i++) {
6460			struct rx_ring_info *rp = &np->rx_rings[i];
6461
6462			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
6463				struct page *page;
6464
6465				page = rp->rxhash[j];
6466				while (page) {
6467					struct page *next =
6468						(struct page *) page->mapping;
6469					u64 base = page->index;
6470					base = base >> RBR_DESCR_ADDR_SHIFT;
6471					rp->rbr[k++] = cpu_to_le32(base);
6472					page = next;
6473				}
6474			}
6475			for (; k < MAX_RBR_RING_SIZE; k++) {
6476				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
6477				if (unlikely(err))
6478					break;
6479			}
6480
6481			rp->rbr_index = rp->rbr_table_size - 1;
6482			rp->rcr_index = 0;
6483			rp->rbr_pending = 0;
6484			rp->rbr_refill_pending = 0;
6485		}
6486	}
6487	if (np->tx_rings) {
6488		for (i = 0; i < np->num_tx_rings; i++) {
6489			struct tx_ring_info *rp = &np->tx_rings[i];
6490
6491			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
6492				if (rp->tx_buffs[j].skb)
6493					(void) release_tx_packet(np, rp, j);
6494			}
6495
6496			rp->pending = MAX_TX_RING_SIZE;
6497			rp->prod = 0;
6498			rp->cons = 0;
6499			rp->wrap_bit = 0;
6500		}
6501	}
6502}
6503
6504static void niu_reset_task(struct work_struct *work)
6505{
6506	struct niu *np = container_of(work, struct niu, reset_task);
6507	unsigned long flags;
6508	int err;
6509
6510	spin_lock_irqsave(&np->lock, flags);
6511	if (!netif_running(np->dev)) {
6512		spin_unlock_irqrestore(&np->lock, flags);
6513		return;
6514	}
6515
6516	spin_unlock_irqrestore(&np->lock, flags);
6517
6518	del_timer_sync(&np->timer);
6519
6520	niu_netif_stop(np);
6521
6522	spin_lock_irqsave(&np->lock, flags);
6523
6524	niu_stop_hw(np);
6525
6526	spin_unlock_irqrestore(&np->lock, flags);
6527
6528	niu_reset_buffers(np);
6529
6530	spin_lock_irqsave(&np->lock, flags);
6531
6532	err = niu_init_hw(np);
6533	if (!err) {
6534		np->timer.expires = jiffies + HZ;
6535		add_timer(&np->timer);
6536		niu_netif_start(np);
6537	}
6538
6539	spin_unlock_irqrestore(&np->lock, flags);
6540}
6541
6542static void niu_tx_timeout(struct net_device *dev)
6543{
6544	struct niu *np = netdev_priv(dev);
6545
6546	dev_err(np->device, "%s: Transmit timed out, resetting\n",
6547		dev->name);
6548
6549	schedule_work(&np->reset_task);
6550}
6551
6552static void niu_set_txd(struct tx_ring_info *rp, int index,
6553			u64 mapping, u64 len, u64 mark,
6554			u64 n_frags)
6555{
6556	__le64 *desc = &rp->descr[index];
6557
6558	*desc = cpu_to_le64(mark |
6559			    (n_frags << TX_DESC_NUM_PTR_SHIFT) |
6560			    (len << TX_DESC_TR_LEN_SHIFT) |
6561			    (mapping & TX_DESC_SAD));
6562}
6563
6564static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6565				u64 pad_bytes, u64 len)
6566{
6567	u16 eth_proto, eth_proto_inner;
6568	u64 csum_bits, l3off, ihl, ret;
6569	u8 ip_proto;
6570	int ipv6;
6571
6572	eth_proto = be16_to_cpu(ehdr->h_proto);
6573	eth_proto_inner = eth_proto;
6574	if (eth_proto == ETH_P_8021Q) {
6575		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
6576		__be16 val = vp->h_vlan_encapsulated_proto;
6577
6578		eth_proto_inner = be16_to_cpu(val);
6579	}
6580
6581	ipv6 = ihl = 0;
6582	switch (skb->protocol) {
6583	case cpu_to_be16(ETH_P_IP):
6584		ip_proto = ip_hdr(skb)->protocol;
6585		ihl = ip_hdr(skb)->ihl;
6586		break;
6587	case cpu_to_be16(ETH_P_IPV6):
6588		ip_proto = ipv6_hdr(skb)->nexthdr;
6589		ihl = (40 >> 2);
6590		ipv6 = 1;
6591		break;
6592	default:
6593		ip_proto = ihl = 0;
6594		break;
6595	}
6596
6597	csum_bits = TXHDR_CSUM_NONE;
6598	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6599		u64 start, stuff;
6600
6601		csum_bits = (ip_proto == IPPROTO_TCP ?
6602			     TXHDR_CSUM_TCP :
6603			     (ip_proto == IPPROTO_UDP ?
6604			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
6605
6606		start = skb_checksum_start_offset(skb) -
6607			(pad_bytes + sizeof(struct tx_pkt_hdr));
6608		stuff = start + skb->csum_offset;
6609
6610		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
6611		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
6612	}
6613
6614	l3off = skb_network_offset(skb) -
6615		(pad_bytes + sizeof(struct tx_pkt_hdr));
6616
6617	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
6618	       (len << TXHDR_LEN_SHIFT) |
6619	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
6620	       (ihl << TXHDR_IHL_SHIFT) |
6621	       ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
6622	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
6623	       (ipv6 ? TXHDR_IP_VER : 0) |
6624	       csum_bits);
6625
6626	return ret;
6627}
6628
6629static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6630				  struct net_device *dev)
6631{
6632	struct niu *np = netdev_priv(dev);
6633	unsigned long align, headroom;
6634	struct netdev_queue *txq;
6635	struct tx_ring_info *rp;
6636	struct tx_pkt_hdr *tp;
6637	unsigned int len, nfg;
6638	struct ethhdr *ehdr;
6639	int prod, i, tlen;
6640	u64 mapping, mrk;
6641
6642	i = skb_get_queue_mapping(skb);
6643	rp = &np->tx_rings[i];
6644	txq = netdev_get_tx_queue(dev, i);
6645
6646	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
6647		netif_tx_stop_queue(txq);
6648		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
6649		rp->tx_errors++;
6650		return NETDEV_TX_BUSY;
6651	}
6652
6653	if (eth_skb_pad(skb))
6654		goto out;
6655
6656	len = sizeof(struct tx_pkt_hdr) + 15;
6657	if (skb_headroom(skb) < len) {
6658		struct sk_buff *skb_new;
6659
6660		skb_new = skb_realloc_headroom(skb, len);
6661		if (!skb_new) {
6662			rp->tx_errors++;
6663			goto out_drop;
6664		}
6665		kfree_skb(skb);
6666		skb = skb_new;
6667	} else
6668		skb_orphan(skb);
6669
6670	align = ((unsigned long) skb->data & (16 - 1));
6671	headroom = align + sizeof(struct tx_pkt_hdr);
6672
6673	ehdr = (struct ethhdr *) skb->data;
6674	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
6675
6676	len = skb->len - sizeof(struct tx_pkt_hdr);
6677	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
6678	tp->resv = 0;
6679
6680	len = skb_headlen(skb);
6681	mapping = np->ops->map_single(np->device, skb->data,
6682				      len, DMA_TO_DEVICE);
6683
6684	prod = rp->prod;
6685
6686	rp->tx_buffs[prod].skb = skb;
6687	rp->tx_buffs[prod].mapping = mapping;
6688
6689	mrk = TX_DESC_SOP;
6690	if (++rp->mark_counter == rp->mark_freq) {
6691		rp->mark_counter = 0;
6692		mrk |= TX_DESC_MARK;
6693		rp->mark_pending++;
6694	}
6695
6696	tlen = len;
6697	nfg = skb_shinfo(skb)->nr_frags;
6698	while (tlen > 0) {
6699		tlen -= MAX_TX_DESC_LEN;
6700		nfg++;
6701	}
6702
6703	while (len > 0) {
6704		unsigned int this_len = len;
6705
6706		if (this_len > MAX_TX_DESC_LEN)
6707			this_len = MAX_TX_DESC_LEN;
6708
6709		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
6710		mrk = nfg = 0;
6711
6712		prod = NEXT_TX(rp, prod);
6713		mapping += this_len;
6714		len -= this_len;
6715	}
6716
6717	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
6718		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6719
6720		len = skb_frag_size(frag);
6721		mapping = np->ops->map_page(np->device, skb_frag_page(frag),
6722					    frag->page_offset, len,
6723					    DMA_TO_DEVICE);
6724
6725		rp->tx_buffs[prod].skb = NULL;
6726		rp->tx_buffs[prod].mapping = mapping;
6727
6728		niu_set_txd(rp, prod, mapping, len, 0, 0);
6729
6730		prod = NEXT_TX(rp, prod);
6731	}
6732
6733	if (prod < rp->prod)
6734		rp->wrap_bit ^= TX_RING_KICK_WRAP;
6735	rp->prod = prod;
6736
6737	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
6738
6739	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
6740		netif_tx_stop_queue(txq);
6741		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
6742			netif_tx_wake_queue(txq);
6743	}
6744
6745out:
6746	return NETDEV_TX_OK;
6747
6748out_drop:
6749	rp->tx_errors++;
6750	kfree_skb(skb);
6751	goto out;
6752}
6753
6754static int niu_change_mtu(struct net_device *dev, int new_mtu)
6755{
6756	struct niu *np = netdev_priv(dev);
6757	int err, orig_jumbo, new_jumbo;
6758
6759	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
6760		return -EINVAL;
6761
6762	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
6763	new_jumbo = (new_mtu > ETH_DATA_LEN);
6764
6765	dev->mtu = new_mtu;
6766
6767	if (!netif_running(dev) ||
6768	    (orig_jumbo == new_jumbo))
6769		return 0;
6770
6771	niu_full_shutdown(np, dev);
6772
6773	niu_free_channels(np);
6774
6775	niu_enable_napi(np);
6776
6777	err = niu_alloc_channels(np);
6778	if (err)
6779		return err;
6780
6781	spin_lock_irq(&np->lock);
6782
6783	err = niu_init_hw(np);
6784	if (!err) {
6785		init_timer(&np->timer);
6786		np->timer.expires = jiffies + HZ;
6787		np->timer.data = (unsigned long) np;
6788		np->timer.function = niu_timer;
6789
6790		err = niu_enable_interrupts(np, 1);
6791		if (err)
6792			niu_stop_hw(np);
6793	}
6794
6795	spin_unlock_irq(&np->lock);
6796
6797	if (!err) {
6798		netif_tx_start_all_queues(dev);
6799		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
6800			netif_carrier_on(dev);
6801
6802		add_timer(&np->timer);
6803	}
6804
6805	return err;
6806}
6807
6808static void niu_get_drvinfo(struct net_device *dev,
6809			    struct ethtool_drvinfo *info)
6810{
6811	struct niu *np = netdev_priv(dev);
6812	struct niu_vpd *vpd = &np->vpd;
6813
6814	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6815	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6816	snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
6817		vpd->fcode_major, vpd->fcode_minor);
6818	if (np->parent->plat_type != PLAT_TYPE_NIU)
6819		strlcpy(info->bus_info, pci_name(np->pdev),
6820			sizeof(info->bus_info));
6821}
6822
6823static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6824{
6825	struct niu *np = netdev_priv(dev);
6826	struct niu_link_config *lp;
6827
6828	lp = &np->link_config;
6829
6830	memset(cmd, 0, sizeof(*cmd));
6831	cmd->phy_address = np->phy_addr;
6832	cmd->supported = lp->supported;
6833	cmd->advertising = lp->active_advertising;
6834	cmd->autoneg = lp->active_autoneg;
6835	ethtool_cmd_speed_set(cmd, lp->active_speed);
6836	cmd->duplex = lp->active_duplex;
6837	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
6838	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
6839		XCVR_EXTERNAL : XCVR_INTERNAL;
6840
6841	return 0;
6842}
6843
6844static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6845{
6846	struct niu *np = netdev_priv(dev);
6847	struct niu_link_config *lp = &np->link_config;
6848
6849	lp->advertising = cmd->advertising;
6850	lp->speed = ethtool_cmd_speed(cmd);
6851	lp->duplex = cmd->duplex;
6852	lp->autoneg = cmd->autoneg;
6853	return niu_init_link(np);
6854}
6855
6856static u32 niu_get_msglevel(struct net_device *dev)
6857{
6858	struct niu *np = netdev_priv(dev);
6859	return np->msg_enable;
6860}
6861
6862static void niu_set_msglevel(struct net_device *dev, u32 value)
6863{
6864	struct niu *np = netdev_priv(dev);
6865	np->msg_enable = value;
6866}
6867
6868static int niu_nway_reset(struct net_device *dev)
6869{
6870	struct niu *np = netdev_priv(dev);
6871
6872	if (np->link_config.autoneg)
6873		return niu_init_link(np);
6874
6875	return 0;
6876}
6877
6878static int niu_get_eeprom_len(struct net_device *dev)
6879{
6880	struct niu *np = netdev_priv(dev);
6881
6882	return np->eeprom_len;
6883}
6884
6885static int niu_get_eeprom(struct net_device *dev,
6886			  struct ethtool_eeprom *eeprom, u8 *data)
6887{
6888	struct niu *np = netdev_priv(dev);
6889	u32 offset, len, val;
6890
6891	offset = eeprom->offset;
6892	len = eeprom->len;
6893
6894	if (offset + len < offset)
6895		return -EINVAL;
6896	if (offset >= np->eeprom_len)
6897		return -EINVAL;
6898	if (offset + len > np->eeprom_len)
6899		len = eeprom->len = np->eeprom_len - offset;
6900
6901	if (offset & 3) {
6902		u32 b_offset, b_count;
6903
6904		b_offset = offset & 3;
6905		b_count = 4 - b_offset;
6906		if (b_count > len)
6907			b_count = len;
6908
6909		val = nr64(ESPC_NCR((offset - b_offset) / 4));
6910		memcpy(data, ((char *)&val) + b_offset, b_count);
6911		data += b_count;
6912		len -= b_count;
6913		offset += b_count;
6914	}
6915	while (len >= 4) {
6916		val = nr64(ESPC_NCR(offset / 4));
6917		memcpy(data, &val, 4);
6918		data += 4;
6919		len -= 4;
6920		offset += 4;
6921	}
6922	if (len) {
6923		val = nr64(ESPC_NCR(offset / 4));
6924		memcpy(data, &val, len);
6925	}
6926	return 0;
6927}
6928
6929static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
6930{
6931	switch (flow_type) {
6932	case TCP_V4_FLOW:
6933	case TCP_V6_FLOW:
6934		*pid = IPPROTO_TCP;
6935		break;
6936	case UDP_V4_FLOW:
6937	case UDP_V6_FLOW:
6938		*pid = IPPROTO_UDP;
6939		break;
6940	case SCTP_V4_FLOW:
6941	case SCTP_V6_FLOW:
6942		*pid = IPPROTO_SCTP;
6943		break;
6944	case AH_V4_FLOW:
6945	case AH_V6_FLOW:
6946		*pid = IPPROTO_AH;
6947		break;
6948	case ESP_V4_FLOW:
6949	case ESP_V6_FLOW:
6950		*pid = IPPROTO_ESP;
6951		break;
6952	default:
6953		*pid = 0;
6954		break;
6955	}
6956}
6957
6958static int niu_class_to_ethflow(u64 class, int *flow_type)
6959{
6960	switch (class) {
6961	case CLASS_CODE_TCP_IPV4:
6962		*flow_type = TCP_V4_FLOW;
6963		break;
6964	case CLASS_CODE_UDP_IPV4:
6965		*flow_type = UDP_V4_FLOW;
6966		break;
6967	case CLASS_CODE_AH_ESP_IPV4:
6968		*flow_type = AH_V4_FLOW;
6969		break;
6970	case CLASS_CODE_SCTP_IPV4:
6971		*flow_type = SCTP_V4_FLOW;
6972		break;
6973	case CLASS_CODE_TCP_IPV6:
6974		*flow_type = TCP_V6_FLOW;
6975		break;
6976	case CLASS_CODE_UDP_IPV6:
6977		*flow_type = UDP_V6_FLOW;
6978		break;
6979	case CLASS_CODE_AH_ESP_IPV6:
6980		*flow_type = AH_V6_FLOW;
6981		break;
6982	case CLASS_CODE_SCTP_IPV6:
6983		*flow_type = SCTP_V6_FLOW;
6984		break;
6985	case CLASS_CODE_USER_PROG1:
6986	case CLASS_CODE_USER_PROG2:
6987	case CLASS_CODE_USER_PROG3:
6988	case CLASS_CODE_USER_PROG4:
6989		*flow_type = IP_USER_FLOW;
6990		break;
6991	default:
6992		return -EINVAL;
6993	}
6994
6995	return 0;
6996}
6997
6998static int niu_ethflow_to_class(int flow_type, u64 *class)
6999{
7000	switch (flow_type) {
7001	case TCP_V4_FLOW:
7002		*class = CLASS_CODE_TCP_IPV4;
7003		break;
7004	case UDP_V4_FLOW:
7005		*class = CLASS_CODE_UDP_IPV4;
7006		break;
7007	case AH_ESP_V4_FLOW:
7008	case AH_V4_FLOW:
7009	case ESP_V4_FLOW:
7010		*class = CLASS_CODE_AH_ESP_IPV4;
7011		break;
7012	case SCTP_V4_FLOW:
7013		*class = CLASS_CODE_SCTP_IPV4;
7014		break;
7015	case TCP_V6_FLOW:
7016		*class = CLASS_CODE_TCP_IPV6;
7017		break;
7018	case UDP_V6_FLOW:
7019		*class = CLASS_CODE_UDP_IPV6;
7020		break;
7021	case AH_ESP_V6_FLOW:
7022	case AH_V6_FLOW:
7023	case ESP_V6_FLOW:
7024		*class = CLASS_CODE_AH_ESP_IPV6;
7025		break;
7026	case SCTP_V6_FLOW:
7027		*class = CLASS_CODE_SCTP_IPV6;
7028		break;
7029	default:
7030		return 0;
7031	}
7032
7033	return 1;
7034}
7035
7036static u64 niu_flowkey_to_ethflow(u64 flow_key)
7037{
7038	u64 ethflow = 0;
7039
7040	if (flow_key & FLOW_KEY_L2DA)
7041		ethflow |= RXH_L2DA;
7042	if (flow_key & FLOW_KEY_VLAN)
7043		ethflow |= RXH_VLAN;
7044	if (flow_key & FLOW_KEY_IPSA)
7045		ethflow |= RXH_IP_SRC;
7046	if (flow_key & FLOW_KEY_IPDA)
7047		ethflow |= RXH_IP_DST;
7048	if (flow_key & FLOW_KEY_PROTO)
7049		ethflow |= RXH_L3_PROTO;
7050	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
7051		ethflow |= RXH_L4_B_0_1;
7052	if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
7053		ethflow |= RXH_L4_B_2_3;
7054
7055	return ethflow;
7056
7057}
7058
7059static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
7060{
7061	u64 key = 0;
7062
7063	if (ethflow & RXH_L2DA)
7064		key |= FLOW_KEY_L2DA;
7065	if (ethflow & RXH_VLAN)
7066		key |= FLOW_KEY_VLAN;
7067	if (ethflow & RXH_IP_SRC)
7068		key |= FLOW_KEY_IPSA;
7069	if (ethflow & RXH_IP_DST)
7070		key |= FLOW_KEY_IPDA;
7071	if (ethflow & RXH_L3_PROTO)
7072		key |= FLOW_KEY_PROTO;
7073	if (ethflow & RXH_L4_B_0_1)
7074		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
7075	if (ethflow & RXH_L4_B_2_3)
7076		key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
7077
7078	*flow_key = key;
7079
7080	return 1;
7081
7082}
7083
7084static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
7085{
7086	u64 class;
7087
7088	nfc->data = 0;
7089
7090	if (!niu_ethflow_to_class(nfc->flow_type, &class))
7091		return -EINVAL;
7092
7093	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
7094	    TCAM_KEY_DISC)
7095		nfc->data = RXH_DISCARD;
7096	else
7097		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
7098						      CLASS_CODE_USER_PROG1]);
7099	return 0;
7100}
7101
7102static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
7103					struct ethtool_rx_flow_spec *fsp)
7104{
7105	u32 tmp;
7106	u16 prt;
7107
7108	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
7109	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
7110
7111	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
7112	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
7113
7114	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
7115	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
7116
7117	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
7118	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
7119
7120	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
7121		TCAM_V4KEY2_TOS_SHIFT;
7122	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
7123		TCAM_V4KEY2_TOS_SHIFT;
7124
7125	switch (fsp->flow_type) {
7126	case TCP_V4_FLOW:
7127	case UDP_V4_FLOW:
7128	case SCTP_V4_FLOW:
7129		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7130			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
7131		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
7132
7133		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7134			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7135		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
7136
7137		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7138			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
7139		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
7140
7141		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7142			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7143		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
7144		break;
7145	case AH_V4_FLOW:
7146	case ESP_V4_FLOW:
7147		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7148			TCAM_V4KEY2_PORT_SPI_SHIFT;
7149		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
7150
7151		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7152			TCAM_V4KEY2_PORT_SPI_SHIFT;
7153		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
7154		break;
7155	case IP_USER_FLOW:
7156		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7157			TCAM_V4KEY2_PORT_SPI_SHIFT;
7158		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
7159
7160		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7161			TCAM_V4KEY2_PORT_SPI_SHIFT;
7162		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
7163
7164		fsp->h_u.usr_ip4_spec.proto =
7165			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
7166			TCAM_V4KEY2_PROTO_SHIFT;
7167		fsp->m_u.usr_ip4_spec.proto =
7168			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
7169			TCAM_V4KEY2_PROTO_SHIFT;
7170
7171		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
7172		break;
7173	default:
7174		break;
7175	}
7176}
7177
7178static int niu_get_ethtool_tcam_entry(struct niu *np,
7179				      struct ethtool_rxnfc *nfc)
7180{
7181	struct niu_parent *parent = np->parent;
7182	struct niu_tcam_entry *tp;
7183	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
7184	u16 idx;
7185	u64 class;
7186	int ret = 0;
7187
7188	idx = tcam_get_index(np, (u16)nfc->fs.location);
7189
7190	tp = &parent->tcam[idx];
7191	if (!tp->valid) {
7192		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
7193			    parent->index, (u16)nfc->fs.location, idx);
7194		return -EINVAL;
7195	}
7196
7197	/* fill the flow spec entry */
7198	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7199		TCAM_V4KEY0_CLASS_CODE_SHIFT;
7200	ret = niu_class_to_ethflow(class, &fsp->flow_type);
7201	if (ret < 0) {
7202		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
7203			    parent->index);
7204		goto out;
7205	}
7206
7207	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
7208		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
7209			TCAM_V4KEY2_PROTO_SHIFT;
7210		if (proto == IPPROTO_ESP) {
7211			if (fsp->flow_type == AH_V4_FLOW)
7212				fsp->flow_type = ESP_V4_FLOW;
7213			else
7214				fsp->flow_type = ESP_V6_FLOW;
7215		}
7216	}
7217
7218	switch (fsp->flow_type) {
7219	case TCP_V4_FLOW:
7220	case UDP_V4_FLOW:
7221	case SCTP_V4_FLOW:
7222	case AH_V4_FLOW:
7223	case ESP_V4_FLOW:
7224		niu_get_ip4fs_from_tcam_key(tp, fsp);
7225		break;
7226	case TCP_V6_FLOW:
7227	case UDP_V6_FLOW:
7228	case SCTP_V6_FLOW:
7229	case AH_V6_FLOW:
7230	case ESP_V6_FLOW:
7231		/* Not yet implemented */
7232		ret = -EINVAL;
7233		break;
7234	case IP_USER_FLOW:
7235		niu_get_ip4fs_from_tcam_key(tp, fsp);
7236		break;
7237	default:
7238		ret = -EINVAL;
7239		break;
7240	}
7241
7242	if (ret < 0)
7243		goto out;
7244
7245	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
7246		fsp->ring_cookie = RX_CLS_FLOW_DISC;
7247	else
7248		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
7249			TCAM_ASSOCDATA_OFFSET_SHIFT;
7250
7251	/* put the tcam size here */
7252	nfc->data = tcam_get_size(np);
7253out:
7254	return ret;
7255}
7256
7257static int niu_get_ethtool_tcam_all(struct niu *np,
7258				    struct ethtool_rxnfc *nfc,
7259				    u32 *rule_locs)
7260{
7261	struct niu_parent *parent = np->parent;
7262	struct niu_tcam_entry *tp;
7263	int i, idx, cnt;
7264	unsigned long flags;
7265	int ret = 0;
7266
7267	/* put the tcam size here */
7268	nfc->data = tcam_get_size(np);
7269
7270	niu_lock_parent(np, flags);
7271	for (cnt = 0, i = 0; i < nfc->data; i++) {
7272		idx = tcam_get_index(np, i);
7273		tp = &parent->tcam[idx];
7274		if (!tp->valid)
7275			continue;
7276		if (cnt == nfc->rule_cnt) {
7277			ret = -EMSGSIZE;
7278			break;
7279		}
7280		rule_locs[cnt] = i;
7281		cnt++;
7282	}
7283	niu_unlock_parent(np, flags);
7284
7285	nfc->rule_cnt = cnt;
7286
7287	return ret;
7288}
7289
7290static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
7291		       u32 *rule_locs)
7292{
7293	struct niu *np = netdev_priv(dev);
7294	int ret = 0;
7295
7296	switch (cmd->cmd) {
7297	case ETHTOOL_GRXFH:
7298		ret = niu_get_hash_opts(np, cmd);
7299		break;
7300	case ETHTOOL_GRXRINGS:
7301		cmd->data = np->num_rx_rings;
7302		break;
7303	case ETHTOOL_GRXCLSRLCNT:
7304		cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
7305		break;
7306	case ETHTOOL_GRXCLSRULE:
7307		ret = niu_get_ethtool_tcam_entry(np, cmd);
7308		break;
7309	case ETHTOOL_GRXCLSRLALL:
7310		ret = niu_get_ethtool_tcam_all(np, cmd, rule_locs);
7311		break;
7312	default:
7313		ret = -EINVAL;
7314		break;
7315	}
7316
7317	return ret;
7318}
7319
7320static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
7321{
7322	u64 class;
7323	u64 flow_key = 0;
7324	unsigned long flags;
7325
7326	if (!niu_ethflow_to_class(nfc->flow_type, &class))
7327		return -EINVAL;
7328
7329	if (class < CLASS_CODE_USER_PROG1 ||
7330	    class > CLASS_CODE_SCTP_IPV6)
7331		return -EINVAL;
7332
7333	if (nfc->data & RXH_DISCARD) {
7334		niu_lock_parent(np, flags);
7335		flow_key = np->parent->tcam_key[class -
7336					       CLASS_CODE_USER_PROG1];
7337		flow_key |= TCAM_KEY_DISC;
7338		nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
7339		np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
7340		niu_unlock_parent(np, flags);
7341		return 0;
7342	} else {
7343		/* Discard was set before, but is not set now */
7344		if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
7345		    TCAM_KEY_DISC) {
7346			niu_lock_parent(np, flags);
7347			flow_key = np->parent->tcam_key[class -
7348					       CLASS_CODE_USER_PROG1];
7349			flow_key &= ~TCAM_KEY_DISC;
7350			nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
7351			     flow_key);
7352			np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
7353				flow_key;
7354			niu_unlock_parent(np, flags);
7355		}
7356	}
7357
7358	if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
7359		return -EINVAL;
7360
7361	niu_lock_parent(np, flags);
7362	nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
7363	np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
7364	niu_unlock_parent(np, flags);
7365
7366	return 0;
7367}
7368
7369static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
7370				       struct niu_tcam_entry *tp,
7371				       int l2_rdc_tab, u64 class)
7372{
7373	u8 pid = 0;
7374	u32 sip, dip, sipm, dipm, spi, spim;
7375	u16 sport, dport, spm, dpm;
7376
7377	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
7378	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
7379	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
7380	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
7381
7382	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
7383	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
7384	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
7385	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
7386
7387	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
7388	tp->key[3] |= dip;
7389
7390	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
7391	tp->key_mask[3] |= dipm;
7392
7393	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
7394		       TCAM_V4KEY2_TOS_SHIFT);
7395	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
7396			    TCAM_V4KEY2_TOS_SHIFT);
7397	switch (fsp->flow_type) {
7398	case TCP_V4_FLOW:
7399	case UDP_V4_FLOW:
7400	case SCTP_V4_FLOW:
7401		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
7402		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
7403		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
7404		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
7405
7406		tp->key[2] |= (((u64)sport << 16) | dport);
7407		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
7408		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
7409		break;
7410	case AH_V4_FLOW:
7411	case ESP_V4_FLOW:
7412		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
7413		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
7414
7415		tp->key[2] |= spi;
7416		tp->key_mask[2] |= spim;
7417		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
7418		break;
7419	case IP_USER_FLOW:
7420		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
7421		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
7422
7423		tp->key[2] |= spi;
7424		tp->key_mask[2] |= spim;
7425		pid = fsp->h_u.usr_ip4_spec.proto;
7426		break;
7427	default:
7428		break;
7429	}
7430
7431	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
7432	if (pid) {
7433		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
7434	}
7435}
7436
7437static int niu_add_ethtool_tcam_entry(struct niu *np,
7438				      struct ethtool_rxnfc *nfc)
7439{
7440	struct niu_parent *parent = np->parent;
7441	struct niu_tcam_entry *tp;
7442	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
7443	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
7444	int l2_rdc_table = rdc_table->first_table_num;
7445	u16 idx;
7446	u64 class;
7447	unsigned long flags;
7448	int err, ret;
7449
7450	ret = 0;
7451
7452	idx = nfc->fs.location;
7453	if (idx >= tcam_get_size(np))
7454		return -EINVAL;
7455
7456	if (fsp->flow_type == IP_USER_FLOW) {
7457		int i;
7458		int add_usr_cls = 0;
7459		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
7460		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
7461
7462		if (uspec->ip_ver != ETH_RX_NFC_IP4)
7463			return -EINVAL;
7464
7465		niu_lock_parent(np, flags);
7466
7467		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
7468			if (parent->l3_cls[i]) {
7469				if (uspec->proto == parent->l3_cls_pid[i]) {
7470					class = parent->l3_cls[i];
7471					parent->l3_cls_refcnt[i]++;
7472					add_usr_cls = 1;
7473					break;
7474				}
7475			} else {
7476				/* Program new user IP class */
7477				switch (i) {
7478				case 0:
7479					class = CLASS_CODE_USER_PROG1;
7480					break;
7481				case 1:
7482					class = CLASS_CODE_USER_PROG2;
7483					break;
7484				case 2:
7485					class = CLASS_CODE_USER_PROG3;
7486					break;
7487				case 3:
7488					class = CLASS_CODE_USER_PROG4;
7489					break;
7490				default:
7491					break;
7492				}
7493				ret = tcam_user_ip_class_set(np, class, 0,
7494							     uspec->proto,
7495							     uspec->tos,
7496							     umask->tos);
7497				if (ret)
7498					goto out;
7499
7500				ret = tcam_user_ip_class_enable(np, class, 1);
7501				if (ret)
7502					goto out;
7503				parent->l3_cls[i] = class;
7504				parent->l3_cls_pid[i] = uspec->proto;
7505				parent->l3_cls_refcnt[i]++;
7506				add_usr_cls = 1;
7507				break;
7508			}
7509		}
7510		if (!add_usr_cls) {
7511			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
7512				    parent->index, __func__, uspec->proto);
7513			ret = -EINVAL;
7514			goto out;
7515		}
7516		niu_unlock_parent(np, flags);
7517	} else {
7518		if (!niu_ethflow_to_class(fsp->flow_type, &class)) {
7519			return -EINVAL;
7520		}
7521	}
7522
7523	niu_lock_parent(np, flags);
7524
7525	idx = tcam_get_index(np, idx);
7526	tp = &parent->tcam[idx];
7527
7528	memset(tp, 0, sizeof(*tp));
7529
7530	/* fill in the tcam key and mask */
7531	switch (fsp->flow_type) {
7532	case TCP_V4_FLOW:
7533	case UDP_V4_FLOW:
7534	case SCTP_V4_FLOW:
7535	case AH_V4_FLOW:
7536	case ESP_V4_FLOW:
7537		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
7538		break;
7539	case TCP_V6_FLOW:
7540	case UDP_V6_FLOW:
7541	case SCTP_V6_FLOW:
7542	case AH_V6_FLOW:
7543	case ESP_V6_FLOW:
7544		/* Not yet implemented */
7545		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
7546			    parent->index, __func__, fsp->flow_type);
7547		ret = -EINVAL;
7548		goto out;
7549	case IP_USER_FLOW:
7550		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
7551		break;
7552	default:
7553		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
7554			    parent->index, __func__, fsp->flow_type);
7555		ret = -EINVAL;
7556		goto out;
7557	}
7558
7559	/* fill in the assoc data */
7560	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
7561		tp->assoc_data = TCAM_ASSOCDATA_DISC;
7562	} else {
7563		if (fsp->ring_cookie >= np->num_rx_rings) {
7564			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
7565				    parent->index, __func__,
7566				    (long long)fsp->ring_cookie);
7567			ret = -EINVAL;
7568			goto out;
7569		}
7570		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
7571				  (fsp->ring_cookie <<
7572				   TCAM_ASSOCDATA_OFFSET_SHIFT));
7573	}
7574
7575	err = tcam_write(np, idx, tp->key, tp->key_mask);
7576	if (err) {
7577		ret = -EINVAL;
7578		goto out;
7579	}
7580	err = tcam_assoc_write(np, idx, tp->assoc_data);
7581	if (err) {
7582		ret = -EINVAL;
7583		goto out;
7584	}
7585
7586	/* validate the entry */
7587	tp->valid = 1;
7588	np->clas.tcam_valid_entries++;
7589out:
7590	niu_unlock_parent(np, flags);
7591
7592	return ret;
7593}
7594
7595static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
7596{
7597	struct niu_parent *parent = np->parent;
7598	struct niu_tcam_entry *tp;
7599	u16 idx;
7600	unsigned long flags;
7601	u64 class;
7602	int ret = 0;
7603
7604	if (loc >= tcam_get_size(np))
7605		return -EINVAL;
7606
7607	niu_lock_parent(np, flags);
7608
7609	idx = tcam_get_index(np, loc);
7610	tp = &parent->tcam[idx];
7611
7612	/* if the entry is of a user defined class, then update*/
7613	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7614		TCAM_V4KEY0_CLASS_CODE_SHIFT;
7615
7616	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
7617		int i;
7618		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
7619			if (parent->l3_cls[i] == class) {
7620				parent->l3_cls_refcnt[i]--;
7621				if (!parent->l3_cls_refcnt[i]) {
7622					/* disable class */
7623					ret = tcam_user_ip_class_enable(np,
7624									class,
7625									0);
7626					if (ret)
7627						goto out;
7628					parent->l3_cls[i] = 0;
7629					parent->l3_cls_pid[i] = 0;
7630				}
7631				break;
7632			}
7633		}
7634		if (i == NIU_L3_PROG_CLS) {
7635			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
7636				    parent->index, __func__,
7637				    (unsigned long long)class);
7638			ret = -EINVAL;
7639			goto out;
7640		}
7641	}
7642
7643	ret = tcam_flush(np, idx);
7644	if (ret)
7645		goto out;
7646
7647	/* invalidate the entry */
7648	tp->valid = 0;
7649	np->clas.tcam_valid_entries--;
7650out:
7651	niu_unlock_parent(np, flags);
7652
7653	return ret;
7654}
7655
7656static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
7657{
7658	struct niu *np = netdev_priv(dev);
7659	int ret = 0;
7660
7661	switch (cmd->cmd) {
7662	case ETHTOOL_SRXFH:
7663		ret = niu_set_hash_opts(np, cmd);
7664		break;
7665	case ETHTOOL_SRXCLSRLINS:
7666		ret = niu_add_ethtool_tcam_entry(np, cmd);
7667		break;
7668	case ETHTOOL_SRXCLSRLDEL:
7669		ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
7670		break;
7671	default:
7672		ret = -EINVAL;
7673		break;
7674	}
7675
7676	return ret;
7677}
7678
7679static const struct {
7680	const char string[ETH_GSTRING_LEN];
7681} niu_xmac_stat_keys[] = {
7682	{ "tx_frames" },
7683	{ "tx_bytes" },
7684	{ "tx_fifo_errors" },
7685	{ "tx_overflow_errors" },
7686	{ "tx_max_pkt_size_errors" },
7687	{ "tx_underflow_errors" },
7688	{ "rx_local_faults" },
7689	{ "rx_remote_faults" },
7690	{ "rx_link_faults" },
7691	{ "rx_align_errors" },
7692	{ "rx_frags" },
7693	{ "rx_mcasts" },
7694	{ "rx_bcasts" },
7695	{ "rx_hist_cnt1" },
7696	{ "rx_hist_cnt2" },
7697	{ "rx_hist_cnt3" },
7698	{ "rx_hist_cnt4" },
7699	{ "rx_hist_cnt5" },
7700	{ "rx_hist_cnt6" },
7701	{ "rx_hist_cnt7" },
7702	{ "rx_octets" },
7703	{ "rx_code_violations" },
7704	{ "rx_len_errors" },
7705	{ "rx_crc_errors" },
7706	{ "rx_underflows" },
7707	{ "rx_overflows" },
7708	{ "pause_off_state" },
7709	{ "pause_on_state" },
7710	{ "pause_received" },
7711};
7712
7713#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
7714
7715static const struct {
7716	const char string[ETH_GSTRING_LEN];
7717} niu_bmac_stat_keys[] = {
7718	{ "tx_underflow_errors" },
7719	{ "tx_max_pkt_size_errors" },
7720	{ "tx_bytes" },
7721	{ "tx_frames" },
7722	{ "rx_overflows" },
7723	{ "rx_frames" },
7724	{ "rx_align_errors" },
7725	{ "rx_crc_errors" },
7726	{ "rx_len_errors" },
7727	{ "pause_off_state" },
7728	{ "pause_on_state" },
7729	{ "pause_received" },
7730};
7731
7732#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
7733
7734static const struct {
7735	const char string[ETH_GSTRING_LEN];
7736} niu_rxchan_stat_keys[] = {
7737	{ "rx_channel" },
7738	{ "rx_packets" },
7739	{ "rx_bytes" },
7740	{ "rx_dropped" },
7741	{ "rx_errors" },
7742};
7743
7744#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
7745
7746static const struct {
7747	const char string[ETH_GSTRING_LEN];
7748} niu_txchan_stat_keys[] = {
7749	{ "tx_channel" },
7750	{ "tx_packets" },
7751	{ "tx_bytes" },
7752	{ "tx_errors" },
7753};
7754
7755#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
7756
7757static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
7758{
7759	struct niu *np = netdev_priv(dev);
7760	int i;
7761
7762	if (stringset != ETH_SS_STATS)
7763		return;
7764
7765	if (np->flags & NIU_FLAGS_XMAC) {
7766		memcpy(data, niu_xmac_stat_keys,
7767		       sizeof(niu_xmac_stat_keys));
7768		data += sizeof(niu_xmac_stat_keys);
7769	} else {
7770		memcpy(data, niu_bmac_stat_keys,
7771		       sizeof(niu_bmac_stat_keys));
7772		data += sizeof(niu_bmac_stat_keys);
7773	}
7774	for (i = 0; i < np->num_rx_rings; i++) {
7775		memcpy(data, niu_rxchan_stat_keys,
7776		       sizeof(niu_rxchan_stat_keys));
7777		data += sizeof(niu_rxchan_stat_keys);
7778	}
7779	for (i = 0; i < np->num_tx_rings; i++) {
7780		memcpy(data, niu_txchan_stat_keys,
7781		       sizeof(niu_txchan_stat_keys));
7782		data += sizeof(niu_txchan_stat_keys);
7783	}
7784}
7785
7786static int niu_get_sset_count(struct net_device *dev, int stringset)
7787{
7788	struct niu *np = netdev_priv(dev);
7789
7790	if (stringset != ETH_SS_STATS)
7791		return -EINVAL;
7792
7793	return (np->flags & NIU_FLAGS_XMAC ?
7794		 NUM_XMAC_STAT_KEYS :
7795		 NUM_BMAC_STAT_KEYS) +
7796		(np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
7797		(np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
7798}
7799
7800static void niu_get_ethtool_stats(struct net_device *dev,
7801				  struct ethtool_stats *stats, u64 *data)
7802{
7803	struct niu *np = netdev_priv(dev);
7804	int i;
7805
7806	niu_sync_mac_stats(np);
7807	if (np->flags & NIU_FLAGS_XMAC) {
7808		memcpy(data, &np->mac_stats.xmac,
7809		       sizeof(struct niu_xmac_stats));
7810		data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
7811	} else {
7812		memcpy(data, &np->mac_stats.bmac,
7813		       sizeof(struct niu_bmac_stats));
7814		data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
7815	}
7816	for (i = 0; i < np->num_rx_rings; i++) {
7817		struct rx_ring_info *rp = &np->rx_rings[i];
7818
7819		niu_sync_rx_discard_stats(np, rp, 0);
7820
7821		data[0] = rp->rx_channel;
7822		data[1] = rp->rx_packets;
7823		data[2] = rp->rx_bytes;
7824		data[3] = rp->rx_dropped;
7825		data[4] = rp->rx_errors;
7826		data += 5;
7827	}
7828	for (i = 0; i < np->num_tx_rings; i++) {
7829		struct tx_ring_info *rp = &np->tx_rings[i];
7830
7831		data[0] = rp->tx_channel;
7832		data[1] = rp->tx_packets;
7833		data[2] = rp->tx_bytes;
7834		data[3] = rp->tx_errors;
7835		data += 4;
7836	}
7837}
7838
7839static u64 niu_led_state_save(struct niu *np)
7840{
7841	if (np->flags & NIU_FLAGS_XMAC)
7842		return nr64_mac(XMAC_CONFIG);
7843	else
7844		return nr64_mac(BMAC_XIF_CONFIG);
7845}
7846
7847static void niu_led_state_restore(struct niu *np, u64 val)
7848{
7849	if (np->flags & NIU_FLAGS_XMAC)
7850		nw64_mac(XMAC_CONFIG, val);
7851	else
7852		nw64_mac(BMAC_XIF_CONFIG, val);
7853}
7854
7855static void niu_force_led(struct niu *np, int on)
7856{
7857	u64 val, reg, bit;
7858
7859	if (np->flags & NIU_FLAGS_XMAC) {
7860		reg = XMAC_CONFIG;
7861		bit = XMAC_CONFIG_FORCE_LED_ON;
7862	} else {
7863		reg = BMAC_XIF_CONFIG;
7864		bit = BMAC_XIF_CONFIG_LINK_LED;
7865	}
7866
7867	val = nr64_mac(reg);
7868	if (on)
7869		val |= bit;
7870	else
7871		val &= ~bit;
7872	nw64_mac(reg, val);
7873}
7874
7875static int niu_set_phys_id(struct net_device *dev,
7876			   enum ethtool_phys_id_state state)
7877
7878{
7879	struct niu *np = netdev_priv(dev);
7880
7881	if (!netif_running(dev))
7882		return -EAGAIN;
7883
7884	switch (state) {
7885	case ETHTOOL_ID_ACTIVE:
7886		np->orig_led_state = niu_led_state_save(np);
7887		return 1;	/* cycle on/off once per second */
7888
7889	case ETHTOOL_ID_ON:
7890		niu_force_led(np, 1);
7891		break;
7892
7893	case ETHTOOL_ID_OFF:
7894		niu_force_led(np, 0);
7895		break;
7896
7897	case ETHTOOL_ID_INACTIVE:
7898		niu_led_state_restore(np, np->orig_led_state);
7899	}
7900
7901	return 0;
7902}
7903
7904static const struct ethtool_ops niu_ethtool_ops = {
7905	.get_drvinfo		= niu_get_drvinfo,
7906	.get_link		= ethtool_op_get_link,
7907	.get_msglevel		= niu_get_msglevel,
7908	.set_msglevel		= niu_set_msglevel,
7909	.nway_reset		= niu_nway_reset,
7910	.get_eeprom_len		= niu_get_eeprom_len,
7911	.get_eeprom		= niu_get_eeprom,
7912	.get_settings		= niu_get_settings,
7913	.set_settings		= niu_set_settings,
7914	.get_strings		= niu_get_strings,
7915	.get_sset_count		= niu_get_sset_count,
7916	.get_ethtool_stats	= niu_get_ethtool_stats,
7917	.set_phys_id		= niu_set_phys_id,
7918	.get_rxnfc		= niu_get_nfc,
7919	.set_rxnfc		= niu_set_nfc,
7920};
7921
7922static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
7923			      int ldg, int ldn)
7924{
7925	if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
7926		return -EINVAL;
7927	if (ldn < 0 || ldn > LDN_MAX)
7928		return -EINVAL;
7929
7930	parent->ldg_map[ldn] = ldg;
7931
7932	if (np->parent->plat_type == PLAT_TYPE_NIU) {
7933		/* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
7934		 * the firmware, and we're not supposed to change them.
7935		 * Validate the mapping, because if it's wrong we probably
7936		 * won't get any interrupts and that's painful to debug.
7937		 */
7938		if (nr64(LDG_NUM(ldn)) != ldg) {
7939			dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
7940				np->port, ldn, ldg,
7941				(unsigned long long) nr64(LDG_NUM(ldn)));
7942			return -EINVAL;
7943		}
7944	} else
7945		nw64(LDG_NUM(ldn), ldg);
7946
7947	return 0;
7948}
7949
7950static int niu_set_ldg_timer_res(struct niu *np, int res)
7951{
7952	if (res < 0 || res > LDG_TIMER_RES_VAL)
7953		return -EINVAL;
7954
7955
7956	nw64(LDG_TIMER_RES, res);
7957
7958	return 0;
7959}
7960
7961static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
7962{
7963	if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
7964	    (func < 0 || func > 3) ||
7965	    (vector < 0 || vector > 0x1f))
7966		return -EINVAL;
7967
7968	nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
7969
7970	return 0;
7971}
7972
7973static int niu_pci_eeprom_read(struct niu *np, u32 addr)
7974{
7975	u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
7976				 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
7977	int limit;
7978
7979	if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
7980		return -EINVAL;
7981
7982	frame = frame_base;
7983	nw64(ESPC_PIO_STAT, frame);
7984	limit = 64;
7985	do {
7986		udelay(5);
7987		frame = nr64(ESPC_PIO_STAT);
7988		if (frame & ESPC_PIO_STAT_READ_END)
7989			break;
7990	} while (limit--);
7991	if (!(frame & ESPC_PIO_STAT_READ_END)) {
7992		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
7993			(unsigned long long) frame);
7994		return -ENODEV;
7995	}
7996
7997	frame = frame_base;
7998	nw64(ESPC_PIO_STAT, frame);
7999	limit = 64;
8000	do {
8001		udelay(5);
8002		frame = nr64(ESPC_PIO_STAT);
8003		if (frame & ESPC_PIO_STAT_READ_END)
8004			break;
8005	} while (limit--);
8006	if (!(frame & ESPC_PIO_STAT_READ_END)) {
8007		dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
8008			(unsigned long long) frame);
8009		return -ENODEV;
8010	}
8011
8012	frame = nr64(ESPC_PIO_STAT);
8013	return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
8014}
8015
8016static int niu_pci_eeprom_read16(struct niu *np, u32 off)
8017{
8018	int err = niu_pci_eeprom_read(np, off);
8019	u16 val;
8020
8021	if (err < 0)
8022		return err;
8023	val = (err << 8);
8024	err = niu_pci_eeprom_read(np, off + 1);
8025	if (err < 0)
8026		return err;
8027	val |= (err & 0xff);
8028
8029	return val;
8030}
8031
8032static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
8033{
8034	int err = niu_pci_eeprom_read(np, off);
8035	u16 val;
8036
8037	if (err < 0)
8038		return err;
8039
8040	val = (err & 0xff);
8041	err = niu_pci_eeprom_read(np, off + 1);
8042	if (err < 0)
8043		return err;
8044
8045	val |= (err & 0xff) << 8;
8046
8047	return val;
8048}
8049
8050static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
8051				    int namebuf_len)
8052{
8053	int i;
8054
8055	for (i = 0; i < namebuf_len; i++) {
8056		int err = niu_pci_eeprom_read(np, off + i);
8057		if (err < 0)
8058			return err;
8059		*namebuf++ = err;
8060		if (!err)
8061			break;
8062	}
8063	if (i >= namebuf_len)
8064		return -EINVAL;
8065
8066	return i + 1;
8067}
8068
8069static void niu_vpd_parse_version(struct niu *np)
8070{
8071	struct niu_vpd *vpd = &np->vpd;
8072	int len = strlen(vpd->version) + 1;
8073	const char *s = vpd->version;
8074	int i;
8075
8076	for (i = 0; i < len - 5; i++) {
8077		if (!strncmp(s + i, "FCode ", 6))
8078			break;
8079	}
8080	if (i >= len - 5)
8081		return;
8082
8083	s += i + 5;
8084	sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
8085
8086	netif_printk(np, probe, KERN_DEBUG, np->dev,
8087		     "VPD_SCAN: FCODE major(%d) minor(%d)\n",
8088		     vpd->fcode_major, vpd->fcode_minor);
8089	if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
8090	    (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
8091	     vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
8092		np->flags |= NIU_FLAGS_VPD_VALID;
8093}
8094
8095/* ESPC_PIO_EN_ENABLE must be set */
8096static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
8097{
8098	unsigned int found_mask = 0;
8099#define FOUND_MASK_MODEL	0x00000001
8100#define FOUND_MASK_BMODEL	0x00000002
8101#define FOUND_MASK_VERS		0x00000004
8102#define FOUND_MASK_MAC		0x00000008
8103#define FOUND_MASK_NMAC		0x00000010
8104#define FOUND_MASK_PHY		0x00000020
8105#define FOUND_MASK_ALL		0x0000003f
8106
8107	netif_printk(np, probe, KERN_DEBUG, np->dev,
8108		     "VPD_SCAN: start[%x] end[%x]\n", start, end);
8109	while (start < end) {
8110		int len, err, prop_len;
8111		char namebuf[64];
8112		u8 *prop_buf;
8113		int max_len;
8114
8115		if (found_mask == FOUND_MASK_ALL) {
8116			niu_vpd_parse_version(np);
8117			return 1;
8118		}
8119
8120		err = niu_pci_eeprom_read(np, start + 2);
8121		if (err < 0)
8122			return err;
8123		len = err;
8124		start += 3;
8125
8126		prop_len = niu_pci_eeprom_read(np, start + 4);
8127		err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
8128		if (err < 0)
8129			return err;
8130
8131		prop_buf = NULL;
8132		max_len = 0;
8133		if (!strcmp(namebuf, "model")) {
8134			prop_buf = np->vpd.model;
8135			max_len = NIU_VPD_MODEL_MAX;
8136			found_mask |= FOUND_MASK_MODEL;
8137		} else if (!strcmp(namebuf, "board-model")) {
8138			prop_buf = np->vpd.board_model;
8139			max_len = NIU_VPD_BD_MODEL_MAX;
8140			found_mask |= FOUND_MASK_BMODEL;
8141		} else if (!strcmp(namebuf, "version")) {
8142			prop_buf = np->vpd.version;
8143			max_len = NIU_VPD_VERSION_MAX;
8144			found_mask |= FOUND_MASK_VERS;
8145		} else if (!strcmp(namebuf, "local-mac-address")) {
8146			prop_buf = np->vpd.local_mac;
8147			max_len = ETH_ALEN;
8148			found_mask |= FOUND_MASK_MAC;
8149		} else if (!strcmp(namebuf, "num-mac-addresses")) {
8150			prop_buf = &np->vpd.mac_num;
8151			max_len = 1;
8152			found_mask |= FOUND_MASK_NMAC;
8153		} else if (!strcmp(namebuf, "phy-type")) {
8154			prop_buf = np->vpd.phy_type;
8155			max_len = NIU_VPD_PHY_TYPE_MAX;
8156			found_mask |= FOUND_MASK_PHY;
8157		}
8158
8159		if (max_len && prop_len > max_len) {
8160			dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
8161			return -EINVAL;
8162		}
8163
8164		if (prop_buf) {
8165			u32 off = start + 5 + err;
8166			int i;
8167
8168			netif_printk(np, probe, KERN_DEBUG, np->dev,
8169				     "VPD_SCAN: Reading in property [%s] len[%d]\n",
8170				     namebuf, prop_len);
8171			for (i = 0; i < prop_len; i++)
8172				*prop_buf++ = niu_pci_eeprom_read(np, off + i);
8173		}
8174
8175		start += len;
8176	}
8177
8178	return 0;
8179}
8180
8181/* ESPC_PIO_EN_ENABLE must be set */
8182static void niu_pci_vpd_fetch(struct niu *np, u32 start)
8183{
8184	u32 offset;
8185	int err;
8186
8187	err = niu_pci_eeprom_read16_swp(np, start + 1);
8188	if (err < 0)
8189		return;
8190
8191	offset = err + 3;
8192
8193	while (start + offset < ESPC_EEPROM_SIZE) {
8194		u32 here = start + offset;
8195		u32 end;
8196
8197		err = niu_pci_eeprom_read(np, here);
8198		if (err != 0x90)
8199			return;
8200
8201		err = niu_pci_eeprom_read16_swp(np, here + 1);
8202		if (err < 0)
8203			return;
8204
8205		here = start + offset + 3;
8206		end = start + offset + err;
8207
8208		offset += err;
8209
8210		err = niu_pci_vpd_scan_props(np, here, end);
8211		if (err < 0 || err == 1)
8212			return;
8213	}
8214}
8215
8216/* ESPC_PIO_EN_ENABLE must be set */
8217static u32 niu_pci_vpd_offset(struct niu *np)
8218{
8219	u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
8220	int err;
8221
8222	while (start < end) {
8223		ret = start;
8224
8225		/* ROM header signature?  */
8226		err = niu_pci_eeprom_read16(np, start +  0);
8227		if (err != 0x55aa)
8228			return 0;
8229
8230		/* Apply offset to PCI data structure.  */
8231		err = niu_pci_eeprom_read16(np, start + 23);
8232		if (err < 0)
8233			return 0;
8234		start += err;
8235
8236		/* Check for "PCIR" signature.  */
8237		err = niu_pci_eeprom_read16(np, start +  0);
8238		if (err != 0x5043)
8239			return 0;
8240		err = niu_pci_eeprom_read16(np, start +  2);
8241		if (err != 0x4952)
8242			return 0;
8243
8244		/* Check for OBP image type.  */
8245		err = niu_pci_eeprom_read(np, start + 20);
8246		if (err < 0)
8247			return 0;
8248		if (err != 0x01) {
8249			err = niu_pci_eeprom_read(np, ret + 2);
8250			if (err < 0)
8251				return 0;
8252
8253			start = ret + (err * 512);
8254			continue;
8255		}
8256
8257		err = niu_pci_eeprom_read16_swp(np, start + 8);
8258		if (err < 0)
8259			return err;
8260		ret += err;
8261
8262		err = niu_pci_eeprom_read(np, ret + 0);
8263		if (err != 0x82)
8264			return 0;
8265
8266		return ret;
8267	}
8268
8269	return 0;
8270}
8271
8272static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
8273{
8274	if (!strcmp(phy_prop, "mif")) {
8275		/* 1G copper, MII */
8276		np->flags &= ~(NIU_FLAGS_FIBER |
8277			       NIU_FLAGS_10G);
8278		np->mac_xcvr = MAC_XCVR_MII;
8279	} else if (!strcmp(phy_prop, "xgf")) {
8280		/* 10G fiber, XPCS */
8281		np->flags |= (NIU_FLAGS_10G |
8282			      NIU_FLAGS_FIBER);
8283		np->mac_xcvr = MAC_XCVR_XPCS;
8284	} else if (!strcmp(phy_prop, "pcs")) {
8285		/* 1G fiber, PCS */
8286		np->flags &= ~NIU_FLAGS_10G;
8287		np->flags |= NIU_FLAGS_FIBER;
8288		np->mac_xcvr = MAC_XCVR_PCS;
8289	} else if (!strcmp(phy_prop, "xgc")) {
8290		/* 10G copper, XPCS */
8291		np->flags |= NIU_FLAGS_10G;
8292		np->flags &= ~NIU_FLAGS_FIBER;
8293		np->mac_xcvr = MAC_XCVR_XPCS;
8294	} else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
8295		/* 10G Serdes or 1G Serdes, default to 10G */
8296		np->flags |= NIU_FLAGS_10G;
8297		np->flags &= ~NIU_FLAGS_FIBER;
8298		np->flags |= NIU_FLAGS_XCVR_SERDES;
8299		np->mac_xcvr = MAC_XCVR_XPCS;
8300	} else {
8301		return -EINVAL;
8302	}
8303	return 0;
8304}
8305
8306static int niu_pci_vpd_get_nports(struct niu *np)
8307{
8308	int ports = 0;
8309
8310	if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
8311	    (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
8312	    (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
8313	    (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
8314	    (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
8315		ports = 4;
8316	} else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
8317		   (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
8318		   (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
8319		   (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
8320		ports = 2;
8321	}
8322
8323	return ports;
8324}
8325
8326static void niu_pci_vpd_validate(struct niu *np)
8327{
8328	struct net_device *dev = np->dev;
8329	struct niu_vpd *vpd = &np->vpd;
8330	u8 val8;
8331
8332	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
8333		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
8334
8335		np->flags &= ~NIU_FLAGS_VPD_VALID;
8336		return;
8337	}
8338
8339	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8340	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8341		np->flags |= NIU_FLAGS_10G;
8342		np->flags &= ~NIU_FLAGS_FIBER;
8343		np->flags |= NIU_FLAGS_XCVR_SERDES;
8344		np->mac_xcvr = MAC_XCVR_PCS;
8345		if (np->port > 1) {
8346			np->flags |= NIU_FLAGS_FIBER;
8347			np->flags &= ~NIU_FLAGS_10G;
8348		}
8349		if (np->flags & NIU_FLAGS_10G)
8350			np->mac_xcvr = MAC_XCVR_XPCS;
8351	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8352		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
8353			      NIU_FLAGS_HOTPLUG_PHY);
8354	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
8355		dev_err(np->device, "Illegal phy string [%s]\n",
8356			np->vpd.phy_type);
8357		dev_err(np->device, "Falling back to SPROM\n");
8358		np->flags &= ~NIU_FLAGS_VPD_VALID;
8359		return;
8360	}
8361
8362	memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
8363
8364	val8 = dev->dev_addr[5];
8365	dev->dev_addr[5] += np->port;
8366	if (dev->dev_addr[5] < val8)
8367		dev->dev_addr[4]++;
8368}
8369
8370static int niu_pci_probe_sprom(struct niu *np)
8371{
8372	struct net_device *dev = np->dev;
8373	int len, i;
8374	u64 val, sum;
8375	u8 val8;
8376
8377	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
8378	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
8379	len = val / 4;
8380
8381	np->eeprom_len = len;
8382
8383	netif_printk(np, probe, KERN_DEBUG, np->dev,
8384		     "SPROM: Image size %llu\n", (unsigned long long)val);
8385
8386	sum = 0;
8387	for (i = 0; i < len; i++) {
8388		val = nr64(ESPC_NCR(i));
8389		sum += (val >>  0) & 0xff;
8390		sum += (val >>  8) & 0xff;
8391		sum += (val >> 16) & 0xff;
8392		sum += (val >> 24) & 0xff;
8393	}
8394	netif_printk(np, probe, KERN_DEBUG, np->dev,
8395		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
8396	if ((sum & 0xff) != 0xab) {
8397		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
8398		return -EINVAL;
8399	}
8400
8401	val = nr64(ESPC_PHY_TYPE);
8402	switch (np->port) {
8403	case 0:
8404		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
8405			ESPC_PHY_TYPE_PORT0_SHIFT;
8406		break;
8407	case 1:
8408		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
8409			ESPC_PHY_TYPE_PORT1_SHIFT;
8410		break;
8411	case 2:
8412		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
8413			ESPC_PHY_TYPE_PORT2_SHIFT;
8414		break;
8415	case 3:
8416		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
8417			ESPC_PHY_TYPE_PORT3_SHIFT;
8418		break;
8419	default:
8420		dev_err(np->device, "Bogus port number %u\n",
8421			np->port);
8422		return -EINVAL;
8423	}
8424	netif_printk(np, probe, KERN_DEBUG, np->dev,
8425		     "SPROM: PHY type %x\n", val8);
8426
8427	switch (val8) {
8428	case ESPC_PHY_TYPE_1G_COPPER:
8429		/* 1G copper, MII */
8430		np->flags &= ~(NIU_FLAGS_FIBER |
8431			       NIU_FLAGS_10G);
8432		np->mac_xcvr = MAC_XCVR_MII;
8433		break;
8434
8435	case ESPC_PHY_TYPE_1G_FIBER:
8436		/* 1G fiber, PCS */
8437		np->flags &= ~NIU_FLAGS_10G;
8438		np->flags |= NIU_FLAGS_FIBER;
8439		np->mac_xcvr = MAC_XCVR_PCS;
8440		break;
8441
8442	case ESPC_PHY_TYPE_10G_COPPER:
8443		/* 10G copper, XPCS */
8444		np->flags |= NIU_FLAGS_10G;
8445		np->flags &= ~NIU_FLAGS_FIBER;
8446		np->mac_xcvr = MAC_XCVR_XPCS;
8447		break;
8448
8449	case ESPC_PHY_TYPE_10G_FIBER:
8450		/* 10G fiber, XPCS */
8451		np->flags |= (NIU_FLAGS_10G |
8452			      NIU_FLAGS_FIBER);
8453		np->mac_xcvr = MAC_XCVR_XPCS;
8454		break;
8455
8456	default:
8457		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
8458		return -EINVAL;
8459	}
8460
8461	val = nr64(ESPC_MAC_ADDR0);
8462	netif_printk(np, probe, KERN_DEBUG, np->dev,
8463		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
8464	dev->dev_addr[0] = (val >>  0) & 0xff;
8465	dev->dev_addr[1] = (val >>  8) & 0xff;
8466	dev->dev_addr[2] = (val >> 16) & 0xff;
8467	dev->dev_addr[3] = (val >> 24) & 0xff;
8468
8469	val = nr64(ESPC_MAC_ADDR1);
8470	netif_printk(np, probe, KERN_DEBUG, np->dev,
8471		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
8472	dev->dev_addr[4] = (val >>  0) & 0xff;
8473	dev->dev_addr[5] = (val >>  8) & 0xff;
8474
8475	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8476		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
8477			dev->dev_addr);
8478		return -EINVAL;
8479	}
8480
8481	val8 = dev->dev_addr[5];
8482	dev->dev_addr[5] += np->port;
8483	if (dev->dev_addr[5] < val8)
8484		dev->dev_addr[4]++;
8485
8486	val = nr64(ESPC_MOD_STR_LEN);
8487	netif_printk(np, probe, KERN_DEBUG, np->dev,
8488		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8489	if (val >= 8 * 4)
8490		return -EINVAL;
8491
8492	for (i = 0; i < val; i += 4) {
8493		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
8494
8495		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
8496		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
8497		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
8498		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
8499	}
8500	np->vpd.model[val] = '\0';
8501
8502	val = nr64(ESPC_BD_MOD_STR_LEN);
8503	netif_printk(np, probe, KERN_DEBUG, np->dev,
8504		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
8505	if (val >= 4 * 4)
8506		return -EINVAL;
8507
8508	for (i = 0; i < val; i += 4) {
8509		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
8510
8511		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
8512		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
8513		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
8514		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
8515	}
8516	np->vpd.board_model[val] = '\0';
8517
8518	np->vpd.mac_num =
8519		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
8520	netif_printk(np, probe, KERN_DEBUG, np->dev,
8521		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
8522
8523	return 0;
8524}
8525
8526static int niu_get_and_validate_port(struct niu *np)
8527{
8528	struct niu_parent *parent = np->parent;
8529
8530	if (np->port <= 1)
8531		np->flags |= NIU_FLAGS_XMAC;
8532
8533	if (!parent->num_ports) {
8534		if (parent->plat_type == PLAT_TYPE_NIU) {
8535			parent->num_ports = 2;
8536		} else {
8537			parent->num_ports = niu_pci_vpd_get_nports(np);
8538			if (!parent->num_ports) {
8539				/* Fall back to SPROM as last resort.
8540				 * This will fail on most cards.
8541				 */
8542				parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
8543					ESPC_NUM_PORTS_MACS_VAL;
8544
8545				/* All of the current probing methods fail on
8546				 * Maramba on-board parts.
8547				 */
8548				if (!parent->num_ports)
8549					parent->num_ports = 4;
8550			}
8551		}
8552	}
8553
8554	if (np->port >= parent->num_ports)
8555		return -ENODEV;
8556
8557	return 0;
8558}
8559
8560static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
8561		      int dev_id_1, int dev_id_2, u8 phy_port, int type)
8562{
8563	u32 id = (dev_id_1 << 16) | dev_id_2;
8564	u8 idx;
8565
8566	if (dev_id_1 < 0 || dev_id_2 < 0)
8567		return 0;
8568	if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
8569		/* Because of the NIU_PHY_ID_MASK being applied, the 8704
8570		 * test covers the 8706 as well.
8571		 */
8572		if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
8573		    ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
8574			return 0;
8575	} else {
8576		if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
8577			return 0;
8578	}
8579
8580	pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
8581		parent->index, id,
8582		type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
8583		type == PHY_TYPE_PCS ? "PCS" : "MII",
8584		phy_port);
8585
8586	if (p->cur[type] >= NIU_MAX_PORTS) {
8587		pr_err("Too many PHY ports\n");
8588		return -EINVAL;
8589	}
8590	idx = p->cur[type];
8591	p->phy_id[type][idx] = id;
8592	p->phy_port[type][idx] = phy_port;
8593	p->cur[type] = idx + 1;
8594	return 0;
8595}
8596
8597static int port_has_10g(struct phy_probe_info *p, int port)
8598{
8599	int i;
8600
8601	for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
8602		if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
8603			return 1;
8604	}
8605	for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
8606		if (p->phy_port[PHY_TYPE_PCS][i] == port)
8607			return 1;
8608	}
8609
8610	return 0;
8611}
8612
8613static int count_10g_ports(struct phy_probe_info *p, int *lowest)
8614{
8615	int port, cnt;
8616
8617	cnt = 0;
8618	*lowest = 32;
8619	for (port = 8; port < 32; port++) {
8620		if (port_has_10g(p, port)) {
8621			if (!cnt)
8622				*lowest = port;
8623			cnt++;
8624		}
8625	}
8626
8627	return cnt;
8628}
8629
8630static int count_1g_ports(struct phy_probe_info *p, int *lowest)
8631{
8632	*lowest = 32;
8633	if (p->cur[PHY_TYPE_MII])
8634		*lowest = p->phy_port[PHY_TYPE_MII][0];
8635
8636	return p->cur[PHY_TYPE_MII];
8637}
8638
8639static void niu_n2_divide_channels(struct niu_parent *parent)
8640{
8641	int num_ports = parent->num_ports;
8642	int i;
8643
8644	for (i = 0; i < num_ports; i++) {
8645		parent->rxchan_per_port[i] = (16 / num_ports);
8646		parent->txchan_per_port[i] = (16 / num_ports);
8647
8648		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8649			parent->index, i,
8650			parent->rxchan_per_port[i],
8651			parent->txchan_per_port[i]);
8652	}
8653}
8654
8655static void niu_divide_channels(struct niu_parent *parent,
8656				int num_10g, int num_1g)
8657{
8658	int num_ports = parent->num_ports;
8659	int rx_chans_per_10g, rx_chans_per_1g;
8660	int tx_chans_per_10g, tx_chans_per_1g;
8661	int i, tot_rx, tot_tx;
8662
8663	if (!num_10g || !num_1g) {
8664		rx_chans_per_10g = rx_chans_per_1g =
8665			(NIU_NUM_RXCHAN / num_ports);
8666		tx_chans_per_10g = tx_chans_per_1g =
8667			(NIU_NUM_TXCHAN / num_ports);
8668	} else {
8669		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
8670		rx_chans_per_10g = (NIU_NUM_RXCHAN -
8671				    (rx_chans_per_1g * num_1g)) /
8672			num_10g;
8673
8674		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
8675		tx_chans_per_10g = (NIU_NUM_TXCHAN -
8676				    (tx_chans_per_1g * num_1g)) /
8677			num_10g;
8678	}
8679
8680	tot_rx = tot_tx = 0;
8681	for (i = 0; i < num_ports; i++) {
8682		int type = phy_decode(parent->port_phy, i);
8683
8684		if (type == PORT_TYPE_10G) {
8685			parent->rxchan_per_port[i] = rx_chans_per_10g;
8686			parent->txchan_per_port[i] = tx_chans_per_10g;
8687		} else {
8688			parent->rxchan_per_port[i] = rx_chans_per_1g;
8689			parent->txchan_per_port[i] = tx_chans_per_1g;
8690		}
8691		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
8692			parent->index, i,
8693			parent->rxchan_per_port[i],
8694			parent->txchan_per_port[i]);
8695		tot_rx += parent->rxchan_per_port[i];
8696		tot_tx += parent->txchan_per_port[i];
8697	}
8698
8699	if (tot_rx > NIU_NUM_RXCHAN) {
8700		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
8701		       parent->index, tot_rx);
8702		for (i = 0; i < num_ports; i++)
8703			parent->rxchan_per_port[i] = 1;
8704	}
8705	if (tot_tx > NIU_NUM_TXCHAN) {
8706		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
8707		       parent->index, tot_tx);
8708		for (i = 0; i < num_ports; i++)
8709			parent->txchan_per_port[i] = 1;
8710	}
8711	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
8712		pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
8713			parent->index, tot_rx, tot_tx);
8714	}
8715}
8716
8717static void niu_divide_rdc_groups(struct niu_parent *parent,
8718				  int num_10g, int num_1g)
8719{
8720	int i, num_ports = parent->num_ports;
8721	int rdc_group, rdc_groups_per_port;
8722	int rdc_channel_base;
8723
8724	rdc_group = 0;
8725	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
8726
8727	rdc_channel_base = 0;
8728
8729	for (i = 0; i < num_ports; i++) {
8730		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
8731		int grp, num_channels = parent->rxchan_per_port[i];
8732		int this_channel_offset;
8733
8734		tp->first_table_num = rdc_group;
8735		tp->num_tables = rdc_groups_per_port;
8736		this_channel_offset = 0;
8737		for (grp = 0; grp < tp->num_tables; grp++) {
8738			struct rdc_table *rt = &tp->tables[grp];
8739			int slot;
8740
8741			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
8742				parent->index, i, tp->first_table_num + grp);
8743			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
8744				rt->rxdma_channel[slot] =
8745					rdc_channel_base + this_channel_offset;
8746
8747				pr_cont("%d ", rt->rxdma_channel[slot]);
8748
8749				if (++this_channel_offset == num_channels)
8750					this_channel_offset = 0;
8751			}
8752			pr_cont("]\n");
8753		}
8754
8755		parent->rdc_default[i] = rdc_channel_base;
8756
8757		rdc_channel_base += num_channels;
8758		rdc_group += rdc_groups_per_port;
8759	}
8760}
8761
8762static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
8763			       struct phy_probe_info *info)
8764{
8765	unsigned long flags;
8766	int port, err;
8767
8768	memset(info, 0, sizeof(*info));
8769
8770	/* Port 0 to 7 are reserved for onboard Serdes, probe the rest.  */
8771	niu_lock_parent(np, flags);
8772	err = 0;
8773	for (port = 8; port < 32; port++) {
8774		int dev_id_1, dev_id_2;
8775
8776		dev_id_1 = mdio_read(np, port,
8777				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
8778		dev_id_2 = mdio_read(np, port,
8779				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
8780		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8781				 PHY_TYPE_PMA_PMD);
8782		if (err)
8783			break;
8784		dev_id_1 = mdio_read(np, port,
8785				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
8786		dev_id_2 = mdio_read(np, port,
8787				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
8788		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8789				 PHY_TYPE_PCS);
8790		if (err)
8791			break;
8792		dev_id_1 = mii_read(np, port, MII_PHYSID1);
8793		dev_id_2 = mii_read(np, port, MII_PHYSID2);
8794		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
8795				 PHY_TYPE_MII);
8796		if (err)
8797			break;
8798	}
8799	niu_unlock_parent(np, flags);
8800
8801	return err;
8802}
8803
8804static int walk_phys(struct niu *np, struct niu_parent *parent)
8805{
8806	struct phy_probe_info *info = &parent->phy_probe_info;
8807	int lowest_10g, lowest_1g;
8808	int num_10g, num_1g;
8809	u32 val;
8810	int err;
8811
8812	num_10g = num_1g = 0;
8813
8814	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
8815	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
8816		num_10g = 0;
8817		num_1g = 2;
8818		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
8819		parent->num_ports = 4;
8820		val = (phy_encode(PORT_TYPE_1G, 0) |
8821		       phy_encode(PORT_TYPE_1G, 1) |
8822		       phy_encode(PORT_TYPE_1G, 2) |
8823		       phy_encode(PORT_TYPE_1G, 3));
8824	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
8825		num_10g = 2;
8826		num_1g = 0;
8827		parent->num_ports = 2;
8828		val = (phy_encode(PORT_TYPE_10G, 0) |
8829		       phy_encode(PORT_TYPE_10G, 1));
8830	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
8831		   (parent->plat_type == PLAT_TYPE_NIU)) {
8832		/* this is the Monza case */
8833		if (np->flags & NIU_FLAGS_10G) {
8834			val = (phy_encode(PORT_TYPE_10G, 0) |
8835			       phy_encode(PORT_TYPE_10G, 1));
8836		} else {
8837			val = (phy_encode(PORT_TYPE_1G, 0) |
8838			       phy_encode(PORT_TYPE_1G, 1));
8839		}
8840	} else {
8841		err = fill_phy_probe_info(np, parent, info);
8842		if (err)
8843			return err;
8844
8845		num_10g = count_10g_ports(info, &lowest_10g);
8846		num_1g = count_1g_ports(info, &lowest_1g);
8847
8848		switch ((num_10g << 4) | num_1g) {
8849		case 0x24:
8850			if (lowest_1g == 10)
8851				parent->plat_type = PLAT_TYPE_VF_P0;
8852			else if (lowest_1g == 26)
8853				parent->plat_type = PLAT_TYPE_VF_P1;
8854			else
8855				goto unknown_vg_1g_port;
8856
8857			/* fallthru */
8858		case 0x22:
8859			val = (phy_encode(PORT_TYPE_10G, 0) |
8860			       phy_encode(PORT_TYPE_10G, 1) |
8861			       phy_encode(PORT_TYPE_1G, 2) |
8862			       phy_encode(PORT_TYPE_1G, 3));
8863			break;
8864
8865		case 0x20:
8866			val = (phy_encode(PORT_TYPE_10G, 0) |
8867			       phy_encode(PORT_TYPE_10G, 1));
8868			break;
8869
8870		case 0x10:
8871			val = phy_encode(PORT_TYPE_10G, np->port);
8872			break;
8873
8874		case 0x14:
8875			if (lowest_1g == 10)
8876				parent->plat_type = PLAT_TYPE_VF_P0;
8877			else if (lowest_1g == 26)
8878				parent->plat_type = PLAT_TYPE_VF_P1;
8879			else
8880				goto unknown_vg_1g_port;
8881
8882			/* fallthru */
8883		case 0x13:
8884			if ((lowest_10g & 0x7) == 0)
8885				val = (phy_encode(PORT_TYPE_10G, 0) |
8886				       phy_encode(PORT_TYPE_1G, 1) |
8887				       phy_encode(PORT_TYPE_1G, 2) |
8888				       phy_encode(PORT_TYPE_1G, 3));
8889			else
8890				val = (phy_encode(PORT_TYPE_1G, 0) |
8891				       phy_encode(PORT_TYPE_10G, 1) |
8892				       phy_encode(PORT_TYPE_1G, 2) |
8893				       phy_encode(PORT_TYPE_1G, 3));
8894			break;
8895
8896		case 0x04:
8897			if (lowest_1g == 10)
8898				parent->plat_type = PLAT_TYPE_VF_P0;
8899			else if (lowest_1g == 26)
8900				parent->plat_type = PLAT_TYPE_VF_P1;
8901			else
8902				goto unknown_vg_1g_port;
8903
8904			val = (phy_encode(PORT_TYPE_1G, 0) |
8905			       phy_encode(PORT_TYPE_1G, 1) |
8906			       phy_encode(PORT_TYPE_1G, 2) |
8907			       phy_encode(PORT_TYPE_1G, 3));
8908			break;
8909
8910		default:
8911			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
8912			       num_10g, num_1g);
8913			return -EINVAL;
8914		}
8915	}
8916
8917	parent->port_phy = val;
8918
8919	if (parent->plat_type == PLAT_TYPE_NIU)
8920		niu_n2_divide_channels(parent);
8921	else
8922		niu_divide_channels(parent, num_10g, num_1g);
8923
8924	niu_divide_rdc_groups(parent, num_10g, num_1g);
8925
8926	return 0;
8927
8928unknown_vg_1g_port:
8929	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
8930	return -EINVAL;
8931}
8932
8933static int niu_probe_ports(struct niu *np)
8934{
8935	struct niu_parent *parent = np->parent;
8936	int err, i;
8937
8938	if (parent->port_phy == PORT_PHY_UNKNOWN) {
8939		err = walk_phys(np, parent);
8940		if (err)
8941			return err;
8942
8943		niu_set_ldg_timer_res(np, 2);
8944		for (i = 0; i <= LDN_MAX; i++)
8945			niu_ldn_irq_enable(np, i, 0);
8946	}
8947
8948	if (parent->port_phy == PORT_PHY_INVALID)
8949		return -EINVAL;
8950
8951	return 0;
8952}
8953
8954static int niu_classifier_swstate_init(struct niu *np)
8955{
8956	struct niu_classifier *cp = &np->clas;
8957
8958	cp->tcam_top = (u16) np->port;
8959	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
8960	cp->h1_init = 0xffffffff;
8961	cp->h2_init = 0xffff;
8962
8963	return fflp_early_init(np);
8964}
8965
8966static void niu_link_config_init(struct niu *np)
8967{
8968	struct niu_link_config *lp = &np->link_config;
8969
8970	lp->advertising = (ADVERTISED_10baseT_Half |
8971			   ADVERTISED_10baseT_Full |
8972			   ADVERTISED_100baseT_Half |
8973			   ADVERTISED_100baseT_Full |
8974			   ADVERTISED_1000baseT_Half |
8975			   ADVERTISED_1000baseT_Full |
8976			   ADVERTISED_10000baseT_Full |
8977			   ADVERTISED_Autoneg);
8978	lp->speed = lp->active_speed = SPEED_INVALID;
8979	lp->duplex = DUPLEX_FULL;
8980	lp->active_duplex = DUPLEX_INVALID;
8981	lp->autoneg = 1;
8982#if 0
8983	lp->loopback_mode = LOOPBACK_MAC;
8984	lp->active_speed = SPEED_10000;
8985	lp->active_duplex = DUPLEX_FULL;
8986#else
8987	lp->loopback_mode = LOOPBACK_DISABLED;
8988#endif
8989}
8990
8991static int niu_init_mac_ipp_pcs_base(struct niu *np)
8992{
8993	switch (np->port) {
8994	case 0:
8995		np->mac_regs = np->regs + XMAC_PORT0_OFF;
8996		np->ipp_off  = 0x00000;
8997		np->pcs_off  = 0x04000;
8998		np->xpcs_off = 0x02000;
8999		break;
9000
9001	case 1:
9002		np->mac_regs = np->regs + XMAC_PORT1_OFF;
9003		np->ipp_off  = 0x08000;
9004		np->pcs_off  = 0x0a000;
9005		np->xpcs_off = 0x08000;
9006		break;
9007
9008	case 2:
9009		np->mac_regs = np->regs + BMAC_PORT2_OFF;
9010		np->ipp_off  = 0x04000;
9011		np->pcs_off  = 0x0e000;
9012		np->xpcs_off = ~0UL;
9013		break;
9014
9015	case 3:
9016		np->mac_regs = np->regs + BMAC_PORT3_OFF;
9017		np->ipp_off  = 0x0c000;
9018		np->pcs_off  = 0x12000;
9019		np->xpcs_off = ~0UL;
9020		break;
9021
9022	default:
9023		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
9024		return -EINVAL;
9025	}
9026
9027	return 0;
9028}
9029
9030static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
9031{
9032	struct msix_entry msi_vec[NIU_NUM_LDG];
9033	struct niu_parent *parent = np->parent;
9034	struct pci_dev *pdev = np->pdev;
9035	int i, num_irqs;
9036	u8 first_ldg;
9037
9038	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
9039	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
9040		ldg_num_map[i] = first_ldg + i;
9041
9042	num_irqs = (parent->rxchan_per_port[np->port] +
9043		    parent->txchan_per_port[np->port] +
9044		    (np->port == 0 ? 3 : 1));
9045	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
9046
9047	for (i = 0; i < num_irqs; i++) {
9048		msi_vec[i].vector = 0;
9049		msi_vec[i].entry = i;
9050	}
9051
9052	num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
9053	if (num_irqs < 0) {
9054		np->flags &= ~NIU_FLAGS_MSIX;
9055		return;
9056	}
9057
9058	np->flags |= NIU_FLAGS_MSIX;
9059	for (i = 0; i < num_irqs; i++)
9060		np->ldg[i].irq = msi_vec[i].vector;
9061	np->num_ldg = num_irqs;
9062}
9063
9064static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
9065{
9066#ifdef CONFIG_SPARC64
9067	struct platform_device *op = np->op;
9068	const u32 *int_prop;
9069	int i;
9070
9071	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
9072	if (!int_prop)
9073		return -ENODEV;
9074
9075	for (i = 0; i < op->archdata.num_irqs; i++) {
9076		ldg_num_map[i] = int_prop[i];
9077		np->ldg[i].irq = op->archdata.irqs[i];
9078	}
9079
9080	np->num_ldg = op->archdata.num_irqs;
9081
9082	return 0;
9083#else
9084	return -EINVAL;
9085#endif
9086}
9087
9088static int niu_ldg_init(struct niu *np)
9089{
9090	struct niu_parent *parent = np->parent;
9091	u8 ldg_num_map[NIU_NUM_LDG];
9092	int first_chan, num_chan;
9093	int i, err, ldg_rotor;
9094	u8 port;
9095
9096	np->num_ldg = 1;
9097	np->ldg[0].irq = np->dev->irq;
9098	if (parent->plat_type == PLAT_TYPE_NIU) {
9099		err = niu_n2_irq_init(np, ldg_num_map);
9100		if (err)
9101			return err;
9102	} else
9103		niu_try_msix(np, ldg_num_map);
9104
9105	port = np->port;
9106	for (i = 0; i < np->num_ldg; i++) {
9107		struct niu_ldg *lp = &np->ldg[i];
9108
9109		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
9110
9111		lp->np = np;
9112		lp->ldg_num = ldg_num_map[i];
9113		lp->timer = 2; /* XXX */
9114
9115		/* On N2 NIU the firmware has setup the SID mappings so they go
9116		 * to the correct values that will route the LDG to the proper
9117		 * interrupt in the NCU interrupt table.
9118		 */
9119		if (np->parent->plat_type != PLAT_TYPE_NIU) {
9120			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
9121			if (err)
9122				return err;
9123		}
9124	}
9125
9126	/* We adopt the LDG assignment ordering used by the N2 NIU
9127	 * 'interrupt' properties because that simplifies a lot of
9128	 * things.  This ordering is:
9129	 *
9130	 *	MAC
9131	 *	MIF	(if port zero)
9132	 *	SYSERR	(if port zero)
9133	 *	RX channels
9134	 *	TX channels
9135	 */
9136
9137	ldg_rotor = 0;
9138
9139	err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
9140				  LDN_MAC(port));
9141	if (err)
9142		return err;
9143
9144	ldg_rotor++;
9145	if (ldg_rotor == np->num_ldg)
9146		ldg_rotor = 0;
9147
9148	if (port == 0) {
9149		err = niu_ldg_assign_ldn(np, parent,
9150					 ldg_num_map[ldg_rotor],
9151					 LDN_MIF);
9152		if (err)
9153			return err;
9154
9155		ldg_rotor++;
9156		if (ldg_rotor == np->num_ldg)
9157			ldg_rotor = 0;
9158
9159		err = niu_ldg_assign_ldn(np, parent,
9160					 ldg_num_map[ldg_rotor],
9161					 LDN_DEVICE_ERROR);
9162		if (err)
9163			return err;
9164
9165		ldg_rotor++;
9166		if (ldg_rotor == np->num_ldg)
9167			ldg_rotor = 0;
9168
9169	}
9170
9171	first_chan = 0;
9172	for (i = 0; i < port; i++)
9173		first_chan += parent->rxchan_per_port[i];
9174	num_chan = parent->rxchan_per_port[port];
9175
9176	for (i = first_chan; i < (first_chan + num_chan); i++) {
9177		err = niu_ldg_assign_ldn(np, parent,
9178					 ldg_num_map[ldg_rotor],
9179					 LDN_RXDMA(i));
9180		if (err)
9181			return err;
9182		ldg_rotor++;
9183		if (ldg_rotor == np->num_ldg)
9184			ldg_rotor = 0;
9185	}
9186
9187	first_chan = 0;
9188	for (i = 0; i < port; i++)
9189		first_chan += parent->txchan_per_port[i];
9190	num_chan = parent->txchan_per_port[port];
9191	for (i = first_chan; i < (first_chan + num_chan); i++) {
9192		err = niu_ldg_assign_ldn(np, parent,
9193					 ldg_num_map[ldg_rotor],
9194					 LDN_TXDMA(i));
9195		if (err)
9196			return err;
9197		ldg_rotor++;
9198		if (ldg_rotor == np->num_ldg)
9199			ldg_rotor = 0;
9200	}
9201
9202	return 0;
9203}
9204
9205static void niu_ldg_free(struct niu *np)
9206{
9207	if (np->flags & NIU_FLAGS_MSIX)
9208		pci_disable_msix(np->pdev);
9209}
9210
9211static int niu_get_of_props(struct niu *np)
9212{
9213#ifdef CONFIG_SPARC64
9214	struct net_device *dev = np->dev;
9215	struct device_node *dp;
9216	const char *phy_type;
9217	const u8 *mac_addr;
9218	const char *model;
9219	int prop_len;
9220
9221	if (np->parent->plat_type == PLAT_TYPE_NIU)
9222		dp = np->op->dev.of_node;
9223	else
9224		dp = pci_device_to_OF_node(np->pdev);
9225
9226	phy_type = of_get_property(dp, "phy-type", &prop_len);
9227	if (!phy_type) {
9228		netdev_err(dev, "%s: OF node lacks phy-type property\n",
9229			   dp->full_name);
9230		return -EINVAL;
9231	}
9232
9233	if (!strcmp(phy_type, "none"))
9234		return -ENODEV;
9235
9236	strcpy(np->vpd.phy_type, phy_type);
9237
9238	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
9239		netdev_err(dev, "%s: Illegal phy string [%s]\n",
9240			   dp->full_name, np->vpd.phy_type);
9241		return -EINVAL;
9242	}
9243
9244	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
9245	if (!mac_addr) {
9246		netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
9247			   dp->full_name);
9248		return -EINVAL;
9249	}
9250	if (prop_len != dev->addr_len) {
9251		netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
9252			   dp->full_name, prop_len);
9253	}
9254	memcpy(dev->dev_addr, mac_addr, dev->addr_len);
9255	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
9256		netdev_err(dev, "%s: OF MAC address is invalid\n",
9257			   dp->full_name);
9258		netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr);
9259		return -EINVAL;
9260	}
9261
9262	model = of_get_property(dp, "model", &prop_len);
9263
9264	if (model)
9265		strcpy(np->vpd.model, model);
9266
9267	if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
9268		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
9269			NIU_FLAGS_HOTPLUG_PHY);
9270	}
9271
9272	return 0;
9273#else
9274	return -EINVAL;
9275#endif
9276}
9277
9278static int niu_get_invariants(struct niu *np)
9279{
9280	int err, have_props;
9281	u32 offset;
9282
9283	err = niu_get_of_props(np);
9284	if (err == -ENODEV)
9285		return err;
9286
9287	have_props = !err;
9288
9289	err = niu_init_mac_ipp_pcs_base(np);
9290	if (err)
9291		return err;
9292
9293	if (have_props) {
9294		err = niu_get_and_validate_port(np);
9295		if (err)
9296			return err;
9297
9298	} else  {
9299		if (np->parent->plat_type == PLAT_TYPE_NIU)
9300			return -EINVAL;
9301
9302		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
9303		offset = niu_pci_vpd_offset(np);
9304		netif_printk(np, probe, KERN_DEBUG, np->dev,
9305			     "%s() VPD offset [%08x]\n", __func__, offset);
9306		if (offset)
9307			niu_pci_vpd_fetch(np, offset);
9308		nw64(ESPC_PIO_EN, 0);
9309
9310		if (np->flags & NIU_FLAGS_VPD_VALID) {
9311			niu_pci_vpd_validate(np);
9312			err = niu_get_and_validate_port(np);
9313			if (err)
9314				return err;
9315		}
9316
9317		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
9318			err = niu_get_and_validate_port(np);
9319			if (err)
9320				return err;
9321			err = niu_pci_probe_sprom(np);
9322			if (err)
9323				return err;
9324		}
9325	}
9326
9327	err = niu_probe_ports(np);
9328	if (err)
9329		return err;
9330
9331	niu_ldg_init(np);
9332
9333	niu_classifier_swstate_init(np);
9334	niu_link_config_init(np);
9335
9336	err = niu_determine_phy_disposition(np);
9337	if (!err)
9338		err = niu_init_link(np);
9339
9340	return err;
9341}
9342
9343static LIST_HEAD(niu_parent_list);
9344static DEFINE_MUTEX(niu_parent_lock);
9345static int niu_parent_index;
9346
9347static ssize_t show_port_phy(struct device *dev,
9348			     struct device_attribute *attr, char *buf)
9349{
9350	struct platform_device *plat_dev = to_platform_device(dev);
9351	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9352	u32 port_phy = p->port_phy;
9353	char *orig_buf = buf;
9354	int i;
9355
9356	if (port_phy == PORT_PHY_UNKNOWN ||
9357	    port_phy == PORT_PHY_INVALID)
9358		return 0;
9359
9360	for (i = 0; i < p->num_ports; i++) {
9361		const char *type_str;
9362		int type;
9363
9364		type = phy_decode(port_phy, i);
9365		if (type == PORT_TYPE_10G)
9366			type_str = "10G";
9367		else
9368			type_str = "1G";
9369		buf += sprintf(buf,
9370			       (i == 0) ? "%s" : " %s",
9371			       type_str);
9372	}
9373	buf += sprintf(buf, "\n");
9374	return buf - orig_buf;
9375}
9376
9377static ssize_t show_plat_type(struct device *dev,
9378			      struct device_attribute *attr, char *buf)
9379{
9380	struct platform_device *plat_dev = to_platform_device(dev);
9381	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9382	const char *type_str;
9383
9384	switch (p->plat_type) {
9385	case PLAT_TYPE_ATLAS:
9386		type_str = "atlas";
9387		break;
9388	case PLAT_TYPE_NIU:
9389		type_str = "niu";
9390		break;
9391	case PLAT_TYPE_VF_P0:
9392		type_str = "vf_p0";
9393		break;
9394	case PLAT_TYPE_VF_P1:
9395		type_str = "vf_p1";
9396		break;
9397	default:
9398		type_str = "unknown";
9399		break;
9400	}
9401
9402	return sprintf(buf, "%s\n", type_str);
9403}
9404
9405static ssize_t __show_chan_per_port(struct device *dev,
9406				    struct device_attribute *attr, char *buf,
9407				    int rx)
9408{
9409	struct platform_device *plat_dev = to_platform_device(dev);
9410	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9411	char *orig_buf = buf;
9412	u8 *arr;
9413	int i;
9414
9415	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
9416
9417	for (i = 0; i < p->num_ports; i++) {
9418		buf += sprintf(buf,
9419			       (i == 0) ? "%d" : " %d",
9420			       arr[i]);
9421	}
9422	buf += sprintf(buf, "\n");
9423
9424	return buf - orig_buf;
9425}
9426
9427static ssize_t show_rxchan_per_port(struct device *dev,
9428				    struct device_attribute *attr, char *buf)
9429{
9430	return __show_chan_per_port(dev, attr, buf, 1);
9431}
9432
9433static ssize_t show_txchan_per_port(struct device *dev,
9434				    struct device_attribute *attr, char *buf)
9435{
9436	return __show_chan_per_port(dev, attr, buf, 1);
9437}
9438
9439static ssize_t show_num_ports(struct device *dev,
9440			      struct device_attribute *attr, char *buf)
9441{
9442	struct platform_device *plat_dev = to_platform_device(dev);
9443	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
9444
9445	return sprintf(buf, "%d\n", p->num_ports);
9446}
9447
9448static struct device_attribute niu_parent_attributes[] = {
9449	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
9450	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
9451	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
9452	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
9453	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
9454	{}
9455};
9456
9457static struct niu_parent *niu_new_parent(struct niu *np,
9458					 union niu_parent_id *id, u8 ptype)
9459{
9460	struct platform_device *plat_dev;
9461	struct niu_parent *p;
9462	int i;
9463
9464	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
9465						   NULL, 0);
9466	if (IS_ERR(plat_dev))
9467		return NULL;
9468
9469	for (i = 0; niu_parent_attributes[i].attr.name; i++) {
9470		int err = device_create_file(&plat_dev->dev,
9471					     &niu_parent_attributes[i]);
9472		if (err)
9473			goto fail_unregister;
9474	}
9475
9476	p = kzalloc(sizeof(*p), GFP_KERNEL);
9477	if (!p)
9478		goto fail_unregister;
9479
9480	p->index = niu_parent_index++;
9481
9482	plat_dev->dev.platform_data = p;
9483	p->plat_dev = plat_dev;
9484
9485	memcpy(&p->id, id, sizeof(*id));
9486	p->plat_type = ptype;
9487	INIT_LIST_HEAD(&p->list);
9488	atomic_set(&p->refcnt, 0);
9489	list_add(&p->list, &niu_parent_list);
9490	spin_lock_init(&p->lock);
9491
9492	p->rxdma_clock_divider = 7500;
9493
9494	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
9495	if (p->plat_type == PLAT_TYPE_NIU)
9496		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
9497
9498	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
9499		int index = i - CLASS_CODE_USER_PROG1;
9500
9501		p->tcam_key[index] = TCAM_KEY_TSEL;
9502		p->flow_key[index] = (FLOW_KEY_IPSA |
9503				      FLOW_KEY_IPDA |
9504				      FLOW_KEY_PROTO |
9505				      (FLOW_KEY_L4_BYTE12 <<
9506				       FLOW_KEY_L4_0_SHIFT) |
9507				      (FLOW_KEY_L4_BYTE12 <<
9508				       FLOW_KEY_L4_1_SHIFT));
9509	}
9510
9511	for (i = 0; i < LDN_MAX + 1; i++)
9512		p->ldg_map[i] = LDG_INVALID;
9513
9514	return p;
9515
9516fail_unregister:
9517	platform_device_unregister(plat_dev);
9518	return NULL;
9519}
9520
9521static struct niu_parent *niu_get_parent(struct niu *np,
9522					 union niu_parent_id *id, u8 ptype)
9523{
9524	struct niu_parent *p, *tmp;
9525	int port = np->port;
9526
9527	mutex_lock(&niu_parent_lock);
9528	p = NULL;
9529	list_for_each_entry(tmp, &niu_parent_list, list) {
9530		if (!memcmp(id, &tmp->id, sizeof(*id))) {
9531			p = tmp;
9532			break;
9533		}
9534	}
9535	if (!p)
9536		p = niu_new_parent(np, id, ptype);
9537
9538	if (p) {
9539		char port_name[6];
9540		int err;
9541
9542		sprintf(port_name, "port%d", port);
9543		err = sysfs_create_link(&p->plat_dev->dev.kobj,
9544					&np->device->kobj,
9545					port_name);
9546		if (!err) {
9547			p->ports[port] = np;
9548			atomic_inc(&p->refcnt);
9549		}
9550	}
9551	mutex_unlock(&niu_parent_lock);
9552
9553	return p;
9554}
9555
9556static void niu_put_parent(struct niu *np)
9557{
9558	struct niu_parent *p = np->parent;
9559	u8 port = np->port;
9560	char port_name[6];
9561
9562	BUG_ON(!p || p->ports[port] != np);
9563
9564	netif_printk(np, probe, KERN_DEBUG, np->dev,
9565		     "%s() port[%u]\n", __func__, port);
9566
9567	sprintf(port_name, "port%d", port);
9568
9569	mutex_lock(&niu_parent_lock);
9570
9571	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
9572
9573	p->ports[port] = NULL;
9574	np->parent = NULL;
9575
9576	if (atomic_dec_and_test(&p->refcnt)) {
9577		list_del(&p->list);
9578		platform_device_unregister(p->plat_dev);
9579	}
9580
9581	mutex_unlock(&niu_parent_lock);
9582}
9583
9584static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
9585				    u64 *handle, gfp_t flag)
9586{
9587	dma_addr_t dh;
9588	void *ret;
9589
9590	ret = dma_alloc_coherent(dev, size, &dh, flag);
9591	if (ret)
9592		*handle = dh;
9593	return ret;
9594}
9595
9596static void niu_pci_free_coherent(struct device *dev, size_t size,
9597				  void *cpu_addr, u64 handle)
9598{
9599	dma_free_coherent(dev, size, cpu_addr, handle);
9600}
9601
9602static u64 niu_pci_map_page(struct device *dev, struct page *page,
9603			    unsigned long offset, size_t size,
9604			    enum dma_data_direction direction)
9605{
9606	return dma_map_page(dev, page, offset, size, direction);
9607}
9608
9609static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
9610			       size_t size, enum dma_data_direction direction)
9611{
9612	dma_unmap_page(dev, dma_address, size, direction);
9613}
9614
9615static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
9616			      size_t size,
9617			      enum dma_data_direction direction)
9618{
9619	return dma_map_single(dev, cpu_addr, size, direction);
9620}
9621
9622static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
9623				 size_t size,
9624				 enum dma_data_direction direction)
9625{
9626	dma_unmap_single(dev, dma_address, size, direction);
9627}
9628
9629static const struct niu_ops niu_pci_ops = {
9630	.alloc_coherent	= niu_pci_alloc_coherent,
9631	.free_coherent	= niu_pci_free_coherent,
9632	.map_page	= niu_pci_map_page,
9633	.unmap_page	= niu_pci_unmap_page,
9634	.map_single	= niu_pci_map_single,
9635	.unmap_single	= niu_pci_unmap_single,
9636};
9637
9638static void niu_driver_version(void)
9639{
9640	static int niu_version_printed;
9641
9642	if (niu_version_printed++ == 0)
9643		pr_info("%s", version);
9644}
9645
9646static struct net_device *niu_alloc_and_init(struct device *gen_dev,
9647					     struct pci_dev *pdev,
9648					     struct platform_device *op,
9649					     const struct niu_ops *ops, u8 port)
9650{
9651	struct net_device *dev;
9652	struct niu *np;
9653
9654	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
9655	if (!dev)
9656		return NULL;
9657
9658	SET_NETDEV_DEV(dev, gen_dev);
9659
9660	np = netdev_priv(dev);
9661	np->dev = dev;
9662	np->pdev = pdev;
9663	np->op = op;
9664	np->device = gen_dev;
9665	np->ops = ops;
9666
9667	np->msg_enable = niu_debug;
9668
9669	spin_lock_init(&np->lock);
9670	INIT_WORK(&np->reset_task, niu_reset_task);
9671
9672	np->port = port;
9673
9674	return dev;
9675}
9676
9677static const struct net_device_ops niu_netdev_ops = {
9678	.ndo_open		= niu_open,
9679	.ndo_stop		= niu_close,
9680	.ndo_start_xmit		= niu_start_xmit,
9681	.ndo_get_stats64	= niu_get_stats,
9682	.ndo_set_rx_mode	= niu_set_rx_mode,
9683	.ndo_validate_addr	= eth_validate_addr,
9684	.ndo_set_mac_address	= niu_set_mac_addr,
9685	.ndo_do_ioctl		= niu_ioctl,
9686	.ndo_tx_timeout		= niu_tx_timeout,
9687	.ndo_change_mtu		= niu_change_mtu,
9688};
9689
9690static void niu_assign_netdev_ops(struct net_device *dev)
9691{
9692	dev->netdev_ops = &niu_netdev_ops;
9693	dev->ethtool_ops = &niu_ethtool_ops;
9694	dev->watchdog_timeo = NIU_TX_TIMEOUT;
9695}
9696
9697static void niu_device_announce(struct niu *np)
9698{
9699	struct net_device *dev = np->dev;
9700
9701	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
9702
9703	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
9704		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9705				dev->name,
9706				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9707				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9708				(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
9709				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9710				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9711				np->vpd.phy_type);
9712	} else {
9713		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
9714				dev->name,
9715				(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
9716				(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
9717				(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
9718				 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
9719				  "COPPER")),
9720				(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
9721				 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
9722				np->vpd.phy_type);
9723	}
9724}
9725
9726static void niu_set_basic_features(struct net_device *dev)
9727{
9728	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
9729	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
9730}
9731
9732static int niu_pci_init_one(struct pci_dev *pdev,
9733			    const struct pci_device_id *ent)
9734{
9735	union niu_parent_id parent_id;
9736	struct net_device *dev;
9737	struct niu *np;
9738	int err;
9739	u64 dma_mask;
9740
9741	niu_driver_version();
9742
9743	err = pci_enable_device(pdev);
9744	if (err) {
9745		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9746		return err;
9747	}
9748
9749	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
9750	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9751		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
9752		err = -ENODEV;
9753		goto err_out_disable_pdev;
9754	}
9755
9756	err = pci_request_regions(pdev, DRV_MODULE_NAME);
9757	if (err) {
9758		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9759		goto err_out_disable_pdev;
9760	}
9761
9762	if (!pci_is_pcie(pdev)) {
9763		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
9764		err = -ENODEV;
9765		goto err_out_free_res;
9766	}
9767
9768	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
9769				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
9770	if (!dev) {
9771		err = -ENOMEM;
9772		goto err_out_free_res;
9773	}
9774	np = netdev_priv(dev);
9775
9776	memset(&parent_id, 0, sizeof(parent_id));
9777	parent_id.pci.domain = pci_domain_nr(pdev->bus);
9778	parent_id.pci.bus = pdev->bus->number;
9779	parent_id.pci.device = PCI_SLOT(pdev->devfn);
9780
9781	np->parent = niu_get_parent(np, &parent_id,
9782				    PLAT_TYPE_ATLAS);
9783	if (!np->parent) {
9784		err = -ENOMEM;
9785		goto err_out_free_dev;
9786	}
9787
9788	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
9789		PCI_EXP_DEVCTL_NOSNOOP_EN,
9790		PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
9791		PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
9792		PCI_EXP_DEVCTL_RELAX_EN);
9793
9794	dma_mask = DMA_BIT_MASK(44);
9795	err = pci_set_dma_mask(pdev, dma_mask);
9796	if (!err) {
9797		dev->features |= NETIF_F_HIGHDMA;
9798		err = pci_set_consistent_dma_mask(pdev, dma_mask);
9799		if (err) {
9800			dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
9801			goto err_out_release_parent;
9802		}
9803	}
9804	if (err) {
9805		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9806		if (err) {
9807			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
9808			goto err_out_release_parent;
9809		}
9810	}
9811
9812	niu_set_basic_features(dev);
9813
9814	dev->priv_flags |= IFF_UNICAST_FLT;
9815
9816	np->regs = pci_ioremap_bar(pdev, 0);
9817	if (!np->regs) {
9818		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9819		err = -ENOMEM;
9820		goto err_out_release_parent;
9821	}
9822
9823	pci_set_master(pdev);
9824	pci_save_state(pdev);
9825
9826	dev->irq = pdev->irq;
9827
9828	niu_assign_netdev_ops(dev);
9829
9830	err = niu_get_invariants(np);
9831	if (err) {
9832		if (err != -ENODEV)
9833			dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
9834		goto err_out_iounmap;
9835	}
9836
9837	err = register_netdev(dev);
9838	if (err) {
9839		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
9840		goto err_out_iounmap;
9841	}
9842
9843	pci_set_drvdata(pdev, dev);
9844
9845	niu_device_announce(np);
9846
9847	return 0;
9848
9849err_out_iounmap:
9850	if (np->regs) {
9851		iounmap(np->regs);
9852		np->regs = NULL;
9853	}
9854
9855err_out_release_parent:
9856	niu_put_parent(np);
9857
9858err_out_free_dev:
9859	free_netdev(dev);
9860
9861err_out_free_res:
9862	pci_release_regions(pdev);
9863
9864err_out_disable_pdev:
9865	pci_disable_device(pdev);
9866
9867	return err;
9868}
9869
9870static void niu_pci_remove_one(struct pci_dev *pdev)
9871{
9872	struct net_device *dev = pci_get_drvdata(pdev);
9873
9874	if (dev) {
9875		struct niu *np = netdev_priv(dev);
9876
9877		unregister_netdev(dev);
9878		if (np->regs) {
9879			iounmap(np->regs);
9880			np->regs = NULL;
9881		}
9882
9883		niu_ldg_free(np);
9884
9885		niu_put_parent(np);
9886
9887		free_netdev(dev);
9888		pci_release_regions(pdev);
9889		pci_disable_device(pdev);
9890	}
9891}
9892
9893static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
9894{
9895	struct net_device *dev = pci_get_drvdata(pdev);
9896	struct niu *np = netdev_priv(dev);
9897	unsigned long flags;
9898
9899	if (!netif_running(dev))
9900		return 0;
9901
9902	flush_work(&np->reset_task);
9903	niu_netif_stop(np);
9904
9905	del_timer_sync(&np->timer);
9906
9907	spin_lock_irqsave(&np->lock, flags);
9908	niu_enable_interrupts(np, 0);
9909	spin_unlock_irqrestore(&np->lock, flags);
9910
9911	netif_device_detach(dev);
9912
9913	spin_lock_irqsave(&np->lock, flags);
9914	niu_stop_hw(np);
9915	spin_unlock_irqrestore(&np->lock, flags);
9916
9917	pci_save_state(pdev);
9918
9919	return 0;
9920}
9921
9922static int niu_resume(struct pci_dev *pdev)
9923{
9924	struct net_device *dev = pci_get_drvdata(pdev);
9925	struct niu *np = netdev_priv(dev);
9926	unsigned long flags;
9927	int err;
9928
9929	if (!netif_running(dev))
9930		return 0;
9931
9932	pci_restore_state(pdev);
9933
9934	netif_device_attach(dev);
9935
9936	spin_lock_irqsave(&np->lock, flags);
9937
9938	err = niu_init_hw(np);
9939	if (!err) {
9940		np->timer.expires = jiffies + HZ;
9941		add_timer(&np->timer);
9942		niu_netif_start(np);
9943	}
9944
9945	spin_unlock_irqrestore(&np->lock, flags);
9946
9947	return err;
9948}
9949
9950static struct pci_driver niu_pci_driver = {
9951	.name		= DRV_MODULE_NAME,
9952	.id_table	= niu_pci_tbl,
9953	.probe		= niu_pci_init_one,
9954	.remove		= niu_pci_remove_one,
9955	.suspend	= niu_suspend,
9956	.resume		= niu_resume,
9957};
9958
9959#ifdef CONFIG_SPARC64
9960static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
9961				     u64 *dma_addr, gfp_t flag)
9962{
9963	unsigned long order = get_order(size);
9964	unsigned long page = __get_free_pages(flag, order);
9965
9966	if (page == 0UL)
9967		return NULL;
9968	memset((char *)page, 0, PAGE_SIZE << order);
9969	*dma_addr = __pa(page);
9970
9971	return (void *) page;
9972}
9973
9974static void niu_phys_free_coherent(struct device *dev, size_t size,
9975				   void *cpu_addr, u64 handle)
9976{
9977	unsigned long order = get_order(size);
9978
9979	free_pages((unsigned long) cpu_addr, order);
9980}
9981
9982static u64 niu_phys_map_page(struct device *dev, struct page *page,
9983			     unsigned long offset, size_t size,
9984			     enum dma_data_direction direction)
9985{
9986	return page_to_phys(page) + offset;
9987}
9988
9989static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
9990				size_t size, enum dma_data_direction direction)
9991{
9992	/* Nothing to do.  */
9993}
9994
9995static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
9996			       size_t size,
9997			       enum dma_data_direction direction)
9998{
9999	return __pa(cpu_addr);
10000}
10001
10002static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
10003				  size_t size,
10004				  enum dma_data_direction direction)
10005{
10006	/* Nothing to do.  */
10007}
10008
10009static const struct niu_ops niu_phys_ops = {
10010	.alloc_coherent	= niu_phys_alloc_coherent,
10011	.free_coherent	= niu_phys_free_coherent,
10012	.map_page	= niu_phys_map_page,
10013	.unmap_page	= niu_phys_unmap_page,
10014	.map_single	= niu_phys_map_single,
10015	.unmap_single	= niu_phys_unmap_single,
10016};
10017
10018static int niu_of_probe(struct platform_device *op)
10019{
10020	union niu_parent_id parent_id;
10021	struct net_device *dev;
10022	struct niu *np;
10023	const u32 *reg;
10024	int err;
10025
10026	niu_driver_version();
10027
10028	reg = of_get_property(op->dev.of_node, "reg", NULL);
10029	if (!reg) {
10030		dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
10031			op->dev.of_node->full_name);
10032		return -ENODEV;
10033	}
10034
10035	dev = niu_alloc_and_init(&op->dev, NULL, op,
10036				 &niu_phys_ops, reg[0] & 0x1);
10037	if (!dev) {
10038		err = -ENOMEM;
10039		goto err_out;
10040	}
10041	np = netdev_priv(dev);
10042
10043	memset(&parent_id, 0, sizeof(parent_id));
10044	parent_id.of = of_get_parent(op->dev.of_node);
10045
10046	np->parent = niu_get_parent(np, &parent_id,
10047				    PLAT_TYPE_NIU);
10048	if (!np->parent) {
10049		err = -ENOMEM;
10050		goto err_out_free_dev;
10051	}
10052
10053	niu_set_basic_features(dev);
10054
10055	np->regs = of_ioremap(&op->resource[1], 0,
10056			      resource_size(&op->resource[1]),
10057			      "niu regs");
10058	if (!np->regs) {
10059		dev_err(&op->dev, "Cannot map device registers, aborting\n");
10060		err = -ENOMEM;
10061		goto err_out_release_parent;
10062	}
10063
10064	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
10065				    resource_size(&op->resource[2]),
10066				    "niu vregs-1");
10067	if (!np->vir_regs_1) {
10068		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
10069		err = -ENOMEM;
10070		goto err_out_iounmap;
10071	}
10072
10073	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
10074				    resource_size(&op->resource[3]),
10075				    "niu vregs-2");
10076	if (!np->vir_regs_2) {
10077		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
10078		err = -ENOMEM;
10079		goto err_out_iounmap;
10080	}
10081
10082	niu_assign_netdev_ops(dev);
10083
10084	err = niu_get_invariants(np);
10085	if (err) {
10086		if (err != -ENODEV)
10087			dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
10088		goto err_out_iounmap;
10089	}
10090
10091	err = register_netdev(dev);
10092	if (err) {
10093		dev_err(&op->dev, "Cannot register net device, aborting\n");
10094		goto err_out_iounmap;
10095	}
10096
10097	platform_set_drvdata(op, dev);
10098
10099	niu_device_announce(np);
10100
10101	return 0;
10102
10103err_out_iounmap:
10104	if (np->vir_regs_1) {
10105		of_iounmap(&op->resource[2], np->vir_regs_1,
10106			   resource_size(&op->resource[2]));
10107		np->vir_regs_1 = NULL;
10108	}
10109
10110	if (np->vir_regs_2) {
10111		of_iounmap(&op->resource[3], np->vir_regs_2,
10112			   resource_size(&op->resource[3]));
10113		np->vir_regs_2 = NULL;
10114	}
10115
10116	if (np->regs) {
10117		of_iounmap(&op->resource[1], np->regs,
10118			   resource_size(&op->resource[1]));
10119		np->regs = NULL;
10120	}
10121
10122err_out_release_parent:
10123	niu_put_parent(np);
10124
10125err_out_free_dev:
10126	free_netdev(dev);
10127
10128err_out:
10129	return err;
10130}
10131
10132static int niu_of_remove(struct platform_device *op)
10133{
10134	struct net_device *dev = platform_get_drvdata(op);
10135
10136	if (dev) {
10137		struct niu *np = netdev_priv(dev);
10138
10139		unregister_netdev(dev);
10140
10141		if (np->vir_regs_1) {
10142			of_iounmap(&op->resource[2], np->vir_regs_1,
10143				   resource_size(&op->resource[2]));
10144			np->vir_regs_1 = NULL;
10145		}
10146
10147		if (np->vir_regs_2) {
10148			of_iounmap(&op->resource[3], np->vir_regs_2,
10149				   resource_size(&op->resource[3]));
10150			np->vir_regs_2 = NULL;
10151		}
10152
10153		if (np->regs) {
10154			of_iounmap(&op->resource[1], np->regs,
10155				   resource_size(&op->resource[1]));
10156			np->regs = NULL;
10157		}
10158
10159		niu_ldg_free(np);
10160
10161		niu_put_parent(np);
10162
10163		free_netdev(dev);
10164	}
10165	return 0;
10166}
10167
10168static const struct of_device_id niu_match[] = {
10169	{
10170		.name = "network",
10171		.compatible = "SUNW,niusl",
10172	},
10173	{},
10174};
10175MODULE_DEVICE_TABLE(of, niu_match);
10176
10177static struct platform_driver niu_of_driver = {
10178	.driver = {
10179		.name = "niu",
10180		.of_match_table = niu_match,
10181	},
10182	.probe		= niu_of_probe,
10183	.remove		= niu_of_remove,
10184};
10185
10186#endif /* CONFIG_SPARC64 */
10187
10188static int __init niu_init(void)
10189{
10190	int err = 0;
10191
10192	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
10193
10194	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
10195
10196#ifdef CONFIG_SPARC64
10197	err = platform_driver_register(&niu_of_driver);
10198#endif
10199
10200	if (!err) {
10201		err = pci_register_driver(&niu_pci_driver);
10202#ifdef CONFIG_SPARC64
10203		if (err)
10204			platform_driver_unregister(&niu_of_driver);
10205#endif
10206	}
10207
10208	return err;
10209}
10210
10211static void __exit niu_exit(void)
10212{
10213	pci_unregister_driver(&niu_pci_driver);
10214#ifdef CONFIG_SPARC64
10215	platform_driver_unregister(&niu_of_driver);
10216#endif
10217}
10218
10219module_init(niu_init);
10220module_exit(niu_exit);
10221