/*
 * Broadcom Starfighter 2 DSA switch driver
 *
 * Copyright (C) 2014, Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/mii.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

/* String, offset, and register size in bytes if different from 4 bytes */
static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
	{ "TxOctets",		0x000, 8	},
	{ "TxDropPkts",		0x020		},
	{ "TxQPKTQ0",		0x030		},
	{ "TxBroadcastPkts",	0x040		},
	{ "TxMulticastPkts",	0x050		},
	{ "TxUnicastPKts",	0x060		},
	{ "TxCollisions",	0x070		},
	{ "TxSingleCollision",	0x080		},
	{ "TxMultipleCollision", 0x090		},
	{ "TxDeferredCollision", 0x0a0		},
	{ "TxLateCollision",	0x0b0		},
	{ "TxExcessiveCollision", 0x0c0		},
	{ "TxFrameInDisc",	0x0d0		},
	{ "TxPausePkts",	0x0e0		},
	{ "TxQPKTQ1",		0x0f0		},
	{ "TxQPKTQ2",		0x100		},
	{ "TxQPKTQ3",		0x110		},
	{ "TxQPKTQ4",		0x120		},
	{ "TxQPKTQ5",		0x130		},
	{ "RxOctets",		0x140, 8	},
	{ "RxUndersizePkts",	0x160		},
	{ "RxPausePkts",	0x170		},
	{ "RxPkts64Octets",	0x180		},
	{ "RxPkts65to127Octets", 0x190		},
	{ "RxPkts128to255Octets", 0x1a0		},
	{ "RxPkts256to511Octets", 0x1b0		},
	{ "RxPkts512to1023Octets", 0x1c0	},
	{ "RxPkts1024toMaxPktsOctets", 0x1d0	},
	{ "RxOversizePkts",	0x1e0		},
	{ "RxJabbers",		0x1f0		},
	{ "RxAlignmentErrors",	0x200		},
	{ "RxFCSErrors",	0x210		},
	{ "RxGoodOctets",	0x220, 8	},
	{ "RxDropPkts",		0x240		},
	{ "RxUnicastPkts",	0x250		},
	{ "RxMulticastPkts",	0x260		},
	{ "RxBroadcastPkts",	0x270		},
	{ "RxSAChanges",	0x280		},
	{ "RxFragments",	0x290		},
	{ "RxJumboPkt",		0x2a0		},
	{ "RxSymblErr",		0x2b0		},
	{ "InRangeErrCount",	0x2c0		},
	{ "OutRangeErrCount",	0x2d0		},
	{ "EEELpiEvent",	0x2e0		},
	{ "EEELpiDuration",	0x2f0		},
	{ "RxDiscard",		0x300, 8	},
	{ "TxQPKTQ6",		0x320		},
	{ "TxQPKTQ7",		0x330		},
	{ "TxPkts64Octets",	0x340		},
	{ "TxPkts65to127Octets", 0x350		},
	{ "TxPkts128to255Octets", 0x360		},
	{ "TxPkts256to511Ocets", 0x370		},
	{ "TxPkts512to1023Ocets", 0x380		},
	{ "TxPkts1024toMaxPktOcets", 0x390	},
};

#define BCM_SF2_STATS_SIZE	ARRAY_SIZE(bcm_sf2_mib)

static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
				   int port, uint8_t *data)
{
	unsigned int i;

	for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
}

static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
					 int port, uint64_t *data)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	const struct bcm_sf2_hw_stats *s;
	unsigned int i;
	u64 val = 0;
	u32 offset;

	mutex_lock(&priv->stats_mutex);

	/* Now fetch the per-port counters */
	for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
		s = &bcm_sf2_mib[i];

		/* Do a latched 64-bit read if needed */
		offset = s->reg + CORE_P_MIB_OFFSET(port);
		if (s->sizeof_stat == 8)
			val = core_readq(priv, offset);
		else
			val = core_readl(priv, offset);

		data[i] = (u64)val;
	}

	mutex_unlock(&priv->stats_mutex);
}

static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
{
	return BCM_SF2_STATS_SIZE;
}

static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr)
{
	return "Broadcom Starfighter 2";
}

static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg;

	/* Enable the IMP Port to be in the same VLAN as the other ports
	 * on a per-port basis such that we only have Port i and IMP in
	 * the same VLAN.
	 */
	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (!((1 << i) & ds->phys_port_mask))
			continue;

		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= (1 << cpu_port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
	}
}

static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg, val;

	/* Enable the port memories */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
	reg = core_readl(priv, CORE_IMP_CTL);
	reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
	reg &= ~(RX_DIS | TX_DIS);
	core_writel(priv, reg, CORE_IMP_CTL);

	/* Enable forwarding */
	core_writel(priv, SW_FWDG_EN, CORE_SWMODE);

	/* Enable IMP port in dumb mode */
	reg = core_readl(priv, CORE_SWITCH_CTRL);
	reg |= MII_DUMB_FWDG_EN;
	core_writel(priv, reg, CORE_SWITCH_CTRL);

	/* Resolve which bit controls the Broadcom tag */
	switch (port) {
	case 8:
		val = BRCM_HDR_EN_P8;
		break;
	case 7:
		val = BRCM_HDR_EN_P7;
		break;
	case 5:
		val = BRCM_HDR_EN_P5;
		break;
	default:
		val = 0;
		break;
	}

	/* Enable Broadcom tags for IMP port */
	reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
	reg |= val;
	core_writel(priv, reg, CORE_BRCM_HDR_CTRL);

	/* Enable reception of Broadcom tags for CPU TX (switch RX) to
	 * allow us to tag outgoing frames
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);

	/* Enable transmission of Broadcom tags from the switch (CPU RX) to
	 * allow delivering frames to the per-port net_devices
	 */
	reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
	reg &= ~(1 << port);
	core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);

	/* Force link status for IMP port */
	reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
	reg |= (MII_SW_OR | LINK_STS);
	core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
}

static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = core_readl(priv, CORE_EEE_EN_CTRL);
	if (enable)
		reg |= 1 << port;
	else
		reg &= ~(1 << port);
	core_writel(priv, reg, CORE_EEE_EN_CTRL);
}

static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 reg;

	reg = reg_readl(priv, REG_SPHY_CNTRL);
	if (enable) {
		reg |= PHY_RESET;
		reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		udelay(21);
		reg = reg_readl(priv, REG_SPHY_CNTRL);
		reg &= ~PHY_RESET;
	} else {
		reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
		reg_writel(priv, reg, REG_SPHY_CNTRL);
		mdelay(1);
		reg |= CK25_DIS;
	}
	reg_writel(priv, reg, REG_SPHY_CNTRL);

	/* Use PHY-driven LED signaling */
	if (!enable) {
		reg = reg_readl(priv, REG_LED_CNTRL(0));
		reg |= SPDLNK_SRC_SEL;
		reg_writel(priv, reg, REG_LED_CNTRL(0));
	}
}

static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
			      struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	u32 reg;

	/* Clear the memory power down */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg &= ~P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);

	/* Clear the Rx and Tx disable bits and set to no spanning tree */
	core_writel(priv, 0, CORE_G_PCTL_PORT(port));

	/* Re-enable the GPHY and re-apply workarounds */
	if (port == 0 && priv->hw_params.num_gphy == 1) {
		bcm_sf2_gphy_enable_set(ds, true);
		if (phy) {
			/* If phy_stop() has been called before, the PHY
			 * will be in the halted state and phy_start()
			 * will call resume.
			 *
			 * The resume path does not restore the autoneg
			 * settings, and since we hard reset the PHY
			 * manually here, we need to reset its state
			 * machine as well.
			 */
			phy->state = PHY_READY;
			phy_init_hw(phy);
		}
	}

	/* Enable port 7 interrupts to get notified */
	if (port == 7)
		intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));

	/* Set this port, and only this one, to be in the default VLAN;
	 * if it is a member of a bridge, restore the membership it had
	 * prior to bringing down this port.
	 */
	reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
	reg &= ~PORT_VLAN_CTRL_MASK;
	reg |= (1 << port);
	reg |= priv->port_sts[port].vlan_ctl_mask;
	core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));

	bcm_sf2_imp_vlan_setup(ds, cpu_port);

	/* If EEE was enabled, restore it */
	if (priv->port_sts[port].eee.eee_enabled)
		bcm_sf2_eee_enable_set(ds, port, true);

	return 0;
}

static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
				 struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 off, reg;

	if (priv->wol_ports_mask & (1 << port))
		return;

	if (port == 7) {
		intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF));
		intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
	}

	if (port == 0 && priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, false);

	if (dsa_is_cpu_port(ds, port))
		off = CORE_IMP_CTL;
	else
		off = CORE_G_PCTL_PORT(port);

	reg = core_readl(priv, off);
	reg |= RX_DIS | TX_DIS;
	core_writel(priv, reg, off);

	/* Power down the port memory */
	reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
	reg |= P_TXQ_PSM_VDD(port);
	core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}

/* Returns 0 if EEE was not enabled, or 1 otherwise
 */
static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
			    struct phy_device *phy)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	int ret;

	p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);

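	/* The second argument (clk_stop_enable) is left at 0 so the xMII
	 * clock keeps running during LPI; EEE is only enabled at the
	 * switch level if the PHY reports that it negotiated EEE.
	 */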
	ret = phy_init_eee(phy, 0);
	if (ret)
		return 0;

	bcm_sf2_eee_enable_set(ds, port, true);

	return 1;
}

static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
			      struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;
	u32 reg;

	reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
	e->eee_enabled = p->eee_enabled;
	e->eee_active = !!(reg & (1 << port));

	return 0;
}

static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
			      struct phy_device *phydev,
			      struct ethtool_eee *e)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_eee *p = &priv->port_sts[port].eee;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcm_sf2_eee_enable_set(ds, port, false);
	} else {
		p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
		if (!p->eee_enabled)
			return -EOPNOTSUPP;
	}

	return 0;
}

/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
 * flush for that port.
 */
static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int timeout = 1000;
	u32 reg;

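	/* Select the port to be aged, then kick off a fast-age of its
	 * dynamically learned ARL entries and poll for completion.
	 */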
	core_writel(priv, port, CORE_FAST_AGE_PORT);

	reg = core_readl(priv, CORE_FAST_AGE_CTRL);
	reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
	core_writel(priv, reg, CORE_FAST_AGE_CTRL);

	do {
		reg = core_readl(priv, CORE_FAST_AGE_CTRL);
		if (!(reg & FAST_AGE_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	core_writel(priv, 0, CORE_FAST_AGE_CTRL);

	return 0;
}

static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
			      u32 br_port_mask)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg, p_ctl;

	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		if (!((1 << i) & br_port_mask))
			continue;

		/* Add this local port to the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg |= 1 << port;
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		p_ctl |= 1 << i;
	}

	/* Configure the local port VLAN control membership to include
	 * remote ports and update the local port bitmask
	 */
	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}

static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port,
			       u32 br_port_mask)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int i;
	u32 reg, p_ctl;

	p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));

	for (i = 0; i < priv->hw_params.num_ports; i++) {
		/* Don't touch the remaining ports */
		if (!((1 << i) & br_port_mask))
			continue;

		/* Remove this local port from the remote port VLAN control
		 * membership and update the remote port bitmask
		 */
		reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
		reg &= ~(1 << port);
		core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
		priv->port_sts[i].vlan_ctl_mask = reg;

		/* Prevent self removal to preserve isolation */
		if (port != i)
			p_ctl &= ~(1 << i);
	}

	core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
	priv->port_sts[port].vlan_ctl_mask = p_ctl;

	return 0;
}

static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u8 hw_state, cur_hw_state;
	int ret = 0;
	u32 reg;

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);

	switch (state) {
	case BR_STATE_DISABLED:
		hw_state = G_MISTP_DIS_STATE;
		break;
	case BR_STATE_LISTENING:
		hw_state = G_MISTP_LISTEN_STATE;
		break;
	case BR_STATE_LEARNING:
		hw_state = G_MISTP_LEARN_STATE;
		break;
	case BR_STATE_FORWARDING:
		hw_state = G_MISTP_FWD_STATE;
		break;
	case BR_STATE_BLOCKING:
		hw_state = G_MISTP_BLOCK_STATE;
		break;
	default:
		pr_err("%s: invalid STP state: %d\n", __func__, state);
		return -EINVAL;
	}

	/* Fast-age ARL entries if we are moving a port from Learning or
	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
	 * state (hw_state)
	 */
	if (cur_hw_state != hw_state) {
		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
		    hw_state <= G_MISTP_LISTEN_STATE) {
			ret = bcm_sf2_sw_fast_age_port(ds, port);
			if (ret) {
				pr_err("%s: fast-ageing failed\n", __func__);
				return ret;
			}
		}
	}

	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
	reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
	reg |= hw_state;
	core_writel(priv, reg, CORE_G_PCTL_PORT(port));

	return 0;
}

static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

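	/* Latch the pending, unmasked interrupt sources and acknowledge them */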
	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq0_mask;
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
{
	struct bcm_sf2_priv *priv = dev_id;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~priv->irq1_mask;
	intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 1;
	if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
		priv->port_sts[7].link = 0;

	return IRQ_HANDLED;
}

static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

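	/* Request a software reset through the watchdog block, then poll
	 * until the SOFTWARE_RESET bit clears itself.
	 */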
	reg = core_readl(priv, CORE_WATCHDOG_CTRL);
	reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
	core_writel(priv, reg, CORE_WATCHDOG_CTRL);

	do {
		reg = core_readl(priv, CORE_WATCHDOG_CTRL);
		if (!(reg & SOFTWARE_RESET))
			break;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}

static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
{
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}

static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct device_node *dn;
	void __iomem **base;
	unsigned int port;
	unsigned int i;
	u32 reg, rev;
	int ret;

	spin_lock_init(&priv->indir_lock);
	mutex_init(&priv->stats_mutex);

	/* All the interesting properties are at the parent device_node
	 * level
	 */
	dn = ds->pd->of_node->parent;

	priv->irq0 = irq_of_parse_and_map(dn, 0);
	priv->irq1 = irq_of_parse_and_map(dn, 1);

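	/* Map each register resource; this walk relies on the iomem
	 * cookies being laid out consecutively in bcm_sf2_priv, starting
	 * at "core", in the same order as BCM_SF2_REGS_NAME.
	 */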
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		*base = of_iomap(dn, i);
		if (*base == NULL) {
			pr_err("unable to find register: %s\n", reg_names[i]);
			ret = -ENOMEM;
			goto out_unmap;
		}
		base++;
	}

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("unable to software reset switch: %d\n", ret);
		goto out_unmap;
	}

	/* Disable all interrupts and request them */
	bcm_sf2_intr_disable(priv);

	ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
			  "switch_0", priv);
	if (ret < 0) {
		pr_err("failed to request switch_0 IRQ\n");
		goto out_unmap;
	}

	ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
			  "switch_1", priv);
	if (ret < 0) {
		pr_err("failed to request switch_1 IRQ\n");
		goto out_free_irq0;
	}

	/* Reset the MIB counters */
	reg = core_readl(priv, CORE_GMNCFGCFG);
	reg |= RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);
	reg &= ~RST_MIB_CNT;
	core_writel(priv, reg, CORE_GMNCFGCFG);

	/* Get the maximum number of ports for this switch */
	priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
	if (priv->hw_params.num_ports > DSA_MAX_PORTS)
		priv->hw_params.num_ports = DSA_MAX_PORTS;

	/* Assume a single GPHY setup if we can't read that property */
	if (of_property_read_u32(dn, "brcm,num-gphy",
				 &priv->hw_params.num_gphy))
		priv->hw_params.num_gphy = 1;

	/* Enable all valid ports and disable those unused */
	for (port = 0; port < priv->hw_params.num_ports; port++) {
		/* IMP port receives special treatment */
		if ((1 << port) & ds->phys_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
		else
			bcm_sf2_port_disable(ds, port, NULL);
	}

	/* Include the pseudo-PHY address and the broadcast PHY address to
	 * divert reads towards our workaround
	 */
	ds->phys_mii_mask |= ((1 << 30) | (1 << 0));

	rev = reg_readl(priv, REG_SWITCH_REVISION);
	priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
					SWITCH_TOP_REV_MASK;
	priv->hw_params.core_rev = (rev & SF2_REV_MASK);

	rev = reg_readl(priv, REG_PHY_REVISION);
	priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;

	pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
		priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
		priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
		priv->core, priv->irq0, priv->irq1);

	return 0;

out_free_irq0:
	free_irq(priv->irq0, priv);
out_unmap:
	base = &priv->core;
	for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
		if (*base)
			iounmap(*base);
		base++;
	}
	return ret;
}

static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;
}

static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);

	/* The BCM7xxx PHY driver expects to find the integrated PHY revision
	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
	 * the REG_PHY_REVISION register layout is.
	 */

	return priv->hw_params.gphy_rev;
}

static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
			       int regnum, u16 val)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	int ret = 0;
	u32 reg;

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg |= MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

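	/* With MDIO_MASTER_SEL set, the PHY is reached indirectly through
	 * the switch core register space: the PHY address is written first,
	 * then the register is accessed through a page 0x80 offset of
	 * (regnum << 1); both offsets below are shifted left by 2 to form
	 * byte addresses.
	 */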
	/* Page << 8 | offset */
	reg = 0x70;
	reg <<= 2;
	core_writel(priv, addr, reg);

	/* Page << 8 | offset */
	reg = 0x80 << 8 | regnum << 1;
	reg <<= 2;

	if (op)
		ret = core_readl(priv, reg);
	else
		core_writel(priv, val, reg);

	reg = reg_readl(priv, REG_SWITCH_CNTRL);
	reg &= ~MDIO_MASTER_SEL;
	reg_writel(priv, reg, REG_SWITCH_CNTRL);

	return ret & 0xffff;
}

static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	/* Intercept reads from the MDIO broadcast address or Broadcom
	 * pseudo-PHY address
	 */
	switch (addr) {
	case 0:
	case 30:
		return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
	default:
		return 0xffff;
	}
}

static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
				u16 val)
{
	/* Intercept writes to the MDIO broadcast address or Broadcom
	 * pseudo-PHY address
	 */
	switch (addr) {
	case 0:
	case 30:
		bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
		break;
	}

	return 0;
}

static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
				   struct phy_device *phydev)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 id_mode_dis = 0, port_mode;
	const char *str = NULL;
	u32 reg;

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		str = "RGMII (no delay)";
		id_mode_dis = 1;
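		/* fall through - RGMII_TXID uses the same EXT_GPHY port
		 * mode, only the delay settings differ
		 */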
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!str)
			str = "RGMII (TX delay)";
		port_mode = EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		str = "MII";
		port_mode = EXT_EPHY;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		str = "Reverse MII";
		port_mode = EXT_REVMII;
		break;
	default:
		/* All other PHYs: internal and MoCA */
		goto force_link;
	}

	/* If the link is down, just disable the interface to conserve power */
	if (!phydev->link) {
		reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
		reg &= ~RGMII_MODE_EN;
		reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
		goto force_link;
	}

	/* Clear id_mode_dis bit, and the existing port mode, but
	 * make sure we enable the RGMII block for data to pass
	 */
	reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
	reg &= ~ID_MODE_DIS;
	reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
	reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);

	reg |= port_mode | RGMII_MODE_EN;
	if (id_mode_dis)
		reg |= ID_MODE_DIS;

	if (phydev->pause) {
		if (phydev->asym_pause)
			reg |= TX_PAUSE_EN;
		reg |= RX_PAUSE_EN;
	}

	reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));

	pr_info("Port %d configured for %s\n", port, str);

force_link:
	/* Force link settings detected from the PHY */
	reg = SW_OVERRIDE;
	switch (phydev->speed) {
	case SPEED_1000:
		reg |= SPDSTS_1000 << SPEED_SHIFT;
		break;
	case SPEED_100:
		reg |= SPDSTS_100 << SPEED_SHIFT;
		break;
	}

	if (phydev->link)
		reg |= LINK_STS;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= DUPLX_MODE;

	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
}

static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
					 struct fixed_phy_status *status)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	u32 duplex, pause;
	u32 reg;

	duplex = core_readl(priv, CORE_DUPSTS);
	pause = core_readl(priv, CORE_PAUSESTS);

	status->link = 0;

	/* Port 7 is special as we do not get link status from CORE_LNKSTS,
	 * which means that we need to force the link at the port override
	 * level to get the data to flow. We use what the interrupt handler
	 * determined earlier.
	 *
	 * For the other ports, we just force the link status, since this is
	 * a fixed PHY device.
	 */
	if (port == 7) {
		status->link = priv->port_sts[port].link;
		status->duplex = 1;
	} else {
		status->link = 1;
		status->duplex = !!(duplex & (1 << port));
	}

	reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
	reg |= SW_OVERRIDE;
	if (status->link)
		reg |= LINK_STS;
	else
		reg &= ~LINK_STS;
	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));

	if ((pause & (1 << port)) &&
	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
		status->asym_pause = 1;
		status->pause = 1;
	}

	if (pause & (1 << port))
		status->pause = 1;
}

static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int port;

	bcm_sf2_intr_disable(priv);

	/* Disable all physically present ports, including the IMP
	 * port; the other ones have already been disabled during
	 * bcm_sf2_sw_setup
	 */
	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if ((1 << port) & ds->phys_port_mask ||
		    dsa_is_cpu_port(ds, port))
			bcm_sf2_port_disable(ds, port, NULL);
	}

	return 0;
}

static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	unsigned int port;
	int ret;

	ret = bcm_sf2_sw_rst(priv);
	if (ret) {
		pr_err("%s: failed to software reset switch\n", __func__);
		return ret;
	}

	if (priv->hw_params.num_gphy == 1)
		bcm_sf2_gphy_enable_set(ds, true);

	for (port = 0; port < DSA_MAX_PORTS; port++) {
		if ((1 << port) & ds->phys_port_mask)
			bcm_sf2_port_setup(ds, port, NULL);
		else if (dsa_is_cpu_port(ds, port))
			bcm_sf2_imp_setup(ds, port);
	}

	return 0;
}

static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
			       struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->dst[ds->index].master_netdev;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	struct ethtool_wolinfo pwol;

	/* Get the parent device WoL settings */
	p->ethtool_ops->get_wol(p, &pwol);

	/* Advertise the parent device supported settings */
	wol->supported = pwol.supported;
	memset(&wol->sopass, 0, sizeof(wol->sopass));

	if (pwol.wolopts & WAKE_MAGICSECURE)
		memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));

	if (priv->wol_ports_mask & (1 << port))
		wol->wolopts = pwol.wolopts;
	else
		wol->wolopts = 0;
}

static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
			      struct ethtool_wolinfo *wol)
{
	struct net_device *p = ds->dst[ds->index].master_netdev;
	struct bcm_sf2_priv *priv = ds_to_priv(ds);
	s8 cpu_port = ds->dst[ds->index].cpu_port;
	struct ethtool_wolinfo pwol;

	p->ethtool_ops->get_wol(p, &pwol);
	if (wol->wolopts & ~pwol.supported)
		return -EINVAL;

	if (wol->wolopts)
		priv->wol_ports_mask |= (1 << port);
	else
		priv->wol_ports_mask &= ~(1 << port);

	/* If we have at least one port enabled, make sure the CPU port
	 * is also enabled. If the CPU port is the last one enabled, we disable
	 * it since this configuration does not make sense.
	 */
	if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
		priv->wol_ports_mask |= (1 << cpu_port);
	else
		priv->wol_ports_mask &= ~(1 << cpu_port);

	return p->ethtool_ops->set_wol(p, wol);
}

static struct dsa_switch_driver bcm_sf2_switch_driver = {
	.tag_protocol		= DSA_TAG_PROTO_BRCM,
	.priv_size		= sizeof(struct bcm_sf2_priv),
	.probe			= bcm_sf2_sw_probe,
	.setup			= bcm_sf2_sw_setup,
	.set_addr		= bcm_sf2_sw_set_addr,
	.get_phy_flags		= bcm_sf2_sw_get_phy_flags,
	.phy_read		= bcm_sf2_sw_phy_read,
	.phy_write		= bcm_sf2_sw_phy_write,
	.get_strings		= bcm_sf2_sw_get_strings,
	.get_ethtool_stats	= bcm_sf2_sw_get_ethtool_stats,
	.get_sset_count		= bcm_sf2_sw_get_sset_count,
	.adjust_link		= bcm_sf2_sw_adjust_link,
	.fixed_link_update	= bcm_sf2_sw_fixed_link_update,
	.suspend		= bcm_sf2_sw_suspend,
	.resume			= bcm_sf2_sw_resume,
	.get_wol		= bcm_sf2_sw_get_wol,
	.set_wol		= bcm_sf2_sw_set_wol,
	.port_enable		= bcm_sf2_port_setup,
	.port_disable		= bcm_sf2_port_disable,
	.get_eee		= bcm_sf2_sw_get_eee,
	.set_eee		= bcm_sf2_sw_set_eee,
	.port_join_bridge	= bcm_sf2_sw_br_join,
	.port_leave_bridge	= bcm_sf2_sw_br_leave,
	.port_stp_update	= bcm_sf2_sw_br_set_stp_state,
};

static int __init bcm_sf2_init(void)
{
	register_switch_driver(&bcm_sf2_switch_driver);

	return 0;
}
module_init(bcm_sf2_init);

static void __exit bcm_sf2_exit(void)
{
	unregister_switch_driver(&bcm_sf2_switch_driver);
}
module_exit(bcm_sf2_exit);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:brcm-sf2");