/*
 * AMD 10Gb Ethernet PHY driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/acpi.h>
#include <linux/jiffies.h>

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION("1.0.0-a");
MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");

#define XGBE_PHY_ID	0x000162d0
#define XGBE_PHY_MASK	0xfffffff0

#define XGBE_PHY_SPEEDSET_PROPERTY	"amd,speed-set"
#define XGBE_PHY_BLWC_PROPERTY		"amd,serdes-blwc"
#define XGBE_PHY_CDR_RATE_PROPERTY	"amd,serdes-cdr-rate"
#define XGBE_PHY_PQ_SKEW_PROPERTY	"amd,serdes-pq-skew"
#define XGBE_PHY_TX_AMP_PROPERTY	"amd,serdes-tx-amp"
#define XGBE_PHY_DFE_CFG_PROPERTY	"amd,serdes-dfe-tap-config"
#define XGBE_PHY_DFE_ENA_PROPERTY	"amd,serdes-dfe-tap-enable"

#define XGBE_PHY_SPEEDS			3
#define XGBE_PHY_SPEED_1000		0
#define XGBE_PHY_SPEED_2500		1
#define XGBE_PHY_SPEED_10000		2

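/* Watchdog for a stalled clause-73 page exchange: if more than this many
 * milliseconds pass between page-received events, the per-mode rx state
 * machines are reset (see amd_xgbe_an_page_received()).
 */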
#define XGBE_AN_MS_TIMEOUT		500

#define XGBE_AN_INT_CMPLT		0x01
#define XGBE_AN_INC_LINK		0x02
#define XGBE_AN_PG_RCV			0x04
#define XGBE_AN_INT_MASK		0x07

#define XNP_MCF_NULL_MESSAGE		0x001
#define XNP_ACK_PROCESSED		BIT(12)
#define XNP_MP_FORMATTED		BIT(13)
#define XNP_NP_EXCHANGE			BIT(15)

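/* amd_xgbe_phy_serdes_complete_ratechange() sleeps 50-75us per poll, so a
 * count of 500 bounds the rx/tx ready wait to roughly 25-38ms.
 */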
#define XGBE_PHY_RATECHANGE_COUNT	500

#define XGBE_PHY_KR_TRAINING_START	0x01
#define XGBE_PHY_KR_TRAINING_ENABLE	0x02

#define XGBE_PHY_FEC_ENABLE		0x01
#define XGBE_PHY_FEC_FORWARD		0x02
#define XGBE_PHY_FEC_MASK		0x03

#ifndef MDIO_PMA_10GBR_PMD_CTRL
#define MDIO_PMA_10GBR_PMD_CTRL		0x0096
#endif

#ifndef MDIO_PMA_10GBR_FEC_ABILITY
#define MDIO_PMA_10GBR_FEC_ABILITY	0x00aa
#endif

#ifndef MDIO_PMA_10GBR_FEC_CTRL
#define MDIO_PMA_10GBR_FEC_CTRL		0x00ab
#endif

#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP			0x0016
#endif

#ifndef MDIO_AN_LPX
#define MDIO_AN_LPX			0x0019
#endif

#ifndef MDIO_AN_INTMASK
#define MDIO_AN_INTMASK			0x8001
#endif

#ifndef MDIO_AN_INT
#define MDIO_AN_INT			0x8002
#endif

#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G		(MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif
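
/* (With MDIO_CTRL1_SPEED10G == 0x2040 and BMCR_SPEED100 == 0x2000, the
 * fallback above evaluates to 0x0040: bit 6 set, bit 13 clear, the
 * clause-45 speed selection encoding for 1000 Mb/s.)
 */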

/* SerDes integration register offsets */
#define SIR0_KR_RT_1			0x002c
#define SIR0_STATUS			0x0040
#define SIR1_SPEED			0x0000

/* SerDes integration register entry bit positions and sizes */
#define SIR0_KR_RT_1_RESET_INDEX	11
#define SIR0_KR_RT_1_RESET_WIDTH	1
#define SIR0_STATUS_RX_READY_INDEX	0
#define SIR0_STATUS_RX_READY_WIDTH	1
#define SIR0_STATUS_TX_READY_INDEX	8
#define SIR0_STATUS_TX_READY_WIDTH	1
#define SIR1_SPEED_CDR_RATE_INDEX	12
#define SIR1_SPEED_CDR_RATE_WIDTH	4
#define SIR1_SPEED_DATARATE_INDEX	4
#define SIR1_SPEED_DATARATE_WIDTH	2
#define SIR1_SPEED_PLLSEL_INDEX		3
#define SIR1_SPEED_PLLSEL_WIDTH		1
#define SIR1_SPEED_RATECHANGE_INDEX	6
#define SIR1_SPEED_RATECHANGE_WIDTH	1
#define SIR1_SPEED_TXAMP_INDEX		8
#define SIR1_SPEED_TXAMP_WIDTH		4
#define SIR1_SPEED_WORDMODE_INDEX	0
#define SIR1_SPEED_WORDMODE_WIDTH	3

#define SPEED_10000_BLWC		0
#define SPEED_10000_CDR			0x7
#define SPEED_10000_PLL			0x1
#define SPEED_10000_PQ			0x12
#define SPEED_10000_RATE		0x0
#define SPEED_10000_TXAMP		0xa
#define SPEED_10000_WORD		0x7
#define SPEED_10000_DFE_TAP_CONFIG	0x1
#define SPEED_10000_DFE_TAP_ENABLE	0x7f

#define SPEED_2500_BLWC			1
#define SPEED_2500_CDR			0x2
#define SPEED_2500_PLL			0x0
#define SPEED_2500_PQ			0xa
#define SPEED_2500_RATE			0x1
#define SPEED_2500_TXAMP		0xf
#define SPEED_2500_WORD			0x1
#define SPEED_2500_DFE_TAP_CONFIG	0x3
#define SPEED_2500_DFE_TAP_ENABLE	0x0

#define SPEED_1000_BLWC			1
#define SPEED_1000_CDR			0x2
#define SPEED_1000_PLL			0x0
#define SPEED_1000_PQ			0xa
#define SPEED_1000_RATE			0x3
#define SPEED_1000_TXAMP		0xf
#define SPEED_1000_WORD			0x1
#define SPEED_1000_DFE_TAP_CONFIG	0x3
#define SPEED_1000_DFE_TAP_ENABLE	0x0

/* SerDes RxTx register offsets */
#define RXTX_REG6			0x0018
#define RXTX_REG20			0x0050
#define RXTX_REG22			0x0058
#define RXTX_REG114			0x01c8
#define RXTX_REG129			0x0204

/* SerDes RxTx register entry bit positions and sizes */
#define RXTX_REG6_RESETB_RXD_INDEX	8
#define RXTX_REG6_RESETB_RXD_WIDTH	1
#define RXTX_REG20_BLWC_ENA_INDEX	2
#define RXTX_REG20_BLWC_ENA_WIDTH	1
#define RXTX_REG114_PQ_REG_INDEX	9
#define RXTX_REG114_PQ_REG_WIDTH	7
#define RXTX_REG129_RXDFE_CONFIG_INDEX	14
#define RXTX_REG129_RXDFE_CONFIG_WIDTH	2

/* Bit setting and getting macros
 *  The get macro will extract the current bit field value from within
 *  the variable
 *
 *  The set macro will clear the current bit field value within the
 *  variable and then set the bit field of the variable to the
 *  specified value
 */
#define GET_BITS(_var, _index, _width)					\
	(((_var) >> (_index)) & ((0x1 << (_width)) - 1))

#define SET_BITS(_var, _index, _width, _val)				\
do {									\
	(_var) &= ~(((0x1 << (_width)) - 1) << (_index));		\
	(_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));	\
} while (0)
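
/* Worked example (illustrative values only): with
 * SIR1_SPEED_TXAMP_INDEX == 8 and SIR1_SPEED_TXAMP_WIDTH == 4,
 * SET_BITS(var, 8, 4, 0xa) on var == 0 leaves var == 0x0a00, and
 * GET_BITS(0x0a00, 8, 4) returns 0xa.
 */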

#define XSIR_GET_BITS(_var, _prefix, _field)				\
	GET_BITS((_var),						\
		 _prefix##_##_field##_INDEX,				\
		 _prefix##_##_field##_WIDTH)

#define XSIR_SET_BITS(_var, _prefix, _field, _val)			\
	SET_BITS((_var),						\
		 _prefix##_##_field##_INDEX,				\
		 _prefix##_##_field##_WIDTH, (_val))

/* Macros for reading or writing SerDes integration registers
 *  The ioread macros will get bit fields or full values using the
 *  register definitions formed using the input names
 *
 *  The iowrite macros will set bit fields or full values using the
 *  register definitions formed using the input names
 */
#define XSIR0_IOREAD(_priv, _reg)					\
	ioread16((_priv)->sir0_regs + _reg)

#define XSIR0_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XSIR0_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XSIR0_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->sir0_regs + _reg)

#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XSIR0_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XSIR0_IOWRITE((_priv), _reg, reg_val);				\
} while (0)

#define XSIR1_IOREAD(_priv, _reg)					\
	ioread16((_priv)->sir1_regs + _reg)

#define XSIR1_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XSIR1_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XSIR1_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->sir1_regs + _reg)

#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XSIR1_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XSIR1_IOWRITE((_priv), _reg, reg_val);				\
} while (0)

/* Macros for reading or writing SerDes RxTx registers
 *  The ioread macros will get bit fields or full values using the
 *  register definitions formed using the input names
 *
 *  The iowrite macros will set bit fields or full values using the
 *  register definitions formed using the input names
 */
#define XRXTX_IOREAD(_priv, _reg)					\
	ioread16((_priv)->rxtx_regs + _reg)

#define XRXTX_IOREAD_BITS(_priv, _reg, _field)				\
	GET_BITS(XRXTX_IOREAD((_priv), _reg),				\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH)

#define XRXTX_IOWRITE(_priv, _reg, _val)				\
	iowrite16((_val), (_priv)->rxtx_regs + _reg)

#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val)			\
do {									\
	u16 reg_val = XRXTX_IOREAD((_priv), _reg);			\
	SET_BITS(reg_val,						\
		 _reg##_##_field##_INDEX,				\
		 _reg##_##_field##_WIDTH, (_val));			\
	XRXTX_IOWRITE((_priv), _reg, reg_val);				\
} while (0)
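
/* Example: XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, 1) expands to a
 * read of RXTX_REG20, a SET_BITS() on bit 2 (RXTX_REG20_BLWC_ENA_INDEX)
 * with width 1, and a write of the result back to RXTX_REG20.
 */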

static const u32 amd_xgbe_phy_serdes_blwc[] = {
	SPEED_1000_BLWC,
	SPEED_2500_BLWC,
	SPEED_10000_BLWC,
};

static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
	SPEED_1000_CDR,
	SPEED_2500_CDR,
	SPEED_10000_CDR,
};

static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
	SPEED_1000_PQ,
	SPEED_2500_PQ,
	SPEED_10000_PQ,
};

static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
	SPEED_1000_TXAMP,
	SPEED_2500_TXAMP,
	SPEED_10000_TXAMP,
};

static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
	SPEED_1000_DFE_TAP_CONFIG,
	SPEED_2500_DFE_TAP_CONFIG,
	SPEED_10000_DFE_TAP_CONFIG,
};

static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
	SPEED_1000_DFE_TAP_ENABLE,
	SPEED_2500_DFE_TAP_ENABLE,
	SPEED_10000_DFE_TAP_ENABLE,
};

enum amd_xgbe_phy_an {
	AMD_XGBE_AN_READY = 0,
	AMD_XGBE_AN_PAGE_RECEIVED,
	AMD_XGBE_AN_INCOMPAT_LINK,
	AMD_XGBE_AN_COMPLETE,
	AMD_XGBE_AN_NO_LINK,
	AMD_XGBE_AN_ERROR,
};

enum amd_xgbe_phy_rx {
	AMD_XGBE_RX_BPA = 0,
	AMD_XGBE_RX_XNP,
	AMD_XGBE_RX_COMPLETE,
	AMD_XGBE_RX_ERROR,
};
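
/* Per-mode page reception normally advances AMD_XGBE_RX_BPA -> (optional
 * AMD_XGBE_RX_XNP exchanges) -> AMD_XGBE_RX_COMPLETE, with
 * AMD_XGBE_RX_ERROR marking a failed exchange.
 */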

enum amd_xgbe_phy_mode {
	AMD_XGBE_MODE_KR,
	AMD_XGBE_MODE_KX,
};

enum amd_xgbe_phy_speedset {
	AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
	AMD_XGBE_PHY_SPEEDSET_2500_10000,
};

struct amd_xgbe_phy_priv {
	struct platform_device *pdev;
	struct acpi_device *adev;
	struct device *dev;

	struct phy_device *phydev;

	/* SerDes related mmio resources */
	struct resource *rxtx_res;
	struct resource *sir0_res;
	struct resource *sir1_res;

	/* SerDes related mmio registers */
	void __iomem *rxtx_regs;	/* SerDes Rx/Tx CSRs */
	void __iomem *sir0_regs;	/* SerDes integration registers (1/2) */
	void __iomem *sir1_regs;	/* SerDes integration registers (2/2) */

	int an_irq;
	char an_irq_name[IFNAMSIZ + 32];
	struct work_struct an_irq_work;
	unsigned int an_irq_allocated;

	unsigned int speed_set;

	/* SerDes UEFI configurable settings.
	 *   Switching between modes/speeds requires new values for some
	 *   SerDes settings.  The values can be supplied as device
	 *   properties in array format.  The first array entry is for
	 *   1GbE, second for 2.5GbE and third for 10GbE
	 */
	u32 serdes_blwc[XGBE_PHY_SPEEDS];
	u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
	u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
	u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
	u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
	u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];

	/* Auto-negotiation state machine support */
	struct mutex an_mutex;
	enum amd_xgbe_phy_an an_result;
	enum amd_xgbe_phy_an an_state;
	enum amd_xgbe_phy_rx kr_state;
	enum amd_xgbe_phy_rx kx_state;
	struct work_struct an_work;
	struct workqueue_struct *an_workqueue;
	unsigned int an_supported;
	unsigned int parallel_detect;
	unsigned int fec_ability;
	unsigned long an_start;

	unsigned int lpm_ctrl;		/* CTRL1 for resume */
};

static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return ret;

	ret |= XGBE_PHY_KR_TRAINING_ENABLE;
	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);

	return 0;
}

static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return ret;

	ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);

	return 0;
}

static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret |= MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	usleep_range(75, 100);

	ret &= ~MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	return 0;
}

static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	/* Assert Rx and Tx ratechange */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
}

static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	unsigned int wait;
	u16 status;

	/* Release Rx and Tx ratechange */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);

	/* Wait for Rx and Tx ready */
	wait = XGBE_PHY_RATECHANGE_COUNT;
	while (wait--) {
		usleep_range(50, 75);

		status = XSIR0_IOREAD(priv, SIR0_STATUS);
		if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
		    XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
			goto rx_reset;
	}

	netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
		   status);

rx_reset:
	/* Perform Rx reset for the DFE changes */
	XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
}

static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Enable KR training */
	ret = amd_xgbe_an_enable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KR/10G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBR;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED10G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 10G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 2.5G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 1G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}

static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
				 enum amd_xgbe_phy_mode *mode)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
		*mode = AMD_XGBE_MODE_KR;
	else
		*mode = AMD_XGBE_MODE_KX;

	return 0;
}

static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
{
	enum amd_xgbe_phy_mode mode;

	if (amd_xgbe_phy_cur_mode(phydev, &mode))
		return false;

	return (mode == AMD_XGBE_MODE_KR);
}

static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* If we are in KR switch to KX, and vice-versa */
	if (amd_xgbe_phy_in_kr_mode(phydev)) {
		if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
			ret = amd_xgbe_phy_gmii_mode(phydev);
		else
			ret = amd_xgbe_phy_gmii_2500_mode(phydev);
	} else {
		ret = amd_xgbe_phy_xgmii_mode(phydev);
	}

	return ret;
}

static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
				 enum amd_xgbe_phy_mode mode)
{
	enum amd_xgbe_phy_mode cur_mode;
	int ret;

	ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
	if (ret)
		return ret;

	if (mode != cur_mode)
		ret = amd_xgbe_phy_switch_mode(phydev);

	return ret;
}

static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
{
	if (phydev->autoneg == AUTONEG_ENABLE) {
		if (phydev->advertising & ADVERTISED_10000baseKR_Full)
			return true;
	} else {
		if (phydev->speed == SPEED_10000)
			return true;
	}

	return false;
}

static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
{
	if (phydev->autoneg == AUTONEG_ENABLE) {
		if (phydev->advertising & ADVERTISED_2500baseX_Full)
			return true;
	} else {
		if (phydev->speed == SPEED_2500)
			return true;
	}

	return false;
}

static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
{
	if (phydev->autoneg == AUTONEG_ENABLE) {
		if (phydev->advertising & ADVERTISED_1000baseKX_Full)
			return true;
	} else {
		if (phydev->speed == SPEED_1000)
			return true;
	}

	return false;
}

static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
			       bool restart)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_AN_CTRL1_ENABLE;

	if (enable)
		ret |= MDIO_AN_CTRL1_ENABLE;

	if (restart)
		ret |= MDIO_AN_CTRL1_RESTART;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);

	return 0;
}

static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
{
	return amd_xgbe_phy_set_an(phydev, true, true);
}

static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
{
	return amd_xgbe_phy_set_an(phydev, false, false);
}

static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
						    enum amd_xgbe_phy_rx *state)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ad_reg, lp_reg, ret;

	*state = AMD_XGBE_RX_COMPLETE;

	/* If we're not in KR mode then we're done */
	if (!amd_xgbe_phy_in_kr_mode(phydev))
		return AMD_XGBE_AN_PAGE_RECEIVED;

	/* Enable/Disable FEC */
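	/* (For reference: bits 15:14 of the third advertisement and
	 * link-partner words below are the clause-73 FEC ability/request
	 * bits, so the local FEC ability is applied only when both sides
	 * set them.)
	 */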
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	ret &= ~XGBE_PHY_FEC_MASK;
	if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
		ret |= priv->fec_ability;

	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);

	/* Start KR training */
	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);

		ret |= XGBE_PHY_KR_TRAINING_START;
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
			      ret);

		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
	}

	return AMD_XGBE_AN_PAGE_RECEIVED;
}

static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	u16 msg;

	*state = AMD_XGBE_RX_XNP;

	msg = XNP_MCF_NULL_MESSAGE;
	msg |= XNP_MP_FORMATTED;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);

	return AMD_XGBE_AN_PAGE_RECEIVED;
}
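
/* Note: with the defines above, the null message page sent by
 * amd_xgbe_an_tx_xnp() is XNP_MCF_NULL_MESSAGE | XNP_MP_FORMATTED ==
 * 0x2001, written after zeroing the two upper next-page words.
 */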

static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	unsigned int link_support;
	int ret, ad_reg, lp_reg;

	/* Read Base Ability register 2 first */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	/* Check for a supported mode, otherwise restart in a different one */
	link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
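	/* (0x80 is the clause-73 10GBASE-KR technology ability bit and 0x20
	 * the 1000BASE-KX bit of the base-page word read above.)
	 */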
	if (!(ret & link_support))
		return AMD_XGBE_AN_INCOMPAT_LINK;

	/* Check Extended Next Page support */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
	       amd_xgbe_an_tx_xnp(phydev, state) :
	       amd_xgbe_an_tx_training(phydev, state);
}

static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
					       enum amd_xgbe_phy_rx *state)
{
	int ad_reg, lp_reg;

	/* Check Extended Next Page support */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
	       amd_xgbe_an_tx_xnp(phydev, state) :
	       amd_xgbe_an_tx_training(phydev, state);
}

static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	enum amd_xgbe_phy_rx *state;
	unsigned long an_timeout;
	int ret;

	if (!priv->an_start) {
		priv->an_start = jiffies;
	} else {
		an_timeout = priv->an_start +
			     msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
		if (time_after(jiffies, an_timeout)) {
			/* Auto-negotiation timed out, reset state */
			priv->kr_state = AMD_XGBE_RX_BPA;
			priv->kx_state = AMD_XGBE_RX_BPA;

			priv->an_start = jiffies;
		}
	}

	state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
						: &priv->kx_state;

	switch (*state) {
	case AMD_XGBE_RX_BPA:
		ret = amd_xgbe_an_rx_bpa(phydev, state);
		break;

	case AMD_XGBE_RX_XNP:
		ret = amd_xgbe_an_rx_xnp(phydev, state);
		break;

	default:
		ret = AMD_XGBE_AN_ERROR;
	}

	return ret;
}

static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Be sure we aren't looping trying to negotiate */
	if (amd_xgbe_phy_in_kr_mode(phydev)) {
		priv->kr_state = AMD_XGBE_RX_ERROR;

		if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
		    !(phydev->advertising & SUPPORTED_2500baseX_Full))
			return AMD_XGBE_AN_NO_LINK;

		if (priv->kx_state != AMD_XGBE_RX_BPA)
			return AMD_XGBE_AN_NO_LINK;
	} else {
		priv->kx_state = AMD_XGBE_RX_ERROR;

		if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
			return AMD_XGBE_AN_NO_LINK;

		if (priv->kr_state != AMD_XGBE_RX_BPA)
			return AMD_XGBE_AN_NO_LINK;
	}

	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	ret = amd_xgbe_phy_switch_mode(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	ret = amd_xgbe_phy_restart_an(phydev);
	if (ret)
		return AMD_XGBE_AN_ERROR;

	return AMD_XGBE_AN_INCOMPAT_LINK;
}

static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
{
	struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;

	/* Interrupt reason must be read and cleared outside of IRQ context */
	disable_irq_nosync(priv->an_irq);

	queue_work(priv->an_workqueue, &priv->an_irq_work);

	return IRQ_HANDLED;
}

static void amd_xgbe_an_irq_work(struct work_struct *work)
{
	struct amd_xgbe_phy_priv *priv = container_of(work,
						      struct amd_xgbe_phy_priv,
						      an_irq_work);

	/* Avoid a race between enabling the IRQ and exiting the work by
	 * waiting for the work to finish and then queueing it
	 */
	flush_work(&priv->an_work);
	queue_work(priv->an_workqueue, &priv->an_work);
}

static void amd_xgbe_an_state_machine(struct work_struct *work)
{
	struct amd_xgbe_phy_priv *priv = container_of(work,
						      struct amd_xgbe_phy_priv,
						      an_work);
	struct phy_device *phydev = priv->phydev;
	enum amd_xgbe_phy_an cur_state = priv->an_state;
	int int_reg, int_mask;

	mutex_lock(&priv->an_mutex);

	/* Read the interrupt */
	int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
	if (!int_reg)
		goto out;

next_int:
	if (int_reg < 0) {
		priv->an_state = AMD_XGBE_AN_ERROR;
		int_mask = XGBE_AN_INT_MASK;
	} else if (int_reg & XGBE_AN_PG_RCV) {
		priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
		int_mask = XGBE_AN_PG_RCV;
	} else if (int_reg & XGBE_AN_INC_LINK) {
		priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
		int_mask = XGBE_AN_INC_LINK;
	} else if (int_reg & XGBE_AN_INT_CMPLT) {
		priv->an_state = AMD_XGBE_AN_COMPLETE;
		int_mask = XGBE_AN_INT_CMPLT;
	} else {
		priv->an_state = AMD_XGBE_AN_ERROR;
		int_mask = 0;
	}

	/* Clear the interrupt to be processed */
	int_reg &= ~int_mask;
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);

	priv->an_result = priv->an_state;

again:
	cur_state = priv->an_state;

	switch (priv->an_state) {
	case AMD_XGBE_AN_READY:
		priv->an_supported = 0;
		break;

	case AMD_XGBE_AN_PAGE_RECEIVED:
		priv->an_state = amd_xgbe_an_page_received(phydev);
		priv->an_supported++;
		break;

	case AMD_XGBE_AN_INCOMPAT_LINK:
		priv->an_supported = 0;
		priv->parallel_detect = 0;
		priv->an_state = amd_xgbe_an_incompat_link(phydev);
		break;

	case AMD_XGBE_AN_COMPLETE:
		priv->parallel_detect = priv->an_supported ? 0 : 1;
		netdev_dbg(phydev->attached_dev, "%s successful\n",
			   priv->an_supported ? "Auto negotiation"
					      : "Parallel detection");
		break;

	case AMD_XGBE_AN_NO_LINK:
		break;

	default:
		priv->an_state = AMD_XGBE_AN_ERROR;
	}

	if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
		int_reg = 0;
		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
	} else if (priv->an_state == AMD_XGBE_AN_ERROR) {
		netdev_err(phydev->attached_dev,
			   "error during auto-negotiation, state=%u\n",
			   cur_state);

		int_reg = 0;
		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
	}

	if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
		priv->an_result = priv->an_state;
		priv->an_state = AMD_XGBE_AN_READY;
		priv->kr_state = AMD_XGBE_RX_BPA;
		priv->kx_state = AMD_XGBE_RX_BPA;
		priv->an_start = 0;
	}

	if (cur_state != priv->an_state)
		goto again;

	if (int_reg)
		goto next_int;

out:
	enable_irq(priv->an_irq);

	mutex_unlock(&priv->an_mutex);
}

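/* Program the three clause-73 advertisement words from phydev->advertising:
 * word 3 carries the FEC bits (15:14), word 2 the 10GBASE-KR (bit 7) and
 * 1000BASE-KX (bit 5) abilities, and word 1 the pause (bit 10), asymmetric
 * pause (bit 11) and next-page (bit 15) bits.
 */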
static int amd_xgbe_an_init(struct phy_device *phydev)
{
	int ret;

	/* Set up Advertisement register 3 first */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ret < 0)
		return ret;

	if (phydev->advertising & SUPPORTED_10000baseR_FEC)
		ret |= 0xc000;
	else
		ret &= ~0xc000;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);

	/* Set up Advertisement register 2 next */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
	if (ret < 0)
		return ret;

	if (phydev->advertising & SUPPORTED_10000baseKR_Full)
		ret |= 0x80;
	else
		ret &= ~0x80;

	if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
	    (phydev->advertising & SUPPORTED_2500baseX_Full))
		ret |= 0x20;
	else
		ret &= ~0x20;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);

	/* Set up Advertisement register 1 last */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
	if (ret < 0)
		return ret;

	if (phydev->advertising & SUPPORTED_Pause)
		ret |= 0x400;
	else
		ret &= ~0x400;

	if (phydev->advertising & SUPPORTED_Asym_Pause)
		ret |= 0x800;
	else
		ret &= ~0x800;

	/* We don't intend to perform XNP */
	ret &= ~XNP_NP_EXCHANGE;

	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);

	return 0;
}

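/* PCS soft reset: the poll below gives the self-clearing reset bit up to
 * 50 * 20ms == 1s to clear before reporting -ETIMEDOUT.
 */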
static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
{
	int count, ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret |= MDIO_CTRL1_RESET;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	count = 50;
	do {
		msleep(20);
		ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
		if (ret < 0)
			return ret;
	} while ((ret & MDIO_CTRL1_RESET) && --count);

	if (ret & MDIO_CTRL1_RESET)
		return -ETIMEDOUT;

	/* Disable auto-negotiation for now */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Clear auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);

	return 0;
}

static int amd_xgbe_phy_config_init(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct net_device *netdev = phydev->attached_dev;
	int ret;

	if (!priv->an_irq_allocated) {
		/* Allocate the auto-negotiation workqueue and interrupt */
		snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
			 "%s-pcs", netdev_name(netdev));

		priv->an_workqueue =
			create_singlethread_workqueue(priv->an_irq_name);
		if (!priv->an_workqueue) {
			netdev_err(netdev, "phy workqueue creation failed\n");
			return -ENOMEM;
		}

		ret = devm_request_irq(priv->dev, priv->an_irq,
				       amd_xgbe_an_isr, 0, priv->an_irq_name,
				       priv);
		if (ret) {
			netdev_err(netdev, "phy irq request failed\n");
			destroy_workqueue(priv->an_workqueue);
			return ret;
		}

		priv->an_irq_allocated = 1;
	}

	/* Set initial mode - call the mode setting routines
	 * directly to ensure we are properly configured
	 */
	if (amd_xgbe_phy_use_xgmii_mode(phydev))
		ret = amd_xgbe_phy_xgmii_mode(phydev);
	else if (amd_xgbe_phy_use_gmii_mode(phydev))
		ret = amd_xgbe_phy_gmii_mode(phydev);
	else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
		ret = amd_xgbe_phy_gmii_2500_mode(phydev);
	else
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	/* Set up advertisement registers based on current settings */
	ret = amd_xgbe_an_init(phydev);
	if (ret)
		return ret;

	/* Enable auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);

	return 0;
}

static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
{
	int ret;

	/* Disable auto-negotiation */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Validate/Set specified speed */
	switch (phydev->speed) {
	case SPEED_10000:
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
		break;

	case SPEED_2500:
	case SPEED_1000:
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
		break;

	default:
		ret = -EINVAL;
	}

	if (ret < 0)
		return ret;

	/* Validate duplex mode */
	if (phydev->duplex != DUPLEX_FULL)
		return -EINVAL;

	phydev->pause = 0;
	phydev->asym_pause = 0;

	return 0;
}

static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret;

	if (phydev->autoneg != AUTONEG_ENABLE)
		return amd_xgbe_phy_setup_forced(phydev);

	/* Make sure we have the AN MMD present */
	if (!(mmd_mask & MDIO_DEVS_AN))
		return -EINVAL;

	/* Disable auto-negotiation interrupt */
	disable_irq(priv->an_irq);

	/* Start auto-negotiation in a supported mode */
	if (phydev->advertising & SUPPORTED_10000baseKR_Full)
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
	else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
		 (phydev->advertising & SUPPORTED_2500baseX_Full))
		ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
	else
		ret = -EINVAL;
	if (ret < 0) {
		enable_irq(priv->an_irq);
		return ret;
	}

	/* Disable and stop any in progress auto-negotiation */
	ret = amd_xgbe_phy_disable_an(phydev);
	if (ret < 0)
		return ret;

	/* Clear any auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);

	priv->an_result = AMD_XGBE_AN_READY;
	priv->an_state = AMD_XGBE_AN_READY;
	priv->kr_state = AMD_XGBE_RX_BPA;
	priv->kx_state = AMD_XGBE_RX_BPA;

	/* Re-enable auto-negotiation interrupt */
	enable_irq(priv->an_irq);

	/* Set up advertisement registers based on current settings */
	ret = amd_xgbe_an_init(phydev);
	if (ret)
		return ret;

	/* Enable and start auto-negotiation */
	return amd_xgbe_phy_restart_an(phydev);
}

static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	mutex_lock(&priv->an_mutex);

	ret = __amd_xgbe_phy_config_aneg(phydev);

	mutex_unlock(&priv->an_mutex);

	return ret;
}

static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	return (priv->an_result == AMD_XGBE_AN_COMPLETE);
}

static int amd_xgbe_phy_update_link(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* If we're doing auto-negotiation don't report link down */
	if (priv->an_state != AMD_XGBE_AN_READY) {
		phydev->link = 1;
		return 0;
	}

	/* Link status is latched low, so read once to clear
	 * and then read again to get current state
	 */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;

	return 0;
}

static int amd_xgbe_phy_read_status(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret, ad_ret, lp_ret;

	ret = amd_xgbe_phy_update_link(phydev);
	if (ret)
		return ret;

	if ((phydev->autoneg == AUTONEG_ENABLE) &&
	    !priv->parallel_detect) {
		if (!(mmd_mask & MDIO_DEVS_AN))
			return -EINVAL;

		if (!amd_xgbe_phy_aneg_done(phydev))
			return 0;

		/* Compare Advertisement and Link Partner register 1 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
		if (lp_ret < 0)
			return lp_ret;

		ad_ret &= lp_ret;
		phydev->pause = (ad_ret & 0x400) ? 1 : 0;
		phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;

		/* Compare Advertisement and Link Partner register 2 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
				      MDIO_AN_ADVERTISE + 1);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
		if (lp_ret < 0)
			return lp_ret;

		ad_ret &= lp_ret;
		if (ad_ret & 0x80) {
			phydev->speed = SPEED_10000;
			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
			if (ret)
				return ret;
		} else {
			switch (priv->speed_set) {
			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
				phydev->speed = SPEED_1000;
				break;

			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
				phydev->speed = SPEED_2500;
				break;
			}

			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
			if (ret)
				return ret;
		}

		phydev->duplex = DUPLEX_FULL;
	} else {
		if (amd_xgbe_phy_in_kr_mode(phydev)) {
			phydev->speed = SPEED_10000;
		} else {
			switch (priv->speed_set) {
			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
				phydev->speed = SPEED_1000;
				break;

			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
				phydev->speed = SPEED_2500;
				break;
			}
		}
		phydev->duplex = DUPLEX_FULL;
		phydev->pause = 0;
		phydev->asym_pause = 0;
	}

	return 0;
}

static int amd_xgbe_phy_suspend(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	mutex_lock(&phydev->lock);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		goto unlock;

	priv->lpm_ctrl = ret;

	ret |= MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = 0;

unlock:
	mutex_unlock(&phydev->lock);

	return ret;
}

static int amd_xgbe_phy_resume(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;

	mutex_lock(&phydev->lock);

	priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);

	mutex_unlock(&phydev->lock);

	return 0;
}

static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
						unsigned int type)
{
	unsigned int count;
	int i;

	for (i = 0, count = 0; i < pdev->num_resources; i++) {
		struct resource *r = &pdev->resource[i];

		if (type == resource_type(r))
			count++;
	}

	return count;
}

static int amd_xgbe_phy_probe(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv;
	struct platform_device *phy_pdev;
	struct device *dev, *phy_dev;
	unsigned int phy_resnum, phy_irqnum;
	int ret;

	if (!phydev->bus || !phydev->bus->parent)
		return -EINVAL;

	dev = phydev->bus->parent;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pdev = to_platform_device(dev);
	priv->adev = ACPI_COMPANION(dev);
	priv->dev = dev;
	priv->phydev = phydev;
	mutex_init(&priv->an_mutex);
	INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
	INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);

	if (!priv->adev || acpi_disabled) {
		struct device_node *bus_node;
		struct device_node *phy_node;

		bus_node = priv->dev->of_node;
		phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
		if (!phy_node) {
			dev_err(dev, "unable to parse phy-handle\n");
			ret = -EINVAL;
			goto err_priv;
		}

		phy_pdev = of_find_device_by_node(phy_node);
		of_node_put(phy_node);

		if (!phy_pdev) {
			dev_err(dev, "unable to obtain phy device\n");
			ret = -EINVAL;
			goto err_priv;
		}

		phy_resnum = 0;
		phy_irqnum = 0;
	} else {
		/* In ACPI, the XGBE and PHY resources are grouped together,
		 * with the PHY resources at the end
		 */
		phy_pdev = priv->pdev;
		phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
							 IORESOURCE_MEM) - 3;
		phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
							 IORESOURCE_IRQ) - 1;
	}
	phy_dev = &phy_pdev->dev;

	/* Get the device mmio areas */
	priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
	if (IS_ERR(priv->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(priv->rxtx_regs);
		goto err_put;
	}

	priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
	if (IS_ERR(priv->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(priv->sir0_regs);
		goto err_rxtx;
	}

	priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
					       phy_resnum++);
	priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
	if (IS_ERR(priv->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(priv->sir1_regs);
		goto err_sir0;
	}

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq failed\n");
		goto err_sir1;
	}
	priv->an_irq = ret;

	/* Get the device speed set property */
	ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
				       &priv->speed_set);
	if (ret) {
		dev_err(dev, "invalid %s property\n",
			XGBE_PHY_SPEEDSET_PROPERTY);
		goto err_sir1;
	}

	switch (priv->speed_set) {
	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
		break;
	default:
		dev_err(dev, "invalid %s property\n",
			XGBE_PHY_SPEEDSET_PROPERTY);
		ret = -EINVAL;
		goto err_sir1;
	}

	if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_BLWC_PROPERTY,
						     priv->serdes_blwc,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_BLWC_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
		       sizeof(priv->serdes_blwc));
	}

	if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_CDR_RATE_PROPERTY,
						     priv->serdes_cdr_rate,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_CDR_RATE_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
		       sizeof(priv->serdes_cdr_rate));
	}

	if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_PQ_SKEW_PROPERTY,
						     priv->serdes_pq_skew,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_PQ_SKEW_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
		       sizeof(priv->serdes_pq_skew));
	}

	if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_TX_AMP_PROPERTY,
						     priv->serdes_tx_amp,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_TX_AMP_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
		       sizeof(priv->serdes_tx_amp));
	}

	if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_DFE_CFG_PROPERTY,
						     priv->serdes_dfe_tap_cfg,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_DFE_CFG_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_dfe_tap_cfg,
		       amd_xgbe_phy_serdes_dfe_tap_cfg,
		       sizeof(priv->serdes_dfe_tap_cfg));
	}

	if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PHY_DFE_ENA_PROPERTY,
						     priv->serdes_dfe_tap_ena,
						     XGBE_PHY_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PHY_DFE_ENA_PROPERTY);
			goto err_sir1;
		}
	} else {
		memcpy(priv->serdes_dfe_tap_ena,
		       amd_xgbe_phy_serdes_dfe_tap_ena,
		       sizeof(priv->serdes_dfe_tap_ena));
	}

	/* Initialize supported features */
	phydev->supported = SUPPORTED_Autoneg;
	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	phydev->supported |= SUPPORTED_Backplane;
	phydev->supported |= SUPPORTED_10000baseKR_Full;
	switch (priv->speed_set) {
	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
		phydev->supported |= SUPPORTED_1000baseKX_Full;
		break;
	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
		phydev->supported |= SUPPORTED_2500baseX_Full;
		break;
	}

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
	if (ret < 0)
		return ret;
	priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
	if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
		phydev->supported |= SUPPORTED_10000baseR_FEC;

	phydev->advertising = phydev->supported;

	phydev->priv = priv;

	if (!priv->adev || acpi_disabled)
		platform_device_put(phy_pdev);

	return 0;

err_sir1:
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

err_sir0:
	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

err_rxtx:
	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

err_put:
	if (!priv->adev || acpi_disabled)
		platform_device_put(phy_pdev);

err_priv:
	devm_kfree(dev, priv);

	return ret;
}

static void amd_xgbe_phy_remove(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct device *dev = priv->dev;

	if (priv->an_irq_allocated) {
		devm_free_irq(dev, priv->an_irq, priv);

		flush_workqueue(priv->an_workqueue);
		destroy_workqueue(priv->an_workqueue);
	}

	/* Release resources */
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

	devm_kfree(dev, priv);
}

static int amd_xgbe_match_phy_device(struct phy_device *phydev)
{
	return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
}

static struct phy_driver amd_xgbe_phy_driver[] = {
	{
		.phy_id			= XGBE_PHY_ID,
		.phy_id_mask		= XGBE_PHY_MASK,
		.name			= "AMD XGBE PHY",
		.features		= 0,
		.flags			= PHY_IS_INTERNAL,
		.probe			= amd_xgbe_phy_probe,
		.remove			= amd_xgbe_phy_remove,
		.soft_reset		= amd_xgbe_phy_soft_reset,
		.config_init		= amd_xgbe_phy_config_init,
		.suspend		= amd_xgbe_phy_suspend,
		.resume			= amd_xgbe_phy_resume,
		.config_aneg		= amd_xgbe_phy_config_aneg,
		.aneg_done		= amd_xgbe_phy_aneg_done,
		.read_status		= amd_xgbe_phy_read_status,
		.match_phy_device	= amd_xgbe_match_phy_device,
		.driver			= {
			.owner = THIS_MODULE,
		},
	},
};

module_phy_driver(amd_xgbe_phy_driver);

static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
	{ XGBE_PHY_ID, XGBE_PHY_MASK },
	{ }
};
MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);