/* Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <asm/irq.h>

static const char *phy_speed_to_str(int speed)
{
	switch (speed) {
	case SPEED_10:
		return "10Mbps";
	case SPEED_100:
		return "100Mbps";
	case SPEED_1000:
		return "1Gbps";
	case SPEED_2500:
		return "2.5Gbps";
	case SPEED_10000:
		return "10Gbps";
	case SPEED_UNKNOWN:
		return "Unknown";
	default:
		return "Unsupported (update phy.c)";
	}
}

#define PHY_STATE_STR(_state)			\
	case PHY_##_state:			\
		return __stringify(_state);	\

static const char *phy_state_to_str(enum phy_state st)
{
	switch (st) {
	PHY_STATE_STR(DOWN)
	PHY_STATE_STR(STARTING)
	PHY_STATE_STR(READY)
	PHY_STATE_STR(PENDING)
	PHY_STATE_STR(UP)
	PHY_STATE_STR(AN)
	PHY_STATE_STR(RUNNING)
	PHY_STATE_STR(NOLINK)
	PHY_STATE_STR(FORCING)
	PHY_STATE_STR(CHANGELINK)
	PHY_STATE_STR(HALTED)
	PHY_STATE_STR(RESUMING)
	}

	return NULL;
}


/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link) {
		netdev_info(phydev->attached_dev,
			    "Link is Up - %s/%s - flow control %s\n",
			    phy_speed_to_str(phydev->speed),
			    DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
			    phydev->pause ? "rx/tx" : "off");
	} else {
		netdev_info(phydev->attached_dev, "Link is Down\n");
	}
}
EXPORT_SYMBOL(phy_print_status);

/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	if (phydev->drv->ack_interrupt)
		return phydev->drv->ack_interrupt(phydev);

	return 0;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		return phydev->drv->config_intr(phydev);

	return 0;
}


/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Return the auto-negotiation status from this @phydev
 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
 * is still pending.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	if (phydev->drv->aneg_done)
		return phydev->drv->aneg_done(phydev);

	return genphy_aneg_done(phydev);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value
 */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKR_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKX4_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_2500,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_2500baseX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseKX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 * matches the desired speed and duplex, and returns the index
 * of that setting. Returns the index of the last setting if
 * none of the others match.
 */
static inline unsigned int phy_find_setting(int speed, int duplex)
{
	unsigned int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 * than or equal to the one pointed to by idx, as determined by
 * the mask in features. Returns the index of the last setting
 * if nothing else matches.
 */
static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_check_valid - check if there is a valid PHY setting which matches
 *		     speed, duplex, and feature mask
 * @speed: speed to match
 * @duplex: duplex to match
 * @features: A mask of the valid settings
 *
 * Description: Returns true if there is a valid setting, false otherwise.
 */
static inline bool phy_check_valid(int speed, int duplex, u32 features)
{
	unsigned int idx;

	idx = phy_find_valid(phy_find_setting(speed, duplex), features);

	return settings[idx].speed == speed && settings[idx].duplex == duplex &&
		(settings[idx].setting & features);
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes.  Drop down by one in this order:  1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	unsigned int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			     features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	phydev->mdix = cmd->eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

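/**
 * phy_ethtool_gset - generic ethtool gset function, reports current settings
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd to fill in
 *
 * Description: Fills in @cmd from the PHY's current state: supported,
 * advertised and link partner masks, speed, duplex, port, PHY address,
 * transceiver type, autoneg setting and MDI-X control.
 */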
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;
	cmd->lp_advertising = phydev->lp_advertising;

	ethtool_cmd_speed_set(cmd, phydev->speed);
	cmd->duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->port = PORT_BNC;
	else
		cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = phy_is_internal(phydev) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;
	cmd->eth_tp_mdix_ctrl = phydev->mdix;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);

/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;
	bool change_autoneg = false;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
						 mii_data->reg_num);
		return 0;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
					if (phydev->autoneg == AUTONEG_ENABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_DISABLE;
					if (val & BMCR_FULLDPLX)
						phydev->duplex = DUPLEX_FULL;
					else
						phydev->duplex = DUPLEX_HALF;
					if (val & BMCR_SPEED1000)
						phydev->speed = SPEED_1000;
					else if (val & BMCR_SPEED100)
						phydev->speed = SPEED_100;
					else
						phydev->speed = SPEED_10;
				} else {
					if (phydev->autoneg == AUTONEG_DISABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_ENABLE;
				}
				break;
			case MII_ADVERTISE:
				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
				change_autoneg = true;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->phy_id == phydev->addr &&
		    mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET)
			return phy_init_hw(phydev);

		if (change_autoneg)
			return phy_start_aneg(phydev);

		return 0;

	case SIOCSHWTSTAMP:
		if (phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(phy_mii_ioctl);

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	/* Invalidate LP advertising flags */
	phydev->lp_advertising = 0;

	err = phydev->drv->config_aneg(phydev);
	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to maintain your own state machine,
 *   do not call this function.
 */
void phy_start_machine(struct phy_device *phydev)
{
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 *   (unless it wasn't up yet). This function must be called BEFORE
 *   phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours.  */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line.
	 */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	queue_work(system_power_efficient_wq, &phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);
	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
			phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);
	/* If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change(struct work_struct *work)
{
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	if (phydev->drv->did_interrupt &&
	    !phydev->drv->did_interrupt(phydev))
		goto ignore;

	if (phy_disable_interrupts(phydev))
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state &&
	    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
		goto irq_enable_err;

	/* reschedule state queue work to run as soon as possible */
	cancel_delayed_work_sync(&phydev->state_queue);
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phy_interrupt_is_valid(phydev)) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}
EXPORT_SYMBOL(phy_stop);

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	bool do_resume = false;
	int err = 0;

	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		/* make sure interrupts are re-enabled for the PHY */
		err = phy_enable_interrupts(phydev);
		if (err < 0)
			break;

		phydev->state = PHY_RESUMING;
		do_resume = true;
		break;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);

	/* if phy was suspended, bring the physical link up again */
	if (do_resume)
		phy_resume(phydev);
}
EXPORT_SYMBOL(phy_start);

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	bool needs_aneg = false, do_suspend = false;
	enum phy_state old_state;
	int err = 0;
	int old_link;

	mutex_lock(&phydev->lock);

	old_state = phydev->state;

	if (phydev->drv->link_change_notify)
		phydev->drv->link_change_notify(phydev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = true;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);
		if (err < 0)
			break;

		/* If the link is down, give up on negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done.  Break if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);

		} else if (0 == phydev->link_timeout--)
			needs_aneg = true;
		break;
	case PHY_NOLINK:
		if (phy_interrupt_is_valid(phydev))
			break;

		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			if (AUTONEG_ENABLE == phydev->autoneg) {
				err = phy_aneg_done(phydev);
				if (err < 0)
					break;

				if (!err) {
					phydev->state = PHY_AN;
					phydev->link_timeout = PHY_AN_TIMEOUT;
					break;
				}
			}
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--)
				needs_aneg = true;
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are polling or ignoring
		 * interrupts, and the link has changed since the last check.
		 */
		if (!phy_interrupt_is_valid(phydev)) {
			old_link = phydev->link;
			err = phy_read_status(phydev);
			if (err)
				break;

			if (old_link != phydev->link)
				phydev->state = PHY_CHANGELINK;
		}
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		if (phy_interrupt_is_valid(phydev))
			err = phy_config_interrupt(phydev,
						   PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			do_suspend = true;
		}
		break;
	case PHY_RESUMING:
		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're still waiting for AN
			 */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else {
					phydev->state = PHY_NOLINK;
				}
				phydev->adjust_link(phydev->attached_dev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else {
				phydev->state = PHY_NOLINK;
			}
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);
	else if (do_suspend)
		phy_suspend(phydev);

	if (err < 0)
		phy_error(phydev);

	dev_dbg(&phydev->dev, "PHY state change %s -> %s\n",
		phy_state_to_str(old_state), phy_state_to_str(phydev->state));

	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
			   PHY_STATE_TIME * HZ);
}

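/**
 * phy_mac_interrupt - MAC-driven link change notification
 * @phydev: target phy_device struct
 * @new_link: the new link state reported by the MAC
 *
 * Description: Records the link state reported by the MAC and schedules
 * the PHY change work so the state machine picks up the change without
 * waiting for the next poll.
 */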
void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
	cancel_work_sync(&phydev->phy_queue);
	phydev->link = new_link;
	schedule_work(&phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);

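/**
 * mmd_phy_indirect - set up the Clause 22 registers for an indirect MMD access
 * @bus: the target MII bus
 * @prtad: MMD register address to access
 * @devad: MMD device address (DEVAD)
 * @addr: PHY address on the MII bus
 *
 * Description: Programs the MMD access control/data registers (13 and 14)
 * so that a following read or write of MII_MMD_DATA hits register @prtad
 * of MMD @devad, with no post increment.  Callers in this file hold the
 * bus mdio_lock around the whole access sequence.
 */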
static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
				    int addr)
{
	/* Write the desired MMD Devad */
	bus->write(bus, addr, MII_MMD_CTRL, devad);

	/* Write the desired MMD register address */
	bus->write(bus, addr, MII_MMD_DATA, prtad);

	/* Select the Function : DATA with no post increment */
	bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
}

/**
 * phy_read_mmd_indirect - reads data from the MMD registers
 * @phydev: The PHY device
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 *
 * Description: it reads data from the MMD registers (clause 22 access to
 * clause 45 registers) of the specified PHY address.
 * To read these registers:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Read reg 14 // Read MMD data
 */
int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
			  int devad, int addr)
{
	struct phy_driver *phydrv = phydev->drv;
	int value = -1;

	if (!phydrv->read_mmd_indirect) {
		struct mii_bus *bus = phydev->bus;

		mutex_lock(&bus->mdio_lock);
		mmd_phy_indirect(bus, prtad, devad, addr);

		/* Read the content of the MMD's selected register */
		value = bus->read(bus, addr, MII_MMD_DATA);
		mutex_unlock(&bus->mdio_lock);
	} else {
		value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
	}
	return value;
}
EXPORT_SYMBOL(phy_read_mmd_indirect);

/**
 * phy_write_mmd_indirect - writes data to the MMD registers
 * @phydev: The PHY device
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 * @data: data to write in the MMD register
 *
 * Description: Writes data to the MMD registers of the specified
 * PHY address.
 * To write these registers:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Write reg 14 // Write MMD data
 */
void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
			    int devad, int addr, u32 data)
{
	struct phy_driver *phydrv = phydev->drv;

	if (!phydrv->write_mmd_indirect) {
		struct mii_bus *bus = phydev->bus;

		mutex_lock(&bus->mdio_lock);
		mmd_phy_indirect(bus, prtad, devad, addr);

		/* Write the data into MMD's selected register */
		bus->write(bus, addr, MII_MMD_DATA, data);
		mutex_unlock(&bus->mdio_lock);
	} else {
		phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
	}
}
EXPORT_SYMBOL(phy_write_mmd_indirect);

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: it checks if the Energy-Efficient Ethernet (EEE)
 * is supported by looking at the MMD registers 3.20 and 7.60/61
 * and it programs the MMD register 3.0 setting the "Clock stop enable"
 * bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	/* According to 802.3az, EEE is supported only in full-duplex mode.
	 * Also, the EEE feature is active when the core is operating with MII,
	 * GMII or RGMII (all kinds). Internal PHYs are also allowed to proceed
	 * and should return an error if they do not support EEE.
	 */
	if ((phydev->duplex == DUPLEX_FULL) &&
	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
	     (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
	     phy_interface_is_rgmii(phydev) ||
	     phy_is_internal(phydev))) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
						MDIO_MMD_PCS, phydev->addr);
		if (eee_cap <= 0)
			goto eee_exit_err;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit_err;

		/* Check which link settings negotiated and verify it in
		 * the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
					       MDIO_MMD_AN, phydev->addr);
		if (eee_lp <= 0)
			goto eee_exit_err;

		eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
						MDIO_MMD_AN, phydev->addr);
		if (eee_adv <= 0)
			goto eee_exit_err;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
			goto eee_exit_err;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
							MDIO_MMD_PCS,
							phydev->addr);
			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd_indirect(phydev, MDIO_CTRL1,
					       MDIO_MMD_PCS, phydev->addr,
					       val);
		}

		return 0; /* EEE supported */
	}
eee_exit_err:
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);

/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: it reports the number of times the PHY
 * failed to complete its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR,
				     MDIO_MMD_PCS, phydev->addr);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it reports the Supported/Advertisement/LP Advertisement
 * capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	/* Get Supported EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
				    MDIO_MMD_PCS, phydev->addr);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it programs the EEE advertisement register with the
 * requested capabilities.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);

	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
			       phydev->addr, val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);

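/**
 * phy_ethtool_set_wol - enable/disable Wake-on-LAN in the PHY
 * @phydev: target phy_device struct
 * @wol: the requested Wake-on-LAN options
 *
 * Description: Hands the request to the driver's set_wol callback, or
 * returns -EOPNOTSUPP if the driver does not implement one.
 */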
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

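/**
 * phy_ethtool_get_wol - report the PHY's Wake-on-LAN status
 * @phydev: target phy_device struct
 * @wol: Wake-on-LAN status to fill in
 *
 * Description: Fills in @wol via the driver's get_wol callback, if the
 * driver provides one; otherwise @wol is left untouched.
 */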
void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->get_wol)
		phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);