This source file includes following definitions.
- igc_check_reset_block
- igc_get_phy_id
- igc_phy_has_link
- igc_power_up_phy_copper
- igc_power_down_phy_copper
- igc_check_downshift
- igc_phy_hw_reset
- igc_phy_setup_autoneg
- igc_wait_autoneg
- igc_copper_link_autoneg
- igc_setup_copper_link
- igc_read_phy_reg_mdic
- igc_write_phy_reg_mdic
- __igc_access_xmdio_reg
- igc_read_xmdio_reg
- igc_write_xmdio_reg
- igc_write_phy_reg_gpy
- igc_read_phy_reg_gpy
1
2
3
4 #include "igc_phy.h"
5
6
7
8
9
10
11
12
13
14 s32 igc_check_reset_block(struct igc_hw *hw)
15 {
16 u32 manc;
17
18 manc = rd32(IGC_MANC);
19
20 return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ?
21 IGC_ERR_BLK_PHY_RESET : 0;
22 }
23
24
25
26
27
28
29
30
31 s32 igc_get_phy_id(struct igc_hw *hw)
32 {
33 struct igc_phy_info *phy = &hw->phy;
34 s32 ret_val = 0;
35 u16 phy_id;
36
37 ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
38 if (ret_val)
39 goto out;
40
41 phy->id = (u32)(phy_id << 16);
42 usleep_range(200, 500);
43 ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
44 if (ret_val)
45 goto out;
46
47 phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
48 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
49
50 out:
51 return ret_val;
52 }
53
54
55
56
57
58
59
60
61
62
/**
 * igc_phy_has_link - Polls PHY for link
 * @hw: pointer to the HW structure
 * @iterations: number of times to poll for link
 * @usec_interval: delay between polling attempts
 * @success: pointer to whether polling was successful or not
 *
 * Polls the PHY status register for link, 'iterations' number of times.
 * Returns the last read error code; *success reports whether link was seen
 * before the iteration limit expired.
 */
s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
		     u32 usec_interval, bool *success)
{
	u16 i, phy_status;
	s32 ret_val = 0;

	for (i = 0; i < iterations; i++) {
		/* Some PHYs require the PHY_STATUS register to be read
		 * twice due to the link bit being sticky.  No harm doing
		 * it across the board.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val && usec_interval > 0) {
			/* If the first read fails, another entity may have
			 * ownership of the resources, wait and try again to
			 * see if they have relinquished the resources yet.
			 */
			if (usec_interval >= 1000)
				mdelay(usec_interval / 1000);
			else
				udelay(usec_interval);
		}
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_LINK_STATUS)
			break;
		/* No link yet; wait out the caller-specified interval
		 * before the next poll.
		 */
		if (usec_interval >= 1000)
			mdelay(usec_interval / 1000);
		else
			udelay(usec_interval);
	}

	/* Link was seen iff we broke out before exhausting all iterations. */
	*success = (i < iterations) ? true : false;

	return ret_val;
}
100
101
102
103
104
105
106
107
108 void igc_power_up_phy_copper(struct igc_hw *hw)
109 {
110 u16 mii_reg = 0;
111
112
113 hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
114 mii_reg &= ~MII_CR_POWER_DOWN;
115 hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
116 }
117
118
119
120
121
122
123
124
/**
 * igc_power_down_phy_copper - Power down copper PHY
 * @hw: pointer to the HW structure
 *
 * Power down PHY to save power.  The PHY will retain its settings across
 * a power down/up cycle.
 */
void igc_power_down_phy_copper(struct igc_hw *hw)
{
	u16 mii_reg = 0;

	/* Read the control register and set the power-down bit. */
	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
	mii_reg |= MII_CR_POWER_DOWN;

	/* NOTE(review): mii_reg is computed but never written back to
	 * PHY_CONTROL.  The upstream i225 driver deliberately leaves the
	 * write_reg call commented out as a temporary workaround until the
	 * PHY implements the IEEE registers properly -- confirm this
	 * omission is still intentional before "fixing" it here.
	 */
	usleep_range(1000, 2000);
}
139
140
141
142
143
144
145
146
147
148 s32 igc_check_downshift(struct igc_hw *hw)
149 {
150 struct igc_phy_info *phy = &hw->phy;
151 s32 ret_val;
152
153 switch (phy->type) {
154 case igc_phy_i225:
155 default:
156
157 phy->speed_downgraded = false;
158 ret_val = 0;
159 }
160
161 return ret_val;
162 }
163
164
165
166
167
168
169
170
171
172
/**
 * igc_phy_hw_reset - PHY hardware reset
 * @hw: pointer to the HW structure
 *
 * Verify the reset block is not blocking us from resetting.  Acquire
 * the semaphore, pulse the PHY reset bit in the device control register,
 * wait the appropriate delay time for the device to reset, and release
 * the semaphore.
 */
s32 igc_phy_hw_reset(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	s32 ret_val;
	u32 ctrl;

	/* If the PHY reset is blocked (e.g. by manageability), report
	 * success without touching the hardware.
	 */
	ret_val = igc_check_reset_block(hw);
	if (ret_val) {
		ret_val = 0;
		goto out;
	}

	ret_val = phy->ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Assert the PHY reset bit, flush, and hold it for the
	 * PHY-specific delay.
	 */
	ctrl = rd32(IGC_CTRL);
	wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
	wrfl();

	udelay(phy->reset_delay_us);

	/* Deassert reset by restoring the original control value. */
	wr32(IGC_CTRL, ctrl);
	wrfl();

	/* Give the PHY time to come out of reset. */
	usleep_range(1500, 2000);

	phy->ops.release(hw);

out:
	return ret_val;
}
205
206
207
208
209
210
211
212
213
214
/**
 * igc_phy_setup_autoneg - Configure PHY for auto-negotiation
 * @hw: pointer to the HW structure
 *
 * Reads the MII auto-neg advertisement register and/or the 1000T control
 * register (and, for i225, the MULTI GBT AN control register) and if the
 * PHY is already setup for auto-negotiation, then it will configure other
 * registers, based on the adapter configuration.  Assumes the PHY is
 * currently pointing to the base page.
 */
static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	u16 aneg_multigbt_an_ctrl = 0;
	u16 mii_1000t_ctrl_reg = 0;
	u16 mii_autoneg_adv_reg;
	s32 ret_val;

	phy->autoneg_advertised &= phy->autoneg_mask;

	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
	ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
		/* Read the MII 1000Base-T Control Register (Address 9). */
		ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
					    &mii_1000t_ctrl_reg);
		if (ret_val)
			return ret_val;
	}

	if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
	    hw->phy.id == I225_I_PHY_ID) {
		/* Read the MULTI GBT AN Control Register - reg 7.32 */
		ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
					    MMD_DEVADDR_SHIFT) |
					    ANEG_MULTIGBT_AN_CTRL,
					    &aneg_multigbt_an_ctrl);

		if (ret_val)
			return ret_val;
	}

	/* Need to parse both autoneg_advertised and fc and set up
	 * the appropriate PHY registers.  First we will parse for
	 * autoneg_advertised software override.  Since we can advertise
	 * a plethora of combinations, we need to check each bit
	 * individually.
	 */

	/* First we clear all the 10/100 mb speed bits in the Auto-Neg
	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
	 * the 1000Base-T Control Register (Address 9).
	 */
	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
				 NWAY_AR_100TX_HD_CAPS |
				 NWAY_AR_10T_FD_CAPS |
				 NWAY_AR_10T_HD_CAPS);
	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);

	hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);

	/* Do we want to advertise 10 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
		hw_dbg("Advertise 10mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
	}

	/* Do we want to advertise 10 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
		hw_dbg("Advertise 10mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
	}

	/* Do we want to advertise 100 Mb Half Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
		hw_dbg("Advertise 100mb Half duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
	}

	/* Do we want to advertise 100 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
		hw_dbg("Advertise 100mb Full duplex\n");
		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
	}

	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
		hw_dbg("Advertise 1000mb Half duplex request denied!\n");

	/* Do we want to advertise 1000 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
		hw_dbg("Advertise 1000mb Full duplex\n");
		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
	}

	/* We do not allow the Phy to advertise 2500 Mb Half Duplex */
	if (phy->autoneg_advertised & ADVERTISE_2500_HALF)
		hw_dbg("Advertise 2500mb Half duplex request denied!\n");

	/* Do we want to advertise 2500 Mb Full Duplex? */
	if (phy->autoneg_advertised & ADVERTISE_2500_FULL) {
		hw_dbg("Advertise 2500mb Full duplex\n");
		aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS;
	} else {
		aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS;
	}

	/* Check for a software override of the flow control settings, and
	 * setup the PHY advertisement registers accordingly.  If
	 * auto-negotiation is enabled, then software will have to set the
	 * "PAUSE" bits to the correct value in the Auto-Negotiation
	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start
	 * auto-negotiation.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause frames
	 *          but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not support receiving pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
	 */
	switch (hw->fc.current_mode) {
	case igc_fc_none:
		/* Flow control (Rx & Tx) is completely disabled by a
		 * software over-ride.
		 */
		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case igc_fc_rx_pause:
		/* Rx Flow control is enabled, and Tx Flow control is
		 * disabled, by a software over-ride.
		 *
		 * Since there really isn't a way to advertise that we are
		 * capable of Rx Pause ONLY, we will advertise that we
		 * support both symmetric and asymmetric Rx PAUSE.  Later
		 * (in igc_config_fc_after_link_up) we will disable the
		 * hw's ability to send PAUSE frames.
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case igc_fc_tx_pause:
		/* Tx Flow control is enabled, and Rx Flow control is
		 * disabled, by a software over-ride.
		 */
		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
		break;
	case igc_fc_full:
		/* Flow control (both Rx and Tx) is enabled by a software
		 * over-ride.
		 */
		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		return -IGC_ERR_CONFIG;
	}

	ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);

	if (phy->autoneg_mask & ADVERTISE_1000_FULL)
		ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
					     mii_1000t_ctrl_reg);

	if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
	    hw->phy.id == I225_I_PHY_ID)
		ret_val = phy->ops.write_reg(hw,
					     (STANDARD_AN_REG_MASK <<
					     MMD_DEVADDR_SHIFT) |
					     ANEG_MULTIGBT_AN_CTRL,
					     aneg_multigbt_an_ctrl);

	return ret_val;
}
389
390
391
392
393
394
395
396
/**
 * igc_wait_autoneg - Wait for auto-neg completion
 * @hw: pointer to the HW structure
 *
 * Waits for auto-negotiation to complete or for the auto-negotiation time
 * limit to expire, whichever happens first.
 */
static s32 igc_wait_autoneg(struct igc_hw *hw)
{
	u16 i, phy_status;
	s32 ret_val = 0;

	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires.
	 * PHY_STATUS is read twice per iteration because some status bits
	 * are latched and only reflect current state on the second read.
	 */
	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_AUTONEG_COMPLETE)
			break;
		msleep(100);
	}

	/* If i == 0 then the timer expired; note that expiration does not
	 * guarantee auto-negotiation has completed -- the last read error
	 * code (possibly 0) is returned either way.
	 */
	return ret_val;
}
420
421
422
423
424
425
426
427
428
429
/**
 * igc_copper_link_autoneg - Setup/Enable autoneg for copper link
 * @hw: pointer to the HW structure
 *
 * Performs initial bounds checking on the autoneg advertisement parameter,
 * then configures and restarts auto-negotiation with the link partner.  If
 * autoneg_wait_to_complete is set, waits for autoneg to complete before
 * returning.
 */
static s32 igc_copper_link_autoneg(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	u16 phy_ctrl;
	s32 ret_val;

	/* Perform some bounds checking on the autoneg advertisement
	 * parameter.
	 */
	phy->autoneg_advertised &= phy->autoneg_mask;

	/* If autoneg_advertised is zero, we assume it was not defaulted
	 * by the calling code so we set to advertise full capability.
	 */
	if (phy->autoneg_advertised == 0)
		phy->autoneg_advertised = phy->autoneg_mask;

	hw_dbg("Reconfiguring auto-neg advertisement params\n");
	ret_val = igc_phy_setup_autoneg(hw);
	if (ret_val) {
		hw_dbg("Error Setting up Auto-Negotiation\n");
		goto out;
	}
	hw_dbg("Restarting Auto-Neg\n");

	/* Restart auto-negotiation by setting the Auto Neg Enable bit and
	 * the Auto Neg Restart bit in the PHY control register.
	 */
	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
	if (ret_val)
		goto out;

	phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
	if (ret_val)
		goto out;

	/* Does the user want to wait for Auto-Neg to complete here, or
	 * check at a later time (for example, callback routine).
	 */
	if (phy->autoneg_wait_to_complete) {
		ret_val = igc_wait_autoneg(hw);
		if (ret_val) {
			hw_dbg("Error while waiting for autoneg to complete\n");
			goto out;
		}
	}

	/* Force the MAC to re-evaluate link status on the next check. */
	hw->mac.get_link_status = true;

out:
	return ret_val;
}
483
484
485
486
487
488
489
490
491
492
/**
 * igc_setup_copper_link - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Calls the appropriate function to configure the link for auto-neg or
 * forced speed and duplex.  Then we check for link, once link is
 * established calls to configure collision distance and flow control are
 * called.  If link is not established, we return -IGC_ERR_PHY (-2).
 */
s32 igc_setup_copper_link(struct igc_hw *hw)
{
	s32 ret_val = 0;
	bool link;

	if (hw->mac.autoneg) {
		/* Setup autoneg and flow control advertisement and perform
		 * autonegotiation.
		 */
		ret_val = igc_copper_link_autoneg(hw);
		if (ret_val)
			goto out;
	} else {
		/* PHY will be set to 10H, 10F, 100H or 100F
		 * depending on user settings.
		 */
		hw_dbg("Forcing Speed and Duplex\n");
		ret_val = hw->phy.ops.force_speed_duplex(hw);
		if (ret_val) {
			hw_dbg("Error Forcing Speed and Duplex\n");
			goto out;
		}
	}

	/* Check link status.  Wait up to 100 microseconds for link to become
	 * valid.
	 */
	ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
	if (ret_val)
		goto out;

	if (link) {
		hw_dbg("Valid link established!!!\n");
		igc_config_collision_dist(hw);
		ret_val = igc_config_fc_after_link_up(hw);
	} else {
		hw_dbg("Unable to establish link!!!\n");
	}

out:
	return ret_val;
}
535
536
537
538
539
540
541
542
543
544
545 static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
546 {
547 struct igc_phy_info *phy = &hw->phy;
548 u32 i, mdic = 0;
549 s32 ret_val = 0;
550
551 if (offset > MAX_PHY_REG_ADDRESS) {
552 hw_dbg("PHY Address %d is out of range\n", offset);
553 ret_val = -IGC_ERR_PARAM;
554 goto out;
555 }
556
557
558
559
560
561 mdic = ((offset << IGC_MDIC_REG_SHIFT) |
562 (phy->addr << IGC_MDIC_PHY_SHIFT) |
563 (IGC_MDIC_OP_READ));
564
565 wr32(IGC_MDIC, mdic);
566
567
568
569
570
571 for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
572 usleep_range(500, 1000);
573 mdic = rd32(IGC_MDIC);
574 if (mdic & IGC_MDIC_READY)
575 break;
576 }
577 if (!(mdic & IGC_MDIC_READY)) {
578 hw_dbg("MDI Read did not complete\n");
579 ret_val = -IGC_ERR_PHY;
580 goto out;
581 }
582 if (mdic & IGC_MDIC_ERROR) {
583 hw_dbg("MDI Error\n");
584 ret_val = -IGC_ERR_PHY;
585 goto out;
586 }
587 *data = (u16)mdic;
588
589 out:
590 return ret_val;
591 }
592
593
594
595
596
597
598
599
600
601 static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
602 {
603 struct igc_phy_info *phy = &hw->phy;
604 u32 i, mdic = 0;
605 s32 ret_val = 0;
606
607 if (offset > MAX_PHY_REG_ADDRESS) {
608 hw_dbg("PHY Address %d is out of range\n", offset);
609 ret_val = -IGC_ERR_PARAM;
610 goto out;
611 }
612
613
614
615
616
617 mdic = (((u32)data) |
618 (offset << IGC_MDIC_REG_SHIFT) |
619 (phy->addr << IGC_MDIC_PHY_SHIFT) |
620 (IGC_MDIC_OP_WRITE));
621
622 wr32(IGC_MDIC, mdic);
623
624
625
626
627
628 for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
629 usleep_range(500, 1000);
630 mdic = rd32(IGC_MDIC);
631 if (mdic & IGC_MDIC_READY)
632 break;
633 }
634 if (!(mdic & IGC_MDIC_READY)) {
635 hw_dbg("MDI Write did not complete\n");
636 ret_val = -IGC_ERR_PHY;
637 goto out;
638 }
639 if (mdic & IGC_MDIC_ERROR) {
640 hw_dbg("MDI Error\n");
641 ret_val = -IGC_ERR_PHY;
642 goto out;
643 }
644
645 out:
646 return ret_val;
647 }
648
649
650
651
652
653
654
655
656
657 static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address,
658 u8 dev_addr, u16 *data, bool read)
659 {
660 s32 ret_val;
661
662 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr);
663 if (ret_val)
664 return ret_val;
665
666 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address);
667 if (ret_val)
668 return ret_val;
669
670 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA |
671 dev_addr);
672 if (ret_val)
673 return ret_val;
674
675 if (read)
676 ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data);
677 else
678 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data);
679 if (ret_val)
680 return ret_val;
681
682
683 ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0);
684 if (ret_val)
685 return ret_val;
686
687 return ret_val;
688 }
689
690
691
692
693
694
695
696
697 static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr,
698 u8 dev_addr, u16 *data)
699 {
700 return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true);
701 }
702
703
704
705
706
707
708
709
710 static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr,
711 u8 dev_addr, u16 data)
712 {
713 return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false);
714 }
715
716
717
718
719
720
721
722
723
724
725 s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
726 {
727 u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
728 s32 ret_val;
729
730 offset = offset & GPY_REG_MASK;
731
732 if (!dev_addr) {
733 ret_val = hw->phy.ops.acquire(hw);
734 if (ret_val)
735 return ret_val;
736 ret_val = igc_write_phy_reg_mdic(hw, offset, data);
737 if (ret_val)
738 return ret_val;
739 hw->phy.ops.release(hw);
740 } else {
741 ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
742 data);
743 }
744
745 return ret_val;
746 }
747
748
749
750
751
752
753
754
755
756
757
758 s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
759 {
760 u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
761 s32 ret_val;
762
763 offset = offset & GPY_REG_MASK;
764
765 if (!dev_addr) {
766 ret_val = hw->phy.ops.acquire(hw);
767 if (ret_val)
768 return ret_val;
769 ret_val = igc_read_phy_reg_mdic(hw, offset, data);
770 if (ret_val)
771 return ret_val;
772 hw->phy.ops.release(hw);
773 } else {
774 ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
775 data);
776 }
777
778 return ret_val;
779 }