This source file includes the following definitions:
- sja1105_cgu_idiv_packing
- sja1105_cgu_idiv_config
- sja1105_cgu_mii_control_packing
- sja1105_cgu_mii_tx_clk_config
- sja1105_cgu_mii_rx_clk_config
- sja1105_cgu_mii_ext_tx_clk_config
- sja1105_cgu_mii_ext_rx_clk_config
- sja1105_mii_clocking_setup
- sja1105_cgu_pll_control_packing
- sja1105_cgu_rgmii_tx_clk_config
- sja1105_cfg_pad_mii_tx_packing
- sja1105_rgmii_cfg_pad_tx_config
- sja1105_cfg_pad_mii_id_packing
- sja1105_rgmii_delay
- sja1105pqrs_setup_rgmii_delay
- sja1105_rgmii_clocking_setup
- sja1105_cgu_rmii_ref_clk_config
- sja1105_cgu_rmii_ext_tx_clk_config
- sja1105_cgu_rmii_pll_config
- sja1105_rmii_clocking_setup
- sja1105_clocking_setup_port
- sja1105_clocking_setup
#include <linux/packing.h>
#include "sja1105.h"

#define SJA1105_SIZE_CGU_CMD 4

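/* Control fields of the CFG_PAD_MIIx_TX pad configuration register */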
struct sja1105_cfg_pad_mii_tx {
        u64 d32_os;
        u64 d32_ipud;
        u64 d10_os;
        u64 d10_ipud;
        u64 ctrl_os;
        u64 ctrl_ipud;
        u64 clk_os;
        u64 clk_ih;
        u64 clk_ipud;
};

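/* Control fields of the CFG_PAD_MIIx_ID internal delay register */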
struct sja1105_cfg_pad_mii_id {
        u64 rxc_stable_ovr;
        u64 rxc_delay;
        u64 rxc_bypass;
        u64 rxc_pd;
        u64 txc_stable_ovr;
        u64 txc_delay;
        u64 txc_bypass;
        u64 txc_pd;
};

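/* Control fields of the CGU IDIVx integer clock divider registers */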
struct sja1105_cgu_idiv {
        u64 clksrc;
        u64 autoblock;
        u64 idiv;
        u64 pd;
};

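/* Control fields of the CGU PLL control registers */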
struct sja1105_cgu_pll_ctrl {
        u64 pllclksrc;
        u64 msel;
        u64 autoblock;
        u64 psel;
        u64 direct;
        u64 fbsel;
        u64 bypass;
        u64 pd;
};

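/* Clock sources selectable through the CGU CLKSRC fields */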
enum {
        CLKSRC_MII0_TX_CLK = 0x00,
        CLKSRC_MII0_RX_CLK = 0x01,
        CLKSRC_MII1_TX_CLK = 0x02,
        CLKSRC_MII1_RX_CLK = 0x03,
        CLKSRC_MII2_TX_CLK = 0x04,
        CLKSRC_MII2_RX_CLK = 0x05,
        CLKSRC_MII3_TX_CLK = 0x06,
        CLKSRC_MII3_RX_CLK = 0x07,
        CLKSRC_MII4_TX_CLK = 0x08,
        CLKSRC_MII4_RX_CLK = 0x09,
        CLKSRC_PLL0 = 0x0B,
        CLKSRC_PLL1 = 0x0E,
        CLKSRC_IDIV0 = 0x11,
        CLKSRC_IDIV1 = 0x12,
        CLKSRC_IDIV2 = 0x13,
        CLKSRC_IDIV3 = 0x14,
        CLKSRC_IDIV4 = 0x15,
};

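/* Control fields of the CGU MIIx clock control registers */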
struct sja1105_cgu_mii_ctrl {
        u64 clksrc;
        u64 autoblock;
        u64 pd;
};

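/* Pack or unpack an IDIV control word to or from a 4-byte buffer */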
static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv,
                                     enum packing_op op)
{
        const int size = 4;

        sja1105_packing(buf, &idiv->clksrc, 28, 24, size, op);
        sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op);
        sja1105_packing(buf, &idiv->idiv, 5, 2, size, op);
        sja1105_packing(buf, &idiv->pd, 0, 0, size, op);
}

static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
                                   bool enabled, int factor)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct device *dev = priv->ds->dev;
        struct sja1105_cgu_idiv idiv;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

        if (enabled && factor != 1 && factor != 10) {
                dev_err(dev, "idiv factor must be 1 or 10\n");
                return -ERANGE;
        }

        /* Payload for the IDIV control register */
        idiv.clksrc = 0x0A;             /* 25 MHz */
        idiv.autoblock = 1;             /* Block clk automatically */
        idiv.idiv = factor - 1;         /* Divide by 1 or 10 */
        idiv.pd = enabled ? 0 : 1;      /* Power down? */
        sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);

        return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
                                           regs->cgu_idiv[port], packed_buf,
                                           SJA1105_SIZE_CGU_CMD);
}

static void
sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd,
                                enum packing_op op)
{
        const int size = 4;

        sja1105_packing(buf, &cmd->clksrc, 28, 24, size, op);
        sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
        sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
}

static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
                                         int port, sja1105_mii_role_t role)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl mii_tx_clk;
        const int mac_clk_sources[] = {
                CLKSRC_MII0_TX_CLK,
                CLKSRC_MII1_TX_CLK,
                CLKSRC_MII2_TX_CLK,
                CLKSRC_MII3_TX_CLK,
                CLKSRC_MII4_TX_CLK,
        };
        const int phy_clk_sources[] = {
                CLKSRC_IDIV0,
                CLKSRC_IDIV1,
                CLKSRC_IDIV2,
                CLKSRC_IDIV3,
                CLKSRC_IDIV4,
        };
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        int clksrc;

        if (role == XMII_MAC)
                clksrc = mac_clk_sources[port];
        else
                clksrc = phy_clk_sources[port];

        mii_tx_clk.clksrc = clksrc;
        mii_tx_clk.autoblock = 1;
        mii_tx_clk.pd = 0;
        sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);

        return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
                                           regs->mii_tx_clk[port], packed_buf,
                                           SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl mii_rx_clk;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        const int clk_sources[] = {
                CLKSRC_MII0_RX_CLK,
                CLKSRC_MII1_RX_CLK,
                CLKSRC_MII2_RX_CLK,
                CLKSRC_MII3_RX_CLK,
                CLKSRC_MII4_RX_CLK,
        };

        mii_rx_clk.clksrc = clk_sources[port];
        mii_rx_clk.autoblock = 1;
        mii_rx_clk.pd = 0;
        sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);

        return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
                                           regs->mii_rx_clk[port], packed_buf,
                                           SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        const int clk_sources[] = {
                CLKSRC_IDIV0,
                CLKSRC_IDIV1,
                CLKSRC_IDIV2,
                CLKSRC_IDIV3,
                CLKSRC_IDIV4,
        };

        mii_ext_tx_clk.clksrc = clk_sources[port];
        mii_ext_tx_clk.autoblock = 1;
        mii_ext_tx_clk.pd = 0;
        sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);

        return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
                                           regs->mii_ext_tx_clk[port],
                                           packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        const int clk_sources[] = {
                CLKSRC_IDIV0,
                CLKSRC_IDIV1,
                CLKSRC_IDIV2,
                CLKSRC_IDIV3,
                CLKSRC_IDIV4,
        };

        mii_ext_rx_clk.clksrc = clk_sources[port];
        mii_ext_rx_clk.autoblock = 1;
        mii_ext_rx_clk.pd = 0;
        sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);

        return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
                                           regs->mii_ext_rx_clk[port],
                                           packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
                                      sja1105_mii_role_t role)
{
        struct device *dev = priv->ds->dev;
        int rc;

        dev_dbg(dev, "Configuring MII-%s clocking\n",
                (role == XMII_MAC) ? "MAC" : "PHY");

        /* If role is MAC, disable IDIV.
         * If role is PHY, enable IDIV and configure it to divide by 1.
         */
        rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1);
        if (rc < 0)
                return rc;

        /* Configure CLKSRC of MII_TX_CLK_n:
         * TX_CLK_n in the MAC role, IDIV_n in the PHY role.
         */
        rc = sja1105_cgu_mii_tx_clk_config(priv, port, role);
        if (rc < 0)
                return rc;

        /* Configure CLKSRC of MII_RX_CLK_n: always RX_CLK_n */
        rc = sja1105_cgu_mii_rx_clk_config(priv, port);
        if (rc < 0)
                return rc;

        if (role == XMII_PHY) {
                /* In the PHY role, the switch must also drive the external
                 * TX and RX clock pins from IDIV_n.
                 */
                rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port);
                if (rc < 0)
                        return rc;

                rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port);
                if (rc < 0)
                        return rc;
        }
        return 0;
}

static void
sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
                                enum packing_op op)
{
        const int size = 4;

        sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op);
        sja1105_packing(buf, &cmd->msel, 23, 16, size, op);
        sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
        sja1105_packing(buf, &cmd->psel, 9, 8, size, op);
        sja1105_packing(buf, &cmd->direct, 7, 7, size, op);
        sja1105_packing(buf, &cmd->fbsel, 6, 6, size, op);
        sja1105_packing(buf, &cmd->bypass, 1, 1, size, op);
        sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
}

static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
                                           int port, sja1105_speed_t speed)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl txc;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        int clksrc;

        /* At 1000 Mbps the TX clock comes from PLL0; at lower speeds it
         * comes from the per-port IDIV divider.
         */
        if (speed == SJA1105_SPEED_1000MBPS) {
                clksrc = CLKSRC_PLL0;
        } else {
                int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
                                     CLKSRC_IDIV3, CLKSRC_IDIV4};
                clksrc = clk_sources[port];
        }

        txc.clksrc = clksrc;
        /* Autoblock clk while changing clksrc */
        txc.autoblock = 1;
        /* Power Down off => enabled */
        txc.pd = 0;
        sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);

        return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
                                           regs->rgmii_tx_clk[port],
                                           packed_buf, SJA1105_SIZE_CGU_CMD);
}

static void
sja1105_cfg_pad_mii_tx_packing(void *buf, struct sja1105_cfg_pad_mii_tx *cmd,
                               enum packing_op op)
{
        const int size = 4;

        sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op);
        sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
        sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op);
        sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
        sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op);
        sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op);
        sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op);
        sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op);
        sja1105_packing(buf, &cmd->clk_ipud, 1, 0, size, op);
}

static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
                                           int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cfg_pad_mii_tx pad_mii_tx;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

        pad_mii_tx.d32_os = 3;
        pad_mii_tx.d10_os = 3;
        pad_mii_tx.d32_ipud = 2;
        pad_mii_tx.d10_ipud = 2;
        pad_mii_tx.ctrl_os = 3;
        pad_mii_tx.ctrl_ipud = 2;
        pad_mii_tx.clk_os = 3;
        pad_mii_tx.clk_ih = 0;
        pad_mii_tx.clk_ipud = 2;
        sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK);

        return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
                                           regs->pad_mii_tx[port],
                                           packed_buf, SJA1105_SIZE_CGU_CMD);
}

static void
sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
                               enum packing_op op)
{
        const int size = SJA1105_SIZE_CGU_CMD;

        sja1105_packing(buf, &cmd->rxc_stable_ovr, 15, 15, size, op);
        sja1105_packing(buf, &cmd->rxc_delay, 14, 10, size, op);
        sja1105_packing(buf, &cmd->rxc_bypass, 9, 9, size, op);
        sja1105_packing(buf, &cmd->rxc_pd, 8, 8, size, op);
        sja1105_packing(buf, &cmd->txc_stable_ovr, 7, 7, size, op);
        sja1105_packing(buf, &cmd->txc_delay, 6, 2, size, op);
        sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
        sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
}

/* Convert an RGMII clock phase shift (in degrees) into a delay tap value.
 * The arithmetic below computes tap = (phase - 73.8) / 0.9, scaled by 10
 * to avoid floating point; e.g. a 90 degree phase yields
 * (900 - 738) / 9 = 18.
 */
static inline u64 sja1105_rgmii_delay(u64 phase)
{
        phase *= 10;
        return (phase - 738) / 9;
}

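/* Program the RGMII internal delay registers of a P/Q/R/S port.
 * The delay taps are first written with both delay lines bypassed and
 * powered down; a second write then releases bypass and power-down only
 * for the directions (RX/TX) in which a delay was actually requested.
 */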
int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
{
        const struct sja1105_private *priv = ctx;
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        int rc;

        if (priv->rgmii_rx_delay[port])
                pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
        if (priv->rgmii_tx_delay[port])
                pad_mii_id.txc_delay = sja1105_rgmii_delay(90);

        /* Stage 1: write the delay taps with both delay lines bypassed
         * and powered down.
         */
        pad_mii_id.rxc_bypass = 1;
        pad_mii_id.rxc_pd = 1;
        pad_mii_id.txc_bypass = 1;
        pad_mii_id.txc_pd = 1;
        sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

        rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE,
                                         regs->pad_mii_id[port],
                                         packed_buf, SJA1105_SIZE_CGU_CMD);
        if (rc < 0)
                return rc;

        /* Stage 2: enable the delay lines for the requested directions */
        if (priv->rgmii_rx_delay[port]) {
                pad_mii_id.rxc_bypass = 0;
                pad_mii_id.rxc_pd = 0;
        }
        if (priv->rgmii_tx_delay[port]) {
                pad_mii_id.txc_bypass = 0;
                pad_mii_id.txc_pd = 0;
        }
        sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

        return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
                                           regs->pad_mii_id[port],
                                           packed_buf, SJA1105_SIZE_CGU_CMD);
}

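/* Configure the CGU for an RGMII port: program the IDIV for the port's
 * speed, select the TX clock source, set up the TX pads and, when the
 * chip supports it and the port is in the PHY role, apply the internal
 * RGMII delays.
 */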
static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
                                        sja1105_mii_role_t role)
{
        struct device *dev = priv->ds->dev;
        struct sja1105_mac_config_entry *mac;
        sja1105_speed_t speed;
        int rc;

        mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
        speed = mac[port].speed;

        dev_dbg(dev, "Configuring port %d RGMII at speed %dMbps\n",
                port, speed);

        switch (speed) {
        case SJA1105_SPEED_1000MBPS:
                /* 1000 Mbps: TX clock from PLL0, IDIV disabled */
                rc = sja1105_cgu_idiv_config(priv, port, false, 1);
                break;
        case SJA1105_SPEED_100MBPS:
                /* 100 Mbps: IDIV enabled, divide by 1 */
                rc = sja1105_cgu_idiv_config(priv, port, true, 1);
                break;
        case SJA1105_SPEED_10MBPS:
                /* 10 Mbps: IDIV enabled, divide by 10 */
                rc = sja1105_cgu_idiv_config(priv, port, true, 10);
                break;
        case SJA1105_SPEED_AUTO:
                /* No speed available yet, so there is nothing to program */
                dev_dbg(dev, "Speed not available, skipping CGU config\n");
                return 0;
        default:
                rc = -EINVAL;
        }

        if (rc < 0) {
                dev_err(dev, "Failed to configure idiv\n");
                return rc;
        }
        rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed);
        if (rc < 0) {
                dev_err(dev, "Failed to configure RGMII Tx clock\n");
                return rc;
        }
        rc = sja1105_rgmii_cfg_pad_tx_config(priv, port);
        if (rc < 0) {
                dev_err(dev, "Failed to configure Tx pad registers\n");
                return rc;
        }
        if (!priv->info->setup_rgmii_delay)
                return 0;

        /* Internal RGMII delays are only applied in the PHY role */
        if (role == XMII_MAC)
                return 0;

        return priv->info->setup_rgmii_delay(priv, port);
}

static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
                                           int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl ref_clk;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        const int clk_sources[] = {
                CLKSRC_MII0_TX_CLK,
                CLKSRC_MII1_TX_CLK,
                CLKSRC_MII2_TX_CLK,
                CLKSRC_MII3_TX_CLK,
                CLKSRC_MII4_TX_CLK,
        };

        ref_clk.clksrc = clk_sources[port];
        ref_clk.autoblock = 1;
        ref_clk.pd = 0;
        sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);

        return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
                                           regs->rmii_ref_clk[port],
                                           packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int
sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
{
        const struct sja1105_regs *regs = priv->info->regs;
        struct sja1105_cgu_mii_ctrl ext_tx_clk;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};

        ext_tx_clk.clksrc = CLKSRC_PLL1;
        ext_tx_clk.autoblock = 1;
        ext_tx_clk.pd = 0;
        sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);

        return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
                                           regs->rmii_ext_tx_clk[port],
                                           packed_buf, SJA1105_SIZE_CGU_CMD);
}

static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
{
        const struct sja1105_regs *regs = priv->info->regs;
        u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
        struct sja1105_cgu_pll_ctrl pll = {0};
        struct device *dev = priv->ds->dev;
        int rc;

        /* Step 1: configure PLL1 for a 50 MHz output while it is still
         * powered down.
         */
        pll.pllclksrc = 0xA;
        pll.msel = 0x1;
        pll.autoblock = 0x1;
        pll.psel = 0x1;
        pll.direct = 0x0;
        pll.fbsel = 0x1;
        pll.bypass = 0x0;
        pll.pd = 0x1;

        sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
        rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
                                         packed_buf, SJA1105_SIZE_CGU_CMD);
        if (rc < 0) {
                dev_err(dev, "failed to configure PLL1 for 50MHz\n");
                return rc;
        }

        /* Step 2: deassert power-down to enable PLL1 */
        pll.pd = 0x0;

        sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
        rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
                                         packed_buf, SJA1105_SIZE_CGU_CMD);
        if (rc < 0) {
                dev_err(dev, "failed to enable PLL1\n");
                return rc;
        }
        return rc;
}

static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port,
                                       sja1105_mii_role_t role)
{
        struct device *dev = priv->ds->dev;
        int rc;

        dev_dbg(dev, "Configuring RMII-%s clocking\n",
                (role == XMII_MAC) ? "MAC" : "PHY");

        if (role == XMII_MAC) {
                /* In the MAC role, PLL1 must be set up to provide the
                 * 50 MHz RMII reference clock.
                 */
                rc = sja1105_cgu_rmii_pll_config(priv);
                if (rc < 0)
                        return rc;
        }

        rc = sja1105_cgu_idiv_config(priv, port, false, 1);
        if (rc < 0)
                return rc;

        rc = sja1105_cgu_rmii_ref_clk_config(priv, port);
        if (rc < 0)
                return rc;

        if (role == XMII_MAC) {
                rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port);
                if (rc < 0)
                        return rc;
        }
        return 0;
}

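/* Set up the CGU clocking of a single port according to the xMII mode and
 * MAC/PHY role recorded in the static config's xMII parameters table.
 */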
int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
{
        struct sja1105_xmii_params_entry *mii;
        struct device *dev = priv->ds->dev;
        sja1105_phy_interface_t phy_mode;
        sja1105_mii_role_t role;
        int rc;

        mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

        /* xMII mode and MAC/PHY role come from the static config */
        phy_mode = mii->xmii_mode[port];
        role = mii->phy_mac[port];

        switch (phy_mode) {
        case XMII_MODE_MII:
                rc = sja1105_mii_clocking_setup(priv, port, role);
                break;
        case XMII_MODE_RMII:
                rc = sja1105_rmii_clocking_setup(priv, port, role);
                break;
        case XMII_MODE_RGMII:
                rc = sja1105_rgmii_clocking_setup(priv, port, role);
                break;
        default:
                dev_err(dev, "Invalid interface mode specified: %d\n",
                        phy_mode);
                return -EINVAL;
        }
        if (rc)
                dev_err(dev, "Clocking setup for port %d failed: %d\n",
                        port, rc);
        return rc;
}

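/* Set up the CGU clocking of all switch ports */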
int sja1105_clocking_setup(struct sja1105_private *priv)
{
        int port, rc;

        for (port = 0; port < SJA1105_NUM_PORTS; port++) {
                rc = sja1105_clocking_setup_port(priv, port);
                if (rc < 0)
                        return rc;
        }
        return 0;
}