This source file includes the following definitions:
- set_dflts
- init
- set_mac_address
- set_bucket
- check_init_parameters
- get_exception_flag
- is_init_done
- dtsec_get_max_frame_length
- dtsec_isr
- dtsec_1588_isr
- free_init_resources
- dtsec_cfg_max_frame_len
- dtsec_cfg_pad_and_crc
- graceful_start
- graceful_stop
- dtsec_enable
- dtsec_disable
- dtsec_set_tx_pause_frames
- dtsec_accept_rx_pause_frames
- dtsec_modify_mac_address
- dtsec_add_hash_mac_address
- dtsec_set_allmulti
- dtsec_set_tstamp
- dtsec_del_hash_mac_address
- dtsec_set_promiscuous
- dtsec_adjust_link
- dtsec_restart_autoneg
- dtsec_get_version
- dtsec_set_exception
- dtsec_init
- dtsec_free
- dtsec_config
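
The functions listed above form the driver-facing API of this dTSEC MAC module. As a rough orientation, the sketch below shows the expected call order. It is a hypothetical caller, not part of this file: the real FMan MAC layer performs these steps with additional configuration in between, and the sketch assumes a filled-in struct fman_mac_params plus the combined COMM_MODE_RX_AND_TX mode value from the FMan MAC headers.

/* Hypothetical bring-up sketch (illustration only, not part of this file) */
static int example_dtsec_bring_up(struct fman_mac_params *params)
{
        struct fman_mac *mac;

        mac = dtsec_config(params);             /* allocate, apply set_dflts() defaults */
        if (!mac)
                return -ENOMEM;

        /* pre-init tuning is only accepted while dtsec_drv_param still exists */
        dtsec_cfg_max_frame_len(mac, 1522);
        dtsec_cfg_pad_and_crc(mac, true);

        if (dtsec_init(mac)) {                  /* program registers, hash tables, IRQs */
                dtsec_free(mac);
                return -EINVAL;
        }

        return dtsec_enable(mac, COMM_MODE_RX_AND_TX);
}
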
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "fman_dtsec.h"
#include "fman.h"

#include <linux/slab.h>
#include <linux/bitrev.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/phy.h>
#include <linux/crc32.h>
#include <linux/of_mdio.h>
#include <linux/mii.h>

/* TBI Control register (in the internal TBI PHY address space) */
#define MII_TBICON 0x11

/* TBICON register bit fields */
#define TBICON_SOFT_RESET 0x8000
#define TBICON_DISABLE_RX_DIS 0x2000
#define TBICON_DISABLE_TX_DIS 0x1000
#define TBICON_AN_SENSE 0x0100
#define TBICON_CLK_SELECT 0x0020
#define TBICON_MI_MODE 0x0010

/* TBI auto-negotiation advertisement values */
#define TBIANA_SGMII 0x4001
#define TBIANA_1000X 0x01a0

/* Interrupt Mask Register (IMASK) bit fields */
#define DTSEC_IMASK_BREN 0x80000000
#define DTSEC_IMASK_RXCEN 0x40000000
#define DTSEC_IMASK_MSROEN 0x04000000
#define DTSEC_IMASK_GTSCEN 0x02000000
#define DTSEC_IMASK_BTEN 0x01000000
#define DTSEC_IMASK_TXCEN 0x00800000
#define DTSEC_IMASK_TXEEN 0x00400000
#define DTSEC_IMASK_LCEN 0x00040000
#define DTSEC_IMASK_CRLEN 0x00020000
#define DTSEC_IMASK_XFUNEN 0x00010000
#define DTSEC_IMASK_ABRTEN 0x00008000
#define DTSEC_IMASK_IFERREN 0x00004000
#define DTSEC_IMASK_MAGEN 0x00000800
#define DTSEC_IMASK_MMRDEN 0x00000400
#define DTSEC_IMASK_MMWREN 0x00000200
#define DTSEC_IMASK_GRSCEN 0x00000100
#define DTSEC_IMASK_TDPEEN 0x00000002
#define DTSEC_IMASK_RDPEEN 0x00000001

#define DTSEC_EVENTS_MASK \
        ((u32)(DTSEC_IMASK_BREN | \
               DTSEC_IMASK_RXCEN | \
               DTSEC_IMASK_BTEN | \
               DTSEC_IMASK_TXCEN | \
               DTSEC_IMASK_TXEEN | \
               DTSEC_IMASK_ABRTEN | \
               DTSEC_IMASK_LCEN | \
               DTSEC_IMASK_CRLEN | \
               DTSEC_IMASK_XFUNEN | \
               DTSEC_IMASK_IFERREN | \
               DTSEC_IMASK_MAGEN | \
               DTSEC_IMASK_TDPEEN | \
               DTSEC_IMASK_RDPEEN))

/* 1588 timer module event/mask bits */
#define TMR_PEMASK_TSREEN 0x00010000
#define TMR_PEVENT_TSRE 0x00010000

/* Group (multicast) bit of a MAC address, as a 64-bit value */
#define MAC_GROUP_ADDRESS 0x0000010000000000ULL

/* Default configuration values */
#define DEFAULT_HALFDUP_RETRANSMIT 0xf
#define DEFAULT_HALFDUP_COLL_WINDOW 0x37
#define DEFAULT_TX_PAUSE_TIME 0xf000
#define DEFAULT_RX_PREPEND 0
#define DEFAULT_PREAMBLE_LEN 7
#define DEFAULT_TX_PAUSE_TIME_EXTD 0
#define DEFAULT_NON_BACK_TO_BACK_IPG1 0x40
#define DEFAULT_NON_BACK_TO_BACK_IPG2 0x60
#define DEFAULT_MIN_IFG_ENFORCEMENT 0x50
#define DEFAULT_BACK_TO_BACK_IPG 0x60
#define DEFAULT_MAXIMUM_FRAME 0x600

#define DTSEC_ID2_INT_REDUCED_OFF 0x00010000

/* Extended Control register (ECNTRL) bit fields */
#define DTSEC_ECNTRL_GMIIM 0x00000040
#define DTSEC_ECNTRL_TBIM 0x00000020
#define DTSEC_ECNTRL_SGMIIM 0x00000002
#define DTSEC_ECNTRL_RPM 0x00000010
#define DTSEC_ECNTRL_R100M 0x00000008
#define DTSEC_ECNTRL_QSGMIIM 0x00000001

/* Transmit Control register (TCTRL) bit fields */
#define TCTRL_TTSE 0x00000040
#define TCTRL_GTS 0x00000020

/* Receive Control register (RCTRL) bit fields */
#define RCTRL_PAL_MASK 0x001f0000
#define RCTRL_PAL_SHIFT 16
#define RCTRL_GHTX 0x00000400
#define RCTRL_RTSE 0x00000040
#define RCTRL_GRS 0x00000020
#define RCTRL_MPROM 0x00000008
#define RCTRL_RSF 0x00000004
#define RCTRL_UPROM 0x00000001

/* MAC Configuration register 1 (MACCFG1) bit fields */
#define MACCFG1_SOFT_RESET 0x80000000
#define MACCFG1_RX_FLOW 0x00000020
#define MACCFG1_TX_FLOW 0x00000010
#define MACCFG1_TX_EN 0x00000001
#define MACCFG1_RX_EN 0x00000004

/* MAC Configuration register 2 (MACCFG2) bit fields */
#define MACCFG2_NIBBLE_MODE 0x00000100
#define MACCFG2_BYTE_MODE 0x00000200
#define MACCFG2_PAD_CRC_EN 0x00000004
#define MACCFG2_FULL_DUPLEX 0x00000001
#define MACCFG2_PREAMBLE_LENGTH_MASK 0x0000f000
#define MACCFG2_PREAMBLE_LENGTH_SHIFT 12

/* Inter-Packet/Frame Gap register (IPGIFG) fields */
#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24
#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16
#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8

#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000
#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000
#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00
#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F

/* Half-Duplex register (HAFDUP) fields */
#define HAFDUP_EXCESS_DEFER 0x00010000
#define HAFDUP_COLLISION_WINDOW 0x000003ff
#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12
#define HAFDUP_RETRANSMISSION_MAX 0x0000f000

#define NUM_OF_HASH_REGS 8

/* Pause Time Value register (PTV) fields */
#define PTV_PTE_MASK 0xffff0000
#define PTV_PT_MASK 0x0000ffff
#define PTV_PTE_SHIFT 16

/* Upper bounds enforced by check_init_parameters() */
#define MAX_PACKET_ALIGNMENT 31
#define MAX_INTER_PACKET_GAP 0x7f
#define MAX_RETRANSMISSION 0x0f
#define MAX_COLLISION_WINDOW 0x03ff

/* Hash table sizes: 256 buckets, or 512 when extended (GHTX) hashing is used */
#define DTSEC_HASH_TABLE_SIZE 256

#define EXTENDED_HASH_TABLE_SIZE 512

struct dtsec_regs {
        u32 tsec_id;
        u32 tsec_id2;
        u32 ievent;
        u32 imask;
        u32 reserved0010[1];
        u32 ecntrl;
        u32 ptv;
        u32 tbipa;
        u32 tmr_ctrl;
        u32 tmr_pevent;
        u32 tmr_pemask;
        u32 reserved002c[5];
        u32 tctrl;
        u32 reserved0044[3];
        u32 rctrl;
        u32 reserved0054[11];
        u32 igaddr[8];
        u32 gaddr[8];
        u32 reserved00c0[16];
        u32 maccfg1;
        u32 maccfg2;
        u32 ipgifg;
        u32 hafdup;
        u32 maxfrm;
        u32 reserved0114[10];
        u32 ifstat;
        u32 macstnaddr1;
        u32 macstnaddr2;
        struct {
                u32 exact_match1;
                u32 exact_match2;
        } macaddr[15];
        u32 reserved01c0[16];
        u32 tr64;
        u32 tr127;
        u32 tr255;
        u32 tr511;
        u32 tr1k;
        u32 trmax;
        u32 trmgv;

        u32 rbyt;
        u32 rpkt;
        u32 rfcs;
        u32 rmca;
        u32 rbca;
        u32 rxcf;
        u32 rxpf;
        u32 rxuo;
        u32 raln;
        u32 rflr;
        u32 rcde;
        u32 rcse;
        u32 rund;
        u32 rovr;
        u32 rfrg;
        u32 rjbr;
        u32 rdrp;
        u32 tbyt;
        u32 tpkt;
        u32 tmca;
        u32 tbca;
        u32 txpf;
        u32 tdfr;
        u32 tedf;
        u32 tscl;
        u32 tmcl;
        u32 tlcl;
        u32 txcl;
        u32 tncl;
        u32 reserved0290[1];
        u32 tdrp;
        u32 tjbr;
        u32 tfcs;
        u32 txcf;
        u32 tovr;
        u32 tund;
        u32 tfrg;
        u32 car1;
        u32 car2;
        u32 cam1;
        u32 cam2;
        u32 reserved02c0[848];
};

struct dtsec_cfg {
        u16 halfdup_retransmit;
        u16 halfdup_coll_window;
        bool tx_pad_crc;
        u16 tx_pause_time;
        bool ptp_tsu_en;
        bool ptp_exception_en;
        u32 preamble_len;
        u32 rx_prepend;
        u16 tx_pause_time_extd;
        u16 maximum_frame;
        u32 non_back_to_back_ipg1;
        u32 non_back_to_back_ipg2;
        u32 min_ifg_enforcement;
        u32 back_to_back_ipg;
};

struct fman_mac {
        struct dtsec_regs __iomem *regs;

        u64 addr;

        phy_interface_t phy_if;
        u16 max_speed;
        void *dev_id;
        fman_mac_exception_cb *exception_cb;
        fman_mac_exception_cb *event_cb;

        u8 num_of_ind_addr_in_regs;

        struct eth_hash_t *multicast_addr_hash;

        struct eth_hash_t *unicast_addr_hash;
        u8 mac_id;
        u32 exceptions;
        bool ptp_tsu_enabled;
        bool en_tsu_err_exception;
        struct dtsec_cfg *dtsec_drv_param;
        void *fm;
        struct fman_rev_info fm_rev_info;
        bool basex_if;
        struct phy_device *tbiphy;
};

static void set_dflts(struct dtsec_cfg *cfg)
{
        cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
        cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
        cfg->tx_pad_crc = true;
        cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;

        cfg->rx_prepend = DEFAULT_RX_PREPEND;
        cfg->ptp_tsu_en = true;
        cfg->ptp_exception_en = true;
        cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
        cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
        cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
        cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
        cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
        cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
        cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
}

static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
                phy_interface_t iface, u16 iface_speed, u8 *macaddr,
                u32 exception_mask, u8 tbi_addr)
{
        bool is_rgmii, is_sgmii, is_qsgmii;
        int i;
        u32 tmp;

        /* Soft reset */
        iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
        iowrite32be(0, &regs->maccfg1);

        /* Check that the requested interface mode is supported by this dTSEC */
        tmp = ioread32be(&regs->tsec_id2);

        if (iface == PHY_INTERFACE_MODE_RGMII ||
            iface == PHY_INTERFACE_MODE_RGMII_ID ||
            iface == PHY_INTERFACE_MODE_RGMII_RXID ||
            iface == PHY_INTERFACE_MODE_RGMII_TXID ||
            iface == PHY_INTERFACE_MODE_RMII)
                if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
                        return -EINVAL;

        if (iface == PHY_INTERFACE_MODE_SGMII ||
            iface == PHY_INTERFACE_MODE_MII)
                if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
                        return -EINVAL;

        is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
                   iface == PHY_INTERFACE_MODE_RGMII_ID ||
                   iface == PHY_INTERFACE_MODE_RGMII_RXID ||
                   iface == PHY_INTERFACE_MODE_RGMII_TXID;
        is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
        is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;

        tmp = 0;
        if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
                tmp |= DTSEC_ECNTRL_GMIIM;
        if (is_sgmii)
                tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
        if (is_qsgmii)
                tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
                        DTSEC_ECNTRL_QSGMIIM);
        if (is_rgmii)
                tmp |= DTSEC_ECNTRL_RPM;
        if (iface_speed == SPEED_100)
                tmp |= DTSEC_ECNTRL_R100M;

        iowrite32be(tmp, &regs->ecntrl);

        tmp = 0;

        if (cfg->tx_pause_time)
                tmp |= cfg->tx_pause_time;
        if (cfg->tx_pause_time_extd)
                tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
        iowrite32be(tmp, &regs->ptv);

        tmp = 0;
        tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
        /* Accept short frames */
        tmp |= RCTRL_RSF;

        iowrite32be(tmp, &regs->rctrl);

        /* Set the address of the internal TBI PHY */
        iowrite32be(tbi_addr, &regs->tbipa);

        iowrite32be(0, &regs->tmr_ctrl);

        if (cfg->ptp_tsu_en) {
                tmp = 0;
                tmp |= TMR_PEVENT_TSRE;
                iowrite32be(tmp, &regs->tmr_pevent);

                if (cfg->ptp_exception_en) {
                        tmp = 0;
                        tmp |= TMR_PEMASK_TSREEN;
                        iowrite32be(tmp, &regs->tmr_pemask);
                }
        }

        tmp = 0;
        tmp |= MACCFG1_RX_FLOW;
        tmp |= MACCFG1_TX_FLOW;
        iowrite32be(tmp, &regs->maccfg1);

        tmp = 0;

        if (iface_speed < SPEED_1000)
                tmp |= MACCFG2_NIBBLE_MODE;
        else if (iface_speed == SPEED_1000)
                tmp |= MACCFG2_BYTE_MODE;

        tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
                MACCFG2_PREAMBLE_LENGTH_MASK;
        if (cfg->tx_pad_crc)
                tmp |= MACCFG2_PAD_CRC_EN;
        /* Full duplex */
        tmp |= MACCFG2_FULL_DUPLEX;
        iowrite32be(tmp, &regs->maccfg2);

        tmp = (((cfg->non_back_to_back_ipg1 <<
                 IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
                & IPGIFG_NON_BACK_TO_BACK_IPG_1)
               | ((cfg->non_back_to_back_ipg2 <<
                   IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
                  & IPGIFG_NON_BACK_TO_BACK_IPG_2)
               | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
                  & IPGIFG_MIN_IFG_ENFORCEMENT)
               | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
        iowrite32be(tmp, &regs->ipgifg);

        tmp = 0;
        tmp |= HAFDUP_EXCESS_DEFER;
        tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
                & HAFDUP_RETRANSMISSION_MAX);
        tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);

        iowrite32be(tmp, &regs->hafdup);

        /* Maximum frame length */
        iowrite32be(cfg->maximum_frame, &regs->maxfrm);

        iowrite32be(0xffffffff, &regs->cam1);
        iowrite32be(0xffffffff, &regs->cam2);

        iowrite32be(exception_mask, &regs->imask);

        iowrite32be(0xffffffff, &regs->ievent);

        tmp = (u32)((macaddr[5] << 24) |
                    (macaddr[4] << 16) | (macaddr[3] << 8) | macaddr[2]);
        iowrite32be(tmp, &regs->macstnaddr1);

        tmp = (u32)((macaddr[1] << 24) | (macaddr[0] << 16));
        iowrite32be(tmp, &regs->macstnaddr2);

        /* Clear the individual and group hash registers */
        for (i = 0; i < NUM_OF_HASH_REGS; i++) {
                iowrite32be(0, &regs->igaddr[i]);
                iowrite32be(0, &regs->gaddr[i]);
        }

        return 0;
}

static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
{
        u32 tmp;

        tmp = (u32)((adr[5] << 24) |
                    (adr[4] << 16) | (adr[3] << 8) | adr[2]);
        iowrite32be(tmp, &regs->macstnaddr1);

        tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
        iowrite32be(tmp, &regs->macstnaddr2);
}
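
set_mac_address() splits the six address bytes across the two station-address registers in reverse byte order. The following standalone sketch (plain user-space C with a hypothetical MAC value, not driver code) makes the resulting register contents concrete:

#include <stdint.h>
#include <stdio.h>

/* Models the packing done by set_mac_address() above (illustration only) */
int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
        uint32_t addr1, addr2;

        addr1 = ((uint32_t)mac[5] << 24) | ((uint32_t)mac[4] << 16) |
                ((uint32_t)mac[3] << 8) | mac[2];
        addr2 = ((uint32_t)mac[1] << 24) | ((uint32_t)mac[0] << 16);

        /* Prints macstnaddr1=0302019f macstnaddr2=04000000 */
        printf("macstnaddr1=%08x macstnaddr2=%08x\n", addr1, addr2);
        return 0;
}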

/* Hash buckets 0-255 map to the individual-address hash registers (igaddr),
 * buckets 256-511 to the group-address hash registers (gaddr).
 */
static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
                       bool enable)
{
        int reg_idx = (bucket >> 5) & 0xf;
        int bit_idx = bucket & 0x1f;
        u32 bit_mask = 0x80000000 >> bit_idx;
        u32 __iomem *reg;

        if (reg_idx > 7)
                reg = &regs->gaddr[reg_idx - 8];
        else
                reg = &regs->igaddr[reg_idx];

        if (enable)
                iowrite32be(ioread32be(reg) | bit_mask, reg);
        else
                iowrite32be(ioread32be(reg) & (~bit_mask), reg);
}

static int check_init_parameters(struct fman_mac *dtsec)
{
        if (dtsec->max_speed >= SPEED_10000) {
                pr_err("1G MAC driver supports 1G or lower speeds\n");
                return -EINVAL;
        }
        if (dtsec->addr == 0) {
                pr_err("Ethernet MAC must have a valid MAC address\n");
                return -EINVAL;
        }
        if (dtsec->dtsec_drv_param->rx_prepend >
            MAX_PACKET_ALIGNMENT) {
                pr_err("packetAlignmentPadding can't be greater than %d\n",
                       MAX_PACKET_ALIGNMENT);
                return -EINVAL;
        }
        if ((dtsec->dtsec_drv_param->non_back_to_back_ipg1 >
             MAX_INTER_PACKET_GAP) ||
            (dtsec->dtsec_drv_param->non_back_to_back_ipg2 >
             MAX_INTER_PACKET_GAP) ||
            (dtsec->dtsec_drv_param->back_to_back_ipg >
             MAX_INTER_PACKET_GAP)) {
                pr_err("Inter-packet gap can't be greater than %d\n",
                       MAX_INTER_PACKET_GAP);
                return -EINVAL;
        }
        if (dtsec->dtsec_drv_param->halfdup_retransmit >
            MAX_RETRANSMISSION) {
                pr_err("maxRetransmission can't be greater than %d\n",
                       MAX_RETRANSMISSION);
                return -EINVAL;
        }
        if (dtsec->dtsec_drv_param->halfdup_coll_window >
            MAX_COLLISION_WINDOW) {
                pr_err("collisionWindow can't be greater than %d\n",
                       MAX_COLLISION_WINDOW);
                return -EINVAL;
        }
        if (!dtsec->exception_cb) {
                pr_err("uninitialized exception_cb\n");
                return -EINVAL;
        }
        if (!dtsec->event_cb) {
                pr_err("uninitialized event_cb\n");
                return -EINVAL;
        }

        return 0;
}

static int get_exception_flag(enum fman_mac_exceptions exception)
{
        u32 bit_mask;

        switch (exception) {
        case FM_MAC_EX_1G_BAB_RX:
                bit_mask = DTSEC_IMASK_BREN;
                break;
        case FM_MAC_EX_1G_RX_CTL:
                bit_mask = DTSEC_IMASK_RXCEN;
                break;
        case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
                bit_mask = DTSEC_IMASK_GTSCEN;
                break;
        case FM_MAC_EX_1G_BAB_TX:
                bit_mask = DTSEC_IMASK_BTEN;
                break;
        case FM_MAC_EX_1G_TX_CTL:
                bit_mask = DTSEC_IMASK_TXCEN;
                break;
        case FM_MAC_EX_1G_TX_ERR:
                bit_mask = DTSEC_IMASK_TXEEN;
                break;
        case FM_MAC_EX_1G_LATE_COL:
                bit_mask = DTSEC_IMASK_LCEN;
                break;
        case FM_MAC_EX_1G_COL_RET_LMT:
                bit_mask = DTSEC_IMASK_CRLEN;
                break;
        case FM_MAC_EX_1G_TX_FIFO_UNDRN:
                bit_mask = DTSEC_IMASK_XFUNEN;
                break;
        case FM_MAC_EX_1G_MAG_PCKT:
                bit_mask = DTSEC_IMASK_MAGEN;
                break;
        case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
                bit_mask = DTSEC_IMASK_MMRDEN;
                break;
        case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
                bit_mask = DTSEC_IMASK_MMWREN;
                break;
        case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
                bit_mask = DTSEC_IMASK_GRSCEN;
                break;
        case FM_MAC_EX_1G_DATA_ERR:
                bit_mask = DTSEC_IMASK_TDPEEN;
                break;
        case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
                bit_mask = DTSEC_IMASK_MSROEN;
                break;
        default:
                bit_mask = 0;
                break;
        }

        return bit_mask;
}

/* The driver-parameter struct is freed at the end of dtsec_init(), so a NULL
 * pointer here means initialization has completed.
 */
static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
{
        if (!dtsec_drv_params)
                return true;

        return false;
}

static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;

        if (is_init_done(dtsec->dtsec_drv_param))
                return 0;

        return (u16)ioread32be(&regs->maxfrm);
}

static void dtsec_isr(void *handle)
{
        struct fman_mac *dtsec = (struct fman_mac *)handle;
        struct dtsec_regs __iomem *regs = dtsec->regs;
        u32 event;

        /* MDIO read/write completions are not handled here */
        event = ioread32be(&regs->ievent) &
                (u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));

        event &= ioread32be(&regs->imask);

        iowrite32be(event, &regs->ievent);

        if (event & DTSEC_IMASK_BREN)
                dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
        if (event & DTSEC_IMASK_RXCEN)
                dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
        if (event & DTSEC_IMASK_GTSCEN)
                dtsec->exception_cb(dtsec->dev_id,
                                    FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
        if (event & DTSEC_IMASK_BTEN)
                dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
        if (event & DTSEC_IMASK_TXCEN)
                dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
        if (event & DTSEC_IMASK_TXEEN)
                dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
        if (event & DTSEC_IMASK_LCEN)
                dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
        if (event & DTSEC_IMASK_CRLEN)
                dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
        if (event & DTSEC_IMASK_XFUNEN) {
                /* FMan rev 2 Tx lockup errata workaround: detect a stuck Tx
                 * path and recover it with a graceful Rx stop followed by a
                 * MAC reset.
                 */
                if (dtsec->fm_rev_info.major == 2) {
                        u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;

                        /* Sample the Tx packet counter and the register in
                         * the reserved area at offset 0x32c ...
                         */
                        tpkt1 = ioread32be(&regs->tpkt);
                        tmp_reg1 = ioread32be(&regs->reserved02c0[27]);

                        if ((tmp_reg1 & 0x007F0000) !=
                            (tmp_reg1 & 0x0000007F)) {
                                /* ... wait briefly before re-sampling ... */
                                usleep_range((u32)min(dtsec_get_max_frame_length(dtsec) *
                                                      16 / 1000, 1),
                                             (u32)min(dtsec_get_max_frame_length(dtsec) *
                                                      16 / 1000, 1) + 1);
                        }

                        /* ... and sample both registers again */
                        tpkt2 = ioread32be(&regs->tpkt);
                        tmp_reg2 = ioread32be(&regs->reserved02c0[27]);

                        /* If neither value changed, the Tx side is locked up */
                        if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
                            (tmp_reg2 & 0x007F0000))) {
                                /* Assert graceful Rx stop */
                                iowrite32be(ioread32be(&regs->rctrl) |
                                            RCTRL_GRS, &regs->rctrl);

                                /* Wait for the graceful Rx stop to complete,
                                 * up to roughly 100 us.
                                 */
                                for (i = 0; i < 100; i++) {
                                        if (ioread32be(&regs->ievent) &
                                            DTSEC_IMASK_GRSCEN)
                                                break;
                                        udelay(1);
                                }
                                if (ioread32be(&regs->ievent) &
                                    DTSEC_IMASK_GRSCEN)
                                        iowrite32be(DTSEC_IMASK_GRSCEN,
                                                    &regs->ievent);
                                else
                                        pr_debug("Rx lockup due to Tx lockup\n");

                                /* Reset the MAC through the FMan */
                                fman_reset_mac(dtsec->fm, dtsec->mac_id);

                                /* Allow a few Tx clocks for the reset */
                                udelay(1);
                        }
                }

                dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
        }
        if (event & DTSEC_IMASK_MAGEN)
                dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
        if (event & DTSEC_IMASK_GRSCEN)
                dtsec->exception_cb(dtsec->dev_id,
                                    FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
        if (event & DTSEC_IMASK_TDPEEN)
                dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
        if (event & DTSEC_IMASK_RDPEEN)
                dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);

        /* Events that only warrant a warning */
        WARN_ON(event & DTSEC_IMASK_ABRTEN);
        WARN_ON(event & DTSEC_IMASK_IFERREN);
}

static void dtsec_1588_isr(void *handle)
{
        struct fman_mac *dtsec = (struct fman_mac *)handle;
        struct dtsec_regs __iomem *regs = dtsec->regs;
        u32 event;

        if (dtsec->ptp_tsu_enabled) {
                event = ioread32be(&regs->tmr_pevent);
                event &= ioread32be(&regs->tmr_pemask);

                if (event) {
                        iowrite32be(event, &regs->tmr_pevent);
                        WARN_ON(event & TMR_PEVENT_TSRE);
                        dtsec->exception_cb(dtsec->dev_id,
                                            FM_MAC_EX_1G_1588_TS_RX_ERR);
                }
        }
}

static void free_init_resources(struct fman_mac *dtsec)
{
        fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
                             FMAN_INTR_TYPE_ERR);
        fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
                             FMAN_INTR_TYPE_NORMAL);

        free_hash_table(dtsec->multicast_addr_hash);
        dtsec->multicast_addr_hash = NULL;

        free_hash_table(dtsec->unicast_addr_hash);
        dtsec->unicast_addr_hash = NULL;
}

int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
{
        if (is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        dtsec->dtsec_drv_param->maximum_frame = new_val;

        return 0;
}

int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
{
        if (is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        dtsec->dtsec_drv_param->tx_pad_crc = new_val;

        return 0;
}

static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;

        if (mode & COMM_MODE_TX)
                iowrite32be(ioread32be(&regs->tctrl) &
                            ~TCTRL_GTS, &regs->tctrl);
        if (mode & COMM_MODE_RX)
                iowrite32be(ioread32be(&regs->rctrl) &
                            ~RCTRL_GRS, &regs->rctrl);
}

static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        u32 tmp;

        /* Graceful Rx stop */
        if (mode & COMM_MODE_RX) {
                tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
                iowrite32be(tmp, &regs->rctrl);

                if (dtsec->fm_rev_info.major == 2) {
                        usleep_range(100, 200);
                } else {
                        usleep_range(10, 50);
                }
        }

        /* Graceful Tx stop */
        if (mode & COMM_MODE_TX) {
                if (dtsec->fm_rev_info.major == 2) {
                        pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
                } else {
                        tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
                        iowrite32be(tmp, &regs->tctrl);

                        usleep_range(10, 50);
                }
        }
}

int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        u32 tmp;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        /* Enable the requested MAC paths */
        tmp = ioread32be(&regs->maccfg1);
        if (mode & COMM_MODE_RX)
                tmp |= MACCFG1_RX_EN;
        if (mode & COMM_MODE_TX)
                tmp |= MACCFG1_TX_EN;

        iowrite32be(tmp, &regs->maccfg1);

        graceful_start(dtsec, mode);

        return 0;
}

int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        u32 tmp;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        graceful_stop(dtsec, mode);

        tmp = ioread32be(&regs->maccfg1);
        if (mode & COMM_MODE_RX)
                tmp &= ~MACCFG1_RX_EN;
        if (mode & COMM_MODE_TX)
                tmp &= ~MACCFG1_TX_EN;

        iowrite32be(tmp, &regs->maccfg1);

        return 0;
}

int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
                              u8 __maybe_unused priority,
                              u16 pause_time, u16 __maybe_unused thresh_time)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        enum comm_mode mode = COMM_MODE_NONE;
        u32 ptv = 0;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
                mode |= COMM_MODE_RX;
        if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
                mode |= COMM_MODE_TX;

        graceful_stop(dtsec, mode);

        if (pause_time) {
                /* FMan rev 2 rejects pause times of 320 or less */
                if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
                        pr_warn("pause-time: %d illegal, should be > 320\n",
                                pause_time);
                        return -EINVAL;
                }

                ptv = ioread32be(&regs->ptv);
                ptv &= PTV_PTE_MASK;
                ptv |= pause_time & PTV_PT_MASK;
                iowrite32be(ptv, &regs->ptv);

                /* Enable transmission of pause frames */
                iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
                            &regs->maccfg1);
        } else
                iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
                            &regs->maccfg1);

        graceful_start(dtsec, mode);

        return 0;
}

int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        enum comm_mode mode = COMM_MODE_NONE;
        u32 tmp;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
                mode |= COMM_MODE_RX;
        if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
                mode |= COMM_MODE_TX;

        graceful_stop(dtsec, mode);

        tmp = ioread32be(&regs->maccfg1);
        if (en)
                tmp |= MACCFG1_RX_FLOW;
        else
                tmp &= ~MACCFG1_RX_FLOW;
        iowrite32be(tmp, &regs->maccfg1);

        graceful_start(dtsec, mode);

        return 0;
}

int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        enum comm_mode mode = COMM_MODE_NONE;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
                mode |= COMM_MODE_RX;
        if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
                mode |= COMM_MODE_TX;

        graceful_stop(dtsec, mode);

        /* Replace the station address while traffic is gracefully stopped */
        dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
        set_mac_address(dtsec->regs, (u8 *)(*enet_addr));

        graceful_start(dtsec, mode);

        return 0;
}

int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        struct eth_hash_entry *hash_entry;
        u64 addr;
        s32 bucket;
        u32 crc = 0xFFFFFFFF;
        bool mcast, ghtx;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        addr = ENET_ADDR_TO_UINT64(*eth_addr);

        ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
        mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);

        /* Extended (GHTX) hashing only covers multicast addresses */
        if (ghtx && !mcast) {
                pr_err("Could not compute hash bucket\n");
                return -EINVAL;
        }
        crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
        crc = bitrev32(crc);

        /* Bit-reversed CRC-32 of the address; the top bits select the bucket:
         * 9 bits when GHTX is set, otherwise 8 bits plus a 0x100 offset for
         * multicast so those buckets land in the gaddr registers.
         */
        if (ghtx) {
                bucket = (s32)((crc >> 23) & 0x1ff);
        } else {
                bucket = (s32)((crc >> 24) & 0xff);
                if (mcast)
                        bucket += 0x100;
        }

        set_bucket(dtsec->regs, bucket, true);

        /* Keep a shadow list entry so the bucket can be cleared on delete */
        hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
        if (!hash_entry)
                return -ENOMEM;
        hash_entry->addr = addr;
        INIT_LIST_HEAD(&hash_entry->node);

        if (addr & MAC_GROUP_ADDRESS)
                list_add_tail(&hash_entry->node,
                              &dtsec->multicast_addr_hash->lsts[bucket]);
        else
                list_add_tail(&hash_entry->node,
                              &dtsec->unicast_addr_hash->lsts[bucket]);

        return 0;
}
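
The bucket selection above is a bit-reversed little-endian CRC-32 of the address, with the top bits used as the index: 9 bits (512 buckets) when RCTRL[GHTX] is set, otherwise 8 bits plus a 0x100 offset for multicast addresses so they fall into the group-address (gaddr) half of the table. The standalone sketch below models that computation in user-space C with a hypothetical multicast address; the kernel driver uses crc32_le() and bitrev32() instead of these helper functions.

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32 with a caller-supplied seed, matching crc32_le() semantics */
static uint32_t crc32_le_model(uint32_t crc, const uint8_t *p, int len)
{
        int i;

        while (len--) {
                crc ^= *p++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
        }
        return crc;
}

/* Bit-reverse a 32-bit word, matching bitrev32() */
static uint32_t bitrev32_model(uint32_t x)
{
        uint32_t r = 0;
        int i;

        for (i = 0; i < 32; i++)
                r |= ((x >> i) & 1) << (31 - i);
        return r;
}

int main(void)
{
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t crc = crc32_le_model(0xFFFFFFFFu, mac, 6);
        int mcast = mac[0] & 1;         /* group bit of the first byte */
        int bucket;

        crc = bitrev32_model(crc);
        /* Non-GHTX mode: 8-bit bucket, multicast buckets offset into gaddr[] */
        bucket = (crc >> 24) & 0xff;
        if (mcast)
                bucket += 0x100;
        printf("bucket=%d\n", bucket);
        return 0;
}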

int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
{
        u32 tmp;
        struct dtsec_regs __iomem *regs = dtsec->regs;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        tmp = ioread32be(&regs->rctrl);
        if (enable)
                tmp |= RCTRL_MPROM;
        else
                tmp &= ~RCTRL_MPROM;

        iowrite32be(tmp, &regs->rctrl);

        return 0;
}

int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        u32 rctrl, tctrl;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        rctrl = ioread32be(&regs->rctrl);
        tctrl = ioread32be(&regs->tctrl);

        if (enable) {
                rctrl |= RCTRL_RTSE;
                tctrl |= TCTRL_TTSE;
        } else {
                rctrl &= ~RCTRL_RTSE;
                tctrl &= ~TCTRL_TTSE;
        }

        iowrite32be(rctrl, &regs->rctrl);
        iowrite32be(tctrl, &regs->tctrl);

        return 0;
}

int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        struct list_head *pos;
        struct eth_hash_entry *hash_entry = NULL;
        u64 addr;
        s32 bucket;
        u32 crc = 0xFFFFFFFF;
        bool mcast, ghtx;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        addr = ENET_ADDR_TO_UINT64(*eth_addr);

        ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
        mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);

        if (ghtx && !mcast) {
                pr_err("Could not compute hash bucket\n");
                return -EINVAL;
        }
        crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
        crc = bitrev32(crc);

        if (ghtx) {
                bucket = (s32)((crc >> 23) & 0x1ff);
        } else {
                bucket = (s32)((crc >> 24) & 0xff);
                if (mcast)
                        bucket += 0x100;
        }

        if (addr & MAC_GROUP_ADDRESS) {
                list_for_each(pos,
                              &dtsec->multicast_addr_hash->lsts[bucket]) {
                        hash_entry = ETH_HASH_ENTRY_OBJ(pos);
                        if (hash_entry->addr == addr) {
                                list_del_init(&hash_entry->node);
                                kfree(hash_entry);
                                break;
                        }
                }
                if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
                        set_bucket(dtsec->regs, bucket, false);
        } else {
                list_for_each(pos,
                              &dtsec->unicast_addr_hash->lsts[bucket]) {
                        hash_entry = ETH_HASH_ENTRY_OBJ(pos);
                        if (hash_entry->addr == addr) {
                                list_del_init(&hash_entry->node);
                                kfree(hash_entry);
                                break;
                        }
                }
                if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
                        set_bucket(dtsec->regs, bucket, false);
        }

        WARN_ON(!hash_entry);

        return 0;
}

int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        u32 tmp;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        /* Unicast promiscuous */
        tmp = ioread32be(&regs->rctrl);
        if (new_val)
                tmp |= RCTRL_UPROM;
        else
                tmp &= ~RCTRL_UPROM;

        iowrite32be(tmp, &regs->rctrl);

        /* Multicast promiscuous */
        tmp = ioread32be(&regs->rctrl);
        if (new_val)
                tmp |= RCTRL_MPROM;
        else
                tmp &= ~RCTRL_MPROM;

        iowrite32be(tmp, &regs->rctrl);

        return 0;
}

int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        enum comm_mode mode = COMM_MODE_NONE;
        u32 tmp;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
                mode |= COMM_MODE_RX;
        if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
                mode |= COMM_MODE_TX;

        graceful_stop(dtsec, mode);

        tmp = ioread32be(&regs->maccfg2);

        /* Full duplex */
        tmp |= MACCFG2_FULL_DUPLEX;

        tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
        if (speed < SPEED_1000)
                tmp |= MACCFG2_NIBBLE_MODE;
        else if (speed == SPEED_1000)
                tmp |= MACCFG2_BYTE_MODE;
        iowrite32be(tmp, &regs->maccfg2);

        tmp = ioread32be(&regs->ecntrl);
        if (speed == SPEED_100)
                tmp |= DTSEC_ECNTRL_R100M;
        else
                tmp &= ~DTSEC_ECNTRL_R100M;
        iowrite32be(tmp, &regs->ecntrl);

        graceful_start(dtsec, mode);

        return 0;
}

int dtsec_restart_autoneg(struct fman_mac *dtsec)
{
        u16 tmp_reg16;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);

        tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
        tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
                      BMCR_FULLDPLX | BMCR_SPEED1000);

        phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);

        return 0;
}

int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        *mac_version = ioread32be(&regs->tsec_id);

        return 0;
}

int dtsec_set_exception(struct fman_mac *dtsec,
                        enum fman_mac_exceptions exception, bool enable)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        u32 bit_mask = 0;

        if (!is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
                bit_mask = get_exception_flag(exception);
                if (bit_mask) {
                        if (enable)
                                dtsec->exceptions |= bit_mask;
                        else
                                dtsec->exceptions &= ~bit_mask;
                } else {
                        pr_err("Undefined exception\n");
                        return -EINVAL;
                }
                if (enable)
                        iowrite32be(ioread32be(&regs->imask) | bit_mask,
                                    &regs->imask);
                else
                        iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
                                    &regs->imask);
        } else {
                if (!dtsec->ptp_tsu_enabled) {
                        pr_err("Exception valid for 1588 only\n");
                        return -EINVAL;
                }
                switch (exception) {
                case FM_MAC_EX_1G_1588_TS_RX_ERR:
                        if (enable) {
                                dtsec->en_tsu_err_exception = true;
                                iowrite32be(ioread32be(&regs->tmr_pemask) |
                                            TMR_PEMASK_TSREEN,
                                            &regs->tmr_pemask);
                        } else {
                                dtsec->en_tsu_err_exception = false;
                                iowrite32be(ioread32be(&regs->tmr_pemask) &
                                            ~TMR_PEMASK_TSREEN,
                                            &regs->tmr_pemask);
                        }
                        break;
                default:
                        pr_err("Undefined exception\n");
                        return -EINVAL;
                }
        }

        return 0;
}

int dtsec_init(struct fman_mac *dtsec)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        struct dtsec_cfg *dtsec_drv_param;
        int err;
        u16 max_frm_ln;
        enet_addr_t eth_addr;

        if (is_init_done(dtsec->dtsec_drv_param))
                return -EINVAL;

        if (DEFAULT_RESET_ON_INIT &&
            (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
                pr_err("Can't reset MAC!\n");
                return -EINVAL;
        }

        err = check_init_parameters(dtsec);
        if (err)
                return err;

        dtsec_drv_param = dtsec->dtsec_drv_param;

        MAKE_ENET_ADDR_FROM_UINT64(dtsec->addr, eth_addr);

        err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
                   dtsec->max_speed, (u8 *)eth_addr, dtsec->exceptions,
                   dtsec->tbiphy->mdio.addr);
        if (err) {
                free_init_resources(dtsec);
                pr_err("DTSEC version doesn't support this i/f mode\n");
                return err;
        }

        if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
                u16 tmp_reg16;

                /* Configure the TBI PHY for SGMII or 1000Base-X operation */
                tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
                phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

                tmp_reg16 = TBICON_CLK_SELECT;
                phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

                tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
                             BMCR_FULLDPLX | BMCR_SPEED1000);
                phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);

                if (dtsec->basex_if)
                        tmp_reg16 = TBIANA_1000X;
                else
                        tmp_reg16 = TBIANA_SGMII;
                phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);

                tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
                             BMCR_FULLDPLX | BMCR_SPEED1000);

                phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
        }

        /* Propagate the max frame length to the FMan */
        max_frm_ln = (u16)ioread32be(&regs->maxfrm);
        err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
        if (err) {
                pr_err("Setting max frame length failed\n");
                free_init_resources(dtsec);
                return -EINVAL;
        }

        dtsec->multicast_addr_hash =
                alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
        if (!dtsec->multicast_addr_hash) {
                free_init_resources(dtsec);
                pr_err("MC hash table allocation failed\n");
                return -ENOMEM;
        }

        dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
        if (!dtsec->unicast_addr_hash) {
                free_init_resources(dtsec);
                pr_err("UC hash table allocation failed\n");
                return -ENOMEM;
        }

        /* Register error and normal (1588) interrupt handlers with the FMan */
        fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
                           FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);

        fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
                           FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);

        kfree(dtsec_drv_param);
        dtsec->dtsec_drv_param = NULL;

        return 0;
}

int dtsec_free(struct fman_mac *dtsec)
{
        free_init_resources(dtsec);

        kfree(dtsec->dtsec_drv_param);
        dtsec->dtsec_drv_param = NULL;
        kfree(dtsec);

        return 0;
}

struct fman_mac *dtsec_config(struct fman_mac_params *params)
{
        struct fman_mac *dtsec;
        struct dtsec_cfg *dtsec_drv_param;
        void __iomem *base_addr;

        base_addr = params->base_addr;

        /* Allocate the MAC context and its driver parameters */
        dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
        if (!dtsec)
                return NULL;

        dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
        if (!dtsec_drv_param)
                goto err_dtsec;

        dtsec->dtsec_drv_param = dtsec_drv_param;

        set_dflts(dtsec_drv_param);

        dtsec->regs = base_addr;
        dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
        dtsec->max_speed = params->max_speed;
        dtsec->phy_if = params->phy_if;
        dtsec->mac_id = params->mac_id;
        dtsec->exceptions = (DTSEC_IMASK_BREN |
                             DTSEC_IMASK_RXCEN |
                             DTSEC_IMASK_BTEN |
                             DTSEC_IMASK_TXCEN |
                             DTSEC_IMASK_TXEEN |
                             DTSEC_IMASK_ABRTEN |
                             DTSEC_IMASK_LCEN |
                             DTSEC_IMASK_CRLEN |
                             DTSEC_IMASK_XFUNEN |
                             DTSEC_IMASK_IFERREN |
                             DTSEC_IMASK_MAGEN |
                             DTSEC_IMASK_TDPEEN |
                             DTSEC_IMASK_RDPEEN);
        dtsec->exception_cb = params->exception_cb;
        dtsec->event_cb = params->event_cb;
        dtsec->dev_id = params->dev_id;
        dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
        dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;

        dtsec->fm = params->fm;
        dtsec->basex_if = params->basex_if;

        if (!params->internal_phy_node) {
                pr_err("TBI PHY node is not available\n");
                goto err_dtsec_drv_param;
        }

        dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
        if (!dtsec->tbiphy) {
                pr_err("of_phy_find_device (TBI PHY) failed\n");
                goto err_dtsec_drv_param;
        }
        /* of_phy_find_device() takes a device reference that is not kept here */
        put_device(&dtsec->tbiphy->mdio.dev);

        /* Save FMan revision */
        fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);

        return dtsec;

err_dtsec_drv_param:
        kfree(dtsec_drv_param);
err_dtsec:
        kfree(dtsec);
        return NULL;
}