This source file includes the following definitions:
- dwmac4_dma_axi
- dwmac4_dma_init_rx_chan
- dwmac4_dma_init_tx_chan
- dwmac4_dma_init_channel
- dwmac4_dma_init
- _dwmac4_dump_dma_regs
- dwmac4_dump_dma_regs
- dwmac4_rx_watchdog
- dwmac4_dma_rx_chan_op_mode
- dwmac4_dma_tx_chan_op_mode
- dwmac4_get_hw_feature
- dwmac4_enable_tso
- dwmac4_qmode
- dwmac4_set_bfsize
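
These helpers are not called directly by the core driver: they are exported through the two const stmmac_dma_ops tables defined at the end of this file and invoked through function pointers. The two tables differ only in the enable_dma_irq callback (dwmac4_enable_dma_irq vs dwmac410_enable_dma_irq), which keeps the per-core difference in a single slot instead of duplicating code. As a minimal, self-contained sketch of that ops-table pattern (the demo_* names below are hypothetical illustrations, not the stmmac API):

#include <stdio.h>

/* Illustrative only: a cut-down function-pointer table in the style of
 * struct stmmac_dma_ops; the demo_* names are made up for this sketch.
 */
struct demo_dma_ops {
        void (*rx_watchdog)(void *ioaddr, unsigned int riwt, unsigned int nchan);
};

static void demo_rx_watchdog(void *ioaddr, unsigned int riwt, unsigned int nchan)
{
        printf("program RIWT=%u on %u channels at %p\n", riwt, nchan, ioaddr);
}

static const struct demo_dma_ops demo_ops = {
        .rx_watchdog = demo_rx_watchdog,
};

int main(void)
{
        /* A core layer holds a pointer to the ops table and never calls
         * the helper directly.
         */
        demo_ops.rx_watchdog(NULL, 128, 4);
        return 0;
}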
#include <linux/io.h>
#include "dwmac4.h"
#include "dwmac4_dma.h"

static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
        u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
        int i;

        pr_info("dwmac4: Master AXI performs %s burst length\n",
                (value & DMA_SYS_BUS_FB) ? "fixed" : "any");

        if (axi->axi_lpi_en)
                value |= DMA_AXI_EN_LPI;
        if (axi->axi_xit_frm)
                value |= DMA_AXI_LPI_XIT_FRM;

        value &= ~DMA_AXI_WR_OSR_LMT;
        value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
                 DMA_AXI_WR_OSR_LMT_SHIFT;

        value &= ~DMA_AXI_RD_OSR_LMT;
        value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
                 DMA_AXI_RD_OSR_LMT_SHIFT;

        /* Enable each AXI burst length requested by the platform configuration */
        for (i = 0; i < AXI_BLEN; i++) {
                switch (axi->axi_blen[i]) {
                case 256:
                        value |= DMA_AXI_BLEN256;
                        break;
                case 128:
                        value |= DMA_AXI_BLEN128;
                        break;
                case 64:
                        value |= DMA_AXI_BLEN64;
                        break;
                case 32:
                        value |= DMA_AXI_BLEN32;
                        break;
                case 16:
                        value |= DMA_AXI_BLEN16;
                        break;
                case 8:
                        value |= DMA_AXI_BLEN8;
                        break;
                case 4:
                        value |= DMA_AXI_BLEN4;
                        break;
                }
        }

        writel(value, ioaddr + DMA_SYS_BUS_MODE);
}

static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
                                    struct stmmac_dma_cfg *dma_cfg,
                                    dma_addr_t dma_rx_phy, u32 chan)
{
        u32 value;
        u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;

        value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
        value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
        writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));

        writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
}

static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
                                    struct stmmac_dma_cfg *dma_cfg,
                                    dma_addr_t dma_tx_phy, u32 chan)
{
        u32 value;
        u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;

        value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
        value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);

        /* Enable OSP (Operate on Second Packet) for best performance */
        value |= DMA_CONTROL_OSP;

        writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));

        writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
}

static void dwmac4_dma_init_channel(void __iomem *ioaddr,
                                    struct stmmac_dma_cfg *dma_cfg, u32 chan)
{
        u32 value;

        /* Common channel control register configuration */
        value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
        if (dma_cfg->pblx8)
                value = value | DMA_BUS_MODE_PBL;
        writel(value, ioaddr + DMA_CHAN_CONTROL(chan));

        /* Enable the default set of DMA channel interrupts */
        writel(DMA_CHAN_INTR_DEFAULT_MASK,
               ioaddr + DMA_CHAN_INTR_ENA(chan));
}

static void dwmac4_dma_init(void __iomem *ioaddr,
                            struct stmmac_dma_cfg *dma_cfg, int atds)
{
        u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);

        /* Set the fixed burst mode */
        if (dma_cfg->fixed_burst)
                value |= DMA_SYS_BUS_FB;

        /* Mixed burst has no effect when fixed burst is set */
        if (dma_cfg->mixed_burst)
                value |= DMA_SYS_BUS_MB;

        if (dma_cfg->aal)
                value |= DMA_SYS_BUS_AAL;

        writel(value, ioaddr + DMA_SYS_BUS_MODE);
}

static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
                                  u32 *reg_space)
{
        reg_space[DMA_CHAN_CONTROL(channel) / 4] =
                readl(ioaddr + DMA_CHAN_CONTROL(channel));
        reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] =
                readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
        reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] =
                readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
        reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] =
                readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
        reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] =
                readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
        reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] =
                readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel));
        reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] =
                readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel));
        reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] =
                readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel));
        reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] =
                readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel));
        reg_space[DMA_CHAN_INTR_ENA(channel) / 4] =
                readl(ioaddr + DMA_CHAN_INTR_ENA(channel));
        reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] =
                readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel));
        reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] =
                readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel));
        reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] =
                readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel));
        reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] =
                readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel));
        reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] =
                readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel));
        reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] =
                readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel));
        reg_space[DMA_CHAN_STATUS(channel) / 4] =
                readl(ioaddr + DMA_CHAN_STATUS(channel));
}

static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
{
        int i;

        for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
                _dwmac4_dump_dma_regs(ioaddr, i, reg_space);
}

static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
{
        u32 chan;

        for (chan = 0; chan < number_chan; chan++)
                writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
}

static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
                                       u32 channel, int fifosz, u8 qmode)
{
        unsigned int rqs = fifosz / 256 - 1;
        u32 mtl_rx_op, mtl_rx_int;

        mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));

        if (mode == SF_DMA_MODE) {
                pr_debug("GMAC: enable RX store and forward mode\n");
                mtl_rx_op |= MTL_OP_MODE_RSF;
        } else {
                pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
                mtl_rx_op &= ~MTL_OP_MODE_RSF;
                mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
                if (mode <= 32)
                        mtl_rx_op |= MTL_OP_MODE_RTC_32;
                else if (mode <= 64)
                        mtl_rx_op |= MTL_OP_MODE_RTC_64;
                else if (mode <= 96)
                        mtl_rx_op |= MTL_OP_MODE_RTC_96;
                else
                        mtl_rx_op |= MTL_OP_MODE_RTC_128;
        }

        mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
        mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;

        /* Enable flow control only if the channel gets at least 4 KiB of
         * FIFO and is not an AVB channel.
         */
        if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
                unsigned int rfd, rfa;

                mtl_rx_op |= MTL_OP_MODE_EHFC;

                /* Pick the flow-control deactivate (RFD) and activate (RFA)
                 * thresholds according to the available FIFO size.
                 */
                switch (fifosz) {
                case 4096:
                        /* Smallest FIFO handled here: the thresholds are
                         * necessarily tighter, so overflow can still occur.
                         */
                        rfd = 0x03;
                        rfa = 0x01;
                        break;

                case 8192:
                        rfd = 0x06;
                        rfa = 0x0a;
                        break;

                case 16384:
                        rfd = 0x06;
                        rfa = 0x12;
                        break;

                default:
                        rfd = 0x06;
                        rfa = 0x1e;
                        break;
                }

                mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
                mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;

                mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
                mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
        }

        writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));

        /* Enable the MTL RX overflow interrupt for this queue */
        mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
        writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
               ioaddr + MTL_CHAN_INT_CTRL(channel));
}

static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
                                       u32 channel, int fifosz, u8 qmode)
{
        u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
        unsigned int tqs = fifosz / 256 - 1;

        if (mode == SF_DMA_MODE) {
                pr_debug("GMAC: enable TX store and forward mode\n");
                /* TX checksum offload (COE type 2) cannot be done in
                 * cut-through mode, so use store and forward.
                 */
                mtl_tx_op |= MTL_OP_MODE_TSF;
        } else {
                pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
                mtl_tx_op &= ~MTL_OP_MODE_TSF;
                mtl_tx_op &= MTL_OP_MODE_TTC_MASK;

                /* Set the transmit threshold */
                if (mode <= 32)
                        mtl_tx_op |= MTL_OP_MODE_TTC_32;
                else if (mode <= 64)
                        mtl_tx_op |= MTL_OP_MODE_TTC_64;
                else if (mode <= 96)
                        mtl_tx_op |= MTL_OP_MODE_TTC_96;
                else if (mode <= 128)
                        mtl_tx_op |= MTL_OP_MODE_TTC_128;
                else if (mode <= 192)
                        mtl_tx_op |= MTL_OP_MODE_TTC_192;
                else if (mode <= 256)
                        mtl_tx_op |= MTL_OP_MODE_TTC_256;
                else if (mode <= 384)
                        mtl_tx_op |= MTL_OP_MODE_TTC_384;
                else
                        mtl_tx_op |= MTL_OP_MODE_TTC_512;
        }

        /* Enable the TX queue (fully for a generic queue, in AV mode for an
         * AVB queue) and program TQS with the queue size expressed in
         * 256-byte blocks, minus one.
         */
        mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
        if (qmode != MTL_QUEUE_AVB)
                mtl_tx_op |= MTL_OP_MODE_TXQEN;
        else
                mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
        mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
        mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;

        writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}

static void dwmac4_get_hw_feature(void __iomem *ioaddr,
                                  struct dma_features *dma_cap)
{
        u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);

        /* MAC HW feature0 */
        dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
        dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
        dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
        dma_cap->vlhash = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
        dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
        dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
        dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
        dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
        dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
        /* MMC (RMON counters) */
        dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
        /* IEEE 1588-2008 timestamping */
        dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
        /* 802.3az - Energy-Efficient Ethernet (EEE) */
        dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
        /* TX and RX checksum offload */
        dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
        dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
        dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27;
        dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9;

        /* MAC HW feature1 */
        hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
        dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
        dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
        dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
        /* The TX and RX FIFO sizes are encoded as log2(size / 128); undo the
         * encoding and store the sizes in bytes.
         */
        dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
        dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);

        /* MAC HW feature2 */
        hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
        /* TX and RX number of channels */
        dma_cap->number_rx_channel =
                ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
        dma_cap->number_tx_channel =
                ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
        /* TX and RX number of queues */
        dma_cap->number_rx_queues =
                ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
        dma_cap->number_tx_queues =
                ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;
        /* PPS outputs */
        dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24;

        /* IEEE 1588-2002 timestamping is not supported */
        dma_cap->time_stamp = 0;

        /* MAC HW feature3 */
        hw_cap = readl(ioaddr + GMAC_HW_FEATURE3);

        dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
        dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
        dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
        dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
        dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;
}

/* Enable or disable the TSO engine on the given TX DMA channel */
static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
{
        u32 value;

        if (en) {
                /* enable TSO */
                value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
                writel(value | DMA_CONTROL_TSE,
                       ioaddr + DMA_CHAN_TX_CONTROL(chan));
        } else {
                /* disable TSO */
                value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
                writel(value & ~DMA_CONTROL_TSE,
                       ioaddr + DMA_CHAN_TX_CONTROL(chan));
        }
}

static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
{
        u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));

        mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
        if (qmode != MTL_QUEUE_AVB)
                mtl_tx_op |= MTL_OP_MODE_TXQEN;
        else
                mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;

        writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}

static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
{
        u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));

        value &= ~DMA_RBSZ_MASK;
        value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;

        writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
}

const struct stmmac_dma_ops dwmac4_dma_ops = {
        .reset = dwmac4_dma_reset,
        .init = dwmac4_dma_init,
        .init_chan = dwmac4_dma_init_channel,
        .init_rx_chan = dwmac4_dma_init_rx_chan,
        .init_tx_chan = dwmac4_dma_init_tx_chan,
        .axi = dwmac4_dma_axi,
        .dump_regs = dwmac4_dump_dma_regs,
        .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
        .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
        .enable_dma_irq = dwmac4_enable_dma_irq,
        .disable_dma_irq = dwmac4_disable_dma_irq,
        .start_tx = dwmac4_dma_start_tx,
        .stop_tx = dwmac4_dma_stop_tx,
        .start_rx = dwmac4_dma_start_rx,
        .stop_rx = dwmac4_dma_stop_rx,
        .dma_interrupt = dwmac4_dma_interrupt,
        .get_hw_feature = dwmac4_get_hw_feature,
        .rx_watchdog = dwmac4_rx_watchdog,
        .set_rx_ring_len = dwmac4_set_rx_ring_len,
        .set_tx_ring_len = dwmac4_set_tx_ring_len,
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
        .qmode = dwmac4_qmode,
        .set_bfsize = dwmac4_set_bfsize,
};

const struct stmmac_dma_ops dwmac410_dma_ops = {
        .reset = dwmac4_dma_reset,
        .init = dwmac4_dma_init,
        .init_chan = dwmac4_dma_init_channel,
        .init_rx_chan = dwmac4_dma_init_rx_chan,
        .init_tx_chan = dwmac4_dma_init_tx_chan,
        .axi = dwmac4_dma_axi,
        .dump_regs = dwmac4_dump_dma_regs,
        .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
        .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
        .enable_dma_irq = dwmac410_enable_dma_irq,
        .disable_dma_irq = dwmac4_disable_dma_irq,
        .start_tx = dwmac4_dma_start_tx,
        .stop_tx = dwmac4_dma_stop_tx,
        .start_rx = dwmac4_dma_start_rx,
        .stop_rx = dwmac4_dma_stop_rx,
        .dma_interrupt = dwmac4_dma_interrupt,
        .get_hw_feature = dwmac4_get_hw_feature,
        .rx_watchdog = dwmac4_rx_watchdog,
        .set_rx_ring_len = dwmac4_set_rx_ring_len,
        .set_tx_ring_len = dwmac4_set_tx_ring_len,
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
        .qmode = dwmac4_qmode,
        .set_bfsize = dwmac4_set_bfsize,
};
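
A note on the recurring arithmetic above: dwmac4_get_hw_feature() decodes the TX/RX FIFO sizes as 128 << field (the field encodes log2(size / 128)), and the RX/TX op-mode helpers program the RQS/TQS queue-size fields as fifosz / 256 - 1, i.e. the FIFO size expressed in 256-byte blocks, minus one. A small standalone sketch of the same computations (assumed helper names, not driver code):

#include <assert.h>

/* Standalone sketch mirroring the arithmetic in dwmac4_get_hw_feature() and
 * the *_chan_op_mode() helpers above; not part of the driver.
 */
static unsigned int decode_fifo_size(unsigned int field)
{
        return 128u << field;           /* FIFO size in bytes */
}

static unsigned int fifo_to_qs(unsigned int fifosz)
{
        return fifosz / 256 - 1;        /* RQS/TQS field value */
}

int main(void)
{
        assert(decode_fifo_size(5) == 4096);    /* a 4 KiB FIFO */
        assert(fifo_to_qs(4096) == 15);         /* value programmed for 4 KiB */
        return 0;
}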