This source file includes the following definitions:
- stmmac_verify_args
- stmmac_disable_all_queues
- stmmac_enable_all_queues
- stmmac_stop_all_queues
- stmmac_start_all_queues
- stmmac_service_event_schedule
- stmmac_global_err
- stmmac_clk_csr_set
- print_pkt
- stmmac_tx_avail
- stmmac_rx_dirty
- stmmac_enable_eee_mode
- stmmac_disable_eee_mode
- stmmac_eee_ctrl_timer
- stmmac_eee_init
- stmmac_get_tx_hwtstamp
- stmmac_get_rx_hwtstamp
- stmmac_hwtstamp_set
- stmmac_hwtstamp_get
- stmmac_init_ptp
- stmmac_release_ptp
- stmmac_mac_flow_ctrl
- stmmac_validate
- stmmac_mac_link_state
- stmmac_mac_config
- stmmac_mac_an_restart
- stmmac_mac_link_down
- stmmac_mac_link_up
- stmmac_check_pcs_mode
- stmmac_init_phy
- stmmac_phy_setup
- stmmac_display_rx_rings
- stmmac_display_tx_rings
- stmmac_display_rings
- stmmac_set_bfsize
- stmmac_clear_rx_descriptors
- stmmac_clear_tx_descriptors
- stmmac_clear_descriptors
- stmmac_init_rx_buffers
- stmmac_free_rx_buffer
- stmmac_free_tx_buffer
- init_dma_rx_desc_rings
- init_dma_tx_desc_rings
- init_dma_desc_rings
- dma_free_rx_skbufs
- dma_free_tx_skbufs
- free_dma_rx_desc_resources
- free_dma_tx_desc_resources
- alloc_dma_rx_desc_resources
- alloc_dma_tx_desc_resources
- alloc_dma_desc_resources
- free_dma_desc_resources
- stmmac_mac_enable_rx_queues
- stmmac_start_rx_dma
- stmmac_start_tx_dma
- stmmac_stop_rx_dma
- stmmac_stop_tx_dma
- stmmac_start_all_dma
- stmmac_stop_all_dma
- stmmac_dma_operation_mode
- stmmac_tx_clean
- stmmac_tx_err
- stmmac_set_dma_operation_mode
- stmmac_safety_feat_interrupt
- stmmac_napi_check
- stmmac_dma_interrupt
- stmmac_mmc_setup
- stmmac_get_hw_features
- stmmac_check_ether_addr
- stmmac_init_dma_engine
- stmmac_tx_timer_arm
- stmmac_tx_timer
- stmmac_init_coalesce
- stmmac_set_rings_length
- stmmac_set_tx_queue_weight
- stmmac_configure_cbs
- stmmac_rx_queue_dma_chan_map
- stmmac_mac_config_rx_queues_prio
- stmmac_mac_config_tx_queues_prio
- stmmac_mac_config_rx_queues_routing
- stmmac_mac_config_rss
- stmmac_mtl_configuration
- stmmac_safety_feat_configuration
- stmmac_hw_setup
- stmmac_hw_teardown
- stmmac_open
- stmmac_release
- stmmac_vlan_insert
- stmmac_tso_allocator
- stmmac_tso_xmit
- stmmac_xmit
- stmmac_rx_vlan
- stmmac_rx_threshold_count
- stmmac_rx_refill
- stmmac_rx
- stmmac_napi_poll_rx
- stmmac_napi_poll_tx
- stmmac_tx_timeout
- stmmac_set_rx_mode
- stmmac_change_mtu
- stmmac_fix_features
- stmmac_set_features
- stmmac_interrupt
- stmmac_poll_controller
- stmmac_ioctl
- stmmac_setup_tc_block_cb
- stmmac_setup_tc
- stmmac_select_queue
- stmmac_set_mac_address
- sysfs_display_ring
- stmmac_rings_status_show
- stmmac_dma_cap_show
- stmmac_device_event
- stmmac_init_fs
- stmmac_exit_fs
- stmmac_vid_crc32_le
- stmmac_vlan_update
- stmmac_vlan_rx_add_vid
- stmmac_vlan_rx_kill_vid
- stmmac_reset_subtask
- stmmac_service_task
- stmmac_hw_init
- stmmac_dvr_probe
- stmmac_dvr_remove
- stmmac_suspend
- stmmac_reset_queues_param
- stmmac_resume
- stmmac_cmdline_opt
- stmmac_init
- stmmac_exit
/*
 * stmmac: driver core for the ST/Synopsys DWMAC and XGMAC Ethernet
 * controllers - netdev open/close, descriptor ring management, TX/RX
 * data paths, DMA/MTL configuration, PTP timestamping and phylink glue.
 */
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/prefetch.h>
32 #include <linux/pinctrl/consumer.h>
33 #ifdef CONFIG_DEBUG_FS
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #endif
37 #include <linux/net_tstamp.h>
38 #include <linux/phylink.h>
39 #include <net/pkt_cls.h>
40 #include "stmmac_ptp.h"
41 #include "stmmac.h"
42 #include <linux/reset.h>
43 #include <linux/of_mdio.h>
44 #include "dwmac1000.h"
45 #include "dwxgmac2.h"
46 #include "hwif.h"
47
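/* Align lengths to the cache line and then to 16 bytes; TSO_MAX_BUFF_SIZE is the largest payload a single TSO descriptor can carry. */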
48 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
49 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
50
51
52 #define TX_TIMEO 5000
53 static int watchdog = TX_TIMEO;
54 module_param(watchdog, int, 0644);
55 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
56
57 static int debug = -1;
58 module_param(debug, int, 0644);
59 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
60
61 static int phyaddr = -1;
62 module_param(phyaddr, int, 0444);
63 MODULE_PARM_DESC(phyaddr, "Physical device address");
64
65 #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
66 #define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
67
68 static int flow_ctrl = FLOW_AUTO;
69 module_param(flow_ctrl, int, 0644);
70 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
71
72 static int pause = PAUSE_TIME;
73 module_param(pause, int, 0644);
74 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
75
76 #define TC_DEFAULT 64
77 static int tc = TC_DEFAULT;
78 module_param(tc, int, 0644);
79 MODULE_PARM_DESC(tc, "DMA threshold control value");
80
81 #define DEFAULT_BUFSIZE 1536
82 static int buf_sz = DEFAULT_BUFSIZE;
83 module_param(buf_sz, int, 0644);
84 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
85
86 #define STMMAC_RX_COPYBREAK 256
87
88 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
89 NETIF_MSG_LINK | NETIF_MSG_IFUP |
90 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
91
92 #define STMMAC_DEFAULT_LPI_TIMER 1000
93 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
94 module_param(eee_timer, int, 0644);
95 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
96 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
/* By default the descriptors are managed in ring mode; chain mode can be selected with this module parameter. */
101 static unsigned int chain_mode;
102 module_param(chain_mode, int, 0444);
103 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
104
105 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
106
107 #ifdef CONFIG_DEBUG_FS
108 static const struct net_device_ops stmmac_netdev_ops;
109 static void stmmac_init_fs(struct net_device *dev);
110 static void stmmac_exit_fs(struct net_device *dev);
111 #endif
112
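/* Convert a TX coalescing delay in microseconds into an absolute jiffies deadline. */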
113 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
114
/* stmmac_verify_args - sanity-check the module parameters and reset invalid values to their defaults. */
120 static void stmmac_verify_args(void)
121 {
122 if (unlikely(watchdog < 0))
123 watchdog = TX_TIMEO;
124 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
125 buf_sz = DEFAULT_BUFSIZE;
126 if (unlikely(flow_ctrl > 1))
127 flow_ctrl = FLOW_AUTO;
128 else if (likely(flow_ctrl < 0))
129 flow_ctrl = FLOW_OFF;
130 if (unlikely((pause < 0) || (pause > 0xffff)))
131 pause = PAUSE_TIME;
132 if (eee_timer < 0)
133 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
134 }
135
/* stmmac_disable_all_queues - disable the RX and TX NAPI instances of every channel. */
140 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
141 {
142 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
143 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
144 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
145 u32 queue;
146
147 for (queue = 0; queue < maxq; queue++) {
148 struct stmmac_channel *ch = &priv->channel[queue];
149
150 if (queue < rx_queues_cnt)
151 napi_disable(&ch->rx_napi);
152 if (queue < tx_queues_cnt)
153 napi_disable(&ch->tx_napi);
154 }
155 }
156
/* stmmac_enable_all_queues - enable the RX and TX NAPI instances of every channel. */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
165 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
166 u32 queue;
167
168 for (queue = 0; queue < maxq; queue++) {
169 struct stmmac_channel *ch = &priv->channel[queue];
170
171 if (queue < rx_queues_cnt)
172 napi_enable(&ch->rx_napi);
173 if (queue < tx_queues_cnt)
174 napi_enable(&ch->tx_napi);
175 }
176 }
177
/* stmmac_stop_all_queues - stop all netdev TX queues. */
182 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
183 {
184 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
185 u32 queue;
186
187 for (queue = 0; queue < tx_queues_cnt; queue++)
188 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
189 }
190
/* stmmac_start_all_queues - start all netdev TX queues. */
195 static void stmmac_start_all_queues(struct stmmac_priv *priv)
196 {
197 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
198 u32 queue;
199
200 for (queue = 0; queue < tx_queues_cnt; queue++)
201 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
202 }
203
204 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
205 {
206 if (!test_bit(STMMAC_DOWN, &priv->state) &&
207 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
208 queue_work(priv->wq, &priv->service_task);
209 }
210
211 static void stmmac_global_err(struct stmmac_priv *priv)
212 {
213 netif_carrier_off(priv->dev);
214 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
215 stmmac_service_event_schedule(priv);
216 }
217
/* stmmac_clk_csr_set - set the MDC clock divider (CSR clock range) from the
 * CSR clock rate, unless the platform already fixed clk_csr.
 */
230 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
231 {
232 u32 clk_rate;
233
234 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
235
/* No fixed clk_csr from the platform: derive the CSR clock range from the measured clock rate. */
243 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
244 if (clk_rate < CSR_F_35M)
245 priv->clk_csr = STMMAC_CSR_20_35M;
246 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
247 priv->clk_csr = STMMAC_CSR_35_60M;
248 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
249 priv->clk_csr = STMMAC_CSR_60_100M;
250 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
251 priv->clk_csr = STMMAC_CSR_100_150M;
252 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
253 priv->clk_csr = STMMAC_CSR_150_250M;
254 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
255 priv->clk_csr = STMMAC_CSR_250_300M;
256 }
257
258 if (priv->plat->has_sun8i) {
259 if (clk_rate > 160000000)
260 priv->clk_csr = 0x03;
261 else if (clk_rate > 80000000)
262 priv->clk_csr = 0x02;
263 else if (clk_rate > 40000000)
264 priv->clk_csr = 0x01;
265 else
266 priv->clk_csr = 0;
267 }
268
269 if (priv->plat->has_xgmac) {
270 if (clk_rate > 400000000)
271 priv->clk_csr = 0x5;
272 else if (clk_rate > 350000000)
273 priv->clk_csr = 0x4;
274 else if (clk_rate > 300000000)
275 priv->clk_csr = 0x3;
276 else if (clk_rate > 250000000)
277 priv->clk_csr = 0x2;
278 else if (clk_rate > 150000000)
279 priv->clk_csr = 0x1;
280 else
281 priv->clk_csr = 0x0;
282 }
283 }
284
285 static void print_pkt(unsigned char *buf, int len)
286 {
287 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
288 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
289 }
290
291 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
292 {
293 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
294 u32 avail;
295
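/* One descriptor is always left unused so a full ring can be told apart from an empty one. */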
296 if (tx_q->dirty_tx > tx_q->cur_tx)
297 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
298 else
299 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
300
301 return avail;
302 }
303
/* stmmac_rx_dirty - number of RX descriptors already used by the hardware and waiting to be refilled. */
309 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
310 {
311 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
312 u32 dirty;
313
314 if (rx_q->dirty_rx <= rx_q->cur_rx)
315 dirty = rx_q->cur_rx - rx_q->dirty_rx;
316 else
317 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
318
319 return dirty;
320 }
321
/* stmmac_enable_eee_mode - enter LPI mode when every TX queue is idle. */
328 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
329 {
330 u32 tx_cnt = priv->plat->tx_queues_to_use;
331 u32 queue;
332
333
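/* Do not enter LPI while any TX queue still has descriptors in flight. */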
334 for (queue = 0; queue < tx_cnt; queue++) {
335 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
336
337 if (tx_q->dirty_tx != tx_q->cur_tx)
338 return;
339 }
340
341
342 if (!priv->tx_path_in_lpi_mode)
343 stmmac_set_eee_mode(priv, priv->hw,
344 priv->plat->en_tx_lpi_clockgating);
345 }
346
/* stmmac_disable_eee_mode - exit LPI mode and stop the EEE control timer; called when the TX path has work to do. */
353 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
354 {
355 stmmac_reset_eee_mode(priv, priv->hw);
356 del_timer_sync(&priv->eee_ctrl_timer);
357 priv->tx_path_in_lpi_mode = false;
358 }
359
/* stmmac_eee_ctrl_timer - EEE timer callback: try to enter LPI again and re-arm the timer. */
367 static void stmmac_eee_ctrl_timer(struct timer_list *t)
368 {
369 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
370
371 stmmac_enable_eee_mode(priv);
372 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
373 }
374
/* stmmac_eee_init - enable or disable EEE depending on MAC capability and the current link state; returns true when EEE is enabled. */
383 bool stmmac_eee_init(struct stmmac_priv *priv)
384 {
385 int tx_lpi_timer = priv->tx_lpi_timer;
386
/* With a PCS (RGMII/TBI/RTBI) the PHY registers cannot be managed here, so EEE is not supported. */
390 if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
391 (priv->hw->pcs == STMMAC_PCS_TBI) ||
392 (priv->hw->pcs == STMMAC_PCS_RTBI))
393 return false;
394
395
396 if (!priv->dma_cap.eee)
397 return false;
398
399 mutex_lock(&priv->lock);
400
401
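/* EEE is no longer active (e.g. the link went down): stop the LPI timer and tear down the EEE configuration if it was enabled. */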
402 if (!priv->eee_active) {
403 if (priv->eee_enabled) {
404 netdev_dbg(priv->dev, "disable EEE\n");
405 del_timer_sync(&priv->eee_ctrl_timer);
406 stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
407 }
408 mutex_unlock(&priv->lock);
409 return false;
410 }
411
412 if (priv->eee_active && !priv->eee_enabled) {
413 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
414 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
415 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
416 tx_lpi_timer);
417 }
418
419 mutex_unlock(&priv->lock);
420 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
421 return true;
422 }
423
/* stmmac_get_tx_hwtstamp - read the TX hardware timestamp of a completed descriptor and pass it to the stack. */
432 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
433 struct dma_desc *p, struct sk_buff *skb)
434 {
435 struct skb_shared_hwtstamps shhwtstamp;
436 bool found = false;
437 u64 ns = 0;
438
439 if (!priv->hwts_tx_en)
440 return;
441
442
443 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
444 return;
445
446
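/* The timestamp is reported either in the descriptor or, failing that, via the MAC snapshot register. */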
447 if (stmmac_get_tx_timestamp_status(priv, p)) {
448 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
449 found = true;
450 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
451 found = true;
452 }
453
454 if (found) {
455 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
456 shhwtstamp.hwtstamp = ns_to_ktime(ns);
457
458 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
459
460 skb_tstamp_tx(skb, &shhwtstamp);
461 }
462 }
463
/* stmmac_get_rx_hwtstamp - read the RX hardware timestamp of a received frame and store it in the skb. */
473 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
474 struct dma_desc *np, struct sk_buff *skb)
475 {
476 struct skb_shared_hwtstamps *shhwtstamp = NULL;
477 struct dma_desc *desc = p;
478 u64 ns = 0;
479
480 if (!priv->hwts_rx_en)
481 return;
482
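/* GMAC4/XGMAC report the RX timestamp in the context descriptor that follows the packet descriptor. */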
483 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
484 desc = np;
485
486
487 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
488 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
489 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
490 shhwtstamp = skb_hwtstamps(skb);
491 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492 shhwtstamp->hwtstamp = ns_to_ktime(ns);
493 } else {
494 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
495 }
496 }
497
/* stmmac_hwtstamp_set - SIOCSHWTSTAMP handler: configure hardware TX/RX timestamping and the PTP packet filters. */
509 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
510 {
511 struct stmmac_priv *priv = netdev_priv(dev);
512 struct hwtstamp_config config;
513 struct timespec64 now;
514 u64 temp = 0;
515 u32 ptp_v2 = 0;
516 u32 tstamp_all = 0;
517 u32 ptp_over_ipv4_udp = 0;
518 u32 ptp_over_ipv6_udp = 0;
519 u32 ptp_over_ethernet = 0;
520 u32 snap_type_sel = 0;
521 u32 ts_master_en = 0;
522 u32 ts_event_en = 0;
523 u32 sec_inc = 0;
524 u32 value = 0;
525 bool xmac;
526
527 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
528
529 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
530 netdev_alert(priv->dev, "No support for HW time stamping\n");
531 priv->hwts_tx_en = 0;
532 priv->hwts_rx_en = 0;
533
534 return -EOPNOTSUPP;
535 }
536
537 if (copy_from_user(&config, ifr->ifr_data,
538 sizeof(config)))
539 return -EFAULT;
540
541 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
542 __func__, config.flags, config.tx_type, config.rx_filter);
543
544
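/* Reserved bits in the request flags must be zero. */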
545 if (config.flags)
546 return -EINVAL;
547
548 if (config.tx_type != HWTSTAMP_TX_OFF &&
549 config.tx_type != HWTSTAMP_TX_ON)
550 return -ERANGE;
551
552 if (priv->adv_ts) {
553 switch (config.rx_filter) {
554 case HWTSTAMP_FILTER_NONE:
555
556 config.rx_filter = HWTSTAMP_FILTER_NONE;
557 break;
558
559 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
560
561 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* Timestamp all PTP v1 event messages (SNAPTYPSEL_1 selects snapshots for all event packets). */
568 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
569 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
570 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
571 break;
572
573 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
574
575 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
576
577 ts_event_en = PTP_TCR_TSEVNTENA;
578
579 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
580 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
581 break;
582
583 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
584
585 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
586
587 ts_master_en = PTP_TCR_TSMSTRENA;
588 ts_event_en = PTP_TCR_TSEVNTENA;
589
590 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
591 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
592 break;
593
594 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
595
596 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
597 ptp_v2 = PTP_TCR_TSVER2ENA;
598
599 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
600
601 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
602 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
603 break;
604
605 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
606
607 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
608 ptp_v2 = PTP_TCR_TSVER2ENA;
609
610 ts_event_en = PTP_TCR_TSEVNTENA;
611
612 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
613 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
614 break;
615
616 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
617
618 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
619 ptp_v2 = PTP_TCR_TSVER2ENA;
620
621 ts_master_en = PTP_TCR_TSMSTRENA;
622 ts_event_en = PTP_TCR_TSEVNTENA;
623
624 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
625 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
626 break;
627
628 case HWTSTAMP_FILTER_PTP_V2_EVENT:
629
630 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
631 ptp_v2 = PTP_TCR_TSVER2ENA;
632 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
633 if (priv->synopsys_id != DWMAC_CORE_5_10)
634 ts_event_en = PTP_TCR_TSEVNTENA;
635 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
636 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
637 ptp_over_ethernet = PTP_TCR_TSIPENA;
638 break;
639
640 case HWTSTAMP_FILTER_PTP_V2_SYNC:
641
642 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
643 ptp_v2 = PTP_TCR_TSVER2ENA;
644
645 ts_event_en = PTP_TCR_TSEVNTENA;
646
647 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
648 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
649 ptp_over_ethernet = PTP_TCR_TSIPENA;
650 break;
651
652 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
653
654 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
655 ptp_v2 = PTP_TCR_TSVER2ENA;
656
657 ts_master_en = PTP_TCR_TSMSTRENA;
658 ts_event_en = PTP_TCR_TSEVNTENA;
659
660 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 ptp_over_ethernet = PTP_TCR_TSIPENA;
663 break;
664
665 case HWTSTAMP_FILTER_NTP_ALL:
666 case HWTSTAMP_FILTER_ALL:
667
668 config.rx_filter = HWTSTAMP_FILTER_ALL;
669 tstamp_all = PTP_TCR_TSENALL;
670 break;
671
672 default:
673 return -ERANGE;
674 }
675 } else {
676 switch (config.rx_filter) {
677 case HWTSTAMP_FILTER_NONE:
678 config.rx_filter = HWTSTAMP_FILTER_NONE;
679 break;
680 default:
681
682 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
683 break;
684 }
685 }
686 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
687 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
688
689 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
690 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
691 else {
692 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
693 tstamp_all | ptp_v2 | ptp_over_ethernet |
694 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
695 ts_master_en | snap_type_sel);
696 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
697
698
699 stmmac_config_sub_second_increment(priv,
700 priv->ptpaddr, priv->plat->clk_ptp_rate,
701 xmac, &sec_inc);
702 temp = div_u64(1000000000ULL, sec_inc);
703
704
705 priv->sub_second_inc = sec_inc;
706 priv->systime_flags = value;
/* default_addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate: the accumulator then overflows once per sub-second increment of the system time counter. */
713 temp = (u64)(temp << 32);
714 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
715 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
716
717
718 ktime_get_real_ts64(&now);
719
720
721 stmmac_init_systime(priv, priv->ptpaddr,
722 (u32)now.tv_sec, now.tv_nsec);
723 }
724
725 memcpy(&priv->tstamp_config, &config, sizeof(config));
726
727 return copy_to_user(ifr->ifr_data, &config,
728 sizeof(config)) ? -EFAULT : 0;
729 }
730
/* stmmac_hwtstamp_get - SIOCGHWTSTAMP handler: return the current hardware timestamping configuration. */
740 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
741 {
742 struct stmmac_priv *priv = netdev_priv(dev);
743 struct hwtstamp_config *config = &priv->tstamp_config;
744
745 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
746 return -EOPNOTSUPP;
747
748 return copy_to_user(ifr->ifr_data, config,
749 sizeof(*config)) ? -EFAULT : 0;
750 }
751
/* stmmac_init_ptp - check the timestamping capabilities (basic or advanced) and register the PTP clock. */
759 static int stmmac_init_ptp(struct stmmac_priv *priv)
760 {
761 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
762
763 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
764 return -EOPNOTSUPP;
765
766 priv->adv_ts = 0;
767
768 if (xmac && priv->dma_cap.atime_stamp)
769 priv->adv_ts = 1;
770
771 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
772 priv->adv_ts = 1;
773
774 if (priv->dma_cap.time_stamp)
775 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
776
777 if (priv->adv_ts)
778 netdev_info(priv->dev,
779 "IEEE 1588-2008 Advanced Timestamp supported\n");
780
781 priv->hwts_tx_en = 0;
782 priv->hwts_rx_en = 0;
783
784 stmmac_ptp_register(priv);
785
786 return 0;
787 }
788
789 static void stmmac_release_ptp(struct stmmac_priv *priv)
790 {
791 if (priv->plat->clk_ptp_ref)
792 clk_disable_unprepare(priv->plat->clk_ptp_ref);
793 stmmac_ptp_unregister(priv);
794 }
795
/* stmmac_mac_flow_ctrl - configure MAC flow control according to the current duplex mode. */
801 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
802 {
803 u32 tx_cnt = priv->plat->tx_queues_to_use;
804
805 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
806 priv->pause, tx_cnt);
807 }
808
809 static void stmmac_validate(struct phylink_config *config,
810 unsigned long *supported,
811 struct phylink_link_state *state)
812 {
813 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
814 __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
815 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
816 int tx_cnt = priv->plat->tx_queues_to_use;
817 int max_speed = priv->plat->max_speed;
818
819 phylink_set(mac_supported, 10baseT_Half);
820 phylink_set(mac_supported, 10baseT_Full);
821 phylink_set(mac_supported, 100baseT_Half);
822 phylink_set(mac_supported, 100baseT_Full);
823 phylink_set(mac_supported, 1000baseT_Half);
824 phylink_set(mac_supported, 1000baseT_Full);
825 phylink_set(mac_supported, 1000baseKX_Full);
826
827 phylink_set(mac_supported, Autoneg);
828 phylink_set(mac_supported, Pause);
829 phylink_set(mac_supported, Asym_Pause);
830 phylink_set_port_modes(mac_supported);
831
/* Cut 1G modes when the platform limits the speed below 1000 Mb/s; add 2.5/5/10G modes on XGMAC. */
833 if ((max_speed > 0) && (max_speed < 1000)) {
834 phylink_set(mask, 1000baseT_Full);
835 phylink_set(mask, 1000baseX_Full);
836 } else if (priv->plat->has_xgmac) {
837 if (!max_speed || (max_speed >= 2500)) {
838 phylink_set(mac_supported, 2500baseT_Full);
839 phylink_set(mac_supported, 2500baseX_Full);
840 }
841 if (!max_speed || (max_speed >= 5000)) {
842 phylink_set(mac_supported, 5000baseT_Full);
843 }
844 if (!max_speed || (max_speed >= 10000)) {
845 phylink_set(mac_supported, 10000baseSR_Full);
846 phylink_set(mac_supported, 10000baseLR_Full);
847 phylink_set(mac_supported, 10000baseER_Full);
848 phylink_set(mac_supported, 10000baseLRM_Full);
849 phylink_set(mac_supported, 10000baseT_Full);
850 phylink_set(mac_supported, 10000baseKX4_Full);
851 phylink_set(mac_supported, 10000baseKR_Full);
852 }
853 }
854
/* Half-duplex is not supported with more than one TX queue. */
856 if (tx_cnt > 1) {
857 phylink_set(mask, 10baseT_Half);
858 phylink_set(mask, 100baseT_Half);
859 phylink_set(mask, 1000baseT_Half);
860 }
861
862 bitmap_and(supported, supported, mac_supported,
863 __ETHTOOL_LINK_MODE_MASK_NBITS);
864 bitmap_andnot(supported, supported, mask,
865 __ETHTOOL_LINK_MODE_MASK_NBITS);
866 bitmap_and(state->advertising, state->advertising, mac_supported,
867 __ETHTOOL_LINK_MODE_MASK_NBITS);
868 bitmap_andnot(state->advertising, state->advertising, mask,
869 __ETHTOOL_LINK_MODE_MASK_NBITS);
870 }
871
872 static int stmmac_mac_link_state(struct phylink_config *config,
873 struct phylink_link_state *state)
874 {
875 return -EOPNOTSUPP;
876 }
877
878 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
879 const struct phylink_link_state *state)
880 {
881 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
882 u32 ctrl;
883
884 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
885 ctrl &= ~priv->hw->link.speed_mask;
886
887 if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
888 switch (state->speed) {
889 case SPEED_10000:
890 ctrl |= priv->hw->link.xgmii.speed10000;
891 break;
892 case SPEED_5000:
893 ctrl |= priv->hw->link.xgmii.speed5000;
894 break;
895 case SPEED_2500:
896 ctrl |= priv->hw->link.xgmii.speed2500;
897 break;
898 default:
899 return;
900 }
901 } else {
902 switch (state->speed) {
903 case SPEED_2500:
904 ctrl |= priv->hw->link.speed2500;
905 break;
906 case SPEED_1000:
907 ctrl |= priv->hw->link.speed1000;
908 break;
909 case SPEED_100:
910 ctrl |= priv->hw->link.speed100;
911 break;
912 case SPEED_10:
913 ctrl |= priv->hw->link.speed10;
914 break;
915 default:
916 return;
917 }
918 }
919
920 priv->speed = state->speed;
921
922 if (priv->plat->fix_mac_speed)
923 priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);
924
925 if (!state->duplex)
926 ctrl &= ~priv->hw->link.duplex;
927 else
928 ctrl |= priv->hw->link.duplex;
929
930
931 if (state->pause)
932 stmmac_mac_flow_ctrl(priv, state->duplex);
933
934 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
935 }
936
937 static void stmmac_mac_an_restart(struct phylink_config *config)
938 {
/* Not supported */
940 }
941
942 static void stmmac_mac_link_down(struct phylink_config *config,
943 unsigned int mode, phy_interface_t interface)
944 {
945 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
946
947 stmmac_mac_set(priv, priv->ioaddr, false);
948 priv->eee_active = false;
949 stmmac_eee_init(priv);
950 stmmac_set_eee_pls(priv, priv->hw, false);
951 }
952
953 static void stmmac_mac_link_up(struct phylink_config *config,
954 unsigned int mode, phy_interface_t interface,
955 struct phy_device *phy)
956 {
957 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
958
959 stmmac_mac_set(priv, priv->ioaddr, true);
960 if (phy && priv->dma_cap.eee) {
961 priv->eee_active = phy_init_eee(phy, 1) >= 0;
962 priv->eee_enabled = stmmac_eee_init(priv);
963 stmmac_set_eee_pls(priv, priv->hw, true);
964 }
965 }
966
967 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
968 .validate = stmmac_validate,
969 .mac_link_state = stmmac_mac_link_state,
970 .mac_config = stmmac_mac_config,
971 .mac_an_restart = stmmac_mac_an_restart,
972 .mac_link_down = stmmac_mac_link_down,
973 .mac_link_up = stmmac_mac_link_up,
974 };
975
/* stmmac_check_pcs_mode - detect whether the integrated PCS handles the link (RGMII or SGMII). */
983 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
984 {
985 int interface = priv->plat->interface;
986
987 if (priv->dma_cap.pcs) {
988 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
989 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
990 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
991 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
992 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
993 priv->hw->pcs = STMMAC_PCS_RGMII;
994 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
995 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
996 priv->hw->pcs = STMMAC_PCS_SGMII;
997 }
998 }
999 }
1000
/* stmmac_init_phy - connect the PHY through phylink, from the firmware node when available. */
1009 static int stmmac_init_phy(struct net_device *dev)
1010 {
1011 struct stmmac_priv *priv = netdev_priv(dev);
1012 struct device_node *node;
1013 int ret;
1014
1015 node = priv->plat->phylink_node;
1016
1017 if (node)
1018 ret = phylink_of_phy_connect(priv->phylink, node, 0);
1019
/* Some DT bindings do not provide a PHY handle: fall back to the platform phy_addr. */
1023 if (!node || ret) {
1024 int addr = priv->plat->phy_addr;
1025 struct phy_device *phydev;
1026
1027 phydev = mdiobus_get_phy(priv->mii, addr);
1028 if (!phydev) {
1029 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1030 return -ENODEV;
1031 }
1032
1033 ret = phylink_connect_phy(priv->phylink, phydev);
1034 }
1035
1036 return ret;
1037 }
1038
1039 static int stmmac_phy_setup(struct stmmac_priv *priv)
1040 {
1041 struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1042 int mode = priv->plat->phy_interface;
1043 struct phylink *phylink;
1044
1045 priv->phylink_config.dev = &priv->dev->dev;
1046 priv->phylink_config.type = PHYLINK_NETDEV;
1047
1048 phylink = phylink_create(&priv->phylink_config, fwnode,
1049 mode, &stmmac_phylink_mac_ops);
1050 if (IS_ERR(phylink))
1051 return PTR_ERR(phylink);
1052
1053 priv->phylink = phylink;
1054 return 0;
1055 }
1056
1057 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1058 {
1059 u32 rx_cnt = priv->plat->rx_queues_to_use;
1060 void *head_rx;
1061 u32 queue;
1062
1063
1064 for (queue = 0; queue < rx_cnt; queue++) {
1065 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1066
1067 pr_info("\tRX Queue %u rings\n", queue);
1068
1069 if (priv->extend_desc)
1070 head_rx = (void *)rx_q->dma_erx;
1071 else
1072 head_rx = (void *)rx_q->dma_rx;
1073
1074
1075 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1076 }
1077 }
1078
1079 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1080 {
1081 u32 tx_cnt = priv->plat->tx_queues_to_use;
1082 void *head_tx;
1083 u32 queue;
1084
1085
1086 for (queue = 0; queue < tx_cnt; queue++) {
1087 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1088
1089 pr_info("\tTX Queue %d rings\n", queue);
1090
1091 if (priv->extend_desc)
1092 head_tx = (void *)tx_q->dma_etx;
1093 else
1094 head_tx = (void *)tx_q->dma_tx;
1095
1096 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1097 }
1098 }
1099
1100 static void stmmac_display_rings(struct stmmac_priv *priv)
1101 {
1102
1103 stmmac_display_rx_rings(priv);
1104
1105
1106 stmmac_display_tx_rings(priv);
1107 }
1108
1109 static int stmmac_set_bfsize(int mtu, int bufsize)
1110 {
1111 int ret = bufsize;
1112
1113 if (mtu >= BUF_SIZE_8KiB)
1114 ret = BUF_SIZE_16KiB;
1115 else if (mtu >= BUF_SIZE_4KiB)
1116 ret = BUF_SIZE_8KiB;
1117 else if (mtu >= BUF_SIZE_2KiB)
1118 ret = BUF_SIZE_4KiB;
1119 else if (mtu > DEFAULT_BUFSIZE)
1120 ret = BUF_SIZE_2KiB;
1121 else
1122 ret = DEFAULT_BUFSIZE;
1123
1124 return ret;
1125 }
1126
/* stmmac_clear_rx_descriptors - (re)initialize the RX descriptors of a queue. */
1134 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1135 {
1136 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1137 int i;
1138
1139
1140 for (i = 0; i < DMA_RX_SIZE; i++)
1141 if (priv->extend_desc)
1142 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1143 priv->use_riwt, priv->mode,
1144 (i == DMA_RX_SIZE - 1),
1145 priv->dma_buf_sz);
1146 else
1147 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1148 priv->use_riwt, priv->mode,
1149 (i == DMA_RX_SIZE - 1),
1150 priv->dma_buf_sz);
1151 }
1152
/* stmmac_clear_tx_descriptors - (re)initialize the TX descriptors of a queue. */
1160 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1161 {
1162 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1163 int i;
1164
1165
1166 for (i = 0; i < DMA_TX_SIZE; i++)
1167 if (priv->extend_desc)
1168 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1169 priv->mode, (i == DMA_TX_SIZE - 1));
1170 else
1171 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1172 priv->mode, (i == DMA_TX_SIZE - 1));
1173 }
1174
/* stmmac_clear_descriptors - clear every RX and TX descriptor ring. */
1181 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1182 {
1183 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1184 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1185 u32 queue;
1186
1187
1188 for (queue = 0; queue < rx_queue_cnt; queue++)
1189 stmmac_clear_rx_descriptors(priv, queue);
1190
1191
1192 for (queue = 0; queue < tx_queue_cnt; queue++)
1193 stmmac_clear_tx_descriptors(priv, queue);
1194 }
1195
/* stmmac_init_rx_buffers - allocate page-pool buffers for one RX descriptor and program its buffer addresses. */
1206 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1207 int i, gfp_t flags, u32 queue)
1208 {
1209 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1210 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1211
1212 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1213 if (!buf->page)
1214 return -ENOMEM;
1215
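/* With Split Header (SPH) a second page receives the payload while the first page holds the header. */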
1216 if (priv->sph) {
1217 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1218 if (!buf->sec_page)
1219 return -ENOMEM;
1220
1221 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1222 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
1223 } else {
1224 buf->sec_page = NULL;
1225 }
1226
1227 buf->addr = page_pool_get_dma_addr(buf->page);
1228 stmmac_set_desc_addr(priv, p, buf->addr);
1229 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1230 stmmac_init_desc3(priv, p);
1231
1232 return 0;
1233 }
1234
/* stmmac_free_rx_buffer - return the pages of one RX descriptor to the page pool. */
1241 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1242 {
1243 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1244 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1245
1246 if (buf->page)
1247 page_pool_put_page(rx_q->page_pool, buf->page, false);
1248 buf->page = NULL;
1249
1250 if (buf->sec_page)
1251 page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
1252 buf->sec_page = NULL;
1253 }
1254
/* stmmac_free_tx_buffer - unmap the DMA buffer and free the skb attached to one TX descriptor. */
1261 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1262 {
1263 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1264
1265 if (tx_q->tx_skbuff_dma[i].buf) {
1266 if (tx_q->tx_skbuff_dma[i].map_as_page)
1267 dma_unmap_page(priv->device,
1268 tx_q->tx_skbuff_dma[i].buf,
1269 tx_q->tx_skbuff_dma[i].len,
1270 DMA_TO_DEVICE);
1271 else
1272 dma_unmap_single(priv->device,
1273 tx_q->tx_skbuff_dma[i].buf,
1274 tx_q->tx_skbuff_dma[i].len,
1275 DMA_TO_DEVICE);
1276 }
1277
1278 if (tx_q->tx_skbuff[i]) {
1279 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1280 tx_q->tx_skbuff[i] = NULL;
1281 tx_q->tx_skbuff_dma[i].buf = 0;
1282 tx_q->tx_skbuff_dma[i].map_as_page = false;
1283 }
1284 }
1285
/* init_dma_rx_desc_rings - initialize the RX descriptor rings and allocate their receive buffers. */
1294 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1295 {
1296 struct stmmac_priv *priv = netdev_priv(dev);
1297 u32 rx_count = priv->plat->rx_queues_to_use;
1298 int ret = -ENOMEM;
1299 int queue;
1300 int i;
1301
1302
1303 netif_dbg(priv, probe, priv->dev,
1304 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1305
1306 for (queue = 0; queue < rx_count; queue++) {
1307 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1308
1309 netif_dbg(priv, probe, priv->dev,
1310 "(%s) dma_rx_phy=0x%08x\n", __func__,
1311 (u32)rx_q->dma_rx_phy);
1312
1313 stmmac_clear_rx_descriptors(priv, queue);
1314
1315 for (i = 0; i < DMA_RX_SIZE; i++) {
1316 struct dma_desc *p;
1317
1318 if (priv->extend_desc)
1319 p = &((rx_q->dma_erx + i)->basic);
1320 else
1321 p = rx_q->dma_rx + i;
1322
1323 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1324 queue);
1325 if (ret)
1326 goto err_init_rx_buffers;
1327 }
1328
1329 rx_q->cur_rx = 0;
1330 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1331
1332
1333 if (priv->mode == STMMAC_CHAIN_MODE) {
1334 if (priv->extend_desc)
1335 stmmac_mode_init(priv, rx_q->dma_erx,
1336 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1337 else
1338 stmmac_mode_init(priv, rx_q->dma_rx,
1339 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1340 }
1341 }
1342
1343 return 0;
1344
1345 err_init_rx_buffers:
1346 while (queue >= 0) {
1347 while (--i >= 0)
1348 stmmac_free_rx_buffer(priv, queue, i);
1349
1350 if (queue == 0)
1351 break;
1352
1353 i = DMA_RX_SIZE;
1354 queue--;
1355 }
1356
1357 return ret;
1358 }
1359
/* init_dma_tx_desc_rings - initialize the TX descriptor rings and reset their bookkeeping state. */
1367 static int init_dma_tx_desc_rings(struct net_device *dev)
1368 {
1369 struct stmmac_priv *priv = netdev_priv(dev);
1370 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1371 u32 queue;
1372 int i;
1373
1374 for (queue = 0; queue < tx_queue_cnt; queue++) {
1375 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1376
1377 netif_dbg(priv, probe, priv->dev,
1378 "(%s) dma_tx_phy=0x%08x\n", __func__,
1379 (u32)tx_q->dma_tx_phy);
1380
1381
1382 if (priv->mode == STMMAC_CHAIN_MODE) {
1383 if (priv->extend_desc)
1384 stmmac_mode_init(priv, tx_q->dma_etx,
1385 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1386 else
1387 stmmac_mode_init(priv, tx_q->dma_tx,
1388 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1389 }
1390
1391 for (i = 0; i < DMA_TX_SIZE; i++) {
1392 struct dma_desc *p;
1393 if (priv->extend_desc)
1394 p = &((tx_q->dma_etx + i)->basic);
1395 else
1396 p = tx_q->dma_tx + i;
1397
1398 stmmac_clear_desc(priv, p);
1399
1400 tx_q->tx_skbuff_dma[i].buf = 0;
1401 tx_q->tx_skbuff_dma[i].map_as_page = false;
1402 tx_q->tx_skbuff_dma[i].len = 0;
1403 tx_q->tx_skbuff_dma[i].last_segment = false;
1404 tx_q->tx_skbuff[i] = NULL;
1405 }
1406
1407 tx_q->dirty_tx = 0;
1408 tx_q->cur_tx = 0;
1409 tx_q->mss = 0;
1410
1411 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1412 }
1413
1414 return 0;
1415 }
1416
/* init_dma_desc_rings - initialize both the RX and TX descriptor rings. */
1425 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1426 {
1427 struct stmmac_priv *priv = netdev_priv(dev);
1428 int ret;
1429
1430 ret = init_dma_rx_desc_rings(dev, flags);
1431 if (ret)
1432 return ret;
1433
1434 ret = init_dma_tx_desc_rings(dev);
1435
1436 stmmac_clear_descriptors(priv);
1437
1438 if (netif_msg_hw(priv))
1439 stmmac_display_rings(priv);
1440
1441 return ret;
1442 }
1443
/* dma_free_rx_skbufs - free every RX buffer of a queue. */
1449 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1450 {
1451 int i;
1452
1453 for (i = 0; i < DMA_RX_SIZE; i++)
1454 stmmac_free_rx_buffer(priv, queue, i);
1455 }
1456
/* dma_free_tx_skbufs - free every TX buffer of a queue. */
1462 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1463 {
1464 int i;
1465
1466 for (i = 0; i < DMA_TX_SIZE; i++)
1467 stmmac_free_tx_buffer(priv, queue, i);
1468 }
1469
/* free_dma_rx_desc_resources - free RX descriptor memory, buffer bookkeeping and page pools. */
1474 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1475 {
1476 u32 rx_count = priv->plat->rx_queues_to_use;
1477 u32 queue;
1478
1479
1480 for (queue = 0; queue < rx_count; queue++) {
1481 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1482
1483
1484 dma_free_rx_skbufs(priv, queue);
1485
1486
1487 if (!priv->extend_desc)
1488 dma_free_coherent(priv->device,
1489 DMA_RX_SIZE * sizeof(struct dma_desc),
1490 rx_q->dma_rx, rx_q->dma_rx_phy);
1491 else
1492 dma_free_coherent(priv->device, DMA_RX_SIZE *
1493 sizeof(struct dma_extended_desc),
1494 rx_q->dma_erx, rx_q->dma_rx_phy);
1495
1496 kfree(rx_q->buf_pool);
1497 if (rx_q->page_pool)
1498 page_pool_destroy(rx_q->page_pool);
1499 }
1500 }
1501
/* free_dma_tx_desc_resources - free TX descriptor memory and bookkeeping arrays. */
1506 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1507 {
1508 u32 tx_count = priv->plat->tx_queues_to_use;
1509 u32 queue;
1510
1511
1512 for (queue = 0; queue < tx_count; queue++) {
1513 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1514
1515
1516 dma_free_tx_skbufs(priv, queue);
1517
1518
1519 if (!priv->extend_desc)
1520 dma_free_coherent(priv->device,
1521 DMA_TX_SIZE * sizeof(struct dma_desc),
1522 tx_q->dma_tx, tx_q->dma_tx_phy);
1523 else
1524 dma_free_coherent(priv->device, DMA_TX_SIZE *
1525 sizeof(struct dma_extended_desc),
1526 tx_q->dma_etx, tx_q->dma_tx_phy);
1527
1528 kfree(tx_q->tx_skbuff_dma);
1529 kfree(tx_q->tx_skbuff);
1530 }
1531 }
1532
/* alloc_dma_rx_desc_resources - allocate page pools, buffer bookkeeping and RX descriptor memory for every RX queue. */
1541 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1542 {
1543 u32 rx_count = priv->plat->rx_queues_to_use;
1544 int ret = -ENOMEM;
1545 u32 queue;
1546
1547
1548 for (queue = 0; queue < rx_count; queue++) {
1549 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1550 struct page_pool_params pp_params = { 0 };
1551 unsigned int num_pages;
1552
1553 rx_q->queue_index = queue;
1554 rx_q->priv_data = priv;
1555
1556 pp_params.flags = PP_FLAG_DMA_MAP;
1557 pp_params.pool_size = DMA_RX_SIZE;
1558 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1559 pp_params.order = ilog2(num_pages);
1560 pp_params.nid = dev_to_node(priv->device);
1561 pp_params.dev = priv->device;
1562 pp_params.dma_dir = DMA_FROM_DEVICE;
1563
1564 rx_q->page_pool = page_pool_create(&pp_params);
1565 if (IS_ERR(rx_q->page_pool)) {
1566 ret = PTR_ERR(rx_q->page_pool);
1567 rx_q->page_pool = NULL;
1568 goto err_dma;
1569 }
1570
1571 rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
1572 GFP_KERNEL);
1573 if (!rx_q->buf_pool)
1574 goto err_dma;
1575
1576 if (priv->extend_desc) {
1577 rx_q->dma_erx = dma_alloc_coherent(priv->device,
1578 DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1579 &rx_q->dma_rx_phy,
1580 GFP_KERNEL);
1581 if (!rx_q->dma_erx)
1582 goto err_dma;
1583
1584 } else {
1585 rx_q->dma_rx = dma_alloc_coherent(priv->device,
1586 DMA_RX_SIZE * sizeof(struct dma_desc),
1587 &rx_q->dma_rx_phy,
1588 GFP_KERNEL);
1589 if (!rx_q->dma_rx)
1590 goto err_dma;
1591 }
1592 }
1593
1594 return 0;
1595
1596 err_dma:
1597 free_dma_rx_desc_resources(priv);
1598
1599 return ret;
1600 }
1601
/* alloc_dma_tx_desc_resources - allocate bookkeeping arrays and descriptor memory for every TX queue. */
1610 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1611 {
1612 u32 tx_count = priv->plat->tx_queues_to_use;
1613 int ret = -ENOMEM;
1614 u32 queue;
1615
1616
1617 for (queue = 0; queue < tx_count; queue++) {
1618 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1619
1620 tx_q->queue_index = queue;
1621 tx_q->priv_data = priv;
1622
1623 tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
1624 sizeof(*tx_q->tx_skbuff_dma),
1625 GFP_KERNEL);
1626 if (!tx_q->tx_skbuff_dma)
1627 goto err_dma;
1628
1629 tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
1630 sizeof(struct sk_buff *),
1631 GFP_KERNEL);
1632 if (!tx_q->tx_skbuff)
1633 goto err_dma;
1634
1635 if (priv->extend_desc) {
1636 tx_q->dma_etx = dma_alloc_coherent(priv->device,
1637 DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1638 &tx_q->dma_tx_phy,
1639 GFP_KERNEL);
1640 if (!tx_q->dma_etx)
1641 goto err_dma;
1642 } else {
1643 tx_q->dma_tx = dma_alloc_coherent(priv->device,
1644 DMA_TX_SIZE * sizeof(struct dma_desc),
1645 &tx_q->dma_tx_phy,
1646 GFP_KERNEL);
1647 if (!tx_q->dma_tx)
1648 goto err_dma;
1649 }
1650 }
1651
1652 return 0;
1653
1654 err_dma:
1655 free_dma_tx_desc_resources(priv);
1656
1657 return ret;
1658 }
1659
/* alloc_dma_desc_resources - allocate RX and then TX DMA resources. */
1668 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1669 {
1670
1671 int ret = alloc_dma_rx_desc_resources(priv);
1672
1673 if (ret)
1674 return ret;
1675
1676 ret = alloc_dma_tx_desc_resources(priv);
1677
1678 return ret;
1679 }
1680
/* free_dma_desc_resources - release all RX and TX DMA resources. */
1685 static void free_dma_desc_resources(struct stmmac_priv *priv)
1686 {
1687
1688 free_dma_rx_desc_resources(priv);
1689
1690
1691 free_dma_tx_desc_resources(priv);
1692 }
1693
/* stmmac_mac_enable_rx_queues - enable the MTL RX queues in their configured (DCB or AVB) mode. */
1699 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1700 {
1701 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1702 int queue;
1703 u8 mode;
1704
1705 for (queue = 0; queue < rx_queues_count; queue++) {
1706 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1707 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1708 }
1709 }
1710
/* stmmac_start_rx_dma - start the RX DMA of a channel. */
1718 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1719 {
1720 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1721 stmmac_start_rx(priv, priv->ioaddr, chan);
1722 }
1723
/* stmmac_start_tx_dma - start the TX DMA of a channel. */
1731 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1732 {
1733 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1734 stmmac_start_tx(priv, priv->ioaddr, chan);
1735 }
1736
/* stmmac_stop_rx_dma - stop the RX DMA of a channel. */
1744 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1745 {
1746 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1747 stmmac_stop_rx(priv, priv->ioaddr, chan);
1748 }
1749
/* stmmac_stop_tx_dma - stop the TX DMA of a channel. */
1757 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1758 {
1759 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1760 stmmac_stop_tx(priv, priv->ioaddr, chan);
1761 }
1762
/* stmmac_start_all_dma - start every RX and TX DMA channel. */
1769 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1770 {
1771 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1772 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1773 u32 chan = 0;
1774
1775 for (chan = 0; chan < rx_channels_count; chan++)
1776 stmmac_start_rx_dma(priv, chan);
1777
1778 for (chan = 0; chan < tx_channels_count; chan++)
1779 stmmac_start_tx_dma(priv, chan);
1780 }
1781
/* stmmac_stop_all_dma - stop every RX and TX DMA channel. */
1788 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1789 {
1790 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1791 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1792 u32 chan = 0;
1793
1794 for (chan = 0; chan < rx_channels_count; chan++)
1795 stmmac_stop_rx_dma(priv, chan);
1796
1797 for (chan = 0; chan < tx_channels_count; chan++)
1798 stmmac_stop_tx_dma(priv, chan);
1799 }
1800
/* stmmac_dma_operation_mode - program the per-channel RX/TX operation mode (threshold or store-and-forward). */
1807 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1808 {
1809 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1810 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1811 int rxfifosz = priv->plat->rx_fifo_size;
1812 int txfifosz = priv->plat->tx_fifo_size;
1813 u32 txmode = 0;
1814 u32 rxmode = 0;
1815 u32 chan = 0;
1816 u8 qmode = 0;
1817
1818 if (rxfifosz == 0)
1819 rxfifosz = priv->dma_cap.rx_fifo_size;
1820 if (txfifosz == 0)
1821 txfifosz = priv->dma_cap.tx_fifo_size;
1822
1823
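/* Split the RX/TX FIFO space evenly across the channels in use. */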
1824 rxfifosz /= rx_channels_count;
1825 txfifosz /= tx_channels_count;
1826
1827 if (priv->plat->force_thresh_dma_mode) {
1828 txmode = tc;
1829 rxmode = tc;
1830 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
/* Store-and-forward is needed for TX checksum offload, so select it whenever TX COE is available or SF mode is forced. */
1838 txmode = SF_DMA_MODE;
1839 rxmode = SF_DMA_MODE;
1840 priv->xstats.threshold = SF_DMA_MODE;
1841 } else {
1842 txmode = tc;
1843 rxmode = SF_DMA_MODE;
1844 }
1845
1846
1847 for (chan = 0; chan < rx_channels_count; chan++) {
1848 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1849
1850 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1851 rxfifosz, qmode);
1852 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1853 chan);
1854 }
1855
1856 for (chan = 0; chan < tx_channels_count; chan++) {
1857 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1858
1859 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1860 txfifosz, qmode);
1861 }
1862 }
1863
/* stmmac_tx_clean - reclaim completed TX descriptors of a queue; returns the number of packets cleaned. */
1870 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1871 {
1872 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1873 unsigned int bytes_compl = 0, pkts_compl = 0;
1874 unsigned int entry, count = 0;
1875
1876 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1877
1878 priv->xstats.tx_clean++;
1879
1880 entry = tx_q->dirty_tx;
1881 while ((entry != tx_q->cur_tx) && (count < budget)) {
1882 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1883 struct dma_desc *p;
1884 int status;
1885
1886 if (priv->extend_desc)
1887 p = (struct dma_desc *)(tx_q->dma_etx + entry);
1888 else
1889 p = tx_q->dma_tx + entry;
1890
1891 status = stmmac_tx_status(priv, &priv->dev->stats,
1892 &priv->xstats, p, priv->ioaddr);
1893
1894 if (unlikely(status & tx_dma_own))
1895 break;
1896
1897 count++;
1898
/* Make sure the descriptor fields are only read after checking the own bit. */
1902 dma_rmb();
1903
1904
1905 if (likely(!(status & tx_not_ls))) {
1906
1907 if (unlikely(status & tx_err)) {
1908 priv->dev->stats.tx_errors++;
1909 } else {
1910 priv->dev->stats.tx_packets++;
1911 priv->xstats.tx_pkt_n++;
1912 }
1913 stmmac_get_tx_hwtstamp(priv, p, skb);
1914 }
1915
1916 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1917 if (tx_q->tx_skbuff_dma[entry].map_as_page)
1918 dma_unmap_page(priv->device,
1919 tx_q->tx_skbuff_dma[entry].buf,
1920 tx_q->tx_skbuff_dma[entry].len,
1921 DMA_TO_DEVICE);
1922 else
1923 dma_unmap_single(priv->device,
1924 tx_q->tx_skbuff_dma[entry].buf,
1925 tx_q->tx_skbuff_dma[entry].len,
1926 DMA_TO_DEVICE);
1927 tx_q->tx_skbuff_dma[entry].buf = 0;
1928 tx_q->tx_skbuff_dma[entry].len = 0;
1929 tx_q->tx_skbuff_dma[entry].map_as_page = false;
1930 }
1931
1932 stmmac_clean_desc3(priv, tx_q, p);
1933
1934 tx_q->tx_skbuff_dma[entry].last_segment = false;
1935 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1936
1937 if (likely(skb != NULL)) {
1938 pkts_compl++;
1939 bytes_compl += skb->len;
1940 dev_consume_skb_any(skb);
1941 tx_q->tx_skbuff[entry] = NULL;
1942 }
1943
1944 stmmac_release_tx_desc(priv, p, priv->mode);
1945
1946 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1947 }
1948 tx_q->dirty_tx = entry;
1949
1950 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1951 pkts_compl, bytes_compl);
1952
1953 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1954 queue))) &&
1955 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1956
1957 netif_dbg(priv, tx_done, priv->dev,
1958 "%s: restart transmit\n", __func__);
1959 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1960 }
1961
1962 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1963 stmmac_enable_eee_mode(priv);
1964 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1965 }
1966
1967
1968 if (tx_q->dirty_tx != tx_q->cur_tx)
1969 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
1970
1971 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1972
1973 return count;
1974 }
1975
/* stmmac_tx_err - recover from a TX DMA error: reset the ring and restart the channel. */
1983 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1984 {
1985 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1986 int i;
1987
1988 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1989
1990 stmmac_stop_tx_dma(priv, chan);
1991 dma_free_tx_skbufs(priv, chan);
1992 for (i = 0; i < DMA_TX_SIZE; i++)
1993 if (priv->extend_desc)
1994 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1995 priv->mode, (i == DMA_TX_SIZE - 1));
1996 else
1997 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1998 priv->mode, (i == DMA_TX_SIZE - 1));
1999 tx_q->dirty_tx = 0;
2000 tx_q->cur_tx = 0;
2001 tx_q->mss = 0;
2002 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2003 stmmac_start_tx_dma(priv, chan);
2004
2005 priv->dev->stats.tx_errors++;
2006 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2007 }
2008
/* stmmac_set_dma_operation_mode - program the RX/TX operation mode of a single channel, accounting for the per-channel FIFO split. */
2019 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2020 u32 rxmode, u32 chan)
2021 {
2022 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2023 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2024 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2025 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2026 int rxfifosz = priv->plat->rx_fifo_size;
2027 int txfifosz = priv->plat->tx_fifo_size;
2028
2029 if (rxfifosz == 0)
2030 rxfifosz = priv->dma_cap.rx_fifo_size;
2031 if (txfifosz == 0)
2032 txfifosz = priv->dma_cap.tx_fifo_size;
2033
2034
2035 rxfifosz /= rx_channels_count;
2036 txfifosz /= tx_channels_count;
2037
2038 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2039 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2040 }
2041
2042 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2043 {
2044 int ret;
2045
2046 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2047 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2048 if (ret && (ret != -EINVAL)) {
2049 stmmac_global_err(priv);
2050 return true;
2051 }
2052
2053 return false;
2054 }
2055
2056 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2057 {
2058 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2059 &priv->xstats, chan);
2060 struct stmmac_channel *ch = &priv->channel[chan];
2061
2062 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2063 if (napi_schedule_prep(&ch->rx_napi)) {
2064 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2065 __napi_schedule_irqoff(&ch->rx_napi);
2066 status |= handle_tx;
2067 }
2068 }
2069
2070 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
2071 napi_schedule_irqoff(&ch->tx_napi);
2072
2073 return status;
2074 }
2075
/* stmmac_dma_interrupt - handle DMA interrupts: schedule NAPI and deal with TX errors or threshold bumps. */
2083 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2084 {
2085 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2086 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2087 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2088 tx_channel_count : rx_channel_count;
2089 u32 chan;
2090 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2091
2092
2093 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2094 channels_to_check = ARRAY_SIZE(status);
2095
2096 for (chan = 0; chan < channels_to_check; chan++)
2097 status[chan] = stmmac_napi_check(priv, chan);
2098
2099 for (chan = 0; chan < tx_channel_count; chan++) {
2100 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2101
2102 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2103 (tc <= 256)) {
2104 tc += 64;
2105 if (priv->plat->force_thresh_dma_mode)
2106 stmmac_set_dma_operation_mode(priv,
2107 tc,
2108 tc,
2109 chan);
2110 else
2111 stmmac_set_dma_operation_mode(priv,
2112 tc,
2113 SF_DMA_MODE,
2114 chan);
2115 priv->xstats.threshold = tc;
2116 }
2117 } else if (unlikely(status[chan] == tx_hard_error)) {
2118 stmmac_tx_err(priv, chan);
2119 }
2120 }
2121 }
2122
/* stmmac_mmc_setup - initialize the MAC Management Counters (MMC). */
2128 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2129 {
2130 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2131 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2132
2133 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2134
2135 if (priv->dma_cap.rmon) {
2136 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2137 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2138 } else
2139 netdev_info(priv->dev, "No MAC Management Counters available\n");
2140 }
2141
/* stmmac_get_hw_features - read the HW capability register; returns true when it is available. */
2151 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2152 {
2153 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2154 }
2155
/* stmmac_check_ether_addr - read the MAC address from the hardware and fall back to a random one if it is invalid. */
2163 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2164 {
2165 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2166 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2167 if (!is_valid_ether_addr(priv->dev->dev_addr))
2168 eth_hw_addr_random(priv->dev);
2169 dev_info(priv->device, "device MAC address %pM\n",
2170 priv->dev->dev_addr);
2171 }
2172 }
2173
/* stmmac_init_dma_engine - reset the DMA engine and initialize every RX and TX channel. */
2182 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2183 {
2184 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2185 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2186 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2187 struct stmmac_rx_queue *rx_q;
2188 struct stmmac_tx_queue *tx_q;
2189 u32 chan = 0;
2190 int atds = 0;
2191 int ret = 0;
2192
2193 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2194 dev_err(priv->device, "Invalid DMA configuration\n");
2195 return -EINVAL;
2196 }
2197
2198 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2199 atds = 1;
2200
2201 ret = stmmac_reset(priv, priv->ioaddr);
2202 if (ret) {
2203 dev_err(priv->device, "Failed to reset the dma\n");
2204 return ret;
2205 }
2206
2207
2208 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2209
2210 if (priv->plat->axi)
2211 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2212
2213
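/* The common DMA CSR setup covers the larger of the RX and TX channel counts. */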
2214 for (chan = 0; chan < dma_csr_ch; chan++)
2215 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2216
2217
2218 for (chan = 0; chan < rx_channels_count; chan++) {
2219 rx_q = &priv->rx_queue[chan];
2220
2221 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2222 rx_q->dma_rx_phy, chan);
2223
2224 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2225 (DMA_RX_SIZE * sizeof(struct dma_desc));
2226 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2227 rx_q->rx_tail_addr, chan);
2228 }
2229
2230
2231 for (chan = 0; chan < tx_channels_count; chan++) {
2232 tx_q = &priv->tx_queue[chan];
2233
2234 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2235 tx_q->dma_tx_phy, chan);
2236
2237 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2238 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2239 tx_q->tx_tail_addr, chan);
2240 }
2241
2242 return ret;
2243 }
2244
2245 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2246 {
2247 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2248
2249 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2250 }
2251
2252
2253
2254
2255
2256
2257
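/**
 * stmmac_tx_timer - TX coalescing timer callback
 * @t: timer_list pointer of the armed TX queue
 * Description: schedule the TX NAPI instance to reclaim completed
 * descriptors; if NAPI cannot be scheduled right now, re-arm the timer
 * and retry shortly after.
 */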
2258 static void stmmac_tx_timer(struct timer_list *t)
2259 {
2260 struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2261 struct stmmac_priv *priv = tx_q->priv_data;
2262 struct stmmac_channel *ch;
2263
2264 ch = &priv->channel[tx_q->queue_index];
2265
2266
2267
2268
2269
2270 if (likely(napi_schedule_prep(&ch->tx_napi)))
2271 __napi_schedule(&ch->tx_napi);
2272 else
2273 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
2274 }
2275
2276
2277
2278
2279
2280
2281
2282
2283
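/**
 * stmmac_init_coalesce - init TX/RX coalescing parameters
 * @priv: driver private structure
 * Description: set the default TX/RX frame and timer coalescing values
 * and set up the per-queue TX coalescing timers.
 */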
2284 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2285 {
2286 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2287 u32 chan;
2288
2289 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2290 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2291 priv->rx_coal_frames = STMMAC_RX_FRAMES;
2292
2293 for (chan = 0; chan < tx_channel_count; chan++) {
2294 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2295
2296 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2297 }
2298 }
2299
2300 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2301 {
2302 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2303 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2304 u32 chan;
2305
2306
2307 for (chan = 0; chan < tx_channels_count; chan++)
2308 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2309 (DMA_TX_SIZE - 1), chan);
2310
2311
2312 for (chan = 0; chan < rx_channels_count; chan++)
2313 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2314 (DMA_RX_SIZE - 1), chan);
2315 }
2316
2317
2318
2319
2320
2321
2322 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2323 {
2324 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2325 u32 weight;
2326 u32 queue;
2327
2328 for (queue = 0; queue < tx_queues_count; queue++) {
2329 weight = priv->plat->tx_queues_cfg[queue].weight;
2330 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2331 }
2332 }
2333
2334
2335
2336
2337
2338
2339 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2340 {
2341 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2342 u32 mode_to_use;
2343 u32 queue;
2344
2345
2346 for (queue = 1; queue < tx_queues_count; queue++) {
2347 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2348 if (mode_to_use == MTL_QUEUE_DCB)
2349 continue;
2350
2351 stmmac_config_cbs(priv, priv->hw,
2352 priv->plat->tx_queues_cfg[queue].send_slope,
2353 priv->plat->tx_queues_cfg[queue].idle_slope,
2354 priv->plat->tx_queues_cfg[queue].high_credit,
2355 priv->plat->tx_queues_cfg[queue].low_credit,
2356 queue);
2357 }
2358 }
2359
2360
2361
2362
2363
2364
2365 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2366 {
2367 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2368 u32 queue;
2369 u32 chan;
2370
2371 for (queue = 0; queue < rx_queues_count; queue++) {
2372 chan = priv->plat->rx_queues_cfg[queue].chan;
2373 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2374 }
2375 }
2376
2377
2378
2379
2380
2381
2382 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2383 {
2384 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2385 u32 queue;
2386 u32 prio;
2387
2388 for (queue = 0; queue < rx_queues_count; queue++) {
2389 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2390 continue;
2391
2392 prio = priv->plat->rx_queues_cfg[queue].prio;
2393 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2394 }
2395 }
2396
2397
2398
2399
2400
2401
2402 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2403 {
2404 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2405 u32 queue;
2406 u32 prio;
2407
2408 for (queue = 0; queue < tx_queues_count; queue++) {
2409 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2410 continue;
2411
2412 prio = priv->plat->tx_queues_cfg[queue].prio;
2413 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2414 }
2415 }
2416
2417
2418
2419
2420
2421
2422 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2423 {
2424 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2425 u32 queue;
2426 u8 packet;
2427
2428 for (queue = 0; queue < rx_queues_count; queue++) {
2429
2430 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2431 continue;
2432
2433 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2434 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2435 }
2436 }
2437
2438 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2439 {
2440 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2441 priv->rss.enable = false;
2442 return;
2443 }
2444
2445 if (priv->dev->features & NETIF_F_RXHASH)
2446 priv->rss.enable = true;
2447 else
2448 priv->rss.enable = false;
2449
2450 stmmac_rss_configure(priv, priv->hw, &priv->rss,
2451 priv->plat->rx_queues_to_use);
2452 }
2453
2454
2455
2456
2457
2458
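/**
 * stmmac_mtl_configuration - configure the MTL (multi-queue) features
 * @priv: driver private structure
 * Description: program TX queue weights, RX/TX scheduling algorithms,
 * CBS, RX queue to DMA channel mapping, RX queue enabling, queue
 * priorities, packet routing and RSS, depending on the number of
 * RX/TX queues in use.
 */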
2459 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2460 {
2461 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2462 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2463
2464 if (tx_queues_count > 1)
2465 stmmac_set_tx_queue_weight(priv);
2466
2467
2468 if (rx_queues_count > 1)
2469 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2470 priv->plat->rx_sched_algorithm);
2471
2472
2473 if (tx_queues_count > 1)
2474 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2475 priv->plat->tx_sched_algorithm);
2476
2477
2478 if (tx_queues_count > 1)
2479 stmmac_configure_cbs(priv);
2480
2481
2482 stmmac_rx_queue_dma_chan_map(priv);
2483
2484
2485 stmmac_mac_enable_rx_queues(priv);
2486
2487
2488 if (rx_queues_count > 1)
2489 stmmac_mac_config_rx_queues_prio(priv);
2490
2491
2492 if (tx_queues_count > 1)
2493 stmmac_mac_config_tx_queues_prio(priv);
2494
2495
2496 if (rx_queues_count > 1)
2497 stmmac_mac_config_rx_queues_routing(priv);
2498
2499
2500 if (rx_queues_count > 1)
2501 stmmac_mac_config_rss(priv);
2502 }
2503
2504 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2505 {
2506 if (priv->dma_cap.asp) {
2507 netdev_info(priv->dev, "Enabling Safety Features\n");
2508 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2509 } else {
2510 netdev_info(priv->dev, "No Safety Features support found\n");
2511 }
2512 }
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
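/**
 * stmmac_hw_setup - set up the MAC/DMA
 * @dev: pointer to the device structure
 * @init_ptp: also initialize the PTP reference clock and timestamping
 * Description: initialize the DMA engine, program the MAC address and
 * core registers, configure the MTL and safety features, RX checksum
 * offload, operation mode, MMC counters, optional PTP, RX watchdog,
 * ring lengths, TSO, Split Header and VLAN insertion, then start all
 * DMA channels.
 * Return: 0 on success, a negative errno otherwise.
 */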
2526 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2527 {
2528 struct stmmac_priv *priv = netdev_priv(dev);
2529 u32 rx_cnt = priv->plat->rx_queues_to_use;
2530 u32 tx_cnt = priv->plat->tx_queues_to_use;
2531 u32 chan;
2532 int ret;
2533
2534
2535 ret = stmmac_init_dma_engine(priv);
2536 if (ret < 0) {
2537 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2538 __func__);
2539 return ret;
2540 }
2541
2542
2543 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2544
2545
2546 if (priv->hw->pcs) {
2547 int speed = priv->plat->mac_port_sel_speed;
2548
2549 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2550 (speed == SPEED_1000)) {
2551 priv->hw->ps = speed;
2552 } else {
2553 dev_warn(priv->device, "invalid port speed\n");
2554 priv->hw->ps = 0;
2555 }
2556 }
2557
2558
2559 stmmac_core_init(priv, priv->hw, dev);
2560
2561
2562 stmmac_mtl_configuration(priv);
2563
2564
2565 stmmac_safety_feat_configuration(priv);
2566
2567 ret = stmmac_rx_ipc(priv, priv->hw);
2568 if (!ret) {
2569 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2570 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2571 priv->hw->rx_csum = 0;
2572 }
2573
2574
2575 stmmac_mac_set(priv, priv->ioaddr, true);
2576
2577
2578 stmmac_dma_operation_mode(priv);
2579
2580 stmmac_mmc_setup(priv);
2581
2582 if (init_ptp) {
2583 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2584 if (ret < 0)
2585 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2586
2587 ret = stmmac_init_ptp(priv);
2588 if (ret == -EOPNOTSUPP)
2589 netdev_warn(priv->dev, "PTP not supported by HW\n");
2590 else if (ret)
2591 netdev_warn(priv->dev, "PTP init failed\n");
2592 }
2593
2594 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2595
2596 if (priv->use_riwt) {
2597 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt);
2598 if (!ret)
2599 priv->rx_riwt = MIN_DMA_RIWT;
2600 }
2601
2602 if (priv->hw->pcs)
2603 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2604
2605
2606 stmmac_set_rings_length(priv);
2607
2608
2609 if (priv->tso) {
2610 for (chan = 0; chan < tx_cnt; chan++)
2611 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2612 }
2613
2614
2615 if (priv->sph && priv->hw->rx_csum) {
2616 for (chan = 0; chan < rx_cnt; chan++)
2617 stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2618 }
2619
2620
2621 if (priv->dma_cap.vlins)
2622 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2623
2624
2625 stmmac_start_all_dma(priv);
2626
2627 return 0;
2628 }
2629
2630 static void stmmac_hw_teardown(struct net_device *dev)
2631 {
2632 struct stmmac_priv *priv = netdev_priv(dev);
2633
2634 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2635 }
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
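/**
 * stmmac_open - open entry point of the driver
 * @dev: pointer to the device structure
 * Description: .ndo_open callback: attach the PHY (when no PCS is in
 * use), compute the RX buffer size, allocate and initialize the DMA
 * descriptor rings, run the HW setup, start phylink, request the main,
 * WoL and LPI IRQs and finally enable and start all queues.
 * Return: 0 on success, a negative errno on failure.
 */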
2646 static int stmmac_open(struct net_device *dev)
2647 {
2648 struct stmmac_priv *priv = netdev_priv(dev);
2649 int bfsize = 0;
2650 u32 chan;
2651 int ret;
2652
2653 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2654 priv->hw->pcs != STMMAC_PCS_TBI &&
2655 priv->hw->pcs != STMMAC_PCS_RTBI) {
2656 ret = stmmac_init_phy(dev);
2657 if (ret) {
2658 netdev_err(priv->dev,
2659 "%s: Cannot attach to PHY (error: %d)\n",
2660 __func__, ret);
2661 return ret;
2662 }
2663 }
2664
2665
2666 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2667 priv->xstats.threshold = tc;
2668
2669 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2670 if (bfsize < 0)
2671 bfsize = 0;
2672
2673 if (bfsize < BUF_SIZE_16KiB)
2674 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2675
2676 priv->dma_buf_sz = bfsize;
2677 buf_sz = bfsize;
2678
2679 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2680
2681 ret = alloc_dma_desc_resources(priv);
2682 if (ret < 0) {
2683 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2684 __func__);
2685 goto dma_desc_error;
2686 }
2687
2688 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2689 if (ret < 0) {
2690 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2691 __func__);
2692 goto init_error;
2693 }
2694
2695 ret = stmmac_hw_setup(dev, true);
2696 if (ret < 0) {
2697 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2698 goto init_error;
2699 }
2700
2701 stmmac_init_coalesce(priv);
2702
2703 phylink_start(priv->phylink);
2704
2705
2706 ret = request_irq(dev->irq, stmmac_interrupt,
2707 IRQF_SHARED, dev->name, dev);
2708 if (unlikely(ret < 0)) {
2709 netdev_err(priv->dev,
2710 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2711 __func__, dev->irq, ret);
2712 goto irq_error;
2713 }
2714
2715
2716 if (priv->wol_irq != dev->irq) {
2717 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2718 IRQF_SHARED, dev->name, dev);
2719 if (unlikely(ret < 0)) {
2720 netdev_err(priv->dev,
2721 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2722 __func__, priv->wol_irq, ret);
2723 goto wolirq_error;
2724 }
2725 }
2726
2727
2728 if (priv->lpi_irq > 0) {
2729 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2730 dev->name, dev);
2731 if (unlikely(ret < 0)) {
2732 netdev_err(priv->dev,
2733 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2734 __func__, priv->lpi_irq, ret);
2735 goto lpiirq_error;
2736 }
2737 }
2738
2739 stmmac_enable_all_queues(priv);
2740 stmmac_start_all_queues(priv);
2741
2742 return 0;
2743
2744 lpiirq_error:
2745 if (priv->wol_irq != dev->irq)
2746 free_irq(priv->wol_irq, dev);
2747 wolirq_error:
2748 free_irq(dev->irq, dev);
2749 irq_error:
2750 phylink_stop(priv->phylink);
2751
2752 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2753 del_timer_sync(&priv->tx_queue[chan].txtimer);
2754
2755 stmmac_hw_teardown(dev);
2756 init_error:
2757 free_dma_desc_resources(priv);
2758 dma_desc_error:
2759 phylink_disconnect_phy(priv->phylink);
2760 return ret;
2761 }
2762
2763
2764
2765
2766
2767
2768
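/**
 * stmmac_release - close entry point of the driver
 * @dev: device pointer
 * Description: .ndo_stop callback: stop phylink and all queues, delete
 * the coalescing timers, free the IRQs, stop the DMA, release the
 * descriptor resources, disable the MAC and release the PTP resources.
 */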
2769 static int stmmac_release(struct net_device *dev)
2770 {
2771 struct stmmac_priv *priv = netdev_priv(dev);
2772 u32 chan;
2773
2774 if (priv->eee_enabled)
2775 del_timer_sync(&priv->eee_ctrl_timer);
2776
2777
2778 phylink_stop(priv->phylink);
2779 phylink_disconnect_phy(priv->phylink);
2780
2781 stmmac_stop_all_queues(priv);
2782
2783 stmmac_disable_all_queues(priv);
2784
2785 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2786 del_timer_sync(&priv->tx_queue[chan].txtimer);
2787
2788
2789 free_irq(dev->irq, dev);
2790 if (priv->wol_irq != dev->irq)
2791 free_irq(priv->wol_irq, dev);
2792 if (priv->lpi_irq > 0)
2793 free_irq(priv->lpi_irq, dev);
2794
2795
2796 stmmac_stop_all_dma(priv);
2797
2798
2799 free_dma_desc_resources(priv);
2800
2801
2802 stmmac_mac_set(priv, priv->ioaddr, false);
2803
2804 netif_carrier_off(dev);
2805
2806 stmmac_release_ptp(priv);
2807
2808 return 0;
2809 }
2810
2811 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
2812 struct stmmac_tx_queue *tx_q)
2813 {
2814 u16 tag = 0x0, inner_tag = 0x0;
2815 u32 inner_type = 0x0;
2816 struct dma_desc *p;
2817
2818 if (!priv->dma_cap.vlins)
2819 return false;
2820 if (!skb_vlan_tag_present(skb))
2821 return false;
2822 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
2823 inner_tag = skb_vlan_tag_get(skb);
2824 inner_type = STMMAC_VLAN_INSERT;
2825 }
2826
2827 tag = skb_vlan_tag_get(skb);
2828
2829 p = tx_q->dma_tx + tx_q->cur_tx;
2830 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
2831 return false;
2832
2833 stmmac_set_tx_owner(priv, p);
2834 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2835 return true;
2836 }
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
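/**
 * stmmac_tso_allocator - fill the TSO descriptors for a buffer
 * @priv: driver private structure
 * @des: DMA address of the buffer
 * @total_len: total length to fill in the descriptors
 * @last_segment: condition for the last descriptor
 * @queue: TX queue index
 * Description: split the buffer into TSO_MAX_BUFF_SIZE chunks and
 * program one TX descriptor per chunk.
 */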
2849 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
2850 int total_len, bool last_segment, u32 queue)
2851 {
2852 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2853 struct dma_desc *desc;
2854 u32 buff_size;
2855 int tmp_len;
2856
2857 tmp_len = total_len;
2858
2859 while (tmp_len > 0) {
2860 dma_addr_t curr_addr;
2861
2862 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2863 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2864 desc = tx_q->dma_tx + tx_q->cur_tx;
2865
2866 curr_addr = des + (total_len - tmp_len);
2867 if (priv->dma_cap.addr64 <= 32)
2868 desc->des0 = cpu_to_le32(curr_addr);
2869 else
2870 stmmac_set_desc_addr(priv, desc, curr_addr);
2871
2872 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2873 TSO_MAX_BUFF_SIZE : tmp_len;
2874
2875 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2876 0, 1,
2877 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2878 0, 0);
2879
2880 tmp_len -= TSO_MAX_BUFF_SIZE;
2881 }
2882 }
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
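/**
 * stmmac_tso_xmit - TSO transmit path
 * @skb: socket buffer to be sent
 * @dev: network device
 * Description: build the descriptor chain for a TSO frame: check ring
 * space, program the MSS descriptor when the MSS changed, map the
 * header and the payload fragments, apply the interrupt-on-completion
 * coalescing and optional HW timestamp, hand ownership to the DMA and
 * update the TX tail pointer.
 */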
2911 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2912 {
2913 struct dma_desc *desc, *first, *mss_desc = NULL;
2914 struct stmmac_priv *priv = netdev_priv(dev);
2915 int nfrags = skb_shinfo(skb)->nr_frags;
2916 u32 queue = skb_get_queue_mapping(skb);
2917 struct stmmac_tx_queue *tx_q;
2918 unsigned int first_entry;
2919 int tmp_pay_len = 0;
2920 u32 pay_len, mss;
2921 u8 proto_hdr_len;
2922 dma_addr_t des;
2923 bool has_vlan;
2924 int i;
2925
2926 tx_q = &priv->tx_queue[queue];
2927
2928
2929 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2930
2931
2932 if (unlikely(stmmac_tx_avail(priv, queue) <
2933 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2934 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2935 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2936 queue));
2937
2938 netdev_err(priv->dev,
2939 "%s: Tx Ring full when queue awake\n",
2940 __func__);
2941 }
2942 return NETDEV_TX_BUSY;
2943 }
2944
2945 pay_len = skb_headlen(skb) - proto_hdr_len;
2946
2947 mss = skb_shinfo(skb)->gso_size;
2948
2949
2950 if (mss != tx_q->mss) {
2951 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2952 stmmac_set_mss(priv, mss_desc, mss);
2953 tx_q->mss = mss;
2954 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2955 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2956 }
2957
2958 if (netif_msg_tx_queued(priv)) {
2959 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2960 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2961 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2962 skb->data_len);
2963 }
2964
2965
2966 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
2967
2968 first_entry = tx_q->cur_tx;
2969 WARN_ON(tx_q->tx_skbuff[first_entry]);
2970
2971 desc = tx_q->dma_tx + first_entry;
2972 first = desc;
2973
2974 if (has_vlan)
2975 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
2976
2977
2978 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2979 DMA_TO_DEVICE);
2980 if (dma_mapping_error(priv->device, des))
2981 goto dma_map_err;
2982
2983 tx_q->tx_skbuff_dma[first_entry].buf = des;
2984 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2985
2986 if (priv->dma_cap.addr64 <= 32) {
2987 first->des0 = cpu_to_le32(des);
2988
2989
2990 if (pay_len)
2991 first->des1 = cpu_to_le32(des + proto_hdr_len);
2992
2993
2994 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2995 } else {
2996 stmmac_set_desc_addr(priv, first, des);
2997 tmp_pay_len = pay_len;
2998 des += proto_hdr_len;
2999 pay_len = 0;
3000 }
3001
3002 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3003
3004
3005 for (i = 0; i < nfrags; i++) {
3006 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3007
3008 des = skb_frag_dma_map(priv->device, frag, 0,
3009 skb_frag_size(frag),
3010 DMA_TO_DEVICE);
3011 if (dma_mapping_error(priv->device, des))
3012 goto dma_map_err;
3013
3014 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3015 (i == nfrags - 1), queue);
3016
3017 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3018 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3019 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3020 }
3021
3022 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3023
3024
3025 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3026
3027
3028 tx_q->tx_count_frames += nfrags + 1;
3029 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3030 !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3031 priv->hwts_tx_en)) {
3032 stmmac_tx_timer_arm(priv, queue);
3033 } else {
3034 desc = &tx_q->dma_tx[tx_q->cur_tx];
3035 tx_q->tx_count_frames = 0;
3036 stmmac_set_tx_ic(priv, desc);
3037 priv->xstats.tx_set_ic_bit++;
3038 }
3039
3040
3041
3042
3043
3044
3045 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
3046
3047 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3048 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3049 __func__);
3050 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3051 }
3052
3053 dev->stats.tx_bytes += skb->len;
3054 priv->xstats.tx_tso_frames++;
3055 priv->xstats.tx_tso_nfrags += nfrags;
3056
3057 if (priv->sarc_type)
3058 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3059
3060 skb_tx_timestamp(skb);
3061
3062 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3063 priv->hwts_tx_en)) {
3064
3065 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3066 stmmac_enable_tx_timestamp(priv, first);
3067 }
3068
3069
3070 stmmac_prepare_tso_tx_desc(priv, first, 1,
3071 proto_hdr_len,
3072 pay_len,
3073 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3074 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
3075
3076
3077 if (mss_desc) {
3078
3079
3080
3081
3082
3083 dma_wmb();
3084 stmmac_set_tx_owner(priv, mss_desc);
3085 }
3086
3087
3088
3089
3090
3091 wmb();
3092
3093 if (netif_msg_pktdata(priv)) {
3094 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3095 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3096 tx_q->cur_tx, first, nfrags);
3097
3098 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3099
3100 pr_info(">>> frame to be transmitted: ");
3101 print_pkt(skb->data, skb_headlen(skb));
3102 }
3103
3104 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3105
3106 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3107 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3108 stmmac_tx_timer_arm(priv, queue);
3109
3110 return NETDEV_TX_OK;
3111
3112 dma_map_err:
3113 dev_err(priv->device, "Tx dma map failed\n");
3114 dev_kfree_skb(skb);
3115 priv->dev->stats.tx_dropped++;
3116 return NETDEV_TX_OK;
3117 }
3118
3119
3120
3121
3122
3123
3124
3125
3126
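/**
 * stmmac_xmit - TX entry point of the driver
 * @skb: the socket buffer
 * @dev: device pointer
 * Description: .ndo_start_xmit callback: it dispatches GSO frames to
 * stmmac_tso_xmit(), maps the linear part and the fragments, prepares
 * the descriptors (checksum insertion, jumbo handling, IC coalescing,
 * optional HW timestamp) and kicks the DMA via the tail pointer.
 */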
3127 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3128 {
3129 struct stmmac_priv *priv = netdev_priv(dev);
3130 unsigned int nopaged_len = skb_headlen(skb);
3131 int i, csum_insertion = 0, is_jumbo = 0;
3132 u32 queue = skb_get_queue_mapping(skb);
3133 int nfrags = skb_shinfo(skb)->nr_frags;
3134 struct dma_desc *desc, *first;
3135 struct stmmac_tx_queue *tx_q;
3136 unsigned int first_entry;
3137 unsigned int enh_desc;
3138 dma_addr_t des;
3139 bool has_vlan;
3140 int entry;
3141
3142 tx_q = &priv->tx_queue[queue];
3143
3144 if (priv->tx_path_in_lpi_mode)
3145 stmmac_disable_eee_mode(priv);
3146
3147
3148 if (skb_is_gso(skb) && priv->tso) {
3149 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3150 return stmmac_tso_xmit(skb, dev);
3151 }
3152
3153 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3154 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3155 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3156 queue));
3157
3158 netdev_err(priv->dev,
3159 "%s: Tx Ring full when queue awake\n",
3160 __func__);
3161 }
3162 return NETDEV_TX_BUSY;
3163 }
3164
3165
3166 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3167
3168 entry = tx_q->cur_tx;
3169 first_entry = entry;
3170 WARN_ON(tx_q->tx_skbuff[first_entry]);
3171
3172 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3173
3174 if (likely(priv->extend_desc))
3175 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3176 else
3177 desc = tx_q->dma_tx + entry;
3178
3179 first = desc;
3180
3181 if (has_vlan)
3182 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3183
3184 enh_desc = priv->plat->enh_desc;
3185
3186 if (enh_desc)
3187 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3188
3189 if (unlikely(is_jumbo)) {
3190 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3191 if (unlikely(entry < 0) && (entry != -EINVAL))
3192 goto dma_map_err;
3193 }
3194
3195 for (i = 0; i < nfrags; i++) {
3196 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3197 int len = skb_frag_size(frag);
3198 bool last_segment = (i == (nfrags - 1));
3199
3200 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3201 WARN_ON(tx_q->tx_skbuff[entry]);
3202
3203 if (likely(priv->extend_desc))
3204 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3205 else
3206 desc = tx_q->dma_tx + entry;
3207
3208 des = skb_frag_dma_map(priv->device, frag, 0, len,
3209 DMA_TO_DEVICE);
3210 if (dma_mapping_error(priv->device, des))
3211 goto dma_map_err;
3212
3213 tx_q->tx_skbuff_dma[entry].buf = des;
3214
3215 stmmac_set_desc_addr(priv, desc, des);
3216
3217 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3218 tx_q->tx_skbuff_dma[entry].len = len;
3219 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3220
3221
3222 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3223 priv->mode, 1, last_segment, skb->len);
3224 }
3225
3226
3227 tx_q->tx_skbuff[entry] = skb;
3228
3229
3230
3231
3232
3233
3234 tx_q->tx_count_frames += nfrags + 1;
3235 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3236 !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3237 priv->hwts_tx_en)) {
3238 stmmac_tx_timer_arm(priv, queue);
3239 } else {
3240 if (likely(priv->extend_desc))
3241 desc = &tx_q->dma_etx[entry].basic;
3242 else
3243 desc = &tx_q->dma_tx[entry];
3244
3245 tx_q->tx_count_frames = 0;
3246 stmmac_set_tx_ic(priv, desc);
3247 priv->xstats.tx_set_ic_bit++;
3248 }
3249
3250
3251
3252
3253
3254
3255 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3256 tx_q->cur_tx = entry;
3257
3258 if (netif_msg_pktdata(priv)) {
3259 void *tx_head;
3260
3261 netdev_dbg(priv->dev,
3262 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3263 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3264 entry, first, nfrags);
3265
3266 if (priv->extend_desc)
3267 tx_head = (void *)tx_q->dma_etx;
3268 else
3269 tx_head = (void *)tx_q->dma_tx;
3270
3271 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3272
3273 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3274 print_pkt(skb->data, skb->len);
3275 }
3276
3277 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3278 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3279 __func__);
3280 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3281 }
3282
3283 dev->stats.tx_bytes += skb->len;
3284
3285 if (priv->sarc_type)
3286 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3287
3288 skb_tx_timestamp(skb);
3289
3290
3291
3292
3293
3294 if (likely(!is_jumbo)) {
3295 bool last_segment = (nfrags == 0);
3296
3297 des = dma_map_single(priv->device, skb->data,
3298 nopaged_len, DMA_TO_DEVICE);
3299 if (dma_mapping_error(priv->device, des))
3300 goto dma_map_err;
3301
3302 tx_q->tx_skbuff_dma[first_entry].buf = des;
3303
3304 stmmac_set_desc_addr(priv, first, des);
3305
3306 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3307 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3308
3309 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3310 priv->hwts_tx_en)) {
3311
3312 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3313 stmmac_enable_tx_timestamp(priv, first);
3314 }
3315
3316
3317 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3318 csum_insertion, priv->mode, 1, last_segment,
3319 skb->len);
3320 } else {
3321 stmmac_set_tx_owner(priv, first);
3322 }
3323
3324
3325
3326
3327
3328 wmb();
3329
3330 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3331
3332 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3333
3334 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3335 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3336 stmmac_tx_timer_arm(priv, queue);
3337
3338 return NETDEV_TX_OK;
3339
3340 dma_map_err:
3341 netdev_err(priv->dev, "Tx DMA map failed\n");
3342 dev_kfree_skb(skb);
3343 priv->dev->stats.tx_dropped++;
3344 return NETDEV_TX_OK;
3345 }
3346
3347 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3348 {
3349 struct vlan_ethhdr *veth;
3350 __be16 vlan_proto;
3351 u16 vlanid;
3352
3353 veth = (struct vlan_ethhdr *)skb->data;
3354 vlan_proto = veth->h_vlan_proto;
3355
3356 if ((vlan_proto == htons(ETH_P_8021Q) &&
3357 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3358 (vlan_proto == htons(ETH_P_8021AD) &&
3359 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3360
3361 vlanid = ntohs(veth->h_vlan_TCI);
3362 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3363 skb_pull(skb, VLAN_HLEN);
3364 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3365 }
3366 }
3367
3368
3369 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3370 {
3371 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3372 return 0;
3373
3374 return 1;
3375 }
3376
3377
3378
3379
3380
3381
3382
3383
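/**
 * stmmac_rx_refill - refill the RX ring
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: re-allocate page-pool buffers (and the optional second
 * page used for split header) for the dirty RX descriptors, program
 * the buffer addresses, give ownership back to the DMA and advance the
 * RX tail pointer.
 */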
3384 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3385 {
3386 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3387 int len, dirty = stmmac_rx_dirty(priv, queue);
3388 unsigned int entry = rx_q->dirty_rx;
3389
3390 len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
3391
3392 while (dirty-- > 0) {
3393 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3394 struct dma_desc *p;
3395 bool use_rx_wd;
3396
3397 if (priv->extend_desc)
3398 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3399 else
3400 p = rx_q->dma_rx + entry;
3401
3402 if (!buf->page) {
3403 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
3404 if (!buf->page)
3405 break;
3406 }
3407
3408 if (priv->sph && !buf->sec_page) {
3409 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
3410 if (!buf->sec_page)
3411 break;
3412
3413 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3414
3415 dma_sync_single_for_device(priv->device, buf->sec_addr,
3416 len, DMA_FROM_DEVICE);
3417 }
3418
3419 buf->addr = page_pool_get_dma_addr(buf->page);
3420
3421
3422
3423
3424 dma_sync_single_for_device(priv->device, buf->addr, len,
3425 DMA_FROM_DEVICE);
3426
3427 stmmac_set_desc_addr(priv, p, buf->addr);
3428 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
3429 stmmac_refill_desc3(priv, rx_q, p);
3430
3431 rx_q->rx_count_frames++;
3432 rx_q->rx_count_frames += priv->rx_coal_frames;
3433 if (rx_q->rx_count_frames > priv->rx_coal_frames)
3434 rx_q->rx_count_frames = 0;
3435 use_rx_wd = priv->use_riwt && rx_q->rx_count_frames;
3436
3437 dma_wmb();
3438 stmmac_set_rx_owner(priv, p, use_rx_wd);
3439
3440 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3441 }
3442 rx_q->dirty_rx = entry;
3443 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3444 (rx_q->dirty_rx * sizeof(struct dma_desc));
3445 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3446 }
3447
3448
3449
3450
3451
3452
3453
3454
3455
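/**
 * stmmac_rx - receive the frames from the remote host
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index
 * Description: walk the RX ring up to @limit descriptors, build the
 * skbs (split-header aware when SPH is enabled), strip FCS and VLAN
 * tags, set checksum, hash and timestamp info and hand the frames to
 * the GRO layer; partial frames are saved and resumed on the next poll.
 * Return: the number of descriptors processed.
 */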
3456 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3457 {
3458 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3459 struct stmmac_channel *ch = &priv->channel[queue];
3460 unsigned int count = 0, error = 0, len = 0;
3461 int status = 0, coe = priv->hw->rx_csum;
3462 unsigned int next_entry = rx_q->cur_rx;
3463 struct sk_buff *skb = NULL;
3464
3465 if (netif_msg_rx_status(priv)) {
3466 void *rx_head;
3467
3468 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3469 if (priv->extend_desc)
3470 rx_head = (void *)rx_q->dma_erx;
3471 else
3472 rx_head = (void *)rx_q->dma_rx;
3473
3474 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3475 }
3476 while (count < limit) {
3477 unsigned int hlen = 0, prev_len = 0;
3478 enum pkt_hash_types hash_type;
3479 struct stmmac_rx_buffer *buf;
3480 struct dma_desc *np, *p;
3481 unsigned int sec_len;
3482 int entry;
3483 u32 hash;
3484
3485 if (!count && rx_q->state_saved) {
3486 skb = rx_q->state.skb;
3487 error = rx_q->state.error;
3488 len = rx_q->state.len;
3489 } else {
3490 rx_q->state_saved = false;
3491 skb = NULL;
3492 error = 0;
3493 len = 0;
3494 }
3495
3496 if (count >= limit)
3497 break;
3498
3499 read_again:
3500 sec_len = 0;
3501 entry = next_entry;
3502 buf = &rx_q->buf_pool[entry];
3503
3504 if (priv->extend_desc)
3505 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3506 else
3507 p = rx_q->dma_rx + entry;
3508
3509
3510 status = stmmac_rx_status(priv, &priv->dev->stats,
3511 &priv->xstats, p);
3512
3513 if (unlikely(status & dma_own))
3514 break;
3515
3516 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3517 next_entry = rx_q->cur_rx;
3518
3519 if (priv->extend_desc)
3520 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3521 else
3522 np = rx_q->dma_rx + next_entry;
3523
3524 prefetch(np);
3525 prefetch(page_address(buf->page));
3526
3527 if (priv->extend_desc)
3528 stmmac_rx_extended_status(priv, &priv->dev->stats,
3529 &priv->xstats, rx_q->dma_erx + entry);
3530 if (unlikely(status == discard_frame)) {
3531 page_pool_recycle_direct(rx_q->page_pool, buf->page);
3532 buf->page = NULL;
3533 error = 1;
3534 if (!priv->hwts_rx_en)
3535 priv->dev->stats.rx_errors++;
3536 }
3537
3538 if (unlikely(error && (status & rx_not_ls)))
3539 goto read_again;
3540 if (unlikely(error)) {
3541 dev_kfree_skb(skb);
3542 count++;
3543 continue;
3544 }
3545
3546
3547
3548 if (likely(status & rx_not_ls)) {
3549 len += priv->dma_buf_sz;
3550 } else {
3551 prev_len = len;
3552 len = stmmac_get_rx_frame_len(priv, p, coe);
3553
3554
3555
3556
3557
3558
3559
3560
3561 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3562 unlikely(status != llc_snap))
3563 len -= ETH_FCS_LEN;
3564 }
3565
3566 if (!skb) {
3567 int ret = stmmac_get_rx_header_len(priv, p, &hlen);
3568
3569 if (priv->sph && !ret && (hlen > 0)) {
3570 sec_len = len;
3571 if (!(status & rx_not_ls))
3572 sec_len = sec_len - hlen;
3573 len = hlen;
3574
3575 prefetch(page_address(buf->sec_page));
3576 priv->xstats.rx_split_hdr_pkt_n++;
3577 }
3578
3579 skb = napi_alloc_skb(&ch->rx_napi, len);
3580 if (!skb) {
3581 priv->dev->stats.rx_dropped++;
3582 count++;
3583 continue;
3584 }
3585
3586 dma_sync_single_for_cpu(priv->device, buf->addr, len,
3587 DMA_FROM_DEVICE);
3588 skb_copy_to_linear_data(skb, page_address(buf->page),
3589 len);
3590 skb_put(skb, len);
3591
3592
3593 page_pool_recycle_direct(rx_q->page_pool, buf->page);
3594 buf->page = NULL;
3595 } else {
3596 unsigned int buf_len = len - prev_len;
3597
3598 if (likely(status & rx_not_ls))
3599 buf_len = priv->dma_buf_sz;
3600
3601 dma_sync_single_for_cpu(priv->device, buf->addr,
3602 buf_len, DMA_FROM_DEVICE);
3603 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3604 buf->page, 0, buf_len,
3605 priv->dma_buf_sz);
3606
3607
3608 page_pool_release_page(rx_q->page_pool, buf->page);
3609 buf->page = NULL;
3610 }
3611
3612 if (sec_len > 0) {
3613 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
3614 sec_len, DMA_FROM_DEVICE);
3615 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3616 buf->sec_page, 0, sec_len,
3617 priv->dma_buf_sz);
3618
3619 len += sec_len;
3620
3621
3622 page_pool_release_page(rx_q->page_pool, buf->sec_page);
3623 buf->sec_page = NULL;
3624 }
3625
3626 if (likely(status & rx_not_ls))
3627 goto read_again;
3628
3629
3630
3631 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3632 stmmac_rx_vlan(priv->dev, skb);
3633 skb->protocol = eth_type_trans(skb, priv->dev);
3634
3635 if (unlikely(!coe))
3636 skb_checksum_none_assert(skb);
3637 else
3638 skb->ip_summed = CHECKSUM_UNNECESSARY;
3639
3640 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
3641 skb_set_hash(skb, hash, hash_type);
3642
3643 skb_record_rx_queue(skb, queue);
3644 napi_gro_receive(&ch->rx_napi, skb);
3645
3646 priv->dev->stats.rx_packets++;
3647 priv->dev->stats.rx_bytes += len;
3648 count++;
3649 }
3650
3651 if (status & rx_not_ls) {
3652 rx_q->state_saved = true;
3653 rx_q->state.skb = skb;
3654 rx_q->state.error = error;
3655 rx_q->state.len = len;
3656 }
3657
3658 stmmac_rx_refill(priv, queue);
3659
3660 priv->xstats.rx_pkt_n += count;
3661
3662 return count;
3663 }
3664
3665 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3666 {
3667 struct stmmac_channel *ch =
3668 container_of(napi, struct stmmac_channel, rx_napi);
3669 struct stmmac_priv *priv = ch->priv_data;
3670 u32 chan = ch->index;
3671 int work_done;
3672
3673 priv->xstats.napi_poll++;
3674
3675 work_done = stmmac_rx(priv, budget, chan);
3676 if (work_done < budget && napi_complete_done(napi, work_done))
3677 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3678 return work_done;
3679 }
3680
3681 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3682 {
3683 struct stmmac_channel *ch =
3684 container_of(napi, struct stmmac_channel, tx_napi);
3685 struct stmmac_priv *priv = ch->priv_data;
3686 struct stmmac_tx_queue *tx_q;
3687 u32 chan = ch->index;
3688 int work_done;
3689
3690 priv->xstats.napi_poll++;
3691
3692 work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3693 work_done = min(work_done, budget);
3694
3695 if (work_done < budget)
3696 napi_complete_done(napi, work_done);
3697
3698
3699 tx_q = &priv->tx_queue[chan];
3700 if (tx_q->cur_tx != tx_q->dirty_tx) {
3701 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3702 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3703 chan);
3704 }
3705
3706 return work_done;
3707 }
3708
3709
3710
3711
3712
3713
3714
3715
3716
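/**
 * stmmac_tx_timeout
 * @dev: device pointer
 * Description: called by the networking core when a TX queue appears
 * hung; it invokes stmmac_global_err() to trigger the driver's
 * reset/service handling.
 */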
3717 static void stmmac_tx_timeout(struct net_device *dev)
3718 {
3719 struct stmmac_priv *priv = netdev_priv(dev);
3720
3721 stmmac_global_err(priv);
3722 }
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733 static void stmmac_set_rx_mode(struct net_device *dev)
3734 {
3735 struct stmmac_priv *priv = netdev_priv(dev);
3736
3737 stmmac_set_filter(priv, priv->hw, dev);
3738 }
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
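/**
 * stmmac_change_mtu - entry point to change MTU size for the device.
 * @dev: device pointer
 * @new_mtu: the new MTU size for the device
 * Description: the interface must be stopped; the new MTU is validated
 * against the per-queue TX FIFO size and the 16KiB buffer limit before
 * being applied.
 * Return: 0 on success, -EBUSY if the device is running, -EINVAL if
 * the MTU is out of range.
 */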
3751 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3752 {
3753 struct stmmac_priv *priv = netdev_priv(dev);
3754 int txfifosz = priv->plat->tx_fifo_size;
3755
3756 if (txfifosz == 0)
3757 txfifosz = priv->dma_cap.tx_fifo_size;
3758
3759 txfifosz /= priv->plat->tx_queues_to_use;
3760
3761 if (netif_running(dev)) {
3762 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3763 return -EBUSY;
3764 }
3765
3766 new_mtu = STMMAC_ALIGN(new_mtu);
3767
3768
3769 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
3770 return -EINVAL;
3771
3772 dev->mtu = new_mtu;
3773
3774 netdev_update_features(dev);
3775
3776 return 0;
3777 }
3778
3779 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3780 netdev_features_t features)
3781 {
3782 struct stmmac_priv *priv = netdev_priv(dev);
3783
3784 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3785 features &= ~NETIF_F_RXCSUM;
3786
3787 if (!priv->plat->tx_coe)
3788 features &= ~NETIF_F_CSUM_MASK;
3789
3790
3791
3792
3793
3794
3795 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3796 features &= ~NETIF_F_CSUM_MASK;
3797
3798
3799 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3800 if (features & NETIF_F_TSO)
3801 priv->tso = true;
3802 else
3803 priv->tso = false;
3804 }
3805
3806 return features;
3807 }
3808
3809 static int stmmac_set_features(struct net_device *netdev,
3810 netdev_features_t features)
3811 {
3812 struct stmmac_priv *priv = netdev_priv(netdev);
3813 bool sph_en;
3814 u32 chan;
3815
3816
3817 if (features & NETIF_F_RXCSUM)
3818 priv->hw->rx_csum = priv->plat->rx_coe;
3819 else
3820 priv->hw->rx_csum = 0;
3821
3822
3823
3824 stmmac_rx_ipc(priv, priv->hw);
3825
3826 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3827 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
3828 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3829
3830 return 0;
3831 }
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
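/**
 * stmmac_interrupt - main ISR
 * @irq: interrupt number
 * @dev_id: to pass the net device pointer
 * Description: it checks device-level events (safety features, LPI
 * state, MTL queue status, PCS link) and then calls
 * stmmac_dma_interrupt() to handle the per-channel DMA events.
 */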
3844 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3845 {
3846 struct net_device *dev = (struct net_device *)dev_id;
3847 struct stmmac_priv *priv = netdev_priv(dev);
3848 u32 rx_cnt = priv->plat->rx_queues_to_use;
3849 u32 tx_cnt = priv->plat->tx_queues_to_use;
3850 u32 queues_count;
3851 u32 queue;
3852 bool xmac;
3853
3854 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3855 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3856
3857 if (priv->irq_wake)
3858 pm_wakeup_event(priv->device, 0);
3859
3860
3861 if (test_bit(STMMAC_DOWN, &priv->state))
3862 return IRQ_HANDLED;
3863
3864 if (stmmac_safety_feat_interrupt(priv))
3865 return IRQ_HANDLED;
3866
3867
3868 if ((priv->plat->has_gmac) || xmac) {
3869 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3870 int mtl_status;
3871
3872 if (unlikely(status)) {
3873
3874 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3875 priv->tx_path_in_lpi_mode = true;
3876 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3877 priv->tx_path_in_lpi_mode = false;
3878 }
3879
3880 for (queue = 0; queue < queues_count; queue++) {
3881 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3882
3883 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3884 queue);
3885 if (mtl_status != -EINVAL)
3886 status |= mtl_status;
3887
3888 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3889 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3890 rx_q->rx_tail_addr,
3891 queue);
3892 }
3893
3894
3895 if (priv->hw->pcs) {
3896 if (priv->xstats.pcs_link)
3897 netif_carrier_on(dev);
3898 else
3899 netif_carrier_off(dev);
3900 }
3901 }
3902
3903
3904 stmmac_dma_interrupt(priv);
3905
3906 return IRQ_HANDLED;
3907 }
3908
3909 #ifdef CONFIG_NET_POLL_CONTROLLER
3910
3911
3912
3913 static void stmmac_poll_controller(struct net_device *dev)
3914 {
3915 disable_irq(dev->irq);
3916 stmmac_interrupt(dev->irq, dev);
3917 enable_irq(dev->irq);
3918 }
3919 #endif
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
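/**
 * stmmac_ioctl - entry point for the ioctl
 * @dev: device pointer
 * @rq: an IOCTL specific structure
 * @cmd: IOCTL command
 * Description: currently it supports the MII ioctls (through phylink)
 * and the HW timestamping SIOCSHWTSTAMP/SIOCGHWTSTAMP commands.
 */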
3930 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3931 {
3932 struct stmmac_priv *priv = netdev_priv(dev);
3933 int ret = -EOPNOTSUPP;
3934
3935 if (!netif_running(dev))
3936 return -EINVAL;
3937
3938 switch (cmd) {
3939 case SIOCGMIIPHY:
3940 case SIOCGMIIREG:
3941 case SIOCSMIIREG:
3942 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
3943 break;
3944 case SIOCSHWTSTAMP:
3945 ret = stmmac_hwtstamp_set(dev, rq);
3946 break;
3947 case SIOCGHWTSTAMP:
3948 ret = stmmac_hwtstamp_get(dev, rq);
3949 break;
3950 default:
3951 break;
3952 }
3953
3954 return ret;
3955 }
3956
3957 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3958 void *cb_priv)
3959 {
3960 struct stmmac_priv *priv = cb_priv;
3961 int ret = -EOPNOTSUPP;
3962
3963 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
3964 return ret;
3965
3966 stmmac_disable_all_queues(priv);
3967
3968 switch (type) {
3969 case TC_SETUP_CLSU32:
3970 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3971 break;
3972 case TC_SETUP_CLSFLOWER:
3973 ret = stmmac_tc_setup_cls(priv, priv, type_data);
3974 break;
3975 default:
3976 break;
3977 }
3978
3979 stmmac_enable_all_queues(priv);
3980 return ret;
3981 }
3982
3983 static LIST_HEAD(stmmac_block_cb_list);
3984
3985 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3986 void *type_data)
3987 {
3988 struct stmmac_priv *priv = netdev_priv(ndev);
3989
3990 switch (type) {
3991 case TC_SETUP_BLOCK:
3992 return flow_block_cb_setup_simple(type_data,
3993 &stmmac_block_cb_list,
3994 stmmac_setup_tc_block_cb,
3995 priv, priv, true);
3996 case TC_SETUP_QDISC_CBS:
3997 return stmmac_tc_setup_cbs(priv, priv, type_data);
3998 default:
3999 return -EOPNOTSUPP;
4000 }
4001 }
4002
4003 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
4004 struct net_device *sb_dev)
4005 {
4006 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4007
4008
4009
4010
4011
4012
4013 return 0;
4014 }
4015
4016 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
4017 }
4018
4019 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4020 {
4021 struct stmmac_priv *priv = netdev_priv(ndev);
4022 int ret = 0;
4023
4024 ret = eth_mac_addr(ndev, addr);
4025 if (ret)
4026 return ret;
4027
4028 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4029
4030 return ret;
4031 }
4032
4033 #ifdef CONFIG_DEBUG_FS
4034 static struct dentry *stmmac_fs_dir;
4035
4036 static void sysfs_display_ring(void *head, int size, int extend_desc,
4037 struct seq_file *seq)
4038 {
4039 int i;
4040 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4041 struct dma_desc *p = (struct dma_desc *)head;
4042
4043 for (i = 0; i < size; i++) {
4044 if (extend_desc) {
4045 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4046 i, (unsigned int)virt_to_phys(ep),
4047 le32_to_cpu(ep->basic.des0),
4048 le32_to_cpu(ep->basic.des1),
4049 le32_to_cpu(ep->basic.des2),
4050 le32_to_cpu(ep->basic.des3));
4051 ep++;
4052 } else {
4053 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4054 i, (unsigned int)virt_to_phys(p),
4055 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4056 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4057 p++;
4058 }
4059 seq_printf(seq, "\n");
4060 }
4061 }
4062
4063 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4064 {
4065 struct net_device *dev = seq->private;
4066 struct stmmac_priv *priv = netdev_priv(dev);
4067 u32 rx_count = priv->plat->rx_queues_to_use;
4068 u32 tx_count = priv->plat->tx_queues_to_use;
4069 u32 queue;
4070
4071 if ((dev->flags & IFF_UP) == 0)
4072 return 0;
4073
4074 for (queue = 0; queue < rx_count; queue++) {
4075 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4076
4077 seq_printf(seq, "RX Queue %d:\n", queue);
4078
4079 if (priv->extend_desc) {
4080 seq_printf(seq, "Extended descriptor ring:\n");
4081 sysfs_display_ring((void *)rx_q->dma_erx,
4082 DMA_RX_SIZE, 1, seq);
4083 } else {
4084 seq_printf(seq, "Descriptor ring:\n");
4085 sysfs_display_ring((void *)rx_q->dma_rx,
4086 DMA_RX_SIZE, 0, seq);
4087 }
4088 }
4089
4090 for (queue = 0; queue < tx_count; queue++) {
4091 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4092
4093 seq_printf(seq, "TX Queue %d:\n", queue);
4094
4095 if (priv->extend_desc) {
4096 seq_printf(seq, "Extended descriptor ring:\n");
4097 sysfs_display_ring((void *)tx_q->dma_etx,
4098 DMA_TX_SIZE, 1, seq);
4099 } else {
4100 seq_printf(seq, "Descriptor ring:\n");
4101 sysfs_display_ring((void *)tx_q->dma_tx,
4102 DMA_TX_SIZE, 0, seq);
4103 }
4104 }
4105
4106 return 0;
4107 }
4108 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
4109
4110 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4111 {
4112 struct net_device *dev = seq->private;
4113 struct stmmac_priv *priv = netdev_priv(dev);
4114
4115 if (!priv->hw_cap_support) {
4116 seq_printf(seq, "DMA HW features not supported\n");
4117 return 0;
4118 }
4119
4120 seq_printf(seq, "==============================\n");
4121 seq_printf(seq, "\tDMA HW features\n");
4122 seq_printf(seq, "==============================\n");
4123
4124 seq_printf(seq, "\t10/100 Mbps: %s\n",
4125 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
4126 seq_printf(seq, "\t1000 Mbps: %s\n",
4127 (priv->dma_cap.mbps_1000) ? "Y" : "N");
4128 seq_printf(seq, "\tHalf duplex: %s\n",
4129 (priv->dma_cap.half_duplex) ? "Y" : "N");
4130 seq_printf(seq, "\tHash Filter: %s\n",
4131 (priv->dma_cap.hash_filter) ? "Y" : "N");
4132 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4133 (priv->dma_cap.multi_addr) ? "Y" : "N");
4134 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4135 (priv->dma_cap.pcs) ? "Y" : "N");
4136 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4137 (priv->dma_cap.sma_mdio) ? "Y" : "N");
4138 seq_printf(seq, "\tPMT Remote wake up: %s\n",
4139 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4140 seq_printf(seq, "\tPMT Magic Frame: %s\n",
4141 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4142 seq_printf(seq, "\tRMON module: %s\n",
4143 (priv->dma_cap.rmon) ? "Y" : "N");
4144 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4145 (priv->dma_cap.time_stamp) ? "Y" : "N");
4146 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4147 (priv->dma_cap.atime_stamp) ? "Y" : "N");
4148 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4149 (priv->dma_cap.eee) ? "Y" : "N");
4150 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4151 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4152 (priv->dma_cap.tx_coe) ? "Y" : "N");
4153 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4154 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4155 (priv->dma_cap.rx_coe) ? "Y" : "N");
4156 } else {
4157 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4158 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4159 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4160 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4161 }
4162 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4163 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4164 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4165 priv->dma_cap.number_rx_channel);
4166 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4167 priv->dma_cap.number_tx_channel);
4168 seq_printf(seq, "\tEnhanced descriptors: %s\n",
4169 (priv->dma_cap.enh_desc) ? "Y" : "N");
4170
4171 return 0;
4172 }
4173 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4174
4175
4176
4177 static int stmmac_device_event(struct notifier_block *unused,
4178 unsigned long event, void *ptr)
4179 {
4180 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4181 struct stmmac_priv *priv = netdev_priv(dev);
4182
4183 if (dev->netdev_ops != &stmmac_netdev_ops)
4184 goto done;
4185
4186 switch (event) {
4187 case NETDEV_CHANGENAME:
4188 if (priv->dbgfs_dir)
4189 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4190 priv->dbgfs_dir,
4191 stmmac_fs_dir,
4192 dev->name);
4193 break;
4194 }
4195 done:
4196 return NOTIFY_DONE;
4197 }
4198
4199 static struct notifier_block stmmac_notifier = {
4200 .notifier_call = stmmac_device_event,
4201 };
4202
4203 static void stmmac_init_fs(struct net_device *dev)
4204 {
4205 struct stmmac_priv *priv = netdev_priv(dev);
4206
4207 rtnl_lock();
4208
4209
4210 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4211
4212
4213 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
4214 &stmmac_rings_status_fops);
4215
4216
4217 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4218 &stmmac_dma_cap_fops);
4219
4220 rtnl_unlock();
4221 }
4222
4223 static void stmmac_exit_fs(struct net_device *dev)
4224 {
4225 struct stmmac_priv *priv = netdev_priv(dev);
4226
4227 debugfs_remove_recursive(priv->dbgfs_dir);
4228 }
4229 #endif
4230
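/*
 * Compute the CRC-32 (reflected polynomial 0xedb88320) over the 12
 * significant bits of the little-endian VLAN ID; the result is used by
 * stmmac_vlan_update() to build the VLAN hash filter.
 */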
4231 static u32 stmmac_vid_crc32_le(__le16 vid_le)
4232 {
4233 unsigned char *data = (unsigned char *)&vid_le;
4234 unsigned char data_byte = 0;
4235 u32 crc = ~0x0;
4236 u32 temp = 0;
4237 int i, bits;
4238
4239 bits = get_bitmask_order(VLAN_VID_MASK);
4240 for (i = 0; i < bits; i++) {
4241 if ((i % 8) == 0)
4242 data_byte = data[i / 8];
4243
4244 temp = ((crc & 1) ^ data_byte) & 1;
4245 crc >>= 1;
4246 data_byte >>= 1;
4247
4248 if (temp)
4249 crc ^= 0xedb88320;
4250 }
4251
4252 return crc;
4253 }
4254
4255 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
4256 {
4257 u32 crc, hash = 0;
4258 u16 vid;
4259
4260 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
4261 __le16 vid_le = cpu_to_le16(vid);
4262 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
4263 hash |= (1 << crc);
4264 }
4265
4266 return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
4267 }
4268
4269 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
4270 {
4271 struct stmmac_priv *priv = netdev_priv(ndev);
4272 bool is_double = false;
4273 int ret;
4274
4275 if (!priv->dma_cap.vlhash)
4276 return -EOPNOTSUPP;
4277 if (be16_to_cpu(proto) == ETH_P_8021AD)
4278 is_double = true;
4279
4280 set_bit(vid, priv->active_vlans);
4281 ret = stmmac_vlan_update(priv, is_double);
4282 if (ret) {
4283 clear_bit(vid, priv->active_vlans);
4284 return ret;
4285 }
4286
4287 return ret;
4288 }
4289
4290 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
4291 {
4292 struct stmmac_priv *priv = netdev_priv(ndev);
4293 bool is_double = false;
4294
4295 if (!priv->dma_cap.vlhash)
4296 return -EOPNOTSUPP;
4297 if (be16_to_cpu(proto) == ETH_P_8021AD)
4298 is_double = true;
4299
4300 clear_bit(vid, priv->active_vlans);
4301 return stmmac_vlan_update(priv, is_double);
4302 }
4303
4304 static const struct net_device_ops stmmac_netdev_ops = {
4305 .ndo_open = stmmac_open,
4306 .ndo_start_xmit = stmmac_xmit,
4307 .ndo_stop = stmmac_release,
4308 .ndo_change_mtu = stmmac_change_mtu,
4309 .ndo_fix_features = stmmac_fix_features,
4310 .ndo_set_features = stmmac_set_features,
4311 .ndo_set_rx_mode = stmmac_set_rx_mode,
4312 .ndo_tx_timeout = stmmac_tx_timeout,
4313 .ndo_do_ioctl = stmmac_ioctl,
4314 .ndo_setup_tc = stmmac_setup_tc,
4315 .ndo_select_queue = stmmac_select_queue,
4316 #ifdef CONFIG_NET_POLL_CONTROLLER
4317 .ndo_poll_controller = stmmac_poll_controller,
4318 #endif
4319 .ndo_set_mac_address = stmmac_set_mac_address,
4320 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
4321 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
4322 };
4323
4324 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4325 {
4326 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4327 return;
4328 if (test_bit(STMMAC_DOWN, &priv->state))
4329 return;
4330
4331 netdev_err(priv->dev, "Reset adapter.\n");
4332
4333 rtnl_lock();
4334 netif_trans_update(priv->dev);
4335 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4336 usleep_range(1000, 2000);
4337
4338 set_bit(STMMAC_DOWN, &priv->state);
4339 dev_close(priv->dev);
4340 dev_open(priv->dev, NULL);
4341 clear_bit(STMMAC_DOWN, &priv->state);
4342 clear_bit(STMMAC_RESETING, &priv->state);
4343 rtnl_unlock();
4344 }
4345
4346 static void stmmac_service_task(struct work_struct *work)
4347 {
4348 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4349 service_task);
4350
4351 stmmac_reset_subtask(priv);
4352 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4353 }
4354
4355
4356
4357
4358
4359
4360
4361
4362
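/**
 * stmmac_hw_init - init the MAC device
 * @priv: driver private structure
 * Description: set up the HW interface callbacks, read the HW
 * capability register when available and derive the enhanced
 * descriptor, PMT, checksum offload and RX watchdog settings from it,
 * then run the optional per-platform quirks.
 * Return: 0 on success, a negative errno otherwise.
 */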
4363 static int stmmac_hw_init(struct stmmac_priv *priv)
4364 {
4365 int ret;
4366
4367
4368 if (priv->plat->has_sun8i)
4369 chain_mode = 1;
4370 priv->chain_mode = chain_mode;
4371
4372
4373 ret = stmmac_hwif_init(priv);
4374 if (ret)
4375 return ret;
4376
4377
4378 priv->hw_cap_support = stmmac_get_hw_features(priv);
4379 if (priv->hw_cap_support) {
4380 dev_info(priv->device, "DMA HW capability register supported\n");
4381
4382
4383
4384
4385
4386
4387 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4388 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4389 priv->hw->pmt = priv->plat->pmt;
4390 if (priv->dma_cap.hash_tb_sz) {
4391 priv->hw->multicast_filter_bins =
4392 (BIT(priv->dma_cap.hash_tb_sz) << 5);
4393 priv->hw->mcast_bits_log2 =
4394 ilog2(priv->hw->multicast_filter_bins);
4395 }
4396
4397
4398 if (priv->plat->force_thresh_dma_mode)
4399 priv->plat->tx_coe = 0;
4400 else
4401 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4402
4403
4404 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4405
4406 if (priv->dma_cap.rx_coe_type2)
4407 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4408 else if (priv->dma_cap.rx_coe_type1)
4409 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4410
4411 } else {
4412 dev_info(priv->device, "No HW DMA feature register supported\n");
4413 }
4414
4415 if (priv->plat->rx_coe) {
4416 priv->hw->rx_csum = priv->plat->rx_coe;
4417 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4418 if (priv->synopsys_id < DWMAC_CORE_4_00)
4419 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4420 }
4421 if (priv->plat->tx_coe)
4422 dev_info(priv->device, "TX Checksum insertion supported\n");
4423
4424 if (priv->plat->pmt) {
4425 dev_info(priv->device, "Wake-Up On Lan supported\n");
4426 device_set_wakeup_capable(priv->device, 1);
4427 }
4428
4429 if (priv->dma_cap.tsoen)
4430 dev_info(priv->device, "TSO supported\n");
4431
4432
4433 if (priv->hwif_quirks) {
4434 ret = priv->hwif_quirks(priv);
4435 if (ret)
4436 return ret;
4437 }
4438
4439
4440
4441
4442
4443
4444 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4445 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4446 priv->use_riwt = 1;
4447 dev_info(priv->device,
4448 "Enable RX Mitigation via HW Watchdog Timer\n");
4449 }
4450
4451 return 0;
4452 }
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462
4463
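/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: main probe function: allocate the net_device, fill the
 * driver private structure, create the service workqueue, reset and
 * initialize the HW, set up the netdev features and MTU limits and add
 * the per-channel NAPI instances.
 * Return: 0 on success, a negative errno on failure.
 */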
4464 int stmmac_dvr_probe(struct device *device,
4465 struct plat_stmmacenet_data *plat_dat,
4466 struct stmmac_resources *res)
4467 {
4468 struct net_device *ndev = NULL;
4469 struct stmmac_priv *priv;
4470 u32 queue, rxq, maxq;
4471 int i, ret = 0;
4472
4473 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
4474 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
4475 if (!ndev)
4476 return -ENOMEM;
4477
4478 SET_NETDEV_DEV(ndev, device);
4479
4480 priv = netdev_priv(ndev);
4481 priv->device = device;
4482 priv->dev = ndev;
4483
4484 stmmac_set_ethtool_ops(ndev);
4485 priv->pause = pause;
4486 priv->plat = plat_dat;
4487 priv->ioaddr = res->addr;
4488 priv->dev->base_addr = (unsigned long)res->addr;
4489
4490 priv->dev->irq = res->irq;
4491 priv->wol_irq = res->wol_irq;
4492 priv->lpi_irq = res->lpi_irq;
4493
4494 if (!IS_ERR_OR_NULL(res->mac))
4495 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4496
4497 dev_set_drvdata(device, priv->dev);
4498
4499
4500 stmmac_verify_args();
4501
4502
4503 priv->wq = create_singlethread_workqueue("stmmac_wq");
4504 if (!priv->wq) {
4505 dev_err(priv->device, "failed to create workqueue\n");
4506 return -ENOMEM;
4507 }
4508
4509 INIT_WORK(&priv->service_task, stmmac_service_task);
4510
4511
4512
4513
4514 if ((phyaddr >= 0) && (phyaddr <= 31))
4515 priv->plat->phy_addr = phyaddr;
4516
4517 if (priv->plat->stmmac_rst) {
4518 ret = reset_control_assert(priv->plat->stmmac_rst);
4519 reset_control_deassert(priv->plat->stmmac_rst);
4520
4521
4522
4523 if (ret == -ENOTSUPP)
4524 reset_control_reset(priv->plat->stmmac_rst);
4525 }
4526
4527
4528 ret = stmmac_hw_init(priv);
4529 if (ret)
4530 goto error_hw_init;
4531
4532 stmmac_check_ether_addr(priv);
4533
4534
4535 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4536 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4537
4538 ndev->netdev_ops = &stmmac_netdev_ops;
4539
4540 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4541 NETIF_F_RXCSUM;
4542
4543 ret = stmmac_tc_init(priv, priv);
4544 if (!ret) {
4545 ndev->hw_features |= NETIF_F_HW_TC;
4546 }
4547
4548 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4549 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4550 priv->tso = true;
4551 dev_info(priv->device, "TSO feature enabled\n");
4552 }
4553
4554 if (priv->dma_cap.sphen) {
4555 ndev->hw_features |= NETIF_F_GRO;
4556 priv->sph = true;
4557 dev_info(priv->device, "SPH feature enabled\n");
4558 }
4559
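/* Try the DMA addressing width advertised by the HW capability register;
 * if the platform cannot satisfy it, fall back to a 32-bit DMA mask and
 * record the narrower width in dma_cap.addr64.
 */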
4560 if (priv->dma_cap.addr64) {
4561 ret = dma_set_mask_and_coherent(device,
4562 DMA_BIT_MASK(priv->dma_cap.addr64));
4563 if (!ret) {
4564 dev_info(priv->device, "Using %d bits DMA width\n",
4565 priv->dma_cap.addr64);
4566 } else {
4567 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
4568 if (ret) {
4569 dev_err(priv->device, "Failed to set DMA Mask\n");
4570 goto error_hw_init;
4571 }
4572
4573 priv->dma_cap.addr64 = 32;
4574 }
4575 }
4576
4577 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4578 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4579 #ifdef STMMAC_VLAN_TAG_USED
4580 /* Both mac100 and gmac support receive VLAN tag detection */
4581 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4582 if (priv->dma_cap.vlhash) {
4583 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4584 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
4585 }
4586 if (priv->dma_cap.vlins) {
4587 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
4588 if (priv->dma_cap.dvlan)
4589 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
4590 }
4591 #endif
4592 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4593
4594 /* Initialize RSS */
4595 rxq = priv->plat->rx_queues_to_use;
4596 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
4597 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
4598 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
4599
4600 if (priv->dma_cap.rssen && priv->plat->rss_en)
4601 ndev->features |= NETIF_F_RXHASH;
4602
4603 /* MTU range: 46 - hw-specific max */
4604 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4605 if (priv->plat->has_xgmac)
4606 ndev->max_mtu = XGMAC_JUMBO_LEN;
4607 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4608 ndev->max_mtu = JUMBO_LEN;
4609 else
4610 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4611 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4612  * as well as plat->maxmtu < ndev->min_mtu, which is an invalid range.
4613  */
4614 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4615 (priv->plat->maxmtu >= ndev->min_mtu))
4616 ndev->max_mtu = priv->plat->maxmtu;
4617 else if (priv->plat->maxmtu < ndev->min_mtu)
4618 dev_warn(priv->device,
4619 "%s: warning: maxmtu having invalid value (%d)\n",
4620 __func__, priv->plat->maxmtu);
4621
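/* Default to auto-negotiated pause frames when requested through the
 * flow_ctrl module parameter.
 */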
4622 if (flow_ctrl)
4623 priv->flow_ctrl = FLOW_AUTO;
4624
4625 /* Setup channels NAPI */
4626 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4627
4628 for (queue = 0; queue < maxq; queue++) {
4629 struct stmmac_channel *ch = &priv->channel[queue];
4630
4631 ch->priv_data = priv;
4632 ch->index = queue;
4633
4634 if (queue < priv->plat->rx_queues_to_use) {
4635 netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
4636 NAPI_POLL_WEIGHT);
4637 }
4638 if (queue < priv->plat->tx_queues_to_use) {
4639 netif_tx_napi_add(ndev, &ch->tx_napi,
4640 stmmac_napi_poll_tx,
4641 NAPI_POLL_WEIGHT);
4642 }
4643 }
4644
4645 mutex_init(&priv->lock);
4646
4647
4648 /* If a specific clk_csr value is passed from the platform, the CSR
4649  * Clock Range selection cannot be changed at run-time and is fixed;
4650  * otherwise the driver tries to set the MDC clock dynamically from
4651  * the CSR clock input.
4652  */
4653 if (priv->plat->clk_csr >= 0)
4654 priv->clk_csr = priv->plat->clk_csr;
4655 else
4656 stmmac_clk_csr_set(priv);
4657
4658 stmmac_check_pcs_mode(priv);
4659
4660 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4661 priv->hw->pcs != STMMAC_PCS_TBI &&
4662 priv->hw->pcs != STMMAC_PCS_RTBI) {
4663 /* MDIO bus Registration */
4664 ret = stmmac_mdio_register(ndev);
4665 if (ret < 0) {
4666 dev_err(priv->device,
4667 "%s: MDIO bus (id: %d) registration failed",
4668 __func__, priv->plat->bus_id);
4669 goto error_mdio_register;
4670 }
4671 }
4672
4673 ret = stmmac_phy_setup(priv);
4674 if (ret) {
4675 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
4676 goto error_phy_setup;
4677 }
4678
4679 ret = register_netdev(ndev);
4680 if (ret) {
4681 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4682 __func__, ret);
4683 goto error_netdev_register;
4684 }
4685
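/* Expose the per-device debugfs entries once the netdev is registered */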
4686 #ifdef CONFIG_DEBUG_FS
4687 stmmac_init_fs(ndev);
4688 #endif
4689
4690 return ret;
4691
4692 error_netdev_register:
4693 phylink_destroy(priv->phylink);
4694 error_phy_setup:
4695 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4696 priv->hw->pcs != STMMAC_PCS_TBI &&
4697 priv->hw->pcs != STMMAC_PCS_RTBI)
4698 stmmac_mdio_unregister(ndev);
4699 error_mdio_register:
4700 for (queue = 0; queue < maxq; queue++) {
4701 struct stmmac_channel *ch = &priv->channel[queue];
4702
4703 if (queue < priv->plat->rx_queues_to_use)
4704 netif_napi_del(&ch->rx_napi);
4705 if (queue < priv->plat->tx_queues_to_use)
4706 netif_napi_del(&ch->tx_napi);
4707 }
4708 error_hw_init:
4709 destroy_workqueue(priv->wq);
4710
4711 return ret;
4712 }
4713 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4714
4715 /**
4716  * stmmac_dvr_remove
4717  * @dev: device pointer
4718  * Description: this function resets the TX/RX processes, disables the MAC
4719  * RX/TX, changes the link status and releases the DMA descriptor rings.
4720  */
4721 int stmmac_dvr_remove(struct device *dev)
4722 {
4723 struct net_device *ndev = dev_get_drvdata(dev);
4724 struct stmmac_priv *priv = netdev_priv(ndev);
4725
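/* Stop the DMA and disable the MAC before unregistering the netdev, then
 * release phylink, resets, clocks, the MDIO bus and the workqueue.
 */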
4726 netdev_info(priv->dev, "%s: removing driver\n", __func__);
4727
4728 stmmac_stop_all_dma(priv);
4729
4730 stmmac_mac_set(priv, priv->ioaddr, false);
4731 netif_carrier_off(ndev);
4732 unregister_netdev(ndev);
4733 #ifdef CONFIG_DEBUG_FS
4734 stmmac_exit_fs(ndev);
4735 #endif
4736 phylink_destroy(priv->phylink);
4737 if (priv->plat->stmmac_rst)
4738 reset_control_assert(priv->plat->stmmac_rst);
4739 clk_disable_unprepare(priv->plat->pclk);
4740 clk_disable_unprepare(priv->plat->stmmac_clk);
4741 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4742 priv->hw->pcs != STMMAC_PCS_TBI &&
4743 priv->hw->pcs != STMMAC_PCS_RTBI)
4744 stmmac_mdio_unregister(ndev);
4745 destroy_workqueue(priv->wq);
4746 mutex_destroy(&priv->lock);
4747
4748 return 0;
4749 }
4750 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4751
4752 /**
4753  * stmmac_suspend - suspend callback
4754  * @dev: device pointer
4755  * Description: this is the function to suspend the device and it is called
4756  * by the platform driver to stop the network queue, release the resources,
4757  * program the PMT register (for WoL) and clean/release driver resources.
4758  */
4759 int stmmac_suspend(struct device *dev)
4760 {
4761 struct net_device *ndev = dev_get_drvdata(dev);
4762 struct stmmac_priv *priv = netdev_priv(ndev);
4763 u32 chan;
4764
4765 if (!ndev || !netif_running(ndev))
4766 return 0;
4767
4768 phylink_mac_change(priv->phylink, false);
4769
4770 mutex_lock(&priv->lock);
4771
4772 netif_device_detach(ndev);
4773 stmmac_stop_all_queues(priv);
4774
4775 stmmac_disable_all_queues(priv);
4776
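/* Make sure no TX coalescing timer can fire once the queues are stopped */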
4777 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4778 del_timer_sync(&priv->tx_queue[chan].txtimer);
4779
4780 /* Stop TX/RX DMA */
4781 stmmac_stop_all_dma(priv);
4782
4783 /* Enable Power down mode by programming the PMT regs */
4784 if (device_may_wakeup(priv->device)) {
4785 stmmac_pmt(priv, priv->hw, priv->wolopts);
4786 priv->irq_wake = 1;
4787 } else {
4788 mutex_unlock(&priv->lock);
4789 rtnl_lock();
4790 phylink_stop(priv->phylink);
4791 rtnl_unlock();
4792 mutex_lock(&priv->lock);
4793
4794 stmmac_mac_set(priv, priv->ioaddr, false);
4795 pinctrl_pm_select_sleep_state(priv->device);
4796
4797 if (priv->plat->clk_ptp_ref)
4798 clk_disable_unprepare(priv->plat->clk_ptp_ref);
4799 clk_disable_unprepare(priv->plat->pclk);
4800 clk_disable_unprepare(priv->plat->stmmac_clk);
4801 }
4802 mutex_unlock(&priv->lock);
4803
4804 priv->speed = SPEED_UNKNOWN;
4805 return 0;
4806 }
4807 EXPORT_SYMBOL_GPL(stmmac_suspend);
4808
4809 /**
4810  * stmmac_reset_queues_param - reset queue parameters
4811  * @priv: device private structure
4812  */
4813 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4814 {
4815 u32 rx_cnt = priv->plat->rx_queues_to_use;
4816 u32 tx_cnt = priv->plat->tx_queues_to_use;
4817 u32 queue;
4818
4819 for (queue = 0; queue < rx_cnt; queue++) {
4820 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4821
4822 rx_q->cur_rx = 0;
4823 rx_q->dirty_rx = 0;
4824 }
4825
4826 for (queue = 0; queue < tx_cnt; queue++) {
4827 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4828
4829 tx_q->cur_tx = 0;
4830 tx_q->dirty_tx = 0;
4831 tx_q->mss = 0;
4832 }
4833 }
4834
4835 /**
4836  * stmmac_resume - resume callback
4837  * @dev: device pointer
4838  * Description: on resume this function is invoked to bring the DMA and
4839  * MAC core back to a usable state.
4840  */
4841 int stmmac_resume(struct device *dev)
4842 {
4843 struct net_device *ndev = dev_get_drvdata(dev);
4844 struct stmmac_priv *priv = netdev_priv(ndev);
4845
4846 if (!netif_running(ndev))
4847 return 0;
4848
4849 
4850 
4851 /* The Power Down bit in the PM register is cleared automatically as
4852  * soon as a magic packet or a Wake-up frame is received; the mask is
4853  * still cleared manually here in case that did not happen.
4854  */
4855 if (device_may_wakeup(priv->device)) {
4856 mutex_lock(&priv->lock);
4857 stmmac_pmt(priv, priv->hw, 0);
4858 mutex_unlock(&priv->lock);
4859 priv->irq_wake = 0;
4860 } else {
4861 pinctrl_pm_select_default_state(priv->device);
4862
4863 clk_prepare_enable(priv->plat->stmmac_clk);
4864 clk_prepare_enable(priv->plat->pclk);
4865 if (priv->plat->clk_ptp_ref)
4866 clk_prepare_enable(priv->plat->clk_ptp_ref);
4867
4868 if (priv->mii)
4869 stmmac_mdio_reset(priv->mii);
4870 }
4871
4872 netif_device_attach(ndev);
4873
4874 mutex_lock(&priv->lock);
4875
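/* Bring the rings back to a known state before re-programming the HW:
 * reset the queue indexes, clear the descriptors and redo the HW setup.
 */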
4876 stmmac_reset_queues_param(priv);
4877
4878 stmmac_clear_descriptors(priv);
4879
4880 stmmac_hw_setup(ndev, false);
4881 stmmac_init_coalesce(priv);
4882 stmmac_set_rx_mode(ndev);
4883
4884 stmmac_enable_all_queues(priv);
4885
4886 stmmac_start_all_queues(priv);
4887
4888 mutex_unlock(&priv->lock);
4889
4890 if (!device_may_wakeup(priv->device)) {
4891 rtnl_lock();
4892 phylink_start(priv->phylink);
4893 rtnl_unlock();
4894 }
4895
4896 phylink_mac_change(priv->phylink, true);
4897
4898 return 0;
4899 }
4900 EXPORT_SYMBOL_GPL(stmmac_resume);
4901
4902 #ifndef MODULE
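/* Parse the built-in "stmmaceth=" command line, e.g.
 * stmmaceth=debug:16,phyaddr:1,watchdog:5000
 * Each value is converted with kstrtoint(); unrecognised keys are
 * silently ignored.
 */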
4903 static int __init stmmac_cmdline_opt(char *str)
4904 {
4905 char *opt;
4906
4907 if (!str || !*str)
4908 return -EINVAL;
4909 while ((opt = strsep(&str, ",")) != NULL) {
4910 if (!strncmp(opt, "debug:", 6)) {
4911 if (kstrtoint(opt + 6, 0, &debug))
4912 goto err;
4913 } else if (!strncmp(opt, "phyaddr:", 8)) {
4914 if (kstrtoint(opt + 8, 0, &phyaddr))
4915 goto err;
4916 } else if (!strncmp(opt, "buf_sz:", 7)) {
4917 if (kstrtoint(opt + 7, 0, &buf_sz))
4918 goto err;
4919 } else if (!strncmp(opt, "tc:", 3)) {
4920 if (kstrtoint(opt + 3, 0, &tc))
4921 goto err;
4922 } else if (!strncmp(opt, "watchdog:", 9)) {
4923 if (kstrtoint(opt + 9, 0, &watchdog))
4924 goto err;
4925 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4926 if (kstrtoint(opt + 10, 0, &flow_ctrl))
4927 goto err;
4928 } else if (!strncmp(opt, "pause:", 6)) {
4929 if (kstrtoint(opt + 6, 0, &pause))
4930 goto err;
4931 } else if (!strncmp(opt, "eee_timer:", 10)) {
4932 if (kstrtoint(opt + 10, 0, &eee_timer))
4933 goto err;
4934 } else if (!strncmp(opt, "chain_mode:", 11)) {
4935 if (kstrtoint(opt + 11, 0, &chain_mode))
4936 goto err;
4937 }
4938 }
4939 return 0;
4940
4941 err:
4942 pr_err("%s: ERROR broken module parameter conversion", __func__);
4943 return -EINVAL;
4944 }
4945
4946 __setup("stmmaceth=", stmmac_cmdline_opt);
4947 #endif
4948
4949 static int __init stmmac_init(void)
4950 {
4951 #ifdef CONFIG_DEBUG_FS
4952 /* Create debugfs main directory if it doesn't exist yet */
4953 if (!stmmac_fs_dir)
4954 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4955 register_netdevice_notifier(&stmmac_notifier);
4956 #endif
4957
4958 return 0;
4959 }
4960
4961 static void __exit stmmac_exit(void)
4962 {
4963 #ifdef CONFIG_DEBUG_FS
4964 unregister_netdevice_notifier(&stmmac_notifier);
4965 debugfs_remove_recursive(stmmac_fs_dir);
4966 #endif
4967 }
4968
4969 module_init(stmmac_init)
4970 module_exit(stmmac_exit)
4971
4972 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4973 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4974 MODULE_LICENSE("GPL");