This source file includes the following definitions:
- mvpp2_write
- mvpp2_read
- mvpp2_read_relaxed
- mvpp2_cpu_to_thread
- mvpp2_thread_write
- mvpp2_thread_read
- mvpp2_thread_write_relaxed
- mvpp2_thread_read_relaxed
- mvpp2_txdesc_dma_addr_get
- mvpp2_txdesc_dma_addr_set
- mvpp2_txdesc_size_get
- mvpp2_txdesc_size_set
- mvpp2_txdesc_txq_set
- mvpp2_txdesc_cmd_set
- mvpp2_txdesc_offset_get
- mvpp2_rxdesc_dma_addr_get
- mvpp2_rxdesc_cookie_get
- mvpp2_rxdesc_size_get
- mvpp2_rxdesc_status_get
- mvpp2_txq_inc_get
- mvpp2_txq_inc_put
- mvpp2_get_nrxqs
- mvpp2_egress_port
- mvpp2_txq_phys
- mvpp2_frag_alloc
- mvpp2_frag_free
- mvpp2_bm_pool_create
- mvpp2_bm_pool_bufsize_set
- mvpp2_bm_bufs_get_addrs
- mvpp2_bm_bufs_free
- mvpp2_check_hw_buf_num
- mvpp2_bm_pool_destroy
- mvpp2_bm_pools_init
- mvpp2_bm_init
- mvpp2_setup_bm_pool
- mvpp2_rxq_long_pool_set
- mvpp2_rxq_short_pool_set
- mvpp2_buf_alloc
- mvpp2_bm_pool_put
- mvpp2_bm_bufs_add
- mvpp2_bm_pool_use
- mvpp2_bm_pool_use_percpu
- mvpp2_swf_bm_pool_init_shared
- mvpp2_swf_bm_pool_init_percpu
- mvpp2_swf_bm_pool_init
- mvpp2_set_hw_csum
- mvpp2_bm_update_mtu
- mvpp2_interrupts_enable
- mvpp2_interrupts_disable
- mvpp2_qvec_interrupt_enable
- mvpp2_qvec_interrupt_disable
- mvpp2_interrupts_mask
- mvpp2_interrupts_unmask
- mvpp2_shared_interrupt_mask_unmask
- mvpp2_is_xlg
- mvpp22_gop_init_rgmii
- mvpp22_gop_init_sgmii
- mvpp22_gop_init_10gkr
- mvpp22_gop_init
- mvpp22_gop_unmask_irq
- mvpp22_gop_mask_irq
- mvpp22_gop_setup_irq
- mvpp22_comphy_init
- mvpp2_port_enable
- mvpp2_port_disable
- mvpp2_port_periodic_xon_disable
- mvpp2_port_loopback_set
- mvpp2_read_count
- mvpp2_read_index
- mvpp2_ethtool_get_strings
- mvpp2_read_stats
- mvpp2_gather_hw_statistics
- mvpp2_ethtool_get_stats
- mvpp2_ethtool_get_sset_count
- mvpp2_mac_reset_assert
- mvpp22_pcs_reset_assert
- mvpp22_pcs_reset_deassert
- mvpp2_gmac_max_rx_size_set
- mvpp2_xlg_max_rx_size_set
- mvpp2_defaults_set
- mvpp2_ingress_enable
- mvpp2_ingress_disable
- mvpp2_egress_enable
- mvpp2_egress_disable
- mvpp2_rxq_received
- mvpp2_rxq_status_update
- mvpp2_rxq_next_desc_get
- mvpp2_rxq_offset_set
- mvpp2_txq_next_desc_get
- mvpp2_aggr_txq_pend_desc_add
- mvpp2_aggr_desc_num_check
- mvpp2_txq_alloc_reserved_desc
- mvpp2_txq_reserved_desc_num_proc
- mvpp2_txq_desc_put
- mvpp2_txq_desc_csum
- mvpp2_txq_sent_desc_proc
- mvpp2_txq_sent_counter_clear
- mvpp2_txp_max_tx_size_set
- mvpp2_rx_pkts_coal_set
- mvpp2_tx_pkts_coal_set
- mvpp2_usec_to_cycles
- mvpp2_cycles_to_usec
- mvpp2_rx_time_coal_set
- mvpp2_tx_time_coal_set
- mvpp2_txq_bufs_free
- mvpp2_get_rx_queue
- mvpp2_get_tx_queue
- mvpp2_txq_done
- mvpp2_tx_done
- mvpp2_aggr_txq_init
- mvpp2_rxq_init
- mvpp2_rxq_drop_pkts
- mvpp2_rxq_deinit
- mvpp2_txq_init
- mvpp2_txq_deinit
- mvpp2_txq_clean
- mvpp2_cleanup_txqs
- mvpp2_cleanup_rxqs
- mvpp2_setup_rxqs
- mvpp2_setup_txqs
- mvpp2_isr
- mvpp2_link_status_isr
- mvpp2_hr_timer_cb
- mvpp2_rx_error
- mvpp2_rx_csum
- mvpp2_rx_refill
- mvpp2_skb_tx_csum
- mvpp2_rx
- tx_desc_unmap_put
- mvpp2_tx_frag_process
- mvpp2_tso_put_hdr
- mvpp2_tso_put_data
- mvpp2_tx_tso
- mvpp2_tx
- mvpp2_cause_error
- mvpp2_poll
- mvpp22_mode_reconfigure
- mvpp2_start_dev
- mvpp2_stop_dev
- mvpp2_check_ringparam_valid
- mvpp21_get_mac_address
- mvpp2_irqs_init
- mvpp2_irqs_deinit
- mvpp22_rss_is_supported
- mvpp2_open
- mvpp2_stop
- mvpp2_prs_mac_da_accept_list
- mvpp2_set_rx_promisc
- mvpp2_set_rx_mode
- mvpp2_set_mac_address
- mvpp2_bm_switch_buffers
- mvpp2_change_mtu
- mvpp2_get_stats64
- mvpp2_ioctl
- mvpp2_vlan_rx_add_vid
- mvpp2_vlan_rx_kill_vid
- mvpp2_set_features
- mvpp2_ethtool_nway_reset
- mvpp2_ethtool_set_coalesce
- mvpp2_ethtool_get_coalesce
- mvpp2_ethtool_get_drvinfo
- mvpp2_ethtool_get_ringparam
- mvpp2_ethtool_set_ringparam
- mvpp2_ethtool_get_pause_param
- mvpp2_ethtool_set_pause_param
- mvpp2_ethtool_get_link_ksettings
- mvpp2_ethtool_set_link_ksettings
- mvpp2_ethtool_get_rxnfc
- mvpp2_ethtool_set_rxnfc
- mvpp2_ethtool_get_rxfh_indir_size
- mvpp2_ethtool_get_rxfh
- mvpp2_ethtool_set_rxfh
- mvpp2_ethtool_get_rxfh_context
- mvpp2_ethtool_set_rxfh_context
- mvpp2_simple_queue_vectors_init
- mvpp2_multi_queue_vectors_init
- mvpp2_queue_vectors_init
- mvpp2_queue_vectors_deinit
- mvpp2_rx_irqs_setup
- mvpp2_port_init
- mvpp22_port_has_legacy_tx_irqs
- mvpp2_port_has_irqs
- mvpp2_port_copy_mac_addr
- mvpp2_phylink_validate
- mvpp22_xlg_link_state
- mvpp2_gmac_link_state
- mvpp2_phylink_mac_link_state
- mvpp2_mac_an_restart
- mvpp2_xlg_config
- mvpp2_gmac_config
- mvpp2_mac_config
- mvpp2_mac_link_up
- mvpp2_mac_link_down
- mvpp2_port_probe
- mvpp2_port_remove
- mvpp2_conf_mbus_windows
- mvpp2_rx_fifo_init
- mvpp22_rx_fifo_init
- mvpp22_tx_fifo_init
- mvpp2_axi_init
- mvpp2_init
- mvpp2_probe
- mvpp2_remove
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
			     const struct phylink_link_state *state);
static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface, struct phy_device *phy);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
/* Example (assuming the driver is built as a module):
 *   modprobe mvpp2 queue_mode=0
 * selects MVPP2_QDIST_SINGLE_MODE, making a PPv2.2 port use a single RX
 * queue instead of one RX queue group per CPU (see mvpp2_get_nrxqs()).
 */

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}

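/* These accessors operate on the per software-thread register windows
 * (priv->swth_base[thread]). Per-thread registers have one copy per
 * thread, and some global registers must be read or written through the
 * window of the thread that issued the related per-thread access, which
 * is why the thread index is passed explicitly below.
 */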
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

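	/* Round the RX queue count up to the nearest multiple of four,
	 * which is the hardware's RX queue allocation granularity, then
	 * clamp it to the per-port maximum.
	 */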
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TxQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

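/* Buffer Manager configuration routines */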
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

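	/* The number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints.
	 */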
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

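	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16:
	 * two 32-bit words per buffer on PPv2.1, two 64-bit words on
	 * PPv2.2.
	 */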
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

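	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by the hardware: its
	 * value simply comes back in the "cookie" field of the RX
	 * descriptor, so the buffer's physical address is stored there
	 * instead of its virtual address.
	 */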
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

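/* Notify the driver that the BM pool is being used as a specific type and
 * return the pool pointer on success
 */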
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU value
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU value
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

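	/* If port pkt_size is higher than MVPP2_BM_LONG_PKT_SIZE, use the
	 * jumbo pool as the HW long pool and the long pool as the HW short
	 * pool; otherwise use the regular long/short pool pair.
	 */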
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *p;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
					     mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!p)
			return -ENOMEM;

		port->priv->bm_pools[i].port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, port->priv->bm_pools[i].id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
					     mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!p)
			return -ENOMEM;

		port->priv->bm_pools[i + port->nrxqs].port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i,
					port->priv->bm_pools[i + port->nrxqs].id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}

static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* TX checksum offload together with the jumbo pool is only possible
	 * on port 0; on the other ports the checksum features must be
	 * dropped whenever the jumbo pool is in use.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If the MTU requires packets larger than the long pool can hold,
	 * use the jumbo pool as the long pool.
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		/* Update L4 checksum when jumbo enable/disable on port */
		mvpp2_set_hw_csum(port, new_long_pool);
	}

out_set:
	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts; called per CPU */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts; called per CPU */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GKR ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GKR:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (port->gop_id == 0) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}

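/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem,
 * while the one given to the COMPHY comes from the generic PHY subsystem;
 * hence they differ.
 */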
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
			       port->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}

/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. Depending on the register being read, the index can
 * represent various resources: a hit counter for some classifier tables,
 * or a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}

/* Software and hardware statistics are, by design, incremented at
 * different moments in the packet flow, so the two sets of counters
 * cannot be expected to match exactly (e.g. packets counted by one but
 * dropped before reaching the other).
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
};

#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)))

static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	struct mvpp2_port *port = netdev_priv(netdev);
	int i, q;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
		strscpy(data, mvpp2_ethtool_port_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (q = 0; q < port->ntxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_txq_regs[i].string, q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (q = 0; q < port->nrxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_rxq_regs[i].string,
				 q);
			data += ETH_GSTRING_LEN;
		}
	}
}

static void mvpp2_read_stats(struct mvpp2_port *port)
{
	u64 *pstats;
	int i, q;

	pstats = port->ethtool_stats;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
		*pstats++ += mvpp2_read(port->priv,
					mvpp2_ethtool_port_regs[i].offset +
					4 * port->id);

	/* The indexed counters below are selected per queue, so index with
	 * the queue number (q), not the counter number (i).
	 */
	for (q = 0; q < port->ntxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      MVPP22_CTRS_TX_CTR(port->id, q),
						      mvpp2_ethtool_txq_regs[i].offset);

	for (q = 0; q < port->nrxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      port->first_rxq + q,
						      mvpp2_ethtool_rxq_regs[i].offset);
}

static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);

	mutex_lock(&port->gather_stats_lock);

	mvpp2_read_stats(port);

	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (ie. use of ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}

static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics for the given port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
	mutex_unlock(&port->gather_stats_lock);
}

static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (sset == ETH_SS_STATS)
		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);

	return -EOPNOTSUPP;
}

static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
	      MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}
}

static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
}

static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_10GKR:
		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
		       MAC_CLK_RESET_SD_TX;
		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
		break;
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_RXAUI:
		val = readl(xpcs + MVPP22_XPCS_CFG0);
		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
		break;
	default:
		break;
	}
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}

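/* Set defaults to the MVPP2 port */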
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Set TXQ scheduling to Round-Robin */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

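/* Enable transmit via physical egress queue - HW starts taking descriptors
 * from DRAM
 */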
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue - HW doesn't take descriptors
 * from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

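/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */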
1840 static inline int
1841 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
1842 {
1843 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
1844
1845 return val & MVPP2_RXQ_OCCUPIED_MASK;
1846 }
1847
1848
1849
1850
1851 static inline void
1852 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
1853 int used_count, int free_count)
1854 {
1855
1856
1857
1858 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
1859
1860 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
1861 }
1862
1863
1864 static inline struct mvpp2_rx_desc *
1865 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
1866 {
1867 int rx_desc = rxq->next_desc_to_proc;
1868
1869 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
1870 prefetch(rxq->descs + rxq->next_desc_to_proc);
1871 return rxq->descs + rx_desc;
1872 }
1873
1874
1875 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
1876 int prxq, int offset)
1877 {
1878 u32 val;
1879
1880
1881 offset = offset >> 5;
1882
1883 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
1884 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
1885
1886
1887 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
1888 MVPP2_RXQ_PACKET_OFFSET_MASK);
1889
1890 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
1891 }
1892
1893
1894
1895
1896 static struct mvpp2_tx_desc *
1897 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
1898 {
1899 int tx_desc = txq->next_desc_to_proc;
1900
1901 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
1902 return txq->descs + tx_desc;
1903 }
1904
1905
1906
1907
1908
1909
1910 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
1911 {
1912
1913 mvpp2_thread_write(port->priv,
1914 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
1915 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
1916 }
1917
1918
1919
1920
1921
1922
1923
1924 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
1925 struct mvpp2_tx_queue *aggr_txq, int num)
1926 {
1927 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
1928
1929 unsigned int thread =
1930 mvpp2_cpu_to_thread(port->priv, smp_processor_id());
1931 u32 val = mvpp2_read_relaxed(port->priv,
1932 MVPP2_AGGR_TXQ_STATUS_REG(thread));
1933
1934 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
1935
1936 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
1937 return -ENOMEM;
1938 }
1939 return 0;
1940 }
1941
1942
1943
1944
1945
1946
1947
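/* Request num reserved Tx descriptors from the hardware for this queue
 * and return how many were actually granted.
 */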
1948 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
1949 struct mvpp2_tx_queue *txq, int num)
1950 {
1951 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
1952 struct mvpp2 *priv = port->priv;
1953 u32 val;
1954
1955 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
1956 mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
1957
1958 val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
1959
1960 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
1961 }
1962
1963
1964
1965
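/* Make sure this thread holds at least num reserved descriptors,
 * requesting another chunk if needed. Fails when the per-thread counts
 * plus the new request would overcommit the queue.
 */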
1966 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
1967 struct mvpp2_tx_queue *txq,
1968 struct mvpp2_txq_pcpu *txq_pcpu,
1969 int num)
1970 {
1971 int req, desc_count;
1972 unsigned int thread;
1973
1974 if (txq_pcpu->reserved_num >= num)
1975 return 0;
1976
1977
1978
1979
1980
1981 desc_count = 0;
1982
1983 for (thread = 0; thread < port->priv->nthreads; thread++) {
1984 struct mvpp2_txq_pcpu *txq_pcpu_aux;
1985
1986 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
1987 desc_count += txq_pcpu_aux->count;
1988 desc_count += txq_pcpu_aux->reserved_num;
1989 }
1990
1991 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
1992 desc_count += req;
1993
1994 if (desc_count >
1995 (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
1996 return -ENOMEM;
1997
1998 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
1999
2000
2001 if (txq_pcpu->reserved_num < num)
2002 return -ENOMEM;
2003 return 0;
2004 }
2005
2006
2007
2008
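/* Rewind next_desc_to_proc by one, releasing the last allocated Tx
 * descriptor; used on error paths in the middle of building a packet.
 */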
2009 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
2010 {
2011 if (txq->next_desc_to_proc == 0)
2012 txq->next_desc_to_proc = txq->last_desc - 1;
2013 else
2014 txq->next_desc_to_proc--;
2015 }
2016
2017
2018 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
2019 int ip_hdr_len, int l4_proto)
2020 {
2021 u32 command;
2022
2023
2024
2025
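/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
 * G_L4_chk, L4_type - required only for checksum calculation
 */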
2026 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
2027 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
2028 command |= MVPP2_TXD_IP_CSUM_DISABLE;
2029
2030 if (l3_proto == htons(ETH_P_IP)) {
2031 command &= ~MVPP2_TXD_IP_CSUM_DISABLE;
2032 command &= ~MVPP2_TXD_L3_IP6;
2033 } else {
2034 command |= MVPP2_TXD_L3_IP6;
2035 }
2036
2037 if (l4_proto == IPPROTO_TCP) {
2038 command &= ~MVPP2_TXD_L4_UDP;
2039 command &= ~MVPP2_TXD_L4_CSUM_FRAG;
2040 } else if (l4_proto == IPPROTO_UDP) {
2041 command |= MVPP2_TXD_L4_UDP;
2042 command &= ~MVPP2_TXD_L4_CSUM_FRAG;
2043 } else {
2044 command |= MVPP2_TXD_L4_CSUM_NOT;
2045 }
2046
2047 return command;
2048 }
2049
2050
2051
2052
2053
2054
2055
2056
2057
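/* Read (and thereby clear) the number of descriptors the hardware has
 * transmitted on this queue since the last read. Per-thread access.
 */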
2058 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2059 struct mvpp2_tx_queue *txq)
2060 {
2061 u32 val;
2062
2063
2064 val = mvpp2_thread_read_relaxed(port->priv,
2065 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2066 MVPP2_TXQ_SENT_REG(txq->id));
2067
2068 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2069 MVPP2_TRANSMITTED_COUNT_OFFSET;
2070 }
2071
2072
2073
2074
2075 static void mvpp2_txq_sent_counter_clear(void *arg)
2076 {
2077 struct mvpp2_port *port = arg;
2078 int queue;
2079
2080
/* Thread ids run from 0 to nthreads - 1, so >= (not >) is needed
 * to skip every CPU that does not map to a used thread.
 */
2081 if (smp_processor_id() >= port->priv->nthreads)
2082 return;
2083
2084 for (queue = 0; queue < port->ntxqs; queue++) {
2085 int id = port->txqs[queue]->id;
2086
2087 mvpp2_thread_read(port->priv,
2088 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2089 MVPP2_TXQ_SENT_REG(id));
2090 }
2091 }
2092
2093
2094 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2095 {
2096 u32 val, size, mtu;
2097 int txq, tx_port_num;
2098
2099 mtu = port->pkt_size * 8;
2100 if (mtu > MVPP2_TXP_MTU_MAX)
2101 mtu = MVPP2_TXP_MTU_MAX;
2102
2103
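/* WA for wrong Token bucket update: set MTU value = 3 * real MTU value */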
2104 mtu = 3 * mtu;
2105
2106
2107 tx_port_num = mvpp2_egress_port(port);
2108 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2109
2110
2111 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2112 val &= ~MVPP2_TXP_MTU_MAX;
2113 val |= mtu;
2114 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2115
2116
2117 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2118 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2119 if (size < mtu) {
2120 size = mtu;
2121 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2122 val |= size;
2123 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2124 }
2125
2126 for (txq = 0; txq < port->ntxqs; txq++) {
2127 val = mvpp2_read(port->priv,
2128 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2129 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2130
2131 if (size < mtu) {
2132 size = mtu;
2133 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2134 val |= size;
2135 mvpp2_write(port->priv,
2136 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2137 val);
2138 }
2139 }
2140 }
2141
2142
2143
2144
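/* Set the number of received packets that will trigger an Rx interrupt,
 * clamped to what the threshold register can hold.
 */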
2145 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
2146 struct mvpp2_rx_queue *rxq)
2147 {
2148 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2149
2150 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
2151 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
2152
2153 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2154 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
2155 rxq->pkts_coal);
2156
2157 put_cpu();
2158 }
2159
2160
2161 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2162 struct mvpp2_tx_queue *txq)
2163 {
2164 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2165 u32 val;
2166
2167 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2168 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2169
2170 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2171 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2172 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
2173
2174 put_cpu();
2175 }
2176
2177 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
2178 {
2179 u64 tmp = (u64)clk_hz * usec;
2180
2181 do_div(tmp, USEC_PER_SEC);
2182
2183 return tmp > U32_MAX ? U32_MAX : tmp;
2184 }
2185
2186 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
2187 {
2188 u64 tmp = (u64)cycles * USEC_PER_SEC;
2189
2190 do_div(tmp, clk_hz);
2191
2192 return tmp > U32_MAX ? U32_MAX : tmp;
2193 }
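/* For instance, with a 250 MHz tclk, mvpp2_usec_to_cycles(100, 250000000)
 * gives 25000 cycles, and mvpp2_cycles_to_usec(25000, 250000000) maps back
 * to 100 usec; results above U32_MAX are clamped in both directions.
 */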
2194
2195
2196 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
2197 struct mvpp2_rx_queue *rxq)
2198 {
2199 unsigned long freq = port->priv->tclk;
2200 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2201
2202 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
2203 rxq->time_coal =
2204 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
2205
2206
2207 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2208 }
2209
2210 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
2211 }
2212
2213 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
2214 {
2215 unsigned long freq = port->priv->tclk;
2216 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2217
2218 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
2219 port->tx_time_coal =
2220 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
2221
2222
2223 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2224 }
2225
2226 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
2227 }
2228
2229
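/* Free num transmitted buffers: unmap the DMA (unless it points into the
 * per-thread TSO header region) and release the attached skb, if any.
 */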
2230 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2231 struct mvpp2_tx_queue *txq,
2232 struct mvpp2_txq_pcpu *txq_pcpu, int num)
2233 {
2234 int i;
2235
2236 for (i = 0; i < num; i++) {
2237 struct mvpp2_txq_pcpu_buf *tx_buf =
2238 txq_pcpu->buffs + txq_pcpu->txq_get_index;
2239
2240 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
2241 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
2242 tx_buf->size, DMA_TO_DEVICE);
2243 if (tx_buf->skb)
2244 dev_kfree_skb_any(tx_buf->skb);
2245
2246 mvpp2_txq_inc_get(txq_pcpu);
2247 }
2248 }
2249
2250 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2251 u32 cause)
2252 {
2253 int queue = fls(cause) - 1;
2254
2255 return port->rxqs[queue];
2256 }
2257
2258 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2259 u32 cause)
2260 {
2261 int queue = fls(cause) - 1;
2262
2263 return port->txqs[queue];
2264 }
2265
2266
2267 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2268 struct mvpp2_txq_pcpu *txq_pcpu)
2269 {
2270 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
2271 int tx_done;
2272
2273 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
2274 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
2275
2276 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
2277 if (!tx_done)
2278 return;
2279 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
2280
2281 txq_pcpu->count -= tx_done;
2282
2283 if (netif_tx_queue_stopped(nq))
2284 if (txq_pcpu->count <= txq_pcpu->wake_threshold)
2285 netif_tx_wake_queue(nq);
2286 }
2287
2288 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
2289 unsigned int thread)
2290 {
2291 struct mvpp2_tx_queue *txq;
2292 struct mvpp2_txq_pcpu *txq_pcpu;
2293 unsigned int tx_todo = 0;
2294
2295 while (cause) {
2296 txq = mvpp2_get_tx_queue(port, cause);
2297 if (!txq)
2298 break;
2299
2300 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2301
2302 if (txq_pcpu->count) {
2303 mvpp2_txq_done(port, txq, txq_pcpu);
2304 tx_todo += txq_pcpu->count;
2305 }
2306
2307 cause &= ~(1 << txq->log_id);
2308 }
2309 return tx_todo;
2310 }
2311
2312
2313
2314
2315 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2316 struct mvpp2_tx_queue *aggr_txq,
2317 unsigned int thread, struct mvpp2 *priv)
2318 {
2319 u32 txq_dma;
2320
2321
2322 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2323 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2324 &aggr_txq->descs_dma, GFP_KERNEL);
2325 if (!aggr_txq->descs)
2326 return -ENOMEM;
2327
2328 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
2329
2330
2331 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2332 MVPP2_AGGR_TXQ_INDEX_REG(thread));
2333
2334
2335
2336
2337 if (priv->hw_version == MVPP21)
2338 txq_dma = aggr_txq->descs_dma;
2339 else
2340 txq_dma = aggr_txq->descs_dma >>
2341 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
2342
2343 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
2344 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
2345 MVPP2_AGGR_TXQ_SIZE);
2346
2347 return 0;
2348 }
2349
2350
2351 static int mvpp2_rxq_init(struct mvpp2_port *port,
2352 struct mvpp2_rx_queue *rxq)
2354 {
2355 unsigned int thread;
2356 u32 rxq_dma;
2357
2358 rxq->size = port->rx_ring_size;
2359
2360
2361 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
2362 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2363 &rxq->descs_dma, GFP_KERNEL);
2364 if (!rxq->descs)
2365 return -ENOMEM;
2366
2367 rxq->last_desc = rxq->size - 1;
2368
2369
2370 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2371
2372
2373 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2374 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2375 if (port->priv->hw_version == MVPP21)
2376 rxq_dma = rxq->descs_dma;
2377 else
2378 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2379 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2380 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2381 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
2382 put_cpu();
2383
2384
2385 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
2386
2387
2388 mvpp2_rx_pkts_coal_set(port, rxq);
2389 mvpp2_rx_time_coal_set(port, rxq);
2390
2391
2392 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
2393
2394 return 0;
2395 }
2396
2397
2398 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
2399 struct mvpp2_rx_queue *rxq)
2400 {
2401 int rx_received, i;
2402
2403 rx_received = mvpp2_rxq_received(port, rxq->id);
2404 if (!rx_received)
2405 return;
2406
2407 for (i = 0; i < rx_received; i++) {
2408 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
2409 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
2410 int pool;
2411
2412 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
2413 MVPP2_RXD_BM_POOL_ID_OFFS;
2414
2415 mvpp2_bm_pool_put(port, pool,
2416 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
2417 mvpp2_rxdesc_cookie_get(port, rx_desc));
2418 }
2419 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
2420 }
2421
2422
2423 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
2424 struct mvpp2_rx_queue *rxq)
2425 {
2426 unsigned int thread;
2427
2428 mvpp2_rxq_drop_pkts(port, rxq);
2429
2430 if (rxq->descs)
2431 dma_free_coherent(port->dev->dev.parent,
2432 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2433 rxq->descs,
2434 rxq->descs_dma);
2435
2436 rxq->descs = NULL;
2437 rxq->last_desc = 0;
2438 rxq->next_desc_to_proc = 0;
2439 rxq->descs_dma = 0;
2440
2441
2442
2443
2444 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2445 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2446 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2447 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
2448 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
2449 put_cpu();
2450 }
2451
2452
2453 static int mvpp2_txq_init(struct mvpp2_port *port,
2454 struct mvpp2_tx_queue *txq)
2455 {
2456 u32 val;
2457 unsigned int thread;
2458 int desc, desc_per_txq, tx_port_num;
2459 struct mvpp2_txq_pcpu *txq_pcpu;
2460
2461 txq->size = port->tx_ring_size;
2462
2463
2464 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
2465 txq->size * MVPP2_DESC_ALIGNED_SIZE,
2466 &txq->descs_dma, GFP_KERNEL);
2467 if (!txq->descs)
2468 return -ENOMEM;
2469
2470 txq->last_desc = txq->size - 1;
2471
2472
2473 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2474 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2475 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
2476 txq->descs_dma);
2477 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
2478 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
2479 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
2480 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
2481 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
2482 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
2483 val &= ~MVPP2_TXQ_PENDING_MASK;
2484 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
2485
2486
2487
2488
2489
2490
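/* Calculate the base address in the prefetch buffer: 16 descriptors
 * are reserved per existing TXQ, laid out per port and logical queue.
 */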
2491 desc_per_txq = 16;
2492 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
2493 (txq->log_id * desc_per_txq);
2494
2495 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
2496 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
2497 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
2498 put_cpu();
2499
2500
2501 tx_port_num = mvpp2_egress_port(port);
2502 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2503
2504 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
2505 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
2506 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
2507 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
2508 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
2509
2510 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
2511 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
2512 val);
2513
2514 for (thread = 0; thread < port->priv->nthreads; thread++) {
2515 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2516 txq_pcpu->size = txq->size;
2517 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
2518 sizeof(*txq_pcpu->buffs),
2519 GFP_KERNEL);
2520 if (!txq_pcpu->buffs)
2521 return -ENOMEM;
2522
2523 txq_pcpu->count = 0;
2524 txq_pcpu->reserved_num = 0;
2525 txq_pcpu->txq_put_index = 0;
2526 txq_pcpu->txq_get_index = 0;
2527 txq_pcpu->tso_headers = NULL;
2528
2529 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
2530 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
2531
2532 txq_pcpu->tso_headers =
2533 dma_alloc_coherent(port->dev->dev.parent,
2534 txq_pcpu->size * TSO_HEADER_SIZE,
2535 &txq_pcpu->tso_headers_dma,
2536 GFP_KERNEL);
2537 if (!txq_pcpu->tso_headers)
2538 return -ENOMEM;
2539 }
2540
2541 return 0;
2542 }
2543
2544
2545 static void mvpp2_txq_deinit(struct mvpp2_port *port,
2546 struct mvpp2_tx_queue *txq)
2547 {
2548 struct mvpp2_txq_pcpu *txq_pcpu;
2549 unsigned int thread;
2550
2551 for (thread = 0; thread < port->priv->nthreads; thread++) {
2552 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2553 kfree(txq_pcpu->buffs);
2554
2555 if (txq_pcpu->tso_headers)
2556 dma_free_coherent(port->dev->dev.parent,
2557 txq_pcpu->size * TSO_HEADER_SIZE,
2558 txq_pcpu->tso_headers,
2559 txq_pcpu->tso_headers_dma);
2560
2561 txq_pcpu->tso_headers = NULL;
2562 }
2563
2564 if (txq->descs)
2565 dma_free_coherent(port->dev->dev.parent,
2566 txq->size * MVPP2_DESC_ALIGNED_SIZE,
2567 txq->descs, txq->descs_dma);
2568
2569 txq->descs = NULL;
2570 txq->last_desc = 0;
2571 txq->next_desc_to_proc = 0;
2572 txq->descs_dma = 0;
2573
2574
2575 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
2576
2577
2578 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2579 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2580 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
2581 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
2582 put_cpu();
2583 }
2584
2585
2586 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
2587 {
2588 struct mvpp2_txq_pcpu *txq_pcpu;
2589 int delay, pending;
2590 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2591 u32 val;
2592
2593 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2594 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
2595 val |= MVPP2_TXQ_DRAIN_EN_MASK;
2596 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
2597
2598
2599
2600
2601 delay = 0;
2602 do {
2603 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
2604 netdev_warn(port->dev,
2605 "port %d: cleaning queue %d timed out\n",
2606 port->id, txq->log_id);
2607 break;
2608 }
2609 mdelay(1);
2610 delay++;
2611
2612 pending = mvpp2_thread_read(port->priv, thread,
2613 MVPP2_TXQ_PENDING_REG);
2614 pending &= MVPP2_TXQ_PENDING_MASK;
2615 } while (pending);
2616
2617 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
2618 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
2619 put_cpu();
2620
2621 for (thread = 0; thread < port->priv->nthreads; thread++) {
2622 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2623
2624
2625 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
2626
2627
2628 txq_pcpu->count = 0;
2629 txq_pcpu->txq_put_index = 0;
2630 txq_pcpu->txq_get_index = 0;
2631 }
2632 }
2633
2634
2635 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
2636 {
2637 struct mvpp2_tx_queue *txq;
2638 int queue;
2639 u32 val;
2640
2641 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
2642
2643
2644 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
2645 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
2646
2647 for (queue = 0; queue < port->ntxqs; queue++) {
2648 txq = port->txqs[queue];
2649 mvpp2_txq_clean(port, txq);
2650 mvpp2_txq_deinit(port, txq);
2651 }
2652
2653 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
2654
2655 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
2656 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
2657 }
2658
2659
2660 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
2661 {
2662 int queue;
2663
2664 for (queue = 0; queue < port->nrxqs; queue++)
2665 mvpp2_rxq_deinit(port, port->rxqs[queue]);
2666 }
2667
2668
2669 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
2670 {
2671 int queue, err;
2672
2673 for (queue = 0; queue < port->nrxqs; queue++) {
2674 err = mvpp2_rxq_init(port, port->rxqs[queue]);
2675 if (err)
2676 goto err_cleanup;
2677 }
2678 return 0;
2679
2680 err_cleanup:
2681 mvpp2_cleanup_rxqs(port);
2682 return err;
2683 }
2684
2685
2686 static int mvpp2_setup_txqs(struct mvpp2_port *port)
2687 {
2688 struct mvpp2_tx_queue *txq;
2689 int queue, err, cpu;
2690
2691 for (queue = 0; queue < port->ntxqs; queue++) {
2692 txq = port->txqs[queue];
2693 err = mvpp2_txq_init(port, txq);
2694 if (err)
2695 goto err_cleanup;
2696
2697
2698 cpu = queue % num_present_cpus();
2699 netif_set_xps_queue(port->dev, cpumask_of(cpu), queue);
2700 }
2701
2702 if (port->has_tx_irqs) {
2703 mvpp2_tx_time_coal_set(port);
2704 for (queue = 0; queue < port->ntxqs; queue++) {
2705 txq = port->txqs[queue];
2706 mvpp2_tx_pkts_coal_set(port, txq);
2707 }
2708 }
2709
2710 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
2711 return 0;
2712
2713 err_cleanup:
2714 mvpp2_cleanup_txqs(port);
2715 return err;
2716 }
2717
2718
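/* Per-queue-vector interrupt handler: mask the vector's interrupts
 * and let the scheduled NAPI poll do the actual work.
 */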
2719 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
2720 {
2721 struct mvpp2_queue_vector *qv = dev_id;
2722
2723 mvpp2_qvec_interrupt_disable(qv);
2724
2725 napi_schedule(&qv->napi);
2726
2727 return IRQ_HANDLED;
2728 }
2729
2730
2731 static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
2732 {
2733 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
2734 struct net_device *dev = port->dev;
2735 bool event = false, link = false;
2736 u32 val;
2737
2738 mvpp22_gop_mask_irq(port);
2739
2740 if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
2741 val = readl(port->base + MVPP22_XLG_INT_STAT);
2742 if (val & MVPP22_XLG_INT_STAT_LINK) {
2743 event = true;
2744 val = readl(port->base + MVPP22_XLG_STATUS);
2745 if (val & MVPP22_XLG_STATUS_LINK_UP)
2746 link = true;
2747 }
2748 } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
2749 phy_interface_mode_is_8023z(port->phy_interface) ||
2750 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
2751 val = readl(port->base + MVPP22_GMAC_INT_STAT);
2752 if (val & MVPP22_GMAC_INT_STAT_LINK) {
2753 event = true;
2754 val = readl(port->base + MVPP2_GMAC_STATUS0);
2755 if (val & MVPP2_GMAC_STATUS0_LINK_UP)
2756 link = true;
2757 }
2758 }
2759
2760 if (port->phylink) {
2761 phylink_mac_change(port->phylink, link);
2762 goto handled;
2763 }
2764
2765 if (!netif_running(dev) || !event)
2766 goto handled;
2767
2768 if (link) {
2769 mvpp2_interrupts_enable(port);
2770
2771 mvpp2_egress_enable(port);
2772 mvpp2_ingress_enable(port);
2773 netif_carrier_on(dev);
2774 netif_tx_wake_all_queues(dev);
2775 } else {
2776 netif_tx_stop_all_queues(dev);
2777 netif_carrier_off(dev);
2778 mvpp2_ingress_disable(port);
2779 mvpp2_egress_disable(port);
2780
2781 mvpp2_interrupts_disable(port);
2782 }
2783
2784 handled:
2785 mvpp22_gop_unmask_irq(port);
2786 return IRQ_HANDLED;
2787 }
2788
2789 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
2790 {
2791 struct net_device *dev;
2792 struct mvpp2_port *port;
2793 struct mvpp2_port_pcpu *port_pcpu;
2794 unsigned int tx_todo, cause;
2795
2796 port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
2797 dev = port_pcpu->dev;
2798
2799 if (!netif_running(dev))
2800 return HRTIMER_NORESTART;
2801
2802 port_pcpu->timer_scheduled = false;
2803 port = netdev_priv(dev);
2804
2805
2806 cause = (1 << port->ntxqs) - 1;
2807 tx_todo = mvpp2_tx_done(port, cause,
2808 mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
2809
2810
2811 if (tx_todo && !port_pcpu->timer_scheduled) {
2812 port_pcpu->timer_scheduled = true;
2813 hrtimer_forward_now(&port_pcpu->tx_done_timer,
2814 MVPP2_TXDONE_HRTIMER_PERIOD_NS);
2815
2816 return HRTIMER_RESTART;
2817 }
2818 return HRTIMER_NORESTART;
2819 }
2820
2821
2822
2823
2824 static void mvpp2_rx_error(struct mvpp2_port *port,
2825 struct mvpp2_rx_desc *rx_desc)
2826 {
2827 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
2828 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
2829 char *err_str = NULL;
2830
2831 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
2832 case MVPP2_RXD_ERR_CRC:
2833 err_str = "crc";
2834 break;
2835 case MVPP2_RXD_ERR_OVERRUN:
2836 err_str = "overrun";
2837 break;
2838 case MVPP2_RXD_ERR_RESOURCE:
2839 err_str = "resource";
2840 break;
2841 }
2842 if (err_str && net_ratelimit())
2843 netdev_err(port->dev,
2844 "bad rx status %08x (%s error), size=%zu\n",
2845 status, err_str, sz);
2846 }
2847
2848
2849 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
2850 struct sk_buff *skb)
2851 {
2852 if (((status & MVPP2_RXD_L3_IP4) &&
2853 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
2854 (status & MVPP2_RXD_L3_IP6))
2855 if (((status & MVPP2_RXD_L4_UDP) ||
2856 (status & MVPP2_RXD_L4_TCP)) &&
2857 (status & MVPP2_RXD_L4_CSUM_OK)) {
2858 skb->csum = 0;
2859 skb->ip_summed = CHECKSUM_UNNECESSARY;
2860 return;
2861 }
2862
2863 skb->ip_summed = CHECKSUM_NONE;
2864 }
2865
2866
2867 static int mvpp2_rx_refill(struct mvpp2_port *port,
2868 struct mvpp2_bm_pool *bm_pool, int pool)
2869 {
2870 dma_addr_t dma_addr;
2871 phys_addr_t phys_addr;
2872 void *buf;
2873
2874
2875 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
2876 GFP_ATOMIC);
2877 if (!buf)
2878 return -ENOMEM;
2879
2880 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2881
2882 return 0;
2883 }
2884
2885
2886 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
2887 {
2888 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2889 int ip_hdr_len = 0;
2890 u8 l4_proto;
2891 __be16 l3_proto = vlan_get_protocol(skb);
2892
2893 if (l3_proto == htons(ETH_P_IP)) {
2894 struct iphdr *ip4h = ip_hdr(skb);
2895
2896
2897 ip_hdr_len = ip4h->ihl;
2898 l4_proto = ip4h->protocol;
2899 } else if (l3_proto == htons(ETH_P_IPV6)) {
2900 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2901
2902
2903 if (skb_network_header_len(skb) > 0)
2904 ip_hdr_len = (skb_network_header_len(skb) >> 2);
2905 l4_proto = ip6h->nexthdr;
2906 } else {
2907 return MVPP2_TXD_L4_CSUM_NOT;
2908 }
2909
2910 return mvpp2_txq_desc_csum(skb_network_offset(skb),
2911 l3_proto, ip_hdr_len, l4_proto);
2912 }
2913
2914 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
2915 }
2916
2917
2918 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
2919 int rx_todo, struct mvpp2_rx_queue *rxq)
2920 {
2921 struct net_device *dev = port->dev;
2922 int rx_received;
2923 int rx_done = 0;
2924 u32 rcvd_pkts = 0;
2925 u32 rcvd_bytes = 0;
2926
2927
2928 rx_received = mvpp2_rxq_received(port, rxq->id);
2929 if (rx_todo > rx_received)
2930 rx_todo = rx_received;
2931
2932 while (rx_done < rx_todo) {
2933 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
2934 struct mvpp2_bm_pool *bm_pool;
2935 struct sk_buff *skb;
2936 unsigned int frag_size;
2937 dma_addr_t dma_addr;
2938 phys_addr_t phys_addr;
2939 u32 rx_status;
2940 int pool, rx_bytes, err;
2941 void *data;
2942
2943 rx_done++;
2944 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
2945 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
2946 rx_bytes -= MVPP2_MH_SIZE;
2947 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
2948 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
2949 data = (void *)phys_to_virt(phys_addr);
2950
2951 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
2952 MVPP2_RXD_BM_POOL_ID_OFFS;
2953 bm_pool = &port->priv->bm_pools[pool];
2954
2955
2956
2957
2958
2959
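/* On an error, return the buffer untouched to its BM pool; the refill
 * bookkeeping is handled by the hardware from there.
 */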
2960 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
2961 err_drop_frame:
2962 dev->stats.rx_errors++;
2963 mvpp2_rx_error(port, rx_desc);
2964
2965 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2966 continue;
2967 }
2968
2969 if (bm_pool->frag_size > PAGE_SIZE)
2970 frag_size = 0;
2971 else
2972 frag_size = bm_pool->frag_size;
2973
2974 skb = build_skb(data, frag_size);
2975 if (!skb) {
2976 netdev_warn(port->dev, "skb build failed\n");
2977 goto err_drop_frame;
2978 }
2979
2980 err = mvpp2_rx_refill(port, bm_pool, pool);
2981 if (err) {
2982 netdev_err(port->dev, "failed to refill BM pools\n");
2983 goto err_drop_frame;
2984 }
2985
2986 dma_unmap_single(dev->dev.parent, dma_addr,
2987 bm_pool->buf_size, DMA_FROM_DEVICE);
2988
2989 rcvd_pkts++;
2990 rcvd_bytes += rx_bytes;
2991
2992 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
2993 skb_put(skb, rx_bytes);
2994 skb->protocol = eth_type_trans(skb, dev);
2995 mvpp2_rx_csum(port, rx_status, skb);
2996
2997 napi_gro_receive(napi, skb);
2998 }
2999
3000 if (rcvd_pkts) {
3001 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3002
3003 u64_stats_update_begin(&stats->syncp);
3004 stats->rx_packets += rcvd_pkts;
3005 stats->rx_bytes += rcvd_bytes;
3006 u64_stats_update_end(&stats->syncp);
3007 }
3008
3009
3010 wmb();
3011 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
3012
3013 return rx_todo;
3014 }
3015
3016 static inline void
3017 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3018 struct mvpp2_tx_desc *desc)
3019 {
3020 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3021 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3022
3023 dma_addr_t buf_dma_addr =
3024 mvpp2_txdesc_dma_addr_get(port, desc);
3025 size_t buf_sz =
3026 mvpp2_txdesc_size_get(port, desc);
3027 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
3028 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
3029 buf_sz, DMA_TO_DEVICE);
3030 mvpp2_txq_desc_put(txq);
3031 }
3032
3033
3034 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
3035 struct mvpp2_tx_queue *aggr_txq,
3036 struct mvpp2_tx_queue *txq)
3037 {
3038 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3039 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3040 struct mvpp2_tx_desc *tx_desc;
3041 int i;
3042 dma_addr_t buf_dma_addr;
3043
3044 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3045 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3046 void *addr = skb_frag_address(frag);
3047
3048 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3049 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3050 mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
3051
3052 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
3053 skb_frag_size(frag),
3054 DMA_TO_DEVICE);
3055 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
3056 mvpp2_txq_desc_put(txq);
3057 goto cleanup;
3058 }
3059
3060 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3061
3062 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
3063
3064 mvpp2_txdesc_cmd_set(port, tx_desc,
3065 MVPP2_TXD_L_DESC);
3066 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
3067 } else {
3068
3069 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
3070 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
3071 }
3072 }
3073
3074 return 0;
3075 cleanup:
3076
3077
3078
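/* Unwind: release the descriptors (and their DMA mappings) used for
 * the fragments mapped so far.
 */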
3079 for (i = i - 1; i >= 0; i--) {
3080 tx_desc = txq->descs + i;
3081 tx_desc_unmap_put(port, txq, tx_desc);
3082 }
3083
3084 return -ENOMEM;
3085 }
3086
3087 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
3088 struct net_device *dev,
3089 struct mvpp2_tx_queue *txq,
3090 struct mvpp2_tx_queue *aggr_txq,
3091 struct mvpp2_txq_pcpu *txq_pcpu,
3092 int hdr_sz)
3093 {
3094 struct mvpp2_port *port = netdev_priv(dev);
3095 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3096 dma_addr_t addr;
3097
3098 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3099 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
3100
3101 addr = txq_pcpu->tso_headers_dma +
3102 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
3103 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
3104
3105 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
3106 MVPP2_TXD_F_DESC |
3107 MVPP2_TXD_PADDING_DISABLE);
3108 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
3109 }
3110
3111 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
3112 struct net_device *dev, struct tso_t *tso,
3113 struct mvpp2_tx_queue *txq,
3114 struct mvpp2_tx_queue *aggr_txq,
3115 struct mvpp2_txq_pcpu *txq_pcpu,
3116 int sz, bool left, bool last)
3117 {
3118 struct mvpp2_port *port = netdev_priv(dev);
3119 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3120 dma_addr_t buf_dma_addr;
3121
3122 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3123 mvpp2_txdesc_size_set(port, tx_desc, sz);
3124
3125 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
3126 DMA_TO_DEVICE);
3127 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
3128 mvpp2_txq_desc_put(txq);
3129 return -ENOMEM;
3130 }
3131
3132 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3133
3134 if (!left) {
3135 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
3136 if (last) {
3137 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
3138 return 0;
3139 }
3140 } else {
3141 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
3142 }
3143
3144 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
3145 return 0;
3146 }
3147
3148 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
3149 struct mvpp2_tx_queue *txq,
3150 struct mvpp2_tx_queue *aggr_txq,
3151 struct mvpp2_txq_pcpu *txq_pcpu)
3152 {
3153 struct mvpp2_port *port = netdev_priv(dev);
3154 struct tso_t tso;
3155 int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
3156 int i, len, descs = 0;
3157
3158
3159 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
3160 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
3161 tso_count_descs(skb)))
3162 return 0;
3163
3164 tso_start(skb, &tso);
3165 len = skb->len - hdr_sz;
3166 while (len > 0) {
3167 int left = min_t(int, skb_shinfo(skb)->gso_size, len);
3168 char *hdr = txq_pcpu->tso_headers +
3169 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
3170
3171 len -= left;
3172 descs++;
3173
3174 tso_build_hdr(skb, hdr, &tso, left, len == 0);
3175 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
3176
3177 while (left > 0) {
3178 int sz = min_t(int, tso.size, left);
3179 left -= sz;
3180 descs++;
3181
3182 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
3183 txq_pcpu, sz, left, len == 0))
3184 goto release;
3185 tso_build_data(skb, &tso, sz);
3186 }
3187 }
3188
3189 return descs;
3190
3191 release:
3192 for (i = descs - 1; i >= 0; i--) {
3193 struct mvpp2_tx_desc *tx_desc = txq->descs + i;
3194 tx_desc_unmap_put(port, txq, tx_desc);
3195 }
3196 return 0;
3197 }
3198
3199
3200 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
3201 {
3202 struct mvpp2_port *port = netdev_priv(dev);
3203 struct mvpp2_tx_queue *txq, *aggr_txq;
3204 struct mvpp2_txq_pcpu *txq_pcpu;
3205 struct mvpp2_tx_desc *tx_desc;
3206 dma_addr_t buf_dma_addr;
3207 unsigned long flags = 0;
3208 unsigned int thread;
3209 int frags = 0;
3210 u16 txq_id;
3211 u32 tx_cmd;
3212
3213 thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3214
3215 txq_id = skb_get_queue_mapping(skb);
3216 txq = port->txqs[txq_id];
3217 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3218 aggr_txq = &port->priv->aggr_txqs[thread];
3219
3220 if (test_bit(thread, &port->priv->lock_map))
3221 spin_lock_irqsave(&port->tx_lock[thread], flags);
3222
3223 if (skb_is_gso(skb)) {
3224 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
3225 goto out;
3226 }
3227 frags = skb_shinfo(skb)->nr_frags + 1;
3228
3229
3230 if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
3231 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
3232 frags = 0;
3233 goto out;
3234 }
3235
3236
3237 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3238 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3239 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
3240
3241 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
3242 skb_headlen(skb), DMA_TO_DEVICE);
3243 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
3244 mvpp2_txq_desc_put(txq);
3245 frags = 0;
3246 goto out;
3247 }
3248
3249 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3250
3251 tx_cmd = mvpp2_skb_tx_csum(port, skb);
3252
3253 if (frags == 1) {
3254
3255 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3256 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3257 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
3258 } else {
3259
3260 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
3261 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3262 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
3263
3264
3265 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
3266 tx_desc_unmap_put(port, txq, tx_desc);
3267 frags = 0;
3268 }
3269 }
3270
3271 out:
3272 if (frags > 0) {
3273 struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
3274 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
3275
3276 txq_pcpu->reserved_num -= frags;
3277 txq_pcpu->count += frags;
3278 aggr_txq->count += frags;
3279
3280
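/* Ensure the descriptor writes above are visible to the device before
 * the pending-descriptor counter update triggers a fetch.
 */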
3281 wmb();
3282 mvpp2_aggr_txq_pend_desc_add(port, frags);
3283
3284 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3285 netif_tx_stop_queue(nq);
3286
3287 u64_stats_update_begin(&stats->syncp);
3288 stats->tx_packets++;
3289 stats->tx_bytes += skb->len;
3290 u64_stats_update_end(&stats->syncp);
3291 } else {
3292 dev->stats.tx_dropped++;
3293 dev_kfree_skb_any(skb);
3294 }
3295
3296
3297 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3298 mvpp2_txq_done(port, txq, txq_pcpu);
3299
3300
3301 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
3302 txq_pcpu->count > 0) {
3303 struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
3304
3305 if (!port_pcpu->timer_scheduled) {
3306 port_pcpu->timer_scheduled = true;
3307 hrtimer_start(&port_pcpu->tx_done_timer,
3308 MVPP2_TXDONE_HRTIMER_PERIOD_NS,
3309 HRTIMER_MODE_REL_PINNED_SOFT);
3310 }
3311 }
3312
3313 if (test_bit(thread, &port->priv->lock_map))
3314 spin_unlock_irqrestore(&port->tx_lock[thread], flags);
3315
3316 return NETDEV_TX_OK;
3317 }
3318
3319 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
3320 {
3321 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
3322 netdev_err(dev, "FCS error\n");
3323 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
3324 netdev_err(dev, "rx fifo overrun error\n");
3325 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
3326 netdev_err(dev, "tx fifo underrun error\n");
3327 }
3328
3329 static int mvpp2_poll(struct napi_struct *napi, int budget)
3330 {
3331 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
3332 int rx_done = 0;
3333 struct mvpp2_port *port = netdev_priv(napi->dev);
3334 struct mvpp2_queue_vector *qv;
3335 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3336
3337 qv = container_of(napi, struct mvpp2_queue_vector, napi);
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
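/* The per-thread RX_TX cause register aggregates everything this queue
 * vector may have to handle: the misc (error) summary bit, the Tx
 * occupied-descriptor bits and the Rx occupied-descriptor bits, which
 * are peeled off one by one below.
 */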
3349 cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
3350 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
3351
3352 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
3353 if (cause_misc) {
3354 mvpp2_cause_error(port->dev, cause_misc);
3355
3356
3357 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
3358 mvpp2_thread_write(port->priv, thread,
3359 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
3360 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
3361 }
3362
3363 if (port->has_tx_irqs) {
3364 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
3365 if (cause_tx) {
3366 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
3367 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
3368 }
3369 }
3370
3371
3372 cause_rx = cause_rx_tx &
3373 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
3374 cause_rx <<= qv->first_rxq;
3375 cause_rx |= qv->pending_cause_rx;
3376 while (cause_rx && budget > 0) {
3377 int count;
3378 struct mvpp2_rx_queue *rxq;
3379
3380 rxq = mvpp2_get_rx_queue(port, cause_rx);
3381 if (!rxq)
3382 break;
3383
3384 count = mvpp2_rx(port, napi, budget, rxq);
3385 rx_done += count;
3386 budget -= count;
3387 if (budget > 0) {
3388
3389
3390
3391
3392 cause_rx &= ~(1 << rxq->logic_rxq);
3393 }
3394 }
3395
3396 if (budget > 0) {
3397 cause_rx = 0;
3398 napi_complete_done(napi, rx_done);
3399
3400 mvpp2_qvec_interrupt_enable(qv);
3401 }
3402 qv->pending_cause_rx = cause_rx;
3403 return rx_done;
3404 }
3405
3406 static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
3407 {
3408 u32 ctrl3;
3409
3410
3411 mvpp2_mac_reset_assert(port);
3412
3413
3414 mvpp22_pcs_reset_assert(port);
3415
3416
3417 mvpp22_comphy_init(port);
3418
3419
3420 mvpp22_gop_init(port);
3421
3422 mvpp22_pcs_reset_deassert(port);
3423
3424
3425 if (port->gop_id == 0) {
3426 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
3427 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
3428
3429 if (mvpp2_is_xlg(port->phy_interface))
3430 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
3431 else
3432 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
3433
3434 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
3435 }
3436
3437 if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface))
3438 mvpp2_xlg_max_rx_size_set(port);
3439 else
3440 mvpp2_gmac_max_rx_size_set(port);
3441 }
3442
3443
3444 static void mvpp2_start_dev(struct mvpp2_port *port)
3445 {
3446 int i;
3447
3448 mvpp2_txp_max_tx_size_set(port);
3449
3450 for (i = 0; i < port->nqvecs; i++)
3451 napi_enable(&port->qvecs[i].napi);
3452
3453
3454 mvpp2_interrupts_enable(port);
3455
3456 if (port->priv->hw_version == MVPP22)
3457 mvpp22_mode_reconfigure(port);
3458
3459 if (port->phylink) {
3460 phylink_start(port->phylink);
3461 } else {
3462
3463
3464
3465
3466 struct phylink_link_state state = {
3467 .interface = port->phy_interface,
3468 };
3469 mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
3470 mvpp2_mac_link_up(&port->phylink_config, MLO_AN_INBAND,
3471 port->phy_interface, NULL);
3472 }
3473
3474 netif_tx_start_all_queues(port->dev);
3475 }
3476
3477
3478 static void mvpp2_stop_dev(struct mvpp2_port *port)
3479 {
3480 int i;
3481
3482
3483 mvpp2_interrupts_disable(port);
3484
3485 for (i = 0; i < port->nqvecs; i++)
3486 napi_disable(&port->qvecs[i].napi);
3487
3488 if (port->phylink)
3489 phylink_stop(port->phylink);
3490 phy_power_off(port->comphy);
3491 }
3492
3493 static int mvpp2_check_ringparam_valid(struct net_device *dev,
3494 struct ethtool_ringparam *ring)
3495 {
3496 u16 new_rx_pending = ring->rx_pending;
3497 u16 new_tx_pending = ring->tx_pending;
3498
3499 if (ring->rx_pending == 0 || ring->tx_pending == 0)
3500 return -EINVAL;
3501
3502 if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
3503 new_rx_pending = MVPP2_MAX_RXD_MAX;
3504 else if (!IS_ALIGNED(ring->rx_pending, 16))
3505 new_rx_pending = ALIGN(ring->rx_pending, 16);
3506
3507 if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
3508 new_tx_pending = MVPP2_MAX_TXD_MAX;
3509 else if (!IS_ALIGNED(ring->tx_pending, 32))
3510 new_tx_pending = ALIGN(ring->tx_pending, 32);
3511
3512
3513
3514
3515 if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
3516 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
3517
3518 if (ring->rx_pending != new_rx_pending) {
3519 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
3520 ring->rx_pending, new_rx_pending);
3521 ring->rx_pending = new_rx_pending;
3522 }
3523
3524 if (ring->tx_pending != new_tx_pending) {
3525 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
3526 ring->tx_pending, new_tx_pending);
3527 ring->tx_pending = new_tx_pending;
3528 }
3529
3530 return 0;
3531 }
3532
3533 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
3534 {
3535 u32 mac_addr_l, mac_addr_m, mac_addr_h;
3536
3537 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3538 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
3539 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
3540 addr[0] = (mac_addr_h >> 24) & 0xFF;
3541 addr[1] = (mac_addr_h >> 16) & 0xFF;
3542 addr[2] = (mac_addr_h >> 8) & 0xFF;
3543 addr[3] = mac_addr_h & 0xFF;
3544 addr[4] = mac_addr_m & 0xFF;
3545 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
3546 }
3547
3548 static int mvpp2_irqs_init(struct mvpp2_port *port)
3549 {
3550 int err, i;
3551
3552 for (i = 0; i < port->nqvecs; i++) {
3553 struct mvpp2_queue_vector *qv = port->qvecs + i;
3554
3555 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
3556 qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
3557 if (!qv->mask) {
3558 err = -ENOMEM;
3559 goto err;
3560 }
3561
3562 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
3563 }
3564
3565 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
3566 if (err)
3567 goto err;
3568
3569 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
3570 unsigned int cpu;
3571
3572 for_each_present_cpu(cpu) {
3573 if (mvpp2_cpu_to_thread(port->priv, cpu) ==
3574 qv->sw_thread_id)
3575 cpumask_set_cpu(cpu, qv->mask);
3576 }
3577
3578 irq_set_affinity_hint(qv->irq, qv->mask);
3579 }
3580 }
3581
3582 return 0;
3583 err:
3584 for (i = 0; i < port->nqvecs; i++) {
3585 struct mvpp2_queue_vector *qv = port->qvecs + i;
3586
3587 irq_set_affinity_hint(qv->irq, NULL);
3588 kfree(qv->mask);
3589 qv->mask = NULL;
3590 free_irq(qv->irq, qv);
3591 }
3592
3593 return err;
3594 }
3595
3596 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
3597 {
3598 int i;
3599
3600 for (i = 0; i < port->nqvecs; i++) {
3601 struct mvpp2_queue_vector *qv = port->qvecs + i;
3602
3603 irq_set_affinity_hint(qv->irq, NULL);
3604 kfree(qv->mask);
3605 qv->mask = NULL;
3606 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
3607 free_irq(qv->irq, qv);
3608 }
3609 }
3610
3611 static bool mvpp22_rss_is_supported(void)
3612 {
3613 return queue_mode == MVPP2_QDIST_MULTI_MODE;
3614 }
3615
3616 static int mvpp2_open(struct net_device *dev)
3617 {
3618 struct mvpp2_port *port = netdev_priv(dev);
3619 struct mvpp2 *priv = port->priv;
3620 unsigned char mac_bcast[ETH_ALEN] = {
3621 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3622 bool valid = false;
3623 int err;
3624
3625 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
3626 if (err) {
3627 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
3628 return err;
3629 }
3630 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
3631 if (err) {
3632 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
3633 return err;
3634 }
3635 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
3636 if (err) {
3637 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
3638 return err;
3639 }
3640 err = mvpp2_prs_def_flow(port);
3641 if (err) {
3642 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
3643 return err;
3644 }
3645
3646
3647 err = mvpp2_setup_rxqs(port);
3648 if (err) {
3649 netdev_err(port->dev, "cannot allocate Rx queues\n");
3650 return err;
3651 }
3652
3653 err = mvpp2_setup_txqs(port);
3654 if (err) {
3655 netdev_err(port->dev, "cannot allocate Tx queues\n");
3656 goto err_cleanup_rxqs;
3657 }
3658
3659 err = mvpp2_irqs_init(port);
3660 if (err) {
3661 netdev_err(port->dev, "cannot init IRQs\n");
3662 goto err_cleanup_txqs;
3663 }
3664
3665
3666 if (port->of_node) {
3667 err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
3668 if (err) {
3669 netdev_err(port->dev, "could not attach PHY (%d)\n",
3670 err);
3671 goto err_free_irq;
3672 }
3673
3674 valid = true;
3675 }
3676
3677 if (priv->hw_version == MVPP22 && port->link_irq) {
3678 err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
3679 dev->name, port);
3680 if (err) {
3681 netdev_err(port->dev, "cannot request link IRQ %d\n",
3682 port->link_irq);
3683 goto err_free_irq;
3684 }
3685
3686 mvpp22_gop_setup_irq(port);
3687
3688
3689 netif_carrier_off(port->dev);
3690
3691 valid = true;
3692 } else {
3693 port->link_irq = 0;
3694 }
3695
3696 if (!valid) {
3697 netdev_err(port->dev,
3698 "invalid configuration: no dt or link IRQ");
/* Report a real error code rather than the leftover 0 held in err */
err = -ENOENT;
3699 goto err_free_irq;
3700 }
3701
3702
3703 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
3704 mvpp2_shared_interrupt_mask_unmask(port, false);
3705
3706 mvpp2_start_dev(port);
3707
3708
3709 queue_delayed_work(priv->stats_queue, &port->stats_work,
3710 MVPP2_MIB_COUNTERS_STATS_DELAY);
3711
3712 return 0;
3713
3714 err_free_irq:
3715 mvpp2_irqs_deinit(port);
3716 err_cleanup_txqs:
3717 mvpp2_cleanup_txqs(port);
3718 err_cleanup_rxqs:
3719 mvpp2_cleanup_rxqs(port);
3720 return err;
3721 }
3722
3723 static int mvpp2_stop(struct net_device *dev)
3724 {
3725 struct mvpp2_port *port = netdev_priv(dev);
3726 struct mvpp2_port_pcpu *port_pcpu;
3727 unsigned int thread;
3728
3729 mvpp2_stop_dev(port);
3730
3731
3732 on_each_cpu(mvpp2_interrupts_mask, port, 1);
3733 mvpp2_shared_interrupt_mask_unmask(port, true);
3734
3735 if (port->phylink)
3736 phylink_disconnect_phy(port->phylink);
3737 if (port->link_irq)
3738 free_irq(port->link_irq, port);
3739
3740 mvpp2_irqs_deinit(port);
3741 if (!port->has_tx_irqs) {
3742 for (thread = 0; thread < port->priv->nthreads; thread++) {
3743 port_pcpu = per_cpu_ptr(port->pcpu, thread);
3744
3745 hrtimer_cancel(&port_pcpu->tx_done_timer);
3746 port_pcpu->timer_scheduled = false;
3747 }
3748 }
3749 mvpp2_cleanup_rxqs(port);
3750 mvpp2_cleanup_txqs(port);
3751
3752 cancel_delayed_work_sync(&port->stats_work);
3753
3754 mvpp2_mac_reset_assert(port);
3755 mvpp22_pcs_reset_assert(port);
3756
3757 return 0;
3758 }
3759
3760 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
3761 struct netdev_hw_addr_list *list)
3762 {
3763 struct netdev_hw_addr *ha;
3764 int ret;
3765
3766 netdev_hw_addr_list_for_each(ha, list) {
3767 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
3768 if (ret)
3769 return ret;
3770 }
3771
3772 return 0;
3773 }
3774
3775 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
3776 {
3777 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3778 mvpp2_prs_vid_enable_filtering(port);
3779 else
3780 mvpp2_prs_vid_disable_filtering(port);
3781
3782 mvpp2_prs_mac_promisc_set(port->priv, port->id,
3783 MVPP2_PRS_L2_UNI_CAST, enable);
3784
3785 mvpp2_prs_mac_promisc_set(port->priv, port->id,
3786 MVPP2_PRS_L2_MULTI_CAST, enable);
3787 }
3788
3789 static void mvpp2_set_rx_mode(struct net_device *dev)
3790 {
3791 struct mvpp2_port *port = netdev_priv(dev);
3792
3793
3794 mvpp2_prs_mac_del_all(port);
3795
3796 if (dev->flags & IFF_PROMISC) {
3797 mvpp2_set_rx_promisc(port, true);
3798 return;
3799 }
3800
3801 mvpp2_set_rx_promisc(port, false);
3802
3803 if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
3804 mvpp2_prs_mac_da_accept_list(port, &dev->uc))
3805 mvpp2_prs_mac_promisc_set(port->priv, port->id,
3806 MVPP2_PRS_L2_UNI_CAST, true);
3807
3808 if (dev->flags & IFF_ALLMULTI) {
3809 mvpp2_prs_mac_promisc_set(port->priv, port->id,
3810 MVPP2_PRS_L2_MULTI_CAST, true);
3811 return;
3812 }
3813
3814 if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
3815 mvpp2_prs_mac_da_accept_list(port, &dev->mc))
3816 mvpp2_prs_mac_promisc_set(port->priv, port->id,
3817 MVPP2_PRS_L2_MULTI_CAST, true);
3818 }
3819
3820 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
3821 {
3822 const struct sockaddr *addr = p;
3823 int err;
3824
3825 if (!is_valid_ether_addr(addr->sa_data))
3826 return -EADDRNOTAVAIL;
3827
3828 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
3829 if (err) {
3830
3831 mvpp2_prs_update_mac_da(dev, dev->dev_addr);
3832 netdev_err(dev, "failed to change MAC address\n");
3833 }
3834 return err;
3835 }
3836
3837
3838
3839
3840 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
3841 {
3842 int numbufs = MVPP2_BM_POOLS_NUM, i;
3843 struct mvpp2_port *port = NULL;
3844 bool status[MVPP2_MAX_PORTS];
3845
3846 for (i = 0; i < priv->port_count; i++) {
3847 port = priv->port_list[i];
3848 status[i] = netif_running(port->dev);
3849 if (status[i])
3850 mvpp2_stop(port->dev);
3851 }
3852
3853
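/* nrxqs is the same for all ports */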
3854 if (priv->percpu_pools)
3855 numbufs = port->nrxqs * 2;
3856
3857 for (i = 0; i < numbufs; i++)
3858 mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
3859
3860 devm_kfree(port->dev->dev.parent, priv->bm_pools);
3861 priv->percpu_pools = percpu;
3862 mvpp2_bm_init(port->dev->dev.parent, priv);
3863
3864 for (i = 0; i < priv->port_count; i++) {
3865 port = priv->port_list[i];
3866 mvpp2_swf_bm_pool_init(port);
3867 if (status[i])
3868 mvpp2_open(port->dev);
3869 }
3870
3871 return 0;
3872 }
3873
3874 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
3875 {
3876 struct mvpp2_port *port = netdev_priv(dev);
3877 bool running = netif_running(dev);
3878 struct mvpp2 *priv = port->priv;
3879 int err;
3880
3881 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
3882 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
3883 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
3884 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
3885 }
3886
3887 if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
3888 if (priv->percpu_pools) {
3889 netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
3890 mvpp2_bm_switch_buffers(priv, false);
3891 }
3892 } else {
3893 bool jumbo = false;
3894 int i;
3895
3896 for (i = 0; i < priv->port_count; i++)
3897 if (priv->port_list[i] != port &&
3898 MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
3899 MVPP2_BM_LONG_PKT_SIZE) {
3900 jumbo = true;
3901 break;
3902 }
3903
3904
3905 if (!jumbo) {
3906 dev_info(port->dev->dev.parent,
3907 "all ports have a low MTU, switching to per-cpu buffers");
3908 mvpp2_bm_switch_buffers(priv, true);
3909 }
3910 }
3911
3912 if (running)
3913 mvpp2_stop_dev(port);
3914
3915 err = mvpp2_bm_update_mtu(dev, mtu);
3916 if (err) {
3917 netdev_err(dev, "failed to change MTU\n");
3918
3919 mvpp2_bm_update_mtu(dev, dev->mtu);
3920 } else {
3921 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3922 }
3923
3924 if (running) {
3925 mvpp2_start_dev(port);
3926 mvpp2_egress_enable(port);
3927 mvpp2_ingress_enable(port);
3928 }
3929
3930 return err;
3931 }
3932
3933 static void
3934 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
3935 {
3936 struct mvpp2_port *port = netdev_priv(dev);
3937 unsigned int start;
3938 unsigned int cpu;
3939
3940 for_each_possible_cpu(cpu) {
3941 struct mvpp2_pcpu_stats *cpu_stats;
3942 u64 rx_packets;
3943 u64 rx_bytes;
3944 u64 tx_packets;
3945 u64 tx_bytes;
3946
3947 cpu_stats = per_cpu_ptr(port->stats, cpu);
3948 do {
3949 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
3950 rx_packets = cpu_stats->rx_packets;
3951 rx_bytes = cpu_stats->rx_bytes;
3952 tx_packets = cpu_stats->tx_packets;
3953 tx_bytes = cpu_stats->tx_bytes;
3954 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
3955
3956 stats->rx_packets += rx_packets;
3957 stats->rx_bytes += rx_bytes;
3958 stats->tx_packets += tx_packets;
3959 stats->tx_bytes += tx_bytes;
3960 }
3961
3962 stats->rx_errors = dev->stats.rx_errors;
3963 stats->rx_dropped = dev->stats.rx_dropped;
3964 stats->tx_dropped = dev->stats.tx_dropped;
3965 }
3966
3967 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3968 {
3969 struct mvpp2_port *port = netdev_priv(dev);
3970
3971 if (!port->phylink)
3972 return -ENOTSUPP;
3973
3974 return phylink_mii_ioctl(port->phylink, ifr, cmd);
3975 }
3976
3977 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
3978 {
3979 struct mvpp2_port *port = netdev_priv(dev);
3980 int ret;
3981
3982 ret = mvpp2_prs_vid_entry_add(port, vid);
3983 if (ret)
3984 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
3985 MVPP2_PRS_VLAN_FILT_MAX - 1);
3986 return ret;
3987 }
3988
3989 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
3990 {
3991 struct mvpp2_port *port = netdev_priv(dev);
3992
3993 mvpp2_prs_vid_entry_remove(port, vid);
3994 return 0;
3995 }
3996
3997 static int mvpp2_set_features(struct net_device *dev,
3998 netdev_features_t features)
3999 {
4000 netdev_features_t changed = dev->features ^ features;
4001 struct mvpp2_port *port = netdev_priv(dev);
4002
4003 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
4004 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
4005 mvpp2_prs_vid_enable_filtering(port);
4006 } else {
4007
4008
4009
4010 mvpp2_prs_vid_remove_all(port);
4011
4012 mvpp2_prs_vid_disable_filtering(port);
4013 }
4014 }
4015
4016 if (changed & NETIF_F_RXHASH) {
4017 if (features & NETIF_F_RXHASH)
4018 mvpp22_port_rss_enable(port);
4019 else
4020 mvpp22_port_rss_disable(port);
4021 }
4022
4023 return 0;
4024 }
4025
4026
4027
4028 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
4029 {
4030 struct mvpp2_port *port = netdev_priv(dev);
4031
4032 if (!port->phylink)
4033 return -ENOTSUPP;
4034
4035 return phylink_ethtool_nway_reset(port->phylink);
4036 }
4037
4038
4039 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
4040 struct ethtool_coalesce *c)
4041 {
4042 struct mvpp2_port *port = netdev_priv(dev);
4043 int queue;
4044
4045 for (queue = 0; queue < port->nrxqs; queue++) {
4046 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
4047
4048 rxq->time_coal = c->rx_coalesce_usecs;
4049 rxq->pkts_coal = c->rx_max_coalesced_frames;
4050 mvpp2_rx_pkts_coal_set(port, rxq);
4051 mvpp2_rx_time_coal_set(port, rxq);
4052 }
4053
4054 if (port->has_tx_irqs) {
4055 port->tx_time_coal = c->tx_coalesce_usecs;
4056 mvpp2_tx_time_coal_set(port);
4057 }
4058
4059 for (queue = 0; queue < port->ntxqs; queue++) {
4060 struct mvpp2_tx_queue *txq = port->txqs[queue];
4061
4062 txq->done_pkts_coal = c->tx_max_coalesced_frames;
4063
4064 if (port->has_tx_irqs)
4065 mvpp2_tx_pkts_coal_set(port, txq);
4066 }
4067
4068 return 0;
4069 }
4070
4071 /* get coalescing for ethtools */
4072 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
4073 struct ethtool_coalesce *c)
4074 {
4075 struct mvpp2_port *port = netdev_priv(dev);
4076
4077 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
4078 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
4079 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
4080 c->tx_coalesce_usecs = port->tx_time_coal;
4081 return 0;
4082 }
4083
4084 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
4085 struct ethtool_drvinfo *drvinfo)
4086 {
4087 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
4088 sizeof(drvinfo->driver));
4089 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
4090 sizeof(drvinfo->version));
4091 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
4092 sizeof(drvinfo->bus_info));
4093 }
4094
4095 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
4096 struct ethtool_ringparam *ring)
4097 {
4098 struct mvpp2_port *port = netdev_priv(dev);
4099
4100 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
4101 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
4102 ring->rx_pending = port->rx_ring_size;
4103 ring->tx_pending = port->tx_ring_size;
4104 }
4105
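/* Changing the ring sizes on a running interface requires tearing the
* queues down and rebuilding them. If the re-setup with the new sizes
* fails, fall back to the previous ring sizes so the port keeps
* working; only if that also fails is the error propagated.
*/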
4106 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
4107 struct ethtool_ringparam *ring)
4108 {
4109 struct mvpp2_port *port = netdev_priv(dev);
4110 u16 prev_rx_ring_size = port->rx_ring_size;
4111 u16 prev_tx_ring_size = port->tx_ring_size;
4112 int err;
4113
4114 err = mvpp2_check_ringparam_valid(dev, ring);
4115 if (err)
4116 return err;
4117
4118 if (!netif_running(dev)) {
4119 port->rx_ring_size = ring->rx_pending;
4120 port->tx_ring_size = ring->tx_pending;
4121 return 0;
4122 }
4123
4124 /* The interface is running, so we have to force a
4125 * reallocation of the queues
4126 */
4127 mvpp2_stop_dev(port);
4128 mvpp2_cleanup_rxqs(port);
4129 mvpp2_cleanup_txqs(port);
4130
4131 port->rx_ring_size = ring->rx_pending;
4132 port->tx_ring_size = ring->tx_pending;
4133
4134 err = mvpp2_setup_rxqs(port);
4135 if (err) {
4136 /* Reallocate Rx queues with the original ring size */
4137 port->rx_ring_size = prev_rx_ring_size;
4138 ring->rx_pending = prev_rx_ring_size;
4139 err = mvpp2_setup_rxqs(port);
4140 if (err)
4141 goto err_out;
4142 }
4143 err = mvpp2_setup_txqs(port);
4144 if (err) {
4145 /* Reallocate Tx queues with the original ring size */
4146 port->tx_ring_size = prev_tx_ring_size;
4147 ring->tx_pending = prev_tx_ring_size;
4148 err = mvpp2_setup_txqs(port);
4149 if (err)
4150 goto err_clean_rxqs;
4151 }
4152
4153 mvpp2_start_dev(port);
4154 mvpp2_egress_enable(port);
4155 mvpp2_ingress_enable(port);
4156
4157 return 0;
4158
4159 err_clean_rxqs:
4160 mvpp2_cleanup_rxqs(port);
4161 err_out:
4162 netdev_err(dev, "failed to change ring parameters\n");
4163 return err;
4164 }
4165
4166 static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
4167 struct ethtool_pauseparam *pause)
4168 {
4169 struct mvpp2_port *port = netdev_priv(dev);
4170
4171 if (!port->phylink)
4172 return;
4173
4174 phylink_ethtool_get_pauseparam(port->phylink, pause);
4175 }
4176
4177 static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
4178 struct ethtool_pauseparam *pause)
4179 {
4180 struct mvpp2_port *port = netdev_priv(dev);
4181
4182 if (!port->phylink)
4183 return -ENOTSUPP;
4184
4185 return phylink_ethtool_set_pauseparam(port->phylink, pause);
4186 }
4187
4188 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
4189 struct ethtool_link_ksettings *cmd)
4190 {
4191 struct mvpp2_port *port = netdev_priv(dev);
4192
4193 if (!port->phylink)
4194 return -ENOTSUPP;
4195
4196 return phylink_ethtool_ksettings_get(port->phylink, cmd);
4197 }
4198
4199 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
4200 const struct ethtool_link_ksettings *cmd)
4201 {
4202 struct mvpp2_port *port = netdev_priv(dev);
4203
4204 if (!port->phylink)
4205 return -ENOTSUPP;
4206
4207 return phylink_ethtool_ksettings_set(port->phylink, cmd);
4208 }
4209
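/* ethtool -n/-N entry points: dispatch on info->cmd to report the RSS
* flow hash configuration, the Rx ring count, or the classification
* (RFS) rules stored in port->rfs_rules.
*/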
4210 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
4211 struct ethtool_rxnfc *info, u32 *rules)
4212 {
4213 struct mvpp2_port *port = netdev_priv(dev);
4214 int ret = 0, i, loc = 0;
4215
4216 if (!mvpp22_rss_is_supported())
4217 return -EOPNOTSUPP;
4218
4219 switch (info->cmd) {
4220 case ETHTOOL_GRXFH:
4221 ret = mvpp2_ethtool_rxfh_get(port, info);
4222 break;
4223 case ETHTOOL_GRXRINGS:
4224 info->data = port->nrxqs;
4225 break;
4226 case ETHTOOL_GRXCLSRLCNT:
4227 info->rule_cnt = port->n_rfs_rules;
4228 break;
4229 case ETHTOOL_GRXCLSRULE:
4230 ret = mvpp2_ethtool_cls_rule_get(port, info);
4231 break;
4232 case ETHTOOL_GRXCLSRLALL:
4233 for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
4234 if (port->rfs_rules[i])
4235 rules[loc++] = i;
4236 }
4237 break;
4238 default:
4239 return -ENOTSUPP;
4240 }
4241
4242 return ret;
4243 }
4244
4245 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
4246 struct ethtool_rxnfc *info)
4247 {
4248 struct mvpp2_port *port = netdev_priv(dev);
4249 int ret = 0;
4250
4251 if (!mvpp22_rss_is_supported())
4252 return -EOPNOTSUPP;
4253
4254 switch (info->cmd) {
4255 case ETHTOOL_SRXFH:
4256 ret = mvpp2_ethtool_rxfh_set(port, info);
4257 break;
4258 case ETHTOOL_SRXCLSRLINS:
4259 ret = mvpp2_ethtool_cls_rule_ins(port, info);
4260 break;
4261 case ETHTOOL_SRXCLSRLDEL:
4262 ret = mvpp2_ethtool_cls_rule_del(port, info);
4263 break;
4264 default:
4265 return -EOPNOTSUPP;
4266 }
4267 return ret;
4268 }
4269
4270 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
4271 {
4272 return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
4273 }
4274
4275 static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4276 u8 *hfunc)
4277 {
4278 struct mvpp2_port *port = netdev_priv(dev);
4279 int ret = 0;
4280
4281 if (!mvpp22_rss_is_supported())
4282 return -EOPNOTSUPP;
4283
4284 if (indir)
4285 ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
4286
4287 if (hfunc)
4288 *hfunc = ETH_RSS_HASH_CRC32;
4289
4290 return ret;
4291 }
4292
4293 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
4294 const u8 *key, const u8 hfunc)
4295 {
4296 struct mvpp2_port *port = netdev_priv(dev);
4297 int ret = 0;
4298
4299 if (!mvpp22_rss_is_supported())
4300 return -EOPNOTSUPP;
4301
4302 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
4303 return -EOPNOTSUPP;
4304
4305 if (key)
4306 return -EOPNOTSUPP;
4307
4308 if (indir)
4309 ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
4310
4311 return ret;
4312 }
4313
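/* RSS context variants of the rxfh accessors. Only the CRC32 hash
* function is supported and the hash key is not configurable; contexts
* index the MVPP22_N_RSS_TABLES hardware RSS tables.
*/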
4314 static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
4315 u8 *key, u8 *hfunc, u32 rss_context)
4316 {
4317 struct mvpp2_port *port = netdev_priv(dev);
4318 int ret = 0;
4319
4320 if (!mvpp22_rss_is_supported())
4321 return -EOPNOTSUPP;
4322 if (rss_context >= MVPP22_N_RSS_TABLES)
4323 return -EINVAL;
4324
4325 if (hfunc)
4326 *hfunc = ETH_RSS_HASH_CRC32;
4327
4328 if (indir)
4329 ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
4330
4331 return ret;
4332 }
4333
4334 static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
4335 const u32 *indir, const u8 *key,
4336 const u8 hfunc, u32 *rss_context,
4337 bool delete)
4338 {
4339 struct mvpp2_port *port = netdev_priv(dev);
4340 int ret;
4341
4342 if (!mvpp22_rss_is_supported())
4343 return -EOPNOTSUPP;
4344
4345 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
4346 return -EOPNOTSUPP;
4347
4348 if (key)
4349 return -EOPNOTSUPP;
4350
4351 if (delete)
4352 return mvpp22_port_rss_ctx_delete(port, *rss_context);
4353
4354 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
4355 ret = mvpp22_port_rss_ctx_create(port, rss_context);
4356 if (ret)
4357 return ret;
4358 }
4359
4360 return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
4361 }
4362
4363 /* Device ops */
4364 static const struct net_device_ops mvpp2_netdev_ops = {
4365 .ndo_open = mvpp2_open,
4366 .ndo_stop = mvpp2_stop,
4367 .ndo_start_xmit = mvpp2_tx,
4368 .ndo_set_rx_mode = mvpp2_set_rx_mode,
4369 .ndo_set_mac_address = mvpp2_set_mac_address,
4370 .ndo_change_mtu = mvpp2_change_mtu,
4371 .ndo_get_stats64 = mvpp2_get_stats64,
4372 .ndo_do_ioctl = mvpp2_ioctl,
4373 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
4374 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
4375 .ndo_set_features = mvpp2_set_features,
4376 };
4377
4378 static const struct ethtool_ops mvpp2_eth_tool_ops = {
4379 .nway_reset = mvpp2_ethtool_nway_reset,
4380 .get_link = ethtool_op_get_link,
4381 .set_coalesce = mvpp2_ethtool_set_coalesce,
4382 .get_coalesce = mvpp2_ethtool_get_coalesce,
4383 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
4384 .get_ringparam = mvpp2_ethtool_get_ringparam,
4385 .set_ringparam = mvpp2_ethtool_set_ringparam,
4386 .get_strings = mvpp2_ethtool_get_strings,
4387 .get_ethtool_stats = mvpp2_ethtool_get_stats,
4388 .get_sset_count = mvpp2_ethtool_get_sset_count,
4389 .get_pauseparam = mvpp2_ethtool_get_pause_param,
4390 .set_pauseparam = mvpp2_ethtool_set_pause_param,
4391 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
4392 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
4393 .get_rxnfc = mvpp2_ethtool_get_rxnfc,
4394 .set_rxnfc = mvpp2_ethtool_set_rxnfc,
4395 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
4396 .get_rxfh = mvpp2_ethtool_get_rxfh,
4397 .set_rxfh = mvpp2_ethtool_set_rxfh,
4398 .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
4399 .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
4400 };
4401
4402 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
4403 * had a single IRQ defined per-port.
4404 */
4405 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
4406 struct device_node *port_node)
4407 {
4408 struct mvpp2_queue_vector *v = &port->qvecs[0];
4409
4410 v->first_rxq = 0;
4411 v->nrxqs = port->nrxqs;
4412 v->type = MVPP2_QUEUE_VECTOR_SHARED;
4413 v->sw_thread_id = 0;
4414 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
4415 v->port = port;
4416 v->irq = irq_of_parse_and_map(port_node, 0);
4417 if (v->irq <= 0)
4418 return -EINVAL;
4419 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
4420 NAPI_POLL_WEIGHT);
4421
4422 port->nqvecs = 1;
4423
4424 return 0;
4425 }
4426
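/* One private queue vector is created per software thread. In single
* queue mode an extra, shared vector handles all Rx queues; in multi
* queue mode each vector owns exactly one Rx queue. IRQ names follow
* the legacy DT binding ("rx-shared"/"tx-cpuN") when MVPP2_F_DT_COMPAT
* is set, and "hifN" otherwise.
*/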
4427 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
4428 struct device_node *port_node)
4429 {
4430 struct mvpp2 *priv = port->priv;
4431 struct mvpp2_queue_vector *v;
4432 int i, ret;
4433
4434 switch (queue_mode) {
4435 case MVPP2_QDIST_SINGLE_MODE:
4436 port->nqvecs = priv->nthreads + 1;
4437 break;
4438 case MVPP2_QDIST_MULTI_MODE:
4439 port->nqvecs = priv->nthreads;
4440 break;
4441 }
4442
4443 for (i = 0; i < port->nqvecs; i++) {
4444 char irqname[16];
4445
4446 v = port->qvecs + i;
4447
4448 v->port = port;
4449 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
4450 v->sw_thread_id = i;
4451 v->sw_thread_mask = BIT(i);
4452
4453 if (port->flags & MVPP2_F_DT_COMPAT)
4454 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
4455 else
4456 snprintf(irqname, sizeof(irqname), "hif%d", i);
4457
4458 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
4459 v->first_rxq = i;
4460 v->nrxqs = 1;
4461 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
4462 i == (port->nqvecs - 1)) {
4463 v->first_rxq = 0;
4464 v->nrxqs = port->nrxqs;
4465 v->type = MVPP2_QUEUE_VECTOR_SHARED;
4466
4467 if (port->flags & MVPP2_F_DT_COMPAT)
4468 strncpy(irqname, "rx-shared", sizeof(irqname));
4469 }
4470
4471 if (port_node)
4472 v->irq = of_irq_get_byname(port_node, irqname);
4473 else
4474 v->irq = fwnode_irq_get(port->fwnode, i);
4475 if (v->irq <= 0) {
4476 ret = -EINVAL;
4477 goto err;
4478 }
4479
4480 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
4481 NAPI_POLL_WEIGHT);
4482 }
4483
4484 return 0;
4485
4486 err:
4487 for (i = 0; i < port->nqvecs; i++)
4488 irq_dispose_mapping(port->qvecs[i].irq);
4489 return ret;
4490 }
4491
4492 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
4493 struct device_node *port_node)
4494 {
4495 if (port->has_tx_irqs)
4496 return mvpp2_multi_queue_vectors_init(port, port_node);
4497 else
4498 return mvpp2_simple_queue_vectors_init(port, port_node);
4499 }
4500
4501 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
4502 {
4503 int i;
4504
4505 for (i = 0; i < port->nqvecs; i++)
4506 irq_dispose_mapping(port->qvecs[i].irq);
4507 }
4508
4509 /* Configure Rx queue group interrupt for this port */
4510 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
4511 {
4512 struct mvpp2 *priv = port->priv;
4513 u32 val;
4514 int i;
4515
4516 if (priv->hw_version == MVPP21) {
4517 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
4518 port->nrxqs);
4519 return;
4520 }
4521
4522 /* Handle the more complicated PPv2.2 case */
4523 for (i = 0; i < port->nqvecs; i++) {
4524 struct mvpp2_queue_vector *qv = port->qvecs + i;
4525
4526 if (!qv->nrxqs)
4527 continue;
4528
4529 val = qv->sw_thread_id;
4530 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
4531 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
4532
4533 val = qv->first_rxq;
4534 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
4535 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
4536 }
4537 }
4538
4539 /* Initialize port HW */
4540 static int mvpp2_port_init(struct mvpp2_port *port)
4541 {
4542 struct device *dev = port->dev->dev.parent;
4543 struct mvpp2 *priv = port->priv;
4544 struct mvpp2_txq_pcpu *txq_pcpu;
4545 unsigned int thread;
4546 int queue, err;
4547
4548 /* Checks for hardware constraints */
4549 if (port->first_rxq + port->nrxqs >
4550 MVPP2_MAX_PORTS * priv->max_port_rxqs)
4551 return -EINVAL;
4552
4553 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
4554 return -EINVAL;
4555
4556 /* Disable port */
4557 mvpp2_egress_disable(port);
4558 mvpp2_port_disable(port);
4559
4560 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
4561
4562 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
4563 GFP_KERNEL);
4564 if (!port->txqs)
4565 return -ENOMEM;
4566
4567 /* Associate physical Tx queues to this port and initialize.
4568 * The mapping is predefined.
4569 */
4570 for (queue = 0; queue < port->ntxqs; queue++) {
4571 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
4572 struct mvpp2_tx_queue *txq;
4573
4574 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
4575 if (!txq) {
4576 err = -ENOMEM;
4577 goto err_free_percpu;
4578 }
4579
4580 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
4581 if (!txq->pcpu) {
4582 err = -ENOMEM;
4583 goto err_free_percpu;
4584 }
4585
4586 txq->id = queue_phy_id;
4587 txq->log_id = queue;
4588 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
4589 for (thread = 0; thread < priv->nthreads; thread++) {
4590 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4591 txq_pcpu->thread = thread;
4592 }
4593
4594 port->txqs[queue] = txq;
4595 }
4596
4597 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
4598 GFP_KERNEL);
4599 if (!port->rxqs) {
4600 err = -ENOMEM;
4601 goto err_free_percpu;
4602 }
4603
4604 /* Allocate and initialize Rx queue for this port */
4605 for (queue = 0; queue < port->nrxqs; queue++) {
4606 struct mvpp2_rx_queue *rxq;
4607
4608 /* Map physical Rx queue to port's logical Rx queue */
4609 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
4610 if (!rxq) {
4611 err = -ENOMEM;
4612 goto err_free_percpu;
4613 }
4614
4615 rxq->id = port->first_rxq + queue;
4616 rxq->port = port->id;
4617 rxq->logic_rxq = queue;
4618
4619 port->rxqs[queue] = rxq;
4620 }
4621
4622 mvpp2_rx_irqs_setup(port);
4623
4624 /* Create Rx descriptor rings */
4625 for (queue = 0; queue < port->nrxqs; queue++) {
4626 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
4627
4628 rxq->size = port->rx_ring_size;
4629 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
4630 rxq->time_coal = MVPP2_RX_COAL_USEC;
4631 }
4632
4633 mvpp2_ingress_disable(port);
4634
4635 /* Port default configuration */
4636 mvpp2_defaults_set(port);
4637
4638 /* Port's classifier configuration */
4639 mvpp2_cls_oversize_rxq_set(port);
4640 mvpp2_cls_port_config(port);
4641
4642 if (mvpp22_rss_is_supported())
4643 mvpp22_port_rss_init(port);
4644
4645 /* Provide an initial Rx packet size */
4646 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
4647
4648 /* Initialize pools for swf */
4649 err = mvpp2_swf_bm_pool_init(port);
4650 if (err)
4651 goto err_free_percpu;
4652
4653 /* Clear all port stats */
4654 mvpp2_read_stats(port);
4655 memset(port->ethtool_stats, 0,
4656 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
4657
4658 return 0;
4659
4660 err_free_percpu:
4661 for (queue = 0; queue < port->ntxqs; queue++) {
4662 if (!port->txqs[queue])
4663 continue;
4664 free_percpu(port->txqs[queue]->pcpu);
4665 }
4666 return err;
4667 }
4668
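/* Returns true when the port node uses the old DT binding with the
* "rx-shared" and "tx-cpu0".."tx-cpu3" interrupt names, and flags the
* port as MVPP2_F_DT_COMPAT so the legacy IRQ names are used.
*/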
4669 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
4670 unsigned long *flags)
4671 {
4672 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
4673 "tx-cpu3" };
4674 int i;
4675
4676 for (i = 0; i < 5; i++)
4677 if (of_property_match_string(port_node, "interrupt-names",
4678 irqs[i]) < 0)
4679 return false;
4680
4681 *flags |= MVPP2_F_DT_COMPAT;
4682 return true;
4683 }
4684
4685 /* Checks if the port dt description has the required Tx interrupts:
4686 * - PPv2.1: there are no such interrupts.
4687 * - PPv2.2:
4688 *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
4689 *   - The new ones have: "hifX" with X in [0..8]
4690 *
4691 * All those variants are supported to keep the backward compatibility.
4692 */
4693 static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
4694 struct device_node *port_node,
4695 unsigned long *flags)
4696 {
4697 char name[5];
4698 int i;
4699
4700 /* ACPI */
4701 if (!port_node)
4702 return true;
4703
4704 if (priv->hw_version == MVPP21)
4705 return false;
4706
4707 if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
4708 return true;
4709
4710 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
4711 snprintf(name, 5, "hif%d", i);
4712 if (of_property_match_string(port_node, "interrupt-names",
4713 name) < 0)
4714 return false;
4715 }
4716
4717 return true;
4718 }
4719
4720 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
4721 struct fwnode_handle *fwnode,
4722 char **mac_from)
4723 {
4724 struct mvpp2_port *port = netdev_priv(dev);
4725 char hw_mac_addr[ETH_ALEN] = {0};
4726 char fw_mac_addr[ETH_ALEN];
4727
4728 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
4729 *mac_from = "firmware node";
4730 ether_addr_copy(dev->dev_addr, fw_mac_addr);
4731 return;
4732 }
4733
4734 if (priv->hw_version == MVPP21) {
4735 mvpp21_get_mac_address(port, hw_mac_addr);
4736 if (is_valid_ether_addr(hw_mac_addr)) {
4737 *mac_from = "hardware";
4738 ether_addr_copy(dev->dev_addr, hw_mac_addr);
4739 return;
4740 }
4741 }
4742
4743 *mac_from = "random";
4744 eth_hw_addr_random(dev);
4745 }
4746
4747 static void mvpp2_phylink_validate(struct phylink_config *config,
4748 unsigned long *supported,
4749 struct phylink_link_state *state)
4750 {
4751 struct mvpp2_port *port = container_of(config, struct mvpp2_port,
4752 phylink_config);
4753 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
4754
4755 /* Invalid combinations */
4756 switch (state->interface) {
4757 case PHY_INTERFACE_MODE_10GKR:
4758 case PHY_INTERFACE_MODE_XAUI:
4759 if (port->gop_id != 0)
4760 goto empty_set;
4761 break;
4762 case PHY_INTERFACE_MODE_RGMII:
4763 case PHY_INTERFACE_MODE_RGMII_ID:
4764 case PHY_INTERFACE_MODE_RGMII_RXID:
4765 case PHY_INTERFACE_MODE_RGMII_TXID:
4766 if (port->priv->hw_version == MVPP22 && port->gop_id == 0)
4767 goto empty_set;
4768 break;
4769 default:
4770 break;
4771 }
4772
4773 phylink_set(mask, Autoneg);
4774 phylink_set_port_modes(mask);
4775 phylink_set(mask, Pause);
4776 phylink_set(mask, Asym_Pause);
4777
4778 switch (state->interface) {
4779 case PHY_INTERFACE_MODE_10GKR:
4780 case PHY_INTERFACE_MODE_XAUI:
4781 case PHY_INTERFACE_MODE_NA:
4782 if (port->gop_id == 0) {
4783 phylink_set(mask, 10000baseT_Full);
4784 phylink_set(mask, 10000baseCR_Full);
4785 phylink_set(mask, 10000baseSR_Full);
4786 phylink_set(mask, 10000baseLR_Full);
4787 phylink_set(mask, 10000baseLRM_Full);
4788 phylink_set(mask, 10000baseER_Full);
4789 phylink_set(mask, 10000baseKR_Full);
4790 }
4791 /* Fall-through */
4792 case PHY_INTERFACE_MODE_RGMII:
4793 case PHY_INTERFACE_MODE_RGMII_ID:
4794 case PHY_INTERFACE_MODE_RGMII_RXID:
4795 case PHY_INTERFACE_MODE_RGMII_TXID:
4796 case PHY_INTERFACE_MODE_SGMII:
4797 phylink_set(mask, 10baseT_Half);
4798 phylink_set(mask, 10baseT_Full);
4799 phylink_set(mask, 100baseT_Half);
4800 phylink_set(mask, 100baseT_Full);
4801 /* Fall-through */
4802 case PHY_INTERFACE_MODE_1000BASEX:
4803 case PHY_INTERFACE_MODE_2500BASEX:
4804 phylink_set(mask, 1000baseT_Full);
4805 phylink_set(mask, 1000baseX_Full);
4806 phylink_set(mask, 2500baseT_Full);
4807 phylink_set(mask, 2500baseX_Full);
4808 break;
4809 default:
4810 goto empty_set;
4811 }
4812
4813 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
4814 bitmap_and(state->advertising, state->advertising, mask,
4815 __ETHTOOL_LINK_MODE_MASK_NBITS);
4816 return;
4817
4818 empty_set:
4819 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
4820 }
4821
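/* Read the link state out of the XLG (10G) MAC registers. The XLG MAC
* always operates at a fixed 10G full duplex; only the link and
* flow-control status are taken from the hardware.
*/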
4822 static void mvpp22_xlg_link_state(struct mvpp2_port *port,
4823 struct phylink_link_state *state)
4824 {
4825 u32 val;
4826
4827 state->speed = SPEED_10000;
4828 state->duplex = 1;
4829 state->an_complete = 1;
4830
4831 val = readl(port->base + MVPP22_XLG_STATUS);
4832 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
4833
4834 state->pause = 0;
4835 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4836 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
4837 state->pause |= MLO_PAUSE_TX;
4838 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
4839 state->pause |= MLO_PAUSE_RX;
4840 }
4841
4842 static void mvpp2_gmac_link_state(struct mvpp2_port *port,
4843 struct phylink_link_state *state)
4844 {
4845 u32 val;
4846
4847 val = readl(port->base + MVPP2_GMAC_STATUS0);
4848
4849 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
4850 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
4851 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
4852
4853 switch (port->phy_interface) {
4854 case PHY_INTERFACE_MODE_1000BASEX:
4855 state->speed = SPEED_1000;
4856 break;
4857 case PHY_INTERFACE_MODE_2500BASEX:
4858 state->speed = SPEED_2500;
4859 break;
4860 default:
4861 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
4862 state->speed = SPEED_1000;
4863 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
4864 state->speed = SPEED_100;
4865 else
4866 state->speed = SPEED_10;
4867 }
4868
4869 state->pause = 0;
4870 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
4871 state->pause |= MLO_PAUSE_RX;
4872 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
4873 state->pause |= MLO_PAUSE_TX;
4874 }
4875
4876 static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
4877 struct phylink_link_state *state)
4878 {
4879 struct mvpp2_port *port = container_of(config, struct mvpp2_port,
4880 phylink_config);
4881
4882 if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
4883 u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
4884 mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4885
4886 if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
4887 mvpp22_xlg_link_state(port, state);
4888 return 1;
4889 }
4890 }
4891
4892 mvpp2_gmac_link_state(port, state);
4893 return 1;
4894 }
4895
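/* Restart in-band autonegotiation by pulsing the restart-AN bit in the
* GMAC autoneg configuration register.
*/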
4896 static void mvpp2_mac_an_restart(struct phylink_config *config)
4897 {
4898 struct mvpp2_port *port = container_of(config, struct mvpp2_port,
4899 phylink_config);
4900 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4901
4902 writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
4903 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4904 writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
4905 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4906 }
4907
4908 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
4909 const struct phylink_link_state *state)
4910 {
4911 u32 old_ctrl0, ctrl0;
4912 u32 old_ctrl4, ctrl4;
4913
4914 old_ctrl0 = ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
4915 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);
4916
4917 ctrl0 |= MVPP22_XLG_CTRL0_MAC_RESET_DIS;
4918
4919 if (state->pause & MLO_PAUSE_TX)
4920 ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
4921 else
4922 ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
4923
4924 if (state->pause & MLO_PAUSE_RX)
4925 ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
4926 else
4927 ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
4928
4929 ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
4930 MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
4931 ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
4932
4933 if (old_ctrl0 != ctrl0)
4934 writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
4935 if (old_ctrl4 != ctrl4)
4936 writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
4937
4938 if (!(old_ctrl0 & MVPP22_XLG_CTRL0_MAC_RESET_DIS)) {
4939 while (!(readl(port->base + MVPP22_XLG_CTRL0_REG) &
4940 MVPP22_XLG_CTRL0_MAC_RESET_DIS))
4941 continue;
4942 }
4943 }
4944
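/* Program the GMAC for the requested mode. The autoneg, ctrl0, ctrl2
* and ctrl4 registers are read, modified according to the interface
* type and negotiation style, and only written back when they changed.
* Bits that may only change while the port is down force a temporary
* link-down and GMAC reset first.
*/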
4945 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
4946 const struct phylink_link_state *state)
4947 {
4948 u32 old_an, an;
4949 u32 old_ctrl0, ctrl0;
4950 u32 old_ctrl2, ctrl2;
4951 u32 old_ctrl4, ctrl4;
4952
4953 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4954 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4955 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4956 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4957
4958 an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
4959 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
4960 MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
4961 MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
4962 MVPP2_GMAC_IN_BAND_AUTONEG | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
4963 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
4964 ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
4965 MVPP2_GMAC_PCS_ENABLE_MASK);
4966 ctrl4 &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
4967
4968 /* Configure port type */
4969 if (phy_interface_mode_is_8023z(state->interface)) {
4970 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
4971 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4972 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
4973 MVPP22_CTRL4_DP_CLK_SEL |
4974 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4975 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
4976 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
4977 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4978 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
4979 MVPP22_CTRL4_DP_CLK_SEL |
4980 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4981 } else if (phy_interface_mode_is_rgmii(state->interface)) {
4982 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
4983 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
4984 MVPP22_CTRL4_SYNC_BYPASS_DIS |
4985 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4986 }
4987
4988 /* Configure advertisement bits */
4989 if (phylink_test(state->advertising, Pause))
4990 an |= MVPP2_GMAC_FC_ADV_EN;
4991 if (phylink_test(state->advertising, Asym_Pause))
4992 an |= MVPP2_GMAC_FC_ADV_ASM_EN;
4993
4994 /* Configure negotiation style */
4995 if (!phylink_autoneg_inband(mode)) {
4996 /* Phy or fixed speed - no in-band AN */
4997 if (state->duplex)
4998 an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4999
5000 if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
5001 an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
5002 else if (state->speed == SPEED_100)
5003 an |= MVPP2_GMAC_CONFIG_MII_SPEED;
5004
5005 if (state->pause & MLO_PAUSE_TX)
5006 ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
5007 if (state->pause & MLO_PAUSE_RX)
5008 ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
5009 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5010 /* SGMII in-band mode receives the speed and duplex from
5011 * the PHY. Flow control information is not received. */
5012 an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
5013 an |= MVPP2_GMAC_IN_BAND_AUTONEG |
5014 MVPP2_GMAC_AN_SPEED_EN |
5015 MVPP2_GMAC_AN_DUPLEX_EN;
5016
5017 if (state->pause & MLO_PAUSE_TX)
5018 ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
5019 if (state->pause & MLO_PAUSE_RX)
5020 ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
5021 } else if (phy_interface_mode_is_8023z(state->interface)) {
5022 /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
5023 * they negotiate duplex: they are always operating with a fixed
5024 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
5025 * speed and full duplex here.
5026 */
5027 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
5028 an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
5029 an |= MVPP2_GMAC_IN_BAND_AUTONEG |
5030 MVPP2_GMAC_CONFIG_GMII_SPEED |
5031 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5032
5033 if (state->pause & MLO_PAUSE_AN && state->an_enabled) {
5034 an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
5035 } else {
5036 if (state->pause & MLO_PAUSE_TX)
5037 ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
5038 if (state->pause & MLO_PAUSE_RX)
5039 ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
5040 }
5041 }
5042
5043 /* Some fields of the auto-negotiation register require the port to be
5044 * down when their value is updated.
5045 */
5046 #define MVPP2_GMAC_AN_PORT_DOWN_MASK \
5047 (MVPP2_GMAC_IN_BAND_AUTONEG | \
5048 MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \
5049 MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \
5050 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \
5051 MVPP2_GMAC_AN_DUPLEX_EN)
5052
5053 if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK ||
5054 (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK ||
5055 (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) {
5056 /* Force link down */
5057 old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5058 old_an |= MVPP2_GMAC_FORCE_LINK_DOWN;
5059 writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5060
5061 /* Hold the GMAC in reset while the configuration registers are
5062 * updated; the reset is released again once the new ctrl2 value
5063 * is written back below. */
5064 old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
5065 writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
5066 }
5067
5068 if (old_ctrl0 != ctrl0)
5069 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
5070 if (old_ctrl2 != ctrl2)
5071 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
5072 if (old_ctrl4 != ctrl4)
5073 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
5074 if (old_an != an)
5075 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5076
5077 if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
5078 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5079 MVPP2_GMAC_PORT_RESET_MASK)
5080 continue;
5081 }
5082 }
5083
5084 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
5085 const struct phylink_link_state *state)
5086 {
5087 struct net_device *dev = to_net_dev(config->dev);
5088 struct mvpp2_port *port = netdev_priv(dev);
5089 bool change_interface = port->phy_interface != state->interface;
5090
5091 /* Check for invalid values */
5092 if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
5093 netdev_err(dev, "Invalid mode on %s\n", dev->name);
5094 return;
5095 }
5096
5097 /* Make sure the port is disabled when reconfiguring the mode */
5098 mvpp2_port_disable(port);
5099
5100 if (port->priv->hw_version == MVPP22 && change_interface) {
5101 mvpp22_gop_mask_irq(port);
5102
5103 port->phy_interface = state->interface;
5104
5105 /* Reconfigure the serdes lanes */
5106 phy_power_off(port->comphy);
5107 mvpp22_mode_reconfigure(port);
5108 }
5109
5110 /* mac (re)configuration */
5111 if (mvpp2_is_xlg(state->interface))
5112 mvpp2_xlg_config(port, mode, state);
5113 else if (phy_interface_mode_is_rgmii(state->interface) ||
5114 phy_interface_mode_is_8023z(state->interface) ||
5115 state->interface == PHY_INTERFACE_MODE_SGMII)
5116 mvpp2_gmac_config(port, mode, state);
5117
5118 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
5119 mvpp2_port_loopback_set(port, state);
5120
5121 if (port->priv->hw_version == MVPP22 && change_interface)
5122 mvpp22_gop_unmask_irq(port);
5123
5124 mvpp2_port_enable(port);
5125 }
5126
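/* Link-up handler: outside of in-band AN mode, force the link up in
* the XLG or GMAC MAC, then enable the port and its Rx/Tx paths and
* wake the Tx queues.
*/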
5127 static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
5128 phy_interface_t interface, struct phy_device *phy)
5129 {
5130 struct net_device *dev = to_net_dev(config->dev);
5131 struct mvpp2_port *port = netdev_priv(dev);
5132 u32 val;
5133
5134 if (!phylink_autoneg_inband(mode)) {
5135 if (mvpp2_is_xlg(interface)) {
5136 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5137 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5138 val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5139 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5140 } else {
5141 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5142 val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
5143 val |= MVPP2_GMAC_FORCE_LINK_PASS;
5144 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5145 }
5146 }
5147
5148 mvpp2_port_enable(port);
5149
5150 mvpp2_egress_enable(port);
5151 mvpp2_ingress_enable(port);
5152 netif_tx_wake_all_queues(dev);
5153 }
5154
5155 static void mvpp2_mac_link_down(struct phylink_config *config,
5156 unsigned int mode, phy_interface_t interface)
5157 {
5158 struct net_device *dev = to_net_dev(config->dev);
5159 struct mvpp2_port *port = netdev_priv(dev);
5160 u32 val;
5161
5162 if (!phylink_autoneg_inband(mode)) {
5163 if (mvpp2_is_xlg(interface)) {
5164 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5165 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5166 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5167 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5168 } else {
5169 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5170 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5171 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5172 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5173 }
5174 }
5175
5176 netif_tx_stop_all_queues(dev);
5177 mvpp2_egress_disable(port);
5178 mvpp2_ingress_disable(port);
5179
5180 mvpp2_port_disable(port);
5181 }
5182
5183 static const struct phylink_mac_ops mvpp2_phylink_ops = {
5184 .validate = mvpp2_phylink_validate,
5185 .mac_link_state = mvpp2_phylink_mac_link_state,
5186 .mac_an_restart = mvpp2_mac_an_restart,
5187 .mac_config = mvpp2_mac_config,
5188 .mac_link_up = mvpp2_mac_link_up,
5189 .mac_link_down = mvpp2_mac_link_down,
5190 };
5191
5192 /* Ports initialization */
5193 static int mvpp2_port_probe(struct platform_device *pdev,
5194 struct fwnode_handle *port_fwnode,
5195 struct mvpp2 *priv)
5196 {
5197 struct phy *comphy = NULL;
5198 struct mvpp2_port *port;
5199 struct mvpp2_port_pcpu *port_pcpu;
5200 struct device_node *port_node = to_of_node(port_fwnode);
5201 netdev_features_t features;
5202 struct net_device *dev;
5203 struct phylink *phylink;
5204 char *mac_from = "";
5205 unsigned int ntxqs, nrxqs, thread;
5206 unsigned long flags = 0;
5207 bool has_tx_irqs;
5208 u32 id;
5209 int phy_mode;
5210 int err, i;
5211
5212 has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
5213 if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
5214 dev_err(&pdev->dev,
5215 "not enough IRQs to support multi queue mode\n");
5216 return -EINVAL;
5217 }
5218
5219 ntxqs = MVPP2_MAX_TXQ;
5220 nrxqs = mvpp2_get_nrxqs(priv);
5221
5222 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
5223 if (!dev)
5224 return -ENOMEM;
5225
5226 phy_mode = fwnode_get_phy_mode(port_fwnode);
5227 if (phy_mode < 0) {
5228 dev_err(&pdev->dev, "incorrect phy mode\n");
5229 err = phy_mode;
5230 goto err_free_netdev;
5231 }
5232
5233 if (port_node) {
5234 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
5235 if (IS_ERR(comphy)) {
5236 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
5237 err = -EPROBE_DEFER;
5238 goto err_free_netdev;
5239 }
5240 comphy = NULL;
5241 }
5242 }
5243
5244 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
5245 err = -EINVAL;
5246 dev_err(&pdev->dev, "missing port-id value\n");
5247 goto err_free_netdev;
5248 }
5249
5250 dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
5251 dev->watchdog_timeo = 5 * HZ;
5252 dev->netdev_ops = &mvpp2_netdev_ops;
5253 dev->ethtool_ops = &mvpp2_eth_tool_ops;
5254
5255 port = netdev_priv(dev);
5256 port->dev = dev;
5257 port->fwnode = port_fwnode;
5258 port->has_phy = !!of_find_property(port_node, "phy", NULL);
5259 port->ntxqs = ntxqs;
5260 port->nrxqs = nrxqs;
5261 port->priv = priv;
5262 port->has_tx_irqs = has_tx_irqs;
5263 port->flags = flags;
5264
5265 err = mvpp2_queue_vectors_init(port, port_node);
5266 if (err)
5267 goto err_free_netdev;
5268
5269 if (port_node)
5270 port->link_irq = of_irq_get_byname(port_node, "link");
5271 else
5272 port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
5273 if (port->link_irq == -EPROBE_DEFER) {
5274 err = -EPROBE_DEFER;
5275 goto err_deinit_qvecs;
5276 }
5277 if (port->link_irq <= 0)
5278 /* the link irq is optional */
5279 port->link_irq = 0;
5280
5281 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
5282 port->flags |= MVPP2_F_LOOPBACK;
5283
5284 port->id = id;
5285 if (priv->hw_version == MVPP21)
5286 port->first_rxq = port->id * port->nrxqs;
5287 else
5288 port->first_rxq = port->id * priv->max_port_rxqs;
5289
5290 port->of_node = port_node;
5291 port->phy_interface = phy_mode;
5292 port->comphy = comphy;
5293
5294 if (priv->hw_version == MVPP21) {
5295 port->base = devm_platform_ioremap_resource(pdev, 2 + id);
5296 if (IS_ERR(port->base)) {
5297 err = PTR_ERR(port->base);
5298 goto err_free_irq;
5299 }
5300
5301 port->stats_base = port->priv->lms_base +
5302 MVPP21_MIB_COUNTERS_OFFSET +
5303 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
5304 } else {
5305 if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
5306 &port->gop_id)) {
5307 err = -EINVAL;
5308 dev_err(&pdev->dev, "missing gop-port-id value\n");
5309 goto err_deinit_qvecs;
5310 }
5311
5312 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
5313 port->stats_base = port->priv->iface_base +
5314 MVPP22_MIB_COUNTERS_OFFSET +
5315 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
5316 }
5317
5318 /* Alloc per-cpu and ethtool stats */
5319 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
5320 if (!port->stats) {
5321 err = -ENOMEM;
5322 goto err_free_irq;
5323 }
5324
5325 port->ethtool_stats = devm_kcalloc(&pdev->dev,
5326 MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
5327 sizeof(u64), GFP_KERNEL);
5328 if (!port->ethtool_stats) {
5329 err = -ENOMEM;
5330 goto err_free_stats;
5331 }
5332
5333 mutex_init(&port->gather_stats_lock);
5334 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
5335
5336 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
5337
5338 port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
5339 port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
5340 SET_NETDEV_DEV(dev, &pdev->dev);
5341
5342 err = mvpp2_port_init(port);
5343 if (err < 0) {
5344 dev_err(&pdev->dev, "failed to init port %d\n", id);
5345 goto err_free_stats;
5346 }
5347
5348 mvpp2_port_periodic_xon_disable(port);
5349
5350 mvpp2_mac_reset_assert(port);
5351 mvpp22_pcs_reset_assert(port);
5352
5353 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
5354 if (!port->pcpu) {
5355 err = -ENOMEM;
5356 goto err_free_txq_pcpu;
5357 }
5358
5359 if (!port->has_tx_irqs) {
5360 for (thread = 0; thread < priv->nthreads; thread++) {
5361 port_pcpu = per_cpu_ptr(port->pcpu, thread);
5362
5363 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
5364 HRTIMER_MODE_REL_PINNED_SOFT);
5365 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
5366 port_pcpu->timer_scheduled = false;
5367 port_pcpu->dev = dev;
5368 }
5369 }
5370
5371 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5372 NETIF_F_TSO;
5373 dev->features = features | NETIF_F_RXCSUM;
5374 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
5375 NETIF_F_HW_VLAN_CTAG_FILTER;
5376
5377 if (mvpp22_rss_is_supported()) {
5378 dev->hw_features |= NETIF_F_RXHASH;
5379 dev->features |= NETIF_F_NTUPLE;
5380 }
5381
5382 if (!port->priv->percpu_pools)
5383 mvpp2_set_hw_csum(port, port->pool_long->id);
5384
5385 dev->vlan_features |= features;
5386 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
5387 dev->priv_flags |= IFF_UNICAST_FLT;
5388
5389 /* MTU range: 68 - 9704 */
5390 dev->min_mtu = ETH_MIN_MTU;
5391 /* 9704 == 9728 - 20 and rounding to 8 */
5392 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
5393 dev->dev.of_node = port_node;
5394
5395 /* Phylink isn't used w/ ACPI as of now */
5396 if (port_node) {
5397 port->phylink_config.dev = &dev->dev;
5398 port->phylink_config.type = PHYLINK_NETDEV;
5399
5400 phylink = phylink_create(&port->phylink_config, port_fwnode,
5401 phy_mode, &mvpp2_phylink_ops);
5402 if (IS_ERR(phylink)) {
5403 err = PTR_ERR(phylink);
5404 goto err_free_port_pcpu;
5405 }
5406 port->phylink = phylink;
5407 } else {
5408 port->phylink = NULL;
5409 }
5410
5411 err = register_netdev(dev);
5412 if (err < 0) {
5413 dev_err(&pdev->dev, "failed to register netdev\n");
5414 goto err_phylink;
5415 }
5416 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
5417
5418 priv->port_list[priv->port_count++] = port;
5419
5420 return 0;
5421
5422 err_phylink:
5423 if (port->phylink)
5424 phylink_destroy(port->phylink);
5425 err_free_port_pcpu:
5426 free_percpu(port->pcpu);
5427 err_free_txq_pcpu:
5428 for (i = 0; i < port->ntxqs; i++)
5429 free_percpu(port->txqs[i]->pcpu);
5430 err_free_stats:
5431 free_percpu(port->stats);
5432 err_free_irq:
5433 if (port->link_irq)
5434 irq_dispose_mapping(port->link_irq);
5435 err_deinit_qvecs:
5436 mvpp2_queue_vectors_deinit(port);
5437 err_free_netdev:
5438 free_netdev(dev);
5439 return err;
5440 }
5441
5442 /* Ports removal routine */
5443 static void mvpp2_port_remove(struct mvpp2_port *port)
5444 {
5445 int i;
5446
5447 unregister_netdev(port->dev);
5448 if (port->phylink)
5449 phylink_destroy(port->phylink);
5450 free_percpu(port->pcpu);
5451 free_percpu(port->stats);
5452 for (i = 0; i < port->ntxqs; i++)
5453 free_percpu(port->txqs[i]->pcpu);
5454 mvpp2_queue_vectors_deinit(port);
5455 if (port->link_irq)
5456 irq_dispose_mapping(port->link_irq);
5457 free_netdev(port->dev);
5458 }
5459
5460 /* Initialize decoding windows */
5461 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
5462 struct mvpp2 *priv)
5463 {
5464 u32 win_enable;
5465 int i;
5466
5467 for (i = 0; i < 6; i++) {
5468 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
5469 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
5470
5471 if (i < 4)
5472 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
5473 }
5474
5475 win_enable = 0;
5476
5477 for (i = 0; i < dram->num_cs; i++) {
5478 const struct mbus_dram_window *cs = dram->cs + i;
5479
5480 mvpp2_write(priv, MVPP2_WIN_BASE(i),
5481 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
5482 dram->mbus_dram_target_id);
5483
5484 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
5485 (cs->size - 1) & 0xffff0000);
5486
5487 win_enable |= (1 << i);
5488 }
5489
5490 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
5491 }
5492
5493 /* Initialize Rx FIFO's */
5494 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
5495 {
5496 int port;
5497
5498 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
5499 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
5500 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
5501 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
5502 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
5503 }
5504
5505 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
5506 MVPP2_RX_FIFO_PORT_MIN_PKT);
5507 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
5508 }
5509
5510 static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
5511 {
5512 int port;
5513
5514 /* The FIFO size parameters are set depending on the maximum speed a
5515 * given port can handle:
5516 * - Port 0: 10Gbps
5517 * - Port 1: 2.5Gbps
5518 * - Ports 2 and 3: 1Gbps
5519 */
5520
5521 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
5522 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
5523 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
5524 MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
5525
5526 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
5527 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
5528 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
5529 MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
5530
5531 for (port = 2; port < MVPP2_MAX_PORTS; port++) {
5532 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
5533 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
5534 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
5535 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
5536 }
5537
5538 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
5539 MVPP2_RX_FIFO_PORT_MIN_PKT);
5540 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
5541 }
5542
5543 /* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
5544 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do
5545 * 10G, it gets a 10kB FIFO while the other ports get 3kB each.
5546 */
5547 static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
5548 {
5549 int port, size, thrs;
5550
5551 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
5552 if (port == 0) {
5553 size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
5554 thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
5555 } else {
5556 size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
5557 thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
5558 }
5559 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
5560 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
5561 }
5562 }
5563
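/* Program the AXI bridge attributes: cacheable read/write codes in the
* outer shareable domain for BM, descriptor and buffer data accesses,
* and non-cacheable system-domain codes for normal accesses.
*/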
5564 static void mvpp2_axi_init(struct mvpp2 *priv)
5565 {
5566 u32 val, rdval, wrval;
5567
5568 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
5569
5570 /* AXI Bridge Configuration */
5571
5572 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
5573 << MVPP22_AXI_ATTR_CACHE_OFFS;
5574 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
5575 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
5576
5577 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
5578 << MVPP22_AXI_ATTR_CACHE_OFFS;
5579 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
5580 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
5581
5582 /* BM */
5583 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
5584 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
5585
5586 /* Descriptor */
5587 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
5588 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
5589 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
5590 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
5591
5592 /* Buffer Data */
5593 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
5594 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
5595
5596 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
5597 << MVPP22_AXI_CODE_CACHE_OFFS;
5598 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
5599 << MVPP22_AXI_CODE_DOMAIN_OFFS;
5600 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
5601 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
5602
5603 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
5604 << MVPP22_AXI_CODE_CACHE_OFFS;
5605 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
5606 << MVPP22_AXI_CODE_DOMAIN_OFFS;
5607
5608 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
5609
5610 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
5611 << MVPP22_AXI_CODE_CACHE_OFFS;
5612 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
5613 << MVPP22_AXI_CODE_DOMAIN_OFFS;
5614
5615 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
5616 }
5617
5618 /* Initialize network controller common part HW */
5619 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
5620 {
5621 const struct mbus_dram_target_info *dram_target_info;
5622 int err, i;
5623 u32 val;
5624
5625 /* MBUS windows configuration */
5626 dram_target_info = mv_mbus_dram_info();
5627 if (dram_target_info)
5628 mvpp2_conf_mbus_windows(dram_target_info, priv);
5629
5630 if (priv->hw_version == MVPP22)
5631 mvpp2_axi_init(priv);
5632
5633 /* Disable HW PHY polling */
5634 if (priv->hw_version == MVPP21) {
5635 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
5636 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
5637 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
5638 } else {
5639 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
5640 val &= ~MVPP22_SMI_POLLING_EN;
5641 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
5642 }
5643
5644 /* Allocate and initialize aggregated TXQs */
5645 priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
5646 sizeof(*priv->aggr_txqs),
5647 GFP_KERNEL);
5648 if (!priv->aggr_txqs)
5649 return -ENOMEM;
5650
5651 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
5652 priv->aggr_txqs[i].id = i;
5653 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
5654 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
5655 if (err < 0)
5656 return err;
5657 }
5658
5659 /* Fifo Init */
5660 if (priv->hw_version == MVPP21) {
5661 mvpp2_rx_fifo_init(priv);
5662 } else {
5663 mvpp22_rx_fifo_init(priv);
5664 mvpp22_tx_fifo_init(priv);
5665 }
5666
5667 if (priv->hw_version == MVPP21)
5668 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
5669 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
5670
5671 /* Allow cache snoop when transmitting packets */
5672 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
5673
5674 /* Buffer Manager initialization */
5675 err = mvpp2_bm_init(&pdev->dev, priv);
5676 if (err < 0)
5677 return err;
5678
5679 /* Parser default initialization */
5680 err = mvpp2_prs_default_init(pdev, priv);
5681 if (err < 0)
5682 return err;
5683
5684 /* Classifier default initialization */
5685 mvpp2_cls_init(priv);
5686
5687 return 0;
5688 }
5689
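/* Probe the platform device: identify the PPv2.1/PPv2.2 variant from
* the DT or ACPI match data, map the register regions, enable the
* clocks, initialize the common controller parts, then create one
* netdev per port and the statistics workqueue.
*/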
5690 static int mvpp2_probe(struct platform_device *pdev)
5691 {
5692 const struct acpi_device_id *acpi_id;
5693 struct fwnode_handle *fwnode = pdev->dev.fwnode;
5694 struct fwnode_handle *port_fwnode;
5695 struct mvpp2 *priv;
5696 struct resource *res;
5697 void __iomem *base;
5698 int i, shared;
5699 int err;
5700
5701 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
5702 if (!priv)
5703 return -ENOMEM;
5704
5705 if (has_acpi_companion(&pdev->dev)) {
5706 acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
5707 &pdev->dev);
5708 if (!acpi_id)
5709 return -EINVAL;
5710 priv->hw_version = (unsigned long)acpi_id->driver_data;
5711 } else {
5712 priv->hw_version =
5713 (unsigned long)of_device_get_match_data(&pdev->dev);
5714 }
5715
5716 /* multi queue mode isn't supported on PPV2.1, fallback to single
5717 * mode
5718 */
5719 if (priv->hw_version == MVPP21)
5720 queue_mode = MVPP2_QDIST_SINGLE_MODE;
5721
5722 base = devm_platform_ioremap_resource(pdev, 0);
5723 if (IS_ERR(base))
5724 return PTR_ERR(base);
5725
5726 if (priv->hw_version == MVPP21) {
5727 priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
5728 if (IS_ERR(priv->lms_base))
5729 return PTR_ERR(priv->lms_base);
5730 } else {
5731 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
5732 if (has_acpi_companion(&pdev->dev)) {
5733 /* In case the MDIO memory region is declared in
5734 * the ACPI, it can already appear as 'in-use'
5735 * in the OS. Because it is overlapped by second
5736 * region of the network controller, make
5737 * sure it is released, before requesting it again.
5738 * The care is taken by mvpp2 driver to avoid
5739 * concurrent access to this memory region.
5740 */
5741 release_resource(res);
5742 }
5743 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
5744 if (IS_ERR(priv->iface_base))
5745 return PTR_ERR(priv->iface_base);
5746 }
5747
5748 if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
5749 priv->sysctrl_base =
5750 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5751 "marvell,system-controller");
5752 if (IS_ERR(priv->sysctrl_base))
5753 /* The system controller regmap is optional for dt
5754 * compatibility reasons. When not provided, the
5755 * configuration of the GoP relies on the
5756 * firmware/bootloader.
5757 */
5758 priv->sysctrl_base = NULL;
5759 }
5760
5761 if (priv->hw_version == MVPP22 &&
5762 mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
5763 priv->percpu_pools = 1;
5764
5765 mvpp2_setup_bm_pool();
5766
5767
5768 priv->nthreads = min_t(unsigned int, num_present_cpus(),
5769 MVPP2_MAX_THREADS);
5770
5771 shared = num_present_cpus() - priv->nthreads;
5772 if (shared > 0)
5773 bitmap_fill(&priv->lock_map,
5774 min_t(int, shared, MVPP2_MAX_THREADS));
5775
5776 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
5777 u32 addr_space_sz;
5778
5779 addr_space_sz = (priv->hw_version == MVPP21 ?
5780 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
5781 priv->swth_base[i] = base + i * addr_space_sz;
5782 }
5783
5784 if (priv->hw_version == MVPP21)
5785 priv->max_port_rxqs = 8;
5786 else
5787 priv->max_port_rxqs = 32;
5788
5789 if (dev_of_node(&pdev->dev)) {
5790 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
5791 if (IS_ERR(priv->pp_clk))
5792 return PTR_ERR(priv->pp_clk);
5793 err = clk_prepare_enable(priv->pp_clk);
5794 if (err < 0)
5795 return err;
5796
5797 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
5798 if (IS_ERR(priv->gop_clk)) {
5799 err = PTR_ERR(priv->gop_clk);
5800 goto err_pp_clk;
5801 }
5802 err = clk_prepare_enable(priv->gop_clk);
5803 if (err < 0)
5804 goto err_pp_clk;
5805
5806 if (priv->hw_version == MVPP22) {
5807 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
5808 if (IS_ERR(priv->mg_clk)) {
5809 err = PTR_ERR(priv->mg_clk);
5810 goto err_gop_clk;
5811 }
5812
5813 err = clk_prepare_enable(priv->mg_clk);
5814 if (err < 0)
5815 goto err_gop_clk;
5816
5817 priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
5818 if (IS_ERR(priv->mg_core_clk)) {
5819 priv->mg_core_clk = NULL;
5820 } else {
5821 err = clk_prepare_enable(priv->mg_core_clk);
5822 if (err < 0)
5823 goto err_mg_clk;
5824 }
5825 }
5826
5827 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
5828 if (IS_ERR(priv->axi_clk)) {
5829 err = PTR_ERR(priv->axi_clk);
5830 if (err == -EPROBE_DEFER)
5831 goto err_mg_core_clk;
5832 priv->axi_clk = NULL;
5833 } else {
5834 err = clk_prepare_enable(priv->axi_clk);
5835 if (err < 0)
5836 goto err_mg_core_clk;
5837 }
5838
5839 /* Get system's tclk rate */
5840 priv->tclk = clk_get_rate(priv->pp_clk);
5841 } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
5842 &priv->tclk)) {
5843 dev_err(&pdev->dev, "missing clock-frequency value\n");
5844 return -EINVAL;
5845 }
5846
5847 if (priv->hw_version == MVPP22) {
5848 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
5849 if (err)
5850 goto err_axi_clk;
5851
5852 /* Sadly, the BM pools all share the same register to store
5853 * the high 32 bits of their address, so they must all have the
5854 * same high 32 bits: restrict coherent memory to DMA_BIT_MASK(32).
5855 */
5856 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5857 if (err)
5858 goto err_axi_clk;
5859 }
5860
5861 /* Initialize network controller */
5862 err = mvpp2_init(pdev, priv);
5863 if (err < 0) {
5864 dev_err(&pdev->dev, "failed to initialize controller\n");
5865 goto err_axi_clk;
5866 }
5867
5868 /* Initialize ports */
5869 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
5870 err = mvpp2_port_probe(pdev, port_fwnode, priv);
5871 if (err < 0)
5872 goto err_port_probe;
5873 }
5874
5875 if (priv->port_count == 0) {
5876 dev_err(&pdev->dev, "no ports enabled\n");
5877 err = -ENODEV;
5878 goto err_axi_clk;
5879 }
5880
5881 /* Statistics must be gathered regularly because some of them (like
5882 * TX packets counters) are 32-bit registers and could overflow quite
5883 * quickly. For instance, a 10Gb link used at full bandwidth with the
5884 * smallest packets (64B) will overflow a 32-bit counter in less than
5885 * 30 seconds. Then, use a workqueue to fill 64-bit counters.
5886 */
5887 snprintf(priv->queue_name, sizeof(priv->queue_name),
5888 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
5889 priv->port_count > 1 ? "+" : "");
5890 priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
5891 if (!priv->stats_queue) {
5892 err = -ENOMEM;
5893 goto err_port_probe;
5894 }
5895
5896 mvpp2_dbgfs_init(priv, pdev->name);
5897
5898 platform_set_drvdata(pdev, priv);
5899 return 0;
5900
5901 err_port_probe:
5902 i = 0;
5903 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
5904 if (priv->port_list[i])
5905 mvpp2_port_remove(priv->port_list[i]);
5906 i++;
5907 }
5908 err_axi_clk:
5909 clk_disable_unprepare(priv->axi_clk);
5910
5911 err_mg_core_clk:
5912 if (priv->hw_version == MVPP22)
5913 clk_disable_unprepare(priv->mg_core_clk);
5914 err_mg_clk:
5915 if (priv->hw_version == MVPP22)
5916 clk_disable_unprepare(priv->mg_clk);
5917 err_gop_clk:
5918 clk_disable_unprepare(priv->gop_clk);
5919 err_pp_clk:
5920 clk_disable_unprepare(priv->pp_clk);
5921 return err;
5922 }
5923
5924 static int mvpp2_remove(struct platform_device *pdev)
5925 {
5926 struct mvpp2 *priv = platform_get_drvdata(pdev);
5927 struct fwnode_handle *fwnode = pdev->dev.fwnode;
5928 struct fwnode_handle *port_fwnode;
5929 int i = 0;
5930
5931 mvpp2_dbgfs_cleanup(priv);
5932
5933 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
5934 if (priv->port_list[i]) {
5935 mutex_destroy(&priv->port_list[i]->gather_stats_lock);
5936 mvpp2_port_remove(priv->port_list[i]);
5937 }
5938 i++;
5939 }
5940
5941 destroy_workqueue(priv->stats_queue);
5942
5943 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
5944 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
5945
5946 mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
5947 }
5948
5949 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
5950 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
5951
5952 dma_free_coherent(&pdev->dev,
5953 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
5954 aggr_txq->descs,
5955 aggr_txq->descs_dma);
5956 }
5957
5958 if (is_acpi_node(port_fwnode))
5959 return 0;
5960
5961 clk_disable_unprepare(priv->axi_clk);
5962 clk_disable_unprepare(priv->mg_core_clk);
5963 clk_disable_unprepare(priv->mg_clk);
5964 clk_disable_unprepare(priv->pp_clk);
5965 clk_disable_unprepare(priv->gop_clk);
5966
5967 return 0;
5968 }
5969
5970 static const struct of_device_id mvpp2_match[] = {
5971 {
5972 .compatible = "marvell,armada-375-pp2",
5973 .data = (void *)MVPP21,
5974 },
5975 {
5976 .compatible = "marvell,armada-7k-pp22",
5977 .data = (void *)MVPP22,
5978 },
5979 { }
5980 };
5981 MODULE_DEVICE_TABLE(of, mvpp2_match);
5982
5983 static const struct acpi_device_id mvpp2_acpi_match[] = {
5984 { "MRVL0110", MVPP22 },
5985 { },
5986 };
5987 MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
5988
5989 static struct platform_driver mvpp2_driver = {
5990 .probe = mvpp2_probe,
5991 .remove = mvpp2_remove,
5992 .driver = {
5993 .name = MVPP2_DRIVER_NAME,
5994 .of_match_table = mvpp2_match,
5995 .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
5996 },
5997 };
5998
5999 module_platform_driver(mvpp2_driver);
6000
6001 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6002 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
6003 MODULE_LICENSE("GPL v2");