1/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
29 *     and you.
30 *
31 *     The Software IS NOT an item of Licensed Software or Licensed Product
32 *     under any End User Software License Agreement or Agreement for Licensed
33 *     Product with Synopsys or any supplement thereto.  Permission is hereby
34 *     granted, free of charge, to any person obtaining a copy of this software
35 *     annotated with this license and the Software, to deal in the Software
36 *     without restriction, including without limitation the rights to use,
37 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 *     of the Software, and to permit persons to whom the Software is furnished
39 *     to do so, subject to the following conditions:
40 *
41 *     The above copyright notice and this permission notice shall be included
42 *     in all copies or substantial portions of the Software.
43 *
44 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 *     THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 *     * Redistributions of source code must retain the above copyright
65 *       notice, this list of conditions and the following disclaimer.
66 *     * Redistributions in binary form must reproduce the above copyright
67 *       notice, this list of conditions and the following disclaimer in the
68 *       documentation and/or other materials provided with the distribution.
69 *     * Neither the name of Advanced Micro Devices, Inc. nor the
70 *       names of its contributors may be used to endorse or promote products
71 *       derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
89 *     and you.
90 *
91 *     The Software IS NOT an item of Licensed Software or Licensed Product
92 *     under any End User Software License Agreement or Agreement for Licensed
93 *     Product with Synopsys or any supplement thereto.  Permission is hereby
94 *     granted, free of charge, to any person obtaining a copy of this software
95 *     annotated with this license and the Software, to deal in the Software
96 *     without restriction, including without limitation the rights to use,
97 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 *     of the Software, and to permit persons to whom the Software is furnished
99 *     to do so, subject to the following conditions:
100 *
101 *     The above copyright notice and this permission notice shall be included
102 *     in all copies or substantial portions of the Software.
103 *
104 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 *     THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
132
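/* Allocate the channel structures (one per DMA channel, using the larger of
 * the Tx and Rx ring counts) together with the Tx and Rx ring structures,
 * and record each channel's DMA register base.  When per-channel interrupts
 * are in use, the DMA interrupt for channel i is taken from platform IRQ
 * index i + 1.
 */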
133static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
134{
135	struct xgbe_channel *channel_mem, *channel;
136	struct xgbe_ring *tx_ring, *rx_ring;
137	unsigned int count, i;
138	int ret = -ENOMEM;
139
140	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
141
142	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
143	if (!channel_mem)
144		goto err_channel;
145
146	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
147			  GFP_KERNEL);
148	if (!tx_ring)
149		goto err_tx_ring;
150
151	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
152			  GFP_KERNEL);
153	if (!rx_ring)
154		goto err_rx_ring;
155
156	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
158		channel->pdata = pdata;
159		channel->queue_index = i;
160		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
161				    (DMA_CH_INC * i);
162
163		if (pdata->per_channel_irq) {
164			/* Get the DMA interrupt (offset 1) */
165			ret = platform_get_irq(pdata->pdev, i + 1);
166			if (ret < 0) {
167				netdev_err(pdata->netdev,
168					   "platform_get_irq %u failed\n",
169					   i + 1);
170				goto err_irq;
171			}
172
173			channel->dma_irq = ret;
174		}
175
176		if (i < pdata->tx_ring_count) {
177			spin_lock_init(&tx_ring->lock);
178			channel->tx_ring = tx_ring++;
179		}
180
181		if (i < pdata->rx_ring_count) {
182			spin_lock_init(&rx_ring->lock);
183			channel->rx_ring = rx_ring++;
184		}
185
186		netif_dbg(pdata, drv, pdata->netdev,
187			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
188			  channel->name, channel->dma_regs, channel->dma_irq,
189			  channel->tx_ring, channel->rx_ring);
190	}
191
192	pdata->channel = channel_mem;
193	pdata->channel_count = count;
194
195	return 0;
196
197err_irq:
198	kfree(rx_ring);
199
200err_rx_ring:
201	kfree(tx_ring);
202
203err_tx_ring:
204	kfree(channel_mem);
205
206err_channel:
207	return ret;
208}
209
210static void xgbe_free_channels(struct xgbe_prv_data *pdata)
211{
212	if (!pdata->channel)
213		return;
214
215	kfree(pdata->channel->rx_ring);
216	kfree(pdata->channel->tx_ring);
217	kfree(pdata->channel);
218
219	pdata->channel = NULL;
220	pdata->channel_count = 0;
221}
222
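/* Ring occupancy helpers: cur and dirty are free-running unsigned indices,
 * so the difference (cur - dirty) gives the number of in-use descriptors
 * even after the counters wrap.
 */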
223static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
224{
225	return (ring->rdesc_count - (ring->cur - ring->dirty));
226}
227
228static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
229{
230	return (ring->cur - ring->dirty);
231}
232
233static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
234				    struct xgbe_ring *ring, unsigned int count)
235{
236	struct xgbe_prv_data *pdata = channel->pdata;
237
238	if (count > xgbe_tx_avail_desc(ring)) {
239		netif_info(pdata, drv, pdata->netdev,
240			   "Tx queue stopped, not enough descriptors available\n");
241		netif_stop_subqueue(pdata->netdev, channel->queue_index);
242		ring->tx.queue_stopped = 1;
243
244		/* If we haven't notified the hardware because of xmit_more
245		 * support, tell it now
246		 */
247		if (ring->tx.xmit_more)
248			pdata->hw_if.tx_start_xmit(channel, ring);
249
250		return NETDEV_TX_BUSY;
251	}
252
253	return 0;
254}
255
256static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
257{
258	unsigned int rx_buf_size;
259
260	if (mtu > XGMAC_JUMBO_PACKET_MTU) {
261		netdev_alert(netdev, "MTU exceeds maximum supported value\n");
262		return -EINVAL;
263	}
264
265	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
266	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
267
268	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
269		      ~(XGBE_RX_BUF_ALIGN - 1);
270
271	return rx_buf_size;
272}
273
274static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
275{
276	struct xgbe_hw_if *hw_if = &pdata->hw_if;
277	struct xgbe_channel *channel;
278	enum xgbe_int int_id;
279	unsigned int i;
280
281	channel = pdata->channel;
282	for (i = 0; i < pdata->channel_count; i++, channel++) {
283		if (channel->tx_ring && channel->rx_ring)
284			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
285		else if (channel->tx_ring)
286			int_id = XGMAC_INT_DMA_CH_SR_TI;
287		else if (channel->rx_ring)
288			int_id = XGMAC_INT_DMA_CH_SR_RI;
289		else
290			continue;
291
292		hw_if->enable_int(channel, int_id);
293	}
294}
295
296static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
297{
298	struct xgbe_hw_if *hw_if = &pdata->hw_if;
299	struct xgbe_channel *channel;
300	enum xgbe_int int_id;
301	unsigned int i;
302
303	channel = pdata->channel;
304	for (i = 0; i < pdata->channel_count; i++, channel++) {
305		if (channel->tx_ring && channel->rx_ring)
306			int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
307		else if (channel->tx_ring)
308			int_id = XGMAC_INT_DMA_CH_SR_TI;
309		else if (channel->rx_ring)
310			int_id = XGMAC_INT_DMA_CH_SR_RI;
311		else
312			continue;
313
314		hw_if->disable_int(channel, int_id);
315	}
316}
317
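/* Device-level interrupt handler.  DMA_ISR summarizes per-channel DMA events
 * as well as MAC/MTL events; Tx/Rx completion work is deferred to NAPI while
 * MMC statistics and Tx timestamp events are handled directly here.
 */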
318static irqreturn_t xgbe_isr(int irq, void *data)
319{
320	struct xgbe_prv_data *pdata = data;
321	struct xgbe_hw_if *hw_if = &pdata->hw_if;
322	struct xgbe_channel *channel;
323	unsigned int dma_isr, dma_ch_isr;
324	unsigned int mac_isr, mac_tssr;
325	unsigned int i;
326
	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts, so checking that this register is non-zero is
	 * enough to know whether any work is pending.
	 */
331	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
332	if (!dma_isr)
333		goto isr_done;
334
335	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
336
337	for (i = 0; i < pdata->channel_count; i++) {
338		if (!(dma_isr & (1 << i)))
339			continue;
340
341		channel = pdata->channel + i;
342
343		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
344		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
345			  i, dma_ch_isr);
346
347		/* The TI or RI interrupt bits may still be set even if using
348		 * per channel DMA interrupts. Check to be sure those are not
349		 * enabled before using the private data napi structure.
350		 */
351		if (!pdata->per_channel_irq &&
352		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
353		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
354			if (napi_schedule_prep(&pdata->napi)) {
355				/* Disable Tx and Rx interrupts */
356				xgbe_disable_rx_tx_ints(pdata);
357
358				/* Turn on polling */
359				__napi_schedule(&pdata->napi);
360			}
361		}
362
363		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
364			pdata->ext_stats.rx_buffer_unavailable++;
365
366		/* Restart the device on a Fatal Bus Error */
367		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
368			schedule_work(&pdata->restart_work);
369
370		/* Clear all interrupt signals */
371		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
372	}
373
374	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
375		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
376
377		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
378			hw_if->tx_mmc_int(pdata);
379
380		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
381			hw_if->rx_mmc_int(pdata);
382
383		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
384			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
385
386			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
387				/* Read Tx Timestamp to clear interrupt */
388				pdata->tx_tstamp =
389					hw_if->get_tx_tstamp(pdata);
390				queue_work(pdata->dev_workqueue,
391					   &pdata->tx_tstamp_work);
392			}
393		}
394	}
395
396isr_done:
397	return IRQ_HANDLED;
398}
399
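/* Per-channel DMA interrupt handler, used only when per_channel_irq is set.
 * The channel interrupt is masked with disable_irq_nosync() while NAPI
 * processing runs on that channel.
 */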
400static irqreturn_t xgbe_dma_isr(int irq, void *data)
401{
402	struct xgbe_channel *channel = data;
403
404	/* Per channel DMA interrupts are enabled, so we use the per
405	 * channel napi structure and not the private data napi structure
406	 */
407	if (napi_schedule_prep(&channel->napi)) {
408		/* Disable Tx and Rx interrupts */
409		disable_irq_nosync(channel->dma_irq);
410
411		/* Turn on polling */
412		__napi_schedule(&channel->napi);
413	}
414
415	return IRQ_HANDLED;
416}
417
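/* Tx coalescing timer callback: kick NAPI so that completed Tx descriptors
 * are processed even when no further interrupts arrive.
 */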
418static void xgbe_tx_timer(unsigned long data)
419{
420	struct xgbe_channel *channel = (struct xgbe_channel *)data;
421	struct xgbe_prv_data *pdata = channel->pdata;
422	struct napi_struct *napi;
423
424	DBGPR("-->xgbe_tx_timer\n");
425
426	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
427
428	if (napi_schedule_prep(napi)) {
429		/* Disable Tx and Rx interrupts */
430		if (pdata->per_channel_irq)
431			disable_irq_nosync(channel->dma_irq);
432		else
433			xgbe_disable_rx_tx_ints(pdata);
434
435		/* Turn on polling */
436		__napi_schedule(napi);
437	}
438
439	channel->tx_timer_active = 0;
440
441	DBGPR("<--xgbe_tx_timer\n");
442}
443
444static void xgbe_service(struct work_struct *work)
445{
446	struct xgbe_prv_data *pdata = container_of(work,
447						   struct xgbe_prv_data,
448						   service_work);
449
450	pdata->phy_if.phy_status(pdata);
451}
452
453static void xgbe_service_timer(unsigned long data)
454{
455	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
456
457	queue_work(pdata->dev_workqueue, &pdata->service_work);
458
459	mod_timer(&pdata->service_timer, jiffies + HZ);
460}
461
462static void xgbe_init_timers(struct xgbe_prv_data *pdata)
463{
464	struct xgbe_channel *channel;
465	unsigned int i;
466
467	setup_timer(&pdata->service_timer, xgbe_service_timer,
468		    (unsigned long)pdata);
469
470	channel = pdata->channel;
471	for (i = 0; i < pdata->channel_count; i++, channel++) {
472		if (!channel->tx_ring)
473			break;
474
475		setup_timer(&channel->tx_timer, xgbe_tx_timer,
476			    (unsigned long)channel);
477	}
478}
479
480static void xgbe_start_timers(struct xgbe_prv_data *pdata)
481{
482	mod_timer(&pdata->service_timer, jiffies + HZ);
483}
484
485static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
486{
487	struct xgbe_channel *channel;
488	unsigned int i;
489
490	del_timer_sync(&pdata->service_timer);
491
492	channel = pdata->channel;
493	for (i = 0; i < pdata->channel_count; i++, channel++) {
494		if (!channel->tx_ring)
495			break;
496
497		del_timer_sync(&channel->tx_timer);
498	}
499}
500
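/* Read the MAC hardware feature registers (MAC_HWF0R..MAC_HWF2R) and decode
 * them into the hw_feat structure.  Encoded fields such as the hash table
 * size, DMA address width and the queue/channel/TC counts are translated
 * into their actual values.
 */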
501void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
502{
503	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
504	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
505
506	DBGPR("-->xgbe_get_all_hw_features\n");
507
508	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
509	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
510	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
511
512	memset(hw_feat, 0, sizeof(*hw_feat));
513
514	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
515
516	/* Hardware feature register 0 */
517	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
518	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
519	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
520	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
521	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
522	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
523	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
524	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
525	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
526	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
527	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
528	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
529					      ADDMACADRSEL);
530	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
531	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
532
533	/* Hardware feature register 1 */
534	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
535						RXFIFOSIZE);
536	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
537						TXFIFOSIZE);
538	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
539	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
540	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
541	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
542	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
543	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
544	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
545	hw_feat->tc_cnt	       = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
546	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
547						  HASHTBLSZ);
548	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
549						  L3L4FNUM);
550
551	/* Hardware feature register 2 */
552	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
553	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
554	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
555	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
556	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
557	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
558
559	/* Translate the Hash Table size into actual number */
560	switch (hw_feat->hash_table_size) {
561	case 0:
562		break;
563	case 1:
564		hw_feat->hash_table_size = 64;
565		break;
566	case 2:
567		hw_feat->hash_table_size = 128;
568		break;
569	case 3:
570		hw_feat->hash_table_size = 256;
571		break;
572	}
573
574	/* Translate the address width setting into actual number */
575	switch (hw_feat->dma_width) {
576	case 0:
577		hw_feat->dma_width = 32;
578		break;
579	case 1:
580		hw_feat->dma_width = 40;
581		break;
582	case 2:
583		hw_feat->dma_width = 48;
584		break;
585	default:
586		hw_feat->dma_width = 32;
587	}
588
589	/* The Queue, Channel and TC counts are zero based so increment them
590	 * to get the actual number
591	 */
592	hw_feat->rx_q_cnt++;
593	hw_feat->tx_q_cnt++;
594	hw_feat->rx_ch_cnt++;
595	hw_feat->tx_ch_cnt++;
596	hw_feat->tc_cnt++;
597
598	DBGPR("<--xgbe_get_all_hw_features\n");
599}
600
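/* Register (optionally) and enable the NAPI instances: one per channel when
 * per-channel interrupts are used, otherwise a single instance shared by all
 * channels.
 */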
601static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
602{
603	struct xgbe_channel *channel;
604	unsigned int i;
605
606	if (pdata->per_channel_irq) {
607		channel = pdata->channel;
608		for (i = 0; i < pdata->channel_count; i++, channel++) {
609			if (add)
610				netif_napi_add(pdata->netdev, &channel->napi,
611					       xgbe_one_poll, NAPI_POLL_WEIGHT);
612
613			napi_enable(&channel->napi);
614		}
615	} else {
616		if (add)
617			netif_napi_add(pdata->netdev, &pdata->napi,
618				       xgbe_all_poll, NAPI_POLL_WEIGHT);
619
620		napi_enable(&pdata->napi);
621	}
622}
623
624static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
625{
626	struct xgbe_channel *channel;
627	unsigned int i;
628
629	if (pdata->per_channel_irq) {
630		channel = pdata->channel;
631		for (i = 0; i < pdata->channel_count; i++, channel++) {
632			napi_disable(&channel->napi);
633
634			if (del)
635				netif_napi_del(&channel->napi);
636		}
637	} else {
638		napi_disable(&pdata->napi);
639
640		if (del)
641			netif_napi_del(&pdata->napi);
642	}
643}
644
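/* Request the device-level interrupt and, if per-channel interrupts are in
 * use, one DMA interrupt per channel.  On failure, any IRQs acquired so far
 * are released again.
 */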
645static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
646{
647	struct xgbe_channel *channel;
648	struct net_device *netdev = pdata->netdev;
649	unsigned int i;
650	int ret;
651
652	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
653			       netdev->name, pdata);
654	if (ret) {
655		netdev_alert(netdev, "error requesting irq %d\n",
656			     pdata->dev_irq);
657		return ret;
658	}
659
660	if (!pdata->per_channel_irq)
661		return 0;
662
663	channel = pdata->channel;
664	for (i = 0; i < pdata->channel_count; i++, channel++) {
665		snprintf(channel->dma_irq_name,
666			 sizeof(channel->dma_irq_name) - 1,
667			 "%s-TxRx-%u", netdev_name(netdev),
668			 channel->queue_index);
669
670		ret = devm_request_irq(pdata->dev, channel->dma_irq,
671				       xgbe_dma_isr, 0,
672				       channel->dma_irq_name, channel);
673		if (ret) {
674			netdev_alert(netdev, "error requesting irq %d\n",
675				     channel->dma_irq);
676			goto err_irq;
677		}
678	}
679
680	return 0;
681
682err_irq:
683	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
684	for (i--, channel--; i < pdata->channel_count; i--, channel--)
685		devm_free_irq(pdata->dev, channel->dma_irq, channel);
686
687	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
688
689	return ret;
690}
691
692static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
693{
694	struct xgbe_channel *channel;
695	unsigned int i;
696
697	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
698
699	if (!pdata->per_channel_irq)
700		return;
701
702	channel = pdata->channel;
703	for (i = 0; i < pdata->channel_count; i++, channel++)
704		devm_free_irq(pdata->dev, channel->dma_irq, channel);
705}
706
707void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
708{
709	struct xgbe_hw_if *hw_if = &pdata->hw_if;
710
711	DBGPR("-->xgbe_init_tx_coalesce\n");
712
713	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
714	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
715
716	hw_if->config_tx_coalesce(pdata);
717
718	DBGPR("<--xgbe_init_tx_coalesce\n");
719}
720
721void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
722{
723	struct xgbe_hw_if *hw_if = &pdata->hw_if;
724
725	DBGPR("-->xgbe_init_rx_coalesce\n");
726
727	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
728	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
729	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
730
731	hw_if->config_rx_coalesce(pdata);
732
733	DBGPR("<--xgbe_init_rx_coalesce\n");
734}
735
736static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
737{
738	struct xgbe_desc_if *desc_if = &pdata->desc_if;
739	struct xgbe_channel *channel;
740	struct xgbe_ring *ring;
741	struct xgbe_ring_data *rdata;
742	unsigned int i, j;
743
744	DBGPR("-->xgbe_free_tx_data\n");
745
746	channel = pdata->channel;
747	for (i = 0; i < pdata->channel_count; i++, channel++) {
748		ring = channel->tx_ring;
749		if (!ring)
750			break;
751
752		for (j = 0; j < ring->rdesc_count; j++) {
753			rdata = XGBE_GET_DESC_DATA(ring, j);
754			desc_if->unmap_rdata(pdata, rdata);
755		}
756	}
757
758	DBGPR("<--xgbe_free_tx_data\n");
759}
760
761static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
762{
763	struct xgbe_desc_if *desc_if = &pdata->desc_if;
764	struct xgbe_channel *channel;
765	struct xgbe_ring *ring;
766	struct xgbe_ring_data *rdata;
767	unsigned int i, j;
768
769	DBGPR("-->xgbe_free_rx_data\n");
770
771	channel = pdata->channel;
772	for (i = 0; i < pdata->channel_count; i++, channel++) {
773		ring = channel->rx_ring;
774		if (!ring)
775			break;
776
777		for (j = 0; j < ring->rdesc_count; j++) {
778			rdata = XGBE_GET_DESC_DATA(ring, j);
779			desc_if->unmap_rdata(pdata, rdata);
780		}
781	}
782
783	DBGPR("<--xgbe_free_rx_data\n");
784}
785
786static int xgbe_phy_init(struct xgbe_prv_data *pdata)
787{
788	pdata->phy_link = -1;
789	pdata->phy_speed = SPEED_UNKNOWN;
790
791	return pdata->phy_if.phy_reset(pdata);
792}
793
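/* Quiesce the device for a power-down request: stop the queues and timers,
 * power down the Tx/Rx paths and disable NAPI.  xgbe_powerup() reverses the
 * sequence.  The caller argument distinguishes ioctl-initiated requests from
 * driver-internal ones.
 */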
794int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
795{
796	struct xgbe_prv_data *pdata = netdev_priv(netdev);
797	struct xgbe_hw_if *hw_if = &pdata->hw_if;
798	unsigned long flags;
799
800	DBGPR("-->xgbe_powerdown\n");
801
802	if (!netif_running(netdev) ||
803	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
804		netdev_alert(netdev, "Device is already powered down\n");
805		DBGPR("<--xgbe_powerdown\n");
806		return -EINVAL;
807	}
808
809	spin_lock_irqsave(&pdata->lock, flags);
810
811	if (caller == XGMAC_DRIVER_CONTEXT)
812		netif_device_detach(netdev);
813
814	netif_tx_stop_all_queues(netdev);
815
816	xgbe_stop_timers(pdata);
817	flush_workqueue(pdata->dev_workqueue);
818
819	hw_if->powerdown_tx(pdata);
820	hw_if->powerdown_rx(pdata);
821
822	xgbe_napi_disable(pdata, 0);
823
824	pdata->power_down = 1;
825
826	spin_unlock_irqrestore(&pdata->lock, flags);
827
828	DBGPR("<--xgbe_powerdown\n");
829
830	return 0;
831}
832
833int xgbe_powerup(struct net_device *netdev, unsigned int caller)
834{
835	struct xgbe_prv_data *pdata = netdev_priv(netdev);
836	struct xgbe_hw_if *hw_if = &pdata->hw_if;
837	unsigned long flags;
838
839	DBGPR("-->xgbe_powerup\n");
840
841	if (!netif_running(netdev) ||
842	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
843		netdev_alert(netdev, "Device is already powered up\n");
844		DBGPR("<--xgbe_powerup\n");
845		return -EINVAL;
846	}
847
848	spin_lock_irqsave(&pdata->lock, flags);
849
850	pdata->power_down = 0;
851
852	xgbe_napi_enable(pdata, 0);
853
854	hw_if->powerup_tx(pdata);
855	hw_if->powerup_rx(pdata);
856
857	if (caller == XGMAC_DRIVER_CONTEXT)
858		netif_device_attach(netdev);
859
860	netif_tx_start_all_queues(netdev);
861
862	xgbe_start_timers(pdata);
863
864	spin_unlock_irqrestore(&pdata->lock, flags);
865
866	DBGPR("<--xgbe_powerup\n");
867
868	return 0;
869}
870
871static int xgbe_start(struct xgbe_prv_data *pdata)
872{
873	struct xgbe_hw_if *hw_if = &pdata->hw_if;
874	struct xgbe_phy_if *phy_if = &pdata->phy_if;
875	struct net_device *netdev = pdata->netdev;
876	int ret;
877
878	DBGPR("-->xgbe_start\n");
879
880	hw_if->init(pdata);
881
882	ret = phy_if->phy_start(pdata);
883	if (ret)
884		goto err_phy;
885
886	xgbe_napi_enable(pdata, 1);
887
888	ret = xgbe_request_irqs(pdata);
889	if (ret)
890		goto err_napi;
891
892	hw_if->enable_tx(pdata);
893	hw_if->enable_rx(pdata);
894
895	netif_tx_start_all_queues(netdev);
896
897	xgbe_start_timers(pdata);
898	queue_work(pdata->dev_workqueue, &pdata->service_work);
899
900	DBGPR("<--xgbe_start\n");
901
902	return 0;
903
904err_napi:
905	xgbe_napi_disable(pdata, 1);
906
907	phy_if->phy_stop(pdata);
908
909err_phy:
910	hw_if->exit(pdata);
911
912	return ret;
913}
914
915static void xgbe_stop(struct xgbe_prv_data *pdata)
916{
917	struct xgbe_hw_if *hw_if = &pdata->hw_if;
918	struct xgbe_phy_if *phy_if = &pdata->phy_if;
919	struct xgbe_channel *channel;
920	struct net_device *netdev = pdata->netdev;
921	struct netdev_queue *txq;
922	unsigned int i;
923
924	DBGPR("-->xgbe_stop\n");
925
926	netif_tx_stop_all_queues(netdev);
927
928	xgbe_stop_timers(pdata);
929	flush_workqueue(pdata->dev_workqueue);
930
931	hw_if->disable_tx(pdata);
932	hw_if->disable_rx(pdata);
933
934	xgbe_free_irqs(pdata);
935
936	xgbe_napi_disable(pdata, 1);
937
938	phy_if->phy_stop(pdata);
939
940	hw_if->exit(pdata);
941
942	channel = pdata->channel;
943	for (i = 0; i < pdata->channel_count; i++, channel++) {
944		if (!channel->tx_ring)
945			continue;
946
947		txq = netdev_get_tx_queue(netdev, channel->queue_index);
948		netdev_tx_reset_queue(txq);
949	}
950
951	DBGPR("<--xgbe_stop\n");
952}
953
954static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
955{
956	DBGPR("-->xgbe_restart_dev\n");
957
958	/* If not running, "restart" will happen on open */
959	if (!netif_running(pdata->netdev))
960		return;
961
962	xgbe_stop(pdata);
963
964	xgbe_free_tx_data(pdata);
965	xgbe_free_rx_data(pdata);
966
967	xgbe_start(pdata);
968
969	DBGPR("<--xgbe_restart_dev\n");
970}
971
972static void xgbe_restart(struct work_struct *work)
973{
974	struct xgbe_prv_data *pdata = container_of(work,
975						   struct xgbe_prv_data,
976						   restart_work);
977
978	rtnl_lock();
979
980	xgbe_restart_dev(pdata);
981
982	rtnl_unlock();
983}
984
985static void xgbe_tx_tstamp(struct work_struct *work)
986{
987	struct xgbe_prv_data *pdata = container_of(work,
988						   struct xgbe_prv_data,
989						   tx_tstamp_work);
990	struct skb_shared_hwtstamps hwtstamps;
991	u64 nsec;
992	unsigned long flags;
993
994	if (pdata->tx_tstamp) {
995		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
996					    pdata->tx_tstamp);
997
998		memset(&hwtstamps, 0, sizeof(hwtstamps));
999		hwtstamps.hwtstamp = ns_to_ktime(nsec);
1000		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
1001	}
1002
1003	dev_kfree_skb_any(pdata->tx_tstamp_skb);
1004
1005	spin_lock_irqsave(&pdata->tstamp_lock, flags);
1006	pdata->tx_tstamp_skb = NULL;
1007	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
1008}
1009
1010static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
1011				      struct ifreq *ifreq)
1012{
1013	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
1014			 sizeof(pdata->tstamp_config)))
1015		return -EFAULT;
1016
1017	return 0;
1018}
1019
1020static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
1021				      struct ifreq *ifreq)
1022{
1023	struct hwtstamp_config config;
1024	unsigned int mac_tscr;
1025
1026	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
1027		return -EFAULT;
1028
1029	if (config.flags)
1030		return -EINVAL;
1031
1032	mac_tscr = 0;
1033
1034	switch (config.tx_type) {
1035	case HWTSTAMP_TX_OFF:
1036		break;
1037
1038	case HWTSTAMP_TX_ON:
1039		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1040		break;
1041
1042	default:
1043		return -ERANGE;
1044	}
1045
1046	switch (config.rx_filter) {
1047	case HWTSTAMP_FILTER_NONE:
1048		break;
1049
1050	case HWTSTAMP_FILTER_ALL:
1051		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
1052		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1053		break;
1054
1055	/* PTP v2, UDP, any kind of event packet */
1056	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1057		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
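		/* Fall through to also apply the PTP v1 settings */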
1058	/* PTP v1, UDP, any kind of event packet */
1059	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1060		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1061		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1062		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
1063		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1064		break;
1065
1066	/* PTP v2, UDP, Sync packet */
1067	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1068		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
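		/* Fall through to also apply the PTP v1 settings */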
1069	/* PTP v1, UDP, Sync packet */
1070	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1071		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1072		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1073		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1074		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1075		break;
1076
1077	/* PTP v2, UDP, Delay_req packet */
1078	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1079		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
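		/* Fall through to also apply the PTP v1 settings */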
1080	/* PTP v1, UDP, Delay_req packet */
1081	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1082		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1083		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1084		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1085		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
1086		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1087		break;
1088
	/* 802.1AS, Ethernet, any kind of event packet */
1090	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1091		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
1092		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
1093		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1094		break;
1095
	/* 802.1AS, Ethernet, Sync packet */
1097	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1098		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
1099		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1100		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1101		break;
1102
	/* 802.1AS, Ethernet, Delay_req packet */
1104	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1105		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
1106		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
1107		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1108		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1109		break;
1110
	/* PTP v2/802.1AS, any layer, any kind of event packet */
1112	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1113		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1114		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
1115		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1116		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1117		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
1118		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1119		break;
1120
	/* PTP v2/802.1AS, any layer, Sync packet */
1122	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1123		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1124		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
1125		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1126		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1127		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1128		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1129		break;
1130
	/* PTP v2/802.1AS, any layer, Delay_req packet */
1132	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1133		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1134		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
1135		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1136		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1137		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
1138		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1139		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1140		break;
1141
1142	default:
1143		return -ERANGE;
1144	}
1145
1146	pdata->hw_if.config_tstamp(pdata, mac_tscr);
1147
1148	memcpy(&pdata->tstamp_config, &config, sizeof(config));
1149
1150	return 0;
1151}
1152
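/* If the packet requested a hardware Tx timestamp, hold a reference to the
 * skb so the timestamp can be delivered once the MAC reports it.  Only one
 * timestamp may be in flight at a time; otherwise fall back to a software
 * timestamp.
 */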
1153static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
1154				struct sk_buff *skb,
1155				struct xgbe_packet_data *packet)
1156{
1157	unsigned long flags;
1158
1159	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
1160		spin_lock_irqsave(&pdata->tstamp_lock, flags);
1161		if (pdata->tx_tstamp_skb) {
1162			/* Another timestamp in progress, ignore this one */
1163			XGMAC_SET_BITS(packet->attributes,
1164				       TX_PACKET_ATTRIBUTES, PTP, 0);
1165		} else {
1166			pdata->tx_tstamp_skb = skb_get(skb);
1167			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1168		}
1169		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
1170	}
1171
1172	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
1173		skb_tx_timestamp(skb);
1174}
1175
1176static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
1177{
1178	if (skb_vlan_tag_present(skb))
1179		packet->vlan_ctag = skb_vlan_tag_get(skb);
1180}
1181
1182static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
1183{
1184	int ret;
1185
1186	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1187			    TSO_ENABLE))
1188		return 0;
1189
1190	ret = skb_cow_head(skb, 0);
1191	if (ret)
1192		return ret;
1193
1194	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1195	packet->tcp_header_len = tcp_hdrlen(skb);
1196	packet->tcp_payload_len = skb->len - packet->header_len;
1197	packet->mss = skb_shinfo(skb)->gso_size;
1198	DBGPR("  packet->header_len=%u\n", packet->header_len);
1199	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
1200	      packet->tcp_header_len, packet->tcp_payload_len);
1201	DBGPR("  packet->mss=%u\n", packet->mss);
1202
1203	/* Update the number of packets that will ultimately be transmitted
1204	 * along with the extra bytes for each extra packet
1205	 */
1206	packet->tx_packets = skb_shinfo(skb)->gso_segs;
1207	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
1208
1209	return 0;
1210}
1211
1212static int xgbe_is_tso(struct sk_buff *skb)
1213{
1214	if (skb->ip_summed != CHECKSUM_PARTIAL)
1215		return 0;
1216
1217	if (!skb_is_gso(skb))
1218		return 0;
1219
1220	DBGPR("  TSO packet to be processed\n");
1221
1222	return 1;
1223}
1224
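/* Pre-compute the per-packet metadata used by the descriptor setup code:
 * the number of descriptors required (including an optional context
 * descriptor for TSO/VLAN changes), the checksum/TSO/VLAN/PTP attributes
 * and the packet/byte counts reported to the stack.
 */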
1225static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1226			     struct xgbe_ring *ring, struct sk_buff *skb,
1227			     struct xgbe_packet_data *packet)
1228{
1229	struct skb_frag_struct *frag;
1230	unsigned int context_desc;
1231	unsigned int len;
1232	unsigned int i;
1233
1234	packet->skb = skb;
1235
1236	context_desc = 0;
1237	packet->rdesc_count = 0;
1238
1239	packet->tx_packets = 1;
1240	packet->tx_bytes = skb->len;
1241
1242	if (xgbe_is_tso(skb)) {
1243		/* TSO requires an extra descriptor if mss is different */
1244		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
1245			context_desc = 1;
1246			packet->rdesc_count++;
1247		}
1248
1249		/* TSO requires an extra descriptor for TSO header */
1250		packet->rdesc_count++;
1251
1252		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1253			       TSO_ENABLE, 1);
1254		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1255			       CSUM_ENABLE, 1);
1256	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
1257		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1258			       CSUM_ENABLE, 1);
1259
1260	if (skb_vlan_tag_present(skb)) {
1261		/* VLAN requires an extra descriptor if tag is different */
1262		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
1263			/* We can share with the TSO context descriptor */
1264			if (!context_desc) {
1265				context_desc = 1;
1266				packet->rdesc_count++;
1267			}
1268
1269		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1270			       VLAN_CTAG, 1);
1271	}
1272
1273	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1274	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
1275		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1276			       PTP, 1);
1277
1278	for (len = skb_headlen(skb); len;) {
1279		packet->rdesc_count++;
1280		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1281	}
1282
1283	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1284		frag = &skb_shinfo(skb)->frags[i];
1285		for (len = skb_frag_size(frag); len; ) {
1286			packet->rdesc_count++;
1287			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1288		}
1289	}
1290}
1291
1292static int xgbe_open(struct net_device *netdev)
1293{
1294	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1295	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1296	int ret;
1297
1298	DBGPR("-->xgbe_open\n");
1299
1300	/* Initialize the phy */
1301	ret = xgbe_phy_init(pdata);
1302	if (ret)
1303		return ret;
1304
1305	/* Enable the clocks */
1306	ret = clk_prepare_enable(pdata->sysclk);
1307	if (ret) {
1308		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
1309		return ret;
1310	}
1311
1312	ret = clk_prepare_enable(pdata->ptpclk);
1313	if (ret) {
1314		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
1315		goto err_sysclk;
1316	}
1317
1318	/* Calculate the Rx buffer size before allocating rings */
1319	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
1320	if (ret < 0)
1321		goto err_ptpclk;
1322	pdata->rx_buf_size = ret;
1323
1324	/* Allocate the channel and ring structures */
1325	ret = xgbe_alloc_channels(pdata);
1326	if (ret)
1327		goto err_ptpclk;
1328
1329	/* Allocate the ring descriptors and buffers */
1330	ret = desc_if->alloc_ring_resources(pdata);
1331	if (ret)
1332		goto err_channels;
1333
1334	INIT_WORK(&pdata->service_work, xgbe_service);
1335	INIT_WORK(&pdata->restart_work, xgbe_restart);
1336	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1337	xgbe_init_timers(pdata);
1338
1339	ret = xgbe_start(pdata);
1340	if (ret)
1341		goto err_rings;
1342
1343	clear_bit(XGBE_DOWN, &pdata->dev_state);
1344
1345	DBGPR("<--xgbe_open\n");
1346
1347	return 0;
1348
1349err_rings:
1350	desc_if->free_ring_resources(pdata);
1351
1352err_channels:
1353	xgbe_free_channels(pdata);
1354
1355err_ptpclk:
1356	clk_disable_unprepare(pdata->ptpclk);
1357
1358err_sysclk:
1359	clk_disable_unprepare(pdata->sysclk);
1360
1361	return ret;
1362}
1363
1364static int xgbe_close(struct net_device *netdev)
1365{
1366	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1367	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1368
1369	DBGPR("-->xgbe_close\n");
1370
1371	/* Stop the device */
1372	xgbe_stop(pdata);
1373
1374	/* Free the ring descriptors and buffers */
1375	desc_if->free_ring_resources(pdata);
1376
1377	/* Free the channel and ring structures */
1378	xgbe_free_channels(pdata);
1379
1380	/* Disable the clocks */
1381	clk_disable_unprepare(pdata->ptpclk);
1382	clk_disable_unprepare(pdata->sysclk);
1383
1384	set_bit(XGBE_DOWN, &pdata->dev_state);
1385
1386	DBGPR("<--xgbe_close\n");
1387
1388	return 0;
1389}
1390
1391static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1392{
1393	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1394	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1395	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1396	struct xgbe_channel *channel;
1397	struct xgbe_ring *ring;
1398	struct xgbe_packet_data *packet;
1399	struct netdev_queue *txq;
1400	int ret;
1401
1402	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
1403
1404	channel = pdata->channel + skb->queue_mapping;
1405	txq = netdev_get_tx_queue(netdev, channel->queue_index);
1406	ring = channel->tx_ring;
1407	packet = &ring->packet_data;
1408
1409	ret = NETDEV_TX_OK;
1410
1411	if (skb->len == 0) {
1412		netif_err(pdata, tx_err, netdev,
1413			  "empty skb received from stack\n");
1414		dev_kfree_skb_any(skb);
1415		goto tx_netdev_return;
1416	}
1417
1418	/* Calculate preliminary packet info */
1419	memset(packet, 0, sizeof(*packet));
1420	xgbe_packet_info(pdata, ring, skb, packet);
1421
1422	/* Check that there are enough descriptors available */
1423	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
1424	if (ret)
1425		goto tx_netdev_return;
1426
1427	ret = xgbe_prep_tso(skb, packet);
1428	if (ret) {
1429		netif_err(pdata, tx_err, netdev,
1430			  "error processing TSO packet\n");
1431		dev_kfree_skb_any(skb);
1432		goto tx_netdev_return;
1433	}
1434	xgbe_prep_vlan(skb, packet);
1435
1436	if (!desc_if->map_tx_skb(channel, skb)) {
1437		dev_kfree_skb_any(skb);
1438		goto tx_netdev_return;
1439	}
1440
1441	xgbe_prep_tx_tstamp(pdata, skb, packet);
1442
1443	/* Report on the actual number of bytes (to be) sent */
1444	netdev_tx_sent_queue(txq, packet->tx_bytes);
1445
1446	/* Configure required descriptor fields for transmission */
1447	hw_if->dev_xmit(channel);
1448
1449	if (netif_msg_pktdata(pdata))
1450		xgbe_print_pkt(netdev, skb, true);
1451
1452	/* Stop the queue in advance if there may not be enough descriptors */
1453	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
1454
1455	ret = NETDEV_TX_OK;
1456
1457tx_netdev_return:
1458	return ret;
1459}
1460
1461static void xgbe_set_rx_mode(struct net_device *netdev)
1462{
1463	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1464	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1465
1466	DBGPR("-->xgbe_set_rx_mode\n");
1467
1468	hw_if->config_rx_mode(pdata);
1469
1470	DBGPR("<--xgbe_set_rx_mode\n");
1471}
1472
1473static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
1474{
1475	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1476	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1477	struct sockaddr *saddr = addr;
1478
1479	DBGPR("-->xgbe_set_mac_address\n");
1480
1481	if (!is_valid_ether_addr(saddr->sa_data))
1482		return -EADDRNOTAVAIL;
1483
1484	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
1485
1486	hw_if->set_mac_address(pdata, netdev->dev_addr);
1487
1488	DBGPR("<--xgbe_set_mac_address\n");
1489
1490	return 0;
1491}
1492
1493static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
1494{
1495	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1496	int ret;
1497
1498	switch (cmd) {
1499	case SIOCGHWTSTAMP:
1500		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
1501		break;
1502
1503	case SIOCSHWTSTAMP:
1504		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
1505		break;
1506
1507	default:
1508		ret = -EOPNOTSUPP;
1509	}
1510
1511	return ret;
1512}
1513
1514static int xgbe_change_mtu(struct net_device *netdev, int mtu)
1515{
1516	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1517	int ret;
1518
1519	DBGPR("-->xgbe_change_mtu\n");
1520
1521	ret = xgbe_calc_rx_buf_size(netdev, mtu);
1522	if (ret < 0)
1523		return ret;
1524
1525	pdata->rx_buf_size = ret;
1526	netdev->mtu = mtu;
1527
1528	xgbe_restart_dev(pdata);
1529
1530	DBGPR("<--xgbe_change_mtu\n");
1531
1532	return 0;
1533}
1534
1535static void xgbe_tx_timeout(struct net_device *netdev)
1536{
1537	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1538
1539	netdev_warn(netdev, "tx timeout, device restarting\n");
1540	schedule_work(&pdata->restart_work);
1541}
1542
1543static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
1544						  struct rtnl_link_stats64 *s)
1545{
1546	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1547	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
1548
1549	DBGPR("-->%s\n", __func__);
1550
1551	pdata->hw_if.read_mmc_stats(pdata);
1552
1553	s->rx_packets = pstats->rxframecount_gb;
1554	s->rx_bytes = pstats->rxoctetcount_gb;
1555	s->rx_errors = pstats->rxframecount_gb -
1556		       pstats->rxbroadcastframes_g -
1557		       pstats->rxmulticastframes_g -
1558		       pstats->rxunicastframes_g;
1559	s->multicast = pstats->rxmulticastframes_g;
1560	s->rx_length_errors = pstats->rxlengtherror;
1561	s->rx_crc_errors = pstats->rxcrcerror;
1562	s->rx_fifo_errors = pstats->rxfifooverflow;
1563
1564	s->tx_packets = pstats->txframecount_gb;
1565	s->tx_bytes = pstats->txoctetcount_gb;
1566	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
1567	s->tx_dropped = netdev->stats.tx_dropped;
1568
1569	DBGPR("<--%s\n", __func__);
1570
1571	return s;
1572}
1573
1574static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1575				u16 vid)
1576{
1577	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1578	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1579
1580	DBGPR("-->%s\n", __func__);
1581
1582	set_bit(vid, pdata->active_vlans);
1583	hw_if->update_vlan_hash_table(pdata);
1584
1585	DBGPR("<--%s\n", __func__);
1586
1587	return 0;
1588}
1589
1590static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1591				 u16 vid)
1592{
1593	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1594	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1595
1596	DBGPR("-->%s\n", __func__);
1597
1598	clear_bit(vid, pdata->active_vlans);
1599	hw_if->update_vlan_hash_table(pdata);
1600
1601	DBGPR("<--%s\n", __func__);
1602
1603	return 0;
1604}
1605
1606#ifdef CONFIG_NET_POLL_CONTROLLER
1607static void xgbe_poll_controller(struct net_device *netdev)
1608{
1609	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1610	struct xgbe_channel *channel;
1611	unsigned int i;
1612
1613	DBGPR("-->xgbe_poll_controller\n");
1614
1615	if (pdata->per_channel_irq) {
1616		channel = pdata->channel;
1617		for (i = 0; i < pdata->channel_count; i++, channel++)
1618			xgbe_dma_isr(channel->dma_irq, channel);
1619	} else {
1620		disable_irq(pdata->dev_irq);
1621		xgbe_isr(pdata->dev_irq, pdata);
1622		enable_irq(pdata->dev_irq);
1623	}
1624
1625	DBGPR("<--xgbe_poll_controller\n");
1626}
1627#endif /* End CONFIG_NET_POLL_CONTROLLER */
1628
1629static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
1630{
1631	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1632	unsigned int offset, queue;
1633	u8 i;
1634
1635	if (tc && (tc != pdata->hw_feat.tc_cnt))
1636		return -EINVAL;
1637
1638	if (tc) {
1639		netdev_set_num_tc(netdev, tc);
1640		for (i = 0, queue = 0, offset = 0; i < tc; i++) {
1641			while ((queue < pdata->tx_q_count) &&
1642			       (pdata->q2tc_map[queue] == i))
1643				queue++;
1644
1645			netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
1646				  i, offset, queue - 1);
1647			netdev_set_tc_queue(netdev, i, queue - offset, offset);
1648			offset = queue;
1649		}
1650	} else {
1651		netdev_reset_tc(netdev);
1652	}
1653
1654	return 0;
1655}
1656
1657static int xgbe_set_features(struct net_device *netdev,
1658			     netdev_features_t features)
1659{
1660	struct xgbe_prv_data *pdata = netdev_priv(netdev);
1661	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1662	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
1663	int ret = 0;
1664
1665	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
1666	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
1667	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
1668	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
1669
1670	if ((features & NETIF_F_RXHASH) && !rxhash)
1671		ret = hw_if->enable_rss(pdata);
1672	else if (!(features & NETIF_F_RXHASH) && rxhash)
1673		ret = hw_if->disable_rss(pdata);
1674	if (ret)
1675		return ret;
1676
1677	if ((features & NETIF_F_RXCSUM) && !rxcsum)
1678		hw_if->enable_rx_csum(pdata);
1679	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
1680		hw_if->disable_rx_csum(pdata);
1681
1682	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
1683		hw_if->enable_rx_vlan_stripping(pdata);
1684	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
1685		hw_if->disable_rx_vlan_stripping(pdata);
1686
1687	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
1688		hw_if->enable_rx_vlan_filtering(pdata);
1689	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
1690		hw_if->disable_rx_vlan_filtering(pdata);
1691
1692	pdata->netdev_features = features;
1693
1694	DBGPR("<--xgbe_set_features\n");
1695
1696	return 0;
1697}
1698
1699static const struct net_device_ops xgbe_netdev_ops = {
1700	.ndo_open		= xgbe_open,
1701	.ndo_stop		= xgbe_close,
1702	.ndo_start_xmit		= xgbe_xmit,
1703	.ndo_set_rx_mode	= xgbe_set_rx_mode,
1704	.ndo_set_mac_address	= xgbe_set_mac_address,
1705	.ndo_validate_addr	= eth_validate_addr,
1706	.ndo_do_ioctl		= xgbe_ioctl,
1707	.ndo_change_mtu		= xgbe_change_mtu,
1708	.ndo_tx_timeout		= xgbe_tx_timeout,
1709	.ndo_get_stats64	= xgbe_get_stats64,
1710	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
1711	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
1712#ifdef CONFIG_NET_POLL_CONTROLLER
1713	.ndo_poll_controller	= xgbe_poll_controller,
1714#endif
1715	.ndo_setup_tc		= xgbe_setup_tc,
1716	.ndo_set_features	= xgbe_set_features,
1717};
1718
1719struct net_device_ops *xgbe_get_netdev_ops(void)
1720{
1721	return (struct net_device_ops *)&xgbe_netdev_ops;
1722}
1723
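/* Re-arm Rx descriptors that the hardware has handed back: unmap the old
 * buffer, map a fresh one and update the Rx tail pointer so the DMA engine
 * can continue filling the ring.
 */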
1724static void xgbe_rx_refresh(struct xgbe_channel *channel)
1725{
1726	struct xgbe_prv_data *pdata = channel->pdata;
1727	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1728	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1729	struct xgbe_ring *ring = channel->rx_ring;
1730	struct xgbe_ring_data *rdata;
1731
1732	while (ring->dirty != ring->cur) {
1733		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1734
1735		/* Reset rdata values */
1736		desc_if->unmap_rdata(pdata, rdata);
1737
1738		if (desc_if->map_rx_buffer(pdata, ring, rdata))
1739			break;
1740
1741		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
1742
1743		ring->dirty++;
1744	}
1745
1746	/* Make sure everything is written before the register write */
1747	wmb();
1748
	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry
	 */
1751	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
1752	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1753			  lower_32_bits(rdata->rdesc_dma));
1754}
1755
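/* Build an skb for a received descriptor: copy the (split) header into the
 * linear area and attach the remaining data, if any, as a page fragment.
 */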
1756static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1757				       struct napi_struct *napi,
1758				       struct xgbe_ring_data *rdata,
1759				       unsigned int len)
1760{
1761	struct sk_buff *skb;
1762	u8 *packet;
1763	unsigned int copy_len;
1764
1765	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
1766	if (!skb)
1767		return NULL;
1768
1769	/* Start with the header buffer which may contain just the header
1770	 * or the header plus data
1771	 */
1772	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
1773				      rdata->rx.hdr.dma_off,
1774				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
1775
1776	packet = page_address(rdata->rx.hdr.pa.pages) +
1777		 rdata->rx.hdr.pa.pages_offset;
1778	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
1779	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
1780	skb_copy_to_linear_data(skb, packet, copy_len);
1781	skb_put(skb, copy_len);
1782
1783	len -= copy_len;
1784	if (len) {
1785		/* Add the remaining data as a frag */
1786		dma_sync_single_range_for_cpu(pdata->dev,
1787					      rdata->rx.buf.dma_base,
1788					      rdata->rx.buf.dma_off,
1789					      rdata->rx.buf.dma_len,
1790					      DMA_FROM_DEVICE);
1791
1792		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1793				rdata->rx.buf.pa.pages,
1794				rdata->rx.buf.pa.pages_offset,
1795				len, rdata->rx.buf.dma_len);
1796		rdata->rx.buf.pa.pages = NULL;
1797	}
1798
1799	return skb;
1800}
1801
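/* Reclaim completed Tx descriptors (bounded by XGBE_TX_DESC_MAX_PROC per
 * call), report the completed work to the stack and restart the queue once
 * enough descriptors are free again.
 */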
1802static int xgbe_tx_poll(struct xgbe_channel *channel)
1803{
1804	struct xgbe_prv_data *pdata = channel->pdata;
1805	struct xgbe_hw_if *hw_if = &pdata->hw_if;
1806	struct xgbe_desc_if *desc_if = &pdata->desc_if;
1807	struct xgbe_ring *ring = channel->tx_ring;
1808	struct xgbe_ring_data *rdata;
1809	struct xgbe_ring_desc *rdesc;
1810	struct net_device *netdev = pdata->netdev;
1811	struct netdev_queue *txq;
1812	int processed = 0;
1813	unsigned int tx_packets = 0, tx_bytes = 0;
1814	unsigned int cur;
1815
1816	DBGPR("-->xgbe_tx_poll\n");
1817
1818	/* Nothing to do if there isn't a Tx ring for this channel */
1819	if (!ring)
1820		return 0;
1821
1822	cur = ring->cur;
1823
1824	/* Be sure we get ring->cur before accessing descriptor data */
1825	smp_rmb();
1826
1827	txq = netdev_get_tx_queue(netdev, channel->queue_index);
1828
1829	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
1830	       (ring->dirty != cur)) {
1831		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1832		rdesc = rdata->rdesc;
1833
1834		if (!hw_if->tx_complete(rdesc))
1835			break;
1836
		/* Make sure descriptor fields are read after reading the OWN
		 * bit
		 */
1839		dma_rmb();
1840
1841		if (netif_msg_tx_done(pdata))
1842			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
1843
1844		if (hw_if->is_last_desc(rdesc)) {
1845			tx_packets += rdata->tx.packets;
1846			tx_bytes += rdata->tx.bytes;
1847		}
1848
1849		/* Free the SKB and reset the descriptor for re-use */
1850		desc_if->unmap_rdata(pdata, rdata);
1851		hw_if->tx_desc_reset(rdata);
1852
1853		processed++;
1854		ring->dirty++;
1855	}
1856
1857	if (!processed)
1858		return 0;
1859
1860	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
1861
1862	if ((ring->tx.queue_stopped == 1) &&
1863	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
1864		ring->tx.queue_stopped = 0;
1865		netif_tx_wake_queue(txq);
1866	}
1867
1868	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
1869
1870	return processed;
1871}
1872
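/* Receive packets for a channel within the NAPI budget. A packet may span
 * several descriptors and may be followed by a context descriptor; if a
 * partial packet is still pending when the budget runs out, its state is
 * saved in the ring data and restored on the next poll.
 */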
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int incomplete, error, context_next, context;
	unsigned int len, rdesc_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

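		/* INCOMPLETE means the packet continues in the next
		 * descriptor; CONTEXT_NEXT means a context descriptor (e.g.
		 * carrying the Rx timestamp) follows; CONTEXT marks this
		 * descriptor itself as a context descriptor with no packet
		 * data.
		 */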
		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			rdesc_len = rdata->rx.len - len;
			len += rdesc_len;

			if (rdesc_len && !skb) {
				skb = xgbe_create_skb(pdata, napi, rdata,
						      rdesc_len);
				if (!skb)
					error = 1;
			} else if (rdesc_len) {
				dma_sync_single_range_for_cpu(pdata->dev,
							rdata->rx.buf.dma_base,
							rdata->rx.buf.dma_off,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						rdesc_len,
						rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);

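		/* Translate descriptor attributes into skb metadata:
		 * checksum status, VLAN tag, hardware timestamp and RSS
		 * hash.
		 */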
		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);
		skb_mark_napi_id(skb, napi);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}

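/* NAPI poll handler used when each channel has its own DMA interrupt:
 * clean the Tx ring, process the Rx ring and, if the budget was not
 * exhausted, complete NAPI and re-enable the channel's DMA interrupt.
 */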
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}

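/* NAPI poll handler used when all channels share a single interrupt:
 * service every channel from one NAPI context, dividing the budget evenly
 * among the Rx rings.
 */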
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
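	/* Keep looping over all channels until either the budget is
	 * consumed or a full pass over the channels makes no progress.
	 */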
	do {
		last_processed = processed;

		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete(napi);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}

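/* Debug helper: dump the raw contents of 'count' Tx descriptors starting
 * at 'idx', noting whether they were just queued or already processed by
 * the device.
 */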
void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx, unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   le32_to_cpu(rdesc->desc0),
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
	}
}

void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}

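/* Debug helper: dump an skb's Ethernet header fields followed by a hex
 * dump of the payload, 32 bytes per line with a space every four bytes
 * and a double space every sixteen.
 */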
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	char buffer[128];
	unsigned int i, j;

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");

	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);

	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}
	if (i % 32) {
		/* Terminate any trailing separator spaces before printing
		 * the final partial line
		 */
		buffer[j] = '\0';
		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
	}

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}
